From d0bc9e268d33b495cadeafc6613029989bc26aac Mon Sep 17 00:00:00 2001
From: Michael D
Date: Sat, 21 Nov 2020 03:07:47 +0100
Subject: [PATCH 0001/1543] Add solution for Project Euler problem 188 (#2880)

* Project Euler problem 188 solution
* fix superscript notation
* split out modexpt() function, and rename parameters
* Add some more doctest, and add type hints
* Add some reference links
* Update docstrings and mark helper function private
* Fix doctests and remove/improve redundant comments
* fix as per style guide
---
 project_euler/problem_188/__init__.py |  0
 project_euler/problem_188/sol1.py     | 68 +++++++++++++++++++++++++++
 2 files changed, 68 insertions(+)
 create mode 100644 project_euler/problem_188/__init__.py
 create mode 100644 project_euler/problem_188/sol1.py

diff --git a/project_euler/problem_188/__init__.py b/project_euler/problem_188/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/project_euler/problem_188/sol1.py b/project_euler/problem_188/sol1.py
new file mode 100644
index 000000000000..6473c63620ed
--- /dev/null
+++ b/project_euler/problem_188/sol1.py
@@ -0,0 +1,68 @@
+"""
+Project Euler Problem 188: https://projecteuler.net/problem=188
+
+The hyperexponentiation of a number
+
+The hyperexponentiation or tetration of a number a by a positive integer b,
+denoted by a↑↑b or ᵇa, is recursively defined by:
+
+a↑↑1 = a,
+a↑↑(k+1) = a^(a↑↑k).
+
+Thus we have e.g. 3↑↑2 = 3^3 = 27, hence 3↑↑3 = 3^27 = 7625597484987 and
+3↑↑4 is roughly 10^(3.6383346400240996*10^12).
+
+Find the last 8 digits of 1777↑↑1855.
+
+References:
+    - https://en.wikipedia.org/wiki/Tetration
+"""
+
+
+# small helper function for modular exponentiation
+def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
+    """
+    Returns the modular exponentiation, that is the value
+    of `base ** exponent % modulo_value`, without calculating
+    the actual number.
+    >>> _modexpt(2, 4, 10)
+    6
+    >>> _modexpt(2, 1024, 100)
+    16
+    >>> _modexpt(13, 65535, 7)
+    6
+    """
+
+    if exponent == 1:
+        return base
+    if exponent % 2 == 0:
+        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
+        return (x * x) % modulo_value
+    else:
+        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
+
+
+def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
+    """
+    Returns the last `digits` digits of the hyperexponentiation of base by
+    height, i.e.
the number base↑↑height: + + >>> solution(base=3, height=2) + 27 + >>> solution(base=3, height=3) + 97484987 + >>> solution(base=123, height=456, digits=4) + 2547 + """ + + # calculate base↑↑height by right-assiciative repeated modular + # exponentiation + result = base + for i in range(1, height): + result = _modexpt(base, result, 10 ** digits) + + return result + + +if __name__ == "__main__": + print(f"{solution() = }") From 28d33f4b2d72b6de8730d4526de12975364fdca0 Mon Sep 17 00:00:00 2001 From: Peter Yao Date: Fri, 20 Nov 2020 18:42:07 -0800 Subject: [PATCH 0002/1543] Project Euler 70 Solution (#3041) * Add solution for Project Euler 70, Fixes: #2695 * Remove parameter from solution() * Add tests for all functions, add fstring and positional arg for solution() * Rename directory to 070 * Move up explanation to module code block * Move solution() below helper functions, rename variables * Remove whitespace from defining min_numerator * Add whitespace * Improve type hints with typing.List Co-authored-by: Dhruv Manilawala --- DIRECTORY.md | 2 + project_euler/problem_070/__init__.py | 0 project_euler/problem_070/sol1.py | 119 ++++++++++++++++++++++++++ 3 files changed, 121 insertions(+) create mode 100644 project_euler/problem_070/__init__.py create mode 100644 project_euler/problem_070/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index cd8f6fb8578c..71da6a402b31 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -697,6 +697,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_067/sol1.py) * Problem 069 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_069/sol1.py) + * Problem 070 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_070/sol1.py) * Problem 071 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_071/sol1.py) * Problem 072 diff --git a/project_euler/problem_070/__init__.py b/project_euler/problem_070/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_070/sol1.py b/project_euler/problem_070/sol1.py new file mode 100644 index 000000000000..9d27119ba95c --- /dev/null +++ b/project_euler/problem_070/sol1.py @@ -0,0 +1,119 @@ +""" +Project Euler Problem 70: https://projecteuler.net/problem=70 + +Euler's Totient function, φ(n) [sometimes called the phi function], is used to +determine the number of positive numbers less than or equal to n which are +relatively prime to n. For example, as 1, 2, 4, 5, 7, and 8, are all less than +nine and relatively prime to nine, φ(9)=6. + +The number 1 is considered to be relatively prime to every positive number, so +φ(1)=1. + +Interestingly, φ(87109)=79180, and it can be seen that 87109 is a permutation +of 79180. + +Find the value of n, 1 < n < 10^7, for which φ(n) is a permutation of n and +the ratio n/φ(n) produces a minimum. + +----- + +This is essentially brute force. Calculate all totients up to 10^7 and +find the minimum ratio of n/φ(n) that way. To minimize the ratio, we want +to minimize n and maximize φ(n) as much as possible, so we can store the +minimum fraction's numerator and denominator and calculate new fractions +with each totient to compare against. To avoid dividing by zero, I opt to +use cross multiplication. 
+ +References: +Finding totients +https://en.wikipedia.org/wiki/Euler's_totient_function#Euler's_product_formula +""" +from typing import List + + +def get_totients(max_one: int) -> List[int]: + """ + Calculates a list of totients from 0 to max_one exclusive, using the + definition of Euler's product formula. + + >>> get_totients(5) + [0, 1, 1, 2, 2] + + >>> get_totients(10) + [0, 1, 1, 2, 2, 4, 2, 6, 4, 6] + """ + totients = [0] * max_one + + for i in range(0, max_one): + totients[i] = i + + for i in range(2, max_one): + if totients[i] == i: + for j in range(i, max_one, i): + totients[j] -= totients[j] // i + + return totients + + +def has_same_digits(num1: int, num2: int) -> bool: + """ + Return True if num1 and num2 have the same frequency of every digit, False + otherwise. + + digits[] is a frequency table where the index represents the digit from + 0-9, and the element stores the number of appearances. Increment the + respective index every time you see the digit in num1, and decrement if in + num2. At the end, if the numbers have the same digits, every index must + contain 0. + + >>> has_same_digits(123456789, 987654321) + True + + >>> has_same_digits(123, 12) + False + + >>> has_same_digits(1234566, 123456) + False + """ + digits = [0] * 10 + + while num1 > 0 and num2 > 0: + digits[num1 % 10] += 1 + digits[num2 % 10] -= 1 + num1 //= 10 + num2 //= 10 + + for digit in digits: + if digit != 0: + return False + + return True + + +def solution(max: int = 10000000) -> int: + """ + Finds the value of n from 1 to max such that n/φ(n) produces a minimum. + + >>> solution(100) + 21 + + >>> solution(10000) + 4435 + """ + + min_numerator = 1 # i + min_denominator = 0 # φ(i) + totients = get_totients(max + 1) + + for i in range(2, max + 1): + t = totients[i] + + if i * min_denominator < min_numerator * t and has_same_digits(i, t): + min_numerator = i + min_denominator = t + + return min_numerator + + +if __name__ == "__main__": + print(f"{solution() = }") From c938e7311ff0ab2c06399348aa261b8d2ea70d3e Mon Sep 17 00:00:00 2001 From: fpringle Date: Sat, 21 Nov 2020 03:52:26 +0100 Subject: [PATCH 0003/1543] Added solution for Project Euler problem 129. (#3113) * Added solution for Project Euler problem 129. * Added doctest for solution() in project_euler/problem_129/sol1.py * Update formatting. Reference: #3256 * More descriptive function and variable names, more doctests. --- project_euler/problem_129/__init__.py | 0 project_euler/problem_129/sol1.py | 57 +++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 project_euler/problem_129/__init__.py create mode 100644 project_euler/problem_129/sol1.py diff --git a/project_euler/problem_129/__init__.py b/project_euler/problem_129/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_129/sol1.py b/project_euler/problem_129/sol1.py new file mode 100644 index 000000000000..8afe82df162e --- /dev/null +++ b/project_euler/problem_129/sol1.py @@ -0,0 +1,57 @@ +""" +Project Euler Problem 129: https://projecteuler.net/problem=129 + +A number consisting entirely of ones is called a repunit. We shall define R(k) to be +a repunit of length k; for example, R(6) = 111111. + +Given that n is a positive integer and GCD(n, 10) = 1, it can be shown that there +always exists a value, k, for which R(k) is divisible by n, and let A(n) be the least +such value of k; for example, A(7) = 6 and A(41) = 5. + +The least value of n for which A(n) first exceeds ten is 17. 
+ +Find the least value of n for which A(n) first exceeds one-million. +""" + + +def least_divisible_repunit(divisor: int) -> int: + """ + Return the least value k such that the Repunit of length k is divisible by divisor. + >>> least_divisible_repunit(7) + 6 + >>> least_divisible_repunit(41) + 5 + >>> least_divisible_repunit(1234567) + 34020 + """ + if divisor % 5 == 0 or divisor % 2 == 0: + return 0 + repunit = 1 + repunit_index = 1 + while repunit: + repunit = (10 * repunit + 1) % divisor + repunit_index += 1 + return repunit_index + + +def solution(limit: int = 1000000) -> int: + """ + Return the least value of n for which least_divisible_repunit(n) + first exceeds limit. + >>> solution(10) + 17 + >>> solution(100) + 109 + >>> solution(1000) + 1017 + """ + divisor = limit - 1 + if divisor % 2 == 0: + divisor += 1 + while least_divisible_repunit(divisor) <= limit: + divisor += 2 + return divisor + + +if __name__ == "__main__": + print(f"{solution() = }") From 06f01c0eeb4ddae0c829da610c2e9b5283893727 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Sat, 21 Nov 2020 09:04:49 +0530 Subject: [PATCH 0004/1543] Remove stale action workflow file (#3915) --- .github/workflows/stale.yml | 31 ------------------------------- 1 file changed, 31 deletions(-) delete mode 100644 .github/workflows/stale.yml diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml deleted file mode 100644 index 42353d233a29..000000000000 --- a/.github/workflows/stale.yml +++ /dev/null @@ -1,31 +0,0 @@ -# name: Mark/Close stale issues and pull requests -# on: -# schedule: -# - cron: "0 * * * *" # Run every hour -# jobs: -# stale: -# runs-on: ubuntu-latest -# steps: -# - uses: actions/stale@v3.0.13 -# with: -# repo-token: ${{ secrets.GITHUB_TOKEN }} -# days-before-stale: 30 -# days-before-close: 7 -# stale-issue-message: > -# This issue has been automatically marked as stale because it has not had -# recent activity. It will be closed if no further activity occurs. Thank you -# for your contributions. -# close-issue-message: > -# Please reopen this issue once you add more information and updates here. -# If this is not the case and you need some help, feel free to seek help -# from our [Gitter](https://gitter.im/TheAlgorithms) or ping one of the -# reviewers. Thank you for your contributions! -# stale-pr-message: > -# This pull request has been automatically marked as stale because it has not had -# recent activity. It will be closed if no further activity occurs. Thank you -# for your contributions. -# close-pr-message: > -# Please reopen this pull request once you commit the changes requested -# or make improvements on the code. If this is not the case and you need -# some help, feel free to seek help from our [Gitter](https://gitter.im/TheAlgorithms) -# or ping one of the reviewers. Thank you for your contributions! 
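A note on the repunit search added above in [PATCH 0003/1543] (project_euler/problem_129/sol1.py): since R(k) = (10^k - 1) / 9, a number n divides R(k) exactly when 10^k ≡ 1 (mod 9*n), so for the divisors that solution considers (gcd(n, 10) = 1) least_divisible_repunit(n) is simply the multiplicative order of 10 modulo 9*n. The sketch below is a standalone cross-check of that identity; it is not part of any patch in this series, and the helper name repunit_order is invented for illustration.

def repunit_order(n: int) -> int:
    """Least k with n | R(k): the multiplicative order of 10 modulo 9 * n.

    Returns 0 when no such k exists (n shares a factor with 10), mirroring
    least_divisible_repunit() in the patch above.
    """
    if n % 2 == 0 or n % 5 == 0:
        return 0
    modulus = 9 * n
    k, power = 1, 10 % modulus
    while power != 1:
        power = power * 10 % modulus
        k += 1
    return k


assert repunit_order(7) == 6
assert repunit_order(41) == 5
assert repunit_order(17) == 16  # the least n with A(n) > 10, as stated in the problem
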
From b55e132b8024c71ff186b0c741fb11a070ee2265 Mon Sep 17 00:00:00 2001 From: Cory Metcalfe Date: Fri, 20 Nov 2020 23:29:29 -0600 Subject: [PATCH 0005/1543] Add solution for Project Euler: Problem 89 (#2948) * add solution for euler problem 89 * updates to accommodate euler solution guideline updates * use more descriptive vars * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 6 + project_euler/problem_089/__init__.py | 1 + .../problem_089/numeralcleanup_test.txt | 5 + project_euler/problem_089/p089_roman.txt | 1000 +++++++++++++++++ project_euler/problem_089/sol1.py | 141 +++ 5 files changed, 1153 insertions(+) create mode 100644 project_euler/problem_089/__init__.py create mode 100644 project_euler/problem_089/numeralcleanup_test.txt create mode 100644 project_euler/problem_089/p089_roman.txt create mode 100644 project_euler/problem_089/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 71da6a402b31..2b3f3073c3d4 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -717,6 +717,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_081/sol1.py) * Problem 087 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_087/sol1.py) + * Problem 089 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_089/sol1.py) * Problem 091 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_091/sol1.py) * Problem 097 @@ -735,10 +737,14 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_123/sol1.py) * Problem 125 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_125/sol1.py) + * Problem 129 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_129/sol1.py) * Problem 173 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_173/sol1.py) * Problem 174 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_174/sol1.py) + * Problem 188 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_188/sol1.py) * Problem 191 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_191/sol1.py) * Problem 203 diff --git a/project_euler/problem_089/__init__.py b/project_euler/problem_089/__init__.py new file mode 100644 index 000000000000..792d6005489e --- /dev/null +++ b/project_euler/problem_089/__init__.py @@ -0,0 +1 @@ +# diff --git a/project_euler/problem_089/numeralcleanup_test.txt b/project_euler/problem_089/numeralcleanup_test.txt new file mode 100644 index 000000000000..06142142cca9 --- /dev/null +++ b/project_euler/problem_089/numeralcleanup_test.txt @@ -0,0 +1,5 @@ +IIII +IV +IIIIIIIIII +X +VIIIII diff --git a/project_euler/problem_089/p089_roman.txt b/project_euler/problem_089/p089_roman.txt new file mode 100644 index 000000000000..50651c355a5b --- /dev/null +++ b/project_euler/problem_089/p089_roman.txt @@ -0,0 +1,1000 @@ +MMMMDCLXXII +MMDCCCLXXXIII +MMMDLXVIIII +MMMMDXCV +DCCCLXXII +MMCCCVI +MMMCDLXXXVII +MMMMCCXXI +MMMCCXX +MMMMDCCCLXXIII +MMMCCXXXVII +MMCCCLXXXXIX +MDCCCXXIIII +MMCXCVI +CCXCVIII +MMMCCCXXXII +MDCCXXX +MMMDCCCL +MMMMCCLXXXVI +MMDCCCXCVI +MMMDCII +MMMCCXII +MMMMDCCCCI +MMDCCCXCII +MDCXX +CMLXXXVII +MMMXXI +MMMMCCCXIV +MLXXII +MCCLXXVIIII +MMMMCCXXXXI +MMDCCCLXXII +MMMMXXXI +MMMDCCLXXX +MMDCCCLXXIX +MMMMLXXXV +MCXXI +MDCCCXXXVII +MMCCCLXVII +MCDXXXV +CCXXXIII +CMXX +MMMCLXIV +MCCCLXXXVI +DCCCXCVIII 
+MMMDCCCCXXXIV +CDXVIIII +MMCCXXXV +MDCCCXXXII +MMMMD +MMDCCLXIX +MMMMCCCLXXXXVI +MMDCCXLII +MMMDCCCVIIII +DCCLXXXIIII +MDCCCCXXXII +MMCXXVII +DCCCXXX +CCLXIX +MMMXI +MMMMCMLXXXXVIII +MMMMDLXXXVII +MMMMDCCCLX +MMCCLIV +CMIX +MMDCCCLXXXIIII +CLXXXII +MMCCCCXXXXV +MMMMDLXXXVIIII +MMMDCCCXXI +MMDCCCCLXXVI +MCCCCLXX +MMCDLVIIII +MMMDCCCLIX +MMMMCCCCXIX +MMMDCCCLXXV +XXXI +CDLXXXIII +MMMCXV +MMDCCLXIII +MMDXXX +MMMMCCCLVII +MMMDCI +MMMMCDLXXXIIII +MMMMCCCXVI +CCCLXXXVIII +MMMMCML +MMMMXXIV +MMMCCCCXXX +DCCX +MMMCCLX +MMDXXXIII +CCCLXIII +MMDCCXIII +MMMCCCXLIV +CLXXXXI +CXVI +MMMMCXXXIII +CLXX +DCCCXVIII +MLXVII +DLXXXX +MMDXXI +MMMMDLXXXXVIII +MXXII +LXI +DCCCCXLIII +MMMMDV +MMMMXXXIV +MDCCCLVIII +MMMCCLXXII +MMMMDCCXXXVI +MMMMLXXXIX +MDCCCLXXXI +MMMMDCCCXV +MMMMCCCCXI +MMMMCCCLIII +MDCCCLXXI +MMCCCCXI +MLXV +MMCDLXII +MMMMDXXXXII +MMMMDCCCXL +MMMMCMLVI +CCLXXXIV +MMMDCCLXXXVI +MMCLII +MMMCCCCXV +MMLXXXIII +MMMV +MMMV +DCCLXII +MMDCCCCXVI +MMDCXLVIII +CCLIIII +CCCXXV +MMDCCLXXXVIIII +MMMMDCLXXVIII +MMMMDCCCXCI +MMMMCCCXX +MMCCXLV +MMMDCCCLXIX +MMCCLXIIII +MMMDCCCXLIX +MMMMCCCLXIX +CMLXXXXI +MCMLXXXIX +MMCDLXI +MMDCLXXVIII +MMMMDCCLXI +MCDXXV +DL +CCCLXXII +MXVIIII +MCCCCLXVIII +CIII +MMMDCCLXXIIII +MMMDVIII +MMMMCCCLXXXXVII +MMDXXVII +MMDCCLXXXXV +MMMMCXLVI +MMMDCCLXXXII +MMMDXXXVI +MCXXII +CLI +DCLXXXIX +MMMCLI +MDCLXIII +MMMMDCCXCVII +MMCCCLXXXV +MMMDCXXVIII +MMMCDLX +MMMCMLII +MMMIV +MMMMDCCCLVIII +MMMDLXXXVIII +MCXXIV +MMMMLXXVI +CLXXIX +MMMCCCCXXVIIII +DCCLXXXV +MMMDCCCVI +LI +CLXXXVI +MMMMCCCLXXVI +MCCCLXVI +CCXXXIX +MMDXXXXI +MMDCCCXLI +DCCCLXXXVIII +MMMMDCCCIV +MDCCCCXV +MMCMVI +MMMMCMLXXXXV +MMDCCLVI +MMMMCCXLVIII +DCCCCIIII +MMCCCCIII +MMMDCCLXXXVIIII +MDCCCLXXXXV +DVII +MMMV +DCXXV +MMDCCCXCV +DCVIII +MMCDLXVI +MCXXVIII +MDCCXCVIII +MMDCLX +MMMDCCLXIV +MMCDLXXVII +MMDLXXXIIII +MMMMCCCXXII +MMMDCCCXLIIII +DCCCCLXVII +MMMCLXXXXIII +MCCXV +MMMMDCXI +MMMMDCLXXXXV +MMMCCCLII +MMCMIX +MMDCCXXV +MMDLXXXVI +MMMMDCXXVIIII +DCCCCXXXVIIII +MMCCXXXIIII +MMDCCLXXVIII +MDCCLXVIIII +MMCCLXXXV +MMMMDCCCLXXXVIII +MMCMXCI +MDXLII +MMMMDCCXIV +MMMMLI +DXXXXIII +MMDCCXI +MMMMCCLXXXIII +MMMDCCCLXXIII +MDCLVII +MMCD +MCCCXXVII +MMMMDCCIIII +MMMDCCXLVI +MMMCLXXXVII +MMMCCVIIII +MCCCCLXXIX +DL +DCCCLXXVI +MMDXCI +MMMMDCCCCXXXVI +MMCII +MMMDCCCXXXXV +MMMCDXLV +MMDCXXXXIV +MMD +MDCCCLXXXX +MMDCXLIII +MMCCXXXII +MMDCXXXXVIIII +DCCCLXXI +MDXCVIIII +MMMMCCLXXVIII +MDCLVIIII +MMMCCCLXXXIX +MDCLXXXV +MDLVIII +MMMMCCVII +MMMMDCXIV +MMMCCCLXIIII +MMIIII +MMMMCCCLXXIII +CCIII +MMMCCLV +MMMDXIII +MMMCCCXC +MMMDCCCXXI +MMMMCCCCXXXII +CCCLVI +MMMCCCLXXXVI +MXVIIII +MMMCCCCXIIII +CLXVII +MMMCCLXX +CCCCLXIV +MMXXXXII +MMMMCCLXXXX +MXL +CCXVI +CCCCLVIIII +MMCCCII +MCCCLVIII +MMMMCCCX +MCDLXXXXIV +MDCCCXIII +MMDCCCXL +MMMMCCCXXIII +DXXXIV +CVI +MMMMDCLXXX +DCCCVII +MMCMLXIIII +MMMDCCCXXXIII +DCCC +MDIII +MMCCCLXVI +MMMCCCCLXXI +MMDCCCCXVIII +CCXXXVII +CCCXXV +MDCCCXII +MMMCMV +MMMMCMXV +MMMMDCXCI +DXXI +MMCCXLVIIII +MMMMCMLII +MDLXXX +MMDCLXVI +CXXI +MMMDCCCLIIII +MMMCXXI +MCCIII +MMDCXXXXI +CCXCII +MMMMDXXXV +MMMCCCLXV +MMMMDLXV +MMMCCCCXXXII +MMMCCCVIII +DCCCCLXXXXII +MMCLXIV +MMMMCXI +MLXXXXVII +MMMCDXXXVIII +MDXXII +MLV +MMMMDLXVI +MMMCXII +XXXIII +MMMMDCCCXXVI +MMMLXVIIII +MMMLX +MMMCDLXVII +MDCCCLVII +MMCXXXVII +MDCCCCXXX +MMDCCCLXIII +MMMMDCXLIX +MMMMCMXLVIII +DCCCLXXVIIII +MDCCCLIII +MMMCMLXI +MMMMCCLXI +MMDCCCLIII +MMMDCCCVI +MMDXXXXIX +MMCLXXXXV +MMDXXX +MMMXIII +DCLXXIX +DCCLXII +MMMMDCCLXVIII +MDCCXXXXIII +CCXXXII +MMMMDCXXV +MMMCCCXXVIII +MDCVIII +MMMCLXXXXIIII +CLXXXI +MDCCCCXXXIII +MMMMDCXXX +MMMDCXXIV 
+MMMCCXXXVII +MCCCXXXXIIII +CXVIII +MMDCCCCIV +MMMMCDLXXV +MMMDLXIV +MDXCIII +MCCLXXXI +MMMDCCCXXIV +MCXLIII +MMMDCCCI +MCCLXXX +CCXV +MMDCCLXXI +MMDLXXXIII +MMMMDCXVII +MMMCMLXV +MCLXVIII +MMMMCCLXXVI +MMMDCCLXVIIII +MMMMDCCCIX +DLXXXXIX +DCCCXXII +MMMMIII +MMMMCCCLXXVI +DCCCXCIII +DXXXI +MXXXIIII +CCXII +MMMDCCLXXXIIII +MMMCXX +MMMCMXXVII +DCCCXXXX +MMCDXXXVIIII +MMMMDCCXVIII +LV +MMMDCCCCVI +MCCCII +MMCMLXVIIII +MDCCXI +MMMMDLXVII +MMCCCCLXI +MMDCCV +MMMCCCXXXIIII +MMMMDI +MMMDCCCXCV +MMDCCLXXXXI +MMMDXXVI +MMMDCCCLVI +MMDCXXX +MCCCVII +MMMMCCCLXII +MMMMXXV +MMCMXXV +MMLVI +MMDXXX +MMMMCVII +MDC +MCCIII +MMMMDCC +MMCCLXXV +MMDCCCXXXXVI +MMMMCCCLXV +CDXIIII +MLXIIII +CCV +MMMCMXXXI +CCCCLXVI +MDXXXII +MMMMCCCLVIII +MMV +MMMCLII +MCMLI +MMDCCXX +MMMMCCCCXXXVI +MCCLXXXI +MMMCMVI +DCCXXX +MMMMCCCLXV +DCCCXI +MMMMDCCCXIV +CCCXXI +MMDLXXV +CCCCLXXXX +MCCCLXXXXII +MMDCIX +DCCXLIIII +DXIV +MMMMCLII +CDLXI +MMMCXXVII +MMMMDCCCCLXIII +MMMDCLIIII +MCCCCXXXXII +MMCCCLX +CCCCLIII +MDCCLXXVI +MCMXXIII +MMMMDLXXVIII +MMDCCCCLX +MMMCCCLXXXX +MMMCDXXVI +MMMDLVIII +CCCLXI +MMMMDCXXII +MMDCCCXXI +MMDCCXIII +MMMMCLXXXVI +MDCCCCXXVI +MDV +MMDCCCCLXXVI +MMMMCCXXXVII +MMMDCCLXXVIIII +MMMCCCCLXVII +DCCXLI +MMCLXXXVIII +MCCXXXVI +MMDCXLVIII +MMMMCXXXII +MMMMDCCLXVI +MMMMCMLI +MMMMCLXV +MMMMDCCCXCIV +MCCLXXVII +LXXVIIII +DCCLII +MMMCCCXCVI +MMMCLV +MMDCCCXXXXVIII +DCCCXV +MXC +MMDCCLXXXXVII +MMMMCML +MMDCCCLXXVIII +DXXI +MCCCXLI +DCLXXXXI +MMCCCLXXXXVIII +MDCCCCLXXVIII +MMMMDXXV +MMMDCXXXVI +MMMCMXCVII +MMXVIIII +MMMDCCLXXIV +MMMCXXV +DXXXVIII +MMMMCLXVI +MDXII +MMCCCLXX +CCLXXI +DXIV +MMMCLIII +DLII +MMMCCCXLIX +MMCCCCXXVI +MMDCXLIII +MXXXXII +CCCLXXXV +MDCLXXVI +MDCXII +MMMCCCLXXXIII +MMDCCCCLXXXII +MMMMCCCLXXXV +MMDCXXI +DCCCXXX +MMMDCCCCLII +MMMDCCXXII +MMMMCDXCVIII +MMMCCLXVIIII +MMXXV +MMMMCDXIX +MMMMCCCX +MMMCCCCLXVI +MMMMDCLXXVIIII +MMMMDCXXXXIV +MMMCMXII +MMMMXXXIII +MMMMDLXXXII +DCCCLIV +MDXVIIII +MMMCLXXXXV +CCCCXX +MMDIX +MMCMLXXXVIII +DCCXLIII +DCCLX +D +MCCCVII +MMMMCCCLXXXIII +MDCCCLXXIIII +MMMDCCCCLXXXVII +MMMMCCCVII +MMMDCCLXXXXVI +CDXXXIV +MCCLXVIII +MMMMDLX +MMMMDXII +MMMMCCCCLIIII +MCMLXXXXIII +MMMMDCCCIII +MMDCLXXXIII +MDCCCXXXXIV +XXXXVII +MMMDCCCXXXII +MMMDCCCXLII +MCXXXV +MDCXXVIIII +MMMCXXXXIIII +MMMMCDXVII +MMMDXXIII +MMMMCCCCLXI +DCLXXXXVIIII +LXXXXI +CXXXIII +MCDX +MCCLVII +MDCXXXXII +MMMCXXIV +MMMMLXXXX +MMDCCCCXLV +MLXXX +MMDCCCCLX +MCDLIII +MMMCCCLXVII +MMMMCCCLXXIV +MMMDCVIII +DCCCCXXIII +MMXCI +MMDCCIV +MMMMDCCCXXXIV +CCCLXXI +MCCLXXXII +MCMIII +CCXXXI +DCCXXXVIII +MMMMDCCXLVIIII +MMMMCMXXXV +DCCCLXXV +DCCXCI +MMMMDVII +MMMMDCCCLXVIIII +CCCXCV +MMMMDCCXX +MCCCCII +MMMCCCXC +MMMCCCII +MMDCCLXXVII +MMDCLIIII +CCXLIII +MMMDCXVIII +MMMCCCIX +MCXV +MMCCXXV +MLXXIIII +MDCCXXVI +MMMCCCXX +MMDLXX +MMCCCCVI +MMDCCXX +MMMMDCCCCXCV +MDCCCXXXII +MMMMDCCCCXXXX +XCIV +MMCCCCLX +MMXVII +MLXXI +MMMDXXVIII +MDCCCCII +MMMCMLVII +MMCLXXXXVIII +MDCCCCLV +MCCCCLXXIIII +MCCCLII +MCDXLVI +MMMMDXVIII +DCCLXXXIX +MMMDCCLXIV +MDCCCCXLIII +CLXXXXV +MMMMCCXXXVI +MMMDCCCXXI +MMMMCDLXXVII +MCDLIII +MMCCXLVI +DCCCLV +MCDLXX +DCLXXVIII +MMDCXXXIX +MMMMDCLX +MMDCCLI +MMCXXXV +MMMCCXII +MMMMCMLXII +MMMMCCV +MCCCCLXIX +MMMMCCIII +CLXVII +MCCCLXXXXIIII +MMMMDCVIII +MMDCCCLXI +MMLXXIX +CMLXIX +MMDCCCXLVIIII +DCLXII +MMMCCCXLVII +MDCCCXXXV +MMMMDCCXCVI +DCXXX +XXVI +MMLXIX +MMCXI +DCXXXVII +MMMMCCCXXXXVIII +MMMMDCLXI +MMMMDCLXXIIII +MMMMVIII +MMMMDCCCLXII +MDCXCI +MMCCCXXIIII +CCCCXXXXV +MMDCCCXXI +MCVI +MMDCCLXVIII +MMMMCXL +MLXVIII +CMXXVII +CCCLV +MDCCLXXXIX +MMMCCCCLXV +MMDCCLXII +MDLXVI +MMMCCCXVIII +MMMMCCLXXXI 
+MMCXXVII +MMDCCCLXVIII +MMMCXCII +MMMMDCLVIII +MMMMDCCCXXXXII +MMDCCCCLXXXXVI +MDCCXL +MDCCLVII +MMMMDCCCLXXXVI +DCCXXXIII +MMMMDCCCCLXXXV +MMCCXXXXVIII +MMMCCLXXVIII +MMMDCLXXVIII +DCCCI +MMMMLXXXXVIIII +MMMCCCCLXXII +MMCLXXXVII +CCLXVI +MCDXLIII +MMCXXVIII +MDXIV +CCCXCVIII +CLXXVIII +MMCXXXXVIIII +MMMDCLXXXIV +CMLVIII +MCDLIX +MMMMDCCCXXXII +MMMMDCXXXIIII +MDCXXI +MMMDCXLV +MCLXXVIII +MCDXXII +IV +MCDLXXXXIII +MMMMDCCLXV +CCLI +MMMMDCCCXXXVIII +DCLXII +MCCCLXVII +MMMMDCCCXXXVI +MMDCCXLI +MLXI +MMMCDLXVIII +MCCCCXCIII +XXXIII +MMMDCLXIII +MMMMDCL +DCCCXXXXIIII +MMDLVII +DXXXVII +MCCCCXXIIII +MCVII +MMMMDCCXL +MMMMCXXXXIIII +MCCCCXXIV +MMCLXVIII +MMXCIII +MDCCLXXX +MCCCLIIII +MMDCLXXI +MXI +MCMLIV +MMMCCIIII +DCCLXXXVIIII +MDCLIV +MMMDCXIX +CMLXXXI +DCCLXXXVII +XXV +MMMXXXVI +MDVIIII +CLXIII +MMMCDLVIIII +MMCCCCVII +MMMLXX +MXXXXII +MMMMCCCLXVIII +MMDCCCXXVIII +MMMMDCXXXXI +MMMMDCCCXXXXV +MMMXV +MMMMCCXVIIII +MMDCCXIIII +MMMXXVII +MDCCLVIIII +MMCXXIIII +MCCCLXXIV +DCLVIII +MMMLVII +MMMCXLV +MMXCVII +MMMCCCLXXXVII +MMMMCCXXII +DXII +MMMDLV +MCCCLXXVIII +MMMCLIIII +MMMMCLXXXX +MMMCLXXXIIII +MDCXXIII +MMMMCCXVI +MMMMDLXXXIII +MMMDXXXXIII +MMMMCCCCLV +MMMDLXXXI +MMMCCLXXVI +MMMMXX +MMMMDLVI +MCCCCLXXX +MMMXXII +MMXXII +MMDCCCCXXXI +MMMDXXV +MMMDCLXXXVIIII +MMMDLXXXXVII +MDLXIIII +CMXC +MMMXXXVIII +MDLXXXVIII +MCCCLXXVI +MMCDLIX +MMDCCCXVIII +MDCCCXXXXVI +MMMMCMIV +MMMMDCIIII +MMCCXXXV +XXXXVI +MMMMCCXVII +MMCCXXIV +MCMLVIIII +MLXXXIX +MMMMLXXXIX +CLXXXXIX +MMMDCCCCLVIII +MMMMCCLXXIII +MCCCC +DCCCLIX +MMMCCCLXXXII +MMMCCLXVIIII +MCLXXXV +CDLXXXVII +DCVI +MMX +MMCCXIII +MMMMDCXX +MMMMXXVIII +DCCCLXII +MMMMCCCXLIII +MMMMCLXV +DXCI +MMMMCLXXX +MMMDCCXXXXI +MMMMXXXXVI +DCLX +MMMCCCXI +MCCLXXX +MMCDLXXII +DCCLXXI +MMMCCCXXXVI +MCCCCLXXXVIIII +CDLVIII +DCCLVI +MMMMDCXXXVIII +MMCCCLXXXIII +MMMMDCCLXXV +MMMXXXVI +CCCLXXXXIX +CV +CCCCXIII +CCCCXVI +MDCCCLXXXIIII +MMDCCLXXXII +MMMMCCCCLXXXI +MXXV +MMCCCLXXVIIII +MMMCCXII +MMMMCCXXXIII +MMCCCLXXXVI +MMMDCCCLVIIII +MCCXXXVII +MDCLXXV +XXXV +MMDLI +MMMCCXXX +MMMMCXXXXV +CCCCLIX +MMMMDCCCLXXIII +MMCCCXVII +DCCCXVI +MMMCCCXXXXV +MDCCCCXCV +CLXXXI +MMMMDCCLXX +MMMDCCCIII +MMCLXXVII +MMMDCCXXIX +MMDCCCXCIIII +MMMCDXXIIII +MMMMXXVIII +MMMMDCCCCLXVIII +MDCCCXX +MMMMCDXXI +MMMMDLXXXIX +CCXVI +MDVIII +MMCCLXXI +MMMDCCCLXXI +MMMCCCLXXVI +MMCCLXI +MMMMDCCCXXXIV +DLXXXVI +MMMMDXXXII +MMMXXIIII +MMMMCDIV +MMMMCCCXLVIII +MMMMCXXXVIII +MMMCCCLXVI +MDCCXVIII +MMCXX +CCCLIX +MMMMDCCLXXII +MDCCCLXXV +MMMMDCCCXXIV +DCCCXXXXVIII +MMMDCCCCXXXVIIII +MMMMCCXXXV +MDCLXXXIII +MMCCLXXXIV +MCLXXXXIIII +DXXXXIII +MCCCXXXXVIII +MMCLXXIX +MMMMCCLXIV +MXXII +MMMCXIX +MDCXXXVII +MMDCCVI +MCLXXXXVIII +MMMCXVI +MCCCLX +MMMCDX +CCLXVIIII +MMMCCLX +MCXXVIII +LXXXII +MCCCCLXXXI +MMMI +MMMCCCLXIV +MMMCCCXXVIIII +CXXXVIII +MMCCCXX +MMMCCXXVIIII +MCCLXVI +MMMCCCCXXXXVI +MMDCCXCIX +MCMLXXI +MMCCLXVIII +CDLXXXXIII +MMMMDCCXXII +MMMMDCCLXXXVII +MMMDCCLIV +MMCCLXIII +MDXXXVII +DCCXXXIIII +MCII +MMMDCCCLXXI +MMMLXXIII +MDCCCLIII +MMXXXVIII +MDCCXVIIII +MDCCCCXXXVII +MMCCCXVI +MCMXXII +MMMCCCLVIII +MMMMDCCCXX +MCXXIII +MMMDLXI +MMMMDXXII +MDCCCX +MMDXCVIIII +MMMDCCCCVIII +MMMMDCCCCXXXXVI +MMDCCCXXXV +MMCXCIV +MCMLXXXXIII +MMMCCCLXXVI +MMMMDCLXXXV +CMLXIX +DCXCII +MMXXVIII +MMMMCCCXXX +XXXXVIIII \ No newline at end of file diff --git a/project_euler/problem_089/sol1.py b/project_euler/problem_089/sol1.py new file mode 100644 index 000000000000..11582aa4ab1a --- /dev/null +++ b/project_euler/problem_089/sol1.py @@ -0,0 +1,141 @@ +""" +Project Euler Problem 89: https://projecteuler.net/problem=89 + 
+For a number written in Roman numerals to be considered valid there are basic rules +which must be followed. Even though the rules allow some numbers to be expressed in +more than one way there is always a "best" way of writing a particular number. + +For example, it would appear that there are at least six ways of writing the number +sixteen: + +IIIIIIIIIIIIIIII +VIIIIIIIIIII +VVIIIIII +XIIIIII +VVVI +XVI + +However, according to the rules only XIIIIII and XVI are valid, and the last example +is considered to be the most efficient, as it uses the least number of numerals. + +The 11K text file, roman.txt (right click and 'Save Link/Target As...'), contains one +thousand numbers written in valid, but not necessarily minimal, Roman numerals; see +About... Roman Numerals for the definitive rules for this problem. + +Find the number of characters saved by writing each of these in their minimal form. + +Note: You can assume that all the Roman numerals in the file contain no more than four +consecutive identical units. +""" + +import os + +SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000} + + +def parse_roman_numerals(numerals: str) -> int: + """ + Converts a string of roman numerals to an integer. + e.g. + >>> parse_roman_numerals("LXXXIX") + 89 + >>> parse_roman_numerals("IIII") + 4 + """ + + total_value = 0 + + index = 0 + while index < len(numerals) - 1: + current_value = SYMBOLS[numerals[index]] + next_value = SYMBOLS[numerals[index + 1]] + if current_value < next_value: + total_value -= current_value + else: + total_value += current_value + index += 1 + total_value += SYMBOLS[numerals[index]] + + return total_value + + +def generate_roman_numerals(num: int) -> str: + """ + Generates a string of roman numerals for a given integer. + e.g. + >>> generate_roman_numerals(89) + 'LXXXIX' + >>> generate_roman_numerals(4) + 'IV' + """ + + numerals = "" + + m_count = num // 1000 + numerals += m_count * "M" + num %= 1000 + + c_count = num // 100 + if c_count == 9: + numerals += "CM" + c_count -= 9 + elif c_count == 4: + numerals += "CD" + c_count -= 4 + if c_count >= 5: + numerals += "D" + c_count -= 5 + numerals += c_count * "C" + num %= 100 + + x_count = num // 10 + if x_count == 9: + numerals += "XC" + x_count -= 9 + elif x_count == 4: + numerals += "XL" + x_count -= 4 + if x_count >= 5: + numerals += "L" + x_count -= 5 + numerals += x_count * "X" + num %= 10 + + if num == 9: + numerals += "IX" + num -= 9 + elif num == 4: + numerals += "IV" + num -= 4 + if num >= 5: + numerals += "V" + num -= 5 + numerals += num * "I" + + return numerals + + +def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int: + """ + Calculates and returns the answer to project euler problem 89. 
+ + >>> solution("/numeralcleanup_test.txt") + 16 + """ + + savings = 0 + + file1 = open(os.path.dirname(__file__) + roman_numerals_filename, "r") + lines = file1.readlines() + for line in lines: + original = line.strip() + num = parse_roman_numerals(original) + shortened = generate_roman_numerals(num) + savings += len(original) - len(shortened) + + return savings + + +if __name__ == "__main__": + + print(f"{solution() = }") From fa364dfd274349ab3e674ea97780a7b9977cb7ef Mon Sep 17 00:00:00 2001 From: Akash G Krishnan Date: Sat, 21 Nov 2020 12:28:52 +0530 Subject: [PATCH 0006/1543] Changed how the Visited nodes are tracked (#3811) Updated the code to track visited Nodes with Set data structure instead of Lists to bring down the lookup time in visited from O(N) to O(1) as doing O(N) lookup each time in the visited List will become significantly slow when the graph grows --- graphs/bfs_shortest_path.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/graphs/bfs_shortest_path.py b/graphs/bfs_shortest_path.py index 1655ca64208d..754ba403537e 100644 --- a/graphs/bfs_shortest_path.py +++ b/graphs/bfs_shortest_path.py @@ -1,8 +1,6 @@ """Breadth-first search shortest path implementations. - doctest: python -m doctest -v bfs_shortest_path.py - Manual test: python bfs_shortest_path.py """ @@ -19,22 +17,19 @@ def bfs_shortest_path(graph: dict, start, goal) -> str: """Find shortest path between `start` and `goal` nodes. - Args: graph (dict): node/list of neighboring nodes key/value pairs. start: start node. goal: target node. - Returns: Shortest path between `start` and `goal` nodes as a string of nodes. 'Not found' string if no path found. - Example: >>> bfs_shortest_path(graph, "G", "D") ['G', 'C', 'A', 'B', 'D'] """ # keep track of explored nodes - explored = [] + explored = set() # keep track of all the paths to be checked queue = [[start]] @@ -61,7 +56,7 @@ def bfs_shortest_path(graph: dict, start, goal) -> str: return new_path # mark node as explored - explored.append(node) + explored.add(node) # in case there's no path between the 2 nodes return "So sorry, but a connecting path doesn't exist :(" @@ -69,16 +64,13 @@ def bfs_shortest_path(graph: dict, start, goal) -> str: def bfs_shortest_path_distance(graph: dict, start, target) -> int: """Find shortest path distance between `start` and `target` nodes. - Args: graph: node/list of neighboring nodes key/value pairs. start: node to start search from. target: node to search for. - Returns: Number of edges in shortest path between `start` and `target` nodes. -1 if no path exists. - Example: >>> bfs_shortest_path_distance(graph, "G", "D") 4 @@ -92,7 +84,7 @@ def bfs_shortest_path_distance(graph: dict, start, target) -> int: if start == target: return 0 queue = [start] - visited = [start] + visited = set(start) # Keep tab on distances from `start` node. 
dist = {start: 0, target: -1} while queue: @@ -103,7 +95,7 @@ def bfs_shortest_path_distance(graph: dict, start, target) -> int: ) for adjacent in graph[node]: if adjacent not in visited: - visited.append(adjacent) + visited.add(adjacent) queue.append(adjacent) dist[adjacent] = dist[node] + 1 return dist[target] From f036b9f3587fca094ce85f40b587e930e26ac726 Mon Sep 17 00:00:00 2001 From: Niranjan Hegde Date: Sat, 21 Nov 2020 13:34:08 +0530 Subject: [PATCH 0007/1543] Web programming contribution (#2436) * Currency Converter * currency converter * Currency Converter * currency converter * implemented changes * Implemented changes requested * TESTING = os.getenv("CONTINUOUS_INTEGRATION", False) * Update currency_converter.py * Update currency_converter.py Co-authored-by: Christian Clauss --- web_programming/currency_converter.py | 192 ++++++++++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 web_programming/currency_converter.py diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py new file mode 100644 index 000000000000..6aed2a5578a5 --- /dev/null +++ b/web_programming/currency_converter.py @@ -0,0 +1,192 @@ +""" +This is used to convert the currency using the Amdoren Currency API +https://www.amdoren.com +""" + +import os + +import requests + +URL_BASE = "https://www.amdoren.com/api/currency.php" +TESTING = os.getenv("CI", False) +API_KEY = os.getenv("AMDOREN_API_KEY") +if not API_KEY and not TESTING: + raise KeyError("Please put your API key in an environment variable.") + + +# Currency and their description +list_of_currencies = """ +AED United Arab Emirates Dirham +AFN Afghan Afghani +ALL Albanian Lek +AMD Armenian Dram +ANG Netherlands Antillean Guilder +AOA Angolan Kwanza +ARS Argentine Peso +AUD Australian Dollar +AWG Aruban Florin +AZN Azerbaijani Manat +BAM Bosnia & Herzegovina Convertible Mark +BBD Barbadian Dollar +BDT Bangladeshi Taka +BGN Bulgarian Lev +BHD Bahraini Dinar +BIF Burundian Franc +BMD Bermudian Dollar +BND Brunei Dollar +BOB Bolivian Boliviano +BRL Brazilian Real +BSD Bahamian Dollar +BTN Bhutanese Ngultrum +BWP Botswana Pula +BYN Belarus Ruble +BZD Belize Dollar +CAD Canadian Dollar +CDF Congolese Franc +CHF Swiss Franc +CLP Chilean Peso +CNY Chinese Yuan +COP Colombian Peso +CRC Costa Rican Colon +CUC Cuban Convertible Peso +CVE Cape Verdean Escudo +CZK Czech Republic Koruna +DJF Djiboutian Franc +DKK Danish Krone +DOP Dominican Peso +DZD Algerian Dinar +EGP Egyptian Pound +ERN Eritrean Nakfa +ETB Ethiopian Birr +EUR Euro +FJD Fiji Dollar +GBP British Pound Sterling +GEL Georgian Lari +GHS Ghanaian Cedi +GIP Gibraltar Pound +GMD Gambian Dalasi +GNF Guinea Franc +GTQ Guatemalan Quetzal +GYD Guyanaese Dollar +HKD Hong Kong Dollar +HNL Honduran Lempira +HRK Croatian Kuna +HTG Haiti Gourde +HUF Hungarian Forint +IDR Indonesian Rupiah +ILS Israeli Shekel +INR Indian Rupee +IQD Iraqi Dinar +IRR Iranian Rial +ISK Icelandic Krona +JMD Jamaican Dollar +JOD Jordanian Dinar +JPY Japanese Yen +KES Kenyan Shilling +KGS Kyrgystani Som +KHR Cambodian Riel +KMF Comorian Franc +KPW North Korean Won +KRW South Korean Won +KWD Kuwaiti Dinar +KYD Cayman Islands Dollar +KZT Kazakhstan Tenge +LAK Laotian Kip +LBP Lebanese Pound +LKR Sri Lankan Rupee +LRD Liberian Dollar +LSL Lesotho Loti +LYD Libyan Dinar +MAD Moroccan Dirham +MDL Moldovan Leu +MGA Malagasy Ariary +MKD Macedonian Denar +MMK Myanma Kyat +MNT Mongolian Tugrik +MOP Macau Pataca +MRO Mauritanian Ouguiya +MUR Mauritian Rupee +MVR Maldivian Rufiyaa +MWK Malawi Kwacha 
+MXN Mexican Peso +MYR Malaysian Ringgit +MZN Mozambican Metical +NAD Namibian Dollar +NGN Nigerian Naira +NIO Nicaragua Cordoba +NOK Norwegian Krone +NPR Nepalese Rupee +NZD New Zealand Dollar +OMR Omani Rial +PAB Panamanian Balboa +PEN Peruvian Nuevo Sol +PGK Papua New Guinean Kina +PHP Philippine Peso +PKR Pakistani Rupee +PLN Polish Zloty +PYG Paraguayan Guarani +QAR Qatari Riyal +RON Romanian Leu +RSD Serbian Dinar +RUB Russian Ruble +RWF Rwanda Franc +SAR Saudi Riyal +SBD Solomon Islands Dollar +SCR Seychellois Rupee +SDG Sudanese Pound +SEK Swedish Krona +SGD Singapore Dollar +SHP Saint Helena Pound +SLL Sierra Leonean Leone +SOS Somali Shilling +SRD Surinamese Dollar +SSP South Sudanese Pound +STD Sao Tome and Principe Dobra +SYP Syrian Pound +SZL Swazi Lilangeni +THB Thai Baht +TJS Tajikistan Somoni +TMT Turkmenistani Manat +TND Tunisian Dinar +TOP Tonga Paanga +TRY Turkish Lira +TTD Trinidad and Tobago Dollar +TWD New Taiwan Dollar +TZS Tanzanian Shilling +UAH Ukrainian Hryvnia +UGX Ugandan Shilling +USD United States Dollar +UYU Uruguayan Peso +UZS Uzbekistan Som +VEF Venezuelan Bolivar +VND Vietnamese Dong +VUV Vanuatu Vatu +WST Samoan Tala +XAF Central African CFA franc +XCD East Caribbean Dollar +XOF West African CFA franc +XPF CFP Franc +YER Yemeni Rial +ZAR South African Rand +ZMW Zambian Kwacha +""" + + +def convert_currency( + from_: str = "USD", to: str = "INR", amount: float = 1.0, api_key: str = API_KEY +) -> str: + """https://www.amdoren.com/currency-api/""" + params = locals() + params["from"] = params.pop("from_") + res = requests.get(URL_BASE, params=params).json() + return str(res["amount"]) if res["error"] == 0 else res["error_message"] + + +if __name__ == "__main__": + print( + convert_currency( + input("Enter from currency: ").strip(), + input("Enter to currency: ").strip(), + float(input("Enter the amount: ").strip()), + ) + ) From f2c1f98a234677b7b0265751597c17692286e004 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Sat, 21 Nov 2020 16:12:00 +0530 Subject: [PATCH 0008/1543] Remove workflow file, task checked by the bot (#3917) --- .github/workflows/auto_close_empty_issues.yml | 20 ------------------- 1 file changed, 20 deletions(-) delete mode 100644 .github/workflows/auto_close_empty_issues.yml diff --git a/.github/workflows/auto_close_empty_issues.yml b/.github/workflows/auto_close_empty_issues.yml deleted file mode 100644 index a6334d6ade32..000000000000 --- a/.github/workflows/auto_close_empty_issues.yml +++ /dev/null @@ -1,20 +0,0 @@ -# GitHub Action that uses close-issue auto-close empty issues after they are opened. -# If the issue body text is empty the Action auto-closes it and sends a notification. -# Otherwise if the issue body is not empty, it does nothing and the issue remains open. -# https://github.com/marketplace/actions/close-issue - -name: auto_close_empty_issues -on: - issues: - types: [opened] -jobs: - check-issue-body-not-empty: - runs-on: ubuntu-latest - steps: - - if: github.event.issue.body == 0 - name: Close Issue - uses: peter-evans/close-issue@v1 - with: - comment: | - Issue body must contain content. - Auto-closing this issue. 
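The convert_currency() function added above in [PATCH 0007/1543] (web_programming/currency_converter.py) carries no doctests because it calls the paid Amdoren endpoint and expects AMDOREN_API_KEY (or CI) in the environment at import time. A minimal sketch of how it could be exercised offline with pytest's monkeypatch fixture follows; the stub only fakes the response fields the function reads on the happy path (error and amount), and the dummy key, fake amount, and package import path are assumptions for illustration, not part of the patch.

import os

# Satisfy the module's import-time API-key check with a throwaway value
# (assumes the test runs from the repository root so the package resolves).
os.environ.setdefault("AMDOREN_API_KEY", "test-key")

import requests

from web_programming.currency_converter import convert_currency


class FakeResponse:
    """Stands in for requests.Response; only .json() is used by convert_currency."""

    def json(self) -> dict:
        return {"error": 0, "amount": 82.5}


def test_convert_currency(monkeypatch) -> None:
    # Route the HTTP call to the stub so no real request reaches www.amdoren.com.
    monkeypatch.setattr(requests, "get", lambda url, params: FakeResponse())
    assert convert_currency("USD", "INR", 1.0, api_key="test-key") == "82.5"
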
From 03e7f3732996f1a83ebdca71a2efc2665ed49f38 Mon Sep 17 00:00:00 2001 From: Joyce Date: Mon, 23 Nov 2020 13:37:42 +0800 Subject: [PATCH 0009/1543] [mypy] math/sieve_of_eratosthenes: Add type hints (#2627) * add type hints to math/sieve * add doctest * math/sieve: remove manual doctest * add check for negative * Update maths/sieve_of_eratosthenes.py * Update sieve_of_eratosthenes.py Co-authored-by: Dhruv Manilawala --- maths/sieve_of_eratosthenes.py | 38 +++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/maths/sieve_of_eratosthenes.py b/maths/sieve_of_eratosthenes.py index faf6fc0f9a98..47a086546900 100644 --- a/maths/sieve_of_eratosthenes.py +++ b/maths/sieve_of_eratosthenes.py @@ -8,54 +8,58 @@ Reference: https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes doctest provider: Bruno Simas Hadlich (https://github.com/brunohadlich) -Also thanks Dmitry (https://github.com/LizardWizzard) for finding the problem +Also thanks to Dmitry (https://github.com/LizardWizzard) for finding the problem """ import math +from typing import List -def sieve(n): +def prime_sieve(num: int) -> List[int]: """ Returns a list with all prime numbers up to n. - >>> sieve(50) + >>> prime_sieve(50) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47] - >>> sieve(25) + >>> prime_sieve(25) [2, 3, 5, 7, 11, 13, 17, 19, 23] - >>> sieve(10) + >>> prime_sieve(10) [2, 3, 5, 7] - >>> sieve(9) + >>> prime_sieve(9) [2, 3, 5, 7] - >>> sieve(2) + >>> prime_sieve(2) [2] - >>> sieve(1) + >>> prime_sieve(1) [] """ - l = [True] * (n + 1) # noqa: E741 + if num <= 0: + raise ValueError(f"{num}: Invalid input, please enter a positive integer.") + + sieve = [True] * (num + 1) prime = [] start = 2 - end = int(math.sqrt(n)) + end = int(math.sqrt(num)) while start <= end: # If start is a prime - if l[start] is True: + if sieve[start] is True: prime.append(start) # Set multiples of start be False - for i in range(start * start, n + 1, start): - if l[i] is True: - l[i] = False + for i in range(start * start, num + 1, start): + if sieve[i] is True: + sieve[i] = False start += 1 - for j in range(end + 1, n + 1): - if l[j] is True: + for j in range(end + 1, num + 1): + if sieve[j] is True: prime.append(j) return prime if __name__ == "__main__": - print(sieve(int(input("Enter n: ").strip()))) + print(prime_sieve(int(input("Enter a positive integer: ").strip()))) From 49d0c41905fd69338d6a84a73fe1f74c2be14adf Mon Sep 17 00:00:00 2001 From: Mikail Farid Date: Mon, 23 Nov 2020 07:11:28 +0100 Subject: [PATCH 0010/1543] Renamed octal_to_decimal to octal_to_decimal.py (#3420) * Renamed octal_to_decimal to octal_to_decimal.py * Updated octal_to_decimal.py * modified doctests * updated octal_to_decimal.py --- conversions/{octal_to_decimal => octal_to_decimal.py} | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) rename conversions/{octal_to_decimal => octal_to_decimal.py} (77%) diff --git a/conversions/octal_to_decimal b/conversions/octal_to_decimal.py similarity index 77% rename from conversions/octal_to_decimal rename to conversions/octal_to_decimal.py index a5b027e3ae8d..5a7373fef7e3 100644 --- a/conversions/octal_to_decimal +++ b/conversions/octal_to_decimal.py @@ -9,10 +9,16 @@ def oct_to_decimal(oct_string: str) -> int: >>> oct_to_decimal("-45") -37 >>> oct_to_decimal("2-0Fm") + Traceback (most recent call last): + ... 
ValueError: Non-octal value was passed to the function >>> oct_to_decimal("") - ValueError: Empty string value was passed to the function + Traceback (most recent call last): + ... + ValueError: Empty string was passed to the function >>> oct_to_decimal("19") + Traceback (most recent call last): + ... ValueError: Non-octal value was passed to the function """ oct_string = str(oct_string).strip() @@ -21,7 +27,7 @@ def oct_to_decimal(oct_string: str) -> int: is_negative = oct_string[0] == "-" if is_negative: oct_string = oct_string[1:] - if not all(0 <= int(char) <= 7 for char in oct_string): + if not oct_string.isdigit() or not all(0 <= int(char) <= 7 for char in oct_string): raise ValueError("Non-octal value was passed to the function") decimal_number = 0 for char in oct_string: From 9bf7b183e744a325a48a573165740d01bf60b2cf Mon Sep 17 00:00:00 2001 From: Tan Yong He Date: Mon, 23 Nov 2020 15:31:43 +0800 Subject: [PATCH 0011/1543] Improve Base16 Codebase (#3534) * Add doctest and remove input() usage * Apply suggestions from code review Co-authored-by: Dhruv Manilawala --- ciphers/base16.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/ciphers/base16.py b/ciphers/base16.py index 0210315d54e6..f27ea4628e54 100644 --- a/ciphers/base16.py +++ b/ciphers/base16.py @@ -1,13 +1,22 @@ import base64 -def main(): - inp = input("->") +def encode_to_b16(inp: str) -> bytes: + """ + Encodes a given utf-8 string into base-16. + >>> encode_to_b16('Hello World!') + b'48656C6C6F20576F726C6421' + >>> encode_to_b16('HELLO WORLD!') + b'48454C4C4F20574F524C4421' + >>> encode_to_b16('') + b'' + """ encoded = inp.encode("utf-8") # encoded the input (we need a bytes like object) b16encoded = base64.b16encode(encoded) # b16encoded the encoded string - print(b16encoded) - print(base64.b16decode(b16encoded).decode("utf-8")) # decoded it + return b16encoded if __name__ == "__main__": - main() + import doctest + + doctest.testmod() From 3fdbf9741dd910a78f3d614771d26c3dda3527fd Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 24 Nov 2020 12:41:10 +0100 Subject: [PATCH 0012/1543] Python 3.9 (#3926) * Upgrade to Python 3.9 * pip install wheel for faster builds * updating DIRECTORY.md * requirements.txt: tensorflow; python_version < '3.9' * keras requires tensorflow * Rename lstm_prediction.py to lstm_prediction.py_tf * Update requirements.txt * updating DIRECTORY.md * Update requirements.txt Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Dhruv Manilawala --- .github/workflows/build.yml | 4 ++-- DIRECTORY.md | 3 +-- .../lstm/{lstm_prediction.py => lstm_prediction.py_tf} | 0 requirements.txt | 4 ++-- 4 files changed, 5 insertions(+), 6 deletions(-) rename machine_learning/lstm/{lstm_prediction.py => lstm_prediction.py_tf} (100%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 01ac9aea7a7c..ae9b4e36b1ce 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,14 +12,14 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: "3.8" + python-version: "3.9" - uses: actions/cache@v2 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} - name: Install dependencies run: | - python -m pip install --upgrade pip setuptools six + python -m pip install --upgrade pip setuptools six wheel python -m pip install pytest-cov -r requirements.txt - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ 
--cov-report=term-missing:skip-covered --cov=. . diff --git a/DIRECTORY.md b/DIRECTORY.md index 2b3f3073c3d4..e1e57307d593 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -356,8 +356,6 @@ * [Linear Discriminant Analysis](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/linear_discriminant_analysis.py) * [Linear Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/linear_regression.py) * [Logistic Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/logistic_regression.py) - * Lstm - * [Lstm Prediction](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/lstm/lstm_prediction.py) * [Multilayer Perceptron Classifier](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/multilayer_perceptron_classifier.py) * [Polymonial Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/polymonial_regression.py) * [Random Forest Classifier](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/random_forest_classifier.py) @@ -866,6 +864,7 @@ * [Covid Stats Via Xpath](https://github.com/TheAlgorithms/Python/blob/master/web_programming/covid_stats_via_xpath.py) * [Crawl Google Results](https://github.com/TheAlgorithms/Python/blob/master/web_programming/crawl_google_results.py) * [Crawl Google Scholar Citation](https://github.com/TheAlgorithms/Python/blob/master/web_programming/crawl_google_scholar_citation.py) + * [Currency Converter](https://github.com/TheAlgorithms/Python/blob/master/web_programming/currency_converter.py) * [Current Stock Price](https://github.com/TheAlgorithms/Python/blob/master/web_programming/current_stock_price.py) * [Current Weather](https://github.com/TheAlgorithms/Python/blob/master/web_programming/current_weather.py) * [Daily Horoscope](https://github.com/TheAlgorithms/Python/blob/master/web_programming/daily_horoscope.py) diff --git a/machine_learning/lstm/lstm_prediction.py b/machine_learning/lstm/lstm_prediction.py_tf similarity index 100% rename from machine_learning/lstm/lstm_prediction.py rename to machine_learning/lstm/lstm_prediction.py_tf diff --git a/requirements.txt b/requirements.txt index 8bbb8d524ed4..349d88944656 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ beautifulsoup4 fake_useragent -keras +keras; python_version < '3.9' lxml matplotlib numpy @@ -13,5 +13,5 @@ scikit-fuzzy sklearn statsmodels sympy -tensorflow +tensorflow; python_version < '3.9' xgboost From e031ad3db627a46d3c1cc0ae37739420f00c239d Mon Sep 17 00:00:00 2001 From: Sullivan <38718448+Epic-R-R@users.noreply.github.com> Date: Tue, 24 Nov 2020 19:48:00 +0330 Subject: [PATCH 0013/1543] Create instagram_pic (#3945) * Create instagram_pic * Update instagram_pic * Update instagram_pic * isort * Update instagram_pic.py Co-authored-by: Christian Clauss --- web_programming/instagram_pic.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 web_programming/instagram_pic.py diff --git a/web_programming/instagram_pic.py b/web_programming/instagram_pic.py new file mode 100644 index 000000000000..8521da674d7d --- /dev/null +++ b/web_programming/instagram_pic.py @@ -0,0 +1,16 @@ +from datetime import datetime + +import requests +from bs4 import BeautifulSoup + +if __name__ == "__main__": + url = input("Enter image url: ").strip() + print(f"Downloading image from {url} ...") + soup = BeautifulSoup(requests.get(url).content, "html.parser") + # The image URL is in the content field of the first meta tag with property og:image 
+ image_url = soup.find("meta", {"property": "og:image"})["content"] + image_data = requests.get(image_url).content + file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg" + with open(file_name, "wb") as fp: + fp.write(image_data) + print(f"Done. Image saved to disk as {file_name}.") From 287bf26bc87a0f68de51f817758b497e76d94271 Mon Sep 17 00:00:00 2001 From: Cho Yin Yong Date: Tue, 24 Nov 2020 19:30:15 -0500 Subject: [PATCH 0014/1543] Add a divide and conquer method in finding the maximum difference pair (#3692) * A divide and conquer method in finding the maximum difference pair * fix formatting issues * fix formatting issues * add doctest runner --- divide_and_conquer/max_difference_pair.py | 47 +++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 divide_and_conquer/max_difference_pair.py diff --git a/divide_and_conquer/max_difference_pair.py b/divide_and_conquer/max_difference_pair.py new file mode 100644 index 000000000000..b976aca43137 --- /dev/null +++ b/divide_and_conquer/max_difference_pair.py @@ -0,0 +1,47 @@ +from typing import List + + +def max_difference(a: List[int]) -> (int, int): + """ + We are given an array A[1..n] of integers, n >= 1. We want to + find a pair of indices (i, j) such that + 1 <= i <= j <= n and A[j] - A[i] is as large as possible. + + Explanation: + https://www.geeksforgeeks.org/maximum-difference-between-two-elements/ + + >>> max_difference([5, 11, 2, 1, 7, 9, 0, 7]) + (1, 9) + """ + # base case + if len(a) == 1: + return a[0], a[0] + else: + # split A into half. + first = a[: len(a) // 2] + second = a[len(a) // 2 :] + + # 2 sub problems, 1/2 of original size. + small1, big1 = max_difference(first) + small2, big2 = max_difference(second) + + # get min of first and max of second + # linear time + min_first = min(first) + max_second = max(second) + + # 3 cases, either (small1, big1), + # (min_first, max_second), (small2, big2) + # constant comparisons + if big2 - small2 > max_second - min_first and big2 - small2 > big1 - small1: + return small2, big2 + elif big1 - small1 > max_second - min_first: + return small1, big1 + else: + return min_first, max_second + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 098f02bc04550343767e3e1cfaef5f57af6b1046 Mon Sep 17 00:00:00 2001 From: lawric1 <67882089+lawric1@users.noreply.github.com> Date: Wed, 25 Nov 2020 02:13:14 -0300 Subject: [PATCH 0015/1543] Fixes: #3944 Authentication error; use tokens instead (#3949) * fixes #3944 authentication error * Fixes: #3944 authentication error * Fixed docstring failure in pre-commit, Fixed request.get params to GitHub REST API standards * run black formatter * Add USER_TOKEN constant and checks if empty, removes deprecated docstring * Add descriptive dict type hint, change headers format to f-string * Add Accept header * Fix pre-commit error * Fix pre-commit error * Add test for fetch_github_info * Remove test function from main file * Create test_fetch_github_info.py * Update test_fetch_github_info.py * Update test_fetch_github_info.py * No need to cover __name__ == __main__ block Co-authored-by: Dhruv Manilawala --- web_programming/fetch_github_info.py | 50 +++++++++++++++++------ web_programming/test_fetch_github_info.py | 27 ++++++++++++ 2 files changed, 64 insertions(+), 13 deletions(-) create mode 100644 web_programming/test_fetch_github_info.py diff --git a/web_programming/fetch_github_info.py b/web_programming/fetch_github_info.py index 227598bb20ab..c9198460f211 100644 --- a/web_programming/fetch_github_info.py +++ 
b/web_programming/fetch_github_info.py @@ -1,26 +1,50 @@ #!/usr/bin/env python3 - """ Created by sarathkaul on 14/11/19 +Updated by lawric1 on 24/11/20 -Basic authentication using an API password is deprecated and will soon no longer work. -Visit https://developer.github.com/changes/2020-02-14-deprecating-password-auth -for more information around suggested workarounds and removal dates. -""" +Authentication will be made via access token. +To generate your personal access token visit https://github.com/settings/tokens. + +NOTE: +Never hardcode any credential information in the code. Always use an environment +file to store the private information and use the `os` module to get the information +during runtime. +Create a ".env" file in the root directory and write these two lines in that file +with your token:: + +#!/usr/bin/env bash +export USER_TOKEN="" +""" +import os +from typing import Any, Dict import requests -_GITHUB_API = "https://api.github.com/user" +BASE_URL = "https://api.github.com" +# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user +AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user" -def fetch_github_info(auth_user: str, auth_pass: str) -> dict: +# https://github.com/settings/tokens +USER_TOKEN = os.environ.get("USER_TOKEN", "") + + +def fetch_github_info(auth_token: str) -> Dict[Any, Any]: """ Fetch GitHub info of a user using the requests module """ - return requests.get(_GITHUB_API, auth=(auth_user, auth_pass)).json() - - -if __name__ == "__main__": - for key, value in fetch_github_info("", "").items(): - print(f"{key}: {value}") + headers = { + "Authorization": f"token {auth_token}", + "Accept": "application/vnd.github.v3+json", + } + return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json() + + +if __name__ == "__main__": # pragma: no cover + if USER_TOKEN: + for key, value in fetch_github_info(USER_TOKEN).items(): + print(f"{key}: {value}") + else: + raise ValueError("'USER_TOKEN' field cannot be empty.") diff --git a/web_programming/test_fetch_github_info.py b/web_programming/test_fetch_github_info.py new file mode 100644 index 000000000000..2da97c782df7 --- /dev/null +++ b/web_programming/test_fetch_github_info.py @@ -0,0 +1,27 @@ +import json + +import requests + +from .fetch_github_info import AUTHENTICATED_USER_ENDPOINT, fetch_github_info + + +def test_fetch_github_info(monkeypatch): + class FakeResponse: + def __init__(self, content) -> None: + assert isinstance(content, (bytes, str)) + self.content = content + + def json(self): + return json.loads(self.content) + + def mock_response(*args, **kwargs): + assert args[0] == AUTHENTICATED_USER_ENDPOINT + assert "Authorization" in kwargs["headers"] + assert kwargs["headers"]["Authorization"].startswith("token ") + assert "Accept" in kwargs["headers"] + return FakeResponse(b'{"login":"test","id":1}') + + monkeypatch.setattr(requests, "get", mock_response) + result = fetch_github_info("token") + assert result["login"] == "test" + assert result["id"] == 1 From 5eb5483d655482b937abe0e59c34e41a455dfff8 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Wed, 25 Nov 2020 13:23:49 +0530 Subject: [PATCH 0016/1543] Update stalebot to take 5 actions per hour (#3922) * Update stalebot to take 5 actions per hour * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/stale.yml b/.github/stale.yml index ba6fd155d7a3..36ca56266b26 
100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -27,7 +27,7 @@ exemptAssignees: false staleLabel: stale # Limit the number of actions per hour, from 1-30. Default is 30 -limitPerRun: 30 +limitPerRun: 5 # Comment to post when removing the stale label. # unmarkComment: > From 2b50aaf2d3acdd54409316ce71985d4e161912f5 Mon Sep 17 00:00:00 2001 From: YeonJeongLee00 <67946956+YeonJeongLee00@users.noreply.github.com> Date: Wed, 25 Nov 2020 17:54:31 +0900 Subject: [PATCH 0017/1543] Create intro_sort.py (#3877) * Create intro_sort.py * modified intro_sort.py * add doctest * modified code black intro_sort.py * add more test * Update intro_sort.py added doctest, modified code * black intro_sort.py * add type hint * modified code --- sorts/intro_sort.py | 173 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 sorts/intro_sort.py diff --git a/sorts/intro_sort.py b/sorts/intro_sort.py new file mode 100644 index 000000000000..f0e3645adbb7 --- /dev/null +++ b/sorts/intro_sort.py @@ -0,0 +1,173 @@ +""" +Introspective Sort is hybrid sort (Quick Sort + Heap Sort + Insertion Sort) +if the size of the list is under 16, use insertion sort +https://en.wikipedia.org/wiki/Introsort +""" +import math + + +def insertion_sort(array: list, start: int = 0, end: int = 0) -> list: + """ + >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] + + >>> insertion_sort(array, 0, len(array)) + [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] + """ + end = end or len(array) + for i in range(start, end): + temp_index = i + temp_index_value = array[i] + while temp_index != start and temp_index_value < array[temp_index - 1]: + array[temp_index] = array[temp_index - 1] + temp_index -= 1 + array[temp_index] = temp_index_value + return array + + +def heapify(array: list, index: int, heap_size: int) -> None: # Max Heap + """ + >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] + + >>> heapify(array, len(array) // 2 ,len(array)) + """ + largest = index + left_index = 2 * index + 1 # Left Node + right_index = 2 * index + 2 # Right Node + + if left_index < heap_size and array[largest] < array[left_index]: + largest = left_index + + if right_index < heap_size and array[largest] < array[right_index]: + largest = right_index + + if largest != index: + array[index], array[largest] = array[largest], array[index] + heapify(array, largest, heap_size) + + +def heap_sort(array: list) -> list: + """ + >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] + + >>> heap_sort(array) + [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] + """ + n = len(array) + + for i in range(n // 2, -1, -1): + heapify(array, i, n) + + for i in range(n - 1, 0, -1): + array[i], array[0] = array[0], array[i] + heapify(array, 0, i) + + return array + + +def median_of_3( + array: list, first_index: int, middle_index: int, last_index: int +) -> int: + """ + >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] + + >>> median_of_3(array, 0, 0 + ((len(array) - 0) // 2) + 1, len(array) - 1) + 12 + """ + if (array[first_index] > array[middle_index]) != ( + array[first_index] > array[last_index] + ): + return array[first_index] + elif (array[middle_index] > array[first_index]) != ( + array[middle_index] > array[last_index] + ): + return array[middle_index] + else: + return array[last_index] + + +def partition(array: list, low: int, high: int, pivot: int) -> int: + """ + >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] + 
+ >>> partition(array, 0, len(array), 12) + 8 + """ + i = low + j = high + while True: + while array[i] < pivot: + i += 1 + j -= 1 + while pivot < array[j]: + j -= 1 + if i >= j: + return i + array[i], array[j] = array[j], array[i] + i += 1 + + +def sort(array: list) -> list: + """ + :param collection: some mutable ordered collection with heterogeneous + comparable items inside + :return: the same collection ordered by ascending + + Examples: + >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) + [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] + + >>> sort([-1, -5, -3, -13, -44]) + [-44, -13, -5, -3, -1] + + >>> sort([]) + [] + + >>> sort([5]) + [5] + + >>> sort([-3, 0, -7, 6, 23, -34]) + [-34, -7, -3, 0, 6, 23] + + >>> sort([1.7, 1.0, 3.3, 2.1, 0.3 ]) + [0.3, 1.0, 1.7, 2.1, 3.3] + + >>> sort(['d', 'a', 'b', 'e', 'c']) + ['a', 'b', 'c', 'd', 'e'] + """ + if len(array) == 0: + return array + max_depth = 2 * math.ceil(math.log2(len(array))) + size_threshold = 16 + return intro_sort(array, 0, len(array), size_threshold, max_depth) + + +def intro_sort( + array: list, start: int, end: int, size_threshold: int, max_depth: int +) -> list: + """ + >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] + + >>> max_depth = 2 * math.ceil(math.log2(len(array))) + + >>> intro_sort(array, 0, len(array), 16, max_depth) + [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] + """ + while end - start > size_threshold: + if max_depth == 0: + return heap_sort(array) + max_depth -= 1 + pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1) + p = partition(array, start, end, pivot) + intro_sort(array, p, end, size_threshold, max_depth) + end = p + return insertion_sort(array, start, end) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + user_input = input("Enter numbers separated by a comma : ").strip() + unsorted = [float(item) for item in user_input.split(",")] + print(sort(unsorted)) From 4191b9594237a989555b29cfc287b7238050634f Mon Sep 17 00:00:00 2001 From: Erdum Date: Wed, 25 Nov 2020 16:01:49 +0500 Subject: [PATCH 0018/1543] Ohm's Law algorithm added (#3934) * New algorithm added * Errors resolvedc * New Algorithm * New algorithm added * Added new algorithm * work * New algorithm added * Hope this is final * Update electronics/ohms_law.py Co-authored-by: xcodz-dot <71920621+xcodz-dot@users.noreply.github.com> * update decimal value & negative value test * update as cclauss suggest * Update electronics/ohms_law.py Co-authored-by: Christian Clauss * updated as suggested by cclauss * update as suggested by cclauss * Update as suggested by cclauss * Update ohms_law.py Co-authored-by: xcodz-dot <71920621+xcodz-dot@users.noreply.github.com> Co-authored-by: Christian Clauss --- electronics/ohms_law.py | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 electronics/ohms_law.py diff --git a/electronics/ohms_law.py b/electronics/ohms_law.py new file mode 100644 index 000000000000..a7b37b635397 --- /dev/null +++ b/electronics/ohms_law.py @@ -0,0 +1,39 @@ +# https://en.wikipedia.org/wiki/Ohm%27s_law + + +def ohms_law(voltage: float, current: float, resistance: float) -> float: + """ + Apply Ohm's Law, on any two given electrical values, which can be voltage, current, + and resistance, and then in a Python dict return name/value pair of the zero value. 
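To make the contract just described concrete: exactly one of the three quantities is passed as 0 and is solved for from V = I * R. The lines below are an illustrative aside, not part of the patch, and the values deliberately mirror the doctests that follow.

# V = I * R (Ohm's law); each block solves for the quantity that was unknown.
voltage, resistance = 10.0, 5.0
print({"current": voltage / resistance})    # I = V / R  -> {'current': 2.0}

current, resistance = -1.5, 2.0
print({"voltage": current * resistance})    # V = I * R  -> {'voltage': -3.0}

voltage, current = -10.0, 1.0
print({"resistance": voltage / current})    # R = V / I  -> {'resistance': -10.0}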
+ + >>> ohms_law(voltage=10, resistance=5, current=0) + {'current': 2.0} + >>> ohms_law(voltage=0, current=0, resistance=10) + Traceback (most recent call last): + ... + ValueError: One and only one argument must be 0 + >>> ohms_law(voltage=0, current=1, resistance=-2) + Traceback (most recent call last): + ... + ValueError: Resistance cannot be negative + >>> ohms_law(resistance=0, voltage=-10, current=1) + {'resistance': -10.0} + >>> ohms_law(voltage=0, current=-1.5, resistance=2) + {'voltage': -3.0} + """ + if (voltage, current, resistance).count(0) != 1: + raise ValueError("One and only one argument must be 0") + if resistance < 0: + raise ValueError("Resistance cannot be negative") + if voltage == 0: + return {"voltage": float(current * resistance)} + elif current == 0: + return {"current": voltage / resistance} + elif resistance == 0: + return {"resistance": voltage / current} + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From ce3ce3f8a8618e36624d997bc934b2ea997fa5d7 Mon Sep 17 00:00:00 2001 From: Hafidh <32499116+hfz1337@users.noreply.github.com> Date: Wed, 25 Nov 2020 13:38:02 +0100 Subject: [PATCH 0019/1543] Replace base64_cipher.py with an easy to understand version (#3925) * rename base64_cipher.py to base64_encoding.py * edit base64_encoding.py * import necessary modules inside doctests * make it behave like the official implementation * replace format with f-string where possible * replace format with f-string Co-authored-by: Christian Clauss * fix: syntax error due to closing parenthese * reformat code Co-authored-by: Christian Clauss --- ciphers/base64_cipher.py | 89 ----------------------- ciphers/base64_encoding.py | 142 +++++++++++++++++++++++++++++++++++++ 2 files changed, 142 insertions(+), 89 deletions(-) delete mode 100644 ciphers/base64_cipher.py create mode 100644 ciphers/base64_encoding.py diff --git a/ciphers/base64_cipher.py b/ciphers/base64_cipher.py deleted file mode 100644 index 1dbe74a20fe7..000000000000 --- a/ciphers/base64_cipher.py +++ /dev/null @@ -1,89 +0,0 @@ -def encode_base64(text: str) -> str: - r""" - >>> encode_base64('WELCOME to base64 encoding 😁') - 'V0VMQ09NRSB0byBiYXNlNjQgZW5jb2Rpbmcg8J+YgQ==' - >>> encode_base64('AÅᐃ𐀏🤓') - 'QcOF4ZCD8JCAj/CfpJM=' - >>> encode_base64('A'*60) - 'QUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB\r\nQUFB' - """ - base64_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" - - byte_text = bytes(text, "utf-8") # put text in bytes for unicode support - r = "" # the result - c = -len(byte_text) % 3 # the length of padding - p = "=" * c # the padding - s = byte_text + b"\x00" * c # the text to encode - - i = 0 - while i < len(s): - if i > 0 and ((i / 3 * 4) % 76) == 0: - r = r + "\r\n" # for unix newline, put "\n" - - n = (s[i] << 16) + (s[i + 1] << 8) + s[i + 2] - - n1 = (n >> 18) & 63 - n2 = (n >> 12) & 63 - n3 = (n >> 6) & 63 - n4 = n & 63 - - r += base64_chars[n1] + base64_chars[n2] + base64_chars[n3] + base64_chars[n4] - i += 3 - - return r[0 : len(r) - len(p)] + p - - -def decode_base64(text: str) -> str: - r""" - >>> decode_base64('V0VMQ09NRSB0byBiYXNlNjQgZW5jb2Rpbmcg8J+YgQ==') - 'WELCOME to base64 encoding 😁' - >>> decode_base64('QcOF4ZCD8JCAj/CfpJM=') - 'AÅᐃ𐀏🤓' - >>> decode_base64("QUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUF" - ... 
"BQUFBQUFBQUFB\r\nQUFB") - 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' - """ - base64_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" - s = "" - - for i in text: - if i in base64_chars: - s += i - c = "" - else: - if i == "=": - c += "=" - - p = "" - if c == "=": - p = "A" - else: - if c == "==": - p = "AA" - - r = b"" - s = s + p - - i = 0 - while i < len(s): - n = ( - (base64_chars.index(s[i]) << 18) - + (base64_chars.index(s[i + 1]) << 12) - + (base64_chars.index(s[i + 2]) << 6) - + base64_chars.index(s[i + 3]) - ) - - r += bytes([(n >> 16) & 255]) + bytes([(n >> 8) & 255]) + bytes([n & 255]) - - i += 4 - - return str(r[0 : len(r) - len(p)], "utf-8") - - -def main(): - print(encode_base64("WELCOME to base64 encoding 😁")) - print(decode_base64(encode_base64("WELCOME to base64 encoding 😁"))) - - -if __name__ == "__main__": - main() diff --git a/ciphers/base64_encoding.py b/ciphers/base64_encoding.py new file mode 100644 index 000000000000..634afcb89873 --- /dev/null +++ b/ciphers/base64_encoding.py @@ -0,0 +1,142 @@ +B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" + + +def base64_encode(data: bytes) -> bytes: + """Encodes data according to RFC4648. + + The data is first transformed to binary and appended with binary digits so that its + length becomes a multiple of 6, then each 6 binary digits will match a character in + the B64_CHARSET string. The number of appended binary digits would later determine + how many "=" sign should be added, the padding. + For every 2 binary digits added, a "=" sign is added in the output. + We can add any binary digits to make it a multiple of 6, for instance, consider the + following example: + "AA" -> 0010100100101001 -> 001010 010010 1001 + As can be seen above, 2 more binary digits should be added, so there's 4 + possibilities here: 00, 01, 10 or 11. + That being said, Base64 encoding can be used in Steganography to hide data in these + appended digits. + + >>> from base64 import b64encode + >>> a = b"This pull request is part of Hacktoberfest20!" + >>> b = b"https://tools.ietf.org/html/rfc4648" + >>> c = b"A" + >>> base64_encode(a) == b64encode(a) + True + >>> base64_encode(b) == b64encode(b) + True + >>> base64_encode(c) == b64encode(c) + True + >>> base64_encode("abc") + Traceback (most recent call last): + ... + TypeError: a bytes-like object is required, not 'str' + """ + # Make sure the supplied data is a bytes-like object + if not isinstance(data, bytes): + raise TypeError( + f"a bytes-like object is required, not '{data.__class__.__name__}'" + ) + + binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data) + + padding_needed = len(binary_stream) % 6 != 0 + + if padding_needed: + # The padding that will be added later + padding = b"=" * ((6 - len(binary_stream) % 6) // 2) + + # Append binary_stream with arbitrary binary digits (0's by default) to make its + # length a multiple of 6. + binary_stream += "0" * (6 - len(binary_stream) % 6) + else: + padding = b"" + + # Encode every 6 binary digits to their corresponding Base64 character + return ( + "".join( + B64_CHARSET[int(binary_stream[index : index + 6], 2)] + for index in range(0, len(binary_stream), 6) + ).encode() + + padding + ) + + +def base64_decode(encoded_data: str) -> bytes: + """Decodes data according to RFC4648. + + This does the reverse operation of base64_encode. 
+ We first transform the encoded data back to a binary stream, take off the + previously appended binary digits according to the padding, at this point we + would have a binary stream whose length is multiple of 8, the last step is + to convert every 8 bits to a byte. + + >>> from base64 import b64decode + >>> a = "VGhpcyBwdWxsIHJlcXVlc3QgaXMgcGFydCBvZiBIYWNrdG9iZXJmZXN0MjAh" + >>> b = "aHR0cHM6Ly90b29scy5pZXRmLm9yZy9odG1sL3JmYzQ2NDg=" + >>> c = "QQ==" + >>> base64_decode(a) == b64decode(a) + True + >>> base64_decode(b) == b64decode(b) + True + >>> base64_decode(c) == b64decode(c) + True + >>> base64_decode("abc") + Traceback (most recent call last): + ... + AssertionError: Incorrect padding + """ + # Make sure encoded_data is either a string or a bytes-like object + if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str): + raise TypeError( + "argument should be a bytes-like object or ASCII string, not " + f"'{encoded_data.__class__.__name__}'" + ) + + # In case encoded_data is a bytes-like object, make sure it contains only + # ASCII characters so we convert it to a string object + if isinstance(encoded_data, bytes): + try: + encoded_data = encoded_data.decode("utf-8") + except UnicodeDecodeError: + raise ValueError("base64 encoded data should only contain ASCII characters") + + padding = encoded_data.count("=") + + # Check if the encoded string contains non base64 characters + if padding: + assert all( + char in B64_CHARSET for char in encoded_data[:-padding] + ), "Invalid base64 character(s) found." + else: + assert all( + char in B64_CHARSET for char in encoded_data + ), "Invalid base64 character(s) found." + + # Check the padding + assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding" + + if padding: + # Remove padding if there is one + encoded_data = encoded_data[:-padding] + + binary_stream = "".join( + bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data + )[: -padding * 2] + else: + binary_stream = "".join( + bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data + ) + + data = [ + int(binary_stream[index : index + 8], 2) + for index in range(0, len(binary_stream), 8) + ] + + return bytes(data) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From ac6a160f1b390ac3162e3834066dcc787d32d29b Mon Sep 17 00:00:00 2001 From: Vivek Date: Thu, 26 Nov 2020 06:57:00 +0530 Subject: [PATCH 0020/1543] added binary_count_trailing_zeros.py (#2557) * added binary_count_trailing_zeros.py * updated binary_count_trailing_zeros.py file * changed file name to count_trailing_zeros.py * updated count_trailing_zeros.py * resolved flake8 error * renamed to binary_count_trailing_zeros.py * added required changes * resolved pre-commit error * added count_setbits.py * resolved errors * changed name to binary_count_setbits.py * updated file * reformated file --- bit_manipulation/binary_count_setbits.py | 41 +++++++++++++++++ .../binary_count_trailing_zeros.py | 44 +++++++++++++++++++ 2 files changed, 85 insertions(+) create mode 100644 bit_manipulation/binary_count_setbits.py create mode 100644 bit_manipulation/binary_count_trailing_zeros.py diff --git a/bit_manipulation/binary_count_setbits.py b/bit_manipulation/binary_count_setbits.py new file mode 100644 index 000000000000..3c92694533aa --- /dev/null +++ b/bit_manipulation/binary_count_setbits.py @@ -0,0 +1,41 @@ +def binary_count_setbits(a: int) -> int: + """ + Take in 1 integer, return a number that is + the number of 1's in binary representation of that number. 
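As a brief aside (not part of the patch), the count returned by bin(a).count("1") can be cross-checked with Brian Kernighan's n & (n - 1) trick, which clears the lowest set bit on each iteration. The helper name below is made up for this sketch, and the values mirror the doctests that follow.

def count_setbits_kernighan(n: int) -> int:
    # n & (n - 1) clears the lowest set bit, so the loop runs once per 1-bit.
    count = 0
    while n:
        n &= n - 1
        count += 1
    return count


for value in (25, 36, 16, 58, 4294967295, 0):
    assert count_setbits_kernighan(value) == bin(value).count("1")
    print(value, bin(value), count_setbits_kernighan(value))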
+ + >>> binary_count_setbits(25) + 3 + >>> binary_count_setbits(36) + 2 + >>> binary_count_setbits(16) + 1 + >>> binary_count_setbits(58) + 4 + >>> binary_count_setbits(4294967295) + 32 + >>> binary_count_setbits(0) + 0 + >>> binary_count_setbits(-10) + Traceback (most recent call last): + ... + ValueError: Input value must be a positive integer + >>> binary_count_setbits(0.8) + Traceback (most recent call last): + ... + TypeError: Input value must be a 'int' type + >>> binary_count_setbits("0") + Traceback (most recent call last): + ... + TypeError: '<' not supported between instances of 'str' and 'int' + """ + if a < 0: + raise ValueError("Input value must be a positive integer") + elif isinstance(a, float): + raise TypeError("Input value must be a 'int' type") + return bin(a).count("1") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/bit_manipulation/binary_count_trailing_zeros.py b/bit_manipulation/binary_count_trailing_zeros.py new file mode 100644 index 000000000000..f401c4ab9266 --- /dev/null +++ b/bit_manipulation/binary_count_trailing_zeros.py @@ -0,0 +1,44 @@ +from math import log2 + + +def binary_count_trailing_zeros(a: int) -> int: + """ + Take in 1 integer, return a number that is + the number of trailing zeros in binary representation of that number. + + >>> binary_count_trailing_zeros(25) + 0 + >>> binary_count_trailing_zeros(36) + 2 + >>> binary_count_trailing_zeros(16) + 4 + >>> binary_count_trailing_zeros(58) + 1 + >>> binary_count_trailing_zeros(4294967296) + 32 + >>> binary_count_trailing_zeros(0) + 0 + >>> binary_count_trailing_zeros(-10) + Traceback (most recent call last): + ... + ValueError: Input value must be a positive integer + >>> binary_count_trailing_zeros(0.8) + Traceback (most recent call last): + ... + TypeError: Input value must be a 'int' type + >>> binary_count_trailing_zeros("0") + Traceback (most recent call last): + ... 
+ TypeError: '<' not supported between instances of 'str' and 'int' + """ + if a < 0: + raise ValueError("Input value must be a positive integer") + elif isinstance(a, float): + raise TypeError("Input value must be a 'int' type") + return 0 if (a == 0) else int(log2(a & -a)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c5fb0a95043cb93ca522ca0633bf75a656d63b7f Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Fri, 27 Nov 2020 15:27:12 +0530 Subject: [PATCH 0021/1543] Cleaned up knapsack and images directory (#3972) --- images/Travis_CI_fail_1.png | Bin 80257 -> 0 bytes images/Travis_CI_fail_2.png | Bin 45660 -> 0 bytes images/__init__.py | 0 {greedy_method => knapsack}/greedy_knapsack.py | 0 {greedy_method => knapsack/tests}/__init__.py | 0 .../tests/test_greedy_knapsack.py | 2 +- knapsack/{ => tests}/test_knapsack.py | 0 7 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 images/Travis_CI_fail_1.png delete mode 100644 images/Travis_CI_fail_2.png delete mode 100644 images/__init__.py rename {greedy_method => knapsack}/greedy_knapsack.py (100%) rename {greedy_method => knapsack/tests}/__init__.py (100%) rename greedy_method/test_knapsack.py => knapsack/tests/test_greedy_knapsack.py (98%) rename knapsack/{ => tests}/test_knapsack.py (100%) diff --git a/images/Travis_CI_fail_1.png b/images/Travis_CI_fail_1.png deleted file mode 100644 index 451e54e4844a0b9f7d501c3d7682869d9acd97fc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 80257 [base85-encoded binary image data omitted]
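Returning to the bit_manipulation patch above: binary_count_trailing_zeros relies on the two's-complement identity that a & -a isolates the lowest set bit, so the base-2 logarithm of that power of two is exactly the number of trailing zeros. A short illustrative check (not part of either commit), using the doctest values:

from math import log2

for a in (25, 36, 16, 58, 4294967296):
    lowest_set_bit = a & -a                     # e.g. 36 = 0b100100 -> 0b100
    trailing_zeros = int(log2(lowest_set_bit))  # 0b100 -> 2
    assert bin(a).endswith("0" * trailing_zeros)
    print(a, bin(a), trailing_zeros)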
zSLlzDt()H)%Aq$wl&Z0y4JT!rK}SWN-p}FT>O+^7?)1WQAfuXFLNuU38z*P7vY<)l z@phP(P1D^KOzla51E{iD7}c_r^~}I*6H<6b#wT%E($oh?yh3k)kefBjm#Us^rCBiy zI~@t1mxzzY47}Qn#boFKV&MdcrjxafD5&$yCyQ_@g+LpRUZk#HP^)M~^EW(*7U2$8 zgM?A4?&w|KuXxxS6=VF)hVtnwD%L}TI3e1-R2aD^AT?LQyHdMeatgo1YMLF7x2_D+ zqkAYCa^M}i$*LmXh3%FfjlLPSJZrTOtv^!_SHAFy)c zbZcm&9Xo?2kjU!T7Nl`;EUOr%VpY3WzVHwH)Ox`Fk%FIkP ztRS%JCQMA*KGtE!psX}uNbe{;iBkLgls#km%L=GciOCdb>(^} zhHT+VLdxx6_ru>BA)finSlZuOowQaV`MHQDmy3htAb;0o^$Y~#`0)@&sT3<^W1hEk zB(-P2{@b+{Vn1z}Q%$rszN$)>KH1h#!^>uEAVCvl=sm0iU>$B)s-^^qfW9LyaF+JHX!^Z}X2lhGFcMmGOZ7Kr@>CIPXE0cm~}$0sj_4xJS1fHstt zs;s(qP4rJZX$2Apy;Fg7q7sk(HpbmZ!b$cxC0@=b#`*W3wTwZl*5!Ktws=jMr%}gF zx^8#Mm#}fdtf83=-oxEliB6zlAPN6FD9De`{*Q7Fhpc3vnUAUnz$wLqk~$Mj>>rS& z4r*}yEqx3OyMF>Eks#!uKo3RX-@HTl7Xs*C&yys?2i>Hm+Vf8o7ax)+HVC#<@cAsZ z*4t0|MI-82yn-U~G^m|NA)qK&;e2Eu{tu;r^3u8^Sk^pAXt? zXoI%#pOnzh;Goh4KkNVNAbj6fctfC88Z#J0|Ns5KL%-}pHzkT_3Ul}w83@QvN+}qEI?-9Qo+&r zaC~QDR}3T<60J~kPx0H|f02FbpQy)+bpZmc&%@q%L*(O{xl?U{fZC?XEImi(UK_{M zkjLp~Uor9JF$%=SpJpPo8O)g3=_C-Ov43CUW{6-CD~ikqbPEafnABwN*w@LGNMruFmZw<3tHqd$PVY~!wQ$&90tXv;+}{r!6jOT4+9T~Prap73 z=W~KUM@R9B=b9zbj->)tu-p!`Vvj9#wlS8C9ugk^ut6|C0p9*= zMd$U|Ve*_>`!Ow5EjY2#ncd=%QPFhHa0nEeU!;yVVB9-ixgs9owS&lW9=Y=H8cZ6$ z4oPiTNJ%W)l#GpXPQ^9rk&w0itFsLyVuI8ks38xl=q0fp>Js|Zp-s*iR<$Y$YX})6 zWa=%IlP!lgmuH@Ft;R|@&#f6MI2BTKx+wMzD~hm>_oY`unz;nK$Iqm@g-TkhI#1bj z+>7qfS67qKQEzJ!gyhn4cg69AAhesuE8Qv<2Y;_rBH>34&l|i9Q3Dy@l6i&n{PURk z4KK&t5->_U>5|J0$J(2fCTD%7IM17h4&>|uQ&0~`=xPriGqBG~uL3Fh;=KeF*CyrU zAhM%wY$DSo5!TXoo_ViJ;K|oL64SZe1R%YWL>Eg`2p2SU&NzJh5Wgw`@ZINI;#!qi zuJAa0rWQ?@1Bur>7j{b=TcuQB_jQ^zIn}I>VMk;Jr}GSZr#VJV z837dxqaBy#D#H>FaZzP0WdnIhPEH`378=RX-*3s^6!NCf4xJ@pG~clu#KLgC;$=;7 z%{MXi{`hi|WWWy3f~=bzc|-K^bKM1U{fpAez_PQ_le(K1z*}}Rj`)h(J7xpQFT`W4 z&QfMGvD|zuRF{hBYrK=s0qfArud5Te7Z~eEx zbWXwjWhW;65T`zlN0n!Uy$d-Hp0`iqq@H>6eEw40^H~SL#?UkaBHPzy21?5(>SP!k zb^aHb_IHN(wJKjm!*St-%#!nF_LBFj#McZ79o)ao?JhDBYT^$d7;|3&tM(CKuzN{s z{U;TLU|p%3$xnE;2SP~!6l_}!vm#VjlQH}++5_93!O$s=q8%$jM{LeH&NCHn$3yd$ zm}3F+RgN&lwof0F*d3y)m5K@nwrf?dOg@k2E?S6sQ+ivnSu~kYGL{W+ppIlq?8){z z1P|oQ?_Y}6Gn5CynRX6;eNDYC%@lxmZJfS%o~I?|a6W%b7RPbrn@-2_z0-_wh}>y* zgNa{z{DiK>no+ydOYtI(`&b`Gwp2Ef4yp!USdVLTK}M(69qVGZw?esmny4Bgb*~+Q z9k&;8k_$@XKqFIeW%*#A_wG!|^QMRKwkva%Woe@*?*&$aQK<7ePm>PRo&NUAV=}|I zxVrm&DSDC;EAoKaTOVDAUYK5pe=D#CmI$ z&J}k@2Ke$3`VHvki)PT-)jaG)=il1~^WKLk@QFbs6N}ku1Y| zLm{AROx`QvG#+Lxj@Y)I;BSvPD(kQMi$$`M0figkAv?FLY(p~7Ze4pK<^mlEkol1e z8zb{iEhqLnvZqi?7^6VahXK1+6huexj7x4%s%STuK3{(X{&Nbvr`nSt4s(Gbbi2n|xi|d%_{JY6o5Q0ezB? 
z9s`~IRNA&lKBjvVz7}jqJZ`=jUft$ofCqYXUjnPvjxp>#0vMQ+au&Obu>vhh8V<`g z!X(=RQio0O8(Y8{>88a(rw%@6&@iY1k`|h7pm;oqFs?+8y9CCNLT=>Xd}G(F1%cZk zz1kp;pS)9@S5K;5D}6;6e_>8g_Y7r_jO-!M^sE6YW_&gifp$Bj4i>yW{AIi*1?yTr z7$--c93h4Ly^D-n@w+%+7%(~8dyv}8RxT|p()d?>m@gVg>&J~Kh2Tg#Q!0z!MM{j5 zsOwCXct1tUQ2=HsrJ&I<(L9CQw5R_C`fe@( zBIT|UE>SfVM<*3`bKiy{bo6ZEfOn}jn8*gQCk)kz!Atv+i)MwL#YVt&%|`Uz75vI{ zpSMNtXH<1a`;;@Zl8-PRmTELKSrbA!-q5|-#z%v~5i$UZiy^J%#KySGmP~jhh~as) z9%ZZ_4-`>55YcQO^dyd??*AcNRBh@Gh6G|BTvUEjn;@>G%N$uNGyf^fOF?;e-L-!d z?JHBsaK}Q6DCtR0;rnCdLZSWYKcdah;6-p?40d8a+)09{El9ow;P!A)?BEJXNT!b~ z1%?oaQb4F1M01U@t};^tSEI=-<^2@oFFK2e*a2FvgaGubP*gzz{^fV&g1(NlKJO*V zRNy{kc6`N273(6sYvb*!rBSgzzzom&ZCbo^zqRu6h7$D>CxRtQpGTUvPij)u zk?B7~#1*qpS&l5EkJ#L6DO=Q>W5h8_uor-^zEX{36A2bp!3ug4PEn6DU;E|T)RB11 z$j7)o74-l5tF(phNDw4yN!KW)uWC^g@}}eB9rSQd!Cy3FnD{);LNnPo|pEAl%E?6Y-FkNK~Nd$ERrOJyk`0YT~X z{10Re58uQLm+sE2L2kLQ629s9*Wvg*c;F{^u`18+FJ&;w(2)xSSpySnelhluYFPhe-4{2i6P2x01cIBr+?6iwGbh#xq`MreJgJb=1zElN( zlZKL$yl`;+_j|UeE5r}~^k1w42e>BBsj|$kL9(WkEK*H}eUkxGVv^~XQ=zji+thZ6 zb@(L)D68&{5ttBchf_EsoP4aULGZGy6ZGVrG2zi%_^kOa+Yb$bqD>2D`q@(bP}1wO z!!zvxpvwP9t6aM+SYYF24|CgLD-WVp59maSRR}|*Fwi#fk6=xleZ7usLW0q>N1wzC zG?tM|RkuSK@0MDQYlJ=-^i-i?$l#-9z3Ay1b;LgTOp7*k7MwU3^EIck_+$j|bc;pbXgbMYTT3H-hz8PtWO-`@B0JQ7 z7kX-AgC{f;xw;3dD3=1`k>5)DG5pGr?WW_Ip?7M22ZTg*}PLCOLQHgIB(Z%K|ZWDlKn} zMxJoZzv~}HDXK6Qy4iqi5r&oeCj9TO`ID=!I#tjYd8B-E|+TtiujOMKiXQR zi_mpv^pG%`XZ!6RuWJ3)`CTZAqWbTH|6eB{!6-4PVHl}jOpRL2but2y%h?20JA_1k zO*kl&bFnT6Iw4V(ao<})c(cWxb~8Lb9gZ}@83SB9 z-jy3Y=Xr1F4`S5bF@L@|EwDcs-Ja8>NQU;b+uw5D;?c*wBU0SHRhZhaXrlt^JzMoF<1JeYx>p^&ix9_z>R8->wu z2SP}=wgr&moqEUg?(?0_sw=$eA+A4{EMIl)=DGkbvD|bG=1sfV3&3-H3qSg7hJCuy z%GP;N#KGt2$h-+iA*G39VA&=&LWP*(1a~rP1ux2bImE>scm(Vnq1wNoLB6$x2uT0l z^PXvuk-_Iu>ZW>4Bz_r0{MYZ~Q%r)%U`Y$oH}*Wk#ht~_1^hPQ1( zr01a9^?c*fU3?}pPAy{xq@Byo=3BGwJzg`u3_QgaQ@tZzLo}rgw|dJbFulVz(JVhn zYMCj(2GZ5$tl!wFx}5LJ8tf&n^I5)*@@sj*I;apqf!`@O!Q~;d%~%5>pJ}=7U5YR( zD~`$p;mJ>?i9)XBTVSxrL>&I!WFs8rAqY3Ov$-ek-~JeCZ#Z6lbb&gbrI{QHbU)`; zs^F|GQCL<^7*7=ORIYgT&(2Pl-}HMt)L5%L*0re^f&lpDQK`rg$TZmR3`n1R<9+B+%#t)^FbY-iImtXMv#F&mxWSH^m%tBfmy@Z4;AX*>dLk zedSr<(L#IA+W*}$i27>ua^ zzSO7JQb@=BGwz`@4pb#d9p%Q<=ju7(Y4(V3XThR4eoY-SrZ2zvfx4Hltn-YecOFZ% z1xGmF90YFbY9r@{W;d_b(2$Y&E#__VX}r&}A(_Z~kJrDIo@sI@A(^&B;eO|zCXWoogfy0pr;5vqO7+Zq`cXjF zmcKN+p6QeUl-+Eye>YppW!N424agsVi@_NGElk`m1|^*3T^P;{!?N=%q|GM|6N`XRC;c28hPIcU)&{MGC3vEH>HI{_nRq zUd0vqwH5~*6_0AszsbgvN+hirje$A#c@pxV;vu_bAxbOneW$V_<626}H$B}!mysmq zl;xkOTCGlkw-mWyUg04dx2g4LZ#1cGHsPOYa?Nnh&sv|0E84Y6IA1}~D0OC&BM15< zDuXQWHRFsL7i~SEt^`IRVu|sLM!3XD8tv9E3=J8Sh)W=dL3khhyYgyO2B!k|^V7{i z@!9U5oAMMCC_@p;xda@CPc?ux!-0^;nblYSpd?t%XXOAe?Pi|Jw$QxV*T7BYY!pFugiuD-TE?B#&5je%kD8b~s)Vj}NrtrPUS-gngQ*_jG5pqxb<*DXHO<_|#uGTqS^>k}@=?>Xmls^*DW15$C)S!(xRuSKlLkw zcZnZW5@^3WG6=Z0e9?5#_gEuXoi?G9?F8qc)B<9X1tV>+d90wF-A@ixV9R)1eD2&C zO-o9cTpsqP<~Bi95VjZ(*B62w!{fGYx(7}KJ+-ivbFVtB#h0h?e!2uyyBF{$W?q^o_08Pe zUAWuMR}9lhi>6Lyng$j6&D1v1Er`#X=)-iv8FSm%2l)Kpq^Ix^#+EPs!Eh@t$7bC2 zrf-F=Es#Q1=2F!a`VxFCQSF}Sgq!mf(DY5HFARhBM`hXUuup|M5-@4tQ;H)}!ybNH z0iu3O&|R71;MNh(!6fn1&r!FZtzj_VnBA-z-lApH+`SGCV1(kXT$9#$Kj@!yvu-1q zzPzpI5$McKb%{gf8Z0LWr`er%<;z72ND>h&(8GdrL>~u4bX;m;Zq)qT3b9!P$lRIv~&(|o9%Bd;pxeXTE9AwA#xTH ziC5Qocj>2rrh{r1s_GUt7)A7YSkuix|Lapj6(U8{4-d6&MTx$t1|MEqtd(}Q_k5M@ zi<`#lp|#gC^;t#7QZ&7AfyzXvT$4@o?(DA}S<(Kc=+ZL*QC44v-|{nqGnD~WC4Fx)0qG&!{ccm4 z=@+yog-T7jz3Xe_77<|XMF53_#M{z9K+o&zQX{N@YPwPyQsaP^!zM|>?B^F@=j!sP zMF*-2X_0i-I5%t&R+D(dU|Q?mfdIfjC+uCK2tNX6ebX$xvSC*E#t4gfBGndlx@CE3 z!rxRX%qTau%Z5JJz?oWSu{>vw*FS9pG^wI+S5bAS@-v0sRg%&qrK$of*j}LZfSkmo 
z1;NdXDpOm#kt|A+XP2n}6?|qe4hKUn=D}Ff8X~TSesKs^&!gk87VC#q`4kaNiyhoe z2D{216R76IJ@rbKMfGJrJ!0(DjsdD6NnD-?s5>~t>gqUVuj}$!>lTVKi>^6z-6Zg* zVdy9dzSy87^~u8E^h_-InuXW8=(B2=*UL`(WnZY7=Ja!EWTY^;ra`Y9`*N5kP(fQFP-CcXM-6oY&hpZ78bbZz8!OdEiVCIH(5V1}sgFkB7Zuma zQZ%M5>$n`%*ynm=pTH2Ykeu6KFO`ueyNWh%eyZ0O3gwOr*l9$!~FMJNC!=Jhyp zDcDgQsPk#Y)ij1oPm_JGx;-urLKMWe*Bd=8SYmunIncn3Bp%gEvL8UqEPRwg1ky?| z;&9)3#lL_P@XW3k7FyP4QS{~uw(==JSv^c;s1)UCM(1Ybkjqur1ReQfSMV!lB|lo7 zLe+M{l3_6IzZ%}ijPz=>>_oxKuSYQA8U#z3&;7z4fvk){mzXEWssv?MoftcNgotIo z;Y&(KUYL>75sBhREGK!qRJ(oEWAp{``&IIzr~c5RLYxM-)C_2?BJG4j3BXquO1ej* z(d2`a5?9Z6RV>q#Gi$DL^QrUKqLz#%OY3K zM4@VqTu=Uk-Nqo6WV7;H-5hgM?=#zC+{b(odZog9WtyRFFg2 zV*bTxdLUml<%zuCQy_^5QDPh1iy&@#AB)xStuw_oTdSgx3hPt-`J=TDauv`AXEKcna@N4i4r2~v|j=qg~a2AajD6PuY8NZ>n{ zJzBIAw0b@urrU}SWbd{kfijbEl$~@~*ZuWbcoVra+Ha{u1TGZLdNjzudeB8t_jHHx zXw5y-O-*nc67}P+Moz4i?Mhj&pE?Vr1)G~EaT-<~0c_GmzOS$2l1uzSgq4s`a!lkM zofI;7PsRhLyYLYvpqFexT|$D1Zo+nE%9y!U;ubE|!}12=akp0?#F)pEP~6rjidgIlsIBcP*x8CBjCk0>Mz(ot90Lx> z1^XrCy(t47CkiT$beHlk{0~-fF7Y5Fz4$-tNmz@~Z)$goG=e)rON(?Jg@*wEcBJAN zv|UO1Fl;p_?Gj0dU9X*KBmopdhGv=s+&6S-TsKM}@l0BRefoKYF1y>G0-holc*%BL z$a7DGl7qG=@njvy<1Ch(eN4gtm+e6kEdwm4eg|u`_hzQCEuFXHFSI(+KZoGS5RH2! z)#uV{M7t9h)!mH=;7;b?2gq^@3RH<$=Vo(d{ zUQCWsl0vJ=ID=}tVT2d!w$HKF_>4dIr{pF@Eqmjk+CG<0jwZmp`z$lGmucbCjhE}( zg>5<{lnI-h>VBs4<;8?LEA@@)w6TZA=f$qD%s0D&=rGr|4Es7=Ea;3LA(Qk=uPng~ zrSCaliV+HE-$|aOdg;kPHwTs4t0{qg>PrtJCScWrBV#auI100saxD?*My^P!eHfx= zRoAFb!PdhzPreD`yw+mLP|ktK^CS~N!E?Wsb`RIA)^!z#I!CGtGab+exqRAxgUv3P z7f{#B4pe$rnD`M`&!PpyPgnAAi)Vm+e>VSIv1Nk#{^0{vkk}7?#dPd*tJca}j#ODU zF;8RNv}~&%pmZ<6PeR?0)KDmHc*$&SCuFPI;TE^k{A%|=3_!-U`6F72-YXf6#!x7L z19+WRB?+%#j;nXCR`8^pBj_G$ikt&PBKWrw3G_Q#-^{-K%#7x|YB~jsTX>@hOHa9wM1UZb` zjS!6}>n6T1_MjvTCaZw9l8S&nA^m0L<&pHXTR!!POapY3d?t1Zm_0+)%}HquaY?Ve zCtLk33q8QoNQWv0h>Oq{=+DPFLOdmHr&F5y|G|B9sAJw;EG=ESjdM-wYyxQPbDn;d zqF_nbcJAStL~g6Lw|E2)yuAoMqY_KlO@)4!w5$YL4!)3<%?J)Xnj_P{Agf@dX19nX zGjg;njp*=O!$iO>gm(A9-AxyXT?AV{;6t5UcJ#EO;47IA((hn{s_0J$6&L{ud;wRz(CtZ19RqtX^@p5}A_X{9}}0{2h$o{84zRw`i!CHI=_|{_#(c_r~~V_OVw; zRsQ>L)F3mCb`eGXS7&@ff1oQywuzemG%o~L4X7nHxG?tr20l{wgXTZ3Q_CLxZ&v)X zQbs_*_f=}@{{p*#w!F`O?gvoLgG&D}Fdq_qm=BWU|8s-THJMYwr6ICC-IIcabrXn@ zi^1sr6-*GFfK*vuPrlFPI?zpHqfS{D_n+aoSb?yh>A&(JIS>&Lh;3}>z>AQPA!6Zt zr%lFWVRY7a#A)RYe+Uc5wzu>4$};adI7ldJ!hi?>LAcX@*>5n)C#iu1e7fTqaiF_f zYrvI=9*|jt4<^aFy1Gh1f1!6oR~je#@7a4m{9`iYh!<*3b-Z$=FAHn`wKlFd>3ZkL< ztMUJNYKo{o3&G|ih3|hYLfs!AxGpm6-@pU^)0#XU=nbVvJ)# zudgp1#V0XkB>Je1-yjN)^Hui|_co|!oGl{(n?o^rJiO6fckmB8ycK?H;*TNsY zlb^hhuEJ^8-Fdv^hq7hnmu_o2zCE2@pjz;2H?1G#$c)TA=b5C9VB>PQ#es-K;zx?w z(g(k`BTFC5=cC+)n<7NzTohu#M*hklT*fQ3n6?#b^`2x%nehOioEt?x=f;t<^dS4a z!KxE-aixKn_l1txEM?nMLCS#eIB%KF_h+qsD^|lq%*+9_T5UZLtK)UhbQEaNoI}1L z;i5-^x|`XgigjB!HxG8WgN=6EWRD57G8*qMRJZ8t$fTT3FC_5ToLtBZ^3D_3-|?mr znCt>PVB25FEm?WXp(>hXi09Vp8wF&NRX;zT(!Sb1RE^pJeUeCA;2R`+pLFtzyUSyT z6ERJPGVFGz5_dUKkJ5S9XS1h*6bN^ww*4-(n@zdbGt8wMvR2h!R=rr`+)5h^CUwp) z7q7nWT=hb3_nT}O9W^|#`YN{mx>0ovu1iB;O@%lL)}*(5!PNPavZl#TbJB-u~=JOVGuv8hfT=7;cWe_2|1lFpM<45Aa%P4}Y1` zPjNDU>2|XU>Sy}l;LXtH0n^)%j9qvWGrp2m}K`gis-^D zbq)w3E=!?zg^l>aqnJf+cmPeQRg5-Up|Q^!Dt=UY7xm7S-PDVF_;sBc=Kpl{)lqRg z&Dwz=0RkjJf&@vhKyY_=3GVLh5FCONERe+(*+s(Q?z#aI+}#Q8uym@xR^zv^p<5zOQpBgN?X8sL z$Ae7;e4x0q0D+_>(5fSd;m702uFU=<-TMi&{M<^FAzPYLVN!w@K=`WIXchn->6@Ua zR^~>GosoRaVT~2`Gf_Rre@XA~X}9IoExf|Wf7hyoH%N59FR0cEMn?Mq?SYQZ!pp1v z!)`w$@WvyBqhP%&i^q|-wU6u^24km@;LFIko|r099q8+e28jeHK0cyomFcL=>WM-& z`&8A|yCaL`lRuPBr?Z(tJaVo^5pklomi+c~Q_LWK`Ch%KU6Ab{dErab~nn*yq@HyWt zcb)B+gl?sg`f8z4Iz1*Prr@A=Tzst7jdqzq>e2CW>XUQtCbFXb9JVf{%`g@S(yFz> 
z+QzJl>WRh!G&S*|qoYrB++7uW6e@0=oKQN#H~WrZ@$K_s^tV8V?~4tN>a*qgSk3_( zzp=1im{pKjf`xoone>ql&miy~pGyRf(a+h|R?brH`%k=_Hua_1iap}Fhw)^Zdh-=B z?Q*1blS$?Ud-stzTY$A_4E#M+Z6@7~OQ=T% zCZ;CTwX-nf@}$5i-m}Rir^Gl*RV(A-@F^GQxjRu|_^6fA*ELM4$ygPKkS~L4DFY5Ig;xzSiP;_FLn9;W^N9h&cqQ9Q#erB+2$2D>w6?2n`Kl+ydn zl!pPn8nV48aPN*~uT+GvJD22ZuL@?c)`0AYEi9Ijz&aiD*3KxdeJfjJU3d1HCA5BD z>*Mj!Vaa6lY)4=0l9#iS1aenB!`cTbu8p_fP|k7KV!z#IJ35|BHpCOUIP><&jTHqJ z?`w%IhVaCf689^Ic8!sg8F~${Wpi46RxJ>7gCy42i$C$_G9ZDCK;hb4P_e{A>-pSe4tHGN z;en}6@ij%87tPwnpAuKF$$cVrVbcL`u6J2T&VK-L59P***3zl(P|=?
PRXcf+TwXz$0(Yxn3eVg#EKAXWAFTM+Ql^q|1Q>K>FVk#UMbM;x) zU|}I)@XVK&$T>dER?Kl9cvx1zCz?yw*8J3)2<_>CPeDSY>_k=X!5lVCs2}!!XP|0* z+2RSVY|$K%#E=pH!!cK4*>E)dFk*zp#T)-UF5a{?et)m7$zuw3g-B}I1@NAny$sWH zuoV{KvXYkzAPFC57%SFDW2gr&tdDRSN^MEkA>VOMdvlU?|5cZMw;xy1Kgb3mn4l zn{)g5P=zh?V?4T#PdeISe_4KTKbW>}9oFl{KX$)3AewB#+&Ec+nzk&Q{Ty0Y_%`r> zeEPzUOUbE~yo-7hG>y~50&p7(JgN7ZKxHz8y!1Yq zH3oWwCSi(jwhK43q81ei-*BXYI-oX3Rg+;Yb;WnP#`gCS16~<-yo=t~dmPh`Q@r)Q z>%tA`mfb%~m;@(pqrdruqTcNH`PDJ*ozY~9_;lJ&>P(Genl!nHWEC7k)^u14IB25F z>e&a-N~*Qwf&z^~>TW5W%>y>ESd8ps4fz%W-p4Pp;i+|3US8XuFw)G$$Bp^_+ zXdbq0af)m6Q(M_cDO%Moy<|!4!7UBJgqN}P8qgn{0j{uxko~6cS0GJ%6%!gzYO=rH`V;+ z?M-o5`g3m^P0A58j10ExLzINa{%??=14V|)C=EFmA>?7MZ5~>8v#|dW{|HDRr&O!t z9h-jw*^J#%BlIKZ#Sp6%5BghG&jtK&X`R?K>*v+w8baW>OMt|2<` zWh%LX^mPW@tS3ZwVYD1utCHV*~~-kd)vuHsF*pRmka zRU|ORN`B zbM|a@MR?ox+1aZt-4uVy-_pI~E}&xO2VttzOSfDUCuEVMsEX}!z6RyxIk_tTGD#7} z8bW$D-p-P6?5)zid3NSu8F*$i4@BjgXm;PS+h4r+x3}U2(=Y%WdB5yoQEowU1#4cH z!*5v^-g?^`Z8z#|gIB^U{wg!$(BG_3qyt^c;~1t?BIg^y;%q<~S1@g+>13 zCWu-)zz}R~ldG8e8LNy&!@6clCr|O3xF|dtk5T3J@j0f}I)XivRTIL!Jq*-bF>Uu% zf0!*-4<7(=h3cIww|r73x-hF?lP_-KOEaCC;Gl<6r&b?E(Z3-RY{X=u6S@_V~vIe>AR##{R_vZM<*WA0h%PAf< z#{*;rj3s6)<$f5Klsd#?@s-r5G7*_wwSrK@Pe}DLg#?2JxIoko0hdSSA8d*b>1Icz zO0_D>>o@u>H+xl-o2xYTCkx{j<~bOvhUm%0Ur1aYuN4J5&Xvme!MD^GFOG%2eB=uZ zeYI*nB1~_3!A}XBJ@6TNA|_(h>L133}s zQRRVOnT%D40_P-eiRb5Yr81_ z76h%&OMEO3b?#n-84_$O zn)>OoJ+lG48eB*0yxbmX(B$x%%}8rnGuVVu%aXVZ2~BZ-$VAzPSt5ggY#nV$%YCO} z_*j-J(sw{czl4cT331R~i%BPT`C??oITYl|RYhiPQQ=5o;%D#+jXCAcO41U0(bZY` zp?SAv^1QLaLGOWdjBLG5SiM^j-%31u`JlC0Nw!qZ;?Oq4gmDOkPlK`(!0qZ{D#PZdS&v()eKxvXwk-;E5uq+tVCWmNN5(;EPmof7kb zQO!Hv+GrvX3he?4AH{yxjM=$z8@iioqP_B(|0@+1_L5B7mCDr{A3^mK3+|-y?udd= zK#nBkE!xV$06Tvu`4(vwSRD06=9xt2?wG0C+yuu&?AH1gnTi^3LiT0~h22Ine!l=3 zK}mU{{!A(ANrOuHy19ApH$ojTYJ5wC79ZQ063yS@EE5x60XY-aWz{t^mDcq1^a*L4 zPvS1Z5@-U^W|OE>c}nS3^UC9{tLK{TIQ#VcJf+&&E_MKO8rdl%lzKc_2XX7d)7D{hsQ;pO#f%`yi~Br?p~1+RS0cv}rKq`85TV4M!Q%)CV}I+nU`ZSx;UH*%H9Sg6&g&w5b_-H-soErT-zqs7?b0MQ# zw(5L}1lz8wocfv1Q3*wsRJ<*M$mzaJjh1WVX*vO3!~3Q-Zp1~r*AW44rxO^U7h^vi zAGYx2PsSmD0D|4ES;hdUDq2q#$E78DhjnBUK9dO)L4on>NU-5_w`vKjH4CrfTW|j+ zb*Z3N7oS6=4jU;pT1;(m1%KL%9K^;;31}3JX5F6*a>%*=w|3^k$JflI{>;9y%qtNB2gCQR&cE? zKvGkSQfa=dZ@x8c=T@d0Wl4MZjo}+ral}kLXTUATs~`IN&w9EzbBPuuZ=kt72`U#k zi^wrw`ZpCQ?f{)Y45I=P6UXQi^P=WjCkO-@%Te~)9d(Ao&)K7TJlbfgG=yV}M!*JZYLcJuG(Fd}{m|%K4c@ zC^HI=^k>?2^h&n?UnqdJ4|31oSF z3RfO*$Iw}nHG((4V=QTW9LijdE5QD!8Jp~cDR$Hp;A9KRfphK$tE1TKxAMtTFk)Iv z(|kcJ+yc7Vtz6qMmJI}88KHhL^eEpq-E>@p8M_piNiEAfqZu?3lzKDA?2?)9yUQ_b zUhn{?3B5Ryil7+&eTIeO`JcCDFz3Zx@A)!XYbm_MO!oB#lpc~A%Td<+=_3w8LIA8B zvt(G&VA*Fi*l+$61@3L10E)_k;+Mqz6?~MpiO$*R^B_MmL>09xyPqvK$=P{oP9oxZ&+Wb8+mP92Ik(nsdhjwx z%sOARxV2aGVeX|I&dtiz)Qi=I^`+X_ZWY zaXJ_L2Bz-0xznuku821^8{ciw+}ApUL>_5p6jRVT$}eWnXgt$wTc zQI5IMXo;?<wV*Vqr*gX?8V0{`|S1nUxLCMjq12ygqT5DJ@ZngN2(F z&};~9yXW6t9C&2-q!LqP2H0H?e8SqcRq~$?#Kbad^L*Wi?1x6g0VusUoNxG)}qbcQh}KofW83pO6dm^(;+uA>|zqZ>=m(2uaPS z4`#Rb8?a%)nkn%r`B4g1$>Pv-CNr#cLO+#g9va#rN?l>vVAF3jN=8uGjBsSVh~-#M#T-s0C4%Z-^hXqOQ? 
zKC2o2K2kP&V3+CYY?28c;qB&sPWlBTrJrSx$VcjciinYMDoaSPa}If_wuKGBRq<7L z*SImh+Ev-OlK+f}?;w+SWXF_2PH6E@eTuC zZmwhr#CK=M^OQCiu)OosmWBYs0g-NA}!NXT^NZku=hX2wARt5+atx57N~Zh24_clU{45 z_V}Hh?WY}AMz@QOr&kJ#OPyEQ2oSjMSht|;GG8n`jSv=$w%34 zJD&Cnzo69w&k@CJ2af8`xxfIVnNc)TBvlLOh#d@QZ3yD#PwKqTV|%a3l~KtS&k(ii zU*3M4O?92~rtBgcSmBdL$1ivZW72o{GWUxjML8DAtDI@$$*PFj&2~TXl^UVL)A0f#~Ggl6y7EoL6Pzb~r1<+-h5GnunF zoVp4etY~v$XI&cb3K_iO?^|iZ`+L)2F*9=MN5~T;xk8WQ-z^sv`!3bB)%<>UMt8&x zq8~w&oaTu=#j3Ga*MZr&n#Jo}8@%PL<`kweQD!&E_rRk^HNUfaymniNT)<5(_|3OB zJUAcDO8O8pFpp7dGi_~-k+>>WbWd=h$eRz;5LA+h{%{UyssA&~HJk}2z9$gt@hy3F z*L=QJi{GlpuGWAhn&Oqk|5DUyY&y-0EtVkdA@}AvVsU3=c1)OG-y)|L&gq!Ci{|90 z320zyXgsM6tp<#Oy{0P|DjzymPnh?ERw`8k>dQsEo;qxZ!sMue?%BR<`0XAyExeR+ zvVDEuP37xW@c^9QXKCD*dsW`(Hlg7;aSm)84*ZCoFhy1_c5MoNXtTG=j{Nxeey6)c zGm*6_ntbzylFkS;R+!L|KFl%4Y~tYASx15KXSBU}0>lUGC0X7j(4EbUYFx3|$q||J z$7QYzkuobHc!a_ic@}R~o%_pXEhviU4Aj>Ne9_Y6 zEQtwyfIA!(k*#7@^q*%z-z^xq08Ooz-vl)q!JcmPWTDTys^+>?q9sFSd|ngoS-mid zxD z1omvj<9OyXNoC-y&g7)Zc2omuYaPZz&?X*M%~{lb`$lF-R2e2FfIMn6UQgY&9G23h z*}E)J9r3&d=}gVIiRG)-0?sVms18M-Oi3d&dp)OvR;{dJ5|d#@R;y#Xi$rDmb*zcZ zkNx3Sz{t4tg<)l8H^xdTrkxazck{Mxa-%v1Zq$FXmrGt;#pr=k&iFm&3n}MM2m`q`g^o8`vU zoetu*qDY1pY9rCmf&ySnOr0Yh^JaLGEtl&tZgUQD$%-A=ChAv~LcU*cfzd9@cRr_! zHJ0Fo>oMKG8a!ei8g@cjU!fRX4hvi>jK4-Nywg{MU5f+|3`M&uN90SZ((If1s~7fu zOW2N*shE7e5*hrm$@euEuA$U$)W^A zTW%go9!!=fu(r0APmbaEK&~`RLK@M6>8i7R@SKV@o7+8%+tR;)QI7a|=hwmQ=3PDC za0pSBJ#A3rP=~Eb#$Uq<)uB3kfPZ*zVv~7ie+>RkH9I+42+255BIedH1MAN^!2HRnoq$eo*LjHTGv|p z!BV&{tn~o~xbt~z?>b(1gjj=t+}E#ECHDe?VP&k%7b!N*ptNptn8M{gj%GV<__n9s z*8n%UdOLhJ3a6m#@Fm1e*4@Trhi8_%bM@NrR{-{J+~(5_QNLxsPPCuHO}z7rnBU^NbVl>8_C_WZa{a`(%o~avVm=g!djSVEl2zI)jwU@5 zRp8yn6}{}R(Jy;%rWU?w#do-HR61WN$fTnne;n(@BBE^slqegmz%Gq0-&PVlf}N{X zyh+Lj>mE{B9E08mi2}~ zFwUeE<9cq_$ludX=jU4R-2y*auVERehjF{dfDLC~i+2chT>- z{68XO2VFrN+h2l`*jo`zTKqpPR}V{Gf^C+j%02Gu3#9sgH$qwz#yxaDy=lhRuTcKe zLM`a1(N{M6x&FNOG*OZ^%Y9w#x2IGGWw*&qFx-6o?{T4$wEr4Z9A8?}oh5WNpMK!0 zrK$eW9Co;;gKX z!&*}Rndg5^_zev;x(G+TgWf-+_ec+1*+R(vp&*F_>49%~+CBe}BGqEIigC^SuTzk- z4*mT?UPAR{1sU=Gxawb>5M-PlfF)7?vG~7yODl_vfrZ^n^*^N71W92hW6C`L8y2Lw z)Er0!1X5*DGXE1SWak(7E1_42P3imp4)wpy`}R613`AnU!~75FBU-GY1REo%MQdBz z1LOZ^$@zPfgP+qX1wV@k&y!0$Jaad5Q_hRIB$ocK5uI<5VT1CIn=^gVNWLSFK$Wr9 zcsxR|4L@GuP0h>@VM&w!!#ZjOR6063NGWF|IJSRZW|rp76Svg`8w g-y2C&r~P_DxiLxqJYbt7@eFw>$b6Qrlr#(be=t97vH$=8 diff --git a/images/Travis_CI_fail_2.png b/images/Travis_CI_fail_2.png deleted file mode 100644 index caa406099da12215efb783b13ae780c4c052fe7d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 45660 zcmeEtWk8%gvo>02p*SrL#a#=9#ogUqx46sVF2&s)3KUx?#a)YgaW8HQ#hq{ayzhA* zIp_TT{v5brvq>h&OmZhP*G$5d6eM52B6tM@1M^y1N=yX?1|9%SUn9SO{-0raCkg}e zYTQy(R7qM?luXIV{-dQ05C%pnJUJCfP1W*^|MT^fsE91GxXhk>oIz1AiI6bhlZ=!I zmJBl8mw8-A1W6oBOhyU-dBM7t1(z5ON|1kn7Lu8g^7pR5*U3JeSRdWSAvtZk%jbSg zP0vgx56`^UutKv&;bQyM>@fHurQGBbVI*&Jh5BE^kWauwbmGp}%wu6v@bTf0v9~?x z3oIi}^m;B?k`2YyBj-w7@aj^g!1;^d;V~yiP&d9cF2ke4 z*w}cvBhT(hj>hQ`QJN^AjMA!RF>>W(uds-5&@2!h7y@N6b9r-bB*umKsnap3ua7n$jMA9`FCvK}Dg@mED7& zjAclK!`7!{uk-O!R>u?BevEGnDuf`CL-KC|w{-F<(p) zKIf=jxZl?MNDhjvCY9dyD9exG=4V4w1>0@{sdTZqJ+TqlJ2t{250)vP#Y0DKZibFc zw1ZoH!K@SJ)b{?Z*QkSrcet$EUPTXI$UIIe((jr?zRSr;XSH&n&5iqgr_q71B-{-+ zhfnovKP_JfHR&f1)2%Uxqiv0>I^1%|0TII#IX=O8lfjF0!s?N|7$n2~3{RH_FGvQ1 zMJD3=6~-y+^D=NMe8-zZe7Fye#8AeZtRY6${MEwjw|zIRdN7RK@cVs&%0jqYCu&3h z2WIABU-*aRp`!+B8XM~odG|_!?-ANU+B<2}5LAtc=Mi$?TRKs>5n=-0cJXne zj0HGvR65YM!NY~JBS<=)bs?*;N5Z)nuy zcP*1;RyjGM&@kUVeMu|=fG=t(kC&0wsC6N*^7~+E8<4 z^QLi=@u6H-r&)MgttAm&5wDOd;bXUBWn%qoA>#NkyX;{=PxE_y`xUhnlNFR^QBNFx 
zMg9PTudCv1sVas@&^>*RzB_2>7@t37YYW@!k;D z4!bFy)K*x5RwbSrZtaN=@Oa9Z8;9uGOdg4FbFXE1!9 z>35Aj?jB6am+n{#0XLGqj6NnVgJ#=?{MZyw!LoSSq*E;k6r^;z-Oyyd#je}H>DdjveFKNda?Kjb3d zA)LaM!Armq!)qdNAsGjX1lA+1m>#e$jL`2$RXd-St;`(s$5xG2vi5hdsThe4+{P~X&)wN5NZ7>K z=U6Aq?E1Aflb3xuk<)K;4GU_6>$cResOh-&Ev7Uz@X7-@Ge+cA6ZyKYfF~v$Ki!=p zctrKk?J%!BA|z#$hfs2m?2+t0o%FG6prd#r0s}vk3YXri3KoYHsTW6=^cT;4T<1t& zCkQF)es64DY`0T0!W>^jtN0^zJEe}pT8xNXfH8nFlJd0*Jv(FF@Q4{9@Npq$VS6Dr z6P%o$`2 zKS!c1sh3(0nY-jVjGXOQ!)VE;)1(2}LOQ>9M$dRec>HYj%3S_%{0#dTv>p~SdX)*X zf65sj>`*pRRZ=;1Me!PrCbjire021-9-562De3lEs$L3Sg4kUz`hJ!wqAjs7>Te}> z?YsE-@bj8C^{Cb0r&5jITgOxzO+!ly%O1PJl_fVnrp?sLr2C{f$mHrbkMaJtthSZ= z*6W$0xdp@O=V8UbTu*cak}u_~rL;Vs_|tq&7xL3YZ3SB%^jx(L9lAE`le>r7DKI5< z%CpMLVysA+DniSFGot-iv92W7ysJv4>dtv78oCZ}S#jC!x3W_#AjQxu8=h^avYpyr z`~2p#=4Xd_&D*XNmk)RCp*Nx6@9W=FsIQAT%qzz=a;60}eGQJiHxPe@ETe3`b!Lv? zip&UO`NjOYmeJC~odd3@y$f#9w{P8WDts!Q9&L8( zzvAcsJb4!Plomo#&MQL|=EjP_P5N(}+nQNcu3CI9(ay(4b8h+dL4_w*tFoS zj)IR*nMe!h-$}qe!5&G^TDOja*vB-u9qG>+OS?Zn?Q2yXkCt20Vem%*njNyf&w#Z1 zsE3NJrq%PZw{9$BuMtsQ+x2doPLua$<{ef^>HUs8Y_IMva##K8V|$}f+c7)v{aUuO zcUp$7(N1(FdL(nb2X0ue*q#%2N+@#m1sZ(9AJ4pP_BEI1rtEjtGTOtu{od!W2{4<4 zYbO@Myi(?-LiBG)hKbvZU&zOX`Pp5H@1>GgdJ-rN_Z(ibb1Ll!mp4V|zgGtnOwKVR zAQp{D{qC?g4QKnck(XD~lPV?Mm7D5Use#(?+xG%b3P><|*f49oo13Oy{rw%pA`xxq zLe8^4Yy{tFZAF$0sL2~%9!1y6omQQpcVpn&)^wt5K{Z?xpqBJUd3hLmXc`#?4i*ar z9-4xM{=$S=!XW&ShJm4menJ)FgishH=qDERPbm-Xze?c&dGP<0hPnP-QCL+}S{nMP zYU%_8+BsX;yC{5K!i0f=!On*f>JZ5@7-!0@>Pph;Vxi!qtI zt&N>Cz@4A`j~W1I`gb-9IoTgoT&(%Ywd9q^MD3k`WSq?JnOVsNUXhWJ@j01&1gMBf z{F@y5ji21Y#l-=@!U6(;m_Z!O_D<$3Y&<+XEUfG-?Cea?8cfa}b}q*5Om@x`|7zsF z+7Safn>txKxLDfTk^OGh*u>t|g`b@KcSryE`PVps?v{V{Was>EwxA5M{4QZ(V`gRf zpU7M+KmIRdzf1l__6J}8>W=SsV*qt$pp&S*tu4^bMc_~2^8L}%-wOX6=f4P*EZu=N zT4I(^N@plf0^ICe|EBt9$^S^H{U;^+`wxGm{7cDSD1Wm6P;|0{j??(J3KJr+^vd0}nU1L&6Lu8G%N12)E0q{+CE zgB!(>WX5X43tMMbW(LyEh3D*-<07B+Qc(A&kR>#`Bae6;wA>)845YJF*afNHk9W~z z6lXc~Fd@hqx$eJydJbI;L3q6&fqcGaS}G+nw<`LZ+kRC8Cw?QTp02C$?vC&vd$3^# z#M~<&`83~(<(C1y0hnI3?ISUd%gv{9eE^g0EF(K;*52;gYw_3In)!BkO;WLR;^I3( zU%!sAG`c#gc8Zq9L9WnizX>xs1ONbUYmAtHxGsU!-^F z8&rMWd4HFea)E!d;>^H{90Ty3dQYd`T5UF%Bs^=sSmvi&B#Yzfu_{CrI_qJ$O38 zmsdr@slbW#e3b&NNq<^^s$V)7cT|WD09i!H!Q5qjppeF*NwPS|sKk1cL#pJo{Cagm zABaO_KctYDa=Z;$$(tRIC1_{4J>N=5ww^sSr^Cq(k<}O~?2o1VjGH|HsH&5{X=%6F z*15eu;_1N2)~u_%8;-e^URNi>P!jLg{J`la^wU@nQesGHy@RT*|hKS6-Mt{_FX}7s9U2gLF(3?b*8C&B8sv|9tOC@f(woLJ9 zc2&!+zCqJtTf|M-Inrx>s2a)oCd7(`OqV7Y+p1YuMyZfAvC*t&i%QI+P;awFl{}Oh z-yoA3XE9kYbf$i22I($xI-g~-yi$hO3=6T)DRQ#6>btGG;Tj$^^vg|LI8PnOq>4n0 zEHL$6%x1BgPYZY2n$JZWELEXS9Xl3umFki5)v{obNN1A`i#&BM)LeUl6EA!;uPn0K zj=++CT-{~SqJ)3(8dD1#PmTRU}hx!eO&j+6!qPMnn zIjXv^bx)cd+^jvCHj;#daEUeTtlAqtdo;gwXQP%$6Ka7;8-xI5a8-#5EzUN#_N}&d zTJz3bp1Q^HfAxs*6iF-SMO9pzK> ze|}!yP${&c3fQK~0IZ?+w8w|YY7Rv$oL@eqm`Nu}F#)6LG#dSxpKrod*>6+Vw#O4o zl2HuAih6+wEQ|GV9m$8&nm!$q9>t?+6dmQ8G>@lkBihwsDPTPc(@?aDdp`a0QpGj^ zCor@IRajUi>6CN-!NXcQXQTn0rl4>`K_Q!`FgXhull)VO{dhrCwb4vK0=#AtF9;$N zg~xTco^hF3Hx79!SRF5$GIAe_eW>MBb+r@6B zy)rqgnDj%+n^D29&i+(L#}0jEGgDxB2CuvJN2INDmX?9R#1cy25DvS^o>4$K(bmZ~ z^CHdMry9reH~j0T3eg8=9!usjQ%%mhMdrCxz@FmKMH4l!4gUVzJwuzFVAnJ*t+w+n zQ(P|f0~x*=FX5N^=QIw*hHVw9NZAs!b#{{km}Yu$Y4wikvyzHTufjRIlVB z&nlj+pkB>FzdbJ;GH=Bg@AgHp$l-laf8ADv(dk)9N> zA85o3s!a$|0M4z#Cl@#vFmUum;wWX01T{OGpRINH8G^_+UMLlbXdkC=8c(r!f;5V7 zHmJOxyWrcR!)|AZTV_zmE0@)Yuk3u<WO0O!ypqcvIM&B`-Q4)K-EvOgEo6dTx`3UDt zVb2WGc4;nQ=Oe-E<~1KN$m2~tnfXF47(-C-Tpfc(bLEjx*srj+0zDvKCLC`{kXlgT zv)CJ+EZBeBIjB#k69rUStS66`KxPA{U+?&KNbWP91FYs6NpFS=0ZCMm z3pnVsPn=2YAy=zjogh#Dygr&}gu-k<|eNA$Vb?tk6ZMGU1GSzNsu3GEE zYKxiO{18c!jv)CS>-hx5ENEh0~G8Wjwrtm7Y=ck3Ael3 
zKpNs?C{e32Y4PP_$?lu^HRfTkV@u21D9A!y9kodUh-?61y_lc))$axAxvSe=zKy&x zmyD}PI@D`eT^leQnn8ioi@(}QeMCZ+?GUc#3Vm>|%F%GP{iP>Y`hq^sJNHZk83(qm4~*PPXs`L)BtcTPv+iHps8+B2Y#vvuymor;xK5A2pY zxM1P_fp`jOeolXc6IgegqZL1?lV|nU!C<{_!c!&kg@<80tzoewbNAkl<1;O<>LA~gwZjHyG zg8mf<-cy==TLmWI=U1!a7Xj$?@e)>Iq|735+g4BTpJ!X_4>8Zl5!d2;$_n%D5_OD& zis*^e&q>zU!=Kd3gP3u8%}5XrlwSwGxqaXNB-%>$y{wLf{DovxUJn)$B_@b zl3byD&Ii_OqoyUCrB(b1Mpf(&{vlw_K*7jUx9YLv>sir59_GPx!(k#}GkP_-$t6ys zzxacCaC>UbYqs&}zWH&c!a$vX)4Grs_v6+@sLIU;o7Ekq_KsUeRye9o@|Wc^Q0+iw ziZZ;e9B27M9!xT$hSsyNhqIfs3wIN+XQl1ClsQ6jm1^vh%wR|1ShAE(`5LJNFuPA5 zf9zJoc_~@EpH^_}GLW+zcvwGGq$)X>!&Ke_2`Zy_IT%M$IKEmW+QM}<8TQ&$A_|O7 z$fE-yo3ouK!;3a-F$t$s%#l0kD9!;TmMA1XbKZ;1+RxMOCUbg@jTj!g`eS{w>5~ad zRLFWSo_c%i@2m3Fo2Tt$V6t#10#EnA@3dV^{mmJ9$g9$^W9hI>KHJ9TTnqafjvD#b z)WWPcTa4fs(AYV2jnbUG>IR!@xnI3}mHF^q9)R9M-u)lx9WDdU88hX3 zT_{?baXT`3wL<9!3){~3K2nKyt_uZI(PXFN4Uf$XK#Cd?DVrZh9NNXp&Y29d%3Tj0 z$=6~Zk9A@ms!1*%cf7j3v4yGll0jBm@LGehLOf?RPGpdAj8L2L()smzad0};{Xv{Yy9=0EdhD49&$_6-22ZAB#>P78U|o|K0$ zC6=NPk$IgMsD`9|bt58T|8TV3SEDS#5=W8ga{^uj5qPizbO^3h$`k6I;^- zjfWf>F5D`qx#0sj@dk4^zqui?E{%WdMP@JnIBdfy+DD9?XHnF~gcCFDIA;?SPGi(6 z_N=H2x36+!E`{v5VR1;HN*g*Sjm z-ge=iYBmZP0P)+ZB#pyp-=?kV^EUEs6)Aj>iPkQtp;MvM6c(`>7+G*Wa+UP5n7A<| zE~*Ssahqi{v{?&Fp351tY;oKSbvlXKs2-n@c=Gl*9zm zCbI`lT10(5S^cJ&YOh{v)vl_4YHrp)wo+qxu2K59+M-f;#{ujpZZ5|E_EEMyuYh%rahgJrp2n31^)5;$J3hJ?ZQw&s4#VUndrI(d!iCKA6lX^#QbrcX};(760T zSb;o33+#bL{O%f_ha71S?TngbPn|iBNIl-Vm)kn&VF(6@aPPo-Mh5=sBPg(wmaO(L zKOu~ZV^v1A8|6Sc8)(1Ai~N##XZiI}m&r}&#JhMr6F3a* zcToo)XRo+CAIw&o!VcfTVF<0?1-zx*S?x?7&NfqqWhXmj2?O@hSI4cR6T=&xq}RRH z9(HPg*!zOE$$XU7AT>sVue7VEBPgsepPJolOi7b3T`J120v}!<&Z?dFV3oz_5%}DE z7<&6$>lE-6vHa7H($?^@ft+}>c)yLgI>myZ;+^uirPcD60EK#bp6X9gBmzIo8d#jo z#*63BN%`pq8HX6LCO@8{c1yZPrIYZm87OUK^j#9~qic)5B8YIweCBl2meR67XJ&wylfP(NY_NmODX*?d-#YF7=-6F*PAk!`*~qU}daP%A zJFbl}IC{IXpVfcI+$CV`+vw0k2zPRDF;ru#9w^5%yIFZX!`Csfv+{D0CMSO6mocE9)iRKTOUzGfKM}h?ip&0+>JKI)!}fk7Glltk z6Qb4oVofJ`8G_b)sB;-IGF`Lm`y#K~bQAHT1g59&*%rZX^ur*@mtjc;{ zLCl$W#m>xjVwFiSBcU7ELV(bI$pzxd{J9BUwNyjGmN~6mGe1<8KL084RCR|o)xLvF zjfr(VqEF0UM7ZXnMO|gk2KKpNvqvhdaVSauW>l&efd(KLJ4PHDsn7PJGK3JhiT5{& znp9?%_{@6eH$iL$q4t{P=|=yeI8*ZHT3x5Stp`5Kz!l(#&dJ68locZI6x8!sE&ONM zCf;QGyS4pk;^3kPejh}5$u9!-9D;2!+fw^$lv*;r=+2uc~7ykbG-b` zu%o4Q#_zVQjtf**B7*}*X5?GWVp1XH5Y82Vbh=tkFHkzearK#C&u02(dP2Zqr_j3> zml#I~Z;GJS=@-TDU5p*V{KtEG@G6Mgt>NcsMP>cfgoa^;U2;|Gh&Sb_3^wu}%hW+J zm+w|Xv$Orq0$y3MbF6LQQJN1pC2jmHsvkTbBX0p)I~C$N`a*D88jiNrK0a6Ku$LG1?yQ(9&u1{TS=%@0XXoPPFC@ zAOg0V^x;o+%ZQPwhfy1?BHP-xX{-1#bloBbe{zE24ilJW zQMupfNHOjC^sOCe0mzruuKb-%<(hO=OgzOTH*w3_mrfNrUn^ARRnpAvv4Fvd0Vb$L z*b!(rB*=_BO8!w(M-ae-uTPSe>ZHN>-SDxw-!q) z=;!xvifP|R{ZmZE;1r3LU({3)Z&kCazMM_cJSUQ@0IWaEqZzu*P^_Lm-RMWWWhm31 z6L@HB($KJ@Pi-Ds@!D$=mY56(p>0>Opnm4ou$nEUA)dKJC1jTZxE+)&=UxrQno3Gz z0V|hV>~l57e)dP6RO)zklye2Rg~S?(ObqH*yN4J^{$Sf(r#7zS-$Qws|J>S^_@Y!Q zT1gL3YniWK^Q%SF%Zc*?T9+{OGrOqg6=^|_o+-u&4K5ziiGm@5U6m4}+ZT(N>grAr zNclQ{CXcD7t!=m7!k$c*1O{Tc5eydQ+6&(gDw16MPrEOVo$M-(k5o2)md zQ5{k*`w16jHFBF4Hy&)tXIna5v5<8U;L7&!K5#acbOi=r+6V7lQm!Tu-@r7euZ`nZ zk$>I@ut7i{P{?hV%4AIi?N8|NWAwii*%HZHzc{#SGWhVU&5K@4YAnxCBBEO`9pR54 zp~Tg08Z^LyOz}DZSgPu^)FjU{8<)cQV-L$amrc>U;GxWreeXiVGC=ngwYi9LCn*l2 z+5=8DNSzP`=f@oE;jAI;%ReD$_PQ&A!e zDTX*XJ0)U&z%jA*5Gh8q#bSy4OF0&RfW^xzAaikpLh?BEc{(W+cXr=&R8`rHq6L#4>_hrd19b?aqr!WA@ z&Suoj>JHX}&chA6?U0z|3b^o*XkKtAgjXM&#q-%Km8EEW4u1PX5!vNkS^0bW%k&?m z=J)bK_yoLGjX|>6<$(0}nznGvY9Dj(yR!&+{C*__6=1(v3@U9Yzb)yqUU7Vg-fV@Q zuay}-l!ku1BTwytGlg|dn87G%--MLA%yC1bZn~Zxy3b+l${~~@2$6^?;b=*A0j6Z1;__~Cm#EpNrgi`a(wCn7BXNsaMbrv?OUK4{4u_QmCcDQ)~%nT$w|C6 
z6u^teNWCKt@vaPLHy_(QuZ`wH)*pIEIkoU#LIDiiv?q2rJ>I#d9wWwgO$<6+@>bWq zT?&(X?oLI4sS_v!@FuEY;VsK*!&= zTpN1rafui4=s$%p2orAY){YKMh2vFxnHk|&lw$4$9a4vYmx!)YaLBn z@{$a^1qTSiM~Q;Wv2pA)$Eo>-9ondcqIkaN7Ii?KClBEBFoyF4At`KRz-MfK0tAsk zVa{I|=T~Is{U7GmWMj((AUm<^!h2=TVeAuh;RG{2Na*P2Qt^S!H(KMQ1J->WdDY9x zLuoTLru-e}0E0N=4T}7+OEj?UCS#@m^N)}%@UJB z?9DotY_#>HZo%SU+~ZB`-8xOMF*3v1A|snl9#U@BG1dB-IWK0M;?t{LSu<`Bu*Di1 zeAYpK+%;2&NIyEw`_frU&Vj&i3|)NN15u-RH@=8r_Dk3B@S^P-VHDO5r15-}J`^On zu|(eNW1oW?J`@%IR0O|O(8i;I#C;telQL1-lLe$Qz%AT;85$q#lzE25HAzRyco zoU7JYbXI)fZv^nb9| z2JO4j0>-e|WDvr0OY`sU)yO5nPg*mV{&~o!iH6?Wi3yw5qhO*e#9q^>4^0A*WJGZfp2V5*rg7B=CJuS9|nc>N9mQypk6w? z&P|UK109A`H&7DNTvgLZ7!0CiIgwLSZ1{g04GU~+#8{KD>DKb;SLWg1gv85TM=Ux! zqSX6r>|DN9YQHM*p2mW7TwDGtg^AJIYE56>zy!@F@)<+{MZELV&X75&zla$F4~}Ga zPcQ2xa`@X_`Bol=H!sahrtFU}F?~PL-Nmn^%;SAxf7zJ+5cz4UzpnW1PRjAwjmAW5 zz)u7R&GVTA(@Kn_>|PHf?Zl%)MUtnIor|Nd_F+3i^Wmz%ZnG2PTG4MYd=2Jsyl%(I z@uq$WqLMKlV*_n}Ww448wjp9n9XGf$ne5cRfrsb!Db0qJnt&HRl_npyJ*wKS?FxmN zDM-D4sDRfRHyj=&;x}FHsyKE5pYL0s2)TszSI@TbFeZa`9qPxkmgRQm{$0~Fwj1Q5 zRWG4)LEoQAZ4*$J_|Toa?)Jb}!DGv+gQ4H?D9a&{PMaSm0>W=2H3vfcn2|1~u8H}` zGaHVlb{pLuLq`m%)s$#n{W+h()UXZ8a}W~1&l-bI;Hv^G8e37^@JjnQlZU!JK993p zQG6?sfoa=sF7w3I(M>2R12fihMa!8~R2m~`u)OFfZt_MwJGMHd(Ac*~pj_^(Yq2Kf zrFzbEpNC$R6va01-@!JWvLc<81?PJb6O+b2@ZWtlRKrkMOy=|5v&eT$?$53L9Sy-r z$R(Q%T1zQnm>c@-!Rtn-lD4hWFLTG#$HBqT3Vh=8&>@$MVRW^gL3WBrEEykQ=@?BZ z?0-%QLnoCk2(FTD(HN!F&-gKQa1PcP>=ItNLrr2#XO}6t$~|B&9#5kvpRT}y3e477=jtszH^RC*d+_)LfTb)vwnyVz-M+j1nzJ#Eg$|!%6R*xz{`*a8VN(G zm?CySE8aFi=yCZ3#RLbd*#w84g2J{}+wK$IOOuygjxDLQN(r~E)#hGXsr}Y620Aps zWnNFsKg!R+TMp;Eto@DF&cw}EDZ|eO$ya5MP+(d*5cQBy*XUoiVeCLeAc-9}Q`Lx?<|zX~c2t+ZTU1Y{I*bT4KRIOU2uaa+~x@Vew}~ zckuKS6fq|Np8VZR_HS0ykhB{{-?;NImeQ%G^ zE0H$@a=Be4#vj?Xd09r;Ad7m7KdKI!tJGgL#*Z$!?bUiWIcHEZOaDdnF^KcmjT}Qs zxmO`3`JiBff2r>`^Dc(c=LOn~Mi8PEMC7sz7ePk0_Y)?tcis0)=fNG00Tl;_0-aPK z={E?&Buo{FJ3E-`y(kR&K3?vDxps9gqf>uafty|hs7pC&8r<6Z8MQ@W`mo;s`Zoh&mGT*YJIjK2xAJo6_erq zQ1H`^Ii_V|n#HsIgY27WVx1(_rod6xBdGIeYyMYav&G1ve*%1|M&G0>Ei^o;+1Us3 zfVe;Fl~;SREDKp~D?cYd30kdwK$@d>iFntBgc{F9+%kA0EqG{4E18ssV4#>T!oV}H zTDqJG=o!^*s0)M(c>%>s-P)q_;H$wnOc$XF*B6EX4<0TCwBH<@gR=Pj46aB46>}Uw zsq$nZ8#VXxT7~=jWskP^&$#q)id7QoZv<(je#g2^$%3Anteq{a*-XSzbsOxh4}U_p zSQpIi`eXVY8~nr(s5k`X`3akH!GG!oq0m!=#%Xz;I=1I5CX);iExpxdxubp#7$c=@ zGZZ?Uog&sqD-142R@7iqE`i40Sjnf*D(B#X3%+=8$MwgOG61&%3>qN)e8lBsHM)`Y z*)vklSir|yO+>;^`AkByd}D_W<$-z5*{s5F_lC10_*5-~h*M2br}egNNb4^Gtlz;Y zB++Y#F);jg@6K|IT@0mA73QdJN2^{~j4z7Rei@%7U2a&pABZoUkMt*p_js6RGr`lt ziWZBpvG)47v@+$%Kg$*NAdZ4N5mvcYE6*zVQsq}^LeYntTv@oWA+%(o)U>q!oVTY* zzc~)g9GrcAVmk@hSmERB1L8w=Al&8(=bkdzwMtD~_`*uCBb)wPqsc3jhg5nwQE5Ev zrAdhIRKH&UZg0Y;1Dm+xE(seBif-*U5|W#HvS~EYZjI+Zc#5>vVOyUWhbwy`OFT|$ zOG>txF1?v5boSc664x#L!ibwZoI2xilS>hxI>b2|yvvpCCsy6_KuAlLwi_&jG3}dM ziO#55oMZ}Xb(m5Eb4xF=(W}QaG8tgAYGq9`II~P@i1_M!&77D?sG0GDE!sW3;A<&Z z>#?Tuw{@JD=h3vQFY$n3GBF^H5#fI;7#rFCg}2=z6!8%Xmrw;R1!w7Cl!+IC6x(Oj zet)^GRwE=ufcUc>Wv}#7C$QO>TrUbk$Y|v|lS~s&{!S;Q+L|m6xbl74VVuEeY4xie z;o8TVFf<8&WUIKEqDYh-10#0c9jRK#guDv1V%ZRnG5-$%d${-}3}Tcups;Vb5!y?f z%B~*2IC|Wn0}t}svn?F&S}d>~G>$;inH_C!5R#^6Utp^}jC^vagy(=h)tV!-rh7_z z^BzDjJDkc?G>u7&E@PJe!3JVBpQ=q)kyaS=$@){&mG-b=+>x@w_|Lw`^2$I9!qVik zsFPnpLmjwk;ZB-(1z+BM>I%C&oY#lM&FN?M2#oY~5s_>K;IQaOvE1g`BXE=Ue^3|^ z|5Hl3k!S_w;t|J-KF3iJ8kbt5qQ5kApt@YDo8QJ+^{04>PgQS-N6al28*_3%K?>AK z7*SUxN)bbGPyB&Nxq|l0{G@O)Xn?_)a%82k(mE6*e_8|0 zSkfWzSK?4-M_Y43Bg(sMGA~i6>YK?T&a>6kivQ46M;8HPMr41j{3gYg_j>Q-Yo{%0 ze9b8_v1VZSN!s{^Ug#joT>ZV30g}Ye73g*^7G-fLmdV!{8Gaa!P35$gmHAlKLtIN^ z=hiMVOgH@z!P2~XiU{DOr#)0G1t!uik`3bu|5}DT72=VZuB)in?_i@Le5B?liiS;LHp2g-}JYEbohop 
zkJU~>*aomQH>>65os-87pT{~*$g6v(x=9~K_&)iEKCip0X;2FRBM&4gYk-YHAT=S# zbN!y?=A3$hhR@YZUUEjC2k$G0qz*K8SQZq0?(`J07dJW9IzmKe?b+JsSefZbOzN^y z>Dlc8dWffy&Us%fOTv$0c&ey>wcUdU_31R@)K66h@<317k zMo=+qGStUZPhiU^Ux1q5m#UwBa+#`z zc!6tL-&?LO5`lRv6U1b`%8<>_r-I6B+upzJjrg~w48*bQX&Dn<=hT z7NQB9R^6F+JIG|cFUna0GI1Yv)ku+5VC1o5Xh3id|6{9SEdDknl?GkbE>bK?RT^I_ zte=DggJfei`eMr8by59kGWTQy<_U|i)VGyZvY34W)NIJ8!wCKt* zkM#Z)PXC*yYOGV;Nj7uIy?vXvei4j><_kbllhw#3&mxSaRPQV zT;xI^M116_=A-Hx#jyj0gbiDejY2qa8mrbDEIJv!N6ZOsOTwE6HYHW&)IU{{e_=PL ztq2BMlOC(s=iL7^{-BM23ZPHh{%_;7z|1Sa*4gqf_&Y^_|L=U(TE?`$!wO`z$V!th zna=oF{wAR~q0Zm=csg(7{$i&7h7fcRzU&3R@yJR2M>|H)d<0Y`6zadI^1p?^^c#?h zhrTIlEN^Ja|?`V>f_?u}s zaTs{+QdR2LYQGr&BLy;4jJ!h~yhiDYzu&ls^Y?=a=B7Wg=^q_-!$27_UZ?&V@sANc zK__aHlx63if`tDq_<+n_`hReGBk3Oz$rkim=r}g!B9yh1Tc%9vU%tq(11nPSJsSZx;u>Bq3E&XSt^Q zhRJ|l^u_BqD|LpYbDpXT&Fw6HF9iYnx$nlBj}r1`hgZaCmb1~uYd#Fg3@HuNZKrn| z6#c(r&w(=;byIl;E4%b3_Zx+^1;N(@lq#R4RPNhA+vVVFDjA%m!>QEk92S}n+i9)b zn!nwNy1OitiV!{Og-s`Dc>39kVV(Tu=u=u%n3F#o}x?FuMx5LOUHsj)pa-ocY zE7-u$H8~2OBW)|yx5=kpVWHlqq*eV&elUx!aQN=$^*CiDhOXK1jzyu2FcCMf$_k z3G7YR&0ggL&^YbivE()8&%$)7HD4Nz46aWW{G=4JZ*{HIUaqcH&tW5x^+eWqd`XJ1 z4~tIj7}u|!gIq#EA<=S0N~f*22PR9y_rULZ3)LPiKD-(ucvD~HoiAJKFI!?s1nCN? z<;(Cw`r1M31^|G;%>506Uu?dPn8$1N$8baOv*A%Q&Q>Np+@4GK+>!2Wf=(6~?ypGWYAvUv2C@2FbiR^@Tm}tg8Flwc3*%(J#HPty zIm^A1gd#%7{BV0V#WLwrKtxtJ{YE9>)cbCo0+w;`dV4W6P;R(6S(Zu%Y9SVFF>2TA z>G)hVfi*50%B4~m(}Bm$uLJK@z*9>#j>j_LxE#f8i+haCiGI&*1<6Gsh^uj5lnc1c zyh^?=HQP`!Yt@Q@^_s}>)@4$eD`hYT)81tpyx8?q8mIw|l9w7PUh4H-K|Tl-Kk&(S zRGliwq_GA(+}+EiKP{p1IR5#?X(HJyHmmBsH9!>K6<>(TeXl>3(?Kgw@FQiJ z=i6X&v@URGxsTmv$J_Gsi5Gl&n_Ta)#9XfaMRke2`iTNB9|0{9`XHVdee328StF6FH$krPs6{+_9HBCCAr6Squ9F;OLNBsABRT2oP6)&PQWvDrS z3OFv~mFDZsL3MqVk8-(X*UxO`IjDqeI%9^7c~h#=Ax^*|(fe^dfgsPz{i@jg${a7! zRzuyY&Gh98ackR`=^UW;ALWNjwCh7N))&8U8Iq=D5B=)a$}QKRQ1}01@14T)>bAJ= zMvcujw%I0WY}+;)+eTwHwt2^m&Bk`_sIhImce|hcY`pv6JNUk{?6s?Wlx&t3uIteeMl!)ybfd@#iNt??6(- z>_%()b%a-0<_y{sK1=em1s}O&2odAKJJM^%X7w79=@x6~u_mRTw=Lhf&R5Qi$J?MB zblNHP>AUMU3BJH63^1+Fgf1eeB{RIdfxV}NWiShuz3^jdm@nB{%WEjyz(kc zt>@+|n3L1g>Z*5hSianimx=5yKOPU&V4Yd=nqG+!aw8|!<(qOJ(HDU$_UQ7YjYRmO zQobEbmpcCgXAHO26F8KFnTJXzZNcQs zzjqn@JBfzkL!Pyx4ib`PwQ=F8c~2&_%dxs5ku8HY@uN?d8iQ~0{%Da*rli()JMP1k zDo^NU&pQIPz`>3{G2>&oneB?{{%iM*vIJrsi&79t7WwqGWut8>k2<(PL1zfGW!7QS zROx-8;pom|;_z@hqT2$|CPb}fS0A4Dp2lLKgbpP7iS@iY9l_>#KlpT}I=AxWA#RJ( zxNmW2T7Wvx?ttrMK|`l_Uhg^~;K5NUzlC)tvi94{mmBuN0-d>}NI>fJ$XnWM;{yJv z@4i>>q2E!tdP712!{+jn>C)j%t)#{jC>T^aLQ^oV&^i98up4*1b39AUCbaXgMK~Ns z0kMrdnQaW$lHwMQM@JyNy7u6cc~ct}UzWpFtEnv|NU9c(%va6hm=P1QQ=wcVc=@g6 zCuoZ)d?*8IjE&)U?w1Qj#I{AzTl8UADJ3|II61cYKb&^Pe-vY z{4@N6l&d_mdZ5|Vx2W_&@y!47aV^ekGe7nHzBVYTXYAZ&axDkr;G5=er*K0&-YDWf4=I)z z+kfA74&xLW=2g*cBhVkx&nhmVO~@jXKs&;I`+URw=CRS}a5?*xx)KB1>BlBXC=&A; zxxP_58-TPzznk634EN}*EKU=6Wg1|u@<57PhVc3$)P}9=Jvrfh2MRD^2M^kja+(T z-^UyYN^W$?PfBMK+?`w_Uiflv=o5>yhTmlFw@d${%=7DIJDtIp&x53M7dqv1$b(w@ zQQRJcyDS1(C1wbm1Fmx%ksDv|VvtxD$yMiNslqW6Z(#St%)AB%VJ$WuiV|pa#aRmF zNaY78Ub%&^!4}Fup-BmvLiKrossb%z=Y$4TiqH<7e3|KlUfESw3l#0FsENRGgJaeB zVHD%()&}aUPg!vT=c@7qU!_Xj z)=Eqh$LI2nNW2nwV4Ybu!^cWUQ7jtx@y+~(A#d@KWcHW@YMtTZ+ZJiiRIorYQF3=& zkBTRuklZv9hsFMKVz~+r|J#SUEFSetc-$)OH6omYCy?44D!&4=Ts7Gm6ka1dYa;ZW z<~zO3uKz7+RUlPy-5zVC`I?Nx9TdgCq}l8g_c+Cu#0?d0v11BMLA^9Bjkkcb2#tKR zk3U|n6SGkEUBT-ihYi979|(sKf%X+0l6 zX32qo8JQ>=(@G@45Yq~qYzEdiCc9i|m5JUFpS2%-VMqOHisq3hiFqT=0ggcATU#Je zvm$^*4G#;cN@&)NC*^uYmw9D~n@KQ=+?_#S8z@~F&HmNSQnyyWvaHG)#quhtB26Zh zT}on_Ou~37-ekH+2f7f$CjP7gzl5v7?t%pjqdI!rW~wJRLBz0~T9wftHROlVd_-fp za?(K5M{~;;Z`S1#I?uahj7&O<*bxoIVjrFJy$HV%{F~Ei$sguZCs&?;gL!8sy4?Go 
zL+I`sNXZ;-!3$9{tHj#j)H_^YKXVN9yl7JlW7b8R5{KT=uBFAEA+BFA_z>KFt<9_GVvWZuIRAKS z_fTE0!-y*joJhltCP?<^xI{n47e7kHcVDxq!kP@I6d z8!iuRh2FALM69+@j?qNES+&%=XB|2MSBwe;ImgScT>}0KALe|+X6s;vg*t#s^|cU3 z!dlMO`-4LMR|BqFSl0&xlI2BFKUm>+dRXn+(;RQk5-oX~&kSs2Vr1FM-G&UI3fCJ+ z6N8PvFK|f6^NU4i1NI!}6dl_E)AhZ@;3F~wJ8SP7g|*?ANehhbUWXkladT=2wk%$_ z3gv$_T1JZysLyr~ut-9$G5ze)G}XqOpk8PR9j;Y*$}Br{{~(gd71KglEHkg>^MOf( za$JdTAss6Ue;#Rcg85SR#=?i1^Gl_D{^Rl?i;{-cbfHu^7cBm)u(~a&_EVN-N*Nz( z)D~2$%^V9xsY!C}J%5>69jokpX(wm_%!3v+4ynCL1T`19%0|svd&-o->8%M@;w~E7 z=I(Gr{*fyfvWFJTby(I-#lf)LEJVR!l7gi`u2?S!2a1r>tH7bWB{x0%ppEn$JVs{W zh?=BqAq=r5IflRVhfm8~_eIu;zU6@H2alWvUedvL@4XJG8X1~;IN%S#nqAK-uNXQT zFQkRNFCx8AsdWJ*Izs{+BFfNwiUorKIqyQfTuw8r1J0~jnS#f~3gLFH`4HZp5EOZcA{c5|A`@L2$5@Yo4dB68tJg<)C9$=V8-)IX^06DZz4n_cRw$SKI zAfwuZ%J#Os6bg`FA0Z(hNl|GZ&1S4>D+HE7kT=<;#27gSTUa=v2w1Ukp2$tua@%ILNtd6`JvOw-Vgz?}L_Y>gk|{st(?7y_ay$e4^wbo4 z>dpv;8rFfyKfOC$M_0Gl@Yt@(Y5%}ZjF&DyhS>rF#Tj-lcW8%yU8I4YmIU~766}tw zy2SfMMQn9Eu4=*W>?Ay(bxS`nV1vLgD`O5k0hHzFGIu{m=vZb- zz{iZk=kkZ_N4^8Kr^=&n=me?j5J~D0Y~YywjXx`uUKSoWIpU2AmecrInb~ZNk}r0X z?rP^7zZvs{n5|b#=9j{3Xnh;VBn&FfCuID%>3(Zfq`HYYG-sw6Wc`}nmyT`nTC#+* zs*uRp8|#HjqgGtf;h&)8FOfV`g%rSOeN0QBLW?thr;~c-ZIZzrCLAmnAKX&2vzxSj zTK%9x5lv4c%f7;L-Zrl3&^5fRX;)&#RPw3PyI!Tjy!+W^^*w#Dr-^D{2Y$f8wDfFB z`;%1?J_eW9eq*hKj7zZG7qT(Od*~!uEm1kF;Nab##R4$rhIbUm2}78_7~sAwA?vA- zn!aS2Kr-b;5Sl__GioTkPbNblfaPE6=Ef)8R2k~N>17?GsRkyLYs*X+w3R(~WmYim zj4X*tBv+_YAF(fTX4P9Pi@pMO^m&@Y2ZO?nQdYoSf&Eg+Qs0Z{jr;ZqGwKfAQN)8* zAhaU^;A%A*b$iS7!t>sh^9t-R7!}y4O~e{dSQj2qywg_}llI~XRJqQL4gz)dek&QJ z5Ws0|*kwGx367tj@t3Af175g1XAY@TZMHWLb;=B=hwk_DNVWmZpZ4#y-V|G_b2LW$ z4Fkn6^C<}FL{Q4%)0g(n&?Mn@U2o1-Mtn*>0D6H);@Ue%rgRz` zYR!sJa~$=}LI$1o_`RHv`F-P;`v+Xu2Qh>zYn|%s5dKuX84yw#5Lc$euRtzuKjgP= z^3uB64WhJ9%VfQd-YtJoN>e>X6^MSlM5y zE0xQ&Wm{LARRv)6@wE%JDTwXlju(qkRiksb4YNLC9GsJY)$JQkB&mD~(s;g0ty2yi zPxHVK=YdLq^(tLcYtk3!HFvB_Lswu-_=)?U|*AY3=$aC0wK`7Sm!=un=FEPZLY zP`O}Za=+Pdwx}Veh|lE_Vuu_N8f2}EoPLH@pR+SSY6W>(^j*!%e0sLZ{B9e zqWaULEDeW5Yl@`po(Y!thgNh1rf*ktxo%qoTgdBp0sVIOC4>c6*x5&SM8nc4Ou4^h zM%c&^NSFEq7A&kSQ7(`pNNXntGlg?-IrhH2V5089jS$ONbcn4kE4ym?e1Pxr4t8=m zR^6W9N7d7*p}rk!Tt*@S&1=R}mvfr-=5Gg-WxZx~rux&~cJP9)hAiCLKbiOth7bIE z#a6T4pIYN6x0isEsrLubvYm>}d;4p%qhiQqzOMLu6n=ETDxZ#epb~rIrbaw$vvf`F z$-t7{4~Y-tps619n%?);LFiE;P3eI%5n(yCEqZphIEAAl5_)o7@MfnP$aXz4bD@Qr z`?QWQtQgE8kOGL2c#75Wv`*^5liRaif`>0k%=Sqw-QCET3Ku2H%dlRrSL=d0!GeoY zk?w*Ug{p&le-p)uux=7C2eUa$3}X`AEDJMiP%o_+dhH7IC1f~7kbK;w)3 z+z8HDh^$UWJOT~m4G6EsM-qT8(VV04sO5G^kwgDt?9sx!k=~s~2+WZz>3Z*0DTFfH ztufgx`eU?G?`E3ND3jYc4TM5UYej9aDAfwvuMWBLGEA<3RGCTeG-dmgZyHf?!e3o(%*TWTP)`;PVBo~`6q3=*(xX)iUY=5knfF>37EHDcV)8tb7 z4@&c(C?KjeEZj%Iq`mRf8G$%4U18mz;0n_dv^+*#ZjXsb)?4}ykaNGy+zPby4c>Zl|l(FGLED|92f z&vu=H)&6|Dj0#vmf}Bn_XeNap?q6{5D-bJ-KJC#>qn~vitZIPxKjQR;lY}yh4n*Mm zC=lQ4ZAfFo$93};xpTHEZQ++1-VoLp_70H*Ev&-;KghYnV2G<41uQx|J0W-*f$bk4 zBE%euG4G>tNIe`ld80bk$)jpLnsI>>KSxj=C2(x=aoA}W^J zoeeIC+9k_h5=OL_>h_gLFm6D+a=v~0U4r3vaqp|WHd)^>v`K!-w!Al}h@+39;G1>U zuhod%LC>c;9kbsNFE)e+<=grssXCAKe6s+U2x{7yV_v4Xj2tk}{ucp(oeft%1)|m) zK73$7uDPVyrXl#oto#n5Z98{6=M$65tep;p^e#+uc=H!iK7z|mx%52Tl+r+IJ;Lql z?65l9OU{#B3XqzER<Go@A-tBsX;|sw>Lf`iIn9GQufZpO#ps2w-5x)gb+p6 z^BGpOP%k{VbILb!_2dP|_y`W!vrYlazGy9+nDFwT0-)sIp+Z6jTHn)wVGJ%GP9*9R z?s^WyV9TKtq9@@~ao9!Sa#^Uy3IP_-jf)9kvxX1WZlq4KO1LbC(ZK~UbF@FIkvfj; zI_=^gWJJGIPBTrsPr_GM?jU5Aj4604S{rj?|3!cR%JUI5tT(q=vZ?w0V7;*zDIQGZ zePq|7o83ObA)k7wyqufP#uYM*;X7vME@(;SMmU9?Z)Fck@($;`XJr_Sp^cu<$r7`u z+WLi#_%yw>QLmihsqR?o*6DPTg43_Bi_M0e2hEzpmJ%HTskA|q zqtetf(!nbPNvC^!+8oV+?Mv76MMZc@u(?I`wv*NB=#Igz)3~z>V5QD1Si;rGx$f9k 
zGl#lLx8m?p-Mzgl+z-ZFL%GB}l*9JxO*^45a&KFf-dhS|*lb2Y%;Cbcz&obK_rxU^ z6;*O7{O`oa4l$jLzl_Ela+wE38-BAJh552U2V;p?2<;^!)rk#{R>zLN7 z0A_%ho!lprbaIl>ZpE4Xz%)+DESYYCvIi2h^qL&jukvE+j6huBY$2T~QB+kPZgEJc z$*I)O{Ay9Cxl+jtrIVSdCyeLsGKY2PC>{osqPjFTqWx@PiK3fnyF<=@QL&>K^*ASn zd<8|h85#XQnK4QpCUo<8U}vaBMjHz6z(+^FM8I*DftKb8ePQTN&%mCmi%~t_(iqpG z1ql9*`CO5shJY?;TC53I%deO9bgmB@AA=tl_P5(gfbE`YOxSQ&sX%9nj0u0+Mq)ZT zaIvN|rjrc5$(8$YWbF)nYFKLjc<2REbkvE5HhC5lZwzOa^?~%epJ%tWKw`cxhFOVR zBZMu4^55cVuu0WGS%tZpa`F+>esV4>SoLN?-_3K{uOGullMA8~^4UwG(2vf-JIbO2 zc-*t|@MMA8XxjNpmhT=Bm06jOfaC+^ROT0}uSm?O1Ogb50a1@6$}9;)d(B~sA>)rG zb3>wEtZcz)ap=gZBh~u}JM0L9oZ)U@kptMiogbvm0;$v_5l$D{J&}(UpnD4uR=eDTNUN134erV$ zd5%J~zOjj;E18>zu6z0!RM8$(5bLqIZ+fNp7k$!&2)1(U!_JlZUsw-FWfunmW~$v5 z)Xn~r3jvD(aVKtly_x@osr>)v{{<-hzh)o)ZBT#w*6&d_{@c_jWsvvB2rAFq9DjjX zZsflyBwa^lh~EW4#NhIRipA0&zgdiy{Y?ZxvV*l$La{3({kQz@@Hag4NUsm|FF@!I zJ{6D-YV{3@;miLj7t92e%XdNn{*%s%<+nozQAI#u4TAr|#Y%pcA2#}7{Tn?% zy~mF|MTaChErtr-C+&su#}ySasie_bm(}-6wa)1Y<<}ryqE*qbKk{o8djj>p$-jO# zE&n3vIk3_)7_N<2&-tX_Epjej_#*H2hoMM>_$Hdk_SPkT;NWTsFI_wN%S~H_A<*fGn z8i=A5i@{$Amn;yED{!kJaoC?k{S)#dUNz=lUwce(hsTn9>);`G*dDZ4r^!tEB*t3U z#XWVj3rsP(64|ps`(%nxsVSExUwI9(yeD3L^0ib1b9m?l$| zPw+cVen|cXdTGHRfR)F8%F(m!CWIVDT3rP1%_#iW`<5KVPJ~kQdLPLwc&A8v$6~qv z_;Air007$YYcITF)T&VgWw|R^u$=Q+$VI4&<927!`0SeWo5mpE_9BW%W=N)x(Hab$ z;!B~sHKEuUf8T6(LHJwU#)D5wDVI5)$z(+AusHM^l*@+wLdyZNp3u3N$xpMO(Z8`y z6lqAoKIbvb1?H`Rqp{pTXvw+Z#O-XUWXP$>Nc|3(()j)xVAhb5&(zTPm9_L52+$b3 z!DZh*;^nDd!QFi%s_w4p9E(+@PfNG*2y5m@X?4^NE;?P>vt)%XNtXTD=)Uap8jPo0 zX}x}ZMhjZ?I(&)71-+eL(fte|pnisG{LIKyqfXo8WW{k5;t9+EC5>KO=3!UJ3mR{I zUKWkTg4Aoy2dt2G5Rz0COP20^WKam?j1`1}7vGf@s92r<-c^WwML_J<(QUGr*wl(; zxdvM19&oD9a98w_#GGZBh-a&cB0b4Y#0f7~u6kY@*p{EaTx|7UDF>}T>2G$ro@EgT z(}Jj=&X_&0e~cPcNd72lPSeFwl(_8HaR#KKM_rJ0vq2_i{!br!c#SHN1XYV=Us@co z+S|>C6kq5G+xGda)b&Eb29QSI?8(QlH`SWS5 zo3vhyL%oOU$*j9+*`DeOTlR2`k*{P`M*~be{&M|A*?tC(D~QUH*b=qD#J61tkaRk^ z4*qPTyl#|P!9^*XMN=%BJ?|@Tb3Ru40FOO%Pe^%S^>Dch>mpLD6|;7Ee_=Eh93FjR zW0(1R80;bR*LT*r+JVTTe_K!n*?|^xi+Ba2fdkYb5S+*v%Xnq_mFF^~Pm?t}3a&h6 z@5{F3Vh$)ho4hQov`*(Cx#J$=?64FgChu~hvC8_`2T;Nk)r|AYOdi*)(*pe<$EBWT zG<-GMU6S%0dNd!WBWaxsI$I1Ji{3AH%t>1bDaM)B`A<62Mo!(VPAgX&HzPH~=X? 
z;>qE62P?E1$v~RQN2qAqWp#3k1S$u&Y-@}ej#W*Hab~Z5eI5_co`VP`0cXvIx*T|8 z6K2HT5sbCj>!TElsW>CEVV~U>!Sq!jpIcMxotNM#`CF6J#q2NI}dA=x)itXzE<2IloknS68L;q~5) z6jqx*2X!mS#9>VQ!zTSHLyc8wWyQ59HAjSVn!H0nSpMOevtk<)b%^@+1OGe8}Wd#VsD@?2r7LX0ss~_5_dx z0@XOAcCfgM@a3W0zSlmq`*SfWA$yI7wSM{O8%H zw@I76b!5UqozzXu<8mSv?TQw}<8Y}LTm1 zdY*?-S@;wv+_IGpro9StujJ6lfe`M6z2mhVshwLjxhnP&H8sh06^RAk0RRWM$X-?F zYXfbZyn`x(4pCKqOw|>;<&*DxC3OEij{F%$Ln@9M<62U1gW0;Z93?NB*XFj*QtQIdCXJZ$AkG zK;N`#mU=l+?bZQlN|UfpOx9J`-!8-&${ivzI_(6+wm+{ht5oBZd<{(qJImVS;ucLS zA^IXZB(RK)zpOCDZ1d9U{h4!j3+RI=Kcs#-W%M0BB|_3~Kg?HUfg&u9U-miJ zk!Ty~Nk4otW-9kc1r7Wm)_QCd&rCGp4eB*kmFBLeK$Y;BclhJ$oMIG%U_N9-l9?b8 zC4QZ?P8O%LCK6%2Zn;rT@ZO7V+zp;K<)4*-n={O0-qj&cN*DHbx)$MgsDmEVu53+F z#2K%k{N?x-u5Yj2At$YOryN#Kb#ZhWip;8+Qpt=`z$W|SkT=JJc<V6=YN#u7{uzxl-EF9g{obyQmNZ+ zy1LEzXZ6MZ<@X9CgZ-T@FKC{m52M}jT-6K28Krc;-p_q8K4M?)Xf;A>uvxKDebL<= z&gJ!Z{>-_eh?&(aawT_cLZ@j3e`Y1;){XfI8JZAWr_L};ILy#93gIL7b+eQYsjn_R z#YZS2Y3DWW<`WZqZYK~*&Y9n6^fQ{A!_m0$OsgOdkov$aE?n~UpK~%C(4r0j_Kz%m z0}AGN+4_l}g^m?8dVc4)!djG)%07KN(Sdfu5W#(AOTZv1nDk_nR1%PvFM|uGQiLpA zekXte28Id=uZRJKArBBog%a^?r3#oXz!oUDliklee(P{#PacbtIu!?(NI7%Ab+NSt zeMlv9wf!u!N}4$)F*qm$i$K#u7v~F(#?on`39ag&^r(bmp<>11SeB4j6ag)9F0tZ= zApe6T-}FxlS6Y%*`?oXJQ6E-m)GDbQUsGraeOL~{%gt!4=`&kyV`J6`Lmqr3ho6HZ zX8Xz`p@*~iveSx-tx~6ZnrE794{CSddQD{rW9ic zo(VZZf&Sx5zI%qeaCb&C~v07 zw=^1$)2g8O+xF~HXto!a30MVNZtT482F`0C#*jeu=pxcbX|h3uX|bsSYB&A)S6l=#9Fv|#=Hogo zpEo=08aF57t*@@;XUnqCC|>X8S`*{h(1jZ{x%tnTJW+i6*6pgnalbT1q!@3p$N750 z#Osv@v^fdoGIBaS4$qIp^+~CFSd1s%vHBO+X;-Y+e&4KW$5Tsh)k*5Bmuu9j(pa4G za&I&8bkDW5CAN{JR<6#GemXY#3K>wilhyihaKoPwW)FV4=X#+{RWl(t26BuH;KIbd^B29pHHA$gMQ8b z2n9^Ysj^p=J=qFna37`sI`8|`Q z@4sTx;Qng+-!}h!)$dzdM)?tQF0??8KKze^7-C&0nHV9n%I5z(|F`S><8p0FNQ9~8 zc^>_7+yDDu!58pv`vqSLiUj{Y|8Ki(Xo5U%y#5r-(dYj@NYsxyIQl^GFDw6hT_c0| zcZV4Xhr}0*D*OLD!(f?!!h5KHyG^AdB+F0TFFu(<`V8x!QwH5&3V5eD-0sZ>0>`xD z{x%K^3V3Qiivj0{627s*-&IqC^#@L&svSu0e*V)x{(4AQ@Kk!*t3*7rXy$(y>|X}> zebrYiFm)%E!li-KZy$b}^FNQi_oo#A8IV?B4A1C)*ytcIcF8jMt5 zUA8MW72@04;AapO{s#g1$^hpE}1{TATdhWw?(35zO%Oz}+*^zkkdw5rs*3 zj|R+I_8)bR4}Lc^6nsUpHy)nBN6FUMoU!+qVd;%p@a<_l3-#%mI&OwiMAm>K(E83k zi}g_c#!g1DU?@g&OPfrTZJvysj6S^24KSI(vMBHBKpM9 zD?&ojAKukhsV+56RpV%gpDG0?0g`Yyga-vs#a-LM!eU0O3kf8d6SI&F>MhD}{Bbio zo!{Q;y!x{H3)Er!Ed$=I_KgR&Wq`Zq&IMy(YwF7qU_Wi>#>HLjQ-LubvJhdCJfw{PRY zr1-vv#xb4wW4i)4a3|_O_*Cyp8~_7lH6G^EJD9P`3KFiy@ODqG2G?*rZlr}zg@;cI zk#zcG8Uk)+>?|z-Q>+LP(NXp`y-;Jne2QB?G^!vV9@_t0bOX*hsmcyKnXmoT6gm7=R6F8PST_nOJ?Yk#xnXxBaP~W$3&9bE1tA68NjAIwzjB0W1 z@(Auvy1A&XcFcKs5b%S;M5R{NKJMx7r;*2)&oNIXr}Au4=g?<{Lvq%UYs(+Q{;PX8 zW&L(Yk3(z63Q#}mniKFFejP$L?5&rMHsbi9cWjL-!uCdcyNgJo)#!iBKP4AAGg*cf znUW$;bxTY{N^G|Y7kr7t&S0aZyj`~^Z=;8XbOf!_S=~@8Ff{QVo3X{6nc=-9CZW&* zWBf-7WO4ch#)0eD(Jl)kOWH);Uc*9w8YTa2bnjU7V$+3!jfc~OkX4-JCR@@cA`vt3 zcszd8M)Yczmqy%o^vW+^XIK&maT4h0Rl^(%lWF!y_)v-XIQ%8PR1^E8HI?R%hWyoB z#!jHu=+HioRpsxV+{6hZ-)sg%*fQ6zsA2C#tMN%(JW}n|bbOZtA5wd2g$gS}H7MqS z0&-wd18shJenY@M_pr589%>^R6z%&hwLH|Hf_L--HItm*lY)eMtm$hPc^Nkgbs4&X znI*`0BKdj@30*2AP%x98WysYg8GJ(ZIETm(Y@`p^8sSY`Ug&RuopaR3FtIJug?%)7 z8pjTaeE~aSe%U=ps)UL9o0kJAJb%6AV*NqwK?&hp)QkaYp$!_+u46MYwh)7wr5X|A z6P6k^2CSv(58)iZsy*7ZXOi9FE1OiA=tL;c_}>6wGW!!QC1!7j+j z*i}2gE8XG|{UT_m7udp>6W;1QQ}vS%MIgbRZ>P9$Q#^6`f<)A1TXbl$eY;R^?^2@A)f zWdwW*{h$EHIj@?V7mDd`nWj*rRBQI@5d1;GJIIX+_d>qbzG9K9ky!Nbo%XMNRhVS< zlhdLYnph>kN7v9<0Hdk|N-21-PGulPmKdq={6DI&N&>Q7%L(U<-S6Ir4NfiT6l*_P zl-K4QxOin2@v?F^nDE|}bs$(?+@{B%ekiKzj@w;@sSWtf63kEtkVmdF|50VqOB)>$ zi8Sh_$T>7o^B_DmZ)F9n?-ft<8&^^5JR01{6g_){vl@C+lZvwj5Qo-8yi|1X9}`yo z7NM@D%EDHLPl#;4=Q|(4VFDT({WeA)WRJ5u{GVo~nv@4Kj>wULrEo;^fd#uoE5bDV 
z3Zm2R(D!}jJg5%PiE|;obE?lBHPv=ozsMb{h^zal)8JOx)M7L9F^8UTK&Ul!CWW`- z*sIR*E1y~W^HzYt%b}_Pl&8&D$p0D@iK2-0={UavgrproCH~u_u~rdCtFug(wr>x0 zi=X&JG|j1cIs5I&YTYq~YyB=0h09i{Rp&G-`@mB22gErA-(9^f;>H5>F9?S*6&n1m zj{`kdI8X@SST;2fH^%3i64evSNao>ALkBX)f}vM*_yr@YT>pEAf=d)yA`oqIiPLOI2Kckgfi7aHWsg-K%ex{CmhlZ?OFL~pA@cqH)%W^1HAtZ$f8@G11 z8*lhfI~-1LR-iCv=MHcw&<<*SynWeII{Hzh`@7ps7P^#dBTNw7b1)ecwqG-bdEc=% zF%!#t{^Y{!lK;8Zfz^UWAm%P1r+LE zkSm7D@wo-?Jl)x}E!3{raiXKFqjvWcYr*y*?`Y}^ewMuMt4C>&ll8W5`&@XiB=(Qy zRcq8Ta%AP9+>Ui`qaMj$-S!tx+wVHwcypT0TV}pv3}>rkfpxc@sgCBx`3K+qLIq-L zBSkSXMtn<1n0GCz>S(c8b-V~z9N7)rM!?jFwz3!D+G_d7l2~ce zDq|;`^hZ{AaoH@$2fs+KD{FVNXHR=5aNu&|n_nC6FPVp3UvVaZAyi@kL_)OkGrO8) zSAkK?c!{75-xJ8wC^b)fH{A{-=yju34k6*0OEhrNoBRf7v)Z7u_I7UMo4)7T4B8YZ_lW81BXf&%~~i89@ym#A#_ zdaGs)iUv$={*Y3VwDe4n#8{xVr5P^x()`VF$ej7XZCw|ostp|qT@`PNmIvz)zPO4{ z^4Kww&|>vj8{L|R=(uiZV!A%&#~Dt&77{Mk)mR-}AEL(V62e9A19thZ1w?0Y&2O1W z^XRT4SMr>UT`l*gq)lQ|W}?4Hy9yv`vZFCIpo7z^@MM{m@}ksv^stWRmn(lvMhgXu zBz~Wi#58%^pK=KEy&fGGVS7iuDAcmtR-!}Ir-usP?WD@Mxx*7*^`jWKH)6;ZYzV(n?<4{Em#;G zX|te0z&a}_r&tfAR2T7}=;Bvz>+>}<;n4nj)$w3@ljx`6f7f)rx@5Me0-;3@k zz?F);iEX3Ye+tdx70Wn%m)vPZUrYX=HzxT*+v4s1F20b(=^nS#I}M}at$FWBD%oaY zIIrJ#uis@bkAjM*>|=JZe5F- z+6o%l>h?tH{$+1;f}f=s~U znAZu>7!nNRM>)GQEylkoP48xAf<7!x=4))wApY!bl%SV`EgPFO|8?)9M&@jNHBgVb^+yM5aa-8byelDX4xMt~#nYW{t7ulI zS3@X9#5jqZSIb>^`t(6vd#%NHOJl2B`2HO%{nc8{Ca>ENOJzg-D{jS3FBfXCk;V5- zU1OT|f@?3#veYD%GPZEZ`^Txqq(1AByZ{iaNI}m{VU)`~MyM@VQo!ET?)1DbLLBY8 zL9j);+mNhH+DUIIBCe$lM~j6J=Z$lt>(}9KqX=YuIBaEbCf(6szvzG^^IFS6W7#-@ zxZL}=Ky`r`7%z%U*1*T;c=K|oW45!f7e1U>*lRo!UDxyEtG@*dC%WQr-zODJ&M_$H`g$raWHw$1$C%d7iI97D_T567^1AU)Jk0BlO zYE>B6e7(PCPtH5OB3t+s;s_)ChBZt*7$(Vy5_}&uA0s6{(~yEp=^8*^%VL%^xI0F^ zJHoirl~NPmD&{(rfu3a91}R&mO)gd>owVOzE%1#-$L0A7(NThyJ3k>?0B5bMj`Q>) zK#|i{wZ7`}n(cAp4>FO9@x^n|4`JiG1SjIATP1k-JTB5Sfpp{dUbTAlFPFXShbf@o z<)aF(MsPrEWX`+}8l+nN;-UeGFJrQN+Z`9?2>BT2U2?P$UlQ=)a%hARQN9>)%3c1$jpZ8A?9G6|aR~MmDF3@Y?YfKP&9=`3xo_!E+dT*HB zXcxpy2Ct4(u!6B*PTDxpQ+ov|J})J2ycd$-;_u($Nb8YRuj2jBP%SUnP&> z0&aJ}hs0qw2kAw$Qbg-V7$;US5y8>uu%}K`1Om)W4yPwgc-#?^7=LM&%2#6u^UYG@ zQp7-5-U<=Bw%QI70?v_KTZL+Ehw}&*u@j&L>?pe0Yg9{_SVMKu7M5yEgUpi$`W_kN z+)N7#>IgJTBsbN)2~I}_6)`Kr6QZEUY2mlFDd9@bdVM+RI5zS=Aqo$G&v%gVWfiB} zSAEBb!$V__zuy2OVymV zU^8=!dB@{Tkn1}!){4Nx`aAD!XxWYwqEl{<^1X?VCQzfTR%ViLEJuwwBCuWhkzF=# z^gN~Y>Xy z$<}R;vu zQ%CM!Z|!hLUsCOGT~5|24lA_lzX6>rW!!g{bOl|J=xO=+wHi-K4m^;Z z_Ny*$*x$Z$9j+BFU!Uinn4GqCa(BAh0^jrRS{ZxlWe5YJD({dn=@UQ2Ih&%TkDK8>?v6F>p)?`YCmN ziPump`A1ODsf^Vdc~g9}yII>ZF9_hQMhw(*msxgl6U-E9{aBmn%4(5!%W6AEp{wV` z@-@7Lq9NFNx2I$4MdPG_YmPal!BaZ4zVUZ5r_A&xJ&y-gl{F0tX1ceGPX0wKL;H1z zvF#^9Uk8GZw&@EZYPCKP@7+%$Hf6CTV~J?5gRXNqXO+$%8j+_Jb_WfRjMAF#7H zn=bh4LQM9n<1W_+WE9`B25LPg2qw?P{NSzD(Y;pH<%i{j<$>nZd?-Kh5uoGJR`y{#wPmrmR!tuR-vuOGIbB23&z zpM&qu9K;QsIl_kyX3Uhi?g<{wx8ziCm&Vs#-os?0)BBD-&0JM(p2@kMeK_OYbCkz^ zOSM8iqvq{gzU;r_yx6jB+ueKWqzXYIex0H(y%?W&K5_8a~TUIU573<`6VMLl=l9EZ2gWRH*v!2n1J6RYz`H~lXH zBAJ4&&0OECzl)amf{#1DBb{06X(E724EB~M$*i?or8db-s~hHv0NWA^-voM06^v>W zJ95F10_bKSB`}uf?V`{af0}z+NH%)JO%q}PH0)#mA|L1DigQXzLL$m{wU+wYXHzMxv0YNH_yy7_>O0;4WH}2K}fK@3IK7!-s{Agx09$E z5K%#x;m1}b5hgcom|}duBCFTeAhkE6i_tAhcD-S)@`gICxZQH`0JutQPFWA@n%EZ{ zY1^4fS@ix&neWr>ghLl^(d$^M3srgZsky`wZ%MUrWo;=AUR4%BYU7ZM9#0`wIME*} zRadZT#MPR{d|+V43nY`V9VWJB9&|togj+3cey=E-5tXXPcw0r$NFg8)b4#Vh9~PC_0R9lf@bLgC^UGX%i?n&5~B zDR*G2g4|wfej)5K7v#%{RITY4O|C(ut0_g(gNUdwQL2V1)hq<}#s3K+Bi-B|RWW(z zjvYItd(Yl#&AN8%*riC$1t<#Ma{TxSY1z7+ip8jK!wpgt#N(%5Wtz#EFxNjCj|R(08k510*ZB0i!uCLN9Ak$EEo1dFOG9|6Za;wa@LQ$G9Z^@z=id4teM8HzixPY%V3j9fRboliLTOAe=biBBH(fz97q2 
ztdbG0zal-lb&;80e%r}Opcnnal0<^adobGk>X1RmYF|K}?bR3kzS8>?>N~HCu4L>xVq5!;n)1O% zpL(#N@=BaJn9!_g6Y1H#Yod4t2V+pQL~#{Ir%$+f%T^U9cH3>$6`5dt=g(hIKPQDD zT~1?;#^|cl`oMvMa?Bm)7|1=%gLxnJANZ2AZ2piu-T6sbuxPP7_GA~;cb6{3D(mlQ zM@$ym>1wa**>yfo&PK7%yFb@QvS!I5yvGV+Isp=8f2DlcvWmFqL6)CkQRI1l|6`u2 zf&(lZtf_e8jh-qw7;;~ud!*abos=8f$Wfy`zd74UQq2AVEa^PR0u;Qojk=6CK*#CHK8exfOv+O82o(Pa_sA&2VK`F#HAX9+hI?A0irG~#EiFML zN1rb~CPlpx5c~z#EV`RNV;QrDTu2@{OoQGoa#?Nt#S?^EDwI+(X&UqV8%g7%O%gs=dbsW!7DAlil z3~5&vT5Kp~^SKD*TfZJw?oK42#F~~Jf2D%k>edE*+`yrP^Z4o@oq_b7zvoP^e9>UFN)bCAdThb*M!mluUUV#DUKdg$W#|Vz7k+ zDzuo`ASFyVp`We$PhOHwkTG&p%e+3%N_R!N6jGRkr0R>Cgzm5X14v@xUHRurjwzD# z{qx1-_L43F(;XH)?aJz1zUc$^OMd*Rw(a<&+zDlD{Mffuk$diHAa!fsAx$4>BAMYD z$oL&5D_8{L;Ix={*T?Ta|2kWdnbr@tfIFG%Pq;gN7)2E=z zEa~_}XZdTv!i1`ezb^#uCQck0h9otx`q@VgN>)2o+;>d;viKGT5;yI$Pb5p0%pd|y zrQ*%E%D3PBD7~I_S+Vp1R3a#kb!aCgiWgH@(4~(Sp`_%$|29fBkiq*JHB?1wRKHaz z?g<^&t;MW8a;SP*M2{7KFWf_sKUir^^lH%c(MDN|%yffBQXN z`48V8FWgU|<%G&n$98QbIyxHH{;^c4TuJre{rBCguK({P%Y@{Wk~>!}`C!7kst@Yd zts{5TyBqb-LBHQGpTcrd2gGmWuvb;t2bwg7<*1(8=EtA&=YuF7ka@rTEcx^2Q~Qq| zJqCh24aDmSPdRSn%o#3JuzDdzhv(+DmV9~h$QM&5!Slm4-plx>TDRXOqec!>Mc9^F zb?#Cmb=Ye!>%(Nqq>m&5M7dhkTcj~8Y(1WNTJ;|b(0ahpbZ^NH0_$uCNtaW6VM&5a zx3EbcPsBg~^qQs*NbLp9{Vo@zK@~%W;s14?mt151SFMzv{t) z&rM=@wgk~70im^p0}`~L@Ejzf-ia~%#W10Eb6+6$w625K!uJNHSkWSi2=aL^T*&o~ z)8ztLGhThoSD}T(LCv>;{WA2~IeUh3sk!tsYxKDe6{BnSy zr2Buz-@2Uj@*T{!a$tehB9a*nD)7F0W$-|`@rLs1-ARH*%M9bQC?q zRvnP`c=0lQ;Ud+~{fEtj1xGMqN#AlUA7jLmM2GG)t=$>7e*WiVygGE*pO>AGj-Tw2 zZm=FyE)=5TP#B9+qeQ4D#z&W3h{OYwESW?q;*Wlf`#bmDgAsN9`7yb$8aBl1016_}RtJD7D7v+r>d6i{l z?uOF}!%)Y0bP%;p91%-}5TbK1Ty*@IpZAf1I~!b~DBjAkPUNul@Oi}NxXeNNJnVDg z?{D9!6J+3zc}Pak2idZ!g1j(3rxz6G{!{m16yJ4Ul^(E34&xr8M8l;@C*pWz#(^YJ z(U!Opxw6W-6KABj*I)rF&}EXW>{=EG4vCu|1*4vV$t0CeWKu*0mI7!>h{;(AceSaK z>zQtHe#;iwKk^O9)AL!$#F!m75xUaLW2~E&yWn<9qQhs8WS-LYLzf!{xA_b(evm|n z*1Cj5#|H7Yl8Cp;0*mn9Ai~KJW?&OMs+#1mu18^A_C<7jovTs9yJa^1U>Q7q7}g6- z1pLXA#Zhbu-9%QeL97*I`;MJzy?V_WkDD4te>DMpq*Y7*h|Z;pFeShuuWx4DFP8-S zd`LNr=#u16xaLUa&Yin1D_t9;j(`4CEJ*lBS{}InKBZh-R@iGtD-b$nVWaET-B9#6 znyQJ3S5YZjrVK0*^FVIK%V8)gR0KI1o{*4ek*GQAdg#F>!bJ;}A5C(&@78L!$d@y| zRzFiQKb!xoygUAVbsolH@m#0*T)Bl-9^D5VMdtbF!a{c=x*pZ5TU!@ZiY7^l4%S}1 z+SRvo0n@U1>sECfkM~v#bs6tDlEaZ_lDj7#>)es6yzs;Ge zlvZ8G%{Nuno01fscQ>e~?nf%Ve0Hd)lC-hiPVpB*=P};ff!x#jSdwBbw2V?{kx35G zp~a=@%{QrgPuG3VUG@DYIwVJYJ{E$|lrLXaS(e@$@wzS= zoy`-tB}IsiH_6e72fPy$q!!uH7sIu-G;oBnFJfhw*7~Ckt``!2X8<-WB=n1?wT3e7 z?}W(*<9!&D&~y4er8vLxa9(vxJtRKx7P_m^eIo*{KNH&(kUDQ{Q}H=;0UD2361oJ9 zf(66%SKxhg+;=J|I@msLZ{k~Yr_;#@yi3wrMd4`vc#sp4FvbTYSLoNzWCV3lZ)3P3 z^+4RsTfZEZ1#m~xdfkYh4k;@D6Gyax(sRTU1iejPvOh_Zq*-mL~=K8U$LvSGh#P-6%(*iD61wD&!6Rv>?lPb+1t zA|=JnkwauJ;yL!ls0l-@Qa2#ouuG{)u${coL2}MOiH^YtU{u>S5?Y~xT4K*aVH-0B z#OE2wj8V|!2%iHjPKzFn9UF1yw|xpVnEWr&vrAgena{skz>T7s;&YflPK-lHU*wAj&#vu*o! 
z89wx7U6p6;FNTR(D+9Tw^@*L&Ru=btNW!bd=~+YQ7zHqx)N?cYP?xR)p$8Sl3#wlwn-G#Z((%G#`C;92SuaQpwefj=}pOwXv zR!_RwemU(k5BW}wcQ)ImCGKw5d#+%u35_!*$2(?+$qBB8Fvyxhw>8cwL(<_bUpRtf z>2FTUO+s>C#T>#+YCrZUD%0TJ(-=PRlr_8KUVP5uI#1pMKXsw`!?z>ouo~Hj-`_(lwD1B(NJZdy~4isq0S5FsK zbL=cY7Onx9GpJz&*T^)<359V7d&ZWNRp_T|!SR(c#zj#EfxP^OOizQo(q+$Z_gPb$a_QTq9di82KJZX|dKGNEiU5$UFxJbtA1Y=bH z&^zzAQctJC1uN?bpro7Hv?mLyg{i||#Iu%Iat)@1*22M=)3D~mFjXVQ+zU$WIYKKppQBg zO)3PlW_&JReKA$}%+rl4F`@ECqHvTXSZzvnArj0i0>^XJYMx_os<{E#vMALOl_?#1iZgVQYn0|XrU$3AK|-#gv3dVXq^c}>fBjNS)_mZ z~DV=^`07<}I~(Z5qR+yX$5=Zsvi8LE24ApC_TZ{MqMEJuf4WnQkS# zURs;BD_5_Js(K!;gN$`u8e_foZpQ^EffU_#D!Mb=sf#aqS z?c6Rlug*Lm)0Z4q?lqxMEDs^BfL1oAbb+hYZlsl_Fpcq_hykK#&ylx~F8l4-M^rF1 z`GH?q6SRsY)pyI`&cy^22cYETMHV(a60S~?fj+7Y+${azrly4?CJNcRXzirX zwWw_G|0F*aCS@4mwvOrY-s`3aIPZixd7RSiqdk)QX=MFEVu@Nqwy3#bXgwmvoGV7V z!m(d?Zhw<z(uxfDIsOc$$?R>7k^4$12aa_U99G?ZSo5ep71#=)qkVhhvXiK z2~+0}MGE)-!J1lg_*SL7w}<7MmK_TN7Sk3T6(;%kWWiB+`Ro0ORpt5Zp__B4w%jp% zo6KB(Ld~@^;v1Kx)l(>m4Q+*?^21|j)Ap6t@ipTj(5 zo`$80@l?zN#mQisHg6p-<6Etow<~3d{YPb#ZLHt$e(BodIZqu|veBY>Q+2;F83HX( zeQ^JgOfuO*N(spnU4V34LsIoNYx`i}R@r?3F}5|Y#|-Fhfu!a1UN zkJBB}m-vI{9d69u8TXz%gg7Bzk`#s&ZoENVJ2TiZPNj3#?w+awxu-n`VWTTD&GC8V z^31cn6=5w8qC=|(Nx|f4(}fw-_}mY9b*S@DpW6>U&{(-b_UzLS)|VB+ENVR7Dg4eD zLSMo`2wIzH5vlKX#pFAP6O+=?%y_3Ok%wD0R|5fjHrTel0|u$AgGqT=Cv~n?Z8`|E zS~hCZOp#BLJ(A-R#fz&$nT&z+e>H3QNW7|k{P`E*v8~&7Q1>>89fij;IR<}g)TF5} z?&(`3H6h94`-Rt^;_vKuEsOgSXK#+%7w{J2PJG3lknGfXWlO+yIu95)eZO#V$~KJn zQ;G5x?x%7`Ve$bgSKh*TS*UP%3*FD0buu+{i?>}$1Kq2&+k+sxNzQ=LXlZy zGvaH!+wi~}+Y>r8;C=qo!*e%+@`wL77jEhplps#T`5nynqtop2Kw29r^xdek$uVxp8$*^Ef=L&-a3WU5m^jP$(|27Q zu??*6dKTi9Tyf%`+`6vC@j-R(lUSyio&hD|AgmsLESA_4$Hi*(q*zsJi4_fnF;m_xhsUb-Z>b`IV{Jr$19f9+|ky6Ehfz z!Igy`y zEx)&dvcwd_2?-StCKGt3XCFlpS%xFz<8VHIAt`tdKyP}j}?aSGe_&gfCj>Mq-Mp4Yjp3-Vsz z2s9NsDr3BN1`m8esN_?0`*eBz-Pxb~Eup&gG0e{P+OUzzwQu7(S4^4yEur=82x?~= zn9^EL6w5fSZe2PjR9x+2T+tp7G?IBr-P(7m%wgP)YjfOF><^y2FBzA^Hqb)qEkSM& zlESce&mJH+OMt-$RRt4GFpiGx3gn*F=jS`34H7PNLke;dB@&4D-W{W|rLoMYk;A3K z6P;8BHGY4*V|$hHPvgLS&TW3DFz$!XA;|=jFYr81J>D^18gvp!XIgF7k#V2IjIlN( z&9stI*cX)AchpkrKt6OkY5hHfI5tj{%v9Zcp6Rm6bLkkJKW~V{mDk4Cl^C*-m9b0u3Pi;<>0TEa=C`yVH%p35P*$4ND z{nc-bb~sLwC%afpT8LGnP`&@>lj^l3m6i zCM29UtUv@36kAUOg@NfQ&|%0vXwP--r_ZXXBIwT{UY=rR+i(6WFIM2tzcfDYxHAeUgeXM7KZ zE=@ISH&7$Q6DPbAuP*j8U6ih*eeo)CeBt@%R+CzjU{WC`kqWfUoO>iPl4;m|Ka`xi zg_cpeZSorED&`$);<(hyHd6}Eq%dULIKi&3d)D83T3-M$J$dq!Pxrh72M?;tCF&kYN5R<9U5))z&K-E^B+g^MFrBQl1g5`x?mR5V+Fss>t^z|D>;kk!p_imnc?D#Z)m98{V)F7MBGj-308sXz63mmyIJl;p6E zgp_UXx}D#z#5F3V@JtHBq}moJ{=_t~wqNP9#p_52cMclRt+5^w4qPko)wblc#}|oV zok=~<>;um82dDRvftaGTU9@Tw*o2 zSAw4GB~}I86IsyLMp6|4&QC*Y9y3!dUA|mpZF>Z`N(_q>g5S2gwbRT(&$ccr7I8&W0}QU4{a} zT#P}$AYc%k5ICB037R^XD+)KVR%$XNkRLIo#Qhc;ggC{59dB z*I0=HNLYZN<;u6ht*r#??63snghePjTz?!146dtANGlh64sL13VU0U_1aVi0!2)>| zWYe=(pf8UijlHKIF)~YnQ@n&&weA+{t_NI62LhZ^CaqkyX@^0;AYc$M2w>dNBqK1l zfkD6^kkJrGQKF;4IeQw)@nW&({vh^0D+N>CBaQsdAH0zW_0Ir0b`^Y|dT;#+tk>i+3VE1-RrUA>`nq^r3i~Vqq z)#Qd}4+B%pi#{t>(q)~i-B7FoMGQer*`6`A83YUh27zmWfFU~91a@;$gMdLGH4#V^ zqN5v(3@HuBW_0*a<4<7M3*QmR@n+7r7`(mQ7F?6X zT#+6t8;DP6m{F>|VDsxMNe!%Roae|sjSOFmn*zD9_wG_j1%&CL*Z%`64)#0AC@V*X zI*CRf2#62K4`Y22NhO<>AW7A>kkBCQvoJH-8CcL@SyPj^ zg4oyxG2hQFSZtV3VB01ToGri}oT&H97SF?{CL`JsNaFJRnvSF>*`!Vru3`oxr zxKW*gyHrBTH|t_{ue>?o@>UcsQ-xt=D1`3?#mbWpsg1Fp#0T@F=*8kT74s}v)7*SM zzMx5RKdfYO0R)9^XIQxQEN5V`QON^e?obMhpT50fT@+AVVWyh)#y?UUP8_!y=^1XRR1ReH!GV8 zJp<-XfcTs`jRoZAEC_-x%a3=RRPM7wxsRY?57L7POf61gnjKaky1_B=fm&yC#RJhD zPR;p4@&guy2_%IX#nhSvh!tYtS}*#XBruHNS44)J?rqGXjj6P;xYr~z%FPUx8CYUO 
Date: Fri, 27 Nov 2020 09:48:20 -0500
Subject: [PATCH 0022/1543] Create euclidean_distance.py (#3350)

* Create distance_formula.py
* Remove whitespace
* Update distance_formula.py
* Update distance_formula.py
* Update distance_formula.py
* Generalize
* Grammar mistake
* Rename distance_formula.py to euclidean_distance.py
* Update euclidean_distance.py
* v1 - > v2
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update maths/euclidean_distance.py

Co-authored-by: Christian Clauss

* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update euclidean_distance.py
* Update maths/euclidean_distance.py

Co-authored-by: Christian Clauss
Co-authored-by: Christian Clauss
---
 maths/euclidean_distance.py | 62 +++++++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)
 create mode 100644 maths/euclidean_distance.py

diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py
new file mode 100644
index 000000000000..6e0da6370219
--- /dev/null
+++ b/maths/euclidean_distance.py
@@ -0,0 +1,62 @@
+from typing import Iterable, Union
+
+import numpy as np
+
+Vector = Union[Iterable[float], Iterable[int], np.ndarray]
+VectorOut = Union[np.float64, int, float]
+
+
+def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
+    """
+    Calculate the distance between the two endpoints of two vectors.
+    A vector is defined as a list, tuple, or numpy 1D array.
+    >>> euclidean_distance((0, 0), (2, 2))
+    2.8284271247461903
+    >>> euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2]))
+    3.4641016151377544
+    >>> euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]))
+    8.0
+    >>> euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8])
+    8.0
+    """
+    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
+
+
+def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
+    """
+    Calculate the distance between the two endpoints of two vectors without numpy.
+    A vector is defined as a list, tuple, or numpy 1D array.
+    >>> euclidean_distance_no_np((0, 0), (2, 2))
+    2.8284271247461903
+    >>> euclidean_distance_no_np([1, 2, 3, 4], [5, 6, 7, 8])
+    8.0
+    """
+    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
+
+
+if __name__ == "__main__":
+
+    def benchmark() -> None:
+        """
+        Benchmarks
+        """
+        from timeit import timeit
+
+        print("Without Numpy")
+        print(
+            timeit(
+                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
+                number=10000,
+                globals=globals(),
+            )
+        )
+        print("With Numpy")
+        print(
+            timeit(
+                "euclidean_distance([1, 2, 3], [4, 5, 6])",
+                number=10000,
+                globals=globals(),
+            )
+        )
+
+    benchmark()

From 9333d2f0174d76f36ff27961f18d8110d67cbc67 Mon Sep 17 00:00:00 2001
From: Anubhav Solanki <55892162+AnubhavSolanki@users.noreply.github.com>
Date: Fri, 27 Nov 2020 21:33:17 +0530
Subject: [PATCH 0023/1543] Create weight_conversion.py (#3964)

* Create weight_conversion.py
* Update weight_conversion.py

Co-authored-by: Dhruv Manilawala
---
 conversions/weight_conversion.py | 287 +++++++++++++++++++++++++++++++
 1 file changed, 287 insertions(+)
 create mode 100644 conversions/weight_conversion.py

diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py
new file mode 100644
index 000000000000..85515f2f6f88
--- /dev/null
+++ b/conversions/weight_conversion.py
@@ -0,0 +1,287 @@
+"""
+Conversion of weight units.
+
+__author__ = "Anubhav Solanki"
+__license__ = "MIT"
+__version__ = "1.0.0"
+__maintainer__ = "Anubhav Solanki"
+__email__ = "anubhavsolanki0@gmail.com"
+
+USAGE :
+-> Import this file into their respective project.
+-> Use the function weight_conversion() for conversion of weight units.
+-> Parameters :
+    -> from_type : From which type you want to convert
+    -> to_type : To which type you want to convert
+    -> value : the value which you want to convert
+
+REFERENCES :
+
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilogram
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Gram
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Millimetre
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Tonne
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Long_ton
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Short_ton
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Pound
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Ounce
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Fineness#Karat
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Dalton_(unit)
+"""
+
+KILOGRAM_CHART = {
+    "kilogram": 1,
+    "gram": pow(10, 3),
+    "milligram": pow(10, 6),
+    "metric-ton": pow(10, -3),
+    "long-ton": 0.0009842073,
+    "short-ton": 0.0011023122,
+    "pound": 2.2046244202,
+    "ounce": 35.273990723,
+    "carrat": 5000,
+    "atomic-mass-unit": 6.022136652e26,
+}
+
+WEIGHT_TYPE_CHART = {
+    "kilogram": 1,
+    "gram": pow(10, -3),
+    "milligram": pow(10, -6),
+    "metric-ton": pow(10, 3),
+    "long-ton": 1016.04608,
+    "short-ton": 907.184,
+    "pound": 0.453592,
+    "ounce": 0.0283495,
+    "carrat": 0.0002,
+    "atomic-mass-unit": 1.660540199e-27,
+}
+
+
+def weight_conversion(from_type: str, to_type: str, value: float) -> float:
+    """
+    Conversion of weight unit with the help of KILOGRAM_CHART
+
+    "kilogram" : 1,
+    "gram" : pow(10, 3),
+    "milligram" : pow(10, 6),
+    "metric-ton" : pow(10, -3),
+    "long-ton" : 0.0009842073,
+    "short-ton" : 0.0011023122,
+    "pound" : 2.2046244202,
+    "ounce" : 35.273990723,
+    "carrat" : 5000,
+    "atomic-mass-unit" : 6.022136652E+26
+
+    >>> weight_conversion("kilogram","kilogram",4)
+    4
weight_conversion("kilogram","gram",1) + 1000 + >>> weight_conversion("kilogram","milligram",4) + 4000000 + >>> weight_conversion("kilogram","metric-ton",4) + 0.004 + >>> weight_conversion("kilogram","long-ton",3) + 0.0029526219 + >>> weight_conversion("kilogram","short-ton",1) + 0.0011023122 + >>> weight_conversion("kilogram","pound",4) + 8.8184976808 + >>> weight_conversion("kilogram","ounce",4) + 141.095962892 + >>> weight_conversion("kilogram","carrat",3) + 15000 + >>> weight_conversion("kilogram","atomic-mass-unit",1) + 6.022136652e+26 + >>> weight_conversion("gram","kilogram",1) + 0.001 + >>> weight_conversion("gram","gram",3) + 3.0 + >>> weight_conversion("gram","milligram",2) + 2000.0 + >>> weight_conversion("gram","metric-ton",4) + 4e-06 + >>> weight_conversion("gram","long-ton",3) + 2.9526219e-06 + >>> weight_conversion("gram","short-ton",3) + 3.3069366000000003e-06 + >>> weight_conversion("gram","pound",3) + 0.0066138732606 + >>> weight_conversion("gram","ounce",1) + 0.035273990723 + >>> weight_conversion("gram","carrat",2) + 10.0 + >>> weight_conversion("gram","atomic-mass-unit",1) + 6.022136652e+23 + >>> weight_conversion("milligram","kilogram",1) + 1e-06 + >>> weight_conversion("milligram","gram",2) + 0.002 + >>> weight_conversion("milligram","milligram",3) + 3.0 + >>> weight_conversion("milligram","metric-ton",3) + 3e-09 + >>> weight_conversion("milligram","long-ton",3) + 2.9526219e-09 + >>> weight_conversion("milligram","short-ton",1) + 1.1023122e-09 + >>> weight_conversion("milligram","pound",3) + 6.6138732605999995e-06 + >>> weight_conversion("milligram","ounce",2) + 7.054798144599999e-05 + >>> weight_conversion("milligram","carrat",1) + 0.005 + >>> weight_conversion("milligram","atomic-mass-unit",1) + 6.022136652e+20 + >>> weight_conversion("metric-ton","kilogram",2) + 2000 + >>> weight_conversion("metric-ton","gram",2) + 2000000 + >>> weight_conversion("metric-ton","milligram",3) + 3000000000 + >>> weight_conversion("metric-ton","metric-ton",2) + 2.0 + >>> weight_conversion("metric-ton","long-ton",3) + 2.9526219 + >>> weight_conversion("metric-ton","short-ton",2) + 2.2046244 + >>> weight_conversion("metric-ton","pound",3) + 6613.8732606 + >>> weight_conversion("metric-ton","ounce",4) + 141095.96289199998 + >>> weight_conversion("metric-ton","carrat",4) + 20000000 + >>> weight_conversion("metric-ton","atomic-mass-unit",1) + 6.022136652e+29 + >>> weight_conversion("long-ton","kilogram",4) + 4064.18432 + >>> weight_conversion("long-ton","gram",4) + 4064184.32 + >>> weight_conversion("long-ton","milligram",3) + 3048138240.0 + >>> weight_conversion("long-ton","metric-ton",4) + 4.06418432 + >>> weight_conversion("long-ton","long-ton",3) + 2.999999907217152 + >>> weight_conversion("long-ton","short-ton",1) + 1.119999989746176 + >>> weight_conversion("long-ton","pound",3) + 6720.000000049448 + >>> weight_conversion("long-ton","ounce",1) + 35840.000000060514 + >>> weight_conversion("long-ton","carrat",4) + 20320921.599999998 + >>> weight_conversion("long-ton","atomic-mass-unit",4) + 2.4475073353955697e+30 + >>> weight_conversion("short-ton","kilogram",3) + 2721.5519999999997 + >>> weight_conversion("short-ton","gram",3) + 2721552.0 + >>> weight_conversion("short-ton","milligram",1) + 907184000.0 + >>> weight_conversion("short-ton","metric-ton",4) + 3.628736 + >>> weight_conversion("short-ton","long-ton",3) + 2.6785713457296 + >>> weight_conversion("short-ton","short-ton",3) + 2.9999999725344 + >>> weight_conversion("short-ton","pound",2) + 4000.0000000294335 + >>> 
weight_conversion("short-ton","ounce",4) + 128000.00000021611 + >>> weight_conversion("short-ton","carrat",4) + 18143680.0 + >>> weight_conversion("short-ton","atomic-mass-unit",1) + 5.463186016507968e+29 + >>> weight_conversion("pound","kilogram",4) + 1.814368 + >>> weight_conversion("pound","gram",2) + 907.184 + >>> weight_conversion("pound","milligram",3) + 1360776.0 + >>> weight_conversion("pound","metric-ton",3) + 0.001360776 + >>> weight_conversion("pound","long-ton",2) + 0.0008928571152432 + >>> weight_conversion("pound","short-ton",1) + 0.0004999999954224 + >>> weight_conversion("pound","pound",3) + 3.0000000000220752 + >>> weight_conversion("pound","ounce",1) + 16.000000000027015 + >>> weight_conversion("pound","carrat",1) + 2267.96 + >>> weight_conversion("pound","atomic-mass-unit",4) + 1.0926372033015936e+27 + >>> weight_conversion("ounce","kilogram",3) + 0.0850485 + >>> weight_conversion("ounce","gram",3) + 85.0485 + >>> weight_conversion("ounce","milligram",4) + 113398.0 + >>> weight_conversion("ounce","metric-ton",4) + 0.000113398 + >>> weight_conversion("ounce","long-ton",4) + 0.0001116071394054 + >>> weight_conversion("ounce","short-ton",4) + 0.0001249999988556 + >>> weight_conversion("ounce","pound",1) + 0.0625000000004599 + >>> weight_conversion("ounce","ounce",2) + 2.000000000003377 + >>> weight_conversion("ounce","carrat",1) + 141.7475 + >>> weight_conversion("ounce","atomic-mass-unit",1) + 1.70724563015874e+25 + >>> weight_conversion("carrat","kilogram",1) + 0.0002 + >>> weight_conversion("carrat","gram",4) + 0.8 + >>> weight_conversion("carrat","milligram",2) + 400.0 + >>> weight_conversion("carrat","metric-ton",2) + 4.0000000000000003e-07 + >>> weight_conversion("carrat","long-ton",3) + 5.9052438e-07 + >>> weight_conversion("carrat","short-ton",4) + 8.818497600000002e-07 + >>> weight_conversion("carrat","pound",1) + 0.00044092488404000004 + >>> weight_conversion("carrat","ounce",2) + 0.0141095962892 + >>> weight_conversion("carrat","carrat",4) + 4.0 + >>> weight_conversion("carrat","atomic-mass-unit",4) + 4.8177093216e+23 + >>> weight_conversion("atomic-mass-unit","kilogram",4) + 6.642160796e-27 + >>> weight_conversion("atomic-mass-unit","gram",2) + 3.321080398e-24 + >>> weight_conversion("atomic-mass-unit","milligram",2) + 3.3210803980000002e-21 + >>> weight_conversion("atomic-mass-unit","metric-ton",3) + 4.9816205970000004e-30 + >>> weight_conversion("atomic-mass-unit","long-ton",3) + 4.9029473573977584e-30 + >>> weight_conversion("atomic-mass-unit","short-ton",1) + 1.830433719948128e-30 + >>> weight_conversion("atomic-mass-unit","pound",3) + 1.0982602420317504e-26 + >>> weight_conversion("atomic-mass-unit","ounce",2) + 1.1714775914938915e-25 + >>> weight_conversion("atomic-mass-unit","carrat",2) + 1.660540199e-23 + >>> weight_conversion("atomic-mass-unit","atomic-mass-unit",2) + 1.999999998903455 + """ + if to_type not in KILOGRAM_CHART or from_type not in WEIGHT_TYPE_CHART: + raise ValueError( + f"Invalid 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n" + f"Supported values are: {', '.join(WEIGHT_TYPE_CHART)}" + ) + return value * KILOGRAM_CHART[to_type] * WEIGHT_TYPE_CHART[from_type] + + +if __name__ == "__main__": + + import doctest + + doctest.testmod() From 1e1708b8a1644d994ad004cc6fe20893a328519c Mon Sep 17 00:00:00 2001 From: fpringle Date: Fri, 27 Nov 2020 17:08:14 +0100 Subject: [PATCH 0024/1543] Added solution for Project Euler problem 77 (#3132) * Added solution for Project Euler problem 77. 
* Update docstrings, doctest, type annotations and 0-padding in directory name. Reference: #3256 * Implemented lru_cache, better type hints, more doctests for problem 77 * updating DIRECTORY.md * updating DIRECTORY.md * Added solution for Project Euler problem 77. Fixes: 2695 * Update docstrings, doctest, type annotations and 0-padding in directory name. Reference: #3256 * Implemented lru_cache, better type hints, more doctests for problem 77 * better variable names Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_077/__init__.py | 0 project_euler/problem_077/sol1.py | 81 +++++++++++++++++++++++++++ 3 files changed, 83 insertions(+) create mode 100644 project_euler/problem_077/__init__.py create mode 100644 project_euler/problem_077/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index e1e57307d593..7fe7c63a2571 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -709,6 +709,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_075/sol1.py) * Problem 076 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_076/sol1.py) + * Problem 077 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_077/sol1.py) * Problem 080 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_080/sol1.py) * Problem 081 diff --git a/project_euler/problem_077/__init__.py b/project_euler/problem_077/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_077/sol1.py b/project_euler/problem_077/sol1.py new file mode 100644 index 000000000000..e92992a90ab3 --- /dev/null +++ b/project_euler/problem_077/sol1.py @@ -0,0 +1,81 @@ +""" +Project Euler Problem 77: https://projecteuler.net/problem=77 + +It is possible to write ten as the sum of primes in exactly five different ways: + +7 + 3 +5 + 5 +5 + 3 + 2 +3 + 3 + 2 + 2 +2 + 2 + 2 + 2 + 2 + +What is the first value which can be written as the sum of primes in over +five thousand different ways? +""" + +from functools import lru_cache +from math import ceil +from typing import Optional, Set + +NUM_PRIMES = 100 + +primes = set(range(3, NUM_PRIMES, 2)) +primes.add(2) +prime: int + +for prime in range(3, ceil(NUM_PRIMES ** 0.5), 2): + if prime not in primes: + continue + primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) + + +@lru_cache(maxsize=100) +def partition(number_to_partition: int) -> Set[int]: + """ + Return a set of integers corresponding to unique prime partitions of n. + The unique prime partitions can be represented as unique prime decompositions, + e.g. (7+3) <-> 7*3 = 12, (3+3+2+2) = 3*3*2*2 = 36 + >>> partition(10) + {32, 36, 21, 25, 30} + >>> partition(15) + {192, 160, 105, 44, 112, 243, 180, 150, 216, 26, 125, 126} + >>> len(partition(20)) + 26 + """ + if number_to_partition < 0: + return set() + elif number_to_partition == 0: + return {1} + + ret: Set[int] = set() + prime: int + sub: int + + for prime in primes: + if prime > number_to_partition: + continue + for sub in partition(number_to_partition - prime): + ret.add(sub * prime) + + return ret + + +def solution(number_unique_partitions: int = 5000) -> Optional[int]: + """ + Return the smallest integer that can be written as the sum of primes in over + m unique ways. 
+ >>> solution(4) + 10 + >>> solution(500) + 45 + >>> solution(1000) + 53 + """ + for number_to_partition in range(1, NUM_PRIMES): + if len(partition(number_to_partition)) > number_unique_partitions: + return number_to_partition + return None + + +if __name__ == "__main__": + print(f"{solution() = }") From 9c6080a6fc31ae7737535ba305de20a5cc0805b5 Mon Sep 17 00:00:00 2001 From: arif599 <59244462+arif599@users.noreply.github.com> Date: Sat, 28 Nov 2020 05:50:18 +0000 Subject: [PATCH 0025/1543] data_structures/linked_list: Add __str__() function (#3961) * Adding __str__() function * Removing white space * Update data_structures/linked_list/__init__.py Co-authored-by: xcodz-dot <71920621+xcodz-dot@users.noreply.github.com> * Adding type hints * Update __init__.py * Update __init__.py * Adding the changes requested * Updating to fix pre-commit * Updating __init__.py * Updating __init__.py Co-authored-by: xcodz-dot <71920621+xcodz-dot@users.noreply.github.com> --- data_structures/linked_list/__init__.py | 46 +++++++++++++++++++++---- 1 file changed, 40 insertions(+), 6 deletions(-) diff --git a/data_structures/linked_list/__init__.py b/data_structures/linked_list/__init__.py index 3ddfea5c5abf..a5f5537b1d96 100644 --- a/data_structures/linked_list/__init__.py +++ b/data_structures/linked_list/__init__.py @@ -1,19 +1,30 @@ +""" +Linked Lists consists of Nodes. +Nodes contain data and also may link to other nodes: + - Head Node: First node, the address of the + head node gives us access of the complete list + - Last node: points to null +""" + +from typing import Any + + class Node: - def __init__(self, item, next): + def __init__(self, item: Any, next: Any) -> None: self.item = item self.next = next class LinkedList: - def __init__(self): + def __init__(self) -> None: self.head = None self.size = 0 - def add(self, item): + def add(self, item: Any) -> None: self.head = Node(item, self.head) self.size += 1 - def remove(self): + def remove(self) -> Any: if self.is_empty(): return None else: @@ -22,10 +33,33 @@ def remove(self): self.size -= 1 return item - def is_empty(self): + def is_empty(self) -> bool: return self.head is None - def __len__(self): + def __str__(self) -> str: + """ + >>> linked_list = LinkedList() + >>> linked_list.add(23) + >>> linked_list.add(14) + >>> linked_list.add(9) + >>> print(linked_list) + 9 --> 14 --> 23 + """ + if not self.is_empty: + return "" + else: + iterate = self.head + item_str = "" + item_list = [] + while iterate: + item_list.append(str(iterate.item)) + iterate = iterate.next + + item_str = " --> ".join(item_list) + + return item_str + + def __len__(self) -> int: """ >>> linked_list = LinkedList() >>> len(linked_list) From 8cafadd759a519ce5b3c60f3ba4a0819e42d060f Mon Sep 17 00:00:00 2001 From: xcodz-dot <71920621+xcodz-dot@users.noreply.github.com> Date: Sat, 28 Nov 2020 19:09:27 +0530 Subject: [PATCH 0026/1543] Update CONTRIBUTING.md with pre-commit plugin instructions (#3979) * Update CONTRIBUTING.md * Update CONTRIBUTING.md * Update CONTRIBUTING.md * Update CONTRIBUTING.md Co-authored-by: Christian Clauss * Update CONTRIBUTING.md Co-authored-by: Christian Clauss * Update CONTRIBUTING.md Co-authored-by: Dhruv Manilawala Co-authored-by: Christian Clauss --- CONTRIBUTING.md | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index eedcb0250169..e4c81a5ecd98 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -49,6 +49,19 @@ Algorithms should: Algorithms in this repo should not be how-to 
examples for existing Python packages. Instead, they should perform internal calculations or manipulations to convert input values into different output values. Those calculations or manipulations can use data types, classes, or functions of existing Python packages but each algorithm in this repo should add unique value. +#### Pre-commit plugin +Use [pre-commit](https://pre-commit.com/#installation) to automatically format your code to match our coding style: + +```bash +python3 -m pip install pre-commit # required only once +pre-commit install +``` +That's it! The plugin will run every time you commit any changes. If there are any errors found during the run, fix them and commit those changes. You can even run the plugin manually on all files: + +```bash +pre-commit run --all-files --show-diff-on-failure +``` + #### Coding Style We want your work to be readable by others; therefore, we encourage you to note the following: @@ -64,14 +77,14 @@ We want your work to be readable by others; therefore, we encourage you to note - Please consider running [__psf/black__](https://github.com/python/black) on your Python file(s) before submitting your pull request. This is not yet a requirement but it does make your code more readable and automatically aligns it with much of [PEP 8](https://www.python.org/dev/peps/pep-0008/). There are other code formatters (autopep8, yapf) but the __black__ formatter is now hosted by the Python Software Foundation. To use it, ```bash - pip3 install black # only required the first time + python3 -m pip install black # only required the first time black . ``` - All submissions will need to pass the test __flake8 . --ignore=E203,W503 --max-line-length=88__ before they will be accepted so if possible, try this test locally on your Python file(s) before submitting your pull request. ```bash - pip3 install flake8 # only required the first time + python3 -m pip install flake8 # only required the first time flake8 . 
--ignore=E203,W503 --max-line-length=88 --show-source ``` From bfb0c3533d88f82c18f7ae792b2d2e154b9b51c7 Mon Sep 17 00:00:00 2001 From: Du Yuanchao Date: Sat, 28 Nov 2020 22:42:30 +0800 Subject: [PATCH 0027/1543] Fixed LGTM and typehint (#3970) * fixed LGTM fixed typehint * updating DIRECTORY.md * Update lucas_series.py * Update lucas_series.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: John Law --- DIRECTORY.md | 12 +++++++++++- maths/lucas_series.py | 27 ++++++++++++--------------- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 7fe7c63a2571..079dae1884bc 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -26,6 +26,8 @@ ## Bit Manipulation * [Binary And Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_and_operator.py) + * [Binary Count Setbits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_count_setbits.py) + * [Binary Count Trailing Zeros](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_count_trailing_zeros.py) * [Binary Or Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_or_operator.py) * [Binary Xor Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_xor_operator.py) * [Single Bit Manipulation Operations](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/single_bit_manipulation_operations.py) @@ -47,7 +49,7 @@ * [Atbash](https://github.com/TheAlgorithms/Python/blob/master/ciphers/atbash.py) * [Base16](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base16.py) * [Base32](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base32.py) - * [Base64 Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base64_cipher.py) + * [Base64 Encoding](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base64_encoding.py) * [Base85](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base85.py) * [Beaufort Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/beaufort_cipher.py) * [Brute Force Caesar Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/brute_force_caesar_cipher.py) @@ -101,6 +103,7 @@ * [Decimal To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_octal.py) * [Hexadecimal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/hexadecimal_to_decimal.py) * [Molecular Chemistry](https://github.com/TheAlgorithms/Python/blob/master/conversions/molecular_chemistry.py) + * [Octal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/octal_to_decimal.py) * [Prefix Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions.py) * [Roman To Integer](https://github.com/TheAlgorithms/Python/blob/master/conversions/roman_to_integer.py) * [Temperature Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/temperature_conversions.py) @@ -207,6 +210,7 @@ * [Heaps Algorithm Iterative](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/heaps_algorithm_iterative.py) * [Inversions](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/inversions.py) * [Kth Order Statistic](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/kth_order_statistic.py) + * [Max Difference Pair](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/max_difference_pair.py) * 
[Max Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/max_subarray_sum.py) * [Mergesort](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/mergesort.py) * [Peak](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/peak.py) @@ -243,6 +247,9 @@ * [Subset Generation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/subset_generation.py) * [Sum Of Subset](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/sum_of_subset.py) +## Electronics + * [Ohms Law](https://github.com/TheAlgorithms/Python/blob/master/electronics/ohms_law.py) + ## File Transfer * [Receive File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/receive_file.py) * [Send File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/send_file.py) @@ -804,6 +811,7 @@ * [Gnome Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/gnome_sort.py) * [Heap Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/heap_sort.py) * [Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/insertion_sort.py) + * [Intro Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/intro_sort.py) * [Iterative Merge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/iterative_merge_sort.py) * [Merge Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/merge_insertion_sort.py) * [Merge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/merge_sort.py) @@ -877,6 +885,8 @@ * [Get Imdb Top 250 Movies Csv](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdbtop.py) * [Instagram Crawler](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_crawler.py) + * [Instagram Pic](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_pic.py) * [Recaptcha Verification](https://github.com/TheAlgorithms/Python/blob/master/web_programming/recaptcha_verification.py) * [Slack Message](https://github.com/TheAlgorithms/Python/blob/master/web_programming/slack_message.py) + * [Test Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/test_fetch_github_info.py) * [World Covid19 Stats](https://github.com/TheAlgorithms/Python/blob/master/web_programming/world_covid19_stats.py) diff --git a/maths/lucas_series.py b/maths/lucas_series.py index 02eae8d8c658..6b32c2022e13 100644 --- a/maths/lucas_series.py +++ b/maths/lucas_series.py @@ -3,7 +3,7 @@ """ -def recursive_lucas_number(n): +def recursive_lucas_number(n_th_number: int) -> int: """ Returns the nth lucas number >>> recursive_lucas_number(1) @@ -19,17 +19,19 @@ def recursive_lucas_number(n): ... TypeError: recursive_lucas_number accepts only integer arguments. 
""" - if n == 1: - return n - if n == 0: - return 2 - if not isinstance(n, int): + if not isinstance(n_th_number, int): raise TypeError("recursive_lucas_number accepts only integer arguments.") + if n_th_number == 0: + return 2 + if n_th_number == 1: + return 1 - return recursive_lucas_number(n - 1) + recursive_lucas_number(n - 2) + return recursive_lucas_number(n_th_number - 1) + recursive_lucas_number( + n_th_number - 2 + ) -def dynamic_lucas_number(n: int) -> int: +def dynamic_lucas_number(n_th_number: int) -> int: """ Returns the nth lucas number >>> dynamic_lucas_number(1) @@ -45,14 +47,10 @@ def dynamic_lucas_number(n: int) -> int: ... TypeError: dynamic_lucas_number accepts only integer arguments. """ - if not isinstance(n, int): + if not isinstance(n_th_number, int): raise TypeError("dynamic_lucas_number accepts only integer arguments.") - if n == 0: - return 2 - if n == 1: - return 1 a, b = 2, 1 - for i in range(n): + for i in range(n_th_number): a, b = b, a + b return a @@ -62,7 +60,6 @@ def dynamic_lucas_number(n: int) -> int: testmod() n = int(input("Enter the number of terms in lucas series:\n").strip()) - n = int(input("Enter the number of terms in lucas series:\n").strip()) print("Using recursive function to calculate lucas series:") print(" ".join(str(recursive_lucas_number(i)) for i in range(n))) print("\nUsing dynamic function to calculate lucas series:") From 52a6213ddc804f571e1c54fdd8e9b0068acaf06e Mon Sep 17 00:00:00 2001 From: Sullivan <38718448+Epic-R-R@users.noreply.github.com> Date: Sun, 29 Nov 2020 15:44:18 +0330 Subject: [PATCH 0028/1543] Instagram Video and IGTV downloader (#3981) * Instagram Video and IGTV downloader Download Video and IGTV from Instagram * Update * Update * Some Change * Update * Update instagram_video.py * Update instagram_video.py * Update instagram_video.py * Update instagram_video.py Co-authored-by: Christian Clauss Co-authored-by: Dhruv Manilawala --- web_programming/instagram_video.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 web_programming/instagram_video.py diff --git a/web_programming/instagram_video.py b/web_programming/instagram_video.py new file mode 100644 index 000000000000..243cece1a50e --- /dev/null +++ b/web_programming/instagram_video.py @@ -0,0 +1,17 @@ +from datetime import datetime + +import requests + + +def download_video(url: str) -> bytes: + base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url=" + video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"] + return requests.get(video_url).content + + +if __name__ == "__main__": + url = input("Enter Video/IGTV url: ").strip() + file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4" + with open(file_name, "wb") as fp: + fp.write(download_video(url)) + print(f"Done. 
Video saved to disk as {file_name}.") From 5de90aafc7491f81b0a2b606639a518167bc15cf Mon Sep 17 00:00:00 2001 From: jenra Date: Sun, 29 Nov 2020 11:09:33 -0500 Subject: [PATCH 0029/1543] Hacktoberfest 2020: Conway's Game of Life (#3070) * Created conways_game_of_life.py * Added new_generation(list[int[int]]) -> list[list[int]] * Added glider example * Added comments and shortened glider example * Fixed index out of bounds error * Added test * Added blinker example * Added ability to generate images * Moved image generating code into a separate function * Added comments * Comment * Reformatted file * Formatting * Removed glider test * Update cellular_automata/conways_game_of_life.py Co-authored-by: John Law * Update conways_game_of_life.py * Update conways_game_of_life.py Co-authored-by: John Law --- cellular_automata/conways_game_of_life.py | 100 ++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 cellular_automata/conways_game_of_life.py diff --git a/cellular_automata/conways_game_of_life.py b/cellular_automata/conways_game_of_life.py new file mode 100644 index 000000000000..321baa3a3794 --- /dev/null +++ b/cellular_automata/conways_game_of_life.py @@ -0,0 +1,100 @@ +""" +Conway's Game of Life implemented in Python. +https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life +""" + +from __future__ import annotations + +from typing import List + +from PIL import Image + +# Define glider example +GLIDER = [ + [0, 1, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0, 0], + [1, 1, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], +] + +# Define blinker example +BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] + + +def new_generation(cells: List[List[int]]) -> List[List[int]]: + """ + Generates the next generation for a given state of Conway's Game of Life. + >>> new_generation(BLINKER) + [[0, 0, 0], [1, 1, 1], [0, 0, 0]] + """ + next_generation = [] + for i in range(len(cells)): + next_generation_row = [] + for j in range(len(cells[i])): + # Get the number of live neighbours + neighbour_count = 0 + if i > 0 and j > 0: + neighbour_count += cells[i - 1][j - 1] + if i > 0: + neighbour_count += cells[i - 1][j] + if i > 0 and j < len(cells[i]) - 1: + neighbour_count += cells[i - 1][j + 1] + if j > 0: + neighbour_count += cells[i][j - 1] + if j < len(cells[i]) - 1: + neighbour_count += cells[i][j + 1] + if i < len(cells) - 1 and j > 0: + neighbour_count += cells[i + 1][j - 1] + if i < len(cells) - 1: + neighbour_count += cells[i + 1][j] + if i < len(cells) - 1 and j < len(cells[i]) - 1: + neighbour_count += cells[i + 1][j + 1] + + # Rules of the game of life (excerpt from Wikipedia): + # 1. Any live cell with two or three live neighbours survives. + # 2. Any dead cell with three live neighbours becomes a live cell. + # 3. All other live cells die in the next generation. + # Similarly, all other dead cells stay dead. + alive = cells[i][j] == 1 + if ( + (alive and 2 <= neighbour_count <= 3) + or not alive + and neighbour_count == 3 + ): + next_generation_row.append(1) + else: + next_generation_row.append(0) + + next_generation.append(next_generation_row) + return next_generation + + +def generate_images(cells: list[list[int]], frames) -> list[Image.Image]: + """ + Generates a list of images of subsequent Game of Life states. 
+ """ + images = [] + for _ in range(frames): + # Create output image + img = Image.new("RGB", (len(cells[0]), len(cells))) + pixels = img.load() + + # Save cells to image + for x in range(len(cells)): + for y in range(len(cells[0])): + colour = 255 - cells[y][x] * 255 + pixels[x, y] = (colour, colour, colour) + + # Save image + images.append(img) + cells = new_generation(cells) + return images + + +if __name__ == "__main__": + images = generate_images(GLIDER, 16) + images[0].save("out.gif", save_all=True, append_images=images[1:]) From e07766230d5aea4ddf090d9c5347a90e8c2137b3 Mon Sep 17 00:00:00 2001 From: Jenia Dysin Date: Sun, 29 Nov 2020 18:19:50 +0200 Subject: [PATCH 0030/1543] Add static typing to backtracking algorithms (#2684) * Added static typing to backtracking algorithms * Ran psf/black to fix some minor issues. * updating DIRECTORY.md * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: John Law --- DIRECTORY.md | 13 ++++++++----- backtracking/all_combinations.py | 10 ++++++++-- backtracking/all_permutations.py | 6 ++++-- backtracking/n_queens.py | 6 +++--- backtracking/rat_in_maze.py | 4 ++-- backtracking/sum_of_subsets.py | 11 +++++++++-- 6 files changed, 34 insertions(+), 16 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 079dae1884bc..00da7922d54d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -41,6 +41,7 @@ * [Quine Mc Cluskey](https://github.com/TheAlgorithms/Python/blob/master/boolean_algebra/quine_mc_cluskey.py) ## Cellular Automata + * [Conways Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/conways_game_of_life.py) * [One Dimensional](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/one_dimensional.py) ## Ciphers @@ -107,6 +108,7 @@ * [Prefix Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions.py) * [Roman To Integer](https://github.com/TheAlgorithms/Python/blob/master/conversions/roman_to_integer.py) * [Temperature Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/temperature_conversions.py) + * [Weight Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/weight_conversion.py) ## Data Structures * Binary Tree @@ -321,10 +323,6 @@ * [Test Min Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/tests/test_min_spanning_tree_kruskal.py) * [Test Min Spanning Tree Prim](https://github.com/TheAlgorithms/Python/blob/master/graphs/tests/test_min_spanning_tree_prim.py) -## Greedy Method - * [Greedy Knapsack](https://github.com/TheAlgorithms/Python/blob/master/greedy_method/greedy_knapsack.py) - * [Test Knapsack](https://github.com/TheAlgorithms/Python/blob/master/greedy_method/test_knapsack.py) - ## Hashes * [Adler32](https://github.com/TheAlgorithms/Python/blob/master/hashes/adler32.py) * [Chaos Machine](https://github.com/TheAlgorithms/Python/blob/master/hashes/chaos_machine.py) @@ -336,8 +334,11 @@ * [Sha1](https://github.com/TheAlgorithms/Python/blob/master/hashes/sha1.py) ## Knapsack + * [Greedy Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/greedy_knapsack.py) * [Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/knapsack.py) - * [Test Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/test_knapsack.py) + * Tests + * [Test Greedy Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/tests/test_greedy_knapsack.py) + * [Test 
Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/tests/test_knapsack.py) ## Linear Algebra * Src @@ -400,6 +401,7 @@ * [Combinations](https://github.com/TheAlgorithms/Python/blob/master/maths/combinations.py) * [Decimal Isolate](https://github.com/TheAlgorithms/Python/blob/master/maths/decimal_isolate.py) * [Entropy](https://github.com/TheAlgorithms/Python/blob/master/maths/entropy.py) + * [Euclidean Distance](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_distance.py) * [Eulers Totient](https://github.com/TheAlgorithms/Python/blob/master/maths/eulers_totient.py) * [Explicit Euler](https://github.com/TheAlgorithms/Python/blob/master/maths/explicit_euler.py) * [Extended Euclidean Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/extended_euclidean_algorithm.py) @@ -886,6 +888,7 @@ * [Get Imdbtop](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdbtop.py) * [Instagram Crawler](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_crawler.py) * [Instagram Pic](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_pic.py) + * [Instagram Video](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_video.py) * [Recaptcha Verification](https://github.com/TheAlgorithms/Python/blob/master/web_programming/recaptcha_verification.py) * [Slack Message](https://github.com/TheAlgorithms/Python/blob/master/web_programming/slack_message.py) * [Test Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/test_fetch_github_info.py) diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py index 854dc5198422..0444ed093449 100644 --- a/backtracking/all_combinations.py +++ b/backtracking/all_combinations.py @@ -16,7 +16,13 @@ def generate_all_combinations(n: int, k: int) -> [[int]]: return result -def create_all_state(increment, total_number, level, current_list, total_list): +def create_all_state( + increment: int, + total_number: int, + level: int, + current_list: [int], + total_list: [int], +) -> None: if level == 0: total_list.append(current_list[:]) return @@ -27,7 +33,7 @@ def create_all_state(increment, total_number, level, current_list, total_list): current_list.pop() -def print_all_state(total_list): +def print_all_state(total_list: [int]) -> None: for i in total_list: print(*i) diff --git a/backtracking/all_permutations.py b/backtracking/all_permutations.py index 5244fef97f93..59c7b7bbf41e 100644 --- a/backtracking/all_permutations.py +++ b/backtracking/all_permutations.py @@ -7,11 +7,13 @@ """ -def generate_all_permutations(sequence): +def generate_all_permutations(sequence: [int]) -> None: create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))]) -def create_state_space_tree(sequence, current_sequence, index, index_used): +def create_state_space_tree( + sequence: [int], current_sequence: [int], index: int, index_used: int +) -> None: """ Creates a state space tree to iterate through each branch using DFS. We know that each state has exactly len(sequence) - index children. 
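The docstring shown in the hunk above describes the idea behind these backtracking helpers: a depth-first search over a state space tree in which each level fixes one more position of the permutation and each state at depth index has exactly len(sequence) - index children. As a rough, self-contained sketch of that technique (the names below are illustrative only and are not the helpers touched by this patch):

```python
from typing import List, Union

Item = Union[int, str]


def all_permutations(sequence: List[Item]) -> List[List[Item]]:
    """Collect every permutation of `sequence` via DFS over a state space tree."""
    results: List[List[Item]] = []
    used = [False] * len(sequence)
    current: List[Item] = []

    def dfs() -> None:
        if len(current) == len(sequence):
            results.append(current.copy())  # a complete branch of the tree
            return
        for index, item in enumerate(sequence):
            if not used[index]:
                used[index] = True
                current.append(item)
                dfs()  # the still-unused items form the children of this state
                current.pop()  # backtrack before trying the next candidate
                used[index] = False

    dfs()
    return results


print(all_permutations([3, 1, 2]))  # prints all 3! = 6 orderings
```

Each recursive level has one fewer unused item left to choose from, which is exactly the "len(sequence) - index children" observation quoted above.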
diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index ca7beb830bba..31696b4a84d3 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -10,7 +10,7 @@ solution = [] -def isSafe(board, row, column): +def isSafe(board: [[int]], row: int, column: int) -> bool: """ This function returns a boolean value True if it is safe to place a queen there considering the current state of the board. @@ -38,7 +38,7 @@ def isSafe(board, row, column): return True -def solve(board, row): +def solve(board: [[int]], row: int) -> bool: """ It creates a state space tree and calls the safe function until it receives a False Boolean and terminates that branch and backtracks to the next @@ -68,7 +68,7 @@ def solve(board, row): return False -def printboard(board): +def printboard(board: [[int]]) -> None: """ Prints the boards that have a successful combination. """ diff --git a/backtracking/rat_in_maze.py b/backtracking/rat_in_maze.py index 788aeac13c09..8dc484c3f92d 100644 --- a/backtracking/rat_in_maze.py +++ b/backtracking/rat_in_maze.py @@ -1,4 +1,4 @@ -def solve_maze(maze: list) -> bool: +def solve_maze(maze: [[int]]) -> bool: """ This method solves the "rat in maze" problem. In this problem we have some n by n matrix, a start point and an end point. @@ -67,7 +67,7 @@ def solve_maze(maze: list) -> bool: return solved -def run_maze(maze, i, j, solutions): +def run_maze(maze: [[int]], i: int, j: int, solutions: [[int]]) -> bool: """ This method is recursive starting from (i, j) and going in one of four directions: up, down, left, right. diff --git a/backtracking/sum_of_subsets.py b/backtracking/sum_of_subsets.py index 425ddcff927e..b71edc2eefb5 100644 --- a/backtracking/sum_of_subsets.py +++ b/backtracking/sum_of_subsets.py @@ -8,7 +8,7 @@ """ -def generate_sum_of_subsets_soln(nums, max_sum): +def generate_sum_of_subsets_soln(nums: [int], max_sum: [int]) -> [int]: result = [] path = [] num_index = 0 @@ -17,7 +17,14 @@ def generate_sum_of_subsets_soln(nums, max_sum): return result -def create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum): +def create_state_space_tree( + nums: [int], + max_sum: int, + num_index: int, + path: [int], + result: [int], + remaining_nums_sum: int, +) -> None: """ Creates a state space tree to iterate through each branch using DFS. 
It terminates the branching of a node when any of the two conditions From 0febbd397ec2a41cff7f9fa2c182c14516f5bb36 Mon Sep 17 00:00:00 2001 From: Jenia Dysin Date: Sun, 29 Nov 2020 18:20:54 +0200 Subject: [PATCH 0031/1543] Add typehints to blockchain (#3149) * updating DIRECTORY.md * updating DIRECTORY.md * Added type hints to blockchain algorithms * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- blockchain/chinese_remainder_theorem.py | 8 ++++---- blockchain/diophantine_equation.py | 8 ++++---- blockchain/modular_division.py | 12 ++++++------ 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/blockchain/chinese_remainder_theorem.py b/blockchain/chinese_remainder_theorem.py index b6a486f0b1ed..3e4b2b7b4f10 100644 --- a/blockchain/chinese_remainder_theorem.py +++ b/blockchain/chinese_remainder_theorem.py @@ -12,7 +12,7 @@ # Extended Euclid -def extended_euclid(a, b): +def extended_euclid(a: int, b: int) -> (int, int): """ >>> extended_euclid(10, 6) (-1, 2) @@ -29,7 +29,7 @@ def extended_euclid(a, b): # Uses ExtendedEuclid to find inverses -def chinese_remainder_theorem(n1, r1, n2, r2): +def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int: """ >>> chinese_remainder_theorem(5,1,7,3) 31 @@ -51,7 +51,7 @@ def chinese_remainder_theorem(n1, r1, n2, r2): # ----------SAME SOLUTION USING InvertModulo instead ExtendedEuclid---------------- # This function find the inverses of a i.e., a^(-1) -def invert_modulo(a, n): +def invert_modulo(a: int, n: int) -> int: """ >>> invert_modulo(2, 5) 3 @@ -67,7 +67,7 @@ def invert_modulo(a, n): # Same a above using InvertingModulo -def chinese_remainder_theorem2(n1, r1, n2, r2): +def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int: """ >>> chinese_remainder_theorem2(5,1,7,3) 31 diff --git a/blockchain/diophantine_equation.py b/blockchain/diophantine_equation.py index 751b0efb7227..a92c2a13cfd5 100644 --- a/blockchain/diophantine_equation.py +++ b/blockchain/diophantine_equation.py @@ -5,7 +5,7 @@ # GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor ) -def diophantine(a, b, c): +def diophantine(a: int, b: int, c: int) -> (int, int): """ >>> diophantine(10,6,14) (-7.0, 14.0) @@ -37,7 +37,7 @@ def diophantine(a, b, c): # n is the number of solution you want, n = 2 by default -def diophantine_all_soln(a, b, c, n=2): +def diophantine_all_soln(a: int, b: int, c: int, n: int = 2) -> None: """ >>> diophantine_all_soln(10, 6, 14) -7.0 14.0 @@ -72,7 +72,7 @@ def diophantine_all_soln(a, b, c, n=2): # Euclid's Algorithm -def greatest_common_divisor(a, b): +def greatest_common_divisor(a: int, b: int) -> int: """ >>> greatest_common_divisor(7,5) 1 @@ -98,7 +98,7 @@ def greatest_common_divisor(a, b): # x and y, then d = gcd(a,b) -def extended_gcd(a, b): +def extended_gcd(a: int, b: int) -> (int, int, int): """ >>> extended_gcd(10, 6) (2, -1, 2) diff --git a/blockchain/modular_division.py b/blockchain/modular_division.py index 8fcf6e37cbed..e012db28fab8 100644 --- a/blockchain/modular_division.py +++ b/blockchain/modular_division.py @@ -14,7 +14,7 @@ # Uses ExtendedEuclid to find the inverse of a -def modular_division(a, b, n): +def modular_division(a: int, b: int, n: int) -> int: """ >>> modular_division(4,8,5) 2 @@ -33,7 +33,7 @@ def modular_division(a, b, n): # This function find the inverses of a i.e., a^(-1) -def invert_modulo(a, n): +def invert_modulo(a: int, n: int) -> int: """ >>> invert_modulo(2, 5) 3 @@ -51,7 +51,7 @@ def invert_modulo(a, n): # 
------------------ Finding Modular division using invert_modulo ------------------- # This function used the above inversion of a to find x = (b*a^(-1))mod n -def modular_division2(a, b, n): +def modular_division2(a: int, b: int, n: int) -> int: """ >>> modular_division2(4,8,5) 2 @@ -72,7 +72,7 @@ def modular_division2(a, b, n): # and y, then d = gcd(a,b) -def extended_gcd(a, b): +def extended_gcd(a: int, b: int) -> (int, int, int): """ >>> extended_gcd(10, 6) (2, -1, 2) @@ -99,7 +99,7 @@ def extended_gcd(a, b): # Extended Euclid -def extended_euclid(a, b): +def extended_euclid(a: int, b: int) -> (int, int): """ >>> extended_euclid(10, 6) (-1, 2) @@ -119,7 +119,7 @@ def extended_euclid(a, b): # Euclid's Algorithm -def greatest_common_divisor(a, b): +def greatest_common_divisor(a: int, b: int) -> int: """ >>> greatest_common_divisor(7,5) 1 From 25164bb6380ae760bed5fe3efc5f2fc3ec5c38a1 Mon Sep 17 00:00:00 2001 From: John Law Date: Mon, 30 Nov 2020 01:30:31 +0800 Subject: [PATCH 0032/1543] Fix mypy in #2684 (#3987) * Fix mypy in #2684 * fix pre-commit --- backtracking/all_combinations.py | 11 ++++++----- backtracking/all_permutations.py | 14 +++++++++----- backtracking/n_queens.py | 10 ++++++---- backtracking/rat_in_maze.py | 8 ++++++-- backtracking/sum_of_subsets.py | 13 +++++++------ 5 files changed, 34 insertions(+), 22 deletions(-) diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py index 0444ed093449..76462837ce35 100644 --- a/backtracking/all_combinations.py +++ b/backtracking/all_combinations.py @@ -3,15 +3,16 @@ numbers out of 1 ... n. We use backtracking to solve this problem. Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!))) """ +from typing import List -def generate_all_combinations(n: int, k: int) -> [[int]]: +def generate_all_combinations(n: int, k: int) -> List[List[int]]: """ >>> generate_all_combinations(n=4, k=2) [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] """ - result = [] + result: List[List[int]] = [] create_all_state(1, n, k, [], result) return result @@ -20,8 +21,8 @@ def create_all_state( increment: int, total_number: int, level: int, - current_list: [int], - total_list: [int], + current_list: List[int], + total_list: List[List[int]], ) -> None: if level == 0: total_list.append(current_list[:]) @@ -33,7 +34,7 @@ def create_all_state( current_list.pop() -def print_all_state(total_list: [int]) -> None: +def print_all_state(total_list: List[List[int]]) -> None: for i in total_list: print(*i) diff --git a/backtracking/all_permutations.py b/backtracking/all_permutations.py index 59c7b7bbf41e..a0032c5ca814 100644 --- a/backtracking/all_permutations.py +++ b/backtracking/all_permutations.py @@ -5,14 +5,18 @@ Time complexity: O(n! * n), where n denotes the length of the given sequence. """ +from typing import List, Union -def generate_all_permutations(sequence: [int]) -> None: +def generate_all_permutations(sequence: List[Union[int, str]]) -> None: create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))]) def create_state_space_tree( - sequence: [int], current_sequence: [int], index: int, index_used: int + sequence: List[Union[int, str]], + current_sequence: List[Union[int, str]], + index: int, + index_used: List[int], ) -> None: """ Creates a state space tree to iterate through each branch using DFS. 
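The annotation changes in these hunks follow from how mypy treats list literals: a bare [int] or [[int]] in annotation position is just an expression that builds a list, not a type, so mypy rejects it, while typing.List[int] names the container type explicitly (the patches stay with typing.List, presumably because the built-in list[int] spelling was not yet available on every interpreter the repository supported). A small illustrative snippet, separate from the patch itself:

```python
from typing import List, Union

# Rejected by mypy: a list literal is not a valid type annotation.
# def head(values: [int]) -> int:
#     return values[0]


def head(values: List[int]) -> int:
    return values[0]


# A value that may hold both ints and strings needs an explicit Union element type.
mixed: List[Union[int, str]] = [3, 1, 2, "A"]

print(head([10, 20, 30]))  # 10
print(mixed)
```

The hunk that follows applies the same List[Union[int, str]] annotation to the module-level example sequences, so both the integer list and the string list match the updated signature.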
@@ -40,8 +44,8 @@ def create_state_space_tree( sequence = list(map(int, input().split())) """ -sequence = [3, 1, 2, 4] +sequence: List[Union[int, str]] = [3, 1, 2, 4] generate_all_permutations(sequence) -sequence = ["A", "B", "C"] -generate_all_permutations(sequence) +sequence_2: List[Union[int, str]] = ["A", "B", "C"] +generate_all_permutations(sequence_2) diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index 31696b4a84d3..29b8d819acf3 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -7,10 +7,12 @@ diagonal lines. """ +from typing import List + solution = [] -def isSafe(board: [[int]], row: int, column: int) -> bool: +def isSafe(board: List[List[int]], row: int, column: int) -> bool: """ This function returns a boolean value True if it is safe to place a queen there considering the current state of the board. @@ -38,7 +40,7 @@ def isSafe(board: [[int]], row: int, column: int) -> bool: return True -def solve(board: [[int]], row: int) -> bool: +def solve(board: List[List[int]], row: int) -> bool: """ It creates a state space tree and calls the safe function until it receives a False Boolean and terminates that branch and backtracks to the next @@ -53,7 +55,7 @@ def solve(board: [[int]], row: int) -> bool: solution.append(board) printboard(board) print() - return + return True for i in range(len(board)): """ For every row it iterates through each column to check if it is feasible to @@ -68,7 +70,7 @@ def solve(board: [[int]], row: int) -> bool: return False -def printboard(board: [[int]]) -> None: +def printboard(board: List[List[int]]) -> None: """ Prints the boards that have a successful combination. """ diff --git a/backtracking/rat_in_maze.py b/backtracking/rat_in_maze.py index 8dc484c3f92d..cd2a8f41daa8 100644 --- a/backtracking/rat_in_maze.py +++ b/backtracking/rat_in_maze.py @@ -1,4 +1,7 @@ -def solve_maze(maze: [[int]]) -> bool: +from typing import List + + +def solve_maze(maze: List[List[int]]) -> bool: """ This method solves the "rat in maze" problem. In this problem we have some n by n matrix, a start point and an end point. @@ -67,7 +70,7 @@ def solve_maze(maze: [[int]]) -> bool: return solved -def run_maze(maze: [[int]], i: int, j: int, solutions: [[int]]) -> bool: +def run_maze(maze: List[List[int]], i: int, j: int, solutions: List[List[int]]) -> bool: """ This method is recursive starting from (i, j) and going in one of four directions: up, down, left, right. @@ -106,6 +109,7 @@ def run_maze(maze: [[int]], i: int, j: int, solutions: [[int]]) -> bool: solutions[i][j] = 0 return False + return False if __name__ == "__main__": diff --git a/backtracking/sum_of_subsets.py b/backtracking/sum_of_subsets.py index b71edc2eefb5..f695b8f7a80e 100644 --- a/backtracking/sum_of_subsets.py +++ b/backtracking/sum_of_subsets.py @@ -6,11 +6,12 @@ Summation of the chosen numbers must be equal to given number M and one number can be used only once. 
""" +from typing import List -def generate_sum_of_subsets_soln(nums: [int], max_sum: [int]) -> [int]: - result = [] - path = [] +def generate_sum_of_subsets_soln(nums: List[int], max_sum: int) -> List[List[int]]: + result: List[List[int]] = [] + path: List[int] = [] num_index = 0 remaining_nums_sum = sum(nums) create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum) @@ -18,11 +19,11 @@ def generate_sum_of_subsets_soln(nums: [int], max_sum: [int]) -> [int]: def create_state_space_tree( - nums: [int], + nums: List[int], max_sum: int, num_index: int, - path: [int], - result: [int], + path: List[int], + result: List[List[int]], remaining_nums_sum: int, ) -> None: """ From ba6310b6470346fdac85ea4ef697e5939c30b180 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Sun, 29 Nov 2020 23:11:09 +0530 Subject: [PATCH 0033/1543] Validate only submitted Project Euler solution (#3977) * Update validate solution script to fetch only submitted solution * Update workflow file with the updated PE script * Fix: do not fetch `validate_solutions.py` script * Update script to use the requests package for API calls * Fix: install requests module * Pytest ignore scripts/ directory --- .github/workflows/build.yml | 2 +- .github/workflows/project_euler.yml | 18 +++++++----- scripts/validate_solutions.py | 45 +++++++++++++++++++++++++++-- 3 files changed, 55 insertions(+), 10 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ae9b4e36b1ce..9e15d18ade8e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,6 +22,6 @@ jobs: python -m pip install --upgrade pip setuptools six wheel python -m pip install pytest-cov -r requirements.txt - name: Run tests - run: pytest --doctest-modules --ignore=project_euler/ --cov-report=term-missing:skip-covered --cov=. . + run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . 
- if: ${{ success() }} run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index e8b011af20a6..995295fcaa9a 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -1,12 +1,14 @@ on: pull_request: - # only check if a file is changed within the project_euler directory and related files + # Run only if a file is changed within the project_euler directory and related files paths: - - 'project_euler/**' - - '.github/workflows/project_euler.yml' - - 'scripts/validate_solutions.py' + - "project_euler/**" + - ".github/workflows/project_euler.yml" + - "scripts/validate_solutions.py" + schedule: + - cron: "0 0 * * *" # Run everyday -name: 'Project Euler' +name: "Project Euler" jobs: project-euler: @@ -24,8 +26,10 @@ jobs: steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - - name: Install pytest + - name: Install pytest and requests run: | python -m pip install --upgrade pip - python -m pip install --upgrade pytest + python -m pip install --upgrade pytest requests - run: pytest scripts/validate_solutions.py + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index e1f68ff843bb..fd804ea5aa31 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -1,11 +1,13 @@ #!/usr/bin/env python3 import importlib.util import json +import os import pathlib from types import ModuleType from typing import Dict, List import pytest +import requests PROJECT_EULER_DIR_PATH = pathlib.Path.cwd().joinpath("project_euler") PROJECT_EULER_ANSWERS_PATH = pathlib.Path.cwd().joinpath( @@ -24,7 +26,7 @@ def convert_path_to_module(file_path: pathlib.Path) -> ModuleType: return module -def collect_solution_file_paths() -> List[pathlib.Path]: +def all_solution_file_paths() -> List[pathlib.Path]: """Collects all the solution file path in the Project Euler directory""" solution_file_paths = [] for problem_dir_path in PROJECT_EULER_DIR_PATH.iterdir(): @@ -37,12 +39,51 @@ def collect_solution_file_paths() -> List[pathlib.Path]: return solution_file_paths +def get_files_url() -> str: + """Return the pull request number which triggered this action.""" + with open(os.environ["GITHUB_EVENT_PATH"]) as file: + event = json.load(file) + return event["pull_request"]["url"] + "/files" + + +def added_solution_file_path() -> List[pathlib.Path]: + """Collects only the solution file path which got added in the current + pull request. + + This will only be triggered if the script is ran from GitHub Actions. 
+ """ + solution_file_paths = [] + headers = { + "Accept": "application/vnd.github.v3+json", + "Authorization": "token " + os.environ["GITHUB_TOKEN"], + } + files = requests.get(get_files_url(), headers=headers).json() + for file in files: + filepath = pathlib.Path.cwd().joinpath(file["filename"]) + if ( + filepath.suffix != ".py" + or filepath.name.startswith(("_", "test")) + or not filepath.name.startswith("sol") + ): + continue + solution_file_paths.append(filepath) + return solution_file_paths + + +def collect_solution_file_paths() -> List[pathlib.Path]: + if os.environ.get("CI") and os.environ.get("GITHUB_EVENT_NAME") == "pull_request": + # Return only if there are any, otherwise default to all solutions + if filepaths := added_solution_file_path(): + return filepaths + return all_solution_file_paths() + + @pytest.mark.parametrize( "solution_path", collect_solution_file_paths(), ids=lambda path: f"{path.parent.name}/{path.name}", ) -def test_project_euler(solution_path: pathlib.Path): +def test_project_euler(solution_path: pathlib.Path) -> None: """Testing for all Project Euler solutions""" # problem_[extract this part] and pad it with zeroes for width 3 problem_number: str = solution_path.parent.name[8:].zfill(3) From 06dad4f9d8624d9b9a4be56fef47a657f6ce6b82 Mon Sep 17 00:00:00 2001 From: John Law Date: Mon, 30 Nov 2020 01:46:26 +0800 Subject: [PATCH 0034/1543] Fix mypy in #3149 (#3988) * Fix mypy in #3149 * Fix pre-commit --- blockchain/chinese_remainder_theorem.py | 21 +++++---- blockchain/diophantine_equation.py | 50 ++++++++++----------- blockchain/modular_division.py | 58 +++++++++++++------------ 3 files changed, 67 insertions(+), 62 deletions(-) diff --git a/blockchain/chinese_remainder_theorem.py b/blockchain/chinese_remainder_theorem.py index 3e4b2b7b4f10..b50147ac1215 100644 --- a/blockchain/chinese_remainder_theorem.py +++ b/blockchain/chinese_remainder_theorem.py @@ -1,18 +1,21 @@ -# Chinese Remainder Theorem: -# GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor ) +""" +Chinese Remainder Theorem: +GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor ) -# If GCD(a,b) = 1, then for any remainder ra modulo a and any remainder rb modulo b -# there exists integer n, such that n = ra (mod a) and n = ra(mod b). If n1 and n2 are -# two such integers, then n1=n2(mod ab) +If GCD(a,b) = 1, then for any remainder ra modulo a and any remainder rb modulo b +there exists integer n, such that n = ra (mod a) and n = ra(mod b). If n1 and n2 are +two such integers, then n1=n2(mod ab) -# Algorithm : +Algorithm : -# 1. Use extended euclid algorithm to find x,y such that a*x + b*y = 1 -# 2. Take n = ra*by + rb*ax +1. Use extended euclid algorithm to find x,y such that a*x + b*y = 1 +2. Take n = ra*by + rb*ax +""" +from typing import Tuple # Extended Euclid -def extended_euclid(a: int, b: int) -> (int, int): +def extended_euclid(a: int, b: int) -> Tuple[int, int]: """ >>> extended_euclid(10, 6) (-1, 2) diff --git a/blockchain/diophantine_equation.py b/blockchain/diophantine_equation.py index a92c2a13cfd5..7df674cb1438 100644 --- a/blockchain/diophantine_equation.py +++ b/blockchain/diophantine_equation.py @@ -1,12 +1,14 @@ -# Diophantine Equation : Given integers a,b,c ( at least one of a and b != 0), the -# diophantine equation a*x + b*y = c has a solution (where x and y are integers) -# iff gcd(a,b) divides c. 
+from typing import Tuple -# GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor ) - -def diophantine(a: int, b: int, c: int) -> (int, int): +def diophantine(a: int, b: int, c: int) -> Tuple[float, float]: """ + Diophantine Equation : Given integers a,b,c ( at least one of a and b != 0), the + diophantine equation a*x + b*y = c has a solution (where x and y are integers) + iff gcd(a,b) divides c. + + GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor ) + >>> diophantine(10,6,14) (-7.0, 14.0) @@ -26,19 +28,19 @@ def diophantine(a: int, b: int, c: int) -> (int, int): return (r * x, r * y) -# Lemma : if n|ab and gcd(a,n) = 1, then n|b. - -# Finding All solutions of Diophantine Equations: +def diophantine_all_soln(a: int, b: int, c: int, n: int = 2) -> None: + """ + Lemma : if n|ab and gcd(a,n) = 1, then n|b. -# Theorem : Let gcd(a,b) = d, a = d*p, b = d*q. If (x0,y0) is a solution of Diophantine -# Equation a*x + b*y = c. a*x0 + b*y0 = c, then all the solutions have the form -# a(x0 + t*q) + b(y0 - t*p) = c, where t is an arbitrary integer. + Finding All solutions of Diophantine Equations: -# n is the number of solution you want, n = 2 by default + Theorem : Let gcd(a,b) = d, a = d*p, b = d*q. If (x0,y0) is a solution of + Diophantine Equation a*x + b*y = c. a*x0 + b*y0 = c, then all the + solutions have the form a(x0 + t*q) + b(y0 - t*p) = c, + where t is an arbitrary integer. + n is the number of solution you want, n = 2 by default -def diophantine_all_soln(a: int, b: int, c: int, n: int = 2) -> None: - """ >>> diophantine_all_soln(10, 6, 14) -7.0 14.0 -4.0 9.0 @@ -67,13 +69,12 @@ def diophantine_all_soln(a: int, b: int, c: int, n: int = 2) -> None: print(x, y) -# Euclid's Lemma : d divides a and b, if and only if d divides a-b and b - -# Euclid's Algorithm - - def greatest_common_divisor(a: int, b: int) -> int: """ + Euclid's Lemma : d divides a and b, if and only if d divides a-b and b + + Euclid's Algorithm + >>> greatest_common_divisor(7,5) 1 @@ -94,12 +95,11 @@ def greatest_common_divisor(a: int, b: int) -> int: return b -# Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers -# x and y, then d = gcd(a,b) - - -def extended_gcd(a: int, b: int) -> (int, int, int): +def extended_gcd(a: int, b: int) -> Tuple[int, int, int]: """ + Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers + x and y, then d = gcd(a,b) + >>> extended_gcd(10, 6) (2, -1, 2) diff --git a/blockchain/modular_division.py b/blockchain/modular_division.py index e012db28fab8..4f7f50a92ad0 100644 --- a/blockchain/modular_division.py +++ b/blockchain/modular_division.py @@ -1,21 +1,23 @@ -# Modular Division : -# An efficient algorithm for dividing b by a modulo n. +from typing import Tuple -# GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor ) -# Given three integers a, b, and n, such that gcd(a,n)=1 and n>1, the algorithm should -# return an integer x such that 0≤x≤n−1, and b/a=x(modn) (that is, b=ax(modn)). +def modular_division(a: int, b: int, n: int) -> int: + """ + Modular Division : + An efficient algorithm for dividing b by a modulo n. -# Theorem: -# a has a multiplicative inverse modulo n iff gcd(a,n) = 1 + GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor ) + Given three integers a, b, and n, such that gcd(a,n)=1 and n>1, the algorithm should + return an integer x such that 0≤x≤n−1, and b/a=x(modn) (that is, b=ax(modn)). 
-# This find x = b*a^(-1) mod n -# Uses ExtendedEuclid to find the inverse of a + Theorem: + a has a multiplicative inverse modulo n iff gcd(a,n) = 1 -def modular_division(a: int, b: int, n: int) -> int: - """ + This find x = b*a^(-1) mod n + Uses ExtendedEuclid to find the inverse of a + >>> modular_division(4,8,5) 2 @@ -32,9 +34,10 @@ def modular_division(a: int, b: int, n: int) -> int: return x -# This function find the inverses of a i.e., a^(-1) def invert_modulo(a: int, n: int) -> int: """ + This function find the inverses of a i.e., a^(-1) + >>> invert_modulo(2, 5) 3 @@ -50,9 +53,11 @@ def invert_modulo(a: int, n: int) -> int: # ------------------ Finding Modular division using invert_modulo ------------------- -# This function used the above inversion of a to find x = (b*a^(-1))mod n + def modular_division2(a: int, b: int, n: int) -> int: """ + This function used the above inversion of a to find x = (b*a^(-1))mod n + >>> modular_division2(4,8,5) 2 @@ -68,17 +73,15 @@ def modular_division2(a: int, b: int, n: int) -> int: return x -# Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers x -# and y, then d = gcd(a,b) - - -def extended_gcd(a: int, b: int) -> (int, int, int): +def extended_gcd(a: int, b: int) -> Tuple[int, int, int]: """ - >>> extended_gcd(10, 6) - (2, -1, 2) + Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers x + and y, then d = gcd(a,b) + >>> extended_gcd(10, 6) + (2, -1, 2) - >>> extended_gcd(7, 5) - (1, -2, 3) + >>> extended_gcd(7, 5) + (1, -2, 3) ** extended_gcd function is used when d = gcd(a,b) is required in output @@ -98,9 +101,9 @@ def extended_gcd(a: int, b: int) -> (int, int, int): return (d, x, y) -# Extended Euclid -def extended_euclid(a: int, b: int) -> (int, int): +def extended_euclid(a: int, b: int) -> Tuple[int, int]: """ + Extended Euclid >>> extended_euclid(10, 6) (-1, 2) @@ -115,12 +118,11 @@ def extended_euclid(a: int, b: int) -> (int, int): return (y, x - k * y) -# Euclid's Lemma : d divides a and b, if and only if d divides a-b and b -# Euclid's Algorithm - - def greatest_common_divisor(a: int, b: int) -> int: """ + Euclid's Lemma : d divides a and b, if and only if d divides a-b and b + Euclid's Algorithm + >>> greatest_common_divisor(7,5) 1 From daceb87a9685d5e12f43c2e4135ee4b06c0669f1 Mon Sep 17 00:00:00 2001 From: Erdum Date: Mon, 30 Nov 2020 03:07:10 +0500 Subject: [PATCH 0035/1543] Electric power (#3976) * Electric power * updated as suggested by cclauss * updated as suggested by cclauss * decimal value error * All done --- electronics/electric_power.py | 49 +++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 electronics/electric_power.py diff --git a/electronics/electric_power.py b/electronics/electric_power.py new file mode 100644 index 000000000000..768c3d5c7232 --- /dev/null +++ b/electronics/electric_power.py @@ -0,0 +1,49 @@ +# https://en.m.wikipedia.org/wiki/Electric_power +from collections import namedtuple + + +def electric_power(voltage: float, current: float, power: float) -> float: + """ + This function can calculate any one of the three (voltage, current, power), + fundamental value of electrical system. 
+ examples are below: + >>> electric_power(voltage=0, current=2, power=5) + result(name='voltage', value=2.5) + >>> electric_power(voltage=2, current=2, power=0) + result(name='power', value=4.0) + >>> electric_power(voltage=-2, current=3, power=0) + result(name='power', value=6.0) + >>> electric_power(voltage=2, current=4, power=2) + Traceback (most recent call last): + File "", line 15, in + ValueError: Only one argument must be 0 + >>> electric_power(voltage=0, current=0, power=2) + Traceback (most recent call last): + File "", line 19, in + ValueError: Only one argument must be 0 + >>> electric_power(voltage=0, current=2, power=-4) + Traceback (most recent call last): + File "", line 23, in >> electric_power(voltage=2.2, current=2.2, power=0) + result(name='power', value=4.84) + """ + result = namedtuple("result", "name value") + if (voltage, current, power).count(0) != 1: + raise ValueError("Only one argument must be 0") + elif power < 0: + raise ValueError( + "Power cannot be negative in any electrical/electronics system" + ) + elif voltage == 0: + return result("voltage", power / current) + elif current == 0: + return result("current", power / voltage) + elif power == 0: + return result("power", float(round(abs(voltage * current), 2))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 07a4ce9fb89031b4f1f3991fcf769b86a10a4aa8 Mon Sep 17 00:00:00 2001 From: wuyudi Date: Mon, 30 Nov 2020 22:59:23 +0800 Subject: [PATCH 0036/1543] Update pigeon_sort.py (#2359) * Update pigeon_sort.py * Update pigeon_sort.py * Update pigeon_sort.py --- sorts/pigeon_sort.py | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/sorts/pigeon_sort.py b/sorts/pigeon_sort.py index cc6205f804dc..3126e47c719e 100644 --- a/sorts/pigeon_sort.py +++ b/sorts/pigeon_sort.py @@ -26,29 +26,17 @@ def pigeon_sort(array): if len(array) == 0: return array - # Manually finds the minimum and maximum of the array. - min = array[0] - max = array[0] - - for i in range(len(array)): - if array[i] < min: - min = array[i] - elif array[i] > max: - max = array[i] + _min, _max = min(array), max(array) # Compute the variables - holes_range = max - min + 1 - holes = [0 for _ in range(holes_range)] - holes_repeat = [0 for _ in range(holes_range)] + holes_range = _max - _min + 1 + holes, holes_repeat = [0] * holes_range, [0] * holes_range # Make the sorting. - for i in range(len(array)): - index = array[i] - min - if holes[index] != array[i]: - holes[index] = array[i] - holes_repeat[index] += 1 - else: - holes_repeat[index] += 1 + for i in array: + index = i - _min + holes[index] = i + holes_repeat[index] += 1 # Makes the array back by replacing the numbers. 
index = 0 @@ -63,6 +51,8 @@ def pigeon_sort(array): if __name__ == "__main__": + import doctest + doctest.testmod() user_input = input("Enter numbers separated by comma:\n") unsorted = [int(x) for x in user_input.split(",")] print(pigeon_sort(unsorted)) From f8b2c43fda28efdcb7bb50c4beb443330b9b64e9 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Mon, 30 Nov 2020 21:03:29 +0530 Subject: [PATCH 0037/1543] Fix pre-commit error on master (#3992) * Update pigeon_sort.py * updating DIRECTORY.md * Add type hints and return annotation Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + sorts/pigeon_sort.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 00da7922d54d..2307685f1330 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -250,6 +250,7 @@ * [Sum Of Subset](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/sum_of_subset.py) ## Electronics + * [Electric Power](https://github.com/TheAlgorithms/Python/blob/master/electronics/electric_power.py) * [Ohms Law](https://github.com/TheAlgorithms/Python/blob/master/electronics/ohms_law.py) ## File Transfer diff --git a/sorts/pigeon_sort.py b/sorts/pigeon_sort.py index 3126e47c719e..3d81f0643865 100644 --- a/sorts/pigeon_sort.py +++ b/sorts/pigeon_sort.py @@ -9,9 +9,10 @@ For manual testing run: python pigeon_sort.py """ +from typing import List -def pigeon_sort(array): +def pigeon_sort(array: List[int]) -> List[int]: """ Implementation of pigeon hole sort algorithm :param array: Collection of comparable items @@ -52,6 +53,7 @@ def pigeon_sort(array): if __name__ == "__main__": import doctest + doctest.testmod() user_input = input("Enter numbers separated by comma:\n") unsorted = [int(x) for x in user_input.split(",")] From 860d4f547bcfbe96b5c1e1b507124b13c0dc7399 Mon Sep 17 00:00:00 2001 From: Maliha Date: Thu, 3 Dec 2020 07:02:48 -0800 Subject: [PATCH 0038/1543] Create merge_two_lists.py that implements merging of two sorted linked lists (#3874) * Create merge_two_lists.py that implements merging of two sorted linked lists * Update merge_two_lists.py Fixed formatting errors * Fixed trailing whitespace * Change name of function to def __str__() * updating DIRECTORY.md * Imported classes from singly_linked_list.py * Update merge_two_lists.py * Update merge_two_lists.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + .../linked_list/merge_two_lists.py | 83 +++++++++++++++++++ 2 files changed, 84 insertions(+) create mode 100644 data_structures/linked_list/merge_two_lists.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 2307685f1330..c9c3a09eb599 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -153,6 +153,7 @@ * [From Sequence](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/from_sequence.py) * [Has Loop](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/has_loop.py) * [Is Palindrome](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/is_palindrome.py) + * [Merge Two Lists](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/merge_two_lists.py) * [Middle Element Of Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/middle_element_of_linked_list.py) * [Print Reverse](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/print_reverse.py) * [Singly Linked 
List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/singly_linked_list.py) diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py new file mode 100644 index 000000000000..96ec6b8abc85 --- /dev/null +++ b/data_structures/linked_list/merge_two_lists.py @@ -0,0 +1,83 @@ +""" +Algorithm that merges two sorted linked lists into one sorted linked list. +""" +from __future__ import annotations + +from collections.abc import Iterable, Iterator +from dataclasses import dataclass +from typing import Optional + +test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1) +test_data_even = (4, 6, 2, 0, 8, 10, 3, -2) + + +@dataclass +class Node: + data: int + next: Optional[Node] + + +class SortedLinkedList: + def __init__(self, ints: Iterable[int]) -> None: + self.head: Optional[Node] = None + for i in reversed(sorted(ints)): + self.head = Node(i, self.head) + + def __iter__(self) -> Iterator[int]: + """ + >>> tuple(SortedLinkedList(test_data_odd)) == tuple(sorted(test_data_odd)) + True + >>> tuple(SortedLinkedList(test_data_even)) == tuple(sorted(test_data_even)) + True + """ + node = self.head + while node: + yield node.data + node = node.next + + def __len__(self) -> int: + """ + >>> for i in range(3): + ... len(SortedLinkedList(range(i))) == i + True + True + True + >>> len(SortedLinkedList(test_data_odd)) + 8 + """ + return len(tuple(iter(self))) + + def __str__(self) -> str: + """ + >>> str(SortedLinkedList([])) + '' + >>> str(SortedLinkedList(test_data_odd)) + '-11 -> -1 -> 0 -> 1 -> 3 -> 5 -> 7 -> 9' + >>> str(SortedLinkedList(test_data_even)) + '-2 -> 0 -> 2 -> 3 -> 4 -> 6 -> 8 -> 10' + """ + return " -> ".join([str(node) for node in self]) + + +def merge_lists( + sll_one: SortedLinkedList, sll_two: SortedLinkedList +) -> SortedLinkedList: + """ + >>> SSL = SortedLinkedList + >>> merged = merge_lists(SSL(test_data_odd), SSL(test_data_even)) + >>> len(merged) + 16 + >>> str(merged) + '-11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10' + >>> list(merged) == list(sorted(test_data_odd + test_data_even)) + True + """ + return SortedLinkedList(list(sll_one) + list(sll_two)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + SSL = SortedLinkedList + print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) From c359768e257a7bcbfe93e137a0fc1e81b92d6573 Mon Sep 17 00:00:00 2001 From: Jogendra Singh <58473917+Joe-Sin7h@users.noreply.github.com> Date: Wed, 9 Dec 2020 11:38:49 +0530 Subject: [PATCH 0039/1543] Update bitonic_sort with type hints, doctest, snake_case names (#4016) * Updated input * Fix pre-commit error * Add type hints, doctests, black, snake_case Co-authored-by: Dhruv Manilawala --- sorts/bitonic_sort.py | 144 ++++++++++++++++++++++++++---------------- 1 file changed, 91 insertions(+), 53 deletions(-) diff --git a/sorts/bitonic_sort.py b/sorts/bitonic_sort.py index be3499de13cd..c718973e5ecb 100644 --- a/sorts/bitonic_sort.py +++ b/sorts/bitonic_sort.py @@ -1,58 +1,96 @@ -# Python program for Bitonic Sort. Note that this program -# works only when size of input is a power of 2. - - -# The parameter dir indicates the sorting direction, ASCENDING -# or DESCENDING; if (a[i] > a[j]) agrees with the direction, -# then a[i] and a[j] are interchanged. 
-def compAndSwap(a, i, j, dire): - if (dire == 1 and a[i] > a[j]) or (dire == 0 and a[i] < a[j]): - a[i], a[j] = a[j], a[i] - - # It recursively sorts a bitonic sequence in ascending order, - - -# if dir = 1, and in descending order otherwise (means dir=0). -# The sequence to be sorted starts at index position low, -# the parameter cnt is the number of elements to be sorted. -def bitonic_merge(a, low, cnt, dire): - if cnt > 1: - k = int(cnt / 2) - for i in range(low, low + k): - compAndSwap(a, i, i + k, dire) - bitonic_merge(a, low, k, dire) - bitonic_merge(a, low + k, k, dire) - - # This function first produces a bitonic sequence by recursively - - -# sorting its two halves in opposite sorting orders, and then -# calls bitonic_merge to make them in the same order -def bitonic_sort(a, low, cnt, dire): - if cnt > 1: - k = int(cnt / 2) - bitonic_sort(a, low, k, 1) - bitonic_sort(a, low + k, k, 0) - bitonic_merge(a, low, cnt, dire) - - # Caller of bitonic_sort for sorting the entire array of length N - - -# in ASCENDING order -def sort(a, N, up): - bitonic_sort(a, 0, N, up) +""" +Python program for Bitonic Sort. + +Note that this program works only when size of input is a power of 2. +""" +from typing import List + + +def comp_and_swap(array: List[int], index1: int, index2: int, direction: int) -> None: + """Compare the value at given index1 and index2 of the array and swap them as per + the given direction. + + The parameter direction indicates the sorting direction, ASCENDING(1) or + DESCENDING(0); if (a[i] > a[j]) agrees with the direction, then a[i] and a[j] are + interchanged. + + >>> arr = [12, 42, -21, 1] + >>> comp_and_swap(arr, 1, 2, 1) + >>> print(arr) + [12, -21, 42, 1] + + >>> comp_and_swap(arr, 1, 2, 0) + >>> print(arr) + [12, 42, -21, 1] + + >>> comp_and_swap(arr, 0, 3, 1) + >>> print(arr) + [1, 42, -21, 12] + + >>> comp_and_swap(arr, 0, 3, 0) + >>> print(arr) + [12, 42, -21, 1] + """ + if (direction == 1 and array[index1] > array[index2]) or ( + direction == 0 and array[index1] < array[index2] + ): + array[index1], array[index2] = array[index2], array[index1] + + +def bitonic_merge(array: List[int], low: int, length: int, direction: int) -> None: + """ + It recursively sorts a bitonic sequence in ascending order, if direction = 1, and in + descending if direction = 0. + The sequence to be sorted starts at index position low, the parameter length is the + number of elements to be sorted. + + >>> arr = [12, 42, -21, 1] + >>> bitonic_merge(arr, 0, 4, 1) + >>> print(arr) + [-21, 1, 12, 42] + + >>> bitonic_merge(arr, 0, 4, 0) + >>> print(arr) + [42, 12, 1, -21] + """ + if length > 1: + middle = int(length / 2) + for i in range(low, low + middle): + comp_and_swap(array, i, i + middle, direction) + bitonic_merge(array, low, middle, direction) + bitonic_merge(array, low + middle, middle, direction) + + +def bitonic_sort(array: List[int], low: int, length: int, direction: int) -> None: + """ + This function first produces a bitonic sequence by recursively sorting its two + halves in opposite sorting orders, and then calls bitonic_merge to make them in the + same order. 
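+    As noted at the top of the module, length is assumed to be a power of 2.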
+ + >>> arr = [12, 34, 92, -23, 0, -121, -167, 145] + >>> bitonic_sort(arr, 0, 8, 1) + >>> arr + [-167, -121, -23, 0, 12, 34, 92, 145] + + >>> bitonic_sort(arr, 0, 8, 0) + >>> arr + [145, 92, 34, 12, 0, -23, -121, -167] + """ + if length > 1: + middle = int(length / 2) + bitonic_sort(array, low, middle, 1) + bitonic_sort(array, low + middle, middle, 0) + bitonic_merge(array, low, length, direction) if __name__ == "__main__": + user_input = input("Enter numbers separated by a comma:\n").strip() + unsorted = [int(item.strip()) for item in user_input.split(",")] - a = [] - - n = int(input().strip()) - for i in range(n): - a.append(int(input().strip())) - up = 1 + bitonic_sort(unsorted, 0, len(unsorted), 1) + print("\nSorted array in ascending order is: ", end="") + print(*unsorted, sep=", ") - sort(a, n, up) - print("\n\nSorted array is") - for i in range(n): - print("%d" % a[i]) + bitonic_merge(unsorted, 0, len(unsorted), 0) + print("Sorted array in descending order is: ", end="") + print(*unsorted, sep=", ") From c39be1d8b84fbc788160051c06e6b8f2bd66ed4f Mon Sep 17 00:00:00 2001 From: Lewis Tian Date: Wed, 9 Dec 2020 17:21:46 +0800 Subject: [PATCH 0040/1543] update graphs/breadth_first_search.py (#3908) * update graphs/breadth_first_search.py - update naming style to snake_case - add type hints * add doctests --- graphs/breadth_first_search.py | 74 ++++++++++++++++++++++++---------- 1 file changed, 53 insertions(+), 21 deletions(-) diff --git a/graphs/breadth_first_search.py b/graphs/breadth_first_search.py index e40ec9d1d06d..ee9855bd0c2d 100644 --- a/graphs/breadth_first_search.py +++ b/graphs/breadth_first_search.py @@ -2,24 +2,52 @@ """ Author: OMKAR PATHAK """ +from typing import Set + class Graph: - def __init__(self): + def __init__(self) -> None: self.vertices = {} - def printGraph(self): - """prints adjacency list representation of graaph""" - for i in self.vertices.keys(): + def print_graph(self) -> None: + """ + prints adjacency list representation of graaph + >>> g = Graph() + >>> g.print_graph() + >>> g.add_edge(0, 1) + >>> g.print_graph() + 0 : 1 + """ + for i in self.vertices: print(i, " : ", " -> ".join([str(j) for j in self.vertices[i]])) - def addEdge(self, fromVertex, toVertex): - """adding the edge between two vertices""" - if fromVertex in self.vertices.keys(): - self.vertices[fromVertex].append(toVertex) + def add_edge(self, from_vertex: int, to_vertex: int) -> None: + """ + adding the edge between two vertices + >>> g = Graph() + >>> g.print_graph() + >>> g.add_edge(0, 1) + >>> g.print_graph() + 0 : 1 + """ + if from_vertex in self.vertices: + self.vertices[from_vertex].append(to_vertex) else: - self.vertices[fromVertex] = [toVertex] + self.vertices[from_vertex] = [to_vertex] - def BFS(self, startVertex): + def bfs(self, start_vertex: int) -> Set[int]: + """ + >>> g = Graph() + >>> g.add_edge(0, 1) + >>> g.add_edge(0, 1) + >>> g.add_edge(0, 2) + >>> g.add_edge(1, 2) + >>> g.add_edge(2, 0) + >>> g.add_edge(2, 3) + >>> g.add_edge(3, 3) + >>> sorted(g.bfs(2)) + [0, 1, 2, 3] + """ # initialize set for storing already visited vertices visited = set() @@ -27,8 +55,8 @@ def BFS(self, startVertex): queue = [] # mark the source node as visited and enqueue it - visited.add(startVertex) - queue.append(startVertex) + visited.add(start_vertex) + queue.append(start_vertex) while queue: vertex = queue.pop(0) @@ -42,18 +70,22 @@ def BFS(self, startVertex): if __name__ == "__main__": + from doctest import testmod + + testmod(verbose=True) + g = Graph() - g.addEdge(0, 1) - g.addEdge(0, 
2) - g.addEdge(1, 2) - g.addEdge(2, 0) - g.addEdge(2, 3) - g.addEdge(3, 3) - - g.printGraph() + g.add_edge(0, 1) + g.add_edge(0, 2) + g.add_edge(1, 2) + g.add_edge(2, 0) + g.add_edge(2, 3) + g.add_edge(3, 3) + + g.print_graph() # 0 : 1 -> 2 # 1 : 2 # 2 : 0 -> 3 # 3 : 3 - assert sorted(g.BFS(2)) == [0, 1, 2, 3] + assert sorted(g.bfs(2)) == [0, 1, 2, 3] From e7ab06f5dedd2a3f216bb90b794edd760d9f8f4d Mon Sep 17 00:00:00 2001 From: Alex Joslin Date: Wed, 9 Dec 2020 01:22:07 -0800 Subject: [PATCH 0041/1543] Implemented minimum steps to one using tabulation. (#3911) * Implemented minimum steps to one using tabulation. * Update minimum_steps_to_one.py Made the parameter "n" more descriptive. Changed it to number * `n` to `number` Co-authored-by: John Law --- dynamic_programming/minimum_steps_to_one.py | 65 +++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 dynamic_programming/minimum_steps_to_one.py diff --git a/dynamic_programming/minimum_steps_to_one.py b/dynamic_programming/minimum_steps_to_one.py new file mode 100644 index 000000000000..f4eb7033dd20 --- /dev/null +++ b/dynamic_programming/minimum_steps_to_one.py @@ -0,0 +1,65 @@ +""" +YouTube Explanation: https://www.youtube.com/watch?v=f2xi3c1S95M + +Given an integer n, return the minimum steps to 1 + +AVAILABLE STEPS: + * Decrement by 1 + * if n is divisible by 2, divide by 2 + * if n is divisible by 3, divide by 3 + + +Example 1: n = 10 +10 -> 9 -> 3 -> 1 +Result: 3 steps + +Example 2: n = 15 +15 -> 5 -> 4 -> 2 -> 1 +Result: 4 steps + +Example 3: n = 6 +6 -> 2 -> 1 +Result: 2 step +""" + +from __future__ import annotations + +__author__ = "Alexander Joslin" + + +def min_steps_to_one(number: int) -> int: + """ + Minimum steps to 1 implemented using tabulation. + >>> min_steps_to_one(10) + 3 + >>> min_steps_to_one(15) + 4 + >>> min_steps_to_one(6) + 2 + + :param number: + :return int: + """ + + if number <= 0: + raise ValueError(f"n must be greater than 0. 
Got n = {number}") + + table = [number + 1] * (number + 1) + + # starting position + table[1] = 0 + for i in range(1, number): + table[i + 1] = min(table[i + 1], table[i] + 1) + # check if out of bounds + if i * 2 <= number: + table[i * 2] = min(table[i * 2], table[i] + 1) + # check if out of bounds + if i * 3 <= number: + table[i * 3] = min(table[i * 3], table[i] + 1) + return table[number] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b1801398ec339a4e3813b38054f0ec34dfa43bfe Mon Sep 17 00:00:00 2001 From: fpringle Date: Wed, 9 Dec 2020 12:14:51 +0100 Subject: [PATCH 0042/1543] Add Project Euler Problem 180 (#4017) * Added solution for Project Euler problem 180 * Fixed minor details in Project Euler problem 180 * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 + project_euler/problem_180/__init__.py | 0 project_euler/problem_180/sol1.py | 174 ++++++++++++++++++++++++++ 3 files changed, 177 insertions(+) create mode 100644 project_euler/problem_180/__init__.py create mode 100644 project_euler/problem_180/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index c9c3a09eb599..10523a85c48e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -245,6 +245,7 @@ * [Max Sum Contiguous Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sum_contiguous_subsequence.py) * [Minimum Cost Path](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_cost_path.py) * [Minimum Partition](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_partition.py) + * [Minimum Steps To One](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_steps_to_one.py) * [Optimal Binary Search Tree](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/optimal_binary_search_tree.py) * [Rod Cutting](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/rod_cutting.py) * [Subset Generation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/subset_generation.py) @@ -754,6 +755,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_173/sol1.py) * Problem 174 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_174/sol1.py) + * Problem 180 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_180/sol1.py) * Problem 188 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_188/sol1.py) * Problem 191 diff --git a/project_euler/problem_180/__init__.py b/project_euler/problem_180/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_180/sol1.py b/project_euler/problem_180/sol1.py new file mode 100644 index 000000000000..6112db2ea370 --- /dev/null +++ b/project_euler/problem_180/sol1.py @@ -0,0 +1,174 @@ +""" +Project Euler Problem 234: https://projecteuler.net/problem=234 + +For any integer n, consider the three functions + +f1,n(x,y,z) = x^(n+1) + y^(n+1) - z^(n+1) +f2,n(x,y,z) = (xy + yz + zx)*(x^(n-1) + y^(n-1) - z^(n-1)) +f3,n(x,y,z) = xyz*(xn-2 + yn-2 - zn-2) + +and their combination + +fn(x,y,z) = f1,n(x,y,z) + f2,n(x,y,z) - f3,n(x,y,z) + +We call (x,y,z) a golden triple of order k if x, y, and z are all rational numbers +of the form a / b with 0 < a < b ≤ k and there is (at least) one integer n, +so that fn(x,y,z) = 0. + +Let s(x,y,z) = x + y + z. 
+Let t = u / v be the sum of all distinct s(x,y,z) for all golden triples +(x,y,z) of order 35. +All the s(x,y,z) and t must be in reduced form. + +Find u + v. + + +Solution: + +By expanding the brackets it is easy to show that +fn(x, y, z) = (x + y + z) * (x^n + y^n - z^n). + +Since x,y,z are positive, the requirement fn(x, y, z) = 0 is fulfilled if and +only if x^n + y^n = z^n. + +By Fermat's Last Theorem, this means that the absolute value of n can not +exceed 2, i.e. n is in {-2, -1, 0, 1, 2}. We can eliminate n = 0 since then the +equation would reduce to 1 + 1 = 1, for which there are no solutions. + +So all we have to do is iterate through the possible numerators and denominators +of x and y, calculate the corresponding z, and check if the corresponding numerator and +denominator are integer and satisfy 0 < z_num < z_den <= 0. We use a set "uniquq_s" +to make sure there are no duplicates, and the fractions.Fraction class to make sure +we get the right numerator and denominator. + +Reference: +https://en.wikipedia.org/wiki/Fermat%27s_Last_Theorem +""" + + +from fractions import Fraction +from math import gcd, sqrt +from typing import Tuple + + +def is_sq(number: int) -> bool: + """ + Check if number is a perfect square. + + >>> is_sq(1) + True + >>> is_sq(1000001) + False + >>> is_sq(1000000) + True + """ + sq: int = int(number ** 0.5) + return number == sq * sq + + +def add_three( + x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int +) -> Tuple[int, int]: + """ + Given the numerators and denominators of three fractions, return the + numerator and denominator of their sum in lowest form. + >>> add_three(1, 3, 1, 3, 1, 3) + (1, 1) + >>> add_three(2, 5, 4, 11, 12, 3) + (262, 55) + """ + top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den + bottom: int = x_den * y_den * z_den + hcf: int = gcd(top, bottom) + top //= hcf + bottom //= hcf + return top, bottom + + +def solution(order: int = 35) -> int: + """ + Find the sum of the numerator and denominator of the sum of all s(x,y,z) for + golden triples (x,y,z) of the given order. 
+ + >>> solution(5) + 296 + >>> solution(10) + 12519 + >>> solution(20) + 19408891927 + """ + unique_s: set = set() + hcf: int + total: Fraction = Fraction(0) + fraction_sum: Tuple[int, int] + + for x_num in range(1, order + 1): + for x_den in range(x_num + 1, order + 1): + for y_num in range(1, order + 1): + for y_den in range(y_num + 1, order + 1): + # n=1 + z_num = x_num * y_den + x_den * y_num + z_den = x_den * y_den + hcf = gcd(z_num, z_den) + z_num //= hcf + z_den //= hcf + if 0 < z_num < z_den <= order: + fraction_sum = add_three( + x_num, x_den, y_num, y_den, z_num, z_den + ) + unique_s.add(fraction_sum) + + # n=2 + z_num = ( + x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num + ) + z_den = x_den * x_den * y_den * y_den + if is_sq(z_num) and is_sq(z_den): + z_num = int(sqrt(z_num)) + z_den = int(sqrt(z_den)) + hcf = gcd(z_num, z_den) + z_num //= hcf + z_den //= hcf + if 0 < z_num < z_den <= order: + fraction_sum = add_three( + x_num, x_den, y_num, y_den, z_num, z_den + ) + unique_s.add(fraction_sum) + + # n=-1 + z_num = x_num * y_num + z_den = x_den * y_num + x_num * y_den + hcf = gcd(z_num, z_den) + z_num //= hcf + z_den //= hcf + if 0 < z_num < z_den <= order: + fraction_sum = add_three( + x_num, x_den, y_num, y_den, z_num, z_den + ) + unique_s.add(fraction_sum) + + # n=2 + z_num = x_num * x_num * y_num * y_num + z_den = ( + x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den + ) + if is_sq(z_num) and is_sq(z_den): + z_num = int(sqrt(z_num)) + z_den = int(sqrt(z_den)) + hcf = gcd(z_num, z_den) + z_num //= hcf + z_den //= hcf + if 0 < z_num < z_den <= order: + fraction_sum = add_three( + x_num, x_den, y_num, y_den, z_num, z_den + ) + unique_s.add(fraction_sum) + + for num, den in unique_s: + total += Fraction(num, den) + + return total.denominator + total.numerator + + +if __name__ == "__main__": + print(f"{solution() = }") From bd4b83fcc7fba84e2d71d560f65299fd56c15640 Mon Sep 17 00:00:00 2001 From: Umair Kamran Date: Wed, 9 Dec 2020 19:01:58 +0500 Subject: [PATCH 0043/1543] Chore: Added type hints to searches/binary_search.py (#2682) * Chore: Added type hints to searches/binary_search.py * Use -1 as the sentinal value * Wrap long lines * Update binary_search.py * Update binary_search.py Co-authored-by: Christian Clauss --- searches/binary_search.py | 74 +++++++++++++++------------------------ 1 file changed, 28 insertions(+), 46 deletions(-) diff --git a/searches/binary_search.py b/searches/binary_search.py index d0f6296168fa..35e0dd0596d2 100644 --- a/searches/binary_search.py +++ b/searches/binary_search.py @@ -1,18 +1,21 @@ +#!/usr/bin/env python3 + """ This is pure Python implementation of binary search algorithms For doctests run following command: -python -m doctest -v binary_search.py -or python3 -m doctest -v binary_search.py For manual testing run: -python binary_search.py +python3 binary_search.py """ import bisect +from typing import List, Optional -def bisect_left(sorted_collection, item, lo=0, hi=None): +def bisect_left( + sorted_collection: List[int], item: int, lo: int = 0, hi: int = -1 +) -> int: """ Locates the first element in a sorted array that is larger or equal to a given value. 
@@ -43,7 +46,7 @@ def bisect_left(sorted_collection, item, lo=0, hi=None): >>> bisect_left([0, 5, 7, 10, 15], 6, 2) 2 """ - if hi is None: + if hi < 0: hi = len(sorted_collection) while lo < hi: @@ -56,7 +59,9 @@ def bisect_left(sorted_collection, item, lo=0, hi=None): return lo -def bisect_right(sorted_collection, item, lo=0, hi=None): +def bisect_right( + sorted_collection: List[int], item: int, lo: int = 0, hi: int = -1 +) -> int: """ Locates the first element in a sorted array that is larger than a given value. @@ -86,7 +91,7 @@ def bisect_right(sorted_collection, item, lo=0, hi=None): >>> bisect_right([0, 5, 7, 10, 15], 6, 2) 2 """ - if hi is None: + if hi < 0: hi = len(sorted_collection) while lo < hi: @@ -99,7 +104,9 @@ def bisect_right(sorted_collection, item, lo=0, hi=None): return lo -def insort_left(sorted_collection, item, lo=0, hi=None): +def insort_left( + sorted_collection: List[int], item: int, lo: int = 0, hi: int = -1 +) -> None: """ Inserts a given value into a sorted array before other values with the same value. @@ -140,7 +147,9 @@ def insort_left(sorted_collection, item, lo=0, hi=None): sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item) -def insort_right(sorted_collection, item, lo=0, hi=None): +def insort_right( + sorted_collection: List[int], item: int, lo: int = 0, hi: int = -1 +) -> None: """ Inserts a given value into a sorted array after other values with the same value. @@ -181,7 +190,7 @@ def insort_right(sorted_collection, item, lo=0, hi=None): sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item) -def binary_search(sorted_collection, item): +def binary_search(sorted_collection: List[int], item: int) -> Optional[int]: """Pure implementation of binary search algorithm in Python Be careful collection must be ascending sorted, otherwise result will be @@ -219,7 +228,7 @@ def binary_search(sorted_collection, item): return None -def binary_search_std_lib(sorted_collection, item): +def binary_search_std_lib(sorted_collection: List[int], item: int) -> Optional[int]: """Pure implementation of binary search algorithm in Python using stdlib Be careful collection must be ascending sorted, otherwise result will be @@ -248,7 +257,9 @@ def binary_search_std_lib(sorted_collection, item): return None -def binary_search_by_recursion(sorted_collection, item, left, right): +def binary_search_by_recursion( + sorted_collection: List[int], item: int, left: int, right: int +) -> Optional[int]: """Pure implementation of binary search algorithm in Python by recursion @@ -286,41 +297,12 @@ def binary_search_by_recursion(sorted_collection, item, left, right): return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right) -def __assert_sorted(collection): - """Check if collection is ascending sorted, if not - raises :py:class:`ValueError` - - :param collection: collection - :return: True if collection is ascending sorted - :raise: :py:class:`ValueError` if collection is not ascending sorted - - Examples: - >>> __assert_sorted([0, 1, 2, 4]) - True - - >>> __assert_sorted([10, -1, 5]) - Traceback (most recent call last): - ... 
- ValueError: Collection must be ascending sorted - """ - if collection != sorted(collection): - raise ValueError("Collection must be ascending sorted") - return True - - if __name__ == "__main__": - import sys - user_input = input("Enter numbers separated by comma:\n").strip() - collection = [int(item) for item in user_input.split(",")] - try: - __assert_sorted(collection) - except ValueError: - sys.exit("Sequence must be ascending sorted to apply binary search") - - target_input = input("Enter a single number to be found in the list:\n") - target = int(target_input) + collection = sorted(int(item) for item in user_input.split(",")) + target = int(input("Enter a single number to be found in the list:\n")) result = binary_search(collection, target) - if result is not None: - print(f"{target} found at positions: {result}") + if result is None: + print(f"{target} was not found in {collection}.") else: - print("Not found") + print(f"{target} was found at position {result} in {collection}.") From 75759fae22e44aa101d8d81705f0a995d038612c Mon Sep 17 00:00:00 2001 From: fpringle Date: Thu, 10 Dec 2020 14:18:17 +0100 Subject: [PATCH 0044/1543] Add solution for Project Euler problem 085 (#4024) * Added solution for Project Euler problem 085. * updating DIRECTORY.md * Minor tweaks to Project Euler problem 85 * Variable comments for project euler problem 85 Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_085/__init__.py | 0 project_euler/problem_085/sol1.py | 108 ++++++++++++++++++++++++++ 3 files changed, 110 insertions(+) create mode 100644 project_euler/problem_085/__init__.py create mode 100644 project_euler/problem_085/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 10523a85c48e..cb582e793ade 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -727,6 +727,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_080/sol1.py) * Problem 081 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_081/sol1.py) + * Problem 085 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_085/sol1.py) * Problem 087 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_087/sol1.py) * Problem 089 diff --git a/project_euler/problem_085/__init__.py b/project_euler/problem_085/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_085/sol1.py b/project_euler/problem_085/sol1.py new file mode 100644 index 000000000000..74e36b1301a4 --- /dev/null +++ b/project_euler/problem_085/sol1.py @@ -0,0 +1,108 @@ +""" +Project Euler Problem 85: https://projecteuler.net/problem=85 + +By counting carefully it can be seen that a rectangular grid measuring 3 by 2 +contains eighteen rectangles. + +Although there exists no rectangular grid that contains exactly two million +rectangles, find the area of the grid with the nearest solution. + +Solution: + + For a grid with side-lengths a and b, the number of rectangles contained in the grid + is [a*(a+1)/2] * [b*(b+1)/2)], which happens to be the product of the a-th and b-th + triangle numbers. So to find the solution grid (a,b), we need to find the two + triangle numbers whose product is closest to two million. + + Denote these two triangle numbers Ta and Tb. We want their product Ta*Tb to be + as close as possible to 2m. Assuming that the best solution is fairly close to 2m, + We can assume that both Ta and Tb are roughly bounded by 2m. 
Since Ta = a(a+1)/2, + we can assume that a (and similarly b) are roughly bounded by sqrt(2 * 2m) = 2000. + Since this is a rough bound, to be on the safe side we add 10%. Therefore we start + by generating all the triangle numbers Ta for 1 <= a <= 2200. This can be done + iteratively since the ith triangle number is the sum of 1,2, ... ,i, and so + T(i) = T(i-1) + i. + + We then search this list of triangle numbers for the two that give a product + closest to our target of two million. Rather than testing every combination of 2 + elements of the list, which would find the result in quadratic time, we can find + the best pair in linear time. + + We iterate through the list of triangle numbers using enumerate() so we have a + and Ta. Since we want Ta * Tb to be as close as possible to 2m, we know that Tb + needs to be roughly 2m / Ta. Using the formula Tb = b*(b+1)/2 as well as the + quadratic formula, we can solve for b: + b is roughly (-1 + sqrt(1 + 8 * 2m / Ta)) / 2. + + Since the closest integers to this estimate will give product closest to 2m, + we only need to consider the integers above and below. It's then a simple matter + to get the triangle numbers corresponding to those integers, calculate the product + Ta * Tb, compare that product to our target 2m, and keep track of the (a,b) pair + that comes the closest. + + +Reference: https://en.wikipedia.org/wiki/Triangular_number + https://en.wikipedia.org/wiki/Quadratic_formula +""" + + +from math import ceil, floor, sqrt +from typing import List + + +def solution(target: int = 2000000) -> int: + """ + Find the area of the grid which contains as close to two million rectangles + as possible. + >>> solution(20) + 6 + >>> solution(2000) + 72 + >>> solution(2000000000) + 86595 + """ + triangle_numbers: List[int] = [0] + idx: int + + for idx in range(1, ceil(sqrt(target * 2) * 1.1)): + triangle_numbers.append(triangle_numbers[-1] + idx) + + # we want this to be as close as possible to target + best_product: int = 0 + # the area corresponding to the grid that gives the product closest to target + area: int = 0 + # an estimate of b, using the quadratic formula + b_estimate: float + # the largest integer less than b_estimate + b_floor: int + # the largest integer less than b_estimate + b_ceil: int + # the triangle number corresponding to b_floor + triangle_b_first_guess: int + # the triangle number corresponding to b_ceil + triangle_b_second_guess: int + + for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1): + b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2 + b_floor = floor(b_estimate) + b_ceil = ceil(b_estimate) + triangle_b_first_guess = triangle_numbers[b_floor] + triangle_b_second_guess = triangle_numbers[b_ceil] + + if abs(target - triangle_b_first_guess * triangle_a) < abs( + target - best_product + ): + best_product = triangle_b_first_guess * triangle_a + area = idx_a * b_floor + + if abs(target - triangle_b_second_guess * triangle_a) < abs( + target - best_product + ): + best_product = triangle_b_second_guess * triangle_a + area = idx_a * b_ceil + + return area + + +if __name__ == "__main__": + print(f"{solution() = }") From 110a740d5d026c4675489ea2acfefda773c4e032 Mon Sep 17 00:00:00 2001 From: Abdeldjaouad Nusayr Medakene <31663979+MrGeek1337@users.noreply.github.com> Date: Thu, 10 Dec 2020 18:25:57 +0100 Subject: [PATCH 0045/1543] Update ciphers/caesar_cipher.py with type hints (#3860) * Update caesar_cipher.py improved for conciseness and readability * Add type hints Co-authored-by: Dhruv Manilawala --- 
ciphers/caesar_cipher.py | 32 ++++++++------------------------ 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/ciphers/caesar_cipher.py b/ciphers/caesar_cipher.py index 4038919e5dde..4b2f76c7d873 100644 --- a/ciphers/caesar_cipher.py +++ b/ciphers/caesar_cipher.py @@ -1,7 +1,8 @@ from string import ascii_letters +from typing import Dict, Optional -def encrypt(input_string: str, key: int, alphabet=None) -> str: +def encrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str: """ encrypt ======= @@ -79,7 +80,7 @@ def encrypt(input_string: str, key: int, alphabet=None) -> str: return result -def decrypt(input_string: str, key: int, alphabet=None) -> str: +def decrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str: """ decrypt ======= @@ -144,7 +145,7 @@ def decrypt(input_string: str, key: int, alphabet=None) -> str: return encrypt(input_string, key, alphabet) -def brute_force(input_string: str, alphabet=None) -> dict: +def brute_force(input_string: str, alphabet: Optional[str] = None) -> Dict[int, str]: """ brute_force =========== @@ -193,31 +194,18 @@ def brute_force(input_string: str, alphabet=None) -> dict: # Set default alphabet to lower and upper case english chars alpha = alphabet or ascii_letters - # The key during testing (will increase) - key = 1 - - # The encoded result - result = "" - # To store data on all the combinations brute_force_data = {} # Cycle through each combination - while key <= len(alpha): - # Decrypt the message - result = decrypt(input_string, key, alpha) - - # Update the data - brute_force_data[key] = result - - # Reset result and increase the key - result = "" - key += 1 + for key in range(1, len(alpha) + 1): + # Decrypt the message and store the result in the data + brute_force_data[key] = decrypt(input_string, key, alpha) return brute_force_data -def main(): +if __name__ == "__main__": while True: print(f'\n{"-" * 10}\n Menu\n{"-" * 10}') print(*["1.Encrypt", "2.Decrypt", "3.BruteForce", "4.Quit"], sep="\n") @@ -248,7 +236,3 @@ def main(): elif choice == "4": print("Goodbye.") break - - -if __name__ == "__main__": - main() From 533e36d32ba415be382e9c8c0803d5261b489afd Mon Sep 17 00:00:00 2001 From: zakademic <67771932+zakademic@users.noreply.github.com> Date: Fri, 11 Dec 2020 20:40:23 -0800 Subject: [PATCH 0046/1543] Add conjugate gradient method algorithm (#2486) * Initial commit of the conjugate gradient method * Update linear_algebra/src/conjugate_gradient.py * Added documentation links, changed variable names to lower case and more descriptive naming, added check for symmetry in _is_matrix_spd * Made changes to some variable naming to be more clear * Update conjugate_gradient.py Co-authored-by: Zeyad Zaky Co-authored-by: Christian Clauss Co-authored-by: Dhruv Manilawala --- linear_algebra/src/conjugate_gradient.py | 173 +++++++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 linear_algebra/src/conjugate_gradient.py diff --git a/linear_algebra/src/conjugate_gradient.py b/linear_algebra/src/conjugate_gradient.py new file mode 100644 index 000000000000..1a65b8ccf019 --- /dev/null +++ b/linear_algebra/src/conjugate_gradient.py @@ -0,0 +1,173 @@ +""" +Resources: +- https://en.wikipedia.org/wiki/Conjugate_gradient_method +- https://en.wikipedia.org/wiki/Definite_symmetric_matrix +""" +import numpy as np + + +def _is_matrix_spd(matrix: np.array) -> bool: + """ + Returns True if input matrix is symmetric positive definite. + Returns False otherwise. 
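+
+    A matrix is SPD when it equals its own transpose and x^T A x > 0 for every
+    nonzero vector x.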
+ + For a matrix to be SPD, all eigenvalues must be positive. + + >>> import numpy as np + >>> matrix = np.array([ + ... [4.12401784, -5.01453636, -0.63865857], + ... [-5.01453636, 12.33347422, -3.40493586], + ... [-0.63865857, -3.40493586, 5.78591885]]) + >>> _is_matrix_spd(matrix) + True + >>> matrix = np.array([ + ... [0.34634879, 1.96165514, 2.18277744], + ... [0.74074469, -1.19648894, -1.34223498], + ... [-0.7687067 , 0.06018373, -1.16315631]]) + >>> _is_matrix_spd(matrix) + False + """ + # Ensure matrix is square. + assert np.shape(matrix)[0] == np.shape(matrix)[1] + + # If matrix not symmetric, exit right away. + if np.allclose(matrix, matrix.T) is False: + return False + + # Get eigenvalues and eignevectors for a symmetric matrix. + eigen_values, _ = np.linalg.eigh(matrix) + + # Check sign of all eigenvalues. + return np.all(eigen_values > 0) + + +def _create_spd_matrix(dimension: np.int64) -> np.array: + """ + Returns a symmetric positive definite matrix given a dimension. + + Input: + dimension gives the square matrix dimension. + + Output: + spd_matrix is an diminesion x dimensions symmetric positive definite (SPD) matrix. + + >>> import numpy as np + >>> dimension = 3 + >>> spd_matrix = _create_spd_matrix(dimension) + >>> _is_matrix_spd(spd_matrix) + True + """ + random_matrix = np.random.randn(dimension, dimension) + spd_matrix = np.dot(random_matrix, random_matrix.T) + assert _is_matrix_spd(spd_matrix) + return spd_matrix + + +def conjugate_gradient( + spd_matrix: np.array, + load_vector: np.array, + max_iterations: int = 1000, + tol: float = 1e-8, +) -> np.array: + """ + Returns solution to the linear system np.dot(spd_matrix, x) = b. + + Input: + spd_matrix is an NxN Symmetric Positive Definite (SPD) matrix. + load_vector is an Nx1 vector. + + Output: + x is an Nx1 vector that is the solution vector. + + >>> import numpy as np + >>> spd_matrix = np.array([ + ... [8.73256573, -5.02034289, -2.68709226], + ... [-5.02034289, 3.78188322, 0.91980451], + ... [-2.68709226, 0.91980451, 1.94746467]]) + >>> b = np.array([ + ... [-5.80872761], + ... [ 3.23807431], + ... [ 1.95381422]]) + >>> conjugate_gradient(spd_matrix, b) + array([[-0.63114139], + [-0.01561498], + [ 0.13979294]]) + """ + # Ensure proper dimensionality. + assert np.shape(spd_matrix)[0] == np.shape(spd_matrix)[1] + assert np.shape(load_vector)[0] == np.shape(spd_matrix)[0] + assert _is_matrix_spd(spd_matrix) + + # Initialize solution guess, residual, search direction. + x0 = np.zeros((np.shape(load_vector)[0], 1)) + r0 = np.copy(load_vector) + p0 = np.copy(r0) + + # Set initial errors in solution guess and residual. + error_residual = 1e9 + error_x_solution = 1e9 + error = 1e9 + + # Set iteration counter to threshold number of iterations. + iterations = 0 + + while error > tol: + + # Save this value so we only calculate the matrix-vector product once. + w = np.dot(spd_matrix, p0) + + # The main algorithm. + + # Update search direction magnitude. + alpha = np.dot(r0.T, r0) / np.dot(p0.T, w) + # Update solution guess. + x = x0 + alpha * p0 + # Calculate new residual. + r = r0 - alpha * w + # Calculate new Krylov subspace scale. + beta = np.dot(r.T, r) / np.dot(r0.T, r0) + # Calculate new A conjuage search direction. + p = r + beta * p0 + + # Calculate errors. + error_residual = np.linalg.norm(r - r0) + error_x_solution = np.linalg.norm(x - x0) + error = np.maximum(error_residual, error_x_solution) + + # Update variables. + x0 = np.copy(x) + r0 = np.copy(r) + p0 = np.copy(p) + + # Update number of iterations. 
+ iterations += 1 + + return x + + +def test_conjugate_gradient() -> None: + """ + >>> test_conjugate_gradient() # self running tests + """ + # Create linear system with SPD matrix and known solution x_true. + dimension = 3 + spd_matrix = _create_spd_matrix(dimension) + x_true = np.random.randn(dimension, 1) + b = np.dot(spd_matrix, x_true) + + # Numpy solution. + x_numpy = np.linalg.solve(spd_matrix, b) + + # Our implementation. + x_conjugate_gradient = conjugate_gradient(spd_matrix, b) + + # Ensure both solutions are close to x_true (and therefore one another). + assert np.linalg.norm(x_numpy - x_true) <= 1e-6 + assert np.linalg.norm(x_conjugate_gradient - x_true) <= 1e-6 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + test_conjugate_gradient() From a6f6eb264995c41db646467a0078aa24e4ebec48 Mon Sep 17 00:00:00 2001 From: fpringle Date: Sat, 12 Dec 2020 06:19:35 +0100 Subject: [PATCH 0047/1543] Add solution for Project Euler problem 86 (#4025) * Added solution for Project Euler problem 86 * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_086/__init__.py | 0 project_euler/problem_086/sol1.py | 105 ++++++++++++++++++++++++++ 3 files changed, 107 insertions(+) create mode 100644 project_euler/problem_086/__init__.py create mode 100644 project_euler/problem_086/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index cb582e793ade..7eec7e0811dd 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -729,6 +729,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_081/sol1.py) * Problem 085 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_085/sol1.py) + * Problem 086 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_086/sol1.py) * Problem 087 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_087/sol1.py) * Problem 089 diff --git a/project_euler/problem_086/__init__.py b/project_euler/problem_086/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_086/sol1.py b/project_euler/problem_086/sol1.py new file mode 100644 index 000000000000..0bf66e6b5a31 --- /dev/null +++ b/project_euler/problem_086/sol1.py @@ -0,0 +1,105 @@ +""" +Project Euler Problem 86: https://projecteuler.net/problem=86 + +A spider, S, sits in one corner of a cuboid room, measuring 6 by 5 by 3, and a fly, F, +sits in the opposite corner. By travelling on the surfaces of the room the shortest +"straight line" distance from S to F is 10 and the path is shown on the diagram. + +However, there are up to three "shortest" path candidates for any given cuboid and the +shortest route doesn't always have integer length. + +It can be shown that there are exactly 2060 distinct cuboids, ignoring rotations, with +integer dimensions, up to a maximum size of M by M by M, for which the shortest route +has integer length when M = 100. This is the least value of M for which the number of +solutions first exceeds two thousand; the number of solutions when M = 99 is 1975. + +Find the least value of M such that the number of solutions first exceeds one million. + +Solution: + Label the 3 side-lengths of the cuboid a,b,c such that 1 <= a <= b <= c <= M. + By conceptually "opening up" the cuboid and laying out its faces on a plane, + it can be seen that the shortest distance between 2 opposite corners is + sqrt((a+b)^2 + c^2). 
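+    (For the 6 by 5 by 3 room above, taking a=3, b=5, c=6 gives
+    sqrt((3+5)^2 + 6^2) = sqrt(100) = 10, the stated shortest path.)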
This distance is an integer if and only if (a+b),c make up + the first 2 sides of a pythagorean triplet. + + The second useful insight is rather than calculate the number of cuboids + with integral shortest distance for each maximum cuboid side-length M, + we can calculate this number iteratively each time we increase M, as follows. + The set of cuboids satisfying this property with maximum side-length M-1 is a + subset of the cuboids satisfying the property with maximum side-length M + (since any cuboids with side lengths <= M-1 are also <= M). To calculate the + number of cuboids in the larger set (corresponding to M) we need only consider + the cuboids which have at least one side of length M. Since we have ordered the + side lengths a <= b <= c, we can assume that c = M. Then we just need to count + the number of pairs a,b satisfying the conditions: + sqrt((a+b)^2 + M^2) is integer + 1 <= a <= b <= M + + To count the number of pairs (a,b) satisfying these conditions, write d = a+b. + Now we have: + 1 <= a <= b <= M => 2 <= d <= 2*M + we can actually make the second equality strict, + since d = 2*M => d^2 + M^2 = 5M^2 + => shortest distance = M * sqrt(5) + => not integral. + a + b = d => b = d - a + and a <= b + => a <= d/2 + also a <= M + => a <= min(M, d//2) + + a + b = d => a = d - b + and b <= M + => a >= d - M + also a >= 1 + => a >= max(1, d - M) + + So a is in range(max(1, d - M), min(M, d // 2) + 1) + + For a given d, the number of cuboids satisfying the required property with c = M + and a + b = d is the length of this range, which is + min(M, d // 2) + 1 - max(1, d - M). + + In the code below, d is sum_shortest_sides + and M is max_cuboid_size. + + +""" + + +from math import sqrt + + +def solution(limit: int = 1000000) -> int: + """ + Return the least value of M such that there are more than one million cuboids + of side lengths 1 <= a,b,c <= M such that the shortest distance between two + opposite vertices of the cuboid is integral. 
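+    (The problem statement gives 2060 qualifying cuboids for M = 100 and 1975 for
+    M = 99, consistent with solution(2000) returning 100 below.)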
+ >>> solution(100) + 24 + >>> solution(1000) + 72 + >>> solution(2000) + 100 + >>> solution(20000) + 288 + """ + num_cuboids: int = 0 + max_cuboid_size: int = 0 + sum_shortest_sides: int + + while num_cuboids <= limit: + max_cuboid_size += 1 + for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1): + if sqrt(sum_shortest_sides ** 2 + max_cuboid_size ** 2).is_integer(): + num_cuboids += ( + min(max_cuboid_size, sum_shortest_sides // 2) + - max(1, sum_shortest_sides - max_cuboid_size) + + 1 + ) + + return max_cuboid_size + + +if __name__ == "__main__": + print(f"{solution() = }") From ae8a5f86754ea1cc466314fa40a664c7322d4be9 Mon Sep 17 00:00:00 2001 From: fpringle Date: Sun, 13 Dec 2020 12:09:52 +0100 Subject: [PATCH 0048/1543] Add solution for Project Euler problem 59 (#4031) * Added solution for Project Euler problem 59 * updating DIRECTORY.md * Formatting, type hints, no more evil map functions * Doctests * Added doctests for Project Euler problem 59 Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 + project_euler/problem_059/__init__.py | 0 project_euler/problem_059/p059_cipher.txt | 1 + project_euler/problem_059/sol1.py | 128 ++++++++++++++++++++++ project_euler/problem_059/test_cipher.txt | 1 + 5 files changed, 133 insertions(+) create mode 100644 project_euler/problem_059/__init__.py create mode 100644 project_euler/problem_059/p059_cipher.txt create mode 100644 project_euler/problem_059/sol1.py create mode 100644 project_euler/problem_059/test_cipher.txt diff --git a/DIRECTORY.md b/DIRECTORY.md index 7eec7e0811dd..929a986b0f3b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -345,6 +345,7 @@ ## Linear Algebra * Src + * [Conjugate Gradient](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/conjugate_gradient.py) * [Lib](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/lib.py) * [Polynom For Points](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/polynom_for_points.py) * [Power Iteration](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/power_iteration.py) @@ -695,6 +696,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_057/sol1.py) * Problem 058 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_058/sol1.py) + * Problem 059 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_059/sol1.py) * Problem 062 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_062/sol1.py) * Problem 063 diff --git a/project_euler/problem_059/__init__.py b/project_euler/problem_059/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_059/p059_cipher.txt b/project_euler/problem_059/p059_cipher.txt new file mode 100644 index 000000000000..b3b3247298d1 --- /dev/null +++ b/project_euler/problem_059/p059_cipher.txt @@ -0,0 +1 @@ 
+36,22,80,0,0,4,23,25,19,17,88,4,4,19,21,11,88,22,23,23,29,69,12,24,0,88,25,11,12,2,10,28,5,6,12,25,10,22,80,10,30,80,10,22,21,69,23,22,69,61,5,9,29,2,66,11,80,8,23,3,17,88,19,0,20,21,7,10,17,17,29,20,69,8,17,21,29,2,22,84,80,71,60,21,69,11,5,8,21,25,22,88,3,0,10,25,0,10,5,8,88,2,0,27,25,21,10,31,6,25,2,16,21,82,69,35,63,11,88,4,13,29,80,22,13,29,22,88,31,3,88,3,0,10,25,0,11,80,10,30,80,23,29,19,12,8,2,10,27,17,9,11,45,95,88,57,69,16,17,19,29,80,23,29,19,0,22,4,9,1,80,3,23,5,11,28,92,69,9,5,12,12,21,69,13,30,0,0,0,0,27,4,0,28,28,28,84,80,4,22,80,0,20,21,2,25,30,17,88,21,29,8,2,0,11,3,12,23,30,69,30,31,23,88,4,13,29,80,0,22,4,12,10,21,69,11,5,8,88,31,3,88,4,13,17,3,69,11,21,23,17,21,22,88,65,69,83,80,84,87,68,69,83,80,84,87,73,69,83,80,84,87,65,83,88,91,69,29,4,6,86,92,69,15,24,12,27,24,69,28,21,21,29,30,1,11,80,10,22,80,17,16,21,69,9,5,4,28,2,4,12,5,23,29,80,10,30,80,17,16,21,69,27,25,23,27,28,0,84,80,22,23,80,17,16,17,17,88,25,3,88,4,13,29,80,17,10,5,0,88,3,16,21,80,10,30,80,17,16,25,22,88,3,0,10,25,0,11,80,12,11,80,10,26,4,4,17,30,0,28,92,69,30,2,10,21,80,12,12,80,4,12,80,10,22,19,0,88,4,13,29,80,20,13,17,1,10,17,17,13,2,0,88,31,3,88,4,13,29,80,6,17,2,6,20,21,69,30,31,9,20,31,18,11,94,69,54,17,8,29,28,28,84,80,44,88,24,4,14,21,69,30,31,16,22,20,69,12,24,4,12,80,17,16,21,69,11,5,8,88,31,3,88,4,13,17,3,69,11,21,23,17,21,22,88,25,22,88,17,69,11,25,29,12,24,69,8,17,23,12,80,10,30,80,17,16,21,69,11,1,16,25,2,0,88,31,3,88,4,13,29,80,21,29,2,12,21,21,17,29,2,69,23,22,69,12,24,0,88,19,12,10,19,9,29,80,18,16,31,22,29,80,1,17,17,8,29,4,0,10,80,12,11,80,84,67,80,10,10,80,7,1,80,21,13,4,17,17,30,2,88,4,13,29,80,22,13,29,69,23,22,69,12,24,12,11,80,22,29,2,12,29,3,69,29,1,16,25,28,69,12,31,69,11,92,69,17,4,69,16,17,22,88,4,13,29,80,23,25,4,12,23,80,22,9,2,17,80,70,76,88,29,16,20,4,12,8,28,12,29,20,69,26,9,69,11,80,17,23,80,84,88,31,3,88,4,13,29,80,21,29,2,12,21,21,17,29,2,69,12,31,69,12,24,0,88,20,12,25,29,0,12,21,23,86,80,44,88,7,12,20,28,69,11,31,10,22,80,22,16,31,18,88,4,13,25,4,69,12,24,0,88,3,16,21,80,10,30,80,17,16,25,22,88,3,0,10,25,0,11,80,17,23,80,7,29,80,4,8,0,23,23,8,12,21,17,17,29,28,28,88,65,75,78,68,81,65,67,81,72,70,83,64,68,87,74,70,81,75,70,81,67,80,4,22,20,69,30,2,10,21,80,8,13,28,17,17,0,9,1,25,11,31,80,17,16,25,22,88,30,16,21,18,0,10,80,7,1,80,22,17,8,73,88,17,11,28,80,17,16,21,11,88,4,4,19,25,11,31,80,17,16,21,69,11,1,16,25,2,0,88,2,10,23,4,73,88,4,13,29,80,11,13,29,7,29,2,69,75,94,84,76,65,80,65,66,83,77,67,80,64,73,82,65,67,87,75,72,69,17,3,69,17,30,1,29,21,1,88,0,23,23,20,16,27,21,1,84,80,18,16,25,6,16,80,0,0,0,23,29,3,22,29,3,69,12,24,0,88,0,0,10,25,8,29,4,0,10,80,10,30,80,4,88,19,12,10,19,9,29,80,18,16,31,22,29,80,1,17,17,8,29,4,0,10,80,12,11,80,84,86,80,35,23,28,9,23,7,12,22,23,69,25,23,4,17,30,69,12,24,0,88,3,4,21,21,69,11,4,0,8,3,69,26,9,69,15,24,12,27,24,69,49,80,13,25,20,69,25,2,23,17,6,0,28,80,4,12,80,17,16,25,22,88,3,16,21,92,69,49,80,13,25,6,0,88,20,12,11,19,10,14,21,23,29,20,69,12,24,4,12,80,17,16,21,69,11,5,8,88,31,3,88,4,13,29,80,22,29,2,12,29,3,69,73,80,78,88,65,74,73,70,69,83,80,84,87,72,84,88,91,69,73,95,87,77,70,69,83,80,84,87,70,87,77,80,78,88,21,17,27,94,69,25,28,22,23,80,1,29,0,0,22,20,22,88,31,11,88,4,13,29,80,20,13,17,1,10,17,17,13,2,0,88,31,3,88,4,13,29,80,6,17,2,6,20,21,75,88,62,4,21,21,9,1,92,69,12,24,0,88,3,16,21,80,10,30,80,17,16,25,22,88,29,16,20,4,12,8,28,12,29,20,69,26,9,69,65,64,69,31,25,19,29,3,69,12,24,0,88,18,12,9,5,4,28,2,4,12,21,69,80,22,10,13,2,17,16,80,21,23,7,0,10,89,69,23,22,69,12,24,0,88,19,12,10,19,16,21,22,0,10,21,11,27,21,69,23,22,69,1
2,24,0,88,0,0,10,25,8,29,4,0,10,80,10,30,80,4,88,19,12,10,19,9,29,80,18,16,31,22,29,80,1,17,17,8,29,4,0,10,80,12,11,80,84,86,80,36,22,20,69,26,9,69,11,25,8,17,28,4,10,80,23,29,17,22,23,30,12,22,23,69,49,80,13,25,6,0,88,28,12,19,21,18,17,3,0,88,18,0,29,30,69,25,18,9,29,80,17,23,80,1,29,4,0,10,29,12,22,21,69,12,24,0,88,3,16,21,3,69,23,22,69,12,24,0,88,3,16,26,3,0,9,5,0,22,4,69,11,21,23,17,21,22,88,25,11,88,7,13,17,19,13,88,4,13,29,80,0,0,0,10,22,21,11,12,3,69,25,2,0,88,21,19,29,30,69,22,5,8,26,21,23,11,94 \ No newline at end of file diff --git a/project_euler/problem_059/sol1.py b/project_euler/problem_059/sol1.py new file mode 100644 index 000000000000..1f55029b2613 --- /dev/null +++ b/project_euler/problem_059/sol1.py @@ -0,0 +1,128 @@ +""" +Each character on a computer is assigned a unique code and the preferred standard is +ASCII (American Standard Code for Information Interchange). +For example, uppercase A = 65, asterisk (*) = 42, and lowercase k = 107. + +A modern encryption method is to take a text file, convert the bytes to ASCII, then +XOR each byte with a given value, taken from a secret key. The advantage with the +XOR function is that using the same encryption key on the cipher text, restores +the plain text; for example, 65 XOR 42 = 107, then 107 XOR 42 = 65. + +For unbreakable encryption, the key is the same length as the plain text message, and +the key is made up of random bytes. The user would keep the encrypted message and the +encryption key in different locations, and without both "halves", it is impossible to +decrypt the message. + +Unfortunately, this method is impractical for most users, so the modified method is +to use a password as a key. If the password is shorter than the message, which is +likely, the key is repeated cyclically throughout the message. The balance for this +method is using a sufficiently long password key for security, but short enough to +be memorable. + +Your task has been made easy, as the encryption key consists of three lower case +characters. Using p059_cipher.txt (right click and 'Save Link/Target As...'), a +file containing the encrypted ASCII codes, and the knowledge that the plain text +must contain common English words, decrypt the message and find the sum of the ASCII +values in the original text. +""" + + +import string +from itertools import cycle, product +from pathlib import Path +from typing import List, Optional, Set, Tuple + +VALID_CHARS: str = ( + string.ascii_letters + string.digits + string.punctuation + string.whitespace +) +LOWERCASE_INTS: List[int] = [ord(letter) for letter in string.ascii_lowercase] +VALID_INTS: Set[int] = {ord(char) for char in VALID_CHARS} + +COMMON_WORDS: List[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] + + +def try_key(ciphertext: List[int], key: Tuple[int, ...]) -> Optional[str]: + """ + Given an encrypted message and a possible 3-character key, decrypt the message. + If the decrypted message contains a invalid character, i.e. not an ASCII letter, + a digit, punctuation or whitespace, then we know the key is incorrect, so return + None. 
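+    The XOR symmetry that makes decryption possible can be checked directly;
+    a small illustrative example using the values quoted in the module
+    docstring (65 XOR 42 = 107, and XOR-ing with 42 again restores 65):
+    >>> 65 ^ 42
+    107
+    >>> 107 ^ 42
+    65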
+ >>> try_key([0, 17, 20, 4, 27], (104, 116, 120)) + 'hello' + >>> try_key([68, 10, 300, 4, 27], (104, 116, 120)) is None + True + """ + decoded: str = "" + keychar: int + cipherchar: int + decodedchar: int + + for keychar, cipherchar in zip(cycle(key), ciphertext): + decodedchar = cipherchar ^ keychar + if decodedchar not in VALID_INTS: + return None + decoded += chr(decodedchar) + + return decoded + + +def filter_valid_chars(ciphertext: List[int]) -> List[str]: + """ + Given an encrypted message, test all 3-character strings to try and find the + key. Return a list of the possible decrypted messages. + >>> from itertools import cycle + >>> text = "The enemy's gate is down" + >>> key = "end" + >>> encoded = [ord(k) ^ ord(c) for k,c in zip(cycle(key), text)] + >>> text in filter_valid_chars(encoded) + True + """ + possibles: List[str] = [] + for key in product(LOWERCASE_INTS, repeat=3): + encoded = try_key(ciphertext, key) + if encoded is not None: + possibles.append(encoded) + return possibles + + +def filter_common_word(possibles: List[str], common_word: str) -> List[str]: + """ + Given a list of possible decoded messages, narrow down the possibilities + for checking for the presence of a specified common word. Only decoded messages + containing common_word will be returned. + >>> filter_common_word(['asfla adf', 'I am here', ' !?! #a'], 'am') + ['I am here'] + >>> filter_common_word(['athla amf', 'I am here', ' !?! #a'], 'am') + ['athla amf', 'I am here'] + """ + return [possible for possible in possibles if common_word in possible.lower()] + + +def solution(filename: str = "p059_cipher.txt") -> int: + """ + Test the ciphertext against all possible 3-character keys, then narrow down the + possibilities by filtering using common words until there's only one possible + decoded message. 
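+    The brute force is cheap because the key space is tiny; only the
+    lowercase three-character keys need to be tried (a quick illustrative
+    count, separate from the test below):
+    >>> 26 ** 3
+    17576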
+ >>> solution("test_cipher.txt") + 3000 + """ + ciphertext: List[int] + possibles: List[str] + common_word: str + decoded_text: str + data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8") + + ciphertext = [int(number) for number in data.strip().split(",")] + + possibles = filter_valid_chars(ciphertext) + for common_word in COMMON_WORDS: + possibles = filter_common_word(possibles, common_word) + if len(possibles) == 1: + break + + decoded_text = possibles[0] + return sum([ord(char) for char in decoded_text]) + + +if __name__ == "__main__": + print(f"{solution() = }") diff --git a/project_euler/problem_059/test_cipher.txt b/project_euler/problem_059/test_cipher.txt new file mode 100644 index 000000000000..27c53740cc1a --- /dev/null +++ b/project_euler/problem_059/test_cipher.txt @@ -0,0 +1 @@ +63,13,28,75,0,23,14,8,0,76,22,89,12,4,13,14,69,16,24,69,29,4,18,23,69,69,59,14,69,11,14,4,29,18 From 53371b2381c2233c057c0ad75d377d8c03ff83c8 Mon Sep 17 00:00:00 2001 From: Du Yuanchao Date: Fri, 18 Dec 2020 17:39:51 +0800 Subject: [PATCH 0049/1543] Optimization for shell sort (#4038) * fixed shell sort * udpate code style * Update sorts/shell_sort.py Co-authored-by: John Law Co-authored-by: John Law --- sorts/shell_sort.py | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/sorts/shell_sort.py b/sorts/shell_sort.py index bf3c2c7f9cc6..2e749e43d056 100644 --- a/sorts/shell_sort.py +++ b/sorts/shell_sort.py @@ -1,13 +1,5 @@ """ -This is a pure Python implementation of the shell sort algorithm - -For doctests run following command: -python -m doctest -v shell_sort.py -or -python3 -m doctest -v shell_sort.py - -For manual testing run: -python shell_sort.py +https://en.wikipedia.org/wiki/Shellsort#Pseudocode """ @@ -19,26 +11,29 @@ def shell_sort(collection): >>> shell_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] - >>> shell_sort([]) [] - >>> shell_sort([-2, -5, -45]) [-45, -5, -2] """ # Marcin Ciura's gap sequence - gaps = [701, 301, 132, 57, 23, 10, 4, 1] + gaps = [701, 301, 132, 57, 23, 10, 4, 1] for gap in gaps: for i in range(gap, len(collection)): + insert_value = collection[i] j = i - while j >= gap and collection[j] < collection[j - gap]: - collection[j], collection[j - gap] = collection[j - gap], collection[j] + while j >= gap and collection[j - gap] > insert_value: + collection[j] = collection[j - gap] j -= gap + collection[j] = insert_value return collection if __name__ == "__main__": + from doctest import testmod + + testmod() user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] print(shell_sort(unsorted)) From 00f22a9970c6638ff59891d8cb271db52bab1bc4 Mon Sep 17 00:00:00 2001 From: sharmapulkit04 <39304055+sharmapulkit04@users.noreply.github.com> Date: Sat, 19 Dec 2020 11:46:15 +0530 Subject: [PATCH 0050/1543] Add solution for Project Euler problem 135 (#4035) --- project_euler/problem_135/__init__.py | 0 project_euler/problem_135/sol1.py | 61 +++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) create mode 100644 project_euler/problem_135/__init__.py create mode 100644 project_euler/problem_135/sol1.py diff --git a/project_euler/problem_135/__init__.py b/project_euler/problem_135/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_135/sol1.py b/project_euler/problem_135/sol1.py new file mode 100644 index 000000000000..d71a0439c7e9 --- /dev/null +++ b/project_euler/problem_135/sol1.py @@ -0,0 +1,61 @@ +""" 
+Project Euler Problem 135: https://projecteuler.net/problem=135 + +Given the positive integers, x, y, and z, +are consecutive terms of an arithmetic progression, +the least value of the positive integer, n, +for which the equation, +x^2 − y^2 − z^2 = n, has exactly two solutions is n = 27: + +34^2 − 27^2 − 20^2 = 12^2 − 9^2 − 6^2 = 27 + +It turns out that n = 1155 is the least value +which has exactly ten solutions. + +How many values of n less than one million +have exactly ten distinct solutions? + + +Taking x,y,z of the form a+d,a,a-d respectively, +the given equation reduces to a*(4d-a)=n. +Count the number of solutions for every n up to 1 million by fixing a; +n must then be a multiple of a. +The total number of steps is n*(1/1 + 1/2 + 1/3 + ... + 1/n), +so the time complexity is roughly O(n log n). + +""" + + +def solution(limit: int = 1000000) -> int: + """ + Return how many values of n less than or equal to the limit + have exactly ten distinct solutions. + >>> solution(100) + 0 + >>> solution(10000) + 45 + >>> solution(50050) + 292 + """ + limit = limit + 1 + frequency = [0] * limit + for first_term in range(1, limit): + for n in range(first_term, limit, first_term): + common_difference = first_term + n / first_term + if common_difference % 4: # (a + n/a) must be divisible by 4 so that d is an integer + continue + else: + common_difference /= 4 + if ( + first_term > common_difference + and first_term < 4 * common_difference + ): # since x,y,z are positive integers + frequency[n] += 1 # so z>0 and a>d ,also 4d Date: Mon, 21 Dec 2020 13:55:59 -0800 Subject: [PATCH 0051/1543] add integer to roman function (#4050) * add integer to roman function simply added fastest method i found. * Rename roman_to_integer.py to roman_numerals.py * Update roman_numerals.py * Update roman_numerals.py Co-authored-by: Christian Clauss --- ...{roman_to_integer.py => roman_numerals.py} | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) rename conversions/{roman_to_integer.py => roman_numerals.py} (50%) diff --git a/conversions/roman_to_integer.py b/conversions/roman_numerals.py similarity index 50% rename from conversions/roman_to_integer.py rename to conversions/roman_numerals.py index ce52b6fb7cbb..9933e6a78a4d 100644 --- a/conversions/roman_to_integer.py +++ b/conversions/roman_numerals.py @@ -21,6 +21,38 @@ def roman_to_int(roman: str) -> int: return total +def int_to_roman(number: int) -> str: + """ + Given an integer, convert it to a Roman numeral.
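+    The conversion greedily divides out the largest value in the table
+    defined below and appends its symbol; one extra illustrative example:
+    >>> int_to_roman(1994)
+    'MCMXCIV'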
+ https://en.wikipedia.org/wiki/Roman_numerals + >>> tests = {"III": 3, "CLIV": 154, "MIX": 1009, "MMD": 2500, "MMMCMXCIX": 3999} + >>> all(int_to_roman(value) == key for key, value in tests.items()) + True + """ + ROMAN = [ + (1000, "M"), + (900, "CM"), + (500, "D"), + (400, "CD"), + (100, "C"), + (90, "XC"), + (50, "L"), + (40, "XL"), + (10, "X"), + (9, "IX"), + (5, "V"), + (4, "IV"), + (1, "I"), + ] + result = [] + for (arabic, roman) in ROMAN: + (factor, number) = divmod(number, arabic) + result.append(roman * factor) + if number == 0: + break + return "".join(result) + + if __name__ == "__main__": import doctest From 2ff2ccbeecf24d3171841e362600944b547a4e51 Mon Sep 17 00:00:00 2001 From: fpringle Date: Tue, 22 Dec 2020 13:02:31 +0100 Subject: [PATCH 0052/1543] Add solution for Project Euler problem 101 (#4033) * Added solution for Project Euler problem 101 * Got rid of map functions * updating DIRECTORY.md * Better function/variable names * Better variable names * Type hints * Doctest for nested function Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_101/__init__.py | 0 project_euler/problem_101/sol1.py | 219 ++++++++++++++++++++++++++ 3 files changed, 221 insertions(+) create mode 100644 project_euler/problem_101/__init__.py create mode 100644 project_euler/problem_101/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 929a986b0f3b..1f1bb9907e52 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -744,6 +744,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_097/sol1.py) * Problem 099 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_099/sol1.py) + * Problem 101 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_101/sol1.py) * Problem 112 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_112/sol1.py) * Problem 113 diff --git a/project_euler/problem_101/__init__.py b/project_euler/problem_101/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_101/sol1.py b/project_euler/problem_101/sol1.py new file mode 100644 index 000000000000..e66316090fb2 --- /dev/null +++ b/project_euler/problem_101/sol1.py @@ -0,0 +1,219 @@ +""" +If we are presented with the first k terms of a sequence it is impossible to say with +certainty the value of the next term, as there are infinitely many polynomial functions +that can model the sequence. + +As an example, let us consider the sequence of cube +numbers. This is defined by the generating function, +u(n) = n3: 1, 8, 27, 64, 125, 216, ... + +Suppose we were only given the first two terms of this sequence. Working on the +principle that "simple is best" we should assume a linear relationship and predict the +next term to be 15 (common difference 7). Even if we were presented with the first three +terms, by the same principle of simplicity, a quadratic relationship should be +assumed. + +We shall define OP(k, n) to be the nth term of the optimum polynomial +generating function for the first k terms of a sequence. It should be clear that +OP(k, n) will accurately generate the terms of the sequence for n ≤ k, and potentially +the first incorrect term (FIT) will be OP(k, k+1); in which case we shall call it a +bad OP (BOP). + +As a basis, if we were only given the first term of sequence, it would be most +sensible to assume constancy; that is, for n ≥ 2, OP(1, n) = u(1). 
+ +Hence we obtain the +following OPs for the cubic sequence: + +OP(1, n) = 1 1, 1, 1, 1, ... +OP(2, n) = 7n-6 1, 8, 15, ... +OP(3, n) = 6n^2-11n+6 1, 8, 27, 58, ... +OP(4, n) = n^3 1, 8, 27, 64, 125, ... + +Clearly no BOPs exist for k ≥ 4. + +By considering the sum of FITs generated by the BOPs (indicated in red above), we +obtain 1 + 15 + 58 = 74. + +Consider the following tenth degree polynomial generating function: + +1 - n + n^2 - n^3 + n^4 - n^5 + n^6 - n^7 + n^8 - n^9 + n^10 + +Find the sum of FITs for the BOPs. +""" + + +from typing import Callable, List, Union + +Matrix = List[List[Union[float, int]]] + + +def solve(matrix: Matrix, vector: Matrix) -> Matrix: + """ + Solve the linear system of equations Ax = b (A = "matrix", b = "vector") + for x using Gaussian elimination and back substitution. We assume that A + is an invertible square matrix and that b is a column vector of the + same height. + >>> solve([[1, 0], [0, 1]], [[1],[2]]) + [[1.0], [2.0]] + >>> solve([[2, 1, -1],[-3, -1, 2],[-2, 1, 2]],[[8], [-11],[-3]]) + [[2.0], [3.0], [-1.0]] + """ + size: int = len(matrix) + augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)] + row: int + row2: int + col: int + col2: int + pivot_row: int + ratio: float + + for row in range(size): + for col in range(size): + augmented[row][col] = matrix[row][col] + + augmented[row][size] = vector[row][0] + + row = 0 + col = 0 + while row < size and col < size: + # pivoting + pivot_row = max( + [(abs(augmented[row2][col]), row2) for row2 in range(col, size)] + )[1] + if augmented[pivot_row][col] == 0: + col += 1 + continue + else: + augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row] + + for row2 in range(row + 1, size): + ratio = augmented[row2][col] / augmented[row][col] + augmented[row2][col] = 0 + for col2 in range(col + 1, size + 1): + augmented[row2][col2] -= augmented[row][col2] * ratio + + row += 1 + col += 1 + + # back substitution + for col in range(1, size): + for row in range(col): + ratio = augmented[row][col] / augmented[col][col] + for col2 in range(col, size + 1): + augmented[row][col2] -= augmented[col][col2] * ratio + + # round to get rid of numbers like 2.000000000000004 + return [ + [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size) + ] + + +def interpolate(y_list: List[int]) -> Callable[[int], int]: + """ + Given a list of data points (1,y0),(2,y1), ..., return a function that + interpolates the data points. We find the coefficients of the interpolating + polynomial by solving a system of linear equations corresponding to + x = 1, 2, 3... 
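+    For instance, fitting the first two cube numbers (1, 1) and (2, 8)
+    amounts to solving a 2x2 system for the line 7n-6 mentioned in the
+    problem statement above (a quick illustrative check that reuses the
+    solve() helper defined in this file):
+    >>> solve([[1, 1], [2, 1]], [[1], [8]])
+    [[7.0], [-6.0]]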
+ + >>> interpolate([1])(3) + 1 + >>> interpolate([1, 8])(3) + 15 + >>> interpolate([1, 8, 27])(4) + 58 + >>> interpolate([1, 8, 27, 64])(6) + 216 + """ + + size: int = len(y_list) + matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)] + vector: Matrix = [[0] for _ in range(size)] + coeffs: Matrix + x_val: int + y_val: int + col: int + + for x_val, y_val in enumerate(y_list): + for col in range(size): + matrix[x_val][col] = (x_val + 1) ** (size - col - 1) + vector[x_val][0] = y_val + + coeffs = solve(matrix, vector) + + def interpolated_func(var: int) -> int: + """ + >>> interpolate([1])(3) + 1 + >>> interpolate([1, 8])(3) + 15 + >>> interpolate([1, 8, 27])(4) + 58 + >>> interpolate([1, 8, 27, 64])(6) + 216 + """ + return sum( + round(coeffs[x_val][0]) * (var ** (size - x_val - 1)) + for x_val in range(size) + ) + + return interpolated_func + + +def question_function(variable: int) -> int: + """ + The generating function u as specified in the question. + >>> question_function(0) + 1 + >>> question_function(1) + 1 + >>> question_function(5) + 8138021 + >>> question_function(10) + 9090909091 + """ + return ( + 1 + - variable + + variable ** 2 + - variable ** 3 + + variable ** 4 + - variable ** 5 + + variable ** 6 + - variable ** 7 + + variable ** 8 + - variable ** 9 + + variable ** 10 + ) + + +def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int: + """ + Find the sum of the FITs of the BOPS. For each interpolating polynomial of order + 1, 2, ... , 10, find the first x such that the value of the polynomial at x does + not equal u(x). + >>> solution(lambda n: n ** 3, 3) + 74 + """ + data_points: List[int] = [func(x_val) for x_val in range(1, order + 1)] + + polynomials: List[Callable[[int], int]] = [ + interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1) + ] + + ret: int = 0 + poly: int + x_val: int + + for poly in polynomials: + x_val = 1 + while func(x_val) == poly(x_val): + x_val += 1 + + ret += poly(x_val) + + return ret + + +if __name__ == "__main__": + print(f"{solution() = }") From ad5108d6a49155bc0a5aca498426265004b0265f Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Wed, 23 Dec 2020 15:22:43 +0530 Subject: [PATCH 0053/1543] Fix mypy errors for arithmetic analysis algorithms (#4053) --- arithmetic_analysis/in_static_equilibrium.py | 11 +-- arithmetic_analysis/lu_decomposition.py | 70 +++++++++++++------ .../newton_forward_interpolation.py | 7 +- arithmetic_analysis/newton_raphson.py | 5 +- arithmetic_analysis/secant_method.py | 57 +++++++-------- 5 files changed, 90 insertions(+), 60 deletions(-) diff --git a/arithmetic_analysis/in_static_equilibrium.py b/arithmetic_analysis/in_static_equilibrium.py index f08b39c3505c..9b2892151850 100644 --- a/arithmetic_analysis/in_static_equilibrium.py +++ b/arithmetic_analysis/in_static_equilibrium.py @@ -1,19 +1,14 @@ """ Checks if a system of forces is in static equilibrium. - -python/black : true -flake8 : passed -mypy : passed """ +from typing import List -from __future__ import annotations - -from numpy import array, cos, cross, radians, sin # type: ignore +from numpy import array, cos, cross, radians, sin def polar_force( magnitude: float, angle: float, radian_mode: bool = False -) -> list[float]: +) -> List[float]: """ Resolves force along rectangular components. 
(force, angle) => (force_x, force_y) diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index 763ba60f32b7..ef37d1b7b4ef 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -1,34 +1,64 @@ -"""Lower-Upper (LU) Decomposition.""" +"""Lower-Upper (LU) Decomposition. -# lower–upper (LU) decomposition - https://en.wikipedia.org/wiki/LU_decomposition -import numpy +Reference: +- https://en.wikipedia.org/wiki/LU_decomposition +""" +from typing import Tuple +import numpy as np +from numpy import ndarray -def LUDecompose(table): + +def lower_upper_decomposition(table: ndarray) -> Tuple[ndarray, ndarray]: + """Lower-Upper (LU) Decomposition + + Example: + + >>> matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]]) + >>> outcome = lower_upper_decomposition(matrix) + >>> outcome[0] + array([[1. , 0. , 0. ], + [0. , 1. , 0. ], + [2.5, 8. , 1. ]]) + >>> outcome[1] + array([[ 2. , -2. , 1. ], + [ 0. , 1. , 2. ], + [ 0. , 0. , -17.5]]) + + >>> matrix = np.array([[2, -2, 1], [0, 1, 2]]) + >>> lower_upper_decomposition(matrix) + Traceback (most recent call last): + ... + ValueError: 'table' has to be of square shaped array but got a 2x3 array: + [[ 2 -2 1] + [ 0 1 2]] + """ # Table that contains our data # Table has to be a square array so we need to check first - rows, columns = numpy.shape(table) - L = numpy.zeros((rows, columns)) - U = numpy.zeros((rows, columns)) + rows, columns = np.shape(table) if rows != columns: - return [] + raise ValueError( + f"'table' has to be of square shaped array but got a {rows}x{columns} " + + f"array:\n{table}" + ) + lower = np.zeros((rows, columns)) + upper = np.zeros((rows, columns)) for i in range(columns): for j in range(i): - sum = 0 + total = 0 for k in range(j): - sum += L[i][k] * U[k][j] - L[i][j] = (table[i][j] - sum) / U[j][j] - L[i][i] = 1 + total += lower[i][k] * upper[k][j] + lower[i][j] = (table[i][j] - total) / upper[j][j] + lower[i][i] = 1 for j in range(i, columns): - sum1 = 0 + total = 0 for k in range(i): - sum1 += L[i][k] * U[k][j] - U[i][j] = table[i][j] - sum1 - return L, U + total += lower[i][k] * upper[k][j] + upper[i][j] = table[i][j] - total + return lower, upper if __name__ == "__main__": - matrix = numpy.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]]) - L, U = LUDecompose(matrix) - print(L) - print(U) + import doctest + + doctest.testmod() diff --git a/arithmetic_analysis/newton_forward_interpolation.py b/arithmetic_analysis/newton_forward_interpolation.py index d32e3efbd1f2..66cde4b73c4f 100644 --- a/arithmetic_analysis/newton_forward_interpolation.py +++ b/arithmetic_analysis/newton_forward_interpolation.py @@ -1,10 +1,11 @@ # https://www.geeksforgeeks.org/newton-forward-backward-interpolation/ import math +from typing import List # for calculating u value -def ucal(u, p): +def ucal(u: float, p: int) -> float: """ >>> ucal(1, 2) 0 @@ -19,9 +20,9 @@ def ucal(u, p): return temp -def main(): +def main() -> None: n = int(input("enter the numbers of values: ")) - y = [] + y: List[List[float]] = [] for i in range(n): y.append([]) for i in range(n): diff --git a/arithmetic_analysis/newton_raphson.py b/arithmetic_analysis/newton_raphson.py index 948759a09a2a..146bb0aa5adf 100644 --- a/arithmetic_analysis/newton_raphson.py +++ b/arithmetic_analysis/newton_raphson.py @@ -4,11 +4,14 @@ # quickly find a good approximation for the root of a real-valued function from decimal import Decimal from math import * # noqa: F401, F403 +from typing import Union from sympy import 
diff -def newton_raphson(func: str, a: int, precision: int = 10 ** -10) -> float: +def newton_raphson( + func: str, a: Union[float, Decimal], precision: float = 10 ** -10 +) -> float: """Finds root from the point 'a' onwards by Newton-Raphson method >>> newton_raphson("sin(x)", 2) 3.1415926536808043 diff --git a/arithmetic_analysis/secant_method.py b/arithmetic_analysis/secant_method.py index b05d44c627d8..7eb1dd8f5c6b 100644 --- a/arithmetic_analysis/secant_method.py +++ b/arithmetic_analysis/secant_method.py @@ -1,28 +1,29 @@ -# Implementing Secant method in Python -# Author: dimgrichr - - -from math import exp - - -def f(x): - """ - >>> f(5) - 39.98652410600183 - """ - return 8 * x - 2 * exp(-x) - - -def SecantMethod(lower_bound, upper_bound, repeats): - """ - >>> SecantMethod(1, 3, 2) - 0.2139409276214589 - """ - x0 = lower_bound - x1 = upper_bound - for i in range(0, repeats): - x0, x1 = x1, x1 - (f(x1) * (x1 - x0)) / (f(x1) - f(x0)) - return x1 - - -print(f"The solution is: {SecantMethod(1, 3, 2)}") +""" +Implementing Secant method in Python +Author: dimgrichr +""" +from math import exp + + +def f(x: float) -> float: + """ + >>> f(5) + 39.98652410600183 + """ + return 8 * x - 2 * exp(-x) + + +def secant_method(lower_bound: float, upper_bound: float, repeats: int) -> float: + """ + >>> secant_method(1, 3, 2) + 0.2139409276214589 + """ + x0 = lower_bound + x1 = upper_bound + for i in range(0, repeats): + x0, x1 = x1, x1 - (f(x1) * (x1 - x0)) / (f(x1) - f(x0)) + return x1 + + +if __name__ == "__main__": + print(f"Example: {secant_method(1, 3, 2) = }") From 0ccb213c1140d094e4c86bc04c767290b4ebaf15 Mon Sep 17 00:00:00 2001 From: fpringle Date: Wed, 23 Dec 2020 18:48:19 +0100 Subject: [PATCH 0054/1543] Add solution for Project Euler problem 102 (#4051) * Added solution for Project Euler problem 102 * Got rid of map functions * Snake case variable names * Type hints * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 6 +- project_euler/problem_102/__init__.py | 0 project_euler/problem_102/p102_triangles.txt | 1000 ++++++++++++++++++ project_euler/problem_102/sol1.py | 81 ++ project_euler/problem_102/test_triangles.txt | 2 + 5 files changed, 1088 insertions(+), 1 deletion(-) create mode 100644 project_euler/problem_102/__init__.py create mode 100644 project_euler/problem_102/p102_triangles.txt create mode 100644 project_euler/problem_102/sol1.py create mode 100644 project_euler/problem_102/test_triangles.txt diff --git a/DIRECTORY.md b/DIRECTORY.md index 1f1bb9907e52..d73ae11eb7c2 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -106,7 +106,7 @@ * [Molecular Chemistry](https://github.com/TheAlgorithms/Python/blob/master/conversions/molecular_chemistry.py) * [Octal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/octal_to_decimal.py) * [Prefix Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions.py) - * [Roman To Integer](https://github.com/TheAlgorithms/Python/blob/master/conversions/roman_to_integer.py) + * [Roman Numerals](https://github.com/TheAlgorithms/Python/blob/master/conversions/roman_numerals.py) * [Temperature Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/temperature_conversions.py) * [Weight Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/weight_conversion.py) @@ -746,6 +746,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_099/sol1.py) * Problem 101 * 
[Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_101/sol1.py) + * Problem 102 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_102/sol1.py) * Problem 112 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_112/sol1.py) * Problem 113 @@ -760,6 +762,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_125/sol1.py) * Problem 129 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_129/sol1.py) + * Problem 135 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_135/sol1.py) * Problem 173 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_173/sol1.py) * Problem 174 diff --git a/project_euler/problem_102/__init__.py b/project_euler/problem_102/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_102/p102_triangles.txt b/project_euler/problem_102/p102_triangles.txt new file mode 100644 index 000000000000..3f01a1ac1f41 --- /dev/null +++ b/project_euler/problem_102/p102_triangles.txt @@ -0,0 +1,1000 @@ +-340,495,-153,-910,835,-947 +-175,41,-421,-714,574,-645 +-547,712,-352,579,951,-786 +419,-864,-83,650,-399,171 +-429,-89,-357,-930,296,-29 +-734,-702,823,-745,-684,-62 +-971,762,925,-776,-663,-157 +162,570,628,485,-807,-896 +641,91,-65,700,887,759 +215,-496,46,-931,422,-30 +-119,359,668,-609,-358,-494 +440,929,968,214,760,-857 +-700,785,838,29,-216,411 +-770,-458,-325,-53,-505,633 +-752,-805,349,776,-799,687 +323,5,561,-36,919,-560 +-907,358,264,320,204,274 +-728,-466,350,969,292,-345 +940,836,272,-533,748,185 +411,998,813,520,316,-949 +-152,326,658,-762,148,-651 +330,507,-9,-628,101,174 +551,-496,772,-541,-702,-45 +-164,-489,-90,322,631,-59 +673,366,-4,-143,-606,-704 +428,-609,801,-449,740,-269 +453,-924,-785,-346,-853,111 +-738,555,-181,467,-426,-20 +958,-692,784,-343,505,-569 +620,27,263,54,-439,-726 +804,87,998,859,871,-78 +-119,-453,-709,-292,-115,-56 +-626,138,-940,-476,-177,-274 +-11,160,142,588,446,158 +538,727,550,787,330,810 +420,-689,854,-546,337,516 +872,-998,-607,748,473,-192 +653,440,-516,-985,808,-857 +374,-158,331,-940,-338,-641 +137,-925,-179,771,734,-715 +-314,198,-115,29,-641,-39 +759,-574,-385,355,590,-603 +-189,-63,-168,204,289,305 +-182,-524,-715,-621,911,-255 +331,-816,-833,471,168,126 +-514,581,-855,-220,-731,-507 +129,169,576,651,-87,-458 +783,-444,-881,658,-266,298 +603,-430,-598,585,368,899 +43,-724,962,-376,851,409 +-610,-646,-883,-261,-482,-881 +-117,-237,978,641,101,-747 +579,125,-715,-712,208,534 +672,-214,-762,372,874,533 +-564,965,38,715,367,242 +500,951,-700,-981,-61,-178 +-382,-224,-959,903,-282,-60 +-355,295,426,-331,-591,655 +892,128,958,-271,-993,274 +-454,-619,302,138,-790,-874 +-642,601,-574,159,-290,-318 +266,-109,257,-686,54,975 +162,628,-478,840,264,-266 +466,-280,982,1,904,-810 +721,839,730,-807,777,981 +-129,-430,748,263,943,96 +434,-94,410,-990,249,-704 +237,42,122,-732,44,-51 +909,-116,-229,545,292,717 +824,-768,-807,-370,-262,30 +675,58,332,-890,-651,791 +363,825,-717,254,684,240 +405,-715,900,166,-589,422 +-476,686,-830,-319,634,-807 +633,837,-971,917,-764,207 +-116,-44,-193,-70,908,809 +-26,-252,998,408,70,-713 +-601,645,-462,842,-644,-591 +-160,653,274,113,-138,687 +369,-273,-181,925,-167,-693 +-338,135,480,-967,-13,-840 +-90,-270,-564,695,161,907 +607,-430,869,-713,461,-469 +919,-165,-776,522,606,-708 +-203,465,288,207,-339,-458 +-453,-534,-715,975,838,-677 
+-973,310,-350,934,546,-805 +-835,385,708,-337,-594,-772 +-14,914,900,-495,-627,594 +833,-713,-213,578,-296,699 +-27,-748,484,455,915,291 +270,889,739,-57,442,-516 +119,811,-679,905,184,130 +-678,-469,925,553,612,482 +101,-571,-732,-842,644,588 +-71,-737,566,616,957,-663 +-634,-356,90,-207,936,622 +598,443,964,-895,-58,529 +847,-467,929,-742,91,10 +-633,829,-780,-408,222,-30 +-818,57,275,-38,-746,198 +-722,-825,-549,597,-391,99 +-570,908,430,873,-103,-360 +342,-681,512,434,542,-528 +297,850,479,609,543,-357 +9,784,212,548,56,859 +-152,560,-240,-969,-18,713 +140,-133,34,-635,250,-163 +-272,-22,-169,-662,989,-604 +471,-765,355,633,-742,-118 +-118,146,942,663,547,-376 +583,16,162,264,715,-33 +-230,-446,997,-838,561,555 +372,397,-729,-318,-276,649 +92,982,-970,-390,-922,922 +-981,713,-951,-337,-669,670 +-999,846,-831,-504,7,-128 +455,-954,-370,682,-510,45 +822,-960,-892,-385,-662,314 +-668,-686,-367,-246,530,-341 +-723,-720,-926,-836,-142,757 +-509,-134,384,-221,-873,-639 +-803,-52,-706,-669,373,-339 +933,578,631,-616,770,555 +741,-564,-33,-605,-576,275 +-715,445,-233,-730,734,-704 +120,-10,-266,-685,-490,-17 +-232,-326,-457,-946,-457,-116 +811,52,639,826,-200,147 +-329,279,293,612,943,955 +-721,-894,-393,-969,-642,453 +-688,-826,-352,-75,371,79 +-809,-979,407,497,858,-248 +-485,-232,-242,-582,-81,849 +141,-106,123,-152,806,-596 +-428,57,-992,811,-192,478 +864,393,122,858,255,-876 +-284,-780,240,457,354,-107 +956,605,-477,44,26,-678 +86,710,-533,-815,439,327 +-906,-626,-834,763,426,-48 +201,-150,-904,652,475,412 +-247,149,81,-199,-531,-148 +923,-76,-353,175,-121,-223 +427,-674,453,472,-410,585 +931,776,-33,85,-962,-865 +-655,-908,-902,208,869,792 +-316,-102,-45,-436,-222,885 +-309,768,-574,653,745,-975 +896,27,-226,993,332,198 +323,655,-89,260,240,-902 +501,-763,-424,793,813,616 +993,375,-938,-621,672,-70 +-880,-466,-283,770,-824,143 +63,-283,886,-142,879,-116 +-964,-50,-521,-42,-306,-161 +724,-22,866,-871,933,-383 +-344,135,282,966,-80,917 +-281,-189,420,810,362,-582 +-515,455,-588,814,162,332 +555,-436,-123,-210,869,-943 +589,577,232,286,-554,876 +-773,127,-58,-171,-452,125 +-428,575,906,-232,-10,-224 +437,276,-335,-348,605,878 +-964,511,-386,-407,168,-220 +307,513,912,-463,-423,-416 +-445,539,273,886,-18,760 +-396,-585,-670,414,47,364 +143,-506,754,906,-971,-203 +-544,472,-180,-541,869,-465 +-779,-15,-396,890,972,-220 +-430,-564,503,182,-119,456 +89,-10,-739,399,506,499 +954,162,-810,-973,127,870 +890,952,-225,158,828,237 +-868,952,349,465,574,750 +-915,369,-975,-596,-395,-134 +-135,-601,575,582,-667,640 +413,890,-560,-276,-555,-562 +-633,-269,561,-820,-624,499 +371,-92,-784,-593,864,-717 +-971,655,-439,367,754,-951 +172,-347,36,279,-247,-402 +633,-301,364,-349,-683,-387 +-780,-211,-713,-948,-648,543 +72,58,762,-465,-66,462 +78,502,781,-832,713,836 +-431,-64,-484,-392,208,-343 +-64,101,-29,-860,-329,844 +398,391,828,-858,700,395 +578,-896,-326,-604,314,180 +97,-321,-695,185,-357,852 +854,839,283,-375,951,-209 +194,96,-564,-847,162,524 +-354,532,494,621,580,560 +419,-678,-450,926,-5,-924 +-661,905,519,621,-143,394 +-573,268,296,-562,-291,-319 +-211,266,-196,158,564,-183 +18,-585,-398,777,-581,864 +790,-894,-745,-604,-418,70 +848,-339,150,773,11,851 +-954,-809,-53,-20,-648,-304 +658,-336,-658,-905,853,407 +-365,-844,350,-625,852,-358 +986,-315,-230,-159,21,180 +-15,599,45,-286,-941,847 +-613,-68,184,639,-987,550 +334,675,-56,-861,923,340 +-848,-596,960,231,-28,-34 +707,-811,-994,-356,-167,-171 +-470,-764,72,576,-600,-204 +379,189,-542,-576,585,800 +440,540,-445,-563,379,-334 
+-155,64,514,-288,853,106 +-304,751,481,-520,-708,-694 +-709,132,594,126,-844,63 +723,471,421,-138,-962,892 +-440,-263,39,513,-672,-954 +775,809,-581,330,752,-107 +-376,-158,335,-708,-514,578 +-343,-769,456,-187,25,413 +548,-877,-172,300,-500,928 +938,-102,423,-488,-378,-969 +-36,564,-55,131,958,-800 +-322,511,-413,503,700,-847 +-966,547,-88,-17,-359,-67 +637,-341,-437,-181,527,-153 +-74,449,-28,3,485,189 +-997,658,-224,-948,702,-807 +-224,736,-896,127,-945,-850 +-395,-106,439,-553,-128,124 +-841,-445,-758,-572,-489,212 +633,-327,13,-512,952,771 +-940,-171,-6,-46,-923,-425 +-142,-442,-817,-998,843,-695 +340,847,-137,-920,-988,-658 +-653,217,-679,-257,651,-719 +-294,365,-41,342,74,-892 +690,-236,-541,494,408,-516 +180,-807,225,790,494,59 +707,605,-246,656,284,271 +65,294,152,824,442,-442 +-321,781,-540,341,316,415 +420,371,-2,545,995,248 +56,-191,-604,971,615,449 +-981,-31,510,592,-390,-362 +-317,-968,913,365,97,508 +832,63,-864,-510,86,202 +-483,456,-636,340,-310,676 +981,-847,751,-508,-962,-31 +-157,99,73,797,63,-172 +220,858,872,924,866,-381 +996,-169,805,321,-164,971 +896,11,-625,-973,-782,76 +578,-280,730,-729,307,-905 +-580,-749,719,-698,967,603 +-821,874,-103,-623,662,-491 +-763,117,661,-644,672,-607 +592,787,-798,-169,-298,690 +296,644,-526,-762,-447,665 +534,-818,852,-120,57,-379 +-986,-549,-329,294,954,258 +-133,352,-660,-77,904,-356 +748,343,215,500,317,-277 +311,7,910,-896,-809,795 +763,-602,-753,313,-352,917 +668,619,-474,-597,-650,650 +-297,563,-701,-987,486,-902 +-461,-740,-657,233,-482,-328 +-446,-250,-986,-458,-629,520 +542,-49,-327,-469,257,-947 +121,-575,-634,-143,-184,521 +30,504,455,-645,-229,-945 +-12,-295,377,764,771,125 +-686,-133,225,-25,-376,-143 +-6,-46,338,270,-405,-872 +-623,-37,582,467,963,898 +-804,869,-477,420,-475,-303 +94,41,-842,-193,-768,720 +-656,-918,415,645,-357,460 +-47,-486,-911,468,-608,-686 +-158,251,419,-394,-655,-895 +272,-695,979,508,-358,959 +-776,650,-918,-467,-690,-534 +-85,-309,-626,167,-366,-429 +-880,-732,-186,-924,970,-875 +517,645,-274,962,-804,544 +721,402,104,640,478,-499 +198,684,-134,-723,-452,-905 +-245,745,239,238,-826,441 +-217,206,-32,462,-981,-895 +-51,989,526,-173,560,-676 +-480,-659,-976,-580,-727,466 +-996,-90,-995,158,-239,642 +302,288,-194,-294,17,924 +-943,969,-326,114,-500,103 +-619,163,339,-880,230,421 +-344,-601,-795,557,565,-779 +590,345,-129,-202,-125,-58 +-777,-195,159,674,775,411 +-939,312,-665,810,121,855 +-971,254,712,815,452,581 +442,-9,327,-750,61,757 +-342,869,869,-160,390,-772 +620,601,565,-169,-69,-183 +-25,924,-817,964,321,-970 +-64,-6,-133,978,825,-379 +601,436,-24,98,-115,940 +-97,502,614,-574,922,513 +-125,262,-946,695,99,-220 +429,-721,719,-694,197,-558 +326,689,-70,-908,-673,338 +-468,-856,-902,-254,-358,305 +-358,530,542,355,-253,-47 +-438,-74,-362,963,988,788 +137,717,467,622,319,-380 +-86,310,-336,851,918,-288 +721,395,646,-53,255,-425 +255,175,912,84,-209,878 +-632,-485,-400,-357,991,-608 +235,-559,992,-297,857,-591 +87,-71,148,130,647,578 +-290,-584,-639,-788,-21,592 +386,984,625,-731,-993,-336 +-538,634,-209,-828,-150,-774 +-754,-387,607,-781,976,-199 +412,-798,-664,295,709,-537 +-412,932,-880,-232,561,852 +-656,-358,-198,-964,-433,-848 +-762,-668,-632,186,-673,-11 +-876,237,-282,-312,-83,682 +403,73,-57,-436,-622,781 +-587,873,798,976,-39,329 +-369,-622,553,-341,817,794 +-108,-616,920,-849,-679,96 +290,-974,234,239,-284,-321 +-22,394,-417,-419,264,58 +-473,-551,69,923,591,-228 +-956,662,-113,851,-581,-794 +-258,-681,413,-471,-637,-817 +-866,926,992,-653,-7,794 +556,-350,602,917,831,-610 
+188,245,-906,361,492,174 +-720,384,-818,329,638,-666 +-246,846,890,-325,-59,-850 +-118,-509,620,-762,-256,15 +-787,-536,-452,-338,-399,813 +458,560,525,-311,-608,-419 +494,-811,-825,-127,-812,894 +-801,890,-629,-860,574,925 +-709,-193,-213,138,-410,-403 +861,91,708,-187,5,-222 +789,646,777,154,90,-49 +-267,-830,-114,531,591,-698 +-126,-82,881,-418,82,652 +-894,130,-726,-935,393,-815 +-142,563,654,638,-712,-597 +-759,60,-23,977,100,-765 +-305,595,-570,-809,482,762 +-161,-267,53,963,998,-529 +-300,-57,798,353,703,486 +-990,696,-764,699,-565,719 +-232,-205,566,571,977,369 +740,865,151,-817,-204,-293 +94,445,-768,229,537,-406 +861,620,37,-424,-36,656 +390,-369,952,733,-464,569 +-482,-604,959,554,-705,-626 +-396,-615,-991,108,272,-723 +143,780,535,142,-917,-147 +138,-629,-217,-908,905,115 +915,103,-852,64,-468,-642 +570,734,-785,-268,-326,-759 +738,531,-332,586,-779,24 +870,440,-217,473,-383,415 +-296,-333,-330,-142,-924,950 +118,120,-35,-245,-211,-652 +61,634,153,-243,838,789 +726,-582,210,105,983,537 +-313,-323,758,234,29,848 +-847,-172,-593,733,-56,617 +54,255,-512,156,-575,675 +-873,-956,-148,623,95,200 +700,-370,926,649,-978,157 +-639,-202,719,130,747,222 +194,-33,955,943,505,114 +-226,-790,28,-930,827,783 +-392,-74,-28,714,218,-612 +209,626,-888,-683,-912,495 +487,751,614,933,631,445 +-348,-34,-411,-106,835,321 +-689,872,-29,-800,312,-542 +-52,566,827,570,-862,-77 +471,992,309,-402,389,912 +24,520,-83,-51,555,503 +-265,-317,283,-970,-472,690 +606,526,137,71,-651,150 +217,-518,663,66,-605,-331 +-562,232,-76,-503,205,-323 +842,-521,546,285,625,-186 +997,-927,344,909,-546,974 +-677,419,81,121,-705,771 +719,-379,-944,-797,784,-155 +-378,286,-317,-797,-111,964 +-288,-573,784,80,-532,-646 +-77,407,-248,-797,769,-816 +-24,-637,287,-858,-927,-333 +-902,37,894,-823,141,684 +125,467,-177,-516,686,399 +-321,-542,641,-590,527,-224 +-400,-712,-876,-208,632,-543 +-676,-429,664,-242,-269,922 +-608,-273,-141,930,687,380 +786,-12,498,494,310,326 +-739,-617,606,-960,804,188 +384,-368,-243,-350,-459,31 +-550,397,320,-868,328,-279 +969,-179,853,864,-110,514 +910,793,302,-822,-285,488 +-605,-128,218,-283,-17,-227 +16,324,667,708,750,3 +485,-813,19,585,71,930 +-218,816,-687,-97,-732,-360 +-497,-151,376,-23,3,315 +-412,-989,-610,-813,372,964 +-878,-280,87,381,-311,69 +-609,-90,-731,-679,150,585 +889,27,-162,605,75,-770 +448,617,-988,0,-103,-504 +-800,-537,-69,627,608,-668 +534,686,-664,942,830,920 +-238,775,495,932,-793,497 +-343,958,-914,-514,-691,651 +568,-136,208,359,728,28 +286,912,-794,683,556,-102 +-638,-629,-484,445,-64,-497 +58,505,-801,-110,872,632 +-390,777,353,267,976,369 +-993,515,105,-133,358,-572 +964,996,355,-212,-667,38 +-725,-614,-35,365,132,-196 +237,-536,-416,-302,312,477 +-664,574,-210,224,48,-925 +869,-261,-256,-240,-3,-698 +712,385,32,-34,916,-315 +895,-409,-100,-346,728,-624 +-806,327,-450,889,-781,-939 +-586,-403,698,318,-939,899 +557,-57,-920,659,333,-51 +-441,232,-918,-205,246,1 +783,167,-797,-595,245,-736 +-36,-531,-486,-426,-813,-160 +777,-843,817,313,-228,-572 +735,866,-309,-564,-81,190 +-413,645,101,719,-719,218 +-83,164,767,796,-430,-459 +122,779,-15,-295,-96,-892 +462,379,70,548,834,-312 +-630,-534,124,187,-737,114 +-299,-604,318,-591,936,826 +-879,218,-642,-483,-318,-866 +-691,62,-658,761,-895,-854 +-822,493,687,569,910,-202 +-223,784,304,-5,541,925 +-914,541,737,-662,-662,-195 +-622,615,414,358,881,-878 +339,745,-268,-968,-280,-227 +-364,855,148,-709,-827,472 +-890,-532,-41,664,-612,577 +-702,-859,971,-722,-660,-920 +-539,-605,737,149,973,-802 +800,42,-448,-811,152,511 
+-933,377,-110,-105,-374,-937 +-766,152,482,120,-308,390 +-568,775,-292,899,732,890 +-177,-317,-502,-259,328,-511 +612,-696,-574,-660,132,31 +-119,563,-805,-864,179,-672 +425,-627,183,-331,839,318 +-711,-976,-749,152,-916,261 +181,-63,497,211,262,406 +-537,700,-859,-765,-928,77 +892,832,231,-749,-82,613 +816,216,-642,-216,-669,-912 +-6,624,-937,-370,-344,268 +737,-710,-869,983,-324,-274 +565,952,-547,-158,374,-444 +51,-683,645,-845,515,636 +-953,-631,114,-377,-764,-144 +-8,470,-242,-399,-675,-730 +-540,689,-20,47,-607,590 +-329,-710,-779,942,-388,979 +123,829,674,122,203,563 +46,782,396,-33,386,610 +872,-846,-523,-122,-55,-190 +388,-994,-525,974,127,596 +781,-680,796,-34,-959,-62 +-749,173,200,-384,-745,-446 +379,618,136,-250,-224,970 +-58,240,-921,-760,-901,-626 +366,-185,565,-100,515,688 +489,999,-893,-263,-637,816 +838,-496,-316,-513,419,479 +107,676,-15,882,98,-397 +-999,941,-903,-424,670,-325 +171,-979,835,178,169,-984 +-609,-607,378,-681,184,402 +-316,903,-575,-800,224,983 +591,-18,-460,551,-167,918 +-756,405,-117,441,163,-320 +456,24,6,881,-836,-539 +-489,-585,915,651,-892,-382 +-177,-122,73,-711,-386,591 +181,724,530,686,-131,241 +737,288,886,216,233,33 +-548,-386,-749,-153,-85,-982 +-835,227,904,160,-99,25 +-9,-42,-162,728,840,-963 +217,-763,870,771,47,-846 +-595,808,-491,556,337,-900 +-134,281,-724,441,-134,708 +-789,-508,651,-962,661,315 +-839,-923,339,402,41,-487 +300,-790,48,703,-398,-811 +955,-51,462,-685,960,-717 +910,-880,592,-255,-51,-776 +-885,169,-793,368,-565,458 +-905,940,-492,-630,-535,-988 +245,797,763,869,-82,550 +-310,38,-933,-367,-650,824 +-95,32,-83,337,226,990 +-218,-975,-191,-208,-785,-293 +-672,-953,517,-901,-247,465 +681,-148,261,-857,544,-923 +640,341,446,-618,195,769 +384,398,-846,365,671,815 +578,576,-911,907,762,-859 +548,-428,144,-630,-759,-146 +710,-73,-700,983,-97,-889 +-46,898,-973,-362,-817,-717 +151,-81,-125,-900,-478,-154 +483,615,-537,-932,181,-68 +786,-223,518,25,-306,-12 +-422,268,-809,-683,635,468 +983,-734,-694,-608,-110,4 +-786,-196,749,-354,137,-8 +-181,36,668,-200,691,-973 +-629,-838,692,-736,437,-871 +-208,-536,-159,-596,8,197 +-3,370,-686,170,913,-376 +44,-998,-149,-993,-200,512 +-519,136,859,497,536,434 +77,-985,972,-340,-705,-837 +-381,947,250,360,344,322 +-26,131,699,750,707,384 +-914,655,299,193,406,955 +-883,-921,220,595,-546,794 +-599,577,-569,-404,-704,489 +-594,-963,-624,-460,880,-760 +-603,88,-99,681,55,-328 +976,472,139,-453,-531,-860 +192,-290,513,-89,666,432 +417,487,575,293,567,-668 +655,711,-162,449,-980,972 +-505,664,-685,-239,603,-592 +-625,-802,-67,996,384,-636 +365,-593,522,-666,-200,-431 +-868,708,560,-860,-630,-355 +-702,785,-637,-611,-597,960 +-137,-696,-93,-803,408,406 +891,-123,-26,-609,-610,518 +133,-832,-198,555,708,-110 +791,617,-69,487,696,315 +-900,694,-565,517,-269,-416 +914,135,-781,600,-71,-600 +991,-915,-422,-351,-837,313 +-840,-398,-302,21,590,146 +62,-558,-702,-384,-625,831 +-363,-426,-924,-496,792,-908 +73,361,-817,-466,400,922 +-626,-164,-626,860,-524,286 +255,26,-944,809,-606,986 +-457,-256,-103,50,-867,-871 +-223,803,196,480,612,136 +-820,-928,700,780,-977,721 +717,332,53,-933,-128,793 +-602,-648,562,593,890,702 +-469,-875,-527,911,-475,-222 +110,-281,-552,-536,-816,596 +-981,654,413,-981,-75,-95 +-754,-742,-515,894,-220,-344 +795,-52,156,408,-603,76 +474,-157,423,-499,-807,-791 +260,688,40,-52,702,-122 +-584,-517,-390,-881,302,-504 +61,797,665,708,14,668 +366,166,458,-614,564,-983 +72,539,-378,796,381,-824 +-485,201,-588,842,736,379 +-149,-894,-298,705,-303,-406 +660,-935,-580,521,93,633 
+-382,-282,-375,-841,-828,171 +-567,743,-100,43,144,122 +-281,-786,-749,-551,296,304 +11,-426,-792,212,857,-175 +594,143,-699,289,315,137 +341,596,-390,107,-631,-804 +-751,-636,-424,-854,193,651 +-145,384,749,675,-786,517 +224,-865,-323,96,-916,258 +-309,403,-388,826,35,-270 +-942,709,222,158,-699,-103 +-589,842,-997,29,-195,-210 +264,426,566,145,-217,623 +217,965,507,-601,-453,507 +-206,307,-982,4,64,-292 +676,-49,-38,-701,550,883 +5,-850,-438,659,745,-773 +933,238,-574,-570,91,-33 +-866,121,-928,358,459,-843 +-568,-631,-352,-580,-349,189 +-737,849,-963,-486,-662,970 +135,334,-967,-71,-365,-792 +789,21,-227,51,990,-275 +240,412,-886,230,591,256 +-609,472,-853,-754,959,661 +401,521,521,314,929,982 +-499,784,-208,71,-302,296 +-557,-948,-553,-526,-864,793 +270,-626,828,44,37,14 +-412,224,617,-593,502,699 +41,-908,81,562,-849,163 +165,917,761,-197,331,-341 +-687,314,799,755,-969,648 +-164,25,578,439,-334,-576 +213,535,874,-177,-551,24 +-689,291,-795,-225,-496,-125 +465,461,558,-118,-568,-909 +567,660,-810,46,-485,878 +-147,606,685,-690,-774,984 +568,-886,-43,854,-738,616 +-800,386,-614,585,764,-226 +-518,23,-225,-732,-79,440 +-173,-291,-689,636,642,-447 +-598,-16,227,410,496,211 +-474,-930,-656,-321,-420,36 +-435,165,-819,555,540,144 +-969,149,828,568,394,648 +65,-848,257,720,-625,-851 +981,899,275,635,465,-877 +80,290,792,760,-191,-321 +-605,-858,594,33,706,593 +585,-472,318,-35,354,-927 +-365,664,803,581,-965,-814 +-427,-238,-480,146,-55,-606 +879,-193,250,-890,336,117 +-226,-322,-286,-765,-836,-218 +-913,564,-667,-698,937,283 +872,-901,810,-623,-52,-709 +473,171,717,38,-429,-644 +225,824,-219,-475,-180,234 +-530,-797,-948,238,851,-623 +85,975,-363,529,598,28 +-799,166,-804,210,-769,851 +-687,-158,885,736,-381,-461 +447,592,928,-514,-515,-661 +-399,-777,-493,80,-544,-78 +-884,631,171,-825,-333,551 +191,268,-577,676,137,-33 +212,-853,709,798,583,-56 +-908,-172,-540,-84,-135,-56 +303,311,406,-360,-240,811 +798,-708,824,59,234,-57 +491,693,-74,585,-85,877 +509,-65,-936,329,-51,722 +-122,858,-52,467,-77,-609 +850,760,547,-495,-953,-952 +-460,-541,890,910,286,724 +-914,843,-579,-983,-387,-460 +989,-171,-877,-326,-899,458 +846,175,-915,540,-1000,-982 +-852,-920,-306,496,530,-18 +338,-991,160,85,-455,-661 +-186,-311,-460,-563,-231,-414 +-932,-302,959,597,793,748 +-366,-402,-788,-279,514,53 +-940,-956,447,-956,211,-285 +564,806,-911,-914,934,754 +575,-858,-277,15,409,-714 +848,462,100,-381,135,242 +330,718,-24,-190,860,-78 +479,458,941,108,-866,-653 +212,980,962,-962,115,841 +-827,-474,-206,881,323,765 +506,-45,-30,-293,524,-133 +832,-173,547,-852,-561,-842 +-397,-661,-708,819,-545,-228 +521,51,-489,852,36,-258 +227,-164,189,465,-987,-882 +-73,-997,641,-995,449,-615 +151,-995,-638,415,257,-400 +-663,-297,-748,537,-734,198 +-585,-401,-81,-782,-80,-105 +99,-21,238,-365,-704,-368 +45,416,849,-211,-371,-1 +-404,-443,795,-406,36,-933 +272,-363,981,-491,-380,77 +713,-342,-366,-849,643,911 +-748,671,-537,813,961,-200 +-194,-909,703,-662,-601,188 +281,500,724,286,267,197 +-832,847,-595,820,-316,637 +520,521,-54,261,923,-10 +4,-808,-682,-258,441,-695 +-793,-107,-969,905,798,446 +-108,-739,-590,69,-855,-365 +380,-623,-930,817,468,713 +759,-849,-236,433,-723,-931 +95,-320,-686,124,-69,-329 +-655,518,-210,-523,284,-866 +144,303,639,70,-171,269 +173,-333,947,-304,55,40 +274,878,-482,-888,-835,375 +-982,-854,-36,-218,-114,-230 +905,-979,488,-485,-479,114 +877,-157,553,-530,-47,-321 +350,664,-881,442,-220,-284 +434,-423,-365,878,-726,584 +535,909,-517,-447,-660,-141 +-966,191,50,353,182,-642 
+-785,-634,123,-907,-162,511 +146,-850,-214,814,-704,25 +692,1,521,492,-637,274 +-662,-372,-313,597,983,-647 +-962,-526,68,-549,-819,231 +740,-890,-318,797,-666,948 +-190,-12,-468,-455,948,284 +16,478,-506,-888,628,-154 +272,630,-976,308,433,3 +-169,-391,-132,189,302,-388 +109,-784,474,-167,-265,-31 +-177,-532,283,464,421,-73 +650,635,592,-138,1,-387 +-932,703,-827,-492,-355,686 +586,-311,340,-618,645,-434 +-951,736,647,-127,-303,590 +188,444,903,718,-931,500 +-872,-642,-296,-571,337,241 +23,65,152,125,880,470 +512,823,-42,217,823,-263 +180,-831,-380,886,607,762 +722,443,-149,-216,-115,759 +-19,660,-36,901,923,231 +562,-322,-626,-968,194,-825 +204,-920,938,784,362,150 +-410,-266,-715,559,-672,124 +-198,446,-140,454,-461,-447 +83,-346,830,-493,-759,-382 +-881,601,581,234,-134,-925 +-494,914,-42,899,235,629 +-390,50,956,437,774,-700 +-514,514,44,-512,-576,-313 +63,-688,808,-534,-570,-399 +-726,572,-896,102,-294,-28 +-688,757,401,406,955,-511 +-283,423,-485,480,-767,908 +-541,952,-594,116,-854,451 +-273,-796,236,625,-626,257 +-407,-493,373,826,-309,297 +-750,955,-476,641,-809,713 +8,415,695,226,-111,2 +733,209,152,-920,401,995 +921,-103,-919,66,871,-947 +-907,89,-869,-214,851,-559 +-307,748,524,-755,314,-711 +188,897,-72,-763,482,103 +545,-821,-232,-596,-334,-754 +-217,-788,-820,388,-200,-662 +779,160,-723,-975,-142,-998 +-978,-519,-78,-981,842,904 +-504,-736,-295,21,-472,-482 +391,115,-705,574,652,-446 +813,-988,865,830,-263,487 +194,80,774,-493,-761,-872 +-415,-284,-803,7,-810,670 +-484,-4,881,-872,55,-852 +-379,822,-266,324,-48,748 +-304,-278,406,-60,959,-89 +404,756,577,-643,-332,658 +291,460,125,491,-312,83 +311,-734,-141,582,282,-557 +-450,-661,-981,710,-177,794 +328,264,-787,971,-743,-407 +-622,518,993,-241,-738,229 +273,-826,-254,-917,-710,-111 +809,770,96,368,-818,725 +-488,773,502,-342,534,745 +-28,-414,236,-315,-484,363 +179,-466,-566,713,-683,56 +560,-240,-597,619,916,-940 +893,473,872,-868,-642,-461 +799,489,383,-321,-776,-833 +980,490,-508,764,-512,-426 +917,961,-16,-675,440,559 +-812,212,784,-987,-132,554 +-886,454,747,806,190,231 +910,341,21,-66,708,725 +29,929,-831,-494,-303,389 +-103,492,-271,-174,-515,529 +-292,119,419,788,247,-951 +483,543,-347,-673,664,-549 +-926,-871,-437,337,162,-877 +299,472,-771,5,-88,-643 +-103,525,-725,-998,264,22 +-505,708,550,-545,823,347 +-738,931,59,147,-156,-259 +456,968,-162,889,132,-911 +535,120,968,-517,-864,-541 +24,-395,-593,-766,-565,-332 +834,611,825,-576,280,629 +211,-548,140,-278,-592,929 +-999,-240,-63,-78,793,573 +-573,160,450,987,529,322 +63,353,315,-187,-461,577 +189,-950,-247,656,289,241 +209,-297,397,664,-805,484 +-655,452,435,-556,917,874 +253,-756,262,-888,-778,-214 +793,-451,323,-251,-401,-458 +-396,619,-651,-287,-668,-781 +698,720,-349,742,-807,546 +738,280,680,279,-540,858 +-789,387,530,-36,-551,-491 +162,579,-427,-272,228,710 +689,356,917,-580,729,217 +-115,-638,866,424,-82,-194 +411,-338,-917,172,227,-29 +-612,63,630,-976,-64,-204 +-200,911,583,-571,682,-579 +91,298,396,-183,788,-955 +141,-873,-277,149,-396,916 +321,958,-136,573,541,-777 +797,-909,-469,-877,988,-653 +784,-198,129,883,-203,399 +-68,-810,223,-423,-467,-512 +531,-445,-603,-997,-841,641 +-274,-242,174,261,-636,-158 +-574,494,-796,-798,-798,99 +95,-82,-613,-954,-753,986 +-883,-448,-864,-401,938,-392 +913,930,-542,-988,310,410 +506,-99,43,512,790,-222 +724,31,49,-950,260,-134 +-287,-947,-234,-700,56,588 +-33,782,-144,948,105,-791 +548,-546,-652,-293,881,-520 +691,-91,76,991,-631,742 +-520,-429,-244,-296,724,-48 +778,646,377,50,-188,56 
+-895,-507,-898,-165,-674,652 +654,584,-634,177,-349,-620 +114,-980,355,62,182,975 +516,9,-442,-298,274,-579 +-238,262,-431,-896,506,-850 +47,748,846,821,-537,-293 +839,726,593,285,-297,840 +634,-486,468,-304,-887,-567 +-864,914,296,-124,335,233 +88,-253,-523,-956,-554,803 +-587,417,281,-62,-409,-363 +-136,-39,-292,-768,-264,876 +-127,506,-891,-331,-744,-430 +778,584,-750,-129,-479,-94 +-876,-771,-987,-757,180,-641 +-777,-694,411,-87,329,190 +-347,-999,-882,158,-754,232 +-105,918,188,237,-110,-591 +-209,703,-838,77,838,909 +-995,-339,-762,750,860,472 +185,271,-289,173,811,-300 +2,65,-656,-22,36,-139 +765,-210,883,974,961,-905 +-212,295,-615,-840,77,474 +211,-910,-440,703,-11,859 +-559,-4,-196,841,-277,969 +-73,-159,-887,126,978,-371 +-569,633,-423,-33,512,-393 +503,143,-383,-109,-649,-998 +-663,339,-317,-523,-2,596 +690,-380,570,378,-652,132 +72,-744,-930,399,-525,935 +865,-983,115,37,995,826 +594,-621,-872,443,188,-241 +-1000,291,754,234,-435,-869 +-868,901,654,-907,59,181 +-868,-793,-431,596,-446,-564 +900,-944,-680,-796,902,-366 +331,430,943,853,-851,-942 +315,-538,-354,-909,139,721 +170,-884,-225,-818,-808,-657 +-279,-34,-533,-871,-972,552 +691,-986,-800,-950,654,-747 +603,988,899,841,-630,591 +876,-949,809,562,602,-536 +-693,363,-189,495,738,-1000 +-383,431,-633,297,665,959 +-740,686,-207,-803,188,-520 +-820,226,31,-339,10,121 +-312,-844,624,-516,483,621 +-822,-529,69,-278,800,328 +834,-82,-759,420,811,-264 +-960,-240,-921,561,173,46 +-324,909,-790,-814,-2,-785 +976,334,-290,-891,704,-581 +150,-798,689,-823,237,-639 +-551,-320,876,-502,-622,-628 +-136,845,904,595,-702,-261 +-857,-377,-522,-101,-943,-805 +-682,-787,-888,-459,-752,-985 +-571,-81,623,-133,447,643 +-375,-158,72,-387,-324,-696 +-660,-650,340,188,569,526 +727,-218,16,-7,-595,-988 +-966,-684,802,-783,-272,-194 +115,-566,-888,47,712,180 +-237,-69,45,-272,981,-812 +48,897,439,417,50,325 +348,616,180,254,104,-784 +-730,811,-548,612,-736,790 +138,-810,123,930,65,865 +-768,-299,-49,-895,-692,-418 +487,-531,802,-159,-12,634 +808,-179,552,-73,470,717 +720,-644,886,-141,625,144 +-485,-505,-347,-244,-916,66 +600,-565,995,-5,324,227 +-771,-35,904,-482,753,-303 +-701,65,426,-763,-504,-479 +409,733,-823,475,64,718 +865,975,368,893,-413,-433 +812,-597,-970,819,813,624 +193,-642,-381,-560,545,398 +711,28,-316,771,717,-865 +-509,462,809,-136,786,635 +618,-49,484,169,635,547 +-747,685,-882,-496,-332,82 +-501,-851,870,563,290,570 +-279,-829,-509,397,457,816 +-508,80,850,-188,483,-326 +860,-100,360,119,-205,787 +-870,21,-39,-827,-185,932 +826,284,-136,-866,-330,-97 +-944,-82,745,899,-97,365 +929,262,564,632,-115,632 +244,-276,713,330,-897,-214 +-890,-109,664,876,-974,-907 +716,249,816,489,723,141 +-96,-560,-272,45,-70,645 +762,-503,414,-828,-254,-646 +909,-13,903,-422,-344,-10 +658,-486,743,545,50,674 +-241,507,-367,18,-48,-241 +886,-268,884,-762,120,-486 +-412,-528,879,-647,223,-393 +851,810,234,937,-726,797 +-999,942,839,-134,-996,-189 +100,979,-527,-521,378,800 +544,-844,-832,-530,-77,-641 +43,889,31,442,-934,-503 +-330,-370,-309,-439,173,547 +169,945,62,-753,-542,-597 +208,751,-372,-647,-520,70 +765,-840,907,-257,379,918 +334,-135,-689,730,-427,618 +137,-508,66,-695,78,169 +-962,-123,400,-417,151,969 +328,689,666,427,-555,-642 +-907,343,605,-341,-647,582 +-667,-363,-571,818,-265,-399 +525,-938,904,898,725,692 +-176,-802,-858,-9,780,275 +580,170,-740,287,691,-97 +365,557,-375,361,-288,859 +193,737,842,-808,520,282 +-871,65,-799,836,179,-720 +958,-144,744,-789,797,-48 +122,582,662,912,68,757 +595,241,-801,513,388,186 
+-103,-677,-259,-731,-281,-857 +921,319,-696,683,-88,-997 +775,200,78,858,648,768 +316,821,-763,68,-290,-741 +564,664,691,504,760,787 +694,-119,973,-385,309,-760 +777,-947,-57,990,74,19 +971,626,-496,-781,-602,-239 +-651,433,11,-339,939,294 +-965,-728,560,569,-708,-247 diff --git a/project_euler/problem_102/sol1.py b/project_euler/problem_102/sol1.py new file mode 100644 index 000000000000..00af726656ce --- /dev/null +++ b/project_euler/problem_102/sol1.py @@ -0,0 +1,81 @@ +""" +Three distinct points are plotted at random on a Cartesian plane, +for which -1000 ≤ x, y ≤ 1000, such that a triangle is formed. + +Consider the following two triangles: + +A(-340,495), B(-153,-910), C(835,-947) + +X(-175,41), Y(-421,-714), Z(574,-645) + +It can be verified that triangle ABC contains the origin, whereas +triangle XYZ does not. + +Using triangles.txt (right click and 'Save Link/Target As...'), a 27K text +file containing the coordinates of one thousand "random" triangles, find +the number of triangles for which the interior contains the origin. + +NOTE: The first two examples in the file represent the triangles in the +example given above. +""" + +from pathlib import Path +from typing import List, Tuple + + +def vector_product(point1: Tuple[int, int], point2: Tuple[int, int]) -> int: + """ + Return the 2-d vector product of two vectors. + >>> vector_product((1, 2), (-5, 0)) + 10 + >>> vector_product((3, 1), (6, 10)) + 24 + """ + return point1[0] * point2[1] - point1[1] * point2[0] + + +def contains_origin(x1: int, y1: int, x2: int, y2: int, x3: int, y3: int) -> bool: + """ + Check if the triangle given by the points A(x1, y1), B(x2, y2), C(x3, y3) + contains the origin. + >>> contains_origin(-340, 495, -153, -910, 835, -947) + True + >>> contains_origin(-175, 41, -421, -714, 574, -645) + False + """ + point_a: Tuple[int, int] = (x1, y1) + point_a_to_b: Tuple[int, int] = (x2 - x1, y2 - y1) + point_a_to_c: Tuple[int, int] = (x3 - x1, y3 - y1) + a: float = -vector_product(point_a, point_a_to_b) / vector_product( + point_a_to_c, point_a_to_b + ) + b: float = +vector_product(point_a, point_a_to_c) / vector_product( + point_a_to_c, point_a_to_b + ) + + return a > 0 and b > 0 and a + b < 1 + + +def solution(filename: str = "p102_triangles.txt") -> int: + """ + Find the number of triangles whose interior contains the origin. 
+ >>> solution("test_triangles.txt") + 1 + """ + data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8") + + triangles: List[List[int]] = [] + for line in data.strip().split("\n"): + triangles.append([int(number) for number in line.split(",")]) + + ret: int = 0 + triangle: List[int] + + for triangle in triangles: + ret += contains_origin(*triangle) + + return ret + + +if __name__ == "__main__": + print(f"{solution() = }") diff --git a/project_euler/problem_102/test_triangles.txt b/project_euler/problem_102/test_triangles.txt new file mode 100644 index 000000000000..5c10cd651e9b --- /dev/null +++ b/project_euler/problem_102/test_triangles.txt @@ -0,0 +1,2 @@ +-340,495,-153,-910,835,-947 +-175,41,-421,-714,574,-645 From f3ba9b6c508a24cd0e10fb08d0235c1f838fb73a Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Thu, 24 Dec 2020 18:16:21 +0530 Subject: [PATCH 0055/1543] [mypy] Add/fix type annotations for backtracking algorithms (#4055) * Fix mypy errors for backtracking algorithms * Fix CI failure --- backtracking/all_subsequences.py | 28 ++++++-------- backtracking/coloring.py | 8 ++-- backtracking/hamiltonian_cycle.py | 8 ++-- backtracking/knight_tour.py | 12 +++--- backtracking/minimax.py | 26 ++++++------- backtracking/n_queens_math.py | 63 ++++++++++++++----------------- backtracking/sudoku.py | 59 ++++++++++++++++------------- 7 files changed, 98 insertions(+), 106 deletions(-) diff --git a/backtracking/all_subsequences.py b/backtracking/all_subsequences.py index 9086e3a3d659..99db4ea46589 100644 --- a/backtracking/all_subsequences.py +++ b/backtracking/all_subsequences.py @@ -1,12 +1,11 @@ -from typing import Any, List - """ - In this problem, we want to determine all possible subsequences - of the given sequence. We use backtracking to solve this problem. +In this problem, we want to determine all possible subsequences +of the given sequence. We use backtracking to solve this problem. - Time complexity: O(2^n), - where n denotes the length of the given sequence. +Time complexity: O(2^n), +where n denotes the length of the given sequence. 
""" +from typing import Any, List def generate_all_subsequences(sequence: List[Any]) -> None: @@ -32,15 +31,10 @@ def create_state_space_tree( current_subsequence.pop() -""" -remove the comment to take an input from the user - -print("Enter the elements") -sequence = list(map(int, input().split())) -""" - -sequence = [3, 1, 2, 4] -generate_all_subsequences(sequence) +if __name__ == "__main__": + seq: List[Any] = [3, 1, 2, 4] + generate_all_subsequences(seq) -sequence = ["A", "B", "C"] -generate_all_subsequences(sequence) + seq.clear() + seq.extend(["A", "B", "C"]) + generate_all_subsequences(seq) diff --git a/backtracking/coloring.py b/backtracking/coloring.py index ceaffe3fae76..3956b21a9182 100644 --- a/backtracking/coloring.py +++ b/backtracking/coloring.py @@ -5,11 +5,11 @@ Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring """ -from __future__ import annotations +from typing import List def valid_coloring( - neighbours: list[int], colored_vertices: list[int], color: int + neighbours: List[int], colored_vertices: List[int], color: int ) -> bool: """ For each neighbour check if coloring constraint is satisfied @@ -35,7 +35,7 @@ def valid_coloring( def util_color( - graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int + graph: List[List[int]], max_colors: int, colored_vertices: List[int], index: int ) -> bool: """ Pseudo-Code @@ -86,7 +86,7 @@ def util_color( return False -def color(graph: list[list[int]], max_colors: int) -> list[int]: +def color(graph: List[List[int]], max_colors: int) -> List[int]: """ Wrapper function to call subroutine called util_color which will either return True or False. diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py index bf15cce4aca4..7be1ea350d7c 100644 --- a/backtracking/hamiltonian_cycle.py +++ b/backtracking/hamiltonian_cycle.py @@ -6,11 +6,11 @@ Wikipedia: https://en.wikipedia.org/wiki/Hamiltonian_path """ -from __future__ import annotations +from typing import List def valid_connection( - graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int] + graph: List[List[int]], next_ver: int, curr_ind: int, path: List[int] ) -> bool: """ Checks whether it is possible to add next into path by validating 2 statements @@ -47,7 +47,7 @@ def valid_connection( return not any(vertex == next_ver for vertex in path) -def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool: +def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int) -> bool: """ Pseudo-Code Base Case: @@ -108,7 +108,7 @@ def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) return False -def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]: +def hamilton_cycle(graph: List[List[int]], start_index: int = 0) -> List[int]: r""" Wrapper function to call subroutine called util_hamilton_cycle, which will either return array of vertices indicating hamiltonian cycle diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py index 2413ba468838..8e6613e07d8b 100644 --- a/backtracking/knight_tour.py +++ b/backtracking/knight_tour.py @@ -1,9 +1,9 @@ # Knight Tour Intro: https://www.youtube.com/watch?v=ab_dY3dZFHM -from __future__ import annotations +from typing import List, Tuple -def get_valid_pos(position: tuple[int], n: int) -> list[tuple[int]]: +def get_valid_pos(position: Tuple[int, int], n: int) -> List[Tuple[int, int]]: """ Find all the valid positions a knight can move to from the current position. 
@@ -32,7 +32,7 @@ def get_valid_pos(position: tuple[int], n: int) -> list[tuple[int]]: return permissible_positions -def is_complete(board: list[list[int]]) -> bool: +def is_complete(board: List[List[int]]) -> bool: """ Check if the board (matrix) has been completely filled with non-zero values. @@ -46,7 +46,9 @@ def is_complete(board: list[list[int]]) -> bool: return not any(elem == 0 for row in board for elem in row) -def open_knight_tour_helper(board: list[list[int]], pos: tuple[int], curr: int) -> bool: +def open_knight_tour_helper( + board: List[List[int]], pos: Tuple[int, int], curr: int +) -> bool: """ Helper function to solve knight tour problem. """ @@ -66,7 +68,7 @@ def open_knight_tour_helper(board: list[list[int]], pos: tuple[int], curr: int) return False -def open_knight_tour(n: int) -> list[list[int]]: +def open_knight_tour(n: int) -> List[List[int]]: """ Find the solution for the knight tour problem for a board of size n. Raises ValueError if the tour cannot be performed for the given size. diff --git a/backtracking/minimax.py b/backtracking/minimax.py index 91188090c899..dda29b47d6cc 100644 --- a/backtracking/minimax.py +++ b/backtracking/minimax.py @@ -1,18 +1,18 @@ -from __future__ import annotations - -import math +""" +Minimax helps to achieve maximum score in a game by checking all possible moves +depth is current depth in game tree. -""" Minimax helps to achieve maximum score in a game by checking all possible moves - depth is current depth in game tree. - nodeIndex is index of current node in scores[]. - if move is of maximizer return true else false - leaves of game tree is stored in scores[] - height is maximum height of Game tree +nodeIndex is index of current node in scores[]. +if move is of maximizer return true else false +leaves of game tree is stored in scores[] +height is maximum height of Game tree """ +import math +from typing import List def minimax( - depth: int, node_index: int, is_max: bool, scores: list[int], height: float + depth: int, node_index: int, is_max: bool, scores: List[int], height: float ) -> int: """ >>> import math @@ -32,10 +32,6 @@ def minimax( >>> height = math.log(len(scores), 2) >>> minimax(0, 0, True, scores, height) 12 - >>> minimax('1', 2, True, [], 2 ) - Traceback (most recent call last): - ... - TypeError: '<' not supported between instances of 'str' and 'int' """ if depth < 0: @@ -59,7 +55,7 @@ def minimax( ) -def main(): +def main() -> None: scores = [90, 23, 6, 33, 21, 65, 123, 34423] height = math.log(len(scores), 2) print("Optimal value : ", end="") diff --git a/backtracking/n_queens_math.py b/backtracking/n_queens_math.py index 811611971616..a8651c5c362e 100644 --- a/backtracking/n_queens_math.py +++ b/backtracking/n_queens_math.py @@ -75,14 +75,14 @@ for another one or vice versa. """ -from __future__ import annotations +from typing import List def depth_first_search( - possible_board: list[int], - diagonal_right_collisions: list[int], - diagonal_left_collisions: list[int], - boards: list[list[str]], + possible_board: List[int], + diagonal_right_collisions: List[int], + diagonal_left_collisions: List[int], + boards: List[List[str]], n: int, ) -> None: """ @@ -94,40 +94,33 @@ def depth_first_search( ['. . Q . ', 'Q . . . ', '. . . Q ', '. Q . . 
'] """ - """ Get next row in the current board (possible_board) to fill it with a queen """ + # Get next row in the current board (possible_board) to fill it with a queen row = len(possible_board) - """ - If row is equal to the size of the board it means there are a queen in each row in - the current board (possible_board) - """ + # If row is equal to the size of the board it means there are a queen in each row in + # the current board (possible_board) if row == n: - """ - We convert the variable possible_board that looks like this: [1, 3, 0, 2] to - this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] - """ - possible_board = [". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] - boards.append(possible_board) + # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to + # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] + boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board]) return - """ We iterate each column in the row to find all possible results in each row """ + # We iterate each column in the row to find all possible results in each row for col in range(n): - """ - We apply that we learned previously. First we check that in the current board - (possible_board) there are not other same value because if there is it means - that there are a collision in vertical. Then we apply the two formulas we - learned before: - - 45º: y - x = b or 45: row - col = b - 135º: y + x = b or row + col = b. - - And we verify if the results of this two formulas not exist in their variables - respectively. (diagonal_right_collisions, diagonal_left_collisions) - - If any or these are True it means there is a collision so we continue to the - next value in the for loop. - """ + # We apply that we learned previously. First we check that in the current board + # (possible_board) there are not other same value because if there is it means + # that there are a collision in vertical. Then we apply the two formulas we + # learned before: + # + # 45º: y - x = b or 45: row - col = b + # 135º: y + x = b or row + col = b. + # + # And we verify if the results of this two formulas not exist in their variables + # respectively. (diagonal_right_collisions, diagonal_left_collisions) + # + # If any or these are True it means there is a collision so we continue to the + # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions @@ -135,7 +128,7 @@ def depth_first_search( ): continue - """ If it is False we call dfs function again and we update the inputs """ + # If it is False we call dfs function again and we update the inputs depth_first_search( possible_board + [col], diagonal_right_collisions + [row - col], @@ -146,10 +139,10 @@ def depth_first_search( def n_queens_solution(n: int) -> None: - boards = [] + boards: List[List[str]] = [] depth_first_search([], [], [], boards, n) - """ Print all the boards """ + # Print all the boards for board in boards: for column in board: print(column) diff --git a/backtracking/sudoku.py b/backtracking/sudoku.py index 614bdb8530ac..3bfaddd6e56f 100644 --- a/backtracking/sudoku.py +++ b/backtracking/sudoku.py @@ -1,20 +1,20 @@ -from typing import List, Tuple, Union +""" +Given a partially filled 9×9 2D array, the objective is to fill a 9×9 +square grid with digits numbered 1 to 9, so that every row, column, and +and each of the nine 3×3 sub-grids contains all of the digits. + +This can be solved using Backtracking and is similar to n-queens. 
+We check to see if a cell is safe or not and recursively call the +function on the next column to see if it returns True. if yes, we +have solved the puzzle. else, we backtrack and place another number +in that cell and repeat this process. +""" +from typing import List, Optional, Tuple Matrix = List[List[int]] -""" - Given a partially filled 9×9 2D array, the objective is to fill a 9×9 - square grid with digits numbered 1 to 9, so that every row, column, and - and each of the nine 3×3 sub-grids contains all of the digits. - - This can be solved using Backtracking and is similar to n-queens. - We check to see if a cell is safe or not and recursively call the - function on the next column to see if it returns True. if yes, we - have solved the puzzle. else, we backtrack and place another number - in that cell and repeat this process. -""" # assigning initial values to the grid -initial_grid = [ +initial_grid: Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], @@ -27,7 +27,7 @@ ] # a grid with no solution -no_solution = [ +no_solution: Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], @@ -80,7 +80,7 @@ def is_completed(grid: Matrix) -> bool: return all(all(cell != 0 for cell in row) for row in grid) -def find_empty_location(grid: Matrix) -> Tuple[int, int]: +def find_empty_location(grid: Matrix) -> Optional[Tuple[int, int]]: """ This function finds an empty location so that we can assign a number for that particular row and column. @@ -89,9 +89,10 @@ def find_empty_location(grid: Matrix) -> Tuple[int, int]: for j in range(9): if grid[i][j] == 0: return i, j + return None -def sudoku(grid: Matrix) -> Union[Matrix, bool]: +def sudoku(grid: Matrix) -> Optional[Matrix]: """ Takes a partially filled-in grid and attempts to assign values to all unassigned locations in such a way to meet the requirements @@ -107,25 +108,30 @@ def sudoku(grid: Matrix) -> Union[Matrix, bool]: [1, 3, 8, 9, 4, 7, 2, 5, 6], [6, 9, 2, 3, 5, 1, 8, 7, 4], [7, 4, 5, 2, 8, 6, 3, 1, 9]] - >>> sudoku(no_solution) - False + >>> sudoku(no_solution) is None + True """ if is_completed(grid): return grid - row, column = find_empty_location(grid) + location = find_empty_location(grid) + if location is not None: + row, column = location + else: + # If the location is ``None``, then the grid is solved. 
+ return grid for digit in range(1, 10): if is_safe(grid, row, column, digit): grid[row][column] = digit - if sudoku(grid): + if sudoku(grid) is not None: return grid grid[row][column] = 0 - return False + return None def print_solution(grid: Matrix) -> None: @@ -141,11 +147,12 @@ def print_solution(grid: Matrix) -> None: if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid - for grid in (initial_grid, no_solution): - grid = list(map(list, grid)) - solution = sudoku(grid) - if solution: - print("grid after solving:") + for example_grid in (initial_grid, no_solution): + print("\nExample grid:\n" + "=" * 20) + print_solution(example_grid) + print("\nExample grid solution:") + solution = sudoku(example_grid) + if solution is not None: print_solution(solution) else: print("Cannot find a solution.") From 8f47d9f807fae641bffe97ee28ea2e213c2818d8 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Thu, 24 Dec 2020 18:16:44 +0530 Subject: [PATCH 0056/1543] Fix type annotations for bit manipulation algorithms (#4056) --- bit_manipulation/binary_and_operator.py | 2 +- bit_manipulation/binary_or_operator.py | 2 +- bit_manipulation/binary_xor_operator.py | 2 +- bit_manipulation/single_bit_manipulation_operations.py | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bit_manipulation/binary_and_operator.py b/bit_manipulation/binary_and_operator.py index f1b910f8cc9b..191ff8eb44a4 100644 --- a/bit_manipulation/binary_and_operator.py +++ b/bit_manipulation/binary_and_operator.py @@ -1,7 +1,7 @@ # https://www.tutorialspoint.com/python3/bitwise_operators_example.htm -def binary_and(a: int, b: int): +def binary_and(a: int, b: int) -> str: """ Take in 2 integers, convert them to binary, return a binary number that is the diff --git a/bit_manipulation/binary_or_operator.py b/bit_manipulation/binary_or_operator.py index e83a86d6a8bc..dabf5bcb09fd 100644 --- a/bit_manipulation/binary_or_operator.py +++ b/bit_manipulation/binary_or_operator.py @@ -1,7 +1,7 @@ # https://www.tutorialspoint.com/python3/bitwise_operators_example.htm -def binary_or(a: int, b: int): +def binary_or(a: int, b: int) -> str: """ Take in 2 integers, convert them to binary, and return a binary number that is the result of a binary or operation on the integers provided. diff --git a/bit_manipulation/binary_xor_operator.py b/bit_manipulation/binary_xor_operator.py index 0edf2ba6606d..6f8962192ad8 100644 --- a/bit_manipulation/binary_xor_operator.py +++ b/bit_manipulation/binary_xor_operator.py @@ -1,7 +1,7 @@ # https://www.tutorialspoint.com/python3/bitwise_operators_example.htm -def binary_xor(a: int, b: int): +def binary_xor(a: int, b: int) -> str: """ Take in 2 integers, convert them to binary, return a binary number that is the diff --git a/bit_manipulation/single_bit_manipulation_operations.py b/bit_manipulation/single_bit_manipulation_operations.py index 114eafe3235b..e4a54028d9ee 100644 --- a/bit_manipulation/single_bit_manipulation_operations.py +++ b/bit_manipulation/single_bit_manipulation_operations.py @@ -3,7 +3,7 @@ """Provide the functionality to manipulate a single bit.""" -def set_bit(number: int, position: int): +def set_bit(number: int, position: int) -> int: """ Set the bit at position to 1. @@ -21,7 +21,7 @@ def set_bit(number: int, position: int): return number | (1 << position) -def clear_bit(number: int, position: int): +def clear_bit(number: int, position: int) -> int: """ Set the bit at position to 0. 
@@ -37,7 +37,7 @@ def clear_bit(number: int, position: int): return number & ~(1 << position) -def flip_bit(number: int, position: int): +def flip_bit(number: int, position: int) -> int: """ Flip the bit at position. From 207ac957ef02a5885aeb75728ed257a0d76f9974 Mon Sep 17 00:00:00 2001 From: Mark Huang Date: Sat, 26 Dec 2020 11:12:37 +0800 Subject: [PATCH 0057/1543] [mypy] Add type hints and docstrings to heap.py (#3013) * Add type hints and docstrings to heap.py - Add type hints - Add docstrings - Add explanatory comments - Improve code readability - Change to use f-string * Fix import sorting * fixup! Format Python code with psf/black push * Fix static type error * Fix failing test * Fix type hints * Add return annotation Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Dhruv Manilawala --- data_structures/heap/heap.py | 186 ++++++++++++++++++++--------------- 1 file changed, 107 insertions(+), 79 deletions(-) diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index 2dc047436a77..8592362c23b9 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -1,101 +1,138 @@ -#!/usr/bin/python3 +from typing import Iterable, List, Optional class Heap: - """ + """A Max Heap Implementation + >>> unsorted = [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5] >>> h = Heap() - >>> h.build_heap(unsorted) - >>> h.display() + >>> h.build_max_heap(unsorted) + >>> print(h) [209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5] >>> - >>> h.get_max() + >>> h.extract_max() 209 - >>> h.display() + >>> print(h) [201, 107, 25, 103, 11, 15, 1, 9, 7, 5] >>> >>> h.insert(100) - >>> h.display() + >>> print(h) [201, 107, 25, 103, 100, 15, 1, 9, 7, 5, 11] >>> >>> h.heap_sort() - >>> h.display() + >>> print(h) [1, 5, 7, 9, 11, 15, 25, 100, 103, 107, 201] - >>> """ - def __init__(self): - self.h = [] - self.curr_size = 0 + def __init__(self) -> None: + self.h: List[float] = [] + self.heap_size: int = 0 + + def __repr__(self) -> str: + return str(self.h) - def get_left_child_index(self, i): - left_child_index = 2 * i + 1 - if left_child_index < self.curr_size: + def parent_index(self, child_idx: int) -> Optional[int]: + """ return the parent index of given child """ + if child_idx > 0: + return (child_idx - 1) // 2 + return None + + def left_child_idx(self, parent_idx: int) -> Optional[int]: + """ + return the left child index if the left child exists. + if not, return None. + """ + left_child_index = 2 * parent_idx + 1 + if left_child_index < self.heap_size: return left_child_index return None - def get_right_child(self, i): - right_child_index = 2 * i + 2 - if right_child_index < self.curr_size: + def right_child_idx(self, parent_idx: int) -> Optional[int]: + """ + return the right child index if the right child exists. + if not, return None. 
+ """ + right_child_index = 2 * parent_idx + 2 + if right_child_index < self.heap_size: return right_child_index return None - def max_heapify(self, index): - if index < self.curr_size: - largest = index - lc = self.get_left_child_index(index) - rc = self.get_right_child(index) - if lc is not None and self.h[lc] > self.h[largest]: - largest = lc - if rc is not None and self.h[rc] > self.h[largest]: - largest = rc - if largest != index: - self.h[largest], self.h[index] = self.h[index], self.h[largest] - self.max_heapify(largest) - - def build_heap(self, collection): - self.curr_size = len(collection) + def max_heapify(self, index: int) -> None: + """ + correct a single violation of the heap property in a subtree's root. + """ + if index < self.heap_size: + violation: int = index + left_child = self.left_child_idx(index) + right_child = self.right_child_idx(index) + # check which child is larger than its parent + if left_child is not None and self.h[left_child] > self.h[violation]: + violation = left_child + if right_child is not None and self.h[right_child] > self.h[violation]: + violation = right_child + # if violation indeed exists + if violation != index: + # swap to fix the violation + self.h[violation], self.h[index] = self.h[index], self.h[violation] + # fix the subsequent violation recursively if any + self.max_heapify(violation) + + def build_max_heap(self, collection: Iterable[float]) -> None: + """ build max heap from an unsorted array""" self.h = list(collection) - if self.curr_size <= 1: - return - for i in range(self.curr_size // 2 - 1, -1, -1): - self.max_heapify(i) - - def get_max(self): - if self.curr_size >= 2: + self.heap_size = len(self.h) + if self.heap_size > 1: + # max_heapify from right to left but exclude leaves (last level) + for i in range(self.heap_size // 2 - 1, -1, -1): + self.max_heapify(i) + + def max(self) -> float: + """ return the max in the heap """ + if self.heap_size >= 1: + return self.h[0] + else: + raise Exception("Empty heap") + + def extract_max(self) -> float: + """ get and remove max from heap """ + if self.heap_size >= 2: me = self.h[0] self.h[0] = self.h.pop(-1) - self.curr_size -= 1 + self.heap_size -= 1 self.max_heapify(0) return me - elif self.curr_size == 1: - self.curr_size -= 1 + elif self.heap_size == 1: + self.heap_size -= 1 return self.h.pop(-1) - return None - - def heap_sort(self): - size = self.curr_size + else: + raise Exception("Empty heap") + + def insert(self, value: float) -> None: + """ insert a new value into the max heap """ + self.h.append(value) + idx = (self.heap_size - 1) // 2 + self.heap_size += 1 + while idx >= 0: + self.max_heapify(idx) + idx = (idx - 1) // 2 + + def heap_sort(self) -> None: + size = self.heap_size for j in range(size - 1, 0, -1): self.h[0], self.h[j] = self.h[j], self.h[0] - self.curr_size -= 1 + self.heap_size -= 1 self.max_heapify(0) - self.curr_size = size + self.heap_size = size - def insert(self, data): - self.h.append(data) - curr = (self.curr_size - 1) // 2 - self.curr_size += 1 - while curr >= 0: - self.max_heapify(curr) - curr = (curr - 1) // 2 - def display(self): - print(self.h) +if __name__ == "__main__": + import doctest + # run doc test + doctest.testmod() -def main(): + # demo for unsorted in [ - [], [0], [2], [3, 5], @@ -110,26 +147,17 @@ def main(): [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5], [-45, -2, -5], ]: - print("source unsorted list: %s" % unsorted) + print(f"unsorted array: {unsorted}") - h = Heap() - h.build_heap(unsorted) - print("after build heap: ", end=" ") - 
h.display() + heap = Heap() + heap.build_max_heap(unsorted) + print(f"after build heap: {heap}") - print("max value: %s" % h.get_max()) - print("delete max value: ", end=" ") - h.display() + print(f"max value: {heap.extract_max()}") + print(f"after max value removed: {heap}") - h.insert(100) - print("after insert new value 100: ", end=" ") - h.display() + heap.insert(100) + print(f"after new value 100 inserted: {heap}") - h.heap_sort() - print("heap sort: ", end=" ") - h.display() - print() - - -if __name__ == "__main__": - main() + heap.heap_sort() + print(f"heap-sorted array: {heap}\n") From 64d85041700157055b02b19011886de3ff745ca0 Mon Sep 17 00:00:00 2001 From: Ramandeep Singh Date: Sat, 26 Dec 2020 21:43:20 +0530 Subject: [PATCH 0058/1543] Add 2-hidden layer neural network with back propagation using sigmoid activation function (#4037) * added neural network with 2 hidden layers * Revert "added neural network with 2 hidden layers" This reverts commit fa4e2ac86eceaae018cb18c720420665b485f3b7. * added neural network with 2 hidden layers * passing pre-commit requirements * doctest completed * added return hints * added example * example added * completed doctest's * changes made as per the review * changes made * changes after review * changes * spacing * changed return type * changed dtype --- .../2_hidden_layers_neural_network.py | 295 ++++++++++++++++++ 1 file changed, 295 insertions(+) create mode 100644 neural_network/2_hidden_layers_neural_network.py diff --git a/neural_network/2_hidden_layers_neural_network.py b/neural_network/2_hidden_layers_neural_network.py new file mode 100644 index 000000000000..baa4316200d9 --- /dev/null +++ b/neural_network/2_hidden_layers_neural_network.py @@ -0,0 +1,295 @@ +""" +References: + - http://neuralnetworksanddeeplearning.com/chap2.html (Backpropagation) + - https://en.wikipedia.org/wiki/Sigmoid_function (Sigmoid activation function) + - https://en.wikipedia.org/wiki/Feedforward_neural_network (Feedforward) +""" + +import numpy + + +class TwoHiddenLayerNeuralNetwork: + def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None: + """ + This function initializes the TwoHiddenLayerNeuralNetwork class with random + weights for every layer and initializes predicted output with zeroes. + + input_array : input values for training the neural network (i.e training data) . + output_array : expected output values of the given inputs. + """ + + # Input values provided for training the model. + self.input_array = input_array + + # Random initial weights are assigned where first argument is the + # number of nodes in previous layer and second argument is the + # number of nodes in the next layer. + + # Random initial weights are assigned. + # self.input_array.shape[1] is used to represent number of nodes in input layer. + # First hidden layer consists of 4 nodes. + self.input_layer_and_first_hidden_layer_weights = numpy.random.rand( + self.input_array.shape[1], 4 + ) + + # Random initial values for the first hidden layer. + # First hidden layer has 4 nodes. + # Second hidden layer has 3 nodes. + self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand( + 4, 3 + ) + + # Random initial values for the second hidden layer. + # Second hidden layer has 3 nodes. + # Output layer has 1 node. + self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1) + + # Real output values provided. + self.output_array = output_array + + # Predicted output values by the neural network. 
+        # Predicted_output array initially consists of zeroes.
+        self.predicted_output = numpy.zeros(output_array.shape)
+
+    def feedforward(self) -> numpy.ndarray:
+        """
+        The information moves in only one direction i.e. forward from the input nodes,
+        through the two hidden layers and to the output nodes.
+        There are no cycles or loops in the network.
+
+        Return layer_between_second_hidden_layer_and_output
+        (i.e. the last layer of the neural network).
+
+        >>> input_val = numpy.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
+        >>> output_val = numpy.array(([0], [0], [0]), dtype=float)
+        >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
+        >>> res = nn.feedforward()
+        >>> array_sum = numpy.sum(res)
+        >>> numpy.isnan(array_sum)
+        False
+        """
+        # Layer_between_input_and_first_hidden_layer is the layer connecting the
+        # input nodes with the first hidden layer nodes.
+        self.layer_between_input_and_first_hidden_layer = sigmoid(
+            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
+        )
+
+        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
+        # connecting the first hidden set of nodes with the second hidden set of nodes.
+        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
+            numpy.dot(
+                self.layer_between_input_and_first_hidden_layer,
+                self.first_hidden_layer_and_second_hidden_layer_weights,
+            )
+        )
+
+        # layer_between_second_hidden_layer_and_output is the layer connecting
+        # second hidden layer with the output node.
+        self.layer_between_second_hidden_layer_and_output = sigmoid(
+            numpy.dot(
+                self.layer_between_first_hidden_layer_and_second_hidden_layer,
+                self.second_hidden_layer_and_output_layer_weights,
+            )
+        )
+
+        return self.layer_between_second_hidden_layer_and_output
+
+    def back_propagation(self) -> None:
+        """
+        Function for fine-tuning the weights of the neural net based on the
+        error rate obtained in the previous epoch (i.e., iteration).
+        Weights are updated using the derivative of the sigmoid activation function.
+
+        >>> input_val = numpy.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
+        >>> output_val = numpy.array(([0], [0], [0]), dtype=float)
+        >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
+        >>> res = nn.feedforward()
+        >>> nn.back_propagation()
+        >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights
+        >>> (res == updated_weights).all()
+        False
+        """
+
+        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
+            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
+            2
+            * (self.output_array - self.predicted_output)
+            * sigmoid_derivative(self.predicted_output),
+        )
+        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
+            self.layer_between_input_and_first_hidden_layer.T,
+            numpy.dot(
+                2
+                * (self.output_array - self.predicted_output)
+                * sigmoid_derivative(self.predicted_output),
+                self.second_hidden_layer_and_output_layer_weights.T,
+            )
+            * sigmoid_derivative(
+                self.layer_between_first_hidden_layer_and_second_hidden_layer
+            ),
+        )
+        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
+            self.input_array.T,
+            numpy.dot(
+                numpy.dot(
+                    2
+                    * (self.output_array - self.predicted_output)
+                    * sigmoid_derivative(self.predicted_output),
+                    self.second_hidden_layer_and_output_layer_weights.T,
+                )
+                * sigmoid_derivative(
+                    self.layer_between_first_hidden_layer_and_second_hidden_layer
+                ),
+                self.first_hidden_layer_and_second_hidden_layer_weights.T,
+            )
+            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
+        )
+
+        self.input_layer_and_first_hidden_layer_weights += (
+            updated_input_layer_and_first_hidden_layer_weights
+        )
+        self.first_hidden_layer_and_second_hidden_layer_weights += (
+            updated_first_hidden_layer_and_second_hidden_layer_weights
+        )
+        self.second_hidden_layer_and_output_layer_weights += (
+            updated_second_hidden_layer_and_output_layer_weights
+        )
+
+    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
+        """
+        Performs the feedforward and back propagation process for the
+        given number of iterations.
+        Every iteration will update the weights of the neural network.
+
+        output : real output values, required for calculating loss.
+        iterations : number of times the weights are to be updated.
+        give_loss : boolean value, If True then prints loss for each iteration,
+                    If False then nothing is printed
+
+        >>> input_val = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float)
+        >>> output_val = numpy.array(([0], [1], [1]), dtype=float)
+        >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val)
+        >>> first_iteration_weights = nn.feedforward()
+        >>> nn.back_propagation()
+        >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights
+        >>> (first_iteration_weights == updated_weights).all()
+        False
+        """
+        for iteration in range(1, iterations + 1):
+            self.output = self.feedforward()
+            self.back_propagation()
+            if give_loss:
+                loss = numpy.mean(numpy.square(output - self.feedforward()))
+                print(f"Iteration {iteration} Loss: {loss}")
+
+    def predict(self, input: numpy.ndarray) -> int:
+        """
+        Predicts the output for the given input values using
+        the trained neural network.
+
+        The output value given by the model ranges in-between 0 and 1.
+        The predict function returns 1 if the model value is greater
+        than the threshold value else returns 0,
+        as the real output values are in binary.
+ + >>> input_val = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) + >>> output_val = numpy.array(([0], [1], [1]), dtype=float) + >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) + >>> nn.train(output_val, 1000, False) + >>> nn.predict([0,1,0]) + 1 + """ + + # Input values for which the predictions are to be made. + self.array = input + + self.layer_between_input_and_first_hidden_layer = sigmoid( + numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights) + ) + + self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid( + numpy.dot( + self.layer_between_input_and_first_hidden_layer, + self.first_hidden_layer_and_second_hidden_layer_weights, + ) + ) + + self.layer_between_second_hidden_layer_and_output = sigmoid( + numpy.dot( + self.layer_between_first_hidden_layer_and_second_hidden_layer, + self.second_hidden_layer_and_output_layer_weights, + ) + ) + + return int(self.layer_between_second_hidden_layer_and_output > 0.6) + + +def sigmoid(value: numpy.ndarray) -> numpy.ndarray: + """ + Applies sigmoid activation function. + + return normalized values + + >>> sigmoid(numpy.array(([1, 0, 2], [1, 0, 0]), dtype=numpy.float64)) + array([[0.73105858, 0.5 , 0.88079708], + [0.73105858, 0.5 , 0.5 ]]) + """ + return 1 / (1 + numpy.exp(-value)) + + +def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray: + """ + Provides the derivative value of the sigmoid function. + + returns derivative of the sigmoid value + + >>> sigmoid_derivative(numpy.array(([1, 0, 2], [1, 0, 0]), dtype=numpy.float64)) + array([[ 0., 0., -2.], + [ 0., 0., 0.]]) + """ + return (value) * (1 - (value)) + + +def example() -> int: + """ + Example for "how to use the neural network class and use the + respected methods for the desired output". + Calls the TwoHiddenLayerNeuralNetwork class and + provides the fixed input output values to the model. + Model is trained for a fixed amount of iterations then the predict method is called. + In this example the output is divided into 2 classes i.e. binary classification, + the two classes are represented by '0' and '1'. + + >>> example() + 1 + """ + # Input values. + input = numpy.array( + ( + [0, 0, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 1], + [1, 0, 0], + [1, 0, 1], + [1, 1, 0], + [1, 1, 1], + ), + dtype=numpy.float64, + ) + + # True output values for the given input values. + output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64) + + # Calling neural network class. + neural_network = TwoHiddenLayerNeuralNetwork(input_array=input, output_array=output) + + # Calling training function. + # Set give_loss to True if you want to see loss in every iteration. 
+ neural_network.train(output=output, iterations=10, give_loss=False) + + return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64)) + + +if __name__ == "__main__": + example() From 00e279ea44d34f8daf363dbdf1d5bee72a8da4c3 Mon Sep 17 00:00:00 2001 From: shan7030 <42472191+shan7030@users.noreply.github.com> Date: Mon, 28 Dec 2020 09:34:40 +0530 Subject: [PATCH 0059/1543] [mypy] Add/fix type annotations for scheduling algorithms (#4074) * Fix mypy errors for scheduling/first_come_first_served * Fix mypy errors for scheduling/round_robin.py * Fix mypy errors for scheduling/shortest_job_first.py * Fix isort errors --- scheduling/first_come_first_served.py | 12 ++++++------ scheduling/round_robin.py | 9 ++++----- scheduling/shortest_job_first.py | 16 ++++++++-------- 3 files changed, 18 insertions(+), 19 deletions(-) diff --git a/scheduling/first_come_first_served.py b/scheduling/first_come_first_served.py index c5f61720f97e..b51fc9fe0c04 100644 --- a/scheduling/first_come_first_served.py +++ b/scheduling/first_come_first_served.py @@ -2,10 +2,10 @@ # In this Algorithm we just care about the order that the processes arrived # without carring about their duration time # https://en.wikipedia.org/wiki/Scheduling_(computing)#First_come,_first_served -from __future__ import annotations +from typing import List -def calculate_waiting_times(duration_times: list[int]) -> list[int]: +def calculate_waiting_times(duration_times: List[int]) -> List[int]: """ This function calculates the waiting time of some processes that have a specified duration time. @@ -24,8 +24,8 @@ def calculate_waiting_times(duration_times: list[int]) -> list[int]: def calculate_turnaround_times( - duration_times: list[int], waiting_times: list[int] -) -> list[int]: + duration_times: List[int], waiting_times: List[int] +) -> List[int]: """ This function calculates the turnaround time of some processes. Return: The time difference between the completion time and the @@ -44,7 +44,7 @@ def calculate_turnaround_times( ] -def calculate_average_turnaround_time(turnaround_times: list[int]) -> float: +def calculate_average_turnaround_time(turnaround_times: List[int]) -> float: """ This function calculates the average of the turnaround times Return: The average of the turnaround times. @@ -58,7 +58,7 @@ def calculate_average_turnaround_time(turnaround_times: list[int]) -> float: return sum(turnaround_times) / len(turnaround_times) -def calculate_average_waiting_time(waiting_times: list[int]) -> float: +def calculate_average_waiting_time(waiting_times: List[int]) -> float: """ This function calculates the average of the waiting times Return: The average of the waiting times. diff --git a/scheduling/round_robin.py b/scheduling/round_robin.py index e8d54dd9a553..4a79301c1816 100644 --- a/scheduling/round_robin.py +++ b/scheduling/round_robin.py @@ -3,12 +3,11 @@ In Round Robin each process is assigned a fixed time slot in a cyclic way. https://en.wikipedia.org/wiki/Round-robin_scheduling """ -from __future__ import annotations - from statistics import mean +from typing import List -def calculate_waiting_times(burst_times: list[int]) -> list[int]: +def calculate_waiting_times(burst_times: List[int]) -> List[int]: """ Calculate the waiting times of a list of processes that have a specified duration. 
@@ -41,8 +40,8 @@ def calculate_waiting_times(burst_times: list[int]) -> list[int]: def calculate_turn_around_times( - burst_times: list[int], waiting_times: list[int] -) -> list[int]: + burst_times: List[int], waiting_times: List[int] +) -> List[int]: """ >>> calculate_turn_around_times([1, 2, 3, 4], [0, 1, 3]) [1, 3, 6] diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index f9e2ad975627..a49d037d6a23 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -3,17 +3,17 @@ Please note arrival time and burst Please use spaces to separate times entered. """ -from __future__ import annotations +from typing import List import pandas as pd def calculate_waitingtime( - arrival_time: list[int], burst_time: list[int], no_of_processes: int -) -> list[int]: + arrival_time: List[int], burst_time: List[int], no_of_processes: int +) -> List[int]: """ Calculate the waiting time of each processes - Return: list of waiting times. + Return: List of waiting times. >>> calculate_waitingtime([1,2,3,4],[3,3,5,1],4) [0, 3, 5, 0] >>> calculate_waitingtime([1,2,3],[2,5,1],3) @@ -72,8 +72,8 @@ def calculate_waitingtime( def calculate_turnaroundtime( - burst_time: list[int], no_of_processes: int, waiting_time: list[int] -) -> list[int]: + burst_time: List[int], no_of_processes: int, waiting_time: List[int] +) -> List[int]: """ Calculate the turn around time of each Processes Return: list of turn around times. @@ -91,8 +91,8 @@ def calculate_turnaroundtime( def calculate_average_times( - waiting_time: list[int], turn_around_time: list[int], no_of_processes: int -): + waiting_time: List[int], turn_around_time: List[int], no_of_processes: int +) -> None: """ This function calculates the average of the waiting & turnaround times Prints: Average Waiting time & Average Turn Around Time From 80f5213df5726c9268b6e3771ae6aaf1b6e3bc82 Mon Sep 17 00:00:00 2001 From: fpringle Date: Mon, 28 Dec 2020 08:51:02 +0100 Subject: [PATCH 0060/1543] Add solution for Project Euler problem 107 (#4066) * Added solution for Project Euler problem 107 * Doctests and better variable names * Type hints * Small edits * Forward reference for typing hint * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 + project_euler/problem_107/__init__.py | 0 project_euler/problem_107/p107_network.txt | 40 +++++++ project_euler/problem_107/sol1.py | 128 +++++++++++++++++++++ project_euler/problem_107/test_network.txt | 7 ++ 5 files changed, 178 insertions(+) create mode 100644 project_euler/problem_107/__init__.py create mode 100644 project_euler/problem_107/p107_network.txt create mode 100644 project_euler/problem_107/sol1.py create mode 100644 project_euler/problem_107/test_network.txt diff --git a/DIRECTORY.md b/DIRECTORY.md index d73ae11eb7c2..4f17cf9c03ed 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -499,6 +499,7 @@ * [Minimum Cut](https://github.com/TheAlgorithms/Python/blob/master/networking_flow/minimum_cut.py) ## Neural Network + * [2 Hidden Layers Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/2_hidden_layers_neural_network.py) * [Back Propagation Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/convolution_neural_network.py) * 
[Perceptron](https://github.com/TheAlgorithms/Python/blob/master/neural_network/perceptron.py) @@ -748,6 +749,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_101/sol1.py) * Problem 102 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_102/sol1.py) + * Problem 107 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_107/sol1.py) * Problem 112 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_112/sol1.py) * Problem 113 diff --git a/project_euler/problem_107/__init__.py b/project_euler/problem_107/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_107/p107_network.txt b/project_euler/problem_107/p107_network.txt new file mode 100644 index 000000000000..fcc3c4192b96 --- /dev/null +++ b/project_euler/problem_107/p107_network.txt @@ -0,0 +1,40 @@ +-,-,-,427,668,495,377,678,-,177,-,-,870,-,869,624,300,609,131,-,251,-,-,-,856,221,514,-,591,762,182,56,-,884,412,273,636,-,-,774 +-,-,262,-,-,508,472,799,-,956,578,363,940,143,-,162,122,910,-,729,802,941,922,573,531,539,667,607,-,920,-,-,315,649,937,-,185,102,636,289 +-,262,-,-,926,-,958,158,647,47,621,264,81,-,402,813,649,386,252,391,264,637,349,-,-,-,108,-,727,225,578,699,-,898,294,-,575,168,432,833 +427,-,-,-,366,-,-,635,-,32,962,468,893,854,718,427,448,916,258,-,760,909,529,311,404,-,-,588,680,875,-,615,-,409,758,221,-,-,76,257 +668,-,926,366,-,-,-,250,268,-,503,944,-,677,-,727,793,457,981,191,-,-,-,351,969,925,987,328,282,589,-,873,477,-,-,19,450,-,-,- +495,508,-,-,-,-,-,765,711,819,305,302,926,-,-,582,-,861,-,683,293,-,-,66,-,27,-,-,290,-,786,-,554,817,33,-,54,506,386,381 +377,472,958,-,-,-,-,-,-,120,42,-,134,219,457,639,538,374,-,-,-,966,-,-,-,-,-,449,120,797,358,232,550,-,305,997,662,744,686,239 +678,799,158,635,250,765,-,-,-,35,-,106,385,652,160,-,890,812,605,953,-,-,-,79,-,712,613,312,452,-,978,900,-,901,-,-,225,533,770,722 +-,-,647,-,268,711,-,-,-,283,-,172,-,663,236,36,403,286,986,-,-,810,761,574,53,793,-,-,777,330,936,883,286,-,174,-,-,-,828,711 +177,956,47,32,-,819,120,35,283,-,50,-,565,36,767,684,344,489,565,-,-,103,810,463,733,665,494,644,863,25,385,-,342,470,-,-,-,730,582,468 +-,578,621,962,503,305,42,-,-,50,-,155,519,-,-,256,990,801,154,53,474,650,402,-,-,-,966,-,-,406,989,772,932,7,-,823,391,-,-,933 +-,363,264,468,944,302,-,106,172,-,155,-,-,-,380,438,-,41,266,-,-,104,867,609,-,270,861,-,-,165,-,675,250,686,995,366,191,-,433,- +870,940,81,893,-,926,134,385,-,565,519,-,-,313,851,-,-,-,248,220,-,826,359,829,-,234,198,145,409,68,359,-,814,218,186,-,-,929,203,- +-,143,-,854,677,-,219,652,663,36,-,-,313,-,132,-,433,598,-,-,168,870,-,-,-,128,437,-,383,364,966,227,-,-,807,993,-,-,526,17 +869,-,402,718,-,-,457,160,236,767,-,380,851,132,-,-,596,903,613,730,-,261,-,142,379,885,89,-,848,258,112,-,900,-,-,818,639,268,600,- +624,162,813,427,727,582,639,-,36,684,256,438,-,-,-,-,539,379,664,561,542,-,999,585,-,-,321,398,-,-,950,68,193,-,697,-,390,588,848,- +300,122,649,448,793,-,538,890,403,344,990,-,-,433,596,539,-,-,73,-,318,-,-,500,-,968,-,291,-,-,765,196,504,757,-,542,-,395,227,148 +609,910,386,916,457,861,374,812,286,489,801,41,-,598,903,379,-,-,-,946,136,399,-,941,707,156,757,258,251,-,807,-,-,-,461,501,-,-,616,- +131,-,252,258,981,-,-,605,986,565,154,266,248,-,613,664,73,-,-,686,-,-,575,627,817,282,-,698,398,222,-,649,-,-,-,-,-,654,-,- 
+-,729,391,-,191,683,-,953,-,-,53,-,220,-,730,561,-,946,686,-,-,389,729,553,304,703,455,857,260,-,991,182,351,477,867,-,-,889,217,853 +251,802,264,760,-,293,-,-,-,-,474,-,-,168,-,542,318,136,-,-,-,-,392,-,-,-,267,407,27,651,80,927,-,974,977,-,-,457,117,- +-,941,637,909,-,-,966,-,810,103,650,104,826,870,261,-,-,399,-,389,-,-,-,202,-,-,-,-,867,140,403,962,785,-,511,-,1,-,707,- +-,922,349,529,-,-,-,-,761,810,402,867,359,-,-,999,-,-,575,729,392,-,-,388,939,-,959,-,83,463,361,-,-,512,931,-,224,690,369,- +-,573,-,311,351,66,-,79,574,463,-,609,829,-,142,585,500,941,627,553,-,202,388,-,164,829,-,620,523,639,936,-,-,490,-,695,-,505,109,- +856,531,-,404,969,-,-,-,53,733,-,-,-,-,379,-,-,707,817,304,-,-,939,164,-,-,616,716,728,-,889,349,-,963,150,447,-,292,586,264 +221,539,-,-,925,27,-,712,793,665,-,270,234,128,885,-,968,156,282,703,-,-,-,829,-,-,-,822,-,-,-,736,576,-,697,946,443,-,205,194 +514,667,108,-,987,-,-,613,-,494,966,861,198,437,89,321,-,757,-,455,267,-,959,-,616,-,-,-,349,156,339,-,102,790,359,-,439,938,809,260 +-,607,-,588,328,-,449,312,-,644,-,-,145,-,-,398,291,258,698,857,407,-,-,620,716,822,-,-,293,486,943,-,779,-,6,880,116,775,-,947 +591,-,727,680,282,290,120,452,777,863,-,-,409,383,848,-,-,251,398,260,27,867,83,523,728,-,349,293,-,212,684,505,341,384,9,992,507,48,-,- +762,920,225,875,589,-,797,-,330,25,406,165,68,364,258,-,-,-,222,-,651,140,463,639,-,-,156,486,212,-,-,349,723,-,-,186,-,36,240,752 +182,-,578,-,-,786,358,978,936,385,989,-,359,966,112,950,765,807,-,991,80,403,361,936,889,-,339,943,684,-,-,965,302,676,725,-,327,134,-,147 +56,-,699,615,873,-,232,900,883,-,772,675,-,227,-,68,196,-,649,182,927,962,-,-,349,736,-,-,505,349,965,-,474,178,833,-,-,555,853,- +-,315,-,-,477,554,550,-,286,342,932,250,814,-,900,193,504,-,-,351,-,785,-,-,-,576,102,779,341,723,302,474,-,689,-,-,-,451,-,- +884,649,898,409,-,817,-,901,-,470,7,686,218,-,-,-,757,-,-,477,974,-,512,490,963,-,790,-,384,-,676,178,689,-,245,596,445,-,-,343 +412,937,294,758,-,33,305,-,174,-,-,995,186,807,-,697,-,461,-,867,977,511,931,-,150,697,359,6,9,-,725,833,-,245,-,949,-,270,-,112 +273,-,-,221,19,-,997,-,-,-,823,366,-,993,818,-,542,501,-,-,-,-,-,695,447,946,-,880,992,186,-,-,-,596,949,-,91,-,768,273 +636,185,575,-,450,54,662,225,-,-,391,191,-,-,639,390,-,-,-,-,-,1,224,-,-,443,439,116,507,-,327,-,-,445,-,91,-,248,-,344 +-,102,168,-,-,506,744,533,-,730,-,-,929,-,268,588,395,-,654,889,457,-,690,505,292,-,938,775,48,36,134,555,451,-,270,-,248,-,371,680 +-,636,432,76,-,386,686,770,828,582,-,433,203,526,600,848,227,616,-,217,117,707,369,109,586,205,809,-,-,240,-,853,-,-,-,768,-,371,-,540 +774,289,833,257,-,381,239,722,711,468,933,-,-,17,-,-,148,-,-,853,-,-,-,-,264,194,260,947,-,752,147,-,-,343,112,273,344,680,540,- diff --git a/project_euler/problem_107/sol1.py b/project_euler/problem_107/sol1.py new file mode 100644 index 000000000000..80a10e499f76 --- /dev/null +++ b/project_euler/problem_107/sol1.py @@ -0,0 +1,128 @@ +""" +The following undirected network consists of seven vertices and twelve edges +with a total weight of 243. + +The same network can be represented by the matrix below. + + A B C D E F G +A - 16 12 21 - - - +B 16 - - 17 20 - - +C 12 - - 28 - 31 - +D 21 17 28 - 18 19 23 +E - 20 - 18 - - 11 +F - - 31 19 - - 27 +G - - - 23 11 27 - + +However, it is possible to optimise the network by removing some edges and still +ensure that all points on the network remain connected. The network which achieves +the maximum saving is shown below. 
It has a weight of 93, representing a saving of +243 - 93 = 150 from the original network. + +Using network.txt (right click and 'Save Link/Target As...'), a 6K text file +containing a network with forty vertices, and given in matrix form, find the maximum +saving which can be achieved by removing redundant edges whilst ensuring that the +network remains connected. + +Solution: + We use Prim's algorithm to find a Minimum Spanning Tree. + Reference: https://en.wikipedia.org/wiki/Prim%27s_algorithm +""" + +import os +from typing import Dict, List, Mapping, Set, Tuple + +EdgeT = Tuple[int, int] + + +class Graph: + """ + A class representing an undirected weighted graph. + """ + + def __init__(self, vertices: Set[int], edges: Mapping[EdgeT, int]) -> None: + self.vertices: Set[int] = vertices + self.edges: Dict[EdgeT, int] = { + (min(edge), max(edge)): weight for edge, weight in edges.items() + } + + def add_edge(self, edge: EdgeT, weight: int) -> None: + """ + Add a new edge to the graph. + >>> graph = Graph({1, 2}, {(2, 1): 4}) + >>> graph.add_edge((3, 1), 5) + >>> sorted(graph.vertices) + [1, 2, 3] + >>> sorted([(v,k) for k,v in graph.edges.items()]) + [(4, (1, 2)), (5, (1, 3))] + """ + self.vertices.add(edge[0]) + self.vertices.add(edge[1]) + self.edges[(min(edge), max(edge))] = weight + + def prims_algorithm(self) -> "Graph": + """ + Run Prim's algorithm to find the minimum spanning tree. + Reference: https://en.wikipedia.org/wiki/Prim%27s_algorithm + >>> graph = Graph({1,2,3,4},{(1,2):5, (1,3):10, (1,4):20, (2,4):30, (3,4):1}) + >>> mst = graph.prims_algorithm() + >>> sorted(mst.vertices) + [1, 2, 3, 4] + >>> sorted(mst.edges) + [(1, 2), (1, 3), (3, 4)] + """ + subgraph: Graph = Graph({min(self.vertices)}, {}) + min_edge: EdgeT + min_weight: int + edge: EdgeT + weight: int + + while len(subgraph.vertices) < len(self.vertices): + min_weight = max(self.edges.values()) + 1 + for edge, weight in self.edges.items(): + if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): + if weight < min_weight: + min_edge = edge + min_weight = weight + + subgraph.add_edge(min_edge, min_weight) + + return subgraph + + +def solution(filename: str = "p107_network.txt") -> int: + """ + Find the maximum saving which can be achieved by removing redundant edges + whilst ensuring that the network remains connected. 
+    >>> solution("test_network.txt")
+    150
+    """
+    script_dir: str = os.path.abspath(os.path.dirname(__file__))
+    network_file: str = os.path.join(script_dir, filename)
+    adjacency_matrix: List[List[str]]
+    edges: Dict[EdgeT, int] = dict()
+    data: List[str]
+    edge1: int
+    edge2: int
+
+    with open(network_file, "r") as f:
+        data = f.read().strip().split("\n")
+
+    adjacency_matrix = [line.split(",") for line in data]
+
+    for edge1 in range(1, len(adjacency_matrix)):
+        for edge2 in range(edge1):
+            if adjacency_matrix[edge1][edge2] != "-":
+                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
+
+    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
+
+    subgraph: Graph = graph.prims_algorithm()
+
+    initial_total: int = sum(graph.edges.values())
+    optimal_total: int = sum(subgraph.edges.values())
+
+    return initial_total - optimal_total
+
+
+if __name__ == "__main__":
+    print(f"{solution() = }")
diff --git a/project_euler/problem_107/test_network.txt b/project_euler/problem_107/test_network.txt
new file mode 100644
index 000000000000..f5f2accb5720
--- /dev/null
+++ b/project_euler/problem_107/test_network.txt
@@ -0,0 +1,7 @@
+-,16,12,21,-,-,-
+16,-,-,17,20,-,-
+12,-,-,28,-,31,-
+21,17,28,-,18,19,23
+-,20,-,18,-,-,11
+-,-,31,19,-,-,27
+-,-,-,23,11,27,-
From dd4b2656806e64d1c28301ded3b5c4d863de76db Mon Sep 17 00:00:00 2001
From: SiddhantJain15
Date: Mon, 28 Dec 2020 13:36:57 +0530
Subject: [PATCH 0061/1543] Add function to calculate area of triangle using Heron's formula (#4065)

* Update area.py

Modified area of triangle function. Added a new algorithm to calculate area when 3 sides are known

* Add files via upload

* Update area.py

* Update area.py

* Update area.py

* Update area.py

* Remove unnecessary whitespace

Co-authored-by: Dhruv Manilawala
---
 maths/area.py | 81 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 55 insertions(+), 26 deletions(-)

diff --git a/maths/area.py b/maths/area.py
index 24216e223ebf..8689f323cc9a 100644
--- a/maths/area.py
+++ b/maths/area.py
@@ -1,7 +1,7 @@
 """
 Find the area of various geometric shapes
 """
-from math import pi
+from math import pi, sqrt
 
 
 def surface_area_cube(side_length: float) -> float:
@@ -26,7 +26,7 @@ def surface_area_sphere(radius: float) -> float:
     """
     Calculate the Surface Area of a Sphere.
     Wikipedia reference: https://en.wikipedia.org/wiki/Sphere
-    :return 4 * pi * r^2
+    Formula: 4 * pi * r^2
 
     >>> surface_area_sphere(5)
     314.1592653589793
@@ -44,7 +44,7 @@ def surface_area_sphere(radius: float) -> float:
 
 def area_rectangle(length: float, width: float) -> float:
     """
-    Calculate the area of a rectangle
+    Calculate the area of a rectangle.
 
     >>> area_rectangle(10, 20)
     200
@@ -68,7 +68,7 @@ def area_rectangle(length: float, width: float) -> float:
 
 def area_square(side_length: float) -> float:
     """
-    Calculate the area of a square
+    Calculate the area of a square.
 
     >>> area_square(10)
     100
@@ -84,7 +84,7 @@ def area_square(side_length: float) -> float:
 
 def area_triangle(base: float, height: float) -> float:
     """
-    Calculate the area of a triangle
+    Calculate the area of a triangle given the base and height.
 
     >>> area_triangle(10, 10)
     50.0
@@ -106,9 +106,42 @@ def area_triangle(base: float, height: float) -> float:
     return (base * height) / 2
 
 
+def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
+    """
+    Calculate the area of a triangle when the lengths of the 3 sides are known.
+ + This function uses Heron's formula: https://en.wikipedia.org/wiki/Heron%27s_formula + + >>> area_triangle_three_sides(5, 12, 13) + 30.0 + >>> area_triangle_three_sides(10, 11, 12) + 51.521233486786784 + >>> area_triangle_three_sides(-1, -2, -1) + Traceback (most recent call last): + ... + ValueError: area_triangle_three_sides() only accepts non-negative values + >>> area_triangle_three_sides(1, -2, 1) + Traceback (most recent call last): + ... + ValueError: area_triangle_three_sides() only accepts non-negative values + """ + if side1 < 0 or side2 < 0 or side3 < 0: + raise ValueError("area_triangle_three_sides() only accepts non-negative values") + elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1: + raise ValueError("Given three sides do not form a triangle") + semi_perimeter = (side1 + side2 + side3) / 2 + area = sqrt( + semi_perimeter + * (semi_perimeter - side1) + * (semi_perimeter - side2) + * (semi_perimeter - side3) + ) + return area + + def area_parallelogram(base: float, height: float) -> float: """ - Calculate the area of a parallelogram + Calculate the area of a parallelogram. >>> area_parallelogram(10, 20) 200 @@ -132,7 +165,7 @@ def area_parallelogram(base: float, height: float) -> float: def area_trapezium(base1: float, base2: float, height: float) -> float: """ - Calculate the area of a trapezium + Calculate the area of a trapezium. >>> area_trapezium(10, 20, 30) 450.0 @@ -172,7 +205,7 @@ def area_trapezium(base1: float, base2: float, height: float) -> float: def area_circle(radius: float) -> float: """ - Calculate the area of a circle + Calculate the area of a circle. >>> area_circle(20) 1256.6370614359173 @@ -188,7 +221,7 @@ def area_circle(radius: float) -> float: def area_ellipse(radius_x: float, radius_y: float) -> float: """ - Calculate the area of a ellipse + Calculate the area of a ellipse. >>> area_ellipse(10, 10) 314.1592653589793 @@ -214,7 +247,7 @@ def area_ellipse(radius_x: float, radius_y: float) -> float: def area_rhombus(diagonal_1: float, diagonal_2: float) -> float: """ - Calculate the area of a rhombus + Calculate the area of a rhombus. 
>>> area_rhombus(10, 20) 100.0 @@ -236,24 +269,20 @@ def area_rhombus(diagonal_1: float, diagonal_2: float) -> float: return 1 / 2 * diagonal_1 * diagonal_2 -def main(): - print("Areas of various geometric shapes: \n") - print(f"Rectangle: {area_rectangle(10, 20)}") - print(f"Square: {area_square(10)}") - print(f"Triangle: {area_triangle(10, 10)}") - print(f"Parallelogram: {area_parallelogram(10, 20)}") - print(f"Trapezium: {area_trapezium(10, 20, 30)}") - print(f"Circle: {area_circle(20)}") - print("\nSurface Areas of various geometric shapes: \n") - print(f"Cube: {surface_area_cube(20)}") - print(f"Sphere: {surface_area_sphere(20)}") - print(f"Rhombus: {area_rhombus(10, 20)}") - - if __name__ == "__main__": - import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests - main() + print("[DEMO] Areas of various geometric shapes: \n") + print(f"Rectangle: {area_rectangle(10, 20) = }") + print(f"Square: {area_square(10) = }") + print(f"Triangle: {area_triangle(10, 10) = }") + print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }") + print(f"Parallelogram: {area_parallelogram(10, 20) = }") + print(f"Trapezium: {area_trapezium(10, 20, 30) = }") + print(f"Circle: {area_circle(20) = }") + print("\nSurface Areas of various geometric shapes: \n") + print(f"Cube: {surface_area_cube(20) = }") + print(f"Sphere: {surface_area_sphere(20) = }") + print(f"Rhombus: {area_rhombus(10, 20) = }") From 677d48d6c06234750a04e75ee0f9d5a349d78c54 Mon Sep 17 00:00:00 2001 From: Kanak <63765823+Cosmicoppai@users.noreply.github.com> Date: Mon, 11 Jan 2021 16:55:15 +0530 Subject: [PATCH 0062/1543] Rename coin_change.py to minimum_coin_change.py (#4108) --- dynamic_programming/{coin_change.py => minimum_coin_change.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename dynamic_programming/{coin_change.py => minimum_coin_change.py} (100%) diff --git a/dynamic_programming/coin_change.py b/dynamic_programming/minimum_coin_change.py similarity index 100% rename from dynamic_programming/coin_change.py rename to dynamic_programming/minimum_coin_change.py From 03d34350f63fea41bac77264b7380f22c4c912f2 Mon Sep 17 00:00:00 2001 From: Nwachukwu Chidiebere Godwin Date: Tue, 12 Jan 2021 14:41:48 +0100 Subject: [PATCH 0063/1543] Graph list patch (#4113) * new implementation for adjacency list graph * add example code for undirected graph * reduce length to 88 columns max to fix build errors7 * fix pre commit issues * replace print_list method with __str__ * return object in add_edge method to enable fluent syntax * improve class docstring and include doctests * add end of file line * fix pre-commit issues * remove __str__ method * trigger build * Update graph_list.py * Update graph_list.py Co-authored-by: gnc Co-authored-by: Christian Clauss --- graphs/graph_list.py | 169 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 134 insertions(+), 35 deletions(-) diff --git a/graphs/graph_list.py b/graphs/graph_list.py index a812fecd961e..bab6d6893a89 100644 --- a/graphs/graph_list.py +++ b/graphs/graph_list.py @@ -1,44 +1,143 @@ -#!/usr/bin/python +#!/usr/bin/env python3 -# Author: OMKAR PATHAK +# Author: OMKAR PATHAK, Nwachukwu Chidiebere -# We can use Python's dictionary for constructing the graph. +# Use a Python dictionary to construct the graph. 
+from pprint import pformat -class AdjacencyList: - def __init__(self): - self.adj_list = {} - def add_edge(self, from_vertex: int, to_vertex: int) -> None: - # check if vertex is already present - if from_vertex in self.adj_list: - self.adj_list[from_vertex].append(to_vertex) - else: - self.adj_list[from_vertex] = [to_vertex] +class GraphAdjacencyList: + """ + Adjacency List type Graph Data Structure that accounts for directed and undirected + Graphs. Initialize graph object indicating whether it's directed or undirected. - def print_list(self) -> None: - for i in self.adj_list: - print((i, "->", " -> ".join([str(j) for j in self.adj_list[i]]))) + Directed graph example: + >>> d_graph = GraphAdjacencyList() + >>> d_graph + {} + >>> d_graph.add_edge(0, 1) + {0: [1], 1: []} + >>> d_graph.add_edge(1, 2).add_edge(1, 4).add_edge(1, 5) + {0: [1], 1: [2, 4, 5], 2: [], 4: [], 5: []} + >>> d_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7) + {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []} + >>> print(d_graph) + {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []} + >>> print(repr(d_graph)) + {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []} + + Undirected graph example: + >>> u_graph = GraphAdjacencyList(directed=False) + >>> u_graph.add_edge(0, 1) + {0: [1], 1: [0]} + >>> u_graph.add_edge(1, 2).add_edge(1, 4).add_edge(1, 5) + {0: [1], 1: [0, 2, 4, 5], 2: [1], 4: [1], 5: [1]} + >>> u_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7) + {0: [1, 2], 1: [0, 2, 4, 5], 2: [1, 0, 6, 7], 4: [1], 5: [1], 6: [2], 7: [2]} + >>> u_graph.add_edge(4, 5) + {0: [1, 2], + 1: [0, 2, 4, 5], + 2: [1, 0, 6, 7], + 4: [1, 5], + 5: [1, 4], + 6: [2], + 7: [2]} + >>> print(u_graph) + {0: [1, 2], + 1: [0, 2, 4, 5], + 2: [1, 0, 6, 7], + 4: [1, 5], + 5: [1, 4], + 6: [2], + 7: [2]} + >>> print(repr(u_graph)) + {0: [1, 2], + 1: [0, 2, 4, 5], + 2: [1, 0, 6, 7], + 4: [1, 5], + 5: [1, 4], + 6: [2], + 7: [2]} + """ + + def __init__(self, directed: bool = True): + """ + Parameters: + directed: (bool) Indicates if graph is directed or undirected. Default is True. + """ + + self.adj_list = {} # dictionary of lists + self.directed = directed + + def add_edge(self, source_vertex: int, destination_vertex: int) -> object: + """ + Connects vertices together. Creates and Edge from source vertex to destination + vertex. + Vertices will be created if not found in graph + """ + + if not self.directed: # For undirected graphs + # if both source vertex and destination vertex are both present in the + # adjacency list, add destination vertex to source vertex list of adjacent + # vertices and add source vertex to destination vertex list of adjacent + # vertices. + if source_vertex in self.adj_list and destination_vertex in self.adj_list: + self.adj_list[source_vertex].append(destination_vertex) + self.adj_list[destination_vertex].append(source_vertex) + # if only source vertex is present in adjacency list, add destination vertex + # to source vertex list of adjacent vertices, then create a new vertex with + # destination vertex as key and assign a list containing the source vertex + # as it's first adjacent vertex. 
+ elif source_vertex in self.adj_list: + self.adj_list[source_vertex].append(destination_vertex) + self.adj_list[destination_vertex] = [source_vertex] + # if only destination vertex is present in adjacency list, add source vertex + # to destination vertex list of adjacent vertices, then create a new vertex + # with source vertex as key and assign a list containing the source vertex + # as it's first adjacent vertex. + elif destination_vertex in self.adj_list: + self.adj_list[destination_vertex].append(source_vertex) + self.adj_list[source_vertex] = [destination_vertex] + # if both source vertex and destination vertex are not present in adjacency + # list, create a new vertex with source vertex as key and assign a list + # containing the destination vertex as it's first adjacent vertex also + # create a new vertex with destination vertex as key and assign a list + # containing the source vertex as it's first adjacent vertex. + else: + self.adj_list[source_vertex] = [destination_vertex] + self.adj_list[destination_vertex] = [source_vertex] + else: # For directed graphs + # if both source vertex and destination vertex are present in adjacency + # list, add destination vertex to source vertex list of adjacent vertices. + if source_vertex in self.adj_list and destination_vertex in self.adj_list: + self.adj_list[source_vertex].append(destination_vertex) + # if only source vertex is present in adjacency list, add destination + # vertex to source vertex list of adjacent vertices and create a new vertex + # with destination vertex as key, which has no adjacent vertex + elif source_vertex in self.adj_list: + self.adj_list[source_vertex].append(destination_vertex) + self.adj_list[destination_vertex] = [] + # if only destination vertex is present in adjacency list, create a new + # vertex with source vertex as key and assign a list containing destination + # vertex as first adjacent vertex + elif destination_vertex in self.adj_list: + self.adj_list[source_vertex] = [destination_vertex] + # if both source vertex and destination vertex are not present in adjacency + # list, create a new vertex with source vertex as key and a list containing + # destination vertex as it's first adjacent vertex. 
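
The branching in add_edge() above is spelled out case by case for clarity. As a condensed sketch of the same behaviour (illustrative only, not the patch's implementation), dict.setdefault can create missing vertices on demand, which collapses the explicit case analysis:

class CompactAdjacencyList:
    """Condensed equivalent of the add_edge() logic above (sketch only)."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict = {}
        self.directed = directed

    def add_edge(self, source: int, destination: int) -> "CompactAdjacencyList":
        # setdefault inserts an empty adjacency list for unseen vertices.
        self.adj_list.setdefault(source, []).append(destination)
        self.adj_list.setdefault(destination, [])
        if not self.directed:
            self.adj_list[destination].append(source)
        return self

graph = CompactAdjacencyList(directed=False)
graph.add_edge(0, 1).add_edge(1, 2)
assert graph.adj_list == {0: [1], 1: [0, 2], 2: [1]}
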
Then create a new vertex + # with destination vertex as key, which has no adjacent vertex + else: + self.adj_list[source_vertex] = [destination_vertex] + self.adj_list[destination_vertex] = [] + + return self + + def __repr__(self) -> str: + return pformat(self.adj_list) if __name__ == "__main__": - al = AdjacencyList() - al.add_edge(0, 1) - al.add_edge(0, 4) - al.add_edge(4, 1) - al.add_edge(4, 3) - al.add_edge(1, 0) - al.add_edge(1, 4) - al.add_edge(1, 3) - al.add_edge(1, 2) - al.add_edge(2, 3) - al.add_edge(3, 4) - - al.print_list() - - # OUTPUT: - # 0 -> 1 -> 4 - # 1 -> 0 -> 4 -> 3 -> 2 - # 2 -> 3 - # 3 -> 4 - # 4 -> 1 -> 3 + import doctest + + doctest.testmod() From 0728cf1128486a1c67e7c7efe42628d7a5eb4c1b Mon Sep 17 00:00:00 2001 From: Gaurav Jindal <54955413+jindal2309@users.noreply.github.com> Date: Sun, 17 Jan 2021 23:38:22 -0800 Subject: [PATCH 0064/1543] Added code to merge two trees (#4121) * Added code to merge two trees * Added doctest and type hints * Added pre-commit --- .../binary_tree/merge_two_binary_trees.py | 93 +++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 data_structures/binary_tree/merge_two_binary_trees.py diff --git a/data_structures/binary_tree/merge_two_binary_trees.py b/data_structures/binary_tree/merge_two_binary_trees.py new file mode 100644 index 000000000000..6b202adb3cf5 --- /dev/null +++ b/data_structures/binary_tree/merge_two_binary_trees.py @@ -0,0 +1,93 @@ +#!/usr/local/bin/python3 +""" +Problem Description: Given two binary tree, return the merged tree. +The rule for merging is that if two nodes overlap, then put the value sum of +both nodes to the new value of the merged node. Otherwise, the NOT null node +will be used as the node of new tree. +""" +from typing import Optional + + +class Node: + """ + A binary node has value variable and pointers to its left and right node. + """ + + def __init__(self, value: int = 0) -> None: + self.value = value + self.left: Optional[Node] = None + self.right: Optional[Node] = None + + +def merge_two_binary_trees(tree1: Optional[Node], tree2: Optional[Node]) -> Node: + """ + Returns root node of the merged tree. + + >>> tree1 = Node(5) + >>> tree1.left = Node(6) + >>> tree1.right = Node(7) + >>> tree1.left.left = Node(2) + >>> tree2 = Node(4) + >>> tree2.left = Node(5) + >>> tree2.right = Node(8) + >>> tree2.left.right = Node(1) + >>> tree2.right.right = Node(4) + >>> merged_tree = merge_two_binary_trees(tree1, tree2) + >>> print_preorder(merged_tree) + 9 + 11 + 2 + 1 + 15 + 4 + """ + if tree1 is None: + return tree2 + if tree2 is None: + return tree1 + + tree1.value = tree1.value + tree2.value + tree1.left = merge_two_binary_trees(tree1.left, tree2.left) + tree1.right = merge_two_binary_trees(tree1.right, tree2.right) + return tree1 + + +def print_preorder(root: Optional[Node]) -> None: + """ + Print pre-order traversal of the tree. 
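
One detail of merge_two_binary_trees() above that the doctest does not make explicit: it mutates and reuses the nodes of tree1, so after the call tree1 is the merged tree and no longer represents its original shape. When the inputs need to stay intact, a non-destructive variant can allocate fresh nodes instead. The sketch below is illustrative only (merge_copies and the minimal _Node stand-in are not part of the patch):

from typing import Optional

class _Node:
    # Minimal stand-in for the Node class above, so the sketch runs alone.
    def __init__(self, value: int = 0) -> None:
        self.value = value
        self.left: Optional["_Node"] = None
        self.right: Optional["_Node"] = None

def merge_copies(tree1: Optional[_Node], tree2: Optional[_Node]) -> Optional[_Node]:
    # Builds a brand-new tree; neither input is modified.
    if tree1 is None and tree2 is None:
        return None
    merged = _Node((tree1.value if tree1 else 0) + (tree2.value if tree2 else 0))
    merged.left = merge_copies(
        tree1.left if tree1 else None, tree2.left if tree2 else None
    )
    merged.right = merge_copies(
        tree1.right if tree1 else None, tree2.right if tree2 else None
    )
    return merged

left_tree, right_tree = _Node(1), _Node(2)
left_tree.left, right_tree.right = _Node(3), _Node(4)
merged = merge_copies(left_tree, right_tree)
assert (merged.value, merged.left.value, merged.right.value) == (3, 3, 4)
assert left_tree.value == 1 and left_tree.right is None  # inputs untouched
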
+ + >>> root = Node(1) + >>> root.left = Node(2) + >>> root.right = Node(3) + >>> print_preorder(root) + 1 + 2 + 3 + >>> print_preorder(root.right) + 3 + """ + if root: + print(root.value) + print_preorder(root.left) + print_preorder(root.right) + + +if __name__ == "__main__": + tree1 = Node(1) + tree1.left = Node(2) + tree1.right = Node(3) + tree1.left.left = Node(4) + + tree2 = Node(2) + tree2.left = Node(4) + tree2.right = Node(6) + tree2.left.right = Node(9) + tree2.right.right = Node(5) + + print("Tree1 is: ") + print_preorder(tree1) + print("Tree2 is: ") + print_preorder(tree2) + merged_tree = merge_two_binary_trees(tree1, tree2) + print("Merged Tree is: ") + print_preorder(merged_tree) From 7d26ba707559842a8caebe7fb6e68011df593c84 Mon Sep 17 00:00:00 2001 From: Tapajyoti Bose <44058757+ruppysuppy@users.noreply.github.com> Date: Thu, 21 Jan 2021 08:30:47 +0530 Subject: [PATCH 0065/1543] Added diffie-hellman algorithm (#4128) * updating DIRECTORY.md * feat: added diffie-hellman key exchange algorithm * fix: enforce maxline length = 88 * fix: fixed import order * fix: used flake to correct styling * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 +- ciphers/diffie_hellman.py | 271 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 273 insertions(+), 1 deletion(-) create mode 100644 ciphers/diffie_hellman.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 4f17cf9c03ed..d487b39490ed 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -59,6 +59,7 @@ * [Decrypt Caesar With Chi Squared](https://github.com/TheAlgorithms/Python/blob/master/ciphers/decrypt_caesar_with_chi_squared.py) * [Deterministic Miller Rabin](https://github.com/TheAlgorithms/Python/blob/master/ciphers/deterministic_miller_rabin.py) * [Diffie](https://github.com/TheAlgorithms/Python/blob/master/ciphers/diffie.py) + * [Diffie Hellman](https://github.com/TheAlgorithms/Python/blob/master/ciphers/diffie_hellman.py) * [Elgamal Key Generator](https://github.com/TheAlgorithms/Python/blob/master/ciphers/elgamal_key_generator.py) * [Enigma Machine2](https://github.com/TheAlgorithms/Python/blob/master/ciphers/enigma_machine2.py) * [Hill Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/hill_cipher.py) @@ -224,7 +225,6 @@ * [Abbreviation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/abbreviation.py) * [Bitmask](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/bitmask.py) * [Climbing Stairs](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/climbing_stairs.py) - * [Coin Change](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/coin_change.py) * [Edit Distance](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/edit_distance.py) * [Factorial](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/factorial.py) * [Fast Fibonacci](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fast_fibonacci.py) @@ -243,6 +243,7 @@ * [Max Non Adjacent Sum](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_non_adjacent_sum.py) * [Max Sub Array](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sub_array.py) * [Max Sum Contiguous Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sum_contiguous_subsequence.py) + * [Minimum Coin 
Change](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_coin_change.py) * [Minimum Cost Path](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_cost_path.py) * [Minimum Partition](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_partition.py) * [Minimum Steps To One](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_steps_to_one.py) diff --git a/ciphers/diffie_hellman.py b/ciphers/diffie_hellman.py new file mode 100644 index 000000000000..ea35b67b483e --- /dev/null +++ b/ciphers/diffie_hellman.py @@ -0,0 +1,271 @@ +from binascii import hexlify +from hashlib import sha256 +from os import urandom + +# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for +# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 + +primes = { + # 1536-bit + 5: { + "prime": int( + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", + base=16, + ), + "generator": 2, + }, + # 2048-bit + 14: { + "prime": int( + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", + base=16, + ), + "generator": 2, + }, + # 3072-bit + 15: { + "prime": int( + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", + base=16, + ), + "generator": 2, + }, + # 4096-bit + 16: { + "prime": int( + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + + 
"DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + + "FFFFFFFFFFFFFFFF", + base=16, + ), + "generator": 2, + }, + # 6144-bit + 17: { + "prime": int( + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + + "6DCC4024FFFFFFFFFFFFFFFF", + base=16, + ), + "generator": 2, + }, + # 8192-bit + 18: { + "prime": int( + "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + + 
"287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", + base=16, + ), + "generator": 2, + }, +} + + +class DiffieHellman: + """ + Class to represent the Diffie-Hellman key exchange protocol + + + >>> alice = DiffieHellman() + >>> bob = DiffieHellman() + + >>> alice_private = alice.get_private_key() + >>> alice_public = alice.generate_public_key() + + >>> bob_private = bob.get_private_key() + >>> bob_public = bob.generate_public_key() + + >>> # generating shared key using the DH object + >>> alice_shared = alice.generate_shared_key(bob_public) + >>> bob_shared = bob.generate_shared_key(alice_public) + + >>> assert alice_shared == bob_shared + + >>> # generating shared key using static methods + >>> alice_shared = DiffieHellman.generate_shared_key_static( + ... alice_private, bob_public + ... ) + >>> bob_shared = DiffieHellman.generate_shared_key_static( + ... bob_private, alice_public + ... 
) + + >>> assert alice_shared == bob_shared + """ + + # Current minimum recommendation is 2048 bit (group 14) + def __init__(self, group: int = 14) -> None: + if group not in primes: + raise ValueError("Unsupported Group") + self.prime = primes[group]["prime"] + self.generator = primes[group]["generator"] + + self.__private_key = int(hexlify(urandom(32)), base=16) + + def get_private_key(self) -> str: + return hex(self.__private_key)[2:] + + def generate_public_key(self) -> str: + public_key = pow(self.generator, self.__private_key, self.prime) + return hex(public_key)[2:] + + def is_valid_public_key(self, key: int) -> bool: + # check if the other public key is valid based on NIST SP800-56 + if 2 <= key and key <= self.prime - 2: + if pow(key, (self.prime - 1) // 2, self.prime) == 1: + return True + return False + + def generate_shared_key(self, other_key_str: str) -> str: + other_key = int(other_key_str, base=16) + if not self.is_valid_public_key(other_key): + raise ValueError("Invalid public key") + shared_key = pow(other_key, self.__private_key, self.prime) + return sha256(str(shared_key).encode()).hexdigest() + + @staticmethod + def is_valid_public_key_static( + local_private_key_str: str, remote_public_key_str: str, prime: int + ) -> bool: + # check if the other public key is valid based on NIST SP800-56 + if 2 <= remote_public_key_str and remote_public_key_str <= prime - 2: + if pow(remote_public_key_str, (prime - 1) // 2, prime) == 1: + return True + return False + + @staticmethod + def generate_shared_key_static( + local_private_key_str: str, remote_public_key_str: str, group: int = 14 + ) -> str: + local_private_key = int(local_private_key_str, base=16) + remote_public_key = int(remote_public_key_str, base=16) + prime = primes[group]["prime"] + if not DiffieHellman.is_valid_public_key_static( + local_private_key, remote_public_key, prime + ): + raise ValueError("Invalid public key") + shared_key = pow(remote_public_key, local_private_key, prime) + return sha256(str(shared_key).encode()).hexdigest() + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d594f4556cdc0c964c76daf5d9c891c32c5823bf Mon Sep 17 00:00:00 2001 From: Steve Kim <54872857+SteveKimSR@users.noreply.github.com> Date: Fri, 22 Jan 2021 13:40:21 +0900 Subject: [PATCH 0066/1543] [mypy] Add/fix type annotations for similarity search in machine learning (#4088) * [mypy] Add/fix type annotations for similarity search in machine learning * fix annotation * fix annotation (Union) * isort --- machine_learning/similarity_search.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index 6bfb12ed88cb..af845c9109b1 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -8,6 +8,7 @@ 2. distance between the vector and the nearest vector (float) """ import math +from typing import List, Union import numpy as np @@ -30,7 +31,9 @@ def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float: return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b))) -def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list: +def similarity_search( + dataset: np.ndarray, value_array: np.ndarray +) -> List[List[Union[List[float], float]]]: """ :param dataset: Set containing the vectors. Should be ndarray. :param value_array: vector/vectors we want to know the nearest vector from dataset. 
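
The reason generate_shared_key() above yields the same value on both sides is the identity (g**a)**b == (g**b)**a (mod p): each party raises the other's public value to its own private exponent. A toy exchange with deliberately tiny, insecure parameters (p = 23, g = 5, chosen only for readability and nothing like the RFC 3526 groups above) makes the arithmetic easy to follow; this is a standalone sketch, not the class's code.

# Toy group parameters: far too small for real use.
prime, generator = 23, 5

alice_private, bob_private = 6, 15
alice_public = pow(generator, alice_private, prime)   # 5**6  % 23 == 8
bob_public = pow(generator, bob_private, prime)       # 5**15 % 23 == 19

# Each side combines its own private key with the other side's public key.
alice_shared = pow(bob_public, alice_private, prime)
bob_shared = pow(alice_public, bob_private, prime)

assert alice_shared == bob_shared == 2

The DiffieHellman class then feeds this shared integer through SHA-256, which is why the doctest above compares hex digests rather than raw integers.
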
From 3f1e376bbc41fc3855bb9562e3e7797342304035 Mon Sep 17 00:00:00 2001 From: ayushbisht2001 <61404154+ayushbisht2001@users.noreply.github.com> Date: Wed, 27 Jan 2021 15:54:57 +0530 Subject: [PATCH 0067/1543] add reverse_bits.py (#4120) * add reverse_bits.py * check * Delete binary_xor_operator_new.py * Fix All the errors Co-authored-by: xcodz-dot --- bit_manipulation/reverse_bits.py | 85 ++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 bit_manipulation/reverse_bits.py diff --git a/bit_manipulation/reverse_bits.py b/bit_manipulation/reverse_bits.py new file mode 100644 index 000000000000..55608ae12908 --- /dev/null +++ b/bit_manipulation/reverse_bits.py @@ -0,0 +1,85 @@ +def get_reverse_bit_string(number: int) -> str: + """ + return the bit string of an integer + + >>> get_reverse_bit_string(9) + '10010000000000000000000000000000' + >>> get_reverse_bit_string(43) + '11010100000000000000000000000000' + >>> get_reverse_bit_string(2873) + '10011100110100000000000000000000' + >>> get_reverse_bit_string("this is not a number") + Traceback (most recent call last): + ... + TypeError: operation can not be conducted on a object of type str + """ + if not isinstance(number, int): + raise TypeError( + "operation can not be conducted on a object of type " + f"{type(number).__name__}" + ) + bit_string = "" + for _ in range(0, 32): + bit_string += str(number % 2) + number = number >> 1 + return bit_string + + +def reverse_bit(number: int) -> str: + """ + Take in an 32 bit integer, reverse its bits, + return a string of reverse bits + + result of a reverse_bit and operation on the integer provided. + + >>> reverse_bit(25) + '00000000000000000000000000011001' + >>> reverse_bit(37) + '00000000000000000000000000100101' + >>> reverse_bit(21) + '00000000000000000000000000010101' + >>> reverse_bit(58) + '00000000000000000000000000111010' + >>> reverse_bit(0) + '00000000000000000000000000000000' + >>> reverse_bit(256) + '00000000000000000000000100000000' + >>> reverse_bit(-1) + Traceback (most recent call last): + ... + ValueError: the value of input must be positive + + >>> reverse_bit(1.1) + Traceback (most recent call last): + ... + TypeError: Input value must be a 'int' type + + >>> reverse_bit("0") + Traceback (most recent call last): + ... 
+ TypeError: '<' not supported between instances of 'str' and 'int' + """ + if number < 0: + raise ValueError("the value of input must be positive") + elif isinstance(number, float): + raise TypeError("Input value must be a 'int' type") + elif isinstance(number, str): + raise TypeError("'<' not supported between instances of 'str' and 'int'") + result = 0 + # iterator over [1 to 32],since we are dealing with 32 bit integer + for _ in range(1, 33): + # left shift the bits by unity + result = result << 1 + # get the end bit + end_bit = number % 2 + # right shift the bits by unity + number = number >> 1 + # add that bit to our ans + result = result | end_bit + return get_reverse_bit_string(result) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 97b6ca2b19a3ef92016e586569aa514e53f01067 Mon Sep 17 00:00:00 2001 From: Ayush Raj Date: Thu, 4 Feb 2021 22:28:29 +0530 Subject: [PATCH 0068/1543] [mypy] Add/fix type annotations for boolean_algebra (#4172) * [mypy] Add/fix type annotations for boolean_algebra * [mypy] Add/fix type annotations for boolean_algebra * [mypy] Add/fix annotations for boolean_algebra --- boolean_algebra/quine_mc_cluskey.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/boolean_algebra/quine_mc_cluskey.py b/boolean_algebra/quine_mc_cluskey.py index 19bac336f6c5..70cdf25a701d 100644 --- a/boolean_algebra/quine_mc_cluskey.py +++ b/boolean_algebra/quine_mc_cluskey.py @@ -1,10 +1,13 @@ +from typing import List + + def compare_string(string1: str, string2: str) -> str: """ >>> compare_string('0010','0110') '0_10' >>> compare_string('0110','1101') - -1 + 'X' """ l1 = list(string1) l2 = list(string2) @@ -14,12 +17,12 @@ def compare_string(string1: str, string2: str) -> str: count += 1 l1[i] = "_" if count > 1: - return -1 + return "X" else: return "".join(l1) -def check(binary: [str]) -> [str]: +def check(binary: List[str]) -> List[str]: """ >>> check(['0.00.01.5']) ['0.00.01.5'] @@ -31,7 +34,7 @@ def check(binary: [str]) -> [str]: for i in range(len(binary)): for j in range(i + 1, len(binary)): k = compare_string(binary[i], binary[j]) - if k != -1: + if k != "X": check1[i] = "*" check1[j] = "*" temp.append(k) @@ -43,7 +46,7 @@ def check(binary: [str]) -> [str]: binary = list(set(temp)) -def decimal_to_binary(no_of_variable: int, minterms: [float]) -> [str]: +def decimal_to_binary(no_of_variable: int, minterms: List[float]) -> List[str]: """ >>> decimal_to_binary(3,[1.5]) ['0.00.01.5'] @@ -79,7 +82,7 @@ def is_for_table(string1: str, string2: str, count: int) -> bool: return False -def selection(chart: [[int]], prime_implicants: [str]) -> [str]: +def selection(chart: List[List[int]], prime_implicants: List[str]) -> List[str]: """ >>> selection([[1]],['0.00.01.5']) ['0.00.01.5'] @@ -126,7 +129,9 @@ def selection(chart: [[int]], prime_implicants: [str]) -> [str]: chart[j][i] = 0 -def prime_implicant_chart(prime_implicants: [str], binary: [str]) -> [[int]]: +def prime_implicant_chart( + prime_implicants: List[str], binary: List[str] +) -> List[List[int]]: """ >>> prime_implicant_chart(['0.00.01.5'],['0.00.01.5']) [[1]] From 2595cf059d677c39513a9d75f1736bc5b84d6298 Mon Sep 17 00:00:00 2001 From: Hao LI <8520588+Leo-LiHao@users.noreply.github.com> Date: Fri, 5 Feb 2021 00:59:38 +0800 Subject: [PATCH 0069/1543] [mypy] Add/fix type annotations for binary trees in data structures (#4085) * fix mypy: data_structures:binary_tree * mypy --strict for binary_trees in data_structures * fix pre-commit Co-authored-by: LiHao --- 
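
Back to the reverse_bits.py file added above: its two helpers can be cross-checked against Python's own binary formatting. The snippet below re-implements the same loops under different names (loop_reverse_bit_string and loop_reverse_bits_int, both illustrative and not part of the patch) so the check runs on its own.

def loop_reverse_bit_string(number: int) -> str:
    # Emits the 32 bits of `number` least-significant first,
    # like get_reverse_bit_string() above.
    bits = ""
    for _ in range(32):
        bits += str(number % 2)
        number >>= 1
    return bits

def loop_reverse_bits_int(number: int) -> int:
    # Builds the bit-reversed 32-bit integer, like the core loop of
    # reverse_bit() above.
    result = 0
    for _ in range(32):
        result = (result << 1) | (number & 1)
        number >>= 1
    return result

for n in (0, 9, 21, 25, 37, 43, 58, 256, 2873):
    # LSB-first string == MSB-first string read backwards.
    assert loop_reverse_bit_string(n) == format(n, "032b")[::-1]
    # Reversing the bits and then printing them LSB-first cancels out,
    # so reverse_bit() ends up returning the plain 32-bit form of its input.
    assert loop_reverse_bit_string(loop_reverse_bits_int(n)) == format(n, "032b")
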
.../binary_search_tree_recursive.py | 99 ++++++++++++------- .../binary_tree/lazy_segment_tree.py | 9 +- data_structures/binary_tree/treap.py | 33 ++++--- 3 files changed, 84 insertions(+), 57 deletions(-) diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index f1e46e33cd24..a05e28a7bd54 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -8,21 +8,22 @@ python binary_search_tree_recursive.py """ import unittest +from typing import Iterator, Optional class Node: - def __init__(self, label: int, parent): + def __init__(self, label: int, parent: Optional["Node"]) -> None: self.label = label self.parent = parent - self.left = None - self.right = None + self.left: Optional[Node] = None + self.right: Optional[Node] = None class BinarySearchTree: - def __init__(self): - self.root = None + def __init__(self) -> None: + self.root: Optional[Node] = None - def empty(self): + def empty(self) -> None: """ Empties the tree @@ -46,7 +47,7 @@ def is_empty(self) -> bool: """ return self.root is None - def put(self, label: int): + def put(self, label: int) -> None: """ Put a new node in the tree @@ -65,7 +66,9 @@ def put(self, label: int): """ self.root = self._put(self.root, label) - def _put(self, node: Node, label: int, parent: Node = None) -> Node: + def _put( + self, node: Optional[Node], label: int, parent: Optional[Node] = None + ) -> Node: if node is None: node = Node(label, parent) else: @@ -95,7 +98,7 @@ def search(self, label: int) -> Node: """ return self._search(self.root, label) - def _search(self, node: Node, label: int) -> Node: + def _search(self, node: Optional[Node], label: int) -> Node: if node is None: raise Exception(f"Node with label {label} does not exist") else: @@ -106,7 +109,7 @@ def _search(self, node: Node, label: int) -> Node: return node - def remove(self, label: int): + def remove(self, label: int) -> None: """ Removes a node in the tree @@ -122,13 +125,7 @@ def remove(self, label: int): Exception: Node with label 3 does not exist """ node = self.search(label) - if not node.right and not node.left: - self._reassign_nodes(node, None) - elif not node.right and node.left: - self._reassign_nodes(node, node.left) - elif node.right and not node.left: - self._reassign_nodes(node, node.right) - else: + if node.right and node.left: lowest_node = self._get_lowest_node(node.right) lowest_node.left = node.left lowest_node.right = node.right @@ -136,8 +133,14 @@ def remove(self, label: int): if node.right: node.right.parent = lowest_node self._reassign_nodes(node, lowest_node) + elif not node.right and node.left: + self._reassign_nodes(node, node.left) + elif node.right and not node.left: + self._reassign_nodes(node, node.right) + else: + self._reassign_nodes(node, None) - def _reassign_nodes(self, node: Node, new_children: Node): + def _reassign_nodes(self, node: Node, new_children: Optional[Node]) -> None: if new_children: new_children.parent = node.parent @@ -192,7 +195,7 @@ def get_max_label(self) -> int: >>> t.get_max_label() 10 """ - if self.is_empty(): + if self.root is None: raise Exception("Binary search tree is empty") node = self.root @@ -216,7 +219,7 @@ def get_min_label(self) -> int: >>> t.get_min_label() 8 """ - if self.is_empty(): + if self.root is None: raise Exception("Binary search tree is empty") node = self.root @@ -225,7 +228,7 @@ def get_min_label(self) -> int: return node.label - def 
inorder_traversal(self) -> list: + def inorder_traversal(self) -> Iterator[Node]: """ Return the inorder traversal of the tree @@ -241,13 +244,13 @@ def inorder_traversal(self) -> list: """ return self._inorder_traversal(self.root) - def _inorder_traversal(self, node: Node) -> list: + def _inorder_traversal(self, node: Optional[Node]) -> Iterator[Node]: if node is not None: yield from self._inorder_traversal(node.left) yield node yield from self._inorder_traversal(node.right) - def preorder_traversal(self) -> list: + def preorder_traversal(self) -> Iterator[Node]: """ Return the preorder traversal of the tree @@ -263,7 +266,7 @@ def preorder_traversal(self) -> list: """ return self._preorder_traversal(self.root) - def _preorder_traversal(self, node: Node) -> list: + def _preorder_traversal(self, node: Optional[Node]) -> Iterator[Node]: if node is not None: yield node yield from self._preorder_traversal(node.left) @@ -272,7 +275,7 @@ def _preorder_traversal(self, node: Node) -> list: class BinarySearchTreeTest(unittest.TestCase): @staticmethod - def _get_binary_search_tree(): + def _get_binary_search_tree() -> BinarySearchTree: r""" 8 / \ @@ -298,7 +301,7 @@ def _get_binary_search_tree(): return t - def test_put(self): + def test_put(self) -> None: t = BinarySearchTree() assert t.is_empty() @@ -306,6 +309,7 @@ def test_put(self): r""" 8 """ + assert t.root is not None assert t.root.parent is None assert t.root.label == 8 @@ -315,6 +319,7 @@ def test_put(self): \ 10 """ + assert t.root.right is not None assert t.root.right.parent == t.root assert t.root.right.label == 10 @@ -324,6 +329,7 @@ def test_put(self): / \ 3 10 """ + assert t.root.left is not None assert t.root.left.parent == t.root assert t.root.left.label == 3 @@ -335,6 +341,7 @@ def test_put(self): \ 6 """ + assert t.root.left.right is not None assert t.root.left.right.parent == t.root.left assert t.root.left.right.label == 6 @@ -346,13 +353,14 @@ def test_put(self): / \ 1 6 """ + assert t.root.left.left is not None assert t.root.left.left.parent == t.root.left assert t.root.left.left.label == 1 with self.assertRaises(Exception): t.put(1) - def test_search(self): + def test_search(self) -> None: t = self._get_binary_search_tree() node = t.search(6) @@ -364,7 +372,7 @@ def test_search(self): with self.assertRaises(Exception): t.search(2) - def test_remove(self): + def test_remove(self) -> None: t = self._get_binary_search_tree() t.remove(13) @@ -379,6 +387,9 @@ def test_remove(self): \ 5 """ + assert t.root is not None + assert t.root.right is not None + assert t.root.right.right is not None assert t.root.right.right.right is None assert t.root.right.right.left is None @@ -394,6 +405,9 @@ def test_remove(self): \ 5 """ + assert t.root.left is not None + assert t.root.left.right is not None + assert t.root.left.right.left is not None assert t.root.left.right.right is None assert t.root.left.right.left.label == 4 @@ -407,6 +421,8 @@ def test_remove(self): \ 5 """ + assert t.root.left.left is not None + assert t.root.left.right.right is not None assert t.root.left.left.label == 1 assert t.root.left.right.label == 4 assert t.root.left.right.right.label == 5 @@ -422,6 +438,7 @@ def test_remove(self): / \ \ 1 5 14 """ + assert t.root is not None assert t.root.left.label == 4 assert t.root.left.right.label == 5 assert t.root.left.left.label == 1 @@ -437,13 +454,15 @@ def test_remove(self): / \ 1 14 """ + assert t.root.left is not None + assert t.root.left.left is not None assert t.root.left.label == 5 assert t.root.left.right is None 
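
# Note on the blocks of `assert ... is not None` being added in this patch:
# they are not redundant runtime checks. Under mypy --strict an attribute
# access such as t.root.label is rejected while t.root is still typed
# Optional[Node]; the assert narrows the type to Node for the lines that
# follow. Minimal standalone illustration (Leaf and read_label are made-up
# names for this sketch, not repository code):

from typing import Optional

class Leaf:
    def __init__(self, label: int) -> None:
        self.label = label

def read_label(leaf: Optional[Leaf]) -> int:
    # Without the assert, mypy reports that `leaf` may be None; the assert
    # narrows Optional[Leaf] to Leaf, so `.label` type-checks.
    assert leaf is not None
    return leaf.label

assert read_label(Leaf(5)) == 5
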
assert t.root.left.left.label == 1 assert t.root.left.parent == t.root assert t.root.left.left.parent == t.root.left - def test_remove_2(self): + def test_remove_2(self) -> None: t = self._get_binary_search_tree() t.remove(3) @@ -456,6 +475,12 @@ def test_remove_2(self): / \ / 5 7 13 """ + assert t.root is not None + assert t.root.left is not None + assert t.root.left.left is not None + assert t.root.left.right is not None + assert t.root.left.right.left is not None + assert t.root.left.right.right is not None assert t.root.left.label == 4 assert t.root.left.right.label == 6 assert t.root.left.left.label == 1 @@ -466,25 +491,25 @@ def test_remove_2(self): assert t.root.left.left.parent == t.root.left assert t.root.left.right.left.parent == t.root.left.right - def test_empty(self): + def test_empty(self) -> None: t = self._get_binary_search_tree() t.empty() assert t.root is None - def test_is_empty(self): + def test_is_empty(self) -> None: t = self._get_binary_search_tree() assert not t.is_empty() t.empty() assert t.is_empty() - def test_exists(self): + def test_exists(self) -> None: t = self._get_binary_search_tree() assert t.exists(6) assert not t.exists(-1) - def test_get_max_label(self): + def test_get_max_label(self) -> None: t = self._get_binary_search_tree() assert t.get_max_label() == 14 @@ -493,7 +518,7 @@ def test_get_max_label(self): with self.assertRaises(Exception): t.get_max_label() - def test_get_min_label(self): + def test_get_min_label(self) -> None: t = self._get_binary_search_tree() assert t.get_min_label() == 1 @@ -502,20 +527,20 @@ def test_get_min_label(self): with self.assertRaises(Exception): t.get_min_label() - def test_inorder_traversal(self): + def test_inorder_traversal(self) -> None: t = self._get_binary_search_tree() inorder_traversal_nodes = [i.label for i in t.inorder_traversal()] assert inorder_traversal_nodes == [1, 3, 4, 5, 6, 7, 8, 10, 13, 14] - def test_preorder_traversal(self): + def test_preorder_traversal(self) -> None: t = self._get_binary_search_tree() preorder_traversal_nodes = [i.label for i in t.preorder_traversal()] assert preorder_traversal_nodes == [8, 3, 1, 6, 4, 5, 7, 10, 14, 13] -def binary_search_tree_example(): +def binary_search_tree_example() -> None: r""" Example 8 diff --git a/data_structures/binary_tree/lazy_segment_tree.py b/data_structures/binary_tree/lazy_segment_tree.py index 5bc79e74efcd..9066db294613 100644 --- a/data_structures/binary_tree/lazy_segment_tree.py +++ b/data_structures/binary_tree/lazy_segment_tree.py @@ -1,6 +1,7 @@ from __future__ import annotations import math +from typing import List, Union class SegmentTree: @@ -37,7 +38,7 @@ def right(self, idx: int) -> int: return idx * 2 + 1 def build( - self, idx: int, left_element: int, right_element: int, A: list[int] + self, idx: int, left_element: int, right_element: int, A: List[int] ) -> None: if left_element == right_element: self.segment_tree[idx] = A[left_element - 1] @@ -88,7 +89,7 @@ def update( # query with O(lg n) def query( self, idx: int, left_element: int, right_element: int, a: int, b: int - ) -> int: + ) -> Union[int, float]: """ query(1, 1, size, a, b) for query max of [a,b] >>> A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] @@ -118,8 +119,8 @@ def query( q2 = self.query(self.right(idx), mid + 1, right_element, a, b) return max(q1, q2) - def __str__(self) -> None: - return [self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)] + def __str__(self) -> str: + return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size 
+ 1)]) if __name__ == "__main__": diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py index 26648f7aba61..a09dcc928143 100644 --- a/data_structures/binary_tree/treap.py +++ b/data_structures/binary_tree/treap.py @@ -3,6 +3,7 @@ from __future__ import annotations from random import random +from typing import Optional, Tuple class Node: @@ -11,13 +12,13 @@ class Node: Treap is a binary tree by value and heap by priority """ - def __init__(self, value: int = None): + def __init__(self, value: Optional[int] = None): self.value = value self.prior = random() - self.left = None - self.right = None + self.left: Optional[Node] = None + self.right: Optional[Node] = None - def __repr__(self): + def __repr__(self) -> str: from pprint import pformat if self.left is None and self.right is None: @@ -27,14 +28,14 @@ def __repr__(self): {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1 ) - def __str__(self): + def __str__(self) -> str: value = str(self.value) + " " left = str(self.left or "") right = str(self.right or "") return value + left + right -def split(root: Node, value: int) -> tuple[Node, Node]: +def split(root: Optional[Node], value: int) -> Tuple[Optional[Node], Optional[Node]]: """ We split current tree into 2 trees with value: @@ -42,9 +43,9 @@ def split(root: Node, value: int) -> tuple[Node, Node]: Right tree contains all values greater or equal, than split value """ if root is None: # None tree is split into 2 Nones - return (None, None) + return None, None elif root.value is None: - return (None, None) + return None, None else: if value < root.value: """ @@ -54,16 +55,16 @@ def split(root: Node, value: int) -> tuple[Node, Node]: Right tree's left son: right part of that split """ left, root.left = split(root.left, value) - return (left, root) + return left, root else: """ Just symmetric to previous case """ root.right, right = split(root.right, value) - return (root, right) + return root, right -def merge(left: Node, right: Node) -> Node: +def merge(left: Optional[Node], right: Optional[Node]) -> Optional[Node]: """ We merge 2 trees into one. 
Note: all left tree's values must be less than all right tree's @@ -85,7 +86,7 @@ def merge(left: Node, right: Node) -> Node: return right -def insert(root: Node, value: int) -> Node: +def insert(root: Optional[Node], value: int) -> Optional[Node]: """ Insert element @@ -98,7 +99,7 @@ def insert(root: Node, value: int) -> Node: return merge(merge(left, node), right) -def erase(root: Node, value: int) -> Node: +def erase(root: Optional[Node], value: int) -> Optional[Node]: """ Erase element @@ -111,7 +112,7 @@ def erase(root: Node, value: int) -> Node: return merge(left, right) -def inorder(root: Node): +def inorder(root: Optional[Node]) -> None: """ Just recursive print of a tree """ @@ -123,7 +124,7 @@ def inorder(root: Node): inorder(root.right) -def interactTreap(root, args): +def interactTreap(root: Optional[Node], args: str) -> Optional[Node]: """ Commands: + value to add value into treap @@ -160,7 +161,7 @@ def interactTreap(root, args): return root -def main(): +def main() -> None: """After each command, program prints treap""" root = None print( From 4903a657799738bb16d43a763e055e3ef7e68e23 Mon Sep 17 00:00:00 2001 From: Abdeldjaouad Nusayr Medakene <31663979+MrGeek1337@users.noreply.github.com> Date: Tue, 9 Feb 2021 17:13:48 +0100 Subject: [PATCH 0070/1543] Create slowsort.py (#3865) * Create slowsort.py added slowsort algorithm implementation to sorts * Update slowsort.py * Update slowsort.py * Update slowsort.py * Update slowsort.py * Update slowsort.py * Update slowsort.py * Update slowsort.py * Update slowsort.py * Update slowsort.py * Update slowsort.py * Update slowsort.py * Update slowsort.py --- sorts/slowsort.py | 63 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 sorts/slowsort.py diff --git a/sorts/slowsort.py b/sorts/slowsort.py new file mode 100644 index 000000000000..53bb14554ee2 --- /dev/null +++ b/sorts/slowsort.py @@ -0,0 +1,63 @@ +""" +Slowsort is a sorting algorithm. It is of humorous nature and not useful. +It's based on the principle of multiply and surrender, +a tongue-in-cheek joke of divide and conquer. +It was published in 1986 by Andrei Broder and Jorge Stolfi +in their paper Pessimal Algorithms and Simplexity Analysis +(a parody of optimal algorithms and complexity analysis). + +Source: https://en.wikipedia.org/wiki/Slowsort +""" + +from typing import Optional + + +def slowsort( + sequence: list, start: Optional[int] = None, end: Optional[int] = None +) -> None: + """ + Sorts sequence[start..end] (both inclusive) in-place. + start defaults to 0 if not given. + end defaults to len(sequence) - 1 if not given. + It returns None. 
+ >>> seq = [1, 6, 2, 5, 3, 4, 4, 5]; slowsort(seq); seq + [1, 2, 3, 4, 4, 5, 5, 6] + >>> seq = []; slowsort(seq); seq + [] + >>> seq = [2]; slowsort(seq); seq + [2] + >>> seq = [1, 2, 3, 4]; slowsort(seq); seq + [1, 2, 3, 4] + >>> seq = [4, 3, 2, 1]; slowsort(seq); seq + [1, 2, 3, 4] + >>> seq = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]; slowsort(seq, 2, 7); seq + [9, 8, 2, 3, 4, 5, 6, 7, 1, 0] + >>> seq = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]; slowsort(seq, end = 4); seq + [5, 6, 7, 8, 9, 4, 3, 2, 1, 0] + >>> seq = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]; slowsort(seq, start = 5); seq + [9, 8, 7, 6, 5, 0, 1, 2, 3, 4] + """ + if start is None: + start = 0 + + if end is None: + end = len(sequence) - 1 + + if start >= end: + return + + mid = (start + end) // 2 + + slowsort(sequence, start, mid) + slowsort(sequence, mid + 1, end) + + if sequence[end] < sequence[mid]: + sequence[end], sequence[mid] = sequence[mid], sequence[end] + + slowsort(sequence, start, end - 1) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From f66325a01bd790107c535f922aa0d9cfd75935e3 Mon Sep 17 00:00:00 2001 From: Ocean Monjur <75680423+OCM-7898@users.noreply.github.com> Date: Thu, 11 Feb 2021 22:49:53 +0600 Subject: [PATCH 0071/1543] odd_even_sort.py (#4199) * odd_even_sort.py * Update odd_even_sort.py * Update odd_even_sort.py --- sorts/odd_even_sort.py | 47 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 sorts/odd_even_sort.py diff --git a/sorts/odd_even_sort.py b/sorts/odd_even_sort.py new file mode 100644 index 000000000000..557337ee77bc --- /dev/null +++ b/sorts/odd_even_sort.py @@ -0,0 +1,47 @@ +"""For reference +https://en.wikipedia.org/wiki/Odd%E2%80%93even_sort +""" + + +def odd_even_sort(input_list: list) -> list: + """this algorithm uses the same idea of bubblesort, + but by first dividing in two phase (odd and even). + Originally developed for use on parallel processors + with local interconnections. 
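
The slowsort() implementation above is deliberately pessimal, and counting its comparisons makes that concrete: each call sorts both halves, compares once, then re-sorts all but the last element, giving the recurrence T(n) = 2 * T(n/2) + T(n - 1) + 1 (up to rounding of the halves), which grows faster than any polynomial. A standalone sketch that instruments the same algorithm (count_slowsort_comparisons is an illustrative name, not part of the patch):

def count_slowsort_comparisons(sequence: list) -> int:
    comparisons = 0

    def _slowsort(start: int, end: int) -> None:
        nonlocal comparisons
        if start >= end:
            return
        mid = (start + end) // 2
        _slowsort(start, mid)
        _slowsort(mid + 1, end)
        comparisons += 1  # the single comparison made at this level
        if sequence[end] < sequence[mid]:
            sequence[end], sequence[mid] = sequence[mid], sequence[end]
        _slowsort(start, end - 1)

    _slowsort(0, len(sequence) - 1)
    return comparisons

counts = [count_slowsort_comparisons(list(range(n, 0, -1))) for n in (4, 8, 16)]
print(counts)                      # [6, 41, 407]
assert counts[-1] > 16 * 15 // 2   # already worse than a plain quadratic sort
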
+ :param collection: mutable ordered sequence of elements + :return: same collection in ascending order + Examples: + >>> odd_even_sort([5 , 4 ,3 ,2 ,1]) + [1, 2, 3, 4, 5] + >>> odd_even_sort([]) + [] + >>> odd_even_sort([-10 ,-1 ,10 ,2]) + [-10, -1, 2, 10] + >>> odd_even_sort([1 ,2 ,3 ,4]) + [1, 2, 3, 4] + """ + sorted = False + while sorted is False: # Until all the indices are traversed keep looping + sorted = True + for i in range(0, len(input_list) - 1, 2): # iterating over all even indices + if input_list[i] > input_list[i + 1]: + + input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i] + # swapping if elements not in order + sorted = False + + for i in range(1, len(input_list) - 1, 2): # iterating over all odd indices + if input_list[i] > input_list[i + 1]: + input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i] + # swapping if elements not in order + sorted = False + return input_list + + +if __name__ == "__main__": + print("Enter list to be sorted") + input_list = [int(x) for x in input().split()] + # inputing elements of the list in one line + sorted_list = odd_even_sort(input_list) + print("The sorted list is") + print(sorted_list) From d3ac521b63aad890b1fdcb9ae7321b658d5c4764 Mon Sep 17 00:00:00 2001 From: Ayush Bisht <61404154+ayushbisht2001@users.noreply.github.com> Date: Fri, 12 Feb 2021 07:59:24 +0530 Subject: [PATCH 0072/1543] add count_number_of_one_bits.py (#4195) * count-bits * update --- bit_manipulation/count_number_of_one_bits.py | 34 ++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 bit_manipulation/count_number_of_one_bits.py diff --git a/bit_manipulation/count_number_of_one_bits.py b/bit_manipulation/count_number_of_one_bits.py new file mode 100644 index 000000000000..51fd2b630483 --- /dev/null +++ b/bit_manipulation/count_number_of_one_bits.py @@ -0,0 +1,34 @@ +def get_set_bits_count(number: int) -> int: + """ + Count the number of set bits in a 32 bit integer + >>> get_set_bits_count(25) + 3 + >>> get_set_bits_count(37) + 3 + >>> get_set_bits_count(21) + 3 + >>> get_set_bits_count(58) + 4 + >>> get_set_bits_count(0) + 0 + >>> get_set_bits_count(256) + 1 + >>> get_set_bits_count(-1) + Traceback (most recent call last): + ... 
+ ValueError: the value of input must be positive + """ + if number < 0: + raise ValueError("the value of input must be positive") + result = 0 + while number: + if number % 2 == 1: + result += 1 + number = number >> 1 + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 78ddb460660bc2c3b14f1b7197aa52fe0e9ac542 Mon Sep 17 00:00:00 2001 From: MarineJoker Date: Wed, 17 Feb 2021 22:28:50 +0800 Subject: [PATCH 0073/1543] Quick sort with lomuto partition (#3875) * add quick sort algorithm with Lomuto partition * fix(lomuto_partition): fix snake_case --- sorts/quick_sort_3_partition.py | 47 +++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/sorts/quick_sort_3_partition.py b/sorts/quick_sort_3_partition.py index 18c6e0f876d2..1a6db6a364f0 100644 --- a/sorts/quick_sort_3_partition.py +++ b/sorts/quick_sort_3_partition.py @@ -18,6 +18,53 @@ def quick_sort_3partition(sorting: list, left: int, right: int) -> None: quick_sort_3partition(sorting, b + 1, right) +def quick_sort_lomuto_partition(sorting: list, left: int, right: int) -> None: + """ + A pure Python implementation of quick sort algorithm(in-place) + with Lomuto partition scheme: + https://en.wikipedia.org/wiki/Quicksort#Lomuto_partition_scheme + + :param sorting: sort list + :param left: left endpoint of sorting + :param right: right endpoint of sorting + :return: None + + Examples: + >>> nums1 = [0, 5, 3, 1, 2] + >>> quick_sort_lomuto_partition(nums1, 0, 4) + >>> nums1 + [0, 1, 2, 3, 5] + >>> nums2 = [] + >>> quick_sort_lomuto_partition(nums2, 0, 0) + >>> nums2 + [] + >>> nums3 = [-2, 5, 0, -4] + >>> quick_sort_lomuto_partition(nums3, 0, 3) + >>> nums3 + [-4, -2, 0, 5] + """ + if left < right: + pivot_index = lomuto_partition(sorting, left, right) + quick_sort_lomuto_partition(sorting, left, pivot_index - 1) + quick_sort_lomuto_partition(sorting, pivot_index + 1, right) + + +def lomuto_partition(sorting: list, left: int, right: int) -> int: + """ + Example: + >>> lomuto_partition([1,5,7,6], 0, 3) + 2 + """ + pivot = sorting[right] + store_index = left + for i in range(left, right): + if sorting[i] < pivot: + sorting[store_index], sorting[i] = sorting[i], sorting[store_index] + store_index += 1 + sorting[right], sorting[store_index] = sorting[store_index], sorting[right] + return store_index + + def three_way_radix_quicksort(sorting: list) -> list: """ Three-way radix quicksort: From bfb5700c67d629140070212ad58457ea34e769b9 Mon Sep 17 00:00:00 2001 From: Liu Baolin--CN Date: Sat, 20 Feb 2021 02:15:19 +0800 Subject: [PATCH 0074/1543] Update LICENSE.md (#4210) --- LICENSE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE.md b/LICENSE.md index 3b7951527ab3..c3c2857cd312 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2020 The Algorithms +Copyright (c) 2016-2021 The Algorithms Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal From 6bb9a027bb4368d53d6bb3716007f67bdc8748f6 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Sat, 20 Feb 2021 18:39:39 +0530 Subject: [PATCH 0075/1543] Implementation of the algorithm for the Koch snowflake (#4207) * Add files via upload Implementation of the algorithm for the Koch snowflake * added underscore to variable names * added newline and comment I fixed the sorting of the imports and I added a comment to the plot-function to explain what it does and why it doesn't use a doctest. 
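
Two common alternatives to the digit-by-digit loop in get_set_bits_count() above are worth knowing: Python's built-in bin(n).count("1"), and Kernighan's trick, where n & (n - 1) clears the lowest set bit so the loop runs once per 1-bit instead of once per bit position. A standalone cross-check (kernighan_set_bits is an illustrative name, not part of the patch):

def kernighan_set_bits(number: int) -> int:
    # Each n & (n - 1) step removes exactly one set bit.
    count = 0
    while number:
        number &= number - 1
        count += 1
    return count

for n in (0, 21, 25, 37, 58, 256):
    assert kernighan_set_bits(n) == bin(n).count("1")

# Matches the doctests above: 25 -> 3, 37 -> 3, 21 -> 3, 58 -> 4, 0 -> 0, 256 -> 1.
assert [kernighan_set_bits(n) for n in (25, 37, 21, 58, 0, 256)] == [3, 3, 3, 4, 0, 1]
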
Thank you to user mrmaxguns for suggesting these changes. * fixed accidental newline in the middle of expression * improved looping * moved "koch_snowflake.py" from "other" to "graphics" * Update koch_snowflake.py Co-authored-by: Christian Clauss --- graphics/koch_snowflake.py | 116 +++++++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 graphics/koch_snowflake.py diff --git a/graphics/koch_snowflake.py b/graphics/koch_snowflake.py new file mode 100644 index 000000000000..07c1835b41ed --- /dev/null +++ b/graphics/koch_snowflake.py @@ -0,0 +1,116 @@ +""" +Description + The Koch snowflake is a fractal curve and one of the earliest fractals to + have been described. The Koch snowflake can be built up iteratively, in a + sequence of stages. The first stage is an equilateral triangle, and each + successive stage is formed by adding outward bends to each side of the + previous stage, making smaller equilateral triangles. + This can be achieved through the following steps for each line: + 1. divide the line segment into three segments of equal length. + 2. draw an equilateral triangle that has the middle segment from step 1 + as its base and points outward. + 3. remove the line segment that is the base of the triangle from step 2. + (description adapted from https://en.wikipedia.org/wiki/Koch_snowflake ) + (for a more detailed explanation and an implementation in the + Processing language, see https://natureofcode.com/book/chapter-8-fractals/ + #84-the-koch-curve-and-the-arraylist-technique ) + +Requirements (pip): + - matplotlib + - numpy +""" + + +from __future__ import annotations + +import matplotlib.pyplot as plt # type: ignore +import numpy + +# initial triangle of Koch snowflake +VECTOR_1 = numpy.array([0, 0]) +VECTOR_2 = numpy.array([0.5, 0.8660254]) +VECTOR_3 = numpy.array([1, 0]) +INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] + +# uncomment for simple Koch curve instead of Koch snowflake +# INITIAL_VECTORS = [VECTOR_1, VECTOR_3] + + +def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]: + """ + Go through the number of iterations determined by the argument "steps". + Be careful with high values (above 5) since the time to calculate increases + exponentially. + >>> iterate([numpy.array([0, 0]), numpy.array([1, 0])], 1) + [array([0, 0]), array([0.33333333, 0. ]), array([0.5 , \ +0.28867513]), array([0.66666667, 0. ]), array([1, 0])] + """ + vectors = initial_vectors + for i in range(steps): + vectors = iteration_step(vectors) + return vectors + + +def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]: + """ + Loops through each pair of adjacent vectors. Each line between two adjacent + vectors is divided into 4 segments by adding 3 additional vectors in-between + the original two vectors. The vector in the middle is constructed through a + 60 degree rotation so it is bent outwards. + >>> iteration_step([numpy.array([0, 0]), numpy.array([1, 0])]) + [array([0, 0]), array([0.33333333, 0. ]), array([0.5 , \ +0.28867513]), array([0.66666667, 0. 
]), array([1, 0])] + """ + new_vectors = [] + for i, start_vector in enumerate(vectors[:-1]): + end_vector = vectors[i + 1] + new_vectors.append(start_vector) + difference_vector = end_vector - start_vector + new_vectors.append(start_vector + difference_vector / 3) + new_vectors.append( + start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60) + ) + new_vectors.append(start_vector + difference_vector * 2 / 3) + new_vectors.append(vectors[-1]) + return new_vectors + + +def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray: + """ + Standard rotation of a 2D vector with a rotation matrix + (see https://en.wikipedia.org/wiki/Rotation_matrix ) + >>> rotate(numpy.array([1, 0]), 60) + array([0.5 , 0.8660254]) + >>> rotate(numpy.array([1, 0]), 90) + array([6.123234e-17, 1.000000e+00]) + """ + theta = numpy.radians(angle_in_degrees) + c, s = numpy.cos(theta), numpy.sin(theta) + rotation_matrix = numpy.array(((c, -s), (s, c))) + return numpy.dot(rotation_matrix, vector) + + +def plot(vectors: list[numpy.ndarray]) -> None: + """ + Utility function to plot the vectors using matplotlib.pyplot + No doctest was implemented since this function does not have a return value + """ + # avoid stretched display of graph + axes = plt.gca() + axes.set_aspect("equal") + + # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all + # y-coordinates as inputs, which are constructed from the vector-list using + # zip() + x_coordinates, y_coordinates = zip(*vectors) + plt.plot(x_coordinates, y_coordinates) + plt.show() + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + processed_vectors = iterate(INITIAL_VECTORS, 5) + plot(processed_vectors) From 81c46dfd55ad9c7326a0d0d231d2cf0caa691d34 Mon Sep 17 00:00:00 2001 From: Matthew Date: Sat, 20 Feb 2021 22:10:23 +0000 Subject: [PATCH 0076/1543] [mypy] Add/fix type annotations for quick_sort(#4085) (#4215) Co-authored-by: goodm2 <4qjpngu8mem8cz> --- sorts/quick_sort.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sorts/quick_sort.py b/sorts/quick_sort.py index c6687a7fa8d5..6f51f6eca7db 100644 --- a/sorts/quick_sort.py +++ b/sorts/quick_sort.py @@ -7,6 +7,7 @@ For manual testing run: python3 quick_sort.py """ +from typing import List def quick_sort(collection: list) -> list: @@ -26,8 +27,8 @@ def quick_sort(collection: list) -> list: if len(collection) < 2: return collection pivot = collection.pop() # Use the last element as the first pivot - greater = [] # All elements greater than pivot - lesser = [] # All elements less than or equal to pivot + greater: List[int] = [] # All elements greater than pivot + lesser: List[int] = [] # All elements less than or equal to pivot for element in collection: (greater if element > pivot else lesser).append(element) return quick_sort(lesser) + [pivot] + quick_sort(greater) From 2a6e4bbdb6593767d9c790a3cd35dcc6fed65739 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Mon, 22 Feb 2021 05:24:29 +0530 Subject: [PATCH 0077/1543] [mypy] Add/fix type annotations for "conways_game_of_life.py" & "one_dimensional.py" (#4216) Related Issue: #4052 --- cellular_automata/conways_game_of_life.py | 2 +- cellular_automata/one_dimensional.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cellular_automata/conways_game_of_life.py b/cellular_automata/conways_game_of_life.py index 321baa3a3794..dc349b7ac507 100644 --- a/cellular_automata/conways_game_of_life.py +++ b/cellular_automata/conways_game_of_life.py @@ -7,7 +7,7 @@ from 
typing import List -from PIL import Image +from PIL import Image # type: ignore # Define glider example GLIDER = [ diff --git a/cellular_automata/one_dimensional.py b/cellular_automata/one_dimensional.py index da77e444502f..5de2c5b994e3 100644 --- a/cellular_automata/one_dimensional.py +++ b/cellular_automata/one_dimensional.py @@ -6,7 +6,7 @@ from __future__ import annotations -from PIL import Image +from PIL import Image # type: ignore # Define the first generation of cells # fmt: off From f680806894d39265f810e7257d50aa0beaf2152e Mon Sep 17 00:00:00 2001 From: Hao LI <8520588+Leo-LiHao@users.noreply.github.com> Date: Mon, 22 Feb 2021 07:58:17 +0800 Subject: [PATCH 0078/1543] add type hints for avl_tree (#4214) Co-authored-by: LiHao --- data_structures/binary_tree/avl_tree.py | 155 +++++++++++++----------- 1 file changed, 87 insertions(+), 68 deletions(-) diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 3362610b9303..e0d3e4d438a8 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -8,84 +8,85 @@ import math import random +from typing import Any, List, Optional class my_queue: - def __init__(self): - self.data = [] - self.head = 0 - self.tail = 0 + def __init__(self) -> None: + self.data: List[Any] = [] + self.head: int = 0 + self.tail: int = 0 - def is_empty(self): + def is_empty(self) -> bool: return self.head == self.tail - def push(self, data): + def push(self, data: Any) -> None: self.data.append(data) self.tail = self.tail + 1 - def pop(self): + def pop(self) -> Any: ret = self.data[self.head] self.head = self.head + 1 return ret - def count(self): + def count(self) -> int: return self.tail - self.head - def print(self): + def print(self) -> None: print(self.data) print("**************") print(self.data[self.head : self.tail]) class my_node: - def __init__(self, data): + def __init__(self, data: Any) -> None: self.data = data - self.left = None - self.right = None - self.height = 1 + self.left: Optional[my_node] = None + self.right: Optional[my_node] = None + self.height: int = 1 - def get_data(self): + def get_data(self) -> Any: return self.data - def get_left(self): + def get_left(self) -> Optional["my_node"]: return self.left - def get_right(self): + def get_right(self) -> Optional["my_node"]: return self.right - def get_height(self): + def get_height(self) -> int: return self.height - def set_data(self, data): + def set_data(self, data: Any) -> None: self.data = data return - def set_left(self, node): + def set_left(self, node: Optional["my_node"]) -> None: self.left = node return - def set_right(self, node): + def set_right(self, node: Optional["my_node"]) -> None: self.right = node return - def set_height(self, height): + def set_height(self, height: int) -> None: self.height = height return -def get_height(node): +def get_height(node: Optional["my_node"]) -> int: if node is None: return 0 return node.get_height() -def my_max(a, b): +def my_max(a: int, b: int) -> int: if a > b: return a return b -def right_rotation(node): +def right_rotation(node: my_node) -> my_node: r""" A B / \ / \ @@ -98,6 +99,7 @@ def right_rotation(node): """ print("left rotation node:", node.get_data()) ret = node.get_left() + assert ret is not None node.set_left(ret.get_right()) ret.set_right(node) h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1 @@ -107,12 +109,13 @@ def right_rotation(node): return ret -def left_rotation(node): +def left_rotation(node: my_node) -> my_node: 
""" a mirror symmetry rotation of the left_rotation """ print("right rotation node:", node.get_data()) ret = node.get_right() + assert ret is not None node.set_right(ret.get_left()) ret.set_left(node) h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1 @@ -122,7 +125,7 @@ def left_rotation(node): return ret -def lr_rotation(node): +def lr_rotation(node: my_node) -> my_node: r""" A A Br / \ / \ / \ @@ -133,16 +136,20 @@ def lr_rotation(node): UB Bl RR = right_rotation LR = left_rotation """ - node.set_left(left_rotation(node.get_left())) + left_child = node.get_left() + assert left_child is not None + node.set_left(left_rotation(left_child)) return right_rotation(node) -def rl_rotation(node): - node.set_right(right_rotation(node.get_right())) +def rl_rotation(node: my_node) -> my_node: + right_child = node.get_right() + assert right_child is not None + node.set_right(right_rotation(right_child)) return left_rotation(node) -def insert_node(node, data): +def insert_node(node: Optional["my_node"], data: Any) -> Optional["my_node"]: if node is None: return my_node(data) if data < node.get_data(): @@ -150,8 +157,10 @@ def insert_node(node, data): if ( get_height(node.get_left()) - get_height(node.get_right()) == 2 ): # an unbalance detected + left_child = node.get_left() + assert left_child is not None if ( - data < node.get_left().get_data() + data < left_child.get_data() ): # new node is the left child of the left child node = right_rotation(node) else: @@ -159,7 +168,9 @@ def insert_node(node, data): else: node.set_right(insert_node(node.get_right(), data)) if get_height(node.get_right()) - get_height(node.get_left()) == 2: - if data < node.get_right().get_data(): + right_child = node.get_right() + assert right_child is not None + if data < right_child.get_data(): node = rl_rotation(node) else: node = left_rotation(node) @@ -168,52 +179,59 @@ def insert_node(node, data): return node -def get_rightMost(root): - while root.get_right() is not None: - root = root.get_right() +def get_rightMost(root: my_node) -> Any: + while True: + right_child = root.get_right() + if right_child is None: + break + root = right_child return root.get_data() -def get_leftMost(root): - while root.get_left() is not None: - root = root.get_left() +def get_leftMost(root: my_node) -> Any: + while True: + left_child = root.get_left() + if left_child is None: + break + root = left_child return root.get_data() -def del_node(root, data): +def del_node(root: my_node, data: Any) -> Optional["my_node"]: + left_child = root.get_left() + right_child = root.get_right() if root.get_data() == data: - if root.get_left() is not None and root.get_right() is not None: - temp_data = get_leftMost(root.get_right()) + if left_child is not None and right_child is not None: + temp_data = get_leftMost(right_child) root.set_data(temp_data) - root.set_right(del_node(root.get_right(), temp_data)) - elif root.get_left() is not None: - root = root.get_left() + root.set_right(del_node(right_child, temp_data)) + elif left_child is not None: + root = left_child + elif right_child is not None: + root = right_child else: - root = root.get_right() + return None elif root.get_data() > data: - if root.get_left() is None: + if left_child is None: print("No such data") return root else: - root.set_left(del_node(root.get_left(), data)) - elif root.get_data() < data: - if root.get_right() is None: + root.set_left(del_node(left_child, data)) + else: # root.get_data() < data + if right_child is None: return root else: - 
root.set_right(del_node(root.get_right(), data)) - if root is None: - return root - if get_height(root.get_right()) - get_height(root.get_left()) == 2: - if get_height(root.get_right().get_right()) > get_height( - root.get_right().get_left() - ): + root.set_right(del_node(right_child, data)) + + if get_height(right_child) - get_height(left_child) == 2: + assert right_child is not None + if get_height(right_child.get_right()) > get_height(right_child.get_left()): root = left_rotation(root) else: root = rl_rotation(root) - elif get_height(root.get_right()) - get_height(root.get_left()) == -2: - if get_height(root.get_left().get_left()) > get_height( - root.get_left().get_right() - ): + elif get_height(right_child) - get_height(left_child) == -2: + assert left_child is not None + if get_height(left_child.get_left()) > get_height(left_child.get_right()): root = right_rotation(root) else: root = lr_rotation(root) @@ -256,25 +274,26 @@ class AVLtree: ************************************* """ - def __init__(self): - self.root = None + def __init__(self) -> None: + self.root: Optional[my_node] = None - def get_height(self): - # print("yyy") + def get_height(self) -> int: return get_height(self.root) - def insert(self, data): + def insert(self, data: Any) -> None: print("insert:" + str(data)) self.root = insert_node(self.root, data) - def del_node(self, data): + def del_node(self, data: Any) -> None: print("delete:" + str(data)) if self.root is None: print("Tree is empty!") return self.root = del_node(self.root, data) - def __str__(self): # a level traversale, gives a more intuitive look on the tree + def __str__( + self, + ) -> str: # a level traversale, gives a more intuitive look on the tree output = "" q = my_queue() q.push(self.root) @@ -308,7 +327,7 @@ def __str__(self): # a level traversale, gives a more intuitive look on the tre return output -def _test(): +def _test() -> None: import doctest doctest.testmod() From 61f3119467584de53a2f4395e3c03a8e12d67d30 Mon Sep 17 00:00:00 2001 From: CarsonHam Date: Mon, 22 Feb 2021 23:53:49 -0600 Subject: [PATCH 0079/1543] Change occurrences of str.format to f-strings (#4118) * f-string update rsa_cipher.py * f-string update rsa_key_generator.py * f-string update burrows_wheeler.py * f-string update non_recursive_segment_tree.py * f-string update red_black_tree.py * f-string update deque_doubly.py * f-string update climbing_stairs.py * f-string update iterating_through_submasks.py * f-string update knn_sklearn.py * f-string update 3n_plus_1.py * f-string update quadratic_equations_complex_numbers.py * f-string update nth_fibonacci_using_matrix_exponentiation.py * f-string update sherman_morrison.py * f-string update levenshtein_distance.py * fix lines that were too long --- ciphers/rsa_cipher.py | 2 +- ciphers/rsa_key_generator.py | 4 ++-- compression/burrows_wheeler.py | 13 +++++++------ .../binary_tree/non_recursive_segment_tree.py | 2 +- data_structures/binary_tree/red_black_tree.py | 8 +++++--- data_structures/linked_list/deque_doubly.py | 4 ++-- dynamic_programming/climbing_stairs.py | 5 +++-- dynamic_programming/iterating_through_submasks.py | 5 +++-- machine_learning/knn_sklearn.py | 4 ++-- maths/3n_plus_1.py | 2 +- maths/quadratic_equations_complex_numbers.py | 4 ++-- matrix/nth_fibonacci_using_matrix_exponentiation.py | 10 +++++----- matrix/sherman_morrison.py | 6 ++---- strings/levenshtein_distance.py | 6 +----- 14 files changed, 37 insertions(+), 38 deletions(-) diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py index 
57c916a44d4b..0df37d6ea3ff 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ -118,7 +118,7 @@ def encryptAndWriteToFile( for i in range(len(encryptedBlocks)): encryptedBlocks[i] = str(encryptedBlocks[i]) encryptedContent = ",".join(encryptedBlocks) - encryptedContent = "{}_{}_{}".format(len(message), blockSize, encryptedContent) + encryptedContent = f"{len(message)}_{blockSize}_{encryptedContent}" with open(messageFilename, "w") as fo: fo.write(encryptedContent) return encryptedContent diff --git a/ciphers/rsa_key_generator.py b/ciphers/rsa_key_generator.py index 5693aa637ee9..e456d9d9f6f1 100644 --- a/ciphers/rsa_key_generator.py +++ b/ciphers/rsa_key_generator.py @@ -49,11 +49,11 @@ def makeKeyFiles(name: int, keySize: int) -> None: publicKey, privateKey = generateKey(keySize) print("\nWriting public key to file %s_pubkey.txt..." % name) with open("%s_pubkey.txt" % name, "w") as out_file: - out_file.write("{},{},{}".format(keySize, publicKey[0], publicKey[1])) + out_file.write(f"{keySize},{publicKey[0]},{publicKey[1]}") print("Writing private key to file %s_privkey.txt..." % name) with open("%s_privkey.txt" % name, "w") as out_file: - out_file.write("{},{},{}".format(keySize, privateKey[0], privateKey[1])) + out_file.write(f"{keySize},{privateKey[0]},{privateKey[1]}") if __name__ == "__main__": diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py index 1a6610915e65..7d705af7428e 100644 --- a/compression/burrows_wheeler.py +++ b/compression/burrows_wheeler.py @@ -157,11 +157,12 @@ def reverse_bwt(bwt_string: str, idx_original_string: int) -> str: entry_msg = "Provide a string that I will generate its BWT transform: " s = input(entry_msg).strip() result = bwt_transform(s) - bwt_output_msg = "Burrows Wheeler transform for string '{}' results in '{}'" - print(bwt_output_msg.format(s, result["bwt_string"])) + print( + f"Burrows Wheeler transform for string '{s}' results " + f"in '{result['bwt_string']}'" + ) original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"]) - fmt = ( - "Reversing Burrows Wheeler transform for entry '{}' we get original" - " string '{}'" + print( + f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' " + f"we get original string '{original_string}'" ) - print(fmt.format(result["bwt_string"], original_string)) diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index 064e5aded7b4..c914079e0a8d 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -49,7 +49,7 @@ def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None: :param arr: list of elements for the segment tree :param fnc: commutative function for combine two elements - >>> SegmentTree(['a', 'b', 'c'], lambda a, b: '{}{}'.format(a, b)).query(0, 2) + >>> SegmentTree(['a', 'b', 'c'], lambda a, b: f'{a}{b}').query(0, 2) 'abc' >>> SegmentTree([(1, 2), (2, 3), (3, 4)], ... 
lambda a, b: (a[0] + b[0], a[1] + b[1])).query(0, 2) diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index 5d721edfa45b..de971a712fc1 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -475,11 +475,13 @@ def __repr__(self) -> str: from pprint import pformat if self.left is None and self.right is None: - return "'{} {}'".format(self.label, (self.color and "red") or "blk") + return f"'{self.label} {(self.color and 'red') or 'blk'}'" return pformat( { - "%s %s" - % (self.label, (self.color and "red") or "blk"): (self.left, self.right) + f"{self.label} {(self.color and 'red') or 'blk'}": ( + self.left, + self.right, + ) }, indent=1, ) diff --git a/data_structures/linked_list/deque_doubly.py b/data_structures/linked_list/deque_doubly.py index 894f91d561cc..c9ae8b3d1ba2 100644 --- a/data_structures/linked_list/deque_doubly.py +++ b/data_structures/linked_list/deque_doubly.py @@ -20,8 +20,8 @@ def __init__(self, link_p, element, link_n): self._next = link_n def has_next_and_prev(self): - return " Prev -> {}, Next -> {}".format( - self._prev is not None, self._next is not None + return ( + f" Prev -> {self._prev is not None}, Next -> {self._next is not None}" ) def __init__(self): diff --git a/dynamic_programming/climbing_stairs.py b/dynamic_programming/climbing_stairs.py index 79605261f981..048d57aed1be 100644 --- a/dynamic_programming/climbing_stairs.py +++ b/dynamic_programming/climbing_stairs.py @@ -25,8 +25,9 @@ def climb_stairs(n: int) -> int: ... AssertionError: n needs to be positive integer, your input -7 """ - fmt = "n needs to be positive integer, your input {}" - assert isinstance(n, int) and n > 0, fmt.format(n) + assert ( + isinstance(n, int) and n > 0 + ), f"n needs to be positive integer, your input {n}" if n == 1: return 1 dp = [0] * (n + 1) diff --git a/dynamic_programming/iterating_through_submasks.py b/dynamic_programming/iterating_through_submasks.py index 855af61d6707..21c64dba4ecc 100644 --- a/dynamic_programming/iterating_through_submasks.py +++ b/dynamic_programming/iterating_through_submasks.py @@ -37,8 +37,9 @@ def list_of_submasks(mask: int) -> list[int]: """ - fmt = "mask needs to be positive integer, your input {}" - assert isinstance(mask, int) and mask > 0, fmt.format(mask) + assert ( + isinstance(mask, int) and mask > 0 + ), f"mask needs to be positive integer, your input {mask}" """ first submask iterated will be mask itself then operation will be performed diff --git a/machine_learning/knn_sklearn.py b/machine_learning/knn_sklearn.py index 9a9114102ff3..4a621a4244b6 100644 --- a/machine_learning/knn_sklearn.py +++ b/machine_learning/knn_sklearn.py @@ -26,6 +26,6 @@ prediction = knn.predict(X_new) print( - "\nNew array: \n {}" - "\n\nTarget Names Prediction: \n {}".format(X_new, iris["target_names"][prediction]) + f"\nNew array: \n {X_new}\n\nTarget Names Prediction: \n" + f" {iris['target_names'][prediction]}" ) diff --git a/maths/3n_plus_1.py b/maths/3n_plus_1.py index 28c9fd7b426f..e455a158e619 100644 --- a/maths/3n_plus_1.py +++ b/maths/3n_plus_1.py @@ -9,7 +9,7 @@ def n31(a: int) -> tuple[list[int], int]: """ if not isinstance(a, int): - raise TypeError("Must be int, not {}".format(type(a).__name__)) + raise TypeError(f"Must be int, not {type(a).__name__}") if a < 1: raise ValueError(f"Given integer must be greater than 1, not {a}") diff --git a/maths/quadratic_equations_complex_numbers.py b/maths/quadratic_equations_complex_numbers.py 
index 01a411bc560d..1035171e4ec3 100644 --- a/maths/quadratic_equations_complex_numbers.py +++ b/maths/quadratic_equations_complex_numbers.py @@ -30,8 +30,8 @@ def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]: def main(): - solutions = quadratic_roots(a=5, b=6, c=1) - print("The solutions are: {} and {}".format(*solutions)) + solution1, solution2 = quadratic_roots(a=5, b=6, c=1) + print(f"The solutions are: {solution1} and {solution2}") if __name__ == "__main__": diff --git a/matrix/nth_fibonacci_using_matrix_exponentiation.py b/matrix/nth_fibonacci_using_matrix_exponentiation.py index 296c36e88691..8c39de0f23b6 100644 --- a/matrix/nth_fibonacci_using_matrix_exponentiation.py +++ b/matrix/nth_fibonacci_using_matrix_exponentiation.py @@ -71,13 +71,13 @@ def nth_fibonacci_bruteforce(n): def main(): - fmt = ( - "{} fibonacci number using matrix exponentiation is {} and using bruteforce " - "is {}\n" - ) for ordinal in "0th 1st 2nd 3rd 10th 100th 1000th".split(): n = int("".join(c for c in ordinal if c in "0123456789")) # 1000th --> 1000 - print(fmt.format(ordinal, nth_fibonacci_matrix(n), nth_fibonacci_bruteforce(n))) + print( + f"{ordinal} fibonacci number using matrix exponentiation is " + f"{nth_fibonacci_matrix(n)} and using bruteforce is " + f"{nth_fibonacci_bruteforce(n)}\n" + ) # from timeit import timeit # print(timeit("nth_fibonacci_matrix(1000000)", # "from main import nth_fibonacci_matrix", number=5)) diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 4920ec6c13db..3466b3d4a01f 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -175,9 +175,7 @@ def __mul__(self, another): result[r, c] += self[r, i] * another[i, c] return result else: - raise TypeError( - "Unsupported type given for another ({})".format(type(another)) - ) + raise TypeError(f"Unsupported type given for another ({type(another)})") def transpose(self): """ @@ -260,7 +258,7 @@ def test1(): print(f"v is {v}") print("uv^T is %s" % (u * v.transpose())) # Sherman Morrison - print("(a + uv^T)^(-1) is {}".format(ainv.ShermanMorrison(u, v))) + print(f"(a + uv^T)^(-1) is {ainv.ShermanMorrison(u, v)}") def test2(): import doctest diff --git a/strings/levenshtein_distance.py b/strings/levenshtein_distance.py index 54948a96670b..540a21c93da3 100644 --- a/strings/levenshtein_distance.py +++ b/strings/levenshtein_distance.py @@ -69,8 +69,4 @@ def levenshtein_distance(first_word: str, second_word: str) -> int: second_word = input("Enter the second word:\n").strip() result = levenshtein_distance(first_word, second_word) - print( - "Levenshtein distance between {} and {} is {}".format( - first_word, second_word, result - ) - ) + print(f"Levenshtein distance between {first_word} and {second_word} is {result}") From 02d9bc66c16a9cc851200f149fabbb07df611525 Mon Sep 17 00:00:00 2001 From: Leyza <56138111+Leyza@users.noreply.github.com> Date: Tue, 23 Feb 2021 01:15:00 -0500 Subject: [PATCH 0080/1543] Added binary shifts and twos complement functions to bit manipulation (#4068) * Added binary shifts and twos complement functions to bit manipulation package * Fixed problem representing 0 wrong * More testing * Fixed problems * Fixed formatting * More format fixes * Format fixes * Fixed docstrings and added url * Minor change to url --- bit_manipulation/binary_shifts.py | 111 +++++++++++++++++++++ bit_manipulation/binary_twos_complement.py | 43 ++++++++ 2 files changed, 154 insertions(+) create mode 100644 bit_manipulation/binary_shifts.py create mode 100644 
bit_manipulation/binary_twos_complement.py diff --git a/bit_manipulation/binary_shifts.py b/bit_manipulation/binary_shifts.py new file mode 100644 index 000000000000..fe62880f941c --- /dev/null +++ b/bit_manipulation/binary_shifts.py @@ -0,0 +1,111 @@ +# Information on binary shifts: +# https://docs.python.org/3/library/stdtypes.html#bitwise-operations-on-integer-types +# https://www.interviewcake.com/concept/java/bit-shift + + +def logical_left_shift(number: int, shift_amount: int) -> str: + """ + Take in 2 positive integers. + 'number' is the integer to be logically left shifted 'shift_amount' times. + i.e. (number << shift_amount) + Return the shifted binary representation. + + >>> logical_left_shift(0, 1) + '0b00' + >>> logical_left_shift(1, 1) + '0b10' + >>> logical_left_shift(1, 5) + '0b100000' + >>> logical_left_shift(17, 2) + '0b1000100' + >>> logical_left_shift(1983, 4) + '0b111101111110000' + >>> logical_left_shift(1, -1) + Traceback (most recent call last): + ... + ValueError: both inputs must be positive integers + """ + if number < 0 or shift_amount < 0: + raise ValueError("both inputs must be positive integers") + + binary_number = str(bin(number)) + binary_number += "0" * shift_amount + return binary_number + + +def logical_right_shift(number: int, shift_amount: int) -> str: + """ + Take in positive 2 integers. + 'number' is the integer to be logically right shifted 'shift_amount' times. + i.e. (number >>> shift_amount) + Return the shifted binary representation. + + >>> logical_right_shift(0, 1) + '0b0' + >>> logical_right_shift(1, 1) + '0b0' + >>> logical_right_shift(1, 5) + '0b0' + >>> logical_right_shift(17, 2) + '0b100' + >>> logical_right_shift(1983, 4) + '0b1111011' + >>> logical_right_shift(1, -1) + Traceback (most recent call last): + ... + ValueError: both inputs must be positive integers + """ + if number < 0 or shift_amount < 0: + raise ValueError("both inputs must be positive integers") + + binary_number = str(bin(number))[2:] + if shift_amount >= len(binary_number): + return "0b0" + shifted_binary_number = binary_number[: len(binary_number) - shift_amount] + return "0b" + shifted_binary_number + + +def arithmetic_right_shift(number: int, shift_amount: int) -> str: + """ + Take in 2 integers. + 'number' is the integer to be arithmetically right shifted 'shift_amount' times. + i.e. (number >> shift_amount) + Return the shifted binary representation. 
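+ Unlike a logical shift, the sign bit is copied into the vacated
+ positions, so the result keeps the sign of 'number'.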
+ + >>> arithmetic_right_shift(0, 1) + '0b00' + >>> arithmetic_right_shift(1, 1) + '0b00' + >>> arithmetic_right_shift(-1, 1) + '0b11' + >>> arithmetic_right_shift(17, 2) + '0b000100' + >>> arithmetic_right_shift(-17, 2) + '0b111011' + >>> arithmetic_right_shift(-1983, 4) + '0b111110000100' + """ + if number >= 0: # Get binary representation of positive number + binary_number = "0" + str(bin(number)).strip("-")[2:] + else: # Get binary (2's complement) representation of negative number + binary_number_length = len(bin(number)[3:]) # Find 2's complement of number + binary_number = bin(abs(number) - (1 << binary_number_length))[3:] + binary_number = ( + ("1" + "0" * (binary_number_length - len(binary_number)) + binary_number) + if number < 0 + else "0" + ) + + if shift_amount >= len(binary_number): + return "0b" + binary_number[0] * len(binary_number) + return ( + "0b" + + binary_number[0] * shift_amount + + binary_number[: len(binary_number) - shift_amount] + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/bit_manipulation/binary_twos_complement.py b/bit_manipulation/binary_twos_complement.py new file mode 100644 index 000000000000..2c064ec142d7 --- /dev/null +++ b/bit_manipulation/binary_twos_complement.py @@ -0,0 +1,43 @@ +# Information on 2's complement: https://en.wikipedia.org/wiki/Two%27s_complement + + +def twos_complement(number: int) -> str: + """ + Take in a negative integer 'number'. + Return the two's complement representation of 'number'. + + >>> twos_complement(0) + '0b0' + >>> twos_complement(-1) + '0b11' + >>> twos_complement(-5) + '0b1011' + >>> twos_complement(-17) + '0b101111' + >>> twos_complement(-207) + '0b100110001' + >>> twos_complement(1) + Traceback (most recent call last): + ... + ValueError: input must be a negative integer + """ + if number > 0: + raise ValueError("input must be a negative integer") + binary_number_length = len(bin(number)[3:]) + twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:] + twos_complement_number = ( + ( + "1" + + "0" * (binary_number_length - len(twos_complement_number)) + + twos_complement_number + ) + if number < 0 + else "0" + ) + return "0b" + twos_complement_number + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a4726ca248b3cf0470e5453ac1d9878eded38d27 Mon Sep 17 00:00:00 2001 From: Matthew Date: Tue, 23 Feb 2021 09:02:30 +0000 Subject: [PATCH 0081/1543] [mypy]Correction of all errors in the sorts directory (#4224) * [mypy] Add/fix type annotations for recursive_insertion_sort(#4085) * [mypy] Add/fix type annotations for bucket_sort(#4085) * [mypy] Reworked code for cocktail_shaker_sort so that missing return statement error is resolved(#4085) * [mypy] Add/fix type annotations for patience_sort(#4085) * [mypy] Add/fix type annotations for radix_sort(#4085) Co-authored-by: goodm2 <4qjpngu8mem8cz> --- sorts/bucket_sort.py | 3 ++- sorts/cocktail_shaker_sort.py | 3 ++- sorts/patience_sort.py | 3 ++- sorts/radix_sort.py | 2 +- sorts/recursive_insertion_sort.py | 8 +++++--- 5 files changed, 12 insertions(+), 7 deletions(-) diff --git a/sorts/bucket_sort.py b/sorts/bucket_sort.py index a0566be662e3..1ac76774f4ba 100644 --- a/sorts/bucket_sort.py +++ b/sorts/bucket_sort.py @@ -27,6 +27,7 @@ Source: https://en.wikipedia.org/wiki/Bucket_sort """ +from typing import List def bucket_sort(my_list: list) -> list: @@ -51,7 +52,7 @@ def bucket_sort(my_list: list) -> list: return [] min_value, max_value = min(my_list), max(my_list) bucket_count = 
int(max_value - min_value) + 1 - buckets = [[] for _ in range(bucket_count)] + buckets: List[list] = [[] for _ in range(bucket_count)] for i in range(len(my_list)): buckets[(int(my_list[i] - min_value) // bucket_count)].append(my_list[i]) diff --git a/sorts/cocktail_shaker_sort.py b/sorts/cocktail_shaker_sort.py index 42015abc5f97..b738ff31d768 100644 --- a/sorts/cocktail_shaker_sort.py +++ b/sorts/cocktail_shaker_sort.py @@ -33,7 +33,8 @@ def cocktail_shaker_sort(unsorted: list) -> list: swapped = True if not swapped: - return unsorted + break + return unsorted if __name__ == "__main__": diff --git a/sorts/patience_sort.py b/sorts/patience_sort.py index f4e35d9a0ac6..87f5a4078612 100644 --- a/sorts/patience_sort.py +++ b/sorts/patience_sort.py @@ -1,6 +1,7 @@ from bisect import bisect_left from functools import total_ordering from heapq import merge +from typing import List """ A pure Python implementation of the patience sort algorithm @@ -43,7 +44,7 @@ def patience_sort(collection: list) -> list: >>> patience_sort([-3, -17, -48]) [-48, -17, -3] """ - stacks = [] + stacks: List[Stack] = [] # sort into stacks for element in collection: new_stacks = Stack([element]) diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py index 57dbbaa79076..b802b5278119 100644 --- a/sorts/radix_sort.py +++ b/sorts/radix_sort.py @@ -30,7 +30,7 @@ def radix_sort(list_of_ints: List[int]) -> List[int]: max_digit = max(list_of_ints) while placement <= max_digit: # declare and initialize empty buckets - buckets = [list() for _ in range(RADIX)] + buckets: List[list] = [list() for _ in range(RADIX)] # split list_of_ints between the buckets for i in list_of_ints: tmp = int((i / placement) % RADIX) diff --git a/sorts/recursive_insertion_sort.py b/sorts/recursive_insertion_sort.py index 66dd08157df1..89f88b4a961b 100644 --- a/sorts/recursive_insertion_sort.py +++ b/sorts/recursive_insertion_sort.py @@ -4,6 +4,8 @@ from __future__ import annotations +from typing import List + def rec_insertion_sort(collection: list, n: int): """ @@ -70,6 +72,6 @@ def insert_next(collection: list, index: int): if __name__ == "__main__": numbers = input("Enter integers separated by spaces: ") - numbers = [int(num) for num in numbers.split()] - rec_insertion_sort(numbers, len(numbers)) - print(numbers) + number_list: List[int] = [int(num) for num in numbers.split()] + rec_insertion_sort(number_list, len(number_list)) + print(number_list) From 7df393f123d4d23808de7ba367c4dbf3d76851ee Mon Sep 17 00:00:00 2001 From: algobytewise Date: Tue, 23 Feb 2021 14:45:04 +0530 Subject: [PATCH 0082/1543] mypy-fix for bezier_curve.py (#4220) --- graphics/bezier_curve.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 295ff47e8cdc..2bb764fdc916 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -2,7 +2,7 @@ # https://www.tutorialspoint.com/computer_graphics/computer_graphics_curves.htm from __future__ import annotations -from scipy.special import comb +from scipy.special import comb # type: ignore class BezierCurve: @@ -78,7 +78,7 @@ def plot_curve(self, step_size: float = 0.01): step_size: defines the step(s) at which to evaluate the Bezier curve. The smaller the step size, the finer the curve produced. 
""" - from matplotlib import pyplot as plt + from matplotlib import pyplot as plt # type: ignore to_plot_x: list[float] = [] # x coordinates of points to plot to_plot_y: list[float] = [] # y coordinates of points to plot From 4c6b92f30f1fb964b08123adce4345efd017b1fe Mon Sep 17 00:00:00 2001 From: algobytewise Date: Tue, 23 Feb 2021 17:59:56 +0530 Subject: [PATCH 0083/1543] Add Mandelbrot algorithm --- graphics/mandelbrot.py | 150 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 graphics/mandelbrot.py diff --git a/graphics/mandelbrot.py b/graphics/mandelbrot.py new file mode 100644 index 000000000000..21a70a56f17b --- /dev/null +++ b/graphics/mandelbrot.py @@ -0,0 +1,150 @@ +""" +The Mandelbrot set is the set of complex numbers "c" for which the series +"z_(n+1) = z_n * z_n + c" does not diverge, i.e. remains bounded. Thus, a +complex number "c" is a member of the Mandelbrot set if, when starting with +"z_0 = 0" and applying the iteration repeatedly, the absolute value of +"z_n" remains bounded for all "n > 0". Complex numbers can be written as +"a + b*i": "a" is the real component, usually drawn on the x-axis, and "b*i" +is the imaginary component, usually drawn on the y-axis. Most visualizations +of the Mandelbrot set use a color-coding to indicate after how many steps in +the series the numbers outside the set diverge. Images of the Mandelbrot set +exhibit an elaborate and infinitely complicated boundary that reveals +progressively ever-finer recursive detail at increasing magnifications, making +the boundary of the Mandelbrot set a fractal curve. +(description adapted from https://en.wikipedia.org/wiki/Mandelbrot_set ) +(see also https://en.wikipedia.org/wiki/Plotting_algorithms_for_the_Mandelbrot_set ) +""" + + +import colorsys + +from PIL import Image # type: ignore + + +def getDistance(x: float, y: float, max_step: int) -> float: + """ + Return the relative distance (= step/max_step) after which the complex number + constituted by this x-y-pair diverges. Members of the Mandelbrot set do not + diverge so their distance is 1. + + >>> getDistance(0, 0, 50) + 1.0 + >>> getDistance(0.5, 0.5, 50) + 0.061224489795918366 + >>> getDistance(2, 0, 50) + 0.0 + """ + a = x + b = y + for step in range(max_step): + a_new = a * a - b * b + x + b = 2 * a * b + y + a = a_new + + # divergence happens for all complex number with an absolute value + # greater than 4 + if a * a + b * b > 4: + break + return step / (max_step - 1) + + +def get_black_and_white_rgb(distance: float) -> tuple: + """ + Black&white color-coding that ignores the relative distance. The Mandelbrot + set is black, everything else is white. + + >>> get_black_and_white_rgb(0) + (255, 255, 255) + >>> get_black_and_white_rgb(0.5) + (255, 255, 255) + >>> get_black_and_white_rgb(1) + (0, 0, 0) + """ + if distance == 1: + return (0, 0, 0) + else: + return (255, 255, 255) + + +def get_color_coded_rgb(distance: float) -> tuple: + """ + Color-coding taking the relative distance into account. The Mandelbrot set + is black. 
+ + >>> get_color_coded_rgb(0) + (255, 0, 0) + >>> get_color_coded_rgb(0.5) + (0, 255, 255) + >>> get_color_coded_rgb(1) + (0, 0, 0) + """ + if distance == 1: + return (0, 0, 0) + else: + return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1)) + + +def get_image( + image_width: int = 800, + image_height: int = 600, + figure_center_x: float = -0.6, + figure_center_y: float = 0, + figure_width: float = 3.2, + max_step: int = 50, + use_distance_color_coding: bool = True, +) -> Image.Image: + """ + Function to generate the image of the Mandelbrot set. Two types of coordinates + are used: image-coordinates that refer to the pixels and figure-coordinates + that refer to the complex numbers inside and outside the Mandelbrot set. The + figure-coordinates in the arguments of this function determine which section + of the Mandelbrot set is viewed. The main area of the Mandelbrot set is + roughly between "-1.5 < x < 0.5" and "-1 < y < 1" in the figure-coordinates. + + >>> get_image().load()[0,0] + (255, 0, 0) + >>> get_image(use_distance_color_coding = False).load()[0,0] + (255, 255, 255) + """ + img = Image.new("RGB", (image_width, image_height)) + pixels = img.load() + + # loop through the image-coordinates + for image_x in range(image_width): + for image_y in range(image_height): + + # determine the figure-coordinates based on the image-coordinates + figure_height = figure_width / image_width * image_height + figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width + figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height + + distance = getDistance(figure_x, figure_y, max_step) + + # color the corresponding pixel based on the selected coloring-function + if use_distance_color_coding: + pixels[image_x, image_y] = get_color_coded_rgb(distance) + else: + pixels[image_x, image_y] = get_black_and_white_rgb(distance) + + return img + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # colored version, full figure + img = get_image() + + # uncomment for colored version, different section, zoomed in + # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, + # figure_width = 0.8) + + # uncomment for black and white version, full figure + # img = get_image(use_distance_color_coding = False) + + # uncomment to save the image + # img.save("mandelbrot.png") + + img.show() From 7bf1d622ef13bb67de262c7ad2e5061bd501880b Mon Sep 17 00:00:00 2001 From: algobytewise Date: Tue, 23 Feb 2021 18:04:42 +0530 Subject: [PATCH 0084/1543] snake_case-fix --- graphics/mandelbrot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphics/mandelbrot.py b/graphics/mandelbrot.py index 21a70a56f17b..a0cb8fe88027 100644 --- a/graphics/mandelbrot.py +++ b/graphics/mandelbrot.py @@ -21,7 +21,7 @@ from PIL import Image # type: ignore -def getDistance(x: float, y: float, max_step: int) -> float: +def get_distance(x: float, y: float, max_step: int) -> float: """ Return the relative distance (= step/max_step) after which the complex number constituted by this x-y-pair diverges. 
Members of the Mandelbrot set do not From 71b1202d040c2ed1d57e6650a400a1a9502b15ce Mon Sep 17 00:00:00 2001 From: algobytewise Date: Tue, 23 Feb 2021 18:08:16 +0530 Subject: [PATCH 0085/1543] fixed-renaming --- graphics/mandelbrot.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/graphics/mandelbrot.py b/graphics/mandelbrot.py index a0cb8fe88027..de795bb3fc6f 100644 --- a/graphics/mandelbrot.py +++ b/graphics/mandelbrot.py @@ -27,11 +27,11 @@ def get_distance(x: float, y: float, max_step: int) -> float: constituted by this x-y-pair diverges. Members of the Mandelbrot set do not diverge so their distance is 1. - >>> getDistance(0, 0, 50) + >>> get_distance(0, 0, 50) 1.0 - >>> getDistance(0.5, 0.5, 50) + >>> get_distance(0.5, 0.5, 50) 0.061224489795918366 - >>> getDistance(2, 0, 50) + >>> get_distance(2, 0, 50) 0.0 """ a = x @@ -118,7 +118,7 @@ def get_image( figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height - distance = getDistance(figure_x, figure_y, max_step) + distance = get_distance(figure_x, figure_y, max_step) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: From 67b33a295bc0a18f9fbdeb393724835b2645dbd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=9C=E8=BF=9C=E8=B6=85?= Date: Fri, 26 Feb 2021 09:01:50 +0800 Subject: [PATCH 0086/1543] Optimization shell sort (#4119) * optimization * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- sorts/shell_sort.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sorts/shell_sort.py b/sorts/shell_sort.py index 2e749e43d056..10ae9ba407ec 100644 --- a/sorts/shell_sort.py +++ b/sorts/shell_sort.py @@ -26,7 +26,8 @@ def shell_sort(collection): while j >= gap and collection[j - gap] > insert_value: collection[j] = collection[j - gap] j -= gap - collection[j] = insert_value + if j != i: + collection[j] = insert_value return collection From 4c76e3cba07b3252e97113bbf809d921058482fd Mon Sep 17 00:00:00 2001 From: algobytewise Date: Fri, 26 Feb 2021 19:00:35 +0530 Subject: [PATCH 0087/1543] [mypy] Added/fixed type annotations for "rotate_matrix.py" & "test_matrix_operation.py" (#4221) * [mypy] Added/fixed type annotations for "rotate_matrix.py" * [mypy] Added/fixed type annotations for "test_matrix_operation.py" --- matrix/rotate_matrix.py | 18 ++++++++++-------- matrix/tests/test_matrix_operation.py | 2 +- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/matrix/rotate_matrix.py b/matrix/rotate_matrix.py index 6daf7e0cf2c5..f638597ae35d 100644 --- a/matrix/rotate_matrix.py +++ b/matrix/rotate_matrix.py @@ -5,8 +5,10 @@ https://stackoverflow.com/questions/42519/how-do-you-rotate-a-two-dimensional-array """ +from __future__ import annotations -def make_matrix(row_size: int = 4) -> [[int]]: + +def make_matrix(row_size: int = 4) -> list[list]: """ >>> make_matrix() [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]] @@ -23,7 +25,7 @@ def make_matrix(row_size: int = 4) -> [[int]]: return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)] -def rotate_90(matrix: [[]]) -> [[]]: +def rotate_90(matrix: list[list]) -> list[list]: """ >>> rotate_90(make_matrix()) [[4, 8, 12, 16], [3, 7, 11, 15], [2, 6, 10, 14], [1, 5, 9, 13]] @@ -35,7 +37,7 @@ def rotate_90(matrix: [[]]) -> [[]]: # OR.. 
transpose(reverse_column(matrix)) -def rotate_180(matrix: [[]]) -> [[]]: +def rotate_180(matrix: list[list]) -> list[list]: """ >>> rotate_180(make_matrix()) [[16, 15, 14, 13], [12, 11, 10, 9], [8, 7, 6, 5], [4, 3, 2, 1]] @@ -47,7 +49,7 @@ def rotate_180(matrix: [[]]) -> [[]]: # OR.. reverse_column(reverse_row(matrix)) -def rotate_270(matrix: [[]]) -> [[]]: +def rotate_270(matrix: list[list]) -> list[list]: """ >>> rotate_270(make_matrix()) [[13, 9, 5, 1], [14, 10, 6, 2], [15, 11, 7, 3], [16, 12, 8, 4]] @@ -59,22 +61,22 @@ def rotate_270(matrix: [[]]) -> [[]]: # OR.. transpose(reverse_row(matrix)) -def transpose(matrix: [[]]) -> [[]]: +def transpose(matrix: list[list]) -> list[list]: matrix[:] = [list(x) for x in zip(*matrix)] return matrix -def reverse_row(matrix: [[]]) -> [[]]: +def reverse_row(matrix: list[list]) -> list[list]: matrix[:] = matrix[::-1] return matrix -def reverse_column(matrix: [[]]) -> [[]]: +def reverse_column(matrix: list[list]) -> list[list]: matrix[:] = [x[::-1] for x in matrix] return matrix -def print_matrix(matrix: [[]]) -> [[]]: +def print_matrix(matrix: list[list]) -> None: for i in matrix: print(*i) diff --git a/matrix/tests/test_matrix_operation.py b/matrix/tests/test_matrix_operation.py index 3500dfeb0641..65b35fd7e78b 100644 --- a/matrix/tests/test_matrix_operation.py +++ b/matrix/tests/test_matrix_operation.py @@ -12,7 +12,7 @@ import sys import numpy as np -import pytest +import pytest # type: ignore # Custom/local libraries from matrix import matrix_operation as matop From 0435128cc097726bf54db2b34bdda61b546ecbe2 Mon Sep 17 00:00:00 2001 From: Ayush Bisht <61404154+ayushbisht2001@users.noreply.github.com> Date: Tue, 2 Mar 2021 03:00:16 +0530 Subject: [PATCH 0088/1543] Add geometric_mean.py (#4244) * geometric_mean3 * update-GM.PY * update-AM.PY * Revert "update-AM.PY" This reverts commit 11792ec9747bec5d00e51edcd462bf87150cdba9. --- maths/series/geometric_mean.py | 75 ++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 maths/series/geometric_mean.py diff --git a/maths/series/geometric_mean.py b/maths/series/geometric_mean.py new file mode 100644 index 000000000000..50ae54ad6574 --- /dev/null +++ b/maths/series/geometric_mean.py @@ -0,0 +1,75 @@ +""" +GEOMETRIC MEAN : https://en.wikipedia.org/wiki/Geometric_mean +""" + + +def is_geometric_series(series: list) -> bool: + """ + checking whether the input series is geometric series or not + + >>> is_geometric_series([2, 4, 8]) + True + >>> is_geometric_series([3, 6, 12, 24]) + True + >>> is_geometric_series([1, 2, 3]) + False + >>> is_geometric_series([0, 0, 3]) + False + + """ + if len(series) == 1: + return True + try: + common_ratio = series[1] / series[0] + for index in range(len(series) - 1): + if series[index + 1] / series[index] != common_ratio: + return False + except ZeroDivisionError: + return False + return True + + +def geometric_mean(series: list) -> float: + """ + return the geometric mean of series + + >>> geometric_mean([2, 4, 8]) + 3.9999999999999996 + >>> geometric_mean([3, 6, 12, 24]) + 8.48528137423857 + >>> geometric_mean([4, 8, 16]) + 7.999999999999999 + >>> geometric_mean(4) + Traceback (most recent call last): + ... + ValueError: Input series is not valid, valid series - [2, 4, 8] + >>> geometric_mean([1, 2, 3]) + Traceback (most recent call last): + ... + ValueError: Input list is not a geometric series + >>> geometric_mean([0, 2, 3]) + Traceback (most recent call last): + ... 
+ ValueError: Input list is not a geometric series + >>> geometric_mean([]) + Traceback (most recent call last): + ... + ValueError: Input list must be a non empty list + + """ + if not isinstance(series, list): + raise ValueError("Input series is not valid, valid series - [2, 4, 8]") + if len(series) == 0: + raise ValueError("Input list must be a non empty list") + if not is_geometric_series(series): + raise ValueError("Input list is not a geometric series") + answer = 1 + for value in series: + answer *= value + return pow(answer, 1 / len(series)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a796ccf1ce2594bffdb938156987a0cbb16ee52e Mon Sep 17 00:00:00 2001 From: ulwlu Date: Tue, 2 Mar 2021 21:24:41 +0900 Subject: [PATCH 0089/1543] Add graham scan algorithm (#4205) * Add graham scan algorithm * Fix argument name p with point * Add tests in inner function * updating DIRECTORY.md * Fix graham scan for isort --profile=black Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 7 ++ other/graham_scan.py | 171 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 178 insertions(+) create mode 100644 other/graham_scan.py diff --git a/DIRECTORY.md b/DIRECTORY.md index d487b39490ed..61e20eeb571d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -30,6 +30,8 @@ * [Binary Count Trailing Zeros](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_count_trailing_zeros.py) * [Binary Or Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_or_operator.py) * [Binary Xor Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_xor_operator.py) + * [Count Number Of One Bits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/count_number_of_one_bits.py) + * [Reverse Bits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/single_bit_manipulation_operations.py) ## Blockchain @@ -122,6 +124,7 @@ * [Fenwick Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/fenwick_tree.py) * [Lazy Segment Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/lazy_segment_tree.py) * [Lowest Common Ancestor](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/lowest_common_ancestor.py) + * [Merge Two Binary Trees](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/merge_two_binary_trees.py) * [Non Recursive Segment Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/non_recursive_segment_tree.py) * [Number Of Possible Binary Trees](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/number_of_possible_binary_trees.py) * [Red Black Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/red_black_tree.py) @@ -274,6 +277,7 @@ ## Graphics * [Bezier Curve](https://github.com/TheAlgorithms/Python/blob/master/graphics/bezier_curve.py) + * [Koch Snowflake](https://github.com/TheAlgorithms/Python/blob/master/graphics/koch_snowflake.py) * [Vector3 For 2D Rendering](https://github.com/TheAlgorithms/Python/blob/master/graphics/vector3_for_2d_rendering.py) ## Graphs @@ -520,6 +524,7 @@ * [Frequency Finder](https://github.com/TheAlgorithms/Python/blob/master/other/frequency_finder.py) * [Game Of 
Life](https://github.com/TheAlgorithms/Python/blob/master/other/game_of_life.py) * [Gauss Easter](https://github.com/TheAlgorithms/Python/blob/master/other/gauss_easter.py) + * [Graham Scan](https://github.com/TheAlgorithms/Python/blob/master/other/graham_scan.py) * [Greedy](https://github.com/TheAlgorithms/Python/blob/master/other/greedy.py) * [Integeration By Simpson Approx](https://github.com/TheAlgorithms/Python/blob/master/other/integeration_by_simpson_approx.py) * [Largest Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/other/largest_subarray_sum.py) @@ -840,6 +845,7 @@ * [Merge Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/merge_insertion_sort.py) * [Merge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/merge_sort.py) * [Natural Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/natural_sort.py) + * [Odd Even Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_sort.py) * [Odd Even Transposition Parallel](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_transposition_parallel.py) * [Odd Even Transposition Single Threaded](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_transposition_single_threaded.py) * [Pancake Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/pancake_sort.py) @@ -856,6 +862,7 @@ * [Recursive Quick Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_quick_sort.py) * [Selection Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/selection_sort.py) * [Shell Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/shell_sort.py) + * [Slowsort](https://github.com/TheAlgorithms/Python/blob/master/sorts/slowsort.py) * [Stooge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/stooge_sort.py) * [Strand Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/strand_sort.py) * [Tim Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/tim_sort.py) diff --git a/other/graham_scan.py b/other/graham_scan.py new file mode 100644 index 000000000000..67c5cd8ab9d8 --- /dev/null +++ b/other/graham_scan.py @@ -0,0 +1,171 @@ +""" +This is a pure Python implementation of the merge-insertion sort algorithm +Source: https://en.wikipedia.org/wiki/Graham_scan + +For doctests run following command: +python3 -m doctest -v graham_scan.py +""" + +from __future__ import annotations + +from collections import deque +from enum import Enum +from math import atan2, degrees +from sys import maxsize + + +def graham_scan(points: list[list[int, int]]) -> list[list[int, int]]: + """Pure implementation of graham scan algorithm in Python + + :param points: The unique points on coordinates. + :return: The points on convex hell. 
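+ The hull points are returned in counter-clockwise order, starting
+ from the lowest and left-most point.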
+ + Examples: + >>> graham_scan([(9, 6), (3, 1), (0, 0), (5, 5), (5, 2), (7, 0), (3, 3), (1, 4)]) + [(0, 0), (7, 0), (9, 6), (5, 5), (1, 4)] + + >>> graham_scan([(0, 0), (1, 0), (1, 1), (0, 1)]) + [(0, 0), (1, 0), (1, 1), (0, 1)] + + >>> graham_scan([(0, 0), (1, 1), (2, 2), (3, 3), (-1, 2)]) + [(0, 0), (1, 1), (2, 2), (3, 3), (-1, 2)] + + >>> graham_scan([(-100, 20), (99, 3), (1, 10000001), (5133186, -25), (-66, -4)]) + [(5133186, -25), (1, 10000001), (-100, 20), (-66, -4)] + """ + + if len(points) <= 2: + # There is no convex hull + raise ValueError("graham_scan: argument must contain more than 3 points.") + if len(points) == 3: + return points + # find the lowest and the most left point + minidx = 0 + miny, minx = maxsize, maxsize + for i, point in enumerate(points): + x = point[0] + y = point[1] + if y < miny: + miny = y + minx = x + minidx = i + if y == miny: + if x < minx: + minx = x + minidx = i + + # remove the lowest and the most left point from points for preparing for sort + points.pop(minidx) + + def angle_comparer(point: list[int, int], minx: int, miny: int) -> float: + """Return the angle toward to point from (minx, miny) + + :param point: The target point + minx: The starting point's x + miny: The starting point's y + :return: the angle + + Examples: + >>> angle_comparer([1,1], 0, 0) + 45.0 + + >>> angle_comparer([100,1], 10, 10) + -5.710593137499642 + + >>> angle_comparer([5,5], 2, 3) + 33.690067525979785 + """ + # sort the points accorgind to the angle from the lowest and the most left point + x = point[0] + y = point[1] + angle = degrees(atan2(y - miny, x - minx)) + return angle + + sorted_points = sorted(points, key=lambda point: angle_comparer(point, minx, miny)) + # This insert actually costs complexity, + # and you should insteadly add (minx, miny) into stack later. + # I'm using insert just for easy understanding. + sorted_points.insert(0, (minx, miny)) + + # traversal from the lowest and the most left point in anti-clockwise direction + # if direction gets right, the previous point is not the convex hull. + class Direction(Enum): + left = 1 + straight = 2 + right = 3 + + def check_direction( + starting: list[int, int], via: list[int, int], target: list[int, int] + ) -> Direction: + """Return the direction toward to the line from via to target from starting + + :param starting: The starting point + via: The via point + target: The target point + :return: the Direction + + Examples: + >>> check_direction([1,1], [2,2], [3,3]) + Direction.straight + + >>> check_direction([60,1], [-50,199], [30,2]) + Direction.left + + >>> check_direction([0,0], [5,5], [10,0]) + Direction.right + """ + x0, y0 = starting + x1, y1 = via + x2, y2 = target + via_angle = degrees(atan2(y1 - y0, x1 - x0)) + if via_angle < 0: + via_angle += 360 + target_angle = degrees(atan2(y2 - y0, x2 - x0)) + if target_angle < 0: + target_angle += 360 + # t- + # \ \ + # \ v + # \| + # s + # via_angle is always lower than target_angle, if direction is left. + # If they are same, it means they are on a same line of convex hull. + if target_angle > via_angle: + return Direction.left + if target_angle == via_angle: + return Direction.straight + if target_angle < via_angle: + return Direction.right + + stack = deque() + stack.append(sorted_points[0]) + stack.append(sorted_points[1]) + stack.append(sorted_points[2]) + # In any ways, the first 3 points line are towards left. + # Because we sort them the angle from minx, miny. 
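+ # Scan the remaining sorted points while keeping a stack of hull candidates:
+ # whenever adding a point would force a right turn, the point on top of
+ # the stack cannot be part of the convex hull and is popped.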
+ current_direction = Direction.left + + for i in range(3, len(sorted_points)): + while True: + starting = stack[-2] + via = stack[-1] + target = sorted_points[i] + next_direction = check_direction(starting, via, target) + + if next_direction == Direction.left: + current_direction = Direction.left + break + if next_direction == Direction.straight: + if current_direction == Direction.left: + # We keep current_direction as left. + # Because if the straight line keeps as straight, + # we want to know if this straight line is towards left. + break + elif current_direction == Direction.right: + # If the straight line is towards right, + # every previous points on those straigh line is not convex hull. + stack.pop() + if next_direction == Direction.right: + stack.pop() + stack.append(sorted_points[i]) + return list(stack) From ecf9b8164fc3046ad5e6bbabb4abf3ac6a8a3cbb Mon Sep 17 00:00:00 2001 From: fpringle Date: Sat, 6 Mar 2021 14:29:52 +0100 Subject: [PATCH 0090/1543] Added solution for Project Euler problem 109 (#4080) * Added solution for Project Euler problem 109 * New subscriptable builtin types * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 6 ++ project_euler/problem_109/__init__.py | 0 project_euler/problem_109/sol1.py | 89 +++++++++++++++++++++++++++ 3 files changed, 95 insertions(+) create mode 100644 project_euler/problem_109/__init__.py create mode 100644 project_euler/problem_109/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 61e20eeb571d..dfb673dea829 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -29,6 +29,8 @@ * [Binary Count Setbits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_count_setbits.py) * [Binary Count Trailing Zeros](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_count_trailing_zeros.py) * [Binary Or Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_or_operator.py) + * [Binary Shifts](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_shifts.py) + * [Binary Twos Complement](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_twos_complement.py) * [Binary Xor Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_xor_operator.py) * [Count Number Of One Bits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/count_number_of_one_bits.py) * [Reverse Bits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/reverse_bits.py) @@ -278,6 +280,7 @@ ## Graphics * [Bezier Curve](https://github.com/TheAlgorithms/Python/blob/master/graphics/bezier_curve.py) * [Koch Snowflake](https://github.com/TheAlgorithms/Python/blob/master/graphics/koch_snowflake.py) + * [Mandelbrot](https://github.com/TheAlgorithms/Python/blob/master/graphics/mandelbrot.py) * [Vector3 For 2D Rendering](https://github.com/TheAlgorithms/Python/blob/master/graphics/vector3_for_2d_rendering.py) ## Graphs @@ -469,6 +472,7 @@ * [Runge Kutta](https://github.com/TheAlgorithms/Python/blob/master/maths/runge_kutta.py) * [Segmented Sieve](https://github.com/TheAlgorithms/Python/blob/master/maths/segmented_sieve.py) * Series + * [Geometric Mean](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric_mean.py) * [Geometric Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric_series.py) * [Harmonic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/harmonic_series.py) * 
[P Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/p_series.py) @@ -757,6 +761,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_102/sol1.py) * Problem 107 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_107/sol1.py) + * Problem 109 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_109/sol1.py) * Problem 112 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_112/sol1.py) * Problem 113 diff --git a/project_euler/problem_109/__init__.py b/project_euler/problem_109/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_109/sol1.py b/project_euler/problem_109/sol1.py new file mode 100644 index 000000000000..91c71eb9f4cb --- /dev/null +++ b/project_euler/problem_109/sol1.py @@ -0,0 +1,89 @@ +""" +In the game of darts a player throws three darts at a target board which is +split into twenty equal sized sections numbered one to twenty. + +The score of a dart is determined by the number of the region that the dart +lands in. A dart landing outside the red/green outer ring scores zero. The black +and cream regions inside this ring represent single scores. However, the red/green +outer ring and middle ring score double and treble scores respectively. + +At the centre of the board are two concentric circles called the bull region, or +bulls-eye. The outer bull is worth 25 points and the inner bull is a double, +worth 50 points. + +There are many variations of rules but in the most popular game the players will +begin with a score 301 or 501 and the first player to reduce their running total +to zero is a winner. However, it is normal to play a "doubles out" system, which +means that the player must land a double (including the double bulls-eye at the +centre of the board) on their final dart to win; any other dart that would reduce +their running total to one or lower means the score for that set of three darts +is "bust". + +When a player is able to finish on their current score it is called a "checkout" +and the highest checkout is 170: T20 T20 D25 (two treble 20s and double bull). + +There are exactly eleven distinct ways to checkout on a score of 6: + +D3 +D1 D2 +S2 D2 +D2 D1 +S4 D1 +S1 S1 D2 +S1 T1 D1 +S1 S3 D1 +D1 D1 D1 +D1 S2 D1 +S2 S2 D1 + +Note that D1 D2 is considered different to D2 D1 as they finish on different +doubles. However, the combination S1 T1 D1 is considered the same as T1 S1 D1. + +In addition we shall not include misses in considering combinations; for example, +D3 is the same as 0 D3 and 0 0 D3. + +Incredibly there are 42336 distinct ways of checking out in total. + +How many distinct ways can a player checkout with a score less than 100? + +Solution: + We first construct a list of the possible dart values, separated by type. + We then iterate through the doubles, followed by the possible 2 following throws. + If the total of these three darts is less than the given limit, we increment + the counter. +""" + +from itertools import combinations_with_replacement + + +def solution(limit: int = 100) -> int: + """ + Count the number of distinct ways a player can checkout with a score + less than limit. 
+ >>> solution(171) + 42336 + >>> solution(50) + 12577 + """ + singles: list[int] = [x for x in range(1, 21)] + [25] + doubles: list[int] = [2 * x for x in range(1, 21)] + [50] + triples: list[int] = [3 * x for x in range(1, 21)] + all_values: list[int] = singles + doubles + triples + [0] + + num_checkouts: int = 0 + double: int + throw1: int + throw2: int + checkout_total: int + + for double in doubles: + for throw1, throw2 in combinations_with_replacement(all_values, 2): + checkout_total = double + throw1 + throw2 + if checkout_total < limit: + num_checkouts += 1 + + return num_checkouts + + +if __name__ == "__main__": + print(f"{solution() = }") From ced83bed2cda5a1a4353f3ced2871a884d380879 Mon Sep 17 00:00:00 2001 From: Ayush Bisht <61404154+ayushbisht2001@users.noreply.github.com> Date: Fri, 12 Mar 2021 12:55:54 +0530 Subject: [PATCH 0091/1543] Add arithmetic_mean.py (#4243) * arithmetic_mean * arithmetic_mean * checks * checked * Revert "checked" This reverts commit 3913a39ae2c4ee183443eed67ee7427e3d322ad4. * checks-3 * update-1 --- maths/series/arithmetic_mean.py | 66 +++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 maths/series/arithmetic_mean.py diff --git a/maths/series/arithmetic_mean.py b/maths/series/arithmetic_mean.py new file mode 100644 index 000000000000..b5d64b63ac3f --- /dev/null +++ b/maths/series/arithmetic_mean.py @@ -0,0 +1,66 @@ +""" +ARITHMETIC MEAN : https://en.wikipedia.org/wiki/Arithmetic_mean + +""" + + +def is_arithmetic_series(series: list) -> bool: + """ + checking whether the input series is arithmetic series or not + + >>> is_arithmetic_series([2, 4, 6]) + True + >>> is_arithmetic_series([3, 6, 12, 24]) + False + >>> is_arithmetic_series([1, 2, 3]) + True + """ + if len(series) == 1: + return True + common_diff = series[1] - series[0] + for index in range(len(series) - 1): + if series[index + 1] - series[index] != common_diff: + return False + return True + + +def arithmetic_mean(series: list) -> float: + """ + return the arithmetic mean of series + + >>> arithmetic_mean([2, 4, 6]) + 4.0 + >>> arithmetic_mean([3, 6, 9, 12]) + 7.5 + >>> arithmetic_mean(4) + Traceback (most recent call last): + ... + ValueError: Input series is not valid, valid series - [2, 4, 6] + >>> arithmetic_mean([4, 8, 1]) + Traceback (most recent call last): + ... + ValueError: Input list is not an arithmetic series + >>> arithmetic_mean([1, 2, 3]) + 2.0 + >>> arithmetic_mean([]) + Traceback (most recent call last): + ... 
+ ValueError: Input list must be a non empty list + + """ + if not isinstance(series, list): + raise ValueError("Input series is not valid, valid series - [2, 4, 6]") + if len(series) == 0: + raise ValueError("Input list must be a non empty list") + if not is_arithmetic_series(series): + raise ValueError("Input list is not an arithmetic series") + answer = 0 + for val in series: + answer += val + return answer / len(series) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 4f6a929503ac4ee427e85896d1354b50f465ddb4 Mon Sep 17 00:00:00 2001 From: Shantanu Joshi <42472191+shan7030@users.noreply.github.com> Date: Thu, 18 Mar 2021 13:09:53 +0530 Subject: [PATCH 0092/1543] [mypy] Add/fix type annotations for electronics algorithms (#4247) * Fix mypy errors for scheduling/first_come_first_served * Fix mypy errors for scheduling/round_robin.py * Fix mypy errors for scheduling/shortest_job_first.py * Fix isort errors * Fix mypy errors for electronics/ohms_law.py * Fix mypy errors for electronics/electric_power.py * Fix black errors --- electronics/electric_power.py | 3 ++- electronics/ohms_law.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/electronics/electric_power.py b/electronics/electric_power.py index 768c3d5c7232..8f0293bd2d10 100644 --- a/electronics/electric_power.py +++ b/electronics/electric_power.py @@ -1,8 +1,9 @@ # https://en.m.wikipedia.org/wiki/Electric_power from collections import namedtuple +from typing import Tuple -def electric_power(voltage: float, current: float, power: float) -> float: +def electric_power(voltage: float, current: float, power: float) -> Tuple: """ This function can calculate any one of the three (voltage, current, power), fundamental value of electrical system. diff --git a/electronics/ohms_law.py b/electronics/ohms_law.py index a7b37b635397..c53619a10935 100644 --- a/electronics/ohms_law.py +++ b/electronics/ohms_law.py @@ -1,7 +1,8 @@ # https://en.wikipedia.org/wiki/Ohm%27s_law +from typing import Dict -def ohms_law(voltage: float, current: float, resistance: float) -> float: +def ohms_law(voltage: float, current: float, resistance: float) -> Dict[str, float]: """ Apply Ohm's Law, on any two given electrical values, which can be voltage, current, and resistance, and then in a Python dict return name/value pair of the zero value. 
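For context, the two hunks above show only the changed signature lines, so here is a minimal, self-contained sketch of the annotation style this patch introduces. The name `ohms_law_sketch` and its simplified body are illustrative assumptions, not code from this repository; the real electronics/ohms_law.py also validates its inputs before dividing.

from typing import Dict


def ohms_law_sketch(
    voltage: float, current: float, resistance: float
) -> Dict[str, float]:
    """Exactly one of the three arguments is expected to be 0; the returned
    dict names the quantity that was solved for.

    >>> ohms_law_sketch(voltage=10, current=5, resistance=0)
    {'resistance': 2.0}
    """
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    if current == 0:
        return {"current": voltage / resistance}
    return {"resistance": voltage / current}

Spelling the return type as `Dict[str, float]` instead of a bare `dict` is what lets mypy check the call sites; on Python 3.9 and later the built-in `dict[str, float]` form works as well.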
From 8e488dd53d4fd3e09e71aa5e09098f0792626938 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Fri, 19 Mar 2021 10:57:32 +0530 Subject: [PATCH 0093/1543] mypy-fix for "covid_stats_via_xpath.py" (#4233) --- web_programming/covid_stats_via_xpath.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_programming/covid_stats_via_xpath.py b/web_programming/covid_stats_via_xpath.py index d22ed017878c..85ea5d940d85 100644 --- a/web_programming/covid_stats_via_xpath.py +++ b/web_programming/covid_stats_via_xpath.py @@ -7,7 +7,7 @@ from collections import namedtuple import requests -from lxml import html +from lxml import html # type: ignore covid_data = namedtuple("covid_data", "cases deaths recovered") From ffa53c02a7da4ac4149a8ee1b14d4f023d2e2d78 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Fri, 19 Mar 2021 15:59:54 +0530 Subject: [PATCH 0094/1543] Include mypy instructions in CONTRIBUTING.md (#4271) * reupload * Include mypy instructions * delete file * fixed trailing whitespaces * options before file path Co-authored-by: Christian Clauss --- CONTRIBUTING.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e4c81a5ecd98..76ee1312f345 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -155,6 +155,8 @@ We want your work to be readable by others; therefore, we encourage you to note return a + b ``` + Instructions on how to install mypy can be found [here](https://github.com/python/mypy). Please use the command `mypy --ignore-missing-imports .` to test all files or `mypy --ignore-missing-imports path/to/file.py` to test a specific file. + - [__List comprehensions and generators__](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions) are preferred over the use of `lambda`, `map`, `filter`, `reduce` but the important thing is to demonstrate the power of Python in code that is easy to read and maintain. - Avoid importing external libraries for basic algorithms. Only use those libraries for complicated algorithms. 
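To make the new wording concrete, here is a small, fully type-hinted example of the kind of function that the `mypy --ignore-missing-imports` check described above accepts without complaint. `sum_of_squares` is an invented illustration, not a file in this repository.

from __future__ import annotations


def sum_of_squares(numbers: list[int]) -> int:
    """Return the sum of the squares of the given integers.

    >>> sum_of_squares([1, 2, 3])
    14
    """
    return sum(number * number for number in numbers)

A contributor would then run `mypy --ignore-missing-imports path/to/file.py` on the file before opening a pull request, exactly as the updated CONTRIBUTING.md asks.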
From b8a19ccfea0ceca9f83912aa2f5ad2c15114416a Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 20 Mar 2021 06:09:56 +0100 Subject: [PATCH 0095/1543] GitHub Actions: fast-fail on black formatting issues (#4268) * GitHub Actions: fast-fail on black formatting issues Give fast feedback to contributors https://github.com/psf/black#github-actions * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/pre-commit.yml | 1 + DIRECTORY.md | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 96175cfecea5..17fdad1204e9 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -14,6 +14,7 @@ jobs: ~/.cache/pip key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} - uses: actions/setup-python@v2 + - uses: psf/black@stable - name: Install pre-commit run: | python -m pip install --upgrade pip diff --git a/DIRECTORY.md b/DIRECTORY.md index dfb673dea829..136825e41976 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -472,6 +472,7 @@ * [Runge Kutta](https://github.com/TheAlgorithms/Python/blob/master/maths/runge_kutta.py) * [Segmented Sieve](https://github.com/TheAlgorithms/Python/blob/master/maths/segmented_sieve.py) * Series + * [Arithmetic Mean](https://github.com/TheAlgorithms/Python/blob/master/maths/series/arithmetic_mean.py) * [Geometric Mean](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric_mean.py) * [Geometric Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric_series.py) * [Harmonic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/harmonic_series.py) From 987567360e53da1bef786364580e9d5c6dce3fc6 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 20 Mar 2021 06:12:17 +0100 Subject: [PATCH 0096/1543] Update our pre-commit dependencies (#4273) * .pre-commit-config.yaml: mypy directories that pass * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 12 ++++++------ machine_learning/k_nearest_neighbours.py | 2 +- maths/polynomial_evaluation.py | 4 ++-- searches/hill_climbing.py | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a3288e1c5eef..d6be7f60f714 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.2.0 + rev: v3.4.0 hooks: - id: check-executables-have-shebangs - id: check-yaml @@ -13,17 +13,17 @@ repos: )$ - id: requirements-txt-fixer - repo: https://github.com/psf/black - rev: stable + rev: 20.8b1 hooks: - id: black - repo: https://github.com/PyCQA/isort - rev: 5.5.3 + rev: 5.7.0 hooks: - id: isort args: - --profile=black - repo: https://gitlab.com/pycqa/flake8 - rev: 3.8.3 + rev: 3.9.0 hooks: - id: flake8 args: @@ -38,11 +38,11 @@ repos: # args: # - --ignore-missing-imports - repo: https://github.com/codespell-project/codespell - rev: v1.17.1 + rev: v2.0.0 hooks: - id: codespell args: - - --ignore-words-list=ans,fo,followings,hist,iff,mater,secant,som,tim + - --ignore-words-list=ans,crate,fo,followings,hist,iff,mater,secant,som,tim - --skip="./.*,./other/dictionary.txt,./other/words,./project_euler/problem_022/p022_names.txt" - --quiet-level=2 exclude: | diff --git a/machine_learning/k_nearest_neighbours.py b/machine_learning/k_nearest_neighbours.py index 
e90ea09a58c1..2a90cfe5987a 100644 --- a/machine_learning/k_nearest_neighbours.py +++ b/machine_learning/k_nearest_neighbours.py @@ -32,7 +32,7 @@ def classifier(train_data, train_target, classes, point, k=5): :train_data: Set of points that are classified into two or more classes :train_target: List of classes in the order of train_data points :classes: Labels of the classes - :point: The data point that needs to be classifed + :point: The data point that needs to be classified >>> X_train = [[0, 0], [1, 0], [0, 1], [0.5, 0.5], [3, 3], [2, 3], [3, 2]] >>> y_train = [0, 0, 0, 0, 1, 1, 1] diff --git a/maths/polynomial_evaluation.py b/maths/polynomial_evaluation.py index e929a2d02972..68ff97ddd25d 100644 --- a/maths/polynomial_evaluation.py +++ b/maths/polynomial_evaluation.py @@ -5,7 +5,7 @@ def evaluate_poly(poly: Sequence[float], x: float) -> float: """Evaluate a polynomial f(x) at specified point x and return the value. Arguments: - poly -- the coeffiecients of a polynomial as an iterable in order of + poly -- the coefficients of a polynomial as an iterable in order of ascending degree x -- the point at which to evaluate the polynomial @@ -26,7 +26,7 @@ def horner(poly: Sequence[float], x: float) -> float: https://en.wikipedia.org/wiki/Horner's_method Arguments: - poly -- the coeffiecients of a polynomial as an iterable in order of + poly -- the coefficients of a polynomial as an iterable in order of ascending degree x -- the point at which to evaluate the polynomial diff --git a/searches/hill_climbing.py b/searches/hill_climbing.py index 70622ebefb4e..bb24e781a6c1 100644 --- a/searches/hill_climbing.py +++ b/searches/hill_climbing.py @@ -60,7 +60,7 @@ def get_neighbors(self): def __hash__(self): """ - hash the string represetation of the current search state. + hash the string representation of the current search state. 
""" return hash(str(self)) From 8f5f32bc00e9b06ec71ae4f06ed388c607af3be5 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Sat, 20 Mar 2021 11:19:30 +0530 Subject: [PATCH 0097/1543] New fractals folder (#4277) * reupload * delete file * Move koch_snowflake.py to fractals-folder * Move mandelbrot.py to fractals-folder * Move sierpinski_triangle.py to fractals-folder --- {graphics => fractals}/koch_snowflake.py | 0 {graphics => fractals}/mandelbrot.py | 0 {other => fractals}/sierpinski_triangle.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename {graphics => fractals}/koch_snowflake.py (100%) rename {graphics => fractals}/mandelbrot.py (100%) rename {other => fractals}/sierpinski_triangle.py (100%) diff --git a/graphics/koch_snowflake.py b/fractals/koch_snowflake.py similarity index 100% rename from graphics/koch_snowflake.py rename to fractals/koch_snowflake.py diff --git a/graphics/mandelbrot.py b/fractals/mandelbrot.py similarity index 100% rename from graphics/mandelbrot.py rename to fractals/mandelbrot.py diff --git a/other/sierpinski_triangle.py b/fractals/sierpinski_triangle.py similarity index 100% rename from other/sierpinski_triangle.py rename to fractals/sierpinski_triangle.py From 89a43c81e50e0b5b46da78143a239967e198c1fa Mon Sep 17 00:00:00 2001 From: fpringle Date: Sat, 20 Mar 2021 06:59:48 +0100 Subject: [PATCH 0098/1543] feat: Add solution for Project Euler Problem 121 (#4261) * Added solution for Project Euler problem 121 * Updated typing for 3.9 * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_121/__init__.py | 0 project_euler/problem_121/sol1.py | 64 +++++++++++++++++++++++++++ 3 files changed, 66 insertions(+) create mode 100644 project_euler/problem_121/__init__.py create mode 100644 project_euler/problem_121/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 136825e41976..070119f2f674 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -772,6 +772,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_119/sol1.py) * Problem 120 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_120/sol1.py) + * Problem 121 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_121/sol1.py) * Problem 123 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_123/sol1.py) * Problem 125 diff --git a/project_euler/problem_121/__init__.py b/project_euler/problem_121/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_121/sol1.py b/project_euler/problem_121/sol1.py new file mode 100644 index 000000000000..93679cf4a897 --- /dev/null +++ b/project_euler/problem_121/sol1.py @@ -0,0 +1,64 @@ +""" +A bag contains one red disc and one blue disc. In a game of chance a player takes a +disc at random and its colour is noted. After each turn the disc is returned to the +bag, an extra red disc is added, and another disc is taken at random. + +The player pays £1 to play and wins if they have taken more blue discs than red +discs at the end of the game. + +If the game is played for four turns, the probability of a player winning is exactly +11/120, and so the maximum prize fund the banker should allocate for winning in this +game would be £10 before they would expect to incur a loss. 
Note that any payout will +be a whole number of pounds and also includes the original £1 paid to play the game, +so in the example given the player actually wins £9. + +Find the maximum prize fund that should be allocated to a single game in which +fifteen turns are played. + + +Solution: + For each 15-disc sequence of red and blue for which there are more red than blue, + we calculate the probability of that sequence and add it to the total probability + of the player winning. The inverse of this probability gives an upper bound for + the prize if the banker wants to avoid an expected loss. +""" + +from itertools import product + + +def solution(num_turns: int = 15) -> int: + """ + Find the maximum prize fund that should be allocated to a single game in which + fifteen turns are played. + >>> solution(4) + 10 + >>> solution(10) + 225 + """ + total_prob: float = 0.0 + prob: float + num_blue: int + num_red: int + ind: int + col: int + series: tuple[int, ...] + + for series in product(range(2), repeat=num_turns): + num_blue = series.count(1) + num_red = num_turns - num_blue + if num_red >= num_blue: + continue + prob = 1.0 + for ind, col in enumerate(series, 2): + if col == 0: + prob *= (ind - 1) / ind + else: + prob *= 1 / ind + + total_prob += prob + + return int(1 / total_prob) + + +if __name__ == "__main__": + print(f"{solution() = }") From 8d7ef6a7f59be10cce808484949d2ab4f0429a63 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 20 Mar 2021 07:01:13 +0100 Subject: [PATCH 0099/1543] build.yml: Run mypy --ignore-missing-imports (#4276) * build.yml: Run mypy --ignore-missing-imports * pip install mypy * Remove failing directories * Add fractals and drop python-m --- .github/workflows/build.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9e15d18ade8e..1e8d04126002 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -20,7 +20,22 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip setuptools six wheel - python -m pip install pytest-cov -r requirements.txt + python -m pip install mypy pytest-cov -r requirements.txt + # FIXME: #4052 fix mypy errors in other directories and add them here + - run: mypy --ignore-missing-imports + backtracking + bit_manipulation + blockchain + boolean_algebra + cellular_automata + computer_vision + fractals + fuzzy_logic + genetic_algorithm + geodesy + knapsack + networking_flow + scheduling sorts - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . 
- if: ${{ success() }} From 2dc2c99f2b9ec3c1fe02064720718f65f19e7292 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Sat, 20 Mar 2021 11:41:10 +0530 Subject: [PATCH 0100/1543] refactor: Rename explicit_euler.py to euler_method.py (#4275) * reupload * Rename explicit_euler.py to euler_method.py * Delete file --- maths/{explicit_euler.py => euler_method.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename maths/{explicit_euler.py => euler_method.py} (100%) diff --git a/maths/explicit_euler.py b/maths/euler_method.py similarity index 100% rename from maths/explicit_euler.py rename to maths/euler_method.py From dd757dce383b575021c1765f84d8a40acff16799 Mon Sep 17 00:00:00 2001 From: DevanshiPatel18 <61454611+DevanshiPatel18@users.noreply.github.com> Date: Sat, 20 Mar 2021 11:48:38 +0530 Subject: [PATCH 0101/1543] feat: Add greedy_coin_change.py algorithm (#3805) * Hacktoberfest: Add greedy_coin_change.py file added the file in greedy_methods folder to implement the same method Altered the code according to the changes that were requested. * Added doctests. doctests added to the function find_minimum_change. * Added Greedy change file in Maths folder. * updating DIRECTORY.md * Deleted Greedy Method Folder * updating DIRECTORY.md * Update greedy_coin_change.py * fix: black formatting issues Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Dhruv Manilawala --- DIRECTORY.md | 9 ++-- maths/greedy_coin_change.py | 102 ++++++++++++++++++++++++++++++++++++ 2 files changed, 108 insertions(+), 3 deletions(-) create mode 100644 maths/greedy_coin_change.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 070119f2f674..2f57a9db5769 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -267,6 +267,11 @@ * Tests * [Test Send File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/tests/test_send_file.py) +## Fractals + * [Koch Snowflake](https://github.com/TheAlgorithms/Python/blob/master/fractals/koch_snowflake.py) + * [Mandelbrot](https://github.com/TheAlgorithms/Python/blob/master/fractals/mandelbrot.py) + * [Sierpinski Triangle](https://github.com/TheAlgorithms/Python/blob/master/fractals/sierpinski_triangle.py) + ## Fuzzy Logic * [Fuzzy Operations](https://github.com/TheAlgorithms/Python/blob/master/fuzzy_logic/fuzzy_operations.py) @@ -279,8 +284,6 @@ ## Graphics * [Bezier Curve](https://github.com/TheAlgorithms/Python/blob/master/graphics/bezier_curve.py) - * [Koch Snowflake](https://github.com/TheAlgorithms/Python/blob/master/graphics/koch_snowflake.py) - * [Mandelbrot](https://github.com/TheAlgorithms/Python/blob/master/graphics/mandelbrot.py) * [Vector3 For 2D Rendering](https://github.com/TheAlgorithms/Python/blob/master/graphics/vector3_for_2d_rendering.py) ## Graphs @@ -432,6 +435,7 @@ * [Gamma](https://github.com/TheAlgorithms/Python/blob/master/maths/gamma.py) * [Gaussian](https://github.com/TheAlgorithms/Python/blob/master/maths/gaussian.py) * [Greatest Common Divisor](https://github.com/TheAlgorithms/Python/blob/master/maths/greatest_common_divisor.py) + * [Greedy Coin Change](https://github.com/TheAlgorithms/Python/blob/master/maths/greedy_coin_change.py) * [Hardy Ramanujanalgo](https://github.com/TheAlgorithms/Python/blob/master/maths/hardy_ramanujanalgo.py) * [Is Square Free](https://github.com/TheAlgorithms/Python/blob/master/maths/is_square_free.py) * [Jaccard Similarity](https://github.com/TheAlgorithms/Python/blob/master/maths/jaccard_similarity.py) @@ -547,7 +551,6 @@ * 
[Primelib](https://github.com/TheAlgorithms/Python/blob/master/other/primelib.py) * [Scoring Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/scoring_algorithm.py) * [Sdes](https://github.com/TheAlgorithms/Python/blob/master/other/sdes.py) - * [Sierpinski Triangle](https://github.com/TheAlgorithms/Python/blob/master/other/sierpinski_triangle.py) * [Tower Of Hanoi](https://github.com/TheAlgorithms/Python/blob/master/other/tower_of_hanoi.py) * [Triplet Sum](https://github.com/TheAlgorithms/Python/blob/master/other/triplet_sum.py) * [Two Pointer](https://github.com/TheAlgorithms/Python/blob/master/other/two_pointer.py) diff --git a/maths/greedy_coin_change.py b/maths/greedy_coin_change.py new file mode 100644 index 000000000000..5a7d9e8d84ae --- /dev/null +++ b/maths/greedy_coin_change.py @@ -0,0 +1,102 @@ +""" +Test cases: +Do you want to enter your denominations ? (Y/N) :N +Enter the change you want to make in Indian Currency: 987 +Following is minimal change for 987 : +500 100 100 100 100 50 20 10 5 2 + +Do you want to enter your denominations ? (Y/N) :Y +Enter number of denomination:10 +1 +5 +10 +20 +50 +100 +200 +500 +1000 +2000 +Enter the change you want to make: 18745 +Following is minimal change for 18745 : +2000 2000 2000 2000 2000 2000 2000 2000 2000 500 200 20 20 5 + +Do you want to enter your denominations ? (Y/N) :N +Enter the change you want to make: 0 +The total value cannot be zero or negative. +Do you want to enter your denominations ? (Y/N) :N +Enter the change you want to make: -98 +The total value cannot be zero or negative. + +Do you want to enter your denominations ? (Y/N) :Y +Enter number of denomination:5 +1 +5 +100 +500 +1000 +Enter the change you want to make: 456 +Following is minimal change for 456 : +100 100 100 100 5 5 5 5 5 5 5 5 5 5 5 1 +""" + + +def find_minimum_change(denominations: list[int], value: int) -> list[int]: + """ + Find the minimum change from the given denominations and value + >>> find_minimum_change([1, 5, 10, 20, 50, 100, 200, 500, 1000,2000], 18745) + [2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 500, 200, 20, 20, 5] + >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) + [500, 100, 100, 100, 100, 50, 20, 10, 5, 2] + >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 0) + [] + >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], -98) + [] + >>> find_minimum_change([1, 5, 100, 500, 1000], 456) + [100, 100, 100, 100, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1] + """ + total_value = int(value) + + # Initialize Result + answer = [] + + # Traverse through all denomination + for denomination in reversed(denominations): + + # Find denominations + while int(total_value) >= int(denomination): + total_value -= int(denomination) + answer.append(denomination) # Append the "answers" array + + return answer + + +# Driver Code +if __name__ == "__main__": + + denominations = list() + value = 0 + + if ( + input("Do you want to enter your denominations ? 
(yY/n): ").strip().lower() + == "y" + ): + n = int(input("Enter the number of denominations you want to add: ").strip()) + + for i in range(0, n): + denominations.append(int(input(f"Denomination {i}: ").strip())) + value = input("Enter the change you want to make in Indian Currency: ").strip() + else: + # All denominations of Indian Currency if user does not enter + denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000] + value = input("Enter the change you want to make: ").strip() + + if int(value) == 0 or int(value) < 0: + print("The total value cannot be zero or negative.") + + else: + print(f"Following is minimal change for {value}: ") + answer = find_minimum_change(denominations, value) + # Print result + for i in range(len(answer)): + print(answer[i], end=" ") From 2c6f553ccb671918eb057f1eddea8e9f1b171fb1 Mon Sep 17 00:00:00 2001 From: "Novice :)" <72334601+noviicee@users.noreply.github.com> Date: Sat, 20 Mar 2021 12:02:16 +0530 Subject: [PATCH 0102/1543] [mypy] Fix type annotations for cellular_automata (#4236) * [mypy] Fix type annotations for cellullar_automata * mypy --ignore-missing-imports * mypy --ignore-missing-imports * Blank lines * Blank lines Co-authored-by: Christian Clauss --- cellular_automata/conways_game_of_life.py | 2 +- cellular_automata/one_dimensional.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cellular_automata/conways_game_of_life.py b/cellular_automata/conways_game_of_life.py index dc349b7ac507..321baa3a3794 100644 --- a/cellular_automata/conways_game_of_life.py +++ b/cellular_automata/conways_game_of_life.py @@ -7,7 +7,7 @@ from typing import List -from PIL import Image # type: ignore +from PIL import Image # Define glider example GLIDER = [ diff --git a/cellular_automata/one_dimensional.py b/cellular_automata/one_dimensional.py index 5de2c5b994e3..da77e444502f 100644 --- a/cellular_automata/one_dimensional.py +++ b/cellular_automata/one_dimensional.py @@ -6,7 +6,7 @@ from __future__ import annotations -from PIL import Image # type: ignore +from PIL import Image # Define the first generation of cells # fmt: off From 99a42f2b5821356718fb7d011ac711a6b4a63a83 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Sun, 21 Mar 2021 16:35:10 +0530 Subject: [PATCH 0103/1543] Move files to strings folder (#4283) * Move files to strings-folder * moved the file "words" back to the original folder * moved "anagram.py" also back * fix the codespell ignore-list --- .pre-commit-config.yaml | 4 ++-- {other => strings}/autocomplete_using_trie.py | 0 {other => strings}/detecting_english_programmatically.py | 0 {other => strings}/dictionary.txt | 0 {other => strings}/frequency_finder.py | 0 {other => strings}/palindrome.py | 0 {other => strings}/word_patterns.py | 0 7 files changed, 2 insertions(+), 2 deletions(-) rename {other => strings}/autocomplete_using_trie.py (100%) rename {other => strings}/detecting_english_programmatically.py (100%) rename {other => strings}/dictionary.txt (100%) rename {other => strings}/frequency_finder.py (100%) rename {other => strings}/palindrome.py (100%) rename {other => strings}/word_patterns.py (100%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d6be7f60f714..ee422e61a03b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -43,11 +43,11 @@ repos: - id: codespell args: - --ignore-words-list=ans,crate,fo,followings,hist,iff,mater,secant,som,tim - - --skip="./.*,./other/dictionary.txt,./other/words,./project_euler/problem_022/p022_names.txt" + - 
--skip="./.*,./strings/dictionary.txt,./other/words,./project_euler/problem_022/p022_names.txt" - --quiet-level=2 exclude: | (?x)^( - other/dictionary.txt | + strings/dictionary.txt | other/words | project_euler/problem_022/p022_names.txt )$ diff --git a/other/autocomplete_using_trie.py b/strings/autocomplete_using_trie.py similarity index 100% rename from other/autocomplete_using_trie.py rename to strings/autocomplete_using_trie.py diff --git a/other/detecting_english_programmatically.py b/strings/detecting_english_programmatically.py similarity index 100% rename from other/detecting_english_programmatically.py rename to strings/detecting_english_programmatically.py diff --git a/other/dictionary.txt b/strings/dictionary.txt similarity index 100% rename from other/dictionary.txt rename to strings/dictionary.txt diff --git a/other/frequency_finder.py b/strings/frequency_finder.py similarity index 100% rename from other/frequency_finder.py rename to strings/frequency_finder.py diff --git a/other/palindrome.py b/strings/palindrome.py similarity index 100% rename from other/palindrome.py rename to strings/palindrome.py diff --git a/other/word_patterns.py b/strings/word_patterns.py similarity index 100% rename from other/word_patterns.py rename to strings/word_patterns.py From 14bcb580d55ee90614ad258ea31ef8fe4e4b5c40 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Mon, 22 Mar 2021 12:29:51 +0530 Subject: [PATCH 0104/1543] fix(mypy): Fix annotations for 13 cipher algorithms (#4278) * Initial fix for mypy errors in some cipher algorithms * fix(mypy): Update type hints * fix(mypy): Update type hints for enigma_machine2.py * Update as per the suggestion Co-authored-by: Christian Clauss Co-authored-by: Christian Clauss --- ciphers/a1z26.py | 8 ++--- ciphers/affine_cipher.py | 42 +++++++++++----------- ciphers/atbash.py | 4 +-- ciphers/base32.py | 2 +- ciphers/base85.py | 2 +- ciphers/beaufort_cipher.py | 2 +- ciphers/brute_force_caesar_cipher.py | 2 +- ciphers/cryptomath_module.py | 4 +-- ciphers/decrypt_caesar_with_chi_squared.py | 30 ++++++++-------- ciphers/diffie.py | 25 ++++++++----- ciphers/elgamal_key_generator.py | 36 +++++++++---------- ciphers/enigma_machine2.py | 24 ++++++++----- ciphers/rsa_key_generator.py | 9 +++-- 13 files changed, 101 insertions(+), 89 deletions(-) diff --git a/ciphers/a1z26.py b/ciphers/a1z26.py index 92710ec44b0e..e6684fb1e6fc 100644 --- a/ciphers/a1z26.py +++ b/ciphers/a1z26.py @@ -7,7 +7,7 @@ """ -def encode(plain: str) -> list: +def encode(plain: str) -> list[int]: """ >>> encode("myname") [13, 25, 14, 1, 13, 5] @@ -15,7 +15,7 @@ def encode(plain: str) -> list: return [ord(elem) - 96 for elem in plain] -def decode(encoded: list) -> str: +def decode(encoded: list[int]) -> str: """ >>> decode([13, 25, 14, 1, 13, 5]) 'myname' @@ -23,8 +23,8 @@ def decode(encoded: list) -> str: return "".join(chr(elem + 96) for elem in encoded) -def main(): - encoded = encode(input("->").strip().lower()) +def main() -> None: + encoded = encode(input("-> ").strip().lower()) print("Encoded: ", encoded) print("Decoded:", decode(encoded)) diff --git a/ciphers/affine_cipher.py b/ciphers/affine_cipher.py index cf8c0d5f4c1d..d3b806ba1eeb 100644 --- a/ciphers/affine_cipher.py +++ b/ciphers/affine_cipher.py @@ -9,26 +9,6 @@ ) -def main(): - """ - >>> key = get_random_key() - >>> msg = "This is a test!" 
- >>> decrypt_message(key, encrypt_message(key, msg)) == msg - True - """ - message = input("Enter message: ").strip() - key = int(input("Enter key [2000 - 9000]: ").strip()) - mode = input("Encrypt/Decrypt [E/D]: ").strip().lower() - - if mode.startswith("e"): - mode = "encrypt" - translated = encrypt_message(key, message) - elif mode.startswith("d"): - mode = "decrypt" - translated = decrypt_message(key, message) - print(f"\n{mode.title()}ed text: \n{translated}") - - def check_keys(keyA: int, keyB: int, mode: str) -> None: if mode == "encrypt": if keyA == 1: @@ -80,7 +60,7 @@ def decrypt_message(key: int, message: str) -> str: keyA, keyB = divmod(key, len(SYMBOLS)) check_keys(keyA, keyB, "decrypt") plainText = "" - modInverseOfkeyA = cryptomath.findModInverse(keyA, len(SYMBOLS)) + modInverseOfkeyA = cryptomath.find_mod_inverse(keyA, len(SYMBOLS)) for symbol in message: if symbol in SYMBOLS: symIndex = SYMBOLS.find(symbol) @@ -98,6 +78,26 @@ def get_random_key() -> int: return keyA * len(SYMBOLS) + keyB +def main() -> None: + """ + >>> key = get_random_key() + >>> msg = "This is a test!" + >>> decrypt_message(key, encrypt_message(key, msg)) == msg + True + """ + message = input("Enter message: ").strip() + key = int(input("Enter key [2000 - 9000]: ").strip()) + mode = input("Encrypt/Decrypt [E/D]: ").strip().lower() + + if mode.startswith("e"): + mode = "encrypt" + translated = encrypt_message(key, message) + elif mode.startswith("d"): + mode = "decrypt" + translated = decrypt_message(key, message) + print(f"\n{mode.title()}ed text: \n{translated}") + + if __name__ == "__main__": import doctest diff --git a/ciphers/atbash.py b/ciphers/atbash.py index c17d1e34f37a..5c2aea610bff 100644 --- a/ciphers/atbash.py +++ b/ciphers/atbash.py @@ -61,6 +61,6 @@ def benchmark() -> None: if __name__ == "__main__": - for sequence in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): - print(f"{sequence} encrypted in atbash: {atbash(sequence)}") + for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): + print(f"{example} encrypted in atbash: {atbash(example)}") benchmark() diff --git a/ciphers/base32.py b/ciphers/base32.py index 5bba8c4dd685..da289a7210e8 100644 --- a/ciphers/base32.py +++ b/ciphers/base32.py @@ -1,7 +1,7 @@ import base64 -def main(): +def main() -> None: inp = input("->") encoded = inp.encode("utf-8") # encoded the input (we need a bytes like object) b32encoded = base64.b32encode(encoded) # b32encoded the encoded string diff --git a/ciphers/base85.py b/ciphers/base85.py index ebfd0480f794..9740299b9771 100644 --- a/ciphers/base85.py +++ b/ciphers/base85.py @@ -1,7 +1,7 @@ import base64 -def main(): +def main() -> None: inp = input("->") encoded = inp.encode("utf-8") # encoded the input (we need a bytes like object) a85encoded = base64.a85encode(encoded) # a85encoded the encoded string diff --git a/ciphers/beaufort_cipher.py b/ciphers/beaufort_cipher.py index c885dec74001..8eae847a7ff7 100644 --- a/ciphers/beaufort_cipher.py +++ b/ciphers/beaufort_cipher.py @@ -66,7 +66,7 @@ def original_text(cipher_text: str, key_new: str) -> str: return or_txt -def main(): +def main() -> None: message = "THE GERMAN ATTACK" key = "SECRET" key_new = generate_key(message, key) diff --git a/ciphers/brute_force_caesar_cipher.py b/ciphers/brute_force_caesar_cipher.py index 13a165245403..8ab6e77307b4 100644 --- a/ciphers/brute_force_caesar_cipher.py +++ b/ciphers/brute_force_caesar_cipher.py @@ -43,7 +43,7 @@ def decrypt(message: str) -> None: print(f"Decryption using Key #{key}: 
{translated}") -def main(): +def main() -> None: message = input("Encrypted message: ") message = message.upper() decrypt(message) diff --git a/ciphers/cryptomath_module.py b/ciphers/cryptomath_module.py index ffeac1617f64..be8764ff38c3 100644 --- a/ciphers/cryptomath_module.py +++ b/ciphers/cryptomath_module.py @@ -4,9 +4,9 @@ def gcd(a: int, b: int) -> int: return b -def findModInverse(a: int, m: int) -> int: +def find_mod_inverse(a: int, m: int) -> int: if gcd(a, m) != 1: - return None + raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist") u1, u2, u3 = 1, 0, a v1, v2, v3 = 0, 1, m while v3 != 0: diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py index 41b4a12ba453..e7faeae73773 100644 --- a/ciphers/decrypt_caesar_with_chi_squared.py +++ b/ciphers/decrypt_caesar_with_chi_squared.py @@ -1,14 +1,14 @@ #!/usr/bin/env python3 -from typing import Tuple +from typing import Optional def decrypt_caesar_with_chi_squared( ciphertext: str, - cipher_alphabet: str = None, - frequencies_dict: str = None, + cipher_alphabet: Optional[list[str]] = None, + frequencies_dict: Optional[dict[str, float]] = None, case_sensetive: bool = False, -) -> Tuple[int, float, str]: +) -> tuple[int, float, str]: """ Basic Usage =========== @@ -123,9 +123,9 @@ def decrypt_caesar_with_chi_squared( AttributeError: 'int' object has no attribute 'lower' """ alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)] - frequencies_dict = frequencies_dict or {} - if frequencies_dict == {}: + # If the argument is None or the user provided an empty dictionary + if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) frequencies = { "a": 0.08497, @@ -163,7 +163,7 @@ def decrypt_caesar_with_chi_squared( ciphertext = ciphertext.lower() # Chi squared statistic values - chi_squared_statistic_values = {} + chi_squared_statistic_values: dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(alphabet_letters)): @@ -215,22 +215,22 @@ def decrypt_caesar_with_chi_squared( chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary - chi_squared_statistic_values[shift] = [ + chi_squared_statistic_values[shift] = ( chi_squared_statistic, decrypted_with_shift, - ] + ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic - most_likely_cipher = min( + most_likely_cipher: int = min( chi_squared_statistic_values, key=chi_squared_statistic_values.get - ) + ) # type: ignore # First argument to `min` is not optional # Get all the data from the most likely cipher (key, decoded message) - most_likely_cipher_chi_squared_value = chi_squared_statistic_values[ - most_likely_cipher - ][0] - decoded_most_likely_cipher = chi_squared_statistic_values[most_likely_cipher][1] + ( + most_likely_cipher_chi_squared_value, + decoded_most_likely_cipher, + ) = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( diff --git a/ciphers/diffie.py b/ciphers/diffie.py index 44b12bf9d103..a23a8104afe2 100644 --- a/ciphers/diffie.py +++ b/ciphers/diffie.py @@ -1,4 +1,7 @@ -def find_primitive(n: int) -> int: +from typing import Optional + + +def find_primitive(n: int) -> Optional[int]: for r in range(1, n): li = [] for x in range(n - 1): @@ -8,18 +11,22 @@ def find_primitive(n: int) -> int: li.append(val) else: return r + return None if __name__ == "__main__": q = int(input("Enter a prime number q: 
")) a = find_primitive(q) - a_private = int(input("Enter private key of A: ")) - a_public = pow(a, a_private, q) - b_private = int(input("Enter private key of B: ")) - b_public = pow(a, b_private, q) + if a is None: + print(f"Cannot find the primitive for the value: {a!r}") + else: + a_private = int(input("Enter private key of A: ")) + a_public = pow(a, a_private, q) + b_private = int(input("Enter private key of B: ")) + b_public = pow(a, b_private, q) - a_secret = pow(b_public, a_private, q) - b_secret = pow(a_public, b_private, q) + a_secret = pow(b_public, a_private, q) + b_secret = pow(a_public, b_private, q) - print("The key value generated by A is: ", a_secret) - print("The key value generated by B is: ", b_secret) + print("The key value generated by A is: ", a_secret) + print("The key value generated by B is: ", b_secret) diff --git a/ciphers/elgamal_key_generator.py b/ciphers/elgamal_key_generator.py index 52cf69074187..f557b0e0dc91 100644 --- a/ciphers/elgamal_key_generator.py +++ b/ciphers/elgamal_key_generator.py @@ -2,24 +2,18 @@ import random import sys -from . import cryptomath_module as cryptoMath -from . import rabin_miller as rabinMiller +from . import cryptomath_module as cryptomath +from . import rabin_miller min_primitive_root = 3 -def main(): - print("Making key files...") - makeKeyFiles("elgamal", 2048) - print("Key files generation successful") - - # I have written my code naively same as definition of primitive root # however every time I run this program, memory exceeded... # so I used 4.80 Algorithm in # Handbook of Applied Cryptography(CRC Press, ISBN : 0-8493-8523-7, October 1996) # and it seems to run nicely! -def primitiveRoot(p_val: int) -> int: +def primitive_root(p_val: int) -> int: print("Generating primitive root of p") while True: g = random.randrange(3, p_val) @@ -30,20 +24,20 @@ def primitiveRoot(p_val: int) -> int: return g -def generateKey(keySize: int) -> ((int, int, int, int), (int, int)): +def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]: print("Generating prime p...") - p = rabinMiller.generateLargePrime(keySize) # select large prime number. - e_1 = primitiveRoot(p) # one primitive root on modulo p. + p = rabin_miller.generateLargePrime(key_size) # select large prime number. + e_1 = primitive_root(p) # one primitive root on modulo p. d = random.randrange(3, p) # private_key -> have to be greater than 2 for safety. - e_2 = cryptoMath.findModInverse(pow(e_1, d, p), p) + e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p) - publicKey = (keySize, e_1, e_2, p) - privateKey = (keySize, d) + public_key = (key_size, e_1, e_2, p) + private_key = (key_size, d) - return publicKey, privateKey + return public_key, private_key -def makeKeyFiles(name: str, keySize: int): +def make_key_files(name: str, keySize: int) -> None: if os.path.exists("%s_pubkey.txt" % name) or os.path.exists( "%s_privkey.txt" % name ): @@ -55,7 +49,7 @@ def makeKeyFiles(name: str, keySize: int): ) sys.exit() - publicKey, privateKey = generateKey(keySize) + publicKey, privateKey = generate_key(keySize) print("\nWriting public key to file %s_pubkey.txt..." 
% name) with open("%s_pubkey.txt" % name, "w") as fo: fo.write( @@ -67,5 +61,11 @@ def makeKeyFiles(name: str, keySize: int): fo.write("%d,%d" % (privateKey[0], privateKey[1])) +def main() -> None: + print("Making key files...") + make_key_files("elgamal", 2048) + print("Key files generation successful") + + if __name__ == "__main__": main() diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index 4344db0056fd..f4ce5a075f46 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -15,6 +15,10 @@ Created by TrapinchO """ +RotorPositionT = tuple[int, int, int] +RotorSelectionT = tuple[str, str, str] + + # used alphabet -------------------------- # from string.ascii_uppercase abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" @@ -63,7 +67,9 @@ rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS" -def _validator(rotpos: tuple, rotsel: tuple, pb: str) -> tuple: +def _validator( + rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str +) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: """ Checks if the values can be used for the 'enigma' function @@ -99,12 +105,12 @@ def _validator(rotpos: tuple, rotsel: tuple, pb: str) -> tuple: ) # Validates string and returns dict - pb = _plugboard(pb) + pbdict = _plugboard(pb) - return rotpos, rotsel, pb + return rotpos, rotsel, pbdict -def _plugboard(pbstring: str) -> dict: +def _plugboard(pbstring: str) -> dict[str, str]: """ https://en.wikipedia.org/wiki/Enigma_machine#Plugboard @@ -145,17 +151,17 @@ def _plugboard(pbstring: str) -> dict: # Created the dictionary pb = {} - for i in range(0, len(pbstring) - 1, 2): - pb[pbstring[i]] = pbstring[i + 1] - pb[pbstring[i + 1]] = pbstring[i] + for j in range(0, len(pbstring) - 1, 2): + pb[pbstring[j]] = pbstring[j + 1] + pb[pbstring[j + 1]] = pbstring[j] return pb def enigma( text: str, - rotor_position: tuple, - rotor_selection: tuple = (rotor1, rotor2, rotor3), + rotor_position: RotorPositionT, + rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3), plugb: str = "", ) -> str: """ diff --git a/ciphers/rsa_key_generator.py b/ciphers/rsa_key_generator.py index e456d9d9f6f1..584066d8970f 100644 --- a/ciphers/rsa_key_generator.py +++ b/ciphers/rsa_key_generator.py @@ -1,19 +1,18 @@ import os import random import sys -from typing import Tuple from . import cryptomath_module as cryptoMath from . 
import rabin_miller as rabinMiller -def main(): +def main() -> None: print("Making key files...") makeKeyFiles("rsa", 1024) print("Key files generation successful.") -def generateKey(keySize: int) -> Tuple[Tuple[int, int], Tuple[int, int]]: +def generateKey(keySize: int) -> tuple[tuple[int, int], tuple[int, int]]: print("Generating prime p...") p = rabinMiller.generateLargePrime(keySize) print("Generating prime q...") @@ -27,14 +26,14 @@ def generateKey(keySize: int) -> Tuple[Tuple[int, int], Tuple[int, int]]: break print("Calculating d that is mod inverse of e...") - d = cryptoMath.findModInverse(e, (p - 1) * (q - 1)) + d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1)) publicKey = (n, e) privateKey = (n, d) return (publicKey, privateKey) -def makeKeyFiles(name: int, keySize: int) -> None: +def makeKeyFiles(name: str, keySize: int) -> None: if os.path.exists("%s_pubkey.txt" % (name)) or os.path.exists( "%s_privkey.txt" % (name) ): From 8d51c2cfd917b85b95de4488ebb157549b42af3c Mon Sep 17 00:00:00 2001 From: algobytewise Date: Mon, 22 Mar 2021 15:22:26 +0530 Subject: [PATCH 0105/1543] move-files-and-2-renames (#4285) --- {other => maths}/binary_exponentiation_2.py | 0 .../binary_exponentiation_3.py | 0 {other => maths}/euclidean_gcd.py | 0 .../integration_by_simpson_approx.py | 0 {other => maths}/largest_subarray_sum.py | 0 {other => maths}/max_sum_sliding_window.py | 90 ++++----- {other => maths}/median_of_two_arrays.py | 0 {other => maths}/primelib.py | 0 {other => maths}/triplet_sum.py | 178 +++++++++--------- {other => maths}/two_pointer.py | 0 {other => maths}/two_sum.py | 0 11 files changed, 134 insertions(+), 134 deletions(-) rename {other => maths}/binary_exponentiation_2.py (100%) rename other/binary_exponentiation.py => maths/binary_exponentiation_3.py (100%) rename {other => maths}/euclidean_gcd.py (100%) rename other/integeration_by_simpson_approx.py => maths/integration_by_simpson_approx.py (100%) rename {other => maths}/largest_subarray_sum.py (100%) rename {other => maths}/max_sum_sliding_window.py (96%) rename {other => maths}/median_of_two_arrays.py (100%) rename {other => maths}/primelib.py (100%) rename {other => maths}/triplet_sum.py (96%) rename {other => maths}/two_pointer.py (100%) rename {other => maths}/two_sum.py (100%) diff --git a/other/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py similarity index 100% rename from other/binary_exponentiation_2.py rename to maths/binary_exponentiation_2.py diff --git a/other/binary_exponentiation.py b/maths/binary_exponentiation_3.py similarity index 100% rename from other/binary_exponentiation.py rename to maths/binary_exponentiation_3.py diff --git a/other/euclidean_gcd.py b/maths/euclidean_gcd.py similarity index 100% rename from other/euclidean_gcd.py rename to maths/euclidean_gcd.py diff --git a/other/integeration_by_simpson_approx.py b/maths/integration_by_simpson_approx.py similarity index 100% rename from other/integeration_by_simpson_approx.py rename to maths/integration_by_simpson_approx.py diff --git a/other/largest_subarray_sum.py b/maths/largest_subarray_sum.py similarity index 100% rename from other/largest_subarray_sum.py rename to maths/largest_subarray_sum.py diff --git a/other/max_sum_sliding_window.py b/maths/max_sum_sliding_window.py similarity index 96% rename from other/max_sum_sliding_window.py rename to maths/max_sum_sliding_window.py index 4be7d786f215..593cb5c8bd67 100644 --- a/other/max_sum_sliding_window.py +++ b/maths/max_sum_sliding_window.py @@ -1,45 +1,45 @@ -""" -Given an array 
of integer elements and an integer 'k', we are required to find the -maximum sum of 'k' consecutive elements in the array. - -Instead of using a nested for loop, in a Brute force approach we will use a technique -called 'Window sliding technique' where the nested loops can be converted to a single -loop to reduce time complexity. -""" -from typing import List - - -def max_sum_in_array(array: List[int], k: int) -> int: - """ - Returns the maximum sum of k consecutive elements - >>> arr = [1, 4, 2, 10, 2, 3, 1, 0, 20] - >>> k = 4 - >>> max_sum_in_array(arr, k) - 24 - >>> k = 10 - >>> max_sum_in_array(arr,k) - Traceback (most recent call last): - ... - ValueError: Invalid Input - >>> arr = [1, 4, 2, 10, 2, 13, 1, 0, 2] - >>> k = 4 - >>> max_sum_in_array(arr, k) - 27 - """ - if len(array) < k or k < 0: - raise ValueError("Invalid Input") - max_sum = current_sum = sum(array[:k]) - for i in range(len(array) - k): - current_sum = current_sum - array[i] + array[i + k] - max_sum = max(max_sum, current_sum) - return max_sum - - -if __name__ == "__main__": - from doctest import testmod - from random import randint - - testmod() - array = [randint(-1000, 1000) for i in range(100)] - k = randint(0, 110) - print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}") +""" +Given an array of integer elements and an integer 'k', we are required to find the +maximum sum of 'k' consecutive elements in the array. + +Instead of using a nested for loop, in a Brute force approach we will use a technique +called 'Window sliding technique' where the nested loops can be converted to a single +loop to reduce time complexity. +""" +from typing import List + + +def max_sum_in_array(array: List[int], k: int) -> int: + """ + Returns the maximum sum of k consecutive elements + >>> arr = [1, 4, 2, 10, 2, 3, 1, 0, 20] + >>> k = 4 + >>> max_sum_in_array(arr, k) + 24 + >>> k = 10 + >>> max_sum_in_array(arr,k) + Traceback (most recent call last): + ... + ValueError: Invalid Input + >>> arr = [1, 4, 2, 10, 2, 13, 1, 0, 2] + >>> k = 4 + >>> max_sum_in_array(arr, k) + 27 + """ + if len(array) < k or k < 0: + raise ValueError("Invalid Input") + max_sum = current_sum = sum(array[:k]) + for i in range(len(array) - k): + current_sum = current_sum - array[i] + array[i + k] + max_sum = max(max_sum, current_sum) + return max_sum + + +if __name__ == "__main__": + from doctest import testmod + from random import randint + + testmod() + array = [randint(-1000, 1000) for i in range(100)] + k = randint(0, 110) + print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}") diff --git a/other/median_of_two_arrays.py b/maths/median_of_two_arrays.py similarity index 100% rename from other/median_of_two_arrays.py rename to maths/median_of_two_arrays.py diff --git a/other/primelib.py b/maths/primelib.py similarity index 100% rename from other/primelib.py rename to maths/primelib.py diff --git a/other/triplet_sum.py b/maths/triplet_sum.py similarity index 96% rename from other/triplet_sum.py rename to maths/triplet_sum.py index 0e78bb52bb72..22fab17d30c2 100644 --- a/other/triplet_sum.py +++ b/maths/triplet_sum.py @@ -1,89 +1,89 @@ -""" -Given an array of integers and another integer target, -we are required to find a triplet from the array such that it's sum is equal to -the target. 
-""" -from __future__ import annotations - -from itertools import permutations -from random import randint -from timeit import repeat - - -def make_dataset() -> tuple[list[int], int]: - arr = [randint(-1000, 1000) for i in range(10)] - r = randint(-5000, 5000) - return (arr, r) - - -dataset = make_dataset() - - -def triplet_sum1(arr: list[int], target: int) -> tuple[int, int, int]: - """ - Returns a triplet in the array with sum equal to target, - else (0, 0, 0). - >>> triplet_sum1([13, 29, 7, 23, 5], 35) - (5, 7, 23) - >>> triplet_sum1([37, 9, 19, 50, 44], 65) - (9, 19, 37) - >>> arr = [6, 47, 27, 1, 15] - >>> target = 11 - >>> triplet_sum1(arr, target) - (0, 0, 0) - """ - for triplet in permutations(arr, 3): - if sum(triplet) == target: - return tuple(sorted(triplet)) - return (0, 0, 0) - - -def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]: - """ - Returns a triplet in the array with sum equal to target, - else (0, 0, 0). - >>> triplet_sum2([13, 29, 7, 23, 5], 35) - (5, 7, 23) - >>> triplet_sum2([37, 9, 19, 50, 44], 65) - (9, 19, 37) - >>> arr = [6, 47, 27, 1, 15] - >>> target = 11 - >>> triplet_sum2(arr, target) - (0, 0, 0) - """ - arr.sort() - n = len(arr) - for i in range(n - 1): - left, right = i + 1, n - 1 - while left < right: - if arr[i] + arr[left] + arr[right] == target: - return (arr[i], arr[left], arr[right]) - elif arr[i] + arr[left] + arr[right] < target: - left += 1 - elif arr[i] + arr[left] + arr[right] > target: - right -= 1 - return (0, 0, 0) - - -def solution_times() -> tuple[float, float]: - setup_code = """ -from __main__ import dataset, triplet_sum1, triplet_sum2 -""" - test_code1 = """ -triplet_sum1(*dataset) -""" - test_code2 = """ -triplet_sum2(*dataset) -""" - times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000) - times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000) - return (min(times1), min(times2)) - - -if __name__ == "__main__": - from doctest import testmod - - testmod() - times = solution_times() - print(f"The time for naive implementation is {times[0]}.") - print(f"The time for optimized implementation is {times[1]}.") +""" +Given an array of integers and another integer target, +we are required to find a triplet from the array such that it's sum is equal to +the target. +""" +from __future__ import annotations + +from itertools import permutations +from random import randint +from timeit import repeat + + +def make_dataset() -> tuple[list[int], int]: + arr = [randint(-1000, 1000) for i in range(10)] + r = randint(-5000, 5000) + return (arr, r) + + +dataset = make_dataset() + + +def triplet_sum1(arr: list[int], target: int) -> tuple[int, int, int]: + """ + Returns a triplet in the array with sum equal to target, + else (0, 0, 0). + >>> triplet_sum1([13, 29, 7, 23, 5], 35) + (5, 7, 23) + >>> triplet_sum1([37, 9, 19, 50, 44], 65) + (9, 19, 37) + >>> arr = [6, 47, 27, 1, 15] + >>> target = 11 + >>> triplet_sum1(arr, target) + (0, 0, 0) + """ + for triplet in permutations(arr, 3): + if sum(triplet) == target: + return tuple(sorted(triplet)) + return (0, 0, 0) + + +def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]: + """ + Returns a triplet in the array with sum equal to target, + else (0, 0, 0). 
+ >>> triplet_sum2([13, 29, 7, 23, 5], 35) + (5, 7, 23) + >>> triplet_sum2([37, 9, 19, 50, 44], 65) + (9, 19, 37) + >>> arr = [6, 47, 27, 1, 15] + >>> target = 11 + >>> triplet_sum2(arr, target) + (0, 0, 0) + """ + arr.sort() + n = len(arr) + for i in range(n - 1): + left, right = i + 1, n - 1 + while left < right: + if arr[i] + arr[left] + arr[right] == target: + return (arr[i], arr[left], arr[right]) + elif arr[i] + arr[left] + arr[right] < target: + left += 1 + elif arr[i] + arr[left] + arr[right] > target: + right -= 1 + return (0, 0, 0) + + +def solution_times() -> tuple[float, float]: + setup_code = """ +from __main__ import dataset, triplet_sum1, triplet_sum2 +""" + test_code1 = """ +triplet_sum1(*dataset) +""" + test_code2 = """ +triplet_sum2(*dataset) +""" + times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000) + times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000) + return (min(times1), min(times2)) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + times = solution_times() + print(f"The time for naive implementation is {times[0]}.") + print(f"The time for optimized implementation is {times[1]}.") diff --git a/other/two_pointer.py b/maths/two_pointer.py similarity index 100% rename from other/two_pointer.py rename to maths/two_pointer.py diff --git a/other/two_sum.py b/maths/two_sum.py similarity index 100% rename from other/two_sum.py rename to maths/two_sum.py From ce99859ad54cd4f0a43a78fdc6cf41cb3e336dc2 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Mon, 22 Mar 2021 15:24:04 +0530 Subject: [PATCH 0106/1543] Move files to various folders (#4286) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Move files to cellular_automata * Rename other/davis–putnam–logemann–loveland.py to backtracking/davis–putnam–logemann–loveland.py * Rename other/markov_chain.py to graphs/markov_chain.py * undid rename: need to fix mypy first --- {other => cellular_automata}/game_of_life.py | 0 {other => graphs}/markov_chain.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename {other => cellular_automata}/game_of_life.py (100%) rename {other => graphs}/markov_chain.py (100%) diff --git a/other/game_of_life.py b/cellular_automata/game_of_life.py similarity index 100% rename from other/game_of_life.py rename to cellular_automata/game_of_life.py diff --git a/other/markov_chain.py b/graphs/markov_chain.py similarity index 100% rename from other/markov_chain.py rename to graphs/markov_chain.py From 0ee8f792e3105cda122ecb8696575eb0354b09bb Mon Sep 17 00:00:00 2001 From: algobytewise Date: Mon, 22 Mar 2021 16:10:23 +0530 Subject: [PATCH 0107/1543] Moved "other/anagrams.py" to the string folder (#4289) * move&rename, changed code accordingly * adjusted codespell ignore-list --- .pre-commit-config.yaml | 4 ++-- {other => strings}/anagrams.py | 2 +- other/words => strings/words.txt | 0 3 files changed, 3 insertions(+), 3 deletions(-) rename {other => strings}/anagrams.py (95%) rename other/words => strings/words.txt (100%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ee422e61a03b..b48da86ee57d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -43,12 +43,12 @@ repos: - id: codespell args: - --ignore-words-list=ans,crate,fo,followings,hist,iff,mater,secant,som,tim - - --skip="./.*,./strings/dictionary.txt,./other/words,./project_euler/problem_022/p022_names.txt" + - 
--skip="./.*,./strings/dictionary.txt,./strings/words.txt,./project_euler/problem_022/p022_names.txt" - --quiet-level=2 exclude: | (?x)^( strings/dictionary.txt | - other/words | + strings/words.txt | project_euler/problem_022/p022_names.txt )$ - repo: local diff --git a/other/anagrams.py b/strings/anagrams.py similarity index 95% rename from other/anagrams.py rename to strings/anagrams.py index 0be013d5bc47..1a7c675d6719 100644 --- a/other/anagrams.py +++ b/strings/anagrams.py @@ -6,7 +6,7 @@ start_time = time.time() print("creating word list...") path = os.path.split(os.path.realpath(__file__)) -with open(path[0] + "/words") as f: +with open(path[0] + "/words.txt") as f: word_list = sorted(list({word.strip().lower() for word in f})) diff --git a/other/words b/strings/words.txt similarity index 100% rename from other/words rename to strings/words.txt From a8db5d4b93410d8ad3c281d1edd9ce26c6e087e0 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Mon, 22 Mar 2021 23:54:05 +0530 Subject: [PATCH 0108/1543] [mypy] fix compression folder (#4290) * Update lempel_ziv.py * Update build.yml * updating DIRECTORY.md * fix doctest in 2_hidden_layers_neural_network.py * one more doctest * simplified tests Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 1 + DIRECTORY.md | 40 +++++++++---------- compression/lempel_ziv.py | 2 +- .../2_hidden_layers_neural_network.py | 8 ++-- 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1e8d04126002..c85b82330c67 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -28,6 +28,7 @@ jobs: blockchain boolean_algebra cellular_automata + compression computer_vision fractals fuzzy_logic diff --git a/DIRECTORY.md b/DIRECTORY.md index 2f57a9db5769..f5297db05fe1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -46,6 +46,7 @@ ## Cellular Automata * [Conways Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/conways_game_of_life.py) + * [Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/game_of_life.py) * [One Dimensional](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/one_dimensional.py) ## Ciphers @@ -322,6 +323,7 @@ * [Kahns Algorithm Long](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_long.py) * [Kahns Algorithm Topo](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_topo.py) * [Karger](https://github.com/TheAlgorithms/Python/blob/master/graphs/karger.py) + * [Markov Chain](https://github.com/TheAlgorithms/Python/blob/master/graphs/markov_chain.py) * [Minimum Spanning Tree Boruvka](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_boruvka.py) * [Minimum Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal.py) * [Minimum Spanning Tree Kruskal2](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal2.py) @@ -407,6 +409,8 @@ * [Basic Maths](https://github.com/TheAlgorithms/Python/blob/master/maths/basic_maths.py) * [Binary Exp Mod](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exp_mod.py) * [Binary Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exponentiation.py) + * [Binary Exponentiation 2](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exponentiation_2.py) + * [Binary Exponentiation 
3](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exponentiation_3.py) * [Binomial Coefficient](https://github.com/TheAlgorithms/Python/blob/master/maths/binomial_coefficient.py) * [Binomial Distribution](https://github.com/TheAlgorithms/Python/blob/master/maths/binomial_distribution.py) * [Bisection](https://github.com/TheAlgorithms/Python/blob/master/maths/bisection.py) @@ -417,8 +421,9 @@ * [Decimal Isolate](https://github.com/TheAlgorithms/Python/blob/master/maths/decimal_isolate.py) * [Entropy](https://github.com/TheAlgorithms/Python/blob/master/maths/entropy.py) * [Euclidean Distance](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_distance.py) + * [Euclidean Gcd](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_gcd.py) + * [Euler Method](https://github.com/TheAlgorithms/Python/blob/master/maths/euler_method.py) * [Eulers Totient](https://github.com/TheAlgorithms/Python/blob/master/maths/eulers_totient.py) - * [Explicit Euler](https://github.com/TheAlgorithms/Python/blob/master/maths/explicit_euler.py) * [Extended Euclidean Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/extended_euclidean_algorithm.py) * [Factorial Iterative](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_iterative.py) * [Factorial Python](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_python.py) @@ -437,6 +442,7 @@ * [Greatest Common Divisor](https://github.com/TheAlgorithms/Python/blob/master/maths/greatest_common_divisor.py) * [Greedy Coin Change](https://github.com/TheAlgorithms/Python/blob/master/maths/greedy_coin_change.py) * [Hardy Ramanujanalgo](https://github.com/TheAlgorithms/Python/blob/master/maths/hardy_ramanujanalgo.py) + * [Integration By Simpson Approx](https://github.com/TheAlgorithms/Python/blob/master/maths/integration_by_simpson_approx.py) * [Is Square Free](https://github.com/TheAlgorithms/Python/blob/master/maths/is_square_free.py) * [Jaccard Similarity](https://github.com/TheAlgorithms/Python/blob/master/maths/jaccard_similarity.py) * [Kadanes](https://github.com/TheAlgorithms/Python/blob/master/maths/kadanes.py) @@ -444,11 +450,14 @@ * [Krishnamurthy Number](https://github.com/TheAlgorithms/Python/blob/master/maths/krishnamurthy_number.py) * [Kth Lexicographic Permutation](https://github.com/TheAlgorithms/Python/blob/master/maths/kth_lexicographic_permutation.py) * [Largest Of Very Large Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/largest_of_very_large_numbers.py) + * [Largest Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/largest_subarray_sum.py) * [Least Common Multiple](https://github.com/TheAlgorithms/Python/blob/master/maths/least_common_multiple.py) * [Line Length](https://github.com/TheAlgorithms/Python/blob/master/maths/line_length.py) * [Lucas Lehmer Primality Test](https://github.com/TheAlgorithms/Python/blob/master/maths/lucas_lehmer_primality_test.py) * [Lucas Series](https://github.com/TheAlgorithms/Python/blob/master/maths/lucas_series.py) * [Matrix Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/maths/matrix_exponentiation.py) + * [Max Sum Sliding Window](https://github.com/TheAlgorithms/Python/blob/master/maths/max_sum_sliding_window.py) + * [Median Of Two Arrays](https://github.com/TheAlgorithms/Python/blob/master/maths/median_of_two_arrays.py) * [Miller Rabin](https://github.com/TheAlgorithms/Python/blob/master/maths/miller_rabin.py) * [Mobius 
Function](https://github.com/TheAlgorithms/Python/blob/master/maths/mobius_function.py) * [Modular Exponential](https://github.com/TheAlgorithms/Python/blob/master/maths/modular_exponential.py) @@ -467,6 +476,7 @@ * [Prime Factors](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_factors.py) * [Prime Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_numbers.py) * [Prime Sieve Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_sieve_eratosthenes.py) + * [Primelib](https://github.com/TheAlgorithms/Python/blob/master/maths/primelib.py) * [Pythagoras](https://github.com/TheAlgorithms/Python/blob/master/maths/pythagoras.py) * [Qr Decomposition](https://github.com/TheAlgorithms/Python/blob/master/maths/qr_decomposition.py) * [Quadratic Equations Complex Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/quadratic_equations_complex_numbers.py) @@ -491,6 +501,9 @@ * [Sum Of Geometric Progression](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_geometric_progression.py) * [Test Prime Check](https://github.com/TheAlgorithms/Python/blob/master/maths/test_prime_check.py) * [Trapezoidal Rule](https://github.com/TheAlgorithms/Python/blob/master/maths/trapezoidal_rule.py) + * [Triplet Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/triplet_sum.py) + * [Two Pointer](https://github.com/TheAlgorithms/Python/blob/master/maths/two_pointer.py) + * [Two Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/two_sum.py) * [Ugly Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/ugly_numbers.py) * [Volume](https://github.com/TheAlgorithms/Python/blob/master/maths/volume.py) * [Zellers Congruence](https://github.com/TheAlgorithms/Python/blob/master/maths/zellers_congruence.py) @@ -520,42 +533,23 @@ ## Other * [Activity Selection](https://github.com/TheAlgorithms/Python/blob/master/other/activity_selection.py) - * [Anagrams](https://github.com/TheAlgorithms/Python/blob/master/other/anagrams.py) - * [Autocomplete Using Trie](https://github.com/TheAlgorithms/Python/blob/master/other/autocomplete_using_trie.py) - * [Binary Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/other/binary_exponentiation.py) - * [Binary Exponentiation 2](https://github.com/TheAlgorithms/Python/blob/master/other/binary_exponentiation_2.py) * [Davis–Putnam–Logemann–Loveland](https://github.com/TheAlgorithms/Python/blob/master/other/davis–putnam–logemann–loveland.py) - * [Detecting English Programmatically](https://github.com/TheAlgorithms/Python/blob/master/other/detecting_english_programmatically.py) * [Dijkstra Bankers Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/dijkstra_bankers_algorithm.py) * [Doomsday](https://github.com/TheAlgorithms/Python/blob/master/other/doomsday.py) - * [Euclidean Gcd](https://github.com/TheAlgorithms/Python/blob/master/other/euclidean_gcd.py) * [Fischer Yates Shuffle](https://github.com/TheAlgorithms/Python/blob/master/other/fischer_yates_shuffle.py) - * [Frequency Finder](https://github.com/TheAlgorithms/Python/blob/master/other/frequency_finder.py) - * [Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/other/game_of_life.py) * [Gauss Easter](https://github.com/TheAlgorithms/Python/blob/master/other/gauss_easter.py) * [Graham Scan](https://github.com/TheAlgorithms/Python/blob/master/other/graham_scan.py) * [Greedy](https://github.com/TheAlgorithms/Python/blob/master/other/greedy.py) - * [Integeration By Simpson 
Approx](https://github.com/TheAlgorithms/Python/blob/master/other/integeration_by_simpson_approx.py) - * [Largest Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/other/largest_subarray_sum.py) * [Least Recently Used](https://github.com/TheAlgorithms/Python/blob/master/other/least_recently_used.py) * [Lfu Cache](https://github.com/TheAlgorithms/Python/blob/master/other/lfu_cache.py) * [Linear Congruential Generator](https://github.com/TheAlgorithms/Python/blob/master/other/linear_congruential_generator.py) * [Lru Cache](https://github.com/TheAlgorithms/Python/blob/master/other/lru_cache.py) * [Magicdiamondpattern](https://github.com/TheAlgorithms/Python/blob/master/other/magicdiamondpattern.py) - * [Markov Chain](https://github.com/TheAlgorithms/Python/blob/master/other/markov_chain.py) - * [Max Sum Sliding Window](https://github.com/TheAlgorithms/Python/blob/master/other/max_sum_sliding_window.py) - * [Median Of Two Arrays](https://github.com/TheAlgorithms/Python/blob/master/other/median_of_two_arrays.py) * [Nested Brackets](https://github.com/TheAlgorithms/Python/blob/master/other/nested_brackets.py) - * [Palindrome](https://github.com/TheAlgorithms/Python/blob/master/other/palindrome.py) * [Password Generator](https://github.com/TheAlgorithms/Python/blob/master/other/password_generator.py) - * [Primelib](https://github.com/TheAlgorithms/Python/blob/master/other/primelib.py) * [Scoring Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/scoring_algorithm.py) * [Sdes](https://github.com/TheAlgorithms/Python/blob/master/other/sdes.py) * [Tower Of Hanoi](https://github.com/TheAlgorithms/Python/blob/master/other/tower_of_hanoi.py) - * [Triplet Sum](https://github.com/TheAlgorithms/Python/blob/master/other/triplet_sum.py) - * [Two Pointer](https://github.com/TheAlgorithms/Python/blob/master/other/two_pointer.py) - * [Two Sum](https://github.com/TheAlgorithms/Python/blob/master/other/two_sum.py) - * [Word Patterns](https://github.com/TheAlgorithms/Python/blob/master/other/word_patterns.py) ## Project Euler * Problem 001 @@ -885,11 +879,15 @@ ## Strings * [Aho Corasick](https://github.com/TheAlgorithms/Python/blob/master/strings/aho_corasick.py) + * [Anagrams](https://github.com/TheAlgorithms/Python/blob/master/strings/anagrams.py) + * [Autocomplete Using Trie](https://github.com/TheAlgorithms/Python/blob/master/strings/autocomplete_using_trie.py) * [Boyer Moore Search](https://github.com/TheAlgorithms/Python/blob/master/strings/boyer_moore_search.py) * [Can String Be Rearranged As Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/can_string_be_rearranged_as_palindrome.py) * [Capitalize](https://github.com/TheAlgorithms/Python/blob/master/strings/capitalize.py) * [Check Anagrams](https://github.com/TheAlgorithms/Python/blob/master/strings/check_anagrams.py) * [Check Pangram](https://github.com/TheAlgorithms/Python/blob/master/strings/check_pangram.py) + * [Detecting English Programmatically](https://github.com/TheAlgorithms/Python/blob/master/strings/detecting_english_programmatically.py) + * [Frequency Finder](https://github.com/TheAlgorithms/Python/blob/master/strings/frequency_finder.py) * [Is Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/is_palindrome.py) * [Jaro Winkler](https://github.com/TheAlgorithms/Python/blob/master/strings/jaro_winkler.py) * [Knuth Morris Pratt](https://github.com/TheAlgorithms/Python/blob/master/strings/knuth_morris_pratt.py) @@ -898,6 +896,7 @@ * 
[Manacher](https://github.com/TheAlgorithms/Python/blob/master/strings/manacher.py) * [Min Cost String Conversion](https://github.com/TheAlgorithms/Python/blob/master/strings/min_cost_string_conversion.py) * [Naive String Search](https://github.com/TheAlgorithms/Python/blob/master/strings/naive_string_search.py) + * [Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/palindrome.py) * [Prefix Function](https://github.com/TheAlgorithms/Python/blob/master/strings/prefix_function.py) * [Rabin Karp](https://github.com/TheAlgorithms/Python/blob/master/strings/rabin_karp.py) * [Remove Duplicate](https://github.com/TheAlgorithms/Python/blob/master/strings/remove_duplicate.py) @@ -907,6 +906,7 @@ * [Swap Case](https://github.com/TheAlgorithms/Python/blob/master/strings/swap_case.py) * [Upper](https://github.com/TheAlgorithms/Python/blob/master/strings/upper.py) * [Word Occurrence](https://github.com/TheAlgorithms/Python/blob/master/strings/word_occurrence.py) + * [Word Patterns](https://github.com/TheAlgorithms/Python/blob/master/strings/word_patterns.py) * [Z Function](https://github.com/TheAlgorithms/Python/blob/master/strings/z_function.py) ## Traversals diff --git a/compression/lempel_ziv.py b/compression/lempel_ziv.py index 2d0601b27b34..6743dc42d56e 100644 --- a/compression/lempel_ziv.py +++ b/compression/lempel_ziv.py @@ -26,7 +26,7 @@ def read_file_binary(file_path: str) -> str: def add_key_to_lexicon( - lexicon: dict, curr_string: str, index: int, last_match_id: int + lexicon: dict, curr_string: str, index: int, last_match_id: str ) -> None: """ Adds new strings (curr_string + "0", curr_string + "1") to the lexicon diff --git a/neural_network/2_hidden_layers_neural_network.py b/neural_network/2_hidden_layers_neural_network.py index baa4316200d9..1cf78ec4c7c0 100644 --- a/neural_network/2_hidden_layers_neural_network.py +++ b/neural_network/2_hidden_layers_neural_network.py @@ -196,8 +196,8 @@ def predict(self, input: numpy.ndarray) -> int: >>> output_val = numpy.array(([0], [1], [1]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> nn.train(output_val, 1000, False) - >>> nn.predict([0,1,0]) - 1 + >>> nn.predict([0,1,0]) in (0, 1) + True """ # Input values for which the predictions are to be made. @@ -260,8 +260,8 @@ def example() -> int: In this example the output is divided into 2 classes i.e. binary classification, the two classes are represented by '0' and '1'. - >>> example() - 1 + >>> example() in (0, 1) + True """ # Input values. input = numpy.array( From 959507901ac8f10cd605c51c305d13b27d105536 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Tue, 23 Mar 2021 21:21:50 +0530 Subject: [PATCH 0109/1543] [mypy] fix small folders (#4292) * add final else-statement * fix file_transfer * fix quantum folder * fix divide_and_conquer-folder * Update build.yml * updating DIRECTORY.md * Update ripple_adder_classic.py * Update .github/workflows/build.yml * removed imports from typing * removed conversion to string * Revert "removed conversion to string" This reverts commit 2f7c4731d103f24c73fb98c9a6898525998774c5. 
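The annotation clean-ups in this patch follow one recurring pattern: `typing.List`/`typing.Tuple` imports (and the invalid bare `(int, int)` return annotation) are replaced by the built-in generics `list[...]` and `tuple[...]` from PEP 585. A minimal sketch of the idiom, assuming Python 3.9+ (on 3.7/3.8 the same spelling works in annotations once the module starts with `from __future__ import annotations`); the function below is illustrative only, not part of the patch:

def min_and_max(values: list[int]) -> tuple[int, int]:
    # A bare (int, int) is just a tuple object, not a type, so mypy rejects it;
    # tuple[int, int] is the supported spelling for a fixed-size pair.
    return min(values), max(values)


print(min_and_max([3, 1, 4, 1, 5]))  # (1, 5)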
* implemented suggested changes * Update receive_file.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .github/workflows/build.yml | 7 ++++++- divide_and_conquer/max_difference_pair.py | 5 +---- divide_and_conquer/strassen_matrix_multiplication.py | 2 +- electronics/electric_power.py | 2 ++ electronics/ohms_law.py | 2 ++ file_transfer/receive_file.py | 2 +- file_transfer/send_file.py | 2 +- quantum/ripple_adder_classic.py | 2 +- 8 files changed, 15 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c85b82330c67..74b885b90343 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -30,13 +30,18 @@ jobs: cellular_automata compression computer_vision + divide_and_conquer + electronics + file_transfer fractals fuzzy_logic genetic_algorithm geodesy knapsack networking_flow - scheduling sorts + quantum + scheduling + sorts - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} diff --git a/divide_and_conquer/max_difference_pair.py b/divide_and_conquer/max_difference_pair.py index b976aca43137..ffc4b76a7154 100644 --- a/divide_and_conquer/max_difference_pair.py +++ b/divide_and_conquer/max_difference_pair.py @@ -1,7 +1,4 @@ -from typing import List - - -def max_difference(a: List[int]) -> (int, int): +def max_difference(a: list[int]) -> tuple[int, int]: """ We are given an array A[1..n] of integers, n >= 1. We want to find a pair of indices (i, j) such that diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py index 29a174daebf9..ca10e04abcbc 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -121,7 +121,7 @@ def strassen(matrix1: list, matrix2: list) -> list: dimension2 = matrix_dimensions(matrix2) if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]: - return matrix1, matrix2 + return [matrix1, matrix2] maximum = max(max(dimension1), max(dimension2)) maxim = int(math.pow(2, math.ceil(math.log2(maximum)))) diff --git a/electronics/electric_power.py b/electronics/electric_power.py index 8f0293bd2d10..e4e685bbd0f0 100644 --- a/electronics/electric_power.py +++ b/electronics/electric_power.py @@ -42,6 +42,8 @@ def electric_power(voltage: float, current: float, power: float) -> Tuple: return result("current", power / voltage) elif power == 0: return result("power", float(round(abs(voltage * current), 2))) + else: + raise ValueError("Exactly one argument must be 0") if __name__ == "__main__": diff --git a/electronics/ohms_law.py b/electronics/ohms_law.py index c53619a10935..41bffa9f87c8 100644 --- a/electronics/ohms_law.py +++ b/electronics/ohms_law.py @@ -32,6 +32,8 @@ def ohms_law(voltage: float, current: float, resistance: float) -> Dict[str, flo return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} + else: + raise ValueError("Exactly one argument must be 0") if __name__ == "__main__": diff --git a/file_transfer/receive_file.py b/file_transfer/receive_file.py index cfba6ed88484..37a503036dc2 100644 --- a/file_transfer/receive_file.py +++ b/file_transfer/receive_file.py @@ -13,7 +13,7 @@ print("Receiving data...") while True: data = sock.recv(1024) - print(f"data={data}") + print(f"{data = }") if not data: break out_file.write(data) # Write data 
to a file diff --git a/file_transfer/send_file.py b/file_transfer/send_file.py index 5b53471dfb50..1c56e48f47a1 100644 --- a/file_transfer/send_file.py +++ b/file_transfer/send_file.py @@ -13,7 +13,7 @@ def send_file(filename: str = "mytext.txt", testing: bool = False) -> None: conn, addr = sock.accept() # Establish connection with client. print(f"Got connection from {addr}") data = conn.recv(1024) - print(f"Server received {data}") + print(f"Server received: {data = }") with open(filename, "rb") as in_file: data = in_file.read(1024) diff --git a/quantum/ripple_adder_classic.py b/quantum/ripple_adder_classic.py index f5b0a980c8e2..dc0c2103b2e5 100644 --- a/quantum/ripple_adder_classic.py +++ b/quantum/ripple_adder_classic.py @@ -6,7 +6,7 @@ from qiskit.providers import BaseBackend -def store_two_classics(val1: int, val2: int) -> (QuantumCircuit, str, str): +def store_two_classics(val1: int, val2: int) -> tuple[QuantumCircuit, str, str]: """ Generates a Quantum Circuit which stores two classical integers Returns the circuit and binary representation of the integers From 9b60be67afca18f0d5e50e532096a68605d61b81 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Fri, 26 Mar 2021 16:51:16 +0530 Subject: [PATCH 0110/1543] [mypy] fix small folders 2 (#4293) * Update perceptron.py * Update binary_tree_traversals.py * fix machine_learning * Update build.yml * Update perceptron.py * Update machine_learning/forecasting/run.py Co-authored-by: Christian Clauss --- .github/workflows/build.yml | 3 +++ machine_learning/forecasting/run.py | 3 +-- machine_learning/k_means_clust.py | 2 +- machine_learning/word_frequency_functions.py | 2 +- neural_network/perceptron.py | 21 +++++++++++++------- traversals/binary_tree_traversals.py | 4 ++-- 6 files changed, 22 insertions(+), 13 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 74b885b90343..87cc8b67341d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -38,10 +38,13 @@ jobs: genetic_algorithm geodesy knapsack + machine_learning networking_flow + neural_network quantum scheduling sorts + traversals - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py index 0e11f958825f..b11a230129eb 100644 --- a/machine_learning/forecasting/run.py +++ b/machine_learning/forecasting/run.py @@ -29,8 +29,7 @@ def linear_regression_prediction( >>> abs(n - 5.0) < 1e-6 # Checking precision because of floating point errors True """ - x = [[1, item, train_mtch[i]] for i, item in enumerate(train_dt)] - x = np.array(x) + x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)]) y = np.array(train_usr) beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2]) diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index f155d4845f41..c45be8a4c064 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -200,7 +200,7 @@ def kmeans( def ReportGenerator( - df: pd.DataFrame, ClusteringVariables: np.array, FillMissingReport=None + df: pd.DataFrame, ClusteringVariables: np.ndarray, FillMissingReport=None ) -> pd.DataFrame: """ Function generates easy-erading clustering report. 
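The signature change above is the typical mypy fix in this series: `np.array` is a factory function rather than a class, so it is meaningless as a type annotation, whereas `np.ndarray` is the actual array type. A minimal sketch of the corrected idiom (the function itself is illustrative, not from the repository):

import numpy as np


def normalize(vector: np.ndarray) -> np.ndarray:
    # Annotate with the class np.ndarray; np.array() is only used to build values.
    return vector / np.linalg.norm(vector)


print(normalize(np.array([3.0, 4.0])))  # [0.6 0.8]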
It takes 2 arguments as an input: diff --git a/machine_learning/word_frequency_functions.py b/machine_learning/word_frequency_functions.py index 9cf7b694c6be..3e8faf39cf07 100644 --- a/machine_learning/word_frequency_functions.py +++ b/machine_learning/word_frequency_functions.py @@ -61,7 +61,7 @@ def term_frequency(term: str, document: str) -> int: return len([word for word in tokenize_document if word.lower() == term.lower()]) -def document_frequency(term: str, corpus: str) -> int: +def document_frequency(term: str, corpus: str) -> tuple[int, int]: """ Calculate the number of documents in a corpus that contain a given term diff --git a/neural_network/perceptron.py b/neural_network/perceptron.py index 23b409b227c4..063be5ea554c 100644 --- a/neural_network/perceptron.py +++ b/neural_network/perceptron.py @@ -11,7 +11,14 @@ class Perceptron: - def __init__(self, sample, target, learning_rate=0.01, epoch_number=1000, bias=-1): + def __init__( + self, + sample: list[list[float]], + target: list[int], + learning_rate: float = 0.01, + epoch_number: int = 1000, + bias: float = -1, + ) -> None: """ Initializes a Perceptron network for oil analysis :param sample: sample dataset of 3 parameters with shape [30,3] @@ -46,7 +53,7 @@ def __init__(self, sample, target, learning_rate=0.01, epoch_number=1000, bias=- self.bias = bias self.number_sample = len(sample) self.col_sample = len(sample[0]) # number of columns in dataset - self.weight = [] + self.weight: list = [] def training(self) -> None: """ @@ -94,7 +101,7 @@ def training(self) -> None: # if epoch_count > self.epoch_number or not error: break - def sort(self, sample) -> None: + def sort(self, sample: list[float]) -> None: """ :param sample: example row to classify as P1 or P2 :return: None @@ -221,11 +228,11 @@ def sign(self, u: float) -> int: print("Finished training perceptron") print("Enter values to predict or q to exit") while True: - sample = [] + sample: list = [] for i in range(len(samples[0])): - observation = input("value: ").strip() - if observation == "q": + user_input = input("value: ").strip() + if user_input == "q": break - observation = float(observation) + observation = float(user_input) sample.insert(i, observation) network.sort(sample) diff --git a/traversals/binary_tree_traversals.py b/traversals/binary_tree_traversals.py index cb471ba55bac..f919a2962354 100644 --- a/traversals/binary_tree_traversals.py +++ b/traversals/binary_tree_traversals.py @@ -188,7 +188,7 @@ def pre_order_iter(node: TreeNode) -> None: """ if not isinstance(node, TreeNode) or not node: return - stack: List[TreeNode] = [] + stack: list[TreeNode] = [] n = node while n or stack: while n: # start from root node, find its left child @@ -218,7 +218,7 @@ def in_order_iter(node: TreeNode) -> None: """ if not isinstance(node, TreeNode) or not node: return - stack: List[TreeNode] = [] + stack: list[TreeNode] = [] n = node while n or stack: while n: From 35901eb6febe856007d9a0f34f3881eeb58663d6 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Sat, 27 Mar 2021 14:48:48 +0530 Subject: [PATCH 0111/1543] Move: traversals/binary_tree_traversals.py --> searches/binary_tree_traversal.py (#4295) * Rename traversals/binary_tree_traversals.py to searches/binary_tree_traversal.py * updating DIRECTORY.md * Delete traversals directory * Update build.yml Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 1 - DIRECTORY.md | 4 +--- .../binary_tree_traversal.py | 0 traversals/__init__.py | 0 4 files changed, 1 
insertion(+), 4 deletions(-) rename traversals/binary_tree_traversals.py => searches/binary_tree_traversal.py (100%) delete mode 100644 traversals/__init__.py diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 87cc8b67341d..7273119302e2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -44,7 +44,6 @@ jobs: quantum scheduling sorts - traversals - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} diff --git a/DIRECTORY.md b/DIRECTORY.md index f5297db05fe1..42a6c49c735f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -817,6 +817,7 @@ ## Searches * [Binary Search](https://github.com/TheAlgorithms/Python/blob/master/searches/binary_search.py) + * [Binary Tree Traversal](https://github.com/TheAlgorithms/Python/blob/master/searches/binary_tree_traversal.py) * [Double Linear Search](https://github.com/TheAlgorithms/Python/blob/master/searches/double_linear_search.py) * [Double Linear Search Recursion](https://github.com/TheAlgorithms/Python/blob/master/searches/double_linear_search_recursion.py) * [Fibonacci Search](https://github.com/TheAlgorithms/Python/blob/master/searches/fibonacci_search.py) @@ -909,9 +910,6 @@ * [Word Patterns](https://github.com/TheAlgorithms/Python/blob/master/strings/word_patterns.py) * [Z Function](https://github.com/TheAlgorithms/Python/blob/master/strings/z_function.py) -## Traversals - * [Binary Tree Traversals](https://github.com/TheAlgorithms/Python/blob/master/traversals/binary_tree_traversals.py) - ## Web Programming * [Co2 Emission](https://github.com/TheAlgorithms/Python/blob/master/web_programming/co2_emission.py) * [Covid Stats Via Xpath](https://github.com/TheAlgorithms/Python/blob/master/web_programming/covid_stats_via_xpath.py) diff --git a/traversals/binary_tree_traversals.py b/searches/binary_tree_traversal.py similarity index 100% rename from traversals/binary_tree_traversals.py rename to searches/binary_tree_traversal.py diff --git a/traversals/__init__.py b/traversals/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 From c22c7d503be5f48ae257c648f7b83b8a80a02738 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 31 Mar 2021 05:02:25 +0200 Subject: [PATCH 0112/1543] mypy: Use a --exclude list (#4296) * mypy: Use a --exclude list * Graphics works on my machine * A few more... * A few more... 
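The `--exclude` option introduced below takes one regular expression; any path it matches is skipped, and directories are tested with a trailing slash, which is why every alternative in the workflow's pattern is anchored with `/$`. A rough way to sanity-check such a pattern locally, assuming it behaves like Python's own `re` matching (an abridged pattern is used here for brevity):

import re

EXCLUDE = r"(conversions|data_structures|maths|strings*)/$"  # shortened version of the workflow regex

for path in ("maths/", "sorts/", "data_structures/"):
    verdict = "skipped" if re.search(EXCLUDE, path) else "checked"
    print(f"{path} -> {verdict}")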
* Update build.yml --- .github/workflows/build.yml | 24 ++---------------------- 1 file changed, 2 insertions(+), 22 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7273119302e2..f544c02b1c35 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,29 +21,9 @@ jobs: run: | python -m pip install --upgrade pip setuptools six wheel python -m pip install mypy pytest-cov -r requirements.txt - # FIXME: #4052 fix mypy errors in other directories and add them here + # FIXME: #4052 fix mypy errors in the exclude directories and remove them below - run: mypy --ignore-missing-imports - backtracking - bit_manipulation - blockchain - boolean_algebra - cellular_automata - compression - computer_vision - divide_and_conquer - electronics - file_transfer - fractals - fuzzy_logic - genetic_algorithm - geodesy - knapsack - machine_learning - networking_flow - neural_network - quantum - scheduling - sorts + --exclude '(arithmetic_analysis|ciphers|conversions|data_structures|digital_image_processing|dynamic_programming|graphs|hashes|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings|web_programming*)/$' . - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} From 895bca36541598a04dba525568a20d2282e0ffd9 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 31 Mar 2021 05:18:07 +0200 Subject: [PATCH 0113/1543] [mypy] Fix web_programming directory (#4297) * Update world_covid19_stats.py * Delete monkeytype_config.py * updating DIRECTORY.md * Apply pyannotate suggestions to emails_from_url.py * mypy web_programming/emails_from_url.py * super().__init__() * mypy --ignore-missing-imports web_programming/emails_from_url.py * Update emails_from_url.py * self.urls: list[str] = [] * mypy: Fix web_programming directory Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Dhruv Manilawala --- .github/workflows/build.yml | 2 +- web_programming/currency_converter.py | 2 +- web_programming/emails_from_url.py | 19 ++++++++++--------- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f544c02b1c35..76c6357fe0ca 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -23,7 +23,7 @@ jobs: python -m pip install mypy pytest-cov -r requirements.txt # FIXME: #4052 fix mypy errors in the exclude directories and remove them below - run: mypy --ignore-missing-imports - --exclude '(arithmetic_analysis|ciphers|conversions|data_structures|digital_image_processing|dynamic_programming|graphs|hashes|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings|web_programming*)/$' . + --exclude '(arithmetic_analysis|ciphers|conversions|data_structures|digital_image_processing|dynamic_programming|graphs|hashes|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . 
- if: ${{ success() }} diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py index 6aed2a5578a5..447595b0b646 100644 --- a/web_programming/currency_converter.py +++ b/web_programming/currency_converter.py @@ -9,7 +9,7 @@ URL_BASE = "https://www.amdoren.com/api/currency.php" TESTING = os.getenv("CI", False) -API_KEY = os.getenv("AMDOREN_API_KEY") +API_KEY = os.getenv("AMDOREN_API_KEY", "") if not API_KEY and not TESTING: raise KeyError("Please put your API key in an environment variable.") diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index 01dee274f015..0571ac3313a3 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -8,18 +8,19 @@ import re from html.parser import HTMLParser +from typing import Optional from urllib import parse import requests class Parser(HTMLParser): - def __init__(self, domain: str): - HTMLParser.__init__(self) - self.data = [] + def __init__(self, domain: str) -> None: + super().__init__() + self.urls: list[str] = [] self.domain = domain - def handle_starttag(self, tag: str, attrs: str) -> None: + def handle_starttag(self, tag: str, attrs: list[tuple[str, Optional[str]]]) -> None: """ This function parse html to take takes url from tags """ @@ -29,10 +30,10 @@ def handle_starttag(self, tag: str, attrs: str) -> None: for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": - # If not already in data. - if value not in self.data: + # If not already in urls. + if value not in self.urls: url = parse.urljoin(self.domain, value) - self.data.append(url) + self.urls.append(url) # Get main domain name (example.com) @@ -59,7 +60,7 @@ def get_sub_domain_name(url: str) -> str: return parse.urlparse(url).netloc -def emails_from_url(url: str = "https://github.com") -> list: +def emails_from_url(url: str = "https://github.com") -> list[str]: """ This function takes url and return all valid urls """ @@ -78,7 +79,7 @@ def emails_from_url(url: str = "https://github.com") -> list: # Get links and loop through valid_emails = set() - for link in parser.data: + for link in parser.urls: # open URL. # read = requests.get(link) try: From 5229c749553d9ec65d455e0183a574e45ac3e73e Mon Sep 17 00:00:00 2001 From: algobytewise Date: Fri, 2 Apr 2021 13:02:12 +0530 Subject: [PATCH 0114/1543] [mypy] Fix directory arithmetic_analysis (#4304) * fix directory arithmetic_analysis * Update build.yml * temporary fix for psf/black bug see https://github.com/psf/black/issues/2079 * Update in_static_equilibrium.py --- .github/workflows/build.yml | 2 +- .github/workflows/pre-commit.yml | 2 +- arithmetic_analysis/gaussian_elimination.py | 6 +++--- arithmetic_analysis/in_static_equilibrium.py | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 76c6357fe0ca..e66b94b1a074 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -23,7 +23,7 @@ jobs: python -m pip install mypy pytest-cov -r requirements.txt # FIXME: #4052 fix mypy errors in the exclude directories and remove them below - run: mypy --ignore-missing-imports - --exclude '(arithmetic_analysis|ciphers|conversions|data_structures|digital_image_processing|dynamic_programming|graphs|hashes|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . 
+ --exclude '(ciphers|conversions|data_structures|digital_image_processing|dynamic_programming|graphs|hashes|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 17fdad1204e9..dd1a8a945092 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -14,7 +14,7 @@ jobs: ~/.cache/pip key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} - uses: actions/setup-python@v2 - - uses: psf/black@stable + - uses: psf/black@20.8b1 - name: Install pre-commit run: | python -m pip install --upgrade pip diff --git a/arithmetic_analysis/gaussian_elimination.py b/arithmetic_analysis/gaussian_elimination.py index 51207686c12a..2dada4fbf9b1 100644 --- a/arithmetic_analysis/gaussian_elimination.py +++ b/arithmetic_analysis/gaussian_elimination.py @@ -7,7 +7,7 @@ import numpy as np -def retroactive_resolution(coefficients: np.matrix, vector: np.array) -> np.array: +def retroactive_resolution(coefficients: np.matrix, vector: np.ndarray) -> np.ndarray: """ This function performs a retroactive linear system resolution for triangular matrix @@ -38,7 +38,7 @@ def retroactive_resolution(coefficients: np.matrix, vector: np.array) -> np.arra return x -def gaussian_elimination(coefficients: np.matrix, vector: np.array) -> np.array: +def gaussian_elimination(coefficients: np.matrix, vector: np.ndarray) -> np.ndarray: """ This function performs Gaussian elimination method @@ -57,7 +57,7 @@ def gaussian_elimination(coefficients: np.matrix, vector: np.array) -> np.array: # coefficients must to be a square matrix so we need to check first rows, columns = np.shape(coefficients) if rows != columns: - return [] + return np.array((), dtype=float) # augmented matrix augmented_mat = np.concatenate((coefficients, vector), axis=1) diff --git a/arithmetic_analysis/in_static_equilibrium.py b/arithmetic_analysis/in_static_equilibrium.py index 9b2892151850..7b5006a1a82c 100644 --- a/arithmetic_analysis/in_static_equilibrium.py +++ b/arithmetic_analysis/in_static_equilibrium.py @@ -3,7 +3,7 @@ """ from typing import List -from numpy import array, cos, cross, radians, sin +from numpy import array, cos, cross, ndarray, radians, sin def polar_force( @@ -23,7 +23,7 @@ def polar_force( def in_static_equilibrium( - forces: array, location: array, eps: float = 10 ** -1 + forces: ndarray, location: ndarray, eps: float = 10 ** -1 ) -> bool: """ Check if a system is in equilibrium. 
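As a quick illustration of what this helper checks: each 2-D force contributes the z-component of `location × force` as its moment about the origin, and the system only counts as balanced when those moments cancel. The numbers below are made up for the example and are not part of the module's doctests:

import numpy as np

forces = np.array([[0, 10], [0, -10]])    # 10 N up at x = 0, 10 N down at x = 1
location = np.array([[0, 0], [1, 0]])
moments = np.cross(location, forces)      # array([  0, -10])
print(abs(moments.sum()) < 0.1)           # False: a net torque remains, so not in equilibrium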
@@ -42,7 +42,7 @@ def in_static_equilibrium( False """ # summation of moments is zero - moments: array = cross(location, forces) + moments: ndarray = cross(location, forces) sum_moments: float = sum(moments) return abs(sum_moments) < eps From a53fcf221bc1d361b26c055aa12be38c4e8b2022 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Sat, 3 Apr 2021 13:31:46 +0530 Subject: [PATCH 0115/1543] [mypy] fix hashes folder (#4305) * fix hashes-folder * Update build.yml * fix doctests * return-values to int * Update hashes/adler32.py * type hints for elements Co-authored-by: Christian Clauss --- .github/workflows/build.yml | 2 +- hashes/adler32.py | 4 ++-- hashes/chaos_machine.py | 3 ++- hashes/enigma_machine.py | 7 +++---- hashes/sdbm.py | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e66b94b1a074..c1aeaa031565 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -23,7 +23,7 @@ jobs: python -m pip install mypy pytest-cov -r requirements.txt # FIXME: #4052 fix mypy errors in the exclude directories and remove them below - run: mypy --ignore-missing-imports - --exclude '(ciphers|conversions|data_structures|digital_image_processing|dynamic_programming|graphs|hashes|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . + --exclude '(ciphers|conversions|data_structures|digital_image_processing|dynamic_programming|graphs|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} diff --git a/hashes/adler32.py b/hashes/adler32.py index fad747abe3c3..4a61b97e3590 100644 --- a/hashes/adler32.py +++ b/hashes/adler32.py @@ -9,10 +9,10 @@ """ -def adler32(plain_text: str) -> str: +def adler32(plain_text: str) -> int: """ Function implements adler-32 hash. - Itterates and evaluates new value for each character + Iterates and evaluates a new value for each character >>> adler32('Algorithms') 363791387 diff --git a/hashes/chaos_machine.py b/hashes/chaos_machine.py index 1bdf984b68de..7ef4fdb3ca51 100644 --- a/hashes/chaos_machine.py +++ b/hashes/chaos_machine.py @@ -6,7 +6,8 @@ m = 5 # Buffer Space (with Parameters Space) -buffer_space, params_space = [], [] +buffer_space: list[float] = [] +params_space: list[float] = [] # Machine Time machine_time = 0 diff --git a/hashes/enigma_machine.py b/hashes/enigma_machine.py index 5420bacc1409..d1cb6efc2e8d 100644 --- a/hashes/enigma_machine.py +++ b/hashes/enigma_machine.py @@ -41,8 +41,7 @@ def engine(input_character): if __name__ == "__main__": - decode = input("Type your message:\n") - decode = list(decode) + decode = list(input("Type your message:\n")) while True: try: token = int(input("Please set token:(must be only digits)\n")) @@ -51,8 +50,8 @@ def engine(input_character): print(error) for i in range(token): rotator() - for i in decode: - engine(i) + for j in decode: + engine(j) print("\n" + "".join(code)) print( f"\nYour Token is {token} please write it down.\nIf you want to decode " diff --git a/hashes/sdbm.py b/hashes/sdbm.py index 86d47a1d9967..daf292717f75 100644 --- a/hashes/sdbm.py +++ b/hashes/sdbm.py @@ -19,7 +19,7 @@ """ -def sdbm(plain_text: str) -> str: +def sdbm(plain_text: str) -> int: """ Function implements sdbm hash, easy to use, great for bits scrambling. 
iterates over each character in the given string and applies function to each of From e7e6cbfb8ff4a85be123c124d8ea0a449afe9f9c Mon Sep 17 00:00:00 2001 From: Elisha Hollander Date: Sun, 4 Apr 2021 06:55:57 +0300 Subject: [PATCH 0116/1543] refactor: Remove "redefinition" of dict element (#4309) --- project_euler/problem_074/sol1.py | 1 - 1 file changed, 1 deletion(-) diff --git a/project_euler/problem_074/sol1.py b/project_euler/problem_074/sol1.py index 5e6aff6f52f2..38d4e1439307 100644 --- a/project_euler/problem_074/sol1.py +++ b/project_euler/problem_074/sol1.py @@ -51,7 +51,6 @@ 871: 2, 45361: 2, 872: 2, - 45361: 2, } From 0992498a107267ccbfc7ab2c72045c31a537ab25 Mon Sep 17 00:00:00 2001 From: Elisha Hollander Date: Sun, 4 Apr 2021 07:00:17 +0300 Subject: [PATCH 0117/1543] refactor: Remove unnecessary if else condition (#4307) All the operation is being done in an else condition for "if number >= 0" --- bit_manipulation/binary_shifts.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/bit_manipulation/binary_shifts.py b/bit_manipulation/binary_shifts.py index fe62880f941c..50dc27ffeef6 100644 --- a/bit_manipulation/binary_shifts.py +++ b/bit_manipulation/binary_shifts.py @@ -91,9 +91,7 @@ def arithmetic_right_shift(number: int, shift_amount: int) -> str: binary_number_length = len(bin(number)[3:]) # Find 2's complement of number binary_number = bin(abs(number) - (1 << binary_number_length))[3:] binary_number = ( - ("1" + "0" * (binary_number_length - len(binary_number)) + binary_number) - if number < 0 - else "0" + "1" + "0" * (binary_number_length - len(binary_number)) + binary_number ) if shift_amount >= len(binary_number): From 806b3864c36e24459cb4685b400e74c5685f5674 Mon Sep 17 00:00:00 2001 From: Elisha Hollander Date: Sun, 4 Apr 2021 07:02:36 +0300 Subject: [PATCH 0118/1543] refactor: Remove default value of exponential_term (#4308) exponential_term doesn't need a default value --- maths/bailey_borwein_plouffe.py | 1 - 1 file changed, 1 deletion(-) diff --git a/maths/bailey_borwein_plouffe.py b/maths/bailey_borwein_plouffe.py index febf7e975516..b647ae56dbac 100644 --- a/maths/bailey_borwein_plouffe.py +++ b/maths/bailey_borwein_plouffe.py @@ -70,7 +70,6 @@ def _subsum( sum = 0.0 for sum_index in range(digit_pos_to_extract + precision): denominator = 8 * sum_index + denominator_addend - exponential_term = 0.0 if sum_index < digit_pos_to_extract: # if the exponential term is an integer and we mod it by the denominator # before dividing, only the integer part of the sum will change; From 60895366c0f50844af2737130ed98c2510e90060 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Sun, 4 Apr 2021 10:52:12 +0530 Subject: [PATCH 0119/1543] fix(mypy): type annotations for cipher algorithms (#4306) * fix(mypy): type annotations for cipher algorithms * Update mypy workflow to include cipher directory * fix: mypy errors in hill_cipher.py * fix build errors --- .github/workflows/build.yml | 2 +- ciphers/diffie_hellman.py | 8 +- ciphers/hill_cipher.py | 23 +- ciphers/mixed_keyword_cypher.py | 18 +- ciphers/mono_alphabetic_ciphers.py | 8 +- ciphers/morse_code_implementation.py | 2 +- ciphers/onepad_cipher.py | 9 +- ciphers/playfair_cipher.py | 5 +- ciphers/porta_cipher.py | 49 ++--- ciphers/rail_fence_cipher.py | 10 +- ciphers/rot13.py | 2 +- ciphers/rsa_cipher.py | 206 +++++++++--------- ciphers/rsa_factorization.py | 2 +- ciphers/shuffled_shift_cipher.py | 19 +- ciphers/simple_keyword_cypher.py | 8 +- ciphers/simple_substitution_cipher.py | 4 +- 
ciphers/trafid_cipher.py | 10 +- ciphers/transposition_cipher.py | 2 +- ...ansposition_cipher_encrypt_decrypt_file.py | 2 +- ciphers/vigenere_cipher.py | 2 +- ciphers/xor_cipher.py | 4 +- 21 files changed, 196 insertions(+), 199 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c1aeaa031565..ac5f80206e35 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -23,7 +23,7 @@ jobs: python -m pip install mypy pytest-cov -r requirements.txt # FIXME: #4052 fix mypy errors in the exclude directories and remove them below - run: mypy --ignore-missing-imports - --exclude '(ciphers|conversions|data_structures|digital_image_processing|dynamic_programming|graphs|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . + --exclude '(conversions|data_structures|digital_image_processing|dynamic_programming|graphs|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} diff --git a/ciphers/diffie_hellman.py b/ciphers/diffie_hellman.py index ea35b67b483e..072f4aaaa6da 100644 --- a/ciphers/diffie_hellman.py +++ b/ciphers/diffie_hellman.py @@ -241,9 +241,7 @@ def generate_shared_key(self, other_key_str: str) -> str: return sha256(str(shared_key).encode()).hexdigest() @staticmethod - def is_valid_public_key_static( - local_private_key_str: str, remote_public_key_str: str, prime: int - ) -> bool: + def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool: # check if the other public key is valid based on NIST SP800-56 if 2 <= remote_public_key_str and remote_public_key_str <= prime - 2: if pow(remote_public_key_str, (prime - 1) // 2, prime) == 1: @@ -257,9 +255,7 @@ def generate_shared_key_static( local_private_key = int(local_private_key_str, base=16) remote_public_key = int(remote_public_key_str, base=16) prime = primes[group]["prime"] - if not DiffieHellman.is_valid_public_key_static( - local_private_key, remote_public_key, prime - ): + if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime): raise ValueError("Invalid public key") shared_key = pow(remote_public_key, local_private_key, prime) return sha256(str(shared_key).encode()).hexdigest() diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index 8237abf6aa5d..bc8f5b41b624 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -64,13 +64,12 @@ class HillCipher: to_int = numpy.vectorize(lambda x: round(x)) - def __init__(self, encrypt_key: int): + def __init__(self, encrypt_key: numpy.ndarray) -> None: """ encrypt_key is an NxN numpy array """ self.encrypt_key = self.modulus(encrypt_key) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key - self.decrypt_key = None self.break_key = encrypt_key.shape[0] def replace_letters(self, letter: str) -> int: @@ -139,8 +138,8 @@ def encrypt(self, text: str) -> str: for i in range(0, len(text) - self.break_key + 1, self.break_key): batch = text[i : i + self.break_key] - batch_vec = [self.replace_letters(char) for char in batch] - batch_vec = numpy.array([batch_vec]).T + vec = [self.replace_letters(char) for char in batch] + batch_vec = numpy.array([vec]).T batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[ 0 ] @@ -151,7 +150,7 @@ def encrypt(self, text: str) -> str: return encrypted - def 
make_decrypt_key(self): + def make_decrypt_key(self) -> numpy.ndarray: """ >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) >>> hill_cipher.make_decrypt_key() @@ -184,17 +183,15 @@ def decrypt(self, text: str) -> str: >>> hill_cipher.decrypt('85FF00') 'HELLOO' """ - self.decrypt_key = self.make_decrypt_key() + decrypt_key = self.make_decrypt_key() text = self.process_text(text.upper()) decrypted = "" for i in range(0, len(text) - self.break_key + 1, self.break_key): batch = text[i : i + self.break_key] - batch_vec = [self.replace_letters(char) for char in batch] - batch_vec = numpy.array([batch_vec]).T - batch_decrypted = self.modulus(self.decrypt_key.dot(batch_vec)).T.tolist()[ - 0 - ] + vec = [self.replace_letters(char) for char in batch] + batch_vec = numpy.array([vec]).T + batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0] decrypted_batch = "".join( self.replace_digits(num) for num in batch_decrypted ) @@ -203,12 +200,12 @@ def decrypt(self, text: str) -> str: return decrypted -def main(): +def main() -> None: N = int(input("Enter the order of the encryption key: ")) hill_matrix = [] print("Enter each row of the encryption key with space separated integers") - for i in range(N): + for _ in range(N): row = [int(x) for x in input().split()] hill_matrix.append(row) diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index 59298d310ce0..178902173477 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -29,8 +29,8 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: # print(temp) alpha = [] modalpha = [] - for i in range(65, 91): - t = chr(i) + for j in range(65, 91): + t = chr(j) alpha.append(t) if t not in temp: temp.append(t) @@ -38,23 +38,23 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: r = int(26 / 4) # print(r) k = 0 - for i in range(r): - t = [] + for _ in range(r): + s = [] for j in range(len_temp): - t.append(temp[k]) + s.append(temp[k]) if not (k < 25): break k += 1 - modalpha.append(t) + modalpha.append(s) # print(modalpha) d = {} j = 0 k = 0 for j in range(len_temp): - for i in modalpha: - if not (len(i) - 1 >= j): + for m in modalpha: + if not (len(m) - 1 >= j): break - d[alpha[k]] = i[j] + d[alpha[k]] = m[j] if not k < 25: break k += 1 diff --git a/ciphers/mono_alphabetic_ciphers.py b/ciphers/mono_alphabetic_ciphers.py index 0a29d6442896..46013f4936bc 100644 --- a/ciphers/mono_alphabetic_ciphers.py +++ b/ciphers/mono_alphabetic_ciphers.py @@ -1,7 +1,11 @@ +from typing import Literal + LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" -def translate_message(key, message, mode): +def translate_message( + key: str, message: str, mode: Literal["encrypt", "decrypt"] +) -> str: """ >>> translate_message("QWERTYUIOPASDFGHJKLZXCVBNM","Hello World","encrypt") 'Pcssi Bidsm' @@ -40,7 +44,7 @@ def decrypt_message(key: str, message: str) -> str: return translate_message(key, message, "decrypt") -def main(): +def main() -> None: message = "Hello World" key = "QWERTYUIOPASDFGHJKLZXCVBNM" mode = "decrypt" # set to 'encrypt' or 'decrypt' diff --git a/ciphers/morse_code_implementation.py b/ciphers/morse_code_implementation.py index 1cce2ef8b386..eec4183fa56e 100644 --- a/ciphers/morse_code_implementation.py +++ b/ciphers/morse_code_implementation.py @@ -83,7 +83,7 @@ def decrypt(message: str) -> str: return decipher -def main(): +def main() -> None: message = "Morse code here" result = encrypt(message.upper()) print(result) diff --git a/ciphers/onepad_cipher.py 
b/ciphers/onepad_cipher.py index a91f2b4d31c5..3ace9b098cba 100644 --- a/ciphers/onepad_cipher.py +++ b/ciphers/onepad_cipher.py @@ -2,7 +2,8 @@ class Onepad: - def encrypt(self, text: str) -> ([str], [int]): + @staticmethod + def encrypt(text: str) -> tuple[list[int], list[int]]: """Function to encrypt text using pseudo-random numbers""" plain = [ord(i) for i in text] key = [] @@ -14,14 +15,14 @@ def encrypt(self, text: str) -> ([str], [int]): key.append(k) return cipher, key - def decrypt(self, cipher: [str], key: [int]) -> str: + @staticmethod + def decrypt(cipher: list[int], key: list[int]) -> str: """Function to decrypt text using pseudo-random numbers.""" plain = [] for i in range(len(key)): p = int((cipher[i] - (key[i]) ** 2) / key[i]) plain.append(chr(p)) - plain = "".join([i for i in plain]) - return plain + return "".join([i for i in plain]) if __name__ == "__main__": diff --git a/ciphers/playfair_cipher.py b/ciphers/playfair_cipher.py index 219437448e53..7c0ee5bd5ae1 100644 --- a/ciphers/playfair_cipher.py +++ b/ciphers/playfair_cipher.py @@ -1,8 +1,9 @@ import itertools import string +from typing import Generator, Iterable -def chunker(seq, size): +def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]: it = iter(seq) while True: chunk = tuple(itertools.islice(it, size)) @@ -37,7 +38,7 @@ def prepare_input(dirty: str) -> str: return clean -def generate_table(key: str) -> [str]: +def generate_table(key: str) -> list[str]: # I and J are used interchangeably to allow # us to use a 5x5 table (25 letters) diff --git a/ciphers/porta_cipher.py b/ciphers/porta_cipher.py index 29043c4c9fac..498ae294041e 100644 --- a/ciphers/porta_cipher.py +++ b/ciphers/porta_cipher.py @@ -28,7 +28,7 @@ } -def generate_table(key: str) -> [(str, str)]: +def generate_table(key: str) -> list[tuple[str, str]]: """ >>> generate_table('marvin') # doctest: +NORMALIZE_WHITESPACE [('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'), @@ -60,30 +60,21 @@ def decrypt(key: str, words: str) -> str: return encrypt(key, words) -def get_position(table: [(str, str)], char: str) -> (int, int) or (None, None): +def get_position(table: tuple[str, str], char: str) -> tuple[int, int]: """ - >>> table = [ - ... ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'), - ... ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'), - ... ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')] - >>> get_position(table, 'A') - (None, None) + >>> get_position(generate_table('marvin')[0], 'M') + (0, 12) """ - if char in table[0]: - row = 0 - else: - row = 1 if char in table[1] else -1 - return (None, None) if row == -1 else (row, table[row].index(char)) + # `char` is either in the 0th row or the 1st row + row = 0 if char in table[0] else 1 + col = table[row].index(char) + return row, col -def get_opponent(table: [(str, str)], char: str) -> str: +def get_opponent(table: tuple[str, str], char: str) -> str: """ - >>> table = [ - ... ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'), - ... ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'), - ... 
('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')] - >>> get_opponent(table, 'A') - 'A' + >>> get_opponent(generate_table('marvin')[0], 'M') + 'T' """ row, col = get_position(table, char.upper()) if row == 1: @@ -97,14 +88,16 @@ def get_opponent(table: [(str, str)], char: str) -> str: doctest.testmod() # Fist ensure that all our tests are passing... """ - ENTER KEY: marvin - ENTER TEXT TO ENCRYPT: jessica - ENCRYPTED: QRACRWU - DECRYPTED WITH KEY: JESSICA + Demo: + + Enter key: marvin + Enter text to encrypt: jessica + Encrypted: QRACRWU + Decrypted with key: JESSICA """ - key = input("ENTER KEY: ").strip() - text = input("ENTER TEXT TO ENCRYPT: ").strip() + key = input("Enter key: ").strip() + text = input("Enter text to encrypt: ").strip() cipher_text = encrypt(key, text) - print(f"ENCRYPTED: {cipher_text}") - print(f"DECRYPTED WITH KEY: {decrypt(key, cipher_text)}") + print(f"Encrypted: {cipher_text}") + print(f"Decrypted with key: {decrypt(key, cipher_text)}") diff --git a/ciphers/rail_fence_cipher.py b/ciphers/rail_fence_cipher.py index 2596415207ae..cba593ca7335 100644 --- a/ciphers/rail_fence_cipher.py +++ b/ciphers/rail_fence_cipher.py @@ -20,7 +20,7 @@ def encrypt(input_string: str, key: int) -> str: ... TypeError: sequence item 0: expected str instance, int found """ - grid = [[] for _ in range(key)] + temp_grid: list[list[str]] = [[] for _ in range(key)] lowest = key - 1 if key <= 0: @@ -31,8 +31,8 @@ def encrypt(input_string: str, key: int) -> str: for position, character in enumerate(input_string): num = position % (lowest * 2) # puts it in bounds num = min(num, lowest * 2 - num) # creates zigzag pattern - grid[num].append(character) - grid = ["".join(row) for row in grid] + temp_grid[num].append(character) + grid = ["".join(row) for row in temp_grid] output_string = "".join(grid) return output_string @@ -63,7 +63,7 @@ def decrypt(input_string: str, key: int) -> str: if key == 1: return input_string - temp_grid = [[] for _ in range(key)] # generates template + temp_grid: list[list[str]] = [[] for _ in range(key)] # generates template for position in range(len(input_string)): num = position % (lowest * 2) # puts it in bounds num = min(num, lowest * 2 - num) # creates zigzag pattern @@ -84,7 +84,7 @@ def decrypt(input_string: str, key: int) -> str: return output_string -def bruteforce(input_string: str) -> dict: +def bruteforce(input_string: str) -> dict[int, str]: """Uses decrypt function by guessing every key >>> bruteforce("HWe olordll")[4] diff --git a/ciphers/rot13.py b/ciphers/rot13.py index 21dbda98eecc..b367c3215127 100644 --- a/ciphers/rot13.py +++ b/ciphers/rot13.py @@ -20,7 +20,7 @@ def dencrypt(s: str, n: int = 13) -> str: return out -def main(): +def main() -> None: s0 = input("Enter message: ") s1 = dencrypt(s0, 13) diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py index 0df37d6ea3ff..b1e8a73f33c6 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ -7,144 +7,144 @@ BYTE_SIZE = 256 -def main(): - filename = "encrypted_file.txt" - response = input(r"Encrypt\Decrypt [e\d]: ") - - if response.lower().startswith("e"): - mode = "encrypt" - elif response.lower().startswith("d"): - mode = "decrypt" - - if mode == "encrypt": - if not os.path.exists("rsa_pubkey.txt"): - rkg.makeKeyFiles("rsa", 1024) - - message = input("\nEnter message: ") - pubKeyFilename = "rsa_pubkey.txt" - print("Encrypting and writing to %s..." 
% (filename)) - encryptedText = encryptAndWriteToFile(filename, pubKeyFilename, message) - - print("\nEncrypted text:") - print(encryptedText) - - elif mode == "decrypt": - privKeyFilename = "rsa_privkey.txt" - print("Reading from %s and decrypting..." % (filename)) - decryptedText = readFromFileAndDecrypt(filename, privKeyFilename) - print("writing decryption to rsa_decryption.txt...") - with open("rsa_decryption.txt", "w") as dec: - dec.write(decryptedText) - - print("\nDecryption:") - print(decryptedText) - - -def getBlocksFromText(message: int, blockSize: int = DEFAULT_BLOCK_SIZE) -> [int]: - messageBytes = message.encode("ascii") - - blockInts = [] - for blockStart in range(0, len(messageBytes), blockSize): - blockInt = 0 - for i in range(blockStart, min(blockStart + blockSize, len(messageBytes))): - blockInt += messageBytes[i] * (BYTE_SIZE ** (i % blockSize)) - blockInts.append(blockInt) - return blockInts - - -def getTextFromBlocks( - blockInts: [int], messageLength: int, blockSize: int = DEFAULT_BLOCK_SIZE +def get_blocks_from_text( + message: str, block_size: int = DEFAULT_BLOCK_SIZE +) -> list[int]: + message_bytes = message.encode("ascii") + + block_ints = [] + for block_start in range(0, len(message_bytes), block_size): + block_int = 0 + for i in range(block_start, min(block_start + block_size, len(message_bytes))): + block_int += message_bytes[i] * (BYTE_SIZE ** (i % block_size)) + block_ints.append(block_int) + return block_ints + + +def get_text_from_blocks( + block_ints: list[int], message_length: int, block_size: int = DEFAULT_BLOCK_SIZE ) -> str: - message = [] - for blockInt in blockInts: - blockMessage = [] - for i in range(blockSize - 1, -1, -1): - if len(message) + i < messageLength: - asciiNumber = blockInt // (BYTE_SIZE ** i) - blockInt = blockInt % (BYTE_SIZE ** i) - blockMessage.insert(0, chr(asciiNumber)) - message.extend(blockMessage) + message: list[str] = [] + for block_int in block_ints: + block_message: list[str] = [] + for i in range(block_size - 1, -1, -1): + if len(message) + i < message_length: + ascii_number = block_int // (BYTE_SIZE ** i) + block_int = block_int % (BYTE_SIZE ** i) + block_message.insert(0, chr(ascii_number)) + message.extend(block_message) return "".join(message) -def encryptMessage( - message: str, key: (int, int), blockSize: int = DEFAULT_BLOCK_SIZE -) -> [int]: - encryptedBlocks = [] +def encrypt_message( + message: str, key: tuple[int, int], blockSize: int = DEFAULT_BLOCK_SIZE +) -> list[int]: + encrypted_blocks = [] n, e = key - for block in getBlocksFromText(message, blockSize): - encryptedBlocks.append(pow(block, e, n)) - return encryptedBlocks + for block in get_blocks_from_text(message, blockSize): + encrypted_blocks.append(pow(block, e, n)) + return encrypted_blocks -def decryptMessage( - encryptedBlocks: [int], - messageLength: int, - key: (int, int), - blockSize: int = DEFAULT_BLOCK_SIZE, +def decrypt_message( + encrypted_blocks: list[int], + message_length: int, + key: tuple[int, int], + block_size: int = DEFAULT_BLOCK_SIZE, ) -> str: - decryptedBlocks = [] + decrypted_blocks = [] n, d = key - for block in encryptedBlocks: - decryptedBlocks.append(pow(block, d, n)) - return getTextFromBlocks(decryptedBlocks, messageLength, blockSize) + for block in encrypted_blocks: + decrypted_blocks.append(pow(block, d, n)) + return get_text_from_blocks(decrypted_blocks, message_length, block_size) -def readKeyFile(keyFilename: str) -> (int, int, int): - with open(keyFilename) as fo: +def read_key_file(key_filename: str) -> tuple[int, 
int, int]: + with open(key_filename) as fo: content = fo.read() - keySize, n, EorD = content.split(",") - return (int(keySize), int(n), int(EorD)) + key_size, n, EorD = content.split(",") + return (int(key_size), int(n), int(EorD)) -def encryptAndWriteToFile( - messageFilename: str, - keyFilename: str, +def encrypt_and_write_to_file( + message_filename: str, + key_filename: str, message: str, - blockSize: int = DEFAULT_BLOCK_SIZE, + block_size: int = DEFAULT_BLOCK_SIZE, ) -> str: - keySize, n, e = readKeyFile(keyFilename) - if keySize < blockSize * 8: + key_size, n, e = read_key_file(key_filename) + if key_size < block_size * 8: sys.exit( "ERROR: Block size is %s bits and key size is %s bits. The RSA cipher " "requires the block size to be equal to or greater than the key size. " "Either decrease the block size or use different keys." - % (blockSize * 8, keySize) + % (block_size * 8, key_size) ) - encryptedBlocks = encryptMessage(message, (n, e), blockSize) + encrypted_blocks = [str(i) for i in encrypt_message(message, (n, e), block_size)] - for i in range(len(encryptedBlocks)): - encryptedBlocks[i] = str(encryptedBlocks[i]) - encryptedContent = ",".join(encryptedBlocks) - encryptedContent = f"{len(message)}_{blockSize}_{encryptedContent}" - with open(messageFilename, "w") as fo: - fo.write(encryptedContent) - return encryptedContent + encrypted_content = ",".join(encrypted_blocks) + encrypted_content = f"{len(message)}_{block_size}_{encrypted_content}" + with open(message_filename, "w") as fo: + fo.write(encrypted_content) + return encrypted_content -def readFromFileAndDecrypt(messageFilename: str, keyFilename: str) -> str: - keySize, n, d = readKeyFile(keyFilename) - with open(messageFilename) as fo: +def read_from_file_and_decrypt(message_filename: str, key_filename: str) -> str: + key_size, n, d = read_key_file(key_filename) + with open(message_filename) as fo: content = fo.read() - messageLength, blockSize, encryptedMessage = content.split("_") - messageLength = int(messageLength) - blockSize = int(blockSize) + message_length_str, block_size_str, encrypted_message = content.split("_") + message_length = int(message_length_str) + block_size = int(block_size_str) - if keySize < blockSize * 8: + if key_size < block_size * 8: sys.exit( "ERROR: Block size is %s bits and key size is %s bits. The RSA cipher " "requires the block size to be equal to or greater than the key size. " "Did you specify the correct key file and encrypted file?" - % (blockSize * 8, keySize) + % (block_size * 8, key_size) ) - encryptedBlocks = [] - for block in encryptedMessage.split(","): - encryptedBlocks.append(int(block)) + encrypted_blocks = [] + for block in encrypted_message.split(","): + encrypted_blocks.append(int(block)) + + return decrypt_message(encrypted_blocks, message_length, (n, d), block_size) + + +def main() -> None: + filename = "encrypted_file.txt" + response = input(r"Encrypt\Decrypt [e\d]: ") + + if response.lower().startswith("e"): + mode = "encrypt" + elif response.lower().startswith("d"): + mode = "decrypt" - return decryptMessage(encryptedBlocks, messageLength, (n, d), blockSize) + if mode == "encrypt": + if not os.path.exists("rsa_pubkey.txt"): + rkg.makeKeyFiles("rsa", 1024) + + message = input("\nEnter message: ") + pubkey_filename = "rsa_pubkey.txt" + print("Encrypting and writing to %s..." 
% (filename)) + encryptedText = encrypt_and_write_to_file(filename, pubkey_filename, message) + + print("\nEncrypted text:") + print(encryptedText) + + elif mode == "decrypt": + privkey_filename = "rsa_privkey.txt" + print("Reading from %s and decrypting..." % (filename)) + decrypted_text = read_from_file_and_decrypt(filename, privkey_filename) + print("writing decryption to rsa_decryption.txt...") + with open("rsa_decryption.txt", "w") as dec: + dec.write(decrypted_text) + + print("\nDecryption:") + print(decrypted_text) if __name__ == "__main__": diff --git a/ciphers/rsa_factorization.py b/ciphers/rsa_factorization.py index b18aab609e2d..6df32b6cc887 100644 --- a/ciphers/rsa_factorization.py +++ b/ciphers/rsa_factorization.py @@ -13,7 +13,7 @@ import random -def rsafactor(d: int, e: int, N: int) -> [int]: +def rsafactor(d: int, e: int, N: int) -> list[int]: """ This function returns the factors of N, where p*q=N Return: [p, q] diff --git a/ciphers/shuffled_shift_cipher.py b/ciphers/shuffled_shift_cipher.py index 22628f3c9d9e..01d099641dd2 100644 --- a/ciphers/shuffled_shift_cipher.py +++ b/ciphers/shuffled_shift_cipher.py @@ -1,5 +1,6 @@ import random import string +from typing import Optional class ShuffledShiftCipher: @@ -26,7 +27,7 @@ class ShuffledShiftCipher: cip2 = ShuffledShiftCipher() """ - def __init__(self, passcode: str = None): + def __init__(self, passcode: Optional[str] = None) -> None: """ Initializes a cipher object with a passcode as it's entity Note: No new passcode is generated if user provides a passcode @@ -36,13 +37,13 @@ def __init__(self, passcode: str = None): self.__key_list = self.__make_key_list() self.__shift_key = self.__make_shift_key() - def __str__(self): + def __str__(self) -> str: """ :return: passcode of the cipher object """ return "Passcode is: " + "".join(self.__passcode) - def __neg_pos(self, iterlist: list) -> list: + def __neg_pos(self, iterlist: list[int]) -> list[int]: """ Mutates the list by changing the sign of each alternate element @@ -54,7 +55,7 @@ def __neg_pos(self, iterlist: list) -> list: iterlist[i] *= -1 return iterlist - def __passcode_creator(self) -> list: + def __passcode_creator(self) -> list[str]: """ Creates a random password from the selection buffer of 1. 
uppercase letters of the English alphabet @@ -65,10 +66,10 @@ def __passcode_creator(self) -> list: :return: a password of a random length between 10 to 20 """ choices = string.ascii_letters + string.digits - password = [random.choice(choices) for i in range(random.randint(10, 20))] + password = [random.choice(choices) for _ in range(random.randint(10, 20))] return password - def __make_key_list(self) -> list: + def __make_key_list(self) -> list[str]: """ Shuffles the ordered character choices by pivoting at breakpoints Breakpoints are the set of characters in the passcode @@ -99,7 +100,7 @@ def __make_key_list(self) -> list: # creates points known as breakpoints to break the key_list_options at those # points and pivot each substring breakpoints = sorted(set(self.__passcode)) - temp_list = [] + temp_list: list[str] = [] # algorithm for creating a new shuffled list, keys_l, out of key_list_options for i in key_list_options: @@ -109,7 +110,7 @@ def __make_key_list(self) -> list: # keys_l if i in breakpoints or i == key_list_options[-1]: keys_l.extend(temp_list[::-1]) - temp_list = [] + temp_list.clear() # returning a shuffled keys_l to prevent brute force guessing of shift key return keys_l @@ -167,7 +168,7 @@ def encrypt(self, plaintext: str) -> str: return encoded_message -def test_end_to_end(msg: str = "Hello, this is a modified Caesar cipher"): +def test_end_to_end(msg: str = "Hello, this is a modified Caesar cipher") -> str: """ >>> test_end_to_end() 'Hello, this is a modified Caesar cipher' diff --git a/ciphers/simple_keyword_cypher.py b/ciphers/simple_keyword_cypher.py index 71c3083e9dfc..447bacfc2e6c 100644 --- a/ciphers/simple_keyword_cypher.py +++ b/ciphers/simple_keyword_cypher.py @@ -15,7 +15,7 @@ def remove_duplicates(key: str) -> str: return key_no_dups -def create_cipher_map(key: str) -> dict: +def create_cipher_map(key: str) -> dict[str, str]: """ Returns a cipher map given a keyword. :param key: keyword to use @@ -40,7 +40,7 @@ def create_cipher_map(key: str) -> dict: return cipher_alphabet -def encipher(message: str, cipher_map: dict) -> str: +def encipher(message: str, cipher_map: dict[str, str]) -> str: """ Enciphers a message given a cipher map. 
:param message: Message to encipher @@ -52,7 +52,7 @@ def encipher(message: str, cipher_map: dict) -> str: return "".join(cipher_map.get(ch, ch) for ch in message.upper()) -def decipher(message: str, cipher_map: dict) -> str: +def decipher(message: str, cipher_map: dict[str, str]) -> str: """ Deciphers a message given a cipher map :param message: Message to decipher @@ -67,7 +67,7 @@ def decipher(message: str, cipher_map: dict) -> str: return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper()) -def main(): +def main() -> None: """ Handles I/O :return: void diff --git a/ciphers/simple_substitution_cipher.py b/ciphers/simple_substitution_cipher.py index 646ea449fc06..a763bd6b6b48 100644 --- a/ciphers/simple_substitution_cipher.py +++ b/ciphers/simple_substitution_cipher.py @@ -4,7 +4,7 @@ LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" -def main(): +def main() -> None: message = input("Enter message: ") key = "LFWOAYUISVKMNXPBDCRJTQEGHZ" resp = input("Encrypt/Decrypt [e/d]: ") @@ -68,7 +68,7 @@ def translateMessage(key: str, message: str, mode: str) -> str: return translated -def getRandomKey(): +def getRandomKey() -> str: key = list(LETTERS) random.shuffle(key) return "".join(key) diff --git a/ciphers/trafid_cipher.py b/ciphers/trafid_cipher.py index 328814f97744..1c8ea3024d33 100644 --- a/ciphers/trafid_cipher.py +++ b/ciphers/trafid_cipher.py @@ -1,7 +1,7 @@ # https://en.wikipedia.org/wiki/Trifid_cipher -def __encryptPart(messagePart: str, character2Number: dict) -> str: +def __encryptPart(messagePart: str, character2Number: dict[str, str]) -> str: one, two, three = "", "", "" tmp = [] @@ -16,7 +16,9 @@ def __encryptPart(messagePart: str, character2Number: dict) -> str: return one + two + three -def __decryptPart(messagePart: str, character2Number: dict) -> (str, str, str): +def __decryptPart( + messagePart: str, character2Number: dict[str, str] +) -> tuple[str, str, str]: tmp, thisPart = "", "" result = [] @@ -32,7 +34,9 @@ def __decryptPart(messagePart: str, character2Number: dict) -> (str, str, str): return result[0], result[1], result[2] -def __prepare(message: str, alphabet: str) -> (str, str, dict, dict): +def __prepare( + message: str, alphabet: str +) -> tuple[str, str, dict[str, str], dict[str, str]]: # Validate message and alphabet, set to upper and remove spaces alphabet = alphabet.replace(" ", "").upper() message = message.replace(" ", "").upper() diff --git a/ciphers/transposition_cipher.py b/ciphers/transposition_cipher.py index 6a0a22d3e31d..589bb8cb5cd5 100644 --- a/ciphers/transposition_cipher.py +++ b/ciphers/transposition_cipher.py @@ -8,7 +8,7 @@ """ -def main(): +def main() -> None: message = input("Enter message: ") key = int(input("Enter key [2-%s]: " % (len(message) - 1))) mode = input("Encryption/Decryption [e/d]: ") diff --git a/ciphers/transposition_cipher_encrypt_decrypt_file.py b/ciphers/transposition_cipher_encrypt_decrypt_file.py index 45aab056109a..b91c73c9f2ad 100644 --- a/ciphers/transposition_cipher_encrypt_decrypt_file.py +++ b/ciphers/transposition_cipher_encrypt_decrypt_file.py @@ -5,7 +5,7 @@ from . 
import transposition_cipher as transCipher -def main(): +def main() -> None: inputFile = "Prehistoric Men.txt" outputFile = "Output.txt" key = int(input("Enter key: ")) diff --git a/ciphers/vigenere_cipher.py b/ciphers/vigenere_cipher.py index eb523d078005..d97a96949fb8 100644 --- a/ciphers/vigenere_cipher.py +++ b/ciphers/vigenere_cipher.py @@ -1,7 +1,7 @@ LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" -def main(): +def main() -> None: message = input("Enter message: ") key = input("Enter key [alphanumeric]: ") mode = input("Encrypt/Decrypt [e/d]: ") diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py index 32a350d4e61c..12d580e720bc 100644 --- a/ciphers/xor_cipher.py +++ b/ciphers/xor_cipher.py @@ -28,7 +28,7 @@ def __init__(self, key: int = 0): # private field self.__key = key - def encrypt(self, content: str, key: int) -> [str]: + def encrypt(self, content: str, key: int) -> list[str]: """ input: 'content' of type string and 'key' of type int output: encrypted string 'content' as a list of chars @@ -53,7 +53,7 @@ def encrypt(self, content: str, key: int) -> [str]: return ans - def decrypt(self, content: str, key: int) -> [str]: + def decrypt(self, content: str, key: int) -> list[str]: """ input: 'content' of type list and 'key' of type int output: decrypted string 'content' as a list of chars From 536fb4bca48f69cb66cfbd03aeb02550def07977 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Sun, 4 Apr 2021 16:53:48 +0530 Subject: [PATCH 0120/1543] Add algorithm for N-body simulation - retry (#4298) * add n_body_simulation.py * updating DIRECTORY.md * Rename other/n_body_simulation.py to physics/n_body_simulation.py * updating DIRECTORY.md * Update build.yml * refactor examples & add doctests * removed type-hints from self-parameter * Apply suggestions from code review * Update physics/n_body_simulation.py * Update physics/n_body_simulation.py * Update physics/n_body_simulation.py * Don't forget self * Fix velocity Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Dhruv Manilawala Co-authored-by: Christian Clauss --- DIRECTORY.md | 3 + physics/n_body_simulation.py | 348 +++++++++++++++++++++++++++++++++++ 2 files changed, 351 insertions(+) create mode 100644 physics/n_body_simulation.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 42a6c49c735f..e6ce3ae718b3 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -551,6 +551,9 @@ * [Sdes](https://github.com/TheAlgorithms/Python/blob/master/other/sdes.py) * [Tower Of Hanoi](https://github.com/TheAlgorithms/Python/blob/master/other/tower_of_hanoi.py) +## Physics + * [N Body Simulation](https://github.com/TheAlgorithms/Python/blob/master/physics/n_body_simulation.py) + ## Project Euler * Problem 001 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol1.py) diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py new file mode 100644 index 000000000000..045a49f7ff00 --- /dev/null +++ b/physics/n_body_simulation.py @@ -0,0 +1,348 @@ +""" +In physics and astronomy, a gravitational N-body simulation is a simulation of a +dynamical system of particles under the influence of gravity. The system +consists of a number of bodies, each of which exerts a gravitational force on all +other bodies. These forces are calculated using Newton's law of universal +gravitation. The Euler method is used at each time-step to calculate the change in +velocity and position brought about by these forces. 
Softening is used to prevent +numerical divergences when a particle comes too close to another (and the force +goes to infinity). +(Description adapted from https://en.wikipedia.org/wiki/N-body_simulation ) +(See also http://www.shodor.org/refdesk/Resources/Algorithms/EulersMethod/ ) +""" + + +from __future__ import annotations + +import random + +from matplotlib import animation +from matplotlib import pyplot as plt + + +class Body: + def __init__( + self, + position_x: float, + position_y: float, + velocity_x: float, + velocity_y: float, + mass: float = 1.0, + size: float = 1.0, + color: str = "blue", + ) -> None: + """ + The parameters "size" & "color" are not relevant for the simulation itself, + they are only used for plotting. + """ + self.position_x = position_x + self.position_y = position_y + self.velocity_x = velocity_x + self.velocity_y = velocity_y + self.mass = mass + self.size = size + self.color = color + + @property + def position(self) -> tuple[float, float]: + return self.position_x, self.position_y + + @property + def velocity(self) -> tuple[float, float]: + return self.velocity_x, self.velocity_y + + def update_velocity( + self, force_x: float, force_y: float, delta_time: float + ) -> None: + """ + Euler algorithm for velocity + + >>> body_1 = Body(0.,0.,0.,0.) + >>> body_1.update_velocity(1.,0.,1.) + >>> body_1.velocity + (1.0, 0.0) + + >>> body_1.update_velocity(1.,0.,1.) + >>> body_1.velocity + (2.0, 0.0) + + >>> body_2 = Body(0.,0.,5.,0.) + >>> body_2.update_velocity(0.,-10.,10.) + >>> body_2.velocity + (5.0, -100.0) + + >>> body_2.update_velocity(0.,-10.,10.) + >>> body_2.velocity + (5.0, -200.0) + """ + self.velocity_x += force_x * delta_time + self.velocity_y += force_y * delta_time + + def update_position(self, delta_time: float) -> None: + """ + Euler algorithm for position + + >>> body_1 = Body(0.,0.,1.,0.) + >>> body_1.update_position(1.) + >>> body_1.position + (1.0, 0.0) + + >>> body_1.update_position(1.) + >>> body_1.position + (2.0, 0.0) + + >>> body_2 = Body(10.,10.,0.,-2.) + >>> body_2.update_position(1.) + >>> body_2.position + (10.0, 8.0) + + >>> body_2.update_position(1.) + >>> body_2.position + (10.0, 6.0) + """ + self.position_x += self.velocity_x * delta_time + self.position_y += self.velocity_y * delta_time + + +class BodySystem: + """ + This class is used to hold the bodies, the gravitation constant, the time + factor and the softening factor. The time factor is used to control the speed + of the simulation. The softening factor is used for softening, a numerical + trick for N-body simulations to prevent numerical divergences when two bodies + get too close to each other. + """ + + def __init__( + self, + bodies: list[Body], + gravitation_constant: float = 1.0, + time_factor: float = 1.0, + softening_factor: float = 0.0, + ) -> None: + self.bodies = bodies + self.gravitation_constant = gravitation_constant + self.time_factor = time_factor + self.softening_factor = softening_factor + + def __len__(self) -> int: + return len(self.bodies) + + def update_system(self, delta_time: float) -> None: + """ + For each body, loop through all other bodies to calculate the total + force they exert on it. Use that force to update the body's velocity. 
+ + >>> body_system_1 = BodySystem([Body(0,0,0,0), Body(10,0,0,0)]) + >>> len(body_system_1) + 2 + >>> body_system_1.update_system(1) + >>> body_system_1.bodies[0].position + (0.01, 0.0) + >>> body_system_1.bodies[0].velocity + (0.01, 0.0) + + >>> body_system_2 = BodySystem([Body(-10,0,0,0), Body(10,0,0,0, mass=4)], 1, 10) + >>> body_system_2.update_system(1) + >>> body_system_2.bodies[0].position + (-9.0, 0.0) + >>> body_system_2.bodies[0].velocity + (0.1, 0.0) + """ + for body1 in self.bodies: + force_x = 0.0 + force_y = 0.0 + for body2 in self.bodies: + if body1 != body2: + dif_x = body2.position_x - body1.position_x + dif_y = body2.position_y - body1.position_y + + # Calculation of the distance using Pythagoras's theorem + # Extra factor due to the softening technique + distance = (dif_x ** 2 + dif_y ** 2 + self.softening_factor) ** ( + 1 / 2 + ) + + # Newton's law of universal gravitation. + force_x += ( + self.gravitation_constant * body2.mass * dif_x / distance ** 3 + ) + force_y += ( + self.gravitation_constant * body2.mass * dif_y / distance ** 3 + ) + + # Update the body's velocity once all the force components have been added + body1.update_velocity(force_x, force_y, delta_time * self.time_factor) + + # Update the positions only after all the velocities have been updated + for body in self.bodies: + body.update_position(delta_time * self.time_factor) + + +def update_step( + body_system: BodySystem, delta_time: float, patches: list[plt.Circle] +) -> None: + """ + Updates the body-system and applies the change to the patch-list used for plotting + + >>> body_system_1 = BodySystem([Body(0,0,0,0), Body(10,0,0,0)]) + >>> patches_1 = [plt.Circle((body.position_x, body.position_y), body.size, + ... fc=body.color)for body in body_system_1.bodies] #doctest: +ELLIPSIS + >>> update_step(body_system_1, 1, patches_1) + >>> patches_1[0].center + (0.01, 0.0) + + >>> body_system_2 = BodySystem([Body(-10,0,0,0), Body(10,0,0,0, mass=4)], 1, 10) + >>> patches_2 = [plt.Circle((body.position_x, body.position_y), body.size, + ... fc=body.color)for body in body_system_2.bodies] #doctest: +ELLIPSIS + >>> update_step(body_system_2, 1, patches_2) + >>> patches_2[0].center + (-9.0, 0.0) + """ + # Update the positions of the bodies + body_system.update_system(delta_time) + + # Update the positions of the patches + for patch, body in zip(patches, body_system.bodies): + patch.center = (body.position_x, body.position_y) + + +def plot( + title: str, + body_system: BodySystem, + x_start: float = -1, + x_end: float = 1, + y_start: float = -1, + y_end: float = 1, +) -> None: + """ + Utility function to plot how the given body-system evolves over time. + No doctest provided since this function does not have a return value. 
+ """ + + INTERVAL = 20 # Frame rate of the animation + DELTA_TIME = INTERVAL / 1000 # Time between time steps in seconds + + fig = plt.figure() + fig.canvas.set_window_title(title) + ax = plt.axes( + xlim=(x_start, x_end), ylim=(y_start, y_end) + ) # Set section to be plotted + plt.gca().set_aspect("equal") # Fix aspect ratio + + # Each body is drawn as a patch by the plt-function + patches = [ + plt.Circle((body.position_x, body.position_y), body.size, fc=body.color) + for body in body_system.bodies + ] + + for patch in patches: + ax.add_patch(patch) + + # Function called at each step of the animation + def update(frame: int) -> list[plt.Circle]: + update_step(body_system, DELTA_TIME, patches) + return patches + + anim = animation.FuncAnimation( # noqa: F841 + fig, update, interval=INTERVAL, blit=True + ) + + plt.show() + + +def example_1() -> BodySystem: + """ + Example 1: figure-8 solution to the 3-body-problem + This example can be seen as a test of the implementation: given the right + initial conditions, the bodies should move in a figure-8. + (initial conditions taken from http://www.artcompsci.org/vol_1/v1_web/node56.html) + >>> body_system = example_1() + >>> len(body_system) + 3 + """ + + position_x = 0.9700436 + position_y = -0.24308753 + velocity_x = 0.466203685 + velocity_y = 0.43236573 + + bodies1 = [ + Body(position_x, position_y, velocity_x, velocity_y, size=0.2, color="red"), + Body(-position_x, -position_y, velocity_x, velocity_y, size=0.2, color="green"), + Body(0, 0, -2 * velocity_x, -2 * velocity_y, size=0.2, color="blue"), + ] + return BodySystem(bodies1, time_factor=3) + + +def example_2() -> BodySystem: + """ + Example 2: Moon's orbit around the earth + This example can be seen as a test of the implementation: given the right + initial conditions, the moon should orbit around the earth as it actually does. + (mass, velocity and distance taken from https://en.wikipedia.org/wiki/Earth + and https://en.wikipedia.org/wiki/Moon) + No doctest provided since this function does not have a return value. + """ + + moon_mass = 7.3476e22 + earth_mass = 5.972e24 + velocity_dif = 1022 + earth_moon_distance = 384399000 + gravitation_constant = 6.674e-11 + + # Calculation of the respective velocities so that total impulse is zero, + # i.e. the two bodies together don't move + moon_velocity = earth_mass * velocity_dif / (earth_mass + moon_mass) + earth_velocity = moon_velocity - velocity_dif + + moon = Body(-earth_moon_distance, 0, 0, moon_velocity, moon_mass, 10000000, "grey") + earth = Body(0, 0, 0, earth_velocity, earth_mass, 50000000, "blue") + return BodySystem([earth, moon], gravitation_constant, time_factor=1000000) + + +def example_3() -> BodySystem: + """ + Example 3: Random system with many bodies. + No doctest provided since this function does not have a return value. 
+ """ + + bodies = [] + for i in range(10): + velocity_x = random.uniform(-0.5, 0.5) + velocity_y = random.uniform(-0.5, 0.5) + + # Bodies are created pairwise with opposite velocities so that the + # total impulse remains zero + bodies.append( + Body( + random.uniform(-0.5, 0.5), + random.uniform(-0.5, 0.5), + velocity_x, + velocity_y, + size=0.05, + ) + ) + bodies.append( + Body( + random.uniform(-0.5, 0.5), + random.uniform(-0.5, 0.5), + -velocity_x, + -velocity_y, + size=0.05, + ) + ) + return BodySystem(bodies, 0.01, 10, 0.1) + + +if __name__ == "__main__": + plot("Figure-8 solution to the 3-body-problem", example_1(), -2, 2, -2, 2) + plot( + "Moon's orbit around the earth", + example_2(), + -430000000, + 430000000, + -430000000, + 430000000, + ) + plot("Random system with many bodies", example_3(), -1.5, 1.5, -1.5, 1.5) From 20c7518028efbb6e8ae46a42b28c3f2e27acb2a2 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Sun, 4 Apr 2021 18:55:49 +0530 Subject: [PATCH 0121/1543] fix(mypy): type annotations for conversions algorithms (#4314) * fix(mypy): type annotations for conversions algorithms * refactor(CI): include conversions algorithms for mypy tests --- .github/workflows/build.yml | 2 +- conversions/binary_to_octal.py | 2 +- conversions/decimal_to_binary.py | 6 +++--- conversions/decimal_to_hexadecimal.py | 3 ++- conversions/decimal_to_octal.py | 4 ++-- conversions/prefix_conversions.py | 14 ++++++++------ conversions/weight_conversion.py | 4 ++-- 7 files changed, 19 insertions(+), 16 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ac5f80206e35..eb54d85ea035 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -23,7 +23,7 @@ jobs: python -m pip install mypy pytest-cov -r requirements.txt # FIXME: #4052 fix mypy errors in the exclude directories and remove them below - run: mypy --ignore-missing-imports - --exclude '(conversions|data_structures|digital_image_processing|dynamic_programming|graphs|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . + --exclude '(data_structures|digital_image_processing|dynamic_programming|graphs|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . 
- if: ${{ success() }} diff --git a/conversions/binary_to_octal.py b/conversions/binary_to_octal.py index 8b594887867e..35ede95b134d 100644 --- a/conversions/binary_to_octal.py +++ b/conversions/binary_to_octal.py @@ -28,7 +28,7 @@ def bin_to_octal(bin_string: str) -> str: bin_string = "0" + bin_string bin_string_in_3_list = [ bin_string[index : index + 3] - for index, value in enumerate(bin_string) + for index in range(len(bin_string)) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: diff --git a/conversions/decimal_to_binary.py b/conversions/decimal_to_binary.py index 7e83aee4f7a5..c21cdbcaec68 100644 --- a/conversions/decimal_to_binary.py +++ b/conversions/decimal_to_binary.py @@ -28,9 +28,9 @@ def decimal_to_binary(num: int) -> str: TypeError: 'str' object cannot be interpreted as an integer """ - if type(num) == float: + if isinstance(num, float): raise TypeError("'float' object cannot be interpreted as an integer") - if type(num) == str: + if isinstance(num, str): raise TypeError("'str' object cannot be interpreted as an integer") if num == 0: @@ -42,7 +42,7 @@ def decimal_to_binary(num: int) -> str: negative = True num = -num - binary = [] + binary: list[int] = [] while num > 0: binary.insert(0, num % 2) num >>= 1 diff --git a/conversions/decimal_to_hexadecimal.py b/conversions/decimal_to_hexadecimal.py index 433f78dfecb7..2389c6d1f2a1 100644 --- a/conversions/decimal_to_hexadecimal.py +++ b/conversions/decimal_to_hexadecimal.py @@ -21,7 +21,7 @@ } -def decimal_to_hexadecimal(decimal): +def decimal_to_hexadecimal(decimal: float) -> str: """ take integer decimal value, return hexadecimal representation as str beginning with 0x @@ -58,6 +58,7 @@ def decimal_to_hexadecimal(decimal): True """ assert type(decimal) in (int, float) and decimal == int(decimal) + decimal = int(decimal) hexadecimal = "" negative = False if decimal < 0: diff --git a/conversions/decimal_to_octal.py b/conversions/decimal_to_octal.py index 8dc04830ad87..4c313bddf64c 100644 --- a/conversions/decimal_to_octal.py +++ b/conversions/decimal_to_octal.py @@ -17,14 +17,14 @@ def decimal_to_octal(num: int) -> str: counter = 0 while num > 0: remainder = num % 8 - octal = octal + (remainder * math.pow(10, counter)) + octal = octal + (remainder * math.floor(math.pow(10, counter))) counter += 1 num = math.floor(num / 8) # basically /= 8 without remainder if any # This formatting removes trailing '.0' from `octal`. 
return f"0o{int(octal)}" -def main(): +def main() -> None: """Print octal equivalents of decimal numbers.""" print("\n2 in octal is:") print(decimal_to_octal(2)) # = 2 diff --git a/conversions/prefix_conversions.py b/conversions/prefix_conversions.py index c2440d1cf886..78db4a91709c 100644 --- a/conversions/prefix_conversions.py +++ b/conversions/prefix_conversions.py @@ -59,10 +59,12 @@ def convert_si_prefix( 1000 """ if isinstance(known_prefix, str): - known_prefix: SI_Unit = SI_Unit[known_prefix.lower()] + known_prefix = SI_Unit[known_prefix.lower()] if isinstance(unknown_prefix, str): - unknown_prefix: SI_Unit = SI_Unit[unknown_prefix.lower()] - unknown_amount = known_amount * (10 ** (known_prefix.value - unknown_prefix.value)) + unknown_prefix = SI_Unit[unknown_prefix.lower()] + unknown_amount: float = known_amount * ( + 10 ** (known_prefix.value - unknown_prefix.value) + ) return unknown_amount @@ -85,10 +87,10 @@ def convert_binary_prefix( 1024 """ if isinstance(known_prefix, str): - known_prefix: Binary_Unit = Binary_Unit[known_prefix.lower()] + known_prefix = Binary_Unit[known_prefix.lower()] if isinstance(unknown_prefix, str): - unknown_prefix: Binary_Unit = Binary_Unit[unknown_prefix.lower()] - unknown_amount = known_amount * ( + unknown_prefix = Binary_Unit[unknown_prefix.lower()] + unknown_amount: float = known_amount * ( 2 ** ((known_prefix.value - unknown_prefix.value) * 10) ) return unknown_amount diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py index 85515f2f6f88..c344416be5f5 100644 --- a/conversions/weight_conversion.py +++ b/conversions/weight_conversion.py @@ -29,7 +29,7 @@ -> Wikipedia reference: https://en.wikipedia.org/wiki/Dalton_(unit) """ -KILOGRAM_CHART = { +KILOGRAM_CHART: dict[str, float] = { "kilogram": 1, "gram": pow(10, 3), "milligram": pow(10, 6), @@ -42,7 +42,7 @@ "atomic-mass-unit": 6.022136652e26, } -WEIGHT_TYPE_CHART = { +WEIGHT_TYPE_CHART: dict[str, float] = { "kilogram": 1, "gram": pow(10, -3), "milligram": pow(10, -6), From 8c2986026bc42d81a6d9386c9fe621fea8ff2d15 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Mon, 5 Apr 2021 19:07:38 +0530 Subject: [PATCH 0122/1543] fix(mypy): type annotations for linear algebra algorithms (#4317) * fix(mypy): type annotations for linear algebra algorithms * refactor: remove linear algebra directory from mypy exclude --- .github/workflows/build.yml | 2 +- linear_algebra/src/conjugate_gradient.py | 17 +++-- linear_algebra/src/lib.py | 83 ++++++++++++++--------- linear_algebra/src/polynom_for_points.py | 13 ++-- linear_algebra/src/power_iteration.py | 7 +- linear_algebra/src/rayleigh_quotient.py | 10 ++- linear_algebra/src/test_linear_algebra.py | 40 +++++------ linear_algebra/src/transformations_2d.py | 2 - 8 files changed, 100 insertions(+), 74 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index eb54d85ea035..ca3e8092276e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -23,7 +23,7 @@ jobs: python -m pip install mypy pytest-cov -r requirements.txt # FIXME: #4052 fix mypy errors in the exclude directories and remove them below - run: mypy --ignore-missing-imports - --exclude '(data_structures|digital_image_processing|dynamic_programming|graphs|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . + --exclude '(data_structures|digital_image_processing|dynamic_programming|graphs|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . 
- name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} diff --git a/linear_algebra/src/conjugate_gradient.py b/linear_algebra/src/conjugate_gradient.py index 1a65b8ccf019..418ae88a5f41 100644 --- a/linear_algebra/src/conjugate_gradient.py +++ b/linear_algebra/src/conjugate_gradient.py @@ -3,10 +3,12 @@ - https://en.wikipedia.org/wiki/Conjugate_gradient_method - https://en.wikipedia.org/wiki/Definite_symmetric_matrix """ +from typing import Any + import numpy as np -def _is_matrix_spd(matrix: np.array) -> bool: +def _is_matrix_spd(matrix: np.ndarray) -> bool: """ Returns True if input matrix is symmetric positive definite. Returns False otherwise. @@ -38,10 +40,11 @@ def _is_matrix_spd(matrix: np.array) -> bool: eigen_values, _ = np.linalg.eigh(matrix) # Check sign of all eigenvalues. - return np.all(eigen_values > 0) + # np.all returns a value of type np.bool_ + return bool(np.all(eigen_values > 0)) -def _create_spd_matrix(dimension: np.int64) -> np.array: +def _create_spd_matrix(dimension: int) -> Any: """ Returns a symmetric positive definite matrix given a dimension. @@ -64,11 +67,11 @@ def _create_spd_matrix(dimension: np.int64) -> np.array: def conjugate_gradient( - spd_matrix: np.array, - load_vector: np.array, + spd_matrix: np.ndarray, + load_vector: np.ndarray, max_iterations: int = 1000, tol: float = 1e-8, -) -> np.array: +) -> Any: """ Returns solution to the linear system np.dot(spd_matrix, x) = b. @@ -141,6 +144,8 @@ def conjugate_gradient( # Update number of iterations. iterations += 1 + if iterations > max_iterations: + break return x diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 353c8334093b..5e2f82018f38 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -22,6 +22,7 @@ import math import random +from typing import Collection, Optional, Union, overload class Vector: @@ -45,7 +46,7 @@ class Vector: TODO: compare-operator """ - def __init__(self, components=None): + def __init__(self, components: Optional[Collection[float]] = None) -> None: """ input: components or nothing simple constructor for init the vector @@ -54,7 +55,7 @@ def __init__(self, components=None): components = [] self.__components = list(components) - def set(self, components): + def set(self, components: Collection[float]) -> None: """ input: new components changes the components of the vector. @@ -65,13 +66,13 @@ def set(self, components): else: raise Exception("please give any vector") - def __str__(self): + def __str__(self) -> str: """ returns a string representation of the vector """ return "(" + ",".join(map(str, self.__components)) + ")" - def component(self, i): + def component(self, i: int) -> float: """ input: index (start at 0) output: the i-th component of the vector. 
@@ -81,22 +82,22 @@ def component(self, i): else: raise Exception("index out of range") - def __len__(self): + def __len__(self) -> int: """ returns the size of the vector """ return len(self.__components) - def euclidLength(self): + def euclidLength(self) -> float: """ returns the euclidean length of the vector """ - summe = 0 + summe: float = 0 for c in self.__components: summe += c ** 2 return math.sqrt(summe) - def __add__(self, other): + def __add__(self, other: "Vector") -> "Vector": """ input: other vector assumes: other vector has the same size @@ -109,7 +110,7 @@ def __add__(self, other): else: raise Exception("must have the same size") - def __sub__(self, other): + def __sub__(self, other: "Vector") -> "Vector": """ input: other vector assumes: other vector has the same size @@ -122,7 +123,15 @@ def __sub__(self, other): else: # error case raise Exception("must have the same size") - def __mul__(self, other): + @overload + def __mul__(self, other: float) -> "Vector": + ... + + @overload + def __mul__(self, other: "Vector") -> float: + ... + + def __mul__(self, other: Union[float, "Vector"]) -> Union[float, "Vector"]: """ mul implements the scalar multiplication and the dot-product @@ -132,20 +141,20 @@ def __mul__(self, other): return Vector(ans) elif isinstance(other, Vector) and (len(self) == len(other)): size = len(self) - summe = 0 + summe: float = 0 for i in range(size): summe += self.__components[i] * other.component(i) return summe else: # error case raise Exception("invalid operand!") - def copy(self): + def copy(self) -> "Vector": """ copies this vector and returns it. """ return Vector(self.__components) - def changeComponent(self, pos, value): + def changeComponent(self, pos: int, value: float) -> None: """ input: an index (pos) and a value changes the specified component (pos) with the @@ -156,7 +165,7 @@ def changeComponent(self, pos, value): self.__components[pos] = value -def zeroVector(dimension): +def zeroVector(dimension: int) -> Vector: """ returns a zero-vector of size 'dimension' """ @@ -165,7 +174,7 @@ def zeroVector(dimension): return Vector([0] * dimension) -def unitBasisVector(dimension, pos): +def unitBasisVector(dimension: int, pos: int) -> Vector: """ returns a unit basis vector with a One at index 'pos' (indexing at 0) @@ -177,7 +186,7 @@ def unitBasisVector(dimension, pos): return Vector(ans) -def axpy(scalar, x, y): +def axpy(scalar: float, x: Vector, y: Vector) -> Vector: """ input: a 'scalar' and two vectors 'x' and 'y' output: a vector @@ -192,7 +201,7 @@ def axpy(scalar, x, y): return x * scalar + y -def randomVector(N, a, b): +def randomVector(N: int, a: int, b: int) -> Vector: """ input: size (N) of the vector. random range (a,b) @@ -200,7 +209,7 @@ def randomVector(N, a, b): random integer components between 'a' and 'b'. """ random.seed(None) - ans = [random.randint(a, b) for i in range(N)] + ans = [random.randint(a, b) for _ in range(N)] return Vector(ans) @@ -222,7 +231,7 @@ class Matrix: operator - _ implements the matrix-subtraction """ - def __init__(self, matrix, w, h): + def __init__(self, matrix: list[list[float]], w: int, h: int) -> None: """ simple constructor for initializing the matrix with components. @@ -231,7 +240,7 @@ def __init__(self, matrix, w, h): self.__width = w self.__height = h - def __str__(self): + def __str__(self) -> str: """ returns a string representation of this matrix. 
@@ -246,7 +255,7 @@ def __str__(self): ans += str(self.__matrix[i][j]) + "|\n" return ans - def changeComponent(self, x, y, value): + def changeComponent(self, x: int, y: int, value: float) -> None: """ changes the x-y component of this matrix """ @@ -255,7 +264,7 @@ def changeComponent(self, x, y, value): else: raise Exception("changeComponent: indices out of bounds") - def component(self, x, y): + def component(self, x: int, y: int) -> float: """ returns the specified (x,y) component """ @@ -264,13 +273,13 @@ def component(self, x, y): else: raise Exception("changeComponent: indices out of bounds") - def width(self): + def width(self) -> int: """ getter for the width """ return self.__width - def height(self): + def height(self) -> int: """ getter for the height """ @@ -303,7 +312,15 @@ def determinate(self) -> float: else: raise Exception("matrix is not square") - def __mul__(self, other): + @overload + def __mul__(self, other: float) -> "Matrix": + ... + + @overload + def __mul__(self, other: Vector) -> Vector: + ... + + def __mul__(self, other: Union[float, Vector]) -> Union[Vector, "Matrix"]: """ implements the matrix-vector multiplication. implements the matrix-scalar multiplication @@ -312,7 +329,7 @@ def __mul__(self, other): if len(other) == self.__width: ans = zeroVector(self.__height) for i in range(self.__height): - summe = 0 + summe: float = 0 for j in range(self.__width): summe += other.component(j) * self.__matrix[i][j] ans.changeComponent(i, summe) @@ -330,7 +347,7 @@ def __mul__(self, other): ] return Matrix(matrix, self.__width, self.__height) - def __add__(self, other): + def __add__(self, other: "Matrix") -> "Matrix": """ implements the matrix-addition. """ @@ -345,7 +362,7 @@ def __add__(self, other): else: raise Exception("matrix must have the same dimension!") - def __sub__(self, other): + def __sub__(self, other: "Matrix") -> "Matrix": """ implements the matrix-subtraction. """ @@ -361,19 +378,21 @@ def __sub__(self, other): raise Exception("matrix must have the same dimension!") -def squareZeroMatrix(N): +def squareZeroMatrix(N: int) -> Matrix: """ returns a square zero-matrix of dimension NxN """ - ans = [[0] * N for i in range(N)] + ans: list[list[float]] = [[0] * N for _ in range(N)] return Matrix(ans, N, N) -def randomMatrix(W, H, a, b): +def randomMatrix(W: int, H: int, a: int, b: int) -> Matrix: """ returns a random matrix WxH with integer components between 'a' and 'b' """ random.seed(None) - matrix = [[random.randint(a, b) for j in range(W)] for i in range(H)] + matrix: list[list[float]] = [ + [random.randint(a, b) for _ in range(W)] for _ in range(H) + ] return Matrix(matrix, W, H) diff --git a/linear_algebra/src/polynom_for_points.py b/linear_algebra/src/polynom_for_points.py index 7a363723d9d2..091849542ffe 100644 --- a/linear_algebra/src/polynom_for_points.py +++ b/linear_algebra/src/polynom_for_points.py @@ -1,6 +1,3 @@ -from __future__ import annotations - - def points_to_polynomial(coordinates: list[list[int]]) -> str: """ coordinates is a two dimensional matrix: [[x, y], [x, y], ...] 
@@ -55,12 +52,12 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: if check == 1: count_of_line = 0 - matrix = [] + matrix: list[list[float]] = [] # put the x and x to the power values in a matrix while count_of_line < x: count_in_line = 0 a = coordinates[count_of_line][0] - count_line: list[int] = [] + count_line: list[float] = [] while count_in_line < x: count_line.append(a ** (x - (count_in_line + 1))) count_in_line += 1 @@ -69,7 +66,7 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: count_of_line = 0 # put the y values into a vector - vector: list[int] = [] + vector: list[float] = [] while count_of_line < x: vector.append(coordinates[count_of_line][1]) count_of_line += 1 @@ -96,14 +93,14 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: # make solutions solution: list[str] = [] while count < x: - solution.append(vector[count] / matrix[count][count]) + solution.append(str(vector[count] / matrix[count][count])) count += 1 count = 0 solved = "f(x)=" while count < x: - remove_e: list[str] = str(solution[count]).split("E") + remove_e: list[str] = solution[count].split("E") if len(remove_e) > 1: solution[count] = remove_e[0] + "*10^" + remove_e[1] solved += "x^" + str(x - (count + 1)) + "*" + str(solution[count]) diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py index 476361e0d433..2cf22838e4a1 100644 --- a/linear_algebra/src/power_iteration.py +++ b/linear_algebra/src/power_iteration.py @@ -2,8 +2,11 @@ def power_iteration( - input_matrix: np.array, vector: np.array, error_tol=1e-12, max_iterations=100 -) -> [float, np.array]: + input_matrix: np.ndarray, + vector: np.ndarray, + error_tol: float = 1e-12, + max_iterations: int = 100, +) -> tuple[float, np.ndarray]: """ Power Iteration. Find the largest eignevalue and corresponding eigenvector diff --git a/linear_algebra/src/rayleigh_quotient.py b/linear_algebra/src/rayleigh_quotient.py index 69bbbac119e8..78083aa755f1 100644 --- a/linear_algebra/src/rayleigh_quotient.py +++ b/linear_algebra/src/rayleigh_quotient.py @@ -1,10 +1,12 @@ """ https://en.wikipedia.org/wiki/Rayleigh_quotient """ +from typing import Any + import numpy as np -def is_hermitian(matrix: np.array) -> bool: +def is_hermitian(matrix: np.ndarray) -> bool: """ Checks if a matrix is Hermitian. >>> import numpy as np @@ -24,7 +26,7 @@ def is_hermitian(matrix: np.array) -> bool: return np.array_equal(matrix, matrix.conjugate().T) -def rayleigh_quotient(A: np.array, v: np.array) -> float: +def rayleigh_quotient(A: np.ndarray, v: np.ndarray) -> Any: """ Returns the Rayleigh quotient of a Hermitian matrix A and vector v. 
@@ -43,7 +45,9 @@ def rayleigh_quotient(A: np.array, v: np.array) -> float: array([[3.]]) """ v_star = v.conjugate().T - return (v_star.dot(A).dot(v)) / (v_star.dot(v)) + v_star_dot = v_star.dot(A) + assert isinstance(v_star_dot, np.ndarray) + return (v_star_dot.dot(v)) / (v_star.dot(v)) def tests() -> None: diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index 6eba3a1638bd..0954a2d932b7 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -12,7 +12,7 @@ class Test(unittest.TestCase): - def test_component(self): + def test_component(self) -> None: """ test for method component """ @@ -21,28 +21,28 @@ def test_component(self): self.assertEqual(x.component(2), 3) _ = Vector() - def test_str(self): + def test_str(self) -> None: """ test for toString() method """ x = Vector([0, 0, 0, 0, 0, 1]) self.assertEqual(str(x), "(0,0,0,0,0,1)") - def test_size(self): + def test_size(self) -> None: """ test for size()-method """ x = Vector([1, 2, 3, 4]) self.assertEqual(len(x), 4) - def test_euclidLength(self): + def test_euclidLength(self) -> None: """ test for the eulidean length """ x = Vector([1, 2]) self.assertAlmostEqual(x.euclidLength(), 2.236, 3) - def test_add(self): + def test_add(self) -> None: """ test for + operator """ @@ -52,7 +52,7 @@ def test_add(self): self.assertEqual((x + y).component(1), 3) self.assertEqual((x + y).component(2), 4) - def test_sub(self): + def test_sub(self) -> None: """ test for - operator """ @@ -62,7 +62,7 @@ def test_sub(self): self.assertEqual((x - y).component(1), 1) self.assertEqual((x - y).component(2), 2) - def test_mul(self): + def test_mul(self) -> None: """ test for * operator """ @@ -72,19 +72,19 @@ def test_mul(self): self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)") self.assertEqual((a * b), 0) - def test_zeroVector(self): + def test_zeroVector(self) -> None: """ test for the global function zeroVector(...) """ self.assertTrue(str(zeroVector(10)).count("0") == 10) - def test_unitBasisVector(self): + def test_unitBasisVector(self) -> None: """ test for the global function unitBasisVector(...) """ self.assertEqual(str(unitBasisVector(3, 1)), "(0,1,0)") - def test_axpy(self): + def test_axpy(self) -> None: """ test for the global function axpy(...) 
(operation) """ @@ -92,7 +92,7 @@ def test_axpy(self): y = Vector([1, 0, 1]) self.assertEqual(str(axpy(2, x, y)), "(3,4,7)") - def test_copy(self): + def test_copy(self) -> None: """ test for the copy()-method """ @@ -100,7 +100,7 @@ def test_copy(self): y = x.copy() self.assertEqual(str(x), str(y)) - def test_changeComponent(self): + def test_changeComponent(self) -> None: """ test for the changeComponent(...)-method """ @@ -109,43 +109,43 @@ def test_changeComponent(self): x.changeComponent(1, 1) self.assertEqual(str(x), "(0,1,0)") - def test_str_matrix(self): + def test_str_matrix(self) -> None: A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(A)) - def test_determinate(self): + def test_determinate(self) -> None: """ test for determinate() """ A = Matrix([[1, 1, 4, 5], [3, 3, 3, 2], [5, 1, 9, 0], [9, 7, 7, 9]], 4, 4) self.assertEqual(-376, A.determinate()) - def test__mul__matrix(self): + def test__mul__matrix(self) -> None: A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3) x = Vector([1, 2, 3]) self.assertEqual("(14,32,50)", str(A * x)) self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(A * 2)) - def test_changeComponent_matrix(self): + def test_changeComponent_matrix(self) -> None: A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) A.changeComponent(0, 2, 5) self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(A)) - def test_component_matrix(self): + def test_component_matrix(self) -> None: A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) self.assertEqual(7, A.component(2, 1), 0.01) - def test__add__matrix(self): + def test__add__matrix(self) -> None: A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(A + B)) - def test__sub__matrix(self): + def test__sub__matrix(self) -> None: A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(A - B)) - def test_squareZeroMatrix(self): + def test_squareZeroMatrix(self) -> None: self.assertEqual( "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|" + "\n|0,0,0,0,0|\n", str(squareZeroMatrix(5)), diff --git a/linear_algebra/src/transformations_2d.py b/linear_algebra/src/transformations_2d.py index 6a15189c5676..cdf42100d5d9 100644 --- a/linear_algebra/src/transformations_2d.py +++ b/linear_algebra/src/transformations_2d.py @@ -11,8 +11,6 @@ reflection(45) = [[0.05064397763545947, 0.893996663600558], [0.893996663600558, 0.7018070490682369]] """ -from __future__ import annotations - from math import cos, sin From c49fa088a0beb989f7553d697ec706f1af3a663d Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Tue, 6 Apr 2021 16:24:26 +0530 Subject: [PATCH 0123/1543] feat: Add mypy configuration file (#4315) * feat: Add mypy config file * refactor: Remove mypy options from build workflow * Remove linear_algebra Co-authored-by: Christian Clauss --- .github/workflows/build.yml | 4 +--- mypy.ini | 5 +++++ 2 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 mypy.ini diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ca3e8092276e..2ffc2aa293b0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,9 +21,7 @@ jobs: run: | python -m pip install --upgrade pip setuptools six wheel python -m pip install mypy pytest-cov -r requirements.txt - # FIXME: #4052 fix mypy errors in the exclude directories and remove them below - 
- run: mypy --ignore-missing-imports - --exclude '(data_structures|digital_image_processing|dynamic_programming|graphs|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . + - run: mypy . - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 000000000000..cf0ba26cfc82 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,5 @@ +[mypy] +ignore_missing_imports = True + +; FIXME: #4052 fix mypy errors in the exclude directories and remove them below +exclude = (data_structures|digital_image_processing|dynamic_programming|graphs|maths|matrix|other|project_euler|scripts|searches|strings*)/$ From 531d2d6d7e36b7aa793c4382fa14aa5977af0d7e Mon Sep 17 00:00:00 2001 From: algobytewise Date: Tue, 6 Apr 2021 19:04:18 +0530 Subject: [PATCH 0124/1543] Mypy fix rotation.py (#4319) * fix type-hints arguments * fix matrices & image-path * Update build.yml * Revert "Update build.yml" This reverts commit c2d04aef65292af6b5f9ca9581c25ac8aadf4a50. * use pathlib * feat: Add mypy configuration file (#4315) * feat: Add mypy config file * refactor: Remove mypy options from build workflow * Remove linear_algebra Co-authored-by: Christian Clauss * rebase & update mypy.ini * fix pre-commit errors Co-authored-by: Dhruv Manilawala Co-authored-by: Christian Clauss --- digital_image_processing/rotation/rotation.py | 18 +++++++++++------- mypy.ini | 2 +- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/digital_image_processing/rotation/rotation.py b/digital_image_processing/rotation/rotation.py index 2951f18fc0ec..958d16fafb91 100644 --- a/digital_image_processing/rotation/rotation.py +++ b/digital_image_processing/rotation/rotation.py @@ -1,11 +1,13 @@ +from pathlib import Path + import cv2 import numpy as np from matplotlib import pyplot as plt def get_rotation( - img: np.array, pt1: np.float32, pt2: np.float32, rows: int, cols: int -) -> np.array: + img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int +) -> np.ndarray: """ Get image rotation :param img: np.array @@ -21,17 +23,19 @@ def get_rotation( if __name__ == "__main__": # read original image - image = cv2.imread("lena.jpg") + image = cv2.imread( + str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg") + ) # turn image in gray scale value gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # get image shape img_rows, img_cols = gray_img.shape # set different points to rotate image - pts1 = np.float32([[50, 50], [200, 50], [50, 200]]) - pts2 = np.float32([[10, 100], [200, 50], [100, 250]]) - pts3 = np.float32([[50, 50], [150, 50], [120, 200]]) - pts4 = np.float32([[10, 100], [80, 50], [180, 250]]) + pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32) + pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32) + pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32) + pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32) # add all rotated images in a list images = [ diff --git a/mypy.ini b/mypy.ini index cf0ba26cfc82..b6c4d6fe2785 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,4 +2,4 @@ ignore_missing_imports = True ; FIXME: #4052 fix mypy errors in the exclude directories and remove them below -exclude = (data_structures|digital_image_processing|dynamic_programming|graphs|maths|matrix|other|project_euler|scripts|searches|strings*)/$ +exclude = 
(data_structures|dynamic_programming|graphs|maths|matrix|other|project_euler|scripts|searches|strings*)/$ From 252df0a149502143a14e7283424d40b785dd451c Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 7 Apr 2021 04:42:56 +0200 Subject: [PATCH 0125/1543] fix(mypy): Fix files in scripts/ (#4320) --- mypy.ini | 2 +- scripts/validate_filenames.py | 2 +- scripts/validate_solutions.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mypy.ini b/mypy.ini index b6c4d6fe2785..9eec22e22717 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,4 +2,4 @@ ignore_missing_imports = True ; FIXME: #4052 fix mypy errors in the exclude directories and remove them below -exclude = (data_structures|dynamic_programming|graphs|maths|matrix|other|project_euler|scripts|searches|strings*)/$ +exclude = (data_structures|dynamic_programming|graphs|maths|matrix|other|project_euler|searches|strings*)/$ diff --git a/scripts/validate_filenames.py b/scripts/validate_filenames.py index 419295fe679d..ed23f3907114 100755 --- a/scripts/validate_filenames.py +++ b/scripts/validate_filenames.py @@ -4,7 +4,7 @@ try: from .build_directory_md import good_file_paths except ImportError: - from build_directory_md import good_file_paths + from build_directory_md import good_file_paths # type: ignore filepaths = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index fd804ea5aa31..364c71e2fb1c 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -22,7 +22,7 @@ def convert_path_to_module(file_path: pathlib.Path) -> ModuleType: """Converts a file path to a Python module""" spec = importlib.util.spec_from_file_location(file_path.name, str(file_path)) module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) + spec.loader.exec_module(module) # type: ignore return module @@ -89,5 +89,5 @@ def test_project_euler(solution_path: pathlib.Path) -> None: problem_number: str = solution_path.parent.name[8:].zfill(3) expected: str = PROBLEM_ANSWERS[problem_number] solution_module = convert_path_to_module(solution_path) - answer = str(solution_module.solution()) + answer = str(solution_module.solution()) # type: ignore assert answer == expected, f"Expected {expected} but got {answer}" From cbe4d5f952e0596211643251817a822714a9fc1b Mon Sep 17 00:00:00 2001 From: Oliver Dewitz <65554808+OliverDew@users.noreply.github.com> Date: Mon, 12 Apr 2021 13:40:10 +0200 Subject: [PATCH 0126/1543] Fixed typo in docstring (#4326) --- strings/lower.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strings/lower.py b/strings/lower.py index b7abe9fc957d..9ae419123ceb 100644 --- a/strings/lower.py +++ b/strings/lower.py @@ -1,6 +1,6 @@ def lower(word: str) -> str: """ - Will convert the entire string to lowecase letters + Will convert the entire string to lowercase letters >>> lower("wow") 'wow' From ba974810d601254091d2f8076b96d7bb584a80f1 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 22 Apr 2021 10:52:54 +0200 Subject: [PATCH 0127/1543] Simplify password_generator() (#4333) --- other/password_generator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/other/password_generator.py b/other/password_generator.py index 35e11e4dfb78..cf7250c814ff 100644 --- a/other/password_generator.py +++ b/other/password_generator.py @@ -16,7 +16,7 @@ def password_generator(length=8): >>> len(password_generator(-1)) 0 """ - chars = tuple(ascii_letters) + tuple(digits) + 
tuple(punctuation) + chars = ascii_letters + digits + punctuation return "".join(choice(chars) for x in range(length)) From 1a3997053875ca5e5e9d813176ed2f98c4933307 Mon Sep 17 00:00:00 2001 From: algobytewise Date: Fri, 23 Apr 2021 11:54:01 +0530 Subject: [PATCH 0128/1543] Add rgb_hsv_conversion.py (#4334) * Add rgb_hsv_conversion.py * updating DIRECTORY.md * snake-case Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + conversions/rgb_hsv_conversion.py | 159 ++++++++++++++++++++++++++++++ 2 files changed, 160 insertions(+) create mode 100644 conversions/rgb_hsv_conversion.py diff --git a/DIRECTORY.md b/DIRECTORY.md index e6ce3ae718b3..26929255d1a0 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -112,6 +112,7 @@ * [Molecular Chemistry](https://github.com/TheAlgorithms/Python/blob/master/conversions/molecular_chemistry.py) * [Octal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/octal_to_decimal.py) * [Prefix Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions.py) + * [Rgb Hsv Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/rgb_hsv_conversion.py) * [Roman Numerals](https://github.com/TheAlgorithms/Python/blob/master/conversions/roman_numerals.py) * [Temperature Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/temperature_conversions.py) * [Weight Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/weight_conversion.py) diff --git a/conversions/rgb_hsv_conversion.py b/conversions/rgb_hsv_conversion.py new file mode 100644 index 000000000000..081cfe1d75e0 --- /dev/null +++ b/conversions/rgb_hsv_conversion.py @@ -0,0 +1,159 @@ +""" +The RGB color model is an additive color model in which red, green, and blue light +are added together in various ways to reproduce a broad array of colors. The name +of the model comes from the initials of the three additive primary colors, red, +green, and blue. Meanwhile, the HSV representation models how colors appear under +light. In it, colors are represented using three components: hue, saturation and +(brightness-)value. This file provides functions for converting colors from one +representation to the other. + +(description adapted from https://en.wikipedia.org/wiki/RGB_color_model and +https://en.wikipedia.org/wiki/HSL_and_HSV). +""" + + +def hsv_to_rgb(hue: float, saturation: float, value: float) -> list[int]: + """ + Conversion from the HSV-representation to the RGB-representation. 
+ Expected RGB-values taken from + https://www.rapidtables.com/convert/color/hsv-to-rgb.html + + >>> hsv_to_rgb(0, 0, 0) + [0, 0, 0] + >>> hsv_to_rgb(0, 0, 1) + [255, 255, 255] + >>> hsv_to_rgb(0, 1, 1) + [255, 0, 0] + >>> hsv_to_rgb(60, 1, 1) + [255, 255, 0] + >>> hsv_to_rgb(120, 1, 1) + [0, 255, 0] + >>> hsv_to_rgb(240, 1, 1) + [0, 0, 255] + >>> hsv_to_rgb(300, 1, 1) + [255, 0, 255] + >>> hsv_to_rgb(180, 0.5, 0.5) + [64, 128, 128] + >>> hsv_to_rgb(234, 0.14, 0.88) + [193, 196, 224] + >>> hsv_to_rgb(330, 0.75, 0.5) + [128, 32, 80] + """ + if hue < 0 or hue > 360: + raise Exception("hue should be between 0 and 360") + + if saturation < 0 or saturation > 1: + raise Exception("saturation should be between 0 and 1") + + if value < 0 or value > 1: + raise Exception("value should be between 0 and 1") + + chroma = value * saturation + hue_section = hue / 60 + second_largest_component = chroma * (1 - abs(hue_section % 2 - 1)) + match_value = value - chroma + + if hue_section >= 0 and hue_section <= 1: + red = round(255 * (chroma + match_value)) + green = round(255 * (second_largest_component + match_value)) + blue = round(255 * (match_value)) + elif hue_section > 1 and hue_section <= 2: + red = round(255 * (second_largest_component + match_value)) + green = round(255 * (chroma + match_value)) + blue = round(255 * (match_value)) + elif hue_section > 2 and hue_section <= 3: + red = round(255 * (match_value)) + green = round(255 * (chroma + match_value)) + blue = round(255 * (second_largest_component + match_value)) + elif hue_section > 3 and hue_section <= 4: + red = round(255 * (match_value)) + green = round(255 * (second_largest_component + match_value)) + blue = round(255 * (chroma + match_value)) + elif hue_section > 4 and hue_section <= 5: + red = round(255 * (second_largest_component + match_value)) + green = round(255 * (match_value)) + blue = round(255 * (chroma + match_value)) + else: + red = round(255 * (chroma + match_value)) + green = round(255 * (match_value)) + blue = round(255 * (second_largest_component + match_value)) + + return [red, green, blue] + + +def rgb_to_hsv(red: int, green: int, blue: int) -> list[float]: + """ + Conversion from the RGB-representation to the HSV-representation. + The tested values are the reverse values from the hsv_to_rgb-doctests. + Function "approximately_equal_hsv" is needed because of small deviations due to + rounding for the RGB-values. 
+ + >>> approximately_equal_hsv(rgb_to_hsv(0, 0, 0), [0, 0, 0]) + True + >>> approximately_equal_hsv(rgb_to_hsv(255, 255, 255), [0, 0, 1]) + True + >>> approximately_equal_hsv(rgb_to_hsv(255, 0, 0), [0, 1, 1]) + True + >>> approximately_equal_hsv(rgb_to_hsv(255, 255, 0), [60, 1, 1]) + True + >>> approximately_equal_hsv(rgb_to_hsv(0, 255, 0), [120, 1, 1]) + True + >>> approximately_equal_hsv(rgb_to_hsv(0, 0, 255), [240, 1, 1]) + True + >>> approximately_equal_hsv(rgb_to_hsv(255, 0, 255), [300, 1, 1]) + True + >>> approximately_equal_hsv(rgb_to_hsv(64, 128, 128), [180, 0.5, 0.5]) + True + >>> approximately_equal_hsv(rgb_to_hsv(193, 196, 224), [234, 0.14, 0.88]) + True + >>> approximately_equal_hsv(rgb_to_hsv(128, 32, 80), [330, 0.75, 0.5]) + True + """ + if red < 0 or red > 255: + raise Exception("red should be between 0 and 255") + + if green < 0 or green > 255: + raise Exception("green should be between 0 and 255") + + if blue < 0 or blue > 255: + raise Exception("blue should be between 0 and 255") + + float_red = red / 255 + float_green = green / 255 + float_blue = blue / 255 + value = max(max(float_red, float_green), float_blue) + chroma = value - min(min(float_red, float_green), float_blue) + saturation = 0 if value == 0 else chroma / value + + if chroma == 0: + hue = 0.0 + elif value == float_red: + hue = 60 * (0 + (float_green - float_blue) / chroma) + elif value == float_green: + hue = 60 * (2 + (float_blue - float_red) / chroma) + else: + hue = 60 * (4 + (float_red - float_green) / chroma) + + hue = (hue + 360) % 360 + + return [hue, saturation, value] + + +def approximately_equal_hsv(hsv_1: list[float], hsv_2: list[float]) -> bool: + """ + Utility-function to check that two hsv-colors are approximately equal + + >>> approximately_equal_hsv([0, 0, 0], [0, 0, 0]) + True + >>> approximately_equal_hsv([180, 0.5, 0.3], [179.9999, 0.500001, 0.30001]) + True + >>> approximately_equal_hsv([0, 0, 0], [1, 0, 0]) + False + >>> approximately_equal_hsv([180, 0.5, 0.3], [179.9999, 0.6, 0.30001]) + False + """ + check_hue = abs(hsv_1[0] - hsv_2[0]) < 0.2 + check_saturation = abs(hsv_1[1] - hsv_2[1]) < 0.002 + check_value = abs(hsv_1[2] - hsv_2[2]) < 0.002 + + return check_hue and check_saturation and check_value From 2ce6be009a2f374c0c6e5ab1f7f7c7274c5e0c55 Mon Sep 17 00:00:00 2001 From: David Leal Date: Sun, 25 Apr 2021 23:31:34 -0500 Subject: [PATCH 0129/1543] feat: Add Discord badge in `README.md` (#4357) * feat: Add Discord badge in `README.md` * Update README.md Co-authored-by: Dhruv Manilawala --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index f81031b53ebb..1e85ed0daa7c 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ # The Algorithms - Python [![Gitpod Ready-to-Code](https://img.shields.io/badge/Gitpod-Ready--to--Code-blue?logo=gitpod&style=flat-square)](https://gitpod.io/#https://github.com/TheAlgorithms/Python)  +[![Discord chat](https://img.shields.io/discord/808045925556682782.svg?logo=discord&colorB=7289DA&style=flat-square)](https://discord.gg/c7MnfGFGa6)  [![Gitter chat](https://img.shields.io/badge/Chat-Gitter-ff69b4.svg?label=Chat&logo=gitter&style=flat-square)](https://gitter.im/TheAlgorithms)  [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/TheAlgorithms/Python/build?label=CI&logo=github&style=flat-square)](https://github.com/TheAlgorithms/Python/actions)  
[![LGTM](https://img.shields.io/lgtm/alerts/github/TheAlgorithms/Python.svg?label=LGTM&logo=LGTM&style=flat-square)](https://lgtm.com/projects/g/TheAlgorithms/Python/alerts)  From 69457357e8c6a3530034aca9707e22ce769da067 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 26 Apr 2021 06:45:26 +0200 Subject: [PATCH 0130/1543] binary_tree_traversals.py: Simplify with dataclasses (#4336) * binary_tree_traversals.py: Simplify with dataclasses * Update data_structures/binary_tree/binary_tree_traversals.py Co-authored-by: Dhruv Manilawala * Optional["Node"] Co-authored-by: Dhruv Manilawala --- .../binary_tree/binary_tree_traversals.py | 21 +++++++------------ 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 7c0ee1dbbc2a..7857880dada9 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -1,24 +1,17 @@ # https://en.wikipedia.org/wiki/Tree_traversal +from dataclasses import dataclass +from typing import Optional +@dataclass class Node: - """ - A Node has data variable and pointers to its left and right nodes. - """ - - def __init__(self, data): - self.left = None - self.right = None - self.data = data + data: int + left: Optional["Node"] = None + right: Optional["Node"] = None def make_tree() -> Node: - root = Node(1) - root.left = Node(2) - root.right = Node(3) - root.left.left = Node(4) - root.left.right = Node(5) - return root + return Node(1, Node(2, Node(4), Node(5)), Node(3)) def preorder(root: Node): From 6f21f76696ff6657bff6fc2239315a1650924190 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Mon, 26 Apr 2021 11:16:50 +0530 Subject: [PATCH 0131/1543] fix(ci): Update pre-commit hooks and apply new black (#4359) * fix(ci): Update pre-commit hooks and apply new black * remove empty docstring --- .github/workflows/pre-commit.yml | 2 +- .pre-commit-config.yaml | 6 +++--- data_structures/binary_tree/binary_search_tree.py | 2 +- data_structures/heap/heap.py | 10 +++++----- data_structures/heap/max_heap.py | 10 +++++----- data_structures/linked_list/deque_doubly.py | 2 +- data_structures/stacks/stack.py | 10 +++++----- digital_image_processing/sepia.py | 2 +- hashes/md5.py | 1 - machine_learning/linear_discriminant_analysis.py | 2 +- machine_learning/linear_regression.py | 2 +- maths/monte_carlo_dice.py | 2 +- other/least_recently_used.py | 2 +- 13 files changed, 26 insertions(+), 27 deletions(-) diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index dd1a8a945092..27a5a97c0b6c 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -14,7 +14,7 @@ jobs: ~/.cache/pip key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} - uses: actions/setup-python@v2 - - uses: psf/black@20.8b1 + - uses: psf/black@21.4b0 - name: Install pre-commit run: | python -m pip install --upgrade pip diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b48da86ee57d..b666e88aa162 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,17 +13,17 @@ repos: )$ - id: requirements-txt-fixer - repo: https://github.com/psf/black - rev: 20.8b1 + rev: 21.4b0 hooks: - id: black - repo: https://github.com/PyCQA/isort - rev: 5.7.0 + rev: 5.8.0 hooks: - id: isort args: - --profile=black - repo: https://gitlab.com/pycqa/flake8 - rev: 3.9.0 + rev: 3.9.1 hooks: - id: flake8 args: diff --git 
a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 45c3933fe899..a1ed1d0ac2a5 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -150,7 +150,7 @@ def inorder(self, arr: list, node: Node): self.inorder(arr, node.right) def find_kth_smallest(self, k: int, node: Node) -> int: - """Return the kth smallest element in a binary search tree """ + """Return the kth smallest element in a binary search tree""" arr = [] self.inorder(arr, node) # append all values to list using inorder traversal return arr[k - 1] diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index 8592362c23b9..65a70e468d1c 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -32,7 +32,7 @@ def __repr__(self) -> str: return str(self.h) def parent_index(self, child_idx: int) -> Optional[int]: - """ return the parent index of given child """ + """return the parent index of given child""" if child_idx > 0: return (child_idx - 1) // 2 return None @@ -78,7 +78,7 @@ def max_heapify(self, index: int) -> None: self.max_heapify(violation) def build_max_heap(self, collection: Iterable[float]) -> None: - """ build max heap from an unsorted array""" + """build max heap from an unsorted array""" self.h = list(collection) self.heap_size = len(self.h) if self.heap_size > 1: @@ -87,14 +87,14 @@ def build_max_heap(self, collection: Iterable[float]) -> None: self.max_heapify(i) def max(self) -> float: - """ return the max in the heap """ + """return the max in the heap""" if self.heap_size >= 1: return self.h[0] else: raise Exception("Empty heap") def extract_max(self) -> float: - """ get and remove max from heap """ + """get and remove max from heap""" if self.heap_size >= 2: me = self.h[0] self.h[0] = self.h.pop(-1) @@ -108,7 +108,7 @@ def extract_max(self) -> float: raise Exception("Empty heap") def insert(self, value: float) -> None: - """ insert a new value into the max heap """ + """insert a new value into the max heap""" self.h.append(value) idx = (self.heap_size - 1) // 2 self.heap_size += 1 diff --git a/data_structures/heap/max_heap.py b/data_structures/heap/max_heap.py index 2a08f8fa2cd1..fbc8eed09226 100644 --- a/data_structures/heap/max_heap.py +++ b/data_structures/heap/max_heap.py @@ -21,7 +21,7 @@ def __init__(self): self.__size = 0 def __swap_up(self, i: int) -> None: - """ Swap the element up """ + """Swap the element up""" temporary = self.__heap[i] while i // 2 > 0: if self.__heap[i] > self.__heap[i // 2]: @@ -30,13 +30,13 @@ def __swap_up(self, i: int) -> None: i //= 2 def insert(self, value: int) -> None: - """ Insert new element """ + """Insert new element""" self.__heap.append(value) self.__size += 1 self.__swap_up(self.__size) def __swap_down(self, i: int) -> None: - """ Swap the element down """ + """Swap the element down""" while self.__size >= 2 * i: if 2 * i + 1 > self.__size: bigger_child = 2 * i @@ -52,7 +52,7 @@ def __swap_down(self, i: int) -> None: i = bigger_child def pop(self) -> int: - """ Pop the root element """ + """Pop the root element""" max_value = self.__heap[1] self.__heap[1] = self.__heap[self.__size] self.__size -= 1 @@ -65,7 +65,7 @@ def get_list(self): return self.__heap[1:] def __len__(self): - """ Length of the array """ + """Length of the array""" return self.__size diff --git a/data_structures/linked_list/deque_doubly.py b/data_structures/linked_list/deque_doubly.py index c9ae8b3d1ba2..2b9d70c223c4 100644 --- 
a/data_structures/linked_list/deque_doubly.py +++ b/data_structures/linked_list/deque_doubly.py @@ -9,7 +9,7 @@ class _DoublyLinkedBase: - """ A Private class (to be inherited) """ + """A Private class (to be inherited)""" class _Node: __slots__ = "_prev", "_data", "_next" diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index 840cde099d38..276684e12184 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -22,28 +22,28 @@ def __str__(self) -> str: return str(self.stack) def push(self, data): - """ Push an element to the top of the stack.""" + """Push an element to the top of the stack.""" if len(self.stack) >= self.limit: raise StackOverflowError self.stack.append(data) def pop(self): - """ Pop an element off of the top of the stack.""" + """Pop an element off of the top of the stack.""" return self.stack.pop() def peek(self): - """ Peek at the top-most element of the stack.""" + """Peek at the top-most element of the stack.""" return self.stack[-1] def is_empty(self) -> bool: - """ Check if a stack is empty.""" + """Check if a stack is empty.""" return not bool(self.stack) def is_full(self) -> bool: return self.size() == self.limit def size(self) -> int: - """ Return the size of the stack.""" + """Return the size of the stack.""" return len(self.stack) def __contains__(self, item) -> bool: diff --git a/digital_image_processing/sepia.py b/digital_image_processing/sepia.py index dfb5951676aa..e9dd2c06066d 100644 --- a/digital_image_processing/sepia.py +++ b/digital_image_processing/sepia.py @@ -19,7 +19,7 @@ def to_grayscale(blue, green, red): return 0.2126 * red + 0.587 * green + 0.114 * blue def normalize(value): - """ Helper function to normalize R/G/B value -> return 255 if value > 255""" + """Helper function to normalize R/G/B value -> return 255 if value > 255""" return min(value, 255) for i in range(pixel_h): diff --git a/hashes/md5.py b/hashes/md5.py index b7888fb610ac..b08ab957340a 100644 --- a/hashes/md5.py +++ b/hashes/md5.py @@ -94,7 +94,6 @@ def not32(i): def sum32(a, b): - """""" return (a + b) % 2 ** 32 diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index 0d19e970e973..18553a77ad1c 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -283,7 +283,7 @@ def valid_input( # Main Function def main(): - """ This function starts execution phase """ + """This function starts execution phase""" while True: print(" Linear Discriminant Analysis ".center(50, "*")) print("*" * 50, "\n") diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index a726629efe00..b0bbc7b904c3 100644 --- a/machine_learning/linear_regression.py +++ b/machine_learning/linear_regression.py @@ -88,7 +88,7 @@ def run_linear_regression(data_x, data_y): def main(): - """ Driver function """ + """Driver function""" data = collect_dataset() len_data = data.shape[0] diff --git a/maths/monte_carlo_dice.py b/maths/monte_carlo_dice.py index e8e3abe83a99..17cedbdbcb18 100644 --- a/maths/monte_carlo_dice.py +++ b/maths/monte_carlo_dice.py @@ -7,7 +7,7 @@ class Dice: NUM_SIDES = 6 def __init__(self): - """ Initialize a six sided dice """ + """Initialize a six sided dice""" self.sides = list(range(1, Dice.NUM_SIDES + 1)) def roll(self): diff --git a/other/least_recently_used.py b/other/least_recently_used.py index 213339636469..d0e27efc6dc8 100644 --- a/other/least_recently_used.py +++ 
b/other/least_recently_used.py @@ -4,7 +4,7 @@ class LRUCache: - """ Page Replacement Algorithm, Least Recently Used (LRU) Caching.""" + """Page Replacement Algorithm, Least Recently Used (LRU) Caching.""" dq_store = object() # Cache store of keys key_reference_map = object() # References of the keys in cache From 727341e3db4a28dae3f1bbf166522844f3de8f6d Mon Sep 17 00:00:00 2001 From: ngxingyu Date: Tue, 4 May 2021 14:49:41 +0800 Subject: [PATCH 0132/1543] Create check_pangram.py (#4389) --- strings/check_pangram.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/strings/check_pangram.py b/strings/check_pangram.py index e3695b918524..81384bfd4cc6 100644 --- a/strings/check_pangram.py +++ b/strings/check_pangram.py @@ -38,9 +38,9 @@ def check_pangram_faster( """ >>> check_pangram_faster("The quick brown fox jumps over the lazy dog") True - >>> check_pangram("Waltz, bad nymph, for quick jigs vex.") + >>> check_pangram_faster("Waltz, bad nymph, for quick jigs vex.") True - >>> check_pangram("Jived fox nymph grabs quick waltz.") + >>> check_pangram_faster("Jived fox nymph grabs quick waltz.") True >>> check_pangram_faster("The quick brown fox jumps over the la_y dog") False @@ -50,7 +50,9 @@ def check_pangram_faster( flag = [False] * 26 for char in input_str: if char.islower(): - flag[ord(char) - ord("a")] = True + flag[ord(char) - 97] = True + elif char.isupper(): + flag[ord(char) - 65] = True return all(flag) From deb71167e7d5aabe24ae4a5c33e8e73dd3af8ece Mon Sep 17 00:00:00 2001 From: Ahmed Haj Abdel Khaleq <31858489+AhmedHaj@users.noreply.github.com> Date: Wed, 12 May 2021 02:22:42 -0400 Subject: [PATCH 0133/1543] [mypy] Fix type annotations for linked_stack.py, evaluate_postfix_notations.py, stack.py in data structures (#4409) * [mypy] Fix type annotations for linked_stack.py, next_greater_element.py, stack.py * Reformatted files according to black --- data_structures/stacks/evaluate_postfix_notations.py | 4 +++- data_structures/stacks/linked_stack.py | 6 ++++-- data_structures/stacks/stack.py | 5 ++++- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/data_structures/stacks/evaluate_postfix_notations.py b/data_structures/stacks/evaluate_postfix_notations.py index a03cb43bb020..2a4baf9d6b52 100644 --- a/data_structures/stacks/evaluate_postfix_notations.py +++ b/data_structures/stacks/evaluate_postfix_notations.py @@ -1,3 +1,5 @@ +from typing import Any, List + """ The Reverse Polish Nation also known as Polish postfix notation or simply postfix notation. 
@@ -21,7 +23,7 @@ def evaluate_postfix(postfix_notation: list) -> int: return 0 operations = {"+", "-", "*", "/"} - stack = [] + stack: List[Any] = [] for token in postfix_notation: if token in operations: diff --git a/data_structures/stacks/linked_stack.py b/data_structures/stacks/linked_stack.py index 1a2d07f20e7c..0b9c9d45e61f 100644 --- a/data_structures/stacks/linked_stack.py +++ b/data_structures/stacks/linked_stack.py @@ -1,5 +1,5 @@ """ A Stack using a linked list like structure """ -from typing import Any +from typing import Any, Optional class Node: @@ -42,7 +42,7 @@ class LinkedStack: """ def __init__(self) -> None: - self.top = None + self.top: Optional[Node] = None def __iter__(self): node = self.top @@ -134,6 +134,8 @@ def peek(self) -> Any: """ if self.is_empty(): raise IndexError("peek from empty stack") + + assert self.top is not None return self.top.data def clear(self) -> None: diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index 276684e12184..245d39b32c07 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -1,3 +1,6 @@ +from typing import List + + class StackOverflowError(BaseException): pass @@ -12,7 +15,7 @@ class Stack: """ def __init__(self, limit: int = 10): - self.stack = [] + self.stack: List[int] = [] self.limit = limit def __bool__(self) -> bool: From 03d9b6747bee1ef3537da204d09771c911093a01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Malte=20J=C3=BCrgens?= Date: Wed, 12 May 2021 07:48:23 +0000 Subject: [PATCH 0134/1543] feat(ci): Hash project euler solutions (#4411) * hash project euler solutions * fix errors * Return missing annotation * Fix typo * Extract variable to shorten excessively long line * Update scripts/validate_solutions.py * Update scripts/validate_solutions.py * Simplify with str.encode() * PEP 585: type hinting generics in standard collections; * str().encode() * Texas two step to placate black, flake8, mypy Co-authored-by: Andrii Siriak Co-authored-by: Christian Clauss --- scripts/project_euler_answers.json | 1450 ++++++++++++++-------------- scripts/validate_solutions.py | 15 +- 2 files changed, 734 insertions(+), 731 deletions(-) diff --git a/scripts/project_euler_answers.json b/scripts/project_euler_answers.json index 05c144d1e06a..6d354363ee5f 100644 --- a/scripts/project_euler_answers.json +++ b/scripts/project_euler_answers.json @@ -1,727 +1,727 @@ { - "001": "233168", - "002": "4613732", - "003": "6857", - "004": "906609", - "005": "232792560", - "006": "25164150", - "007": "104743", - "008": "23514624000", - "009": "31875000", - "010": "142913828922", - "011": "70600674", - "012": "76576500", - "013": "5537376230", - "014": "837799", - "015": "137846528820", - "016": "1366", - "017": "21124", - "018": "1074", - "019": "171", - "020": "648", - "021": "31626", - "022": "871198282", - "023": "4179871", - "024": "2783915460", - "025": "4782", - "026": "983", - "027": "-59231", - "028": "669171001", - "029": "9183", - "030": "443839", - "031": "73682", - "032": "45228", - "033": "100", - "034": "40730", - "035": "55", - "036": "872187", - "037": "748317", - "038": "932718654", - "039": "840", - "040": "210", - "041": "7652413", - "042": "162", - "043": "16695334890", - "044": "5482660", - "045": "1533776805", - "046": "5777", - "047": "134043", - "048": "9110846700", - "049": "296962999629", - "050": "997651", - "051": "121313", - "052": "142857", - "053": "4075", - "054": "376", - "055": "249", - "056": "972", - "057": "153", - "058": "26241", - "059": "129448", - "060": 
"26033", - "061": "28684", - "062": "127035954683", - "063": "49", - "064": "1322", - "065": "272", - "066": "661", - "067": "7273", - "068": "6531031914842725", - "069": "510510", - "070": "8319823", - "071": "428570", - "072": "303963552391", - "073": "7295372", - "074": "402", - "075": "161667", - "076": "190569291", - "077": "71", - "078": "55374", - "079": "73162890", - "080": "40886", - "081": "427337", - "082": "260324", - "083": "425185", - "084": "101524", - "085": "2772", - "086": "1818", - "087": "1097343", - "088": "7587457", - "089": "743", - "090": "1217", - "091": "14234", - "092": "8581146", - "093": "1258", - "094": "518408346", - "095": "14316", - "096": "24702", - "097": "8739992577", - "098": "18769", - "099": "709", - "100": "756872327473", - "101": "37076114526", - "102": "228", - "103": "20313839404245", - "104": "329468", - "105": "73702", - "106": "21384", - "107": "259679", - "108": "180180", - "109": "38182", - "110": "9350130049860600", - "111": "612407567715", - "112": "1587000", - "113": "51161058134250", - "114": "16475640049", - "115": "168", - "116": "20492570929", - "117": "100808458960497", - "118": "44680", - "119": "248155780267521", - "120": "333082500", - "121": "2269", - "122": "1582", - "123": "21035", - "124": "21417", - "125": "2906969179", - "126": "18522", - "127": "18407904", - "128": "14516824220", - "129": "1000023", - "130": "149253", - "131": "173", - "132": "843296", - "133": "453647705", - "134": "18613426663617118", - "135": "4989", - "136": "2544559", - "137": "1120149658760", - "138": "1118049290473932", - "139": "10057761", - "140": "5673835352990", - "141": "878454337159", - "142": "1006193", - "143": "30758397", - "144": "354", - "145": "608720", - "146": "676333270", - "147": "846910284", - "148": "2129970655314432", - "149": "52852124", - "150": "-271248680", - "151": "0.464399", - "152": "301", - "153": "17971254122360635", - "154": "479742450", - "155": "3857447", - "156": "21295121502550", - "157": "53490", - "158": "409511334375", - "159": "14489159", - "160": "16576", - "161": "20574308184277971", - "162": "3D58725572C62302", - "163": "343047", - "164": "378158756814587", - "165": "2868868", - "166": "7130034", - "167": "3916160068885", - "168": "59206", - "169": "178653872807", - "170": "9857164023", - "171": "142989277", - "172": "227485267000992000", - "173": "1572729", - "174": "209566", - "175": "1,13717420,8", - "176": "96818198400000", - "177": "129325", - "178": "126461847755", - "179": "986262", - "180": "285196020571078987", - "181": "83735848679360680", - "182": "399788195976", - "183": "48861552", - "184": "1725323624056", - "185": "4640261571849533", - "186": "2325629", - "187": "17427258", - "188": "95962097", - "189": "10834893628237824", - "190": "371048281", - "191": "1918080160", - "192": "57060635927998347", - "193": "684465067343069", - "194": "61190912", - "195": "75085391", - "196": "322303240771079935", - "197": "1.710637717", - "198": "52374425", - "199": "0.00396087", - "200": "229161792008", - "201": "115039000", - "202": "1209002624", - "203": "34029210557338", - "204": "2944730", - "205": "0.5731441", - "206": "1389019170", - "207": "44043947822", - "208": "331951449665644800", - "209": "15964587728784", - "210": "1598174770174689458", - "211": "1922364685", - "212": "328968937309", - "213": "330.721154", - "214": "1677366278943", - "215": "806844323190414", - "216": "5437849", - "217": "6273134", - "218": "0", - "219": "64564225042", - "220": "139776,963904", - "221": "1884161251122450", - "222": 
"1590933", - "223": "61614848", - "224": "4137330", - "225": "2009", - "226": "0.11316017", - "227": "3780.618622", - "228": "86226", - "229": "11325263", - "230": "850481152593119296", - "231": "7526965179680", - "232": "0.83648556", - "233": "271204031455541309", - "234": "1259187438574927161", - "235": "1.002322108633", - "236": "123/59", - "237": "15836928", - "238": "9922545104535661", - "239": "0.001887854841", - "240": "7448717393364181966", - "241": "482316491800641154", - "242": "997104142249036713", - "243": "892371480", - "244": "96356848", - "245": "288084712410001", - "246": "810834388", - "247": "782252", - "248": "23507044290", - "249": "9275262564250418", - "250": "1425480602091519", - "251": "18946051", - "252": "104924.0", - "253": "11.492847", - "254": "8184523820510", - "255": "4.4474011180", - "256": "85765680", - "257": "139012411", - "258": "12747994", - "259": "20101196798", - "260": "167542057", - "261": "238890850232021", - "262": "2531.205", - "263": "2039506520", - "264": "2816417.1055", - "265": "209110240768", - "266": "1096883702440585", - "267": "0.999992836187", - "268": "785478606870985", - "269": "1311109198529286", - "270": "82282080", - "271": "4617456485273129588", - "272": "8495585919506151122", - "273": "2032447591196869022", - "274": "1601912348822", - "275": "15030564", - "276": "5777137137739632912", - "277": "1125977393124310", - "278": "1228215747273908452", - "279": "416577688", - "280": "430.088247", - "281": "1485776387445623", - "282": "1098988351", - "283": "28038042525570324", - "284": "5a411d7b", - "285": "157055.80999", - "286": "52.6494571953", - "287": "313135496", - "288": "605857431263981935", - "289": "6567944538", - "290": "20444710234716473", - "291": "4037526", - "292": "3600060866", - "293": "2209", - "294": "789184709", - "295": "4884650818", - "296": "1137208419", - "297": "2252639041804718029", - "298": "1.76882294", - "299": "549936643", - "300": "8.0540771484375", - "301": "2178309", - "302": "1170060", - "303": "1111981904675169", - "304": "283988410192", - "305": "18174995535140", - "306": "852938", - "307": "0.7311720251", - "308": "1539669807660924", - "309": "210139", - "310": "2586528661783", - "311": "2466018557", - "312": "324681947", - "313": "2057774861813004", - "314": "132.52756426", - "315": "13625242", - "316": "542934735751917735", - "317": "1856532.8455", - "318": "709313889", - "319": "268457129", - "320": "278157919195482643", - "321": "2470433131948040", - "322": "999998760323313995", - "323": "6.3551758451", - "324": "96972774", - "325": "54672965", - "326": "1966666166408794329", - "327": "34315549139516", - "328": "260511850222", - "329": "199740353/29386561536000", - "330": "15955822", - "331": "467178235146843549", - "332": "2717.751525", - "333": "3053105", - "334": "150320021261690835", - "335": "5032316", - "336": "CAGBIHEFJDK", - "337": "85068035", - "338": "15614292", - "339": "19823.542204", - "340": "291504964", - "341": "56098610614277014", - "342": "5943040885644", - "343": "269533451410884183", - "344": "65579304332", - "345": "13938", - "346": "336108797689259276", - "347": "11109800204052", - "348": "1004195061", - "349": "115384615384614952", - "350": "84664213", - "351": "11762187201804552", - "352": "378563.260589", - "353": "1.2759860331", - "354": "58065134", - "355": "1726545007", - "356": "28010159", - "357": "1739023853137", - "358": "3284144505", - "359": "40632119", - "360": "878825614395267072", - "361": "178476944", - "362": "457895958010", - "363": "0.0000372091", - "364": 
"44855254", - "365": "162619462356610313", - "366": "88351299", - "367": "48271207", - "368": "253.6135092068", - "369": "862400558448", - "370": "41791929448408", - "371": "40.66368097", - "372": "301450082318807027", - "373": "727227472448913", - "374": "334420941", - "375": "7435327983715286168", - "376": "973059630185670", - "377": "732385277", - "378": "147534623725724718", - "379": "132314136838185", - "380": "6.3202e25093", - "381": "139602943319822", - "382": "697003956", - "383": "22173624649806", - "384": "3354706415856332783", - "385": "3776957309612153700", - "386": "528755790", - "387": "696067597313468", - "388": "831907372805129931", - "389": "2406376.3623", - "390": "2919133642971", - "391": "61029882288", - "392": "3.1486734435", - "393": "112398351350823112", - "394": "3.2370342194", - "395": "28.2453753155", - "396": "173214653", - "397": "141630459461893728", - "398": "2010.59096", - "399": "1508395636674243,6.5e27330467", - "400": "438505383468410633", - "401": "281632621", - "402": "356019862", - "403": "18224771", - "404": "1199215615081353", - "405": "237696125", - "406": "36813.12757207", - "407": "39782849136421", - "408": "299742733", - "409": "253223948", - "410": "799999783589946560", - "411": "9936352", - "412": "38788800", - "413": "3079418648040719", - "414": "552506775824935461", - "415": "55859742", - "416": "898082747", - "417": "446572970925740", - "418": "1177163565297340320", - "419": "998567458,1046245404,43363922", - "420": "145159332", - "421": "2304215802083466198", - "422": "92060460", - "423": "653972374", - "424": "1059760019628", - "425": "46479497324", - "426": "31591886008", - "427": "97138867", - "428": "747215561862", - "429": "98792821", - "430": "5000624921.38", - "431": "23.386029052", - "432": "754862080", - "433": "326624372659664", - "434": "863253606", - "435": "252541322550", - "436": "0.5276662759", - "437": "74204709657207", - "438": "2046409616809", - "439": "968697378", - "440": "970746056", - "441": "5000088.8395", - "442": "1295552661530920149", - "443": "2744233049300770", - "444": "1.200856722e263", - "445": "659104042", - "446": "907803852", - "447": "530553372", - "448": "106467648", - "449": "103.37870096", - "450": "583333163984220940", - "451": "153651073760956", - "452": "345558983", - "453": "104354107", - "454": "5435004633092", - "455": "450186511399999", - "456": "333333208685971546", - "457": "2647787126797397063", - "458": "423341841", - "459": "3996390106631", - "460": "18.420738199", - "461": "159820276", - "462": "5.5350769703e1512", - "463": "808981553", - "464": "198775297232878", - "465": "585965659", - "466": "258381958195474745", - "467": "775181359", - "468": "852950321", - "469": "0.56766764161831", - "470": "147668794", - "471": "1.895093981e31", - "472": "73811586", - "473": "35856681704365", - "474": "9690646731515010", - "475": "75780067", - "476": "110242.87794", - "477": "25044905874565165", - "478": "59510340", - "479": "191541795", - "480": "turnthestarson", - "481": "729.12106947", - "482": "1400824879147", - "483": "4.993401567e22", - "484": "8907904768686152599", - "485": "51281274340", - "486": "11408450515", - "487": "106650212746", - "488": "216737278", - "489": "1791954757162", - "490": "777577686", - "491": "194505988824000", - "492": "242586962923928", - "493": "6.818741802", - "494": "2880067194446832666", - "495": "789107601", - "496": "2042473533769142717", - "497": "684901360", - "498": "472294837", - "499": "0.8660312", - "500": "35407281", - "501": "197912312715", - "502": 
"749485217", - "503": "3.8694550145", - "504": "694687", - "505": "714591308667615832", - "506": "18934502", - "507": "316558047002627270", - "508": "891874596", - "509": "151725678", - "510": "315306518862563689", - "511": "935247012", - "512": "50660591862310323", - "513": "2925619196", - "514": "8986.86698", - "515": "2422639000800", - "516": "939087315", - "517": "581468882", - "518": "100315739184392", - "519": "804739330", - "520": "238413705", - "521": "44389811", - "522": "96772715", - "523": "37125450.44", - "524": "2432925835413407847", - "525": "44.69921807", - "526": "49601160286750947", - "527": "11.92412011", - "528": "779027989", - "529": "23624465", - "530": "207366437157977206", - "531": "4515432351156203105", - "532": "827306.56", - "533": "789453601", - "534": "11726115562784664", - "535": "611778217", - "536": "3557005261906288", - "537": "779429131", - "538": "22472871503401097", - "539": "426334056", - "540": "500000000002845", - "541": "4580726482872451", - "542": "697586734240314852", - "543": "199007746081234640", - "544": "640432376", - "545": "921107572", - "546": "215656873", - "547": "11730879.0023", - "548": "12144044603581281", - "549": "476001479068717", - "550": "328104836", - "551": "73597483551591773", - "552": "326227335", - "553": "57717170", - "554": "89539872", - "555": "208517717451208352", - "556": "52126939292957", - "557": "2699929328", - "558": "226754889", - "559": "684724920", - "560": "994345168", - "561": "452480999988235494", - "562": "51208732914368", - "563": "27186308211734760", - "564": "12363.698850", - "565": "2992480851924313898", - "566": "329569369413585", - "567": "75.44817535", - "568": "4228020", - "569": "21025060", - "570": "271197444", - "571": "30510390701978", - "572": "19737656", - "573": "1252.9809", - "574": "5780447552057000454", - "575": "0.000989640561", - "576": "344457.5871", - "577": "265695031399260211", - "578": "9219696799346", - "579": "3805524", - "580": "2327213148095366", - "581": "2227616372734", - "582": "19903", - "583": "1174137929000", - "584": "32.83822408", - "585": "17714439395932", - "586": "82490213", - "587": "2240", - "588": "11651930052", - "589": "131776959.25", - "590": "834171904", - "591": "526007984625966", - "592": "13415DF2BE9C", - "593": "96632320042.0", - "594": "47067598", - "595": "54.17529329", - "596": "734582049", - "597": "0.5001817828", - "598": "543194779059", - "599": "12395526079546335", - "600": "2668608479740672", - "601": "1617243", - "602": "269496760", - "603": "879476477", - "604": "1398582231101", - "605": "59992576", - "606": "158452775", - "607": "13.1265108586", - "608": "439689828", - "609": "172023848", - "610": "319.30207833", - "611": "49283233900", - "612": "819963842", - "613": "0.3916721504", - "614": "130694090", - "615": "108424772", - "616": "310884668312456458", - "617": "1001133757", - "618": "634212216", - "619": "857810883", - "620": "1470337306", - "621": "11429712", - "622": "3010983666182123972", - "623": "3679796", - "624": "984524441", - "625": "551614306", - "626": "695577663", - "627": "220196142", - "628": "210286684", - "629": "626616617", - "630": "9669182880384", - "631": "869588692", - "632": "728378714", - "633": "1.0012e-10", - "634": "4019680944", - "635": "689294705", - "636": "888316", - "637": "49000634845039", - "638": "18423394", - "639": "797866893", - "640": "50.317928", - "641": "793525366", - "642": "631499044", - "643": "968274154", - "644": "20.11208767", - "645": "48894.2174", - "646": "845218467", - "647": 
"563132994232918611", - "648": "301483197", - "649": "924668016", - "650": "538319652", - "651": "448233151", - "652": "983924497", - "653": "1130658687", - "654": "815868280", - "655": "2000008332", - "656": "888873503555187", - "657": "219493139", - "658": "958280177", - "659": "238518915714422000", - "660": "474766783", - "661": "646231.2177", - "662": "860873428", - "663": "1884138010064752", - "664": "35295862", - "665": "11541685709674", - "666": "0.48023168", - "667": "1.5276527928", - "668": "2811077773", - "669": "56342087360542122", - "670": "551055065", - "671": "946106780", - "672": "91627537", - "673": "700325380", - "674": "416678753", - "675": "416146418", - "676": "3562668074339584", - "677": "984183023", - "678": "1986065", - "679": "644997092988678", - "680": "563917241", - "681": "2611227421428", - "682": "290872710", - "683": "2.38955315e11", - "684": "922058210", - "685": "662878999", - "686": "193060223", - "687": "0.3285320869", - "688": "110941813", - "689": "0.56565454", - "690": "415157690", - "691": "11570761", - "692": "842043391019219959", - "693": "699161", - "694": "1339784153569958487", - "695": "0.1017786859", - "696": "436944244", - "697": "4343871.06", - "698": "57808202", - "699": "37010438774467572", - "700": "1517926517777556", - "701": "13.51099836", - "702": "622305608172525546", - "703": "843437991", - "704": "501985601490518144", - "705": "480440153", - "706": "884837055", - "707": "652907799", - "708": "28874142998632109", - "709": "773479144", - "710": "1275000", - "711": "541510990", - "712": "413876461", - "713": "788626351539895", - "714": "2.452767775565e20", - "715": "883188017", - "716": "238948623", - "717": "1603036763131", - "718": "228579116", - "719": "128088830547982", - "720": "688081048", - "721": "700792959", - "722": "3.376792776502e132", - "723": "1395793419248", - "724": "18128250110", - "725": "4598797036650685" + "001": "c0b20f4665d0388d564f0b6ecf3edc9f9480cb15fff87198b95701d9f5fe1f7b", + "002": "1f5882e19314ac13acca52ad5503184b3cb1fd8dbeea82e0979d799af2361704", + "003": "5c09f0554518a413e58e6bc5964ba90655713483d0b2bbc94572ad6b0b4dda28", + "004": "aa74f52b4c428d89606b411bc165eb81a6266821ecc9b4f30cdb70c5c930f4d9", + "005": "1ba90ab11bfb2d2400545337212b0de2a5c7f399215175ade6396e91388912b1", + "006": "537942be3eb323c507623a6a73fa87bf5aeb97b7c7422993a82aa7c15f6d9cd6", + "007": "ecbe74e25cfa4763dbc304ccac2ffb9912e9625cd9993a84bd0dd6d7dc0ca021", + "008": "b9fb30b6553415e9150051ce5710a93d0f55b22557c0068d8e16619a388f145a", + "009": "d912d9d473ef86f12da1fb2011c5c0c155bd3a0ebdb4bbd7ea275cecdcb63731", + "010": "bed2d160e02f0540f19a64ca738aacb79cfcd08ba7e2421567b16cb6e7e3e90e", + "011": "9ded5bc849d33e477aa9c944138d34f0aacc485a372e84464e8a572712a5b7da", + "012": "3e7be445b6c19e6db58c2482005c1f78cb74011a4279249ca632011a9f1b61a2", + "013": "3cb265a96c5645a9ad11d47551f015c25f3f99792c951617656d84626fbc4868", + "014": "78a262dd40eba0f7195686ec7f3891a39437523456f8d16fa9065a34409eeac6", + "015": "7b8f812ca89e311e1b16b903de76fa7b0800a939b3028d9dc4d35f6fa4050281", + "016": "a6f988d30328bd706c66f8ac0d92aac21dd732149cdd69cb31f459dca20c5abe", + "017": "1a455b216c6e916943acf3fa4c7e57a7a5cac66d97cc51befca810c223ef9c23", + "018": "fde3f2e7127f6810eb4160bf7bb0563240d78c9d75a9a590b6d6244748a7f4ff", + "019": "284de502c9847342318c17d474733ef468fbdbe252cddf6e4b4be0676706d9d0", + "020": "c86a2932e1c79343a3c16fb218b9944791aaeedd3e30c87d1c7f505c0e588f7c", + "021": "e8c6ef4a1736a245b5682e0262c5c43862cfb233ca5e286be2f5bb4d8a974ecf", + "022": 
"85148c096c25e3ed3da55c7e9c89448018b0f5f53ad8d042129c33d9beac6736", + "023": "42e2552a2f589e021824339e2508629ffa00b3489ea467f47e77a1ea97e735c9", + "024": "4677b3d9daa3b30a9665e4558f826e04f7833dda886b8ef24f7176519a0db537", + "025": "7d398da8791745001b3d1c41030676d1c036687eb1ab32e0b5a1832e7579c073", + "026": "fbe10beedf9d29cf53137ba38859ffd1dbe7642cedb7ef0a102a3ab109b47842", + "027": "e4110e0852a2f70703f0081fc91c4a20f595919a038729cb37c564d68b875c6f", + "028": "261171a770d594f6a7fc76c1a839eda7f6dd4e9495e00e75048578fc86d8adf0", + "029": "a207c35d8417aeed4c9e78bcf83f936cd8191c702893be62aa690ce16bc909ca", + "030": "46e68e4199ab0a663ab306651528b06756556c9f0d8b819095af45e036dfbe6b", + "031": "8de34b4ba97b184c7a2096b9266776175242b87d67bc8d77d7289be6f70cd105", + "032": "0d246750daa7f1b367a21f55da454ddc8f62e0a95d163062e9b9273320d5130f", + "033": "ad57366865126e55649ecb23ae1d48887544976efea46a48eb5d85a6eeb4d306", + "034": "728b8d7d6d5d34cad9cbb7c3ea15f807ae57144594b1740b3c73b82314ccd1ed", + "035": "02d20bbd7e394ad5999a4cebabac9619732c343a4cac99470c03e23ba2bdc2bc", + "036": "9480c0160719234b57defc0681c0949a175ffb3ff4a3bf5e8163ac843f383f35", + "037": "e9800abda89919edac504e90dac91f95e0778e3ba0f21a0bac4e77a84766eaaf", + "038": "b2004522103364a6e842b9d042c0707d79af68dec7810078729d061fb7948912", + "039": "fd0f7e53c5b02b688a57ee37f3d52065cb168a7b9fd5a3abd93d37e1559fbd30", + "040": "d29d53701d3c859e29e1b90028eec1ca8e2f29439198b6e036c60951fb458aa1", + "041": "bf05020e70de94e26dba112bb6fb7b0755db5ca88c7225e99187c5a08c8a0428", + "042": "79d6eaa2676189eb927f2e16a70091474078e2117c3fc607d35cdc6b591ef355", + "043": "6512f20c244844b6130204379601855098826afa1b55ff91c293c853ddf67db5", + "044": "97e2524fd3796e83b06c0f89fdcb16e4c544e76e9c0496f57ac84834869f4cc3", + "045": "8b0300d71656b9cf0716318be9453c99a13bb8644d227fd683d06124e6a28b35", + "046": "8485ee802cc628b8cbd82476133d11b57af87e00711516a703525a9af0193b12", + "047": "c7274da71333bd93201fa1e05b1ed54e0074d83f259bd7148c70ddc43082bde1", + "048": "743d17cbff06ab458b99ecbb32e1d6bb9a7ff2ac804118f7743177dd969cfc61", + "049": "47c6094ff1ff6e37788def89190c8256619ef1511681c503fea02c171569d16e", + "050": "6ee74ef623df9fb69facd30b91ed78fe70370462bb267097f0dfeef9d9b057bb", + "051": "d17cec28356b4f9a7f1ec0f20cca4c89e270aeb0e75d70d485b05bb1f28e9f6d", + "052": "ebd72b510911af3e254a030cd891cb804e1902189eee7a0f6199472eb5e4dba2", + "053": "9705cc6128a60cc22581217b715750a6053b2ddda67cc3af7e14803b27cf0c1f", + "054": "12e2c8df501501b2bb531e941a737ffa7a2a491e849c5c5841e3b6132291bc35", + "055": "9f484139a27415ae2e8612bf6c65a8101a18eb5e9b7809e74ca63a45a65f17f4", + "056": "3658d7fa3c43456f3c9c87db0490e872039516e6375336254560167cc3db2ea2", + "057": "620c9c332101a5bae955c66ae72268fbcd3972766179522c8deede6a249addb7", + "058": "196f327021627b6a48db9c6e0a3388d110909d4bb957eb3fbc90ff1ecbda42cb", + "059": "0295239a9d71f7452b93e920b7e0e462f712af5444579d25e06b9614ed77de74", + "060": "ad7c26db722221bfb1bf7e3c36b501bedf8be857b1cfa8664fccb074b54354f9", + "061": "94e4fb283c1abcccae4b8b28e39a294a323cdc9732c3d3ce1133c518d0a286f6", + "062": "d25a595036aa8722157aca38f90084acb369b00df1070f49e203d5a3b7a0736d", + "063": "0e17daca5f3e175f448bacace3bc0da47d0655a74c8dd0dc497a3afbdad95f1f", + "064": "6d62aa4b52071e39f064a930d190b85ab327eb1a5045a8050ac538666ee765ca", + "065": "1c6c0bb2c7ecdc3be8e134f79b9de45155258c1f554ae7542dce48f5cc8d63f0", + "066": "316c0f93c7fe125865d85d6e7e7a31b79e9a46c414c45078b732080fa22ef2a3", + "067": "53f66b6783cb7552d83015df01b0d5229569fce1dd7d1856335c7244b9a3ded6", + "068": 
"4bf689d09a156621220881a2264dc031b2bfb181213b26d6ff2c338408cf94c3", + "069": "79555e4b891e2885525c136f8b834cc0b1e9416960b12e371111a5cb2da0479f", + "070": "08c6a7c8c06a01d2b17993ada398084b0707652bcfbd580f9173bcddf120ac2c", + "071": "63f032489227c969135c6a6571fe9b33d6970dc6eca32c2086c61a4a099c98fa", + "072": "9ef8a4249d4b8f24147ab6e9ad2536eb04f10fb886a8099e88e0e7c41cf7c616", + "073": "ae9f9c786cd0f24fe03196d5061545862d87a208580570d46e2cfb371319aa68", + "074": "b7c7470e59e2a2df1bfd0a4705488ee6fe0c5c125de15cccdfab0e00d6c03dc0", + "075": "8a426e100572b8e2ea7c1b404a1ee694699346632cf4942705c54f05162bc07a", + "076": "81c54809c3bdfc23f844fde21ae645525817b6e1bee1525270f49282888a5546", + "077": "7f2253d7e228b22a08bda1f09c516f6fead81df6536eb02fa991a34bb38d9be8", + "078": "71374036b661ac8ffe4b78c191050c3ccd1c956ca8a5f465ea1956f7ce571f63", + "079": "2df095aea1862ebfed8df7fb26e8c4a518ca1a8f604a31cfba9da991fc1d6422", + "080": "58bfe3a44f8ae452aaa6ef6267bafc3e841cfe7f9672bdfeb841d2e3a62c1587", + "081": "04bad90d08bdf11010267ec9d1c9bbb49a813194dace245868ea8140aec9a1f7", + "082": "52c42c55daea3131d5357498b8a0ddcf99d1babd16f6ccaee67cb3d0a665b772", + "083": "a825281bc5ce8fe70d66a04e96314e7de070f11fed0f78bc81e007ca7c92e8b0", + "084": "692a776beae0e92d1121fed36427c10d0860344614ead6b4760d1b7091a6ab1f", + "085": "7b2e7211fb4f4d8352c9215c591252344775c56d58b9a5ff88bda8358628ec4e", + "086": "8ffe8459134b46975acd31df13a50c51dbeacf1c19a764bf1602ba7c73ffc8fb", + "087": "cec1917df3b3ee1f43b3468596ed3042df700dc7a752fefc06c4142a2832995d", + "088": "c06356fdcaff01810e1f794263f3e44a75f28e8902a145a0d01a1fff77614722", + "089": "0df5486b7bca884d5f00c502e216f734b2865b202397f24bca25ac9b8a95ab4a", + "090": "cb69775effd93fc34ef38dfbfcdc4c593b1a3d8e7ab70c0f05d627dbc5cbd298", + "091": "327f057e054d1e6a9a1be4ac6acc4b1dedc63d8a88222396ffe98b3194067347", + "092": "538cd20a275b610698691d714b2adf4e4c321915def05667f4d25d97413ec076", + "093": "d8ed8ca27d83a63df6982905ea53b4613b9d7974edcee06f301cf43d63177f47", + "094": "d1b79281d95ce5bfa848060de4e0c80af2c3cae1ff7453cca31ff31e2d67ac14", + "095": "0a3ddcd71cf30a567070630f947ab79fc168865ba0bf112aed9b71fb4e76c32f", + "096": "9c527d233befbf357335e18e6dd5b14ef3a62e19ef34f90bd3fb9e5a2a0a0111", + "097": "f0e2911e303617c9648692ee8056beeb045d89e469315716abed47cd94a3cd56", + "098": "ededac5db280586f534cde4f69ce2c134d2360d6b5da3c3ebc400494cc016e78", + "099": "92c5fd0421c1d619cbf1bdba83a207261f2c5f764aed46db9b4d2de03b72b654", + "100": "993189cbf49fef4c913aa081f2ef44d360b84bf33d19df93fce4663ac34e9927", + "101": "e8539f8b271851cad65d551354874d3086fa9ff7b6f6a2ab9890d63f5ba16c68", + "102": "9d693eeee1d1899cbc50b6d45df953d3835acf28ee869879b45565fccc814765", + "103": "1f17277005b8d58ad32f2cbee4c482cb8c0f3687c3cfe764ec30ee99827c3b1d", + "104": "87dfcf5471e77980d098ff445701dbada0f6f7bac2fa5e43fa7685ec435040e1", + "105": "a76f4e7fa1357a955743d5c0acb2e641c50bcaf0eec27eb4aaffebb45fe12994", + "106": "197f5e68d1e83af7e40a7c7717acc6a99767bf8c53eece9253131a3790a02063", + "107": "bf13bc90121776d7de3c4c3ca4c400a4c12284c3da684b3d530113236813ce81", + "108": "3dea386e2c4a8a0633b667fdd4beacd8bb3fe27c282f886c828ad7d6b42c2d73", + "109": "735cc3e619b9a1e3ac503ba5195c43c02d968113fd3795373ca085ed7777b54d", + "110": "01b4e8163485356b46f612c9d40ed4b8602621d4d02289623e7dbb3dcbe03395", + "111": "97c1b054c094337ec1397cd5ccdf6c9efe1067ad16f531824a94eaadb3c0953b", + "112": "c99c843e0f6d6566132d97c829780332218e005efc14b633e25a5badb912d63a", + "113": "8dbc8319e5d8923ef7ab42108341ee2c32a34ffc0d19d5ae5677f1564813314a", + "114": 
"b3b9ebc9f9ddadb6b630eeef5d7ba724b3bb4d071b249318093eb7547949bbb9", + "115": "80c3cd40fa35f9088b8741bd8be6153de05f661cfeeb4625ffbf5f4a6c3c02c4", + "116": "a39208d7130682b772d6206acd746bc3779cc1bc0033f0a472e97993d0a32d5b", + "117": "54201fbc7a70d21c1b0acede7708f1658d8e87032ab666001e888e7887c67d50", + "118": "834e6235764ae632737ebf7cd0be66634c4fb70fe1e55e858efd260a66a0e3a9", + "119": "bcabd9609d7293a3a3f1640c2937e302fa52ff03a95c117f87f2c465817eba5e", + "120": "2bd8cabf5aecfcadde03beda142ac26c00b6ccfc59fdcb685672cd79a92f63a6", + "121": "5292478e83f6b244c7c7c5c1fe272121abdc2982f66ed11fcbc6ea7e73af124d", + "122": "6d78b19a042a64f08cc4df0d42fb91cd757829718e60e82a54e3498f03f3ef32", + "123": "057b9b6e49d03958b3f38e812d2cfdd0f500e35e537b4fa9afedd2f3444db8a2", + "124": "d251170c5287da10bffc1ac8af344e0c434ef5f649fd430fcf1049f90d45cf45", + "125": "e9b7a676dc359ffce7075886373af79e3348ddbf344502614d9940eecd0532c1", + "126": "38752ed2e711a3c001d5139cb3c945c0f780939db4ea80d68f31e6763b11cfba", + "127": "e707d9f315269a34d94d9d9fa4f8b29328e66b53447ef38419c6033e57d5d123", + "128": "5e15922fba7f61ddccb2ee579b5ec35034cc32def25ff156ae2b0a3e98c4414e", + "129": "3cc4ad1254491787f52a66e595dbb573e13ceb554c51d81e42d5490a575da070", + "130": "7a6e9899cccb6a01e05013c622422717f54853f7f2581bc3b88a78b25981da08", + "131": "4a8596a7790b5ca9e067da401c018b3206befbcf95c38121854d1a0158e7678a", + "132": "ed77e05f47f7f19f09cae9b272bfd6daa1682b426d39dcb7f473234c0c9381c5", + "133": "e456d3fec55d88653dd88c3f8bbde1f8240c8ceb7882016d19e6f329e412a4ae", + "134": "b144116982f4f0930038299efbdd56afc1528ef59047fb75bade8777309fde4b", + "135": "0709e1008834c2ca8648376ac62d74ac8df5457069cbfedf2b0776dab07a3c5b", + "136": "84692ebaa4fc17e9cfce27126b3fc5a92c1e33e1d94658de0544f8b35a597592", + "137": "6eca481578c967fb9373fe4ce7930b39d8eefe1c0c9c7cb5af241a766bd4dfbc", + "138": "1b5f0f504917592dea2e878497b0e12655366f2a7a163e0a081d785124982d2c", + "139": "0d2f26ec4004c99171fc415282ec714afa617208480b45aeb104c039dc653f5d", + "140": "78ceab5e434a86a6a6bb4f486707bffaf536ef6cb2cc5b45a90b3edd89a03283", + "141": "d74ae4b07f05779065fb038b35d85a21444ed3bed2373f51d9e22d85a16a704c", + "142": "f59af8b0b63a3d0eb580405092c1539261aec18890ea5b6d6e2d93697d67cd38", + "143": "66e9d1093f00eef9a32e704232219f462138f13c493cc0775c507cf51cb231ed", + "144": "09a1b036b82baba3177d83c27c1f7d0beacaac6de1c5fdcc9680c49f638c5fb9", + "145": "b910b9b7bf3c3f071e410e0474958931a022d20c717a298a568308250ed2b0da", + "146": "5292f0166523ea1a89c9f7f2d69064dee481a7d3c119841442cd36f03c42b657", + "147": "cdb162a8a376b1df385dac44ce7b10777c9fea049961cb303078ebbd08d70de8", + "148": "54f631973f7bc002a958b818a1e99e2fc1a91c41eafe19b9136fac9a4eb8d7b8", + "149": "c49382eb9fc41e750239ac7b209513a266e80a268c83cf4d0c79f571629bac48", + "150": "c89b0574a2e2f4a63845fe0fd9d51122d9d4149d887995051c3e53d2244bba41", + "151": "5d09e3b57ced9fd215acc96186743e422ce48900b8992c9b6c74d3e4117e4140", + "152": "c3ea99f86b2f8a74ef4145bb245155ff5f91cd856f287523481c15a1959d5fd1", + "153": "fb57f89f289ee59c36cede64d2d13828b8997766b49aa4530aabfc18ff4a4f17", + "154": "c877d90a178511a52ae2b2119e99e0b8b643cec44d5fd864bd3ef3e0d7f1f4bb", + "155": "58801bebc14c905b79c209affab74e176e2e971c1d9799a1a342ae6a3c2afbc1", + "156": "983d2222220ab7ffa243f48274f6eb82f92258445b93b23724770995575d77fe", + "157": "023344e94ad747fbc529e3e68b95e596badcc445c85c1c7c8fa590e3d492779a", + "158": "d1b58f4c07d1db5eb97785807b6d97a0d1ee1340e7dbcc7bb289f3547559f2fc", + "159": "cd3a3d2cf8973c5f2c97ebed2460784818513e7d0fee8f98f4fdcf510295e159", + "160": 
"3a926519b024ea9df5e7ad79d0b1c4400f78f58d07834f5ecd7be522112b676d", + "161": "2b3d09a4c76b282db1428342c82c5a55c0ab57c7a7640e0850d82021164477e9", + "162": "d50ce1ab3a25a5c5e020517092128ab3ec4a3bd5b58673b2e6cda86bcc0c48a0", + "163": "7e17ce0fca5d559f76015765c652d76b8468f9ddc91c2069d7799867b9d52769", + "164": "5c680d0b2c4dfac8aade87be60cb4d04a4c3d4db398f51e2cbf131d699b630a8", + "165": "304de2e63f91f8f74faaebae7a7ec2e0c6e0d8d322d8a747e4e3be88de2d3505", + "166": "14212843872dab188a86eb1f0f7012e1b72ea1097545f29377b3b9b52822af76", + "167": "18c18f8710f831a82eb74ae979bd36d609bee818c972ff88f8d8fa065270f951", + "168": "66640021d592f79b510f9d6101bd8eca89893187d23919c8edff4075e73ae390", + "169": "819b01e0394727fd089f84b9351243176920f03d0c6e33e5ff136016da5d8d4e", + "170": "e68fadd33a8c41d9a259577a278d8518aeb8b81c67b8cf03ccf43fc451ec8bd8", + "171": "33bf9ed4714b0e5da8828f8b3d9d3e9d0cf55c1d496324acb04a3f195252749c", + "172": "b9a27b513dc15448772cac5e914de61f02efe323f528892c0bff86d19913a6bd", + "173": "1b2a5e44fda5dfee3ce230f44fe73c672249f6620cdbaa343ba0ba808034958c", + "174": "98aabf085c6c8647f5e8a4775dc1d036513742d8e03b8c5c51e41bdfc9c3e7ae", + "175": "c03dcb22b7faf121d99542018dd10a07a778abee2678d35c03394a8d207b826b", + "176": "4fff1a7beda4291446d76e5ed5177c3f36e52a10481009fdaf2976da39e802ae", + "177": "614d3df0ba5fdffab2328eff8e9ca2d76b09bbc447c06bf1fab0419ae278fae9", + "178": "094a2ba3011118efdd9d4c1f839e6747dee8ba953b52e9012fe2977e32599375", + "179": "9f5563a5ea90ca7023f0304acba78005ee6b7351245a8a668a94dfef160f8d29", + "180": "dbef09115a57784ea4ea14c1fe35571301b0d6139bea29d1b9e0babf2c2aae05", + "181": "3920627e86db42beb1cdf61d026f9f7798082f1221db25a17fb7feb6c1d49027", + "182": "58096166bb8199abf4e07a9ef0f960065e5a635443c1937a1a3c527ade51d594", + "183": "bdf84a73b16a5dd5ece189dc970ab2c8f0cb5334c96bdd1d6ba2bad6e7f8a213", + "184": "c1e8c0f1b1eb3c258923e9daa46ef055bd79512b485c7dc73a9c5e395d5e6911", + "185": "0ea72907eb6c1120740cd80ee6b9a935cd754edcf69379328f24dfc3f09b9986", + "186": "3c0078aeae0362b6b7561518d3eb28400193fec73aab35980f138c28b6579433", + "187": "f2bc655b33e35669ee9adc841cbda98e0834083eb0657d10f7170e70081db7e0", + "188": "38e0291a3f5438779b157e5efcae6cef9db21cbac5607cd08882844cf981febd", + "189": "9b2a65ac4c35f6b392501dee2a28221a3975aac5f0866b476f5e6a5a59f3fcc2", + "190": "606fe2cb6525dabfcdab93afb594dbc8399cb967fc05f0ca93f6084d3f8fb591", + "191": "ea1977e7b22df383de61bded2a4bb3064cf17fcc0170be452330256f938b8d55", + "192": "91d614f139082168d730003f04b87135c64e13709ced2a96001ed60796867825", + "193": "65648f18a50a7f9195fe56bb8cb9e25421c6d777ad2447a3b566dc8c54f3399a", + "194": "cdd31847c6138853597261756d5e795884566220a9361217daa5ba7f87560404", + "195": "d12224510de6c98076f6289cbe342a7ec7ea3c5453f6e3cf8d37d9eea74bd07e", + "196": "1349b472d2821dff215e54d683dbfca49f0b490ade6a30b1db9667bc94e5312d", + "197": "e2aa8f7cb3ba893af8bddbffa6240e7eb71a4f4c4498f2a360f3db7b513094df", + "198": "a29d9edd0dceca9a72d2238a50dbb728846cd06594baec35a1b2c053faeab93d", + "199": "50a6b9725ef854537a554584ca74612a4d37d0ec35d85d04812c3ae730a4c8cc", + "200": "5b439098a3081d99496a7b1b2df6500ca5f66c2f170c709a57b27c6be717538a", + "201": "b4e86186652a11df0b9ec8f601c68b4823ae0bafd96357051371fde5d11a25ed", + "202": "057243f52fd25fa90a16219d945950ed5523ddb7eb6f2f361b33f8b85af25930", + "203": "2742f7af8ce9e20185e940bb4e27afc5fefe8cd7d01d7d8e16c7a5aaf3ad47aa", + "204": "15f5e9ae4636a6bf8bdd52f582774b9696b485671f4a64ab8c717166dc085205", + "205": "e03c2f4ceabf677ec502d235064a62271ce2ee91132b33f57382c4150c295784", + "206": 
"16bb96da8f20d738bbd735404ea148818ef5942d4d1bc4c707171f9e5e476b1e", + "207": "133fea765d0b055894d8aba573f47891c1f7f715f53edeefb200fbda758a1884", + "208": "90831cd89b4cceacaf099c9bae2452132cfa2f2b5553c651ef4948460e53d1f3", + "209": "570fab1574a3fd9aca6404083dec1c150e858e137692ee0c8011e702ec3e902f", + "210": "ae9a76ce3418c06d0eac3375a82744fb4250a2f380e723c404334d8572faead0", + "211": "aa4b2bc3a136b27bf10a858ac6a8d48f41e40f769182c444df89c5b9f0ed84e5", + "212": "81489bf56605b69cc48f0bce22615d4415b2eea882a11a33e1b317c4facba6eb", + "213": "a497e789f49b77d647de0e80cd2699acd3d384cc29c534d6609c700968124d14", + "214": "409520c6a94de382003db04a3dfee85a6dbb71324f8bd033e566e510ad47e747", + "215": "0eccb27846f417380a12dfd588a353e151b328425ecf3794c9cf7b7eec1a1110", + "216": "f735b4b441635ecded989bdc2862e35c75f5179d118d0533ae827a84ed29e81b", + "217": "9aa88ac109aefaa7ce79c7b085495863a70679058b106a5deb76b2772a531faa", + "218": "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9", + "219": "9da1307fd12f4c9a21a34e612920cec286d937418a2d5040676408ba0c47f3d8", + "220": "a262318d02a14747ed2137c701f94248bf8651a23d1f82826952e66c25029588", + "221": "bfb4e53578fa42f83eda027da0467a351298dd65e3e8e84a987d69fc275e9f2d", + "222": "4308f4374b84e657aa7a21e5f5fe42ed16386b6dc7a74bff0d24d08ad62acd26", + "223": "3790f82f65ce7bc071b4096ca22544548b3413a755f58bfc401eff3ddf487a86", + "224": "96356c050fa66d919c75212d789544db81b012bbaf1f7915b053cb9ba2d67de7", + "225": "f37f3f2b0dc57a86dee4ba6ff855283bb4d2f0dea1c5bd1b708853444c2ffcec", + "226": "49bd28d97a79875007a6713aa9700df14464217c28a6e76bc93ded57b75a33f5", + "227": "b1f73471a3e6ea1dfb04610bd89ccb110994780084280fae872d42a2786f9131", + "228": "e38da154f6cccd06cd0001924ec2dad8de5bdcd0b78b68e8d8347768d99ac0bd", + "229": "098ffc6baaa32734053275ce38f4bbe58efe0ff946bf31e0e2df4c6a169e23d8", + "230": "2c72b887a8638941b85765645b45c5cdb73255427e14d5114f6830f847a6b861", + "231": "4aa0c92e77eeed08994650ac6129d77db9f026ae2aee78ad5c9fde132fac0505", + "232": "5f7905b71cb897bc7cc6db7e29cc38ee459e2fd8f5d58ba4746d3acd4e46d444", + "233": "8d986e287ad21475728b0dbd9e935623d69db4e5fdca0d42bc32d70eda48985b", + "234": "2d9d03b778af897e305caa8a1a14a117737bbdd549003c6d7477dd3be8331694", + "235": "7168cff545d365b09e8997bb9450343c7090913876c8f7eb9f0e9849c6fc7dd5", + "236": "ceb3002bad36c22c5da82fd422b36bad91b97a7d3f5409ed5d16aa9b42dc137a", + "237": "c857d8fa78c8fde91f29b3fbe332c2e781f7e8b54532f4c352693d6676fda2a8", + "238": "3e2edae8b8ddbcfaecd5aa6c69cb5960b84cc16f7b3232f3386aae9ecbd23f20", + "239": "49df3a63ca6509687cabb3d208e92b057630231e66b87fe8c781baabb12a55f8", + "240": "5034a21557b2de1c5c2d2aadfe8ffe62162c23f42d1aaabc705ed8519e91a3c1", + "241": "85abbe1913df41c57d1e6f00cecea416edb19c64681d1bb43fb5024e2f48d409", + "242": "4da30e6198a3d9ae6a538c2366e08ee218de6efe2c5b8f231493e21489c21a7e", + "243": "7404bb7881a010271831847e71162ee7a393843922ee93cf7cf3455a0074279c", + "244": "21aa3213adeb0a562ec7161e1cfcb5f1701303f1c9f03ed726c536283e080db6", + "245": "22b9cfa9ab97c84eb64e3478a43acd4d95b90cae8c3453c968457a89c6387a81", + "246": "729e3de7687fc597be2eb4c592d697ff29c78cff6945c9690cfb1ee67550eeed", + "247": "f49b98df95a1f826c24cf622ba4d80427a0e0767dffcc28a9206c58508863cca", + "248": "44b8116c29dafbdfa984b3751c1dfd057b3e93fc57c8cd18495f1c0f605496bc", + "249": "49e96b6ba41e88901dbd118139ef6f013b4fc59e2347058a7e726cf6a2f14067", + "250": "f0e0dc05fb555ae5ba9820586bef3bb8a3a82905ece3d8a998a3015fc91c1c3e", + "251": "8c1ece1b350c210380456da2bab70054f717731b0dfb34bc3cf4abfacf696f15", + "252": 
"ad20a49374f9176bd26460d35f96f30d734df3cf6fc63b4642380b4e866848de", + "253": "ba1a2bbccabbcddbf29ee0b56d0d56b4f026e8a7b97e97de2d48c133ccbdf2a1", + "254": "381a2eac64a984a81671722bd95ca5b8b6508a6f490af46780e9f393c6797223", + "255": "5e6ece13372bad4a6ea011c933389dfaefedad5860aefba2ab97fe5f59156c42", + "256": "068d4a3c845803bf66a9e5320261a0fd7b6292a8230b271a6a83f0dc8c73e907", + "257": "d80ac9215ffa7adacb22711cc88f5b580272d0d65c49e1ea48e69d17e264d91a", + "258": "256c4d399703b7f16dadef9201efc0ef9f6aa6ee05ddfa2d3e26ff6efe09704d", + "259": "275a4e84039a1596ac7e8bbe186163dcfb02bfa99c209653ff5d505a29b4cb10", + "260": "f461ff2df66653be1a2e579b1aea515d4a84f2ae5ebea3aa21fb2477a53699f4", + "261": "178ecd56cd79c7aaec1353093571ce89845130991d64c5a715a02da83a2705ab", + "262": "2e0cb5e8fc8ef04c50a5b9ab9a9eecad446598ebc2527b19c447143e5ae09736", + "263": "c870fd75ed0d5ed92ec35789c522d029f364328a16282a1c5eb9b3b7e121eff3", + "264": "da5d6bdd89eacf70a88810935f80e4725da4feaf2aa86adb13985d7d9e1c247f", + "265": "13f16351c3971c286fae5e9cfbaf6f0a128a6507804fd280971a600019e352e8", + "266": "4f39cdd293598de9259231592e99bfc5fde82a0bc1820a4c5faeb54f96037f00", + "267": "3e054d92034d3d32c3d4e7acadf1c09232e468fc2520d23d2c7d183ec0735aa3", + "268": "2d47c47a2b19178cef9e4eba1a53dd39b5f8657bbe011a71c8d402d294d50132", + "269": "4448f310ab9bff796ca70c7b7d0cd3b9c517f72744a8615112f65ba30a6d61f7", + "270": "ce71f5bd1db540762e4bc6c4798d8b7f3d2b7068e91c300fd271a46298aea2aa", + "271": "5a05e212b9b6ccf6092081f567aa73d27da399d45418f674628a8154f9182b6b", + "272": "a326c2d7121d80861aaf110826615e560aa4efdec0cc9fdfce051c6b9038e781", + "273": "d32b75411f407c5da6a9a6b4a3907b9a9ebbca6b651324c03432d549671bb837", + "274": "b5740ac928d58f53537b05ecc80b7463dc1fd5a53400f57aa55573ecbd5faa56", + "275": "e1c843ff0e97692a180e384c1a9c03c7de06ef92ccad5aa6157fabf0dbe5b804", + "276": "2edf523574e0a062cacf21f51ed6f81128537f27a3cd27b84a8b5d2478d0092d", + "277": "130c990ad499345b7638e57dce365442e2ab2d2571546aae60a9fa6ed3834b8d", + "278": "2204d89df74e664621dfe8892277d50e7774f60613561d70ae06ee0eb4c483d4", + "279": "4618456c7239784964b8fcd27155e01cf5417a16cdca7b163cc885d598ba93f4", + "280": "4b2d9501483d450371ec4413519b0b3461502aabb970fb2b07766d0a3d3a3f85", + "281": "b04a4a02fa0ae20b657dcfe3f80ef84fd446daa1521aabae006b61bb8fa5a7da", + "282": "6dab2ee10b0dc8db525aeaa2f000f3bd35580ba91e71fe92bcd120ad83cf82c5", + "283": "c964c01082a258f4c6bb66a983615686cb4ae363f4d25bd0bdad14cd628cfce8", + "284": "df960dabff27b2404555d6b83aed7a58ef9a887104d85b6d5296f1c379b28494", + "285": "087de77e5f379e7733505f196e483390596050c75dad56a999b1079ea89246ed", + "286": "8f3e5fda508a37403238471d09494dde8c206beadfa0a71381bd8c6ac93abaf4", + "287": "5d834d4c0ca68d0dca107ffe9dbaddac7fc038b0ad6ccc7ba3cfb53920236103", + "288": "20a3ef9e411065c7265deff5e9b4d398cab6f71faa134353ccea35d2b279af04", + "289": "9dda7eb623939f599551ad1d39dbf405211352ae4e65ddd87fe3e31899ca571b", + "290": "a629c35ad526f4a6c0bb42f35f3e3fa1077c34e1898eac658775072730c40d6b", + "291": "81b1e5196bec98afe72f4412cf907a2816515bad0680bd4062f2b2c715643088", + "292": "614950a1cff05f4cf403f55393ed9d7807febbae49522ef83b97e0390038ae03", + "293": "9e4067ac93c6febda554d724d453d78bf3e28a7742cdec57ee47c5c706fbe940", + "294": "9ac900bf0fbb4c3c7e26986ac33698c46c6c3e8102ab75b40b8df94fc3a0c7a1", + "295": "2fdcd631f3c68bef3c90f8679b7aef685fa33f20c2d6eb5965cd2a62267c2ffa", + "296": "dfc947e61ea2138ebe47234ba503cf5246ecec530b12e363acb240822ddf0b34", + "297": "4d5af88ba8a28b49a79773d3e6521e8731ff07d49765203b157481469e6ae6d0", + "298": 
"94aa77eadafaad7327acb8e653888f00e114cca2fbe04691dabdafa2a0c8cd64", + "299": "0f221ba58a8ea417e13847ef91af9ff029561ac18c74bbeeb3f5509af81a3b03", + "300": "50a79fb6e417fb4a34e155a9af683aa9a74ee922a6c156a58bfedd22cf3185c4", + "301": "eb09a0097a47e7a95b892ad7230475a1a28343b47db4faeb3e47f227aeb04738", + "302": "fcf9736fe8c20a6d02f00e9b1e719de06aff4afa99d2eba166592aeff1b8f3b7", + "303": "e6266f575c94d805a67fcd3f2391d0961b4b121b8a66afbfbae76dfc34e5c06b", + "304": "189bd2a8daf5b638ede7c50035fcf426d125de87a401382f66ab75f35b2ac1f7", + "305": "0ac58c6eb8513f4ffe911bf0f044e0153862ee89c04939fd9b294860a37ec9ce", + "306": "335998d7e2a3fae2da75a5192d62c37dd006be96831fd37e7438ec6d84451c44", + "307": "4f1f2695b1b6b1660f3ef6ac31a81630ca74da9368eafbfb214ec1980705c13c", + "308": "bc5ae127f8690ba7f6e9ddad98a49137acb45abf4e187eaf3648f197c72fbe90", + "309": "6b78ed4c4bfc942b9b5dc340961f19c690d56f9d721b6b764d1db31da53139db", + "310": "0d183ec2ff1cbc768a9eb7eb06c2a354f6c8bab00e64ca2aed2da00825f33c05", + "311": "3ae7fdad095eed78e0c63cfe4e28ab7ba2b977259590ed38722e9c44727e598b", + "312": "329d107b5743a96e3551084832587a6346e410aa89641d83f03f1242a7244473", + "313": "ecc63ee12cbe487e5390007271890b8aa5df2cf48b8e692404895d3c2af20758", + "314": "5fa65495795c52818aea910c24e4d3176c71817f5268c34e1cb259b256737352", + "315": "95bd03b9913be37d24809d30e7bfd31a1b7a127d03d65e52086857bb3a364b5d", + "316": "ca6ec6c9159e10719cd8d2cfcfaf2fe2d3637fb3d497e2c80866de6b593632e6", + "317": "5b0d72d34b406ce20714a59f1c4d5340c5559285e340497dbcad68729a9db786", + "318": "3e2b479fafb86b8ab097588b8fa12ae8a078f8b5801e15c7faa1ef23d87a631b", + "319": "e04b18947b36771937dea491f47b75fedf42a6db684035f5690e6c2bd7e6031e", + "320": "e546e4a4c9020669c78a095aa5c5038242dd78e0f98517c0e23c43aefeb58138", + "321": "3da0198df2f98a7306ee6d2e12b96ba9a6ad837a6c2d4f316d3cd8589b6af308", + "322": "07e511e9002147c33739c924c17a61126d12823d143069535a615a97f86d936f", + "323": "be514911dd6258f860c2773253f6df6c22ca975a10c4e34db5903269f2975faf", + "324": "53ed94369b59a84d003ff3155edbf481a0eef362325539d6ab1a7f370ce919c8", + "325": "43c8dc1907d3e1eb30deb565475ec1ad4f807baf6ef34178508ec85071722f0a", + "326": "b08d72606988ea5a82e0caf15e68d81b4f2e8dbb4af6a22437916f3fc53e3dea", + "327": "f70bb9cb351daf610a91a3c769d84bbb3f3b8f1169b10839196b65b8585e7c38", + "328": "6e26ed661a0add2e583229066d304f7e765a0ea337b6a93bf979e4027b70b94e", + "329": "89d8b56a1e05d90ccde0df482ff2fec3d44270739810f3c5d06856c38d801380", + "330": "2dfac8e04d08dc5eefcbba4e475164103d339f844896a75ef3af2229185118f9", + "331": "a20f9b06c126f4ee65e3f3a0bf345007b35ecb69d035dd0ad848e09300130fcb", + "332": "6593d40f4e3f53a73191c704d388c7cd1639403da6e679c8e4169b26ade19f3f", + "333": "7499bc84f6bd2211365fec34943d64f6be80a53ee2efb21c099c1c910ca29967", + "334": "f24dd99fe5b46bb7a7a30c5eff61e71cab21e05f1b03132d7da9c943f65713f6", + "335": "8e2111c24160d92b1b29dd010b8b3a0a4f9af55f1d30bd5892756c58ffaec201", + "336": "eed2e8d970c1c5031220476e6b700d16e5065d7893a2766a53600825b4ad3ae5", + "337": "44e298d1b55c51c9f127989da1149ccf6bda24c40041f777d35d5b8f192753d2", + "338": "b3a60e80296f79cfdfc02354acc674162faefcb3fb78b9672254c9cfc6eb113f", + "339": "2b55688ba27d72202632783186211ee24ea39c53915066578291fffd9db73128", + "340": "15765221271275022a6ef57634d836b052ffbab6d7d5a6899992972143841e3c", + "341": "f7340563f85e057709a2fcc71bd448fed8d6de6907d8ba5f91fefa2abffda6cf", + "342": "f252eec230c2e92ed1fa04834bc0738b79597c3b0d2a66c787fdd520e63cb3d3", + "343": "1d65c53a04f7eea94ebf76d797c0f79fe3d251bd33e5edc16c780715531b4345", + "344": 
"86d3fb095439bddbc0d6e6e8e433d54aff04350e2da2ad05f53d607113075c8b", + "345": "21db551743591f9cd20fffcedf3bda17f9f178bc9fbca528a56c2c61b9e7c731", + "346": "f326e2241b7e57320914aa279f9ba2e155ea77f809a188958e0b590bea9c3ada", + "347": "0fb6749b98280cc8c26950a2cb9c9dbecac18f8760e161e9bab887dcb0077653", + "348": "0cdb77330ae73fbbd0f287240f82b7547a0ef42d37004003a9c759f86b686d61", + "349": "690ad38e4357b34368966b9de08d89e0c095246bf55969842f373f1976f86062", + "350": "5b427d47f98e296cb78875619fe67d42f41868b78886d560d8fcac89043fe945", + "351": "93dcda27a0c12f0c32cc35f0de161e7f7792d11abe5d4c50d7fd5192ab8b11c0", + "352": "d01c0cd49e7649289a1f13162757de494bb9104b20ac8bdb30a4180df5225889", + "353": "3d856f38821d7b221aaaa9baa3d7927f6e360919e8f8505d7499f9bbd85c44b8", + "354": "36dd3030dec4a8050d2079678250c9c6c86c66c64fdbe7f5b82e79024bb8d5a7", + "355": "b0a915b700e415ba3acc3ef261128680b921b5df9bd6fb1d35c2d1180e7f61d7", + "356": "a309814f13708f2eb5ee8dd1a3114e8f8b15646b8c797bc7119ceaa3f6911f0e", + "357": "61c9c81a41fd294a8f07033c8373706694faab4df3652d310e84904356cf5e6c", + "358": "7d59500b8883d81040173b88462a73849e0d386a53830d599e6a042f4c1c165f", + "359": "0793805920db4896155cbce40fb58570a3cc952d0c15ee57393fa3c6ca7a8222", + "360": "ee8cacd40fb7515e510cbbe7deb6005369ce7d9800ecff897f3fd8721fd6ef71", + "361": "e96f225fa470174b4ac787b21579ad1556804de85c0c83da99a92ddc2c56c7ac", + "362": "9a4ce079c1a882a306e21e0c145dab75a2698cba3860152f03dafc802ad9006e", + "363": "258a6e6ea10385ca3c0cf08377d13ef31135bd9479d5a4983beadf158e19ccc6", + "364": "13aefde214541fab44d2a3013c532637a3da82199fb6c0a1a941c3108f76b9cf", + "365": "0cd978902035027c6898d6b5fc11fb5931f06f8e8ec9c24b4706143c91de9450", + "366": "47495a92574a6d7b150eb3f4338748ba03672ff93162139f98e03847f96551cb", + "367": "fad9203cd26fccb99f0f89fdc569c230eda46cd72ed3fb7e5f6fbcce78ced1a9", + "368": "a237e13fa6c32b66695b8c8de6472d5c93c7650989f047f62a17438c07036845", + "369": "da4c450ba0c4f76556fce54bc3f6b2a754a626cf1f87ba3280f545e023942640", + "370": "5000899cd3070e1937d42a68766c840bdb9629a49c6112bea5cff52fdb4e9f7a", + "371": "7afb55ee21c0447f7b961265abe7ccf87f59af6206949bb1da19fd36334b09df", + "372": "fcf734716ed1fa724e7228a489304e3c55e813734fb5792a83f806ab04e40485", + "373": "83c98f0431cf944440dfe0a9831275ed451b0d16856aba4100f53170c55c2e6c", + "374": "d998ea6616a5a7a9f7beb3ec02f8cbed4a9c5f17be978c31f32ac0f9f4e4460d", + "375": "6a72aba5c61e27e281235b1f001ab68b840f6e8bef0e6bbd7bfd8eec1abf844e", + "376": "980dce9435a9fc03250df4e809c2f68c48601b64c30d32c6e67bf1faa35fe274", + "377": "7b4a0b6958cf23951636b5e27af8041dd9901256c53de44c9be313ffd0a01ea0", + "378": "a1b13bda78da3ccab1af6c330d3e768fce62841f924933285e7b1f7a8b7dcd5f", + "379": "c957fcbb90e1afe9a342e95608ca035596a7dfd4cef398ada55e05a2462aba14", + "380": "b794fae83475a77832f46e69799419f9881bd774e1bfda56773b587c42591039", + "381": "e7208f3630a20b01a5e1bf5d0537be9dae9fd7529773cac12b96c4ac2b0f8dbf", + "382": "70480c0d26a6d76eba0faf3ee047d6214b2ca4d1442070ae5e79893192ffa699", + "383": "3c814d251089cb2a92a78ec3424b2a729cfbbfc6a996fd48148261312364a9a8", + "384": "f709015ae0f8ad20bd2efd94d01af3858234e381942b5b15391ff7f77211b116", + "385": "0bca6cad1f4ff336b93c9f86c4ac872bda67ee0cd41b1862a7a663852717535d", + "386": "3e1748647b60bbf292aacae65b3608ccce8e55e203a36ff062ee787cd8c14480", + "387": "cf592fa81780e727a56553df32410beba6de9c33543dd1ef1155b368ba9a9b9f", + "388": "911326fcfb0638154f69eabb87e4c0c141df09e56274c8522e9c13b7b577f00f", + "389": "cdd56fb06838a10149f2c7229bbc76f78b4a5a58945fb70a47261f1bf635c404", + "390": 
"07dde4848eb878808635fb7b366261b1e9cb158635e76577eecc48ccf941323f", + "391": "76cd3def1eea8e2631d333798f4d282bf40f6254b2d18c02c78cb56b33462093", + "392": "c4f7ecf21a8738c3ad0114a1ee6a2d16668e71b499741381f30827ed451dc817", + "393": "7bbc419f89fde57d2862bfb3678ddab96614693dfca109d0f444e4762a2b7a8f", + "394": "7781ca3332d6da18b1b9be5e2eff634b526ae9e8088f6e479b49d657f4f41525", + "395": "5b5de0def2c4a989a54ae3e748362c78cd018778d5adc4dec13c1bde6ffdc139", + "396": "d42c389d6abc7d8102b8cd1b906e4600da08394388d4dcd432ec955e6d8b311d", + "397": "629e23dc358ed2a8c202e1b870e270e401aecc5d726a679b542df8e6becb4200", + "398": "c30114e73097c3fa4efb203915f3b828b1b8c432ddeab2b7e1ba3fe63c50e190", + "399": "a681ef7bdb22145a3e051ecf7bfb694c18b255c80dae6fb8d49f187d28f3c58f", + "400": "c993a792804e09c9f60313f4144953eec072ca6a8a27f44d8718ce53d9429585", + "401": "074b576ae2054cd030ffcfa132b1465f8f49b836f505cd4bb01af4a98f4f5337", + "402": "d45f88fc3c00673ef7e628d867a54a4ea281b3b2620735cea85a8da3b06321df", + "403": "a09086d3cdab7d6ff8a9fba1746c5d236e0ad0abe088be99bb172e80c6f0f8f3", + "404": "55a774ac3423440dda50d73e472887195940d5e9df605b30deeb0f2528b001a4", + "405": "ee9fa61ae8153df7979be3afe6377e584fbdad624833424a5cff64f6ea94c9da", + "406": "584cba4abd5711b8f558fde97620b8ff0fe91586bad052ccff87c49c13f72555", + "407": "ac50b37409f7ea91f90856bbfa716731013deffb5f5b51540a99736e08e5378e", + "408": "2c12c3cf062c3d9cf2c53e6e4dafce70ca5c7a38c97479c3b013cd91076ecf4a", + "409": "5a55b5fb584c359f4b6ee2d21deb62923b0b25e1b4c3da0a6f351079ce657173", + "410": "9e224b6ab0b7f20759b63d1799b426a8652c9e637b1f38d3eaf8beff73c80c67", + "411": "66c0c1ab79e9887b5daf2c510f2c2c4097044b69fee6bd4ffcff73ad4816b8c7", + "412": "27f1768d99e22f8b55d010b8b7acd904e8b66751d5310d32c4d017a0ad34d650", + "413": "7c99634a1161e424a14d60b516291655096eb90ed055326325d7f5de7a44a3e7", + "414": "4e03e038e99870b1faf45a0a29d6124379d05a0a3553a11aaaa91b8ba56eac5f", + "415": "955e433ea745016af2a5df015f1cc223ddd84ddccaee60d5302b7ad61542d9e1", + "416": "8d07a87b9012a166f5bec4dcd646d5957c9b3633a1a37c40c584ede75cb7ad22", + "417": "3f738338cef45597e3b839536953104186f11d94d16877c77abd8a067c152dc3", + "418": "0c813356b30108f89fb37e8774a98af4f9eca3df49e963f985ecea82a88b1437", + "419": "8ea8d93a9e874f8c8ceeb240f1f1245a077a7c0a62287d3044feaf855b5dae78", + "420": "af7ac1e90e07f189afbb284ae24614d9e713e32098bc39bb81d6484d47351444", + "421": "f45e155846624f37cf2ee08be2a63cb1ca35bf795fb0f770b4c91ab549f22b25", + "422": "69d728f7e25055dbebd41684bc6de61be6b4db4119d7ecdcef5b5d8ead976537", + "423": "3e78c62395be704a59a3a6a65e457725105619e0a6f9f3aa6b311c4f7762b0a0", + "424": "fbd6edb36c3754a35e7de936839c4fd0564db873924ba97b35cd43e065835582", + "425": "ee5bb631b2a9edf8ed05781b192f42e24ae748f3aa4ba5e635374c094d28ddac", + "426": "3e913e088a689d2d33bc797040cea94512bf54a61f96501f60576ab22ed0304b", + "427": "415e6da4c7f92da36e2d8c43fa8056d0050ae127e648451e2fada49bf2c936d1", + "428": "389bded7b0c14212fb69b559fd1ade4f5b235b976c9655365c45481c3afda486", + "429": "3007beefa50c509b89b86c54f53757ff701f795dc5f7ed47a1520c2b092455f7", + "430": "59ec8ec2866ca502ad558ade9f8a06a9ff815a1ed649bd1cb513f417f1d4727c", + "431": "d3f28dffa4e22b3bed74c3c2c9ded1e4a8be49d3757368e4e3efaf7f79affb15", + "432": "59fd80dbc8eb4af9596e4ce8a87313d363da41313351a69ab3525faeb905c27e", + "433": "471a7ddde597fbaaaed1941f42ca1fc0f4f047e17f2197f8999dea98b38213f3", + "434": "319cb430c66d9f418aa90a3d6f9c2dfc8171383d6f4af5803a73684afcf18e15", + "435": "aa29c0119ca84133617c8bc7455afdfcf5b05a569393ff21ebcb10d32ffde2c8", + "436": 
"928f772ad7a9fc501f71cdef6dfe60e2d8cb5d5c5800b519d01afeae0681dd08", + "437": "ea70162a014b8294ede65af6fcdc11fb365ab2b126aef8d47983d58816fd6a54", + "438": "43633662392854b5d9f9f0fa564605212d016c9ea9377d2a6ab52137238d4191", + "439": "42f7e88fab5c9cb31d4bb34403d7958abd5023e9cf9ac05cd29626c5df763584", + "440": "cd08ef4f14b804e3106ee88f9d2b24864d5e2fec6c7cd7dddfa2713e1431375a", + "441": "daa69bac44ce5f57b4b43ab6ece3b2b3561292c0f4c6e82a506ce2973713f749", + "442": "910d2abf184cfd7b1964cec906a79f3e45f59e3d42ec20b22f56de59c9018927", + "443": "7a14ac86724d318e6d40464e710c17625d441d1e7adf83d3062305de2f85d445", + "444": "390877dded07897360921e8d0d126bf45d6a379d47292c90826d775bd1897f2f", + "445": "5ee5723341b0b81c9e0172fcb654f8b24322244bc2d1b55afcb78b180ada180b", + "446": "8b2dcb0168e8701dc9da286489a1e68e43e1b17638e5990edd882196d7fd5a29", + "447": "179af1c75faa5f42e89ce3b41496a64b2d2846361f76dd5d87f4ce97ec2bec07", + "448": "18173b14e0c0bf403b5f0d4aa23515ecf44622b3a860d80e866cd498f107123c", + "449": "22d7739bccf54ea1159ce6aca3e215482deba85a4db0676cf86d82a760c44a6c", + "450": "938bf7cdedab94bd7208b69047014e3d9ab7b54d1223bd649eb3de0bd61ab47e", + "451": "abd88e378f54b649e818d6e1d8e06c9f8cf225ac96b4085523acbb1f0c1df24b", + "452": "4119701c51dd8c457b74a19ed7ae3bdf069f5fd915c8085e9a15d909a39db036", + "453": "381ba093e8ece9e14efc965ee94bb8adbd1c0bf140875ef95f8f11050d5ed489", + "454": "b7613128b0401fdbc07a4015eb3935f6677b84dff936fc5e7e9f424d0ba1006e", + "455": "35ee11c9763f48a68f2b60b4b9c3919d3a895afc7071e8dcac5abd5845dfe79f", + "456": "8b129a3c7163dae86f1f43c18557296240a02bdac70ad29538eb5dce00e51f4d", + "457": "629c99f9af0e962f00b812057c0967861a9b6db9dd652233ac4b37f368d09206", + "458": "02df8a1d11130bde8af932dfc5cafe7d8e6c2fc12b82df5d222a91e4eed8e2f8", + "459": "062b225facc7a897e0e42e6b0f95deeb8b02de64267bf5cea4cb5280ccec1562", + "460": "a05f9a7cb049c40760ea2196eb41df1826ad492e6e5fc4696ce7bfcf7a842811", + "461": "95e5e99da04c0cd73e1818a62be3fc0de98c76d5cbdc81261672824ed5b8c1a7", + "462": "69eafed1b3d4022fc245a8416c1120bdcd039716db8cd43351a96e6c7d10691d", + "463": "018efbd353bb456112cf2c760b4d96aef02aa899ef74d4aadfb3dcf374a22987", + "464": "cd4447e836cdbed7f6a3998b50c4ab467aedaeb8e54c377da34245e90fddbe12", + "465": "da0612471988c89ea2fb190838f9f5e9029fd106330a801e66280c967ff1c52b", + "466": "8d16100c0148ed7bd41003b4a0612cbc5fa150ddabe5f9916ed6eac3fcfdefa4", + "467": "d6ea164cb91d14d6aba2d482926cb6cbd1a3644737a0530abac635083a97b8a4", + "468": "8d0e3f6bff322ff11d1267f1f8303a8ce1e2d796b7dc2d9eb3e3da939dd850b5", + "469": "35e2072f22c7cb980fbe797e30c25e9224328813eb81d07d3c88820492ce9a1f", + "470": "4993f275946ae0d444410821faa3ef4a448f10888c50ff59f7ae01d0b50328d9", + "471": "b9af9323a0237fbf88fdb14b8bce95c084351325249629ffd4fbb32fe9d6da5d", + "472": "5b278c08ab97d82c1779411fb1018b07feac7ddf38a69e4d398240a495c54271", + "473": "4448b03417a784f554c44eb15ad2d4cc022bd9cb5abe2547811eb8085355aaaa", + "474": "1c64fc4076d6b00aff86a180fd9af927b7c1c9ba87a2ca3c83dd80ba5e5ea973", + "475": "e571b4b8218a2961ed2b04f62f816eb18686d82b7f2693694b9c774acef4a0ff", + "476": "a6383ed918d7851ed7503921a64201a032a33c9e1cbd4e08d1233f543bd21be9", + "477": "c871da03e684e099190c4ce787a9588ae85841246ad7bcc9cb4c302d617f881d", + "478": "96d8bec6b787a7aea2da8dfa8a1226e00881afc218c211fc59da830775d55acb", + "479": "b35720df96afbd98c6a4f081ae1173fdce21d63f75f7b455f4c2b9fc0aa672c2", + "480": "2db876e9625c8638c66103ad0206c9a51b68d4c6a3222f403b195a81837856e3", + "481": "bac35824e79af403a2058b08cbc84f8e4df93a21d1766e4ea1de6414e2a8a926", + "482": 
"0f9797e2f3691bc7291d81d1ddd5d88cb4e10b0be555e2ebfbd3c5b12b7cd2b2", + "483": "8f3348df383ec9ee00e18d41c419370d42ca6ebf71c510690aa5435a679b7e4f", + "484": "3b3bac32669c5b66faaa42b89a2dcb4de0bb9aa0bd279d60061dbe9e7039f5dc", + "485": "25d0335a0576f974617351ef5aec889f311fc8d7cddb997862b10b2496842d4d", + "486": "93b9a59a937594d2196271416ea3b2221d32b3b40a04bbebbdf97e8bdc557e0a", + "487": "a643c75a8d062b87a1c8635fdf439c04d949ce01f75dde10ab6edba90cbaee77", + "488": "984593c12abbff5d009091cd3c1883c87efc535f760727ed12f06df0902bfa75", + "489": "926ac61244f94e10270a2d40169de025be6db342b3de7f0db33a50b07176c143", + "490": "e2c8142e501b0b0b808d2d36f5f38266f99cd3aaca7d2f70f4bba386ae1d2025", + "491": "1a1c8b472424f8057c94a9f5e0c0b673551fbe9ea4cde5ca2d90df1de76a5c76", + "492": "345a83966ead821efa2a9de93aeb0fd5bd60a8f50e162caae2447f1f4d9462bd", + "493": "ee7018d63b08bc7226d6f77c2345a87e09fc7cc87b0a003aaf3a4a3f622edffd", + "494": "3d69e540997d79f21f249d4d8f73cd75119d81bcfb8bd80782863249f0d7c62c", + "495": "b717f1088b0ce24851c30d54bc8dad9f3ae93402b91c874e385e5c699323a5e2", + "496": "fbe77ec1978ad86e73e5a3f494fa7c198fe334b511298f5a0f2d04d6a7f51d01", + "497": "a4a66d6c7c555a2997ca59a8dbab512388adf20902293a5617132a16df76d954", + "498": "d71813b8175fa2d70181d87ae8f839e79792516a1cfa99a7e6b29500c057617f", + "499": "477d5b817df8c0b6f0928d02a58fc39fde2224493cec89393bd6dc349e5235bf", + "500": "3ac8e26d4864c538936efa7c5920435107a50c01306adaee5a4aeaa2ef378f7d", + "501": "766448b05b248ac3d6e991baa3e4b2d53b02aac426bda312c2299b2b983e145e", + "502": "50218b55f5b7207438137f2b0c71e3f6d37afd76aa5b1f2106111f3432b4cef8", + "503": "1d7c24799a287d42e97dd4ccc5bbd3713ce139e6294896cc5fe2efb80a1be7ad", + "504": "9878db5eb2218b18568dc8cfa13bc8363a1c93e6a59a05cc76da0588fd54af46", + "505": "872fc20275833f09c8aaef277abfe77f67be6bd443b489e0cb8bdf9d4ca9fac7", + "506": "d66834cc7ebe58cce2ee1c02bb11ae69672d711ead6a0a58ab592339cddbf02e", + "507": "ae955394665befbbc89e2ba85b5e520cb293b8d03209b1f71d78ce2cc807a437", + "508": "3917ce4173af47bfaf8525f0917736bde3f4bee0ed5fae721c3e2fa957ab1675", + "509": "2f64571cd71f0e59006da84808abf3d3ccff9a38884321533d448b3e8e3cae05", + "510": "41ce72f4701e786427413b68fb70bd77d921c06648ca15033ce1926a9f1224cb", + "511": "c9fc787389265492e60d5503f279714d5b19760ea7b2e1a720e6fc0251fe087c", + "512": "a8af6acf3744af13cde63540e37bb9bc722ea19a012656e3a3c5bfff8292c423", + "513": "506b90816555d1083be7d211f02a5db364e5c2337fc85b1ba845c1a806689373", + "514": "e4e9536766181eda627721723bfbdbca85859a3ba92d439f58ac0009c102430c", + "515": "16daaa62fa87776bc4843d226988cc83ee846ceef7b885ab63e10789b30071ae", + "516": "44b6de4eb51dd8f762142f284b154d3153592549cdea3b94467fa95484a4f172", + "517": "bb72c6d437197a8c1f1132626b3b47adb9827f4f9b912d1069cfcc75575371b5", + "518": "ff57c1f518651af805bb4b258130c7c5b0726422c3390327217562088785b4ba", + "519": "159b59f1261b7a31d7172cdc28d9515d0731e5117cb30f34a497bc3bd0496da2", + "520": "bdfb7f17c8c841c0b61ee7f00e51f09e4c78c90f7977548b72050a7aa12dfa3f", + "521": "bd27ca9292c19160cbb0568f750b247fbb805b85f4a2316fcf2c3a35d3ae031d", + "522": "98e0ef155297aac8a4060d204614753f26f6ba5357deb78c683783dc7ae30191", + "523": "9bfc344c80d1200fe12bea3ba4cacf8d5ac9693258962f2f15f42b30ce8ef3ef", + "524": "8df22d8716d7ca6354ea42b8e522d286ff9362cfa5881f527efcf1a953ed1151", + "525": "13dc6d869fbe2c3d95f715e55f02bc3d5787874b4c88d7da1d05360afd2025fa", + "526": "dfbe442040ce9afba654773fb14f307d67ab614267d3feb6b18df03182b5b60f", + "527": "ba634833af68fcf0ca7bcb08fa699b2c5fab934eb81ecd85e7464b01bca131ca", + "528": 
"016f7b569dc1c3466c97754c7dcc0f76c2a32a76c22357cc521bcc330d86daf5", + "529": "4960ff863e3d21a58f9e81c3d94075cb7a4daea5fcf396812382111e462fc57f", + "530": "6a2e45fdfcad65e0ee84d206d59cbac998026d7415d16a5c0b8c55e4a7d6bb3f", + "531": "95ec72fa8c409255d43e7c8d4e957bcb9239534973187b3b4cc2557b09bdba98", + "532": "fee6802490757983c499a08831d9bdc75a9eff08700bd29e8e5c134583ee07b3", + "533": "8a056666bd75d853a12d22b8317042a3f5500cfb21f6698d90ab41e01edcf81d", + "534": "8f65c9feb935e09a04c87143d1b2c63e38f08738199ebcc2758f67ee914d8a48", + "535": "ed8970f8ef1e2374289fc735aedff90b010c311a3b80d16df6bca2d3c250fdeb", + "536": "f82635851b442ec0ee95c5c2b7377ba382aa364cc49ff4e981d509ef324bb356", + "537": "54fc97bb6f3d7c724d4e245df37111c20334972300297fe38b590354fb9dfe92", + "538": "650c7f5f382c295cf6e7fb092db6fdfff164c861bcfcfe1fb38a50268f53f50a", + "539": "0bfb3df290912d8a70dc5e1e2761151cdf2c4b75d4b37c8fdcbed7483ada85fd", + "540": "08f1b2bffa88a9d01eecb8c9da6636b5e668a5478d8876a63ec3a74d7f932205", + "541": "5e59cf440336e86b67c17ed61f7bee7e548c434f475c415294b3b652d1aec606", + "542": "1257b6a3ad900df97f5aabc1e18b9f7ddae8c7d7ad60216ae21b5b7310cbda84", + "543": "8a783bfbe11c7f7b24431a15a0eb582f6fe5f75d1d21a3d55f8d8d81ba6b411c", + "544": "ce93bedef94ffbf62ad449cb0c68e8103a0bd005563ab854daa5e470664b4d7b", + "545": "40c253003d601fd2c90908bffcd8133e77489fe247e74ec03901895318fe69de", + "546": "d40739115f18fee96817266232ff1b8845e7966778fdcc644028fe5c759469be", + "547": "fa32a8de8fdcfc551d808c5dd0ff5545a199027acd32e380959b91f3b3d04643", + "548": "72e66168068b6ffcd2988e24124c8b1dba9a5b52a383a937397575e3c1e3f031", + "549": "a23baaa745a976b4f212836beb81a0a7b42d9f2e923c2412e2c07c63ff660ceb", + "550": "f58ff320639b2c47c76ed8aba487e31da0fd4656c3be6e33807cd00f77456e5d", + "551": "0449ec4d6d5b2e88603e62f3ec0287ed711cff682bbdfe298a197144ab24e80b", + "552": "f125761e8a0d02b17b1dc4be40216f2791727fd4e4bc56f60ebea1925c2fbf36", + "553": "dbb93b2a6cbf972bb1f94d1f8656cd113a09a02cbc44f25737e7d75c986646e1", + "554": "dcfd1e7a4a32ff0fae296b8211b5c9e91ab81844a0308933f598c712c1bc313d", + "555": "cebbca914f917f990202f110e77285132d2a5a3ba9a7475c93e3561d8ba88ea0", + "556": "0d5518ef165979b758fcc8df9c8cf536861f376f8640541ba6112ee7610ed82e", + "557": "0547c86b57c7c8c590f6d7a5131778f5b6ab2eeccc5e819e5fd095a6d4e68b08", + "558": "e763aa1dd494e097251484381ddb057c7d79b739c3f8644b1759e786e12f5b40", + "559": "e48eea4c3b4c9d58fe02739accf31bb64dd9c31623ad4cc06c740463d664c098", + "560": "77a09dc1ea6f1ae669004b8c9429dd83ead1148c62e0d945173edac45d9000a4", + "561": "756d226727e611d4bd22aa33747da2f635eeec070906dbc3262ef29e341e2a6d", + "562": "29de450d6e440c528287b98bcb4b76fb5155ab573df4721467446114661936ed", + "563": "7703d943dbbfdccb90acad65ed7c0eb13a10034ad01809472a55eb3162b7e53b", + "564": "65712c105411e6fc0ed35b9347de8cbaea33b0c5e57cf162f48dc257dd4f05b5", + "565": "2945ef4779089c9e49a9a9f5e2a67ba7e393aa20a955ed9302da6677cb03a9cd", + "566": "95d936e1d454df2e1e7d486c43af387b39a50cb57e57c7712d967bc9ec556f41", + "567": "2abe8af9ee20c6b8ad5034bc31fc1f4f16769595d5b4fc2837db3e76a90ac405", + "568": "fdc104338866e50ae2bffc1ea19719136f639df6c25f38a8680a70e9375a9378", + "569": "25677266de2b900788dfa047cb53f5585c37b564b3a711243fad52e186ec184a", + "570": "9101edb48d98c3742ceb713de591d261b79e90481d28f83f2d2c74d7034f4b46", + "571": "c364dde8cce2080d073eb1f9666cca97ccdeba61b2bf19ca0c84987e6f8d3576", + "572": "9cc3049e9464376b95fb88d6fff4331e0e40196f92a0a9aa1c5d10dfe33079f7", + "573": "2ade73491e183608b340f312d08cfd39c10ecb581c87b873443590452580a43e", + "574": 
"96325b210d18a7a1d6873e00a859648c4754bd4c91c324aa812ed78bd047118b", + "575": "33942a261a9150e2b5ce2ffe5b934a81f3972cf5aa5a9414a9d5f63f6b55324b", + "576": "7fca01a835681914b5fe5014d5649b5170faf459375ccc2bf9ad71ebaa73940c", + "577": "2bdc7a0e8adacf885c6ea0f6534b935b8a9dd338c5dcff05a74c162c3e9dd531", + "578": "ce6b6de1d907c8839b84f5f3967f6af7e9a3644a0bd7dffe80cfe531de08f8ca", + "579": "03dbff2575902a3c56a64483c8e8ca38d9888f72c6a71a6236eb07b808fb24ab", + "580": "96892003c30358ed55a39e13e6159fad09ebc3916e34492b91d63832fa86f731", + "581": "4fc5533c52133e54f8b54dcbfc4555638ae809676dbfec9d1400ab032f30648d", + "582": "7ba9b154acf699c8a123df5471fd40ad556cf6fc630136c686c87b09c88ff546", + "583": "ec10ea6801eadac9ae8ead5f222e0580f419b67d2ad5cd5c8ac914dcf5cfd69f", + "584": "510001c4104c80517a13f967df6ee071f15fb7b65e97229bc91b2925cbe4e93e", + "585": "ced737da53940337c5dff81720024fbaf4cee38aed1d3514d2a75c7b1271acf8", + "586": "9ab074d1d480d718930c9abac8b616a0bc5c30846381d6d9bce1741e9bca1991", + "587": "ec3fdcd8136188e3b476270894351cdc05dc44a4df50d1c4ed727294fb89430f", + "588": "31400607f95129fcc531604b7b0478a748d2495746280dc07ff30e39cd6f4a97", + "589": "3051de9b2a7ced941140aa1074952029f532e133beb41c18bfd990f43bfbd9ae", + "590": "4af295f83800334d77a04d56be7524ff6241e3d8b2f23820c9c54580b7996086", + "591": "2ecf2c1ab8d9e5cef5224842732af17bd2259598e4363e1d46cb172dccc39022", + "592": "2e71a26370d45781f31ede0c7810c2705706ce63291a52d5cd6f060ae16aeb01", + "593": "423867f77b64f725f823204796301ae09b427190cdbb62d472bc1395507da9a2", + "594": "6c28830e35913c59000dfce4432db255f7dd34809285881f05a9e9749f5d8452", + "595": "53fc00ae32e0b0d701175ac17ac0b91e05859ae6d7f3e5bf0548dad36e3d68f9", + "596": "9ccbee33387383d458e7ffa2c9c0cb9e4f5bbe3d1b949463a98232ae67d29956", + "597": "921102754e24e8ba99480e77652d88764020202e6dcd67adddbb1660204e8e78", + "598": "430f975f490ce37df74bc346556cb2186f7a47a58d3b282ab42f35b33a812f7c", + "599": "b603988248769444a1566b058ef3660cac528086b8193efd6d0be4080b834780", + "600": "dd539cd38fade63aa0d14899c7c75ff459ab839148b15b4efacd4bdfa0408dae", + "601": "571c5ade4cd89b460b7d2568a44d1efb05e2927ec840d8ecf149dc9e0ff09734", + "602": "edef32d6c2c7193b4b30a0e2c7d3ab37e0ec21db62543f4bf78169b683792e41", + "603": "cce7491b7ddf0e3ebde191e0e57614e61602cfaa2b52be5c2d657d9ae5e1f1b1", + "604": "d08fe0e5c0fc10640043f9d645446e23fa8efbfdf29c93c87794e5b6405ff51e", + "605": "1bdd74af73e2434db6149fd8089bd294defe3cedfaaf92f532568ddc6c48e2ea", + "606": "30e44b49f18048323d1c1bf4631587df8f0dbd477ebc79b7ef860a792953d932", + "607": "2d9b6a1b4810a39471e5dae85eadf595fc108097eeda746c8925a7be057464de", + "608": "cd3fdc5ee5b6e606349b9e5775d6e632e0424d6190f632632bd7435d5622b20d", + "609": "8b86933e27e64e6840bedc8087fa31326d9527a424c63ecc61823894c81f867d", + "610": "a781fd7cb6970e8f6f679296be5bb0fe7ea62207caa7ce86635257186a5a70d9", + "611": "4a3a0b9877d68deb8d7db624ec2d7f4b1c467fe337f803a220292ac6131acc05", + "612": "6e95bb170c3a521fc7befa446cad879a36b7b3d0e0e8eab1df6ddbd753156ab7", + "613": "afe8c7002c5e15859be829b4b69f0da00c1298971d5afa469b050016fc021978", + "614": "f85495a58ad9d5c4d16167084bbc3581ea22e6dfc39423b70d7fe486e316d951", + "615": "8da9fc3356df220081c71ccfc9c67251e6dd7058fb11258ecfc88ea9b8c00c92", + "616": "0fadf4975e2c27aae12447e080505d604258102f61c8667a5c2594ee033567e8", + "617": "06d9e8723de7ffd20129f1d8b5993926a97cad1261dc0cf01a37d8fa728ee996", + "618": "04d0dc62694f26c61871d8129259540884ad2296a3cf455f6b92fc911f98c336", + "619": "a93d0ec83cbbd4ec0866c97b372e4374a9d6724cc3767f5230e8316734cbb0eb", + "620": 
"071da5dc1dd87c2558b45247c29a92092bc5a00ef3cd46d70d08e18b791d2926", + "621": "458ca388a6b74c57ae13d1233984d5b66abb1f18dbfa12aa14ba868a9b5a708d", + "622": "1ad0227dc5f8c259ada5120d9db05ac7a013bd1bd84cbbca2f0ae6b174dac129", + "623": "d82d0401e10767b022417dbb64d348bc6c03ed4bb7e4553493e8d9e65525d229", + "624": "1d25005c86a9635d3483ea63ce95fa097f95792ebab86319c12bc66ea1d2ac83", + "625": "3fc397ed884cabc16bf30bb7487c8211424a08279a166d4fa4da6dc151a02cd1", + "626": "7c42e09e504cb269512dae989ee7fffe1f3bfea499c990e8edea796761331ccb", + "627": "5062b75aa39c974a579b0a3360c4da32e481d2242de72106f651c7d7de631cf1", + "628": "dc656eef13928f18d14a9265be6a923bc7d76048b861cdf1523e397801a8ef52", + "629": "9eefedc5b5995658be337f48146e37020db4ed3bb61e2af1fc57f698bd398b0d", + "630": "6e17ecc4a4d07ffbd67c49a59d31b7efeabd3bfead49fdf1ec005836e6030ebf", + "631": "781372694518c122f62566aec8867772e492fefef32c00e24b5604297dc1d44c", + "632": "c978055ae1d71dfdfd8bb4e845bb82fc4211b14560bf6001edefa4367e1d4403", + "633": "af4ff4b546369974642b3f68d4d3e90f0a0496b3b5d1572b638378fb49c7b4fa", + "634": "f6af89331ee087a2fc03e0bddd738e2716b49ed616ceb3b47743cf3806c6d8c2", + "635": "e4251ea6989571d8b83993560b537b7a9d0777ba54e6941757580cbfc14aab5f", + "636": "dc15b5ccabd8fd3141c244b7dbc6fe95078299ea3ce3016cbb483893fcdd4236", + "637": "053571be83ed06ab23a96d4e8fa129a4ce7e740de17dc35b000fb56c35a5ab80", + "638": "df57e1f418a24e38b39011048084c6b5cc91a56c1deb643ab605e0350f329b4b", + "639": "56930902baea90d1a8e505a227e5d7ac4da6b60f6c370ab75a0011cb3746818f", + "640": "c105d171242fa8e35f26491ba2f932d1577dfab2a4a6e75034ae69f062e8aa71", + "641": "0f6c3873a87ce630accf7f3b19feb764aac3fa0c3933042a817a82e6a9963aea", + "642": "c20081830b70a00d1bcc6f4b6572d511d534986c10ea3c057db304a1f26df2da", + "643": "143de023a92c7c8ce5fc0b839644e897267c44c8ba4e715743dc99686415a8b5", + "644": "7a8c9f1db1b1bce9a3b8d91e5b1a39a92a478029d975f5e45d593b7ca81a7134", + "645": "932a51e4c0cc5e30041ca5db1fd0674820638563a9df1300bece7df12c23017e", + "646": "a49cbd2966ea8248816b0a53b6eedea4aef2525aac45272b862d7d52e604625a", + "647": "40ad98735f3b5417ea1916f6146b69b7659963263caed186abf0790de0d9dae9", + "648": "53b288529a83c376f2399e986e5ca25c5993a6640063386fdb2de491afba2e81", + "649": "c0bebda0473186148087feb9828a418ab8d50726a1ff5c39ec69c4a6232c6b67", + "650": "98ac68d2bc42f89dbe97b3392ac691ed6c2c4f36a44665555bf7f816ca97cd27", + "651": "81f8287532f504b4f4a21e6d6ed573845bff197c479fb52e4c5b6f2fc1cfc40f", + "652": "fa32d8e7c1c766a6126b0f1cdd9d752cad55f54d0c05839e89d4da238615d9ed", + "653": "311cec39a42837f803ce8cfa5e6df32cc27fe541de108e3e7cf7ba3242e414ee", + "654": "f2b3d205c2da66cdf9a596e2caa1098132b832758eea2b14da071b8dd9584ec9", + "655": "39517ea688972769cccd46ad15b4f06ac2a6175d053dc97f849fa11a63a163e8", + "656": "2a21e5e89d9b019c1195c50af7c6e1864cbab05068d10e11519fb6d4766ceae5", + "657": "bbf54db41dc18753a3caa5001aae99c0c998e8a07b6e7390932054d7882498e3", + "658": "ca8e7b53b095939e5fafefa56e9b45b40c396145acf2a767f9f2430fbba75a79", + "659": "3d6c8492fbfe1c76e3f9d66485a7447489b89763623127deb6ed327a0c2a011b", + "660": "23d754ebe35981ad5de850f66bb2294a22280a8ad0b4160b1c29dfb5487505d9", + "661": "fd7eaca9690ee0384770e855ed600c96080c5c23565bfdae01c6045a87d9550a", + "662": "b93bc0a52860ee0a1fdc28adeca7b39288b1119e0f318467f0a193236e00f99c", + "663": "f73e3335b21c11b78987deb5a6eace1cef327981322f53a070adfbe31b56e7d0", + "664": "f3f0de955603850bd411690d11a5391e63f515a29e31e9241c66c62d688bcf72", + "665": "f2650e75f39098e5a114077b6e07bc15325adce22e1ab4b20569a4eeda5c6ca6", + "666": 
"a01a34d1c29aff5618a96046605adb74fa49b834975051d4ac82672567727a21", + "667": "2db51646a4038b38c88512738f79bb21776d39c7bfa3086538cccba0b63024db", + "668": "2f3336b7f1211fcc180cd76dc6442fecb412771aa45ef1a7675aa437d04e582b", + "669": "28bf022d827392eff1ec8ec121767ec24778f1b69da8605b4ab059023b8ad28a", + "670": "38db5dbb2a3ce2d31d1958f5b3ca4c3555eb0ac4193ebffa3f42ffd6bb4806e3", + "671": "0580cf2ef8abd3afbf91fba2032c2d51e43306bebb7f979bb750c3d7bd14c961", + "672": "394f1b74ccfac5a4fa958d813b5932371c5f8c2f3dbd1eb7202af2223aa08afb", + "673": "61c90400cd197b8ea6d7de90fcd1af0959fc37625fe163363fdae0ac4a724bfd", + "674": "6037c38f696b10fb531c26396890cd3b48d5408c5b37e61d03a72ae2f7b64ed6", + "675": "39c8b1bd1d534381b811bd8050e54b753106c1bfaf5d3cc63d8fe92a94471915", + "676": "346d1e2de9915fa2f4ce3675ccebadccb8e9d14239f1e53b6d08d09f5c26297d", + "677": "36841bba8f77d669e9d8f4e09ec580ce2c7a36c37da719815e65cc641eb1fdeb", + "678": "09532ddbaffb710f02964e270f8658bd8a09149744726a82f618708b35a5fa26", + "679": "774f8d6f89a5875342b21e8337aa5e3ab0539960a5b42554bc8d8a0fffce7d65", + "680": "48d62baa62c2a9c561612192ec39a7dbcecc8badadc0ddc755191648597a42f9", + "681": "7adc09dd86f3e73979d9f8a4b3232102ca52bc41d043614fe989cd908ed88c76", + "682": "522f0ff3ae2f1761dca78207dec1c9b52556eba2db7503ca03441abf62f65c76", + "683": "376e3c3e4b88ee76cb0f02390751a7248fcf1562013b1390b1e276a3f3d7da63", + "684": "6363f306f081683781908acd4bedd92b3a75c796243cdacadc4b9896d8cfaaaa", + "685": "29f2c4c5325cf626b392a910e6e22b6d2a989bfbb38439c20162b7b786b5e2f8", + "686": "990ae3583a1f7a32b7581a8ace626511c812e0bd910b8064fefb31e625b9c22d", + "687": "7e78b4b91851b976f5cc2a1341b9389ae7bdd0248ae7f7c53e7ebb2d86bbc73c", + "688": "1ada92e769892b4bb540d75cbf40017a24b5b309b28a097ed62eb7f2727518e7", + "689": "17a0ba5b100d0a92f3f82e2e6f31c71a6ca53a0f043094a6419331e22036150b", + "690": "f9658a8f0687d69f420f655c500304c3c0888f298a68075ab6a2165a3bc47c53", + "691": "3ff8aa53eb2f7e700fdc7cb838ca7f7b495948bb997ef70d196c10592fa64680", + "692": "c01c3e579b2743866cd3d0c1d9039871356143a99c572593d2702f387e9f629f", + "693": "c08e2dd3686459c2989cd6a367d2cc64b2bc2af460417102e9856e91b5f78fa4", + "694": "063e59bfd9cbed08afa508954ac9c1c313b80331d6a917fd2202e15e1eeb00e9", + "695": "c3259eeed96a5837a6630fd9d1245de7c77e10d0733b6129a3dc99548bd92800", + "696": "9ab20a4d8c3c0de897a1c8afa95733d0f7f79870c6379064ef4cf1f5baae67e6", + "697": "62c07adf4da24a20a723f6c32e35a51f2b942e363dc9fa35070e34991a5a9c1d", + "698": "632f1a4eba12f5c80401d82c4bad7c5679f55ccc89bf2da3e3930ff3d6671ba1", + "699": "8c40c5c92fad7ed2774080ddd39f62cdc94ca05dde4273344497ab4206499484", + "700": "3dccee8e873d2c9c2f8359417e666b702f97b60b90b229e3c41190909ff9388b", + "701": "65a57fc7ebcdab77821276a1eba1c1a625bf2bae575b025359de492592ded205", + "702": "c1b0ade78aadbf0d5576489c2200439ef825fe74452115edbc908e9ff955efc0", + "703": "1e5ea7fffdcdbca5fc91694b200db8e2e3737e829b7694e4dcf3b937b41be330", + "704": "9ddf38880f294ac1a759c764c394cacd4635735880f326a0b5e4a896e4fdce8c", + "705": "2bb033d9eeb9157fc6ae835e99b9523bfb1d61173cfb34941cbfdc4c0d3ea67e", + "706": "51a0e8daacbd6537efd583c48c5815a9bd22fef0eb9b8e15dbe2ee87c76e2a6b", + "707": "9f50d3b52dc4ebae279c6f6021258ca8cd60b8cd13e358f29a2879caa390a774", + "708": "42e0a9be7737aaab1fd27543c0273f4c97dd3bd6471e6ec04b1fc7b79542db71", + "709": "ac2605c16873ea2b5f0ce5008089a55e37588f45313ad06ccc7dfd96f407eb8a", + "710": "09214942caed4184e7155b4016b1e0de37c0a142deaebee3879c770438a28276", + "711": "8d8ea19a78bcb10e502f91a057bac1b200ab17db66e11cdf42b63ec65a8e6c18", + "712": 
"001493340cc232a48125f958308be6d0567ff2684e0625e55af8b0a024c4ccca", + "713": "98a124df4ffa11cca86fbd959f4d091665fc871a4a86cc1024429d1c116b556e", + "714": "cd175b00873a9a3369c628861c1f20df57a4ca75074530ebf5b974d04b8b93c4", + "715": "cdb954d8620ad2d95915f94243cdcf71170cfc363334b2f831544f55f0d15746", + "716": "abb62293fb9df9bc7a6e80ea24f0da1049f894ade937367e24563a3277f953ef", + "717": "319369720bf1831be4c73600c26f5d08dcf6cf85fd32340c28263e39c1dda5e6", + "718": "412ce061b1ae228d2226fdb3bf2cb68421870465d6a8cf7ae58515c02fe54684", + "719": "c461587d4f3a41c375628e94fb9f971cc2829b8608d3c7aca840e62a6c8f1929", + "720": "3651d0d1f023c90e42be5c6ccf28ca71203d1c67d85249323d35db28f146786f", + "721": "8430fc43038ba44efb6e9ecbd5aa3dfeaeaf73f2d04a2d5596855c7de5de9c20", + "722": "9687101dfe209fd65f57a10603baa38ba83c9152e43a8b802b96f1e07f568e0e", + "723": "74832787e7d4e0cb7991256c8f6d02775dffec0684de234786f25f898003f2de", + "724": "fa05e2b497e7eafa64574017a4c45aadef6b163d907b03d63ba3f4021096d329", + "725": "005c873563f51bbebfdb1f8dbc383259e9a98e506bc87ae8d8c9044b81fc6418" } diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index 364c71e2fb1c..68461dca6710 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -1,10 +1,10 @@ #!/usr/bin/env python3 +import hashlib import importlib.util import json import os import pathlib from types import ModuleType -from typing import Dict, List import pytest import requests @@ -15,7 +15,7 @@ ) with open(PROJECT_EULER_ANSWERS_PATH) as file_handle: - PROBLEM_ANSWERS: Dict[str, str] = json.load(file_handle) + PROBLEM_ANSWERS: dict[str, str] = json.load(file_handle) def convert_path_to_module(file_path: pathlib.Path) -> ModuleType: @@ -26,7 +26,7 @@ def convert_path_to_module(file_path: pathlib.Path) -> ModuleType: return module -def all_solution_file_paths() -> List[pathlib.Path]: +def all_solution_file_paths() -> list[pathlib.Path]: """Collects all the solution file path in the Project Euler directory""" solution_file_paths = [] for problem_dir_path in PROJECT_EULER_DIR_PATH.iterdir(): @@ -46,7 +46,7 @@ def get_files_url() -> str: return event["pull_request"]["url"] + "/files" -def added_solution_file_path() -> List[pathlib.Path]: +def added_solution_file_path() -> list[pathlib.Path]: """Collects only the solution file path which got added in the current pull request. 
@@ -70,7 +70,7 @@ def added_solution_file_path() -> List[pathlib.Path]: return solution_file_paths -def collect_solution_file_paths() -> List[pathlib.Path]: +def collect_solution_file_paths() -> list[pathlib.Path]: if os.environ.get("CI") and os.environ.get("GITHUB_EVENT_NAME") == "pull_request": # Return only if there are any, otherwise default to all solutions if filepaths := added_solution_file_path(): @@ -90,4 +90,7 @@ def test_project_euler(solution_path: pathlib.Path) -> None: expected: str = PROBLEM_ANSWERS[problem_number] solution_module = convert_path_to_module(solution_path) answer = str(solution_module.solution()) # type: ignore - assert answer == expected, f"Expected {expected} but got {answer}" + answer = hashlib.sha256(answer.encode()).hexdigest() + assert ( + answer == expected + ), f"Expected solution to {problem_number} to have hash {expected}, got {answer}" From 4ab39c20019b1fe41c1b24d2bc667fb852aa62a3 Mon Sep 17 00:00:00 2001 From: The Data Lady Date: Mon, 17 May 2021 04:36:14 -0700 Subject: [PATCH 0135/1543] Removed keras dependency from requirements.txt (#4374) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 349d88944656..76eb109ee3b9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ beautifulsoup4 fake_useragent -keras; python_version < '3.9' +keras lxml matplotlib numpy From 002c545aee474626a6ffdaf05af2d431645e14f8 Mon Sep 17 00:00:00 2001 From: onlinejudge95 <44158581+onlinejudge95@users.noreply.github.com> Date: Mon, 17 May 2021 17:28:51 +0530 Subject: [PATCH 0136/1543] Removes python_version condition on tensorflow (#4435) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 76eb109ee3b9..8bbb8d524ed4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,5 +13,5 @@ scikit-fuzzy sklearn statsmodels sympy -tensorflow; python_version < '3.9' +tensorflow xgboost From 7d7c7972aeba412c14384fa1a6c9009c1260b1ab Mon Sep 17 00:00:00 2001 From: Jenil Shah <60750701+Jenil-S@users.noreply.github.com> Date: Mon, 17 May 2021 17:58:04 +0530 Subject: [PATCH 0137/1543] Updated name from lstm_prediction.py_tf to lstm_prediction.py and also imported keras (#4422) * Updated name from lstm_prediction.py_lf to lstm_prediction.py and also imported keras * Edited the changes * tensorflow 2.5 is has shipped!!! * Update lstm_prediction.py * Update lstm_prediction.py * One blank line, not two? 
Co-authored-by: Christian Clauss --- .../lstm/{lstm_prediction.py_tf => lstm_prediction.py} | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename machine_learning/lstm/{lstm_prediction.py_tf => lstm_prediction.py} (95%) diff --git a/machine_learning/lstm/lstm_prediction.py_tf b/machine_learning/lstm/lstm_prediction.py similarity index 95% rename from machine_learning/lstm/lstm_prediction.py_tf rename to machine_learning/lstm/lstm_prediction.py index 5452f0443f62..6fd3cf29131d 100644 --- a/machine_learning/lstm/lstm_prediction.py_tf +++ b/machine_learning/lstm/lstm_prediction.py @@ -6,9 +6,9 @@ """ import numpy as np import pandas as pd -from keras.layers import LSTM, Dense -from keras.models import Sequential from sklearn.preprocessing import MinMaxScaler +from tensorflow.keras.layers import LSTM, Dense +from tensorflow.keras.models import Sequential if __name__ == "__main__": """ From 8d173438c38cb7d92f6daf2c0e3405067bf4166f Mon Sep 17 00:00:00 2001 From: Cere Blanco <743526+cereblanco@users.noreply.github.com> Date: Tue, 18 May 2021 22:54:34 +0800 Subject: [PATCH 0138/1543] Bit manipulation: get the bit at a given position (#4438) --- .../single_bit_manipulation_operations.py | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/bit_manipulation/single_bit_manipulation_operations.py b/bit_manipulation/single_bit_manipulation_operations.py index e4a54028d9ee..b43ff07b776f 100644 --- a/bit_manipulation/single_bit_manipulation_operations.py +++ b/bit_manipulation/single_bit_manipulation_operations.py @@ -74,6 +74,26 @@ def is_bit_set(number: int, position: int) -> bool: return ((number >> position) & 1) == 1 +def get_bit(number: int, position: int) -> int: + """ + Get the bit at the given position + + Details: perform bitwise and for the given number and X, + Where X is a number with all the bits – zeroes and bit on given position – one. + If the result is not equal to 0, then the bit on the given position is 1, else 0. + + >>> get_bit(0b1010, 0) + 0 + >>> get_bit(0b1010, 1) + 1 + >>> get_bit(0b1010, 2) + 0 + >>> get_bit(0b1010, 3) + 1 + """ + return int((number & (1 << position)) != 0) + + if __name__ == "__main__": import doctest From 368ce7aecc2acd5d3b472dc9141d160d98164353 Mon Sep 17 00:00:00 2001 From: TANMAY SRIVASTAVA <77936821+ktsrivastava29@users.noreply.github.com> Date: Thu, 20 May 2021 13:58:00 +0530 Subject: [PATCH 0139/1543] Added a hex-bin.py file in conversion.py (#4433) * Added a file that converts hexa to binary * Added file to convert hexadecimal to binary * Update hex-bin.py * added type hint in the code * Added doctest * Added code to handle exception * Resolved doctest issue * Update hex-bin.py * Modified convert function * Added WhiteSpace around operators. 
* Made more pythonic * removed whitespace * Updated doctest command * Removed whitespace * imported union * Replaced flag with is_negative * updated return type * removed pip command * Resolved doctest issue * Resolved doctest error * Reformated the code * Changes function name * Changed exception handling statements * Update and rename hex-bin.py to hex_to_bin.py * Update newton_method.py * Update matrix_operation.py * Update can_string_be_rearranged_as_palindrome.py * Update hex_to_bin.py Co-authored-by: Christian Clauss --- conversions/hex_to_bin.py | 56 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 conversions/hex_to_bin.py diff --git a/conversions/hex_to_bin.py b/conversions/hex_to_bin.py new file mode 100644 index 000000000000..e358d810b581 --- /dev/null +++ b/conversions/hex_to_bin.py @@ -0,0 +1,56 @@ +def hex_to_bin(hex_num: str) -> int: + """ + Convert a hexadecimal value to its binary equivalent + #https://stackoverflow.com/questions/1425493/convert-hex-to-binary + Here, we have used the bitwise right shift operator: >> + Shifts the bits of the number to the right and fills 0 on voids left as a result. + Similar effect as of dividing the number with some power of two. + Example: + a = 10 + a >> 1 = 5 + + >>> hex_to_bin("AC") + 10101100 + >>> hex_to_bin("9A4") + 100110100100 + >>> hex_to_bin(" 12f ") + 100101111 + >>> hex_to_bin("FfFf") + 1111111111111111 + >>> hex_to_bin("-fFfF") + -1111111111111111 + >>> hex_to_bin("F-f") + Traceback (most recent call last): + ... + ValueError: Invalid value was passed to the function + >>> hex_to_bin("") + Traceback (most recent call last): + ... + ValueError: No value was passed to the function + """ + + hex_num = hex_num.strip() + if not hex_num: + raise ValueError("No value was passed to the function") + + is_negative = hex_num[0] == "-" + if is_negative: + hex_num = hex_num[1:] + + try: + int_num = int(hex_num, 16) + except ValueError: + raise ValueError("Invalid value was passed to the function") + + bin_str = "" + while int_num > 0: + bin_str = str(int_num % 2) + bin_str + int_num >>= 1 + + return int(("-" + bin_str) if is_negative else bin_str) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b11e5314b76c66709f1c2620e43827e56ea07ef5 Mon Sep 17 00:00:00 2001 From: Tobias <38182275+Txbias@users.noreply.github.com> Date: Thu, 20 May 2021 19:15:51 +0000 Subject: [PATCH 0140/1543] Added implementation for MSD radix sort algorithm based on binary representation (#4441) * Added MSD radix sort algorithm * Fixed typos * Added doctests * Added link to wikipedia * Added doctest and improved code --- sorts/msd_radix_sort.py | 80 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 sorts/msd_radix_sort.py diff --git a/sorts/msd_radix_sort.py b/sorts/msd_radix_sort.py new file mode 100644 index 000000000000..ee152bbc696a --- /dev/null +++ b/sorts/msd_radix_sort.py @@ -0,0 +1,80 @@ +""" +Python implementation of the MSD radix sort algorithm. +It used the binary representation of the integers to sort +them. +https://en.wikipedia.org/wiki/Radix_sort +""" +from typing import List + + +def msd_radix_sort(list_of_ints: List[int]) -> List[int]: + """ + Implementation of the MSD radix sort algorithm. 
Only works + with positive integers + :param list_of_ints: A list of integers + :return: Returns the sorted list + >>> msd_radix_sort([40, 12, 1, 100, 4]) + [1, 4, 12, 40, 100] + >>> msd_radix_sort([]) + [] + >>> msd_radix_sort([123, 345, 123, 80]) + [80, 123, 123, 345] + >>> msd_radix_sort([1209, 834598, 1, 540402, 45]) + [1, 45, 1209, 540402, 834598] + >>> msd_radix_sort([-1, 34, 45]) + Traceback (most recent call last): + ... + ValueError: All numbers must be positive + """ + if not list_of_ints: + return [] + + if min(list_of_ints) < 0: + raise ValueError("All numbers must be positive") + + most_bits = max(len(bin(x)[2:]) for x in list_of_ints) + return _msd_radix_sort(list_of_ints, most_bits) + + +def _msd_radix_sort(list_of_ints: List[int], bit_position: int) -> List[int]: + """ + Sort the given list based on the bit at bit_position. Numbers with a + 0 at that position will be at the start of the list, numbers with a + 1 at the end. + :param list_of_ints: A list of integers + :param bit_position: the position of the bit that gets compared + :return: Returns a partially sorted list + >>> _msd_radix_sort([45, 2, 32], 1) + [2, 32, 45] + >>> _msd_radix_sort([10, 4, 12], 2) + [4, 12, 10] + """ + if bit_position == 0 or len(list_of_ints) in [0, 1]: + return list_of_ints + + zeros = list() + ones = list() + # Split numbers based on bit at bit_position from the right + for number in list_of_ints: + if (number >> (bit_position - 1)) & 1: + # number has a one at bit bit_position + ones.append(number) + else: + # number has a zero at bit bit_position + zeros.append(number) + + # recursively split both lists further + zeros = _msd_radix_sort(zeros, bit_position - 1) + ones = _msd_radix_sort(ones, bit_position - 1) + + # recombine lists + res = zeros + res.extend(ones) + + return res + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 92836d57f6efa8b619d71ac5824189d74b93876f Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Fri, 21 May 2021 14:31:56 +0530 Subject: [PATCH 0141/1543] feat: action to approve workflow run (#4444) --- .github/workflows/approve_workflow_run.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/approve_workflow_run.yml diff --git a/.github/workflows/approve_workflow_run.yml b/.github/workflows/approve_workflow_run.yml new file mode 100644 index 000000000000..ff12d892532f --- /dev/null +++ b/.github/workflows/approve_workflow_run.yml @@ -0,0 +1,21 @@ +# https://docs.github.com/en/rest/reference/actions#approve-a-workflow-run-for-a-fork-pull-request + +name: Approve Workflow Run + +on: + workflow_run: + types: + - completed + +jobs: + approve: + runs-on: ubuntu-latest + if: ${{ github.event.workflow_run.conclusion == "action_required" }} + steps: + - name: Automatically approve a workflow run + run: | + curl \ + --request POST \ + --header "Accept: application/vnd.github.v3+json" \ + --header "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + --url "https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.event.workflow_run.id }}/approve" From ac29f707555baaf0c38963312b291cd9a210b535 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Fri, 21 May 2021 19:03:56 +0530 Subject: [PATCH 0142/1543] fix(action): correct indentation for types key (#4445) * fix(action): correct indentation for types key * updating DIRECTORY.md * refactor: add quotes around name key Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- 
.github/workflows/approve_workflow_run.yml | 8 ++++---- DIRECTORY.md | 4 ++++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/approve_workflow_run.yml b/.github/workflows/approve_workflow_run.yml index ff12d892532f..ff6ebd99653e 100644 --- a/.github/workflows/approve_workflow_run.yml +++ b/.github/workflows/approve_workflow_run.yml @@ -1,18 +1,18 @@ # https://docs.github.com/en/rest/reference/actions#approve-a-workflow-run-for-a-fork-pull-request -name: Approve Workflow Run +name: "Approve Workflow Run" on: workflow_run: - types: - - completed + types: + - completed jobs: approve: runs-on: ubuntu-latest if: ${{ github.event.workflow_run.conclusion == "action_required" }} steps: - - name: Automatically approve a workflow run + - name: "Automatically approve a workflow run" run: | curl \ --request POST \ diff --git a/DIRECTORY.md b/DIRECTORY.md index 26929255d1a0..59365e047061 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -108,6 +108,7 @@ * [Decimal To Binary Recursion](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_binary_recursion.py) * [Decimal To Hexadecimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_hexadecimal.py) * [Decimal To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_octal.py) + * [Hex To Bin](https://github.com/TheAlgorithms/Python/blob/master/conversions/hex_to_bin.py) * [Hexadecimal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/hexadecimal_to_decimal.py) * [Molecular Chemistry](https://github.com/TheAlgorithms/Python/blob/master/conversions/molecular_chemistry.py) * [Octal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/octal_to_decimal.py) @@ -382,6 +383,8 @@ * [Linear Discriminant Analysis](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/linear_discriminant_analysis.py) * [Linear Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/linear_regression.py) * [Logistic Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/logistic_regression.py) + * Lstm + * [Lstm Prediction](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/lstm/lstm_prediction.py) * [Multilayer Perceptron Classifier](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/multilayer_perceptron_classifier.py) * [Polymonial Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/polymonial_regression.py) * [Random Forest Classifier](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/random_forest_classifier.py) @@ -855,6 +858,7 @@ * [Iterative Merge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/iterative_merge_sort.py) * [Merge Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/merge_insertion_sort.py) * [Merge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/merge_sort.py) + * [Msd Radix Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/msd_radix_sort.py) * [Natural Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/natural_sort.py) * [Odd Even Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_sort.py) * [Odd Even Transposition Parallel](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_transposition_parallel.py) From 32e9072627bd39e5937bd8b9003a8db956a4cdaa Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Fri, 21 May 2021 19:25:59 +0530 Subject: 
[PATCH 0143/1543] fix(action): testing and fixing errors (#4446) * fix(action): testing and fixing errors * fix: testing if all is a valid entry for workflows * fix: more events to trigger workflow tests * fix: double quotes -> single quotes * fix: add workflows name to the list * revert: remove added events This reverts commit 3daeeb2ba34b8a9cde93fce2cac682378aea5e9a --- .github/workflows/approve_workflow_run.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/approve_workflow_run.yml b/.github/workflows/approve_workflow_run.yml index ff6ebd99653e..7c5e64452d5f 100644 --- a/.github/workflows/approve_workflow_run.yml +++ b/.github/workflows/approve_workflow_run.yml @@ -4,13 +4,14 @@ name: "Approve Workflow Run" on: workflow_run: + workflows: ['build', 'project_euler', 'pre-commit', 'directory_writer'] types: - completed jobs: approve: runs-on: ubuntu-latest - if: ${{ github.event.workflow_run.conclusion == "action_required" }} + if: ${{ github.event.workflow_run.conclusion == 'action_required' }} steps: - name: "Automatically approve a workflow run" run: | From b913a0d83aaf2328108c2fb8b47c5d253116cef3 Mon Sep 17 00:00:00 2001 From: Tobias <38182275+Txbias@users.noreply.github.com> Date: Mon, 24 May 2021 20:36:57 +0000 Subject: [PATCH 0144/1543] Implemented MSD radix sort algorithm in-place (#4449) * Implemented MSD radix sort algorithm inplace * Fixed formatting --- sorts/msd_radix_sort.py | 80 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/sorts/msd_radix_sort.py b/sorts/msd_radix_sort.py index ee152bbc696a..4c3cea30ef68 100644 --- a/sorts/msd_radix_sort.py +++ b/sorts/msd_radix_sort.py @@ -74,6 +74,86 @@ def _msd_radix_sort(list_of_ints: List[int], bit_position: int) -> List[int]: return res +def msd_radix_sort_inplace(list_of_ints: List[int]): + """ + Inplace implementation of the MSD radix sort algorithm. + Sorts based on the binary representation of the integers. + >>> lst = [1, 345, 23, 89, 0, 3] + >>> msd_radix_sort_inplace(lst) + >>> lst == sorted(lst) + True + >>> lst = [1, 43, 0, 0, 0, 24, 3, 3] + >>> msd_radix_sort_inplace(lst) + >>> lst == sorted(lst) + True + >>> lst = [] + >>> msd_radix_sort_inplace(lst) + >>> lst == [] + True + >>> lst = [-1, 34, 23, 4, -42] + >>> msd_radix_sort_inplace(lst) + Traceback (most recent call last): + ... + ValueError: All numbers must be positive + """ + + length = len(list_of_ints) + if not list_of_ints or length == 1: + return + + if min(list_of_ints) < 0: + raise ValueError("All numbers must be positive") + + most_bits = max(len(bin(x)[2:]) for x in list_of_ints) + _msd_radix_sort_inplace(list_of_ints, most_bits, 0, length) + + +def _msd_radix_sort_inplace( + list_of_ints: List[int], bit_position: int, begin_index: int, end_index: int +): + """ + Sort the given list based on the bit at bit_position. Numbers with a + 0 at that position will be at the start of the list, numbers with a + 1 at the end. 
+ >>> lst = [45, 2, 32, 24, 534, 2932] + >>> _msd_radix_sort_inplace(lst, 1, 0, 3) + >>> lst == [32, 2, 45, 24, 534, 2932] + True + >>> lst = [0, 2, 1, 3, 12, 10, 4, 90, 54, 2323, 756] + >>> _msd_radix_sort_inplace(lst, 2, 4, 7) + >>> lst == [0, 2, 1, 3, 12, 4, 10, 90, 54, 2323, 756] + True + """ + if bit_position == 0 or end_index - begin_index <= 1: + return + + bit_position -= 1 + + i = begin_index + j = end_index - 1 + while i <= j: + changed = False + if not ((list_of_ints[i] >> bit_position) & 1): + # found zero at the beginning + i += 1 + changed = True + if (list_of_ints[j] >> bit_position) & 1: + # found one at the end + j -= 1 + changed = True + + if changed: + continue + + list_of_ints[i], list_of_ints[j] = list_of_ints[j], list_of_ints[i] + j -= 1 + if not j == i: + i += 1 + + _msd_radix_sort_inplace(list_of_ints, bit_position, begin_index, i) + _msd_radix_sort_inplace(list_of_ints, bit_position, i, end_index) + + if __name__ == "__main__": import doctest From 650039a279f0eef4f2e267d27c6b2d5be2e18fa0 Mon Sep 17 00:00:00 2001 From: Benjamin Fein Date: Sun, 30 May 2021 11:27:42 -0400 Subject: [PATCH 0145/1543] Add a recursive merge sort algorithm that accepts an array as input. (#4462) This is a different recursive implementation of the merge sort algorithm. * Recursive Merge Sort That Accepts an Array Recursive Merge Sort That Accepts an Array * Add Wikipedia Link * Fixes naming conventions * Update sorts/recursive_mergesort_array.py Co-authored-by: Maxim R. <49735721+mrmaxguns@users.noreply.github.com> * Update sorts/recursive_mergesort_array.py Co-authored-by: Maxim R. <49735721+mrmaxguns@users.noreply.github.com> * Update sorts/recursive_mergesort_array.py Co-authored-by: Maxim R. <49735721+mrmaxguns@users.noreply.github.com> * Update sorts/recursive_mergesort_array.py Co-authored-by: Maxim R. <49735721+mrmaxguns@users.noreply.github.com> * Update sorts/recursive_mergesort_array.py Co-authored-by: Maxim R. <49735721+mrmaxguns@users.noreply.github.com> * Adds black format * Removes unused variables * Fixes variable names and adds documentation * Fixes variable names to use snake_case. * Removes double #. * Update sorts/recursive_mergesort_array.py Co-authored-by: Maxim R. <49735721+mrmaxguns@users.noreply.github.com> Co-authored-by: Maxim R. <49735721+mrmaxguns@users.noreply.github.com> Co-authored-by: Benjamin Fein --- sorts/recursive_mergesort_array.py | 64 ++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 sorts/recursive_mergesort_array.py diff --git a/sorts/recursive_mergesort_array.py b/sorts/recursive_mergesort_array.py new file mode 100644 index 000000000000..f714d02380cf --- /dev/null +++ b/sorts/recursive_mergesort_array.py @@ -0,0 +1,64 @@ +"""A merge sort which accepts an array as input and recursively +splits an array in half and sorts and combines them. +""" + +"""https://en.wikipedia.org/wiki/Merge_sort """ + + +def merge(arr: list[int]) -> list[int]: + """Return a sorted array. + >>> merge([10,9,8,7,6,5,4,3,2,1]) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> merge([1,2,3,4,5,6,7,8,9,10]) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> merge([10,22,1,2,3,9,15,23]) + [1, 2, 3, 9, 10, 15, 22, 23] + >>> merge([100]) + [100] + >>> merge([]) + [] + """ + if len(arr) > 1: + middle_length = len(arr) // 2 # Finds the middle of the array + left_array = arr[ + :middle_length + ] # Creates an array of the elements in the first half. + right_array = arr[ + middle_length: + ] # Creates an array of the elements in the second half. 
+ left_size = len(left_array) + right_size = len(right_array) + merge(left_array) # Starts sorting the left. + merge(right_array) # Starts sorting the right + left_index = 0 # Left Counter + right_index = 0 # Right Counter + index = 0 # Position Counter + while ( + left_index < left_size and right_index < right_size + ): # Runs until the lowers size of the left and right are sorted. + if left_array[left_index] < right_array[right_index]: + arr[index] = left_array[left_index] + left_index = left_index + 1 + else: + arr[index] = right_array[right_index] + right_index = right_index + 1 + index = index + 1 + while ( + left_index < left_size + ): # Adds the left over elements in the left half of the array + arr[index] = left_array[left_index] + left_index = left_index + 1 + index = index + 1 + while ( + right_index < right_size + ): # Adds the left over elements in the right half of the array + arr[index] = right_array[right_index] + right_index = right_index + 1 + index = index + 1 + return arr + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b3b89d9460a56d88ac4ba528363bbdcc55bb3fa6 Mon Sep 17 00:00:00 2001 From: Grigoriy Hanin <43445998+haningrisha@users.noreply.github.com> Date: Mon, 31 May 2021 02:41:07 +0300 Subject: [PATCH 0146/1543] Armstrong number definition fix (#4466) This is a documentation fix. This fix patches a mistake in the description of the Armstrong number. * * Doc fix * Armstrong number is not the sum of cube's of number digits, but the sum of number's digits each raised to the power of the number of digits * Update armstrong_numbers.py Line shorten --- maths/armstrong_numbers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py index d30ed2e430a0..af25688dbacc 100644 --- a/maths/armstrong_numbers.py +++ b/maths/armstrong_numbers.py @@ -1,5 +1,6 @@ """ -An Armstrong number is equal to the sum of the cubes of its digits. +An Armstrong number is equal to the sum of its own digits each raised +to the power of the number of digits. For example, 370 is an Armstrong number because 3*3*3 + 7*7*7 + 0*0*0 = 370. An Armstrong number is often called Narcissistic number. """ From 04f156a8973d6156a4357e0717d9eb0aa264d086 Mon Sep 17 00:00:00 2001 From: Vivian Dai <38384400+vivian-dai@users.noreply.github.com> Date: Mon, 31 May 2021 04:56:11 -0400 Subject: [PATCH 0147/1543] markdown consistency (#4461) * markdown consistency * Swap ** for __ Co-authored-by: Christian Clauss --- CONTRIBUTING.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 76ee1312f345..d93b5db67fe8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ ## Before contributing -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you **read the whole guidelines**. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms). +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms). 
## Contributing @@ -15,9 +15,9 @@ We are very happy that you consider implementing algorithms and data structure f - Your work will be distributed under [MIT License](LICENSE.md) once your pull request is merged - You submitted work fulfils or mostly fulfils our styles and standards -**New implementation** is welcome! For example, new solutions for a problem, different representations for a graph data structure or algorithm designs with different complexity but **identical implementation** of an existing implementation is not allowed. Please check whether the solution is already implemented or not before submitting your pull request. +__New implementation__ is welcome! For example, new solutions for a problem, different representations for a graph data structure or algorithm designs with different complexity but __identical implementation__ of an existing implementation is not allowed. Please check whether the solution is already implemented or not before submitting your pull request. -**Improving comments** and **writing proper tests** are also highly welcome. +__Improving comments__ and __writing proper tests__ are also highly welcome. ### Contribution @@ -33,7 +33,7 @@ An Algorithm is one or more functions (or classes) that: * take one or more inputs, * perform some internal calculations or data manipulations, * return one or more outputs, -* have minimal side effects (Ex. print(), plot(), read(), write()). +* have minimal side effects (Ex. `print()`, `plot()`, `read()`, `write()`). Algorithms should be packaged in a way that would make it easy for readers to put them into larger programs. @@ -42,7 +42,7 @@ Algorithms should: * use Python naming conventions and intuitive variable names to ease comprehension * be flexible to take different input values * have Python type hints for their input parameters and return values -* raise Python exceptions (ValueError, etc.) on erroneous input values +* raise Python exceptions (`ValueError`, etc.) on erroneous input values * have docstrings with clear explanations and/or URLs to source materials * contain doctests that test both valid and erroneous input values * return all calculation results instead of printing or plotting them @@ -66,10 +66,10 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.7+. For instance: __print()__ is a function in Python 3 so __print "Hello"__ will _not_ work but __print("Hello")__ will. +- Please write in Python 3.7+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. - - Single letter variable names are _old school_ so please avoid them unless their life only spans a few lines. - - Expand acronyms because __gcd()__ is hard to understand but __greatest_common_divisor()__ is not. + - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. + - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. - Please follow the [Python Naming Conventions](https://pep8.org/#prescriptive-naming-conventions) so variable_names and function_names should be lower_case, CONSTANTS in UPPERCASE, ClassNames should be CamelCase, etc. 
- We encourage the use of Python [f-strings](https://realpython.com/python-f-strings/#f-strings-a-new-and-improved-way-to-format-strings-in-python) where they make the code easier to read. @@ -81,7 +81,7 @@ We want your work to be readable by others; therefore, we encourage you to note black . ``` -- All submissions will need to pass the test __flake8 . --ignore=E203,W503 --max-line-length=88__ before they will be accepted so if possible, try this test locally on your Python file(s) before submitting your pull request. +- All submissions will need to pass the test `flake8 . --ignore=E203,W503 --max-line-length=88` before they will be accepted so if possible, try this test locally on your Python file(s) before submitting your pull request. ```bash python3 -m pip install flake8 # only required the first time @@ -134,7 +134,7 @@ We want your work to be readable by others; therefore, we encourage you to note python3 -m doctest -v my_submission.py ``` - The use of the Python builtin __input()__ function is **not** encouraged: + The use of the Python builtin `input()` function is __not__ encouraged: ```python input('Enter your input:') @@ -142,7 +142,7 @@ We want your work to be readable by others; therefore, we encourage you to note input = eval(input("Enter your input: ")) ``` - However, if your code uses __input()__ then we encourage you to gracefully deal with leading and trailing whitespace in user input by adding __.strip()__ as in: + However, if your code uses `input()` then we encourage you to gracefully deal with leading and trailing whitespace in user input by adding `.strip()` as in: ```python starting_value = int(input("Please enter a starting value: ").strip()) @@ -175,8 +175,8 @@ We want your work to be readable by others; therefore, we encourage you to note - All submissions will be tested with [__mypy__](http://www.mypy-lang.org) so we encourage to add [__Python type hints__](https://docs.python.org/3/library/typing.html) where it makes sense to do so. - Most importantly, - - **Be consistent in the use of these guidelines when submitting.** - - **Join** [Gitter](https://gitter.im/TheAlgorithms) **now!** + - __Be consistent in the use of these guidelines when submitting.__ + - __Join__ [Gitter](https://gitter.im/TheAlgorithms) __now!__ - Happy coding! Writer [@poyea](https://github.com/poyea), Jun 2019. 
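Read together, the guidelines patched above describe a fairly specific shape for a submission: descriptive names, type hints, a docstring whose doctests cover both valid and erroneous input, and a ValueError raised on bad values instead of a printed error. A minimal sketch of a function written to that shape (illustrative only, not a file from this repository) could look like:

def greatest_common_divisor(first: int, second: int) -> int:
    """
    Return the greatest common divisor of two positive integers.

    >>> greatest_common_divisor(12, 18)
    6
    >>> greatest_common_divisor(7, 0)
    Traceback (most recent call last):
    ...
    ValueError: Both numbers must be positive
    """
    if first <= 0 or second <= 0:
        # erroneous input raises an exception rather than printing a message
        raise ValueError("Both numbers must be positive")
    while second:
        first, second = second, first % second  # Euclidean algorithm
    return first

Running python3 -m doctest -v on such a file exercises both the valid and the failing case, matching the review workflow the guidelines describe.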
From 71b458cefe20226c2904223265589dd788c110e0 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 31 May 2021 17:49:09 +0200 Subject: [PATCH 0148/1543] Rename harriscorner.py to harris_corner.py (#4470) * Rename harriscorner.py to harris_corner.py * updating DIRECTORY.md * Rename meanthreshold.py to mean_threshold.py * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 5 +++-- computer_vision/{harriscorner.py => harris_corner.py} | 0 computer_vision/{meanthreshold.py => mean_threshold.py} | 0 3 files changed, 3 insertions(+), 2 deletions(-) rename computer_vision/{harriscorner.py => harris_corner.py} (100%) rename computer_vision/{meanthreshold.py => mean_threshold.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 59365e047061..9905753b2d24 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -97,8 +97,8 @@ * [Peak Signal To Noise Ratio](https://github.com/TheAlgorithms/Python/blob/master/compression/peak_signal_to_noise_ratio.py) ## Computer Vision - * [Harriscorner](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/harriscorner.py) - * [Meanthreshold](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/meanthreshold.py) + * [Harris Corner](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/harris_corner.py) + * [Mean Threshold](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/mean_threshold.py) ## Conversions * [Binary To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_decimal.py) @@ -874,6 +874,7 @@ * [Random Pivot Quick Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/random_pivot_quick_sort.py) * [Recursive Bubble Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_bubble_sort.py) * [Recursive Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_insertion_sort.py) + * [Recursive Mergesort Array](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_mergesort_array.py) * [Recursive Quick Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_quick_sort.py) * [Selection Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/selection_sort.py) * [Shell Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/shell_sort.py) diff --git a/computer_vision/harriscorner.py b/computer_vision/harris_corner.py similarity index 100% rename from computer_vision/harriscorner.py rename to computer_vision/harris_corner.py diff --git a/computer_vision/meanthreshold.py b/computer_vision/mean_threshold.py similarity index 100% rename from computer_vision/meanthreshold.py rename to computer_vision/mean_threshold.py From cb0a5480a7c198a34069e4e65707c18f0ee6b7b9 Mon Sep 17 00:00:00 2001 From: Lakshay Akula Date: Mon, 31 May 2021 20:55:01 -0400 Subject: [PATCH 0149/1543] Add catalan_numbers.py (#4455) Reviewed by @mrmaxguns. This is an implementation of Catalan Numbers. --- dynamic_programming/catalan_numbers.py | 79 ++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 dynamic_programming/catalan_numbers.py diff --git a/dynamic_programming/catalan_numbers.py b/dynamic_programming/catalan_numbers.py new file mode 100644 index 000000000000..7b74f2763d43 --- /dev/null +++ b/dynamic_programming/catalan_numbers.py @@ -0,0 +1,79 @@ +""" +Print all the Catalan numbers from 0 to n, n being the user input. 
+ + * The Catalan numbers are a sequence of positive integers that + * appear in many counting problems in combinatorics [1]. Such + * problems include counting [2]: + * - The number of Dyck words of length 2n + * - The number well-formed expressions with n pairs of parentheses + * (e.g., `()()` is valid but `())(` is not) + * - The number of different ways n + 1 factors can be completely + * parenthesized (e.g., for n = 2, C(n) = 2 and (ab)c and a(bc) + * are the two valid ways to parenthesize. + * - The number of full binary trees with n + 1 leaves + + * A Catalan number satisfies the following recurrence relation + * which we will use in this algorithm [1]. + * C(0) = C(1) = 1 + * C(n) = sum(C(i).C(n-i-1)), from i = 0 to n-1 + + * In addition, the n-th Catalan number can be calculated using + * the closed form formula below [1]: + * C(n) = (1 / (n + 1)) * (2n choose n) + + * Sources: + * [1] https://brilliant.org/wiki/catalan-numbers/ + * [2] https://en.wikipedia.org/wiki/Catalan_number +""" + + +def catalan_numbers(upper_limit: int) -> "list[int]": + """ + Return a list of the Catalan number sequence from 0 through `upper_limit`. + + >>> catalan_numbers(5) + [1, 1, 2, 5, 14, 42] + >>> catalan_numbers(2) + [1, 1, 2] + >>> catalan_numbers(-1) + Traceback (most recent call last): + ValueError: Limit for the Catalan sequence must be ≥ 0 + """ + if upper_limit < 0: + raise ValueError("Limit for the Catalan sequence must be ≥ 0") + + catalan_list = [0] * (upper_limit + 1) + + # Base case: C(0) = C(1) = 1 + catalan_list[0] = 1 + if upper_limit > 0: + catalan_list[1] = 1 + + # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i + for i in range(2, upper_limit + 1): + for j in range(i): + catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] + + return catalan_list + + +if __name__ == "__main__": + print("\n********* Catalan Numbers Using Dynamic Programming ************\n") + print("\n*** Enter -1 at any time to quit ***") + print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="") + try: + while True: + N = int(input().strip()) + if N < 0: + print("\n********* Goodbye!! ************") + break + else: + print(f"The Catalan numbers from 0 through {N} are:") + print(catalan_numbers(N)) + print("Try another upper limit for the sequence: ", end="") + except (NameError, ValueError): + print("\n********* Invalid input, goodbye! ************\n") + + import doctest + + doctest.testmod() From 40e357f688c512c0c587f7455b75902c574336a7 Mon Sep 17 00:00:00 2001 From: Grigoriy Hanin <43445998+haningrisha@users.noreply.github.com> Date: Fri, 4 Jun 2021 23:16:32 +0300 Subject: [PATCH 0150/1543] Mistake in maths/average_mode.py fixed. (#4464) A serious bug was addressed with this pull request. The mode function previously didn't return the mode of the input list. Instead, it always returned the first value. Due to lacking tests, the bug was never caught. This pull request also adds new functionality to the function, allowing support for more than one mode. See #4464 for details. * * Mistake in average_mode.py fixed. The previous solution was to returnthe value on the first loop iteration, which is not correct, more than that it used to delete repeating values, so result's array and check array lost relation between each other * Type hint added * redundant check_list deleted Co-authored-by: Maxim R. 
<49735721+mrmaxguns@users.noreply.github.com> * Suggestions resolved * output typing changed to Any * test cases added * Black done File formatted * Unused statistics import statistics only used in doctest, now they are imported in doctest * Several modes support added Several modes support added * Comment fix * Update maths/average_mode.py Co-authored-by: Maxim R. <49735721+mrmaxguns@users.noreply.github.com> * Suggestions added Co-authored-by: Maxim R. <49735721+mrmaxguns@users.noreply.github.com> --- maths/average_mode.py | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/maths/average_mode.py b/maths/average_mode.py index d472dc04d4bf..83db820072bf 100644 --- a/maths/average_mode.py +++ b/maths/average_mode.py @@ -1,7 +1,4 @@ -import statistics - - -def mode(input_list): # Defining function "mode." +def mode(input_list: list) -> list: # Defining function "mode." """This function returns the mode(Mode as in the measures of central tendency) of the input data. @@ -9,23 +6,32 @@ def mode(input_list): # Defining function "mode." >>> input_list = [2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2] >>> mode(input_list) - 2 - >>> input_list = [2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2] - >>> mode(input_list) == statistics.mode(input_list) - True + [2] + >>> input_list = [3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 2, 2, 2] + >>> mode(input_list) + [2] + >>> input_list = [3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 4, 2, 2, 4, 2] + >>> mode(input_list) + [2, 4] + >>> input_list = ["x", "y", "y", "z"] + >>> mode(input_list) + ['y'] + >>> input_list = ["x", "x" , "y", "y", "z"] + >>> mode(input_list) + ['x', 'y'] """ - # Copying input_list to check with the index number later. - check_list = input_list.copy() result = list() # Empty list to store the counts of elements in input_list for x in input_list: result.append(input_list.count(x)) - input_list.remove(x) - y = max(result) # Gets the maximum value in the result list. - # Returns the value with the maximum number of repetitions. - return check_list[result.index(y)] + if not result: + return [] + y = max(result) # Gets the maximum value in the result list. + # Gets values of modes + result = {input_list[i] for i, value in enumerate(result) if value == y} + return sorted(result) if __name__ == "__main__": - data = [2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2] - print(mode(data)) - print(statistics.mode(data)) + import doctest + + doctest.testmod() From f37d415227a21017398144a090a66f1c690705eb Mon Sep 17 00:00:00 2001 From: Anderson Torres Date: Fri, 4 Jun 2021 17:28:26 -0300 Subject: [PATCH 0151/1543] Add new algorithm for Armstrong numbers (#4474) * Add a new algorithm for Armstrong numbers * FAILING = (-153, -1, 0, 1.2, 200, "A", [], {}, None) Co-authored-by: Christian Clauss --- maths/armstrong_numbers.py | 70 +++++++++++++++++++++++++++----------- 1 file changed, 50 insertions(+), 20 deletions(-) diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py index af25688dbacc..ce8c62182fd9 100644 --- a/maths/armstrong_numbers.py +++ b/maths/armstrong_numbers.py @@ -1,26 +1,24 @@ """ -An Armstrong number is equal to the sum of its own digits each raised -to the power of the number of digits. +An Armstrong number is equal to the sum of its own digits each raised to the +power of the number of digits. + For example, 370 is an Armstrong number because 3*3*3 + 7*7*7 + 0*0*0 = 370. -An Armstrong number is often called Narcissistic number. 
+ +Armstrong numbers are also called Narcissistic numbers and Pluperfect numbers. + +On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188 """ +PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401) +FAILING = (-153, -1, 0, 1.2, 200, "A", [], {}, None) def armstrong_number(n: int) -> bool: """ Return True if n is an Armstrong number or False if it is not. - >>> armstrong_number(153) + >>> all(armstrong_number(n) for n in PASSING) True - >>> armstrong_number(200) - False - >>> armstrong_number(1634) - True - >>> armstrong_number(0) - False - >>> armstrong_number(-1) - False - >>> armstrong_number(1.2) + >>> any(armstrong_number(n) for n in FAILING) False """ if not isinstance(n, int) or n < 1: @@ -43,15 +41,46 @@ def armstrong_number(n: int) -> bool: return n == sum -def narcissistic_number(n: int) -> bool: - """Return True if n is a narcissistic number or False if it is not""" +def pluperfect_number(n: int) -> bool: + """Return True if n is a pluperfect number or False if it is not + + >>> all(armstrong_number(n) for n in PASSING) + True + >>> any(armstrong_number(n) for n in FAILING) + False + """ + if not isinstance(n, int) or n < 1: + return False + + # Init a "histogram" of the digits + digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + digit_total = 0 + sum = 0 + temp = n + while temp > 0: + temp, rem = divmod(temp, 10) + digit_histogram[rem] += 1 + digit_total += 1 + + for (cnt, i) in zip(digit_histogram, range(len(digit_histogram))): + sum += cnt * i ** digit_total + + return n == sum - expo = len(str(n)) # power, all number will be raised to - # each digit will be multiplied expo times - temp = [(int(i) ** expo) for i in str(n)] - # check if sum of cube of each digit is equal to number - return n == sum(temp) +def narcissistic_number(n: int) -> bool: + """Return True if n is a narcissistic number or False if it is not. + + >>> all(armstrong_number(n) for n in PASSING) + True + >>> any(armstrong_number(n) for n in FAILING) + False + """ + if not isinstance(n, int) or n < 1: + return False + expo = len(str(n)) # the power that all digits will be raised to + # check if sum of each digit multiplied expo times is equal to number + return n == sum(int(i) ** expo for i in str(n)) def main(): @@ -61,6 +90,7 @@ def main(): num = int(input("Enter an integer to see if it is an Armstrong number: ").strip()) print(f"{num} is {'' if armstrong_number(num) else 'not '}an Armstrong number.") print(f"{num} is {'' if narcissistic_number(num) else 'not '}an Armstrong number.") + print(f"{num} is {'' if pluperfect_number(num) else 'not '}an Armstrong number.") if __name__ == "__main__": From b743e442599a5bf7e1cb14d9dc41bd17bde1504c Mon Sep 17 00:00:00 2001 From: Aniruddha Bhattacharjee Date: Wed, 9 Jun 2021 02:19:33 +0530 Subject: [PATCH 0152/1543] Wavelet tree (#4267) * Added the matrix_exponentiation.py file in maths directory * Implemented the requested changes * Update matrix_exponentiation.py * resolve merge conflict with upstream branch * add new line at end of file * add wavelet_tree * fix isort issue * updating DIRECTORY.md * fix variable names in wavelet_tree and correct typo * Add type hints and variable renaming * Update data_structures/binary_tree/wavelet_tree.py Add doctests to placate the algorithm-bot, thanks to @cclauss. 
Co-authored-by: Christian Clauss * Move doctest to individual functions and reformat code * Move common test array to the global scope and reuse in tests * MMove test array to global scope and minor linting changes * Correct the failing pytest tests * MUse built-in list for type annotation * Update wavelet_tree.py * types-requests * updating DIRECTORY.md * Update wavelet_tree.py * # type: ignore * # type: ignore * Update decrypt_caesar_with_chi_squared.py * , * Update decrypt_caesar_with_chi_squared.py Co-authored-by: Christian Clauss Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Aniruddha Bhattacharjee --- DIRECTORY.md | 2 + ciphers/decrypt_caesar_with_chi_squared.py | 7 +- data_structures/binary_tree/wavelet_tree.py | 206 ++++++++++++++++++++ requirements.txt | 1 + scripts/validate_solutions.py | 2 +- 5 files changed, 214 insertions(+), 4 deletions(-) create mode 100644 data_structures/binary_tree/wavelet_tree.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 9905753b2d24..e5ca6d62fe45 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -136,6 +136,7 @@ * [Segment Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/segment_tree.py) * [Segment Tree Other](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/segment_tree_other.py) * [Treap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/treap.py) + * [Wavelet Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/wavelet_tree.py) * Disjoint Set * [Alternate Disjoint Set](https://github.com/TheAlgorithms/Python/blob/master/data_structures/disjoint_set/alternate_disjoint_set.py) * [Disjoint Set](https://github.com/TheAlgorithms/Python/blob/master/data_structures/disjoint_set/disjoint_set.py) @@ -232,6 +233,7 @@ ## Dynamic Programming * [Abbreviation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/abbreviation.py) * [Bitmask](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/bitmask.py) + * [Catalan Numbers](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/catalan_numbers.py) * [Climbing Stairs](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/climbing_stairs.py) * [Edit Distance](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/edit_distance.py) * [Factorial](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/factorial.py) diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py index e7faeae73773..7e3705b8f71f 100644 --- a/ciphers/decrypt_caesar_with_chi_squared.py +++ b/ciphers/decrypt_caesar_with_chi_squared.py @@ -222,9 +222,10 @@ def decrypt_caesar_with_chi_squared( # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic - most_likely_cipher: int = min( - chi_squared_statistic_values, key=chi_squared_statistic_values.get - ) # type: ignore # First argument to `min` is not optional + most_likely_cipher: int = min( # type: ignore + chi_squared_statistic_values, # type: ignore + key=chi_squared_statistic_values.get, # type: ignore + ) # type: ignore # Get all the data from the most likely cipher (key, decoded message) ( diff --git a/data_structures/binary_tree/wavelet_tree.py b/data_structures/binary_tree/wavelet_tree.py new file mode 100644 index 000000000000..1607244f74ed --- /dev/null +++ b/data_structures/binary_tree/wavelet_tree.py @@ -0,0 +1,206 @@ +""" 
+Wavelet tree is a data-structure designed to efficiently answer various range queries +for arrays. Wavelets trees are different from other binary trees in the sense that +the nodes are split based on the actual values of the elements and not on indices, +such as the with segment trees or fenwick trees. You can read more about them here: +1. https://users.dcc.uchile.cl/~jperez/papers/ioiconf16.pdf +2. https://www.youtube.com/watch?v=4aSv9PcecDw&t=811s +3. https://www.youtube.com/watch?v=CybAgVF-MMc&t=1178s +""" + +from typing import Optional + +test_array = [2, 1, 4, 5, 6, 0, 8, 9, 1, 2, 0, 6, 4, 2, 0, 6, 5, 3, 2, 7] + + +class Node: + def __init__(self, length: int) -> None: + self.minn: int = -1 + self.maxx: int = -1 + self.map_left: list[int] = [-1] * length + self.left: Optional[Node] = None + self.right: Optional[Node] = None + + def __repr__(self) -> str: + """ + >>> node = Node(length=27) + >>> repr(node) + 'min_value: -1, max_value: -1' + >>> repr(node) == str(node) + True + """ + return f"min_value: {self.minn}, max_value: {self.maxx}" + + +def build_tree(arr: list[int]) -> Node: + """ + Builds the tree for arr and returns the root + of the constructed tree + + >>> build_tree(test_array) + min_value: 0, max_value: 9 + """ + root = Node(len(arr)) + root.minn, root.maxx = min(arr), max(arr) + # Leaf node case where the node contains only one unique value + if root.minn == root.maxx: + return root + """ + Take the mean of min and max element of arr as the pivot and + partition arr into left_arr and right_arr with all elements <= pivot in the + left_arr and the rest in right_arr, maintaining the order of the elements, + then recursively build trees for left_arr and right_arr + """ + pivot = (root.minn + root.maxx) // 2 + left_arr, right_arr = [], [] + for index, num in enumerate(arr): + if num <= pivot: + left_arr.append(num) + else: + right_arr.append(num) + root.map_left[index] = len(left_arr) + root.left = build_tree(left_arr) + root.right = build_tree(right_arr) + return root + + +def rank_till_index(node: Node, num: int, index: int) -> int: + """ + Returns the number of occurrences of num in interval [0, index] in the list + + >>> root = build_tree(test_array) + >>> rank_till_index(root, 6, 6) + 1 + >>> rank_till_index(root, 2, 0) + 1 + >>> rank_till_index(root, 1, 10) + 2 + >>> rank_till_index(root, 17, 7) + 0 + >>> rank_till_index(root, 0, 9) + 1 + """ + if index < 0: + return 0 + # Leaf node cases + if node.minn == node.maxx: + return index + 1 if node.minn == num else 0 + pivot = (node.minn + node.maxx) // 2 + if num <= pivot: + # go the left subtree and map index to the left subtree + return rank_till_index(node.left, num, node.map_left[index] - 1) + else: + # go to the right subtree and map index to the right subtree + return rank_till_index(node.right, num, index - node.map_left[index]) + + +def rank(node: Node, num: int, start: int, end: int) -> int: + """ + Returns the number of occurrences of num in interval [start, end] in the list + + >>> root = build_tree(test_array) + >>> rank(root, 6, 3, 13) + 2 + >>> rank(root, 2, 0, 19) + 4 + >>> rank(root, 9, 2 ,2) + 0 + >>> rank(root, 0, 5, 10) + 2 + """ + if start > end: + return 0 + rank_till_end = rank_till_index(node, num, end) + rank_before_start = rank_till_index(node, num, start - 1) + return rank_till_end - rank_before_start + + +def quantile(node: Node, index: int, start: int, end: int) -> int: + """ + Returns the index'th smallest element in interval [start, end] in the list + index is 0-indexed + + >>> root = 
build_tree(test_array) + >>> quantile(root, 2, 2, 5) + 5 + >>> quantile(root, 5, 2, 13) + 4 + >>> quantile(root, 0, 6, 6) + 8 + >>> quantile(root, 4, 2, 5) + -1 + """ + if index > (end - start) or start > end: + return -1 + # Leaf node case + if node.minn == node.maxx: + return node.minn + # Number of elements in the left subtree in interval [start, end] + num_elements_in_left_tree = node.map_left[end] - ( + node.map_left[start - 1] if start else 0 + ) + if num_elements_in_left_tree > index: + return quantile( + node.left, + index, + (node.map_left[start - 1] if start else 0), + node.map_left[end] - 1, + ) + else: + return quantile( + node.right, + index - num_elements_in_left_tree, + start - (node.map_left[start - 1] if start else 0), + end - node.map_left[end], + ) + + +def range_counting( + node: Node, start: int, end: int, start_num: int, end_num: int +) -> int: + """ + Returns the number of elememts in range [start_num, end_num] + in interval [start, end] in the list + + >>> root = build_tree(test_array) + >>> range_counting(root, 1, 10, 3, 7) + 3 + >>> range_counting(root, 2, 2, 1, 4) + 1 + >>> range_counting(root, 0, 19, 0, 100) + 20 + >>> range_counting(root, 1, 0, 1, 100) + 0 + >>> range_counting(root, 0, 17, 100, 1) + 0 + """ + if ( + start > end + or start_num > end_num + or node.minn > end_num + or node.maxx < start_num + ): + return 0 + if start_num <= node.minn and node.maxx <= end_num: + return end - start + 1 + left = range_counting( + node.left, + (node.map_left[start - 1] if start else 0), + node.map_left[end] - 1, + start_num, + end_num, + ) + right = range_counting( + node.right, + start - (node.map_left[start - 1] if start else 0), + end - node.map_left[end], + start_num, + end_num, + ) + return left + right + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/requirements.txt b/requirements.txt index 8bbb8d524ed4..4867de26f8f1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,4 +14,5 @@ sklearn statsmodels sympy tensorflow +types-requests xgboost diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index 68461dca6710..ca4af5261a8f 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -21,7 +21,7 @@ def convert_path_to_module(file_path: pathlib.Path) -> ModuleType: """Converts a file path to a Python module""" spec = importlib.util.spec_from_file_location(file_path.name, str(file_path)) - module = importlib.util.module_from_spec(spec) + module = importlib.util.module_from_spec(spec) # type: ignore spec.loader.exec_module(module) # type: ignore return module From c824b90ead698da4f10ac38e431844d96af109b6 Mon Sep 17 00:00:00 2001 From: GDWR <57012020+GDWR@users.noreply.github.com> Date: Thu, 10 Jun 2021 17:44:41 +0100 Subject: [PATCH 0153/1543] Remove redundent function in Backtracking Sudoku (#4499) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove redundent function After reviewing this code, I've noticed that the `is_completed` function is a redundant operation. Increasing the number of loops required for each step of the sudoku solver. This should remove n² operations where n is the width of the grid. 
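The redundancy is easier to see once you notice that the solver already gets this information for free: a grid is complete exactly when no empty cell remains, and that is what find_empty_location reports by returning None. A sketch of the equivalence, assuming the helpers from backtracking/sudoku.py shown below:

def is_completed(grid) -> bool:
    # scanning every cell is unnecessary; the existing helper already
    # answers the same question while doing useful work for the solver
    return find_empty_location(grid) is None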
* Update sudoku.py Remove additional newline --- backtracking/sudoku.py | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/backtracking/sudoku.py b/backtracking/sudoku.py index 3bfaddd6e56f..593fa52d6d8a 100644 --- a/backtracking/sudoku.py +++ b/backtracking/sudoku.py @@ -59,27 +59,6 @@ def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool: return True -def is_completed(grid: Matrix) -> bool: - """ - This function checks if the puzzle is completed or not. - it is completed when all the cells are assigned with a non-zero number. - - >>> is_completed([[0]]) - False - >>> is_completed([[1]]) - True - >>> is_completed([[1, 2], [0, 4]]) - False - >>> is_completed([[1, 2], [3, 4]]) - True - >>> is_completed(initial_grid) - False - >>> is_completed(no_solution) - False - """ - return all(all(cell != 0 for cell in row) for row in grid) - - def find_empty_location(grid: Matrix) -> Optional[Tuple[int, int]]: """ This function finds an empty location so that we can assign a number @@ -111,12 +90,7 @@ def sudoku(grid: Matrix) -> Optional[Matrix]: >>> sudoku(no_solution) is None True """ - - if is_completed(grid): - return grid - - location = find_empty_location(grid) - if location is not None: + if location := find_empty_location(grid): row, column = location else: # If the location is ``None``, then the grid is solved. From 977511b3a3711ad9067cc1e8478c696e9f5f157d Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Thu, 10 Jun 2021 23:06:41 +0600 Subject: [PATCH 0154/1543] Add/fix mypy type annotations at BFS, DFS in graphs (#4488) --- graphs/breadth_first_search.py | 4 ++-- graphs/depth_first_search.py | 13 +++++++------ 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/graphs/breadth_first_search.py b/graphs/breadth_first_search.py index ee9855bd0c2d..305db01e19e4 100644 --- a/graphs/breadth_first_search.py +++ b/graphs/breadth_first_search.py @@ -2,12 +2,12 @@ """ Author: OMKAR PATHAK """ -from typing import Set +from typing import Dict, List, Set class Graph: def __init__(self) -> None: - self.vertices = {} + self.vertices: Dict[int, List[int]] = {} def print_graph(self) -> None: """ diff --git a/graphs/depth_first_search.py b/graphs/depth_first_search.py index 907cc172f253..5d74a6db9c6b 100644 --- a/graphs/depth_first_search.py +++ b/graphs/depth_first_search.py @@ -2,20 +2,21 @@ from __future__ import annotations +from typing import Set -def depth_first_search(graph: dict, start: str) -> set[int]: + +def depth_first_search(graph: dict, start: str) -> Set[str]: """Depth First Search on Graph :param graph: directed graph in dictionary format - :param vertex: starting vertex as a string + :param start: starting vertex as a string :returns: the trace of the search - >>> G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], + >>> input_G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], ... "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"], ... 
"F": ["C", "E", "G"], "G": ["F"] } - >>> start = "A" >>> output_G = list({'A', 'B', 'C', 'D', 'E', 'F', 'G'}) - >>> all(x in output_G for x in list(depth_first_search(G, "A"))) + >>> all(x in output_G for x in list(depth_first_search(input_G, "A"))) True - >>> all(x in output_G for x in list(depth_first_search(G, "G"))) + >>> all(x in output_G for x in list(depth_first_search(input_G, "G"))) True """ explored, stack = set(start), [start] From daeb6a7e08c3350f0fb08e9fa787ab28a3805f69 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Thu, 10 Jun 2021 22:48:40 +0530 Subject: [PATCH 0155/1543] fix(action): delete approve workflow as it does not work (#4453) --- .github/workflows/approve_workflow_run.yml | 22 ---------------------- 1 file changed, 22 deletions(-) delete mode 100644 .github/workflows/approve_workflow_run.yml diff --git a/.github/workflows/approve_workflow_run.yml b/.github/workflows/approve_workflow_run.yml deleted file mode 100644 index 7c5e64452d5f..000000000000 --- a/.github/workflows/approve_workflow_run.yml +++ /dev/null @@ -1,22 +0,0 @@ -# https://docs.github.com/en/rest/reference/actions#approve-a-workflow-run-for-a-fork-pull-request - -name: "Approve Workflow Run" - -on: - workflow_run: - workflows: ['build', 'project_euler', 'pre-commit', 'directory_writer'] - types: - - completed - -jobs: - approve: - runs-on: ubuntu-latest - if: ${{ github.event.workflow_run.conclusion == 'action_required' }} - steps: - - name: "Automatically approve a workflow run" - run: | - curl \ - --request POST \ - --header "Accept: application/vnd.github.v3+json" \ - --header "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - --url "https://api.github.com/repos/${{ github.repository }}/actions/runs/${{ github.event.workflow_run.id }}/approve" From 10d38eae6746c451d64ac592200b647480b20b5a Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 13 Jun 2021 06:29:06 +0200 Subject: [PATCH 0156/1543] CONTRIBUTING.md: Write for current Python (#4507) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d93b5db67fe8..13d330a90dc5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -66,7 +66,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.7+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. +- Please write in Python 3.9+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. From 95a4957d9ed3913c9d73bbafa9f398afb31a839d Mon Sep 17 00:00:00 2001 From: QuantumNovice <43876848+QuantumNovice@users.noreply.github.com> Date: Sun, 13 Jun 2021 23:19:44 +0500 Subject: [PATCH 0157/1543] Luhn algorithm (#4487) * Luhn algorithm Perform Luhn validation on input string Algorithm: * Double every other digit starting from 2nd last digit. * Subtract 9 if number is greater than 9. 
* Sum the numbers https://en.wikipedia.org/wiki/Luhn_algorithm * Update DIRECTORY.md * Update luhn.py * Update luhn.py * Update luhn.py * Update luhn.py * Update DIRECTORY.md --- hashes/luhn.py | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 hashes/luhn.py diff --git a/hashes/luhn.py b/hashes/luhn.py new file mode 100644 index 000000000000..69e7b4ccf59b --- /dev/null +++ b/hashes/luhn.py @@ -0,0 +1,46 @@ +""" Luhn Algorithm """ +from typing import List + + +def is_luhn(string: str) -> bool: + """ + Perform Luhn validation on input string + Algorithm: + * Double every other digit starting from 2nd last digit. + * Subtract 9 if number is greater than 9. + * Sum the numbers + * + >>> test_cases = [79927398710, 79927398711, 79927398712, 79927398713, + ... 79927398714, 79927398715, 79927398716, 79927398717, 79927398718, + ... 79927398719] + >>> test_cases = list(map(str, test_cases)) + >>> list(map(is_luhn, test_cases)) + [False, False, False, True, False, False, False, False, False, False] + """ + check_digit: int + _vector: List[str] = list(string) + __vector, check_digit = _vector[:-1], int(_vector[-1]) + vector: List[int] = [*map(int, __vector)] + + vector.reverse() + for idx, i in enumerate(vector): + + if idx & 1 == 0: + doubled: int = vector[idx] * 2 + if doubled > 9: + doubled -= 9 + + check_digit += doubled + else: + check_digit += i + + if (check_digit) % 10 == 0: + return True + return False + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + assert is_luhn("79927398713") From 0eabdb54b181f3a0ef28c912fb7ff5b99147093d Mon Sep 17 00:00:00 2001 From: Grigoriy Hanin <43445998+haningrisha@users.noreply.github.com> Date: Mon, 14 Jun 2021 23:39:51 +0300 Subject: [PATCH 0158/1543] Average median type hint (#4483) * Update average_median.py * Wikipediad link added --- maths/average_median.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/maths/average_median.py b/maths/average_median.py index 0257e3f76f1a..57e01368b7b2 100644 --- a/maths/average_median.py +++ b/maths/average_median.py @@ -1,6 +1,10 @@ -def median(nums): +from typing import Union + + +def median(nums: Union[int, float]) -> Union[int, float]: """ Find median of a list of numbers. + Wiki: https://en.wikipedia.org/wiki/Median >>> median([0]) 0 From 7d19d54f6f247bb16b4299d2a20548d6a279d635 Mon Sep 17 00:00:00 2001 From: Grigoriy Hanin <43445998+haningrisha@users.noreply.github.com> Date: Wed, 16 Jun 2021 09:33:23 +0300 Subject: [PATCH 0159/1543] Average mean refactor (#4485) * Average mean refactor Added doctests and type hints to average_mean * Wiki link added * Empty list check added Empty list check added Type hint changed to typing.List --- maths/average_mean.py | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/maths/average_mean.py b/maths/average_mean.py index 4beca1f741a0..e02e307f20c8 100644 --- a/maths/average_mean.py +++ b/maths/average_mean.py @@ -1,20 +1,28 @@ -"""Find mean of a list of numbers.""" +from typing import List -def average(nums): - """Find mean of a list of numbers.""" - return sum(nums) / len(nums) - - -def test_average(): +def mean(nums: List) -> float: """ - >>> test_average() + Find mean of a list of numbers. + Wiki: https://en.wikipedia.org/wiki/Mean + + >>> mean([3, 6, 9, 12, 15, 18, 21]) + 12.0 + >>> mean([5, 10, 15, 20, 25, 30, 35]) + 20.0 + >>> mean([1, 2, 3, 4, 5, 6, 7, 8]) + 4.5 + >>> mean([]) + Traceback (most recent call last): + ... 
+ ValueError: List is empty """ - assert 12.0 == average([3, 6, 9, 12, 15, 18, 21]) - assert 20 == average([5, 10, 15, 20, 25, 30, 35]) - assert 4.5 == average([1, 2, 3, 4, 5, 6, 7, 8]) + if not nums: + raise ValueError("List is empty") + return sum(nums) / len(nums) if __name__ == "__main__": - """Call average module to find mean of a specific list of numbers.""" - print(average([2, 4, 6, 8, 20, 50, 70])) + import doctest + + doctest.testmod() From 4f9ee4330aedd20485a7923c634b3d27f7a82c8b Mon Sep 17 00:00:00 2001 From: Grigoriy Hanin <43445998+haningrisha@users.noreply.github.com> Date: Wed, 16 Jun 2021 09:34:32 +0300 Subject: [PATCH 0160/1543] basic_maths input check (#4486) * Prime factor input check * number_of_divisors input check * sum_of_divisors input check --- maths/basic_maths.py | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/maths/basic_maths.py b/maths/basic_maths.py index 07ee3b3df296..47d3d91b397d 100644 --- a/maths/basic_maths.py +++ b/maths/basic_maths.py @@ -6,7 +6,17 @@ def prime_factors(n: int) -> list: """Find Prime Factors. >>> prime_factors(100) [2, 2, 5, 5] + >>> prime_factors(0) + Traceback (most recent call last): + ... + ValueError: Only positive integers have prime factors + >>> prime_factors(-10) + Traceback (most recent call last): + ... + ValueError: Only positive integers have prime factors """ + if n <= 0: + raise ValueError("Only positive integers have prime factors") pf = [] while n % 2 == 0: pf.append(2) @@ -24,7 +34,17 @@ def number_of_divisors(n: int) -> int: """Calculate Number of Divisors of an Integer. >>> number_of_divisors(100) 9 + >>> number_of_divisors(0) + Traceback (most recent call last): + ... + ValueError: Only positive numbers are accepted + >>> number_of_divisors(-10) + Traceback (most recent call last): + ... + ValueError: Only positive numbers are accepted """ + if n <= 0: + raise ValueError("Only positive numbers are accepted") div = 1 temp = 1 while n % 2 == 0: @@ -44,7 +64,17 @@ def sum_of_divisors(n: int) -> int: """Calculate Sum of Divisors. >>> sum_of_divisors(100) 217 + >>> sum_of_divisors(0) + Traceback (most recent call last): + ... + ValueError: Only positive numbers are accepted + >>> sum_of_divisors(-10) + Traceback (most recent call last): + ... 
+    ValueError: Only positive numbers are accepted
     """
+    if n <= 0:
+        raise ValueError("Only positive numbers are accepted")
     s = 1
     temp = 1
     while n % 2 == 0:
@@ -74,7 +104,6 @@ def euler_phi(n: int) -> int:
 
 
 if __name__ == "__main__":
-    print(prime_factors(100))
-    print(number_of_divisors(100))
-    print(sum_of_divisors(100))
-    print(euler_phi(100))
+    import doctest
+
+    doctest.testmod()

From 2899cdac207822f6f3ca454f27f55d7b73eaac9e Mon Sep 17 00:00:00 2001
From: Harshit Agarwal <43147421+9harshit@users.noreply.github.com>
Date: Thu, 24 Jun 2021 11:58:23 +0530
Subject: [PATCH 0161/1543] feat: CNN classification added to computer vision
 (#4350)

* cnn classification file

* black formatted

* flake8 corrected

* added cnn classification

* Delete requirements.txt

* Update cnn_classification.py

* Create cnn_classification.py

* using keras from tensorflow only

* update tensorflow

* Update cnn_classification.py

* Delete computer_vision/cnn_classification directory
---
 computer_vision/cnn_classification.py | 98 +++++++++++++++++++++++++++
 1 file changed, 98 insertions(+)
 create mode 100644 computer_vision/cnn_classification.py

diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py
new file mode 100644
index 000000000000..6d4f19639c24
--- /dev/null
+++ b/computer_vision/cnn_classification.py
@@ -0,0 +1,98 @@
+"""
+Convolutional Neural Network
+
+Objective : To train a CNN model to detect if TB is present in a Lung X-ray or not.
+
+Resources CNN Theory :
+    https://en.wikipedia.org/wiki/Convolutional_neural_network
+Resources Tensorflow : https://www.tensorflow.org/tutorials/images/cnn
+
+Download dataset from :
+https://lhncbc.nlm.nih.gov/LHC-publications/pubs/TuberculosisChestXrayImageDataSets.html
+
+1. Download the dataset folder and create two folders, training set and test set,
+in the parent dataset folder
+2. Move 30-40 images from both the TB positive and TB Negative folders
+into the test set folder
+3. The labels of the images will be extracted from the folder name
+the image is present in.
+ +""" + +# Part 1 - Building the CNN + +import numpy as np + +# Importing the Keras libraries and packages +import tensorflow as tf +from tensorflow.keras import layers, models + +if __name__ == "__main__": + + # Initialising the CNN + classifier = models.Sequential() + + # Step 1 - Convolution + classifier.add( + layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu") + ) + + # Step 2 - Pooling + classifier.add(layers.MaxPooling2D(pool_size=(2, 2))) + + # Adding a second convolutional layer + classifier.add(layers.Conv2D(32, (3, 3), activation="relu")) + classifier.add(layers.MaxPooling2D(pool_size=(2, 2))) + + # Step 3 - Flattening + classifier.add(layers.Flatten()) + + # Step 4 - Full connection + classifier.add(layers.Dense(units=128, activation="relu")) + classifier.add(layers.Dense(units=1, activation="sigmoid")) + + # Compiling the CNN + classifier.compile( + optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"] + ) + + # Part 2 - Fitting the CNN to the images + + # Load Trained model weights + + # from keras.models import load_model + # regressor=load_model('cnn.h5') + + train_datagen = tf.keras.preprocessing.image.ImageDataGenerator( + rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True + ) + + test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) + + training_set = train_datagen.flow_from_directory( + "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary" + ) + + test_set = test_datagen.flow_from_directory( + "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary" + ) + + classifier.fit_generator( + training_set, steps_per_epoch=5, epochs=30, validation_data=test_set + ) + + classifier.save("cnn.h5") + + # Part 3 - Making new predictions + + test_image = tf.keras.preprocessing.image.load_img( + "dataset/single_prediction/image.png", target_size=(64, 64) + ) + test_image = tf.keras.preprocessing.image.img_to_array(test_image) + test_image = np.expand_dims(test_image, axis=0) + result = classifier.predict(test_image) + training_set.class_indices + if result[0][0] == 0: + prediction = "Normal" + if result[0][0] == 1: + prediction = "Abnormality detected" From 3ea5a13334f2d573167456c0c9ee4c90497c9466 Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Thu, 24 Jun 2021 12:50:23 +0600 Subject: [PATCH 0162/1543] Add doctest and fix mypy type annotation in bellman ford (#4506) --- graphs/bellman_ford.py | 85 +++++++++++++++++++++++++----------------- 1 file changed, 51 insertions(+), 34 deletions(-) diff --git a/graphs/bellman_ford.py b/graphs/bellman_ford.py index ace7985647bb..d6d6b2ac7349 100644 --- a/graphs/bellman_ford.py +++ b/graphs/bellman_ford.py @@ -1,56 +1,73 @@ from __future__ import annotations -def printDist(dist, V): - print("Vertex Distance") - distances = ("INF" if d == float("inf") else d for d in dist) - print("\t".join(f"{i}\t{d}" for i, d in enumerate(distances))) +def print_distance(distance: list[float], src): + print(f"Vertex\tShortest Distance from vertex {src}") + for i, d in enumerate(distance): + print(f"{i}\t\t{d}") -def BellmanFord(graph: list[dict[str, int]], V: int, E: int, src: int) -> int: +def check_negative_cycle( + graph: list[dict[str, int]], distance: list[float], edge_count: int +): + for j in range(edge_count): + u, v, w = [graph[j][k] for k in ["src", "dst", "weight"]] + if distance[u] != float("inf") and distance[u] + w < distance[v]: + return True + return False + + +def bellman_ford( + graph: list[dict[str, int]], 
vertex_count: int, edge_count: int, src: int +) -> list[float]: """ Returns shortest paths from a vertex src to all other vertices. + >>> edges = [(2, 1, -10), (3, 2, 3), (0, 3, 5), (0, 1, 4)] + >>> g = [{"src": s, "dst": d, "weight": w} for s, d, w in edges] + >>> bellman_ford(g, 4, 4, 0) + [0.0, -2.0, 8.0, 5.0] + >>> g = [{"src": s, "dst": d, "weight": w} for s, d, w in edges + [(1, 3, 5)]] + >>> bellman_ford(g, 4, 5, 0) + Traceback (most recent call last): + ... + Exception: Negative cycle found """ - mdist = [float("inf") for i in range(V)] - mdist[src] = 0.0 + distance = [float("inf")] * vertex_count + distance[src] = 0.0 - for i in range(V - 1): - for j in range(E): - u = graph[j]["src"] - v = graph[j]["dst"] - w = graph[j]["weight"] + for i in range(vertex_count - 1): + for j in range(edge_count): + u, v, w = [graph[j][k] for k in ["src", "dst", "weight"]] - if mdist[u] != float("inf") and mdist[u] + w < mdist[v]: - mdist[v] = mdist[u] + w - for j in range(E): - u = graph[j]["src"] - v = graph[j]["dst"] - w = graph[j]["weight"] + if distance[u] != float("inf") and distance[u] + w < distance[v]: + distance[v] = distance[u] + w - if mdist[u] != float("inf") and mdist[u] + w < mdist[v]: - print("Negative cycle found. Solution not possible.") - return + negative_cycle_exists = check_negative_cycle(graph, distance, edge_count) + if negative_cycle_exists: + raise Exception("Negative cycle found") - printDist(mdist, V) - return src + return distance if __name__ == "__main__": + import doctest + + doctest.testmod() + V = int(input("Enter number of vertices: ").strip()) E = int(input("Enter number of edges: ").strip()) - graph = [dict() for j in range(E)] + graph: list[dict[str, int]] = [dict() for j in range(E)] for i in range(E): - graph[i][i] = 0.0 + print("Edge ", i + 1) + src, dest, weight = [ + int(x) + for x in input("Enter source, destination, weight: ").strip().split(" ") + ] + graph[i] = {"src": src, "dst": dest, "weight": weight} - for i in range(E): - print("\nEdge ", i + 1) - src = int(input("Enter source:").strip()) - dst = int(input("Enter destination:").strip()) - weight = float(input("Enter weight:").strip()) - graph[i] = {"src": src, "dst": dst, "weight": weight} - - gsrc = int(input("\nEnter shortest path source:").strip()) - BellmanFord(graph, V, E, gsrc) + source = int(input("\nEnter shortest path source:").strip()) + shortest_distance = bellman_ford(graph, V, E, source) + print_distance(shortest_distance, 0) From 62d44188516521fd14e9a4e1f18957ea4eaeeb37 Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Tue, 29 Jun 2021 17:44:35 +0600 Subject: [PATCH 0163/1543] Fix mypy error and add more doctest on bfs_shortest_path (#4512) --- graphs/bfs_shortest_path.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/graphs/bfs_shortest_path.py b/graphs/bfs_shortest_path.py index 754ba403537e..b0c8d353ba04 100644 --- a/graphs/bfs_shortest_path.py +++ b/graphs/bfs_shortest_path.py @@ -4,7 +4,7 @@ Manual test: python bfs_shortest_path.py """ -graph = { +demo_graph = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], @@ -15,7 +15,7 @@ } -def bfs_shortest_path(graph: dict, start, goal) -> str: +def bfs_shortest_path(graph: dict, start, goal) -> list[str]: """Find shortest path between `start` and `goal` nodes. Args: graph (dict): node/list of neighboring nodes key/value pairs. @@ -25,8 +25,12 @@ def bfs_shortest_path(graph: dict, start, goal) -> str: Shortest path between `start` and `goal` nodes as a string of nodes. 
'Not found' string if no path found. Example: - >>> bfs_shortest_path(graph, "G", "D") + >>> bfs_shortest_path(demo_graph, "G", "D") ['G', 'C', 'A', 'B', 'D'] + >>> bfs_shortest_path(demo_graph, "G", "G") + ['G'] + >>> bfs_shortest_path(demo_graph, "G", "Unknown") + [] """ # keep track of explored nodes explored = set() @@ -35,7 +39,7 @@ def bfs_shortest_path(graph: dict, start, goal) -> str: # return path if start is goal if start == goal: - return "That was easy! Start = goal" + return [start] # keeps looping until all possible paths have been checked while queue: @@ -59,7 +63,7 @@ def bfs_shortest_path(graph: dict, start, goal) -> str: explored.add(node) # in case there's no path between the 2 nodes - return "So sorry, but a connecting path doesn't exist :(" + return [] def bfs_shortest_path_distance(graph: dict, start, target) -> int: @@ -72,11 +76,11 @@ def bfs_shortest_path_distance(graph: dict, start, target) -> int: Number of edges in shortest path between `start` and `target` nodes. -1 if no path exists. Example: - >>> bfs_shortest_path_distance(graph, "G", "D") + >>> bfs_shortest_path_distance(demo_graph, "G", "D") 4 - >>> bfs_shortest_path_distance(graph, "A", "A") + >>> bfs_shortest_path_distance(demo_graph, "A", "A") 0 - >>> bfs_shortest_path_distance(graph, "A", "H") + >>> bfs_shortest_path_distance(demo_graph, "A", "Unknown") -1 """ if not graph or start not in graph or target not in graph: @@ -102,5 +106,5 @@ def bfs_shortest_path_distance(graph: dict, start, target) -> int: if __name__ == "__main__": - print(bfs_shortest_path(graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D'] - print(bfs_shortest_path_distance(graph, "G", "D")) # returns 4 + print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D'] + print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4 From 86baec0bc9d790e2be6f49492e2e4f0f788060af Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Fri, 2 Jul 2021 17:52:26 +0600 Subject: [PATCH 0164/1543] Fix mypy errors at bfs_zero_one_shortest_path (#4521) --- graphs/bfs_zero_one_shortest_path.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/graphs/bfs_zero_one_shortest_path.py b/graphs/bfs_zero_one_shortest_path.py index a725fae7e48f..a68b5602c2d1 100644 --- a/graphs/bfs_zero_one_shortest_path.py +++ b/graphs/bfs_zero_one_shortest_path.py @@ -1,6 +1,7 @@ from collections import deque +from collections.abc import Iterator from dataclasses import dataclass -from typing import Iterator, List +from typing import Optional, Union """ Finding the shortest path in 0-1-graph in O(E + V) which is faster than dijkstra. @@ -21,7 +22,7 @@ class AdjacencyList: """Graph adjacency list.""" def __init__(self, size: int): - self._graph: List[List[Edge]] = [[] for _ in range(size)] + self._graph: list[list[Edge]] = [[] for _ in range(size)] self._size = size def __getitem__(self, vertex: int) -> Iterator[Edge]: @@ -58,7 +59,7 @@ def add_edge(self, from_vertex: int, to_vertex: int, weight: int): self._graph[from_vertex].append(Edge(to_vertex, weight)) - def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int: + def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> Optional[int]: """ Return the shortest distance from start_vertex to finish_vertex in 0-1-graph. 1 1 1 @@ -106,18 +107,21 @@ def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int: ValueError: No path from start_vertex to finish_vertex. 
""" queue = deque([start_vertex]) - distances = [None for i in range(self.size)] + distances: list[Union[int, None]] = [None] * self.size distances[start_vertex] = 0 while queue: current_vertex = queue.popleft() current_distance = distances[current_vertex] + if current_distance is None: + continue for edge in self[current_vertex]: new_distance = current_distance + edge.weight + dest_vertex_distance = distances[edge.destination_vertex] if ( - distances[edge.destination_vertex] is not None - and new_distance >= distances[edge.destination_vertex] + isinstance(dest_vertex_distance, int) + and new_distance >= dest_vertex_distance ): continue distances[edge.destination_vertex] = new_distance From 95862303a6527f4bf111e6f3f783fd66b7b426f3 Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Mon, 5 Jul 2021 12:23:18 +0600 Subject: [PATCH 0165/1543] Fix mypy at prims_algo_2 (#4527) --- graphs/minimum_spanning_tree_prims2.py | 56 +++++++++++++------------- 1 file changed, 27 insertions(+), 29 deletions(-) diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py index 10ed736c9d17..c3444c36f1cf 100644 --- a/graphs/minimum_spanning_tree_prims2.py +++ b/graphs/minimum_spanning_tree_prims2.py @@ -8,7 +8,9 @@ """ from sys import maxsize -from typing import Dict, Optional, Tuple, Union +from typing import Generic, Optional, TypeVar + +T = TypeVar("T") def get_parent_position(position: int) -> int: @@ -43,7 +45,7 @@ def get_child_right_position(position: int) -> int: return (2 * position) + 2 -class MinPriorityQueue: +class MinPriorityQueue(Generic[T]): """ Minimum Priority Queue Class @@ -80,9 +82,9 @@ class MinPriorityQueue: """ def __init__(self) -> None: - self.heap = [] - self.position_map = {} - self.elements = 0 + self.heap: list[tuple[T, int]] = [] + self.position_map: dict[T, int] = {} + self.elements: int = 0 def __len__(self) -> int: return self.elements @@ -94,14 +96,14 @@ def is_empty(self) -> bool: # Check if the priority queue is empty return self.elements == 0 - def push(self, elem: Union[int, str], weight: int) -> None: + def push(self, elem: T, weight: int) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight)) self.position_map[elem] = self.elements self.elements += 1 self._bubble_up(elem) - def extract_min(self) -> Union[int, str]: + def extract_min(self) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0, self.elements - 1) @@ -113,7 +115,7 @@ def extract_min(self) -> Union[int, str]: self._bubble_down(bubble_down_elem) return elem - def update_key(self, elem: Union[int, str], weight: int) -> None: + def update_key(self, elem: T, weight: int) -> None: # Update the weight of the given key position = self.position_map[elem] self.heap[position] = (elem, weight) @@ -127,7 +129,7 @@ def update_key(self, elem: Union[int, str], weight: int) -> None: else: self._bubble_down(elem) - def _bubble_up(self, elem: Union[int, str]) -> None: + def _bubble_up(self, elem: T) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] curr_pos = self.position_map[elem] @@ -141,7 +143,7 @@ def _bubble_up(self, elem: Union[int, str]) -> None: return self._bubble_up(elem) return - def _bubble_down(self, elem: Union[int, str]) -> None: + def _bubble_down(self, elem: T) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] curr_pos = self.position_map[elem] @@ -182,7 +184,7 @@ def 
_swap_nodes(self, node1_pos: int, node2_pos: int) -> None: self.position_map[node2_elem] = node1_pos -class GraphUndirectedWeighted: +class GraphUndirectedWeighted(Generic[T]): """ Graph Undirected Weighted Class @@ -192,8 +194,8 @@ class GraphUndirectedWeighted: """ def __init__(self) -> None: - self.connections = {} - self.nodes = 0 + self.connections: dict[T, dict[T, int]] = {} + self.nodes: int = 0 def __repr__(self) -> str: return str(self.connections) @@ -201,15 +203,13 @@ def __repr__(self) -> str: def __len__(self) -> int: return self.nodes - def add_node(self, node: Union[int, str]) -> None: + def add_node(self, node: T) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: self.connections[node] = {} self.nodes += 1 - def add_edge( - self, node1: Union[int, str], node2: Union[int, str], weight: int - ) -> None: + def add_edge(self, node1: T, node2: T, weight: int) -> None: # Add an edge between 2 nodes in the graph self.add_node(node1) self.add_node(node2) @@ -218,8 +218,8 @@ def add_edge( def prims_algo( - graph: GraphUndirectedWeighted, -) -> Tuple[Dict[str, int], Dict[str, Optional[str]]]: + graph: GraphUndirectedWeighted[T], +) -> tuple[dict[T, int], dict[T, Optional[T]]]: """ >>> graph = GraphUndirectedWeighted() @@ -239,10 +239,13 @@ def prims_algo( 13 """ # prim's algorithm for minimum spanning tree - dist = {node: maxsize for node in graph.connections} - parent = {node: None for node in graph.connections} - priority_queue = MinPriorityQueue() - [priority_queue.push(node, weight) for node, weight in dist.items()] + dist: dict[T, int] = {node: maxsize for node in graph.connections} + parent: dict[T, Optional[T]] = {node: None for node in graph.connections} + + priority_queue: MinPriorityQueue[T] = MinPriorityQueue() + for node, weight in dist.items(): + priority_queue.push(node, weight) + if priority_queue.is_empty(): return dist, parent @@ -254,6 +257,7 @@ def prims_algo( dist[neighbour] = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(neighbour, dist[neighbour]) parent[neighbour] = node + # running prim's algorithm while not priority_queue.is_empty(): node = priority_queue.extract_min() @@ -263,9 +267,3 @@ def prims_algo( priority_queue.update_key(neighbour, dist[neighbour]) parent[neighbour] = node return dist, parent - - -if __name__ == "__main__": - from doctest import testmod - - testmod() From 4412eafaac315764ca43e2b63913772776689fa0 Mon Sep 17 00:00:00 2001 From: strambake <86815109+strambake@users.noreply.github.com> Date: Tue, 6 Jul 2021 12:38:33 +0530 Subject: [PATCH 0166/1543] [mypy] Fix mypy error (#4524) --- graphs/scc_kosaraju.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/graphs/scc_kosaraju.py b/graphs/scc_kosaraju.py index 573c1bf5e363..2b34170149bc 100644 --- a/graphs/scc_kosaraju.py +++ b/graphs/scc_kosaraju.py @@ -1,25 +1,28 @@ +from typing import List + + def dfs(u): - global g, r, scc, component, visit, stack + global graph, reversedGraph, scc, component, visit, stack if visit[u]: return visit[u] = True - for v in g[u]: + for v in graph[u]: dfs(v) stack.append(u) def dfs2(u): - global g, r, scc, component, visit, stack + global graph, reversedGraph, scc, component, visit, stack if visit[u]: return visit[u] = True component.append(u) - for v in r[u]: + for v in reversedGraph[u]: dfs2(v) def kosaraju(): - global g, r, scc, component, visit, stack + global graph, reversedGraph, scc, component, visit, stack for i in range(n): dfs(i) 
visit = [False] * n @@ -36,16 +39,16 @@ def kosaraju(): # n - no of nodes, m - no of edges n, m = list(map(int, input().strip().split())) - g = [[] for i in range(n)] # graph - r = [[] for i in range(n)] # reversed graph + graph: List[List[int]] = [[] for i in range(n)] # graph + reversedGraph: List[List[int]] = [[] for i in range(n)] # reversed graph # input graph data (edges) for i in range(m): u, v = list(map(int, input().strip().split())) - g[u].append(v) - r[v].append(u) + graph[u].append(v) + reversedGraph[v].append(u) - stack = [] - visit = [False] * n - scc = [] - component = [] + stack: List[int] = [] + visit: List[bool] = [False] * n + scc: List[int] = [] + component: List[int] = [] print(kosaraju()) From 256c319ce231eb0a158ec3506e2236d48ca4d6a5 Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Thu, 8 Jul 2021 12:46:43 +0600 Subject: [PATCH 0167/1543] Fix mypy errors at kruskal_2 (#4528) --- graphs/minimum_spanning_tree_kruskal2.py | 104 +++++++++++++---------- 1 file changed, 58 insertions(+), 46 deletions(-) diff --git a/graphs/minimum_spanning_tree_kruskal2.py b/graphs/minimum_spanning_tree_kruskal2.py index dfb87efeb89a..0ddb43ce8e6e 100644 --- a/graphs/minimum_spanning_tree_kruskal2.py +++ b/graphs/minimum_spanning_tree_kruskal2.py @@ -1,78 +1,93 @@ from __future__ import annotations +from typing import Generic, TypeVar -class DisjointSetTreeNode: +T = TypeVar("T") + + +class DisjointSetTreeNode(Generic[T]): # Disjoint Set Node to store the parent and rank - def __init__(self, key: int) -> None: - self.key = key + def __init__(self, data: T) -> None: + self.data = data self.parent = self self.rank = 0 -class DisjointSetTree: +class DisjointSetTree(Generic[T]): # Disjoint Set DataStructure - def __init__(self): + def __init__(self) -> None: # map from node name to the node object - self.map = {} + self.map: dict[T, DisjointSetTreeNode[T]] = {} - def make_set(self, x: int) -> None: + def make_set(self, data: T) -> None: # create a new set with x as its member - self.map[x] = DisjointSetTreeNode(x) + self.map[data] = DisjointSetTreeNode(data) - def find_set(self, x: int) -> DisjointSetTreeNode: + def find_set(self, data: T) -> DisjointSetTreeNode[T]: # find the set x belongs to (with path-compression) - elem_ref = self.map[x] + elem_ref = self.map[data] if elem_ref != elem_ref.parent: - elem_ref.parent = self.find_set(elem_ref.parent.key) + elem_ref.parent = self.find_set(elem_ref.parent.data) return elem_ref.parent - def link(self, x: int, y: int) -> None: + def link( + self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T] + ) -> None: # helper function for union operation - if x.rank > y.rank: - y.parent = x + if node1.rank > node2.rank: + node2.parent = node1 else: - x.parent = y - if x.rank == y.rank: - y.rank += 1 + node1.parent = node2 + if node1.rank == node2.rank: + node2.rank += 1 - def union(self, x: int, y: int) -> None: + def union(self, data1: T, data2: T) -> None: # merge 2 disjoint sets - self.link(self.find_set(x), self.find_set(y)) + self.link(self.find_set(data1), self.find_set(data2)) -class GraphUndirectedWeighted: - def __init__(self): +class GraphUndirectedWeighted(Generic[T]): + def __init__(self) -> None: # connections: map from the node to the neighbouring nodes (with weights) - self.connections = {} + self.connections: dict[T, dict[T, int]] = {} - def add_node(self, node: int) -> None: + def add_node(self, node: T) -> None: # add a node ONLY if its not present in the graph if node not in self.connections: self.connections[node] = {} - def 
add_edge(self, node1: int, node2: int, weight: int) -> None: + def add_edge(self, node1: T, node2: T, weight: int) -> None: # add an edge with the given weight self.add_node(node1) self.add_node(node2) self.connections[node1][node2] = weight self.connections[node2][node1] = weight - def kruskal(self) -> GraphUndirectedWeighted: + def kruskal(self) -> GraphUndirectedWeighted[T]: # Kruskal's Algorithm to generate a Minimum Spanning Tree (MST) of a graph """ Details: https://en.wikipedia.org/wiki/Kruskal%27s_algorithm Example: - - >>> graph = GraphUndirectedWeighted() - >>> graph.add_edge(1, 2, 1) - >>> graph.add_edge(2, 3, 2) - >>> graph.add_edge(3, 4, 1) - >>> graph.add_edge(3, 5, 100) # Removed in MST - >>> graph.add_edge(4, 5, 5) - >>> assert 5 in graph.connections[3] - >>> mst = graph.kruskal() + >>> g1 = GraphUndirectedWeighted[int]() + >>> g1.add_edge(1, 2, 1) + >>> g1.add_edge(2, 3, 2) + >>> g1.add_edge(3, 4, 1) + >>> g1.add_edge(3, 5, 100) # Removed in MST + >>> g1.add_edge(4, 5, 5) + >>> assert 5 in g1.connections[3] + >>> mst = g1.kruskal() >>> assert 5 not in mst.connections[3] + + >>> g2 = GraphUndirectedWeighted[str]() + >>> g2.add_edge('A', 'B', 1) + >>> g2.add_edge('B', 'C', 2) + >>> g2.add_edge('C', 'D', 1) + >>> g2.add_edge('C', 'E', 100) # Removed in MST + >>> g2.add_edge('D', 'E', 5) + >>> assert 'E' in g2.connections["C"] + >>> mst = g2.kruskal() + >>> assert 'E' not in mst.connections['C'] """ # getting the edges in ascending order of weights @@ -84,26 +99,23 @@ def kruskal(self) -> GraphUndirectedWeighted: seen.add((end, start)) edges.append((start, end, self.connections[start][end])) edges.sort(key=lambda x: x[2]) + # creating the disjoint set - disjoint_set = DisjointSetTree() - [disjoint_set.make_set(node) for node in self.connections] + disjoint_set = DisjointSetTree[T]() + for node in self.connections: + disjoint_set.make_set(node) + # MST generation num_edges = 0 index = 0 - graph = GraphUndirectedWeighted() + graph = GraphUndirectedWeighted[T]() while num_edges < len(self.connections) - 1: u, v, w = edges[index] index += 1 - parentu = disjoint_set.find_set(u) - parentv = disjoint_set.find_set(v) - if parentu != parentv: + parent_u = disjoint_set.find_set(u) + parent_v = disjoint_set.find_set(v) + if parent_u != parent_v: num_edges += 1 graph.add_edge(u, v, w) disjoint_set.union(u, v) return graph - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 8c13a7786f4fe15af4d133fed14e5e2fb0888926 Mon Sep 17 00:00:00 2001 From: fpringle Date: Thu, 8 Jul 2021 10:35:10 +0200 Subject: [PATCH 0168/1543] feat: add solution for Project Euler problem 144 (#4280) * Added solution for Project Euler problem 144 * updating DIRECTORY.md * Better variable names * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_144/__init__.py | 0 project_euler/problem_144/sol1.py | 101 ++++++++++++++++++++++++++ 3 files changed, 103 insertions(+) create mode 100644 project_euler/problem_144/__init__.py create mode 100644 project_euler/problem_144/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index e5ca6d62fe45..adc9bb9e4699 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -788,6 +788,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_129/sol1.py) * Problem 135 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_135/sol1.py) + * Problem 144 + * 
[Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_144/sol1.py) * Problem 173 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_173/sol1.py) * Problem 174 diff --git a/project_euler/problem_144/__init__.py b/project_euler/problem_144/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_144/sol1.py b/project_euler/problem_144/sol1.py new file mode 100644 index 000000000000..3f7a766be20f --- /dev/null +++ b/project_euler/problem_144/sol1.py @@ -0,0 +1,101 @@ +""" +In laser physics, a "white cell" is a mirror system that acts as a delay line for the +laser beam. The beam enters the cell, bounces around on the mirrors, and eventually +works its way back out. + +The specific white cell we will be considering is an ellipse with the equation +4x^2 + y^2 = 100 + +The section corresponding to −0.01 ≤ x ≤ +0.01 at the top is missing, allowing the +light to enter and exit through the hole. + +The light beam in this problem starts at the point (0.0,10.1) just outside the white +cell, and the beam first impacts the mirror at (1.4,-9.6). + +Each time the laser beam hits the surface of the ellipse, it follows the usual law of +reflection "angle of incidence equals angle of reflection." That is, both the incident +and reflected beams make the same angle with the normal line at the point of incidence. + +In the figure on the left, the red line shows the first two points of contact between +the laser beam and the wall of the white cell; the blue line shows the line tangent to +the ellipse at the point of incidence of the first bounce. + +The slope m of the tangent line at any point (x,y) of the given ellipse is: m = −4x/y + +The normal line is perpendicular to this tangent line at the point of incidence. + +The animation on the right shows the first 10 reflections of the beam. + +How many times does the beam hit the internal surface of the white cell before exiting? +""" + + +from math import isclose, sqrt + + +def next_point( + point_x: float, point_y: float, incoming_gradient: float +) -> tuple[float, float, float]: + """ + Given that a laser beam hits the interior of the white cell at point + (point_x, point_y) with gradient incoming_gradient, return a tuple (x,y,m1) + where the next point of contact with the interior is (x,y) with gradient m1. 
+ >>> next_point(5.0, 0.0, 0.0) + (-5.0, 0.0, 0.0) + >>> next_point(5.0, 0.0, -2.0) + (0.0, -10.0, 2.0) + """ + # normal_gradient = gradient of line through which the beam is reflected + # outgoing_gradient = gradient of reflected line + normal_gradient = point_y / 4 / point_x + s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) + c2 = (1 - normal_gradient * normal_gradient) / ( + 1 + normal_gradient * normal_gradient + ) + outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient) + + # to find the next point, solve the simultaeneous equations: + # y^2 + 4x^2 = 100 + # y - b = m * (x - a) + # ==> A x^2 + B x + C = 0 + quadratic_term = outgoing_gradient ** 2 + 4 + linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) + constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100 + + x_minus = ( + -linear_term - sqrt(linear_term ** 2 - 4 * quadratic_term * constant_term) + ) / (2 * quadratic_term) + x_plus = ( + -linear_term + sqrt(linear_term ** 2 - 4 * quadratic_term * constant_term) + ) / (2 * quadratic_term) + + # two solutions, one of which is our input point + next_x = x_minus if isclose(x_plus, point_x) else x_plus + next_y = point_y + outgoing_gradient * (next_x - point_x) + + return next_x, next_y, outgoing_gradient + + +def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int: + """ + Return the number of times that the beam hits the interior wall of the + cell before exiting. + >>> solution(0.00001,-10) + 1 + >>> solution(5, 0) + 287 + """ + num_reflections: int = 0 + point_x: float = first_x_coord + point_y: float = first_y_coord + gradient: float = (10.1 - point_y) / (0.0 - point_x) + + while not (-0.01 <= point_x <= 0.01 and point_y > 0): + point_x, point_y, gradient = next_point(point_x, point_y, gradient) + num_reflections += 1 + + return num_reflections + + +if __name__ == "__main__": + print(f"{solution() = }") From 307ffd8c29d1b2b156c349fde424e62e8493428a Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Mon, 12 Jul 2021 12:10:07 +0600 Subject: [PATCH 0169/1543] Fix mypy errors at bidirectional_bfs (#4531) --- graphs/bidirectional_breadth_first_search.py | 25 ++++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/graphs/bidirectional_breadth_first_search.py b/graphs/bidirectional_breadth_first_search.py index 39d8dc7d4187..9b84ab21bf7f 100644 --- a/graphs/bidirectional_breadth_first_search.py +++ b/graphs/bidirectional_breadth_first_search.py @@ -5,6 +5,9 @@ from __future__ import annotations import time +from typing import Optional + +Path = list[tuple[int, int]] grid = [ [0, 0, 0, 0, 0, 0, 0], @@ -20,7 +23,9 @@ class Node: - def __init__(self, pos_x, pos_y, goal_x, goal_y, parent): + def __init__( + self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Optional[Node] + ): self.pos_x = pos_x self.pos_y = pos_y self.pos = (pos_y, pos_x) @@ -45,14 +50,14 @@ class BreadthFirstSearch: (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)] """ - def __init__(self, start, goal): + def __init__(self, start: tuple[int, int], goal: tuple[int, int]): self.start = Node(start[1], start[0], goal[1], goal[0], None) self.target = Node(goal[1], goal[0], goal[1], goal[0], None) self.node_queue = [self.start] self.reached = False - def search(self) -> list[tuple[int]]: + def search(self) -> Optional[Path]: while self.node_queue: current_node = self.node_queue.pop(0) @@ -65,8 +70,9 @@ def search(self) -> list[tuple[int]]: for node in successors: 
self.node_queue.append(node) - if not (self.reached): - return [(self.start.pos)] + if not self.reached: + return [self.start.pos] + return None def get_successors(self, parent: Node) -> list[Node]: """ @@ -87,7 +93,7 @@ def get_successors(self, parent: Node) -> list[Node]: ) return successors - def retrace_path(self, node: Node) -> list[tuple[int]]: + def retrace_path(self, node: Optional[Node]) -> Path: """ Retrace the path from parents to parents until start node """ @@ -119,7 +125,7 @@ def __init__(self, start, goal): self.bwd_bfs = BreadthFirstSearch(goal, start) self.reached = False - def search(self) -> list[tuple[int]]: + def search(self) -> Optional[Path]: while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: current_fwd_node = self.fwd_bfs.node_queue.pop(0) current_bwd_node = self.bwd_bfs.node_queue.pop(0) @@ -144,10 +150,9 @@ def search(self) -> list[tuple[int]]: if not self.reached: return [self.fwd_bfs.start.pos] + return None - def retrace_bidirectional_path( - self, fwd_node: Node, bwd_node: Node - ) -> list[tuple[int]]: + def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path: fwd_path = self.fwd_bfs.retrace_path(fwd_node) bwd_path = self.bwd_bfs.retrace_path(bwd_node) bwd_path.pop() From 2cbadc88ab1f3252364465c7660cdb96a377eace Mon Sep 17 00:00:00 2001 From: dpittaluga76 Date: Mon, 12 Jul 2021 03:16:31 -0300 Subject: [PATCH 0170/1543] Improves readability and processing time (#4510) * Removes overuse of lambdas, improves readability and processing time when it finds bitstring to print out. Removes overuse of lambdas, uses dictionary instead. This improves readability and processing time when it finds the bitstring to print out. * Update huffman.py --- compression/huffman.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/compression/huffman.py b/compression/huffman.py index 3a3cbfa4b0c6..b6cc4de1e8e6 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -5,7 +5,7 @@ class Letter: def __init__(self, letter, freq): self.letter = letter self.freq = freq - self.bitstring = "" + self.bitstring = {} def __repr__(self): return f"{self.letter}:{self.freq}" @@ -51,10 +51,10 @@ def build_tree(letters): def traverse_tree(root, bitstring): """ Recursively traverse the Huffman Tree to set each - Letter's bitstring, and return the list of Letters + Letter's bitstring dictionary, and return the list of Letters """ if type(root) is Letter: - root.bitstring = bitstring + root.bitstring[root.letter] = bitstring return [root] letters = [] letters += traverse_tree(root.left, bitstring + "0") @@ -65,20 +65,21 @@ def traverse_tree(root, bitstring): def huffman(file_path): """ Parse the file, build the tree, then run through the file - again, using the list of Letters to find and print out the + again, using the letters dictionary to find and print out the bitstring for each letter. 
""" letters_list = parse_file(file_path) root = build_tree(letters_list) - letters = traverse_tree(root, "") - print(f"Huffman Coding of {file_path}: ") + letters = { + k: v for letter in traverse_tree(root, "") for k, v in letter.bitstring.items() + } + print(f"Huffman Coding of {file_path}: ") with open(file_path) as f: while True: c = f.read(1) if not c: break - le = list(filter(lambda l: l.letter == c, letters))[0] - print(le.bitstring, end=" ") + print(letters[c], end=" ") print() From 7046fdcdc800badef84ee06f801386d62a3914e4 Mon Sep 17 00:00:00 2001 From: bum_fuzzle <72404701+bumfuzzle33@users.noreply.github.com> Date: Mon, 19 Jul 2021 19:36:43 +0530 Subject: [PATCH 0171/1543] fixed #4529 (#4547) fixed an indentation mistake --- hashes/sha1.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hashes/sha1.py b/hashes/sha1.py index cca38b7c3fdc..dde1efc557bb 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -118,13 +118,13 @@ def final_hash(self): c, d, ) - self.h = ( - self.h[0] + a & 0xFFFFFFFF, - self.h[1] + b & 0xFFFFFFFF, - self.h[2] + c & 0xFFFFFFFF, - self.h[3] + d & 0xFFFFFFFF, - self.h[4] + e & 0xFFFFFFFF, - ) + self.h = ( + self.h[0] + a & 0xFFFFFFFF, + self.h[1] + b & 0xFFFFFFFF, + self.h[2] + c & 0xFFFFFFFF, + self.h[3] + d & 0xFFFFFFFF, + self.h[4] + e & 0xFFFFFFFF, + ) return "%08x%08x%08x%08x%08x" % tuple(self.h) From eca37b1537ce9df227e1c2a915b2841093b9c028 Mon Sep 17 00:00:00 2001 From: Lucifer <63491234+ashish-patwal@users.noreply.github.com> Date: Mon, 19 Jul 2021 21:10:18 +0530 Subject: [PATCH 0172/1543] Random anime character info (#4553) * fixed colons and spaces * fixed colons and spaces * random anime character python script * more tests passed * type hint updated Co-authored-by: Christian Clauss * type hint updated again Co-authored-by: Christian Clauss * Update random_anime_character.py Co-authored-by: Christian Clauss --- web_programming/random_anime_character.py | 37 +++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 web_programming/random_anime_character.py diff --git a/web_programming/random_anime_character.py b/web_programming/random_anime_character.py new file mode 100644 index 000000000000..f15a9c05d9e5 --- /dev/null +++ b/web_programming/random_anime_character.py @@ -0,0 +1,37 @@ +import os + +import requests +from bs4 import BeautifulSoup +from fake_useragent import UserAgent + +headers = {"UserAgent": UserAgent().random} +URL = "https://www.mywaifulist.moe/random" + + +def save_image(image_url: str, image_title: str) -> None: + """ + Saves the image of anime character + """ + image = requests.get(image_url, headers=headers) + with open(image_title, "wb") as file: + file.write(image.content) + + +def random_anime_character() -> tuple[str, str, str]: + """ + Returns the Title, Description, and Image Title of a random anime character . 
+ """ + soup = BeautifulSoup(requests.get(URL, headers=headers).text, "html.parser") + title = soup.find("meta", attrs={"property": "og:title"}).attrs["content"] + image_url = soup.find("meta", attrs={"property": "og:image"}).attrs["content"] + description = soup.find("p", id="description").get_text() + _, image_extension = os.path.splitext(os.path.basename(image_url)) + image_title = title.strip().replace(" ", "_") + image_title = f"{image_title}{image_extension}" + save_image(image_url, image_title) + return (title, description, image_title) + + +if __name__ == "__main__": + title, desc, image_title = random_anime_character() + print(f"{title}\n\n{desc}\n\nImage saved : {image_title}") From 72aa4cc315ca0a70e09026884556cb724145d12e Mon Sep 17 00:00:00 2001 From: SURYAPRATAP SINGH SURYAVANSHI <67123991+suryapratapsinghsuryavanshi@users.noreply.github.com> Date: Tue, 20 Jul 2021 13:05:21 +0530 Subject: [PATCH 0173/1543] add phone_validator method (#4552) * add phone_validator method * change the phone_validator to indian_phone_validator * Unnecessary comments removed * all comments deleted * Fixes: #{} new line issue * code reformatted using black --- strings/indian_phone_validator.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 strings/indian_phone_validator.py diff --git a/strings/indian_phone_validator.py b/strings/indian_phone_validator.py new file mode 100644 index 000000000000..d544e92661b1 --- /dev/null +++ b/strings/indian_phone_validator.py @@ -0,0 +1,28 @@ +import re + + +def indian_phone_validator(phone: str) -> bool: + """ + Determine whether the string is a valid phone number or not + :param phone: + :return: Boolean + >>> indian_phone_validator("+91123456789") + False + >>> indian_phone_validator("+919876543210") + True + >>> indian_phone_validator("01234567896") + False + >>> indian_phone_validator("919876543218") + True + >>> indian_phone_validator("+91-1234567899") + False + """ + pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$") + match = re.search(pat, phone) + if match: + return match.string == phone + return False + + +if __name__ == "__main__": + print(indian_phone_validator("+918827897895")) From 4a2216b69a941b39ce279e475e383db44836df1d Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Tue, 20 Jul 2021 13:36:14 +0600 Subject: [PATCH 0174/1543] Fix mypy errors at bidirectional_a_star (#4556) --- graphs/bidirectional_a_star.py | 42 +++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/graphs/bidirectional_a_star.py b/graphs/bidirectional_a_star.py index 72ff4fa65ff0..729d8957bdef 100644 --- a/graphs/bidirectional_a_star.py +++ b/graphs/bidirectional_a_star.py @@ -8,6 +8,8 @@ from math import sqrt # 1 for manhattan, 0 for euclidean +from typing import Optional + HEURISTIC = 0 grid = [ @@ -22,6 +24,8 @@ delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right +TPosition = tuple[int, int] + class Node: """ @@ -39,7 +43,15 @@ class Node: True """ - def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent): + def __init__( + self, + pos_x: int, + pos_y: int, + goal_x: int, + goal_y: int, + g_cost: int, + parent: Optional[Node], + ) -> None: self.pos_x = pos_x self.pos_y = pos_y self.pos = (pos_y, pos_x) @@ -61,7 +73,7 @@ def calculate_heuristic(self) -> float: else: return sqrt(dy ** 2 + dx ** 2) - def __lt__(self, other) -> bool: + def __lt__(self, other: Node) -> bool: return self.f_cost < other.f_cost @@ -81,23 +93,22 @@ class AStar: (4, 3), (4, 4), (5, 4), (5, 
5), (6, 5), (6, 6)] """ - def __init__(self, start, goal): + def __init__(self, start: TPosition, goal: TPosition): self.start = Node(start[1], start[0], goal[1], goal[0], 0, None) self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None) self.open_nodes = [self.start] - self.closed_nodes = [] + self.closed_nodes: list[Node] = [] self.reached = False - def search(self) -> list[tuple[int]]: + def search(self) -> list[TPosition]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() current_node = self.open_nodes.pop(0) if current_node.pos == self.target.pos: - self.reached = True return self.retrace_path(current_node) self.closed_nodes.append(current_node) @@ -118,8 +129,7 @@ def search(self) -> list[tuple[int]]: else: self.open_nodes.append(better_node) - if not (self.reached): - return [(self.start.pos)] + return [self.start.pos] def get_successors(self, parent: Node) -> list[Node]: """ @@ -147,7 +157,7 @@ def get_successors(self, parent: Node) -> list[Node]: ) return successors - def retrace_path(self, node: Node) -> list[tuple[int]]: + def retrace_path(self, node: Optional[Node]) -> list[TPosition]: """ Retrace the path from parents to parents until start node """ @@ -173,12 +183,12 @@ class BidirectionalAStar: (2, 5), (3, 5), (4, 5), (5, 5), (5, 6), (6, 6)] """ - def __init__(self, start, goal): + def __init__(self, start: TPosition, goal: TPosition) -> None: self.fwd_astar = AStar(start, goal) self.bwd_astar = AStar(goal, start) self.reached = False - def search(self) -> list[tuple[int]]: + def search(self) -> list[TPosition]: while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() @@ -186,7 +196,6 @@ def search(self) -> list[tuple[int]]: current_bwd_node = self.bwd_astar.open_nodes.pop(0) if current_bwd_node.pos == current_fwd_node.pos: - self.reached = True return self.retrace_bidirectional_path( current_fwd_node, current_bwd_node ) @@ -220,12 +229,11 @@ def search(self) -> list[tuple[int]]: else: astar.open_nodes.append(better_node) - if not self.reached: - return [self.fwd_astar.start.pos] + return [self.fwd_astar.start.pos] def retrace_bidirectional_path( self, fwd_node: Node, bwd_node: Node - ) -> list[tuple[int]]: + ) -> list[TPosition]: fwd_path = self.fwd_astar.retrace_path(fwd_node) bwd_path = self.bwd_astar.retrace_path(bwd_node) bwd_path.pop() @@ -236,9 +244,6 @@ def retrace_bidirectional_path( if __name__ == "__main__": # all coordinates are given in format [y,x] - import doctest - - doctest.testmod() init = (0, 0) goal = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: @@ -252,6 +257,5 @@ def retrace_bidirectional_path( bd_start_time = time.time() bidir_astar = BidirectionalAStar(init, goal) - path = bidir_astar.search() bd_end_time = time.time() - bd_start_time print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds") From bc09ba9abfa220c893b23969a4b8de05f5ced1e1 Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Tue, 20 Jul 2021 17:24:27 +0600 Subject: [PATCH 0175/1543] Fix mypy errors at graph_list (#4557) --- graphs/graph_list.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/graphs/graph_list.py b/graphs/graph_list.py index bab6d6893a89..f04b7a92390d 100644 --- a/graphs/graph_list.py +++ b/graphs/graph_list.py @@ -3,11 +3,15 @@ # Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. 
+from __future__ import annotations from pprint import pformat +from typing import Generic, TypeVar +T = TypeVar("T") -class GraphAdjacencyList: + +class GraphAdjacencyList(Generic[T]): """ Adjacency List type Graph Data Structure that accounts for directed and undirected Graphs. Initialize graph object indicating whether it's directed or undirected. @@ -59,18 +63,27 @@ class GraphAdjacencyList: 5: [1, 4], 6: [2], 7: [2]} + >>> char_graph = GraphAdjacencyList(directed=False) + >>> char_graph.add_edge('a', 'b') + {'a': ['b'], 'b': ['a']} + >>> char_graph.add_edge('b', 'c').add_edge('b', 'e').add_edge('b', 'f') + {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']} + >>> print(char_graph) + {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']} """ - def __init__(self, directed: bool = True): + def __init__(self, directed: bool = True) -> None: """ Parameters: directed: (bool) Indicates if graph is directed or undirected. Default is True. """ - self.adj_list = {} # dictionary of lists + self.adj_list: dict[T, list[T]] = {} # dictionary of lists self.directed = directed - def add_edge(self, source_vertex: int, destination_vertex: int) -> object: + def add_edge( + self, source_vertex: T, destination_vertex: T + ) -> GraphAdjacencyList[T]: """ Connects vertices together. Creates and Edge from source vertex to destination vertex. @@ -135,9 +148,3 @@ def add_edge(self, source_vertex: int, destination_vertex: int) -> object: def __repr__(self) -> str: return pformat(self.adj_list) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 7342b336587af1e57eb9888203a1ae80832bde28 Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Wed, 21 Jul 2021 11:59:18 +0600 Subject: [PATCH 0176/1543] Fix mypy erros at strongly connected component (#4558) --- graphs/strongly_connected_components.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/graphs/strongly_connected_components.py b/graphs/strongly_connected_components.py index d469df0c625b..325e5c1f33a3 100644 --- a/graphs/strongly_connected_components.py +++ b/graphs/strongly_connected_components.py @@ -10,7 +10,9 @@ test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} -def topology_sort(graph: dict, vert: int, visited: list) -> list: +def topology_sort( + graph: dict[int, list[int]], vert: int, visited: list[bool] +) -> list[int]: """ Use depth first search to sort graph At this time graph is the same as input @@ -32,7 +34,9 @@ def topology_sort(graph: dict, vert: int, visited: list) -> list: return order -def find_components(reversed_graph: dict, vert: int, visited: list) -> list: +def find_components( + reversed_graph: dict[int, list[int]], vert: int, visited: list[bool] +) -> list[int]: """ Use depth first search to find strongliy connected vertices. 
Now graph is reversed @@ -52,7 +56,7 @@ def find_components(reversed_graph: dict, vert: int, visited: list) -> list: return component -def strongly_connected_components(graph: dict) -> list: +def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]: """ This function takes graph as a parameter and then returns the list of strongly connected components @@ -63,7 +67,7 @@ def strongly_connected_components(graph: dict) -> list: """ visited = len(graph) * [False] - reversed_graph = {vert: [] for vert in range(len(graph))} + reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))} for vert, neighbours in graph.items(): for neighbour in neighbours: @@ -84,9 +88,3 @@ def strongly_connected_components(graph: dict) -> list: components_list.append(component) return components_list - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 407c97906393ddaea43e2c21174c6cdaeb57dcfa Mon Sep 17 00:00:00 2001 From: Suyash Shrivastava <65887107+suyash2796@users.noreply.github.com> Date: Wed, 21 Jul 2021 11:31:55 +0530 Subject: [PATCH 0177/1543] [Mypy fix] fix secant method (#4501) * case switch using python * review comments * added type hints * general code format * [mypy] Fix type annotations for secant_method.py * remove bad push --- arithmetic_analysis/secant_method.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arithmetic_analysis/secant_method.py b/arithmetic_analysis/secant_method.py index 7eb1dd8f5c6b..45bcb185fc3e 100644 --- a/arithmetic_analysis/secant_method.py +++ b/arithmetic_analysis/secant_method.py @@ -26,4 +26,4 @@ def secant_method(lower_bound: float, upper_bound: float, repeats: int) -> float if __name__ == "__main__": - print(f"Example: {secant_method(1, 3, 2) = }") + print(f"Example: {secant_method(1, 3, 2)}") From 7634cf0d60d15986456e35d82d3f3eb1c6a53c26 Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Mon, 26 Jul 2021 18:45:40 +0600 Subject: [PATCH 0178/1543] Fix mypy errors at gale_shapely_bigraph (#4568) --- graphs/gale_shapley_bigraph.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/graphs/gale_shapley_bigraph.py b/graphs/gale_shapley_bigraph.py index 59baf8296ea6..56b8c6c77bcb 100644 --- a/graphs/gale_shapley_bigraph.py +++ b/graphs/gale_shapley_bigraph.py @@ -1,7 +1,9 @@ from __future__ import annotations -def stable_matching(donor_pref: list[int], recipient_pref: list[int]) -> list[int]: +def stable_matching( + donor_pref: list[list[int]], recipient_pref: list[list[int]] +) -> list[int]: """ Finds the stable match in any bipartite graph, i.e a pairing where no 2 objects prefer each other over their partner. 
The function accepts the preferences of @@ -19,11 +21,13 @@ def stable_matching(donor_pref: list[int], recipient_pref: list[int]) -> list[in [1, 2, 3, 0] """ assert len(donor_pref) == len(recipient_pref) + n = len(donor_pref) unmatched_donors = list(range(n)) donor_record = [-1] * n # who the donor has donated to rec_record = [-1] * n # who the recipient has received from num_donations = [0] * n + while unmatched_donors: donor = unmatched_donors[0] donor_preference = donor_pref[donor] @@ -31,6 +35,7 @@ def stable_matching(donor_pref: list[int], recipient_pref: list[int]) -> list[in num_donations[donor] += 1 rec_preference = recipient_pref[recipient] prev_donor = rec_record[recipient] + if prev_donor != -1: if rec_preference.index(prev_donor) > rec_preference.index(donor): rec_record[recipient] = donor From 6732fa013119aa2003f24d1dac1bdc4ee435ee5e Mon Sep 17 00:00:00 2001 From: arfy slowy Date: Mon, 26 Jul 2021 19:52:52 +0700 Subject: [PATCH 0179/1543] [fixed] module 'numpy' is imported with both 'import' and 'import from' (#4544) * [fixed] module 'numy' is imported with both 'import' and 'import from' * remove commented --- arithmetic_analysis/lu_decomposition.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index ef37d1b7b4ef..5bb631758c21 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -6,10 +6,9 @@ from typing import Tuple import numpy as np -from numpy import ndarray -def lower_upper_decomposition(table: ndarray) -> Tuple[ndarray, ndarray]: +def lower_upper_decomposition(table: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Lower-Upper (LU) Decomposition Example: From c5003a2c462c7c775992ad9a733c360126702dd4 Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Tue, 27 Jul 2021 14:09:17 +0600 Subject: [PATCH 0180/1543] Fix mypy errors at bfs_shortest_path algo (#4572) --- graphs/breadth_first_search_shortest_path.py | 28 +++++++++++--------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/graphs/breadth_first_search_shortest_path.py b/graphs/breadth_first_search_shortest_path.py index b43479d4659c..48f8ab1a4956 100644 --- a/graphs/breadth_first_search_shortest_path.py +++ b/graphs/breadth_first_search_shortest_path.py @@ -3,6 +3,8 @@ """ from __future__ import annotations +from typing import Optional + graph = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], @@ -15,17 +17,19 @@ class Graph: - def __init__(self, graph: dict[str, str], source_vertex: str) -> None: - """Graph is implemented as dictionary of adjacency lists. Also, + def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None: + """ + Graph is implemented as dictionary of adjacency lists. Also, Source vertex have to be defined upon initialization. """ self.graph = graph # mapping node to its parent in resulting breadth first tree - self.parent = {} + self.parent: dict[str, Optional[str]] = {} self.source_vertex = source_vertex def breath_first_search(self) -> None: - """This function is a helper for running breath first search on this graph. + """ + This function is a helper for running breath first search on this graph. 
>>> g = Graph(graph, "G") >>> g.breath_first_search() >>> g.parent @@ -44,7 +48,8 @@ def breath_first_search(self) -> None: queue.append(adjacent_vertex) def shortest_path(self, target_vertex: str) -> str: - """This shortest path function returns a string, describing the result: + """ + This shortest path function returns a string, describing the result: 1.) No path is found. The string is a human readable message to indicate this. 2.) The shortest path is found. The string is in the form `v1(->v2->v3->...->vn)`, where v1 is the source vertex and vn is the target @@ -64,17 +69,16 @@ def shortest_path(self, target_vertex: str) -> str: 'G' """ if target_vertex == self.source_vertex: - return f"{self.source_vertex}" - elif not self.parent.get(target_vertex): + return self.source_vertex + + target_vertex_parent = self.parent.get(target_vertex) + if target_vertex_parent is None: return f"No path from vertex:{self.source_vertex} to vertex:{target_vertex}" - else: - return self.shortest_path(self.parent[target_vertex]) + f"->{target_vertex}" + return self.shortest_path(target_vertex_parent) + f"->{target_vertex}" -if __name__ == "__main__": - import doctest - doctest.testmod() +if __name__ == "__main__": g = Graph(graph, "G") g.breath_first_search() print(g.shortest_path("D")) From a4b7d12262d41969cf1fc9d5dbf9b9a01f165f4c Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Tue, 27 Jul 2021 17:21:00 +0600 Subject: [PATCH 0181/1543] Fix mypy errors at greedy best first algo (#4575) --- graphs/greedy_best_first.py | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py index 4b80a6853d3f..d5e80247a9b4 100644 --- a/graphs/greedy_best_first.py +++ b/graphs/greedy_best_first.py @@ -4,6 +4,10 @@ from __future__ import annotations +from typing import Optional + +Path = list[tuple[int, int]] + grid = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles @@ -33,7 +37,15 @@ class Node: True """ - def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent): + def __init__( + self, + pos_x: int, + pos_y: int, + goal_x: int, + goal_y: int, + g_cost: float, + parent: Optional[Node], + ): self.pos_x = pos_x self.pos_y = pos_y self.pos = (pos_y, pos_x) @@ -72,16 +84,16 @@ class GreedyBestFirst: (6, 2), (6, 3), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)] """ - def __init__(self, start, goal): + def __init__(self, start: tuple[int, int], goal: tuple[int, int]): self.start = Node(start[1], start[0], goal[1], goal[0], 0, None) self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None) self.open_nodes = [self.start] - self.closed_nodes = [] + self.closed_nodes: list[Node] = [] self.reached = False - def search(self) -> list[tuple[int]]: + def search(self) -> Optional[Path]: """ Search for the path, if a path is not found, only the starting position is returned @@ -113,8 +125,9 @@ def search(self) -> list[tuple[int]]: else: self.open_nodes.append(better_node) - if not (self.reached): + if not self.reached: return [self.start.pos] + return None def get_successors(self, parent: Node) -> list[Node]: """ @@ -143,7 +156,7 @@ def get_successors(self, parent: Node) -> list[Node]: ) return successors - def retrace_path(self, node: Node) -> list[tuple[int]]: + def retrace_path(self, node: Optional[Node]) -> Path: """ Retrace the path from parents to parents until start node """ @@ -166,9 +179,9 @@ def retrace_path(self, node: Node) -> list[tuple[int]]: greedy_bf = 
GreedyBestFirst(init, goal) path = greedy_bf.search() + if path: + for pos_x, pos_y in path: + grid[pos_x][pos_y] = 2 - for elem in path: - grid[elem[0]][elem[1]] = 2 - - for elem in grid: - print(elem) + for elem in grid: + print(elem) From 40d85d54433eccff19b4434c7073f2a7b6127426 Mon Sep 17 00:00:00 2001 From: Milton Chandro Bhowmick Date: Wed, 28 Jul 2021 16:50:21 +0600 Subject: [PATCH 0182/1543] Modified the a_star [dot] py for making readable (#4576) --- graphs/a_star.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphs/a_star.py b/graphs/a_star.py index cb5b2fcd16e8..d3657cb19540 100644 --- a/graphs/a_star.py +++ b/graphs/a_star.py @@ -44,7 +44,7 @@ def search(grid, init, goal, cost, heuristic): x = init[0] y = init[1] g = 0 - f = g + heuristic[init[0]][init[0]] + f = g + heuristic[x][y] # cost from starting cell to destination cell cell = [[f, g, x, y]] found = False # flag that is set when search is complete From a5bcf0f6749a93a44f7a981edc9b0e35fbd066f2 Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Thu, 29 Jul 2021 19:14:35 +0600 Subject: [PATCH 0183/1543] Fix mypy errors at even_tree algo (#4579) --- graphs/even_tree.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/graphs/even_tree.py b/graphs/even_tree.py index c9aef6e7861f..92ffb4b232f7 100644 --- a/graphs/even_tree.py +++ b/graphs/even_tree.py @@ -16,12 +16,12 @@ from collections import defaultdict -def dfs(start): +def dfs(start: int) -> int: """DFS traversal""" # pylint: disable=redefined-outer-name ret = 1 visited[start] = True - for v in tree.get(start): + for v in tree[start]: if v not in visited: ret += dfs(v) if ret % 2 == 0: @@ -48,8 +48,8 @@ def even_tree(): if __name__ == "__main__": n, m = 10, 9 tree = defaultdict(list) - visited = {} - cuts = [] + visited: dict[int, bool] = {} + cuts: list[int] = [] count = 0 edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: From da71184b04837d2bc934f9947b4c262da096f349 Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Mon, 2 Aug 2021 18:40:48 +0600 Subject: [PATCH 0184/1543] Fix mypy errors at mst_kruskal (#4581) --- graphs/minimum_spanning_tree_kruskal.py | 15 +++++++-------- graphs/tests/test_min_spanning_tree_kruskal.py | 4 ++-- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/graphs/minimum_spanning_tree_kruskal.py b/graphs/minimum_spanning_tree_kruskal.py index a51f970341f7..f21a87a7d534 100644 --- a/graphs/minimum_spanning_tree_kruskal.py +++ b/graphs/minimum_spanning_tree_kruskal.py @@ -1,15 +1,14 @@ -from typing import List, Tuple - - -def kruskal(num_nodes: int, num_edges: int, edges: List[Tuple[int, int, int]]) -> int: +def kruskal( + num_nodes: int, edges: list[tuple[int, int, int]] +) -> list[tuple[int, int, int]]: """ - >>> kruskal(4, 3, [(0, 1, 3), (1, 2, 5), (2, 3, 1)]) + >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1)]) [(2, 3, 1), (0, 1, 3), (1, 2, 5)] - >>> kruskal(4, 5, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2)]) + >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2)]) [(2, 3, 1), (0, 2, 1), (0, 1, 3)] - >>> kruskal(4, 6, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2), + >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2), ... 
(2, 1, 1)]) [(2, 3, 1), (0, 2, 1), (2, 1, 1)] """ @@ -44,4 +43,4 @@ def find_parent(i): node1, node2, cost = [int(x) for x in input().strip().split()] edges.append((node1, node2, cost)) - kruskal(num_nodes, num_edges, edges) + kruskal(num_nodes, edges) diff --git a/graphs/tests/test_min_spanning_tree_kruskal.py b/graphs/tests/test_min_spanning_tree_kruskal.py index 3a527aef384f..d6df242ec6d1 100644 --- a/graphs/tests/test_min_spanning_tree_kruskal.py +++ b/graphs/tests/test_min_spanning_tree_kruskal.py @@ -2,7 +2,7 @@ def test_kruskal_successful_result(): - num_nodes, num_edges = 9, 14 + num_nodes = 9 edges = [ [0, 1, 4], [0, 7, 8], @@ -20,7 +20,7 @@ def test_kruskal_successful_result(): [1, 7, 11], ] - result = kruskal(num_nodes, num_edges, edges) + result = kruskal(num_nodes, edges) expected = [ [7, 6, 1], From 5957eabd3e0c92650dba0962779b8729d2875209 Mon Sep 17 00:00:00 2001 From: jonabtc <39396756+jonabtc@users.noreply.github.com> Date: Tue, 3 Aug 2021 01:03:22 -0500 Subject: [PATCH 0185/1543] Adding the double factorial algorithm (#4550) --- maths/double_factorial_recursive.py | 31 +++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 maths/double_factorial_recursive.py diff --git a/maths/double_factorial_recursive.py b/maths/double_factorial_recursive.py new file mode 100644 index 000000000000..05c9b29680a7 --- /dev/null +++ b/maths/double_factorial_recursive.py @@ -0,0 +1,31 @@ +def double_factorial(n: int) -> int: + """ + Compute double factorial using recursive method. + Recursion can be costly for large numbers. + + To learn about the theory behind this algorithm: + https://en.wikipedia.org/wiki/Double_factorial + + >>> import math + >>> all(double_factorial(i) == math.prod(range(i, 0, -2)) for i in range(20)) + True + >>> double_factorial(0.1) + Traceback (most recent call last): + ... + ValueError: double_factorial() only accepts integral values + >>> double_factorial(-1) + Traceback (most recent call last): + ... + ValueError: double_factorial() not defined for negative values + """ + if not isinstance(n, int): + raise ValueError("double_factorial() only accepts integral values") + if n < 0: + raise ValueError("double_factorial() not defined for negative values") + return 1 if n <= 1 else n * double_factorial(n - 2) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f432bc76a6a8851b8bd2f29a45761a4ec538561f Mon Sep 17 00:00:00 2001 From: SURYAPRATAP SINGH SURYAVANSHI <67123991+suryapratapsinghsuryavanshi@users.noreply.github.com> Date: Fri, 6 Aug 2021 15:45:42 +0530 Subject: [PATCH 0186/1543] add alternative_string_arrange method (#4595) * add alternative_string_arrange method * fix issue * fix one more issue * changed the variable name li to output_list --- strings/alternative_string_arrange.py | 31 +++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 strings/alternative_string_arrange.py diff --git a/strings/alternative_string_arrange.py b/strings/alternative_string_arrange.py new file mode 100644 index 000000000000..d81ddd8a1574 --- /dev/null +++ b/strings/alternative_string_arrange.py @@ -0,0 +1,31 @@ +def alternative_string_arrange(first_str: str, second_str: str) -> str: + """ + Return the alternative arrangements of the two strings. 
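    Characters are taken alternately from the two strings, and whatever remains of the longer string is appended at the end.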
+ :param first_str: + :param second_str: + :return: String + >>> alternative_string_arrange("ABCD", "XY") + 'AXBYCD' + >>> alternative_string_arrange("XY", "ABCD") + 'XAYBCD' + >>> alternative_string_arrange("AB", "XYZ") + 'AXBYZ' + >>> alternative_string_arrange("ABC", "") + 'ABC' + """ + first_str_length: int = len(first_str) + second_str_length: int = len(second_str) + abs_length: int = ( + first_str_length if first_str_length > second_str_length else second_str_length + ) + output_list: list = [] + for char_count in range(abs_length): + if char_count < first_str_length: + output_list.append(first_str[char_count]) + if char_count < second_str_length: + output_list.append(second_str[char_count]) + return "".join(output_list) + + +if __name__ == "__main__": + print(alternative_string_arrange("AB", "XYZ"), end=" ") From 63ac09eeae6805ae25a3f994cb18e5df254ba4b6 Mon Sep 17 00:00:00 2001 From: Shubham Ganar <67952129+shubhamsg199@users.noreply.github.com> Date: Sun, 8 Aug 2021 23:51:26 +0530 Subject: [PATCH 0187/1543] Created check_valid_ip_address.py (#4602) * Created check_valid_ip_address.py * fixed typos error Co-authored-by: root --- maths/check_valid_ip_address.py | 46 +++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 maths/check_valid_ip_address.py diff --git a/maths/check_valid_ip_address.py b/maths/check_valid_ip_address.py new file mode 100644 index 000000000000..6e8d35ebc44c --- /dev/null +++ b/maths/check_valid_ip_address.py @@ -0,0 +1,46 @@ +""" +Checking valid Ip Address. +A valid IP address must be in the form of A.B.C.D, +where A,B,C and D are numbers from 0-254 +for example: 192.168.23.1, 172.254.254.254 are valid IP address + 192.168.255.0, 255.192.3.121 are Invalid IP address +""" + + +def check_valid_ip(ip: str) -> bool: + """ + print "Valid IP address" If IP is valid. + or + print "Invalid IP address" If IP is Invalid. 
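    Only the numeric dot-separated parts are inspected, so a stricter validator would also confirm that exactly four octets are present.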
+ + >>> check_valid_ip("192.168.0.23") + True + + >>> check_valid_ip("192.255.15.8") + False + + >>> check_valid_ip("172.100.0.8") + True + + >>> check_valid_ip("254.255.0.255") + False + """ + ip1 = ip.replace(".", " ") + list1 = [int(i) for i in ip1.split() if i.isdigit()] + count = 0 + for i in list1: + if i > 254: + count += 1 + break + if count: + return False + return True + + +if __name__ == "__main__": + ip = input() + output = check_valid_ip(ip) + if output is True: + print(f"{ip} is a Valid IP address") + else: + print(f"{ip} is an Invalid IP address") From d668c172b07bf9f54d63dc295016a96ec782a541 Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Thu, 12 Aug 2021 02:48:53 +0600 Subject: [PATCH 0188/1543] Refactor graph_initialization at basic_graph.py (#4601) --- graphs/basic_graphs.py | 97 +++++++++++++++++++++++++++--------------- 1 file changed, 62 insertions(+), 35 deletions(-) diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index 0f73d8d07b2a..9cd6dd0f9635 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -1,42 +1,69 @@ from collections import deque + +def _input(message): + return input(message).strip().split(" ") + + +def initialize_unweighted_directed_graph( + node_count: int, edge_count: int +) -> dict[int, list[int]]: + graph: dict[int, list[int]] = {} + for i in range(node_count): + graph[i + 1] = [] + + for e in range(edge_count): + x, y = [int(i) for i in _input(f"Edge {e + 1}: ")] + graph[x].append(y) + return graph + + +def initialize_unweighted_undirected_graph( + node_count: int, edge_count: int +) -> dict[int, list[int]]: + graph: dict[int, list[int]] = {} + for i in range(node_count): + graph[i + 1] = [] + + for e in range(edge_count): + x, y = [int(i) for i in _input(f"Edge {e + 1}: ")] + graph[x].append(y) + graph[y].append(x) + return graph + + +def initialize_weighted_undirected_graph( + node_count: int, edge_count: int +) -> dict[int, list[tuple[int, int]]]: + graph: dict[int, list[tuple[int, int]]] = {} + for i in range(node_count): + graph[i + 1] = [] + + for e in range(edge_count): + x, y, w = [int(i) for i in _input(f"Edge {e + 1}: ")] + graph[x].append((y, w)) + graph[y].append((x, w)) + return graph + + if __name__ == "__main__": - # Accept No. of Nodes and edges - n, m = map(int, input().split(" ")) + n, m = [int(i) for i in _input("Number of nodes and edges: ")] + + graph_choice = int( + _input( + "Press 1 or 2 or 3 \n" + "1. Unweighted directed \n" + "2. Unweighted undirected \n" + "3. 
Weighted undirected \n" + )[0] + ) + + g = { + 1: initialize_unweighted_directed_graph, + 2: initialize_unweighted_undirected_graph, + 3: initialize_weighted_undirected_graph, + }[graph_choice](n, m) - # Initialising Dictionary of edges - g = {} - for i in range(n): - g[i + 1] = [] - - """ - ---------------------------------------------------------------------------- - Accepting edges of Unweighted Directed Graphs - ---------------------------------------------------------------------------- - """ - for _ in range(m): - x, y = map(int, input().strip().split(" ")) - g[x].append(y) - - """ - ---------------------------------------------------------------------------- - Accepting edges of Unweighted Undirected Graphs - ---------------------------------------------------------------------------- - """ - for _ in range(m): - x, y = map(int, input().strip().split(" ")) - g[x].append(y) - g[y].append(x) - - """ - ---------------------------------------------------------------------------- - Accepting edges of Weighted Undirected Graphs - ---------------------------------------------------------------------------- - """ - for _ in range(m): - x, y, r = map(int, input().strip().split(" ")) - g[x].append([y, r]) - g[y].append([x, r]) """ -------------------------------------------------------------------------------- From cd987372e4c3a9f87d65b757ab46a48527fc9fa9 Mon Sep 17 00:00:00 2001 From: Hasanul Islam Date: Fri, 13 Aug 2021 13:10:24 +0600 Subject: [PATCH 0189/1543] Fix multi heuristic astar algo (#4612) --- graphs/multi_heuristic_astar.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index 77ca5760d5f0..8607f51d8f52 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -2,6 +2,8 @@ import numpy as np +TPos = tuple[int, int] + class PriorityQueue: def __init__(self): @@ -53,24 +55,24 @@ def get(self): return (priority, item) -def consistent_heuristic(P, goal): +def consistent_heuristic(P: TPos, goal: TPos): # euclidean distance a = np.array(P) b = np.array(goal) return np.linalg.norm(a - b) -def heuristic_2(P, goal): +def heuristic_2(P: TPos, goal: TPos): # integer division by time variable return consistent_heuristic(P, goal) // t -def heuristic_1(P, goal): +def heuristic_1(P: TPos, goal: TPos): # manhattan distance return abs(P[0] - goal[0]) + abs(P[1] - goal[1]) -def key(start, i, goal, g_function): +def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]): ans = g_function[start] + W1 * heuristics[i](start, goal) return ans @@ -117,7 +119,7 @@ def do_something(back_pointer, goal, start): quit() -def valid(p): +def valid(p: TPos): if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: @@ -215,7 +217,6 @@ def make_common_ground(): (18, 1), (19, 1), ] -blocks_no = [] blocks_all = make_common_ground() @@ -233,7 +234,7 @@ def make_common_ground(): t = 1 -def multi_a_star(start, goal, n_heuristic): +def multi_a_star(start: TPos, goal: TPos, n_heuristic: int): g_function = {start: 0, goal: float("inf")} back_pointer = {start: -1, goal: -1} open_list = [] @@ -243,8 +244,8 @@ def multi_a_star(start, goal, n_heuristic): open_list.append(PriorityQueue()) open_list[i].put(start, key(start, i, goal, g_function)) - close_list_anchor = [] - close_list_inad = [] + close_list_anchor: list[int] = [] + close_list_inad: list[int] = [] while open_list[0].minkey() < float("inf"): for i in range(1, n_heuristic): # print(open_list[0].minkey(), 
open_list[i].minkey()) From 3c225247b843233a306a94907f862bece6e637dc Mon Sep 17 00:00:00 2001 From: Shubham Ganar <67952129+shubhamsg199@users.noreply.github.com> Date: Fri, 13 Aug 2021 12:40:52 +0530 Subject: [PATCH 0190/1543] [mypy] Fix type annotations for strings/naive_string_search.py (#4611) --- strings/naive_string_search.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/strings/naive_string_search.py b/strings/naive_string_search.py index f28950264121..31599008720c 100644 --- a/strings/naive_string_search.py +++ b/strings/naive_string_search.py @@ -1,10 +1,8 @@ """ https://en.wikipedia.org/wiki/String-searching_algorithm#Na%C3%AFve_string_search - this algorithm tries to find the pattern from every position of the mainString if pattern is found from position i it add it to the answer and does the same for position i+1 - Complexity : O(n*m) n=length of main string m=length of pattern string @@ -39,4 +37,4 @@ def naive_pattern_search(s: str, pattern: str) -> list: if __name__ == "__main__": assert naive_pattern_search("ABCDEFG", "DE") == [3] - print(f"{naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC') = }") + print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC")) From 032999f36ed6eef61752e6bc5e399020988b06bd Mon Sep 17 00:00:00 2001 From: Bonnie <58572137+bonbon99@users.noreply.github.com> Date: Sun, 15 Aug 2021 01:43:05 -0400 Subject: [PATCH 0191/1543] Create exchange_sort.py (#4600) * Create exchange_sort.py added exchange sort * Fixed doctest in exchange_sort.py * Fixed formatting error and added new length variable added empty line at end of exchange_sort.py and turned len(numbers) into a variable * Fixed formatting errors with black added empty line --- sorts/exchange_sort.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 sorts/exchange_sort.py diff --git a/sorts/exchange_sort.py b/sorts/exchange_sort.py new file mode 100644 index 000000000000..1ce78a9dc0cb --- /dev/null +++ b/sorts/exchange_sort.py @@ -0,0 +1,27 @@ +def exchange_sort(numbers: list[int]) -> list[int]: + """ + Uses exchange sort to sort a list of numbers. 
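    Each element is compared with every later element and swapped when out of order, so the running time is O(n^2), much like bubble sort.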
+ Source: https://en.wikipedia.org/wiki/Sorting_algorithm#Exchange_sort + >>> exchange_sort([5, 4, 3, 2, 1]) + [1, 2, 3, 4, 5] + >>> exchange_sort([-1, -2, -3]) + [-3, -2, -1] + >>> exchange_sort([1, 2, 3, 4, 5]) + [1, 2, 3, 4, 5] + >>> exchange_sort([0, 10, -2, 5, 3]) + [-2, 0, 3, 5, 10] + >>> exchange_sort([]) + [] + """ + numbers_length = len(numbers) + for i in range(numbers_length): + for j in range(i + 1, numbers_length): + if numbers[j] < numbers[i]: + numbers[i], numbers[j] = numbers[j], numbers[i] + return numbers + + +if __name__ == "__main__": + user_input = input("Enter numbers separated by a comma:\n").strip() + unsorted = [int(item) for item in user_input.split(",")] + print(exchange_sort(unsorted)) From d009cea391414bfef17520ba6b64e4c2d97163ed Mon Sep 17 00:00:00 2001 From: imp Date: Mon, 16 Aug 2021 03:15:53 +0800 Subject: [PATCH 0192/1543] Fix mypy error at maths (#4613) * Fix mypy errors for maths/greedy_coin_change.py * Fix mypy errors for maths/two_sum.py * Fix mypy errors for maths/triplet_sum.py * Fix the format of maths/greedy_coin_change.py * Fix the format of maths/greedy_coin_change.py * Fix format with pre-commit --- maths/greedy_coin_change.py | 4 ++-- maths/triplet_sum.py | 2 +- maths/two_sum.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/maths/greedy_coin_change.py b/maths/greedy_coin_change.py index 5a7d9e8d84ae..5233ee1cbc12 100644 --- a/maths/greedy_coin_change.py +++ b/maths/greedy_coin_change.py @@ -41,7 +41,7 @@ """ -def find_minimum_change(denominations: list[int], value: int) -> list[int]: +def find_minimum_change(denominations: list[int], value: str) -> list[int]: """ Find the minimum change from the given denominations and value >>> find_minimum_change([1, 5, 10, 20, 50, 100, 200, 500, 1000,2000], 18745) @@ -75,7 +75,7 @@ def find_minimum_change(denominations: list[int], value: int) -> list[int]: if __name__ == "__main__": denominations = list() - value = 0 + value = "0" if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() diff --git a/maths/triplet_sum.py b/maths/triplet_sum.py index 22fab17d30c2..af77ed145bce 100644 --- a/maths/triplet_sum.py +++ b/maths/triplet_sum.py @@ -19,7 +19,7 @@ def make_dataset() -> tuple[list[int], int]: dataset = make_dataset() -def triplet_sum1(arr: list[int], target: int) -> tuple[int, int, int]: +def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]: """ Returns a triplet in the array with sum equal to target, else (0, 0, 0). 
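A short aside on the `tuple[int, ...]` annotation adopted for triplet_sum1 above: when a tuple is built from a list, mypy can only infer a variable-length tuple, which is one common reason the fixed-length form `tuple[int, int, int]` fails to type-check. A minimal sketch of that pattern, using a hypothetical helper that is not part of this patch:

def first_three_sorted(nums: list[int]) -> tuple[int, ...]:
    # tuple() over a list slice has unknown length to mypy,
    # so only the variable-length annotation type-checks here
    return tuple(sorted(nums)[:3]) if len(nums) >= 3 else (0, 0, 0)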
diff --git a/maths/two_sum.py b/maths/two_sum.py index 5209acbc7e44..12ad332d6c4e 100644 --- a/maths/two_sum.py +++ b/maths/two_sum.py @@ -31,7 +31,7 @@ def two_sum(nums: list[int], target: int) -> list[int]: >>> two_sum([3 * i for i in range(10)], 19) [] """ - chk_map = {} + chk_map: dict[int, int] = {} for index, val in enumerate(nums): compl = target - val if compl in chk_map: From 4545270ace03411ec861361329345a36195b881d Mon Sep 17 00:00:00 2001 From: imp Date: Wed, 18 Aug 2021 18:44:26 +0800 Subject: [PATCH 0193/1543] [mypy] Fix type annotations for graphs (#4622) * Fix mypy error for frequent_pattern_graph_miner.py * Fix mypy error for markov_chain.py --- graphs/frequent_pattern_graph_miner.py | 2 +- graphs/markov_chain.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index ff7063082267..8f344b7bd3ae 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -227,6 +227,6 @@ def preprocess(edge_array): support = get_support(cluster) graph = construct_graph(cluster, nodes) find_freq_subgraph_given_support(60, cluster, graph) - paths = [] + paths: list = [] freq_subgraph_edge_list = freq_subgraphs_edge_list(paths) print_all() diff --git a/graphs/markov_chain.py b/graphs/markov_chain.py index b93c408cd288..0b6659822dc4 100644 --- a/graphs/markov_chain.py +++ b/graphs/markov_chain.py @@ -35,6 +35,7 @@ def transition(self, node: str) -> str: current_probability += self.connections[node][dest] if current_probability > random_value: return dest + return "" def get_transitions( From af0810fca133dde19b39fc7735572b6989ea269b Mon Sep 17 00:00:00 2001 From: imp Date: Wed, 18 Aug 2021 18:45:07 +0800 Subject: [PATCH 0194/1543] [mypy] Fix type annotations for maths (#4617) * Fix mypy errors for armstrong_numbers.py * Fix mypy errors for harmonic_series.py * Fix mypy errors for average_median.py --- maths/armstrong_numbers.py | 2 +- maths/average_median.py | 4 ++-- maths/series/harmonic_series.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py index ce8c62182fd9..4e62737e1333 100644 --- a/maths/armstrong_numbers.py +++ b/maths/armstrong_numbers.py @@ -9,7 +9,7 @@ On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188 """ PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401) -FAILING = (-153, -1, 0, 1.2, 200, "A", [], {}, None) +FAILING: tuple = (-153, -1, 0, 1.2, 200, "A", [], {}, None) def armstrong_number(n: int) -> bool: diff --git a/maths/average_median.py b/maths/average_median.py index 57e01368b7b2..497bf0c3a714 100644 --- a/maths/average_median.py +++ b/maths/average_median.py @@ -1,14 +1,14 @@ from typing import Union -def median(nums: Union[int, float]) -> Union[int, float]: +def median(nums: list) -> Union[int, float]: """ Find median of a list of numbers. 
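    With an even number of values the two middle values are averaged, which is why the doctest below returns 2.5.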
Wiki: https://en.wikipedia.org/wiki/Median >>> median([0]) 0 - >>> median([4,1,3,2]) + >>> median([4, 1, 3, 2]) 2.5 >>> median([2, 70, 6, 50, 20, 8, 4]) 8 diff --git a/maths/series/harmonic_series.py b/maths/series/harmonic_series.py index 91b5944583e4..d42d13d912f1 100644 --- a/maths/series/harmonic_series.py +++ b/maths/series/harmonic_series.py @@ -33,8 +33,8 @@ def harmonic_series(n_term: str) -> list: ['1'] """ if n_term == "": - return n_term - series = [] + return [] + series: list = [] for temp in range(int(n_term)): series.append(f"1/{temp + 1}" if series else "1") return series From 9cb5760e895179f8aaa97dd577442189064c724d Mon Sep 17 00:00:00 2001 From: SURYAPRATAP SINGH SURYAVANSHI <67123991+suryapratapsinghsuryavanshi@users.noreply.github.com> Date: Wed, 18 Aug 2021 17:35:41 +0530 Subject: [PATCH 0195/1543] add date_to_weekday finder method (#4599) * add date_to_weekday finder method * reformat date_to_weekday method * remove time * remove hardcode weekdays list * fix return type error * fixing fail issue * Finding the test failing issue * after testing the pre-commit in local environment --- other/date_to_weekday.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 other/date_to_weekday.py diff --git a/other/date_to_weekday.py b/other/date_to_weekday.py new file mode 100644 index 000000000000..bb17130c0da5 --- /dev/null +++ b/other/date_to_weekday.py @@ -0,0 +1,27 @@ +from calendar import day_name +from datetime import datetime + + +def date_to_weekday(inp_date: str) -> str: + """ + It returns the day name of the given date string. + :param inp_date: + :return: String + >>> date_to_weekday("7/8/2035") + 'Tuesday' + >>> date_to_weekday("7/8/2021") + 'Saturday' + >>> date_to_weekday("1/1/2021") + 'Friday' + """ + day, month, year = [int(x) for x in inp_date.split("/")] + if year % 100 == 0: + year = "00" + new_base_date: str = f"{day}/{month}/{year%100} 0:0:0" + date_time_obj: datetime.date = datetime.strptime(new_base_date, "%d/%m/%y %H:%M:%S") + out_put_day: int = date_time_obj.weekday() + return day_name[out_put_day] + + +if __name__ == "__main__": + print(date_to_weekday("1/1/2021"), end=" ") From 20a4fdf38465c2731100c3fbd1aac847cd0b9322 Mon Sep 17 00:00:00 2001 From: imp Date: Thu, 19 Aug 2021 20:08:20 +0800 Subject: [PATCH 0196/1543] [mypy] Fix type annotations for strings (#4637) * Fix mypy error for can_string_be_rearranged_as_pal * Fix mypy error for levenshtein_distance.py * Fix mypy error for word_patterns.py * Fix mypy error for word_occurrence.py --- strings/can_string_be_rearranged_as_palindrome.py | 2 +- strings/levenshtein_distance.py | 2 +- strings/word_occurrence.py | 2 +- strings/word_patterns.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/strings/can_string_be_rearranged_as_palindrome.py b/strings/can_string_be_rearranged_as_palindrome.py index 7fedc5877e26..ddc4828c773b 100644 --- a/strings/can_string_be_rearranged_as_palindrome.py +++ b/strings/can_string_be_rearranged_as_palindrome.py @@ -43,7 +43,7 @@ def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool: return True lower_case_input_str = input_str.replace(" ", "").lower() # character_freq_dict: Stores the frequency of every character in the input string - character_freq_dict = {} + character_freq_dict: dict[str, int] = {} for character in lower_case_input_str: character_freq_dict[character] = character_freq_dict.get(character, 0) + 1 diff --git a/strings/levenshtein_distance.py b/strings/levenshtein_distance.py index 
540a21c93da3..9f7a7e3e65c4 100644 --- a/strings/levenshtein_distance.py +++ b/strings/levenshtein_distance.py @@ -41,7 +41,7 @@ def levenshtein_distance(first_word: str, second_word: str) -> int: if len(second_word) == 0: return len(first_word) - previous_row = range(len(second_word) + 1) + previous_row = list(range(len(second_word) + 1)) for i, c1 in enumerate(first_word): diff --git a/strings/word_occurrence.py b/strings/word_occurrence.py index ef612e12dfa4..4acfa41adf11 100644 --- a/strings/word_occurrence.py +++ b/strings/word_occurrence.py @@ -14,7 +14,7 @@ def word_occurence(sentence: str) -> dict: >>> dict(word_occurence("Two spaces")) {'Two': 1, 'spaces': 1} """ - occurrence = defaultdict(int) + occurrence: dict = defaultdict(int) # Creating a dictionary containing count of each word for word in sentence.split(): occurrence[word] += 1 diff --git a/strings/word_patterns.py b/strings/word_patterns.py index d229954dea93..90b092a20dc8 100644 --- a/strings/word_patterns.py +++ b/strings/word_patterns.py @@ -28,7 +28,7 @@ def get_word_pattern(word: str) -> str: with open("dictionary.txt") as in_file: wordList = in_file.read().splitlines() - all_patterns = {} + all_patterns: dict = {} for word in wordList: pattern = get_word_pattern(word) if pattern in all_patterns: From 4ed7c7f09c74c358f9c31d7a13a29285264bd261 Mon Sep 17 00:00:00 2001 From: Shiva Rama Krishna <45482631+srkchowdary2000@users.noreply.github.com> Date: Mon, 23 Aug 2021 16:05:20 +0530 Subject: [PATCH 0197/1543] =?UTF-8?q?Added=20Bor=C5=AFvka's=20algorithm.?= =?UTF-8?q?=20(#4645)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Added Borůvka's algorithm. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. Solved Test Cases Errors. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. Solved Test Cases Errors. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. Solved Test Cases Errors. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. Solved Test Cases Errors. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. Solved Test Cases Errors. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. Solved Test Cases Errors. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. Solved Test Cases Errors.Removed WhiteSpaces. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. Code Changes. * Added Borůvka's algorithm, a graph algorithm that finds the minimum spanning tree. Code Changes. --- graphs/boruvka.py | 198 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 198 insertions(+) create mode 100644 graphs/boruvka.py diff --git a/graphs/boruvka.py b/graphs/boruvka.py new file mode 100644 index 000000000000..b95bcc39850e --- /dev/null +++ b/graphs/boruvka.py @@ -0,0 +1,198 @@ +"""Borůvka's algorithm. + + Determines the minimum spanning tree(MST) of a graph using the Borůvka's algorithm. 
+ Borůvka's algorithm is a greedy algorithm for finding a minimum spanning tree in a + graph,or a minimum spanning forest in the case of a graph that is not connected. + + The time complexity of this algorithm is O(ELogV), where E represents the number + of edges, while V represents the number of nodes. + + The space complexity of this algorithm is O(V + E), since we have to keep a couple + of lists whose sizes are equal to the number of nodes, as well as keep all the + edges of a graph inside of the data structure itself. + + Borůvka's algorithm gives us pretty much the same result as other MST Algorithms - + they all find the minimum spanning tree, and the time complexity is approximately + the same. + + One advantage that Borůvka's algorithm has compared to the alternatives is that it + doesn't need to presort the edges or maintain a priority queue in order to find the + minimum spanning tree. + Even though that doesn't help its complexity, since it still passes the edges logE + times, it is a bit more simple to code. + + Details: https://en.wikipedia.org/wiki/Bor%C5%AFvka%27s_algorithm +""" + + +class Graph: + def __init__(self, num_of_nodes: int) -> None: + """ + Arguments: + num_of_nodes - the number of nodes in the graph + Attributes: + m_v - the number of nodes in the graph. + m_edges - the list of edges. + m_component - the dictionary which stores the index of the component which + a node belongs to. + """ + + self.m_v = num_of_nodes + self.m_edges = [] + self.m_component = {} + + def add_edge(self, u_node: int, v_node: int, weight: int) -> None: + """Adds an edge in the format [first, second, edge weight] to graph.""" + + self.m_edges.append([u_node, v_node, weight]) + + def find_component(self, u_node: int) -> int: + """Propagates a new component throughout a given component.""" + + if self.m_component[u_node] == u_node: + return u_node + return self.find_component(self.m_component[u_node]) + + def set_component(self, u_node: int) -> None: + """Finds the component index of a given node""" + + if self.m_component[u_node] != u_node: + for k in self.m_component.keys(): + self.m_component[k] = self.find_component(k) + + def union(self, component_size: list, u_node: int, v_node: int) -> None: + """Union finds the roots of components for two nodes, compares the components + in terms of size, and attaches the smaller one to the larger one to form + single component""" + + if component_size[u_node] <= component_size[v_node]: + self.m_component[u_node] = v_node + component_size[v_node] += component_size[u_node] + self.set_component(u_node) + + elif component_size[u_node] >= component_size[v_node]: + self.m_component[v_node] = self.find_component(u_node) + component_size[u_node] += component_size[v_node] + self.set_component(v_node) + + def boruvka(self) -> None: + """Performs Borůvka's algorithm to find MST.""" + + # Initialize additional lists required to algorithm. 
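        # component_size[i] counts the nodes in the component whose root is i,
        # while minimum_weight_edge[i] holds the cheapest edge found for
        # component i during the current pass (it is reset to -1 after each pass).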
+ component_size = [] + mst_weight = 0 + + minimum_weight_edge = [-1] * self.m_v + + # A list of components (initialized to all of the nodes) + for node in range(self.m_v): + self.m_component.update({node: node}) + component_size.append(1) + + num_of_components = self.m_v + + while num_of_components > 1: + l_edges = len(self.m_edges) + for i in range(l_edges): + + u = self.m_edges[i][0] + v = self.m_edges[i][1] + w = self.m_edges[i][2] + + u_component = self.m_component[u] + v_component = self.m_component[v] + + if u_component != v_component: + """If the current minimum weight edge of component u doesn't + exist (is -1), or if it's greater than the edge we're + observing right now, we will assign the value of the edge + we're observing to it. + + If the current minimum weight edge of component v doesn't + exist (is -1), or if it's greater than the edge we're + observing right now, we will assign the value of the edge + we're observing to it""" + + if ( + minimum_weight_edge[u_component] == -1 + or minimum_weight_edge[u_component][2] > w + ): + minimum_weight_edge[u_component] = [u, v, w] + if ( + minimum_weight_edge[v_component] == -1 + or minimum_weight_edge[v_component][2] > w + ): + minimum_weight_edge[v_component] = [u, v, w] + + for node in range(self.m_v): + if minimum_weight_edge[node] != -1: + u = minimum_weight_edge[node][0] + v = minimum_weight_edge[node][1] + w = minimum_weight_edge[node][2] + + u_component = self.m_component[u] + v_component = self.m_component[v] + + if u_component != v_component: + mst_weight += w + self.union(component_size, u_component, v_component) + print( + "Added edge [" + + str(u) + + " - " + + str(v) + + "]\n" + + "Added weight: " + + str(w) + + "\n" + ) + num_of_components -= 1 + + minimum_weight_edge = [-1] * self.m_v + print("The total weight of the minimal spanning tree is: " + str(mst_weight)) + + +def test_vector() -> None: + """ + >>> g=Graph(8) + >>> g.add_edge(0, 1, 10) + >>> g.add_edge(0, 2, 6) + >>> g.add_edge(0, 3, 5) + >>> g.add_edge(1, 3, 15) + >>> g.add_edge(2, 3, 4) + >>> g.add_edge(3, 4, 8) + >>> g.add_edge(4, 5, 10) + >>> g.add_edge(4, 6, 6) + >>> g.add_edge(4, 7, 5) + >>> g.add_edge(5, 7, 15) + >>> g.add_edge(6, 7, 4) + >>> g.boruvka() + Added edge [0 - 3] + Added weight: 5 + + Added edge [0 - 1] + Added weight: 10 + + Added edge [2 - 3] + Added weight: 4 + + Added edge [4 - 7] + Added weight: 5 + + Added edge [4 - 5] + Added weight: 10 + + Added edge [6 - 7] + Added weight: 4 + + Added edge [3 - 4] + Added weight: 8 + + The total weight of the minimal spanning tree is: 46 + """ + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 78a5d3a5587ef649c2b4d2286cfb949886095467 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 24 Aug 2021 15:27:31 +0200 Subject: [PATCH 0198/1543] boruvka.py: A few simplifications and f-strings (#4660) * boruvka.py: A few simplifications and f-strings Python f-strings simplify the code and [should speed up execution](https://www.scivision.dev/python-f-string-speed). @srkchowdary2000 Your review, please. * updating DIRECTORY.md * fixup! 
Streamline the test Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 10 ++++++ graphs/boruvka.py | 84 ++++++++++++++++------------------------------- 2 files changed, 39 insertions(+), 55 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index adc9bb9e4699..41485f6f0ca4 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -97,6 +97,7 @@ * [Peak Signal To Noise Ratio](https://github.com/TheAlgorithms/Python/blob/master/compression/peak_signal_to_noise_ratio.py) ## Computer Vision + * [Cnn Classification](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/cnn_classification.py) * [Harris Corner](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/harris_corner.py) * [Mean Threshold](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/mean_threshold.py) @@ -300,6 +301,7 @@ * [Bfs Zero One Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs_zero_one_shortest_path.py) * [Bidirectional A Star](https://github.com/TheAlgorithms/Python/blob/master/graphs/bidirectional_a_star.py) * [Bidirectional Breadth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/bidirectional_breadth_first_search.py) + * [Boruvka](https://github.com/TheAlgorithms/Python/blob/master/graphs/boruvka.py) * [Breadth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search.py) * [Breadth First Search 2](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search_2.py) * [Breadth First Search Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search_shortest_path.py) @@ -349,6 +351,7 @@ * [Djb2](https://github.com/TheAlgorithms/Python/blob/master/hashes/djb2.py) * [Enigma Machine](https://github.com/TheAlgorithms/Python/blob/master/hashes/enigma_machine.py) * [Hamming Code](https://github.com/TheAlgorithms/Python/blob/master/hashes/hamming_code.py) + * [Luhn](https://github.com/TheAlgorithms/Python/blob/master/hashes/luhn.py) * [Md5](https://github.com/TheAlgorithms/Python/blob/master/hashes/md5.py) * [Sdbm](https://github.com/TheAlgorithms/Python/blob/master/hashes/sdbm.py) * [Sha1](https://github.com/TheAlgorithms/Python/blob/master/hashes/sha1.py) @@ -421,10 +424,12 @@ * [Binomial Distribution](https://github.com/TheAlgorithms/Python/blob/master/maths/binomial_distribution.py) * [Bisection](https://github.com/TheAlgorithms/Python/blob/master/maths/bisection.py) * [Ceil](https://github.com/TheAlgorithms/Python/blob/master/maths/ceil.py) + * [Check Valid Ip Address](https://github.com/TheAlgorithms/Python/blob/master/maths/check_valid_ip_address.py) * [Chudnovsky Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/chudnovsky_algorithm.py) * [Collatz Sequence](https://github.com/TheAlgorithms/Python/blob/master/maths/collatz_sequence.py) * [Combinations](https://github.com/TheAlgorithms/Python/blob/master/maths/combinations.py) * [Decimal Isolate](https://github.com/TheAlgorithms/Python/blob/master/maths/decimal_isolate.py) + * [Double Factorial Recursive](https://github.com/TheAlgorithms/Python/blob/master/maths/double_factorial_recursive.py) * [Entropy](https://github.com/TheAlgorithms/Python/blob/master/maths/entropy.py) * [Euclidean Distance](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_distance.py) * [Euclidean Gcd](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_gcd.py) @@ -539,6 +544,7 @@ ## Other * [Activity 
Selection](https://github.com/TheAlgorithms/Python/blob/master/other/activity_selection.py) + * [Date To Weekday](https://github.com/TheAlgorithms/Python/blob/master/other/date_to_weekday.py) * [Davis–Putnam–Logemann–Loveland](https://github.com/TheAlgorithms/Python/blob/master/other/davis–putnam–logemann–loveland.py) * [Dijkstra Bankers Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/dijkstra_bankers_algorithm.py) * [Doomsday](https://github.com/TheAlgorithms/Python/blob/master/other/doomsday.py) @@ -854,6 +860,7 @@ * [Counting Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/counting_sort.py) * [Cycle Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/cycle_sort.py) * [Double Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/double_sort.py) + * [Exchange Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/exchange_sort.py) * [External Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/external_sort.py) * [Gnome Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/gnome_sort.py) * [Heap Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/heap_sort.py) @@ -893,6 +900,7 @@ ## Strings * [Aho Corasick](https://github.com/TheAlgorithms/Python/blob/master/strings/aho_corasick.py) + * [Alternative String Arrange](https://github.com/TheAlgorithms/Python/blob/master/strings/alternative_string_arrange.py) * [Anagrams](https://github.com/TheAlgorithms/Python/blob/master/strings/anagrams.py) * [Autocomplete Using Trie](https://github.com/TheAlgorithms/Python/blob/master/strings/autocomplete_using_trie.py) * [Boyer Moore Search](https://github.com/TheAlgorithms/Python/blob/master/strings/boyer_moore_search.py) @@ -902,6 +910,7 @@ * [Check Pangram](https://github.com/TheAlgorithms/Python/blob/master/strings/check_pangram.py) * [Detecting English Programmatically](https://github.com/TheAlgorithms/Python/blob/master/strings/detecting_english_programmatically.py) * [Frequency Finder](https://github.com/TheAlgorithms/Python/blob/master/strings/frequency_finder.py) + * [Indian Phone Validator](https://github.com/TheAlgorithms/Python/blob/master/strings/indian_phone_validator.py) * [Is Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/is_palindrome.py) * [Jaro Winkler](https://github.com/TheAlgorithms/Python/blob/master/strings/jaro_winkler.py) * [Knuth Morris Pratt](https://github.com/TheAlgorithms/Python/blob/master/strings/knuth_morris_pratt.py) @@ -941,6 +950,7 @@ * [Instagram Crawler](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_crawler.py) * [Instagram Pic](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_pic.py) * [Instagram Video](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_video.py) + * [Random Anime Character](https://github.com/TheAlgorithms/Python/blob/master/web_programming/random_anime_character.py) * [Recaptcha Verification](https://github.com/TheAlgorithms/Python/blob/master/web_programming/recaptcha_verification.py) * [Slack Message](https://github.com/TheAlgorithms/Python/blob/master/web_programming/slack_message.py) * [Test Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/test_fetch_github_info.py) diff --git a/graphs/boruvka.py b/graphs/boruvka.py index b95bcc39850e..3fa5c6fd2a26 100644 --- a/graphs/boruvka.py +++ b/graphs/boruvka.py @@ -1,11 +1,12 @@ """Borůvka's algorithm. 
- Determines the minimum spanning tree(MST) of a graph using the Borůvka's algorithm. + Determines the minimum spanning tree (MST) of a graph using the Borůvka's algorithm. Borůvka's algorithm is a greedy algorithm for finding a minimum spanning tree in a - graph,or a minimum spanning forest in the case of a graph that is not connected. + connected graph, or a minimum spanning forest if a graph that is not connected. The time complexity of this algorithm is O(ELogV), where E represents the number of edges, while V represents the number of nodes. + O(number_of_edges Log number_of_nodes) The space complexity of this algorithm is O(V + E), since we have to keep a couple of lists whose sizes are equal to the number of nodes, as well as keep all the @@ -19,7 +20,7 @@ doesn't need to presort the edges or maintain a priority queue in order to find the minimum spanning tree. Even though that doesn't help its complexity, since it still passes the edges logE - times, it is a bit more simple to code. + times, it is a bit simpler to code. Details: https://en.wikipedia.org/wiki/Bor%C5%AFvka%27s_algorithm """ @@ -31,13 +32,13 @@ def __init__(self, num_of_nodes: int) -> None: Arguments: num_of_nodes - the number of nodes in the graph Attributes: - m_v - the number of nodes in the graph. + m_num_of_nodes - the number of nodes in the graph. m_edges - the list of edges. m_component - the dictionary which stores the index of the component which a node belongs to. """ - self.m_v = num_of_nodes + self.m_num_of_nodes = num_of_nodes self.m_edges = [] self.m_component = {} @@ -57,7 +58,7 @@ def set_component(self, u_node: int) -> None: """Finds the component index of a given node""" if self.m_component[u_node] != u_node: - for k in self.m_component.keys(): + for k in self.m_component: self.m_component[k] = self.find_component(k) def union(self, component_size: list, u_node: int, v_node: int) -> None: @@ -82,22 +83,18 @@ def boruvka(self) -> None: component_size = [] mst_weight = 0 - minimum_weight_edge = [-1] * self.m_v + minimum_weight_edge = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) - for node in range(self.m_v): + for node in range(self.m_num_of_nodes): self.m_component.update({node: node}) component_size.append(1) - num_of_components = self.m_v + num_of_components = self.m_num_of_nodes while num_of_components > 1: - l_edges = len(self.m_edges) - for i in range(l_edges): - - u = self.m_edges[i][0] - v = self.m_edges[i][1] - w = self.m_edges[i][2] + for edge in self.m_edges: + u, v, w = edge u_component = self.m_component[u] v_component = self.m_component[v] @@ -113,22 +110,16 @@ def boruvka(self) -> None: observing right now, we will assign the value of the edge we're observing to it""" - if ( - minimum_weight_edge[u_component] == -1 - or minimum_weight_edge[u_component][2] > w - ): - minimum_weight_edge[u_component] = [u, v, w] - if ( - minimum_weight_edge[v_component] == -1 - or minimum_weight_edge[v_component][2] > w - ): - minimum_weight_edge[v_component] = [u, v, w] - - for node in range(self.m_v): - if minimum_weight_edge[node] != -1: - u = minimum_weight_edge[node][0] - v = minimum_weight_edge[node][1] - w = minimum_weight_edge[node][2] + for component in (u_component, v_component): + if ( + minimum_weight_edge[component] == -1 + or minimum_weight_edge[component][2] > w + ): + minimum_weight_edge[component] = [u, v, w] + + for edge in minimum_weight_edge: + if edge != -1: + u, v, w = edge u_component = self.m_component[u] v_component = self.m_component[v] @@ 
-136,36 +127,19 @@ def boruvka(self) -> None: if u_component != v_component: mst_weight += w self.union(component_size, u_component, v_component) - print( - "Added edge [" - + str(u) - + " - " - + str(v) - + "]\n" - + "Added weight: " - + str(w) - + "\n" - ) + print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n") num_of_components -= 1 - minimum_weight_edge = [-1] * self.m_v - print("The total weight of the minimal spanning tree is: " + str(mst_weight)) + minimum_weight_edge = [-1] * self.m_num_of_nodes + print(f"The total weight of the minimal spanning tree is: {mst_weight}") def test_vector() -> None: """ - >>> g=Graph(8) - >>> g.add_edge(0, 1, 10) - >>> g.add_edge(0, 2, 6) - >>> g.add_edge(0, 3, 5) - >>> g.add_edge(1, 3, 15) - >>> g.add_edge(2, 3, 4) - >>> g.add_edge(3, 4, 8) - >>> g.add_edge(4, 5, 10) - >>> g.add_edge(4, 6, 6) - >>> g.add_edge(4, 7, 5) - >>> g.add_edge(5, 7, 15) - >>> g.add_edge(6, 7, 4) + >>> g = Graph(8) + >>> for u_v_w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4), + ... (3, 4, 8), (4, 5, 10), (4, 6, 6), (4, 7, 5), (5, 7, 15), (6, 7, 4)): + ... g.add_edge(*u_v_w) >>> g.boruvka() Added edge [0 - 3] Added weight: 5 From 5e7eed610ce81fa96e033f4d2a1781ed8637cb41 Mon Sep 17 00:00:00 2001 From: imp Date: Wed, 25 Aug 2021 19:35:36 +0800 Subject: [PATCH 0199/1543] [mypy] Fix type annotations for strings (#4641) * Fix mypy error for min_cost_string_conversion.py * Fix mypy error for manacher.py * Fix mypy error for aho_corasick.py --- strings/aho_corasick.py | 22 +++++++++++++--------- strings/manacher.py | 25 +++++++++++++------------ strings/min_cost_string_conversion.py | 8 +++----- 3 files changed, 29 insertions(+), 26 deletions(-) diff --git a/strings/aho_corasick.py b/strings/aho_corasick.py index b959dbd58c32..712cb338aa7e 100644 --- a/strings/aho_corasick.py +++ b/strings/aho_corasick.py @@ -3,8 +3,8 @@ class Automaton: - def __init__(self, keywords: List[str]): - self.adlist = list() + def __init__(self, keywords: list[str]): + self.adlist: list[dict] = list() self.adlist.append( {"value": "", "next_states": [], "fail_state": 0, "output": []} ) @@ -22,9 +22,8 @@ def find_next_state(self, current_state: int, char: str) -> Union[int, None]: def add_keyword(self, keyword: str) -> None: current_state = 0 for character in keyword: - if self.find_next_state(current_state, character): - current_state = self.find_next_state(current_state, character) - else: + next_state = self.find_next_state(current_state, character) + if next_state is None: self.adlist.append( { "value": character, @@ -35,10 +34,12 @@ def add_keyword(self, keyword: str) -> None: ) self.adlist[current_state]["next_states"].append(len(self.adlist) - 1) current_state = len(self.adlist) - 1 + else: + current_state = next_state self.adlist[current_state]["output"].append(keyword) def set_fail_transitions(self) -> None: - q = deque() + q: deque = deque() for node in self.adlist[0]["next_states"]: q.append(node) self.adlist[node]["fail_state"] = 0 @@ -68,7 +69,9 @@ def search_in(self, string: str) -> Dict[str, List[int]]: >>> A.search_in("whatever, err ... 
, wherever") {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]} """ - result = dict() # returns a dict with keywords and list of its occurrences + result: dict = ( + dict() + ) # returns a dict with keywords and list of its occurrences current_state = 0 for i in range(len(string)): while ( @@ -76,10 +79,11 @@ def search_in(self, string: str) -> Dict[str, List[int]]: and current_state != 0 ): current_state = self.adlist[current_state]["fail_state"] - current_state = self.find_next_state(current_state, string[i]) - if current_state is None: + next_state = self.find_next_state(current_state, string[i]) + if next_state is None: current_state = 0 else: + current_state = next_state for key in self.adlist[current_state]["output"]: if not (key in result): result[key] = [] diff --git a/strings/manacher.py b/strings/manacher.py index 5476e06839b7..e6ea71cde12f 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -35,27 +35,28 @@ def palindromic_string(input_string: str) -> str: length = [1 for i in range(len(new_input_string))] # for each character in new_string find corresponding palindromic string - for i in range(len(new_input_string)): - k = 1 if i > r else min(length[l + r - i] // 2, r - i + 1) + start = 0 + for j in range(len(new_input_string)): + k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1) while ( - i - k >= 0 - and i + k < len(new_input_string) - and new_input_string[k + i] == new_input_string[i - k] + j - k >= 0 + and j + k < len(new_input_string) + and new_input_string[k + j] == new_input_string[j - k] ): k += 1 - length[i] = 2 * k - 1 + length[j] = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this - if i + k - 1 > r: - l = i - k + 1 # noqa: E741 - r = i + k - 1 + if j + k - 1 > r: + l = j - k + 1 # noqa: E741 + r = j + k - 1 # update max_length and start position - if max_length < length[i]: - max_length = length[i] - start = i + if max_length < length[j]: + max_length = length[j] + start = j # create that string s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index e990aaa2679b..147bc6fc740a 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -1,5 +1,3 @@ -from typing import List, Tuple - """ Algorithm for calculating the most cost-efficient sequence for converting one string into another. 
@@ -18,7 +16,7 @@ def compute_transform_tables( replace_cost: int, delete_cost: int, insert_cost: int, -) -> Tuple[List[int], List[str]]: +) -> tuple[list[list[int]], list[list[str]]]: source_seq = list(source_string) destination_seq = list(destination_string) len_source_seq = len(source_seq) @@ -28,7 +26,7 @@ def compute_transform_tables( [0 for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1) ] ops = [ - [0 for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1) + ["0" for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1) ] for i in range(1, len_source_seq + 1): @@ -59,7 +57,7 @@ def compute_transform_tables( return costs, ops -def assemble_transformation(ops: List[str], i: int, j: int) -> List[str]: +def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: if i == 0 and j == 0: return [] else: From 46e56fa6f2e473d1300846b2b96e56f498872400 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 27 Aug 2021 11:45:14 +0200 Subject: [PATCH 0200/1543] luhn.py: Favor list comprehensions over maps (#4663) * luhn.py: Favor list comprehensions over maps As discussed in CONTRIBUTING.md. * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- hashes/luhn.py | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/hashes/luhn.py b/hashes/luhn.py index 69e7b4ccf59b..81014120dd80 100644 --- a/hashes/luhn.py +++ b/hashes/luhn.py @@ -4,39 +4,34 @@ def is_luhn(string: str) -> bool: """ - Perform Luhn validation on input string + Perform Luhn validation on an input string Algorithm: * Double every other digit starting from 2nd last digit. * Subtract 9 if number is greater than 9. * Sum the numbers * - >>> test_cases = [79927398710, 79927398711, 79927398712, 79927398713, + >>> test_cases = (79927398710, 79927398711, 79927398712, 79927398713, ... 79927398714, 79927398715, 79927398716, 79927398717, 79927398718, - ... 79927398719] - >>> test_cases = list(map(str, test_cases)) - >>> list(map(is_luhn, test_cases)) + ... 
79927398719) + >>> [is_luhn(str(test_case)) for test_case in test_cases] [False, False, False, True, False, False, False, False, False, False] """ check_digit: int _vector: List[str] = list(string) __vector, check_digit = _vector[:-1], int(_vector[-1]) - vector: List[int] = [*map(int, __vector)] + vector: List[int] = [int(digit) for digit in __vector] vector.reverse() - for idx, i in enumerate(vector): - - if idx & 1 == 0: - doubled: int = vector[idx] * 2 + for i, digit in enumerate(vector): + if i & 1 == 0: + doubled: int = digit * 2 if doubled > 9: doubled -= 9 - check_digit += doubled else: - check_digit += i + check_digit += digit - if (check_digit) % 10 == 0: - return True - return False + return check_digit % 10 == 0 if __name__ == "__main__": @@ -44,3 +39,4 @@ def is_luhn(string: str) -> bool: doctest.testmod() assert is_luhn("79927398713") + assert not is_luhn("79927398714") From 8e5c3536c728dd7451ca301dc2d5bfb3f68b0e1a Mon Sep 17 00:00:00 2001 From: arfy slowy Date: Sun, 29 Aug 2021 01:07:10 +0700 Subject: [PATCH 0201/1543] [fixed] unused variable, standalone running, import doctest module (#4673) * [fixed] unused variable, standalone running, import doctest module information [standalone running](https://www.geeksforgeeks.org/what-does-the-if-__name__-__main__-do/) Signed-off-by: slowy07 * Update other/fischer_yates_shuffle.py Co-authored-by: Christian Clauss * [fixed] change to tuple and fixing callfunction Signed-off-by: slowy07 * Update matrix/spiral_print.py Co-authored-by: Christian Clauss * Update matrix/spiral_print.py Co-authored-by: Christian Clauss * fixing Co-authored-by: Christian Clauss * [fixed] sprial matrix Signed-off-by: slowy07 * Update spiral_print.py * Update spiral_print.py * Update spiral_print.py * Update spiral_print.py Co-authored-by: Christian Clauss --- ...h_fibonacci_using_matrix_exponentiation.py | 3 ++ matrix/spiral_print.py | 34 +++++++++---------- other/fischer_yates_shuffle.py | 6 ++-- 3 files changed, 23 insertions(+), 20 deletions(-) diff --git a/matrix/nth_fibonacci_using_matrix_exponentiation.py b/matrix/nth_fibonacci_using_matrix_exponentiation.py index 8c39de0f23b6..341a02e1a95d 100644 --- a/matrix/nth_fibonacci_using_matrix_exponentiation.py +++ b/matrix/nth_fibonacci_using_matrix_exponentiation.py @@ -88,4 +88,7 @@ def main(): if __name__ == "__main__": + import doctest + + doctest.testmod() main() diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index 21dab76156e9..6f699c1ab662 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -4,36 +4,35 @@ Matrix must satisfy below conditions i) matrix should be only one or two dimensional - ii)column of all the row should be equal + ii) number of column of all rows should be equal """ +from collections.abc import Iterable -def checkMatrix(a): + +def check_matrix(matrix): # must be - if type(a) == list and len(a) > 0: - if type(a[0]) == list: - prevLen = 0 - for i in a: - if prevLen == 0: - prevLen = len(i) - result = True - elif prevLen == len(i): + if matrix and isinstance(matrix, Iterable): + if isinstance(matrix[0], Iterable): + prev_len = 0 + for row in matrix: + if prev_len == 0: + prev_len = len(row) result = True else: - result = False + result = prev_len == len(row) else: result = True else: result = False + return result def spiralPrint(a): - - if checkMatrix(a) and len(a) > 0: - + if check_matrix(a) and len(a) > 0: matRow = len(a) - if type(a[0]) == list: + if isinstance(a[0], Iterable): matCol = len(a[0]) else: for dat in a: @@ -64,5 +63,6 @@ def spiralPrint(a): 
# driver code -a = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] -spiralPrint(a) +if __name__ == "__main__": + a = ([1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]) + spiralPrint(a) diff --git a/other/fischer_yates_shuffle.py b/other/fischer_yates_shuffle.py index 6eec738c02e1..035fcb482380 100644 --- a/other/fischer_yates_shuffle.py +++ b/other/fischer_yates_shuffle.py @@ -8,8 +8,8 @@ import random -def FYshuffle(list): - for i in range(len(list)): +def fisher_yates_shuffle(data: list) -> list: + for _ in range(len(list)): a = random.randint(0, len(list) - 1) b = random.randint(0, len(list) - 1) list[a], list[b] = list[b], list[a] @@ -21,4 +21,4 @@ def FYshuffle(list): strings = ["python", "says", "hello", "!"] print("Fisher-Yates Shuffle:") print("List", integers, strings) - print("FY Shuffle", FYshuffle(integers), FYshuffle(strings)) + print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) From 3acca3d1d188f89c6aa0fb4c0139ac62426ea296 Mon Sep 17 00:00:00 2001 From: Aswin Murali Date: Mon, 30 Aug 2021 13:36:59 +0530 Subject: [PATCH 0202/1543] Fix type annotations for integer_partition.py #4052 (#4689) --- dynamic_programming/integer_partition.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dynamic_programming/integer_partition.py b/dynamic_programming/integer_partition.py index 4eb06348ce84..8ed2e51bd4bd 100644 --- a/dynamic_programming/integer_partition.py +++ b/dynamic_programming/integer_partition.py @@ -6,8 +6,8 @@ """ -def partition(m): - memo = [[0 for _ in range(m)] for _ in range(m + 1)] +def partition(m: int) -> int: + memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)] for i in range(m + 1): memo[i][0] = 1 From 097f83023866d625a38c891a46ce3a70d73d7f63 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 31 Aug 2021 06:56:15 +0200 Subject: [PATCH 0203/1543] Avoid mutable default arguments (#4691) --- graphs/eulerian_path_and_circuit_for_undirected_graph.py | 4 ++-- graphs/frequent_pattern_graph_miner.py | 4 ++-- maths/radix2_fft.py | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/graphs/eulerian_path_and_circuit_for_undirected_graph.py b/graphs/eulerian_path_and_circuit_for_undirected_graph.py index 7850933b0201..fa4f73abd86f 100644 --- a/graphs/eulerian_path_and_circuit_for_undirected_graph.py +++ b/graphs/eulerian_path_and_circuit_for_undirected_graph.py @@ -6,8 +6,8 @@ # using dfs for finding eulerian path traversal -def dfs(u, graph, visited_edge, path=[]): - path = path + [u] +def dfs(u, graph, visited_edge, path=None): + path = (path or []) + [u] for v in graph[u]: if visited_edge[u][v] is False: visited_edge[u][v], visited_edge[v][u] = True, True diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index 8f344b7bd3ae..548ce3c54ffe 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -168,11 +168,11 @@ def construct_graph(cluster, nodes): return graph -def myDFS(graph, start, end, path=[]): +def myDFS(graph, start, end, path=None): """ find different DFS walk from given node to Header node """ - path = path + [start] + path = (path or []) + [start] if start == end: paths.append(path) for node in graph[start]: diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index de87071e5440..9fc9f843e685 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -49,10 +49,10 @@ class FFT: A*B = 0*x^(-0+0j) + 1*x^(2+0j) + 2*x^(3+0j) + 3*x^(8+0j) + 4*x^(6+0j) + 5*x^(8+0j) """ - def __init__(self, 
polyA=[0], polyB=[0]): + def __init__(self, polyA=None, polyB=None): # Input as list - self.polyA = list(polyA)[:] - self.polyB = list(polyB)[:] + self.polyA = list(polyA or [0])[:] + self.polyB = list(polyB or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: From ef9827166e778879e0bc5847c4bcee6720073657 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 31 Aug 2021 07:56:19 +0200 Subject: [PATCH 0204/1543] Approve functions used as default arguments (#4699) * Approve functions used as default argumenets * The default value for **seed** is the result of a function call The default value for **seed** is the result of a function call which is not normally recommended and causes flake8-bugbear to raise a B008 error. However, in this case, it is accptable because `LinearCongruentialGenerator.__init__()` will only be called once per instance and it ensures that each instance will generate a unique sequence of numbers. * The default value for **backend** is the result of a function call The default value for **backend** is the result of a function call which is not normally recommended and causes flake8-bugbear to raise a B008 error. However, in this case, it is accptable because `Aer.get_backend()` is called when the function is definition and that same backend is then reused for function calls. * Update linear_congruential_generator.py * Update ripple_adder_classic.py * Update ripple_adder_classic.py * Update ripple_adder_classic.py * Update ripple_adder_classic.py * Update ripple_adder_classic.py --- other/linear_congruential_generator.py | 8 +++++++- quantum/ripple_adder_classic.py | 14 +++++++++++--- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/other/linear_congruential_generator.py b/other/linear_congruential_generator.py index f8b604b8562d..777ee6355b9b 100644 --- a/other/linear_congruential_generator.py +++ b/other/linear_congruential_generator.py @@ -8,7 +8,13 @@ class LinearCongruentialGenerator: A pseudorandom number generator. """ - def __init__(self, multiplier, increment, modulo, seed=int(time())): + # The default value for **seed** is the result of a function call which is not + # normally recommended and causes flake8-bugbear to raise a B008 error. However, + # in this case, it is accptable because `LinearCongruentialGenerator.__init__()` + # will only be called once per instance and it ensures that each instance will + # generate a unique sequence of numbers. + + def __init__(self, multiplier, increment, modulo, seed=int(time())): # noqa: B008 """ These parameters are saved and used when nextNumber() is called. diff --git a/quantum/ripple_adder_classic.py b/quantum/ripple_adder_classic.py index dc0c2103b2e5..8539a62afd52 100644 --- a/quantum/ripple_adder_classic.py +++ b/quantum/ripple_adder_classic.py @@ -53,8 +53,16 @@ def full_adder( circuit.cx(input1_loc, input2_loc) +# The default value for **backend** is the result of a function call which is not +# normally recommended and causes flake8-bugbear to raise a B008 error. However, +# in this case, this is accptable because `Aer.get_backend()` is called when the +# function is defined and that same backend is then reused for all function calls. 
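# A short illustration of the pitfall that patches 0203 and 0204 address
# (the function name here is made up for the example): a default value is
# evaluated once, when the `def` statement runs, so a mutable default such as
# `path=[]` is shared across calls, and a call such as `int(time())` is frozen
# at definition time.  The `None`-sentinel pattern sidesteps both surprises.
def append_visit(node, path=None):
    # Build a fresh list per call unless the caller passes one in.
    path = (path or []) + [node]
    return path

# append_visit(1) == [1] and append_visit(2) == [2]; no state leaks between calls.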
+ + def ripple_adder( - val1: int, val2: int, backend: BaseBackend = Aer.get_backend("qasm_simulator") + val1: int, + val2: int, + backend: BaseBackend = Aer.get_backend("qasm_simulator"), # noqa: B008 ) -> int: """ Quantum Equivalent of a Ripple Adder Circuit @@ -63,7 +71,7 @@ def ripple_adder( Currently only adds 'emulated' Classical Bits but nothing prevents us from doing this with hadamard'd bits :) - Only supports adding +ve Integers + Only supports adding positive integers >>> ripple_adder(3, 4) 7 @@ -99,7 +107,7 @@ def ripple_adder( res = execute(circuit, backend, shots=1).result() # The result is in binary. Convert it back to int - return int(list(res.get_counts().keys())[0], 2) + return int(list(res.get_counts())[0], 2) if __name__ == "__main__": From 757d4fb84f77865fc569aae4129999faf75c970f Mon Sep 17 00:00:00 2001 From: Kiran Hipparagi <49370990+KiranHipparagi@users.noreply.github.com> Date: Wed, 1 Sep 2021 01:36:49 +0530 Subject: [PATCH 0205/1543] Added Dutch National Flag algorithm #4636 (#4639) * Added Dutch national flag sort Algorithm * Changed file name to dnf_sort.py * Added descriptive name and type hint Added descriptive name and type hint for parameter with doctest for the function dnf_sort. * Added test cases * Added doctest cases * Update sorts/dnf_sort.py * Added doctest for dutch_national_flag_sort sorts/dnf_sort.py * Update sorts/dnf_sort.py * Added doctest for the function dutch_national_flag_sort * update file as per black code formatter * Update dnf_sort.py * Update and rename dnf_sort.py to dutch_national_flag_sort.py Co-authored-by: Christian Clauss --- sorts/dutch_national_flag_sort.py | 100 ++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 sorts/dutch_national_flag_sort.py diff --git a/sorts/dutch_national_flag_sort.py b/sorts/dutch_national_flag_sort.py new file mode 100644 index 000000000000..79afefa73afe --- /dev/null +++ b/sorts/dutch_national_flag_sort.py @@ -0,0 +1,100 @@ +""" +A pure implementation of Dutch national flag (DNF) sort algorithm in Python. +Dutch National Flag algorithm is an algorithm originally designed by Edsger Dijkstra. +It is the most optimal sort for 3 unique values (eg. 0, 1, 2) in a sequence. DNF can +sort a sequence of n size with [0 <= a[i] <= 2] at guaranteed O(n) complexity in a +single pass. + +The flag of the Netherlands consists of three colors: white, red, and blue. +The task is to randomly arrange balls of white, red, and blue in such a way that balls +of the same color are placed together. DNF sorts a sequence of 0, 1, and 2's in linear +time that does not consume any extra space. This algorithm can be implemented only on +a sequence that contains three unique elements. + +1) Time complexity is O(n). +2) Space complexity is O(1). + +More info on: https://en.wikipedia.org/wiki/Dutch_national_flag_problem + +For doctests run following command: +python3 -m doctest -v dutch_national_flag_sort.py + +For manual testing run: +python dnf_sort.py +""" + + +# Python program to sort a sequence containing only 0, 1 and 2 in a single pass. +red = 0 # The first color of the flag. +white = 1 # The second color of the flag. +blue = 2 # The third color of the flag. +colors = (red, white, blue) + + +def dutch_national_flag_sort(sequence: list) -> list: + """ + A pure Python implementation of Dutch National Flag sort algorithm. 
+ :param data: 3 unique integer values (e.g., 0, 1, 2) in an sequence + :return: The same collection in ascending order + + >>> dutch_national_flag_sort([]) + [] + >>> dutch_national_flag_sort([0]) + [0] + >>> dutch_national_flag_sort([2, 1, 0, 0, 1, 2]) + [0, 0, 1, 1, 2, 2] + >>> dutch_national_flag_sort([0, 1, 1, 0, 1, 2, 1, 2, 0, 0, 0, 1]) + [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2] + >>> dutch_national_flag_sort("abacab") + Traceback (most recent call last): + ... + ValueError: The elements inside the sequence must contains only (0, 1, 2) values + >>> dutch_national_flag_sort("Abacab") + Traceback (most recent call last): + ... + ValueError: The elements inside the sequence must contains only (0, 1, 2) values + >>> dutch_national_flag_sort([3, 2, 3, 1, 3, 0, 3]) + Traceback (most recent call last): + ... + ValueError: The elements inside the sequence must contains only (0, 1, 2) values + >>> dutch_national_flag_sort([-1, 2, -1, 1, -1, 0, -1]) + Traceback (most recent call last): + ... + ValueError: The elements inside the sequence must contains only (0, 1, 2) values + >>> dutch_national_flag_sort([1.1, 2, 1.1, 1, 1.1, 0, 1.1]) + Traceback (most recent call last): + ... + ValueError: The elements inside the sequence must contains only (0, 1, 2) values + """ + if not sequence: + return [] + if len(sequence) == 1: + return list(sequence) + low = 0 + high = len(sequence) - 1 + mid = 0 + while mid <= high: + if sequence[mid] == colors[0]: + sequence[low], sequence[mid] = sequence[mid], sequence[low] + low += 1 + mid += 1 + elif sequence[mid] == colors[1]: + mid += 1 + elif sequence[mid] == colors[2]: + sequence[mid], sequence[high] = sequence[high], sequence[mid] + high -= 1 + else: + raise ValueError( + f"The elements inside the sequence must contains only {colors} values" + ) + return sequence + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + user_input = input("Enter numbers separated by commas:\n").strip() + unsorted = [int(item.strip()) for item in user_input.split(",")] + print(f"{dutch_national_flag_sort(unsorted)}") From c1b15a86baabd110347bd4d478d88bc2820824e5 Mon Sep 17 00:00:00 2001 From: imp Date: Fri, 3 Sep 2021 17:49:23 +0800 Subject: [PATCH 0206/1543] [mypy] Fix type annotations for dynamic programming (#4687) * Fix mypy error for knapsack.py * Fix mypy error for longest_increasing_subsequence * Fix mypy error for fractional_knapsack_2.py --- dynamic_programming/fractional_knapsack_2.py | 19 ++++++------------- dynamic_programming/knapsack.py | 2 +- .../longest_increasing_subsequence.py | 2 +- 3 files changed, 8 insertions(+), 15 deletions(-) diff --git a/dynamic_programming/fractional_knapsack_2.py b/dynamic_programming/fractional_knapsack_2.py index cae57738311b..bd776723c146 100644 --- a/dynamic_programming/fractional_knapsack_2.py +++ b/dynamic_programming/fractional_knapsack_2.py @@ -7,7 +7,7 @@ def fractional_knapsack( value: list[int], weight: list[int], capacity: int -) -> tuple[int, list[int]]: +) -> tuple[float, list[float]]: """ >>> value = [1, 3, 5, 7, 9] >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1] @@ -32,8 +32,8 @@ def fractional_knapsack( ratio = [v / w for v, w in zip(value, weight)] index.sort(key=lambda i: ratio[i], reverse=True) - max_value = 0 - fractions = [0] * len(value) + max_value: float = 0 + fractions: list[float] = [0] * len(value) for i in index: if weight[i] <= capacity: fractions[i] = 1 @@ -48,13 +48,6 @@ def fractional_knapsack( if __name__ == "__main__": - n = int(input("Enter number of items: ")) - value = input(f"Enter the 
values of the {n} item(s) in order: ").split() - value = [int(v) for v in value] - weight = input(f"Enter the positive weights of the {n} item(s) in order: ".split()) - weight = [int(w) for w in weight] - capacity = int(input("Enter maximum weight: ")) - - max_value, fractions = fractional_knapsack(value, weight, capacity) - print("The maximum value of items that can be carried:", max_value) - print("The fractions in which the items should be taken:", fractions) + import doctest + + doctest.testmod() diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py index 69e54c00aa4e..804d7d4f12f5 100644 --- a/dynamic_programming/knapsack.py +++ b/dynamic_programming/knapsack.py @@ -91,7 +91,7 @@ def knapsack_with_example_solution(W: int, wt: list, val: list): ) optimal_val, dp_table = knapsack(W, wt, val, num_items) - example_optional_set = set() + example_optional_set: set = set() _construct_solution(dp_table, wt, num_items, W, example_optional_set) return optimal_val, example_optional_set diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py index f5ca8a2b5cdc..a029f9be7d98 100644 --- a/dynamic_programming/longest_increasing_subsequence.py +++ b/dynamic_programming/longest_increasing_subsequence.py @@ -36,7 +36,7 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recu pivot = array[0] isFound = False i = 1 - longest_subseq = [] + longest_subseq: list[int] = [] while not isFound and i < array_length: if array[i] < pivot: isFound = True From 5d5831bdd07fff31278c84c4e7b313d633abf752 Mon Sep 17 00:00:00 2001 From: Aviv Faraj <73610201+avivfaraj@users.noreply.github.com> Date: Mon, 6 Sep 2021 17:57:18 -0400 Subject: [PATCH 0207/1543] Physics new code (#4709) * added gamma_function * Add files via upload * Resolved issue with str.format And also changed output to math notation * Update gamma_function.py * Rename physics/gamma_function.py to maths/gamma_recursive.py * Fixes: #4709 Fixed issues for pre-commit test * Fixes: #4709 solved issues with doctests And comments * Fixes: #4709 Added failed tests to doctest * Align with Python's Standard Library math.gamma() Replicate the exceptions of https://docs.python.org/3/library/math.html#math.gamma * Update gamma_recursive.py * Update gamma_recursive.py * Update gamma_recursive.py * Update gamma_recursive.py Co-authored-by: Christian Clauss --- maths/gamma_recursive.py | 78 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 maths/gamma_recursive.py diff --git a/maths/gamma_recursive.py b/maths/gamma_recursive.py new file mode 100644 index 000000000000..683d7adb1aa8 --- /dev/null +++ b/maths/gamma_recursive.py @@ -0,0 +1,78 @@ +""" +Gamma function is a very useful tool in math and physics. +It helps calculating complex integral in a convenient way. +for more info: https://en.wikipedia.org/wiki/Gamma_function + +Python's Standard Library math.gamma() function overflows around gamma(171.624). +""" +from math import pi, sqrt + + +def gamma(num: float) -> float: + """ + Calculates the value of Gamma function of num + where num is either an integer (1, 2, 3..) or a half-integer (0.5, 1.5, 2.5 ...). 
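# A worked expansion of the recursion described in the docstring above (the
# numbers follow from the patch's own doctests, e.g. gamma(3.5)):
#   gamma(3.5) = 2.5 * gamma(2.5)
#              = 2.5 * 1.5 * gamma(1.5)
#              = 2.5 * 1.5 * 0.5 * gamma(0.5)
#              = 1.875 * sqrt(pi) ≈ 3.3233509704478426
# Integer arguments bottom out at gamma(1) = 1 instead, giving gamma(n) = (n-1)!.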
+ Implemented using recursion + Examples: + >>> from math import isclose, gamma as math_gamma + >>> gamma(0.5) + 1.7724538509055159 + >>> gamma(2) + 1.0 + >>> gamma(3.5) + 3.3233509704478426 + >>> gamma(171.5) + 9.483367566824795e+307 + >>> all(isclose(gamma(num), math_gamma(num)) for num in (0.5, 2, 3.5, 171.5)) + True + >>> gamma(0) + Traceback (most recent call last): + ... + ValueError: math domain error + >>> gamma(-1.1) + Traceback (most recent call last): + ... + ValueError: math domain error + >>> gamma(-4) + Traceback (most recent call last): + ... + ValueError: math domain error + >>> gamma(172) + Traceback (most recent call last): + ... + OverflowError: math range error + >>> gamma(1.1) + Traceback (most recent call last): + ... + NotImplementedError: num must be an integer or a half-integer + """ + if num <= 0: + raise ValueError("math domain error") + if num > 171.5: + raise OverflowError("math range error") + elif num - int(num) not in (0, 0.5): + raise NotImplementedError("num must be an integer or a half-integer") + elif num == 0.5: + return sqrt(pi) + else: + return 1.0 if num == 1 else (num - 1) * gamma(num - 1) + + +def test_gamma() -> None: + """ + >>> test_gamma() + """ + assert gamma(0.5) == sqrt(pi) + assert gamma(1) == 1.0 + assert gamma(2) == 1.0 + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + num = 1 + while num: + num = float(input("Gamma of: ")) + print(f"gamma({num}) = {gamma(num)}") + print("\nEnter 0 to exit...") From cecf43d6481173e831af829da77911e1a3868a6c Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 7 Sep 2021 13:37:03 +0200 Subject: [PATCH 0208/1543] Pyupgrade to Python 3.9 (#4718) * Pyupgrade to Python 3.9 * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 ++- arithmetic_analysis/in_static_equilibrium.py | 4 +-- arithmetic_analysis/lu_decomposition.py | 4 +-- .../newton_forward_interpolation.py | 4 +-- arithmetic_analysis/newton_raphson.py | 5 ++-- backtracking/all_combinations.py | 12 ++++----- backtracking/all_permutations.py | 14 +++++----- backtracking/all_subsequences.py | 10 ++++--- backtracking/coloring.py | 7 +++-- backtracking/hamiltonian_cycle.py | 7 +++-- backtracking/knight_tour.py | 10 +++---- backtracking/minimax.py | 5 ++-- backtracking/n_queens.py | 8 +++--- backtracking/n_queens_math.py | 12 ++++----- backtracking/rat_in_maze.py | 6 ++--- backtracking/sudoku.py | 8 +++--- backtracking/sum_of_subsets.py | 14 +++++----- blockchain/chinese_remainder_theorem.py | 4 +-- blockchain/diophantine_equation.py | 6 ++--- blockchain/modular_division.py | 6 ++--- boolean_algebra/quine_mc_cluskey.py | 12 ++++----- cellular_automata/conways_game_of_life.py | 5 +--- ciphers/caesar_cipher.py | 9 ++++--- ciphers/decrypt_caesar_with_chi_squared.py | 7 +++-- ciphers/diffie.py | 4 +-- ciphers/shuffled_shift_cipher.py | 5 ++-- compression/huffman.py | 2 +- conversions/molecular_chemistry.py | 2 +- conversions/prefix_conversions.py | 11 ++++---- data_structures/binary_tree/avl_tree.py | 25 +++++++++--------- .../binary_tree/basic_binary_tree.py | 10 +++---- .../binary_search_tree_recursive.py | 24 ++++++++--------- .../binary_tree/binary_tree_traversals.py | 7 ++--- .../binary_tree/lazy_segment_tree.py | 5 ++-- .../binary_tree/merge_two_binary_trees.py | 10 +++---- data_structures/binary_tree/red_black_tree.py | 26 ++++++++++--------- data_structures/binary_tree/treap.py | 21 +++++++-------- data_structures/binary_tree/wavelet_tree.py | 7 +++-- 
data_structures/hashing/hash_table.py | 2 +- .../hashing/hash_table_with_linked_list.py | 2 +- data_structures/heap/heap.py | 12 +++++---- data_structures/heap/randomized_heap.py | 16 ++++++------ data_structures/heap/skew_heap.py | 14 +++++----- .../linked_list/merge_two_lists.py | 5 ++-- data_structures/linked_list/print_reverse.py | 4 +-- data_structures/linked_list/skip_list.py | 7 +++-- .../stacks/evaluate_postfix_notations.py | 7 ++--- data_structures/stacks/linked_stack.py | 6 +++-- data_structures/stacks/stack.py | 4 +-- divide_and_conquer/convex_hull.py | 19 +++++++------- divide_and_conquer/kth_order_statistic.py | 5 ++-- divide_and_conquer/mergesort.py | 6 ++--- divide_and_conquer/peak.py | 4 +-- electronics/electric_power.py | 5 ++-- electronics/ohms_law.py | 4 +-- graphs/basic_graphs.py | 8 +++--- graphs/bellman_ford.py | 8 +++--- graphs/bfs_zero_one_shortest_path.py | 14 +++++----- graphs/bidirectional_a_star.py | 7 ++--- graphs/bidirectional_breadth_first_search.py | 10 +++---- graphs/breadth_first_search.py | 7 +++-- graphs/breadth_first_search_shortest_path.py | 4 +-- graphs/depth_first_search.py | 5 +--- graphs/greedy_best_first.py | 8 +++--- graphs/minimum_spanning_tree_kruskal.py | 2 +- graphs/minimum_spanning_tree_prims2.py | 7 ++--- graphs/page_rank.py | 2 +- graphs/scc_kosaraju.py | 14 +++++----- hashes/luhn.py | 6 ++--- knapsack/knapsack.py | 5 ++-- linear_algebra/src/lib.py | 26 +++++++++---------- machine_learning/similarity_search.py | 5 ++-- maths/area_under_curve.py | 9 ++++--- maths/average_mean.py | 4 +-- maths/average_median.py | 4 +-- maths/entropy.py | 6 ++--- maths/euclidean_distance.py | 2 ++ maths/extended_euclidean_algorithm.py | 4 +-- maths/hardy_ramanujanalgo.py | 2 +- maths/line_length.py | 10 ++++--- maths/max_sum_sliding_window.py | 4 +-- maths/median_of_two_arrays.py | 4 +-- maths/numerical_integration.py | 9 ++++--- maths/sieve_of_eratosthenes.py | 5 ++-- maths/volume.py | 5 ++-- matrix/searching_in_sorted_matrix.py | 4 +-- other/date_to_weekday.py | 2 +- .../davisb_putnamb_logemannb_loveland.py | 24 ++++++++--------- other/lfu_cache.py | 6 +++-- other/lru_cache.py | 6 +++-- project_euler/problem_001/sol1.py | 2 +- project_euler/problem_001/sol5.py | 2 +- project_euler/problem_006/sol3.py | 2 +- project_euler/problem_008/sol2.py | 5 +--- project_euler/problem_012/sol2.py | 2 +- project_euler/problem_013/sol1.py | 2 +- project_euler/problem_014/sol2.py | 6 ++--- project_euler/problem_020/sol2.py | 2 +- project_euler/problem_021/sol1.py | 8 +++--- project_euler/problem_033/sol1.py | 5 ++-- project_euler/problem_036/sol1.py | 5 ++-- project_euler/problem_038/sol1.py | 5 ++-- project_euler/problem_049/sol1.py | 2 +- project_euler/problem_050/sol1.py | 4 +-- project_euler/problem_051/sol1.py | 6 ++--- project_euler/problem_054/sol1.py | 4 +-- project_euler/problem_056/sol1.py | 8 +++--- project_euler/problem_059/sol1.py | 23 ++++++++-------- project_euler/problem_070/sol1.py | 4 +-- project_euler/problem_074/sol1.py | 2 +- project_euler/problem_077/sol1.py | 8 +++--- project_euler/problem_080/sol1.py | 2 +- project_euler/problem_081/sol1.py | 2 +- project_euler/problem_085/sol1.py | 5 ++-- project_euler/problem_089/sol1.py | 2 +- project_euler/problem_101/sol1.py | 18 ++++++------- project_euler/problem_102/sol1.py | 14 +++++----- project_euler/problem_107/sol1.py | 21 ++++++++------- project_euler/problem_119/sol1.py | 2 +- project_euler/problem_123/sol1.py | 5 ++-- project_euler/problem_180/sol1.py | 7 +++-- project_euler/problem_203/sol1.py | 12 
++++----- scheduling/first_come_first_served.py | 12 ++++----- scheduling/round_robin.py | 9 ++++--- scheduling/shortest_job_first.py | 12 ++++----- searches/binary_search.py | 19 +++++++------- searches/fibonacci_search.py | 2 +- searches/ternary_search.py | 8 +++--- sorts/bitonic_sort.py | 8 +++--- sorts/bucket_sort.py | 4 +-- sorts/msd_radix_sort.py | 10 +++---- sorts/patience_sort.py | 7 ++--- sorts/pigeon_sort.py | 4 +-- sorts/quick_sort.py | 6 ++--- sorts/radix_sort.py | 6 ++--- sorts/recursive_insertion_sort.py | 5 +--- sorts/slowsort.py | 7 ++--- strings/aho_corasick.py | 7 ++--- strings/boyer_moore_search.py | 4 +-- strings/knuth_morris_pratt.py | 4 +-- web_programming/emails_from_url.py | 5 ++-- web_programming/fetch_github_info.py | 6 +++-- 142 files changed, 523 insertions(+), 530 deletions(-) rename "other/davis\342\200\223putnam\342\200\223logemann\342\200\223loveland.py" => other/davisb_putnamb_logemannb_loveland.py (94%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 41485f6f0ca4..0c00d5ca7f70 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -545,7 +545,7 @@ ## Other * [Activity Selection](https://github.com/TheAlgorithms/Python/blob/master/other/activity_selection.py) * [Date To Weekday](https://github.com/TheAlgorithms/Python/blob/master/other/date_to_weekday.py) - * [Davis–Putnam–Logemann–Loveland](https://github.com/TheAlgorithms/Python/blob/master/other/davis–putnam–logemann–loveland.py) + * [Davisb Putnamb Logemannb Loveland](https://github.com/TheAlgorithms/Python/blob/master/other/davisb_putnamb_logemannb_loveland.py) * [Dijkstra Bankers Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/dijkstra_bankers_algorithm.py) * [Doomsday](https://github.com/TheAlgorithms/Python/blob/master/other/doomsday.py) * [Fischer Yates Shuffle](https://github.com/TheAlgorithms/Python/blob/master/other/fischer_yates_shuffle.py) @@ -860,6 +860,7 @@ * [Counting Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/counting_sort.py) * [Cycle Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/cycle_sort.py) * [Double Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/double_sort.py) + * [Dutch National Flag Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/dutch_national_flag_sort.py) * [Exchange Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/exchange_sort.py) * [External Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/external_sort.py) * [Gnome Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/gnome_sort.py) diff --git a/arithmetic_analysis/in_static_equilibrium.py b/arithmetic_analysis/in_static_equilibrium.py index 7b5006a1a82c..6e8d1d043036 100644 --- a/arithmetic_analysis/in_static_equilibrium.py +++ b/arithmetic_analysis/in_static_equilibrium.py @@ -1,14 +1,14 @@ """ Checks if a system of forces is in static equilibrium. """ -from typing import List +from __future__ import annotations from numpy import array, cos, cross, ndarray, radians, sin def polar_force( magnitude: float, angle: float, radian_mode: bool = False -) -> List[float]: +) -> list[float]: """ Resolves force along rectangular components. 
(force, angle) => (force_x, force_y) diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index 5bb631758c21..b488b1bb3211 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -3,12 +3,12 @@ Reference: - https://en.wikipedia.org/wiki/LU_decomposition """ -from typing import Tuple +from __future__ import annotations import numpy as np -def lower_upper_decomposition(table: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: +def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """Lower-Upper (LU) Decomposition Example: diff --git a/arithmetic_analysis/newton_forward_interpolation.py b/arithmetic_analysis/newton_forward_interpolation.py index 66cde4b73c4f..490e0687f15f 100644 --- a/arithmetic_analysis/newton_forward_interpolation.py +++ b/arithmetic_analysis/newton_forward_interpolation.py @@ -1,7 +1,7 @@ # https://www.geeksforgeeks.org/newton-forward-backward-interpolation/ +from __future__ import annotations import math -from typing import List # for calculating u value @@ -22,7 +22,7 @@ def ucal(u: float, p: int) -> float: def main() -> None: n = int(input("enter the numbers of values: ")) - y: List[List[float]] = [] + y: list[list[float]] = [] for i in range(n): y.append([]) for i in range(n): diff --git a/arithmetic_analysis/newton_raphson.py b/arithmetic_analysis/newton_raphson.py index 146bb0aa5adf..1a820538630f 100644 --- a/arithmetic_analysis/newton_raphson.py +++ b/arithmetic_analysis/newton_raphson.py @@ -2,15 +2,16 @@ # Author: Syed Haseeb Shah (github.com/QuantumNovice) # The Newton-Raphson method (also known as Newton's method) is a way to # quickly find a good approximation for the root of a real-valued function +from __future__ import annotations + from decimal import Decimal from math import * # noqa: F401, F403 -from typing import Union from sympy import diff def newton_raphson( - func: str, a: Union[float, Decimal], precision: float = 10 ** -10 + func: str, a: float | Decimal, precision: float = 10 ** -10 ) -> float: """Finds root from the point 'a' onwards by Newton-Raphson method >>> newton_raphson("sin(x)", 2) diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py index 76462837ce35..bde60f0328ba 100644 --- a/backtracking/all_combinations.py +++ b/backtracking/all_combinations.py @@ -3,16 +3,16 @@ numbers out of 1 ... n. We use backtracking to solve this problem. Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! 
* (n - k)!))) """ -from typing import List +from __future__ import annotations -def generate_all_combinations(n: int, k: int) -> List[List[int]]: +def generate_all_combinations(n: int, k: int) -> list[list[int]]: """ >>> generate_all_combinations(n=4, k=2) [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] """ - result: List[List[int]] = [] + result: list[list[int]] = [] create_all_state(1, n, k, [], result) return result @@ -21,8 +21,8 @@ def create_all_state( increment: int, total_number: int, level: int, - current_list: List[int], - total_list: List[List[int]], + current_list: list[int], + total_list: list[list[int]], ) -> None: if level == 0: total_list.append(current_list[:]) @@ -34,7 +34,7 @@ def create_all_state( current_list.pop() -def print_all_state(total_list: List[List[int]]) -> None: +def print_all_state(total_list: list[list[int]]) -> None: for i in total_list: print(*i) diff --git a/backtracking/all_permutations.py b/backtracking/all_permutations.py index a0032c5ca814..ff8a53e0dd0e 100644 --- a/backtracking/all_permutations.py +++ b/backtracking/all_permutations.py @@ -5,18 +5,18 @@ Time complexity: O(n! * n), where n denotes the length of the given sequence. """ -from typing import List, Union +from __future__ import annotations -def generate_all_permutations(sequence: List[Union[int, str]]) -> None: +def generate_all_permutations(sequence: list[int | str]) -> None: create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))]) def create_state_space_tree( - sequence: List[Union[int, str]], - current_sequence: List[Union[int, str]], + sequence: list[int | str], + current_sequence: list[int | str], index: int, - index_used: List[int], + index_used: list[int], ) -> None: """ Creates a state space tree to iterate through each branch using DFS. @@ -44,8 +44,8 @@ def create_state_space_tree( sequence = list(map(int, input().split())) """ -sequence: List[Union[int, str]] = [3, 1, 2, 4] +sequence: list[int | str] = [3, 1, 2, 4] generate_all_permutations(sequence) -sequence_2: List[Union[int, str]] = ["A", "B", "C"] +sequence_2: list[int | str] = ["A", "B", "C"] generate_all_permutations(sequence_2) diff --git a/backtracking/all_subsequences.py b/backtracking/all_subsequences.py index 99db4ea46589..c465fc542407 100644 --- a/backtracking/all_subsequences.py +++ b/backtracking/all_subsequences.py @@ -5,15 +5,17 @@ Time complexity: O(2^n), where n denotes the length of the given sequence. """ -from typing import Any, List +from __future__ import annotations +from typing import Any -def generate_all_subsequences(sequence: List[Any]) -> None: + +def generate_all_subsequences(sequence: list[Any]) -> None: create_state_space_tree(sequence, [], 0) def create_state_space_tree( - sequence: List[Any], current_subsequence: List[Any], index: int + sequence: list[Any], current_subsequence: list[Any], index: int ) -> None: """ Creates a state space tree to iterate through each branch using DFS. 
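# An illustrative trace of the include/exclude branching described above (not
# part of the patch): for the sequence [1, 2] the DFS explores two children at
# every index -- skip the element or append it -- so the leaves of the state
# space tree are the four subsequences [], [2], [1] and [1, 2] (the exact order
# depends on which branch is taken first).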
@@ -32,7 +34,7 @@ def create_state_space_tree( if __name__ == "__main__": - seq: List[Any] = [3, 1, 2, 4] + seq: list[Any] = [3, 1, 2, 4] generate_all_subsequences(seq) seq.clear() diff --git a/backtracking/coloring.py b/backtracking/coloring.py index 3956b21a9182..8bda4b5871df 100644 --- a/backtracking/coloring.py +++ b/backtracking/coloring.py @@ -5,11 +5,10 @@ Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring """ -from typing import List def valid_coloring( - neighbours: List[int], colored_vertices: List[int], color: int + neighbours: list[int], colored_vertices: list[int], color: int ) -> bool: """ For each neighbour check if coloring constraint is satisfied @@ -35,7 +34,7 @@ def valid_coloring( def util_color( - graph: List[List[int]], max_colors: int, colored_vertices: List[int], index: int + graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int ) -> bool: """ Pseudo-Code @@ -86,7 +85,7 @@ def util_color( return False -def color(graph: List[List[int]], max_colors: int) -> List[int]: +def color(graph: list[list[int]], max_colors: int) -> list[int]: """ Wrapper function to call subroutine called util_color which will either return True or False. diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py index 7be1ea350d7c..19751b347320 100644 --- a/backtracking/hamiltonian_cycle.py +++ b/backtracking/hamiltonian_cycle.py @@ -6,11 +6,10 @@ Wikipedia: https://en.wikipedia.org/wiki/Hamiltonian_path """ -from typing import List def valid_connection( - graph: List[List[int]], next_ver: int, curr_ind: int, path: List[int] + graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int] ) -> bool: """ Checks whether it is possible to add next into path by validating 2 statements @@ -47,7 +46,7 @@ def valid_connection( return not any(vertex == next_ver for vertex in path) -def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int) -> bool: +def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool: """ Pseudo-Code Base Case: @@ -108,7 +107,7 @@ def util_hamilton_cycle(graph: List[List[int]], path: List[int], curr_ind: int) return False -def hamilton_cycle(graph: List[List[int]], start_index: int = 0) -> List[int]: +def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]: r""" Wrapper function to call subroutine called util_hamilton_cycle, which will either return array of vertices indicating hamiltonian cycle diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py index 8e6613e07d8b..6e9b31bd1133 100644 --- a/backtracking/knight_tour.py +++ b/backtracking/knight_tour.py @@ -1,9 +1,9 @@ # Knight Tour Intro: https://www.youtube.com/watch?v=ab_dY3dZFHM -from typing import List, Tuple +from __future__ import annotations -def get_valid_pos(position: Tuple[int, int], n: int) -> List[Tuple[int, int]]: +def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]: """ Find all the valid positions a knight can move to from the current position. @@ -32,7 +32,7 @@ def get_valid_pos(position: Tuple[int, int], n: int) -> List[Tuple[int, int]]: return permissible_positions -def is_complete(board: List[List[int]]) -> bool: +def is_complete(board: list[list[int]]) -> bool: """ Check if the board (matrix) has been completely filled with non-zero values. 
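# An equivalent way to read the completion check described above (a sketch
# only, not the patched code): the knight's tour is complete exactly when no
# cell of the board is still at its initial value of 0.
def board_is_complete(board: list) -> bool:
    return all(cell != 0 for row in board for cell in row)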
@@ -47,7 +47,7 @@ def is_complete(board: List[List[int]]) -> bool: def open_knight_tour_helper( - board: List[List[int]], pos: Tuple[int, int], curr: int + board: list[list[int]], pos: tuple[int, int], curr: int ) -> bool: """ Helper function to solve knight tour problem. @@ -68,7 +68,7 @@ def open_knight_tour_helper( return False -def open_knight_tour(n: int) -> List[List[int]]: +def open_knight_tour(n: int) -> list[list[int]]: """ Find the solution for the knight tour problem for a board of size n. Raises ValueError if the tour cannot be performed for the given size. diff --git a/backtracking/minimax.py b/backtracking/minimax.py index dda29b47d6cc..6e310131e069 100644 --- a/backtracking/minimax.py +++ b/backtracking/minimax.py @@ -7,12 +7,13 @@ leaves of game tree is stored in scores[] height is maximum height of Game tree """ +from __future__ import annotations + import math -from typing import List def minimax( - depth: int, node_index: int, is_max: bool, scores: List[int], height: float + depth: int, node_index: int, is_max: bool, scores: list[int], height: float ) -> int: """ >>> import math diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index 29b8d819acf3..b8ace59781f5 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -7,12 +7,12 @@ diagonal lines. """ -from typing import List +from __future__ import annotations solution = [] -def isSafe(board: List[List[int]], row: int, column: int) -> bool: +def isSafe(board: list[list[int]], row: int, column: int) -> bool: """ This function returns a boolean value True if it is safe to place a queen there considering the current state of the board. @@ -40,7 +40,7 @@ def isSafe(board: List[List[int]], row: int, column: int) -> bool: return True -def solve(board: List[List[int]], row: int) -> bool: +def solve(board: list[list[int]], row: int) -> bool: """ It creates a state space tree and calls the safe function until it receives a False Boolean and terminates that branch and backtracks to the next @@ -70,7 +70,7 @@ def solve(board: List[List[int]], row: int) -> bool: return False -def printboard(board: List[List[int]]) -> None: +def printboard(board: list[list[int]]) -> None: """ Prints the boards that have a successful combination. """ diff --git a/backtracking/n_queens_math.py b/backtracking/n_queens_math.py index a8651c5c362e..c12aa6c3387d 100644 --- a/backtracking/n_queens_math.py +++ b/backtracking/n_queens_math.py @@ -75,14 +75,14 @@ for another one or vice versa. """ -from typing import List +from __future__ import annotations def depth_first_search( - possible_board: List[int], - diagonal_right_collisions: List[int], - diagonal_left_collisions: List[int], - boards: List[List[str]], + possible_board: list[int], + diagonal_right_collisions: list[int], + diagonal_left_collisions: list[int], + boards: list[list[str]], n: int, ) -> None: """ @@ -139,7 +139,7 @@ def depth_first_search( def n_queens_solution(n: int) -> None: - boards: List[List[str]] = [] + boards: list[list[str]] = [] depth_first_search([], [], [], boards, n) # Print all the boards diff --git a/backtracking/rat_in_maze.py b/backtracking/rat_in_maze.py index cd2a8f41daa8..2860880db540 100644 --- a/backtracking/rat_in_maze.py +++ b/backtracking/rat_in_maze.py @@ -1,7 +1,7 @@ -from typing import List +from __future__ import annotations -def solve_maze(maze: List[List[int]]) -> bool: +def solve_maze(maze: list[list[int]]) -> bool: """ This method solves the "rat in maze" problem. 
In this problem we have some n by n matrix, a start point and an end point. @@ -70,7 +70,7 @@ def solve_maze(maze: List[List[int]]) -> bool: return solved -def run_maze(maze: List[List[int]], i: int, j: int, solutions: List[List[int]]) -> bool: +def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool: """ This method is recursive starting from (i, j) and going in one of four directions: up, down, left, right. diff --git a/backtracking/sudoku.py b/backtracking/sudoku.py index 593fa52d6d8a..698dedcc2125 100644 --- a/backtracking/sudoku.py +++ b/backtracking/sudoku.py @@ -9,9 +9,9 @@ have solved the puzzle. else, we backtrack and place another number in that cell and repeat this process. """ -from typing import List, Optional, Tuple +from __future__ import annotations -Matrix = List[List[int]] +Matrix = list[list[int]] # assigning initial values to the grid initial_grid: Matrix = [ @@ -59,7 +59,7 @@ def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool: return True -def find_empty_location(grid: Matrix) -> Optional[Tuple[int, int]]: +def find_empty_location(grid: Matrix) -> tuple[int, int] | None: """ This function finds an empty location so that we can assign a number for that particular row and column. @@ -71,7 +71,7 @@ def find_empty_location(grid: Matrix) -> Optional[Tuple[int, int]]: return None -def sudoku(grid: Matrix) -> Optional[Matrix]: +def sudoku(grid: Matrix) -> Matrix | None: """ Takes a partially filled-in grid and attempts to assign values to all unassigned locations in such a way to meet the requirements diff --git a/backtracking/sum_of_subsets.py b/backtracking/sum_of_subsets.py index f695b8f7a80e..8348544c0175 100644 --- a/backtracking/sum_of_subsets.py +++ b/backtracking/sum_of_subsets.py @@ -6,12 +6,12 @@ Summation of the chosen numbers must be equal to given number M and one number can be used only once. """ -from typing import List +from __future__ import annotations -def generate_sum_of_subsets_soln(nums: List[int], max_sum: int) -> List[List[int]]: - result: List[List[int]] = [] - path: List[int] = [] +def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]: + result: list[list[int]] = [] + path: list[int] = [] num_index = 0 remaining_nums_sum = sum(nums) create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum) @@ -19,11 +19,11 @@ def generate_sum_of_subsets_soln(nums: List[int], max_sum: int) -> List[List[int def create_state_space_tree( - nums: List[int], + nums: list[int], max_sum: int, num_index: int, - path: List[int], - result: List[List[int]], + path: list[int], + result: list[list[int]], remaining_nums_sum: int, ) -> None: """ diff --git a/blockchain/chinese_remainder_theorem.py b/blockchain/chinese_remainder_theorem.py index b50147ac1215..54d861dd9f10 100644 --- a/blockchain/chinese_remainder_theorem.py +++ b/blockchain/chinese_remainder_theorem.py @@ -11,11 +11,11 @@ 1. Use extended euclid algorithm to find x,y such that a*x + b*y = 1 2. 
Take n = ra*by + rb*ax """ -from typing import Tuple +from __future__ import annotations # Extended Euclid -def extended_euclid(a: int, b: int) -> Tuple[int, int]: +def extended_euclid(a: int, b: int) -> tuple[int, int]: """ >>> extended_euclid(10, 6) (-1, 2) diff --git a/blockchain/diophantine_equation.py b/blockchain/diophantine_equation.py index 7df674cb1438..22b0cad75c63 100644 --- a/blockchain/diophantine_equation.py +++ b/blockchain/diophantine_equation.py @@ -1,7 +1,7 @@ -from typing import Tuple +from __future__ import annotations -def diophantine(a: int, b: int, c: int) -> Tuple[float, float]: +def diophantine(a: int, b: int, c: int) -> tuple[float, float]: """ Diophantine Equation : Given integers a,b,c ( at least one of a and b != 0), the diophantine equation a*x + b*y = c has a solution (where x and y are integers) @@ -95,7 +95,7 @@ def greatest_common_divisor(a: int, b: int) -> int: return b -def extended_gcd(a: int, b: int) -> Tuple[int, int, int]: +def extended_gcd(a: int, b: int) -> tuple[int, int, int]: """ Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers x and y, then d = gcd(a,b) diff --git a/blockchain/modular_division.py b/blockchain/modular_division.py index 4f7f50a92ad0..a9d0f65c5b27 100644 --- a/blockchain/modular_division.py +++ b/blockchain/modular_division.py @@ -1,4 +1,4 @@ -from typing import Tuple +from __future__ import annotations def modular_division(a: int, b: int, n: int) -> int: @@ -73,7 +73,7 @@ def modular_division2(a: int, b: int, n: int) -> int: return x -def extended_gcd(a: int, b: int) -> Tuple[int, int, int]: +def extended_gcd(a: int, b: int) -> tuple[int, int, int]: """ Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers x and y, then d = gcd(a,b) @@ -101,7 +101,7 @@ def extended_gcd(a: int, b: int) -> Tuple[int, int, int]: return (d, x, y) -def extended_euclid(a: int, b: int) -> Tuple[int, int]: +def extended_euclid(a: int, b: int) -> tuple[int, int]: """ Extended Euclid >>> extended_euclid(10, 6) diff --git a/boolean_algebra/quine_mc_cluskey.py b/boolean_algebra/quine_mc_cluskey.py index 70cdf25a701d..9cc99b1eeabb 100644 --- a/boolean_algebra/quine_mc_cluskey.py +++ b/boolean_algebra/quine_mc_cluskey.py @@ -1,4 +1,4 @@ -from typing import List +from __future__ import annotations def compare_string(string1: str, string2: str) -> str: @@ -22,7 +22,7 @@ def compare_string(string1: str, string2: str) -> str: return "".join(l1) -def check(binary: List[str]) -> List[str]: +def check(binary: list[str]) -> list[str]: """ >>> check(['0.00.01.5']) ['0.00.01.5'] @@ -46,7 +46,7 @@ def check(binary: List[str]) -> List[str]: binary = list(set(temp)) -def decimal_to_binary(no_of_variable: int, minterms: List[float]) -> List[str]: +def decimal_to_binary(no_of_variable: int, minterms: list[float]) -> list[str]: """ >>> decimal_to_binary(3,[1.5]) ['0.00.01.5'] @@ -82,7 +82,7 @@ def is_for_table(string1: str, string2: str, count: int) -> bool: return False -def selection(chart: List[List[int]], prime_implicants: List[str]) -> List[str]: +def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]: """ >>> selection([[1]],['0.00.01.5']) ['0.00.01.5'] @@ -130,8 +130,8 @@ def selection(chart: List[List[int]], prime_implicants: List[str]) -> List[str]: def prime_implicant_chart( - prime_implicants: List[str], binary: List[str] -) -> List[List[int]]: + prime_implicants: list[str], binary: list[str] +) -> list[list[int]]: """ >>> prime_implicant_chart(['0.00.01.5'],['0.00.01.5']) [[1]] 
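# Background note on the structure built above (standard Quine-McCluskey
# terminology, not text from the patch): prime_implicant_chart returns one row
# per prime implicant and one column per minterm, with a 1 wherever that
# implicant covers the minterm; selection() then looks for a small set of rows
# that together cover every column, which yields the minimised expression.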
diff --git a/cellular_automata/conways_game_of_life.py b/cellular_automata/conways_game_of_life.py index 321baa3a3794..079fb4d04499 100644 --- a/cellular_automata/conways_game_of_life.py +++ b/cellular_automata/conways_game_of_life.py @@ -2,11 +2,8 @@ Conway's Game of Life implemented in Python. https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life """ - from __future__ import annotations -from typing import List - from PIL import Image # Define glider example @@ -25,7 +22,7 @@ BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] -def new_generation(cells: List[List[int]]) -> List[List[int]]: +def new_generation(cells: list[list[int]]) -> list[list[int]]: """ Generates the next generation for a given state of Conway's Game of Life. >>> new_generation(BLINKER) diff --git a/ciphers/caesar_cipher.py b/ciphers/caesar_cipher.py index 4b2f76c7d873..8cd9fab58471 100644 --- a/ciphers/caesar_cipher.py +++ b/ciphers/caesar_cipher.py @@ -1,8 +1,9 @@ +from __future__ import annotations + from string import ascii_letters -from typing import Dict, Optional -def encrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str: +def encrypt(input_string: str, key: int, alphabet: str | None = None) -> str: """ encrypt ======= @@ -80,7 +81,7 @@ def encrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str: return result -def decrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str: +def decrypt(input_string: str, key: int, alphabet: str | None = None) -> str: """ decrypt ======= @@ -145,7 +146,7 @@ def decrypt(input_string: str, key: int, alphabet: Optional[str] = None) -> str: return encrypt(input_string, key, alphabet) -def brute_force(input_string: str, alphabet: Optional[str] = None) -> Dict[int, str]: +def brute_force(input_string: str, alphabet: str | None = None) -> dict[int, str]: """ brute_force =========== diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py index 7e3705b8f71f..89477914a030 100644 --- a/ciphers/decrypt_caesar_with_chi_squared.py +++ b/ciphers/decrypt_caesar_with_chi_squared.py @@ -1,12 +1,11 @@ #!/usr/bin/env python3 - -from typing import Optional +from __future__ import annotations def decrypt_caesar_with_chi_squared( ciphertext: str, - cipher_alphabet: Optional[list[str]] = None, - frequencies_dict: Optional[dict[str, float]] = None, + cipher_alphabet: list[str] | None = None, + frequencies_dict: dict[str, float] | None = None, case_sensetive: bool = False, ) -> tuple[int, float, str]: """ diff --git a/ciphers/diffie.py b/ciphers/diffie.py index a23a8104afe2..4ff90be009c1 100644 --- a/ciphers/diffie.py +++ b/ciphers/diffie.py @@ -1,7 +1,7 @@ -from typing import Optional +from __future__ import annotations -def find_primitive(n: int) -> Optional[int]: +def find_primitive(n: int) -> int | None: for r in range(1, n): li = [] for x in range(n - 1): diff --git a/ciphers/shuffled_shift_cipher.py b/ciphers/shuffled_shift_cipher.py index 01d099641dd2..3b84f97f6769 100644 --- a/ciphers/shuffled_shift_cipher.py +++ b/ciphers/shuffled_shift_cipher.py @@ -1,6 +1,7 @@ +from __future__ import annotations + import random import string -from typing import Optional class ShuffledShiftCipher: @@ -27,7 +28,7 @@ class ShuffledShiftCipher: cip2 = ShuffledShiftCipher() """ - def __init__(self, passcode: Optional[str] = None) -> None: + def __init__(self, passcode: str | None = None) -> None: """ Initializes a cipher object with a passcode as it's entity Note: No new passcode is generated if user provides a passcode 
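# A small self-contained example of the typing style that patch 0208 applies
# across these files (the function below is hypothetical, not one of the
# patched modules): with `from __future__ import annotations`, built-in
# generics and PEP 604 unions can replace typing.List and typing.Optional
# even on Python 3.9.
from __future__ import annotations


def first_titlecase(words: list[str], default: str | None = None) -> str | None:
    # Return the first title-cased word, or `default` when there is none.
    return next((word for word in words if word.istitle()), default)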
diff --git a/compression/huffman.py b/compression/huffman.py index b6cc4de1e8e6..8f37a53ce2b7 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -30,7 +30,7 @@ def parse_file(file_path): if not c: break chars[c] = chars[c] + 1 if c in chars.keys() else 1 - return sorted([Letter(c, f) for c, f in chars.items()], key=lambda l: l.freq) + return sorted((Letter(c, f) for c, f in chars.items()), key=lambda l: l.freq) def build_tree(letters): diff --git a/conversions/molecular_chemistry.py b/conversions/molecular_chemistry.py index 8c68459965b0..0024eb5cb5b8 100644 --- a/conversions/molecular_chemistry.py +++ b/conversions/molecular_chemistry.py @@ -20,7 +20,7 @@ def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float: >>> molarity_to_normality(4, 11.4, 5.7) 8 """ - return round((float(moles / volume) * nfactor)) + return round(float(moles / volume) * nfactor) def moles_to_pressure(volume: float, moles: float, temperature: float) -> float: diff --git a/conversions/prefix_conversions.py b/conversions/prefix_conversions.py index 78db4a91709c..a77556433c66 100644 --- a/conversions/prefix_conversions.py +++ b/conversions/prefix_conversions.py @@ -1,8 +1,9 @@ """ Convert International System of Units (SI) and Binary prefixes """ +from __future__ import annotations + from enum import Enum -from typing import Union class SI_Unit(Enum): @@ -41,8 +42,8 @@ class Binary_Unit(Enum): def convert_si_prefix( known_amount: float, - known_prefix: Union[str, SI_Unit], - unknown_prefix: Union[str, SI_Unit], + known_prefix: str | SI_Unit, + unknown_prefix: str | SI_Unit, ) -> float: """ Wikipedia reference: https://en.wikipedia.org/wiki/Binary_prefix @@ -70,8 +71,8 @@ def convert_si_prefix( def convert_binary_prefix( known_amount: float, - known_prefix: Union[str, Binary_Unit], - unknown_prefix: Union[str, Binary_Unit], + known_prefix: str | Binary_Unit, + unknown_prefix: str | Binary_Unit, ) -> float: """ Wikipedia reference: https://en.wikipedia.org/wiki/Metric_prefix diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index e0d3e4d438a8..1ab13777b7a6 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -5,15 +5,16 @@ For testing run: python avl_tree.py """ +from __future__ import annotations import math import random -from typing import Any, List, Optional +from typing import Any class my_queue: def __init__(self) -> None: - self.data: List[Any] = [] + self.data: list[Any] = [] self.head: int = 0 self.tail: int = 0 @@ -41,17 +42,17 @@ def print(self) -> None: class my_node: def __init__(self, data: Any) -> None: self.data = data - self.left: Optional[my_node] = None - self.right: Optional[my_node] = None + self.left: my_node | None = None + self.right: my_node | None = None self.height: int = 1 def get_data(self) -> Any: return self.data - def get_left(self) -> Optional["my_node"]: + def get_left(self) -> my_node | None: return self.left - def get_right(self) -> Optional["my_node"]: + def get_right(self) -> my_node | None: return self.right def get_height(self) -> int: @@ -61,11 +62,11 @@ def set_data(self, data: Any) -> None: self.data = data return - def set_left(self, node: Optional["my_node"]) -> None: + def set_left(self, node: my_node | None) -> None: self.left = node return - def set_right(self, node: Optional["my_node"]) -> None: + def set_right(self, node: my_node | None) -> None: self.right = node return @@ -74,7 +75,7 @@ def set_height(self, height: int) -> None: return 
-def get_height(node: Optional["my_node"]) -> int: +def get_height(node: my_node | None) -> int: if node is None: return 0 return node.get_height() @@ -149,7 +150,7 @@ def rl_rotation(node: my_node) -> my_node: return left_rotation(node) -def insert_node(node: Optional["my_node"], data: Any) -> Optional["my_node"]: +def insert_node(node: my_node | None, data: Any) -> my_node | None: if node is None: return my_node(data) if data < node.get_data(): @@ -197,7 +198,7 @@ def get_leftMost(root: my_node) -> Any: return root.get_data() -def del_node(root: my_node, data: Any) -> Optional["my_node"]: +def del_node(root: my_node, data: Any) -> my_node | None: left_child = root.get_left() right_child = root.get_right() if root.get_data() == data: @@ -275,7 +276,7 @@ class AVLtree: """ def __init__(self) -> None: - self.root: Optional[my_node] = None + self.root: my_node | None = None def get_height(self) -> int: return get_height(self.root) diff --git a/data_structures/binary_tree/basic_binary_tree.py b/data_structures/binary_tree/basic_binary_tree.py index 575b157ee78a..65dccf247b51 100644 --- a/data_structures/binary_tree/basic_binary_tree.py +++ b/data_structures/binary_tree/basic_binary_tree.py @@ -1,4 +1,4 @@ -from typing import Optional +from __future__ import annotations class Node: @@ -8,11 +8,11 @@ class Node: def __init__(self, data: int) -> None: self.data = data - self.left: Optional[Node] = None - self.right: Optional[Node] = None + self.left: Node | None = None + self.right: Node | None = None -def display(tree: Optional[Node]) -> None: # In Order traversal of the tree +def display(tree: Node | None) -> None: # In Order traversal of the tree """ >>> root = Node(1) >>> root.left = Node(0) @@ -30,7 +30,7 @@ def display(tree: Optional[Node]) -> None: # In Order traversal of the tree display(tree.right) -def depth_of_tree(tree: Optional[Node]) -> int: +def depth_of_tree(tree: Node | None) -> int: """ Recursive function that returns the depth of a binary tree. 
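# A minimal sketch of the recursion behind depth_of_tree mentioned above
# (a simplified stand-in for illustration, not the patched file): an empty
# subtree contributes depth 0, and a node adds one level on top of its
# deeper child.
def subtree_depth(node) -> int:
    return 0 if node is None else 1 + max(
        subtree_depth(node.left), subtree_depth(node.right)
    )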
diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index a05e28a7bd54..4bdf4e33dcc3 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -7,21 +7,23 @@ To run an example: python binary_search_tree_recursive.py """ +from __future__ import annotations + import unittest -from typing import Iterator, Optional +from typing import Iterator class Node: - def __init__(self, label: int, parent: Optional["Node"]) -> None: + def __init__(self, label: int, parent: Node | None) -> None: self.label = label self.parent = parent - self.left: Optional[Node] = None - self.right: Optional[Node] = None + self.left: Node | None = None + self.right: Node | None = None class BinarySearchTree: def __init__(self) -> None: - self.root: Optional[Node] = None + self.root: Node | None = None def empty(self) -> None: """ @@ -66,9 +68,7 @@ def put(self, label: int) -> None: """ self.root = self._put(self.root, label) - def _put( - self, node: Optional[Node], label: int, parent: Optional[Node] = None - ) -> Node: + def _put(self, node: Node | None, label: int, parent: Node | None = None) -> Node: if node is None: node = Node(label, parent) else: @@ -98,7 +98,7 @@ def search(self, label: int) -> Node: """ return self._search(self.root, label) - def _search(self, node: Optional[Node], label: int) -> Node: + def _search(self, node: Node | None, label: int) -> Node: if node is None: raise Exception(f"Node with label {label} does not exist") else: @@ -140,7 +140,7 @@ def remove(self, label: int) -> None: else: self._reassign_nodes(node, None) - def _reassign_nodes(self, node: Node, new_children: Optional[Node]) -> None: + def _reassign_nodes(self, node: Node, new_children: Node | None) -> None: if new_children: new_children.parent = node.parent @@ -244,7 +244,7 @@ def inorder_traversal(self) -> Iterator[Node]: """ return self._inorder_traversal(self.root) - def _inorder_traversal(self, node: Optional[Node]) -> Iterator[Node]: + def _inorder_traversal(self, node: Node | None) -> Iterator[Node]: if node is not None: yield from self._inorder_traversal(node.left) yield node @@ -266,7 +266,7 @@ def preorder_traversal(self) -> Iterator[Node]: """ return self._preorder_traversal(self.root) - def _preorder_traversal(self, node: Optional[Node]) -> Iterator[Node]: + def _preorder_traversal(self, node: Node | None) -> Iterator[Node]: if node is not None: yield node yield from self._preorder_traversal(node.left) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 7857880dada9..de9e9d60d272 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -1,13 +1,14 @@ # https://en.wikipedia.org/wiki/Tree_traversal +from __future__ import annotations + from dataclasses import dataclass -from typing import Optional @dataclass class Node: data: int - left: Optional["Node"] = None - right: Optional["Node"] = None + left: Node | None = None + right: Node | None = None def make_tree() -> Node: diff --git a/data_structures/binary_tree/lazy_segment_tree.py b/data_structures/binary_tree/lazy_segment_tree.py index 9066db294613..94329cb43a76 100644 --- a/data_structures/binary_tree/lazy_segment_tree.py +++ b/data_structures/binary_tree/lazy_segment_tree.py @@ -1,7 +1,6 @@ from __future__ import annotations import math -from typing import 
List, Union class SegmentTree: @@ -38,7 +37,7 @@ def right(self, idx: int) -> int: return idx * 2 + 1 def build( - self, idx: int, left_element: int, right_element: int, A: List[int] + self, idx: int, left_element: int, right_element: int, A: list[int] ) -> None: if left_element == right_element: self.segment_tree[idx] = A[left_element - 1] @@ -89,7 +88,7 @@ def update( # query with O(lg n) def query( self, idx: int, left_element: int, right_element: int, a: int, b: int - ) -> Union[int, float]: + ) -> int | float: """ query(1, 1, size, a, b) for query max of [a,b] >>> A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] diff --git a/data_structures/binary_tree/merge_two_binary_trees.py b/data_structures/binary_tree/merge_two_binary_trees.py index 6b202adb3cf5..d169e0e75b82 100644 --- a/data_structures/binary_tree/merge_two_binary_trees.py +++ b/data_structures/binary_tree/merge_two_binary_trees.py @@ -5,7 +5,7 @@ both nodes to the new value of the merged node. Otherwise, the NOT null node will be used as the node of new tree. """ -from typing import Optional +from __future__ import annotations class Node: @@ -15,11 +15,11 @@ class Node: def __init__(self, value: int = 0) -> None: self.value = value - self.left: Optional[Node] = None - self.right: Optional[Node] = None + self.left: Node | None = None + self.right: Node | None = None -def merge_two_binary_trees(tree1: Optional[Node], tree2: Optional[Node]) -> Node: +def merge_two_binary_trees(tree1: Node | None, tree2: Node | None) -> Node: """ Returns root node of the merged tree. @@ -52,7 +52,7 @@ def merge_two_binary_trees(tree1: Optional[Node], tree2: Optional[Node]) -> Node return tree1 -def print_preorder(root: Optional[Node]) -> None: +def print_preorder(root: Node | None) -> None: """ Print pre-order traversal of the tree. diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index de971a712fc1..e27757f20062 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -2,7 +2,9 @@ python/black : true flake8 : passed """ -from typing import Iterator, Optional +from __future__ import annotations + +from typing import Iterator class RedBlackTree: @@ -21,11 +23,11 @@ class RedBlackTree: def __init__( self, - label: Optional[int] = None, + label: int | None = None, color: int = 0, - parent: Optional["RedBlackTree"] = None, - left: Optional["RedBlackTree"] = None, - right: Optional["RedBlackTree"] = None, + parent: RedBlackTree | None = None, + left: RedBlackTree | None = None, + right: RedBlackTree | None = None, ) -> None: """Initialize a new Red-Black Tree node with the given values: label: The value associated with this node @@ -42,7 +44,7 @@ def __init__( # Here are functions which are specific to red-black trees - def rotate_left(self) -> "RedBlackTree": + def rotate_left(self) -> RedBlackTree: """Rotate the subtree rooted at this node to the left and returns the new root to this subtree. Performing one rotation can be done in O(1). @@ -62,7 +64,7 @@ def rotate_left(self) -> "RedBlackTree": right.parent = parent return right - def rotate_right(self) -> "RedBlackTree": + def rotate_right(self) -> RedBlackTree: """Rotate the subtree rooted at this node to the right and returns the new root to this subtree. Performing one rotation can be done in O(1). 
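Dropping the quotes around self-referential annotations, as in rotate_left and rotate_right above now returning RedBlackTree rather than "RedBlackTree", relies on the same postponed evaluation: the class name need not exist yet when the def statement runs. A small sketch with a made-up LinkedNode class:

    from __future__ import annotations


    class LinkedNode:
        def __init__(self, value: int, parent: LinkedNode | None = None) -> None:
            # LinkedNode is still being defined at this point, so without
            # postponed evaluation this annotation would need string quotes.
            self.value = value
            self.parent = parent

        def root(self) -> LinkedNode:
            # The return annotation names the enclosing class directly.
            node = self
            while node.parent is not None:
                node = node.parent
            return node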
@@ -82,7 +84,7 @@ def rotate_right(self) -> "RedBlackTree": left.parent = parent return left - def insert(self, label: int) -> "RedBlackTree": + def insert(self, label: int) -> RedBlackTree: """Inserts label into the subtree rooted at self, performs any rotations necessary to maintain balance, and then returns the new root to this subtree (likely self). @@ -139,7 +141,7 @@ def _insert_repair(self) -> None: self.grandparent.color = 1 self.grandparent._insert_repair() - def remove(self, label: int) -> "RedBlackTree": + def remove(self, label: int) -> RedBlackTree: """Remove label from this tree.""" if self.label == label: if self.left and self.right: @@ -337,7 +339,7 @@ def __contains__(self, label) -> bool: """ return self.search(label) is not None - def search(self, label: int) -> "RedBlackTree": + def search(self, label: int) -> RedBlackTree: """Search through the tree for label, returning its node if it's found, and None otherwise. This method is guaranteed to run in O(log(n)) time. @@ -411,7 +413,7 @@ def get_min(self) -> int: return self.label @property - def grandparent(self) -> "RedBlackTree": + def grandparent(self) -> RedBlackTree: """Get the current node's grandparent, or None if it doesn't exist.""" if self.parent is None: return None @@ -419,7 +421,7 @@ def grandparent(self) -> "RedBlackTree": return self.parent.parent @property - def sibling(self) -> "RedBlackTree": + def sibling(self) -> RedBlackTree: """Get the current node's sibling, or None if it doesn't exist.""" if self.parent is None: return None diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py index a09dcc928143..0526b139b3c7 100644 --- a/data_structures/binary_tree/treap.py +++ b/data_structures/binary_tree/treap.py @@ -1,9 +1,6 @@ -# flake8: noqa - from __future__ import annotations from random import random -from typing import Optional, Tuple class Node: @@ -12,11 +9,11 @@ class Node: Treap is a binary tree by value and heap by priority """ - def __init__(self, value: Optional[int] = None): + def __init__(self, value: int | None = None): self.value = value self.prior = random() - self.left: Optional[Node] = None - self.right: Optional[Node] = None + self.left: Node | None = None + self.right: Node | None = None def __repr__(self) -> str: from pprint import pformat @@ -35,7 +32,7 @@ def __str__(self) -> str: return value + left + right -def split(root: Optional[Node], value: int) -> Tuple[Optional[Node], Optional[Node]]: +def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]: """ We split current tree into 2 trees with value: @@ -64,7 +61,7 @@ def split(root: Optional[Node], value: int) -> Tuple[Optional[Node], Optional[No return root, right -def merge(left: Optional[Node], right: Optional[Node]) -> Optional[Node]: +def merge(left: Node | None, right: Node | None) -> Node | None: """ We merge 2 trees into one. 
Note: all left tree's values must be less than all right tree's @@ -86,7 +83,7 @@ def merge(left: Optional[Node], right: Optional[Node]) -> Optional[Node]: return right -def insert(root: Optional[Node], value: int) -> Optional[Node]: +def insert(root: Node | None, value: int) -> Node | None: """ Insert element @@ -99,7 +96,7 @@ def insert(root: Optional[Node], value: int) -> Optional[Node]: return merge(merge(left, node), right) -def erase(root: Optional[Node], value: int) -> Optional[Node]: +def erase(root: Node | None, value: int) -> Node | None: """ Erase element @@ -112,7 +109,7 @@ def erase(root: Optional[Node], value: int) -> Optional[Node]: return merge(left, right) -def inorder(root: Optional[Node]) -> None: +def inorder(root: Node | None) -> None: """ Just recursive print of a tree """ @@ -124,7 +121,7 @@ def inorder(root: Optional[Node]) -> None: inorder(root.right) -def interactTreap(root: Optional[Node], args: str) -> Optional[Node]: +def interactTreap(root: Node | None, args: str) -> Node | None: """ Commands: + value to add value into treap diff --git a/data_structures/binary_tree/wavelet_tree.py b/data_structures/binary_tree/wavelet_tree.py index 1607244f74ed..173a88ab7316 100644 --- a/data_structures/binary_tree/wavelet_tree.py +++ b/data_structures/binary_tree/wavelet_tree.py @@ -7,8 +7,7 @@ 2. https://www.youtube.com/watch?v=4aSv9PcecDw&t=811s 3. https://www.youtube.com/watch?v=CybAgVF-MMc&t=1178s """ - -from typing import Optional +from __future__ import annotations test_array = [2, 1, 4, 5, 6, 0, 8, 9, 1, 2, 0, 6, 4, 2, 0, 6, 5, 3, 2, 7] @@ -18,8 +17,8 @@ def __init__(self, length: int) -> None: self.minn: int = -1 self.maxx: int = -1 self.map_left: list[int] = [-1] * length - self.left: Optional[Node] = None - self.right: Optional[Node] = None + self.left: Node | None = None + self.right: Node | None = None def __repr__(self) -> str: """ diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index fd9e6eec134c..f4422de53821 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -19,7 +19,7 @@ def keys(self): return self._keys def balanced_factor(self): - return sum([1 for slot in self.values if slot is not None]) / ( + return sum(1 for slot in self.values if slot is not None) / ( self.size_table * self.charge_factor ) diff --git a/data_structures/hashing/hash_table_with_linked_list.py b/data_structures/hashing/hash_table_with_linked_list.py index fe838268fce8..f404c5251246 100644 --- a/data_structures/hashing/hash_table_with_linked_list.py +++ b/data_structures/hashing/hash_table_with_linked_list.py @@ -14,7 +14,7 @@ def _set_value(self, key, data): def balanced_factor(self): return ( - sum([self.charge_factor - len(slot) for slot in self.values]) + sum(self.charge_factor - len(slot) for slot in self.values) / self.size_table * self.charge_factor ) diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index 65a70e468d1c..550439edd239 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -1,4 +1,6 @@ -from typing import Iterable, List, Optional +from __future__ import annotations + +from typing import Iterable class Heap: @@ -25,19 +27,19 @@ class Heap: """ def __init__(self) -> None: - self.h: List[float] = [] + self.h: list[float] = [] self.heap_size: int = 0 def __repr__(self) -> str: return str(self.h) - def parent_index(self, child_idx: int) -> Optional[int]: + def parent_index(self, child_idx: int) -> int | None: """return the parent index of 
given child""" if child_idx > 0: return (child_idx - 1) // 2 return None - def left_child_idx(self, parent_idx: int) -> Optional[int]: + def left_child_idx(self, parent_idx: int) -> int | None: """ return the left child index if the left child exists. if not, return None. @@ -47,7 +49,7 @@ def left_child_idx(self, parent_idx: int) -> Optional[int]: return left_child_index return None - def right_child_idx(self, parent_idx: int) -> Optional[int]: + def right_child_idx(self, parent_idx: int) -> int | None: """ return the right child index if the right child exists. if not, return None. diff --git a/data_structures/heap/randomized_heap.py b/data_structures/heap/randomized_heap.py index 0ddc2272efe8..f584f5cb3342 100644 --- a/data_structures/heap/randomized_heap.py +++ b/data_structures/heap/randomized_heap.py @@ -3,7 +3,7 @@ from __future__ import annotations import random -from typing import Generic, Iterable, List, Optional, TypeVar +from typing import Generic, Iterable, TypeVar T = TypeVar("T") @@ -16,8 +16,8 @@ class RandomizedHeapNode(Generic[T]): def __init__(self, value: T) -> None: self._value: T = value - self.left: Optional[RandomizedHeapNode[T]] = None - self.right: Optional[RandomizedHeapNode[T]] = None + self.left: RandomizedHeapNode[T] | None = None + self.right: RandomizedHeapNode[T] | None = None @property def value(self) -> T: @@ -26,8 +26,8 @@ def value(self) -> T: @staticmethod def merge( - root1: Optional[RandomizedHeapNode[T]], root2: Optional[RandomizedHeapNode[T]] - ) -> Optional[RandomizedHeapNode[T]]: + root1: RandomizedHeapNode[T] | None, root2: RandomizedHeapNode[T] | None + ) -> RandomizedHeapNode[T] | None: """Merge 2 nodes together.""" if not root1: return root2 @@ -69,13 +69,13 @@ class RandomizedHeap(Generic[T]): [-1, 0, 1] """ - def __init__(self, data: Optional[Iterable[T]] = ()) -> None: + def __init__(self, data: Iterable[T] | None = ()) -> None: """ >>> rh = RandomizedHeap([3, 1, 3, 7]) >>> rh.to_sorted_list() [1, 3, 3, 7] """ - self._root: Optional[RandomizedHeapNode[T]] = None + self._root: RandomizedHeapNode[T] | None = None for item in data: self.insert(item) @@ -151,7 +151,7 @@ def clear(self): """ self._root = None - def to_sorted_list(self) -> List[T]: + def to_sorted_list(self) -> list[T]: """ Returns sorted list containing all the values in the heap. 
diff --git a/data_structures/heap/skew_heap.py b/data_structures/heap/skew_heap.py index 417a383f733e..b59441389a91 100644 --- a/data_structures/heap/skew_heap.py +++ b/data_structures/heap/skew_heap.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Generic, Iterable, Iterator, Optional, TypeVar +from typing import Generic, Iterable, Iterator, TypeVar T = TypeVar("T") @@ -15,8 +15,8 @@ class SkewNode(Generic[T]): def __init__(self, value: T) -> None: self._value: T = value - self.left: Optional[SkewNode[T]] = None - self.right: Optional[SkewNode[T]] = None + self.left: SkewNode[T] | None = None + self.right: SkewNode[T] | None = None @property def value(self) -> T: @@ -25,8 +25,8 @@ def value(self) -> T: @staticmethod def merge( - root1: Optional[SkewNode[T]], root2: Optional[SkewNode[T]] - ) -> Optional[SkewNode[T]]: + root1: SkewNode[T] | None, root2: SkewNode[T] | None + ) -> SkewNode[T] | None: """Merge 2 nodes together.""" if not root1: return root2 @@ -69,13 +69,13 @@ class SkewHeap(Generic[T]): [-1, 0, 1] """ - def __init__(self, data: Optional[Iterable[T]] = ()) -> None: + def __init__(self, data: Iterable[T] | None = ()) -> None: """ >>> sh = SkewHeap([3, 1, 3, 7]) >>> list(sh) [1, 3, 3, 7] """ - self._root: Optional[SkewNode[T]] = None + self._root: SkewNode[T] | None = None for item in data: self.insert(item) diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py index 96ec6b8abc85..43dd461867f1 100644 --- a/data_structures/linked_list/merge_two_lists.py +++ b/data_structures/linked_list/merge_two_lists.py @@ -5,7 +5,6 @@ from collections.abc import Iterable, Iterator from dataclasses import dataclass -from typing import Optional test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1) test_data_even = (4, 6, 2, 0, 8, 10, 3, -2) @@ -14,12 +13,12 @@ @dataclass class Node: data: int - next: Optional[Node] + next: Node | None class SortedLinkedList: def __init__(self, ints: Iterable[int]) -> None: - self.head: Optional[Node] = None + self.head: Node | None = None for i in reversed(sorted(ints)): self.head = Node(i, self.head) diff --git a/data_structures/linked_list/print_reverse.py b/data_structures/linked_list/print_reverse.py index c46f228e7260..f83d5607ffdd 100644 --- a/data_structures/linked_list/print_reverse.py +++ b/data_structures/linked_list/print_reverse.py @@ -1,4 +1,4 @@ -from typing import List +from __future__ import annotations class Node: @@ -16,7 +16,7 @@ def __repr__(self): return "->".join(string_rep) -def make_linked_list(elements_list: List): +def make_linked_list(elements_list: list): """Creates a Linked List from the elements of the given sequence (list/tuple) and returns the head of the Linked List. 
>>> make_linked_list([]) diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index 8f06e6193d52..ee0b4460730c 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -2,11 +2,10 @@ Based on "Skip Lists: A Probabilistic Alternative to Balanced Trees" by William Pugh https://epaperpress.com/sortsearch/download/skiplist.pdf """ - from __future__ import annotations from random import random -from typing import Generic, Optional, TypeVar +from typing import Generic, TypeVar KT = TypeVar("KT") VT = TypeVar("VT") @@ -124,7 +123,7 @@ def random_level(self) -> int: return level - def _locate_node(self, key) -> tuple[Optional[Node[KT, VT]], list[Node[KT, VT]]]: + def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]: """ :param key: Searched key, :return: Tuple with searched node (or None if given key is not present) @@ -222,7 +221,7 @@ def insert(self, key: KT, value: VT): else: update_node.forward[i] = new_node - def find(self, key: VT) -> Optional[VT]: + def find(self, key: VT) -> VT | None: """ :param key: Search key. :return: Value associated with given key or None if given key is not present. diff --git a/data_structures/stacks/evaluate_postfix_notations.py b/data_structures/stacks/evaluate_postfix_notations.py index 2a4baf9d6b52..51ea353b17de 100644 --- a/data_structures/stacks/evaluate_postfix_notations.py +++ b/data_structures/stacks/evaluate_postfix_notations.py @@ -1,5 +1,3 @@ -from typing import Any, List - """ The Reverse Polish Nation also known as Polish postfix notation or simply postfix notation. @@ -8,6 +6,9 @@ Valid operators are +, -, *, /. Each operand may be an integer or another expression. """ +from __future__ import annotations + +from typing import Any def evaluate_postfix(postfix_notation: list) -> int: @@ -23,7 +24,7 @@ def evaluate_postfix(postfix_notation: list) -> int: return 0 operations = {"+", "-", "*", "/"} - stack: List[Any] = [] + stack: list[Any] = [] for token in postfix_notation: if token in operations: diff --git a/data_structures/stacks/linked_stack.py b/data_structures/stacks/linked_stack.py index 0b9c9d45e61f..85b59a940e39 100644 --- a/data_structures/stacks/linked_stack.py +++ b/data_structures/stacks/linked_stack.py @@ -1,5 +1,7 @@ """ A Stack using a linked list like structure """ -from typing import Any, Optional +from __future__ import annotations + +from typing import Any class Node: @@ -42,7 +44,7 @@ class LinkedStack: """ def __init__(self) -> None: - self.top: Optional[Node] = None + self.top: Node | None = None def __iter__(self): node = self.top diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index 245d39b32c07..c62412150626 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -1,4 +1,4 @@ -from typing import List +from __future__ import annotations class StackOverflowError(BaseException): @@ -15,7 +15,7 @@ class Stack: """ def __init__(self, limit: int = 10): - self.stack: List[int] = [] + self.stack: list[int] = [] self.limit = limit def __bool__(self) -> bool: diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index 9c096f671385..63f8dbb20cc0 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -12,8 +12,9 @@ which have not been implemented here, yet. 
""" +from __future__ import annotations -from typing import Iterable, List, Set, Union +from typing import Iterable class Point: @@ -84,8 +85,8 @@ def __hash__(self): def _construct_points( - list_of_tuples: Union[List[Point], List[List[float]], Iterable[List[float]]] -) -> List[Point]: + list_of_tuples: list[Point] | list[list[float]] | Iterable[list[float]], +) -> list[Point]: """ constructs a list of points from an array-like object of numbers @@ -114,7 +115,7 @@ def _construct_points( [] """ - points: List[Point] = [] + points: list[Point] = [] if list_of_tuples: for p in list_of_tuples: if isinstance(p, Point): @@ -130,7 +131,7 @@ def _construct_points( return points -def _validate_input(points: Union[List[Point], List[List[float]]]) -> List[Point]: +def _validate_input(points: list[Point] | list[list[float]]) -> list[Point]: """ validates an input instance before a convex-hull algorithms uses it @@ -218,7 +219,7 @@ def _det(a: Point, b: Point, c: Point) -> float: return det -def convex_hull_bf(points: List[Point]) -> List[Point]: +def convex_hull_bf(points: list[Point]) -> list[Point]: """ Constructs the convex hull of a set of 2D points using a brute force algorithm. The algorithm basically considers all combinations of points (i, j) and uses the @@ -291,7 +292,7 @@ def convex_hull_bf(points: List[Point]) -> List[Point]: return sorted(convex_set) -def convex_hull_recursive(points: List[Point]) -> List[Point]: +def convex_hull_recursive(points: list[Point]) -> list[Point]: """ Constructs the convex hull of a set of 2D points using a divide-and-conquer strategy The algorithm exploits the geometric properties of the problem by repeatedly @@ -362,7 +363,7 @@ def convex_hull_recursive(points: List[Point]) -> List[Point]: def _construct_hull( - points: List[Point], left: Point, right: Point, convex_set: Set[Point] + points: list[Point], left: Point, right: Point, convex_set: set[Point] ) -> None: """ @@ -405,7 +406,7 @@ def _construct_hull( _construct_hull(candidate_points, extreme_point, right, convex_set) -def convex_hull_melkman(points: List[Point]) -> List[Point]: +def convex_hull_melkman(points: list[Point]) -> list[Point]: """ Constructs the convex hull of a set of 2D points using the melkman algorithm. The algorithm works by iteratively inserting points of a simple polygonal chain diff --git a/divide_and_conquer/kth_order_statistic.py b/divide_and_conquer/kth_order_statistic.py index f6e81a306bff..666ad1a39b8a 100644 --- a/divide_and_conquer/kth_order_statistic.py +++ b/divide_and_conquer/kth_order_statistic.py @@ -8,8 +8,9 @@ For more information of this algorithm: https://web.stanford.edu/class/archive/cs/cs161/cs161.1138/lectures/08/Small08.pdf """ +from __future__ import annotations + from random import choice -from typing import List def random_pivot(lst): @@ -21,7 +22,7 @@ def random_pivot(lst): return choice(lst) -def kth_number(lst: List[int], k: int) -> int: +def kth_number(lst: list[int], k: int) -> int: """ Return the kth smallest number in lst. >>> kth_number([2, 1, 3, 4, 5], 3) diff --git a/divide_and_conquer/mergesort.py b/divide_and_conquer/mergesort.py index 46a46941cab3..628080cefc9b 100644 --- a/divide_and_conquer/mergesort.py +++ b/divide_and_conquer/mergesort.py @@ -1,7 +1,7 @@ -from typing import List +from __future__ import annotations -def merge(left_half: List, right_half: List) -> List: +def merge(left_half: list, right_half: list) -> list: """Helper function for mergesort. 
>>> left_half = [-2] @@ -57,7 +57,7 @@ def merge(left_half: List, right_half: List) -> List: return sorted_array -def merge_sort(array: List) -> List: +def merge_sort(array: list) -> list: """Returns a list of sorted array elements using merge sort. >>> from random import shuffle diff --git a/divide_and_conquer/peak.py b/divide_and_conquer/peak.py index f94f83ed3fcb..e60f28bfbe29 100644 --- a/divide_and_conquer/peak.py +++ b/divide_and_conquer/peak.py @@ -7,10 +7,10 @@ (From Kleinberg and Tardos. Algorithm Design. Addison Wesley 2006: Chapter 5 Solved Exercise 1) """ -from typing import List +from __future__ import annotations -def peak(lst: List[int]) -> int: +def peak(lst: list[int]) -> int: """ Return the peak value of `lst`. >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) diff --git a/electronics/electric_power.py b/electronics/electric_power.py index e4e685bbd0f0..ac673d7e3a94 100644 --- a/electronics/electric_power.py +++ b/electronics/electric_power.py @@ -1,9 +1,10 @@ # https://en.m.wikipedia.org/wiki/Electric_power +from __future__ import annotations + from collections import namedtuple -from typing import Tuple -def electric_power(voltage: float, current: float, power: float) -> Tuple: +def electric_power(voltage: float, current: float, power: float) -> tuple: """ This function can calculate any one of the three (voltage, current, power), fundamental value of electrical system. diff --git a/electronics/ohms_law.py b/electronics/ohms_law.py index 41bffa9f87c8..66e737c1f909 100644 --- a/electronics/ohms_law.py +++ b/electronics/ohms_law.py @@ -1,8 +1,8 @@ # https://en.wikipedia.org/wiki/Ohm%27s_law -from typing import Dict +from __future__ import annotations -def ohms_law(voltage: float, current: float, resistance: float) -> Dict[str, float]: +def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]: """ Apply Ohm's Law, on any two given electrical values, which can be voltage, current, and resistance, and then in a Python dict return name/value pair of the zero value. 
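The mergesort, peak, electric_power and ohms_law hunks above also swap typing.List, typing.Tuple and typing.Dict for the built-in names, following PEP 585. With the postponed-evaluation import in place the modules should stay importable on Python 3.7 and 3.8, where subscripting dict or tuple at runtime is not yet supported. A rough sketch with hypothetical function names:

    from __future__ import annotations


    def resistance_map(voltage: float, current: float) -> dict[str, float]:
        # dict[str, float] replaces typing.Dict[str, float]; no typing import.
        return {"resistance": voltage / current}


    def as_pair(voltage: float, current: float) -> tuple[float, float]:
        # Likewise tuple[float, float] replaces typing.Tuple[float, float].
        return (voltage, current)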
diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index 9cd6dd0f9635..db0ef8e7b3ac 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -13,7 +13,7 @@ def initialize_unweighted_directed_graph( graph[i + 1] = [] for e in range(edge_count): - x, y = [int(i) for i in _input(f"Edge {e + 1}: ")] + x, y = (int(i) for i in _input(f"Edge {e + 1}: ")) graph[x].append(y) return graph @@ -26,7 +26,7 @@ def initialize_unweighted_undirected_graph( graph[i + 1] = [] for e in range(edge_count): - x, y = [int(i) for i in _input(f"Edge {e + 1}: ")] + x, y = (int(i) for i in _input(f"Edge {e + 1}: ")) graph[x].append(y) graph[y].append(x) return graph @@ -40,14 +40,14 @@ def initialize_weighted_undirected_graph( graph[i + 1] = [] for e in range(edge_count): - x, y, w = [int(i) for i in _input(f"Edge {e + 1}: ")] + x, y, w = (int(i) for i in _input(f"Edge {e + 1}: ")) graph[x].append((y, w)) graph[y].append((x, w)) return graph if __name__ == "__main__": - n, m = [int(i) for i in _input("Number of nodes and edges: ")] + n, m = (int(i) for i in _input("Number of nodes and edges: ")) graph_choice = int( _input( diff --git a/graphs/bellman_ford.py b/graphs/bellman_ford.py index d6d6b2ac7349..0f654a510b59 100644 --- a/graphs/bellman_ford.py +++ b/graphs/bellman_ford.py @@ -11,7 +11,7 @@ def check_negative_cycle( graph: list[dict[str, int]], distance: list[float], edge_count: int ): for j in range(edge_count): - u, v, w = [graph[j][k] for k in ["src", "dst", "weight"]] + u, v, w = (graph[j][k] for k in ["src", "dst", "weight"]) if distance[u] != float("inf") and distance[u] + w < distance[v]: return True return False @@ -38,7 +38,7 @@ def bellman_ford( for i in range(vertex_count - 1): for j in range(edge_count): - u, v, w = [graph[j][k] for k in ["src", "dst", "weight"]] + u, v, w = (graph[j][k] for k in ["src", "dst", "weight"]) if distance[u] != float("inf") and distance[u] + w < distance[v]: distance[v] = distance[u] + w @@ -62,10 +62,10 @@ def bellman_ford( for i in range(E): print("Edge ", i + 1) - src, dest, weight = [ + src, dest, weight = ( int(x) for x in input("Enter source, destination, weight: ").strip().split(" ") - ] + ) graph[i] = {"src": src, "dst": dest, "weight": weight} source = int(input("\nEnter shortest path source:").strip()) diff --git a/graphs/bfs_zero_one_shortest_path.py b/graphs/bfs_zero_one_shortest_path.py index a68b5602c2d1..78047c5d2237 100644 --- a/graphs/bfs_zero_one_shortest_path.py +++ b/graphs/bfs_zero_one_shortest_path.py @@ -1,13 +1,13 @@ -from collections import deque -from collections.abc import Iterator -from dataclasses import dataclass -from typing import Optional, Union - """ Finding the shortest path in 0-1-graph in O(E + V) which is faster than dijkstra. 0-1-graph is the weighted graph with the weights equal to 0 or 1. Link: https://codeforces.com/blog/entry/22276 """ +from __future__ import annotations + +from collections import deque +from collections.abc import Iterator +from dataclasses import dataclass @dataclass @@ -59,7 +59,7 @@ def add_edge(self, from_vertex: int, to_vertex: int, weight: int): self._graph[from_vertex].append(Edge(to_vertex, weight)) - def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> Optional[int]: + def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None: """ Return the shortest distance from start_vertex to finish_vertex in 0-1-graph. 
1 1 1 @@ -107,7 +107,7 @@ def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> Optional[i ValueError: No path from start_vertex to finish_vertex. """ queue = deque([start_vertex]) - distances: list[Union[int, None]] = [None] * self.size + distances: list[int | None] = [None] * self.size distances[start_vertex] = 0 while queue: diff --git a/graphs/bidirectional_a_star.py b/graphs/bidirectional_a_star.py index 729d8957bdef..071f1cd685b1 100644 --- a/graphs/bidirectional_a_star.py +++ b/graphs/bidirectional_a_star.py @@ -1,15 +1,12 @@ """ https://en.wikipedia.org/wiki/Bidirectional_search """ - from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean -from typing import Optional - HEURISTIC = 0 grid = [ @@ -50,7 +47,7 @@ def __init__( goal_x: int, goal_y: int, g_cost: int, - parent: Optional[Node], + parent: Node | None, ) -> None: self.pos_x = pos_x self.pos_y = pos_y @@ -157,7 +154,7 @@ def get_successors(self, parent: Node) -> list[Node]: ) return successors - def retrace_path(self, node: Optional[Node]) -> list[TPosition]: + def retrace_path(self, node: Node | None) -> list[TPosition]: """ Retrace the path from parents to parents until start node """ diff --git a/graphs/bidirectional_breadth_first_search.py b/graphs/bidirectional_breadth_first_search.py index 9b84ab21bf7f..27e4f0b16bbf 100644 --- a/graphs/bidirectional_breadth_first_search.py +++ b/graphs/bidirectional_breadth_first_search.py @@ -1,11 +1,9 @@ """ https://en.wikipedia.org/wiki/Bidirectional_search """ - from __future__ import annotations import time -from typing import Optional Path = list[tuple[int, int]] @@ -24,7 +22,7 @@ class Node: def __init__( - self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Optional[Node] + self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None ): self.pos_x = pos_x self.pos_y = pos_y @@ -57,7 +55,7 @@ def __init__(self, start: tuple[int, int], goal: tuple[int, int]): self.node_queue = [self.start] self.reached = False - def search(self) -> Optional[Path]: + def search(self) -> Path | None: while self.node_queue: current_node = self.node_queue.pop(0) @@ -93,7 +91,7 @@ def get_successors(self, parent: Node) -> list[Node]: ) return successors - def retrace_path(self, node: Optional[Node]) -> Path: + def retrace_path(self, node: Node | None) -> Path: """ Retrace the path from parents to parents until start node """ @@ -125,7 +123,7 @@ def __init__(self, start, goal): self.bwd_bfs = BreadthFirstSearch(goal, start) self.reached = False - def search(self) -> Optional[Path]: + def search(self) -> Path | None: while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: current_fwd_node = self.fwd_bfs.node_queue.pop(0) current_bwd_node = self.bwd_bfs.node_queue.pop(0) diff --git a/graphs/breadth_first_search.py b/graphs/breadth_first_search.py index 305db01e19e4..7c626429e5c0 100644 --- a/graphs/breadth_first_search.py +++ b/graphs/breadth_first_search.py @@ -1,13 +1,12 @@ #!/usr/bin/python """ Author: OMKAR PATHAK """ - -from typing import Dict, List, Set +from __future__ import annotations class Graph: def __init__(self) -> None: - self.vertices: Dict[int, List[int]] = {} + self.vertices: dict[int, list[int]] = {} def print_graph(self) -> None: """ @@ -35,7 +34,7 @@ def add_edge(self, from_vertex: int, to_vertex: int) -> None: else: self.vertices[from_vertex] = [to_vertex] - def bfs(self, start_vertex: int) -> Set[int]: + def bfs(self, start_vertex: int) -> set[int]: """ >>> g = Graph() >>> g.add_edge(0, 1) 
diff --git a/graphs/breadth_first_search_shortest_path.py b/graphs/breadth_first_search_shortest_path.py index 48f8ab1a4956..697a8c634859 100644 --- a/graphs/breadth_first_search_shortest_path.py +++ b/graphs/breadth_first_search_shortest_path.py @@ -3,8 +3,6 @@ """ from __future__ import annotations -from typing import Optional - graph = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], @@ -24,7 +22,7 @@ def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None: """ self.graph = graph # mapping node to its parent in resulting breadth first tree - self.parent: dict[str, Optional[str]] = {} + self.parent: dict[str, str | None] = {} self.source_vertex = source_vertex def breath_first_search(self) -> None: diff --git a/graphs/depth_first_search.py b/graphs/depth_first_search.py index 5d74a6db9c6b..f20a503ca395 100644 --- a/graphs/depth_first_search.py +++ b/graphs/depth_first_search.py @@ -1,11 +1,8 @@ """Non recursive implementation of a DFS algorithm.""" - from __future__ import annotations -from typing import Set - -def depth_first_search(graph: dict, start: str) -> Set[str]: +def depth_first_search(graph: dict, start: str) -> set[str]: """Depth First Search on Graph :param graph: directed graph in dictionary format :param start: starting vertex as a string diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py index d5e80247a9b4..d49e65b9d814 100644 --- a/graphs/greedy_best_first.py +++ b/graphs/greedy_best_first.py @@ -4,8 +4,6 @@ from __future__ import annotations -from typing import Optional - Path = list[tuple[int, int]] grid = [ @@ -44,7 +42,7 @@ def __init__( goal_x: int, goal_y: int, g_cost: float, - parent: Optional[Node], + parent: Node | None, ): self.pos_x = pos_x self.pos_y = pos_y @@ -93,7 +91,7 @@ def __init__(self, start: tuple[int, int], goal: tuple[int, int]): self.reached = False - def search(self) -> Optional[Path]: + def search(self) -> Path | None: """ Search for the path, if a path is not found, only the starting position is returned @@ -156,7 +154,7 @@ def get_successors(self, parent: Node) -> list[Node]: ) return successors - def retrace_path(self, node: Optional[Node]) -> Path: + def retrace_path(self, node: Node | None) -> Path: """ Retrace the path from parents to parents until start node """ diff --git a/graphs/minimum_spanning_tree_kruskal.py b/graphs/minimum_spanning_tree_kruskal.py index f21a87a7d534..85d937010489 100644 --- a/graphs/minimum_spanning_tree_kruskal.py +++ b/graphs/minimum_spanning_tree_kruskal.py @@ -40,7 +40,7 @@ def find_parent(i): edges = [] for _ in range(num_edges): - node1, node2, cost = [int(x) for x in input().strip().split()] + node1, node2, cost = (int(x) for x in input().strip().split()) edges.append((node1, node2, cost)) kruskal(num_nodes, edges) diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py index c3444c36f1cf..d924ee3db1e5 100644 --- a/graphs/minimum_spanning_tree_prims2.py +++ b/graphs/minimum_spanning_tree_prims2.py @@ -6,9 +6,10 @@ at a time, from an arbitrary starting vertex, at each step adding the cheapest possible connection from the tree to another vertex. 
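A second recurring change, visible in basic_graphs, bellman_ford and minimum_spanning_tree_kruskal above, replaces a list comprehension with a generator expression on the right-hand side of a tuple unpacking. Unpacking consumes any iterable, so the intermediate list was never needed. A tiny sketch with a made-up input line:

    line = "3 7 42"
    # Unpacking accepts any iterable, so a generator expression avoids
    # building a throwaway list just to pull out three integers.
    node1, node2, cost = (int(x) for x in line.split())
    assert (node1, node2, cost) == (3, 7, 42)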
""" +from __future__ import annotations from sys import maxsize -from typing import Generic, Optional, TypeVar +from typing import Generic, TypeVar T = TypeVar("T") @@ -219,7 +220,7 @@ def add_edge(self, node1: T, node2: T, weight: int) -> None: def prims_algo( graph: GraphUndirectedWeighted[T], -) -> tuple[dict[T, int], dict[T, Optional[T]]]: +) -> tuple[dict[T, int], dict[T, T | None]]: """ >>> graph = GraphUndirectedWeighted() @@ -240,7 +241,7 @@ def prims_algo( """ # prim's algorithm for minimum spanning tree dist: dict[T, int] = {node: maxsize for node in graph.connections} - parent: dict[T, Optional[T]] = {node: None for node in graph.connections} + parent: dict[T, T | None] = {node: None for node in graph.connections} priority_queue: MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): diff --git a/graphs/page_rank.py b/graphs/page_rank.py index 0f5129146ddf..672405b7345b 100644 --- a/graphs/page_rank.py +++ b/graphs/page_rank.py @@ -43,7 +43,7 @@ def page_rank(nodes, limit=3, d=0.85): print(f"======= Iteration {i + 1} =======") for j, node in enumerate(nodes): ranks[node.name] = (1 - d) + d * sum( - [ranks[ib] / outbounds[ib] for ib in node.inbound] + ranks[ib] / outbounds[ib] for ib in node.inbound ) print(ranks) diff --git a/graphs/scc_kosaraju.py b/graphs/scc_kosaraju.py index 2b34170149bc..fa182aa2faf1 100644 --- a/graphs/scc_kosaraju.py +++ b/graphs/scc_kosaraju.py @@ -1,4 +1,4 @@ -from typing import List +from __future__ import annotations def dfs(u): @@ -39,16 +39,16 @@ def kosaraju(): # n - no of nodes, m - no of edges n, m = list(map(int, input().strip().split())) - graph: List[List[int]] = [[] for i in range(n)] # graph - reversedGraph: List[List[int]] = [[] for i in range(n)] # reversed graph + graph: list[list[int]] = [[] for i in range(n)] # graph + reversedGraph: list[list[int]] = [[] for i in range(n)] # reversed graph # input graph data (edges) for i in range(m): u, v = list(map(int, input().strip().split())) graph[u].append(v) reversedGraph[v].append(u) - stack: List[int] = [] - visit: List[bool] = [False] * n - scc: List[int] = [] - component: List[int] = [] + stack: list[int] = [] + visit: list[bool] = [False] * n + scc: list[int] = [] + component: list[int] = [] print(kosaraju()) diff --git a/hashes/luhn.py b/hashes/luhn.py index 81014120dd80..bb77fd05c556 100644 --- a/hashes/luhn.py +++ b/hashes/luhn.py @@ -1,5 +1,5 @@ """ Luhn Algorithm """ -from typing import List +from __future__ import annotations def is_luhn(string: str) -> bool: @@ -17,9 +17,9 @@ def is_luhn(string: str) -> bool: [False, False, False, True, False, False, False, False, False, False] """ check_digit: int - _vector: List[str] = list(string) + _vector: list[str] = list(string) __vector, check_digit = _vector[:-1], int(_vector[-1]) - vector: List[int] = [int(digit) for digit in __vector] + vector: list[int] = [int(digit) for digit in __vector] vector.reverse() for i, digit in enumerate(vector): diff --git a/knapsack/knapsack.py b/knapsack/knapsack.py index 756443ea6163..18a36c3bcdda 100644 --- a/knapsack/knapsack.py +++ b/knapsack/knapsack.py @@ -1,11 +1,10 @@ -from typing import List - """ A naive recursive implementation of 0-1 Knapsack Problem https://en.wikipedia.org/wiki/Knapsack_problem """ +from __future__ import annotations -def knapsack(capacity: int, weights: List[int], values: List[int], counter: int) -> int: +def knapsack(capacity: int, weights: list[int], values: list[int], counter: int) -> int: """ Returns the maximum value that can be put in a knapsack of a 
capacity cap, whereby each weight w has a specific value val. diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 5e2f82018f38..74aeb9137666 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -18,11 +18,11 @@ - function squareZeroMatrix(N) - function randomMatrix(W,H,a,b) """ - +from __future__ import annotations import math import random -from typing import Collection, Optional, Union, overload +from typing import Collection, overload class Vector: @@ -46,7 +46,7 @@ class Vector: TODO: compare-operator """ - def __init__(self, components: Optional[Collection[float]] = None) -> None: + def __init__(self, components: Collection[float] | None = None) -> None: """ input: components or nothing simple constructor for init the vector @@ -97,7 +97,7 @@ def euclidLength(self) -> float: summe += c ** 2 return math.sqrt(summe) - def __add__(self, other: "Vector") -> "Vector": + def __add__(self, other: Vector) -> Vector: """ input: other vector assumes: other vector has the same size @@ -110,7 +110,7 @@ def __add__(self, other: "Vector") -> "Vector": else: raise Exception("must have the same size") - def __sub__(self, other: "Vector") -> "Vector": + def __sub__(self, other: Vector) -> Vector: """ input: other vector assumes: other vector has the same size @@ -124,14 +124,14 @@ def __sub__(self, other: "Vector") -> "Vector": raise Exception("must have the same size") @overload - def __mul__(self, other: float) -> "Vector": + def __mul__(self, other: float) -> Vector: ... @overload - def __mul__(self, other: "Vector") -> float: + def __mul__(self, other: Vector) -> float: ... - def __mul__(self, other: Union[float, "Vector"]) -> Union[float, "Vector"]: + def __mul__(self, other: float | Vector) -> float | Vector: """ mul implements the scalar multiplication and the dot-product @@ -148,7 +148,7 @@ def __mul__(self, other: Union[float, "Vector"]) -> Union[float, "Vector"]: else: # error case raise Exception("invalid operand!") - def copy(self) -> "Vector": + def copy(self) -> Vector: """ copies this vector and returns it. """ @@ -313,14 +313,14 @@ def determinate(self) -> float: raise Exception("matrix is not square") @overload - def __mul__(self, other: float) -> "Matrix": + def __mul__(self, other: float) -> Matrix: ... @overload def __mul__(self, other: Vector) -> Vector: ... - def __mul__(self, other: Union[float, Vector]) -> Union[Vector, "Matrix"]: + def __mul__(self, other: float | Vector) -> Vector | Matrix: """ implements the matrix-vector multiplication. implements the matrix-scalar multiplication @@ -347,7 +347,7 @@ def __mul__(self, other: Union[float, Vector]) -> Union[Vector, "Matrix"]: ] return Matrix(matrix, self.__width, self.__height) - def __add__(self, other: "Matrix") -> "Matrix": + def __add__(self, other: Matrix) -> Matrix: """ implements the matrix-addition. """ @@ -362,7 +362,7 @@ def __add__(self, other: "Matrix") -> "Matrix": else: raise Exception("matrix must have the same dimension!") - def __sub__(self, other: "Matrix") -> "Matrix": + def __sub__(self, other: Matrix) -> Matrix: """ implements the matrix-subtraction. """ diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index af845c9109b1..ec1b9f9e3e13 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -7,8 +7,9 @@ 1. the nearest vector 2. 
distance between the vector and the nearest vector (float) """ +from __future__ import annotations + import math -from typing import List, Union import numpy as np @@ -33,7 +34,7 @@ def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float: def similarity_search( dataset: np.ndarray, value_array: np.ndarray -) -> List[List[Union[List[float], float]]]: +) -> list[list[list[float] | float]]: """ :param dataset: Set containing the vectors. Should be ndarray. :param value_array: vector/vectors we want to know the nearest vector from dataset. diff --git a/maths/area_under_curve.py b/maths/area_under_curve.py index 2d01e414b63b..ce0932426ef6 100644 --- a/maths/area_under_curve.py +++ b/maths/area_under_curve.py @@ -1,14 +1,15 @@ """ Approximates the area under the curve using the trapezoidal rule """ +from __future__ import annotations -from typing import Callable, Union +from typing import Callable def trapezoidal_area( - fnc: Callable[[Union[int, float]], Union[int, float]], - x_start: Union[int, float], - x_end: Union[int, float], + fnc: Callable[[int | float], int | float], + x_start: int | float, + x_end: int | float, steps: int = 100, ) -> float: """ diff --git a/maths/average_mean.py b/maths/average_mean.py index e02e307f20c8..274c434ab885 100644 --- a/maths/average_mean.py +++ b/maths/average_mean.py @@ -1,7 +1,7 @@ -from typing import List +from __future__ import annotations -def mean(nums: List) -> float: +def mean(nums: list) -> float: """ Find mean of a list of numbers. Wiki: https://en.wikipedia.org/wiki/Mean diff --git a/maths/average_median.py b/maths/average_median.py index 497bf0c3a714..cd1ec1574893 100644 --- a/maths/average_median.py +++ b/maths/average_median.py @@ -1,7 +1,7 @@ -from typing import Union +from __future__ import annotations -def median(nums: list) -> Union[int, float]: +def median(nums: list) -> int | float: """ Find median of a list of numbers. Wiki: https://en.wikipedia.org/wiki/Median diff --git a/maths/entropy.py b/maths/entropy.py index 43bb3860fc12..498c28f31bc4 100644 --- a/maths/entropy.py +++ b/maths/entropy.py @@ -68,7 +68,7 @@ def calculate_prob(text: str) -> None: my_fir_sum += prob * math.log2(prob) # entropy formula. 
# print entropy - print("{:.1f}".format(round(-1 * my_fir_sum))) + print(f"{round(-1 * my_fir_sum):.1f}") # two len string all_sum = sum(two_char_strings.values()) @@ -83,10 +83,10 @@ def calculate_prob(text: str) -> None: my_sec_sum += prob * math.log2(prob) # print second entropy - print("{:.1f}".format(round(-1 * my_sec_sum))) + print(f"{round(-1 * my_sec_sum):.1f}") # print the difference between them - print("{:.1f}".format(round((-1 * my_sec_sum) - (-1 * my_fir_sum)))) + print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}") def analyze_text(text: str) -> tuple[dict, dict]: diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py index 6e0da6370219..a2078161374b 100644 --- a/maths/euclidean_distance.py +++ b/maths/euclidean_distance.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import Iterable, Union import numpy as np diff --git a/maths/extended_euclidean_algorithm.py b/maths/extended_euclidean_algorithm.py index e7087636ce09..72afd40aa707 100644 --- a/maths/extended_euclidean_algorithm.py +++ b/maths/extended_euclidean_algorithm.py @@ -12,12 +12,12 @@ # @Email: silentcat@protonmail.com # @Last modified by: pikulet # @Last modified time: 2020-10-02 +from __future__ import annotations import sys -from typing import Tuple -def extended_euclidean_algorithm(a: int, b: int) -> Tuple[int, int]: +def extended_euclidean_algorithm(a: int, b: int) -> tuple[int, int]: """ Extended Euclidean Algorithm. diff --git a/maths/hardy_ramanujanalgo.py b/maths/hardy_ramanujanalgo.py index 90e4913c70a7..e36f763da19e 100644 --- a/maths/hardy_ramanujanalgo.py +++ b/maths/hardy_ramanujanalgo.py @@ -37,7 +37,7 @@ def exactPrimeFactorCount(n): if __name__ == "__main__": n = 51242183 print(f"The number of distinct prime factors is/are {exactPrimeFactorCount(n)}") - print("The value of log(log(n)) is {:.4f}".format(math.log(math.log(n)))) + print(f"The value of log(log(n)) is {math.log(math.log(n)):.4f}") """ The number of distinct prime factors is/are 3 diff --git a/maths/line_length.py b/maths/line_length.py index 1d386b44b50d..c4d986279cda 100644 --- a/maths/line_length.py +++ b/maths/line_length.py @@ -1,11 +1,13 @@ +from __future__ import annotations + import math -from typing import Callable, Union +from typing import Callable def line_length( - fnc: Callable[[Union[int, float]], Union[int, float]], - x_start: Union[int, float], - x_end: Union[int, float], + fnc: Callable[[int | float], int | float], + x_start: int | float, + x_end: int | float, steps: int = 100, ) -> float: diff --git a/maths/max_sum_sliding_window.py b/maths/max_sum_sliding_window.py index 593cb5c8bd67..c6f9b4ed0ad7 100644 --- a/maths/max_sum_sliding_window.py +++ b/maths/max_sum_sliding_window.py @@ -6,10 +6,10 @@ called 'Window sliding technique' where the nested loops can be converted to a single loop to reduce time complexity. 
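The entropy and hardy_ramanujanalgo hunks above convert str.format calls into f-strings; the format specification after the colon (:.1f, :.4f) carries over unchanged. A minimal check of the equivalence:

    import math

    n = 51242183  # the sample value used in hardy_ramanujanalgo above
    value = math.log(math.log(n))
    # Both spellings render the same text; the f-string simply inlines the
    # expression next to its format spec.
    assert "{:.4f}".format(value) == f"{value:.4f}"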
""" -from typing import List +from __future__ import annotations -def max_sum_in_array(array: List[int], k: int) -> int: +def max_sum_in_array(array: list[int], k: int) -> int: """ Returns the maximum sum of k consecutive elements >>> arr = [1, 4, 2, 10, 2, 3, 1, 0, 20] diff --git a/maths/median_of_two_arrays.py b/maths/median_of_two_arrays.py index cde12f5d7e3b..55aa587a9c4b 100644 --- a/maths/median_of_two_arrays.py +++ b/maths/median_of_two_arrays.py @@ -1,7 +1,7 @@ -from typing import List +from __future__ import annotations -def median_of_two_arrays(nums1: List[float], nums2: List[float]) -> float: +def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float: """ >>> median_of_two_arrays([1, 2], [3]) 2 diff --git a/maths/numerical_integration.py b/maths/numerical_integration.py index 87184a76b740..577c41a4440e 100644 --- a/maths/numerical_integration.py +++ b/maths/numerical_integration.py @@ -1,14 +1,15 @@ """ Approximates the area under the curve using the trapezoidal rule """ +from __future__ import annotations -from typing import Callable, Union +from typing import Callable def trapezoidal_area( - fnc: Callable[[Union[int, float]], Union[int, float]], - x_start: Union[int, float], - x_end: Union[int, float], + fnc: Callable[[int | float], int | float], + x_start: int | float, + x_end: int | float, steps: int = 100, ) -> float: diff --git a/maths/sieve_of_eratosthenes.py b/maths/sieve_of_eratosthenes.py index 47a086546900..3cd6ce0b4d9d 100644 --- a/maths/sieve_of_eratosthenes.py +++ b/maths/sieve_of_eratosthenes.py @@ -10,13 +10,12 @@ doctest provider: Bruno Simas Hadlich (https://github.com/brunohadlich) Also thanks to Dmitry (https://github.com/LizardWizzard) for finding the problem """ - +from __future__ import annotations import math -from typing import List -def prime_sieve(num: int) -> List[int]: +def prime_sieve(num: int) -> list[int]: """ Returns a list with all prime numbers up to n. diff --git a/maths/volume.py b/maths/volume.py index 41d2331db3cb..51b2b9fc0334 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -3,11 +3,12 @@ Wikipedia reference: https://en.wikipedia.org/wiki/Volume """ +from __future__ import annotations + from math import pi, pow -from typing import Union -def vol_cube(side_length: Union[int, float]) -> float: +def vol_cube(side_length: int | float) -> float: """ Calculate the Volume of a Cube. 
diff --git a/matrix/searching_in_sorted_matrix.py b/matrix/searching_in_sorted_matrix.py index ca6263a32f50..ae81361499e5 100644 --- a/matrix/searching_in_sorted_matrix.py +++ b/matrix/searching_in_sorted_matrix.py @@ -1,10 +1,8 @@ from __future__ import annotations -from typing import Union - def search_in_a_sorted_matrix( - mat: list[list], m: int, n: int, key: Union[int, float] + mat: list[list], m: int, n: int, key: int | float ) -> None: """ >>> search_in_a_sorted_matrix( diff --git a/other/date_to_weekday.py b/other/date_to_weekday.py index bb17130c0da5..9dc68666e3b4 100644 --- a/other/date_to_weekday.py +++ b/other/date_to_weekday.py @@ -14,7 +14,7 @@ def date_to_weekday(inp_date: str) -> str: >>> date_to_weekday("1/1/2021") 'Friday' """ - day, month, year = [int(x) for x in inp_date.split("/")] + day, month, year = (int(x) for x in inp_date.split("/")) if year % 100 == 0: year = "00" new_base_date: str = f"{day}/{month}/{year%100} 0:0:0" diff --git "a/other/davis\342\200\223putnam\342\200\223logemann\342\200\223loveland.py" b/other/davisb_putnamb_logemannb_loveland.py similarity index 94% rename from "other/davis\342\200\223putnam\342\200\223logemann\342\200\223loveland.py" rename to other/davisb_putnamb_logemannb_loveland.py index d16de6dd988b..00068930b89e 100644 --- "a/other/davis\342\200\223putnam\342\200\223logemann\342\200\223loveland.py" +++ b/other/davisb_putnamb_logemannb_loveland.py @@ -8,9 +8,9 @@ For more information about the algorithm: https://en.wikipedia.org/wiki/DPLL_algorithm """ +from __future__ import annotations import random -from typing import Dict, List class Clause: @@ -27,7 +27,7 @@ class Clause: True """ - def __init__(self, literals: List[int]) -> None: + def __init__(self, literals: list[int]) -> None: """ Represent the literals and an assignment in a clause." """ @@ -52,7 +52,7 @@ def __len__(self) -> int: """ return len(self.literals) - def assign(self, model: Dict[str, bool]) -> None: + def assign(self, model: dict[str, bool]) -> None: """ Assign values to literals of the clause as given by model. """ @@ -68,7 +68,7 @@ def assign(self, model: Dict[str, bool]) -> None: value = not value self.literals[literal] = value - def evaluate(self, model: Dict[str, bool]) -> bool: + def evaluate(self, model: dict[str, bool]) -> bool: """ Evaluates the clause with the assignments in model. This has the following steps: @@ -97,7 +97,7 @@ class Formula: {{A1, A2, A3'}, {A5', A2', A1}} is ((A1 v A2 v A3') and (A5' v A2' v A1)) """ - def __init__(self, clauses: List[Clause]) -> None: + def __init__(self, clauses: list[Clause]) -> None: """ Represent the number of clauses and the clauses themselves. """ @@ -146,7 +146,7 @@ def generate_formula() -> Formula: return Formula(set(clauses)) -def generate_parameters(formula: Formula) -> (List[Clause], List[str]): +def generate_parameters(formula: Formula) -> (list[Clause], list[str]): """ Return the clauses and symbols from a formula. A symbol is the uncomplemented form of a literal. @@ -173,8 +173,8 @@ def generate_parameters(formula: Formula) -> (List[Clause], List[str]): def find_pure_symbols( - clauses: List[Clause], symbols: List[str], model: Dict[str, bool] -) -> (List[str], Dict[str, bool]): + clauses: list[Clause], symbols: list[str], model: dict[str, bool] +) -> (list[str], dict[str, bool]): """ Return pure symbols and their values to satisfy clause. 
Pure symbols are symbols in a formula that exist only @@ -225,8 +225,8 @@ def find_pure_symbols( def find_unit_clauses( - clauses: List[Clause], model: Dict[str, bool] -) -> (List[str], Dict[str, bool]): + clauses: list[Clause], model: dict[str, bool] +) -> (list[str], dict[str, bool]): """ Returns the unit symbols and their values to satisfy clause. Unit symbols are symbols in a formula that are: @@ -273,8 +273,8 @@ def find_unit_clauses( def dpll_algorithm( - clauses: List[Clause], symbols: List[str], model: Dict[str, bool] -) -> (bool, Dict[str, bool]): + clauses: list[Clause], symbols: list[str], model: dict[str, bool] +) -> (bool, dict[str, bool]): """ Returns the model if the formula is satisfiable, else None This has the following steps: diff --git a/other/lfu_cache.py b/other/lfu_cache.py index 40268242f564..88167ac1f2cb 100644 --- a/other/lfu_cache.py +++ b/other/lfu_cache.py @@ -1,4 +1,6 @@ -from typing import Callable, Optional +from __future__ import annotations + +from typing import Callable class DoubleLinkedListNode: @@ -119,7 +121,7 @@ def __contains__(self, key: int) -> bool: """ return key in self.cache - def get(self, key: int) -> Optional[int]: + def get(self, key: int) -> int | None: """ Returns the value for the input key and updates the Double Linked List. Returns None if key is not present in cache diff --git a/other/lru_cache.py b/other/lru_cache.py index 2a9d7e49b279..b74c0a45caf9 100644 --- a/other/lru_cache.py +++ b/other/lru_cache.py @@ -1,4 +1,6 @@ -from typing import Callable, Optional +from __future__ import annotations + +from typing import Callable class DoubleLinkedListNode: @@ -125,7 +127,7 @@ def __contains__(self, key: int) -> bool: return key in self.cache - def get(self, key: int) -> Optional[int]: + def get(self, key: int) -> int | None: """ Returns the value for the input key and updates the Double Linked List. 
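One detail worth flagging in the renamed davisb_putnamb_logemannb_loveland module above: return annotations such as (list[Clause], list[str]) use a bare tuple literal rather than a subscripted tuple. Python accepts any expression as an annotation, so the module imports cleanly, but type checkers generally expect the subscripted form. A hedged sketch of the conventional spelling, with a placeholder body:

    from __future__ import annotations


    def split_model(model: dict[str, bool]) -> tuple[list[str], dict[str, bool]]:
        # tuple[list[str], dict[str, bool]] is the standard way to annotate a
        # fixed-length pair; a bare "(list[str], dict[str, bool])" is legal at
        # runtime but is rejected by checkers such as mypy.
        true_symbols = [name for name, value in model.items() if value]
        return true_symbols, model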
Returns None if key is not present in cache diff --git a/project_euler/problem_001/sol1.py b/project_euler/problem_001/sol1.py index 85ad32294c9b..fcc24c86ec54 100644 --- a/project_euler/problem_001/sol1.py +++ b/project_euler/problem_001/sol1.py @@ -26,7 +26,7 @@ def solution(n: int = 1000) -> int: 0 """ - return sum([e for e in range(3, n) if e % 3 == 0 or e % 5 == 0]) + return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0) if __name__ == "__main__": diff --git a/project_euler/problem_001/sol5.py b/project_euler/problem_001/sol5.py index 7f0b0bd1bc7c..3edc6f245a67 100644 --- a/project_euler/problem_001/sol5.py +++ b/project_euler/problem_001/sol5.py @@ -25,7 +25,7 @@ def solution(n: int = 1000) -> int: 83700 """ - return sum([i for i in range(n) if i % 3 == 0 or i % 5 == 0]) + return sum(i for i in range(n) if i % 3 == 0 or i % 5 == 0) if __name__ == "__main__": diff --git a/project_euler/problem_006/sol3.py b/project_euler/problem_006/sol3.py index c87931309574..529f233c9f8e 100644 --- a/project_euler/problem_006/sol3.py +++ b/project_euler/problem_006/sol3.py @@ -33,7 +33,7 @@ def solution(n: int = 100) -> int: 1582700 """ - sum_of_squares = sum([i * i for i in range(1, n + 1)]) + sum_of_squares = sum(i * i for i in range(1, n + 1)) square_of_sum = int(math.pow(sum(range(1, n + 1)), 2)) return square_of_sum - sum_of_squares diff --git a/project_euler/problem_008/sol2.py b/project_euler/problem_008/sol2.py index d2c1b4f7ca48..7f0540263278 100644 --- a/project_euler/problem_008/sol2.py +++ b/project_euler/problem_008/sol2.py @@ -70,10 +70,7 @@ def solution(n: str = N) -> int: """ return max( - [ - reduce(lambda x, y: int(x) * int(y), n[i : i + 13]) - for i in range(len(n) - 12) - ] + reduce(lambda x, y: int(x) * int(y), n[i : i + 13]) for i in range(len(n) - 12) ) diff --git a/project_euler/problem_012/sol2.py b/project_euler/problem_012/sol2.py index 5ff0d8349b90..7578caa98938 100644 --- a/project_euler/problem_012/sol2.py +++ b/project_euler/problem_012/sol2.py @@ -29,7 +29,7 @@ def triangle_number_generator(): def count_divisors(n): - return sum([2 for i in range(1, int(n ** 0.5) + 1) if n % i == 0 and i * i != n]) + return sum(2 for i in range(1, int(n ** 0.5) + 1) if n % i == 0 and i * i != n) def solution(): diff --git a/project_euler/problem_013/sol1.py b/project_euler/problem_013/sol1.py index 1ea08b12ee93..7a414a9379e0 100644 --- a/project_euler/problem_013/sol1.py +++ b/project_euler/problem_013/sol1.py @@ -18,7 +18,7 @@ def solution(): """ file_path = os.path.join(os.path.dirname(__file__), "num.txt") with open(file_path) as file_hand: - return str(sum([int(line) for line in file_hand]))[:10] + return str(sum(int(line) for line in file_hand))[:10] if __name__ == "__main__": diff --git a/project_euler/problem_014/sol2.py b/project_euler/problem_014/sol2.py index 20ad96327498..7ed68273bcd7 100644 --- a/project_euler/problem_014/sol2.py +++ b/project_euler/problem_014/sol2.py @@ -25,10 +25,10 @@ Which starting number, under one million, produces the longest chain? 
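The Project Euler hunks above, like the earlier hash_table and page_rank ones, strip the square brackets inside sum(...) and max(...). Both built-ins accept any iterable, so a generator expression yields the same result without materialising an intermediate list. A short sketch mirroring problem_001/sol1:

    limit = 1000
    as_list = sum([e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0])
    as_genexp = sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
    # Same value either way; the generator form skips the list allocation.
    assert as_list == as_genexp == 233168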
""" -from typing import List +from __future__ import annotations -def collatz_sequence(n: int) -> List[int]: +def collatz_sequence(n: int) -> list[int]: """Returns the Collatz sequence for n.""" sequence = [n] while n != 1: @@ -54,7 +54,7 @@ def solution(n: int = 1000000) -> int: 13255 """ - result = max([(len(collatz_sequence(i)), i) for i in range(1, n)]) + result = max((len(collatz_sequence(i)), i) for i in range(1, n)) return result[1] diff --git a/project_euler/problem_020/sol2.py b/project_euler/problem_020/sol2.py index 92e1e724a647..676e96e7836a 100644 --- a/project_euler/problem_020/sol2.py +++ b/project_euler/problem_020/sol2.py @@ -28,7 +28,7 @@ def solution(num: int = 100) -> int: >>> solution(1) 1 """ - return sum([int(x) for x in str(factorial(num))]) + return sum(int(x) for x in str(factorial(num))) if __name__ == "__main__": diff --git a/project_euler/problem_021/sol1.py b/project_euler/problem_021/sol1.py index 3fac79156e41..353510ae8f94 100644 --- a/project_euler/problem_021/sol1.py +++ b/project_euler/problem_021/sol1.py @@ -41,11 +41,9 @@ def solution(n: int = 10000) -> int: 0 """ total = sum( - [ - i - for i in range(1, n) - if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i - ] + i + for i in range(1, n) + if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i ) return total diff --git a/project_euler/problem_033/sol1.py b/project_euler/problem_033/sol1.py index ba6e553d8689..e0c9a058af53 100644 --- a/project_euler/problem_033/sol1.py +++ b/project_euler/problem_033/sol1.py @@ -14,8 +14,9 @@ If the product of these four fractions is given in its lowest common terms, find the value of the denominator. """ +from __future__ import annotations + from fractions import Fraction -from typing import List def is_digit_cancelling(num: int, den: int) -> bool: @@ -26,7 +27,7 @@ def is_digit_cancelling(num: int, den: int) -> bool: return False -def fraction_list(digit_len: int) -> List[str]: +def fraction_list(digit_len: int) -> list[str]: """ >>> fraction_list(2) ['16/64', '19/95', '26/65', '49/98'] diff --git a/project_euler/problem_036/sol1.py b/project_euler/problem_036/sol1.py index 13a749862e5f..425c41221395 100644 --- a/project_euler/problem_036/sol1.py +++ b/project_euler/problem_036/sol1.py @@ -14,11 +14,10 @@ (Please note that the palindromic number, in either base, may not include leading zeros.) """ +from __future__ import annotations -from typing import Union - -def is_palindrome(n: Union[int, str]) -> bool: +def is_palindrome(n: int | str) -> bool: """ Return true if the input n is a palindrome. Otherwise return false. n can be an integer or a string. diff --git a/project_euler/problem_038/sol1.py b/project_euler/problem_038/sol1.py index 6d54f6df7ff8..e4a6d09f8f7d 100644 --- a/project_euler/problem_038/sol1.py +++ b/project_euler/problem_038/sol1.py @@ -37,8 +37,7 @@ => 100 <= a < 334, candidate = a * 10^6 + 2a * 10^3 + 3a = 1002003 * a """ - -from typing import Union +from __future__ import annotations def is_9_pandigital(n: int) -> bool: @@ -55,7 +54,7 @@ def is_9_pandigital(n: int) -> bool: return len(s) == 9 and set(s) == set("123456789") -def solution() -> Union[int, None]: +def solution() -> int | None: """ Return the largest 1 to 9 pandigital 9-digital number that can be formed as the concatenated product of an integer with (1,2,...,n) where n > 1. 
diff --git a/project_euler/problem_049/sol1.py b/project_euler/problem_049/sol1.py index c0d0715be91c..dd2ef71a38a8 100644 --- a/project_euler/problem_049/sol1.py +++ b/project_euler/problem_049/sol1.py @@ -132,7 +132,7 @@ def solution(): for seq in passed: answer.add("".join([str(i) for i in seq])) - return max([int(x) for x in answer]) + return max(int(x) for x in answer) if __name__ == "__main__": diff --git a/project_euler/problem_050/sol1.py b/project_euler/problem_050/sol1.py index 7d142e5ffc91..cfb1911df5de 100644 --- a/project_euler/problem_050/sol1.py +++ b/project_euler/problem_050/sol1.py @@ -15,10 +15,10 @@ Which prime, below one-million, can be written as the sum of the most consecutive primes? """ -from typing import List +from __future__ import annotations -def prime_sieve(limit: int) -> List[int]: +def prime_sieve(limit: int) -> list[int]: """ Sieve of Erotosthenes Function to return all the prime numbers up to a number 'limit' diff --git a/project_euler/problem_051/sol1.py b/project_euler/problem_051/sol1.py index b160b5a2dbd4..5f607e3ffb42 100644 --- a/project_euler/problem_051/sol1.py +++ b/project_euler/problem_051/sol1.py @@ -15,12 +15,12 @@ Find the smallest prime which, by replacing part of the number (not necessarily adjacent digits) with the same digit, is part of an eight prime value family. """ +from __future__ import annotations from collections import Counter -from typing import List -def prime_sieve(n: int) -> List[int]: +def prime_sieve(n: int) -> list[int]: """ Sieve of Erotosthenes Function to return all the prime numbers up to a certain number @@ -52,7 +52,7 @@ def prime_sieve(n: int) -> List[int]: return primes -def digit_replacements(number: int) -> List[List[int]]: +def digit_replacements(number: int) -> list[list[int]]: """ Returns all the possible families of digit replacements in a number which contains at least one repeating digit diff --git a/project_euler/problem_054/sol1.py b/project_euler/problem_054/sol1.py index d2fd810d1b69..9af7aef5a716 100644 --- a/project_euler/problem_054/sol1.py +++ b/project_euler/problem_054/sol1.py @@ -135,7 +135,7 @@ def hand(self): """Returns the self hand""" return self._hand - def compare_with(self, other: "PokerHand") -> str: + def compare_with(self, other: PokerHand) -> str: """ Determines the outcome of comparing self hand with other hand. 
Returns the output as 'Win', 'Loss', 'Tie' according to the rules of @@ -220,7 +220,7 @@ def hand_name(self) -> str: else: return name + f", {high}" - def _compare_cards(self, other: "PokerHand") -> str: + def _compare_cards(self, other: PokerHand) -> str: # Enumerate gives us the index as well as the element of a list for index, card_value in enumerate(self._card_values): if card_value != other._card_values[index]: diff --git a/project_euler/problem_056/sol1.py b/project_euler/problem_056/sol1.py index 8eaa6e553342..f1ec03c497be 100644 --- a/project_euler/problem_056/sol1.py +++ b/project_euler/problem_056/sol1.py @@ -30,11 +30,9 @@ def solution(a: int = 100, b: int = 100) -> int: # RETURN the MAXIMUM from the list of SUMs of the list of INT converted from STR of # BASE raised to the POWER return max( - [ - sum([int(x) for x in str(base ** power)]) - for base in range(a) - for power in range(b) - ] + sum(int(x) for x in str(base ** power)) + for base in range(a) + for power in range(b) ) diff --git a/project_euler/problem_059/sol1.py b/project_euler/problem_059/sol1.py index 1f55029b2613..b795dd243b08 100644 --- a/project_euler/problem_059/sol1.py +++ b/project_euler/problem_059/sol1.py @@ -25,23 +25,22 @@ must contain common English words, decrypt the message and find the sum of the ASCII values in the original text. """ - +from __future__ import annotations import string from itertools import cycle, product from pathlib import Path -from typing import List, Optional, Set, Tuple VALID_CHARS: str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) -LOWERCASE_INTS: List[int] = [ord(letter) for letter in string.ascii_lowercase] -VALID_INTS: Set[int] = {ord(char) for char in VALID_CHARS} +LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase] +VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS} -COMMON_WORDS: List[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] +COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] -def try_key(ciphertext: List[int], key: Tuple[int, ...]) -> Optional[str]: +def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None: """ Given an encrypted message and a possible 3-character key, decrypt the message. If the decrypted message contains a invalid character, i.e. not an ASCII letter, @@ -66,7 +65,7 @@ def try_key(ciphertext: List[int], key: Tuple[int, ...]) -> Optional[str]: return decoded -def filter_valid_chars(ciphertext: List[int]) -> List[str]: +def filter_valid_chars(ciphertext: list[int]) -> list[str]: """ Given an encrypted message, test all 3-character strings to try and find the key. Return a list of the possible decrypted messages. @@ -77,7 +76,7 @@ def filter_valid_chars(ciphertext: List[int]) -> List[str]: >>> text in filter_valid_chars(encoded) True """ - possibles: List[str] = [] + possibles: list[str] = [] for key in product(LOWERCASE_INTS, repeat=3): encoded = try_key(ciphertext, key) if encoded is not None: @@ -85,7 +84,7 @@ def filter_valid_chars(ciphertext: List[int]) -> List[str]: return possibles -def filter_common_word(possibles: List[str], common_word: str) -> List[str]: +def filter_common_word(possibles: list[str], common_word: str) -> list[str]: """ Given a list of possible decoded messages, narrow down the possibilities for checking for the presence of a specified common word. 
Only decoded messages @@ -106,8 +105,8 @@ def solution(filename: str = "p059_cipher.txt") -> int: >>> solution("test_cipher.txt") 3000 """ - ciphertext: List[int] - possibles: List[str] + ciphertext: list[int] + possibles: list[str] common_word: str decoded_text: str data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8") @@ -121,7 +120,7 @@ def solution(filename: str = "p059_cipher.txt") -> int: break decoded_text = possibles[0] - return sum([ord(char) for char in decoded_text]) + return sum(ord(char) for char in decoded_text) if __name__ == "__main__": diff --git a/project_euler/problem_070/sol1.py b/project_euler/problem_070/sol1.py index 9d27119ba95c..e106800d5716 100644 --- a/project_euler/problem_070/sol1.py +++ b/project_euler/problem_070/sol1.py @@ -28,10 +28,10 @@ Finding totients https://en.wikipedia.org/wiki/Euler's_totient_function#Euler's_product_formula """ -from typing import List +from __future__ import annotations -def get_totients(max_one: int) -> List[int]: +def get_totients(max_one: int) -> list[int]: """ Calculates a list of totients from 0 to max_one exclusive, using the definition of Euler's product formula. diff --git a/project_euler/problem_074/sol1.py b/project_euler/problem_074/sol1.py index 38d4e1439307..a40a629033fa 100644 --- a/project_euler/problem_074/sol1.py +++ b/project_euler/problem_074/sol1.py @@ -66,7 +66,7 @@ def sum_digit_factorials(n: int) -> int: """ if n in CACHE_SUM_DIGIT_FACTORIALS: return CACHE_SUM_DIGIT_FACTORIALS[n] - ret = sum([DIGIT_FACTORIALS[let] for let in str(n)]) + ret = sum(DIGIT_FACTORIALS[let] for let in str(n)) CACHE_SUM_DIGIT_FACTORIALS[n] = ret return ret diff --git a/project_euler/problem_077/sol1.py b/project_euler/problem_077/sol1.py index e92992a90ab3..214e258793f6 100644 --- a/project_euler/problem_077/sol1.py +++ b/project_euler/problem_077/sol1.py @@ -12,10 +12,10 @@ What is the first value which can be written as the sum of primes in over five thousand different ways? """ +from __future__ import annotations from functools import lru_cache from math import ceil -from typing import Optional, Set NUM_PRIMES = 100 @@ -30,7 +30,7 @@ @lru_cache(maxsize=100) -def partition(number_to_partition: int) -> Set[int]: +def partition(number_to_partition: int) -> set[int]: """ Return a set of integers corresponding to unique prime partitions of n. The unique prime partitions can be represented as unique prime decompositions, @@ -47,7 +47,7 @@ def partition(number_to_partition: int) -> Set[int]: elif number_to_partition == 0: return {1} - ret: Set[int] = set() + ret: set[int] = set() prime: int sub: int @@ -60,7 +60,7 @@ def partition(number_to_partition: int) -> Set[int]: return ret -def solution(number_unique_partitions: int = 5000) -> Optional[int]: +def solution(number_unique_partitions: int = 5000) -> int | None: """ Return the smallest integer that can be written as the sum of primes in over m unique ways. 
diff --git a/project_euler/problem_080/sol1.py b/project_euler/problem_080/sol1.py index db69d7e8451c..517be3fc0ba8 100644 --- a/project_euler/problem_080/sol1.py +++ b/project_euler/problem_080/sol1.py @@ -27,7 +27,7 @@ def solution() -> int: if len(str(sqrt_number)) > 1: answer += int(str(sqrt_number)[0]) sqrt_number = str(sqrt_number)[2:101] - answer += sum([int(x) for x in sqrt_number]) + answer += sum(int(x) for x in sqrt_number) return answer diff --git a/project_euler/problem_081/sol1.py b/project_euler/problem_081/sol1.py index afa143f23b33..aef6106b54df 100644 --- a/project_euler/problem_081/sol1.py +++ b/project_euler/problem_081/sol1.py @@ -22,7 +22,7 @@ def solution(filename: str = "matrix.txt") -> int: >>> solution() 427337 """ - with open(os.path.join(os.path.dirname(__file__), filename), "r") as in_file: + with open(os.path.join(os.path.dirname(__file__), filename)) as in_file: data = in_file.read() grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()] diff --git a/project_euler/problem_085/sol1.py b/project_euler/problem_085/sol1.py index 74e36b1301a4..d0f29796498c 100644 --- a/project_euler/problem_085/sol1.py +++ b/project_euler/problem_085/sol1.py @@ -44,10 +44,9 @@ Reference: https://en.wikipedia.org/wiki/Triangular_number https://en.wikipedia.org/wiki/Quadratic_formula """ - +from __future__ import annotations from math import ceil, floor, sqrt -from typing import List def solution(target: int = 2000000) -> int: @@ -61,7 +60,7 @@ def solution(target: int = 2000000) -> int: >>> solution(2000000000) 86595 """ - triangle_numbers: List[int] = [0] + triangle_numbers: list[int] = [0] idx: int for idx in range(1, ceil(sqrt(target * 2) * 1.1)): diff --git a/project_euler/problem_089/sol1.py b/project_euler/problem_089/sol1.py index 11582aa4ab1a..1c4e2600f847 100644 --- a/project_euler/problem_089/sol1.py +++ b/project_euler/problem_089/sol1.py @@ -125,7 +125,7 @@ def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int: savings = 0 - file1 = open(os.path.dirname(__file__) + roman_numerals_filename, "r") + file1 = open(os.path.dirname(__file__) + roman_numerals_filename) lines = file1.readlines() for line in lines: original = line.strip() diff --git a/project_euler/problem_101/sol1.py b/project_euler/problem_101/sol1.py index e66316090fb2..553f8f442bb8 100644 --- a/project_euler/problem_101/sol1.py +++ b/project_euler/problem_101/sol1.py @@ -41,11 +41,11 @@ Find the sum of FITs for the BOPs. """ +from __future__ import annotations +from typing import Callable, Union -from typing import Callable, List, Union - -Matrix = List[List[Union[float, int]]] +Matrix = list[list[Union[float, int]]] def solve(matrix: Matrix, vector: Matrix) -> Matrix: @@ -78,9 +78,9 @@ def solve(matrix: Matrix, vector: Matrix) -> Matrix: col = 0 while row < size and col < size: # pivoting - pivot_row = max( - [(abs(augmented[row2][col]), row2) for row2 in range(col, size)] - )[1] + pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(col, size))[ + 1 + ] if augmented[pivot_row][col] == 0: col += 1 continue @@ -109,7 +109,7 @@ def solve(matrix: Matrix, vector: Matrix) -> Matrix: ] -def interpolate(y_list: List[int]) -> Callable[[int], int]: +def interpolate(y_list: list[int]) -> Callable[[int], int]: """ Given a list of data points (1,y0),(2,y1), ..., return a function that interpolates the data points. 
We find the coefficients of the interpolating @@ -195,9 +195,9 @@ def solution(func: Callable[[int], int] = question_function, order: int = 10) -> >>> solution(lambda n: n ** 3, 3) 74 """ - data_points: List[int] = [func(x_val) for x_val in range(1, order + 1)] + data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)] - polynomials: List[Callable[[int], int]] = [ + polynomials: list[Callable[[int], int]] = [ interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1) ] diff --git a/project_euler/problem_102/sol1.py b/project_euler/problem_102/sol1.py index 00af726656ce..4f6e6361e3e8 100644 --- a/project_euler/problem_102/sol1.py +++ b/project_euler/problem_102/sol1.py @@ -18,12 +18,12 @@ NOTE: The first two examples in the file represent the triangles in the example given above. """ +from __future__ import annotations from pathlib import Path -from typing import List, Tuple -def vector_product(point1: Tuple[int, int], point2: Tuple[int, int]) -> int: +def vector_product(point1: tuple[int, int], point2: tuple[int, int]) -> int: """ Return the 2-d vector product of two vectors. >>> vector_product((1, 2), (-5, 0)) @@ -43,9 +43,9 @@ def contains_origin(x1: int, y1: int, x2: int, y2: int, x3: int, y3: int) -> boo >>> contains_origin(-175, 41, -421, -714, 574, -645) False """ - point_a: Tuple[int, int] = (x1, y1) - point_a_to_b: Tuple[int, int] = (x2 - x1, y2 - y1) - point_a_to_c: Tuple[int, int] = (x3 - x1, y3 - y1) + point_a: tuple[int, int] = (x1, y1) + point_a_to_b: tuple[int, int] = (x2 - x1, y2 - y1) + point_a_to_c: tuple[int, int] = (x3 - x1, y3 - y1) a: float = -vector_product(point_a, point_a_to_b) / vector_product( point_a_to_c, point_a_to_b ) @@ -64,12 +64,12 @@ def solution(filename: str = "p102_triangles.txt") -> int: """ data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8") - triangles: List[List[int]] = [] + triangles: list[list[int]] = [] for line in data.strip().split("\n"): triangles.append([int(number) for number in line.split(",")]) ret: int = 0 - triangle: List[int] + triangle: list[int] for triangle in triangles: ret += contains_origin(*triangle) diff --git a/project_euler/problem_107/sol1.py b/project_euler/problem_107/sol1.py index 80a10e499f76..6a411a11473d 100644 --- a/project_euler/problem_107/sol1.py +++ b/project_euler/problem_107/sol1.py @@ -27,11 +27,12 @@ We use Prim's algorithm to find a Minimum Spanning Tree. Reference: https://en.wikipedia.org/wiki/Prim%27s_algorithm """ +from __future__ import annotations import os -from typing import Dict, List, Mapping, Set, Tuple +from typing import Mapping -EdgeT = Tuple[int, int] +EdgeT = tuple[int, int] class Graph: @@ -39,9 +40,9 @@ class Graph: A class representing an undirected weighted graph. """ - def __init__(self, vertices: Set[int], edges: Mapping[EdgeT, int]) -> None: - self.vertices: Set[int] = vertices - self.edges: Dict[EdgeT, int] = { + def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None: + self.vertices: set[int] = vertices + self.edges: dict[EdgeT, int] = { (min(edge), max(edge)): weight for edge, weight in edges.items() } @@ -59,7 +60,7 @@ def add_edge(self, edge: EdgeT, weight: int) -> None: self.vertices.add(edge[1]) self.edges[(min(edge), max(edge))] = weight - def prims_algorithm(self) -> "Graph": + def prims_algorithm(self) -> Graph: """ Run Prim's algorithm to find the minimum spanning tree. 
Reference: https://en.wikipedia.org/wiki/Prim%27s_algorithm @@ -98,13 +99,13 @@ def solution(filename: str = "p107_network.txt") -> int: """ script_dir: str = os.path.abspath(os.path.dirname(__file__)) network_file: str = os.path.join(script_dir, filename) - adjacency_matrix: List[List[str]] - edges: Dict[EdgeT, int] = dict() - data: List[str] + adjacency_matrix: list[list[str]] + edges: dict[EdgeT, int] = dict() + data: list[str] edge1: int edge2: int - with open(network_file, "r") as f: + with open(network_file) as f: data = f.read().strip().split("\n") adjaceny_matrix = [line.split(",") for line in data] diff --git a/project_euler/problem_119/sol1.py b/project_euler/problem_119/sol1.py index 7f343ac242e9..60ec16cda1aa 100644 --- a/project_euler/problem_119/sol1.py +++ b/project_euler/problem_119/sol1.py @@ -23,7 +23,7 @@ def digit_sum(n: int) -> int: >>> digit_sum(78910) 25 """ - return sum([int(digit) for digit in str(n)]) + return sum(int(digit) for digit in str(n)) def solution(n: int = 30) -> int: diff --git a/project_euler/problem_123/sol1.py b/project_euler/problem_123/sol1.py index 85350c8bae49..91913222759b 100644 --- a/project_euler/problem_123/sol1.py +++ b/project_euler/problem_123/sol1.py @@ -37,8 +37,9 @@ r = 2pn when n is odd r = 2 when n is even. """ +from __future__ import annotations -from typing import Dict, Generator +from typing import Generator def sieve() -> Generator[int, None, None]: @@ -60,7 +61,7 @@ def sieve() -> Generator[int, None, None]: >>> next(primes) 13 """ - factor_map: Dict[int, int] = {} + factor_map: dict[int, int] = {} prime = 2 while True: factor = factor_map.pop(prime, None) diff --git a/project_euler/problem_180/sol1.py b/project_euler/problem_180/sol1.py index 6112db2ea370..f7c097323c62 100644 --- a/project_euler/problem_180/sol1.py +++ b/project_euler/problem_180/sol1.py @@ -44,11 +44,10 @@ Reference: https://en.wikipedia.org/wiki/Fermat%27s_Last_Theorem """ - +from __future__ import annotations from fractions import Fraction from math import gcd, sqrt -from typing import Tuple def is_sq(number: int) -> bool: @@ -68,7 +67,7 @@ def is_sq(number: int) -> bool: def add_three( x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int -) -> Tuple[int, int]: +) -> tuple[int, int]: """ Given the numerators and denominators of three fractions, return the numerator and denominator of their sum in lowest form. @@ -100,7 +99,7 @@ def solution(order: int = 35) -> int: unique_s: set = set() hcf: int total: Fraction = Fraction(0) - fraction_sum: Tuple[int, int] + fraction_sum: tuple[int, int] for x_num in range(1, order + 1): for x_den in range(x_num + 1, order + 1): diff --git a/project_euler/problem_203/sol1.py b/project_euler/problem_203/sol1.py index 227b476da131..030cf12f2a85 100644 --- a/project_euler/problem_203/sol1.py +++ b/project_euler/problem_203/sol1.py @@ -27,12 +27,12 @@ References: - https://en.wikipedia.org/wiki/Pascal%27s_triangle """ +from __future__ import annotations import math -from typing import List, Set -def get_pascal_triangle_unique_coefficients(depth: int) -> Set[int]: +def get_pascal_triangle_unique_coefficients(depth: int) -> set[int]: """ Returns the unique coefficients of a Pascal's triangle of depth "depth". 
@@ -61,7 +61,7 @@ def get_pascal_triangle_unique_coefficients(depth: int) -> Set[int]: return coefficients -def get_primes_squared(max_number: int) -> List[int]: +def get_primes_squared(max_number: int) -> list[int]: """ Calculates all primes between 2 and round(sqrt(max_number)) and returns them squared up. @@ -92,7 +92,7 @@ def get_primes_squared(max_number: int) -> List[int]: def get_squared_primes_to_use( - num_to_look: int, squared_primes: List[int], previous_index: int + num_to_look: int, squared_primes: list[int], previous_index: int ) -> int: """ Returns an int indicating the last index on which squares of primes @@ -128,8 +128,8 @@ def get_squared_primes_to_use( def get_squarefree( - unique_coefficients: Set[int], squared_primes: List[int] -) -> Set[int]: + unique_coefficients: set[int], squared_primes: list[int] +) -> set[int]: """ Calculates the squarefree numbers inside unique_coefficients given a list of square of primes. diff --git a/scheduling/first_come_first_served.py b/scheduling/first_come_first_served.py index b51fc9fe0c04..c5f61720f97e 100644 --- a/scheduling/first_come_first_served.py +++ b/scheduling/first_come_first_served.py @@ -2,10 +2,10 @@ # In this Algorithm we just care about the order that the processes arrived # without carring about their duration time # https://en.wikipedia.org/wiki/Scheduling_(computing)#First_come,_first_served -from typing import List +from __future__ import annotations -def calculate_waiting_times(duration_times: List[int]) -> List[int]: +def calculate_waiting_times(duration_times: list[int]) -> list[int]: """ This function calculates the waiting time of some processes that have a specified duration time. @@ -24,8 +24,8 @@ def calculate_waiting_times(duration_times: List[int]) -> List[int]: def calculate_turnaround_times( - duration_times: List[int], waiting_times: List[int] -) -> List[int]: + duration_times: list[int], waiting_times: list[int] +) -> list[int]: """ This function calculates the turnaround time of some processes. Return: The time difference between the completion time and the @@ -44,7 +44,7 @@ def calculate_turnaround_times( ] -def calculate_average_turnaround_time(turnaround_times: List[int]) -> float: +def calculate_average_turnaround_time(turnaround_times: list[int]) -> float: """ This function calculates the average of the turnaround times Return: The average of the turnaround times. @@ -58,7 +58,7 @@ def calculate_average_turnaround_time(turnaround_times: List[int]) -> float: return sum(turnaround_times) / len(turnaround_times) -def calculate_average_waiting_time(waiting_times: List[int]) -> float: +def calculate_average_waiting_time(waiting_times: list[int]) -> float: """ This function calculates the average of the waiting times Return: The average of the waiting times. diff --git a/scheduling/round_robin.py b/scheduling/round_robin.py index 4a79301c1816..e8d54dd9a553 100644 --- a/scheduling/round_robin.py +++ b/scheduling/round_robin.py @@ -3,11 +3,12 @@ In Round Robin each process is assigned a fixed time slot in a cyclic way. https://en.wikipedia.org/wiki/Round-robin_scheduling """ +from __future__ import annotations + from statistics import mean -from typing import List -def calculate_waiting_times(burst_times: List[int]) -> List[int]: +def calculate_waiting_times(burst_times: list[int]) -> list[int]: """ Calculate the waiting times of a list of processes that have a specified duration. 
@@ -40,8 +41,8 @@ def calculate_waiting_times(burst_times: List[int]) -> List[int]: def calculate_turn_around_times( - burst_times: List[int], waiting_times: List[int] -) -> List[int]: + burst_times: list[int], waiting_times: list[int] +) -> list[int]: """ >>> calculate_turn_around_times([1, 2, 3, 4], [0, 1, 3]) [1, 3, 6] diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index a49d037d6a23..17409108a34e 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -3,14 +3,14 @@ Please note arrival time and burst Please use spaces to separate times entered. """ -from typing import List +from __future__ import annotations import pandas as pd def calculate_waitingtime( - arrival_time: List[int], burst_time: List[int], no_of_processes: int -) -> List[int]: + arrival_time: list[int], burst_time: list[int], no_of_processes: int +) -> list[int]: """ Calculate the waiting time of each processes Return: List of waiting times. @@ -72,8 +72,8 @@ def calculate_waitingtime( def calculate_turnaroundtime( - burst_time: List[int], no_of_processes: int, waiting_time: List[int] -) -> List[int]: + burst_time: list[int], no_of_processes: int, waiting_time: list[int] +) -> list[int]: """ Calculate the turn around time of each Processes Return: list of turn around times. @@ -91,7 +91,7 @@ def calculate_turnaroundtime( def calculate_average_times( - waiting_time: List[int], turn_around_time: List[int], no_of_processes: int + waiting_time: list[int], turn_around_time: list[int], no_of_processes: int ) -> None: """ This function calculates the average of the waiting & turnaround times diff --git a/searches/binary_search.py b/searches/binary_search.py index 35e0dd0596d2..0966cd8de857 100644 --- a/searches/binary_search.py +++ b/searches/binary_search.py @@ -9,12 +9,13 @@ For manual testing run: python3 binary_search.py """ +from __future__ import annotations + import bisect -from typing import List, Optional def bisect_left( - sorted_collection: List[int], item: int, lo: int = 0, hi: int = -1 + sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1 ) -> int: """ Locates the first element in a sorted array that is larger or equal to a given @@ -60,7 +61,7 @@ def bisect_left( def bisect_right( - sorted_collection: List[int], item: int, lo: int = 0, hi: int = -1 + sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1 ) -> int: """ Locates the first element in a sorted array that is larger than a given value. @@ -105,7 +106,7 @@ def bisect_right( def insort_left( - sorted_collection: List[int], item: int, lo: int = 0, hi: int = -1 + sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1 ) -> None: """ Inserts a given value into a sorted array before other values with the same value. @@ -148,7 +149,7 @@ def insort_left( def insort_right( - sorted_collection: List[int], item: int, lo: int = 0, hi: int = -1 + sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1 ) -> None: """ Inserts a given value into a sorted array after other values with the same value. 
@@ -190,7 +191,7 @@ def insort_right( sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item) -def binary_search(sorted_collection: List[int], item: int) -> Optional[int]: +def binary_search(sorted_collection: list[int], item: int) -> int | None: """Pure implementation of binary search algorithm in Python Be careful collection must be ascending sorted, otherwise result will be @@ -228,7 +229,7 @@ def binary_search(sorted_collection: List[int], item: int) -> Optional[int]: return None -def binary_search_std_lib(sorted_collection: List[int], item: int) -> Optional[int]: +def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None: """Pure implementation of binary search algorithm in Python using stdlib Be careful collection must be ascending sorted, otherwise result will be @@ -258,8 +259,8 @@ def binary_search_std_lib(sorted_collection: List[int], item: int) -> Optional[i def binary_search_by_recursion( - sorted_collection: List[int], item: int, left: int, right: int -) -> Optional[int]: + sorted_collection: list[int], item: int, left: int, right: int +) -> int | None: """Pure implementation of binary search algorithm in Python by recursion diff --git a/searches/fibonacci_search.py b/searches/fibonacci_search.py index ac8ecc99a187..55fc05d39eeb 100644 --- a/searches/fibonacci_search.py +++ b/searches/fibonacci_search.py @@ -13,7 +13,7 @@ from functools import lru_cache -@lru_cache() +@lru_cache def fibonacci(k: int) -> int: """Finds fibonacci number in index k. diff --git a/searches/ternary_search.py b/searches/ternary_search.py index 9422a4ccb966..01e437723473 100644 --- a/searches/ternary_search.py +++ b/searches/ternary_search.py @@ -6,7 +6,7 @@ Time Complexity : O(log3 N) Space Complexity : O(1) """ -from typing import List +from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. @@ -16,7 +16,7 @@ # This is the linear search that will occur after the search space has become smaller. -def lin_search(left: int, right: int, array: List[int], target: int) -> int: +def lin_search(left: int, right: int, array: list[int], target: int) -> int: """Perform linear search in list. Returns -1 if element is not found. Parameters @@ -58,7 +58,7 @@ def lin_search(left: int, right: int, array: List[int], target: int) -> int: return -1 -def ite_ternary_search(array: List[int], target: int) -> int: +def ite_ternary_search(array: list[int], target: int) -> int: """Iterative method of the ternary search algorithm. >>> test_list = [0, 1, 2, 8, 13, 17, 19, 32, 42] >>> ite_ternary_search(test_list, 3) @@ -110,7 +110,7 @@ def ite_ternary_search(array: List[int], target: int) -> int: return -1 -def rec_ternary_search(left: int, right: int, array: List[int], target: int) -> int: +def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int: """Recursive method of the ternary search algorithm. >>> test_list = [0, 1, 2, 8, 13, 17, 19, 32, 42] diff --git a/sorts/bitonic_sort.py b/sorts/bitonic_sort.py index c718973e5ecb..201fecd2ce86 100644 --- a/sorts/bitonic_sort.py +++ b/sorts/bitonic_sort.py @@ -3,10 +3,10 @@ Note that this program works only when size of input is a power of 2. 
""" -from typing import List +from __future__ import annotations -def comp_and_swap(array: List[int], index1: int, index2: int, direction: int) -> None: +def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None: """Compare the value at given index1 and index2 of the array and swap them as per the given direction. @@ -37,7 +37,7 @@ def comp_and_swap(array: List[int], index1: int, index2: int, direction: int) -> array[index1], array[index2] = array[index2], array[index1] -def bitonic_merge(array: List[int], low: int, length: int, direction: int) -> None: +def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None: """ It recursively sorts a bitonic sequence in ascending order, if direction = 1, and in descending if direction = 0. @@ -61,7 +61,7 @@ def bitonic_merge(array: List[int], low: int, length: int, direction: int) -> No bitonic_merge(array, low + middle, middle, direction) -def bitonic_sort(array: List[int], low: int, length: int, direction: int) -> None: +def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None: """ This function first produces a bitonic sequence by recursively sorting its two halves in opposite sorting orders, and then calls bitonic_merge to make them in the diff --git a/sorts/bucket_sort.py b/sorts/bucket_sort.py index 1ac76774f4ba..58242a1cb1f8 100644 --- a/sorts/bucket_sort.py +++ b/sorts/bucket_sort.py @@ -27,7 +27,7 @@ Source: https://en.wikipedia.org/wiki/Bucket_sort """ -from typing import List +from __future__ import annotations def bucket_sort(my_list: list) -> list: @@ -52,7 +52,7 @@ def bucket_sort(my_list: list) -> list: return [] min_value, max_value = min(my_list), max(my_list) bucket_count = int(max_value - min_value) + 1 - buckets: List[list] = [[] for _ in range(bucket_count)] + buckets: list[list] = [[] for _ in range(bucket_count)] for i in range(len(my_list)): buckets[(int(my_list[i] - min_value) // bucket_count)].append(my_list[i]) diff --git a/sorts/msd_radix_sort.py b/sorts/msd_radix_sort.py index 4c3cea30ef68..3cdec4bd0711 100644 --- a/sorts/msd_radix_sort.py +++ b/sorts/msd_radix_sort.py @@ -4,10 +4,10 @@ them. https://en.wikipedia.org/wiki/Radix_sort """ -from typing import List +from __future__ import annotations -def msd_radix_sort(list_of_ints: List[int]) -> List[int]: +def msd_radix_sort(list_of_ints: list[int]) -> list[int]: """ Implementation of the MSD radix sort algorithm. Only works with positive integers @@ -36,7 +36,7 @@ def msd_radix_sort(list_of_ints: List[int]) -> List[int]: return _msd_radix_sort(list_of_ints, most_bits) -def _msd_radix_sort(list_of_ints: List[int], bit_position: int) -> List[int]: +def _msd_radix_sort(list_of_ints: list[int], bit_position: int) -> list[int]: """ Sort the given list based on the bit at bit_position. Numbers with a 0 at that position will be at the start of the list, numbers with a @@ -74,7 +74,7 @@ def _msd_radix_sort(list_of_ints: List[int], bit_position: int) -> List[int]: return res -def msd_radix_sort_inplace(list_of_ints: List[int]): +def msd_radix_sort_inplace(list_of_ints: list[int]): """ Inplace implementation of the MSD radix sort algorithm. Sorts based on the binary representation of the integers. 
@@ -109,7 +109,7 @@ def msd_radix_sort_inplace(list_of_ints: List[int]): def _msd_radix_sort_inplace( - list_of_ints: List[int], bit_position: int, begin_index: int, end_index: int + list_of_ints: list[int], bit_position: int, begin_index: int, end_index: int ): """ Sort the given list based on the bit at bit_position. Numbers with a diff --git a/sorts/patience_sort.py b/sorts/patience_sort.py index 87f5a4078612..845db517420b 100644 --- a/sorts/patience_sort.py +++ b/sorts/patience_sort.py @@ -1,7 +1,8 @@ +from __future__ import annotations + from bisect import bisect_left from functools import total_ordering from heapq import merge -from typing import List """ A pure Python implementation of the patience sort algorithm @@ -44,7 +45,7 @@ def patience_sort(collection: list) -> list: >>> patience_sort([-3, -17, -48]) [-48, -17, -3] """ - stacks: List[Stack] = [] + stacks: list[Stack] = [] # sort into stacks for element in collection: new_stacks = Stack([element]) @@ -55,7 +56,7 @@ def patience_sort(collection: list) -> list: stacks.append(new_stacks) # use a heap-based merge to merge stack efficiently - collection[:] = merge(*[reversed(stack) for stack in stacks]) + collection[:] = merge(*(reversed(stack) for stack in stacks)) return collection diff --git a/sorts/pigeon_sort.py b/sorts/pigeon_sort.py index 3d81f0643865..3e6d4c09c46f 100644 --- a/sorts/pigeon_sort.py +++ b/sorts/pigeon_sort.py @@ -9,10 +9,10 @@ For manual testing run: python pigeon_sort.py """ -from typing import List +from __future__ import annotations -def pigeon_sort(array: List[int]) -> List[int]: +def pigeon_sort(array: list[int]) -> list[int]: """ Implementation of pigeon hole sort algorithm :param array: Collection of comparable items diff --git a/sorts/quick_sort.py b/sorts/quick_sort.py index 6f51f6eca7db..b099c78861ba 100644 --- a/sorts/quick_sort.py +++ b/sorts/quick_sort.py @@ -7,7 +7,7 @@ For manual testing run: python3 quick_sort.py """ -from typing import List +from __future__ import annotations def quick_sort(collection: list) -> list: @@ -27,8 +27,8 @@ def quick_sort(collection: list) -> list: if len(collection) < 2: return collection pivot = collection.pop() # Use the last element as the first pivot - greater: List[int] = [] # All elements greater than pivot - lesser: List[int] = [] # All elements less than or equal to pivot + greater: list[int] = [] # All elements greater than pivot + lesser: list[int] = [] # All elements less than or equal to pivot for element in collection: (greater if element > pivot else lesser).append(element) return quick_sort(lesser) + [pivot] + quick_sort(greater) diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py index b802b5278119..e433bc507a1e 100644 --- a/sorts/radix_sort.py +++ b/sorts/radix_sort.py @@ -9,10 +9,8 @@ """ from __future__ import annotations -from typing import List - -def radix_sort(list_of_ints: List[int]) -> List[int]: +def radix_sort(list_of_ints: list[int]) -> list[int]: """ Examples: >>> radix_sort([0, 5, 3, 2, 2]) @@ -30,7 +28,7 @@ def radix_sort(list_of_ints: List[int]) -> List[int]: max_digit = max(list_of_ints) while placement <= max_digit: # declare and initialize empty buckets - buckets: List[list] = [list() for _ in range(RADIX)] + buckets: list[list] = [list() for _ in range(RADIX)] # split list_of_ints between the buckets for i in list_of_ints: tmp = int((i / placement) % RADIX) diff --git a/sorts/recursive_insertion_sort.py b/sorts/recursive_insertion_sort.py index 89f88b4a961b..ab2716f8eae5 100644 --- a/sorts/recursive_insertion_sort.py +++ 
b/sorts/recursive_insertion_sort.py @@ -1,11 +1,8 @@ """ A recursive implementation of the insertion sort algorithm """ - from __future__ import annotations -from typing import List - def rec_insertion_sort(collection: list, n: int): """ @@ -72,6 +69,6 @@ def insert_next(collection: list, index: int): if __name__ == "__main__": numbers = input("Enter integers separated by spaces: ") - number_list: List[int] = [int(num) for num in numbers.split()] + number_list: list[int] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list) diff --git a/sorts/slowsort.py b/sorts/slowsort.py index 53bb14554ee2..a5f4e873ebb2 100644 --- a/sorts/slowsort.py +++ b/sorts/slowsort.py @@ -8,13 +8,10 @@ Source: https://en.wikipedia.org/wiki/Slowsort """ +from __future__ import annotations -from typing import Optional - -def slowsort( - sequence: list, start: Optional[int] = None, end: Optional[int] = None -) -> None: +def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None: """ Sorts sequence[start..end] (both inclusive) in-place. start defaults to 0 if not given. diff --git a/strings/aho_corasick.py b/strings/aho_corasick.py index 712cb338aa7e..b9a6a80728f6 100644 --- a/strings/aho_corasick.py +++ b/strings/aho_corasick.py @@ -1,5 +1,6 @@ +from __future__ import annotations + from collections import deque -from typing import Dict, List, Union class Automaton: @@ -13,7 +14,7 @@ def __init__(self, keywords: list[str]): self.add_keyword(keyword) self.set_fail_transitions() - def find_next_state(self, current_state: int, char: str) -> Union[int, None]: + def find_next_state(self, current_state: int, char: str) -> int | None: for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state @@ -63,7 +64,7 @@ def set_fail_transitions(self) -> None: + self.adlist[self.adlist[child]["fail_state"]]["output"] ) - def search_in(self, string: str) -> Dict[str, List[int]]: + def search_in(self, string: str) -> dict[str, list[int]]: """ >>> A = Automaton(["what", "hat", "ver", "er"]) >>> A.search_in("whatever, err ... 
, wherever") diff --git a/strings/boyer_moore_search.py b/strings/boyer_moore_search.py index a3e6cf614eab..8d8ff22f67bd 100644 --- a/strings/boyer_moore_search.py +++ b/strings/boyer_moore_search.py @@ -17,7 +17,7 @@ n=length of main string m=length of pattern string """ -from typing import List +from __future__ import annotations class BoyerMooreSearch: @@ -59,7 +59,7 @@ def mismatch_in_text(self, currentPos: int) -> int: return currentPos + i return -1 - def bad_character_heuristic(self) -> List[int]: + def bad_character_heuristic(self) -> list[int]: # searches pattern in text and returns index positions positions = [] for i in range(self.textLen - self.patLen + 1): diff --git a/strings/knuth_morris_pratt.py b/strings/knuth_morris_pratt.py index a205ce37e3e5..a488c171a93b 100644 --- a/strings/knuth_morris_pratt.py +++ b/strings/knuth_morris_pratt.py @@ -1,4 +1,4 @@ -from typing import List +from __future__ import annotations def kmp(pattern: str, text: str) -> bool: @@ -36,7 +36,7 @@ def kmp(pattern: str, text: str) -> bool: return False -def get_failure_array(pattern: str) -> List[int]: +def get_failure_array(pattern: str) -> list[int]: """ Calculates the new index we should go to if we fail a comparison :param pattern: diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index 0571ac3313a3..afaee5bbe854 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -1,4 +1,6 @@ """Get the site emails from URL.""" +from __future__ import annotations + __author__ = "Muhammad Umer Farooq" __license__ = "MIT" __version__ = "1.0.0" @@ -8,7 +10,6 @@ import re from html.parser import HTMLParser -from typing import Optional from urllib import parse import requests @@ -20,7 +21,7 @@ def __init__(self, domain: str) -> None: self.urls: list[str] = [] self.domain = domain - def handle_starttag(self, tag: str, attrs: list[tuple[str, Optional[str]]]) -> None: + def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None: """ This function parse html to take takes url from tags """ diff --git a/web_programming/fetch_github_info.py b/web_programming/fetch_github_info.py index c9198460f211..aa4e1d7b1963 100644 --- a/web_programming/fetch_github_info.py +++ b/web_programming/fetch_github_info.py @@ -17,8 +17,10 @@ #!/usr/bin/env bash export USER_TOKEN="" """ +from __future__ import annotations + import os -from typing import Any, Dict +from typing import Any import requests @@ -31,7 +33,7 @@ USER_TOKEN = os.environ.get("USER_TOKEN", "") -def fetch_github_info(auth_token: str) -> Dict[Any, Any]: +def fetch_github_info(auth_token: str) -> dict[Any, Any]: """ Fetch GitHub info of a user using the requests module """ From 01d58562ccbfbea9d16aca6a876676b603026238 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 18 Sep 2021 22:33:03 +0300 Subject: [PATCH 0209/1543] Fix typos in Project Euler problem 034 solution 1 (#4748) * Fix comment * Fix output --- project_euler/problem_034/sol1.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/project_euler/problem_034/sol1.py b/project_euler/problem_034/sol1.py index 78b318b76d06..11c84ab96ac6 100644 --- a/project_euler/problem_034/sol1.py +++ b/project_euler/problem_034/sol1.py @@ -11,7 +11,7 @@ def sum_of_digit_factorial(n: int) -> int: """ - Returns the sum of the digits in n + Returns the sum of the factorial of digits in n >>> sum_of_digit_factorial(15) 121 >>> sum_of_digit_factorial(0) @@ -33,4 +33,4 @@ def solution() -> int: if __name__ == "__main__": 
- print(f"{solution()} = ") + print(f"{solution() = }") From 4761fef1a5a3904167285af8819091018c8e04b1 Mon Sep 17 00:00:00 2001 From: jonabtc <39396756+jonabtc@users.noreply.github.com> Date: Sat, 18 Sep 2021 20:22:47 -0500 Subject: [PATCH 0210/1543] Double factorial iterative (#4760) * Adding the double factorial algorithm * Adding the double factorial algorithm Co-authored-by: Jonathan Ocles --- maths/double_factorial_iterative.py | 33 +++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 maths/double_factorial_iterative.py diff --git a/maths/double_factorial_iterative.py b/maths/double_factorial_iterative.py new file mode 100644 index 000000000000..b2b58aa04c28 --- /dev/null +++ b/maths/double_factorial_iterative.py @@ -0,0 +1,33 @@ +def double_factorial(num: int) -> int: + """ + Compute double factorial using iterative method. + + To learn about the theory behind this algorithm: + https://en.wikipedia.org/wiki/Double_factorial + + >>> import math + >>> all(double_factorial(i) == math.prod(range(i, 0, -2)) for i in range(20)) + True + >>> double_factorial(0.1) + Traceback (most recent call last): + ... + ValueError: double_factorial() only accepts integral values + >>> double_factorial(-1) + Traceback (most recent call last): + ... + ValueError: double_factorial() not defined for negative values + """ + if not isinstance(num, int): + raise ValueError("double_factorial() only accepts integral values") + if num < 0: + raise ValueError("double_factorial() not defined for negative values") + value = 1 + for i in range(num, 0, -2): + value *= i + return value + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a7b9e28bc34478850ea22e31f1d5a022502e2350 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 21 Sep 2021 14:28:27 +0300 Subject: [PATCH 0211/1543] Improve Project Euler problem 009 solution 1 (#4749) * Improve solution * Uncomment code that has been commented due to slow execution affecting Travis --- project_euler/problem_009/sol1.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/project_euler/problem_009/sol1.py b/project_euler/problem_009/sol1.py index a58ea943e48b..c50dfeecfd22 100644 --- a/project_euler/problem_009/sol1.py +++ b/project_euler/problem_009/sol1.py @@ -25,18 +25,16 @@ def solution() -> int: 2. a**2 + b**2 = c**2 3. a + b + c = 1000 - # The code below has been commented due to slow execution affecting Travis. - # >>> solution() - # 31875000 + >>> solution() + 31875000 """ for a in range(300): - for b in range(400): - for c in range(500): - if a < b < c: + for b in range(a + 1, 400): + for c in range(b + 1, 500): + if (a + b + c) == 1000: if (a ** 2) + (b ** 2) == (c ** 2): - if (a + b + c) == 1000: - return a * b * c + return a * b * c def solution_fast() -> int: @@ -47,9 +45,8 @@ def solution_fast() -> int: 2. a**2 + b**2 = c**2 3. a + b + c = 1000 - # The code below has been commented due to slow execution affecting Travis. - # >>> solution_fast() - # 31875000 + >>> solution_fast() + 31875000 """ for a in range(300): From abc725f12de0ef186db67e2d6bc48161ed894644 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 22 Sep 2021 19:37:18 +0200 Subject: [PATCH 0212/1543] mypy --install-types --non-interactive . (#4530) * mypy --install-types --non-interactive . @dhruvmanila Is this useful/needed given that we do not pin our dependencies? 
https://mypy-lang.blogspot.com/2021/06/mypy-0910-released.html * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2ffc2aa293b0..7c2255275091 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,7 +21,7 @@ jobs: run: | python -m pip install --upgrade pip setuptools six wheel python -m pip install mypy pytest-cov -r requirements.txt - - run: mypy . + - run: mypy --install-types --non-interactive . - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} From dc07a850763d8154e012c9d1be7f8fe78326e8fb Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 22 Sep 2021 20:03:11 +0200 Subject: [PATCH 0213/1543] Update and rename check_valid_ip_address.py to is_ip_v4_address_valid.py (#4665) * Update and rename check_valid_ip_address.py to is_ip_v4_address_valid.py New test cases that the algorithm must detect: * [ ] an octet much bigger than 255 * [ ] an octet is negative * [ ] number of octets is less than 4 * [ ] number of octets is greater than 4 * [ ] an octet is a letter * updating DIRECTORY.md * Add two more tests to is_ip_v4_address_valid.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: John Law --- DIRECTORY.md | 2 +- maths/check_valid_ip_address.py | 46 --------------------------- maths/is_ip_v4_address_valid.py | 56 +++++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 47 deletions(-) delete mode 100644 maths/check_valid_ip_address.py create mode 100644 maths/is_ip_v4_address_valid.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 0c00d5ca7f70..0d44e10ad50a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -424,7 +424,6 @@ * [Binomial Distribution](https://github.com/TheAlgorithms/Python/blob/master/maths/binomial_distribution.py) * [Bisection](https://github.com/TheAlgorithms/Python/blob/master/maths/bisection.py) * [Ceil](https://github.com/TheAlgorithms/Python/blob/master/maths/ceil.py) - * [Check Valid Ip Address](https://github.com/TheAlgorithms/Python/blob/master/maths/check_valid_ip_address.py) * [Chudnovsky Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/chudnovsky_algorithm.py) * [Collatz Sequence](https://github.com/TheAlgorithms/Python/blob/master/maths/collatz_sequence.py) * [Combinations](https://github.com/TheAlgorithms/Python/blob/master/maths/combinations.py) @@ -454,6 +453,7 @@ * [Greedy Coin Change](https://github.com/TheAlgorithms/Python/blob/master/maths/greedy_coin_change.py) * [Hardy Ramanujanalgo](https://github.com/TheAlgorithms/Python/blob/master/maths/hardy_ramanujanalgo.py) * [Integration By Simpson Approx](https://github.com/TheAlgorithms/Python/blob/master/maths/integration_by_simpson_approx.py) + * [Is Ip V4 Address Valid](https://github.com/TheAlgorithms/Python/blob/master/maths/is_ip_v4_address_valid.py) * [Is Square Free](https://github.com/TheAlgorithms/Python/blob/master/maths/is_square_free.py) * [Jaccard Similarity](https://github.com/TheAlgorithms/Python/blob/master/maths/jaccard_similarity.py) * [Kadanes](https://github.com/TheAlgorithms/Python/blob/master/maths/kadanes.py) diff --git a/maths/check_valid_ip_address.py b/maths/check_valid_ip_address.py deleted file mode 100644 index 6e8d35ebc44c..000000000000 --- 
a/maths/check_valid_ip_address.py +++ /dev/null @@ -1,46 +0,0 @@ -""" -Checking valid Ip Address. -A valid IP address must be in the form of A.B.C.D, -where A,B,C and D are numbers from 0-254 -for example: 192.168.23.1, 172.254.254.254 are valid IP address - 192.168.255.0, 255.192.3.121 are Invalid IP address -""" - - -def check_valid_ip(ip: str) -> bool: - """ - print "Valid IP address" If IP is valid. - or - print "Invalid IP address" If IP is Invalid. - - >>> check_valid_ip("192.168.0.23") - True - - >>> check_valid_ip("192.255.15.8") - False - - >>> check_valid_ip("172.100.0.8") - True - - >>> check_valid_ip("254.255.0.255") - False - """ - ip1 = ip.replace(".", " ") - list1 = [int(i) for i in ip1.split() if i.isdigit()] - count = 0 - for i in list1: - if i > 254: - count += 1 - break - if count: - return False - return True - - -if __name__ == "__main__": - ip = input() - output = check_valid_ip(ip) - if output is True: - print(f"{ip} is a Valid IP address") - else: - print(f"{ip} is an Invalid IP address") diff --git a/maths/is_ip_v4_address_valid.py b/maths/is_ip_v4_address_valid.py new file mode 100644 index 000000000000..0ae8e021ead1 --- /dev/null +++ b/maths/is_ip_v4_address_valid.py @@ -0,0 +1,56 @@ +""" +Is IP v4 address valid? +A valid IP address must be four octets in the form of A.B.C.D, +where A,B,C and D are numbers from 0-254 +for example: 192.168.23.1, 172.254.254.254 are valid IP address + 192.168.255.0, 255.192.3.121 are invalid IP address +""" + + +def is_ip_v4_address_valid(ip_v4_address: str) -> bool: + """ + print "Valid IP address" If IP is valid. + or + print "Invalid IP address" If IP is invalid. + + >>> is_ip_v4_address_valid("192.168.0.23") + True + + >>> is_ip_v4_address_valid("192.255.15.8") + False + + >>> is_ip_v4_address_valid("172.100.0.8") + True + + >>> is_ip_v4_address_valid("254.255.0.255") + False + + >>> is_ip_v4_address_valid("1.2.33333333.4") + False + + >>> is_ip_v4_address_valid("1.2.-3.4") + False + + >>> is_ip_v4_address_valid("1.2.3") + False + + >>> is_ip_v4_address_valid("1.2.3.4.5") + False + + >>> is_ip_v4_address_valid("1.2.A.4") + False + + >>> is_ip_v4_address_valid("0.0.0.0") + True + + >>> is_ip_v4_address_valid("1.2.3.") + False + """ + octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()] + return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets) + + +if __name__ == "__main__": + ip = input().strip() + valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid" + print(f"{ip} is a {valid_or_invalid} IP v4 address.") From 15d1cfabb15903c5e9d4d103e2390876efb3f85f Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 22 Sep 2021 23:11:51 +0200 Subject: [PATCH 0214/1543] from __future__ import annotations (#4763) * from __future__ import annotations * updating DIRECTORY.md * from __future__ import annotations * from __future__ import annotations * Update xor_cipher.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + ciphers/a1z26.py | 1 + ciphers/enigma_machine2.py | 1 + ciphers/trafid_cipher.py | 1 + ciphers/xor_cipher.py | 27 +++++++-------------------- 5 files changed, 11 insertions(+), 20 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 0d44e10ad50a..2e9942b5bf93 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -448,6 +448,7 @@ * [Find Min Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/find_min_recursion.py) * [Floor](https://github.com/TheAlgorithms/Python/blob/master/maths/floor.py) * 
[Gamma](https://github.com/TheAlgorithms/Python/blob/master/maths/gamma.py) + * [Gamma Recursive](https://github.com/TheAlgorithms/Python/blob/master/maths/gamma_recursive.py) * [Gaussian](https://github.com/TheAlgorithms/Python/blob/master/maths/gaussian.py) * [Greatest Common Divisor](https://github.com/TheAlgorithms/Python/blob/master/maths/greatest_common_divisor.py) * [Greedy Coin Change](https://github.com/TheAlgorithms/Python/blob/master/maths/greedy_coin_change.py) diff --git a/ciphers/a1z26.py b/ciphers/a1z26.py index e6684fb1e6fc..0f0eb7c5c083 100644 --- a/ciphers/a1z26.py +++ b/ciphers/a1z26.py @@ -5,6 +5,7 @@ https://www.dcode.fr/letter-number-cipher http://bestcodes.weebly.com/a1z26.html """ +from __future__ import annotations def encode(plain: str) -> list[int]: diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index f4ce5a075f46..9252dd0edbf7 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -14,6 +14,7 @@ Created by TrapinchO """ +from __future__ import annotations RotorPositionT = tuple[int, int, int] RotorSelectionT = tuple[str, str, str] diff --git a/ciphers/trafid_cipher.py b/ciphers/trafid_cipher.py index 1c8ea3024d33..b12ceff72907 100644 --- a/ciphers/trafid_cipher.py +++ b/ciphers/trafid_cipher.py @@ -1,4 +1,5 @@ # https://en.wikipedia.org/wiki/Trifid_cipher +from __future__ import annotations def __encryptPart(messagePart: str, character2Number: dict[str, str]) -> str: diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py index 12d580e720bc..ca9dfe20f7b6 100644 --- a/ciphers/xor_cipher.py +++ b/ciphers/xor_cipher.py @@ -16,6 +16,7 @@ - encrypt_file : boolean - decrypt_file : boolean """ +from __future__ import annotations class XORCipher: @@ -41,17 +42,10 @@ def encrypt(self, content: str, key: int) -> list[str]: key = key or self.__key or 1 - # make sure key can be any size - while key > 255: - key -= 255 - - # This will be returned - ans = [] - - for ch in content: - ans.append(chr(ord(ch) ^ key)) + # make sure key is an appropriate size + key %= 255 - return ans + return [chr(ord(ch) ^ key) for ch in content] def decrypt(self, content: str, key: int) -> list[str]: """ @@ -66,17 +60,10 @@ def decrypt(self, content: str, key: int) -> list[str]: key = key or self.__key or 1 - # make sure key can be any size - while key > 255: - key -= 255 - - # This will be returned - ans = [] - - for ch in content: - ans.append(chr(ord(ch) ^ key)) + # make sure key is an appropriate size + key %= 255 - return ans + return [chr(ord(ch) ^ key) for ch in content] def encrypt_string(self, content: str, key: int = 0) -> str: """ From 66a528b171b433a9cb298fba395180445fe1f3e1 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 23 Sep 2021 21:55:18 +0300 Subject: [PATCH 0215/1543] Improve Project Euler problem 014 solution 2 (#4752) --- project_euler/problem_014/sol2.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/project_euler/problem_014/sol2.py b/project_euler/problem_014/sol2.py index 7ed68273bcd7..0a58f8d9a05a 100644 --- a/project_euler/problem_014/sol2.py +++ b/project_euler/problem_014/sol2.py @@ -28,16 +28,16 @@ from __future__ import annotations -def collatz_sequence(n: int) -> list[int]: - """Returns the Collatz sequence for n.""" - sequence = [n] +def collatz_sequence_length(n: int) -> int: + """Returns the Collatz sequence length for n.""" + sequence_length = 1 while n != 1: if n % 2 == 0: n //= 2 else: n = 3 * n + 1 - sequence.append(n) - return sequence + sequence_length += 1 + return 
sequence_length def solution(n: int = 1000000) -> int: @@ -54,7 +54,7 @@ def solution(n: int = 1000000) -> int: 13255 """ - result = max((len(collatz_sequence(i)), i) for i in range(1, n)) + result = max((collatz_sequence_length(i), i) for i in range(1, n)) return result[1] From 5d02103b273dffb2637264c0fc2136ca4fd41b57 Mon Sep 17 00:00:00 2001 From: Jogendra Singh <58473917+Joe-Sin7h@users.noreply.github.com> Date: Fri, 24 Sep 2021 16:24:38 +0530 Subject: [PATCH 0216/1543] Fixed #4764 (#4779) * Fixed #4764 * Fixes #4764 --- data_structures/disjoint_set/disjoint_set.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/data_structures/disjoint_set/disjoint_set.py b/data_structures/disjoint_set/disjoint_set.py index a93b89621c4a..bf5ab415d5e4 100644 --- a/data_structures/disjoint_set/disjoint_set.py +++ b/data_structures/disjoint_set/disjoint_set.py @@ -26,7 +26,10 @@ def union_set(x, y): disjoint set tree will be more flat. """ x, y = find_set(x), find_set(y) - if x.rank > y.rank: + if x == y: + return + + elif x.rank > y.rank: y.parent = x else: x.parent = y From 02bc4bf4171497277354c01387c96e044f2dedfe Mon Sep 17 00:00:00 2001 From: Alexandre De Zotti Date: Wed, 29 Sep 2021 06:42:11 +0100 Subject: [PATCH 0217/1543] Add Julia sets to fractals (#4382) * Added Julia sets drawing * Forgot the .py extension * Update julia_sets.py Added online sources for comparison. Added more examples of fractal Julia sets. Added all type hints. Only show one picture Silented RuntuleWarning's (there's no way of avoiding them and they're not an issue per se) * Added doctest example for "show_results" * Filtering Nan's and infinites * added 1 missing type hint * in iterate_function, convert to dtype=complex64 * RuntimeWarning (fine) filtering * Type hint, test for ignore_warnings function, typo in header * Update julia_sets.py Type of expected output value for iterate function int array -> complex array (throws an error on test) * Update julia_sets.py - More accurate type for tests cases in eval_quadratic_polynomial and iterate_function - added more characters for variables c & z in eval_quadratic_polynomial and eval_exponential to silent bot warnings * Function def formatting Blocked by black * Update julia_sets.py * Update fractals/julia_sets.py Co-authored-by: John Law * Update fractals/julia_sets.py Co-authored-by: John Law * Update fractals/julia_sets.py Co-authored-by: John Law * Update fractals/julia_sets.py Co-authored-by: John Law * Update fractals/julia_sets.py Co-authored-by: John Law * Update fractals/julia_sets.py Co-authored-by: John Law * Update fractals/julia_sets.py Co-authored-by: John Law * added more doctests for eval_exponential * Update fractals/julia_sets.py Co-authored-by: John Law Co-authored-by: John Law --- fractals/julia_sets.py | 219 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 219 insertions(+) create mode 100644 fractals/julia_sets.py diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py new file mode 100644 index 000000000000..0168a0153de1 --- /dev/null +++ b/fractals/julia_sets.py @@ -0,0 +1,219 @@ +"""Author Alexandre De Zotti + +Draws Julia sets of quadratic polynomials and exponential maps. + More specifically, this iterates the function a fixed number of times + then plots whether the absolute value of the last iterate is greater than + a fixed threshold (named "escape radius"). For the exponential map this is not + really an escape radius but rather a convenient way to approximate the Julia + set with bounded orbits. 
+ +The examples presented here are: +- The Cauliflower Julia set, see e.g. +https://en.wikipedia.org/wiki/File:Julia_z2%2B0,25.png +- Other examples from https://en.wikipedia.org/wiki/Julia_set +- An exponential map Julia set, ambiantly homeomorphic to the examples in +http://www.math.univ-toulouse.fr/~cheritat/GalII/galery.html + and +https://ddd.uab.cat/pub/pubmat/02141493v43n1/02141493v43n1p27.pdf + +Remark: Some overflow runtime warnings are suppressed. This is because of the + way the iteration loop is implemented, using numpy's efficient computations. + Overflows and infinites are replaced after each step by a large number. +""" + +import warnings +from typing import Any, Callable + +import numpy +from matplotlib import pyplot + +c_cauliflower = 0.25 + 0.0j +c_polynomial_1 = -0.4 + 0.6j +c_polynomial_2 = -0.1 + 0.651j +c_exponential = -2.0 +nb_iterations = 56 +window_size = 2.0 +nb_pixels = 666 + + +def eval_exponential(c_parameter: complex, z_values: numpy.ndarray) -> numpy.ndarray: + """ + Evaluate $e^z + c$. + >>> eval_exponential(0, 0) + 1.0 + >>> abs(eval_exponential(1, numpy.pi*1.j)) < 1e-15 + True + >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15 + True + """ + return numpy.exp(z_values) + c_parameter + + +def eval_quadratic_polynomial( + c_parameter: complex, z_values: numpy.ndarray +) -> numpy.ndarray: + """ + >>> eval_quadratic_polynomial(0, 2) + 4 + >>> eval_quadratic_polynomial(-1, 1) + 0 + >>> round(eval_quadratic_polynomial(1.j, 0).imag) + 1 + >>> round(eval_quadratic_polynomial(1.j, 0).real) + 0 + """ + return z_values * z_values + c_parameter + + +def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray: + """ + Create a grid of complex values of size nb_pixels*nb_pixels with real and + imaginary parts ranging from -window_size to window_size (inclusive). + Returns a numpy array. + + >>> prepare_grid(1,3) + array([[-1.-1.j, -1.+0.j, -1.+1.j], + [ 0.-1.j, 0.+0.j, 0.+1.j], + [ 1.-1.j, 1.+0.j, 1.+1.j]]) + """ + x = numpy.linspace(-window_size, window_size, nb_pixels) + x = x.reshape((nb_pixels, 1)) + y = numpy.linspace(-window_size, window_size, nb_pixels) + y = y.reshape((1, nb_pixels)) + return x + 1.0j * y + + +def iterate_function( + eval_function: Callable[[Any, numpy.ndarray], numpy.ndarray], + function_params: Any, + nb_iterations: int, + z_0: numpy.ndarray, + infinity: float = None, +) -> numpy.ndarray: + """ + Iterate the function "eval_function" exactly nb_iterations times. + The first argument of the function is a parameter which is contained in + function_params. The variable z_0 is an array that contains the initial + values to iterate from. + This function returns the final iterates. + + >>> iterate_function(eval_quadratic_polynomial, 0, 3, numpy.array([0,1,2])).shape + (3,) + >>> numpy.round(iterate_function(eval_quadratic_polynomial, + ... 0, + ... 3, + ... numpy.array([0,1,2]))[0]) + 0j + >>> numpy.round(iterate_function(eval_quadratic_polynomial, + ... 0, + ... 3, + ... numpy.array([0,1,2]))[1]) + (1+0j) + >>> numpy.round(iterate_function(eval_quadratic_polynomial, + ... 0, + ... 3, + ... 
numpy.array([0,1,2]))[2]) + (256+0j) + """ + + z_n = z_0.astype("complex64") + for i in range(nb_iterations): + z_n = eval_function(function_params, z_n) + if infinity is not None: + numpy.nan_to_num(z_n, copy=False, nan=infinity) + z_n[abs(z_n) == numpy.inf] = infinity + return z_n + + +def show_results( + function_label: str, + function_params: Any, + escape_radius: float, + z_final: numpy.ndarray, +) -> None: + """ + Plots of whether the absolute value of z_final is greater than + the value of escape_radius. Adds the function_label and function_params to + the title. + + >>> show_results('80', 0, 1, numpy.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]])) + """ + + abs_z_final = (abs(z_final)).transpose() + abs_z_final[:, :] = abs_z_final[::-1, :] + pyplot.matshow(abs_z_final < escape_radius) + pyplot.title(f"Julia set of ${function_label}$, $c={function_params}$") + pyplot.show() + + +def ignore_overflow_warnings() -> None: + """ + Ignore some overflow and invalid value warnings. + + >>> ignore_overflow_warnings() + """ + warnings.filterwarnings( + "ignore", category=RuntimeWarning, message="overflow encountered in multiply" + ) + warnings.filterwarnings( + "ignore", + category=RuntimeWarning, + message="invalid value encountered in multiply", + ) + warnings.filterwarnings( + "ignore", category=RuntimeWarning, message="overflow encountered in absolute" + ) + warnings.filterwarnings( + "ignore", category=RuntimeWarning, message="overflow encountered in exp" + ) + + +if __name__ == "__main__": + + z_0 = prepare_grid(window_size, nb_pixels) + + ignore_overflow_warnings() # See file header for explanations + + nb_iterations = 24 + escape_radius = 2 * abs(c_cauliflower) + 1 + z_final = iterate_function( + eval_quadratic_polynomial, + c_cauliflower, + nb_iterations, + z_0, + infinity=1.1 * escape_radius, + ) + show_results("z^2+c", c_cauliflower, escape_radius, z_final) + + nb_iterations = 64 + escape_radius = 2 * abs(c_polynomial_1) + 1 + z_final = iterate_function( + eval_quadratic_polynomial, + c_polynomial_1, + nb_iterations, + z_0, + infinity=1.1 * escape_radius, + ) + show_results("z^2+c", c_polynomial_1, escape_radius, z_final) + + nb_iterations = 161 + escape_radius = 2 * abs(c_polynomial_2) + 1 + z_final = iterate_function( + eval_quadratic_polynomial, + c_polynomial_2, + nb_iterations, + z_0, + infinity=1.1 * escape_radius, + ) + show_results("z^2+c", c_polynomial_2, escape_radius, z_final) + + nb_iterations = 12 + escape_radius = 10000.0 + z_final = iterate_function( + eval_exponential, + c_exponential, + nb_iterations, + z_0 + 2, + infinity=1.0e10, + ) + show_results("e^z+c", c_exponential, escape_radius, z_final) From b9f18152b74e7a1b0b60c6c1781580e6228f4ba4 Mon Sep 17 00:00:00 2001 From: "Arghya Sarkar (ASRA)" <67339217+sarkarghya@users.noreply.github.com> Date: Wed, 29 Sep 2021 22:19:42 +0530 Subject: [PATCH 0218/1543] Create check_polygon.py (#4605) * Create check_polygon.py * Update check_polygon.py * Update maths/check_polygon.py * Update check_polygon.py * Update check_polygon.py Co-authored-by: John Law --- maths/check_polygon.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 maths/check_polygon.py diff --git a/maths/check_polygon.py b/maths/check_polygon.py new file mode 100644 index 000000000000..0e771197331f --- /dev/null +++ b/maths/check_polygon.py @@ -0,0 +1,31 @@ +from typing import List + + +def check_polygon(nums: List) -> bool: + """ + Takes list of possible side lengths and determines whether a + two-dimensional polygon with such 
side lengths can exist. + + Returns a boolean value for the < comparison + of the largest side length with sum of the rest. + Wiki: https://en.wikipedia.org/wiki/Triangle_inequality + + >>> check_polygon([6, 10, 5]) + True + >>> check_polygon([3, 7, 13, 2]) + False + >>> check_polygon([]) + Traceback (most recent call last): + ... + ValueError: List is invalid + """ + if not nums: + raise ValueError("List is invalid") + nums.sort() + return nums.pop() < sum(nums) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d1e70cfa3a56914f03a1be7d92447197b9a6a5dc Mon Sep 17 00:00:00 2001 From: ss1208 <87578327+ss1208@users.noreply.github.com> Date: Wed, 29 Sep 2021 23:34:35 +0530 Subject: [PATCH 0219/1543] docs: renovate README (#4620) Conjunctive adverbs should be followed by a comma. For more details, kindly refer: https://www.aje.com/arc/editing-tip-commas-conjunctive-adverbs/ Separate out the labels into two rows Co-authored-by: John Law Co-authored-by: Dhruv Manilawala --- README.md | 77 +++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 52 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index 1e85ed0daa7c..0298d46020ac 100644 --- a/README.md +++ b/README.md @@ -1,28 +1,55 @@ -# The Algorithms - Python -[![Gitpod Ready-to-Code](https://img.shields.io/badge/Gitpod-Ready--to--Code-blue?logo=gitpod&style=flat-square)](https://gitpod.io/#https://github.com/TheAlgorithms/Python)  -[![Discord chat](https://img.shields.io/discord/808045925556682782.svg?logo=discord&colorB=7289DA&style=flat-square)](https://discord.gg/c7MnfGFGa6)  -[![Gitter chat](https://img.shields.io/badge/Chat-Gitter-ff69b4.svg?label=Chat&logo=gitter&style=flat-square)](https://gitter.im/TheAlgorithms)  -[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/TheAlgorithms/Python/build?label=CI&logo=github&style=flat-square)](https://github.com/TheAlgorithms/Python/actions)  -[![LGTM](https://img.shields.io/lgtm/alerts/github/TheAlgorithms/Python.svg?label=LGTM&logo=LGTM&style=flat-square)](https://lgtm.com/projects/g/TheAlgorithms/Python/alerts)  -[![contributions welcome](https://img.shields.io/static/v1.svg?label=Contributions&message=Welcome&color=0059b3&style=flat-square)](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md)  -[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg?logo=paypal&style=flat-square)](https://www.paypal.me/TheAlgorithms/100)  -![](https://img.shields.io/github/repo-size/TheAlgorithms/Python.svg?label=Repo%20size&style=flat-square)  -[![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white&style=flat-square)](https://github.com/pre-commit/pre-commit)  -[![code style: black](https://img.shields.io/static/v1?label=code%20style&message=black&color=black&style=flat-square)](https://github.com/psf/black)  - - -### All algorithms implemented in Python (for education) - -These implementations are for learning purposes only. Therefore they may be less efficient than the implementations in the Python standard library. - -## Contribution Guidelines - -Read our [Contribution Guidelines](CONTRIBUTING.md) before you contribute. - -## Community Channel - -We're on [Gitter](https://gitter.im/TheAlgorithms)! Please join us. +
[Centered HTML header block; the markup was lost in this extract. Recoverable content: the title
 "The Algorithms - Python"; a first row of badges (Gitpod Ready-to-Code, Contributions Welcome,
 Donate, Discord chat, Gitter chat); a second row of badges (GitHub Workflow Status, LGTM,
 pre-commit, code style: black); and the tagline
 "All algorithms implemented in Python - for education".]
+ +Implementations are for learning purposes only. As they may be less efficient than the implementations in the Python standard library, use them at your discretion. + +## Getting Started + +Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribute. + +## Community Channels + +We're on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms)! Community channels are great for you to ask questions and get help. Please join us! ## List of Algorithms -See our [directory](DIRECTORY.md). +See our [directory](DIRECTORY.md) for easier navigation and better overview of the project. From 6341f351aab0ff510fcf1d9ce135be680763a971 Mon Sep 17 00:00:00 2001 From: DukicDev Date: Fri, 1 Oct 2021 23:48:47 +0200 Subject: [PATCH 0220/1543] Fix comments in backtracking/coloring.py (#4857) --- backtracking/coloring.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/backtracking/coloring.py b/backtracking/coloring.py index 8bda4b5871df..9d539de8a3c4 100644 --- a/backtracking/coloring.py +++ b/backtracking/coloring.py @@ -1,7 +1,7 @@ """ Graph Coloring also called "m coloring problem" - consists of coloring given graph with at most m colors - such that no adjacent vertices are assigned same color + consists of coloring a given graph with at most m colors + such that no adjacent vertices are assigned the same color Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring """ @@ -11,9 +11,9 @@ def valid_coloring( neighbours: list[int], colored_vertices: list[int], color: int ) -> bool: """ - For each neighbour check if coloring constraint is satisfied + For each neighbour check if the coloring constraint is satisfied If any of the neighbours fail the constraint return False - If all neighbours validate constraint return True + If all neighbours validate the constraint return True >>> neighbours = [0,1,0,1,0] >>> colored_vertices = [0, 2, 1, 2, 0] @@ -41,14 +41,14 @@ def util_color( Base Case: 1. Check if coloring is complete - 1.1 If complete return True (meaning that we successfully colored graph) + 1.1 If complete return True (meaning that we successfully colored the graph) Recursive Step: - 2. Itterates over each color: - Check if current coloring is valid: + 2. Iterates over each color: + Check if the current coloring is valid: 2.1. Color given vertex - 2.2. Do recursive call check if this coloring leads to solving problem - 2.4. if current coloring leads to solution return + 2.2. Do recursive call, check if this coloring leads to a solution + 2.4. if current coloring leads to a solution return 2.5. Uncolor given vertex >>> graph = [[0, 1, 0, 0, 0], From 31b34af9fa7e09b4832af36071be86df37959e0d Mon Sep 17 00:00:00 2001 From: DukicDev Date: Sat, 2 Oct 2021 15:37:28 +0200 Subject: [PATCH 0221/1543] Correct grammar in backtracking/n_queens_math.py (#4869) --- backtracking/n_queens_math.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/backtracking/n_queens_math.py b/backtracking/n_queens_math.py index c12aa6c3387d..2de784ded06b 100644 --- a/backtracking/n_queens_math.py +++ b/backtracking/n_queens_math.py @@ -1,7 +1,7 @@ r""" Problem: -The n queens problem is of placing N queens on a N * N chess board such that no queen +The n queens problem is: placing N queens on a N * N chess board such that no queen can attack any other queens placed on that chess board. This means that one queen cannot have any other queen on its horizontal, vertical and diagonal lines. 
@@ -31,7 +31,7 @@ other we know that at least the queens can't attack each other in horizontal and vertical. -At this point we have that halfway completed and we will treat the chessboard as a +At this point we have it halfway completed and we will treat the chessboard as a Cartesian plane. Hereinafter we are going to remember basic math, so in the school we learned this formula: @@ -47,7 +47,7 @@ See:: https://www.enotes.com/homework-help/write-equation-line-that-hits-origin-45-degree-1474860 -Then we have this another formula: +Then we have this other formula: Slope intercept: @@ -59,7 +59,7 @@ y - mx = b -And like we already have the m values for the angles 45º and 135º, this formula would +And since we already have the m values for the angles 45º and 135º, this formula would look like this: 45º: y - (1)x = b @@ -71,7 +71,7 @@ y = row x = column -Applying this two formulas we can check if a queen in some position is being attacked +Applying these two formulas we can check if a queen in some position is being attacked for another one or vice versa. """ From c873fa0b1bab5dcfeffb386dddd6a755ef2aa0f9 Mon Sep 17 00:00:00 2001 From: DukicDev Date: Sat, 2 Oct 2021 15:51:53 +0200 Subject: [PATCH 0222/1543] Correct grammar of comment in backtracking/hamiltonian_cycle.py (#4868) --- backtracking/hamiltonian_cycle.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py index 19751b347320..500e993e5c8b 100644 --- a/backtracking/hamiltonian_cycle.py +++ b/backtracking/hamiltonian_cycle.py @@ -15,8 +15,8 @@ def valid_connection( Checks whether it is possible to add next into path by validating 2 statements 1. There should be path between current and next vertex 2. Next vertex should not be in path - If both validations succeeds we return True saying that it is possible to connect - this vertices either we return False + If both validations succeed we return True, saying that it is possible to connect + this vertices, otherwise we return False Case 1:Use exact graph as in main function, with initialized values >>> graph = [[0, 1, 0, 1, 0], From d530d2bcf42391a8172c720e8d7c7d354a748abf Mon Sep 17 00:00:00 2001 From: Nolan Emirot Date: Sun, 3 Oct 2021 20:33:42 -0700 Subject: [PATCH 0223/1543] fix: comment in patience sort (#4972) --- sorts/patience_sort.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sorts/patience_sort.py b/sorts/patience_sort.py index 845db517420b..63c2c8ffe99c 100644 --- a/sorts/patience_sort.py +++ b/sorts/patience_sort.py @@ -29,7 +29,7 @@ def __eq__(self, other): def patience_sort(collection: list) -> list: - """A pure implementation of quick sort algorithm in Python + """A pure implementation of patience sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside From 90db98304e06a60a3c578e9f55fe139524a4c3f8 Mon Sep 17 00:00:00 2001 From: Sarvesh Kumar Dwivedi Date: Mon, 4 Oct 2021 09:37:58 +0530 Subject: [PATCH 0224/1543] Fix word typos in comments (#4928) * fixed: spelling nonegative -> non-negative * fixed: spelling transpostiion -> transposition * fixed: spelling topolical -> topological * fixed: spelling sufix -> suffix --- sorts/bead_sort.py | 2 +- sorts/odd_even_transposition_single_threaded.py | 2 +- sorts/topological_sort.py | 2 +- strings/prefix_function.py | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/sorts/bead_sort.py b/sorts/bead_sort.py index 3767e842d8c2..26a3fabc4807 100644 
--- a/sorts/bead_sort.py +++ b/sorts/bead_sort.py @@ -1,5 +1,5 @@ """ -Bead sort only works for sequences of nonegative integers. +Bead sort only works for sequences of non-negative integers. https://en.wikipedia.org/wiki/Bead_sort """ diff --git a/sorts/odd_even_transposition_single_threaded.py b/sorts/odd_even_transposition_single_threaded.py index fe06459e8dd1..f6cf7fba2a71 100644 --- a/sorts/odd_even_transposition_single_threaded.py +++ b/sorts/odd_even_transposition_single_threaded.py @@ -1,7 +1,7 @@ """ Source: https://en.wikipedia.org/wiki/Odd%E2%80%93even_sort -This is a non-parallelized implementation of odd-even transpostiion sort. +This is a non-parallelized implementation of odd-even transposition sort. Normally the swaps in each set happen simultaneously, without that the algorithm is no better than bubble sort. diff --git a/sorts/topological_sort.py b/sorts/topological_sort.py index e7a52f7c7714..59a0c8571b53 100644 --- a/sorts/topological_sort.py +++ b/sorts/topological_sort.py @@ -10,7 +10,7 @@ def topological_sort(start, visited, sort): - """Perform topolical sort on a directed acyclic graph.""" + """Perform topological sort on a directed acyclic graph.""" current = start # add current to visited visited.append(current) diff --git a/strings/prefix_function.py b/strings/prefix_function.py index 9e6dbbf5408f..6eca01635fe3 100644 --- a/strings/prefix_function.py +++ b/strings/prefix_function.py @@ -14,7 +14,7 @@ def prefix_function(input_string: str) -> list: """ For the given string this function computes value for each index(i), - which represents the longest coincidence of prefix and sufix + which represents the longest coincidence of prefix and suffix for given substring (input_str[0...i]) For the value of the first element the algorithm always returns 0 @@ -45,7 +45,7 @@ def prefix_function(input_string: str) -> list: def longest_prefix(input_str: str) -> int: """ Prefix-function use case - Finding longest prefix which is sufix as well + Finding longest prefix which is suffix as well >>> longest_prefix("aabcdaabc") 4 From a4d68d69f17f08f71ec81e5c5b681e1213ec8f7d Mon Sep 17 00:00:00 2001 From: Lewis Tian Date: Wed, 6 Oct 2021 22:06:49 +0800 Subject: [PATCH 0225/1543] bugfix: Add abs_max.py & abs_min.py empty list detection (#4844) * bugfix: Add abs_max.py & abs_min.py empty list detection * fix shebangs check --- maths/abs_max.py | 17 ++++++++++++++++- maths/abs_min.py | 19 +++++++++++++++---- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/maths/abs_max.py b/maths/abs_max.py index e5a8219657ac..4a4b4d9ebca3 100644 --- a/maths/abs_max.py +++ b/maths/abs_max.py @@ -7,7 +7,13 @@ def abs_max(x: list[int]) -> int: 11 >>> abs_max([3,-10,-2]) -10 + >>> abs_max([]) + Traceback (most recent call last): + ... + ValueError: abs_max() arg is an empty sequence """ + if len(x) == 0: + raise ValueError("abs_max() arg is an empty sequence") j = x[0] for i in x: if abs(i) > abs(j): @@ -15,13 +21,19 @@ def abs_max(x: list[int]) -> int: return j -def abs_max_sort(x): +def abs_max_sort(x: list[int]) -> int: """ >>> abs_max_sort([0,5,1,11]) 11 >>> abs_max_sort([3,-10,-2]) -10 + >>> abs_max_sort([]) + Traceback (most recent call last): + ... 
+ ValueError: abs_max_sort() arg is an empty sequence """ + if len(x) == 0: + raise ValueError("abs_max_sort() arg is an empty sequence") return sorted(x, key=abs)[-1] @@ -32,4 +44,7 @@ def main(): if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) main() diff --git a/maths/abs_min.py b/maths/abs_min.py index eb84de37ce23..00dbcb025cfb 100644 --- a/maths/abs_min.py +++ b/maths/abs_min.py @@ -1,13 +1,21 @@ +from __future__ import annotations + from .abs import abs_val -def absMin(x): +def abs_min(x: list[int]) -> int: """ - >>> absMin([0,5,1,11]) + >>> abs_min([0,5,1,11]) 0 - >>> absMin([3,-10,-2]) + >>> abs_min([3,-10,-2]) -2 + >>> abs_min([]) + Traceback (most recent call last): + ... + ValueError: abs_min() arg is an empty sequence """ + if len(x) == 0: + raise ValueError("abs_min() arg is an empty sequence") j = x[0] for i in x: if abs_val(i) < abs_val(j): @@ -17,8 +25,11 @@ def absMin(x): def main(): a = [-3, -1, 2, -11] - print(absMin(a)) # = -1 + print(abs_min(a)) # = -1 if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) main() From 629369a34fdcaee70392d1fcd8cbcae5418c350b Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 6 Oct 2021 17:11:15 +0300 Subject: [PATCH 0226/1543] Improve Project Euler problem 203 solution 1 (#4807) --- project_euler/problem_203/sol1.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/project_euler/problem_203/sol1.py b/project_euler/problem_203/sol1.py index 030cf12f2a85..fe4d14b20c92 100644 --- a/project_euler/problem_203/sol1.py +++ b/project_euler/problem_203/sol1.py @@ -75,17 +75,15 @@ def get_primes_squared(max_number: int) -> list[int]: >>> get_primes_squared(100) [4, 9, 25, 49] """ - max_prime = round(math.sqrt(max_number)) - non_primes = set() + max_prime = math.isqrt(max_number) + non_primes = [False] * (max_prime + 1) primes = [] for num in range(2, max_prime + 1): - if num in non_primes: + if non_primes[num]: continue - counter = 2 - while num * counter <= max_prime: - non_primes.add(num * counter) - counter += 1 + for num_counter in range(num ** 2, max_prime + 1, num): + non_primes[num_counter] = True primes.append(num ** 2) return primes From d654806eae5dc6027911424ea828e566a64641fd Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 6 Oct 2021 17:11:50 +0300 Subject: [PATCH 0227/1543] Improve Project Euler problem 112 solution 1 (#4808) --- project_euler/problem_112/sol1.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/project_euler/problem_112/sol1.py b/project_euler/problem_112/sol1.py index d8cb334c9508..b3ea6b35654a 100644 --- a/project_euler/problem_112/sol1.py +++ b/project_euler/problem_112/sol1.py @@ -47,7 +47,9 @@ def check_bouncy(n: int) -> bool: """ if not isinstance(n, int): raise ValueError("check_bouncy() accepts only integer arguments") - return "".join(sorted(str(n))) != str(n) and "".join(sorted(str(n)))[::-1] != str(n) + str_n = str(n) + sorted_str_n = "".join(sorted(str_n)) + return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def solution(percent: float = 99) -> int: From d324f91fe75cc859335ee1f7c9c6307d958d0558 Mon Sep 17 00:00:00 2001 From: Parth Satodiya Date: Thu, 7 Oct 2021 20:48:23 +0530 Subject: [PATCH 0228/1543] Fix mypy errors for data_structures->linked_list directory files (#4927) --- data_structures/linked_list/__init__.py | 11 +++++++---- .../linked_list/circular_linked_list.py | 14 +++++++------- data_structures/linked_list/has_loop.py | 6 +++--- 
.../linked_list/middle_element_of_linked_list.py | 8 ++++++-- data_structures/linked_list/skip_list.py | 6 +++--- 5 files changed, 26 insertions(+), 19 deletions(-) diff --git a/data_structures/linked_list/__init__.py b/data_structures/linked_list/__init__.py index a5f5537b1d96..8ae171d71035 100644 --- a/data_structures/linked_list/__init__.py +++ b/data_structures/linked_list/__init__.py @@ -6,7 +6,7 @@ - Last node: points to null """ -from typing import Any +from typing import Any, Optional class Node: @@ -17,7 +17,7 @@ def __init__(self, item: Any, next: Any) -> None: class LinkedList: def __init__(self) -> None: - self.head = None + self.head: Optional[Node] = None self.size = 0 def add(self, item: Any) -> None: @@ -25,7 +25,10 @@ def add(self, item: Any) -> None: self.size += 1 def remove(self) -> Any: - if self.is_empty(): + # Switched 'self.is_empty()' to 'self.head is None' + # because mypy was considering the possibility that 'self.head' + # can be None in below else part and giving error + if self.head is None: return None else: item = self.head.item @@ -50,7 +53,7 @@ def __str__(self) -> str: else: iterate = self.head item_str = "" - item_list = [] + item_list: list[str] = [] while iterate: item_list.append(str(iterate.item)) iterate = iterate.next diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index f67c1e8f2cf7..42794ba793a7 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -1,10 +1,10 @@ -from typing import Any +from typing import Any, Iterator, Optional class Node: def __init__(self, data: Any): - self.data = data - self.next = None + self.data: Any = data + self.next: Optional[Node] = None class CircularLinkedList: @@ -12,7 +12,7 @@ def __init__(self): self.head = None self.tail = None - def __iter__(self): + def __iter__(self) -> Iterator[Any]: node = self.head while self.head: yield node.data @@ -54,10 +54,10 @@ def insert_nth(self, index: int, data: Any) -> None: def delete_front(self): return self.delete_nth(0) - def delete_tail(self) -> None: + def delete_tail(self) -> Any: return self.delete_nth(len(self) - 1) - def delete_nth(self, index: int = 0): + def delete_nth(self, index: int = 0) -> Any: if not 0 <= index < len(self): raise IndexError("list index out of range.") delete_node = self.head @@ -76,7 +76,7 @@ def delete_nth(self, index: int = 0): self.tail = temp return delete_node.data - def is_empty(self): + def is_empty(self) -> bool: return len(self) == 0 diff --git a/data_structures/linked_list/has_loop.py b/data_structures/linked_list/has_loop.py index 405ece7e27c8..a155ab4c7c89 100644 --- a/data_structures/linked_list/has_loop.py +++ b/data_structures/linked_list/has_loop.py @@ -1,4 +1,4 @@ -from typing import Any +from typing import Any, Optional class ContainsLoopError(Exception): @@ -7,8 +7,8 @@ class ContainsLoopError(Exception): class Node: def __init__(self, data: Any) -> None: - self.data = data - self.next_node = None + self.data: Any = data + self.next_node: Optional[Node] = None def __iter__(self): node = self diff --git a/data_structures/linked_list/middle_element_of_linked_list.py b/data_structures/linked_list/middle_element_of_linked_list.py index 185c4ccbbb0a..296696897715 100644 --- a/data_structures/linked_list/middle_element_of_linked_list.py +++ b/data_structures/linked_list/middle_element_of_linked_list.py @@ -1,5 +1,8 @@ +from typing import Optional + + class Node: - def __init__(self, data: 
int) -> int: + def __init__(self, data: int) -> None: self.data = data self.next = None @@ -14,7 +17,7 @@ def push(self, new_data: int) -> int: self.head = new_node return self.head.data - def middle_element(self) -> int: + def middle_element(self) -> Optional[int]: """ >>> link = LinkedList() >>> link.middle_element() @@ -54,6 +57,7 @@ def middle_element(self) -> int: return slow_pointer.data else: print("No element found.") + return None if __name__ == "__main__": diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index ee0b4460730c..be30592ec77d 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -5,14 +5,14 @@ from __future__ import annotations from random import random -from typing import Generic, TypeVar +from typing import Generic, Optional, TypeVar, Union KT = TypeVar("KT") VT = TypeVar("VT") class Node(Generic[KT, VT]): - def __init__(self, key: KT, value: VT): + def __init__(self, key: Union[KT, str] = "root", value: Optional[VT] = None): self.key = key self.value = value self.forward: list[Node[KT, VT]] = [] @@ -49,7 +49,7 @@ def level(self) -> int: class SkipList(Generic[KT, VT]): def __init__(self, p: float = 0.5, max_level: int = 16): - self.head = Node("root", None) + self.head: Node[KT, VT] = Node[KT, VT]() self.level = 0 self.p = p self.max_level = max_level From 77b243e62b09cdf6201916af6762b03c54d8f77a Mon Sep 17 00:00:00 2001 From: Lewis Tian Date: Thu, 7 Oct 2021 23:20:32 +0800 Subject: [PATCH 0229/1543] bugfix: Add empty list detection for find_max/min (#4881) * bugfix: Add empty list detection for find_max/min * fix shebangs check --- maths/find_max.py | 20 ++++++++++++------- maths/find_max_recursion.py | 39 ++++++++++++++++++++++++++++++++++--- maths/find_min.py | 21 ++++++++++++++------ maths/find_min_recursion.py | 39 ++++++++++++++++++++++++++++++++++--- 4 files changed, 100 insertions(+), 19 deletions(-) diff --git a/maths/find_max.py b/maths/find_max.py index 4d92e37eb2e1..684fbe8161e8 100644 --- a/maths/find_max.py +++ b/maths/find_max.py @@ -1,7 +1,7 @@ -# NguyenU +from __future__ import annotations -def find_max(nums): +def find_max(nums: list[int | float]) -> int | float: """ >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]): ... find_max(nums) == max(nums) @@ -9,7 +9,15 @@ def find_max(nums): True True True + >>> find_max([2, 4, 9, 7, 19, 94, 5]) + 94 + >>> find_max([]) + Traceback (most recent call last): + ... + ValueError: find_max() arg is an empty sequence """ + if len(nums) == 0: + raise ValueError("find_max() arg is an empty sequence") max_num = nums[0] for x in nums: if x > max_num: @@ -17,9 +25,7 @@ def find_max(nums): return max_num -def main(): - print(find_max([2, 4, 9, 7, 19, 94, 5])) # 94 - - if __name__ == "__main__": - main() + import doctest + + doctest.testmod(verbose=True) diff --git a/maths/find_max_recursion.py b/maths/find_max_recursion.py index 03fb81950dcb..629932e0818f 100644 --- a/maths/find_max_recursion.py +++ b/maths/find_max_recursion.py @@ -1,5 +1,8 @@ +from __future__ import annotations + + # Divide and Conquer algorithm -def find_max(nums, left, right): +def find_max(nums: list[int | float], left: int, right: int) -> int | float: """ find max value in list :param nums: contains elements @@ -7,10 +10,39 @@ def find_max(nums, left, right): :param right: index of last element :return: max in nums + >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]): + ... 
find_max(nums, 0, len(nums) - 1) == max(nums) + True + True + True + True >>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10] >>> find_max(nums, 0, len(nums) - 1) == max(nums) True + >>> find_max([], 0, 0) + Traceback (most recent call last): + ... + ValueError: find_max() arg is an empty sequence + >>> find_max(nums, 0, len(nums)) == max(nums) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> find_max(nums, -len(nums), -1) == max(nums) + True + >>> find_max(nums, -len(nums) - 1, -1) == max(nums) + Traceback (most recent call last): + ... + IndexError: list index out of range """ + if len(nums) == 0: + raise ValueError("find_max() arg is an empty sequence") + if ( + left >= len(nums) + or left < -len(nums) + or right >= len(nums) + or right < -len(nums) + ): + raise IndexError("list index out of range") if left == right: return nums[left] mid = (left + right) >> 1 # the middle @@ -21,5 +53,6 @@ def find_max(nums, left, right): if __name__ == "__main__": - nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10] - assert find_max(nums, 0, len(nums) - 1) == 10 + import doctest + + doctest.testmod(verbose=True) diff --git a/maths/find_min.py b/maths/find_min.py index 2af2e44ba353..228205ed7feb 100644 --- a/maths/find_min.py +++ b/maths/find_min.py @@ -1,4 +1,7 @@ -def find_min(nums): +from __future__ import annotations + + +def find_min(nums: list[int | float]) -> int | float: """ Find Minimum Number in a List :param nums: contains elements @@ -10,7 +13,15 @@ def find_min(nums): True True True + >>> find_min([0, 1, 2, 3, 4, 5, -3, 24, -56]) + -56 + >>> find_min([]) + Traceback (most recent call last): + ... + ValueError: find_min() arg is an empty sequence """ + if len(nums) == 0: + raise ValueError("find_min() arg is an empty sequence") min_num = nums[0] for num in nums: if min_num > num: @@ -18,9 +29,7 @@ def find_min(nums): return min_num -def main(): - assert find_min([0, 1, 2, 3, 4, 5, -3, 24, -56]) == -56 - - if __name__ == "__main__": - main() + import doctest + + doctest.testmod(verbose=True) diff --git a/maths/find_min_recursion.py b/maths/find_min_recursion.py index 4488967cc57a..4d11015efcd5 100644 --- a/maths/find_min_recursion.py +++ b/maths/find_min_recursion.py @@ -1,5 +1,8 @@ +from __future__ import annotations + + # Divide and Conquer algorithm -def find_min(nums, left, right): +def find_min(nums: list[int | float], left: int, right: int) -> int | float: """ find min value in list :param nums: contains elements @@ -7,10 +10,39 @@ def find_min(nums, left, right): :param right: index of last element :return: min in nums + >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]): + ... find_min(nums, 0, len(nums) - 1) == min(nums) + True + True + True + True >>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10] >>> find_min(nums, 0, len(nums) - 1) == min(nums) True + >>> find_min([], 0, 0) + Traceback (most recent call last): + ... + ValueError: find_min() arg is an empty sequence + >>> find_min(nums, 0, len(nums)) == min(nums) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> find_min(nums, -len(nums), -1) == min(nums) + True + >>> find_min(nums, -len(nums) - 1, -1) == min(nums) + Traceback (most recent call last): + ... 
+ IndexError: list index out of range """ + if len(nums) == 0: + raise ValueError("find_min() arg is an empty sequence") + if ( + left >= len(nums) + or left < -len(nums) + or right >= len(nums) + or right < -len(nums) + ): + raise IndexError("list index out of range") if left == right: return nums[left] mid = (left + right) >> 1 # the middle @@ -21,5 +53,6 @@ def find_min(nums, left, right): if __name__ == "__main__": - nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10] - assert find_min(nums, 0, len(nums) - 1) == 1 + import doctest + + doctest.testmod(verbose=True) From 7578e0b920831c0ee3274cbfdaa6dc2b6a82cc97 Mon Sep 17 00:00:00 2001 From: Rohanrbharadwaj <89947037+Rohanrbharadwaj@users.noreply.github.com> Date: Sun, 10 Oct 2021 23:22:38 +0530 Subject: [PATCH 0230/1543] Used in-built method (#5183) * Used in-built method * Delete swap_case.py Co-authored-by: Christian Clauss --- strings/swap_case.py | 37 ------------------------------------- 1 file changed, 37 deletions(-) delete mode 100644 strings/swap_case.py diff --git a/strings/swap_case.py b/strings/swap_case.py deleted file mode 100644 index 107fda4b52ec..000000000000 --- a/strings/swap_case.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -This algorithm helps you to swap cases. - -User will give input and then program will perform swap cases. - -In other words, convert all lowercase letters to uppercase letters and vice versa. -For example: -1. Please input sentence: Algorithm.Python@89 - aLGORITHM.pYTHON@89 -2. Please input sentence: github.com/mayur200 - GITHUB.COM/MAYUR200 - -""" - - -def swap_case(sentence: str) -> str: - """ - This function will convert all lowercase letters to uppercase letters - and vice versa. - - >>> swap_case('Algorithm.Python@89') - 'aLGORITHM.pYTHON@89' - """ - new_string = "" - for char in sentence: - if char.isupper(): - new_string += char.lower() - elif char.islower(): - new_string += char.upper() - else: - new_string += char - - return new_string - - -if __name__ == "__main__": - print(swap_case(input("Please input sentence: "))) From 97562c19f8f1f079714b393b1b2afa895e930916 Mon Sep 17 00:00:00 2001 From: Rohanrbharadwaj <89947037+Rohanrbharadwaj@users.noreply.github.com> Date: Sun, 10 Oct 2021 23:30:04 +0530 Subject: [PATCH 0231/1543] Added doctest (#5182) --- strings/indian_phone_validator.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/strings/indian_phone_validator.py b/strings/indian_phone_validator.py index d544e92661b1..7f3fda5db949 100644 --- a/strings/indian_phone_validator.py +++ b/strings/indian_phone_validator.py @@ -16,6 +16,8 @@ def indian_phone_validator(phone: str) -> bool: True >>> indian_phone_validator("+91-1234567899") False + >>> indian_phone_validator("+91-9876543218") + True """ pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$") match = re.search(pat, phone) From 729b4d875a07bd15bc6e5e8a3c79f16fa1e003e6 Mon Sep 17 00:00:00 2001 From: Jordan Rinder Date: Sun, 10 Oct 2021 14:02:44 -0400 Subject: [PATCH 0232/1543] Add Sylvester's sequence to maths (#5171) * Add Sylvester's sequence to maths * Update sylvester_sequence.py Co-authored-by: Christian Clauss --- maths/sylvester_sequence.py | 43 +++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 maths/sylvester_sequence.py diff --git a/maths/sylvester_sequence.py b/maths/sylvester_sequence.py new file mode 100644 index 000000000000..0cd99affe046 --- /dev/null +++ b/maths/sylvester_sequence.py @@ -0,0 +1,43 @@ +""" + +Calculates the nth number in Sylvester's sequence + +Source: + 
https://en.wikipedia.org/wiki/Sylvester%27s_sequence + +""" + + +def sylvester(number: int) -> int: + """ + :param number: nth number to calculate in the sequence + :return: the nth number in Sylvester's sequence + + >>> sylvester(8) + 113423713055421844361000443 + + >>> sylvester(-1) + Traceback (most recent call last): + ... + ValueError: The input value of [n=-1] has to be > 0 + + >>> sylvester(8.0) + Traceback (most recent call last): + ... + AssertionError: The input value of [n=8.0] is not an integer + """ + assert isinstance(number, int), f"The input value of [n={number}] is not an integer" + + if number == 1: + return 2 + elif number < 1: + raise ValueError(f"The input value of [n={number}] has to be > 0") + else: + num = sylvester(number - 1) + lower = num - 1 + upper = num + return lower * upper + 1 + + +if __name__ == "__main__": + print(f"The 8th number in Sylvester's sequence: {sylvester(8)}") From fadb97609f7b84f83a47fcf8a253145562469b23 Mon Sep 17 00:00:00 2001 From: Sidhaant Thakker <59668364+SidhaantThakker@users.noreply.github.com> Date: Mon, 11 Oct 2021 21:59:52 +0530 Subject: [PATCH 0233/1543] Add carrier concentrations calculation algorithm (#4791) * added carrier concentrations algorithm * Add more references Added more references to the carrier concentrations file * Update electronics/carrier_concentration.py Co-authored-by: John Law * Update electronics/carrier_concentration.py Co-authored-by: John Law Co-authored-by: John Law --- electronics/carrier_concentration.py | 75 ++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 electronics/carrier_concentration.py diff --git a/electronics/carrier_concentration.py b/electronics/carrier_concentration.py new file mode 100644 index 000000000000..87bcad8df398 --- /dev/null +++ b/electronics/carrier_concentration.py @@ -0,0 +1,75 @@ +# https://en.wikipedia.org/wiki/Charge_carrier_density +# https://www.pveducation.org/pvcdrom/pn-junctions/equilibrium-carrier-concentration +# http://www.ece.utep.edu/courses/ee3329/ee3329/Studyguide/ToC/Fundamentals/Carriers/concentrations.html + +from __future__ import annotations + + +def carrier_concentration( + electron_conc: float, + hole_conc: float, + intrinsic_conc: float, +) -> tuple: + """ + This function can calculate any one of the three - + 1. Electron Concentration + 2, Hole Concentration + 3. Intrinsic Concentration + given the other two. 
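One clarifying aside, inferred from the formulas in the function body rather than stated in the patch: every branch solves the semiconductor mass-action law intrinsic_conc**2 == electron_conc * hole_conc for the missing quantity. A minimal check with values consistent with the first doctest below:

# Mass-action law relating the three concentrations (illustrative values only).
electron_conc, hole_conc, intrinsic_conc = 25.0, 100.0, 50.0
assert intrinsic_conc ** 2 == electron_conc * hole_conc      # 2500 == 2500
assert intrinsic_conc ** 2 / hole_conc == electron_conc      # how electron_conc is recovered
assert (electron_conc * hole_conc) ** 0.5 == intrinsic_conc  # how intrinsic_conc is recovered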
+ Examples - + >>> carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) + ('intrinsic_conc', 50.0) + >>> carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200) + ('electron_conc', 25.0) + >>> carrier_concentration(electron_conc=1000, hole_conc=0, intrinsic_conc=1200) + ('hole_conc', 1440.0) + >>> carrier_concentration(electron_conc=1000, hole_conc=400, intrinsic_conc=1200) + Traceback (most recent call last): + File "", line 37, in + ValueError: You cannot supply more or less than 2 values + >>> carrier_concentration(electron_conc=-1000, hole_conc=0, intrinsic_conc=1200) + Traceback (most recent call last): + File "", line 40, in + ValueError: Electron concentration cannot be negative in a semiconductor + >>> carrier_concentration(electron_conc=0, hole_conc=-400, intrinsic_conc=1200) + Traceback (most recent call last): + File "", line 44, in + ValueError: Hole concentration cannot be negative in a semiconductor + >>> carrier_concentration(electron_conc=0, hole_conc=400, intrinsic_conc=-1200) + Traceback (most recent call last): + File "", line 48, in + ValueError: Intrinsic concentration cannot be negative in a semiconductor + """ + if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1: + raise ValueError("You cannot supply more or less than 2 values") + elif electron_conc < 0: + raise ValueError("Electron concentration cannot be negative in a semiconductor") + elif hole_conc < 0: + raise ValueError("Hole concentration cannot be negative in a semiconductor") + elif intrinsic_conc < 0: + raise ValueError( + "Intrinsic concentration cannot be negative in a semiconductor" + ) + elif electron_conc == 0: + return ( + "electron_conc", + intrinsic_conc ** 2 / hole_conc, + ) + elif hole_conc == 0: + return ( + "hole_conc", + intrinsic_conc ** 2 / electron_conc, + ) + elif intrinsic_conc == 0: + return ( + "intrinsic_conc", + (electron_conc * hole_conc) ** 0.5, + ) + else: + return (-1, -1) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From e311b02e704891b2f31a1e2c8fe2df77a032b09b Mon Sep 17 00:00:00 2001 From: Muhammad Hammad Sani <58339378+mhammadsaani@users.noreply.github.com> Date: Mon, 11 Oct 2021 21:33:06 +0500 Subject: [PATCH 0234/1543] Remove unnecessary branch (#4824) * Algorithm Optimized * Update divide_and_conquer/inversions.py Co-authored-by: John Law * Update divide_and_conquer/inversions.py Co-authored-by: John Law * Update divide_and_conquer/inversions.py Co-authored-by: John Law Co-authored-by: John Law --- divide_and_conquer/inversions.py | 36 +++++++++----------------------- 1 file changed, 10 insertions(+), 26 deletions(-) diff --git a/divide_and_conquer/inversions.py b/divide_and_conquer/inversions.py index 9bb656229321..b471456025be 100644 --- a/divide_and_conquer/inversions.py +++ b/divide_and_conquer/inversions.py @@ -2,31 +2,25 @@ Given an array-like data structure A[1..n], how many pairs (i, j) for all 1 <= i < j <= n such that A[i] > A[j]? These pairs are called inversions. Counting the number of such inversions in an array-like -object is the important. Among other things, counting inversions can help -us determine how close a given array is to being sorted - +object is the important. Among other things, counting inversions can help +us determine how close a given array is to being sorted. In this implementation, I provide two algorithms, a divide-and-conquer algorithm which runs in nlogn and the brute-force n^2 algorithm. 
- """ def count_inversions_bf(arr): """ Counts the number of inversions using a a naive brute-force algorithm - Parameters ---------- arr: arr: array-like, the list containing the items for which the number of inversions is desired. The elements of `arr` must be comparable. - Returns ------- num_inversions: The total number of inversions in `arr` - Examples --------- - >>> count_inversions_bf([1, 4, 2, 4, 1]) 4 >>> count_inversions_bf([1, 1, 2, 4, 4]) @@ -49,20 +43,16 @@ def count_inversions_bf(arr): def count_inversions_recursive(arr): """ Counts the number of inversions using a divide-and-conquer algorithm - Parameters ----------- arr: array-like, the list containing the items for which the number of inversions is desired. The elements of `arr` must be comparable. - Returns ------- C: a sorted copy of `arr`. num_inversions: int, the total number of inversions in 'arr' - Examples -------- - >>> count_inversions_recursive([1, 4, 2, 4, 1]) ([1, 1, 2, 4, 4], 4) >>> count_inversions_recursive([1, 1, 2, 4, 4]) @@ -72,40 +62,34 @@ def count_inversions_recursive(arr): """ if len(arr) <= 1: return arr, 0 - else: - mid = len(arr) // 2 - P = arr[0:mid] - Q = arr[mid:] + mid = len(arr) // 2 + P = arr[0:mid] + Q = arr[mid:] - A, inversion_p = count_inversions_recursive(P) - B, inversions_q = count_inversions_recursive(Q) - C, cross_inversions = _count_cross_inversions(A, B) + A, inversion_p = count_inversions_recursive(P) + B, inversions_q = count_inversions_recursive(Q) + C, cross_inversions = _count_cross_inversions(A, B) - num_inversions = inversion_p + inversions_q + cross_inversions - return C, num_inversions + num_inversions = inversion_p + inversions_q + cross_inversions + return C, num_inversions def _count_cross_inversions(P, Q): """ Counts the inversions across two sorted arrays. 
And combine the two arrays into one sorted array - For all 1<= i<=len(P) and for all 1 <= j <= len(Q), if P[i] > Q[j], then (i, j) is a cross inversion - Parameters ---------- P: array-like, sorted in non-decreasing order Q: array-like, sorted in non-decreasing order - Returns ------ R: array-like, a sorted array of the elements of `P` and `Q` num_inversion: int, the number of inversions across `P` and `Q` - Examples -------- - >>> _count_cross_inversions([1, 2, 3], [0, 2, 5]) ([0, 1, 2, 2, 3, 5], 4) >>> _count_cross_inversions([1, 2, 3], [3, 4, 5]) From bcfca67faa120cc4cb42775af302d6d52d3a3f1e Mon Sep 17 00:00:00 2001 From: Joyce Date: Tue, 12 Oct 2021 00:33:44 +0800 Subject: [PATCH 0235/1543] [mypy] fix type annotations for all Project Euler problems (#4747) * [mypy] fix type annotations for problem003/sol1 and problem003/sol3 * [mypy] fix type annotations for project euler problem007/sol2 * [mypy] fix type annotations for project euler problem008/sol2 * [mypy] fix type annotations for project euler problem009/sol1 * [mypy] fix type annotations for project euler problem014/sol1 * [mypy] fix type annotations for project euler problem 025/sol2 * [mypy] fix type annotations for project euler problem026/sol1.py * [mypy] fix type annotations for project euler problem037/sol1 * [mypy] fix type annotations for project euler problem044/sol1 * [mypy] fix type annotations for project euler problem046/sol1 * [mypy] fix type annotations for project euler problem051/sol1 * [mypy] fix type annotations for project euler problem074/sol2 * [mypy] fix type annotations for project euler problem080/sol1 * [mypy] fix type annotations for project euler problem099/sol1 * [mypy] fix type annotations for project euler problem101/sol1 * [mypy] fix type annotations for project euler problem188/sol1 * [mypy] fix type annotations for project euler problem191/sol1 * [mypy] fix type annotations for project euler problem207/sol1 * [mypy] fix type annotations for project euler problem551/sol1 --- project_euler/problem_003/sol1.py | 4 ++-- project_euler/problem_003/sol3.py | 2 +- project_euler/problem_007/sol2.py | 2 +- project_euler/problem_008/sol2.py | 4 +++- project_euler/problem_009/sol1.py | 4 ++++ project_euler/problem_014/sol1.py | 2 +- project_euler/problem_025/sol2.py | 3 ++- project_euler/problem_026/sol1.py | 2 +- project_euler/problem_037/sol1.py | 2 +- project_euler/problem_044/sol1.py | 2 ++ project_euler/problem_046/sol1.py | 2 ++ project_euler/problem_051/sol1.py | 8 +++++--- project_euler/problem_074/sol2.py | 4 ++-- project_euler/problem_080/sol1.py | 5 +++-- project_euler/problem_099/sol1.py | 10 ++++++---- project_euler/problem_101/sol1.py | 2 +- project_euler/problem_188/sol1.py | 4 ++-- project_euler/problem_191/sol1.py | 2 +- project_euler/problem_207/sol1.py | 2 +- project_euler/problem_551/sol1.py | 3 ++- 20 files changed, 43 insertions(+), 26 deletions(-) diff --git a/project_euler/problem_003/sol1.py b/project_euler/problem_003/sol1.py index 3441dbf9e0b3..1f329984203a 100644 --- a/project_euler/problem_003/sol1.py +++ b/project_euler/problem_003/sol1.py @@ -92,8 +92,8 @@ def solution(n: int = 600851475143) -> int: return n for i in range(3, int(math.sqrt(n)) + 1, 2): if n % i == 0: - if isprime(n / i): - max_number = n / i + if isprime(n // i): + max_number = n // i break elif isprime(i): max_number = i diff --git a/project_euler/problem_003/sol3.py b/project_euler/problem_003/sol3.py index bc6f1d2f61ca..e13a0eb74ec1 100644 --- a/project_euler/problem_003/sol3.py +++ 
b/project_euler/problem_003/sol3.py @@ -57,7 +57,7 @@ def solution(n: int = 600851475143) -> int: i += 1 ans = i while n % i == 0: - n = n / i + n = n // i i += 1 return int(ans) diff --git a/project_euler/problem_007/sol2.py b/project_euler/problem_007/sol2.py index b395c631b766..20c2ddf21ab8 100644 --- a/project_euler/problem_007/sol2.py +++ b/project_euler/problem_007/sol2.py @@ -73,7 +73,7 @@ def solution(nth: int = 10001) -> int: raise TypeError("Parameter nth must be int or castable to int.") from None if nth <= 0: raise ValueError("Parameter nth must be greater than or equal to one.") - primes = [] + primes: list[int] = [] num = 2 while len(primes) < nth: if isprime(num): diff --git a/project_euler/problem_008/sol2.py b/project_euler/problem_008/sol2.py index 7f0540263278..889c3a3143c2 100644 --- a/project_euler/problem_008/sol2.py +++ b/project_euler/problem_008/sol2.py @@ -70,7 +70,9 @@ def solution(n: str = N) -> int: """ return max( - reduce(lambda x, y: int(x) * int(y), n[i : i + 13]) for i in range(len(n) - 12) + # mypy cannot properly interpret reduce + int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13])) + for i in range(len(n) - 12) ) diff --git a/project_euler/problem_009/sol1.py b/project_euler/problem_009/sol1.py index c50dfeecfd22..83c88acf1f8b 100644 --- a/project_euler/problem_009/sol1.py +++ b/project_euler/problem_009/sol1.py @@ -36,6 +36,8 @@ def solution() -> int: if (a ** 2) + (b ** 2) == (c ** 2): return a * b * c + return -1 + def solution_fast() -> int: """ @@ -55,6 +57,8 @@ def solution_fast() -> int: if a < b < c and (a ** 2) + (b ** 2) == (c ** 2): return a * b * c + return -1 + def benchmark() -> None: """ diff --git a/project_euler/problem_014/sol1.py b/project_euler/problem_014/sol1.py index 1745ec931e5a..43aa4e726af2 100644 --- a/project_euler/problem_014/sol1.py +++ b/project_euler/problem_014/sol1.py @@ -44,7 +44,7 @@ def solution(n: int = 1000000) -> int: while number > 1: if number % 2 == 0: - number /= 2 + number //= 2 counter += 1 else: number = (3 * number) + 1 diff --git a/project_euler/problem_025/sol2.py b/project_euler/problem_025/sol2.py index ed3b54bb351f..b041afd98c86 100644 --- a/project_euler/problem_025/sol2.py +++ b/project_euler/problem_025/sol2.py @@ -23,9 +23,10 @@ What is the index of the first term in the Fibonacci sequence to contain 1000 digits? 
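For context on this problem, a minimal sketch of how that index can be found; the function name and loop below are an illustration only, not the repository's solution:

def first_index_with_digits(digits: int) -> int:
    # Walk the Fibonacci sequence until a term reaches the requested number of digits.
    a, b, index = 1, 1, 2  # F(1) = F(2) = 1
    while len(str(b)) < digits:
        a, b = b, a + b
        index += 1
    return index

assert first_index_with_digits(3) == 12  # F(12) = 144 is the first three-digit term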
""" +from typing import Generator -def fibonacci_generator() -> int: +def fibonacci_generator() -> Generator[int, None, None]: """ A generator that produces numbers in the Fibonacci sequence diff --git a/project_euler/problem_026/sol1.py b/project_euler/problem_026/sol1.py index 64e0bbfef472..75d48df7910c 100644 --- a/project_euler/problem_026/sol1.py +++ b/project_euler/problem_026/sol1.py @@ -39,7 +39,7 @@ def solution(numerator: int = 1, digit: int = 1000) -> int: longest_list_length = 0 for divide_by_number in range(numerator, digit + 1): - has_been_divided = [] + has_been_divided: list[int] = [] now_divide = numerator for division_cycle in range(1, digit + 1): if now_divide in has_been_divided: diff --git a/project_euler/problem_037/sol1.py b/project_euler/problem_037/sol1.py index 5423aac37c01..0411ad41ba2f 100644 --- a/project_euler/problem_037/sol1.py +++ b/project_euler/problem_037/sol1.py @@ -76,7 +76,7 @@ def compute_truncated_primes(count: int = 11) -> list[int]: >>> compute_truncated_primes(11) [23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797, 739397] """ - list_truncated_primes = [] + list_truncated_primes: list[int] = [] num = 13 while len(list_truncated_primes) != count: if validate(num): diff --git a/project_euler/problem_044/sol1.py b/project_euler/problem_044/sol1.py index d3ae6476d45f..3b75b6a56a8e 100644 --- a/project_euler/problem_044/sol1.py +++ b/project_euler/problem_044/sol1.py @@ -42,6 +42,8 @@ def solution(limit: int = 5000) -> int: if is_pentagonal(a) and is_pentagonal(b): return b + return -1 + if __name__ == "__main__": print(f"{solution() = }") diff --git a/project_euler/problem_046/sol1.py b/project_euler/problem_046/sol1.py index 3fdf567551cc..550c4c7c4268 100644 --- a/project_euler/problem_046/sol1.py +++ b/project_euler/problem_046/sol1.py @@ -85,6 +85,8 @@ def compute_nums(n: int) -> list[int]: if len(list_nums) == n: return list_nums + return [] + def solution() -> int: """Return the solution to the problem""" diff --git a/project_euler/problem_051/sol1.py b/project_euler/problem_051/sol1.py index 5f607e3ffb42..eedb02379e62 100644 --- a/project_euler/problem_051/sol1.py +++ b/project_euler/problem_051/sol1.py @@ -63,12 +63,12 @@ def digit_replacements(number: int) -> list[list[int]]: >>> digit_replacements(3112) [[3002, 3112, 3222, 3332, 3442, 3552, 3662, 3772, 3882, 3992]] """ - number = str(number) + number_str = str(number) replacements = [] digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] - for duplicate in Counter(number) - Counter(set(number)): - family = [int(number.replace(duplicate, digit)) for digit in digits] + for duplicate in Counter(number_str) - Counter(set(number_str)): + family = [int(number_str.replace(duplicate, digit)) for digit in digits] replacements.append(family) return replacements @@ -106,6 +106,8 @@ def solution(family_length: int = 8) -> int: return min(primes_in_family) + return -1 + if __name__ == "__main__": print(solution()) diff --git a/project_euler/problem_074/sol2.py b/project_euler/problem_074/sol2.py index 689593277a81..55e67c6b98dd 100644 --- a/project_euler/problem_074/sol2.py +++ b/project_euler/problem_074/sol2.py @@ -20,8 +20,8 @@ counter increases. 
""" -factorial_cache = {} -factorial_sum_cache = {} +factorial_cache: dict[int, int] = {} +factorial_sum_cache: dict[int, int] = {} def factorial(a: int) -> int: diff --git a/project_euler/problem_080/sol1.py b/project_euler/problem_080/sol1.py index 517be3fc0ba8..916998bdd8ad 100644 --- a/project_euler/problem_080/sol1.py +++ b/project_euler/problem_080/sol1.py @@ -26,8 +26,8 @@ def solution() -> int: sqrt_number = number.sqrt(decimal_context) if len(str(sqrt_number)) > 1: answer += int(str(sqrt_number)[0]) - sqrt_number = str(sqrt_number)[2:101] - answer += sum(int(x) for x in sqrt_number) + sqrt_number_str = str(sqrt_number)[2:101] + answer += sum(int(x) for x in sqrt_number_str) return answer @@ -35,3 +35,4 @@ def solution() -> int: import doctest doctest.testmod() + print(f"{solution() = }") diff --git a/project_euler/problem_099/sol1.py b/project_euler/problem_099/sol1.py index 88912e1f0f9e..bf5621c6583c 100644 --- a/project_euler/problem_099/sol1.py +++ b/project_euler/problem_099/sol1.py @@ -22,12 +22,14 @@ def solution(data_file: str = "base_exp.txt") -> int: >>> solution() 709 """ - largest = [0, 0] + largest: float = 0 + result = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))): a, x = list(map(int, line.split(","))) - if x * log10(a) > largest[0]: - largest = [x * log10(a), i + 1] - return largest[1] + if x * log10(a) > largest: + largest = x * log10(a) + result = i + 1 + return result if __name__ == "__main__": diff --git a/project_euler/problem_101/sol1.py b/project_euler/problem_101/sol1.py index 553f8f442bb8..14013c435241 100644 --- a/project_euler/problem_101/sol1.py +++ b/project_euler/problem_101/sol1.py @@ -202,7 +202,7 @@ def solution(func: Callable[[int], int] = question_function, order: int = 10) -> ] ret: int = 0 - poly: int + poly: Callable[[int], int] x_val: int for poly in polynomials: diff --git a/project_euler/problem_188/sol1.py b/project_euler/problem_188/sol1.py index 6473c63620ed..c8cd9eb10aeb 100644 --- a/project_euler/problem_188/sol1.py +++ b/project_euler/problem_188/sol1.py @@ -19,7 +19,7 @@ """ -# small helper function for modular exponentiation +# small helper function for modular exponentiation (fast exponentiation algorithm) def _modexpt(base: int, exponent: int, modulo_value: int) -> int: """ Returns the modular exponentiation, that is the value @@ -36,7 +36,7 @@ def _modexpt(base: int, exponent: int, modulo_value: int) -> int: if exponent == 1: return base if exponent % 2 == 0: - x = _modexpt(base, exponent / 2, modulo_value) % modulo_value + x = _modexpt(base, exponent // 2, modulo_value) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value diff --git a/project_euler/problem_191/sol1.py b/project_euler/problem_191/sol1.py index 38325b363b89..6bff9d54eeca 100644 --- a/project_euler/problem_191/sol1.py +++ b/project_euler/problem_191/sol1.py @@ -26,7 +26,7 @@ """ -cache = {} +cache: dict[tuple[int, int, int], int] = {} def _calculate(days: int, absent: int, late: int) -> int: diff --git a/project_euler/problem_207/sol1.py b/project_euler/problem_207/sol1.py index fb901fde1624..99d1a91746d2 100644 --- a/project_euler/problem_207/sol1.py +++ b/project_euler/problem_207/sol1.py @@ -90,7 +90,7 @@ def solution(max_proportion: float = 1 / 12345) -> int: perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: - return partition_candidate + return int(partition_candidate) integer += 1 diff --git 
a/project_euler/problem_551/sol1.py b/project_euler/problem_551/sol1.py index 71956691a56d..005d2e98514b 100644 --- a/project_euler/problem_551/sol1.py +++ b/project_euler/problem_551/sol1.py @@ -12,9 +12,10 @@ Find a(10^15) """ + ks = [k for k in range(2, 20 + 1)] base = [10 ** k for k in range(ks[-1] + 1)] -memo = {} +memo: dict[int, dict[int, list[list[int]]]] = {} def next_term(a_i, k, i, n): From abaa0d754b8dc24abacac2a4d7ecade2d3ddacb6 Mon Sep 17 00:00:00 2001 From: scfenton6 <91698851+scfenton6@users.noreply.github.com> Date: Mon, 11 Oct 2021 18:34:30 +0200 Subject: [PATCH 0236/1543] Add type annotations (#4814) --- data_structures/disjoint_set/disjoint_set.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/data_structures/disjoint_set/disjoint_set.py b/data_structures/disjoint_set/disjoint_set.py index bf5ab415d5e4..f8500bf2c3af 100644 --- a/data_structures/disjoint_set/disjoint_set.py +++ b/data_structures/disjoint_set/disjoint_set.py @@ -1,17 +1,19 @@ """ - disjoint set + Disjoint set. Reference: https://en.wikipedia.org/wiki/Disjoint-set_data_structure """ class Node: - def __init__(self, data): + def __init__(self, data: int) -> None: self.data = data + self.rank: int + self.parent: Node -def make_set(x): +def make_set(x: Node) -> None: """ - make x as a set. + Make x as a set. """ # rank is the distance from x to its' parent # root's rank is 0 @@ -19,9 +21,9 @@ def make_set(x): x.parent = x -def union_set(x, y): +def union_set(x: Node, y: Node) -> None: """ - union two sets. + Union of two sets. set with bigger rank should be parent, so that the disjoint set tree will be more flat. """ @@ -37,9 +39,9 @@ def union_set(x, y): y.rank += 1 -def find_set(x): +def find_set(x: Node) -> Node: """ - return the parent of x + Return the parent of x """ if x != x.parent: x.parent = find_set(x.parent) @@ -57,7 +59,7 @@ def find_python_set(node: Node) -> set: raise ValueError(f"{node.data} is not in {sets}") -def test_disjoint_set(): +def test_disjoint_set() -> None: """ >>> test_disjoint_set() """ From 9586a6a98ef4bad7894bfe31da4fab42f6b3d6cd Mon Sep 17 00:00:00 2001 From: poloso Date: Mon, 11 Oct 2021 11:44:38 -0500 Subject: [PATCH 0237/1543] Change comments for improved consistency (#5223) * Change comments for improved consistency https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md#L56 https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md#L80 https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md#L87 * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- CONTRIBUTING.md | 2 +- DIRECTORY.md | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 13d330a90dc5..e9cf0e6a18b7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -53,7 +53,7 @@ Algorithms in this repo should not be how-to examples for existing Python packag Use [pre-commit](https://pre-commit.com/#installation) to automatically format your code to match our coding style: ```bash -python3 -m pip install pre-commit # required only once +python3 -m pip install pre-commit # only required the first time pre-commit install ``` That's it! The plugin will run every time you commit any changes. If there are any errors found during the run, fix them and commit those changes. 
You can even run the plugin manually on all files: diff --git a/DIRECTORY.md b/DIRECTORY.md index 2e9942b5bf93..6c227fd2b5ad 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -274,6 +274,7 @@ * [Test Send File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/tests/test_send_file.py) ## Fractals + * [Julia Sets](https://github.com/TheAlgorithms/Python/blob/master/fractals/julia_sets.py) * [Koch Snowflake](https://github.com/TheAlgorithms/Python/blob/master/fractals/koch_snowflake.py) * [Mandelbrot](https://github.com/TheAlgorithms/Python/blob/master/fractals/mandelbrot.py) * [Sierpinski Triangle](https://github.com/TheAlgorithms/Python/blob/master/fractals/sierpinski_triangle.py) @@ -424,10 +425,12 @@ * [Binomial Distribution](https://github.com/TheAlgorithms/Python/blob/master/maths/binomial_distribution.py) * [Bisection](https://github.com/TheAlgorithms/Python/blob/master/maths/bisection.py) * [Ceil](https://github.com/TheAlgorithms/Python/blob/master/maths/ceil.py) + * [Check Polygon](https://github.com/TheAlgorithms/Python/blob/master/maths/check_polygon.py) * [Chudnovsky Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/chudnovsky_algorithm.py) * [Collatz Sequence](https://github.com/TheAlgorithms/Python/blob/master/maths/collatz_sequence.py) * [Combinations](https://github.com/TheAlgorithms/Python/blob/master/maths/combinations.py) * [Decimal Isolate](https://github.com/TheAlgorithms/Python/blob/master/maths/decimal_isolate.py) + * [Double Factorial Iterative](https://github.com/TheAlgorithms/Python/blob/master/maths/double_factorial_iterative.py) * [Double Factorial Recursive](https://github.com/TheAlgorithms/Python/blob/master/maths/double_factorial_recursive.py) * [Entropy](https://github.com/TheAlgorithms/Python/blob/master/maths/entropy.py) * [Euclidean Distance](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_distance.py) @@ -511,6 +514,7 @@ * [Sum Of Arithmetic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_arithmetic_series.py) * [Sum Of Digits](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_digits.py) * [Sum Of Geometric Progression](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_geometric_progression.py) + * [Sylvester Sequence](https://github.com/TheAlgorithms/Python/blob/master/maths/sylvester_sequence.py) * [Test Prime Check](https://github.com/TheAlgorithms/Python/blob/master/maths/test_prime_check.py) * [Trapezoidal Rule](https://github.com/TheAlgorithms/Python/blob/master/maths/trapezoidal_rule.py) * [Triplet Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/triplet_sum.py) @@ -928,7 +932,6 @@ * [Reverse Letters](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_letters.py) * [Reverse Words](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_words.py) * [Split](https://github.com/TheAlgorithms/Python/blob/master/strings/split.py) - * [Swap Case](https://github.com/TheAlgorithms/Python/blob/master/strings/swap_case.py) * [Upper](https://github.com/TheAlgorithms/Python/blob/master/strings/upper.py) * [Word Occurrence](https://github.com/TheAlgorithms/Python/blob/master/strings/word_occurrence.py) * [Word Patterns](https://github.com/TheAlgorithms/Python/blob/master/strings/word_patterns.py) From 1b0ac73da25915d4cb4d2754f7c12d1b81fc9f90 Mon Sep 17 00:00:00 2001 From: Aman kanojiya <50018596+AMANKANOJIYA@users.noreply.github.com> Date: Tue, 12 Oct 2021 15:21:27 +0530 Subject: [PATCH 0238/1543] Magnitude and 
Angle of Vector (#5225) * Magnitude and Angle Core function to find Magnitude and Angle of two Given Vector * Magnitude and Angle with Doctest added Doctest to the functions * Update linear_algebra/src/lib.py Co-authored-by: Christian Clauss * Update linear_algebra/src/lib.py Co-authored-by: Christian Clauss * Changes done and Magnitude and Angle Issues * black Co-authored-by: Christian Clauss --- linear_algebra/src/lib.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 74aeb9137666..6a18df5e15c3 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -148,6 +148,36 @@ def __mul__(self, other: float | Vector) -> float | Vector: else: # error case raise Exception("invalid operand!") + def magnitude(self) -> float: + """ + Magnitude of a Vector + + >>> Vector([2, 3, 4]).magnitude() + 5.385164807134504 + + """ + return sum([i ** 2 for i in self.__components]) ** (1 / 2) + + def angle(self, other: Vector, deg: bool = False) -> float: + """ + find angle between two Vector (self, Vector) + + >>> Vector([3, 4, -1]).angle(Vector([2, -1, 1])) + 1.4906464636572374 + >>> Vector([3, 4, -1]).angle(Vector([2, -1, 1]), deg = True) + 85.40775111366095 + >>> Vector([3, 4, -1]).angle(Vector([2, -1])) + Traceback (most recent call last): + ... + Exception: invalid operand! + """ + num = self * other + den = self.magnitude() * other.magnitude() + if deg: + return math.degrees(math.acos(num / den)) + else: + return math.acos(num / den) + def copy(self) -> Vector: """ copies this vector and returns it. From 943e03fc545f91482dae08d7a2f1335d9d1faf17 Mon Sep 17 00:00:00 2001 From: Raj-Pansuriya <72313592+Raj-Pansuriya@users.noreply.github.com> Date: Thu, 14 Oct 2021 16:21:13 +0530 Subject: [PATCH 0239/1543] Added Optimal Merge Pattern Algorithm (#5274) * Minor changes due to precommit * Update optimal_merge_pattern.py Co-authored-by: Christian Clauss --- greedy_methods/optimal_merge_pattern.py | 56 +++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 greedy_methods/optimal_merge_pattern.py diff --git a/greedy_methods/optimal_merge_pattern.py b/greedy_methods/optimal_merge_pattern.py new file mode 100644 index 000000000000..911e1966f3b9 --- /dev/null +++ b/greedy_methods/optimal_merge_pattern.py @@ -0,0 +1,56 @@ +""" +This is a pure Python implementation of the greedy-merge-sort algorithm +reference: https://www.geeksforgeeks.org/optimal-file-merge-patterns/ + +For doctests run following command: +python3 -m doctest -v greedy_merge_sort.py + +Objective +Merge a set of sorted files of different length into a single sorted file. +We need to find an optimal solution, where the resultant file +will be generated in minimum time. + +Approach +If the number of sorted files are given, there are many ways +to merge them into a single sorted file. +This merge can be performed pair wise. +To merge a m-record file and a n-record file requires possibly m+n record moves +the optimal choice being, +merge the two smallest files together at each step (greedy approach). 
+""" + + +def optimal_merge_pattern(files: list) -> float: + """Function to merge all the files with optimum cost + + Args: + files [list]: A list of sizes of different files to be merged + + Returns: + optimal_merge_cost [int]: Optimal cost to merge all those files + + Examples: + >>> optimal_merge_pattern([2, 3, 4]) + 14 + >>> optimal_merge_pattern([5, 10, 20, 30, 30]) + 205 + >>> optimal_merge_pattern([8, 8, 8, 8, 8]) + 96 + """ + optimal_merge_cost = 0 + while len(files) > 1: + temp = 0 + # Consider two files with minimum cost to be merged + for i in range(2): + min_index = files.index(min(files)) + temp += files[min_index] + files.pop(min_index) + files.append(temp) + optimal_merge_cost += temp + return optimal_merge_cost + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 9b4cb05ee5a89609590785b683f81f42a77dadf1 Mon Sep 17 00:00:00 2001 From: Aman kanojiya <50018596+AMANKANOJIYA@users.noreply.github.com> Date: Thu, 14 Oct 2021 16:23:03 +0530 Subject: [PATCH 0240/1543] Modified Euler's Method (#5258) * Magnitude and Angle Core function to find Magnitude and Angle of two Given Vector * Magnitude and Angle with Doctest added Doctest to the functions * Update linear_algebra/src/lib.py Co-authored-by: Christian Clauss * Update linear_algebra/src/lib.py Co-authored-by: Christian Clauss * Changes done and Magnitude and Angle Issues * black * Modified Euler's Method Adding Modified Euler's method, which was the further change to a Euler method and known for better accuracy to the given value * Modified Euler's Method (changed the typing of function) Modified function is used for better accuracy * Link added Added link to an explanation as per Contributions Guidelines * Resolving Pre-Commit error * Pre-Commit Error Resolved * Pre-Commit Error import statement Change * Removed Import Math * import math built issue * adding space pre-commit error * statement sorter for doc Co-authored-by: Christian Clauss --- maths/euler_modified.py | 54 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 maths/euler_modified.py diff --git a/maths/euler_modified.py b/maths/euler_modified.py new file mode 100644 index 000000000000..bf0c07c17f48 --- /dev/null +++ b/maths/euler_modified.py @@ -0,0 +1,54 @@ +from typing import Callable + +import numpy as np + + +def euler_modified( + ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float +) -> np.array: + """ + Calculate solution at each step to an ODE using Euler's Modified Method + The Euler is straightforward to implement, but can't give accurate solutions. + So, they Proposed some changes to improve the accuracy + + https://en.wikipedia.org/wiki/Euler_method + + Arguments: + ode_func -- The ode as a function of x and y + y0 -- the initial value for y + x0 -- the initial value for x + stepsize -- the increment value for x + x_end -- the end value for x + + >>> # the exact solution is math.exp(x) + >>> def f1(x, y): + ... return -2*x*(y**2) + >>> y = euler_modified(f1, 1.0, 0.0, 0.2, 1.0) + >>> y[-1] + 0.503338255442106 + >>> import math + >>> def f2(x, y): + ... 
return -2*y + (x**3)*math.exp(-2*x) + >>> y = euler_modified(f2, 1.0, 0.0, 0.1, 0.3) + >>> y[-1] + 0.5525976431951775 + """ + N = int(np.ceil((x_end - x0) / step_size)) + y = np.zeros((N + 1,)) + y[0] = y0 + x = x0 + + for k in range(N): + y_get = y[k] + step_size * ode_func(x, y[k]) + y[k + 1] = y[k] + ( + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get)) + ) + x += step_size + + return y + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d561de0bd93a4aef31765ae84b97dafdb606a7e6 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj <89947037+Rohanrbharadwaj@users.noreply.github.com> Date: Thu, 14 Oct 2021 16:53:18 +0530 Subject: [PATCH 0241/1543] Add surface area of cone and cylinder and hemisphere (#5220) * Update area.py * Update area.py * Update area.py * Update area.py * Update area.py * Update area.py * Update area.py * Update area.py --- maths/area.py | 84 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 83 insertions(+), 1 deletion(-) diff --git a/maths/area.py b/maths/area.py index 8689f323cc9a..13c05af5f68e 100644 --- a/maths/area.py +++ b/maths/area.py @@ -42,6 +42,85 @@ def surface_area_sphere(radius: float) -> float: return 4 * pi * radius ** 2 +def surface_area_hemisphere(radius: float) -> float: + """ + Calculate the Surface Area of a Hemisphere. + Formula: 3 * pi * r^2 + + >>> surface_area_hemisphere(5) + 235.61944901923448 + >>> surface_area_hemisphere(1) + 9.42477796076938 + >>> surface_area_hemisphere(0) + 0.0 + >>> surface_area_hemisphere(1.1) + 11.40398133253095 + >>> surface_area_hemisphere(-1) + Traceback (most recent call last): + ... + ValueError: surface_area_hemisphere() only accepts non-negative values + """ + if radius < 0: + raise ValueError("surface_area_hemisphere() only accepts non-negative values") + return 3 * pi * radius ** 2 + + +def surface_area_cone(radius: float, height: float) -> float: + """ + Calculate the Surface Area of a Cone. + Wikipedia reference: https://en.wikipedia.org/wiki/Cone + Formula: pi * r * (r + (h ** 2 + r ** 2) ** 0.5) + + >>> surface_area_cone(10, 24) + 1130.9733552923256 + >>> surface_area_cone(6, 8) + 301.59289474462014 + >>> surface_area_cone(-1, -2) + Traceback (most recent call last): + ... + ValueError: surface_area_cone() only accepts non-negative values + >>> surface_area_cone(1, -2) + Traceback (most recent call last): + ... + ValueError: surface_area_cone() only accepts non-negative values + >>> surface_area_cone(-1, 2) + Traceback (most recent call last): + ... + ValueError: surface_area_cone() only accepts non-negative values + """ + if radius < 0 or height < 0: + raise ValueError("surface_area_cone() only accepts non-negative values") + return pi * radius * (radius + (height ** 2 + radius ** 2) ** 0.5) + + +def surface_area_cylinder(radius: float, height: float) -> float: + """ + Calculate the Surface Area of a Cylinder. + Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder + Formula: 2 * pi * r * (h + r) + + >>> surface_area_cylinder(7, 10) + 747.6990515543707 + >>> surface_area_cylinder(6, 8) + 527.7875658030853 + >>> surface_area_cylinder(-1, -2) + Traceback (most recent call last): + ... + ValueError: surface_area_cylinder() only accepts non-negative values + >>> surface_area_cylinder(1, -2) + Traceback (most recent call last): + ... + ValueError: surface_area_cylinder() only accepts non-negative values + >>> surface_area_cylinder(-1, 2) + Traceback (most recent call last): + ... 
+ ValueError: surface_area_cylinder() only accepts non-negative values + """ + if radius < 0 or height < 0: + raise ValueError("surface_area_cylinder() only accepts non-negative values") + return 2 * pi * radius * (height + radius) + + def area_rectangle(length: float, width: float) -> float: """ Calculate the area of a rectangle. @@ -280,9 +359,12 @@ def area_rhombus(diagonal_1: float, diagonal_2: float) -> float: print(f"Triangle: {area_triangle(10, 10) = }") print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }") print(f"Parallelogram: {area_parallelogram(10, 20) = }") + print(f"Rhombus: {area_rhombus(10, 20) = }") print(f"Trapezium: {area_trapezium(10, 20, 30) = }") print(f"Circle: {area_circle(20) = }") print("\nSurface Areas of various geometric shapes: \n") print(f"Cube: {surface_area_cube(20) = }") print(f"Sphere: {surface_area_sphere(20) = }") - print(f"Rhombus: {area_rhombus(10, 20) = }") + print(f"Hemisphere: {surface_area_hemisphere(20) = }") + print(f"Cone: {surface_area_cone(10, 20) = }") + print(f"Cylinder: {surface_area_cylinder(10, 20) = }") From bb37ebbe50b7c7140b6efc52b95f3c32f230ea0a Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj <89947037+Rohanrbharadwaj@users.noreply.github.com> Date: Thu, 14 Oct 2021 19:31:38 +0530 Subject: [PATCH 0242/1543] Create baconian_cipher.py (#5251) * Create baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py * Update baconian_cipher.py --- ciphers/baconian_cipher.py | 89 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 ciphers/baconian_cipher.py diff --git a/ciphers/baconian_cipher.py b/ciphers/baconian_cipher.py new file mode 100644 index 000000000000..027fbc50e89d --- /dev/null +++ b/ciphers/baconian_cipher.py @@ -0,0 +1,89 @@ +""" +Program to encode and decode Baconian or Bacon's Cipher +Wikipedia reference : https://en.wikipedia.org/wiki/Bacon%27s_cipher +""" + +encode_dict = { + "a": "AAAAA", + "b": "AAAAB", + "c": "AAABA", + "d": "AAABB", + "e": "AABAA", + "f": "AABAB", + "g": "AABBA", + "h": "AABBB", + "i": "ABAAA", + "j": "BBBAA", + "k": "ABAAB", + "l": "ABABA", + "m": "ABABB", + "n": "ABBAA", + "o": "ABBAB", + "p": "ABBBA", + "q": "ABBBB", + "r": "BAAAA", + "s": "BAAAB", + "t": "BAABA", + "u": "BAABB", + "v": "BBBAB", + "w": "BABAA", + "x": "BABAB", + "y": "BABBA", + "z": "BABBB", + " ": " ", +} + + +decode_dict = {value: key for key, value in encode_dict.items()} + + +def encode(word: str) -> str: + """ + Encodes to Baconian cipher + + >>> encode("hello") + 'AABBBAABAAABABAABABAABBAB' + >>> encode("hello world") + 'AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB' + >>> encode("hello world!") + Traceback (most recent call last): + 
... + Exception: encode() accepts only letters of the alphabet and spaces + """ + encoded = "" + for letter in word.lower(): + if letter.isalpha() or letter == " ": + encoded += encode_dict[letter] + else: + raise Exception("encode() accepts only letters of the alphabet and spaces") + return encoded + + +def decode(coded: str) -> str: + """ + Decodes from Baconian cipher + + >>> decode("AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB") + 'hello world' + >>> decode("AABBBAABAAABABAABABAABBAB") + 'hello' + >>> decode("AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB!") + Traceback (most recent call last): + ... + Exception: decode() accepts only 'A', 'B' and spaces + """ + if set(coded) - {"A", "B", " "} != set(): + raise Exception("decode() accepts only 'A', 'B' and spaces") + decoded = "" + for word in coded.split(): + while len(word) != 0: + decoded += decode_dict[word[:5]] + word = word[5:] + decoded += " " + return decoded.strip() + + +if "__name__" == "__main__": + from doctest import testmod + + testmod() From 618f9ca885a6f4e0c2f7dfcf1768ef7b0f717ba6 Mon Sep 17 00:00:00 2001 From: Jordan Rinder Date: Thu, 14 Oct 2021 10:30:52 -0400 Subject: [PATCH 0243/1543] Add Proth number to maths (#5246) * Add Proth number to maths * Add test for 0 and more informative output * Fixing test failure issue - unused variable * Update proth_number.py Co-authored-by: Christian Clauss --- maths/proth_number.py | 77 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 maths/proth_number.py diff --git a/maths/proth_number.py b/maths/proth_number.py new file mode 100644 index 000000000000..065244ed7607 --- /dev/null +++ b/maths/proth_number.py @@ -0,0 +1,77 @@ +""" +Calculate the nth Proth number + +Source: + https://handwiki.org/wiki/Proth_number +""" + +import math + + +def proth(number: int) -> int: + """ + :param number: nth number to calculate in the sequence + :return: the nth number in Proth number + + Note: indexing starts at 1 i.e. proth(1) gives the first Proth number of 3 + + >>> proth(6) + 25 + + >>> proth(0) + Traceback (most recent call last): + ... + ValueError: Input value of [number=0] must be > 0 + + >>> proth(-1) + Traceback (most recent call last): + ... + ValueError: Input value of [number=-1] must be > 0 + + >>> proth(6.0) + Traceback (most recent call last): + ... + TypeError: Input value of [number=6.0] must be an integer + """ + + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + + if number < 1: + raise ValueError(f"Input value of [number={number}] must be > 0") + elif number == 1: + return 3 + elif number == 2: + return 5 + else: + block_index = number // 3 + """ + +1 for binary starting at 0 i.e. 2^0, 2^1, etc. 
+ +1 to start the sequence at the 3rd Proth number + Hence, we have a +2 in the below statement + """ + block_index = math.log(block_index, 2) + 2 + block_index = int(block_index) + + proth_list = [3, 5] + proth_index = 2 + increment = 3 + for block in range(1, block_index): + for move in range(increment): + proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1]) + proth_index += 1 + increment *= 2 + + return proth_list[number - 1] + + +if __name__ == "__main__": + for number in range(11): + value = 0 + try: + value = proth(number) + except ValueError: + print(f"ValueError: there is no {number}th Proth number") + continue + + print(f"The {number}th Proth number: {value}") From ca842b4add2a81c6d50e4fba2bf33ad07f54dbca Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 14 Oct 2021 18:19:47 +0200 Subject: [PATCH 0244/1543] It is OK to test ./scripts (#5290) * It is OK to test ./scripts * updating DIRECTORY.md * Update build.yml Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- DIRECTORY.md | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7c2255275091..f710e1e0ed54 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -23,6 +23,6 @@ jobs: python -m pip install mypy pytest-cov -r requirements.txt - run: mypy --install-types --non-interactive . - name: Run tests - run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . + run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md diff --git a/DIRECTORY.md b/DIRECTORY.md index 6c227fd2b5ad..d92dccca5afb 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -264,6 +264,7 @@ * [Sum Of Subset](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/sum_of_subset.py) ## Electronics + * [Carrier Concentration](https://github.com/TheAlgorithms/Python/blob/master/electronics/carrier_concentration.py) * [Electric Power](https://github.com/TheAlgorithms/Python/blob/master/electronics/electric_power.py) * [Ohms Law](https://github.com/TheAlgorithms/Python/blob/master/electronics/ohms_law.py) @@ -346,6 +347,9 @@ * [Test Min Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/tests/test_min_spanning_tree_kruskal.py) * [Test Min Spanning Tree Prim](https://github.com/TheAlgorithms/Python/blob/master/graphs/tests/test_min_spanning_tree_prim.py) +## Greedy Methods + * [Optimal Merge Pattern](https://github.com/TheAlgorithms/Python/blob/master/greedy_methods/optimal_merge_pattern.py) + ## Hashes * [Adler32](https://github.com/TheAlgorithms/Python/blob/master/hashes/adler32.py) * [Chaos Machine](https://github.com/TheAlgorithms/Python/blob/master/hashes/chaos_machine.py) @@ -436,6 +440,7 @@ * [Euclidean Distance](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_distance.py) * [Euclidean Gcd](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_gcd.py) * [Euler Method](https://github.com/TheAlgorithms/Python/blob/master/maths/euler_method.py) + * [Euler Modified](https://github.com/TheAlgorithms/Python/blob/master/maths/euler_modified.py) * [Eulers Totient](https://github.com/TheAlgorithms/Python/blob/master/maths/eulers_totient.py) * [Extended Euclidean 
Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/extended_euclidean_algorithm.py) * [Factorial Iterative](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_iterative.py) From 545fec7a1446348d117f8c840cb4e334a67e25da Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj <89947037+Rohanrbharadwaj@users.noreply.github.com> Date: Fri, 15 Oct 2021 16:03:39 +0530 Subject: [PATCH 0245/1543] Fix documentation (#5311) --- maths/euler_modified.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maths/euler_modified.py b/maths/euler_modified.py index bf0c07c17f48..7c76a0ee0b86 100644 --- a/maths/euler_modified.py +++ b/maths/euler_modified.py @@ -8,8 +8,8 @@ def euler_modified( ) -> np.array: """ Calculate solution at each step to an ODE using Euler's Modified Method - The Euler is straightforward to implement, but can't give accurate solutions. - So, they Proposed some changes to improve the accuracy + The Euler Method is straightforward to implement, but can't give accurate solutions. + So, some changes were proposed to improve accuracy. https://en.wikipedia.org/wiki/Euler_method From 908cb4f1e72e16726ef5ca8365b36d473fcf2e00 Mon Sep 17 00:00:00 2001 From: Manuel Di Lullo <39048927+manueldilullo@users.noreply.github.com> Date: Fri, 15 Oct 2021 15:04:38 +0200 Subject: [PATCH 0246/1543] Greedy min vertex cover hacktoberfest (#5241) * added complete graph generator function * added doctest, type hints, wikipedia explanation * added return type hint for function complete_graph * added descriptive name for the parameter: n * random graph generator with doctest and type hints * added Greedy min vertex algorithm * pre-commit hook(s) made changes * Delete complete_graph_generator.py * Delete random_graph_generator.py * fixed doctest * updated commit following highligths * fixed following pre-commit highlights * modified variables names --- graphs/greedy_min_vertex_cover.py | 65 +++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 graphs/greedy_min_vertex_cover.py diff --git a/graphs/greedy_min_vertex_cover.py b/graphs/greedy_min_vertex_cover.py new file mode 100644 index 000000000000..056c5b89bedf --- /dev/null +++ b/graphs/greedy_min_vertex_cover.py @@ -0,0 +1,65 @@ +""" +* Author: Manuel Di Lullo (https://github.com/manueldilullo) +* Description: Approximization algorithm for minimum vertex cover problem. + Greedy Approach. 
Uses graphs represented with an adjacency list + +URL: https://mathworld.wolfram.com/MinimumVertexCover.html +URL: https://cs.stackexchange.com/questions/129017/greedy-algorithm-for-vertex-cover +""" + +import heapq + + +def greedy_min_vertex_cover(graph: dict) -> set: + """ + Greedy APX Algorithm for min Vertex Cover + @input: graph (graph stored in an adjacency list where each vertex + is represented with an integer) + @example: + >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} + >>> greedy_min_vertex_cover(graph) + {0, 1, 2, 4} + """ + # queue used to store nodes and their rank + queue = [] + + # for each node and his adjacency list add them and the rank of the node to queue + # using heapq module the queue will be filled like a Priority Queue + # heapq works with a min priority queue, so I used -1*len(v) to build it + for key, value in graph.items(): + # O(log(n)) + heapq.heappush(queue, [-1 * len(value), (key, value)]) + + # chosen_vertices = set of chosen vertices + chosen_vertices = set() + + # while queue isn't empty and there are still edges + # (queue[0][0] is the rank of the node with max rank) + while queue and queue[0][0] != 0: + # extract vertex with max rank from queue and add it to chosen_vertices + argmax = heapq.heappop(queue)[1][0] + chosen_vertices.add(argmax) + + # Remove all arcs adjacent to argmax + for elem in queue: + # if v haven't adjacent node, skip + if elem[0] == 0: + continue + # if argmax is reachable from elem + # remove argmax from elem's adjacent list and update his rank + if argmax in elem[1][1]: + index = elem[1][1].index(argmax) + del elem[1][1][index] + elem[0] += 1 + # re-order the queue + heapq.heapify(queue) + return chosen_vertices + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} + # print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}") From 1d457be29d0ac55ebafe049a9a13f9b9c09f1380 Mon Sep 17 00:00:00 2001 From: Manuel Di Lullo <39048927+manueldilullo@users.noreply.github.com> Date: Fri, 15 Oct 2021 17:03:57 +0200 Subject: [PATCH 0247/1543] Matching min vertex cover (#5326) * matching algorithm for min vertex cover problem * fixed hint on row 37 * changed variable names * provided doctest for get_edges function * Removed dict.keys() iteration * Update matching_min_vertex_cover.py Co-authored-by: Christian Clauss --- graphs/matching_min_vertex_cover.py | 62 +++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 graphs/matching_min_vertex_cover.py diff --git a/graphs/matching_min_vertex_cover.py b/graphs/matching_min_vertex_cover.py new file mode 100644 index 000000000000..5ac944ec1a07 --- /dev/null +++ b/graphs/matching_min_vertex_cover.py @@ -0,0 +1,62 @@ +""" +* Author: Manuel Di Lullo (https://github.com/manueldilullo) +* Description: Approximization algorithm for minimum vertex cover problem. + Matching Approach. 
Uses graphs represented with an adjacency list + +URL: https://mathworld.wolfram.com/MinimumVertexCover.html +URL: https://www.princeton.edu/~aaa/Public/Teaching/ORF523/ORF523_Lec6.pdf +""" + + +def matching_min_vertex_cover(graph: dict) -> set: + """ + APX Algorithm for min Vertex Cover using Matching Approach + @input: graph (graph stored in an adjacency list where each vertex + is represented as an integer) + @example: + >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} + >>> matching_min_vertex_cover(graph) + {0, 1, 2, 4} + """ + # chosen_vertices = set of chosen vertices + chosen_vertices = set() + # edges = list of graph's edges + edges = get_edges(graph) + + # While there are still elements in edges list, take an arbitrary edge + # (from_node, to_node) and add his extremity to chosen_vertices and then + # remove all arcs adjacent to the from_node and to_node + while edges: + from_node, to_node = edges.pop() + chosen_vertices.add(from_node) + chosen_vertices.add(to_node) + for edge in edges.copy(): + if from_node in edge or to_node in edge: + edges.discard(edge) + return chosen_vertices + + +def get_edges(graph: dict) -> set: + """ + Return a set of couples that represents all of the edges. + @input: graph (graph stored in an adjacency list where each vertex is + represented as an integer) + @example: + >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3], 3: [0, 1, 2]} + >>> get_edges(graph) + {(0, 1), (3, 1), (0, 3), (2, 0), (3, 0), (2, 3), (1, 0), (3, 2), (1, 3)} + """ + edges = set() + for from_node, to_nodes in graph.items(): + for to_node in to_nodes: + edges.add((from_node, to_node)) + return edges + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} + # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}") From 4cf1aaeb967790b88c85d5c773538754ffd89a44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Murilo=20Gon=C3=A7alves?= <38800183+murilo-goncalves@users.noreply.github.com> Date: Fri, 15 Oct 2021 18:57:41 -0300 Subject: [PATCH 0248/1543] Updated mypy.ini, removed ok folders that were excluded (#5331) --- mypy.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mypy.ini b/mypy.ini index 9eec22e22717..ba552f878e30 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,4 +2,4 @@ ignore_missing_imports = True ; FIXME: #4052 fix mypy errors in the exclude directories and remove them below -exclude = (data_structures|dynamic_programming|graphs|maths|matrix|other|project_euler|searches|strings*)/$ +exclude = (data_structures|graphs|maths|matrix|other|searches)/$ From 152261765a93c2a12eb4af0abdd297652766d47d Mon Sep 17 00:00:00 2001 From: Appledora Date: Sat, 16 Oct 2021 06:02:44 +0600 Subject: [PATCH 0249/1543] Show images from google query (#4853) * Added new script to open the google image tab with a search query. * Added new script to open the google image tab with a search query. * Added new script to open the google image tab with a search query with doctests. * Fixed doctest error, removed print() from method, changed return type * Update web_programming/show_image_tab_from_google_query.py using iterators instead of lists Co-authored-by: Christian Clauss * Update web_programming/show_image_tab_from_google_query.py Improve readability by removing one-time used variable Co-authored-by: Christian Clauss * Update web_programming/show_image_tab_from_google_query.py Decreasing complication through standard practices. 
Co-authored-by: Christian Clauss * Update web_programming/show_image_tab_from_google_query.py Exception Handling Co-authored-by: Christian Clauss * changed complete method to download images from google search query * Update download_images_from_google_query.py * Delete show_image_tab_from_google_query.py Co-authored-by: Christian Clauss --- .../download_images_from_google_query.py | 99 +++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 web_programming/download_images_from_google_query.py diff --git a/web_programming/download_images_from_google_query.py b/web_programming/download_images_from_google_query.py new file mode 100644 index 000000000000..c26262788c4c --- /dev/null +++ b/web_programming/download_images_from_google_query.py @@ -0,0 +1,99 @@ +import json +import os +import re +import sys +import urllib.request + +import requests +from bs4 import BeautifulSoup + +headers = { + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582" +} + + +def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int: + """Searches google using the provided query term and downloads the images in a folder. + + Args: + query : The image search term to be provided by the user. Defaults to + "dhaka". + image_numbers : [description]. Defaults to 5. + + Returns: + The number of images successfully downloaded. + + >>> download_images_from_google_query() + 5 + >>> download_images_from_google_query("potato") + 5 + """ + max_images = min(max_images, 50) # Prevent abuse! + params = { + "q": query, + "tbm": "isch", + "hl": "en", + "ijn": "0", + } + + html = requests.get("https://www.google.com/search", params=params, headers=headers) + soup = BeautifulSoup(html.text, "html.parser") + matched_images_data = "".join( + re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))) + ) + + matched_images_data_fix = json.dumps(matched_images_data) + matched_images_data_json = json.loads(matched_images_data_fix) + + matched_google_image_data = re.findall( + r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",", + matched_images_data_json, + ) + if not matched_google_image_data: + return 0 + + removed_matched_google_images_thumbnails = re.sub( + r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]", + "", + str(matched_google_image_data), + ) + + matched_google_full_resolution_images = re.findall( + r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]", + removed_matched_google_images_thumbnails, + ) + for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images): + if index >= max_images: + return index + original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode( + "unicode-escape" + ) + original_size_img = bytes(original_size_img_not_fixed, "ascii").decode( + "unicode-escape" + ) + opener = urllib.request.build_opener() + opener.addheaders = [ + ( + "User-Agent", + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" + " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582", + ) + ] + urllib.request.install_opener(opener) + path_name = f"query_{query.replace(' ', '_')}" + if not os.path.exists(path_name): + os.makedirs(path_name) + urllib.request.urlretrieve( + original_size_img, f"{path_name}/original_size_img_{index}.jpg" + ) + return index + + +if __name__ == "__main__": + try: + image_count = download_images_from_google_query(sys.argv[1]) + print(f"{image_count} images were downloaded 
to disk.") + except IndexError: + print("Please provide a search term.") + raise From 37385883aaa140a505488d23fa37be4049768f2b Mon Sep 17 00:00:00 2001 From: Appledora Date: Sat, 16 Oct 2021 07:32:33 +0600 Subject: [PATCH 0250/1543] Improved readability of web_programming/get_imdbtop.py and added documentations with doctests (#4855) * improved readability of the existing method by reformatting, adding documentations with doctests. * improved readability of the existing method by reformatting, adding documentations with doctests. * fixed typo in test * added doctest to parse dictionary method * added doctest to parse dictionary method * Changed return type, removed print() from method and implemented doctests as suggested * Fixed doctest error, removed print() from method, created new script as suggested * Update get_imdbtop.py * Fix typo discovered by codespell * return () Co-authored-by: Christian Clauss --- web_programming/get_imdbtop.py | 57 +++++++++++++++++++++++++++------- 1 file changed, 45 insertions(+), 12 deletions(-) diff --git a/web_programming/get_imdbtop.py b/web_programming/get_imdbtop.py index 669e7f89824b..5f7105f83239 100644 --- a/web_programming/get_imdbtop.py +++ b/web_programming/get_imdbtop.py @@ -1,20 +1,53 @@ +import bs4 import requests -from bs4 import BeautifulSoup -def imdb_top(imdb_top_n): +def get_movie_data_from_soup(soup: bs4.element.ResultSet) -> dict[str, str]: + return { + "name": soup.h3.a.text, + "genre": soup.find("span", class_="genre").text.strip(), + "rating": soup.strong.text, + "page_link": f"https://www.imdb.com{soup.a.get('href')}", + } + + +def get_imdb_top_movies(num_movies: int = 5) -> tuple: + """Get the top num_movies most highly rated movies from IMDB and + return a tuple of dicts describing each movie's name, genre, rating, and URL. + + Args: + num_movies: The number of movies to get. Defaults to 5. + + Returns: + A list of tuples containing information about the top n movies. + + >>> len(get_imdb_top_movies(5)) + 5 + >>> len(get_imdb_top_movies(-3)) + 0 + >>> len(get_imdb_top_movies(4.99999)) + 4 + """ + num_movies = int(float(num_movies)) + if num_movies < 1: + return () base_url = ( - f"https://www.imdb.com/search/title?title_type=" - f"feature&sort=num_votes,desc&count={imdb_top_n}" + "https://www.imdb.com/search/title?title_type=" + f"feature&sort=num_votes,desc&count={num_movies}" + ) + source = bs4.BeautifulSoup(requests.get(base_url).content, "html.parser") + return tuple( + get_movie_data_from_soup(movie) + for movie in source.find_all("div", class_="lister-item mode-advanced") ) - source = BeautifulSoup(requests.get(base_url).content, "html.parser") - for m in source.findAll("div", class_="lister-item mode-advanced"): - print("\n" + m.h3.a.text) # movie's name - print(m.find("span", attrs={"class": "genre"}).text) # genre - print(m.strong.text) # movie's rating - print(f"https://www.imdb.com{m.a.get('href')}") # movie's page link - print("*" * 40) if __name__ == "__main__": - imdb_top(input("How many movies would you like to see? ")) + import json + + num_movies = int(input("How many movies would you like to see? 
")) + print( + ", ".join( + json.dumps(movie, indent=4) for movie in get_imdb_top_movies(num_movies) + ) + ) From 433b804f7d1a19403cb0856232f0d23f09d4b4a0 Mon Sep 17 00:00:00 2001 From: Saurabh Suresh Powar <66636289+Spnetic-5@users.noreply.github.com> Date: Sat, 16 Oct 2021 20:02:40 +0530 Subject: [PATCH 0251/1543] Added morphological operations, fixes: #5197 (#5199) * Added morphological operations, fixes: #5197 * Added dilation tests and type hints * Added erosion tests and type hints * fixes: TheAlgorithms#5197 * fixes: TheAlgorithms#5197 * Update erosion_operation.py * made suggested changes in dilation * made suggested changes in erosion * made suggested changes in dilation * removed extra spaces in the tests * removed extra spaces in the tests --- .../dilation_operation.py | 74 +++++++++++++++++++ .../erosion_operation.py | 74 +++++++++++++++++++ 2 files changed, 148 insertions(+) create mode 100644 digital_image_processing/morphological_operations/dilation_operation.py create mode 100644 digital_image_processing/morphological_operations/erosion_operation.py diff --git a/digital_image_processing/morphological_operations/dilation_operation.py b/digital_image_processing/morphological_operations/dilation_operation.py new file mode 100644 index 000000000000..274880b0a50a --- /dev/null +++ b/digital_image_processing/morphological_operations/dilation_operation.py @@ -0,0 +1,74 @@ +import numpy as np +from PIL import Image + + +def rgb2gray(rgb: np.array) -> np.array: + """ + Return gray image from rgb image + >>> rgb2gray(np.array([[[127, 255, 0]]])) + array([[187.6453]]) + >>> rgb2gray(np.array([[[0, 0, 0]]])) + array([[0.]]) + >>> rgb2gray(np.array([[[2, 4, 1]]])) + array([[3.0598]]) + >>> rgb2gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]])) + array([[159.0524, 90.0635, 117.6989]]) + """ + r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] + return 0.2989 * r + 0.5870 * g + 0.1140 * b + + +def gray2binary(gray: np.array) -> np.array: + """ + Return binary image from gray image + >>> gray2binary(np.array([[127, 255, 0]])) + array([[False, True, False]]) + >>> gray2binary(np.array([[0]])) + array([[False]]) + >>> gray2binary(np.array([[26.2409, 4.9315, 1.4729]])) + array([[False, False, False]]) + >>> gray2binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]])) + array([[False, True, False], + [False, True, False], + [False, True, False]]) + """ + return (127 < gray) & (gray <= 255) + + +def dilation(image: np.array, kernel: np.array) -> np.array: + """ + Return dilated image + >>> dilation(np.array([[True, False, True]]), np.array([[0, 1, 0]])) + array([[False, False, False]]) + >>> dilation(np.array([[False, False, True]]), np.array([[1, 0, 1]])) + array([[False, False, False]]) + """ + output = np.zeros_like(image) + image_padded = np.zeros( + (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) + ) + + # Copy image to padded image + image_padded[kernel.shape[0] - 2 : -1 :, kernel.shape[1] - 2 : -1 :] = image + + # Iterate over image & apply kernel + for x in range(image.shape[1]): + for y in range(image.shape[0]): + summation = ( + kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] + ).sum() + output[y, x] = int(summation > 0) + return output + + +# kernel to be applied +structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + + +if __name__ == "__main__": + # read original image + image = np.array(Image.open(r"..\image_data\lena.jpg")) + output = dilation(gray2binary(rgb2gray(image)), structuring_element) + # 
Save the output image + pil_img = Image.fromarray(output).convert("RGB") + pil_img.save("result_dilation.png") diff --git a/digital_image_processing/morphological_operations/erosion_operation.py b/digital_image_processing/morphological_operations/erosion_operation.py new file mode 100644 index 000000000000..4b0a5eee8c03 --- /dev/null +++ b/digital_image_processing/morphological_operations/erosion_operation.py @@ -0,0 +1,74 @@ +import numpy as np +from PIL import Image + + +def rgb2gray(rgb: np.array) -> np.array: + """ + Return gray image from rgb image + >>> rgb2gray(np.array([[[127, 255, 0]]])) + array([[187.6453]]) + >>> rgb2gray(np.array([[[0, 0, 0]]])) + array([[0.]]) + >>> rgb2gray(np.array([[[2, 4, 1]]])) + array([[3.0598]]) + >>> rgb2gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]])) + array([[159.0524, 90.0635, 117.6989]]) + """ + r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] + return 0.2989 * r + 0.5870 * g + 0.1140 * b + + +def gray2binary(gray: np.array) -> np.array: + """ + Return binary image from gray image + >>> gray2binary(np.array([[127, 255, 0]])) + array([[False, True, False]]) + >>> gray2binary(np.array([[0]])) + array([[False]]) + >>> gray2binary(np.array([[26.2409, 4.9315, 1.4729]])) + array([[False, False, False]]) + >>> gray2binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]])) + array([[False, True, False], + [False, True, False], + [False, True, False]]) + """ + return (127 < gray) & (gray <= 255) + + +def erosion(image: np.array, kernel: np.array) -> np.array: + """ + Return eroded image + >>> erosion(np.array([[True, True, False]]), np.array([[0, 1, 0]])) + array([[False, False, False]]) + >>> erosion(np.array([[True, False, False]]), np.array([[1, 1, 0]])) + array([[False, False, False]]) + """ + output = np.zeros_like(image) + image_padded = np.zeros( + (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) + ) + + # Copy image to padded image + image_padded[kernel.shape[0] - 2 : -1 :, kernel.shape[1] - 2 : -1 :] = image + + # Iterate over image & apply kernel + for x in range(image.shape[1]): + for y in range(image.shape[0]): + summation = ( + kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] + ).sum() + output[y, x] = int(summation == 5) + return output + + +# kernel to be applied +structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + +if __name__ == "__main__": + # read original image + image = np.array(Image.open(r"..\image_data\lena.jpg")) + # Apply erosion operation to a binary image + output = erosion(gray2binary(rgb2gray(image)), structuring_element) + # Save the output image + pil_img = Image.fromarray(output).convert("RGB") + pil_img.save("result_erosion.png") From 8dc7cdbc57e0a51c7ef0d6a5974d990c5f27cf4a Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 16 Oct 2021 18:57:38 +0200 Subject: [PATCH 0252/1543] Add tests to morse_code.py (#5337) * Add tests to morse_code.py @dhruvmanila @poyea Your reviews, please. 
* updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 7 +- ciphers/morse_code.py | 58 +++++++++++++++++ ciphers/morse_code_implementation.py | 97 ---------------------------- 3 files changed, 64 insertions(+), 98 deletions(-) create mode 100644 ciphers/morse_code.py delete mode 100644 ciphers/morse_code_implementation.py diff --git a/DIRECTORY.md b/DIRECTORY.md index d92dccca5afb..219877a3aee8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -53,6 +53,7 @@ * [A1Z26](https://github.com/TheAlgorithms/Python/blob/master/ciphers/a1z26.py) * [Affine Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/affine_cipher.py) * [Atbash](https://github.com/TheAlgorithms/Python/blob/master/ciphers/atbash.py) + * [Baconian Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/baconian_cipher.py) * [Base16](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base16.py) * [Base32](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base32.py) * [Base64 Encoding](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base64_encoding.py) @@ -70,7 +71,7 @@ * [Hill Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/hill_cipher.py) * [Mixed Keyword Cypher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/mixed_keyword_cypher.py) * [Mono Alphabetic Ciphers](https://github.com/TheAlgorithms/Python/blob/master/ciphers/mono_alphabetic_ciphers.py) - * [Morse Code Implementation](https://github.com/TheAlgorithms/Python/blob/master/ciphers/morse_code_implementation.py) + * [Morse Code](https://github.com/TheAlgorithms/Python/blob/master/ciphers/morse_code.py) * [Onepad Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/onepad_cipher.py) * [Playfair Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/playfair_cipher.py) * [Porta Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/porta_cipher.py) @@ -328,10 +329,12 @@ * [Graph Matrix](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_matrix.py) * [Graphs Floyd Warshall](https://github.com/TheAlgorithms/Python/blob/master/graphs/graphs_floyd_warshall.py) * [Greedy Best First](https://github.com/TheAlgorithms/Python/blob/master/graphs/greedy_best_first.py) + * [Greedy Min Vertex Cover](https://github.com/TheAlgorithms/Python/blob/master/graphs/greedy_min_vertex_cover.py) * [Kahns Algorithm Long](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_long.py) * [Kahns Algorithm Topo](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_topo.py) * [Karger](https://github.com/TheAlgorithms/Python/blob/master/graphs/karger.py) * [Markov Chain](https://github.com/TheAlgorithms/Python/blob/master/graphs/markov_chain.py) + * [Matching Min Vertex Cover](https://github.com/TheAlgorithms/Python/blob/master/graphs/matching_min_vertex_cover.py) * [Minimum Spanning Tree Boruvka](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_boruvka.py) * [Minimum Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal.py) * [Minimum Spanning Tree Kruskal2](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal2.py) @@ -497,6 +500,7 @@ * [Prime Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_numbers.py) * [Prime Sieve 
Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_sieve_eratosthenes.py) * [Primelib](https://github.com/TheAlgorithms/Python/blob/master/maths/primelib.py) + * [Proth Number](https://github.com/TheAlgorithms/Python/blob/master/maths/proth_number.py) * [Pythagoras](https://github.com/TheAlgorithms/Python/blob/master/maths/pythagoras.py) * [Qr Decomposition](https://github.com/TheAlgorithms/Python/blob/master/maths/qr_decomposition.py) * [Quadratic Equations Complex Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/quadratic_equations_complex_numbers.py) @@ -951,6 +955,7 @@ * [Current Stock Price](https://github.com/TheAlgorithms/Python/blob/master/web_programming/current_stock_price.py) * [Current Weather](https://github.com/TheAlgorithms/Python/blob/master/web_programming/current_weather.py) * [Daily Horoscope](https://github.com/TheAlgorithms/Python/blob/master/web_programming/daily_horoscope.py) + * [Download Images From Google Query](https://github.com/TheAlgorithms/Python/blob/master/web_programming/download_images_from_google_query.py) * [Emails From Url](https://github.com/TheAlgorithms/Python/blob/master/web_programming/emails_from_url.py) * [Fetch Bbc News](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_bbc_news.py) * [Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_github_info.py) diff --git a/ciphers/morse_code.py b/ciphers/morse_code.py new file mode 100644 index 000000000000..0370c26fe4a6 --- /dev/null +++ b/ciphers/morse_code.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 + +""" +Python program to translate to and from Morse code. + +https://en.wikipedia.org/wiki/Morse_code +""" + +# fmt: off +MORSE_CODE_DICT = { + "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.", + "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.", + "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-", + "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----", + "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...", + "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.", + ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.", + "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-", + "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/" +} # Exclamation mark is not in ITU-R recommendation +# fmt: on +REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()} + + +def encrypt(message: str) -> str: + """ + >>> encrypt("Sos!") + '... --- ... -.-.--' + >>> encrypt("SOS!") == encrypt("sos!") + True + """ + return " ".join(MORSE_CODE_DICT[char] for char in message.upper()) + + +def decrypt(message: str) -> str: + """ + >>> decrypt('... --- ... -.-.--') + 'SOS!' + """ + return "".join(REVERSE_DICT[char] for char in message.split()) + + +def main() -> None: + """ + >>> s = "".join(MORSE_CODE_DICT) + >>> decrypt(encrypt(s)) == s + True + """ + message = "Morse code here!" 
+ print(message) + message = encrypt(message) + print(message) + message = decrypt(message) + print(message) + + +if __name__ == "__main__": + main() diff --git a/ciphers/morse_code_implementation.py b/ciphers/morse_code_implementation.py deleted file mode 100644 index eec4183fa56e..000000000000 --- a/ciphers/morse_code_implementation.py +++ /dev/null @@ -1,97 +0,0 @@ -# Python program to implement Morse Code Translator - -# Dictionary representing the morse code chart -MORSE_CODE_DICT = { - "A": ".-", - "B": "-...", - "C": "-.-.", - "D": "-..", - "E": ".", - "F": "..-.", - "G": "--.", - "H": "....", - "I": "..", - "J": ".---", - "K": "-.-", - "L": ".-..", - "M": "--", - "N": "-.", - "O": "---", - "P": ".--.", - "Q": "--.-", - "R": ".-.", - "S": "...", - "T": "-", - "U": "..-", - "V": "...-", - "W": ".--", - "X": "-..-", - "Y": "-.--", - "Z": "--..", - "1": ".----", - "2": "..---", - "3": "...--", - "4": "....-", - "5": ".....", - "6": "-....", - "7": "--...", - "8": "---..", - "9": "----.", - "0": "-----", - "&": ".-...", - "@": ".--.-.", - ":": "---...", - ",": "--..--", - ".": ".-.-.-", - "'": ".----.", - '"': ".-..-.", - "?": "..--..", - "/": "-..-.", - "=": "-...-", - "+": ".-.-.", - "-": "-....-", - "(": "-.--.", - ")": "-.--.-", - # Exclamation mark is not in ITU-R recommendation - "!": "-.-.--", -} - - -def encrypt(message: str) -> str: - cipher = "" - for letter in message: - if letter != " ": - cipher += MORSE_CODE_DICT[letter] + " " - else: - cipher += "/ " - - # Remove trailing space added on line 64 - return cipher[:-1] - - -def decrypt(message: str) -> str: - decipher = "" - letters = message.split(" ") - for letter in letters: - if letter != "/": - decipher += list(MORSE_CODE_DICT.keys())[ - list(MORSE_CODE_DICT.values()).index(letter) - ] - else: - decipher += " " - - return decipher - - -def main() -> None: - message = "Morse code here" - result = encrypt(message.upper()) - print(result) - - message = result - result = decrypt(message) - print(result) - - -if __name__ == "__main__": - main() From 7ef2e4d1d06a0d7497bb7cbca0cad32fc1d9c6cd Mon Sep 17 00:00:00 2001 From: Srishtik Bhandarkar <53395406+srishtik2310@users.noreply.github.com> Date: Sat, 16 Oct 2021 22:38:41 +0530 Subject: [PATCH 0253/1543] Add Project Euler Problem 092 (#5091) * adde solution to problem 092 * added solution to problem 092 * fixed the pre-comit shebang issue --- project_euler/problem_092/__init__.py | 0 project_euler/problem_092/sol1.py | 93 +++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) create mode 100644 project_euler/problem_092/__init__.py create mode 100644 project_euler/problem_092/sol1.py diff --git a/project_euler/problem_092/__init__.py b/project_euler/problem_092/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_092/sol1.py b/project_euler/problem_092/sol1.py new file mode 100644 index 000000000000..a02629a7bc7d --- /dev/null +++ b/project_euler/problem_092/sol1.py @@ -0,0 +1,93 @@ +""" +Project Euler Problem 092: https://projecteuler.net/problem=92 +Square digit chains +A number chain is created by continuously adding the square of the digits in +a number to form a new number until it has been seen before. +For example, +44 → 32 → 13 → 10 → 1 → 1 +85 → 89 → 145 → 42 → 20 → 4 → 16 → 37 → 58 → 89 +Therefore any chain that arrives at 1 or 89 will become stuck in an endless loop. +What is most amazing is that EVERY starting number will eventually arrive at 1 or 89. 
+How many starting numbers below ten million will arrive at 89? +""" + + +def next_number(number: int) -> int: + """ + Returns the next number of the chain by adding the square of each digit + to form a neww number. + For example if number = 12, next_number() will return 1^2 + 2^2 = 5. + Therefore 5 is the next number of the chain. + >>> next_number(44) + 32 + >>> next_number(10) + 1 + >>> next_number(32) + 13 + """ + num = 0 + for i in range(len(str(number))): + num += int(str(number)[i]) ** 2 + + return num + + +def chain(number: int) -> bool: + """ + Generates the chain of numbers until the nest number generated is 1 0r 89. + for example, if starting number is 44, then the function generates the + following chain of numbers. + chain: 44 → 32 → 13 → 10 → 1 → 1 + once the next number generated is 1 or 89, the function + Returns True if the next number generated by next_number() if 1. + Returns False if the next number generated by next_number() is 89. + >>> chain(10) + True + >>> chain(58) + False + >>> chain(1) + True + """ + while number != 1 and number != 89: + number = next_number(number) + + if number == 1: + return True + + elif number == 89: + return False + + +def solution(number: int = 10000000) -> int: + """ + The function returns the total numbers that end up in 89 after the chain generation. + The function accepts a range number and the function checks all the values + under value number. + if the chain generation leads to the end number as 1 or 89. If the chain() + returns True, then total is incremented, implying that the number we + started with ended up with 1 else total2 is incremented, implying that + the number we started with ended up in 89 after chain generation. + But the function returns total2 as the requirement of question is + to find out how many ended up in 89. 
+ + >>> solution(100) + 80 + >>> solution(10000000) + 8581146 + """ + total = 0 + total2 = 0 + for i in range(1, number): + val = chain(i) + + if val is True: + total += 1 + + elif val is False: + total2 += 1 + + return total2 + + +if __name__ == "__main__": + print(f"{solution() = }") From 4bf2eedd3c234eddd04fbea314005ca06d32e923 Mon Sep 17 00:00:00 2001 From: John Law Date: Sun, 17 Oct 2021 14:07:45 +0800 Subject: [PATCH 0254/1543] [mypy] fix mypy error in Project Euler Problem 092 solution 1 (#5357) * fix mypy error * updating DIRECTORY.md * simplify code * run black * fix doc consistency * Fix doc * fix doc Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 5 +++ project_euler/problem_092/sol1.py | 53 +++++++++++-------------------- 2 files changed, 23 insertions(+), 35 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 219877a3aee8..c197dd88032e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -211,6 +211,9 @@ * Histogram Equalization * [Histogram Stretch](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/histogram_equalization/histogram_stretch.py) * [Index Calculation](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/index_calculation.py) + * Morphological Operations + * [Dilation Operation](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/morphological_operations/dilation_operation.py) + * [Erosion Operation](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/morphological_operations/erosion_operation.py) * Resize * [Resize](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/resize/resize.py) * Rotation @@ -778,6 +781,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_089/sol1.py) * Problem 091 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_091/sol1.py) + * Problem 092 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_092/sol1.py) * Problem 097 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_097/sol1.py) * Problem 099 diff --git a/project_euler/problem_092/sol1.py b/project_euler/problem_092/sol1.py index a02629a7bc7d..dcda3a48679e 100644 --- a/project_euler/problem_092/sol1.py +++ b/project_euler/problem_092/sol1.py @@ -17,7 +17,7 @@ def next_number(number: int) -> int: Returns the next number of the chain by adding the square of each digit to form a neww number. For example if number = 12, next_number() will return 1^2 + 2^2 = 5. - Therefore 5 is the next number of the chain. + Therefore, 5 is the next number of the chain. >>> next_number(44) 32 >>> next_number(10) @@ -25,22 +25,22 @@ def next_number(number: int) -> int: >>> next_number(32) 13 """ - num = 0 - for i in range(len(str(number))): - num += int(str(number)[i]) ** 2 + sum_of_digits_squared = 0 + while number: + sum_of_digits_squared += (number % 10) ** 2 + number //= 10 - return num + return sum_of_digits_squared def chain(number: int) -> bool: """ - Generates the chain of numbers until the nest number generated is 1 0r 89. - for example, if starting number is 44, then the function generates the - following chain of numbers. - chain: 44 → 32 → 13 → 10 → 1 → 1 - once the next number generated is 1 or 89, the function - Returns True if the next number generated by next_number() if 1. - Returns False if the next number generated by next_number() is 89. 
+ The function generates the chain of numbers until the next number is 1 or 89. + For example, if starting number is 44, then the function generates the + following chain of numbers: + 44 → 32 → 13 → 10 → 1 → 1. + Once the next number generated is 1 or 89, the function returns whether + or not the the next number generated by next_number() is 1. >>> chain(10) True >>> chain(58) @@ -51,43 +51,26 @@ def chain(number: int) -> bool: while number != 1 and number != 89: number = next_number(number) - if number == 1: - return True - - elif number == 89: - return False + return number == 1 def solution(number: int = 10000000) -> int: """ - The function returns the total numbers that end up in 89 after the chain generation. + The function returns the number of integers that end up being 89 in each chain. The function accepts a range number and the function checks all the values under value number. - if the chain generation leads to the end number as 1 or 89. If the chain() - returns True, then total is incremented, implying that the number we - started with ended up with 1 else total2 is incremented, implying that - the number we started with ended up in 89 after chain generation. - But the function returns total2 as the requirement of question is - to find out how many ended up in 89. >>> solution(100) 80 >>> solution(10000000) 8581146 """ - total = 0 - total2 = 0 - for i in range(1, number): - val = chain(i) - - if val is True: - total += 1 + return sum(1 for i in range(1, number) if not chain(i)) - elif val is False: - total2 += 1 - return total2 +if __name__ == "__main__": + import doctest + doctest.testmod() -if __name__ == "__main__": print(f"{solution() = }") From 08d4d226d797c94254f53be0c71d3304ea093bd4 Mon Sep 17 00:00:00 2001 From: Meysam Date: Sun, 17 Oct 2021 19:26:12 +0300 Subject: [PATCH 0255/1543] [mypy] Fix type annotations for graphs/boruvka (#4867) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: type annotations for pypi 🏷️ Fixes #4052 * updating DIRECTORY.md * apply suggestions from code review Co-authored-by: Christian Clauss Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- graphs/boruvka.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/graphs/boruvka.py b/graphs/boruvka.py index 3fa5c6fd2a26..eea0b0009941 100644 --- a/graphs/boruvka.py +++ b/graphs/boruvka.py @@ -24,6 +24,7 @@ Details: https://en.wikipedia.org/wiki/Bor%C5%AFvka%27s_algorithm """ +from __future__ import annotations class Graph: @@ -39,8 +40,8 @@ def __init__(self, num_of_nodes: int) -> None: """ self.m_num_of_nodes = num_of_nodes - self.m_edges = [] - self.m_component = {} + self.m_edges: list[list[int]] = [] + self.m_component: dict[int, int] = {} def add_edge(self, u_node: int, v_node: int, weight: int) -> None: """Adds an edge in the format [first, second, edge weight] to graph.""" @@ -83,7 +84,7 @@ def boruvka(self) -> None: component_size = [] mst_weight = 0 - minimum_weight_edge = [-1] * self.m_num_of_nodes + minimum_weight_edge: list[int] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes): From 1e64bf4600e933e820701a769a453ee379c8ea2c Mon Sep 17 00:00:00 2001 From: Atharva Deshpande Date: Mon, 18 Oct 2021 10:16:23 +0530 Subject: [PATCH 0256/1543] Re-organize math/series (#5044) * added harmonic mean * Update maths/series/harmonic_mean.py Updated the write-up of reference given in the code. 
Co-authored-by: John Law * changes in arithmetic and geometric mean code * mean and series added in a single file Co-authored-by: John Law --- .../{arithmetic_mean.py => arithmetic.py} | 25 +++-- .../{geometric_mean.py => geometric.py} | 30 +++--- maths/series/harmonic.py | 92 +++++++++++++++++++ 3 files changed, 129 insertions(+), 18 deletions(-) rename maths/series/{arithmetic_mean.py => arithmetic.py} (68%) rename maths/series/{geometric_mean.py => geometric.py} (75%) create mode 100644 maths/series/harmonic.py diff --git a/maths/series/arithmetic_mean.py b/maths/series/arithmetic.py similarity index 68% rename from maths/series/arithmetic_mean.py rename to maths/series/arithmetic.py index b5d64b63ac3f..dc28c5c7bc5f 100644 --- a/maths/series/arithmetic_mean.py +++ b/maths/series/arithmetic.py @@ -1,20 +1,35 @@ """ -ARITHMETIC MEAN : https://en.wikipedia.org/wiki/Arithmetic_mean +Arithmetic mean +Reference: https://en.wikipedia.org/wiki/Arithmetic_mean +Arithmetic series +Reference: https://en.wikipedia.org/wiki/Arithmetic_series +(The URL above will redirect you to arithmetic progression) """ def is_arithmetic_series(series: list) -> bool: """ checking whether the input series is arithmetic series or not - >>> is_arithmetic_series([2, 4, 6]) True >>> is_arithmetic_series([3, 6, 12, 24]) False >>> is_arithmetic_series([1, 2, 3]) True + >>> is_arithmetic_series(4) + Traceback (most recent call last): + ... + ValueError: Input series is not valid, valid series - [2, 4, 6] + >>> is_arithmetic_series([]) + Traceback (most recent call last): + ... + ValueError: Input list must be a non empty list """ + if not isinstance(series, list): + raise ValueError("Input series is not valid, valid series - [2, 4, 6]") + if len(series) == 0: + raise ValueError("Input list must be a non empty list") if len(series) == 1: return True common_diff = series[1] - series[0] @@ -37,9 +52,7 @@ def arithmetic_mean(series: list) -> float: ... ValueError: Input series is not valid, valid series - [2, 4, 6] >>> arithmetic_mean([4, 8, 1]) - Traceback (most recent call last): - ... - ValueError: Input list is not an arithmetic series + 4.333333333333333 >>> arithmetic_mean([1, 2, 3]) 2.0 >>> arithmetic_mean([]) @@ -52,8 +65,6 @@ def arithmetic_mean(series: list) -> float: raise ValueError("Input series is not valid, valid series - [2, 4, 6]") if len(series) == 0: raise ValueError("Input list must be a non empty list") - if not is_arithmetic_series(series): - raise ValueError("Input list is not an arithmetic series") answer = 0 for val in series: answer += val diff --git a/maths/series/geometric_mean.py b/maths/series/geometric.py similarity index 75% rename from maths/series/geometric_mean.py rename to maths/series/geometric.py index 50ae54ad6574..7b6239b1585d 100644 --- a/maths/series/geometric_mean.py +++ b/maths/series/geometric.py @@ -1,12 +1,15 @@ """ -GEOMETRIC MEAN : https://en.wikipedia.org/wiki/Geometric_mean +Geometric Mean +Reference : https://en.wikipedia.org/wiki/Geometric_mean + +Geometric series +Reference: https://en.wikipedia.org/wiki/Geometric_series """ def is_geometric_series(series: list) -> bool: """ checking whether the input series is geometric series or not - >>> is_geometric_series([2, 4, 8]) True >>> is_geometric_series([3, 6, 12, 24]) @@ -15,8 +18,19 @@ def is_geometric_series(series: list) -> bool: False >>> is_geometric_series([0, 0, 3]) False - + >>> is_geometric_series([]) + Traceback (most recent call last): + ... 
+ ValueError: Input list must be a non empty list + >>> is_geometric_series(4) + Traceback (most recent call last): + ... + ValueError: Input series is not valid, valid series - [2, 4, 8] """ + if not isinstance(series, list): + raise ValueError("Input series is not valid, valid series - [2, 4, 8]") + if len(series) == 0: + raise ValueError("Input list must be a non empty list") if len(series) == 1: return True try: @@ -44,13 +58,9 @@ def geometric_mean(series: list) -> float: ... ValueError: Input series is not valid, valid series - [2, 4, 8] >>> geometric_mean([1, 2, 3]) - Traceback (most recent call last): - ... - ValueError: Input list is not a geometric series + 1.8171205928321397 >>> geometric_mean([0, 2, 3]) - Traceback (most recent call last): - ... - ValueError: Input list is not a geometric series + 0.0 >>> geometric_mean([]) Traceback (most recent call last): ... @@ -61,8 +71,6 @@ def geometric_mean(series: list) -> float: raise ValueError("Input series is not valid, valid series - [2, 4, 8]") if len(series) == 0: raise ValueError("Input list must be a non empty list") - if not is_geometric_series(series): - raise ValueError("Input list is not a geometric series") answer = 1 for value in series: answer *= value diff --git a/maths/series/harmonic.py b/maths/series/harmonic.py new file mode 100644 index 000000000000..50f29c93dd5f --- /dev/null +++ b/maths/series/harmonic.py @@ -0,0 +1,92 @@ +""" +Harmonic mean +Reference: https://en.wikipedia.org/wiki/Harmonic_mean + +Harmonic series +Reference: https://en.wikipedia.org/wiki/Harmonic_series(mathematics) +""" + + +def is_harmonic_series(series: list) -> bool: + """ + checking whether the input series is arithmetic series or not + >>> is_harmonic_series([ 1, 2/3, 1/2, 2/5, 1/3]) + True + >>> is_harmonic_series([ 1, 2/3, 2/5, 1/3]) + False + >>> is_harmonic_series([1, 2, 3]) + False + >>> is_harmonic_series([1/2, 1/3, 1/4]) + True + >>> is_harmonic_series([2/5, 2/10, 2/15, 2/20, 2/25]) + True + >>> is_harmonic_series(4) + Traceback (most recent call last): + ... + ValueError: Input series is not valid, valid series - [1, 2/3, 2] + >>> is_harmonic_series([]) + Traceback (most recent call last): + ... + ValueError: Input list must be a non empty list + >>> is_harmonic_series([0]) + Traceback (most recent call last): + ... + ValueError: Input series cannot have 0 as an element + >>> is_harmonic_series([1,2,0,6]) + Traceback (most recent call last): + ... + ValueError: Input series cannot have 0 as an element + """ + if not isinstance(series, list): + raise ValueError("Input series is not valid, valid series - [1, 2/3, 2]") + if len(series) == 0: + raise ValueError("Input list must be a non empty list") + if len(series) == 1 and series[0] != 0: + return True + rec_series = [] + series_len = len(series) + for i in range(0, series_len): + if series[i] == 0: + raise ValueError("Input series cannot have 0 as an element") + rec_series.append(1 / series[i]) + common_diff = rec_series[1] - rec_series[0] + for index in range(2, series_len): + if rec_series[index] - rec_series[index - 1] != common_diff: + return False + return True + + +def harmonic_mean(series: list) -> float: + """ + return the harmonic mean of series + + >>> harmonic_mean([1, 4, 4]) + 2.0 + >>> harmonic_mean([3, 6, 9, 12]) + 5.759999999999999 + >>> harmonic_mean(4) + Traceback (most recent call last): + ... 
+ ValueError: Input series is not valid, valid series - [2, 4, 6] + >>> harmonic_mean([1, 2, 3]) + 1.6363636363636365 + >>> harmonic_mean([]) + Traceback (most recent call last): + ... + ValueError: Input list must be a non empty list + + """ + if not isinstance(series, list): + raise ValueError("Input series is not valid, valid series - [2, 4, 6]") + if len(series) == 0: + raise ValueError("Input list must be a non empty list") + answer = 0 + for val in series: + answer += 1 / val + return len(series) / answer + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0935ab0cb2c3634b553406eefabb5f97011f2e84 Mon Sep 17 00:00:00 2001 From: Jaydeep Das Date: Mon, 18 Oct 2021 12:46:42 +0530 Subject: [PATCH 0257/1543] Added giphy.py to fetch gifs on a given topic (#5378) * Added giphy.py to fetch gifs on a given topic * Modified code [*]Added doctest [*]Formatted with black * Minor change * Minor refactoring to avoid name clash * Made necessary changes as per review * Update web_programming/giphy.py Co-authored-by: Christian Clauss * Apply suggestions from code review * Final cleanup * Placate psf/black Co-authored-by: Christian Clauss --- web_programming/giphy.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 web_programming/giphy.py diff --git a/web_programming/giphy.py b/web_programming/giphy.py new file mode 100644 index 000000000000..dc8c6be08caa --- /dev/null +++ b/web_programming/giphy.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python3 +import requests + +giphy_api_key = "YOUR API KEY" +# Can be fetched from https://developers.giphy.com/dashboard/ + + +def get_gifs(query: str, api_key: str = giphy_api_key) -> list: + """ + Get a list of URLs of GIFs based on a given query.. + """ + formatted_query = "+".join(query.split()) + url = f"http://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}" + gifs = requests.get(url).json()["data"] + return [gif["url"] for gif in gifs] + + +if __name__ == "__main__": + print("\n".join(get_gifs("space ship"))) From fa88559cab4aa2e935df97b8e2710b34402fc10f Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj <89947037+Rohanrbharadwaj@users.noreply.github.com> Date: Mon, 18 Oct 2021 19:05:35 +0530 Subject: [PATCH 0258/1543] Create join.py (#5363) * Create join.py Because we have a split.py * Update join.py * Update join.py * Update join.py * Update join.py * Update join.py * Update strings/join.py Co-authored-by: John Law * Update join.py * Update join.py * Update join.py * Update join.py Co-authored-by: John Law --- strings/join.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 strings/join.py diff --git a/strings/join.py b/strings/join.py new file mode 100644 index 000000000000..0cb88b76065d --- /dev/null +++ b/strings/join.py @@ -0,0 +1,32 @@ +""" +Program to join a list of strings with a given separator +""" + + +def join(separator: str, separated: list) -> str: + """ + >>> join("", ["a", "b", "c", "d"]) + 'abcd' + >>> join("#", ["a", "b", "c", "d"]) + 'a#b#c#d' + >>> join("#", "a") + 'a' + >>> join(" ", ["You", "are", "amazing!"]) + 'You are amazing!' + >>> join("#", ["a", "b", "c", 1]) + Traceback (most recent call last): + ... 
+ Exception: join() accepts only strings to be joined + """ + joined = "" + for word_or_phrase in separated: + if not isinstance(word_or_phrase, str): + raise Exception("join() accepts only strings to be joined") + joined += word_or_phrase + separator + return joined.strip(separator) + + +if "__name__" == "__main__": + from doctest import testmod + + testmod() From 4af521504227c4cada677538163033779cd4df07 Mon Sep 17 00:00:00 2001 From: iradonov <86876427+iradonov@users.noreply.github.com> Date: Mon, 18 Oct 2021 19:46:47 +0300 Subject: [PATCH 0259/1543] added Schur complement to linear algebra (#4793) * added schur complement and tests to linear algebra * updated according to checklist * updated variable names and typing * added two testcases for input validation * fixed import order Co-authored-by: Ivan Radonov --- linear_algebra/src/schur_complement.py | 94 ++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 linear_algebra/src/schur_complement.py diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py new file mode 100644 index 000000000000..f3cb736d9084 --- /dev/null +++ b/linear_algebra/src/schur_complement.py @@ -0,0 +1,94 @@ +import unittest + +import numpy as np + + +def schur_complement( + mat_a: np.ndarray, + mat_b: np.ndarray, + mat_c: np.ndarray, + pseudo_inv: np.ndarray = None, +) -> np.ndarray: + """ + Schur complement of a symmetric matrix X given as a 2x2 block matrix + consisting of matrices A, B and C. + Matrix A must be quadratic and non-singular. + In case A is singular, a pseudo-inverse may be provided using + the pseudo_inv argument. + + Link to Wiki: https://en.wikipedia.org/wiki/Schur_complement + See also Convex Optimization – Boyd and Vandenberghe, A.5.5 + >>> import numpy as np + >>> a = np.array([[1, 2], [2, 1]]) + >>> b = np.array([[0, 3], [3, 0]]) + >>> c = np.array([[2, 1], [6, 3]]) + >>> schur_complement(a, b, c) + array([[ 5., -5.], + [ 0., 6.]]) + """ + shape_a = np.shape(mat_a) + shape_b = np.shape(mat_b) + shape_c = np.shape(mat_c) + + if shape_a[0] != shape_b[0]: + raise ValueError( + f"Expected the same number of rows for A and B. \ + Instead found A of size {shape_a} and B of size {shape_b}" + ) + + if shape_b[1] != shape_c[1]: + raise ValueError( + f"Expected the same number of columns for B and C. \ + Instead found B of size {shape_b} and C of size {shape_c}" + ) + + a_inv = pseudo_inv + if a_inv is None: + try: + a_inv = np.linalg.inv(mat_a) + except np.linalg.LinAlgError: + raise ValueError( + "Input matrix A is not invertible. Cannot compute Schur complement." 
+ ) + + return mat_c - mat_b.T @ a_inv @ mat_b + + +class TestSchurComplement(unittest.TestCase): + def test_schur_complement(self) -> None: + a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]]) + b = np.array([[0, 3], [3, 0], [2, 3]]) + c = np.array([[2, 1], [6, 3]]) + + s = schur_complement(a, b, c) + + input_matrix = np.block([[a, b], [b.T, c]]) + + det_x = np.linalg.det(input_matrix) + det_a = np.linalg.det(a) + det_s = np.linalg.det(s) + + self.assertAlmostEqual(det_x, det_a * det_s) + + def test_improper_a_b_dimensions(self) -> None: + a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]]) + b = np.array([[0, 3], [3, 0], [2, 3]]) + c = np.array([[2, 1], [6, 3]]) + + with self.assertRaises(ValueError): + schur_complement(a, b, c) + + def test_improper_b_c_dimensions(self) -> None: + a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]]) + b = np.array([[0, 3], [3, 0], [2, 3]]) + c = np.array([[2, 1, 3], [6, 3, 5]]) + + with self.assertRaises(ValueError): + schur_complement(a, b, c) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + unittest.main() From d28463c75df4dbaee795c99c05a4021d4cc5e386 Mon Sep 17 00:00:00 2001 From: Jainendra Mandavi Date: Mon, 18 Oct 2021 22:23:10 +0530 Subject: [PATCH 0260/1543] Create count_1s_brian_kernighan_method (#5385) Ref - http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan --- .../count_1s_brian_kernighan_method.py | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 bit_manipulation/count_1s_brian_kernighan_method.py diff --git a/bit_manipulation/count_1s_brian_kernighan_method.py b/bit_manipulation/count_1s_brian_kernighan_method.py new file mode 100644 index 000000000000..d217af90b3d9 --- /dev/null +++ b/bit_manipulation/count_1s_brian_kernighan_method.py @@ -0,0 +1,43 @@ +def get_1s_count(number: int) -> int: + """ + Count the number of set bits in a 32 bit integer using Brian Kernighan's way. + Ref - http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan + >>> get_1s_count(25) + 3 + >>> get_1s_count(37) + 3 + >>> get_1s_count(21) + 3 + >>> get_1s_count(58) + 4 + >>> get_1s_count(0) + 0 + >>> get_1s_count(256) + 1 + >>> get_1s_count(-1) + Traceback (most recent call last): + ... + ValueError: the value of input must be positive + >>> get_1s_count(0.8) + Traceback (most recent call last): + ... + TypeError: Input value must be an 'int' type + """ + if number < 0: + raise ValueError("the value of input must be positive") + elif isinstance(number, float): + raise TypeError("Input value must be an 'int' type") + count = 0 + while number: + # This way we arrive at next set bit (next 1) instead of looping + # through each bit and checking for 1s hence the + # loop won't run 32 times it will only run the number of `1` times + number &= number - 1 + count += 1 + return count + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 66c96aa0378eca8bc2c39ca6ae2204096df4c728 Mon Sep 17 00:00:00 2001 From: Sabari Ganesh <64348740+SabariGanesh-K@users.noreply.github.com> Date: Tue, 19 Oct 2021 10:56:03 +0530 Subject: [PATCH 0261/1543] Added length unit conversions (#5373) * Added length unit conversions Conversion of length units were added with respective tests being implemented and passed. 
Available Units:- Metre,Kilometre,Feet,Inch,Centimeter,Yard,Foot,Mile,Millimeter * Formatted File File was formatted to go as per repo rules * Reformatted file * Reformatted code once again * Added more test Added test to evaluate whether the code handles wrong arguements passed * Update length_conversions.py * Update length_conversions.py * Update length_conversions.py * Update length_conversions.py * Update length_conversions.py * Update length_conversions.py * Update length_conversions.py * Fixed Minor errors in test One of the test was failing and it was fixed Co-authored-by: Christian Clauss --- conversions/length_conversions.py | 108 ++++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 conversions/length_conversions.py diff --git a/conversions/length_conversions.py b/conversions/length_conversions.py new file mode 100644 index 000000000000..811a9a916b70 --- /dev/null +++ b/conversions/length_conversions.py @@ -0,0 +1,108 @@ +""" +Conversion of length units. +Available Units:- Metre,Kilometre,Feet,Inch,Centimeter,Yard,Foot,Mile,Millimeter + +USAGE : +-> Import this file into their respective project. +-> Use the function length_conversion() for conversion of length units. +-> Parameters : + -> value : The number of from units you want to convert + -> from_type : From which type you want to convert + -> to_type : To which type you want to convert + +REFERENCES : +-> Wikipedia reference: https://en.wikipedia.org/wiki/Meter +-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilometer +-> Wikipedia reference: https://en.wikipedia.org/wiki/Feet +-> Wikipedia reference: https://en.wikipedia.org/wiki/Inch +-> Wikipedia reference: https://en.wikipedia.org/wiki/Centimeter +-> Wikipedia reference: https://en.wikipedia.org/wiki/Yard +-> Wikipedia reference: https://en.wikipedia.org/wiki/Foot +-> Wikipedia reference: https://en.wikipedia.org/wiki/Mile +-> Wikipedia reference: https://en.wikipedia.org/wiki/Millimeter +""" + +from collections import namedtuple + +from_to = namedtuple("from_to", "from_ to") + +METRIC_CONVERSION = { + "meter": from_to(1, 1), + "kilometer": from_to(1000, 0.001), + "feet": from_to(0.3048, 3.28084), + "inch": from_to(0.0254, 39.3701), + "centimeter": from_to(0.01, 100), + "yard": from_to(0.9144, 1.09361), + "foot": from_to(0.3048, 3.28084), + "mile": from_to(1609.34, 0.000621371), + "millimeter": from_to(0.001, 1000), +} + + +def length_conversion(value: float, from_type: str, to_type: str) -> float: + """ + Conversion between length units. 
+ + >>> length_conversion(4, "meter", "feet") + 13.12336 + >>> length_conversion(1, "meter", "kilometer") + 0.001 + >>> length_conversion(1, "kilometer", "inch") + 39370.1 + >>> length_conversion(3, "kilometer", "mile") + 1.8641130000000001 + >>> length_conversion(2, "feet", "meter") + 0.6096 + >>> length_conversion(4, "feet", "yard") + 1.333329312 + >>> length_conversion(1, "inch", "meter") + 0.0254 + >>> length_conversion(2, "inch", "mile") + 3.15656468e-05 + >>> length_conversion(2, "centimeter", "millimeter") + 20.0 + >>> length_conversion(2, "centimeter", "yard") + 0.0218722 + >>> length_conversion(4, "yard", "meter") + 3.6576 + >>> length_conversion(4, "yard", "kilometer") + 0.0036576 + >>> length_conversion(3, "foot", "meter") + 0.9144000000000001 + >>> length_conversion(3, "foot", "inch") + 36.00001944 + >>> length_conversion(4, "mile", "kilometer") + 6.43736 + >>> length_conversion(2, "mile", "inch") + 126719.753468 + >>> length_conversion(3, "millimeter", "centimeter") + 0.3 + >>> length_conversion(3, "millimeter", "inch") + 0.1181103 + >>> length_conversion(4, "wrongUnit", "inch") + Traceback (most recent call last): + File "/usr/lib/python3.8/doctest.py", line 1336, in __run + exec(compile(example.source, filename, "single", + File "", line 1, in + length_conversion(4, "wrongUnit", "inch") + File "", line 85, in length_conversion + ValueError: Invalid 'from_type' value: 'wrongUnit' Supported values are: + meter, kilometer, feet, inch, centimeter, yard, foot, mile, millimeter + """ + if from_type not in METRIC_CONVERSION: + raise ValueError( + f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + + ", ".join(METRIC_CONVERSION) + ) + if to_type not in METRIC_CONVERSION: + raise ValueError( + f"Invalid 'to_type' value: {to_type!r}. 
Supported values are:\n" + + ", ".join(METRIC_CONVERSION) + ) + return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From aa0ace4df7dfff3a70685466e96c51dd264e5083 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 19 Oct 2021 08:05:20 +0200 Subject: [PATCH 0262/1543] Remove exception detail from doctest (#5430) * Remove exception detail from doctest These details are configuration dependant so should be removed according to https://docs.python.org/3/library/doctest.html * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 10 ++++++++-- conversions/length_conversions.py | 6 +----- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index c197dd88032e..cf187a0db8a2 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -32,6 +32,7 @@ * [Binary Shifts](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_shifts.py) * [Binary Twos Complement](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_twos_complement.py) * [Binary Xor Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_xor_operator.py) + * [Count 1S Brian Kernighan Method](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/count_1s_brian_kernighan_method.py) * [Count Number Of One Bits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/count_number_of_one_bits.py) * [Reverse Bits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/single_bit_manipulation_operations.py) @@ -112,6 +113,7 @@ * [Decimal To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_octal.py) * [Hex To Bin](https://github.com/TheAlgorithms/Python/blob/master/conversions/hex_to_bin.py) * [Hexadecimal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/hexadecimal_to_decimal.py) + * [Length Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/length_conversions.py) * [Molecular Chemistry](https://github.com/TheAlgorithms/Python/blob/master/conversions/molecular_chemistry.py) * [Octal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/octal_to_decimal.py) * [Prefix Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions.py) @@ -381,6 +383,7 @@ * [Polynom For Points](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/polynom_for_points.py) * [Power Iteration](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/power_iteration.py) * [Rayleigh Quotient](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/rayleigh_quotient.py) + * [Schur Complement](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/schur_complement.py) * [Test Linear Algebra](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/test_linear_algebra.py) * [Transformations 2D](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/transformations_2d.py) @@ -513,9 +516,10 @@ * [Runge Kutta](https://github.com/TheAlgorithms/Python/blob/master/maths/runge_kutta.py) * [Segmented Sieve](https://github.com/TheAlgorithms/Python/blob/master/maths/segmented_sieve.py) * Series - * [Arithmetic 
Mean](https://github.com/TheAlgorithms/Python/blob/master/maths/series/arithmetic_mean.py) - * [Geometric Mean](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric_mean.py) + * [Arithmetic](https://github.com/TheAlgorithms/Python/blob/master/maths/series/arithmetic.py) + * [Geometric](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric.py) * [Geometric Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric_series.py) + * [Harmonic](https://github.com/TheAlgorithms/Python/blob/master/maths/series/harmonic.py) * [Harmonic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/harmonic_series.py) * [P Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/p_series.py) * [Sieve Of Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/sieve_of_eratosthenes.py) @@ -933,6 +937,7 @@ * [Indian Phone Validator](https://github.com/TheAlgorithms/Python/blob/master/strings/indian_phone_validator.py) * [Is Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/is_palindrome.py) * [Jaro Winkler](https://github.com/TheAlgorithms/Python/blob/master/strings/jaro_winkler.py) + * [Join](https://github.com/TheAlgorithms/Python/blob/master/strings/join.py) * [Knuth Morris Pratt](https://github.com/TheAlgorithms/Python/blob/master/strings/knuth_morris_pratt.py) * [Levenshtein Distance](https://github.com/TheAlgorithms/Python/blob/master/strings/levenshtein_distance.py) * [Lower](https://github.com/TheAlgorithms/Python/blob/master/strings/lower.py) @@ -967,6 +972,7 @@ * [Fetch Jobs](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_jobs.py) * [Get Imdb Top 250 Movies Csv](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdbtop.py) + * [Giphy](https://github.com/TheAlgorithms/Python/blob/master/web_programming/giphy.py) * [Instagram Crawler](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_crawler.py) * [Instagram Pic](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_pic.py) * [Instagram Video](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_video.py) diff --git a/conversions/length_conversions.py b/conversions/length_conversions.py index 811a9a916b70..8d0d3a424260 100644 --- a/conversions/length_conversions.py +++ b/conversions/length_conversions.py @@ -81,11 +81,7 @@ def length_conversion(value: float, from_type: str, to_type: str) -> float: 0.1181103 >>> length_conversion(4, "wrongUnit", "inch") Traceback (most recent call last): - File "/usr/lib/python3.8/doctest.py", line 1336, in __run - exec(compile(example.source, filename, "single", - File "", line 1, in - length_conversion(4, "wrongUnit", "inch") - File "", line 85, in length_conversion + ... 
ValueError: Invalid 'from_type' value: 'wrongUnit' Supported values are: meter, kilometer, feet, inch, centimeter, yard, foot, mile, millimeter """ From 4880931c2451d00a6b9b830f84b23af847b276a0 Mon Sep 17 00:00:00 2001 From: Vinicius Cordeiro <78505368+cordeirossauro@users.noreply.github.com> Date: Tue, 19 Oct 2021 03:37:51 -0300 Subject: [PATCH 0263/1543] Add Polybius cipher (#5409) * Add polybius cipher * Fix polybius.py build issues and add test --- ciphers/polybius.py | 96 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 ciphers/polybius.py diff --git a/ciphers/polybius.py b/ciphers/polybius.py new file mode 100644 index 000000000000..9e1dc4cbb5a8 --- /dev/null +++ b/ciphers/polybius.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 + +""" +A Polybius Square is a table that allows someone to translate letters into numbers. + +https://www.braingle.com/brainteasers/codes/polybius.php +""" + +import numpy as np + + +class PolybiusCipher: + def __init__(self) -> None: + SQUARE = [ + ["a", "b", "c", "d", "e"], + ["f", "g", "h", "i", "k"], + ["l", "m", "n", "o", "p"], + ["q", "r", "s", "t", "u"], + ["v", "w", "x", "y", "z"], + ] + self.SQUARE = np.array(SQUARE) + + def letter_to_numbers(self, letter: str) -> np.ndarray: + """ + Return the pair of numbers that represents the given letter in the + polybius square + >>> np.array_equal(PolybiusCipher().letter_to_numbers('a'), [1,1]) + True + + >>> np.array_equal(PolybiusCipher().letter_to_numbers('u'), [4,5]) + True + """ + index1, index2 = np.where(self.SQUARE == letter) + indexes = np.concatenate([index1 + 1, index2 + 1]) + return indexes + + def numbers_to_letter(self, index1: int, index2: int) -> str: + """ + Return the letter corresponding to the position [index1, index2] in + the polybius square + + >>> PolybiusCipher().numbers_to_letter(4, 5) == "u" + True + + >>> PolybiusCipher().numbers_to_letter(1, 1) == "a" + True + """ + letter = self.SQUARE[index1 - 1, index2 - 1] + return letter + + def encode(self, message: str) -> str: + """ + Return the encoded version of message according to the polybius cipher + + >>> PolybiusCipher().encode("test message") == "44154344 32154343112215" + True + + >>> PolybiusCipher().encode("Test Message") == "44154344 32154343112215" + True + """ + message = message.lower() + message = message.replace("j", "i") + + encoded_message = "" + for letter_index in range(len(message)): + if message[letter_index] != " ": + numbers = self.letter_to_numbers(message[letter_index]) + encoded_message = encoded_message + str(numbers[0]) + str(numbers[1]) + elif message[letter_index] == " ": + encoded_message = encoded_message + " " + + return encoded_message + + def decode(self, message: str) -> str: + """ + Return the decoded version of message according to the polybius cipher + + >>> PolybiusCipher().decode("44154344 32154343112215") == "test message" + True + + >>> PolybiusCipher().decode("4415434432154343112215") == "testmessage" + True + """ + message = message.replace(" ", " ") + decoded_message = "" + for numbers_index in range(int(len(message) / 2)): + if message[numbers_index * 2] != " ": + index1 = message[numbers_index * 2] + index2 = message[numbers_index * 2 + 1] + + letter = self.numbers_to_letter(int(index1), int(index2)) + decoded_message = decoded_message + letter + elif message[numbers_index * 2] == " ": + decoded_message = decoded_message + " " + + return decoded_message From f7804334f15681510d4f3a008bafe742cb65d97f Mon Sep 17 00:00:00 2001 From: Christian Clauss 
Date: Tue, 19 Oct 2021 11:11:49 +0200 Subject: [PATCH 0264/1543] length_conversion.py: Deal with uppercase and abbreviations (#5433) * length_conversion.py: Deal with uppercase and abbreviations * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 +- ...th_conversions.py => length_conversion.py} | 60 ++++++++++++------- 2 files changed, 40 insertions(+), 22 deletions(-) rename conversions/{length_conversions.py => length_conversion.py} (60%) diff --git a/DIRECTORY.md b/DIRECTORY.md index cf187a0db8a2..e3b40530a6b7 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -113,7 +113,7 @@ * [Decimal To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_octal.py) * [Hex To Bin](https://github.com/TheAlgorithms/Python/blob/master/conversions/hex_to_bin.py) * [Hexadecimal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/hexadecimal_to_decimal.py) - * [Length Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/length_conversions.py) + * [Length Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/length_conversion.py) * [Molecular Chemistry](https://github.com/TheAlgorithms/Python/blob/master/conversions/molecular_chemistry.py) * [Octal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/octal_to_decimal.py) * [Prefix Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions.py) diff --git a/conversions/length_conversions.py b/conversions/length_conversion.py similarity index 60% rename from conversions/length_conversions.py rename to conversions/length_conversion.py index 8d0d3a424260..790d9c116845 100644 --- a/conversions/length_conversions.py +++ b/conversions/length_conversion.py @@ -26,16 +26,28 @@ from_to = namedtuple("from_to", "from_ to") +TYPE_CONVERSION = { + "millimeter": "mm", + "centimeter": "cm", + "meter": "m", + "kilometer": "km", + "inch": "in", + "inche": "in", # Trailing 's' has been stripped off + "feet": "ft", + "foot": "ft", + "yard": "yd", + "mile": "mi", +} + METRIC_CONVERSION = { - "meter": from_to(1, 1), - "kilometer": from_to(1000, 0.001), - "feet": from_to(0.3048, 3.28084), - "inch": from_to(0.0254, 39.3701), - "centimeter": from_to(0.01, 100), - "yard": from_to(0.9144, 1.09361), - "foot": from_to(0.3048, 3.28084), - "mile": from_to(1609.34, 0.000621371), - "millimeter": from_to(0.001, 1000), + "mm": from_to(0.001, 1000), + "cm": from_to(0.01, 100), + "m": from_to(1, 1), + "km": from_to(1000, 0.001), + "in": from_to(0.0254, 39.3701), + "ft": from_to(0.3048, 3.28084), + "yd": from_to(0.9144, 1.09361), + "mi": from_to(1609.34, 0.000621371), } @@ -43,7 +55,9 @@ def length_conversion(value: float, from_type: str, to_type: str) -> float: """ Conversion between length units. 
- >>> length_conversion(4, "meter", "feet") + >>> length_conversion(4, "METER", "FEET") + 13.12336 + >>> length_conversion(4, "M", "FT") 13.12336 >>> length_conversion(1, "meter", "kilometer") 0.001 @@ -73,29 +87,33 @@ def length_conversion(value: float, from_type: str, to_type: str) -> float: 36.00001944 >>> length_conversion(4, "mile", "kilometer") 6.43736 - >>> length_conversion(2, "mile", "inch") + >>> length_conversion(2, "miles", "InChEs") 126719.753468 >>> length_conversion(3, "millimeter", "centimeter") 0.3 - >>> length_conversion(3, "millimeter", "inch") + >>> length_conversion(3, "mm", "in") 0.1181103 >>> length_conversion(4, "wrongUnit", "inch") Traceback (most recent call last): ... - ValueError: Invalid 'from_type' value: 'wrongUnit' Supported values are: - meter, kilometer, feet, inch, centimeter, yard, foot, mile, millimeter + ValueError: Invalid 'from_type' value: 'wrongUnit'. + Conversion abbreviations are: mm, cm, m, km, in, ft, yd, mi """ - if from_type not in METRIC_CONVERSION: + new_from = from_type.lower().rstrip("s") + new_from = TYPE_CONVERSION.get(new_from, new_from) + new_to = to_type.lower().rstrip("s") + new_to = TYPE_CONVERSION.get(new_to, new_to) + if new_from not in METRIC_CONVERSION: raise ValueError( - f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" - + ", ".join(METRIC_CONVERSION) + f"Invalid 'from_type' value: {from_type!r}.\n" + f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) - if to_type not in METRIC_CONVERSION: + if new_to not in METRIC_CONVERSION: raise ValueError( - f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" - + ", ".join(METRIC_CONVERSION) + f"Invalid 'to_type' value: {to_type!r}.\n" + f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) - return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to + return value * METRIC_CONVERSION[new_from].from_ * METRIC_CONVERSION[new_to].to if __name__ == "__main__": From 21cf3cc2603de8598b717ef13a530f5fa12b9c47 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj <89947037+Rohanrbharadwaj@users.noreply.github.com> Date: Wed, 20 Oct 2021 01:06:01 +0530 Subject: [PATCH 0265/1543] Typo (#5443) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e9cf0e6a18b7..4723c6c39a7d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -13,7 +13,7 @@ We are very happy that you consider implementing algorithms and data structure f - You did your work - no plagiarism allowed - Any plagiarized work will not be merged. - Your work will be distributed under [MIT License](LICENSE.md) once your pull request is merged -- You submitted work fulfils or mostly fulfils our styles and standards +- Your submitted work fulfils or mostly fulfils our styles and standards __New implementation__ is welcome! For example, new solutions for a problem, different representations for a graph data structure or algorithm designs with different complexity but __identical implementation__ of an existing implementation is not allowed. Please check whether the solution is already implemented or not before submitting your pull request. 
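The length-conversion patches above (0261 through 0264) converge on a simple pattern: normalize the unit name first (lowercase it, strip a trailing "s", map full names to abbreviations), then scale the value through metres using a pair of per-unit factors. The sketch below illustrates that pattern in isolation under a few assumptions: the names convert_length, _normalize, UNIT_ALIASES, CONVERSION and FromTo are invented for this example, and the unit table is deliberately trimmed to four units, so this is not the module shipped as conversions/length_conversion.py.

# Minimal, self-contained sketch of the normalize-then-scale idea; the names
# and the trimmed unit table are illustrative, not the repository's module.
from typing import NamedTuple


class FromTo(NamedTuple):
    to_metre: float  # multiply by this to turn one source unit into metres
    from_metre: float  # multiply by this to turn metres into the target unit


UNIT_ALIASES = {
    "meter": "m",
    "metre": "m",
    "kilometer": "km",
    "kilometre": "km",
    "inch": "in",
    "inche": "in",  # "inches" after the trailing "s" has been stripped
    "foot": "ft",
    "feet": "ft",
}

CONVERSION = {
    "m": FromTo(1.0, 1.0),
    "km": FromTo(1000.0, 0.001),
    "in": FromTo(0.0254, 39.3701),
    "ft": FromTo(0.3048, 3.28084),
}


def _normalize(unit: str) -> str:
    """Lowercase, strip a plural 's' and map full names to abbreviations."""
    unit = unit.lower().rstrip("s")
    return UNIT_ALIASES.get(unit, unit)


def convert_length(value: float, from_type: str, to_type: str) -> float:
    """Convert value between length units, tolerating case and plural forms.

    >>> convert_length(4, "METERS", "feet")
    13.12336
    >>> convert_length(2, "Feet", "metre")
    0.6096
    >>> convert_length(1, "furlong", "m")
    Traceback (most recent call last):
        ...
    ValueError: invalid unit 'furlong'; supported units: m, km, in, ft
    """
    src, dst = _normalize(from_type), _normalize(to_type)
    for unit in (src, dst):
        if unit not in CONVERSION:
            raise ValueError(
                f"invalid unit {unit!r}; supported units: {', '.join(CONVERSION)}"
            )
    # Go through metres: source unit -> metres -> target unit.
    return value * CONVERSION[src].to_metre * CONVERSION[dst].from_metre


if __name__ == "__main__":
    import doctest

    doctest.testmod()

Because every factor is expressed relative to the metre, supporting an extra unit only needs one more table entry rather than a pairwise conversion matrix; the patches above use the same design.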
From d32d0158a3b56a4a4ad7bcfdd66e5835f2d594c7 Mon Sep 17 00:00:00 2001 From: Alvin Philips Date: Wed, 20 Oct 2021 01:09:15 +0530 Subject: [PATCH 0266/1543] Fixed typo (#5439) Changed it's (it is) to its --- data_structures/binary_tree/binary_tree_mirror.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/binary_tree/binary_tree_mirror.py b/data_structures/binary_tree/binary_tree_mirror.py index dc7f657b37c7..cdd56e35d765 100644 --- a/data_structures/binary_tree/binary_tree_mirror.py +++ b/data_structures/binary_tree/binary_tree_mirror.py @@ -1,6 +1,6 @@ """ Problem Description: -Given a binary tree, return it's mirror. +Given a binary tree, return its mirror. """ From c886a66d34b1bf8796d261c94f983a43453828d7 Mon Sep 17 00:00:00 2001 From: Snimerjot Singh Date: Wed, 20 Oct 2021 11:05:41 +0530 Subject: [PATCH 0267/1543] Added check_strong_password.py (#4950) * Added check_strong_password.py * Corrected Comment * Updated * Updated check_strong_password.py * Ran Pre-Commit --- other/check_strong_password.py | 47 ++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 other/check_strong_password.py diff --git a/other/check_strong_password.py b/other/check_strong_password.py new file mode 100644 index 000000000000..95bb327addf4 --- /dev/null +++ b/other/check_strong_password.py @@ -0,0 +1,47 @@ +# This Will Check Whether A Given Password Is Strong Or Not +# It Follows The Rule that Length Of Password Should Be At Least 8 Characters +# And At Least 1 Lower, 1 Upper, 1 Number And 1 Special Character + +from string import ascii_lowercase, ascii_uppercase, digits, punctuation + + +def strong_password_detector(password: str, min_length: int = 8) -> str: + """ + >>> strong_password_detector('Hwea7$2!') + 'This is a strong Password' + + >>> strong_password_detector('Sh0r1') + 'Your Password must be at least 8 characters long' + + >>> strong_password_detector('Hello123') + 'Password should contain UPPERCASE, lowercase, numbers, special characters' + + >>> strong_password_detector('Hello1238udfhiaf038fajdvjjf!jaiuFhkqi1') + 'This is a strong Password' + + >>> strong_password_detector(0) + 'Your Password must be at least 8 characters long' + """ + + if len(str(password)) < 8: + return "Your Password must be at least 8 characters long" + + upper = any(char in ascii_uppercase for char in password) + lower = any(char in ascii_lowercase for char in password) + num = any(char in digits for char in password) + spec_char = any(char in punctuation for char in password) + + if upper and lower and num and spec_char: + return "This is a strong Password" + + else: + return ( + "Password should contain UPPERCASE, lowercase, " + "numbers, special characters" + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 2e2e1b656cf12789c0cc93ad4d2a4771be86cbab Mon Sep 17 00:00:00 2001 From: Immiel Date: Wed, 20 Oct 2021 15:08:39 +0700 Subject: [PATCH 0268/1543] singly_linked_list: Added additional documentation, type hints and test cases (#4988) This is a followup to https://github.com/TheAlgorithms/Python/pull/4973#issuecomment-933117382 As per given suggestion, I've added type hints to certain methods that don't have them. I have also added documentation and example doctests as a usage example for (most of) those that don't have them. I have also added another test case following the previous test case's format. 
I noticed that the existing test case from previous pull request might be redundant with the ones I've made, so I decided to create a specific situation where the linked list would have to keep different kinds of data types for each node, in `test_singly_linked_list_2` test function. Some minor changes in strings has been done to keep things consistent with other parts of the document. If it is undesirable, please let me know. --- .../linked_list/singly_linked_list.py | 295 +++++++++++++++++- 1 file changed, 280 insertions(+), 15 deletions(-) diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index e45a210a1785..4a5dc8263f79 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -1,17 +1,53 @@ +from typing import Any + + class Node: - def __init__(self, data): + def __init__(self, data: Any): + """ + Create and initialize Node class instance. + >>> Node(20) + Node(20) + >>> Node("Hello, world!") + Node(Hello, world!) + >>> Node(None) + Node(None) + >>> Node(True) + Node(True) + """ self.data = data self.next = None - def __repr__(self): + def __repr__(self) -> str: + """ + Get the string representation of this node. + >>> Node(10).__repr__() + 'Node(10)' + """ return f"Node({self.data})" class LinkedList: def __init__(self): + """ + Create and initialize LinkedList class instance. + >>> linked_list = LinkedList() + """ self.head = None - def __iter__(self): + def __iter__(self) -> Any: + """ + This function is intended for iterators to access + and iterate through data inside linked list. + >>> linked_list = LinkedList() + >>> linked_list.insert_tail("tail") + >>> linked_list.insert_tail("tail_1") + >>> linked_list.insert_tail("tail_2") + >>> for node in linked_list: # __iter__ used here. + ... node + 'tail' + 'tail_1' + 'tail_2' + """ node = self.head while node: yield node.data @@ -23,7 +59,7 @@ def __len__(self) -> int: >>> linked_list = LinkedList() >>> len(linked_list) 0 - >>> linked_list.insert_tail("head") + >>> linked_list.insert_tail("tail") >>> len(linked_list) 1 >>> linked_list.insert_head("head") @@ -38,13 +74,18 @@ def __len__(self) -> int: """ return len(tuple(iter(self))) - def __repr__(self): + def __repr__(self) -> str: """ String representation/visualization of a Linked Lists + >>> linked_list = LinkedList() + >>> linked_list.insert_tail(1) + >>> linked_list.insert_tail(3) + >>> linked_list.__repr__() + '1->3' """ return "->".join([str(item) for item in self]) - def __getitem__(self, index): + def __getitem__(self, index: int) -> Any: """ Indexing Support. Used to get a node at particular position >>> linked_list = LinkedList() @@ -68,7 +109,7 @@ def __getitem__(self, index): return node # Used to change the data of a particular node - def __setitem__(self, index, data): + def __setitem__(self, index: int, data: Any) -> None: """ >>> linked_list = LinkedList() >>> for i in range(0, 10): @@ -95,13 +136,54 @@ def __setitem__(self, index, data): current = current.next current.data = data - def insert_tail(self, data) -> None: + def insert_tail(self, data: Any) -> None: + """ + Insert data to the end of linked list. 
+ >>> linked_list = LinkedList() + >>> linked_list.insert_tail("tail") + >>> linked_list + tail + >>> linked_list.insert_tail("tail_2") + >>> linked_list + tail->tail_2 + >>> linked_list.insert_tail("tail_3") + >>> linked_list + tail->tail_2->tail_3 + """ self.insert_nth(len(self), data) - def insert_head(self, data) -> None: + def insert_head(self, data: Any) -> None: + """ + Insert data to the beginning of linked list. + >>> linked_list = LinkedList() + >>> linked_list.insert_head("head") + >>> linked_list + head + >>> linked_list.insert_head("head_2") + >>> linked_list + head_2->head + >>> linked_list.insert_head("head_3") + >>> linked_list + head_3->head_2->head + """ self.insert_nth(0, data) - def insert_nth(self, index: int, data) -> None: + def insert_nth(self, index: int, data: Any) -> None: + """ + Insert data at given index. + >>> linked_list = LinkedList() + >>> linked_list.insert_tail("first") + >>> linked_list.insert_tail("second") + >>> linked_list.insert_tail("third") + >>> linked_list + first->second->third + >>> linked_list.insert_nth(1, "fourth") + >>> linked_list + first->fourth->second->third + >>> linked_list.insert_nth(3, "fifth") + >>> linked_list + first->fourth->second->fifth->third + """ if not 0 <= index <= len(self): raise IndexError("list index out of range") new_node = Node(data) @@ -118,17 +200,96 @@ def insert_nth(self, index: int, data) -> None: temp.next = new_node def print_list(self) -> None: # print every node data + """ + This method prints every node data. + >>> linked_list = LinkedList() + >>> linked_list.insert_tail("first") + >>> linked_list.insert_tail("second") + >>> linked_list.insert_tail("third") + >>> linked_list + first->second->third + """ print(self) - def delete_head(self): + def delete_head(self) -> Any: + """ + Delete the first node and return the + node's data. + >>> linked_list = LinkedList() + >>> linked_list.insert_tail("first") + >>> linked_list.insert_tail("second") + >>> linked_list.insert_tail("third") + >>> linked_list + first->second->third + >>> linked_list.delete_head() + 'first' + >>> linked_list + second->third + >>> linked_list.delete_head() + 'second' + >>> linked_list + third + >>> linked_list.delete_head() + 'third' + >>> linked_list.delete_head() + Traceback (most recent call last): + ... + IndexError: List index out of range. + """ return self.delete_nth(0) - def delete_tail(self): # delete from tail + def delete_tail(self) -> Any: # delete from tail + """ + Delete the tail end node and return the + node's data. + >>> linked_list = LinkedList() + >>> linked_list.insert_tail("first") + >>> linked_list.insert_tail("second") + >>> linked_list.insert_tail("third") + >>> linked_list + first->second->third + >>> linked_list.delete_tail() + 'third' + >>> linked_list + first->second + >>> linked_list.delete_tail() + 'second' + >>> linked_list + first + >>> linked_list.delete_tail() + 'first' + >>> linked_list.delete_tail() + Traceback (most recent call last): + ... + IndexError: List index out of range. + """ return self.delete_nth(len(self) - 1) - def delete_nth(self, index: int = 0): + def delete_nth(self, index: int = 0) -> Any: + """ + Delete node at given index and return the + node's data. 
+ >>> linked_list = LinkedList() + >>> linked_list.insert_tail("first") + >>> linked_list.insert_tail("second") + >>> linked_list.insert_tail("third") + >>> linked_list + first->second->third + >>> linked_list.delete_nth(1) # delete middle + 'second' + >>> linked_list + first->third + >>> linked_list.delete_nth(5) # this raises error + Traceback (most recent call last): + ... + IndexError: List index out of range. + >>> linked_list.delete_nth(-1) # this also raises error + Traceback (most recent call last): + ... + IndexError: List index out of range. + """ if not 0 <= index <= len(self) - 1: # test if index is valid - raise IndexError("list index out of range") + raise IndexError("List index out of range.") delete_node = self.head # default first node if index == 0: self.head = self.head.next @@ -141,9 +302,30 @@ def delete_nth(self, index: int = 0): return delete_node.data def is_empty(self) -> bool: + """ + Check if linked list is empty. + >>> linked_list = LinkedList() + >>> linked_list.is_empty() + True + >>> linked_list.insert_head("first") + >>> linked_list.is_empty() + False + """ return self.head is None - def reverse(self): + def reverse(self) -> None: + """ + This reverses the linked list order. + >>> linked_list = LinkedList() + >>> linked_list.insert_tail("first") + >>> linked_list.insert_tail("second") + >>> linked_list.insert_tail("third") + >>> linked_list + first->second->third + >>> linked_list.reverse() + >>> linked_list + third->second->first + """ prev = None current = self.head @@ -201,6 +383,89 @@ def test_singly_linked_list() -> None: linked_list[i] = -i assert all(linked_list[i] == -i for i in range(0, 9)) is True + linked_list.reverse() + assert str(linked_list) == "->".join(str(i) for i in range(-8, 1)) + + +def test_singly_linked_list_2() -> None: + """ + This section of the test used varying data types for input. 
+ >>> test_singly_linked_list_2() + """ + input = [ + -9, + 100, + Node(77345112), + "dlrow olleH", + 7, + 5555, + 0, + -192.55555, + "Hello, world!", + 77.9, + Node(10), + None, + None, + 12.20, + ] + linked_list = LinkedList() + [linked_list.insert_tail(i) for i in input] + + # Check if it's empty or not + assert linked_list.is_empty() is False + assert ( + str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" + "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" + ) + + # Delete the head + result = linked_list.delete_head() + assert result == -9 + assert ( + str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" + "Hello, world!->77.9->Node(10)->None->None->12.2" + ) + + # Delete the tail + result = linked_list.delete_tail() + assert result == 12.2 + assert ( + str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" + "Hello, world!->77.9->Node(10)->None->None" + ) + + # Delete a node in specific location in linked list + result = linked_list.delete_nth(10) + assert result is None + assert ( + str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" + "Hello, world!->77.9->Node(10)->None" + ) + + # Add a Node instance to its head + linked_list.insert_head(Node("Hello again, world!")) + assert ( + str(linked_list) + == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" + "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" + ) + + # Add None to its tail + linked_list.insert_tail(None) + assert ( + str(linked_list) + == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" + "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" + ) + + # Reverse the linked list + linked_list.reverse() + assert ( + str(linked_list) + == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" + "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" + ) + def main(): from doctest import testmod From 83cf5786cddd694a2af25827f8861b7dbcbf706c Mon Sep 17 00:00:00 2001 From: P U N I T H <55887644+punithbajaj@users.noreply.github.com> Date: Wed, 20 Oct 2021 14:00:58 +0530 Subject: [PATCH 0269/1543] Add wildcard pattern matching using dynamic programming (#5334) * Added regular expression implimentation using dp * replaced input() with example values * Apply suggestions from code review Co-authored-by: Christian Clauss * changed returning value to bool and added test cases * added doctest Co-authored-by: John Law * added test cases * Apply suggestions from code review Co-authored-by: John Law * shifted to strings * Changed filename * Update function name to match_pattern Co-authored-by: John Law * Update function name to match_pattern Co-authored-by: John Law Co-authored-by: Christian Clauss Co-authored-by: John Law --- strings/wildcard_pattern_matching.py | 112 +++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 strings/wildcard_pattern_matching.py diff --git a/strings/wildcard_pattern_matching.py b/strings/wildcard_pattern_matching.py new file mode 100644 index 000000000000..83c8d834cca9 --- /dev/null +++ b/strings/wildcard_pattern_matching.py @@ -0,0 +1,112 @@ +""" +Implementation of regular expression matching with support for '.' and '*'. +'.' Matches any single character. +'*' Matches zero or more of the preceding element. +The matching should cover the entire input string (not partial). 
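
For well-formed patterns built only from literals, '.' and '*', the convention
described above coincides with Python's re.fullmatch, so a quick illustrative
cross-check using only the standard library is:

    import re

    # same accept/reject decisions as the doctests further down
    assert re.fullmatch("c*a*b", "aab") is not None  # "c*" may match zero c's
    assert re.fullmatch("a.a", "aaa") is not None    # "." matches one character
    assert re.fullmatch("aa*", "aaab") is None       # the trailing "b" stays uncovered
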
+ +""" + + +def match_pattern(input_string: str, pattern: str) -> bool: + """ + uses bottom-up dynamic programming solution for matching the input + string with a given pattern. + + Runtime: O(len(input_string)*len(pattern)) + + Arguments + -------- + input_string: str, any string which should be compared with the pattern + pattern: str, the string that represents a pattern and may contain + '.' for single character matches and '*' for zero or more of preceding character + matches + + Note + ---- + the pattern cannot start with a '*', + because there should be at least one character before * + + Returns + ------- + A Boolean denoting whether the given string follows the pattern + + Examples + ------- + >>> match_pattern("aab", "c*a*b") + True + >>> match_pattern("dabc", "*abc") + False + >>> match_pattern("aaa", "aa") + False + >>> match_pattern("aaa", "a.a") + True + >>> match_pattern("aaab", "aa*") + False + >>> match_pattern("aaab", ".*") + True + >>> match_pattern("a", "bbbb") + False + >>> match_pattern("", "bbbb") + False + >>> match_pattern("a", "") + False + >>> match_pattern("", "") + True + """ + + len_string = len(input_string) + 1 + len_pattern = len(pattern) + 1 + + # dp is a 2d matrix where dp[i][j] denotes whether prefix string of + # length i of input_string matches with prefix string of length j of + # given pattern. + # "dp" stands for dynamic programming. + dp = [[0 for i in range(len_pattern)] for j in range(len_string)] + + # since string of zero length match pattern of zero length + dp[0][0] = 1 + + # since pattern of zero length will never match with string of non-zero length + for i in range(1, len_string): + dp[i][0] = 0 + + # since string of zero length will match with pattern where there + # is at least one * alternatively + for j in range(1, len_pattern): + dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0 + + # now using bottom-up approach to find for all remaining lengths + for i in range(1, len_string): + for j in range(1, len_pattern): + if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": + dp[i][j] = dp[i - 1][j - 1] + + elif pattern[j - 1] == "*": + if dp[i][j - 2] == 1: + dp[i][j] = 1 + elif pattern[j - 2] in (input_string[i - 1], "."): + dp[i][j] = dp[i - 1][j] + else: + dp[i][j] = 0 + else: + dp[i][j] = 0 + + return bool(dp[-1][-1]) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + # inputing the strings + # input_string = input("input a string :") + # pattern = input("input a pattern :") + + input_string = "aab" + pattern = "c*a*b" + + # using function to check whether given string matches the given pattern + if match_pattern(input_string, pattern): + print(f"{input_string} matches the given pattern {pattern}") + else: + print(f"{input_string} does not match with the given pattern {pattern}") From 50485f7c8e33b0a3bf6e603cdae3505d40b1d97a Mon Sep 17 00:00:00 2001 From: Manan Rathi <76519771+Manan-Rathi@users.noreply.github.com> Date: Wed, 20 Oct 2021 14:12:32 +0530 Subject: [PATCH 0270/1543] Fix typos in Sorts and Bit_manipulation (#4949) * Fix several typos * Update bit_manipulation/README.md Co-authored-by: John Law * Update double_sort.py Co-authored-by: John Law --- bit_manipulation/README.md | 13 ++++++------- bit_manipulation/binary_and_operator.py | 4 ++-- bit_manipulation/binary_or_operator.py | 4 ++-- bit_manipulation/binary_xor_operator.py | 4 ++-- sorts/bead_sort.py | 6 +++--- sorts/double_sort.py | 6 +++--- 6 files changed, 18 insertions(+), 19 deletions(-) diff --git a/bit_manipulation/README.md 
b/bit_manipulation/README.md index 2ef1661524f2..e5f82a270e28 100644 --- a/bit_manipulation/README.md +++ b/bit_manipulation/README.md @@ -1,7 +1,6 @@ -https://docs.python.org/3/reference/expressions.html#binary-bitwise-operations -https://docs.python.org/3/reference/expressions.html#unary-arithmetic-and-bitwise-operations -https://docs.python.org/3/library/stdtypes.html#bitwise-operations-on-integer-types - -https://wiki.python.org/moin/BitManipulation -https://wiki.python.org/moin/BitwiseOperators -https://www.tutorialspoint.com/python3/bitwise_operators_example.htm +* https://docs.python.org/3/reference/expressions.html#binary-bitwise-operations +* https://docs.python.org/3/reference/expressions.html#unary-arithmetic-and-bitwise-operations +* https://docs.python.org/3/library/stdtypes.html#bitwise-operations-on-integer-types +* https://wiki.python.org/moin/BitManipulation +* https://wiki.python.org/moin/BitwiseOperators +* https://www.tutorialspoint.com/python3/bitwise_operators_example.htm diff --git a/bit_manipulation/binary_and_operator.py b/bit_manipulation/binary_and_operator.py index 191ff8eb44a4..36f6c668d9b3 100644 --- a/bit_manipulation/binary_and_operator.py +++ b/bit_manipulation/binary_and_operator.py @@ -22,7 +22,7 @@ def binary_and(a: int, b: int) -> str: >>> binary_and(0, -1) Traceback (most recent call last): ... - ValueError: the value of both input must be positive + ValueError: the value of both inputs must be positive >>> binary_and(0, 1.1) Traceback (most recent call last): ... @@ -33,7 +33,7 @@ def binary_and(a: int, b: int) -> str: TypeError: '<' not supported between instances of 'str' and 'int' """ if a < 0 or b < 0: - raise ValueError("the value of both input must be positive") + raise ValueError("the value of both inputs must be positive") a_binary = str(bin(a))[2:] # remove the leading "0b" b_binary = str(bin(b))[2:] # remove the leading "0b" diff --git a/bit_manipulation/binary_or_operator.py b/bit_manipulation/binary_or_operator.py index dabf5bcb09fd..95f61f1da64e 100644 --- a/bit_manipulation/binary_or_operator.py +++ b/bit_manipulation/binary_or_operator.py @@ -21,7 +21,7 @@ def binary_or(a: int, b: int) -> str: >>> binary_or(0, -1) Traceback (most recent call last): ... - ValueError: the value of both input must be positive + ValueError: the value of both inputs must be positive >>> binary_or(0, 1.1) Traceback (most recent call last): ... @@ -32,7 +32,7 @@ def binary_or(a: int, b: int) -> str: TypeError: '<' not supported between instances of 'str' and 'int' """ if a < 0 or b < 0: - raise ValueError("the value of both input must be positive") + raise ValueError("the value of both inputs must be positive") a_binary = str(bin(a))[2:] # remove the leading "0b" b_binary = str(bin(b))[2:] max_len = max(len(a_binary), len(b_binary)) diff --git a/bit_manipulation/binary_xor_operator.py b/bit_manipulation/binary_xor_operator.py index 6f8962192ad8..6206c70a99f6 100644 --- a/bit_manipulation/binary_xor_operator.py +++ b/bit_manipulation/binary_xor_operator.py @@ -22,7 +22,7 @@ def binary_xor(a: int, b: int) -> str: >>> binary_xor(0, -1) Traceback (most recent call last): ... - ValueError: the value of both input must be positive + ValueError: the value of both inputs must be positive >>> binary_xor(0, 1.1) Traceback (most recent call last): ... 
@@ -33,7 +33,7 @@ def binary_xor(a: int, b: int) -> str: TypeError: '<' not supported between instances of 'str' and 'int' """ if a < 0 or b < 0: - raise ValueError("the value of both input must be positive") + raise ValueError("the value of both inputs must be positive") a_binary = str(bin(a))[2:] # remove the leading "0b" b_binary = str(bin(b))[2:] # remove the leading "0b" diff --git a/sorts/bead_sort.py b/sorts/bead_sort.py index 26a3fabc4807..d22367c52fa9 100644 --- a/sorts/bead_sort.py +++ b/sorts/bead_sort.py @@ -21,15 +21,15 @@ def bead_sort(sequence: list) -> list: >>> bead_sort([1, .9, 0.0, 0, -1, -.9]) Traceback (most recent call last): ... - TypeError: Sequence must be list of nonnegative integers + TypeError: Sequence must be list of non-negative integers >>> bead_sort("Hello world") Traceback (most recent call last): ... - TypeError: Sequence must be list of nonnegative integers + TypeError: Sequence must be list of non-negative integers """ if any(not isinstance(x, int) or x < 0 for x in sequence): - raise TypeError("Sequence must be list of nonnegative integers") + raise TypeError("Sequence must be list of non-negative integers") for _ in range(len(sequence)): for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])): if rod_upper > rod_lower: diff --git a/sorts/double_sort.py b/sorts/double_sort.py index 04e18682017c..4e08e27b3c21 100644 --- a/sorts/double_sort.py +++ b/sorts/double_sort.py @@ -1,7 +1,7 @@ def double_sort(lst): - """this sorting algorithm sorts an array using the principle of bubble sort, - but does it both from left to right and right to left, - hence i decided to call it "double sort" + """This sorting algorithm sorts an array using the principle of bubble sort, + but does it both from left to right and right to left. + Hence, it's called "Double sort" :param collection: mutable ordered sequence of elements :return: the same collection in ascending order Examples: From 672a0c8816fcebd8e426d64c0053cef4c42d7ec6 Mon Sep 17 00:00:00 2001 From: Hithru De Alwis Date: Wed, 20 Oct 2021 19:04:31 +0530 Subject: [PATCH 0271/1543] Fixed Typo (#5477) Change how many "=" sign to how many "=" signs --- ciphers/base64_encoding.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ciphers/base64_encoding.py b/ciphers/base64_encoding.py index 634afcb89873..38a952acc307 100644 --- a/ciphers/base64_encoding.py +++ b/ciphers/base64_encoding.py @@ -7,7 +7,7 @@ def base64_encode(data: bytes) -> bytes: The data is first transformed to binary and appended with binary digits so that its length becomes a multiple of 6, then each 6 binary digits will match a character in the B64_CHARSET string. The number of appended binary digits would later determine - how many "=" sign should be added, the padding. + how many "=" signs should be added, the padding. For every 2 binary digits added, a "=" sign is added in the output. 
We can add any binary digits to make it a multiple of 6, for instance, consider the following example: From 2e955aea460d6ac173d5bfeab0c71db0658e2bcb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Benjam=C3=ADn=20Garc=C3=ADa=20Roqu=C3=A9s?= <62822419+grbenjamin@users.noreply.github.com> Date: Thu, 21 Oct 2021 00:38:04 -0300 Subject: [PATCH 0272/1543] Replace double_ended_queue.py (#5429) * Add deque_from_scratch.py * added deque_from_scratch.py * add extend, extendleft and make comparison * updated operations list * fix doctest on Deque.__iter__ * pre-commit fix * time complexity comments, change type hints * pre-commit fix * added more doctests --- data_structures/queue/double_ended_queue.py | 495 ++++++++++++++++++-- 1 file changed, 454 insertions(+), 41 deletions(-) diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index dd003b7c98ac..36106d8bc0d9 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -1,57 +1,470 @@ -# Python code to demonstrate working of -# extend(), extendleft(), rotate(), reverse() +""" +Implementation of double ended queue. +""" +from dataclasses import dataclass +from typing import Any, Iterable -# importing "collections" for deque operations -import collections -# initializing deque -de = collections.deque([1, 2, 3]) +class Deque: + """ + Deque data structure. -# using extend() to add numbers to right end -# adds 4,5,6 to right end -de.extend([4, 5, 6]) + Operations + ---------- + append(val: Any) -> None -# printing modified deque -print("The deque after extending deque at end is : ") -print(de) + appendleft(val: Any) -> None -# using extendleft() to add numbers to left end -# adds 7,8,9 to right end -de.extendleft([7, 8, 9]) + extend(iter: Iterable) -> None -# printing modified deque -print("The deque after extending deque at beginning is : ") -print(de) + extendleft(iter: Iterable) -> None -# using rotate() to rotate the deque -# rotates by 3 to left -de.rotate(-3) + pop() -> Any -# printing modified deque -print("The deque after rotating deque is : ") -print(de) + popleft() -> Any -# using reverse() to reverse the deque -de.reverse() -# printing modified deque -print("The deque after reversing deque is : ") -print(de) + Observers + --------- + is_empty() -> bool -# get right-end value and eliminate -startValue = de.pop() -print("The deque after popping value at end is : ") -print(de) + Attributes + ---------- + _front: _Node + front of the deque a.k.a. the first element -# get left-end value and eliminate -endValue = de.popleft() + _back: _Node + back of the element a.k.a. the last element -print("The deque after popping value at start is : ") -print(de) + _len: int + the number of nodes + """ -# eliminate element searched by value -de.remove(5) + __slots__ = ["_front", "_back", "_len"] -print("The deque after eliminating element searched by value : ") -print(de) + @dataclass + class _Node: + """ + Representation of a node. + Contains a value and a pointer to the next node as well as to the previous one. + """ + + val: Any = None + next: "Deque._Node" = None + prev: "Deque._Node" = None + + class _Iterator: + """ + Helper class for iteration. Will be used to implement iteration. + + Attributes + ---------- + _cur: _Node + the current node of the iteration. 
+ """ + + __slots__ = ["_cur"] + + def __init__(self, cur: "Deque._Node") -> None: + self._cur = cur + + def __iter__(self) -> "Deque._Iterator": + """ + >>> our_deque = Deque([1, 2, 3]) + >>> iterator = iter(our_deque) + """ + return self + + def __next__(self) -> Any: + """ + >>> our_deque = Deque([1, 2, 3]) + >>> iterator = iter(our_deque) + >>> next(iterator) + 1 + >>> next(iterator) + 2 + >>> next(iterator) + 3 + """ + if self._cur is None: + # finished iterating + raise StopIteration + val = self._cur.val + self._cur = self._cur.next + + return val + + def __init__(self, iterable: Iterable = None) -> None: + self._front = self._back = None + self._len = 0 + + if iterable is not None: + # append every value to the deque + for val in iterable: + self.append(val) + + def append(self, val: Any) -> None: + """ + Adds val to the end of the deque. + Time complexity: O(1) + + >>> our_deque_1 = Deque([1, 2, 3]) + >>> our_deque_1.append(4) + >>> our_deque_1 + [1, 2, 3, 4] + >>> our_deque_2 = Deque('ab') + >>> our_deque_2.append('c') + >>> our_deque_2 + ['a', 'b', 'c'] + + >>> from collections import deque + >>> deque_collections_1 = deque([1, 2, 3]) + >>> deque_collections_1.append(4) + >>> deque_collections_1 + deque([1, 2, 3, 4]) + >>> deque_collections_2 = deque('ab') + >>> deque_collections_2.append('c') + >>> deque_collections_2 + deque(['a', 'b', 'c']) + + >>> list(our_deque_1) == list(deque_collections_1) + True + >>> list(our_deque_2) == list(deque_collections_2) + True + """ + node = self._Node(val, None, None) + if self.is_empty(): + # front = back + self._front = self._back = node + self._len = 1 + else: + # connect nodes + self._back.next = node + node.prev = self._back + self._back = node # assign new back to the new node + + self._len += 1 + + # make sure there were no errors + assert not self.is_empty(), "Error on appending value." + + def appendleft(self, val: Any) -> None: + """ + Adds val to the beginning of the deque. + Time complexity: O(1) + + >>> our_deque_1 = Deque([2, 3]) + >>> our_deque_1.appendleft(1) + >>> our_deque_1 + [1, 2, 3] + >>> our_deque_2 = Deque('bc') + >>> our_deque_2.appendleft('a') + >>> our_deque_2 + ['a', 'b', 'c'] + + >>> from collections import deque + >>> deque_collections_1 = deque([2, 3]) + >>> deque_collections_1.appendleft(1) + >>> deque_collections_1 + deque([1, 2, 3]) + >>> deque_collections_2 = deque('bc') + >>> deque_collections_2.appendleft('a') + >>> deque_collections_2 + deque(['a', 'b', 'c']) + + >>> list(our_deque_1) == list(deque_collections_1) + True + >>> list(our_deque_2) == list(deque_collections_2) + True + """ + node = self._Node(val, None, None) + if self.is_empty(): + # front = back + self._front = self._back = node + self._len = 1 + else: + # connect nodes + node.next = self._front + self._front.prev = node + self._front = node # assign new front to the new node + + self._len += 1 + + # make sure there were no errors + assert not self.is_empty(), "Error on appending value." + + def extend(self, iter: Iterable) -> None: + """ + Appends every value of iter to the end of the deque. 
+ Time complexity: O(n) + + >>> our_deque_1 = Deque([1, 2, 3]) + >>> our_deque_1.extend([4, 5]) + >>> our_deque_1 + [1, 2, 3, 4, 5] + >>> our_deque_2 = Deque('ab') + >>> our_deque_2.extend('cd') + >>> our_deque_2 + ['a', 'b', 'c', 'd'] + + >>> from collections import deque + >>> deque_collections_1 = deque([1, 2, 3]) + >>> deque_collections_1.extend([4, 5]) + >>> deque_collections_1 + deque([1, 2, 3, 4, 5]) + >>> deque_collections_2 = deque('ab') + >>> deque_collections_2.extend('cd') + >>> deque_collections_2 + deque(['a', 'b', 'c', 'd']) + + >>> list(our_deque_1) == list(deque_collections_1) + True + >>> list(our_deque_2) == list(deque_collections_2) + True + """ + for val in iter: + self.append(val) + + def extendleft(self, iter: Iterable) -> None: + """ + Appends every value of iter to the beginning of the deque. + Time complexity: O(n) + + >>> our_deque_1 = Deque([1, 2, 3]) + >>> our_deque_1.extendleft([0, -1]) + >>> our_deque_1 + [-1, 0, 1, 2, 3] + >>> our_deque_2 = Deque('cd') + >>> our_deque_2.extendleft('ba') + >>> our_deque_2 + ['a', 'b', 'c', 'd'] + + >>> from collections import deque + >>> deque_collections_1 = deque([1, 2, 3]) + >>> deque_collections_1.extendleft([0, -1]) + >>> deque_collections_1 + deque([-1, 0, 1, 2, 3]) + >>> deque_collections_2 = deque('cd') + >>> deque_collections_2.extendleft('ba') + >>> deque_collections_2 + deque(['a', 'b', 'c', 'd']) + + >>> list(our_deque_1) == list(deque_collections_1) + True + >>> list(our_deque_2) == list(deque_collections_2) + True + """ + for val in iter: + self.appendleft(val) + + def pop(self) -> Any: + """ + Removes the last element of the deque and returns it. + Time complexity: O(1) + + @returns topop.val: the value of the node to pop. + + >>> our_deque = Deque([1, 2, 3, 15182]) + >>> our_popped = our_deque.pop() + >>> our_popped + 15182 + >>> our_deque + [1, 2, 3] + + >>> from collections import deque + >>> deque_collections = deque([1, 2, 3, 15182]) + >>> collections_popped = deque_collections.pop() + >>> collections_popped + 15182 + >>> deque_collections + deque([1, 2, 3]) + + >>> list(our_deque) == list(deque_collections) + True + >>> our_popped == collections_popped + True + """ + # make sure the deque has elements to pop + assert not self.is_empty(), "Deque is empty." + + topop = self._back + self._back = self._back.prev # set new back + self._back.next = ( + None # drop the last node - python will deallocate memory automatically + ) + + self._len -= 1 + + return topop.val + + def popleft(self) -> Any: + """ + Removes the first element of the deque and returns it. + Time complexity: O(1) + + @returns topop.val: the value of the node to pop. + + >>> our_deque = Deque([15182, 1, 2, 3]) + >>> our_popped = our_deque.popleft() + >>> our_popped + 15182 + >>> our_deque + [1, 2, 3] + + >>> from collections import deque + >>> deque_collections = deque([15182, 1, 2, 3]) + >>> collections_popped = deque_collections.popleft() + >>> collections_popped + 15182 + >>> deque_collections + deque([1, 2, 3]) + + >>> list(our_deque) == list(deque_collections) + True + >>> our_popped == collections_popped + True + """ + # make sure the deque has elements to pop + assert not self.is_empty(), "Deque is empty." + + topop = self._front + self._front = self._front.next # set new front and drop the first node + self._front.prev = None + + self._len -= 1 + + return topop.val + + def is_empty(self) -> bool: + """ + Checks if the deque is empty. 
+ Time complexity: O(1) + + >>> our_deque = Deque([1, 2, 3]) + >>> our_deque.is_empty() + False + >>> our_empty_deque = Deque() + >>> our_empty_deque.is_empty() + True + + >>> from collections import deque + >>> empty_deque_collections = deque() + >>> list(our_empty_deque) == list(empty_deque_collections) + True + """ + return self._front is None + + def __len__(self) -> int: + """ + Implements len() function. Returns the length of the deque. + Time complexity: O(1) + + >>> our_deque = Deque([1, 2, 3]) + >>> len(our_deque) + 3 + >>> our_empty_deque = Deque() + >>> len(our_empty_deque) + 0 + + >>> from collections import deque + >>> deque_collections = deque([1, 2, 3]) + >>> len(deque_collections) + 3 + >>> empty_deque_collections = deque() + >>> len(empty_deque_collections) + 0 + >>> len(our_empty_deque) == len(empty_deque_collections) + True + """ + return self._len + + def __eq__(self, other: "Deque") -> bool: + """ + Implements "==" operator. Returns if *self* is equal to *other*. + Time complexity: O(n) + + >>> our_deque_1 = Deque([1, 2, 3]) + >>> our_deque_2 = Deque([1, 2, 3]) + >>> our_deque_1 == our_deque_2 + True + >>> our_deque_3 = Deque([1, 2]) + >>> our_deque_1 == our_deque_3 + False + + >>> from collections import deque + >>> deque_collections_1 = deque([1, 2, 3]) + >>> deque_collections_2 = deque([1, 2, 3]) + >>> deque_collections_1 == deque_collections_2 + True + >>> deque_collections_3 = deque([1, 2]) + >>> deque_collections_1 == deque_collections_3 + False + + >>> (our_deque_1 == our_deque_2) == (deque_collections_1 == deque_collections_2) + True + >>> (our_deque_1 == our_deque_3) == (deque_collections_1 == deque_collections_3) + True + """ + me = self._front + oth = other._front + + # if the length of the deques are not the same, they are not equal + if len(self) != len(other): + return False + + while me is not None and oth is not None: + # compare every value + if me.val != oth.val: + return False + me = me.next + oth = oth.next + + return True + + def __iter__(self) -> "_Iterator": + """ + Implements iteration. + Time complexity: O(1) + + >>> our_deque = Deque([1, 2, 3]) + >>> for v in our_deque: + ... print(v) + 1 + 2 + 3 + + >>> from collections import deque + >>> deque_collections = deque([1, 2, 3]) + >>> for v in deque_collections: + ... print(v) + 1 + 2 + 3 + """ + return Deque._Iterator(self._front) + + def __repr__(self) -> str: + """ + Implements representation of the deque. + Represents it as a list, with its values between '[' and ']'. 
+ Time complexity: O(n) + + >>> our_deque = Deque([1, 2, 3]) + >>> our_deque + [1, 2, 3] + """ + values_list = [] + aux = self._front + while aux is not None: + # append the values in a list to display + values_list.append(aux.val) + aux = aux.next + + return "[" + ", ".join(repr(val) for val in values_list) + "]" + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c0acfd46cbd6b29847d9e0e226431ab6004b8e9b Mon Sep 17 00:00:00 2001 From: John Law Date: Thu, 21 Oct 2021 15:06:32 +0800 Subject: [PATCH 0273/1543] Fix factorial issues (#5496) * updating DIRECTORY.md * pass integer to `math.factorial` in `project_euler/problem_015` * remove duplicated factorial function * updating DIRECTORY.md * Update maths/factorial_iterative.py Co-authored-by: Christian Clauss * Update factorial_iterative.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 4 +++- maths/factorial_iterative.py | 23 ++++++++++++++++----- maths/factorial_python.py | 34 ------------------------------- project_euler/problem_015/sol1.py | 2 +- 4 files changed, 22 insertions(+), 41 deletions(-) delete mode 100644 maths/factorial_python.py diff --git a/DIRECTORY.md b/DIRECTORY.md index e3b40530a6b7..10149eac5aac 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -75,6 +75,7 @@ * [Morse Code](https://github.com/TheAlgorithms/Python/blob/master/ciphers/morse_code.py) * [Onepad Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/onepad_cipher.py) * [Playfair Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/playfair_cipher.py) + * [Polybius](https://github.com/TheAlgorithms/Python/blob/master/ciphers/polybius.py) * [Porta Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/porta_cipher.py) * [Rabin Miller](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rabin_miller.py) * [Rail Fence Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rail_fence_cipher.py) @@ -453,7 +454,6 @@ * [Eulers Totient](https://github.com/TheAlgorithms/Python/blob/master/maths/eulers_totient.py) * [Extended Euclidean Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/extended_euclidean_algorithm.py) * [Factorial Iterative](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_iterative.py) - * [Factorial Python](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_python.py) * [Factorial Recursive](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_recursive.py) * [Factors](https://github.com/TheAlgorithms/Python/blob/master/maths/factors.py) * [Fermat Little Theorem](https://github.com/TheAlgorithms/Python/blob/master/maths/fermat_little_theorem.py) @@ -565,6 +565,7 @@ ## Other * [Activity Selection](https://github.com/TheAlgorithms/Python/blob/master/other/activity_selection.py) + * [Check Strong Password](https://github.com/TheAlgorithms/Python/blob/master/other/check_strong_password.py) * [Date To Weekday](https://github.com/TheAlgorithms/Python/blob/master/other/date_to_weekday.py) * [Davisb Putnamb Logemannb Loveland](https://github.com/TheAlgorithms/Python/blob/master/other/davisb_putnamb_logemannb_loveland.py) * [Dijkstra Bankers Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/dijkstra_bankers_algorithm.py) @@ -952,6 +953,7 @@ * [Reverse Words](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_words.py) * [Split](https://github.com/TheAlgorithms/Python/blob/master/strings/split.py) 
* [Upper](https://github.com/TheAlgorithms/Python/blob/master/strings/upper.py) + * [Wildcard Pattern Matching](https://github.com/TheAlgorithms/Python/blob/master/strings/wildcard_pattern_matching.py) * [Word Occurrence](https://github.com/TheAlgorithms/Python/blob/master/strings/word_occurrence.py) * [Word Patterns](https://github.com/TheAlgorithms/Python/blob/master/strings/word_patterns.py) * [Z Function](https://github.com/TheAlgorithms/Python/blob/master/strings/z_function.py) diff --git a/maths/factorial_iterative.py b/maths/factorial_iterative.py index 64314790c11c..c6cf7de57ab2 100644 --- a/maths/factorial_iterative.py +++ b/maths/factorial_iterative.py @@ -1,8 +1,11 @@ -# factorial of a positive integer -- https://en.wikipedia.org/wiki/Factorial +"""Factorial of a positive integer -- https://en.wikipedia.org/wiki/Factorial +""" -def factorial(n: int) -> int: +def factorial(number: int) -> int: """ + Calculate the factorial of specified number (n!). + >>> import math >>> all(factorial(i) == math.factorial(i) for i in range(20)) True @@ -14,17 +17,27 @@ def factorial(n: int) -> int: Traceback (most recent call last): ... ValueError: factorial() not defined for negative values + >>> factorial(1) + 1 + >>> factorial(6) + 720 + >>> factorial(0) + 1 """ - if n != int(n): + if number != int(number): raise ValueError("factorial() only accepts integral values") - if n < 0: + if number < 0: raise ValueError("factorial() not defined for negative values") value = 1 - for i in range(1, n + 1): + for i in range(1, number + 1): value *= i return value if __name__ == "__main__": + import doctest + + doctest.testmod() + n = int(input("Enter a positive integer: ").strip() or 0) print(f"factorial{n} is {factorial(n)}") diff --git a/maths/factorial_python.py b/maths/factorial_python.py deleted file mode 100644 index 46688261af56..000000000000 --- a/maths/factorial_python.py +++ /dev/null @@ -1,34 +0,0 @@ -def factorial(input_number: int) -> int: - """ - Calculate the factorial of specified number - - >>> factorial(1) - 1 - >>> factorial(6) - 720 - >>> factorial(0) - 1 - >>> factorial(-1) - Traceback (most recent call last): - ... - ValueError: factorial() not defined for negative values - >>> factorial(0.1) - Traceback (most recent call last): - ... - ValueError: factorial() only accepts integral values - """ - - if input_number < 0: - raise ValueError("factorial() not defined for negative values") - if not isinstance(input_number, int): - raise ValueError("factorial() only accepts integral values") - result = 1 - for i in range(1, input_number): - result = result * (i + 1) - return result - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/project_euler/problem_015/sol1.py b/project_euler/problem_015/sol1.py index da079d26120a..fb2020d6179f 100644 --- a/project_euler/problem_015/sol1.py +++ b/project_euler/problem_015/sol1.py @@ -26,7 +26,7 @@ def solution(n: int = 20) -> int: """ n = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... 
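# Note on the one-character change just below: in Python 3, "/" always yields
# a float (40 / 2 == 20.0) while "//" yields an int (40 // 2 == 20), and
# math.factorial expects an integral value.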
- k = n / 2 + k = n // 2 return int(factorial(n) / (factorial(k) * factorial(n - k))) From fdf095f69f428b161e939b18971812285608495a Mon Sep 17 00:00:00 2001 From: poloso Date: Thu, 21 Oct 2021 08:13:42 -0500 Subject: [PATCH 0274/1543] [mypy] check polygon and corrections (#5419) * Update annotations to Python 3.10 #4052 * Add floats doctest * Copy list to avoid changing input unpredictably * Refactor code to make it readable * updating DIRECTORY.md * Improve raised ValueErrors and add doctest * Split doctest in multiples lines * Change ValueError to Monogons and Digons are not poly * Correct doctest refering number of sides Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- maths/check_polygon.py | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/maths/check_polygon.py b/maths/check_polygon.py index 0e771197331f..1e8dce7183ad 100644 --- a/maths/check_polygon.py +++ b/maths/check_polygon.py @@ -1,7 +1,7 @@ -from typing import List +from __future__ import annotations -def check_polygon(nums: List) -> bool: +def check_polygon(nums: list[float]) -> bool: """ Takes list of possible side lengths and determines whether a two-dimensional polygon with such side lengths can exist. @@ -14,15 +14,28 @@ def check_polygon(nums: List) -> bool: True >>> check_polygon([3, 7, 13, 2]) False + >>> check_polygon([1, 4.3, 5.2, 12.2]) + False + >>> nums = [3, 7, 13, 2] + >>> _ = check_polygon(nums) # Run function, do not show answer in output + >>> nums # Check numbers are not reordered + [3, 7, 13, 2] >>> check_polygon([]) Traceback (most recent call last): ... - ValueError: List is invalid + ValueError: Monogons and Digons are not polygons in the Euclidean space + >>> check_polygon([-2, 5, 6]) + Traceback (most recent call last): + ... 
+ ValueError: All values must be greater than 0 """ - if not nums: - raise ValueError("List is invalid") - nums.sort() - return nums.pop() < sum(nums) + if len(nums) < 2: + raise ValueError("Monogons and Digons are not polygons in the Euclidean space") + if any(i <= 0 for i in nums): + raise ValueError("All values must be greater than 0") + copy_nums = nums.copy() + copy_nums.sort() + return copy_nums[-1] < sum(copy_nums[:-1]) if __name__ == "__main__": From 9153db2d275086bb951b696b3a4628c76a14ac90 Mon Sep 17 00:00:00 2001 From: Sherman Hui <11592023+shermanhui@users.noreply.github.com> Date: Thu, 21 Oct 2021 20:39:18 -0700 Subject: [PATCH 0275/1543] [mypy] fix: fix mypy error in singly_linked_list.py (#5517) The list comprehension shortcut was implicitly expecting a return value causing a mypy error since `insert_tail` doesn't return a value --- data_structures/linked_list/singly_linked_list.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index 4a5dc8263f79..a4156b650776 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -409,7 +409,9 @@ def test_singly_linked_list_2() -> None: 12.20, ] linked_list = LinkedList() - [linked_list.insert_tail(i) for i in input] + + for i in input: + linked_list.insert_tail(i) # Check if it's empty or not assert linked_list.is_empty() is False From b373c991f69e20d7e1dc92d1613e60a5605bf1a8 Mon Sep 17 00:00:00 2001 From: Sherman Hui <11592023+shermanhui@users.noreply.github.com> Date: Thu, 21 Oct 2021 20:40:17 -0700 Subject: [PATCH 0276/1543] [mypy] fix: fix mypy error in trie.py(#5516) --- data_structures/trie/trie.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/trie/trie.py b/data_structures/trie/trie.py index 6582be24fd0c..766294c23ac8 100644 --- a/data_structures/trie/trie.py +++ b/data_structures/trie/trie.py @@ -11,7 +11,7 @@ def __init__(self): self.nodes = dict() # Mapping from char to TrieNode self.is_leaf = False - def insert_many(self, words: [str]): + def insert_many(self, words: list[str]): """ Inserts a list of words into the Trie :param words: list of string words From 57a7e5738b8224f58941019964da67ece679eab9 Mon Sep 17 00:00:00 2001 From: Jenny Vo <40080855+ovynnej@users.noreply.github.com> Date: Fri, 22 Oct 2021 04:52:39 +0100 Subject: [PATCH 0277/1543] Add implementation of Coulomb's Law (#4897) --- electronics/coulombs_law.py | 86 +++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 electronics/coulombs_law.py diff --git a/electronics/coulombs_law.py b/electronics/coulombs_law.py new file mode 100644 index 000000000000..e4c8391c9f9a --- /dev/null +++ b/electronics/coulombs_law.py @@ -0,0 +1,86 @@ +# https://en.wikipedia.org/wiki/Coulomb%27s_law + +from __future__ import annotations + +COULOMBS_CONSTANT = 8.988e9 # units = N * m^s * C^-2 + + +def couloumbs_law( + force: float, charge1: float, charge2: float, distance: float +) -> dict[str, float]: + + """ + Apply Coulomb's Law on any three given values. These can be force, charge1, + charge2, or distance, and then in a Python dict return name/value pair of + the zero value. 
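
    A minimal sketch of the relation the four branches below rearrange (the
    helper name force_between is illustrative, not part of this module):

        def force_between(q1: float, q2: float, d: float) -> float:
            k = 8.988e9  # Coulomb constant, N * m^2 / C^2
            return k * abs(q1 * q2) / d ** 2

        force_between(3, 5, 2000)  # 33705.0, matching the first doctest
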
+ + Coulomb's Law states that the magnitude of the electrostatic force of + attraction or repulsion between two point charges is directly proportional + to the product of the magnitudes of charges and inversely proportional to + the square of the distance between them. + + Reference + ---------- + Coulomb (1785) "Premier mémoire sur l’électricité et le magnétisme," + Histoire de l’Académie Royale des Sciences, pp. 569–577. + + Parameters + ---------- + force : float with units in Newtons + + charge1 : float with units in Coulombs + + charge2 : float with units in Coulombs + + distance : float with units in meters + + Returns + ------- + result : dict name/value pair of the zero value + + >>> couloumbs_law(force=0, charge1=3, charge2=5, distance=2000) + {'force': 33705.0} + + >>> couloumbs_law(force=10, charge1=3, charge2=5, distance=0) + {'distance': 116112.01488218177} + + >>> couloumbs_law(force=10, charge1=0, charge2=5, distance=2000) + {'charge1': 0.0008900756564307966} + + >>> couloumbs_law(force=0, charge1=0, charge2=5, distance=2000) + Traceback (most recent call last): + ... + ValueError: One and only one argument must be 0 + + >>> couloumbs_law(force=0, charge1=3, charge2=5, distance=-2000) + Traceback (most recent call last): + ... + ValueError: Distance cannot be negative + + """ + + charge_product = abs(charge1 * charge2) + + if (force, charge1, charge2, distance).count(0) != 1: + raise ValueError("One and only one argument must be 0") + if distance < 0: + raise ValueError("Distance cannot be negative") + if force == 0: + force = COULOMBS_CONSTANT * charge_product / (distance ** 2) + return {"force": force} + elif charge1 == 0: + charge1 = abs(force) * (distance ** 2) / (COULOMBS_CONSTANT * charge2) + return {"charge1": charge1} + elif charge2 == 0: + charge2 = abs(force) * (distance ** 2) / (COULOMBS_CONSTANT * charge1) + return {"charge2": charge2} + elif distance == 0: + distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5 + return {"distance": distance} + raise ValueError("Exactly one argument must be 0") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 83a63d9c22cb0003c4e279cf5e22a2f07b83d652 Mon Sep 17 00:00:00 2001 From: Erwin Junge Date: Fri, 22 Oct 2021 06:14:45 +0200 Subject: [PATCH 0278/1543] [mypy] Add missing type annotation in conways_game_of_life.py (#5490) --- cellular_automata/conways_game_of_life.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cellular_automata/conways_game_of_life.py b/cellular_automata/conways_game_of_life.py index 079fb4d04499..84f4d5be40da 100644 --- a/cellular_automata/conways_game_of_life.py +++ b/cellular_automata/conways_game_of_life.py @@ -70,7 +70,7 @@ def new_generation(cells: list[list[int]]) -> list[list[int]]: return next_generation -def generate_images(cells: list[list[int]], frames) -> list[Image.Image]: +def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]: """ Generates a list of images of subsequent Game of Life states. 
""" From 08254eb2e4da3fba23d019f39f7f22a05532cd0e Mon Sep 17 00:00:00 2001 From: Erwin Junge Date: Fri, 22 Oct 2021 11:45:19 +0200 Subject: [PATCH 0279/1543] [mypy] Fix type annotations for boolean_algebra/quine_mc_cluskey.py (#5489) * Add missing type annotation * Fix conversion bug This failed when called with the documented example of `1.5` and was correctly pointed out by `mypy --strict` --- boolean_algebra/quine_mc_cluskey.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/boolean_algebra/quine_mc_cluskey.py b/boolean_algebra/quine_mc_cluskey.py index 9cc99b1eeabb..0342e5c67753 100644 --- a/boolean_algebra/quine_mc_cluskey.py +++ b/boolean_algebra/quine_mc_cluskey.py @@ -146,10 +146,10 @@ def prime_implicant_chart( return chart -def main(): +def main() -> None: no_of_variable = int(input("Enter the no. of variables\n")) minterms = [ - int(x) + float(x) for x in input( "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split() From d924a8051bfe0fe8de164e5074eb4f5f8fa6afb3 Mon Sep 17 00:00:00 2001 From: Erwin Junge Date: Fri, 22 Oct 2021 11:45:30 +0200 Subject: [PATCH 0280/1543] [mypy] Add missing type annotation (#5491) --- cellular_automata/game_of_life.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index 09863993dc3a..c5324da73dbf 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -40,18 +40,18 @@ random.shuffle(choice) -def create_canvas(size): +def create_canvas(size: int) -> list[list[bool]]: canvas = [[False for i in range(size)] for j in range(size)] return canvas -def seed(canvas): +def seed(canvas: list[list[bool]]) -> None: for i, row in enumerate(canvas): for j, _ in enumerate(row): canvas[i][j] = bool(random.getrandbits(1)) -def run(canvas): +def run(canvas: list[list[bool]]) -> list[list[bool]]: """This function runs the rules of game through all points, and changes their status accordingly.(in the same canvas) @Args: @@ -62,21 +62,22 @@ def run(canvas): -- None """ - canvas = np.array(canvas) - next_gen_canvas = np.array(create_canvas(canvas.shape[0])) - for r, row in enumerate(canvas): + current_canvas = np.array(canvas) + next_gen_canvas = np.array(create_canvas(current_canvas.shape[0])) + for r, row in enumerate(current_canvas): for c, pt in enumerate(row): # print(r-1,r+2,c-1,c+2) next_gen_canvas[r][c] = __judge_point( - pt, canvas[r - 1 : r + 2, c - 1 : c + 2] + pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2] ) - canvas = next_gen_canvas + current_canvas = next_gen_canvas del next_gen_canvas # cleaning memory as we move on. - return canvas.tolist() + return_canvas: list[list[bool]] = current_canvas.tolist() + return return_canvas -def __judge_point(pt, neighbours): +def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool: dead = 0 alive = 0 # finding dead or alive neighbours count. 
From 061614880d0c4e1b3483665b1b82cc124932ff51 Mon Sep 17 00:00:00 2001 From: poloso Date: Fri, 22 Oct 2021 05:07:28 -0500 Subject: [PATCH 0281/1543] [mypy] fix type annotations for graphs/a_star.py #4052 (#5224) * [mypy] fix type annotations for graphs/a_star.py #4052 * updating DIRECTORY.md * Add from __future__ import anotations * rename delta by DIRECTIONS Co-authored-by: John Law * Rename delta by DIRECTIONS in all code * Enclose script in __main__ code block * Refactor DIRECTIONS with comments for readibility * Delete heuristic example comment * Do not print, return all values * Fix multilines * fix black * Update a_star.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: John Law --- graphs/a_star.py | 92 ++++++++++++++++++++++++++---------------------- 1 file changed, 50 insertions(+), 42 deletions(-) diff --git a/graphs/a_star.py b/graphs/a_star.py index d3657cb19540..e0f24734a4cb 100644 --- a/graphs/a_star.py +++ b/graphs/a_star.py @@ -1,37 +1,21 @@ -grid = [ - [0, 1, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles - [0, 1, 0, 0, 0, 0], - [0, 1, 0, 0, 1, 0], - [0, 0, 0, 0, 1, 0], -] - -""" -heuristic = [[9, 8, 7, 6, 5, 4], - [8, 7, 6, 5, 4, 3], - [7, 6, 5, 4, 3, 2], - [6, 5, 4, 3, 2, 1], - [5, 4, 3, 2, 1, 0]]""" - -init = [0, 0] -goal = [len(grid) - 1, len(grid[0]) - 1] # all coordinates are given in format [y,x] -cost = 1 - -# the cost map which pushes the path closer to the goal -heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] -for i in range(len(grid)): - for j in range(len(grid[0])): - heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1]) - if grid[i][j] == 1: - heuristic[i][j] = 99 # added extra penalty in the heuristic map - +from __future__ import annotations -# the actions we can take -delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # go up # go left # go down # go right +DIRECTIONS = [ + [-1, 0], # left + [0, -1], # down + [1, 0], # right + [0, 1], # up +] # function to search the path -def search(grid, init, goal, cost, heuristic): +def search( + grid: list[list[int]], + init: list[int], + goal: list[int], + cost: int, + heuristic: list[list[int]], +) -> tuple[list[list[int]], list[list[int]]]: closed = [ [0 for col in range(len(grid[0]))] for row in range(len(grid)) @@ -52,7 +36,7 @@ def search(grid, init, goal, cost, heuristic): while not found and not resign: if len(cell) == 0: - return "FAIL" + raise ValueError("Algorithm is unable to find solution") else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() @@ -64,9 +48,9 @@ def search(grid, init, goal, cost, heuristic): if x == goal[0] and y == goal[1]: found = True else: - for i in range(len(delta)): # to try out different valid actions - x2 = x + delta[i][0] - y2 = y + delta[i][1] + for i in range(len(DIRECTIONS)): # to try out different valid actions + x2 = x + DIRECTIONS[i][0] + y2 = y + DIRECTIONS[i][1] if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]): if closed[x2][y2] == 0 and grid[x2][y2] == 0: g2 = g + cost @@ -79,8 +63,8 @@ def search(grid, init, goal, cost, heuristic): y = goal[1] invpath.append([x, y]) # we get the reverse path from here while x != init[0] or y != init[1]: - x2 = x - delta[action[x][y]][0] - y2 = y - delta[action[x][y]][1] + x2 = x - DIRECTIONS[action[x][y]][0] + y2 = y - DIRECTIONS[action[x][y]][1] x = x2 y = y2 invpath.append([x, y]) @@ -88,13 +72,37 @@ def search(grid, init, goal, cost, heuristic): path = [] for i in 
range(len(invpath)): path.append(invpath[len(invpath) - 1 - i]) + return path, action + + +if __name__ == "__main__": + grid = [ + [0, 1, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles + [0, 1, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 0, 0, 0, 1, 0], + ] + + init = [0, 0] + # all coordinates are given in format [y,x] + goal = [len(grid) - 1, len(grid[0]) - 1] + cost = 1 + + # the cost map which pushes the path closer to the goal + heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] + for i in range(len(grid)): + for j in range(len(grid[0])): + heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1]) + if grid[i][j] == 1: + # added extra penalty in the heuristic map + heuristic[i][j] = 99 + + path, action = search(grid, init, goal, cost, heuristic) + print("ACTION MAP") for i in range(len(action)): print(action[i]) - return path - - -a = search(grid, init, goal, cost, heuristic) -for i in range(len(a)): - print(a[i]) + for i in range(len(path)): + print(path[i]) From d82cf5292fbd0ffe1764a1da5c96a39b244618eb Mon Sep 17 00:00:00 2001 From: QuartzAl <55610038+QuartzAl@users.noreply.github.com> Date: Fri, 22 Oct 2021 18:14:35 +0700 Subject: [PATCH 0282/1543] split into usable functions and added docstrings for base32 cipher (#5466) * split into usable functions and added docstrings * Simplified code Co-authored-by: Christian Clauss * Simplified code Co-authored-by: Christian Clauss Co-authored-by: Christian Clauss --- ciphers/base32.py | 43 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/ciphers/base32.py b/ciphers/base32.py index da289a7210e8..fee53ccaf0c4 100644 --- a/ciphers/base32.py +++ b/ciphers/base32.py @@ -1,13 +1,42 @@ import base64 -def main() -> None: - inp = input("->") - encoded = inp.encode("utf-8") # encoded the input (we need a bytes like object) - b32encoded = base64.b32encode(encoded) # b32encoded the encoded string - print(b32encoded) - print(base64.b32decode(b32encoded).decode("utf-8")) # decoded it +def base32_encode(string: str) -> bytes: + """ + Encodes a given string to base32, returning a bytes-like object + >>> base32_encode("Hello World!") + b'JBSWY3DPEBLW64TMMQQQ====' + >>> base32_encode("123456") + b'GEZDGNBVGY======' + >>> base32_encode("some long complex string") + b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=' + """ + + # encoded the input (we need a bytes like object) + # then, b32encoded the bytes-like object + return base64.b32encode(string.encode("utf-8")) + + +def base32_decode(encoded_bytes: bytes) -> str: + """ + Decodes a given bytes-like object to a string, returning a string + >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====') + 'Hello World!' + >>> base32_decode(b'GEZDGNBVGY======') + '123456' + >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=') + 'some long complex string' + """ + + # decode the bytes from base32 + # then, decode the bytes-like object to return as a string + return base64.b32decode(encoded_bytes).decode("utf-8") if __name__ == "__main__": - main() + test = "Hello World!" 
+ encoded = base32_encode(test) + print(encoded) + + decoded = base32_decode(encoded) + print(decoded) From 629848e3721d9354d25fad6cb4729e6afdbbf799 Mon Sep 17 00:00:00 2001 From: Sherman Hui <11592023+shermanhui@users.noreply.github.com> Date: Fri, 22 Oct 2021 07:07:05 -0700 Subject: [PATCH 0283/1543] [mypy] Fix type annotations in `data_structures/binary_tree` (#5518) * fix: fix mypy errors Update binary_search_tree `arr` argument to be typed as a list within `find_kth_smallest` function Update return type of `merge_two_binary_trees` as both inputs can be None which means that a None type value can be returned from this function * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + data_structures/binary_tree/binary_search_tree.py | 2 +- data_structures/binary_tree/merge_two_binary_trees.py | 4 +++- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 10149eac5aac..950d8e2c0c4b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -272,6 +272,7 @@ ## Electronics * [Carrier Concentration](https://github.com/TheAlgorithms/Python/blob/master/electronics/carrier_concentration.py) + * [Coulombs Law](https://github.com/TheAlgorithms/Python/blob/master/electronics/coulombs_law.py) * [Electric Power](https://github.com/TheAlgorithms/Python/blob/master/electronics/electric_power.py) * [Ohms Law](https://github.com/TheAlgorithms/Python/blob/master/electronics/ohms_law.py) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index a1ed1d0ac2a5..ce490fd98524 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -151,7 +151,7 @@ def inorder(self, arr: list, node: Node): def find_kth_smallest(self, k: int, node: Node) -> int: """Return the kth smallest element in a binary search tree""" - arr = [] + arr: list = [] self.inorder(arr, node) # append all values to list using inorder traversal return arr[k - 1] diff --git a/data_structures/binary_tree/merge_two_binary_trees.py b/data_structures/binary_tree/merge_two_binary_trees.py index d169e0e75b82..7487268940d3 100644 --- a/data_structures/binary_tree/merge_two_binary_trees.py +++ b/data_structures/binary_tree/merge_two_binary_trees.py @@ -7,6 +7,8 @@ """ from __future__ import annotations +from typing import Optional + class Node: """ @@ -19,7 +21,7 @@ def __init__(self, value: int = 0) -> None: self.right: Node | None = None -def merge_two_binary_trees(tree1: Node | None, tree2: Node | None) -> Node: +def merge_two_binary_trees(tree1: Node | None, tree2: Node | None) -> Optional[Node]: """ Returns root node of the merged tree. From 0ca12799974aa2d0756aec608d38fea04f1239d7 Mon Sep 17 00:00:00 2001 From: Toki345 <91814435+Toki345@users.noreply.github.com> Date: Fri, 22 Oct 2021 22:07:57 +0800 Subject: [PATCH 0284/1543] Fixed grammar on Anagram Description (#5512) Made the description more formal, also fixed a few grammatical issues. 
--- strings/check_anagrams.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strings/check_anagrams.py b/strings/check_anagrams.py index 3083000cbb5d..62a4441a0c00 100644 --- a/strings/check_anagrams.py +++ b/strings/check_anagrams.py @@ -5,7 +5,7 @@ def check_anagrams(first_str: str, second_str: str) -> bool: """ - Two strings are anagrams if they are made of the same letters + Two strings are anagrams if they are made up of the same letters but are arranged differently (ignoring the case). >>> check_anagrams('Silent', 'Listen') True From 2ddd81df211332b580887a929367a1d529c56dbe Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj <89947037+Rohanrbharadwaj@users.noreply.github.com> Date: Fri, 22 Oct 2021 22:44:08 +0530 Subject: [PATCH 0285/1543] Remove wrongly placed double qoutes (#5530) * Update baconian_cipher.py * Update join.py * Updated type hint --- ciphers/baconian_cipher.py | 2 +- strings/join.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ciphers/baconian_cipher.py b/ciphers/baconian_cipher.py index 027fbc50e89d..f146ba91b78f 100644 --- a/ciphers/baconian_cipher.py +++ b/ciphers/baconian_cipher.py @@ -83,7 +83,7 @@ def decode(coded: str) -> str: return decoded.strip() -if "__name__" == "__main__": +if __name__ == "__main__": from doctest import testmod testmod() diff --git a/strings/join.py b/strings/join.py index 0cb88b76065d..c17ddd144597 100644 --- a/strings/join.py +++ b/strings/join.py @@ -3,7 +3,7 @@ """ -def join(separator: str, separated: list) -> str: +def join(separator: str, separated: list[str]) -> str: """ >>> join("", ["a", "b", "c", "d"]) 'abcd' @@ -26,7 +26,7 @@ def join(separator: str, separated: list) -> str: return joined.strip(separator) -if "__name__" == "__main__": +if __name__ == "__main__": from doctest import testmod testmod() From 07141e4bcce7770300f4cf7c3042bdfc5e3c9934 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Murilo=20Gon=C3=A7alves?= <38800183+murilo-goncalves@users.noreply.github.com> Date: Fri, 22 Oct 2021 14:14:27 -0300 Subject: [PATCH 0286/1543] Add doctests to prime_check function (#5503) * added doctests to prime_check function * fix doctests function name --- maths/prime_check.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/maths/prime_check.py b/maths/prime_check.py index e2bcb7b8f151..92d31cfeee80 100644 --- a/maths/prime_check.py +++ b/maths/prime_check.py @@ -5,9 +5,28 @@ def prime_check(number: int) -> bool: - """Checks to see if a number is a prime. + """Checks to see if a number is a prime in O(sqrt(n)). A number is prime if it has exactly two factors: 1 and itself. 
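
    The O(sqrt(n)) bound holds because any composite n = a * b has
    min(a, b) <= sqrt(n), so trial division only needs candidates up to the
    integer square root, for example:

        import math
        math.isqrt(563)  # 23, since 23**2 == 529 <= 563 < 576 == 24**2
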
+ + >>> prime_check(0) + False + >>> prime_check(1) + False + >>> prime_check(2) + True + >>> prime_check(3) + True + >>> prime_check(27) + False + >>> prime_check(87) + False + >>> prime_check(563) + True + >>> prime_check(2999) + True + >>> prime_check(67483) + False """ if 1 < number < 4: From 11ec2fd3fb472a8bcac738f372e6e0f731326d3b Mon Sep 17 00:00:00 2001 From: Edward Nuno Date: Fri, 22 Oct 2021 10:21:41 -0700 Subject: [PATCH 0287/1543] [mypy] Fix type annotations for trie.py (#5022) * Fix type annotations for trie.py * Add strict type annotations to trie.py Annotate return type for all functions and type for "nodes" * updating DIRECTORY.md * Format trie.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- data_structures/trie/trie.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/data_structures/trie/trie.py b/data_structures/trie/trie.py index 766294c23ac8..162d08d1d678 100644 --- a/data_structures/trie/trie.py +++ b/data_structures/trie/trie.py @@ -7,11 +7,11 @@ class TrieNode: - def __init__(self): - self.nodes = dict() # Mapping from char to TrieNode + def __init__(self) -> None: + self.nodes: dict[str, TrieNode] = dict() # Mapping from char to TrieNode self.is_leaf = False - def insert_many(self, words: list[str]): + def insert_many(self, words: list[str]) -> None: """ Inserts a list of words into the Trie :param words: list of string words @@ -20,7 +20,7 @@ def insert_many(self, words: list[str]): for word in words: self.insert(word) - def insert(self, word: str): + def insert(self, word: str) -> None: """ Inserts a word into the Trie :param word: word to be inserted @@ -46,14 +46,14 @@ def find(self, word: str) -> bool: curr = curr.nodes[char] return curr.is_leaf - def delete(self, word: str): + def delete(self, word: str) -> None: """ Deletes a word in a Trie :param word: word to delete :return: None """ - def _delete(curr: TrieNode, word: str, index: int): + def _delete(curr: TrieNode, word: str, index: int) -> bool: if index == len(word): # If word does not exist if not curr.is_leaf: @@ -75,7 +75,7 @@ def _delete(curr: TrieNode, word: str, index: int): _delete(self, word, 0) -def print_words(node: TrieNode, word: str): +def print_words(node: TrieNode, word: str) -> None: """ Prints all the words in a Trie :param node: root node of Trie @@ -89,7 +89,7 @@ def print_words(node: TrieNode, word: str): print_words(value, word + key) -def test_trie(): +def test_trie() -> bool: words = "banana bananas bandana band apple all beast".split() root = TrieNode() root.insert_many(words) @@ -112,11 +112,11 @@ def print_results(msg: str, passes: bool) -> None: print(str(msg), "works!" 
if passes else "doesn't work :(") -def pytests(): +def pytests() -> None: assert test_trie() -def main(): +def main() -> None: """ >>> pytests() """ From 20e09c3ec2021edf7e6cfd85299544c651012919 Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Sat, 23 Oct 2021 06:56:58 -0300 Subject: [PATCH 0288/1543] [mypy] Add type annotations for linked queue in data structures (#5533) * [mypy] Add/fix type annotations for linked queue in data_structures * add return type annotation to __iter__ * Add more readable syntax --- data_structures/queue/linked_queue.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/data_structures/queue/linked_queue.py b/data_structures/queue/linked_queue.py index 8526ad311ed0..21970e7df965 100644 --- a/data_structures/queue/linked_queue.py +++ b/data_structures/queue/linked_queue.py @@ -1,11 +1,13 @@ """ A Queue using a linked list like structure """ -from typing import Any +from __future__ import annotations + +from typing import Any, Iterator class Node: def __init__(self, data: Any) -> None: - self.data = data - self.next = None + self.data: Any = data + self.next: Node | None = None def __str__(self) -> str: return f"{self.data}" @@ -39,9 +41,10 @@ class LinkedQueue: """ def __init__(self) -> None: - self.front = self.rear = None + self.front: Node | None = None + self.rear: Node | None = None - def __iter__(self): + def __iter__(self) -> Iterator[Any]: node = self.front while node: yield node.data @@ -87,7 +90,7 @@ def is_empty(self) -> bool: """ return len(self) == 0 - def put(self, item) -> None: + def put(self, item: Any) -> None: """ >>> queue = LinkedQueue() >>> queue.get() From c50f0c56aa23e8ab9ab019bc61f80465da135ef8 Mon Sep 17 00:00:00 2001 From: Atishaye Jain <64211411+atishaye@users.noreply.github.com> Date: Sat, 23 Oct 2021 15:59:42 +0530 Subject: [PATCH 0289/1543] add check_cycle.py (#5475) * add check_cycle.py * Update graphs/check_cycle.py Co-authored-by: John Law * Update check_cycle.py * Apply suggestions from code review Co-authored-by: John Law Co-authored-by: Christian Clauss --- graphs/check_cycle.py | 55 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 graphs/check_cycle.py diff --git a/graphs/check_cycle.py b/graphs/check_cycle.py new file mode 100644 index 000000000000..71d42b4689b7 --- /dev/null +++ b/graphs/check_cycle.py @@ -0,0 +1,55 @@ +""" +Program to check if a cycle is present in a given graph +""" + + +def check_cycle(graph: dict) -> bool: + """ + Returns True if graph is cyclic else False + + >>> check_cycle(graph={0:[], 1:[0, 3], 2:[0, 4], 3:[5], 4:[5], 5:[]}) + False + >>> check_cycle(graph={0:[1, 2], 1:[2], 2:[0, 3], 3:[3]}) + True + """ + # Keep track of visited nodes + visited = set() + # To detect a back edge, keep track of vertices currently in the recursion stack + rec_stk = set() + for node in graph: + if node not in visited: + if depth_first_search(graph, node, visited, rec_stk): + return True + return False + + +def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool: + """ + Recur for all neighbours. + If any neighbour is visited and in rec_stk then graph is cyclic. 
+ + >>> graph = {0:[], 1:[0, 3], 2:[0, 4], 3:[5], 4:[5], 5:[]} + >>> vertex, visited, rec_stk = 0, set(), set() + >>> depth_first_search(graph, vertex, visited, rec_stk) + False + """ + # Mark current node as visited and add to recursion stack + visited.add(vertex) + rec_stk.add(vertex) + + for node in graph[vertex]: + if node not in visited: + if depth_first_search(graph, node, visited, rec_stk): + return True + elif node in rec_stk: + return True + + # The node needs to be removed from recursion stack before function ends + rec_stk.remove(vertex) + return False + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 27f2465135dc00abb6ec569b050780e9e62d02f3 Mon Sep 17 00:00:00 2001 From: Jaydeep Das Date: Sat, 23 Oct 2021 16:26:26 +0530 Subject: [PATCH 0290/1543] Added new file: nasa_data.py (#5543) * Added new file: nasa_data.py * Modified as per review * Minor change * print(get_archive_data("apollo 2011")["collection"]["items"][0]["data"][0]["description"]) * Update nasa_data.py Co-authored-by: Christian Clauss --- web_programming/nasa_data.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 web_programming/nasa_data.py diff --git a/web_programming/nasa_data.py b/web_programming/nasa_data.py new file mode 100644 index 000000000000..9b15c38a7f05 --- /dev/null +++ b/web_programming/nasa_data.py @@ -0,0 +1,27 @@ +import requests + + +def get_apod_data(api_key: str) -> dict: + """ + Get the APOD(Astronomical Picture of the day) data + Get the API Key from : https://api.nasa.gov/ + """ + url = "https://api.nasa.gov/planetary/apod/" + return requests.get(url, params={"api_key": api_key}).json() + + +def get_archive_data(query: str) -> dict: + """ + Get the data of a particular query from NASA archives + """ + endpoint = "https://images-api.nasa.gov/search" + return requests.get(endpoint, params={"q": query}).json() + + +if __name__ == "__main__": + print(get_apod_data("YOUR API KEY")) + print( + get_archive_data("apollo 2011")["collection"]["items"][0]["data"][0][ + "description" + ] + ) From b64fd56776f03342559034df27948269d49c3375 Mon Sep 17 00:00:00 2001 From: Jaydeep Das Date: Sat, 23 Oct 2021 18:08:25 +0530 Subject: [PATCH 0291/1543] Added feature to `web_programming/nasa_data.py` : Can download the APOD image to a specified location on disk. (#5551) * Added a feature to download images. 
* Minor changes * Update nasa_data.py * : Co-authored-by: Christian Clauss --- web_programming/nasa_data.py | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/web_programming/nasa_data.py b/web_programming/nasa_data.py index 9b15c38a7f05..c0a2c4fdd1a7 100644 --- a/web_programming/nasa_data.py +++ b/web_programming/nasa_data.py @@ -1,27 +1,38 @@ +import shutil + import requests -def get_apod_data(api_key: str) -> dict: +def get_apod_data(api_key: str, download: bool = False, path: str = ".") -> dict: """ Get the APOD(Astronomical Picture of the day) data - Get the API Key from : https://api.nasa.gov/ + Get your API Key from: https://api.nasa.gov/ """ - url = "https://api.nasa.gov/planetary/apod/" + url = "https://api.nasa.gov/planetary/apod" return requests.get(url, params={"api_key": api_key}).json() +def save_apod(api_key: str, path: str = ".") -> dict: + apod_data = get_apod_data(api_key) + img_url = apod_data["url"] + img_name = img_url.split("/")[-1] + response = requests.get(img_url, stream=True) + + with open(f"{path}/{img_name}", "wb+") as img_file: + shutil.copyfileobj(response.raw, img_file) + del response + return apod_data + + def get_archive_data(query: str) -> dict: """ Get the data of a particular query from NASA archives """ - endpoint = "https://images-api.nasa.gov/search" - return requests.get(endpoint, params={"q": query}).json() + url = "https://images-api.nasa.gov/search" + return requests.get(url, params={"q": query}).json() if __name__ == "__main__": - print(get_apod_data("YOUR API KEY")) - print( - get_archive_data("apollo 2011")["collection"]["items"][0]["data"][0][ - "description" - ] - ) + print(save_apod("YOUR API KEY")) + apollo_2011_items = get_archive_data("apollo 2011")["collection"]["items"] + print(apollo_2011_items[0]["data"][0]["description"]) From b72a66b713bb998354df7bfd165c179a756e3b91 Mon Sep 17 00:00:00 2001 From: Mitheel <81575947+mitheelgajare@users.noreply.github.com> Date: Sat, 23 Oct 2021 18:24:41 +0530 Subject: [PATCH 0292/1543] Fixed grammatical errors in CONTRIBUTING.md (#5555) --- CONTRIBUTING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4723c6c39a7d..f5c123674f4a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,7 @@ Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Befo ### Contributor -We are very happy that you consider implementing algorithms and data structure for others! This repository is referenced and used by learners from all over the globe. Being one of our contributors, you agree and confirm that: +We are very happy that you consider implementing algorithms and data structures for others! This repository is referenced and used by learners from all over the globe. Being one of our contributors, you agree and confirm that: - You did your work - no plagiarism allowed - Any plagiarized work will not be merged. @@ -25,7 +25,7 @@ We appreciate any contribution, from fixing a grammar mistake in a comment to im Your contribution will be tested by our [automated testing on Travis CI](https://travis-ci.org/TheAlgorithms/Python/pull_requests) to save time and mental energy. After you have submitted your pull request, you should see the Travis tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the Travis output to understand the failure. 
If you do not understand, please leave a comment on your submission page and a community member will try to help. -Please help us keep our issue list small by adding fixes: #{$ISSUE_NO} to the commit message of pull requests that resolve open issues. GitHub will use this tag to auto close the issue when the PR is merged. +Please help us keep our issue list small by adding fixes: #{$ISSUE_NO} to the commit message of pull requests that resolve open issues. GitHub will use this tag to auto-close the issue when the PR is merged. #### What is an Algorithm? From 80a885c97563ffc3d93a0678bd7219945be1a166 Mon Sep 17 00:00:00 2001 From: Limbad Yash <56826569+limbad-YK@users.noreply.github.com> Date: Sat, 23 Oct 2021 18:48:09 +0530 Subject: [PATCH 0293/1543] Update pop function (#5544) * Updated Pop function Added underflow condition * Update Pop Function Added condition to check underflow of stack * Update stack.py * if not self.stack: raise StackUnderflowError * Add doctests * StackUnderflowError * ..., not .... * Update stack.py Co-authored-by: Christian Clauss --- data_structures/stacks/stack.py | 38 ++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index c62412150626..4bc032f72561 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -5,6 +5,10 @@ class StackOverflowError(BaseException): pass +class StackUnderflowError(BaseException): + pass + + class Stack: """A stack is an abstract data type that serves as a collection of elements with two principal operations: push() and pop(). push() adds an @@ -31,11 +35,29 @@ def push(self, data): self.stack.append(data) def pop(self): - """Pop an element off of the top of the stack.""" + """ + Pop an element off of the top of the stack. + + >>> Stack().pop() + Traceback (most recent call last): + ... + data_structures.stacks.stack.StackUnderflowError + """ + if not self.stack: + raise StackUnderflowError return self.stack.pop() def peek(self): - """Peek at the top-most element of the stack.""" + """ + Peek at the top-most element of the stack. + + >>> Stack().pop() + Traceback (most recent call last): + ... 
+ data_structures.stacks.stack.StackUnderflowError + """ + if not self.stack: + raise StackUnderflowError return self.stack[-1] def is_empty(self) -> bool: @@ -67,22 +89,22 @@ def test_stack() -> None: try: _ = stack.pop() assert False # This should not happen - except IndexError: + except StackUnderflowError: assert True # This should happen try: _ = stack.peek() assert False # This should not happen - except IndexError: + except StackUnderflowError: assert True # This should happen for i in range(10): assert stack.size() == i stack.push(i) - assert bool(stack) is True - assert stack.is_empty() is False - assert stack.is_full() is True + assert bool(stack) + assert not stack.is_empty() + assert stack.is_full() assert str(stack) == str(list(range(10))) assert stack.pop() == 9 assert stack.peek() == 8 @@ -96,7 +118,7 @@ def test_stack() -> None: except StackOverflowError: assert True # This should happen - assert stack.is_empty() is False + assert not stack.is_empty() assert stack.size() == 10 assert 5 in stack From 218d8921dbb264c9636bef5bd10e23acafd032eb Mon Sep 17 00:00:00 2001 From: Studiex <80968515+Studiex@users.noreply.github.com> Date: Sat, 23 Oct 2021 17:20:52 +0400 Subject: [PATCH 0294/1543] Implementation of SHA-256 using Python (#5532) * Add files via upload * Update sha256.py * Update sha256.py * Update sha256.py * Update sha256.py * Update sha256.py * Update sha256.py * Update sha256.py * Update sha256.py * @staticmethod def preprocessing(data: bytes) -> bytes: Co-authored-by: Christian Clauss --- hashes/sha256.py | 248 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100644 hashes/sha256.py diff --git a/hashes/sha256.py b/hashes/sha256.py new file mode 100644 index 000000000000..9d4f250fe353 --- /dev/null +++ b/hashes/sha256.py @@ -0,0 +1,248 @@ +# Author: M. Yathurshan +# Black Formatter: True + +""" +Implementation of SHA256 Hash function in a Python class and provides utilities +to find hash of string or hash of text from a file. + +Usage: python sha256.py --string "Hello World!!" + python sha256.py --file "hello_world.txt" + When run without any arguments, + it prints the hash of the string "Hello World!! 
Welcome to Cryptography" + +References: +https://qvault.io/cryptography/how-sha-2-works-step-by-step-sha-256/ +https://en.wikipedia.org/wiki/SHA-2 +""" + +import argparse +import struct +import unittest + + +class SHA256: + """ + Class to contain the entire pipeline for SHA1 Hashing Algorithm + + >>> SHA256(b'Python').hash + '18885f27b5af9012df19e496460f9294d5ab76128824c6f993787004f6d9a7db' + + >>> SHA256(b'hello world').hash + 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9' + """ + + def __init__(self, data: bytes) -> None: + self.data = data + + # Initialize hash values + self.hashes = [ + 0x6A09E667, + 0xBB67AE85, + 0x3C6EF372, + 0xA54FF53A, + 0x510E527F, + 0x9B05688C, + 0x1F83D9AB, + 0x5BE0CD19, + ] + + # Initialize round constants + self.round_constants = [ + 0x428A2F98, + 0x71374491, + 0xB5C0FBCF, + 0xE9B5DBA5, + 0x3956C25B, + 0x59F111F1, + 0x923F82A4, + 0xAB1C5ED5, + 0xD807AA98, + 0x12835B01, + 0x243185BE, + 0x550C7DC3, + 0x72BE5D74, + 0x80DEB1FE, + 0x9BDC06A7, + 0xC19BF174, + 0xE49B69C1, + 0xEFBE4786, + 0x0FC19DC6, + 0x240CA1CC, + 0x2DE92C6F, + 0x4A7484AA, + 0x5CB0A9DC, + 0x76F988DA, + 0x983E5152, + 0xA831C66D, + 0xB00327C8, + 0xBF597FC7, + 0xC6E00BF3, + 0xD5A79147, + 0x06CA6351, + 0x14292967, + 0x27B70A85, + 0x2E1B2138, + 0x4D2C6DFC, + 0x53380D13, + 0x650A7354, + 0x766A0ABB, + 0x81C2C92E, + 0x92722C85, + 0xA2BFE8A1, + 0xA81A664B, + 0xC24B8B70, + 0xC76C51A3, + 0xD192E819, + 0xD6990624, + 0xF40E3585, + 0x106AA070, + 0x19A4C116, + 0x1E376C08, + 0x2748774C, + 0x34B0BCB5, + 0x391C0CB3, + 0x4ED8AA4A, + 0x5B9CCA4F, + 0x682E6FF3, + 0x748F82EE, + 0x78A5636F, + 0x84C87814, + 0x8CC70208, + 0x90BEFFFA, + 0xA4506CEB, + 0xBEF9A3F7, + 0xC67178F2, + ] + + self.preprocessed_data = self.preprocessing(self.data) + self.final_hash() + + @staticmethod + def preprocessing(data: bytes) -> bytes: + padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64)) + big_endian_integer = struct.pack(">Q", (len(data) * 8)) + return data + padding + big_endian_integer + + def final_hash(self) -> None: + # Convert into blocks of 64 bytes + self.blocks = [ + self.preprocessed_data[x : x + 64] + for x in range(0, len(self.preprocessed_data), 64) + ] + + for block in self.blocks: + # Convert the given block into a list of 4 byte integers + words = list(struct.unpack(">16L", block)) + # add 48 0-ed integers + words += [0] * 48 + + a, b, c, d, e, f, g, h = self.hashes + + for index in range(0, 64): + if index > 15: + # modify the zero-ed indexes at the end of the array + s0 = ( + self.ror(words[index - 15], 7) + ^ self.ror(words[index - 15], 18) + ^ (words[index - 15] >> 3) + ) + s1 = ( + self.ror(words[index - 2], 17) + ^ self.ror(words[index - 2], 19) + ^ (words[index - 2] >> 10) + ) + + words[index] = ( + words[index - 16] + s0 + words[index - 7] + s1 + ) % 0x100000000 + + # Compression + S1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25) + ch = (e & f) ^ ((~e & (0xFFFFFFFF)) & g) + temp1 = ( + h + S1 + ch + self.round_constants[index] + words[index] + ) % 0x100000000 + S0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22) + maj = (a & b) ^ (a & c) ^ (b & c) + temp2 = (S0 + maj) % 0x100000000 + + h, g, f, e, d, c, b, a = ( + g, + f, + e, + ((d + temp1) % 0x100000000), + c, + b, + a, + ((temp1 + temp2) % 0x100000000), + ) + + mutated_hash_values = [a, b, c, d, e, f, g, h] + + # Modify final values + self.hashes = [ + ((element + mutated_hash_values[index]) % 0x100000000) + for index, element in enumerate(self.hashes) + ] + + self.hash = "".join([hex(value)[2:].zfill(8) for value in 
self.hashes]) + + def ror(self, value: int, rotations: int) -> int: + """ + Right rotate a given unsigned number by a certain amount of rotations + """ + return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations) + + +class SHA256HashTest(unittest.TestCase): + """ + Test class for the SHA256 class. Inherits the TestCase class from unittest + """ + + def test_match_hashes(self) -> None: + import hashlib + + msg = bytes("Test String", "utf-8") + self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest()) + + +def main() -> None: + """ + Provides option 'string' or 'file' to take input + and prints the calculated SHA-256 hash + """ + + # unittest.main() + + import doctest + + doctest.testmod() + + parser = argparse.ArgumentParser() + parser.add_argument( + "-s", + "--string", + dest="input_string", + default="Hello World!! Welcome to Cryptography", + help="Hash the string", + ) + parser.add_argument( + "-f", "--file", dest="input_file", help="Hash contents of a file" + ) + + args = parser.parse_args() + + input_string = args.input_string + + # hash input should be a bytestring + if args.input_file: + with open(args.input_file, "rb") as f: + hash_input = f.read() + else: + hash_input = bytes(input_string, "utf-8") + + print(SHA256(hash_input).hash) + + +if __name__ == "__main__": + main() From bd9464e4ac6ccccd4699bf52bddefa2bfb1dafea Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 23 Oct 2021 18:15:30 +0200 Subject: [PATCH 0295/1543] mandelbrot.py: Commenting out long running tests (#5558) * mandelbrot.py: Commenting out long running tests * updating DIRECTORY.md * Comment out 9 sec doctests * Update bidirectional_breadth_first_search.py * Comment out slow tests * Comment out slow (9.15 sec) pytests... * # Comment out slow (4.20s call) doctests * Comment out slow (3.45s) doctests * Update miller_rabin.py * Update miller_rabin.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 +++ fractals/mandelbrot.py | 6 ++++-- graphs/bidirectional_breadth_first_search.py | 15 +++++++++------ maths/miller_rabin.py | 3 ++- .../download_images_from_google_query.py | 5 +++-- 5 files changed, 21 insertions(+), 11 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 950d8e2c0c4b..66d5f8040951 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -317,6 +317,7 @@ * [Breadth First Search Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search_shortest_path.py) * [Check Bipartite Graph Bfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_bipartite_graph_bfs.py) * [Check Bipartite Graph Dfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_bipartite_graph_dfs.py) + * [Check Cycle](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_cycle.py) * [Connected Components](https://github.com/TheAlgorithms/Python/blob/master/graphs/connected_components.py) * [Depth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/depth_first_search.py) * [Depth First Search 2](https://github.com/TheAlgorithms/Python/blob/master/graphs/depth_first_search_2.py) @@ -370,6 +371,7 @@ * [Md5](https://github.com/TheAlgorithms/Python/blob/master/hashes/md5.py) * [Sdbm](https://github.com/TheAlgorithms/Python/blob/master/hashes/sdbm.py) * [Sha1](https://github.com/TheAlgorithms/Python/blob/master/hashes/sha1.py) + * [Sha256](https://github.com/TheAlgorithms/Python/blob/master/hashes/sha256.py) ## Knapsack * [Greedy 
Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/greedy_knapsack.py) @@ -979,6 +981,7 @@ * [Instagram Crawler](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_crawler.py) * [Instagram Pic](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_pic.py) * [Instagram Video](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_video.py) + * [Nasa Data](https://github.com/TheAlgorithms/Python/blob/master/web_programming/nasa_data.py) * [Random Anime Character](https://github.com/TheAlgorithms/Python/blob/master/web_programming/random_anime_character.py) * [Recaptcha Verification](https://github.com/TheAlgorithms/Python/blob/master/web_programming/recaptcha_verification.py) * [Slack Message](https://github.com/TheAlgorithms/Python/blob/master/web_programming/slack_message.py) diff --git a/fractals/mandelbrot.py b/fractals/mandelbrot.py index de795bb3fc6f..5d61b72e172f 100644 --- a/fractals/mandelbrot.py +++ b/fractals/mandelbrot.py @@ -101,9 +101,11 @@ def get_image( of the Mandelbrot set is viewed. The main area of the Mandelbrot set is roughly between "-1.5 < x < 0.5" and "-1 < y < 1" in the figure-coordinates. - >>> get_image().load()[0,0] + Commenting out tests that slow down pytest... + # 13.35s call fractals/mandelbrot.py::mandelbrot.get_image + # >>> get_image().load()[0,0] (255, 0, 0) - >>> get_image(use_distance_color_coding = False).load()[0,0] + # >>> get_image(use_distance_color_coding = False).load()[0,0] (255, 255, 255) """ img = Image.new("RGB", (image_width, image_height)) diff --git a/graphs/bidirectional_breadth_first_search.py b/graphs/bidirectional_breadth_first_search.py index 27e4f0b16bbf..511b080a9add 100644 --- a/graphs/bidirectional_breadth_first_search.py +++ b/graphs/bidirectional_breadth_first_search.py @@ -34,16 +34,19 @@ def __init__( class BreadthFirstSearch: """ - >>> bfs = BreadthFirstSearch((0, 0), (len(grid) - 1, len(grid[0]) - 1)) - >>> (bfs.start.pos_y + delta[3][0], bfs.start.pos_x + delta[3][1]) + # Comment out slow pytests... 
+ # 9.15s call graphs/bidirectional_breadth_first_search.py:: \ + # graphs.bidirectional_breadth_first_search.BreadthFirstSearch + # >>> bfs = BreadthFirstSearch((0, 0), (len(grid) - 1, len(grid[0]) - 1)) + # >>> (bfs.start.pos_y + delta[3][0], bfs.start.pos_x + delta[3][1]) (0, 1) - >>> [x.pos for x in bfs.get_successors(bfs.start)] + # >>> [x.pos for x in bfs.get_successors(bfs.start)] [(1, 0), (0, 1)] - >>> (bfs.start.pos_y + delta[2][0], bfs.start.pos_x + delta[2][1]) + # >>> (bfs.start.pos_y + delta[2][0], bfs.start.pos_x + delta[2][1]) (1, 0) - >>> bfs.retrace_path(bfs.start) + # >>> bfs.retrace_path(bfs.start) [(0, 0)] - >>> bfs.search() # doctest: +NORMALIZE_WHITESPACE + # >>> bfs.search() # doctest: +NORMALIZE_WHITESPACE [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)] """ diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py index fe992027190b..2b0944508b4b 100644 --- a/maths/miller_rabin.py +++ b/maths/miller_rabin.py @@ -9,7 +9,8 @@ def is_prime(n, prec=1000): """ >>> from .prime_check import prime_check - >>> all(is_prime(i) == prime_check(i) for i in range(1000)) + >>> # all(is_prime(i) == prime_check(i) for i in range(1000)) # 3.45s + >>> all(is_prime(i) == prime_check(i) for i in range(256)) True """ if n < 2: diff --git a/web_programming/download_images_from_google_query.py b/web_programming/download_images_from_google_query.py index c26262788c4c..b11a7f883085 100644 --- a/web_programming/download_images_from_google_query.py +++ b/web_programming/download_images_from_google_query.py @@ -24,9 +24,10 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) Returns: The number of images successfully downloaded. - >>> download_images_from_google_query() + # Comment out slow (4.20s call) doctests + # >>> download_images_from_google_query() 5 - >>> download_images_from_google_query("potato") + # >>> download_images_from_google_query("potato") 5 """ max_images = min(max_images, 50) # Prevent abuse! 
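A lighter-weight alternative to commenting out slow doctests (as the commit above does) is doctest's SKIP directive, which keeps the example visible in the docstring while excluding it from test runs. A minimal sketch, using a hypothetical slow function:

    def render_large_image() -> tuple:
        """
        Stand-in for a slow rendering call.

        >>> render_large_image()  # doctest: +SKIP
        (255, 0, 0)
        """
        return (255, 0, 0)


    if __name__ == "__main__":
        import doctest

        doctest.testmod()  # examples marked with +SKIP are not executed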
From 00a67010e8c28cdaa6142d0c4a386282bbf29421 Mon Sep 17 00:00:00 2001 From: Martmists Date: Sat, 23 Oct 2021 23:19:25 +0200 Subject: [PATCH 0296/1543] Simple audio filters (#5230) * Add IIR Filter and Butterworth design functions Signed-off-by: Martmists * naming conventions and missing type hints Signed-off-by: Martmists * Link wikipedia in IIRFilter Signed-off-by: Martmists * Add doctests and None return types Signed-off-by: Martmists * More doctests Signed-off-by: Martmists * Requested changes Signed-off-by: Martmists * run pre-commit Signed-off-by: Martmists * Make mypy stop complaining about ints vs floats Signed-off-by: Martmists * Use slower listcomp to make it more readable Signed-off-by: Martmists * Make doctests happy Signed-off-by: Martmists * Remove scipy Signed-off-by: Martmists * Test coefficients from bw filters Signed-off-by: Martmists * Protocol test Co-authored-by: Christian Clauss * Make requested change Signed-off-by: Martmists * Types Signed-off-by: Martmists * Apply suggestions from code review * Apply suggestions from code review * Update butterworth_filter.py Co-authored-by: Christian Clauss --- audio_filters/__init__.py | 0 audio_filters/butterworth_filter.py | 217 ++++++++++++++++++++++++++++ audio_filters/iir_filter.py | 92 ++++++++++++ audio_filters/show_response.py | 94 ++++++++++++ 4 files changed, 403 insertions(+) create mode 100644 audio_filters/__init__.py create mode 100644 audio_filters/butterworth_filter.py create mode 100644 audio_filters/iir_filter.py create mode 100644 audio_filters/show_response.py diff --git a/audio_filters/__init__.py b/audio_filters/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/audio_filters/butterworth_filter.py b/audio_filters/butterworth_filter.py new file mode 100644 index 000000000000..409cfeb1d95c --- /dev/null +++ b/audio_filters/butterworth_filter.py @@ -0,0 +1,217 @@ +from math import cos, sin, sqrt, tau + +from audio_filters.iir_filter import IIRFilter + +""" +Create 2nd-order IIR filters with Butterworth design. + +Code based on https://webaudio.github.io/Audio-EQ-Cookbook/audio-eq-cookbook.html +Alternatively you can use scipy.signal.butter, which should yield the same results. 
+""" + + +def make_lowpass( + frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) +) -> IIRFilter: + """ + Creates a low-pass filter + + >>> filter = make_lowpass(1000, 48000) + >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE + [1.0922959556412573, -1.9828897227476208, 0.9077040443587427, 0.004277569313094809, + 0.008555138626189618, 0.004277569313094809] + """ + w0 = tau * frequency / samplerate + _sin = sin(w0) + _cos = cos(w0) + alpha = _sin / (2 * q_factor) + + b0 = (1 - _cos) / 2 + b1 = 1 - _cos + + a0 = 1 + alpha + a1 = -2 * _cos + a2 = 1 - alpha + + filt = IIRFilter(2) + filt.set_coefficients([a0, a1, a2], [b0, b1, b0]) + return filt + + +def make_highpass( + frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) +) -> IIRFilter: + """ + Creates a high-pass filter + + >>> filter = make_highpass(1000, 48000) + >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE + [1.0922959556412573, -1.9828897227476208, 0.9077040443587427, 0.9957224306869052, + -1.9914448613738105, 0.9957224306869052] + """ + w0 = tau * frequency / samplerate + _sin = sin(w0) + _cos = cos(w0) + alpha = _sin / (2 * q_factor) + + b0 = (1 + _cos) / 2 + b1 = -1 - _cos + + a0 = 1 + alpha + a1 = -2 * _cos + a2 = 1 - alpha + + filt = IIRFilter(2) + filt.set_coefficients([a0, a1, a2], [b0, b1, b0]) + return filt + + +def make_bandpass( + frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) +) -> IIRFilter: + """ + Creates a band-pass filter + + >>> filter = make_bandpass(1000, 48000) + >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE + [1.0922959556412573, -1.9828897227476208, 0.9077040443587427, 0.06526309611002579, + 0, -0.06526309611002579] + """ + w0 = tau * frequency / samplerate + _sin = sin(w0) + _cos = cos(w0) + alpha = _sin / (2 * q_factor) + + b0 = _sin / 2 + b1 = 0 + b2 = -b0 + + a0 = 1 + alpha + a1 = -2 * _cos + a2 = 1 - alpha + + filt = IIRFilter(2) + filt.set_coefficients([a0, a1, a2], [b0, b1, b2]) + return filt + + +def make_allpass( + frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) +) -> IIRFilter: + """ + Creates an all-pass filter + + >>> filter = make_allpass(1000, 48000) + >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE + [1.0922959556412573, -1.9828897227476208, 0.9077040443587427, 0.9077040443587427, + -1.9828897227476208, 1.0922959556412573] + """ + w0 = tau * frequency / samplerate + _sin = sin(w0) + _cos = cos(w0) + alpha = _sin / (2 * q_factor) + + b0 = 1 - alpha + b1 = -2 * _cos + b2 = 1 + alpha + + filt = IIRFilter(2) + filt.set_coefficients([b2, b1, b0], [b0, b1, b2]) + return filt + + +def make_peak( + frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2) +) -> IIRFilter: + """ + Creates a peak filter + + >>> filter = make_peak(1000, 48000, 6) + >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE + [1.0653405327119334, -1.9828897227476208, 0.9346594672880666, 1.1303715025601122, + -1.9828897227476208, 0.8696284974398878] + """ + w0 = tau * frequency / samplerate + _sin = sin(w0) + _cos = cos(w0) + alpha = _sin / (2 * q_factor) + big_a = 10 ** (gain_db / 40) + + b0 = 1 + alpha * big_a + b1 = -2 * _cos + b2 = 1 - alpha * big_a + a0 = 1 + alpha / big_a + a1 = -2 * _cos + a2 = 1 - alpha / big_a + + filt = IIRFilter(2) + filt.set_coefficients([a0, a1, a2], [b0, b1, b2]) + return filt + + +def make_lowshelf( + frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2) +) -> IIRFilter: + """ + Creates a 
low-shelf filter + + >>> filter = make_lowshelf(1000, 48000, 6) + >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE + [3.0409336710888786, -5.608870992220748, 2.602157875636628, 3.139954022810743, + -5.591841778072785, 2.5201667380627257] + """ + w0 = tau * frequency / samplerate + _sin = sin(w0) + _cos = cos(w0) + alpha = _sin / (2 * q_factor) + big_a = 10 ** (gain_db / 40) + pmc = (big_a + 1) - (big_a - 1) * _cos + ppmc = (big_a + 1) + (big_a - 1) * _cos + mpc = (big_a - 1) - (big_a + 1) * _cos + pmpc = (big_a - 1) + (big_a + 1) * _cos + aa2 = 2 * sqrt(big_a) * alpha + + b0 = big_a * (pmc + aa2) + b1 = 2 * big_a * mpc + b2 = big_a * (pmc - aa2) + a0 = ppmc + aa2 + a1 = -2 * pmpc + a2 = ppmc - aa2 + + filt = IIRFilter(2) + filt.set_coefficients([a0, a1, a2], [b0, b1, b2]) + return filt + + +def make_highshelf( + frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2) +) -> IIRFilter: + """ + Creates a high-shelf filter + + >>> filter = make_highshelf(1000, 48000, 6) + >>> filter.a_coeffs + filter.b_coeffs # doctest: +NORMALIZE_WHITESPACE + [2.2229172136088806, -3.9587208137297303, 1.7841414181566304, 4.295432981120543, + -7.922740859457287, 3.6756456963725253] + """ + w0 = tau * frequency / samplerate + _sin = sin(w0) + _cos = cos(w0) + alpha = _sin / (2 * q_factor) + big_a = 10 ** (gain_db / 40) + pmc = (big_a + 1) - (big_a - 1) * _cos + ppmc = (big_a + 1) + (big_a - 1) * _cos + mpc = (big_a - 1) - (big_a + 1) * _cos + pmpc = (big_a - 1) + (big_a + 1) * _cos + aa2 = 2 * sqrt(big_a) * alpha + + b0 = big_a * (ppmc + aa2) + b1 = -2 * big_a * pmpc + b2 = big_a * (ppmc - aa2) + a0 = pmc + aa2 + a1 = 2 * mpc + a2 = pmc - aa2 + + filt = IIRFilter(2) + filt.set_coefficients([a0, a1, a2], [b0, b1, b2]) + return filt diff --git a/audio_filters/iir_filter.py b/audio_filters/iir_filter.py new file mode 100644 index 000000000000..aae320365012 --- /dev/null +++ b/audio_filters/iir_filter.py @@ -0,0 +1,92 @@ +from __future__ import annotations + + +class IIRFilter: + r""" + N-Order IIR filter + Assumes working with float samples normalized on [-1, 1] + + --- + + Implementation details: + Based on the 2nd-order function from + https://en.wikipedia.org/wiki/Digital_biquad_filter, + this generalized N-order function was made. + + Using the following transfer function + H(z)=\frac{b_{0}+b_{1}z^{-1}+b_{2}z^{-2}+...+b_{k}z^{-k}}{a_{0}+a_{1}z^{-1}+a_{2}z^{-2}+...+a_{k}z^{-k}} + we can rewrite this to + y[n]={\frac{1}{a_{0}}}\left(\left(b_{0}x[n]+b_{1}x[n-1]+b_{2}x[n-2]+...+b_{k}x[n-k]\right)-\left(a_{1}y[n-1]+a_{2}y[n-2]+...+a_{k}y[n-k]\right)\right) + """ + + def __init__(self, order: int) -> None: + self.order = order + + # a_{0} ... a_{k} + self.a_coeffs = [1.0] + [0.0] * order + # b_{0} ... b_{k} + self.b_coeffs = [1.0] + [0.0] * order + + # x[n-1] ... x[n-k] + self.input_history = [0.0] * self.order + # y[n-1] ... y[n-k] + self.output_history = [0.0] * self.order + + def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None: + """ + Set the coefficients for the IIR filter. These should both be of size order + 1. + a_0 may be left out, and it will use 1.0 as default value. + + This method works well with scipy's filter design functions + >>> # Make a 2nd-order 1000Hz butterworth lowpass filter + >>> import scipy.signal + >>> b_coeffs, a_coeffs = scipy.signal.butter(2, 1000, + ... btype='lowpass', + ... 
fs=48000) + >>> filt = IIRFilter(2) + >>> filt.set_coefficients(a_coeffs, b_coeffs) + """ + if len(a_coeffs) < self.order: + a_coeffs = [1.0] + a_coeffs + + if len(a_coeffs) != self.order + 1: + raise ValueError( + f"Expected a_coeffs to have {self.order + 1} elements for {self.order}" + f"-order filter, got {len(a_coeffs)}" + ) + + if len(b_coeffs) != self.order + 1: + raise ValueError( + f"Expected b_coeffs to have {self.order + 1} elements for {self.order}" + f"-order filter, got {len(a_coeffs)}" + ) + + self.a_coeffs = a_coeffs + self.b_coeffs = b_coeffs + + def process(self, sample: float) -> float: + """ + Calculate y[n] + + >>> filt = IIRFilter(2) + >>> filt.process(0) + 0.0 + """ + result = 0.0 + + # Start at index 1 and do index 0 at the end. + for i in range(1, self.order + 1): + result += ( + self.b_coeffs[i] * self.input_history[i - 1] + - self.a_coeffs[i] * self.output_history[i - 1] + ) + + result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] + + self.input_history[1:] = self.input_history[:-1] + self.output_history[1:] = self.output_history[:-1] + + self.input_history[0] = sample + self.output_history[0] = result + + return result diff --git a/audio_filters/show_response.py b/audio_filters/show_response.py new file mode 100644 index 000000000000..6e2731a58419 --- /dev/null +++ b/audio_filters/show_response.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +from math import pi +from typing import Protocol + +import matplotlib.pyplot as plt +import numpy as np + + +class FilterType(Protocol): + def process(self, sample: float) -> float: + """ + Calculate y[n] + + >>> issubclass(FilterType, Protocol) + True + """ + return 0.0 + + +def get_bounds( + fft_results: np.ndarray, samplerate: int +) -> tuple[int | float, int | float]: + """ + Get bounds for printing fft results + + >>> import numpy + >>> array = numpy.linspace(-20.0, 20.0, 1000) + >>> get_bounds(array, 1000) + (-20, 20) + """ + lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])]) + highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])]) + return lowest, highest + + +def show_frequency_response(filter: FilterType, samplerate: int) -> None: + """ + Show frequency response of a filter + + >>> from audio_filters.iir_filter import IIRFilter + >>> filt = IIRFilter(4) + >>> show_frequency_response(filt, 48000) + """ + + size = 512 + inputs = [1] + [0] * (size - 1) + outputs = [filter.process(item) for item in inputs] + + filler = [0] * (samplerate - size) # zero-padding + outputs += filler + fft_out = np.abs(np.fft.fft(outputs)) + fft_db = 20 * np.log10(fft_out) + + # Frequencies on log scale from 24 to nyquist frequency + plt.xlim(24, samplerate / 2 - 1) + plt.xlabel("Frequency (Hz)") + plt.xscale("log") + + # Display within reasonable bounds + bounds = get_bounds(fft_db, samplerate) + plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]])) + plt.ylabel("Gain (dB)") + + plt.plot(fft_db) + plt.show() + + +def show_phase_response(filter: FilterType, samplerate: int) -> None: + """ + Show phase response of a filter + + >>> from audio_filters.iir_filter import IIRFilter + >>> filt = IIRFilter(4) + >>> show_phase_response(filt, 48000) + """ + + size = 512 + inputs = [1] + [0] * (size - 1) + outputs = [filter.process(item) for item in inputs] + + filler = [0] * (samplerate - size) # zero-padding + outputs += filler + fft_out = np.angle(np.fft.fft(outputs)) + + # Frequencies on log scale from 24 to nyquist frequency + plt.xlim(24, samplerate / 2 - 1) + plt.xlabel("Frequency (Hz)") + 
plt.xscale("log") + + plt.ylim(-2 * pi, 2 * pi) + plt.ylabel("Phase shift (Radians)") + plt.plot(np.unwrap(fft_out, -2 * pi)) + plt.show() From aaaa175b66f2b335023433a3c40635388b98c489 Mon Sep 17 00:00:00 2001 From: Erwin Junge Date: Sat, 23 Oct 2021 23:26:21 +0200 Subject: [PATCH 0297/1543] [mypy] annotate `computer_vision` (#5571) --- computer_vision/harris_corner.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/computer_vision/harris_corner.py b/computer_vision/harris_corner.py index fb7f560f7873..02deb54084ef 100644 --- a/computer_vision/harris_corner.py +++ b/computer_vision/harris_corner.py @@ -21,11 +21,11 @@ def __init__(self, k: float, window_size: int): else: raise ValueError("invalid k value") - def __str__(self): + def __str__(self) -> str: return f"Harris Corner detection with k : {self.k}" - def detect(self, img_path: str): + def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]: """ Returns the image with corners identified @@ -35,7 +35,7 @@ def detect(self, img_path: str): img = cv2.imread(img_path, 0) h, w = img.shape - corner_list = [] + corner_list: list[list[int]] = [] color_img = img.copy() color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB) dy, dx = np.gradient(img) From 5772d0734b72c307c0e08c76121b3eca589f50d4 Mon Sep 17 00:00:00 2001 From: Marcel Kuhmann Date: Sun, 24 Oct 2021 18:44:15 +0200 Subject: [PATCH 0298/1543] fix dead link (#5572) --- project_euler/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project_euler/README.md b/project_euler/README.md index 1cc6f8150e38..c4c0a854472f 100644 --- a/project_euler/README.md +++ b/project_euler/README.md @@ -28,7 +28,7 @@ Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Befo * When the `solution` function is called without any arguments like so: `solution()`, it should return the answer to the problem. * Every function, which includes all the helper functions, if any, and the main solution function, should have `doctest` in the function docstring along with a brief statement mentioning what the function is about. - * There should not be a `doctest` for testing the answer as that is done by our Travis CI build using this [script](https://github.com/TheAlgorithms/Python/blob/master/project_euler/validate_solutions.py). Keeping in mind the above example of [Problem 1](https://projecteuler.net/problem=1): + * There should not be a `doctest` for testing the answer as that is done by our Travis CI build using this [script](https://github.com/TheAlgorithms/Python/blob/master/scripts/validate_solutions.py). Keeping in mind the above example of [Problem 1](https://projecteuler.net/problem=1): ```python def solution(limit: int = 1000): From fc5d29e214c5624edec43300311b24c9d3962ac2 Mon Sep 17 00:00:00 2001 From: Vinicius Cordeiro Date: Sun, 24 Oct 2021 17:33:53 -0300 Subject: [PATCH 0299/1543] Add Bifid cipher (#5493) * Add Bifid cipher * Add missing type hint * Fix variable names --- ciphers/bifid.py | 110 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 ciphers/bifid.py diff --git a/ciphers/bifid.py b/ciphers/bifid.py new file mode 100644 index 000000000000..c1b071155917 --- /dev/null +++ b/ciphers/bifid.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python3 + +""" +The Bifid Cipher uses a Polybius Square to encipher a message in a way that +makes it fairly difficult to decipher without knowing the secret. 
+ +https://www.braingle.com/brainteasers/codes/bifid.php +""" + +import numpy as np + + +class BifidCipher: + def __init__(self) -> None: + SQUARE = [ + ["a", "b", "c", "d", "e"], + ["f", "g", "h", "i", "k"], + ["l", "m", "n", "o", "p"], + ["q", "r", "s", "t", "u"], + ["v", "w", "x", "y", "z"], + ] + self.SQUARE = np.array(SQUARE) + + def letter_to_numbers(self, letter: str) -> np.ndarray: + """ + Return the pair of numbers that represents the given letter in the + polybius square + + >>> np.array_equal(BifidCipher().letter_to_numbers('a'), [1,1]) + True + + >>> np.array_equal(BifidCipher().letter_to_numbers('u'), [4,5]) + True + """ + index1, index2 = np.where(self.SQUARE == letter) + indexes = np.concatenate([index1 + 1, index2 + 1]) + return indexes + + def numbers_to_letter(self, index1: int, index2: int) -> str: + """ + Return the letter corresponding to the position [index1, index2] in + the polybius square + + >>> BifidCipher().numbers_to_letter(4, 5) == "u" + True + + >>> BifidCipher().numbers_to_letter(1, 1) == "a" + True + """ + letter = self.SQUARE[index1 - 1, index2 - 1] + return letter + + def encode(self, message: str) -> str: + """ + Return the encoded version of message according to the polybius cipher + + >>> BifidCipher().encode('testmessage') == 'qtltbdxrxlk' + True + + >>> BifidCipher().encode('Test Message') == 'qtltbdxrxlk' + True + + >>> BifidCipher().encode('test j') == BifidCipher().encode('test i') + True + """ + message = message.lower() + message = message.replace(" ", "") + message = message.replace("j", "i") + + first_step = np.empty((2, len(message))) + for letter_index in range(len(message)): + numbers = self.letter_to_numbers(message[letter_index]) + + first_step[0, letter_index] = numbers[0] + first_step[1, letter_index] = numbers[1] + + second_step = first_step.reshape(2 * len(message)) + encoded_message = "" + for numbers_index in range(len(message)): + index1 = int(second_step[numbers_index * 2]) + index2 = int(second_step[(numbers_index * 2) + 1]) + letter = self.numbers_to_letter(index1, index2) + encoded_message = encoded_message + letter + + return encoded_message + + def decode(self, message: str) -> str: + """ + Return the decoded version of message according to the polybius cipher + + >>> BifidCipher().decode('qtltbdxrxlk') == 'testmessage' + True + """ + message = message.lower() + message.replace(" ", "") + first_step = np.empty(2 * len(message)) + for letter_index in range(len(message)): + numbers = self.letter_to_numbers(message[letter_index]) + first_step[letter_index * 2] = numbers[0] + first_step[letter_index * 2 + 1] = numbers[1] + + second_step = first_step.reshape((2, len(message))) + decoded_message = "" + for numbers_index in range(len(message)): + index1 = int(second_step[0, numbers_index]) + index2 = int(second_step[1, numbers_index]) + letter = self.numbers_to_letter(index1, index2) + decoded_message = decoded_message + letter + + return decoded_message From 568c107e6802dbcb8e6b9b82a31c9884645ac6b4 Mon Sep 17 00:00:00 2001 From: Francisco Perez <92104963+franciscoperez2021@users.noreply.github.com> Date: Mon, 25 Oct 2021 08:58:24 +0100 Subject: [PATCH 0300/1543] add bin_to_hexadecimal (#5156) --- DIRECTORY.md | 1 + conversions/binary_to_hexadecimal.py | 65 ++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) create mode 100644 conversions/binary_to_hexadecimal.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 66d5f8040951..13a360ab67f3 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -107,6 +107,7 @@ ## Conversions * 
[Binary To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_decimal.py) * [Binary To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_octal.py) + * [Binary To Hexadecimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_hexadecimal.py) * [Decimal To Any](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_any.py) * [Decimal To Binary](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_binary.py) * [Decimal To Binary Recursion](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_binary_recursion.py) diff --git a/conversions/binary_to_hexadecimal.py b/conversions/binary_to_hexadecimal.py new file mode 100644 index 000000000000..f94a12390607 --- /dev/null +++ b/conversions/binary_to_hexadecimal.py @@ -0,0 +1,65 @@ +def bin_to_hexadecimal(binary_str: str) -> str: + """ + Converting a binary string into hexadecimal using Grouping Method + + >>> bin_to_hexadecimal('101011111') + '0x15f' + >>> bin_to_hexadecimal(' 1010 ') + '0x0a' + >>> bin_to_hexadecimal('-11101') + '-0x1d' + >>> bin_to_hexadecimal('a') + Traceback (most recent call last): + ... + ValueError: Non-binary value was passed to the function + >>> bin_to_hexadecimal('') + Traceback (most recent call last): + ... + ValueError: Empty string was passed to the function + """ + BITS_TO_HEX = { + "0000": "0", + "0001": "1", + "0010": "2", + "0011": "3", + "0100": "4", + "0101": "5", + "0110": "6", + "0111": "7", + "1000": "8", + "1001": "9", + "1010": "a", + "1011": "b", + "1100": "c", + "1101": "d", + "1110": "e", + "1111": "f", + } + + # Sanitising parameter + binary_str = str(binary_str).strip() + + # Exceptions + if not binary_str: + raise ValueError("Empty string was passed to the function") + is_negative = binary_str[0] == "-" + binary_str = binary_str[1:] if is_negative else binary_str + if not all(char in "01" for char in binary_str): + raise ValueError("Non-binary value was passed to the function") + + binary_str = ( + "0" * (4 * (divmod(len(binary_str), 4)[0] + 1) - len(binary_str)) + binary_str + ) + + hexadecimal = [] + for x in range(0, len(binary_str), 4): + hexadecimal.append(BITS_TO_HEX[binary_str[x : x + 4]]) + hexadecimal_str = "0x" + "".join(hexadecimal) + + return "-" + hexadecimal_str if is_negative else hexadecimal_str + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From ba710054844fde4ccca666464c4bd08207e64a0d Mon Sep 17 00:00:00 2001 From: Manuel Di Lullo <39048927+manueldilullo@users.noreply.github.com> Date: Mon, 25 Oct 2021 09:59:52 +0200 Subject: [PATCH 0301/1543] Add random graph generator (#5240) * added complete graph generator function * added doctest, type hints, wikipedia explanation * added return type hint for function complete_graph * added descriptive name for the parameter: n * random graph generator with doctest and type hints * validated using pre-commit * Delete complete_graph_generator.py * fixed doctest * updated following reviews * simplified the code following reviews * fixed doctest and solved consistency issues * consistency fixes --- graphs/random_graph_generator.py | 67 ++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 graphs/random_graph_generator.py diff --git a/graphs/random_graph_generator.py b/graphs/random_graph_generator.py new file mode 100644 index 000000000000..d7d5de8a37c0 --- /dev/null +++ b/graphs/random_graph_generator.py @@ -0,0 +1,67 @@ +""" +* Author: 
Manuel Di Lullo (https://github.com/manueldilullo) +* Description: Random graphs generator. + Uses graphs represented with an adjacency list. + +URL: https://en.wikipedia.org/wiki/Random_graph +""" + +import random + + +def random_graph( + vertices_number: int, probability: float, directed: bool = False +) -> dict: + """ + Generate a random graph + @input: vertices_number (number of vertices), + probability (probability that a generic edge (u,v) exists), + directed (if True: graph will be a directed graph, + otherwise it will be an undirected graph) + @examples: + >>> random.seed(1) + >>> random_graph(4, 0.5) + {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]} + >>> random.seed(1) + >>> random_graph(4, 0.5, True) + {0: [1], 1: [2, 3], 2: [3], 3: []} + """ + graph = {i: [] for i in range(vertices_number)} + + # if probability is greater or equal than 1, then generate a complete graph + if probability >= 1: + return complete_graph(vertices_number) + # if probability is lower or equal than 0, then return a graph without edges + if probability <= 0: + return graph + + # for each couple of nodes, add an edge from u to v + # if the number randomly generated is greater than probability probability + for i in range(vertices_number): + for j in range(i + 1, vertices_number): + if random.random() < probability: + graph[i].append(j) + if not directed: + # if the graph is undirected, add an edge in from j to i, either + graph[j].append(i) + return graph + + +def complete_graph(vertices_number: int) -> dict: + """ + Generate a complete graph with vertices_number vertices. + @input: vertices_number (number of vertices), + directed (False if the graph is undirected, True otherwise) + @example: + >>> print(complete_graph(3)) + {0: [1, 2], 1: [0, 2], 2: [0, 1]} + """ + return { + i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number) + } + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 5f7bb3e9f7b458d039e1027b48b3d24d9a577f96 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 25 Oct 2021 11:03:22 +0300 Subject: [PATCH 0302/1543] Improve Project Euler problem 034 solution 1 (#5165) --- project_euler/problem_034/sol1.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/project_euler/problem_034/sol1.py b/project_euler/problem_034/sol1.py index 11c84ab96ac6..8d8432dbbb7a 100644 --- a/project_euler/problem_034/sol1.py +++ b/project_euler/problem_034/sol1.py @@ -8,6 +8,8 @@ from math import factorial +DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)} + def sum_of_digit_factorial(n: int) -> int: """ @@ -17,7 +19,7 @@ def sum_of_digit_factorial(n: int) -> int: >>> sum_of_digit_factorial(0) 1 """ - return sum(factorial(int(char)) for char in str(n)) + return sum(DIGIT_FACTORIAL[d] for d in str(n)) def solution() -> int: From b55da046029e681c1811f5f1cb24c35117d462ce Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 25 Oct 2021 11:07:10 +0300 Subject: [PATCH 0303/1543] Improve Project Euler problem 058 solution 1 (#4782) * Fix typo * Improve solution * Retest * Replace n with number --- project_euler/problem_058/sol1.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/project_euler/problem_058/sol1.py b/project_euler/problem_058/sol1.py index d3b15157fbbd..ed407edf7158 100644 --- a/project_euler/problem_058/sol1.py +++ b/project_euler/problem_058/sol1.py @@ -33,11 +33,12 @@ count of current primes. 
""" +from math import isqrt -def isprime(d: int) -> int: +def isprime(number: int) -> int: """ - returns whether the given digit is prime or not + returns whether the given number is prime or not >>> isprime(1) 0 >>> isprime(17) @@ -45,14 +46,15 @@ def isprime(d: int) -> int: >>> isprime(10000) 0 """ - if d == 1: + if number == 1: return 0 - i = 2 - while i * i <= d: - if d % i == 0: + if number % 2 == 0 and number > 2: + return 0 + + for i in range(3, isqrt(number) + 1, 2): + if number % i == 0: return 0 - i = i + 1 return 1 From 74e442e979b1ffdbafa97765193ec04058893fca Mon Sep 17 00:00:00 2001 From: Mohammad Firmansyah <76118762+dimasdh842@users.noreply.github.com> Date: Mon, 25 Oct 2021 17:18:41 +0000 Subject: [PATCH 0304/1543] add an algorithm to spin some words (#5597) * add an algorithm to spin some words * Update index.py * Adding type hint of spin_words function * Update and rename python_codewars_disemvowel/index.py to strings/reverse_long_words.py Co-authored-by: Christian Clauss --- strings/reverse_long_words.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 strings/reverse_long_words.py diff --git a/strings/reverse_long_words.py b/strings/reverse_long_words.py new file mode 100644 index 000000000000..39ef11513f40 --- /dev/null +++ b/strings/reverse_long_words.py @@ -0,0 +1,21 @@ +def reverse_long_words(sentence: str) -> str: + """ + Reverse all words that are longer than 4 characters in a sentence. + + >>> reverse_long_words("Hey wollef sroirraw") + 'Hey fellow warriors' + >>> reverse_long_words("nohtyP is nohtyP") + 'Python is Python' + >>> reverse_long_words("1 12 123 1234 54321 654321") + '1 12 123 1234 12345 123456' + """ + return " ".join( + "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split() + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(reverse_long_words("Hey wollef sroirraw")) From 12e81ea6a2e74691173895418f2129bb4d2752c2 Mon Sep 17 00:00:00 2001 From: "@im_8055" <38890773+Bhargavishnu@users.noreply.github.com> Date: Tue, 26 Oct 2021 10:51:07 +0530 Subject: [PATCH 0305/1543] Add credit card string validator (#5583) * Add credit card validator *  * Add return type hint * Add test cases for validator function * Add test cases * Feature: Rename file * Update strings/cc_validator.py Co-authored-by: Christian Clauss * Update strings/cc_validator.py Co-authored-by: Christian Clauss * Update strings/cc_validator.py Co-authored-by: Christian Clauss * Review: Fix redundant checks * Review: Refactor * Fix: Update test cases * Refactor * Update credit_card_validator.py Co-authored-by: Christian Clauss --- strings/credit_card_validator.py | 105 +++++++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 strings/credit_card_validator.py diff --git a/strings/credit_card_validator.py b/strings/credit_card_validator.py new file mode 100644 index 000000000000..3b5a1aae6dc9 --- /dev/null +++ b/strings/credit_card_validator.py @@ -0,0 +1,105 @@ +""" +Functions for testing the validity of credit card numbers. + +https://en.wikipedia.org/wiki/Luhn_algorithm +""" + + +def validate_initial_digits(credit_card_number: str) -> bool: + """ + Function to validate initial digits of a given credit card number. 
+    >>> valid = "4111111111111111 41111111111111 34 35 37 412345 523456 634567"
+    >>> all(validate_initial_digits(cc) for cc in valid.split())
+    True
+    >>> invalid = "32323 36111111111111"
+    >>> all(validate_initial_digits(cc) is False for cc in invalid.split())
+    True
+    """
+    if len(credit_card_number) < 2:
+        return False
+    return credit_card_number[0] in "456" or credit_card_number[1] in "457"
+
+
+def luhn_validation(credit_card_number: str) -> bool:
+    """
+    Function to perform Luhn algorithm validation for a given credit card number.
+    >>> luhn_validation('4111111111111111')
+    True
+    >>> luhn_validation('36111111111111')
+    True
+    >>> luhn_validation('41111111111111')
+    False
+    """
+    cc_number = credit_card_number
+    total = 0
+    half_len = len(cc_number) - 2
+    for i in range(half_len, -1, -2):
+        # double the value of every second digit
+        digit = int(cc_number[i])
+        digit *= 2
+        # If doubling of a number results in a two digit number
+        # i.e. greater than 9 (e.g., 6 × 2 = 12),
+        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
+        # to get a single digit number.
+        if digit > 9:
+            digit %= 10
+            digit += 1
+        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
+        total += digit
+
+    # Sum up the remaining digits
+    for i in range(len(cc_number) - 1, -1, -2):
+        total += int(cc_number[i])
+
+    return total % 10 == 0
+
+
+def validate_credit_card_number(credit_card_number: str) -> bool:
+    """
+    Function to validate the given credit card number.
+    >>> validate_credit_card_number('4111111111111111')
+    4111111111111111 is a valid credit card number.
+    True
+    >>> validate_credit_card_number('helloworld$')
+    helloworld$ is an invalid credit card number because it has nonnumerical characters.
+    False
+    >>> validate_credit_card_number('32323')
+    32323 is an invalid credit card number because of its length.
+    False
+    >>> validate_credit_card_number('32323323233232332323')
+    32323323233232332323 is an invalid credit card number because of its length.
+    False
+    >>> validate_credit_card_number('36111111111111')
+    36111111111111 is an invalid credit card number because of its first two digits.
+    False
+    >>> validate_credit_card_number('41111111111111')
+    41111111111111 is an invalid credit card number because it fails the Luhn check.
+    False
+    """
+    error_message = f"{credit_card_number} is an invalid credit card number because"
+    if not credit_card_number.isdigit():
+        print(f"{error_message} it has nonnumerical characters.")
+        return False
+
+    if not 13 <= len(credit_card_number) <= 16:
+        print(f"{error_message} of its length.")
+        return False
+
+    if not validate_initial_digits(credit_card_number):
+        print(f"{error_message} of its first two digits.")
+        return False
+
+    if not luhn_validation(credit_card_number):
+        print(f"{error_message} it fails the Luhn check.")
+        return False
+
+    print(f"{credit_card_number} is a valid credit card number.")
+    return True
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
+    validate_credit_card_number("4111111111111111")
+    validate_credit_card_number("32323")

From 716beb32ed231217c05782be9323ef43f6c1a0ce Mon Sep 17 00:00:00 2001
From: Leoriem-code <73761711+Leoriem-code@users.noreply.github.com>
Date: Tue, 26 Oct 2021 09:21:44 +0200
Subject: [PATCH 0306/1543] Improved prime_numbers.py (#5592)

* Improved prime_numbers.py

* update prime_numbers.py

* Increase the timeit number to 1_000_000

Co-authored-by: Christian Clauss
---
 maths/prime_numbers.py | 55 ++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 53 insertions(+), 2 deletions(-)

diff --git a/maths/prime_numbers.py b/maths/prime_numbers.py
index 38bebddeee41..183fbd39349e 100644
--- a/maths/prime_numbers.py
+++ b/maths/prime_numbers.py
@@ -58,6 +58,38 @@ def primes(max: int) -> Generator[int, None, None]:
             yield i
 
 
+def fast_primes(max: int) -> Generator[int, None, None]:
+    """
+    Return a list of all prime numbers up to max.
+    >>> list(fast_primes(0))
+    []
+    >>> list(fast_primes(-1))
+    []
+    >>> list(fast_primes(-10))
+    []
+    >>> list(fast_primes(25))
+    [2, 3, 5, 7, 11, 13, 17, 19, 23]
+    >>> list(fast_primes(11))
+    [2, 3, 5, 7, 11]
+    >>> list(fast_primes(33))
+    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
+    >>> list(fast_primes(10000))[-1]
+    9973
+    """
+    numbers: Generator = (i for i in range(1, (max + 1), 2))
+    # It's useless to test even numbers as they will not be prime
+    if max > 2:
+        yield 2  # Because 2 will not be tested, it's necessary to yield it now
+    for i in (n for n in numbers if n > 1):
+        bound = int(math.sqrt(i)) + 1
+        for j in range(3, bound, 2):
+            # As we removed the even numbers, we don't need them now
+            if (i % j) == 0:
+                break
+        else:
+            yield i
+
+
 if __name__ == "__main__":
     number = int(input("Calculate primes up to:\n>> ").strip())
     for ret in primes(number):
@@ -66,5 +98,24 @@ def primes(max: int) -> Generator[int, None, None]:
     # Let's benchmark them side-by-side...
from timeit import timeit - print(timeit("slow_primes(1_000_000)", setup="from __main__ import slow_primes")) - print(timeit("primes(1_000_000)", setup="from __main__ import primes")) + print( + timeit( + "slow_primes(1_000_000_000_000)", + setup="from __main__ import slow_primes", + number=1_000_000, + ) + ) + print( + timeit( + "primes(1_000_000_000_000)", + setup="from __main__ import primes", + number=1_000_000, + ) + ) + print( + timeit( + "fast_primes(1_000_000_000_000)", + setup="from __main__ import fast_primes", + number=1_000_000, + ) + ) From 8e857e8692bf8e07112dedb7042bdab1ecb1d20e Mon Sep 17 00:00:00 2001 From: Leoriem-code <73761711+Leoriem-code@users.noreply.github.com> Date: Tue, 26 Oct 2021 09:57:49 +0200 Subject: [PATCH 0307/1543] add implementation of Nagel and Schrekenberg algo (#5584) * add implementation of Nagel and Schrekenberg algo * Update cellular_automata/nasch.py Co-authored-by: Christian Clauss * Update nasch.py * Update and rename nasch.py to nagel_schrekenberg.py * Update cellular_automata/nagel_schrekenberg.py Co-authored-by: Christian Clauss * Update nagel_schrekenberg.py * Update nagel_schrekenberg.py * Update nagel_schrekenberg.py * update nagel_schrekenberg.py * Update nagel_schrekenberg.py Co-authored-by: Christian Clauss --- cellular_automata/nagel_schrekenberg.py | 140 ++++++++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 cellular_automata/nagel_schrekenberg.py diff --git a/cellular_automata/nagel_schrekenberg.py b/cellular_automata/nagel_schrekenberg.py new file mode 100644 index 000000000000..be44761ecf82 --- /dev/null +++ b/cellular_automata/nagel_schrekenberg.py @@ -0,0 +1,140 @@ +""" +Simulate the evolution of a highway with only one road that is a loop. +The highway is divided in cells, each cell can have at most one car in it. +The highway is a loop so when a car comes to one end, it will come out on the other. +Each car is represented by its speed (from 0 to 5). 
+
+Some information about speed:
+    -1 means that the cell on the highway is empty
+    0 to 5 are the speed of the cars with 0 being the lowest and 5 the highest
+
+highway: list[int]  Where every position and speed of every car will be stored
+probability         The probability that a driver will slow down
+initial_speed       The speed of the cars at the start
+frequency           How many cells there are between two cars at the start
+max_speed           The maximum speed a car can go to
+number_of_cells     How many cells there are in the highway
+number_of_update    How many times will the position be updated
+
+More information here: https://en.wikipedia.org/wiki/Nagel%E2%80%93Schreckenberg_model
+
+Examples for doctest:
+>>> simulate(construct_highway(6, 3, 0), 2, 0, 2)
+[[0, -1, -1, 0, -1, -1], [-1, 1, -1, -1, 1, -1], [-1, -1, 1, -1, -1, 1]]
+>>> simulate(construct_highway(5, 2, -2), 3, 0, 2)
+[[0, -1, 0, -1, 0], [0, -1, 0, -1, -1], [0, -1, -1, 1, -1], [-1, 1, -1, 0, -1]]
+"""
+from random import randint, random
+
+
+def construct_highway(
+    number_of_cells: int,
+    frequency: int,
+    initial_speed: int,
+    random_frequency: bool = False,
+    random_speed: bool = False,
+    max_speed: int = 5,
+) -> list:
+    """
+    Build the highway following the parameters given
+    >>> construct_highway(10, 2, 6)
+    [[6, -1, 6, -1, 6, -1, 6, -1, 6, -1]]
+    >>> construct_highway(10, 10, 2)
+    [[2, -1, -1, -1, -1, -1, -1, -1, -1, -1]]
+    """
+
+    highway = [[-1] * number_of_cells]  # Create a highway without any car
+    i = 0
+    if initial_speed < 0:
+        initial_speed = 0
+    while i < number_of_cells:
+        highway[0][i] = (
+            randint(0, max_speed) if random_speed else initial_speed
+        )  # Place the cars
+        i += (
+            randint(1, max_speed * 2) if random_frequency else frequency
+        )  # Arbitrary number, may need tuning
+    return highway
+
+
+def get_distance(highway_now: list, car_index: int) -> int:
+    """
+    Get the distance between a car (at index car_index) and the next car
+    >>> get_distance([6, -1, 6, -1, 6], 2)
+    1
+    >>> get_distance([2, -1, -1, -1, 3, 1, 0, 1, 3, 2], 0)
+    3
+    >>> get_distance([-1, -1, -1, -1, 2, -1, -1, -1, 3], -1)
+    4
+    """
+
+    distance = 0
+    cells = highway_now[car_index + 1 :]
+    for cell in range(len(cells)):  # May need a better name for this
+        if cells[cell] != -1:  # If the cell is not empty then
+            return distance  # we have the distance we wanted
+        distance += 1
+    # Here if the car is near the end of the highway
+    return distance + get_distance(highway_now, -1)
+
+
+def update(highway_now: list, probability: float, max_speed: int) -> list:
+    """
+    Update the speed of the cars
+    >>> update([-1, -1, -1, -1, -1, 2, -1, -1, -1, -1, 3], 0.0, 5)
+    [-1, -1, -1, -1, -1, 3, -1, -1, -1, -1, 4]
+    >>> update([-1, -1, 2, -1, -1, -1, -1, 3], 0.0, 5)
+    [-1, -1, 3, -1, -1, -1, -1, 1]
+    """
+
+    number_of_cells = len(highway_now)
+    # Before calculations, the highway is empty
+    next_highway = [-1] * number_of_cells
+
+    for car_index in range(number_of_cells):
+        if highway_now[car_index] != -1:
+            # Add 1 to the current speed of the car and cap the speed
+            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
+            # Number of empty cells before the next car
+            dn = get_distance(highway_now, car_index) - 1
+            # We can't have the car causing an accident
+            next_highway[car_index] = min(next_highway[car_index], dn)
+            if random() < probability:
+                # Randomly, a driver will slow down
+                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
+    return next_highway
+
+
+def simulate(
+    highway: list, number_of_update: int, probability: float, max_speed: int
+) ->
list: + """ + The main function, it will simulate the evolution of the highway + >>> simulate([[-1, 2, -1, -1, -1, 3]], 2, 0.0, 3) + [[-1, 2, -1, -1, -1, 3], [-1, -1, -1, 2, -1, 0], [1, -1, -1, 0, -1, -1]] + >>> simulate([[-1, 2, -1, 3]], 4, 0.0, 3) + [[-1, 2, -1, 3], [-1, 0, -1, 0], [-1, 0, -1, 0], [-1, 0, -1, 0], [-1, 0, -1, 0]] + """ + + number_of_cells = len(highway[0]) + + for i in range(number_of_update): + next_speeds_calculated = update(highway[i], probability, max_speed) + real_next_speeds = [-1] * number_of_cells + + for car_index in range(number_of_cells): + speed = next_speeds_calculated[car_index] + if speed != -1: + # Change the position based on the speed (with % to create the loop) + index = (car_index + speed) % number_of_cells + # Commit the change of position + real_next_speeds[index] = speed + highway.append(real_next_speeds) + + return highway + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f93c7d4d80bcad6b4679d09ecefa7cc3874aae54 Mon Sep 17 00:00:00 2001 From: Prakhar Gurunani Date: Tue, 26 Oct 2021 13:35:13 +0530 Subject: [PATCH 0308/1543] Get user tweets (#5593) * updating DIRECTORY.md * Create get_user_tweets.py * updating DIRECTORY.md * Reformat code with black * Add argument type * Add return type * Add tweepy * Fix isort issues * Fix flake8 issues * WIP: doctest * Doctest setup and format with pre-commit * Remove doctests * Update web_programming/get_user_tweets.py Co-authored-by: Christian Clauss * Update get_user_tweets.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 10 ++++- requirements.txt | 1 + web_programming/get_user_tweets.py | 60 ++++++++++++++++++++++++++++++ 3 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 web_programming/get_user_tweets.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 13a360ab67f3..e2b2442fd1b1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -10,6 +10,11 @@ * [Newton Raphson](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_raphson.py) * [Secant Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/secant_method.py) +## Audio Filters + * [Butterworth Filter](https://github.com/TheAlgorithms/Python/blob/master/audio_filters/butterworth_filter.py) + * [Iir Filter](https://github.com/TheAlgorithms/Python/blob/master/audio_filters/iir_filter.py) + * [Show Response](https://github.com/TheAlgorithms/Python/blob/master/audio_filters/show_response.py) + ## Backtracking * [All Combinations](https://github.com/TheAlgorithms/Python/blob/master/backtracking/all_combinations.py) * [All Permutations](https://github.com/TheAlgorithms/Python/blob/master/backtracking/all_permutations.py) @@ -60,6 +65,7 @@ * [Base64 Encoding](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base64_encoding.py) * [Base85](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base85.py) * [Beaufort Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/beaufort_cipher.py) + * [Bifid](https://github.com/TheAlgorithms/Python/blob/master/ciphers/bifid.py) * [Brute Force Caesar Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/brute_force_caesar_cipher.py) * [Caesar Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/caesar_cipher.py) * [Cryptomath Module](https://github.com/TheAlgorithms/Python/blob/master/ciphers/cryptomath_module.py) @@ -106,8 +112,8 @@ ## Conversions * [Binary To 
Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_decimal.py) - * [Binary To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_octal.py) * [Binary To Hexadecimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_hexadecimal.py) + * [Binary To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_octal.py) * [Decimal To Any](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_any.py) * [Decimal To Binary](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_binary.py) * [Decimal To Binary Recursion](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_binary_recursion.py) @@ -352,6 +358,7 @@ * [Multi Heuristic Astar](https://github.com/TheAlgorithms/Python/blob/master/graphs/multi_heuristic_astar.py) * [Page Rank](https://github.com/TheAlgorithms/Python/blob/master/graphs/page_rank.py) * [Prim](https://github.com/TheAlgorithms/Python/blob/master/graphs/prim.py) + * [Random Graph Generator](https://github.com/TheAlgorithms/Python/blob/master/graphs/random_graph_generator.py) * [Scc Kosaraju](https://github.com/TheAlgorithms/Python/blob/master/graphs/scc_kosaraju.py) * [Strongly Connected Components](https://github.com/TheAlgorithms/Python/blob/master/graphs/strongly_connected_components.py) * [Tarjans Scc](https://github.com/TheAlgorithms/Python/blob/master/graphs/tarjans_scc.py) @@ -978,6 +985,7 @@ * [Fetch Jobs](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_jobs.py) * [Get Imdb Top 250 Movies Csv](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdbtop.py) + * [Get User Tweets](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_user_tweets.py) * [Giphy](https://github.com/TheAlgorithms/Python/blob/master/web_programming/giphy.py) * [Instagram Crawler](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_crawler.py) * [Instagram Pic](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_pic.py) diff --git a/requirements.txt b/requirements.txt index 4867de26f8f1..7c2672ae25d3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,5 +14,6 @@ sklearn statsmodels sympy tensorflow +tweepy types-requests xgboost diff --git a/web_programming/get_user_tweets.py b/web_programming/get_user_tweets.py new file mode 100644 index 000000000000..0f70201dc311 --- /dev/null +++ b/web_programming/get_user_tweets.py @@ -0,0 +1,60 @@ +import csv + +import tweepy + +# Twitter API credentials +consumer_key = "" +consumer_secret = "" +access_key = "" +access_secret = "" + + +def get_all_tweets(screen_name: str) -> None: + + # authorize twitter, initialize tweepy + auth = tweepy.OAuthHandler(consumer_key, consumer_secret) + auth.set_access_token(access_key, access_secret) + api = tweepy.API(auth) + + # initialize a list to hold all the tweepy Tweets + alltweets = [] + + # make initial request for most recent tweets (200 is the maximum allowed count) + new_tweets = api.user_timeline(screen_name=screen_name, count=200) + + # save most recent tweets + alltweets.extend(new_tweets) + + # save the id of the oldest tweet less one + oldest = alltweets[-1].id - 1 + + # keep grabbing tweets until there are no tweets left to grab + while len(new_tweets) > 0: + print(f"getting tweets before {oldest}") + + # 
all subsequent requests use the max_id param to prevent duplicates
+        new_tweets = api.user_timeline(
+            screen_name=screen_name, count=200, max_id=oldest
+        )
+
+        # save most recent tweets
+        alltweets.extend(new_tweets)
+
+        # update the id of the oldest tweet less one
+        oldest = alltweets[-1].id - 1
+
+        print(f"...{len(alltweets)} tweets downloaded so far")
+
+    # transform the tweepy tweets into a 2D array that will populate the csv
+    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
+
+    # write the csv
+    with open(f"new_{screen_name}_tweets.csv", "w") as f:
+        writer = csv.writer(f)
+        writer.writerow(["id", "created_at", "text"])
+        writer.writerows(outtweets)
+
+
+if __name__ == "__main__":
+    # pass in the username of the account you want to download
+    get_all_tweets("FirePing32")

From 2606f1bbe58dfc370ce1f35d3d9c51396e791fa6 Mon Sep 17 00:00:00 2001
From: Andrew Grangaard
Date: Tue, 26 Oct 2021 02:50:36 -0700
Subject: [PATCH 0309/1543] [mypy-fix] Type fixes for graham_scan (#5589)

* [mypy] Fixes type annotations in other/graham_scan #4052

+ Prefer tuple to list for point x,y pairs

* NOP: fixes typo in comment
---
 other/graham_scan.py | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/other/graham_scan.py b/other/graham_scan.py
index 67c5cd8ab9d8..91bb6812fefc 100644
--- a/other/graham_scan.py
+++ b/other/graham_scan.py
@@ -14,7 +14,7 @@
 from sys import maxsize
 
 
-def graham_scan(points: list[list[int, int]]) -> list[list[int, int]]:
+def graham_scan(points: list[tuple[int, int]]) -> list[tuple[int, int]]:
     """Pure implementation of graham scan algorithm in Python
 
     :param points: The unique points on coordinates.
@@ -57,7 +57,7 @@ def graham_scan(points: list[list[int, int]]) -> list[list[int, int]]:
     # remove the lowest and the most left point from points for preparing for sort
     points.pop(minidx)
 
-    def angle_comparer(point: list[int, int], minx: int, miny: int) -> float:
+    def angle_comparer(point: tuple[int, int], minx: int, miny: int) -> float:
         """Return the angle toward to point from (minx, miny)
 
         :param point: The target point
@@ -66,13 +66,13 @@ def angle_comparer(point: list[int, int], minx: int, miny: int) -> float:
         :return: the angle
 
         Examples:
-        >>> angle_comparer([1,1], 0, 0)
+        >>> angle_comparer((1,1), 0, 0)
         45.0
-        >>> angle_comparer([100,1], 10, 10)
+        >>> angle_comparer((100,1), 10, 10)
         -5.710593137499642
-        >>> angle_comparer([5,5], 2, 3)
+        >>> angle_comparer((5,5), 2, 3)
         33.690067525979785
         """
     # sort the points according to the angle from the lowest and the most left point
     sorted_points = sorted(points, key=lambda point: angle_comparer(point, minx, miny))
 
     # This insert actually costs complexity,
-    # and you should insteadly add (minx, miny) into stack later.
+    # and you should instead add (minx, miny) into stack later.
     # I'm using insert just for easy understanding.
sorted_points.insert(0, (minx, miny)) @@ -95,7 +95,7 @@ class Direction(Enum): right = 3 def check_direction( - starting: list[int, int], via: list[int, int], target: list[int, int] + starting: tuple[int, int], via: tuple[int, int], target: tuple[int, int] ) -> Direction: """Return the direction toward to the line from via to target from starting @@ -105,13 +105,13 @@ def check_direction( :return: the Direction Examples: - >>> check_direction([1,1], [2,2], [3,3]) + >>> check_direction((1,1), (2,2), (3,3)) Direction.straight - >>> check_direction([60,1], [-50,199], [30,2]) + >>> check_direction((60,1), (-50,199), (30,2)) Direction.left - >>> check_direction([0,0], [5,5], [10,0]) + >>> check_direction((0,0), (5,5), (10,0)) Direction.right """ x0, y0 = starting @@ -132,12 +132,12 @@ def check_direction( # If they are same, it means they are on a same line of convex hull. if target_angle > via_angle: return Direction.left - if target_angle == via_angle: + elif target_angle == via_angle: return Direction.straight - if target_angle < via_angle: + else: return Direction.right - stack = deque() + stack: deque[tuple[int, int]] = deque() stack.append(sorted_points[0]) stack.append(sorted_points[1]) stack.append(sorted_points[2]) From de07245c170f2007cef415fa4114be870078988e Mon Sep 17 00:00:00 2001 From: Andrew Grangaard Date: Tue, 26 Oct 2021 03:10:37 -0700 Subject: [PATCH 0310/1543] [mypy] Adds type annotations in other/activity_selection #4052 (#5590) --- other/activity_selection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/other/activity_selection.py b/other/activity_selection.py index c03956cce5d2..d809bf90a3f3 100644 --- a/other/activity_selection.py +++ b/other/activity_selection.py @@ -10,7 +10,7 @@ # finish[] --> An array that contains finish time of all activities -def printMaxActivities(start, finish): +def printMaxActivities(start: list[int], finish: list[int]) -> None: """ >>> start = [1, 3, 0, 5, 8, 5] >>> finish = [2, 4, 6, 7, 9, 9] From e49d8e3af427353c18fe5f1afb0927e3e8d6461c Mon Sep 17 00:00:00 2001 From: Erwin Junge Date: Tue, 26 Oct 2021 12:29:27 +0200 Subject: [PATCH 0311/1543] [mypy] annotate `compression` (#5570) --- compression/burrows_wheeler.py | 12 +++++- compression/huffman.py | 48 ++++++++++++----------- compression/lempel_ziv.py | 4 +- compression/peak_signal_to_noise_ratio.py | 4 +- 4 files changed, 40 insertions(+), 28 deletions(-) diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py index 7d705af7428e..4ad99a642e49 100644 --- a/compression/burrows_wheeler.py +++ b/compression/burrows_wheeler.py @@ -12,6 +12,13 @@ """ from __future__ import annotations +from typing import TypedDict + + +class BWTTransformDict(TypedDict): + bwt_string: str + idx_original_string: int + def all_rotations(s: str) -> list[str]: """ @@ -43,7 +50,7 @@ def all_rotations(s: str) -> list[str]: return [s[i:] + s[:i] for i in range(len(s))] -def bwt_transform(s: str) -> dict: +def bwt_transform(s: str) -> BWTTransformDict: """ :param s: The string that will be used at bwt algorithm :return: the string composed of the last char of each row of the ordered @@ -75,10 +82,11 @@ def bwt_transform(s: str) -> dict: rotations = all_rotations(s) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation - return { + response: BWTTransformDict = { "bwt_string": "".join([word[-1] for word in rotations]), "idx_original_string": rotations.index(s), } + return response def 
reverse_bwt(bwt_string: str, idx_original_string: int) -> str: diff --git a/compression/huffman.py b/compression/huffman.py index 8f37a53ce2b7..d5d78b753c3f 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -1,29 +1,31 @@ +from __future__ import annotations + import sys class Letter: - def __init__(self, letter, freq): - self.letter = letter - self.freq = freq - self.bitstring = {} + def __init__(self, letter: str, freq: int): + self.letter: str = letter + self.freq: int = freq + self.bitstring: dict[str, str] = {} - def __repr__(self): + def __repr__(self) -> str: return f"{self.letter}:{self.freq}" class TreeNode: - def __init__(self, freq, left, right): - self.freq = freq - self.left = left - self.right = right + def __init__(self, freq: int, left: Letter | TreeNode, right: Letter | TreeNode): + self.freq: int = freq + self.left: Letter | TreeNode = left + self.right: Letter | TreeNode = right -def parse_file(file_path): +def parse_file(file_path: str) -> list[Letter]: """ Read the file and build a dict of all letters and their frequencies, then convert the dict into a list of Letters. """ - chars = {} + chars: dict[str, int] = {} with open(file_path) as f: while True: c = f.read(1) @@ -33,22 +35,23 @@ def parse_file(file_path): return sorted((Letter(c, f) for c, f in chars.items()), key=lambda l: l.freq) -def build_tree(letters): +def build_tree(letters: list[Letter]) -> Letter | TreeNode: """ Run through the list of Letters and build the min heap for the Huffman Tree. """ - while len(letters) > 1: - left = letters.pop(0) - right = letters.pop(0) + response: list[Letter | TreeNode] = letters # type: ignore + while len(response) > 1: + left = response.pop(0) + right = response.pop(0) total_freq = left.freq + right.freq node = TreeNode(total_freq, left, right) - letters.append(node) - letters.sort(key=lambda l: l.freq) - return letters[0] + response.append(node) + response.sort(key=lambda l: l.freq) + return response[0] -def traverse_tree(root, bitstring): +def traverse_tree(root: Letter | TreeNode, bitstring: str) -> list[Letter]: """ Recursively traverse the Huffman Tree to set each Letter's bitstring dictionary, and return the list of Letters @@ -56,13 +59,14 @@ def traverse_tree(root, bitstring): if type(root) is Letter: root.bitstring[root.letter] = bitstring return [root] + treenode: TreeNode = root # type: ignore letters = [] - letters += traverse_tree(root.left, bitstring + "0") - letters += traverse_tree(root.right, bitstring + "1") + letters += traverse_tree(treenode.left, bitstring + "0") + letters += traverse_tree(treenode.right, bitstring + "1") return letters -def huffman(file_path): +def huffman(file_path: str) -> None: """ Parse the file, build the tree, then run through the file again, using the letters dictionary to find and print out the diff --git a/compression/lempel_ziv.py b/compression/lempel_ziv.py index 6743dc42d56e..ea6f33944a91 100644 --- a/compression/lempel_ziv.py +++ b/compression/lempel_ziv.py @@ -26,7 +26,7 @@ def read_file_binary(file_path: str) -> str: def add_key_to_lexicon( - lexicon: dict, curr_string: str, index: int, last_match_id: str + lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str ) -> None: """ Adds new strings (curr_string + "0", curr_string + "1") to the lexicon @@ -110,7 +110,7 @@ def write_file_binary(file_path: str, to_write: str) -> None: sys.exit() -def compress(source_path, destination_path: str) -> None: +def compress(source_path: str, destination_path: str) -> None: """ Reads source file, 
compresses it and writes the compressed result in destination file diff --git a/compression/peak_signal_to_noise_ratio.py b/compression/peak_signal_to_noise_ratio.py index 6c6c4c38a12a..dded2a712c7e 100644 --- a/compression/peak_signal_to_noise_ratio.py +++ b/compression/peak_signal_to_noise_ratio.py @@ -12,7 +12,7 @@ import numpy as np -def psnr(original, contrast): +def psnr(original: float, contrast: float) -> float: mse = np.mean((original - contrast) ** 2) if mse == 0: return 100 @@ -21,7 +21,7 @@ def psnr(original, contrast): return PSNR -def main(): +def main() -> None: dir_path = os.path.dirname(os.path.realpath(__file__)) # Loading images (original image and compressed image) original = cv2.imread(os.path.join(dir_path, "image_data/original_image.png")) From 700398ec063687b661e9be865f5c32d3c822fca1 Mon Sep 17 00:00:00 2001 From: Erwin Junge Date: Tue, 26 Oct 2021 12:35:21 +0200 Subject: [PATCH 0312/1543] [mypy] annotate `ciphers` (#5569) * [mypy] annotate `ciphers` * Update ciphers/polybius.py * Update polybius.py Co-authored-by: Christian Clauss --- ciphers/decrypt_caesar_with_chi_squared.py | 11 +++++++---- ciphers/polybius.py | 3 +-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py index 89477914a030..beac851b6c2a 100644 --- a/ciphers/decrypt_caesar_with_chi_squared.py +++ b/ciphers/decrypt_caesar_with_chi_squared.py @@ -221,10 +221,13 @@ def decrypt_caesar_with_chi_squared( # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic - most_likely_cipher: int = min( # type: ignore - chi_squared_statistic_values, # type: ignore - key=chi_squared_statistic_values.get, # type: ignore - ) # type: ignore + def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]: + return chi_squared_statistic_values[key] + + most_likely_cipher: int = min( + chi_squared_statistic_values, + key=chi_squared_statistic_values_sorting_key, + ) # Get all the data from the most likely cipher (key, decoded message) ( diff --git a/ciphers/polybius.py b/ciphers/polybius.py index 9e1dc4cbb5a8..2a45f02a3773 100644 --- a/ciphers/polybius.py +++ b/ciphers/polybius.py @@ -45,8 +45,7 @@ def numbers_to_letter(self, index1: int, index2: int) -> str: >>> PolybiusCipher().numbers_to_letter(1, 1) == "a" True """ - letter = self.SQUARE[index1 - 1, index2 - 1] - return letter + return self.SQUARE[index1 - 1, index2 - 1] def encode(self, message: str) -> str: """ From 366a0f18397427b61979ea27f7497718962834d8 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 26 Oct 2021 14:32:34 +0200 Subject: [PATCH 0313/1543] Fix validate_initial_digits of credit_card_validator.py (#5600) * Fix validate_initial_digits of credit_card_validator.py @Bhargavishnu I think that I broke the logic of validate_initial_digits which should require that credit_card_number[0] is 3 before checking that credit_card_number[1] is 4, 5, or 7. Please verify the new changes and the new test cases to make sure that this is correct. Thanks! 
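A quick illustrative check of the intended prefix rule (a hypothetical doctest sketch
assuming the startswith-based helper shown in the diff below; the card-number values
here are illustrative only):

    >>> validate_initial_digits("341111111111111")   # 34/35/37 prefixes pass
    True
    >>> validate_initial_digits("4111111111111111")  # 4, 5 and 6 prefixes pass
    True
    >>> validate_initial_digits("141111111111111")   # a leading "1" is rejected
    False

The old check inspected the second digit in isolation, so a number starting with "14"
was wrongly accepted.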
* updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 ++ strings/credit_card_validator.py | 6 ++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index e2b2442fd1b1..f765b29b60da 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -944,6 +944,7 @@ * [Capitalize](https://github.com/TheAlgorithms/Python/blob/master/strings/capitalize.py) * [Check Anagrams](https://github.com/TheAlgorithms/Python/blob/master/strings/check_anagrams.py) * [Check Pangram](https://github.com/TheAlgorithms/Python/blob/master/strings/check_pangram.py) + * [Credit Card Validator](https://github.com/TheAlgorithms/Python/blob/master/strings/credit_card_validator.py) * [Detecting English Programmatically](https://github.com/TheAlgorithms/Python/blob/master/strings/detecting_english_programmatically.py) * [Frequency Finder](https://github.com/TheAlgorithms/Python/blob/master/strings/frequency_finder.py) * [Indian Phone Validator](https://github.com/TheAlgorithms/Python/blob/master/strings/indian_phone_validator.py) @@ -961,6 +962,7 @@ * [Rabin Karp](https://github.com/TheAlgorithms/Python/blob/master/strings/rabin_karp.py) * [Remove Duplicate](https://github.com/TheAlgorithms/Python/blob/master/strings/remove_duplicate.py) * [Reverse Letters](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_letters.py) + * [Reverse Long Words](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_long_words.py) * [Reverse Words](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_words.py) * [Split](https://github.com/TheAlgorithms/Python/blob/master/strings/split.py) * [Upper](https://github.com/TheAlgorithms/Python/blob/master/strings/upper.py) diff --git a/strings/credit_card_validator.py b/strings/credit_card_validator.py index 3b5a1aae6dc9..3a08c4117a6b 100644 --- a/strings/credit_card_validator.py +++ b/strings/credit_card_validator.py @@ -11,13 +11,11 @@ def validate_initial_digits(credit_card_number: str) -> bool: >>> valid = "4111111111111111 41111111111111 34 35 37 412345 523456 634567" >>> all(validate_initial_digits(cc) for cc in valid.split()) True - >>> invalid = "32323 36111111111111" + >>> invalid = "14 25 76 32323 36111111111111" >>> all(validate_initial_digits(cc) is False for cc in invalid.split()) True """ - if len(credit_card_number) < 2: - return False - return credit_card_number[0] in "456" or credit_card_number[1] in "457" + return credit_card_number.startswith(("34", "35", "37", "4", "5", "6")) def luhn_validation(credit_card_number: str) -> bool: From 827b8f04a4f8090f0a8b42f7685a6b510996e5a3 Mon Sep 17 00:00:00 2001 From: Prakhar Gurunani Date: Tue, 26 Oct 2021 18:43:23 +0530 Subject: [PATCH 0314/1543] Get top 10 HN posts (#5604) * updating DIRECTORY.md * updating DIRECTORY.md * Create get_top_hn_posts.py * updating DIRECTORY.md * Add return type and desc * Add texttable * Update web_programming/get_top_hn_posts.py Co-authored-by: Christian Clauss * Update web_programming/get_top_hn_posts.py Co-authored-by: Christian Clauss * Get top 10 posts * Update get_top_hn_posts.py * Don't use texttable * Setup doctest * Fix pre-commit issues * Remove print statement * Add hackernews_top_stories_as_markdown() Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 2 ++ requirements.txt | 1 + web_programming/get_top_hn_posts.py | 26 ++++++++++++++++++++++++++ 3 files changed, 29 insertions(+) create 
mode 100644 web_programming/get_top_hn_posts.py diff --git a/DIRECTORY.md b/DIRECTORY.md index f765b29b60da..67f2113ea87d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -53,6 +53,7 @@ ## Cellular Automata * [Conways Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/conways_game_of_life.py) * [Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/game_of_life.py) + * [Nagel Schrekenberg](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/nagel_schrekenberg.py) * [One Dimensional](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/one_dimensional.py) ## Ciphers @@ -987,6 +988,7 @@ * [Fetch Jobs](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_jobs.py) * [Get Imdb Top 250 Movies Csv](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdbtop.py) + * [Get Top Hn Posts](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_top_hn_posts.py) * [Get User Tweets](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_user_tweets.py) * [Giphy](https://github.com/TheAlgorithms/Python/blob/master/web_programming/giphy.py) * [Instagram Crawler](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_crawler.py) diff --git a/requirements.txt b/requirements.txt index 7c2672ae25d3..c28238a0774f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,6 +14,7 @@ sklearn statsmodels sympy tensorflow +texttable tweepy types-requests xgboost diff --git a/web_programming/get_top_hn_posts.py b/web_programming/get_top_hn_posts.py new file mode 100644 index 000000000000..fbb7c051a88e --- /dev/null +++ b/web_programming/get_top_hn_posts.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +import requests + + +def get_hackernews_story(story_id: str) -> dict: + url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty" + return requests.get(url).json() + + +def hackernews_top_stories(max_stories: int = 10) -> list[dict]: + """ + Get the top max_stories posts from HackerNews - https://news.ycombinator.com/ + """ + url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty" + story_ids = requests.get(url).json()[:max_stories] + return [get_hackernews_story(story_id) for story_id in story_ids] + + +def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str: + stories = hackernews_top_stories(max_stories) + return "\n".join("* [{title}]({url})".format(**story) for story in stories) + + +if __name__ == "__main__": + print(hackernews_top_stories_as_markdown()) From 6fcefc04535357776124806f0fac40c005b35d1a Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Tue, 26 Oct 2021 18:53:38 +0530 Subject: [PATCH 0315/1543] Add decode function to base16.py (#5575) * Add decode function * Update base16.py * Update base16.py * Update base16.py * Made the line shorter * Made another line shorter --- ciphers/base16.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/ciphers/base16.py b/ciphers/base16.py index f27ea4628e54..1ef60868dc3f 100644 --- a/ciphers/base16.py +++ b/ciphers/base16.py @@ -4,6 +4,7 @@ def encode_to_b16(inp: str) -> bytes: """ Encodes a given utf-8 string into base-16. 
+
     >>> encode_to_b16('Hello World!')
     b'48656C6C6F20576F726C6421'
     >>> encode_to_b16('HELLO WORLD!')
@@ -11,9 +12,23 @@ def encode_to_b16(inp: str) -> bytes:
     >>> encode_to_b16('')
     b''
     """
-    encoded = inp.encode("utf-8")  # encoded the input (we need a bytes like object)
-    b16encoded = base64.b16encode(encoded)  # b16encoded the encoded string
-    return b16encoded
+    # encode the input into a bytes-like object and then b16encode that
+    return base64.b16encode(inp.encode("utf-8"))
+
+
+def decode_from_b16(b16encoded: bytes) -> str:
+    """
+    Decodes from base-16 to a utf-8 string.
+
+    >>> decode_from_b16(b'48656C6C6F20576F726C6421')
+    'Hello World!'
+    >>> decode_from_b16(b'48454C4C4F20574F524C4421')
+    'HELLO WORLD!'
+    >>> decode_from_b16(b'')
+    ''
+    """
+    # b16decode the input into bytes and decode that into a human readable string
+    return base64.b16decode(b16encoded).decode("utf-8")
 
 
 if __name__ == "__main__":

From 23f43afee5f7ee3a2bc60ad2997644c54b23bba7 Mon Sep 17 00:00:00 2001
From: Sabari Ganesh <64348740+SabariGanesh-K@users.noreply.github.com>
Date: Tue, 26 Oct 2021 21:27:59 +0530
Subject: [PATCH 0316/1543] Added volume conversions (#5607)

* Added volume conversions

This is a file which has relevant function which helps in conversion between
volume units. Available Units:- Cubic metre,Litre,KiloLitre,Gallon,Cubic
yard,Cubic foot,cup The file is also written in a way that, adding a new unit
can easily be done by modifying tuple available in the source code

* Formatted file

The file was formatted to follow the syntax formatting rules of the repo

* Formatted file further
---
 conversions/volume_conversions.py | 79 +++++++++++++++++++++++++++++++
 1 file changed, 79 insertions(+)
 create mode 100644 conversions/volume_conversions.py

diff --git a/conversions/volume_conversions.py b/conversions/volume_conversions.py
new file mode 100644
index 000000000000..de2290196fc2
--- /dev/null
+++ b/conversions/volume_conversions.py
@@ -0,0 +1,79 @@
+"""
+Conversion of volume units.
+Available Units:- Cubic metre,Litre,KiloLitre,Gallon,Cubic yard,Cubic foot,cup
+USAGE :
+-> Import this file into their respective project.
+-> Use the function volume_conversion() for conversion of volume units.
+-> Parameters :
+   -> value : The number of from units you want to convert
+   -> from_type : From which type you want to convert
+   -> to_type : To which type you want to convert
+REFERENCES :
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Cubic_metre
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Litre
+-> Wikipedia reference: https://en.wiktionary.org/wiki/kilolitre
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Gallon
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Cubic_yard
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Cubic_foot
+-> Wikipedia reference: https://en.wikipedia.org/wiki/Cup_(unit)
+"""

+from collections import namedtuple

+from_to = namedtuple("from_to", "from_ to")

+METRIC_CONVERSION = {
+    "cubicmeter": from_to(1, 1),
+    "litre": from_to(0.001, 1000),
+    "kilolitre": from_to(1, 1),
+    "gallon": from_to(0.00454, 264.172),
+    "cubicyard": from_to(0.76455, 1.30795),
+    "cubicfoot": from_to(0.028, 35.3147),
+    "cup": from_to(0.000236588, 4226.75),
+}


+def volume_conversion(value: float, from_type: str, to_type: str) -> float:
+    """
+    Conversion between volume units.
+ >>> volume_conversion(4, "cubicmeter", "litre") + 4000 + >>> volume_conversion(1, "litre", "gallon") + 0.264172 + >>> volume_conversion(1, "kilolitre", "cubicmeter") + 1 + >>> volume_conversion(3, "gallon", "cubicyard") + 0.017814279 + >>> volume_conversion(2, "cubicyard", "litre") + 1529.1 + >>> volume_conversion(4, "cubicfoot", "cup") + 473.396 + >>> volume_conversion(1, "cup", "kilolitre") + 0.000236588 + >>> volume_conversion(4, "wrongUnit", "litre") + Traceback (most recent call last): + File "/usr/lib/python3.8/doctest.py", line 1336, in __run + exec(compile(example.source, filename, "single", + File "", line 1, in + volume_conversion(4, "wrongUnit", "litre") + File "", line 62, in volume_conversion + ValueError: Invalid 'from_type' value: 'wrongUnit' Supported values are: + cubicmeter, litre, kilolitre, gallon, cubicyard, cubicfoot, cup + """ + if from_type not in METRIC_CONVERSION: + raise ValueError( + f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + + ", ".join(METRIC_CONVERSION) + ) + if to_type not in METRIC_CONVERSION: + raise ValueError( + f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + + ", ".join(METRIC_CONVERSION) + ) + return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c41bb49fc05339b6c9fbfe4c025946602b4bb919 Mon Sep 17 00:00:00 2001 From: Tarcisio Bruni Rangel Date: Tue, 26 Oct 2021 13:19:00 -0300 Subject: [PATCH 0317/1543] Financials (#5585) * feat: creates math calculations for financials * refactor: make pull request items requirements * refactor: provides type hint for parameters * refactor: applies code review suggestions * refactor: adds more examples tests * refactor: throws ValueError instead of Exception * refactor: fix formatting * refactor: fix formatting * Update interest.py * Update and rename financials/ABOUT.md to financial/ABOUT.md * Rename financials/__init__.py to financial/__init__.py * Rename financials/interest.py to financial/interest.py * https://www.investopedia.com * Update __init__.py * pre-commit: Disable end-of-file-fixer * Revert change to pre-commit * Update __init__.py * __init__.py Co-authored-by: Christian Clauss Co-authored-by: John Law --- financial/ABOUT.md | 4 +++ financial/__init__.py | 0 financial/interest.py | 83 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+) create mode 100644 financial/ABOUT.md create mode 100644 financial/__init__.py create mode 100644 financial/interest.py diff --git a/financial/ABOUT.md b/financial/ABOUT.md new file mode 100644 index 000000000000..f6b0647f8201 --- /dev/null +++ b/financial/ABOUT.md @@ -0,0 +1,4 @@ +### Interest + +* Compound Interest: "Compound interest is calculated by multiplying the initial principal amount by one plus the annual interest rate raised to the number of compound periods minus one." [Compound Interest](https://www.investopedia.com/) +* Simple Interest: "Simple interest paid or received over a certain period is a fixed percentage of the principal amount that was borrowed or lent. 
" [Simple Interest](https://www.investopedia.com/) diff --git a/financial/__init__.py b/financial/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/financial/interest.py b/financial/interest.py new file mode 100644 index 000000000000..394da2bc9511 --- /dev/null +++ b/financial/interest.py @@ -0,0 +1,83 @@ +# https://www.investopedia.com + +from __future__ import annotations + + +def simple_interest( + principle: float, daily_interest_rate: float, days_between_payments: int +) -> float: + """ + >>> simple_interest(18000.0, 0.06, 3) + 3240.0 + >>> simple_interest(0.5, 0.06, 3) + 0.09 + >>> simple_interest(18000.0, 0.01, 10) + 1800.0 + >>> simple_interest(18000.0, 0.0, 3) + 0.0 + >>> simple_interest(5500.0, 0.01, 100) + 5500.0 + >>> simple_interest(10000.0, -0.06, 3) + Traceback (most recent call last): + ... + ValueError: daily_interest_rate must be >= 0 + >>> simple_interest(-10000.0, 0.06, 3) + Traceback (most recent call last): + ... + ValueError: principle must be > 0 + >>> simple_interest(5500.0, 0.01, -5) + Traceback (most recent call last): + ... + ValueError: days_between_payments must be > 0 + """ + if days_between_payments <= 0: + raise ValueError("days_between_payments must be > 0") + if daily_interest_rate < 0: + raise ValueError("daily_interest_rate must be >= 0") + if principle <= 0: + raise ValueError("principle must be > 0") + return principle * daily_interest_rate * days_between_payments + + +def compound_interest( + principle: float, + nominal_annual_interest_rate_percentage: float, + number_of_compounding_periods: int, +) -> float: + """ + >>> compound_interest(10000.0, 0.05, 3) + 1576.2500000000014 + >>> compound_interest(10000.0, 0.05, 1) + 500.00000000000045 + >>> compound_interest(0.5, 0.05, 3) + 0.07881250000000006 + >>> compound_interest(10000.0, 0.06, -4) + Traceback (most recent call last): + ... + ValueError: number_of_compounding_periods must be > 0 + >>> compound_interest(10000.0, -3.5, 3.0) + Traceback (most recent call last): + ... + ValueError: nominal_annual_interest_rate_percentage must be >= 0 + >>> compound_interest(-5500.0, 0.01, 5) + Traceback (most recent call last): + ... 
+ ValueError: principle must be > 0 + """ + if number_of_compounding_periods <= 0: + raise ValueError("number_of_compounding_periods must be > 0") + if nominal_annual_interest_rate_percentage < 0: + raise ValueError("nominal_annual_interest_rate_percentage must be >= 0") + if principle <= 0: + raise ValueError("principle must be > 0") + + return principle * ( + (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods + - 1 + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From cb4dc197238e670d150eaebd8f387670f7a54377 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 26 Oct 2021 18:41:32 +0200 Subject: [PATCH 0318/1543] Financial: principle -> principal (#5614) * Financial: principle -> principal The originally invested amount of money: `principal` -- https://www.grammarly.com/blog/principle-principal/ * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 4 ++++ financial/interest.py | 20 ++++++++++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 67f2113ea87d..6fbd5e2cc1c5 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -129,6 +129,7 @@ * [Rgb Hsv Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/rgb_hsv_conversion.py) * [Roman Numerals](https://github.com/TheAlgorithms/Python/blob/master/conversions/roman_numerals.py) * [Temperature Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/temperature_conversions.py) + * [Volume Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/volume_conversions.py) * [Weight Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/weight_conversion.py) ## Data Structures @@ -290,6 +291,9 @@ * Tests * [Test Send File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/tests/test_send_file.py) +## Financial + * [Interest](https://github.com/TheAlgorithms/Python/blob/master/financial/interest.py) + ## Fractals * [Julia Sets](https://github.com/TheAlgorithms/Python/blob/master/fractals/julia_sets.py) * [Koch Snowflake](https://github.com/TheAlgorithms/Python/blob/master/fractals/koch_snowflake.py) diff --git a/financial/interest.py b/financial/interest.py index 394da2bc9511..c69c730457d9 100644 --- a/financial/interest.py +++ b/financial/interest.py @@ -4,7 +4,7 @@ def simple_interest( - principle: float, daily_interest_rate: float, days_between_payments: int + principal: float, daily_interest_rate: float, days_between_payments: int ) -> float: """ >>> simple_interest(18000.0, 0.06, 3) @@ -24,7 +24,7 @@ def simple_interest( >>> simple_interest(-10000.0, 0.06, 3) Traceback (most recent call last): ... - ValueError: principle must be > 0 + ValueError: principal must be > 0 >>> simple_interest(5500.0, 0.01, -5) Traceback (most recent call last): ... 
@@ -34,13 +34,13 @@ def simple_interest( raise ValueError("days_between_payments must be > 0") if daily_interest_rate < 0: raise ValueError("daily_interest_rate must be >= 0") - if principle <= 0: - raise ValueError("principle must be > 0") - return principle * daily_interest_rate * days_between_payments + if principal <= 0: + raise ValueError("principal must be > 0") + return principal * daily_interest_rate * days_between_payments def compound_interest( - principle: float, + principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: int, ) -> float: @@ -62,16 +62,16 @@ def compound_interest( >>> compound_interest(-5500.0, 0.01, 5) Traceback (most recent call last): ... - ValueError: principle must be > 0 + ValueError: principal must be > 0 """ if number_of_compounding_periods <= 0: raise ValueError("number_of_compounding_periods must be > 0") if nominal_annual_interest_rate_percentage < 0: raise ValueError("nominal_annual_interest_rate_percentage must be >= 0") - if principle <= 0: - raise ValueError("principle must be > 0") + if principal <= 0: + raise ValueError("principal must be > 0") - return principle * ( + return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) From 31061aacf2482e7a8521f577b68930716dab21eb Mon Sep 17 00:00:00 2001 From: Connor Bottum Date: Tue, 26 Oct 2021 12:43:46 -0400 Subject: [PATCH 0319/1543] fix: use `+=` in sorts/recursive_mergesort_array.py (#5019) --- sorts/recursive_mergesort_array.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/sorts/recursive_mergesort_array.py b/sorts/recursive_mergesort_array.py index f714d02380cf..e02c02405565 100644 --- a/sorts/recursive_mergesort_array.py +++ b/sorts/recursive_mergesort_array.py @@ -38,23 +38,23 @@ def merge(arr: list[int]) -> list[int]: ): # Runs until the lowers size of the left and right are sorted. if left_array[left_index] < right_array[right_index]: arr[index] = left_array[left_index] - left_index = left_index + 1 + left_index += 1 else: arr[index] = right_array[right_index] - right_index = right_index + 1 - index = index + 1 + right_index += 1 + index += 1 while ( left_index < left_size ): # Adds the left over elements in the left half of the array arr[index] = left_array[left_index] - left_index = left_index + 1 - index = index + 1 + left_index += 1 + index += 1 while ( right_index < right_size ): # Adds the left over elements in the right half of the array arr[index] = right_array[right_index] - right_index = right_index + 1 - index = index + 1 + right_index += 1 + index += 1 return arr From b4036b70f1f51c29d0c94a591e8738ac81d7397a Mon Sep 17 00:00:00 2001 From: Srishtik Bhandarkar <53395406+srishtik2310@users.noreply.github.com> Date: Tue, 26 Oct 2021 22:40:15 +0530 Subject: [PATCH 0320/1543] Add solution for probelm_686 of project_euler (#5480) * Added solution for probelm_686 of project_euler * Changed documentation and formatting. 
* Added ref link to optimization logic

* Update project_euler/problem_686/sol1.py

Co-authored-by: John Law

Co-authored-by: John Law
---
 project_euler/problem_686/__init__.py |   0
 project_euler/problem_686/sol1.py     | 160 ++++++++++++++++++++++++++
 2 files changed, 160 insertions(+)
 create mode 100644 project_euler/problem_686/__init__.py
 create mode 100644 project_euler/problem_686/sol1.py

diff --git a/project_euler/problem_686/__init__.py b/project_euler/problem_686/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/project_euler/problem_686/sol1.py b/project_euler/problem_686/sol1.py
new file mode 100644
index 000000000000..3b6bdb655170
--- /dev/null
+++ b/project_euler/problem_686/sol1.py
@@ -0,0 +1,160 @@
+"""
+Project Euler Problem 686: https://projecteuler.net/problem=686
+
+2^7 = 128 is the first power of two whose leading digits are "12".
+The next power of two whose leading digits are "12" is 2^80.
+
+Define p(L,n) to be the nth-smallest value of j such that
+the base 10 representation of 2^j begins with the digits of L.
+
+So p(12, 1) = 7 and p(12, 2) = 80.
+
+You are given that p(123, 45) = 12710.
+
+Find p(123, 678910).
+"""
+
+import math
+
+
+def log_difference(number: int) -> float:
+    """
+    This function returns the decimal value of a number multiplied by log(2).
+    Since the problem is on powers of two, finding the powers of two with
+    large exponents is time consuming. Hence we use log to reduce compute time.
+
+    We can find out that the first power of 2 with starting digits 123 is 90.
+    Computing 2^90 is time consuming.
+    Hence we find log(2^90) = 90*log(2) = 27.092699609758302
+    But we require only the decimal part to determine whether the power starts with 123.
+    So we just return the decimal part of the log product.
+    Therefore we return 0.092699609758302
+
+    >>> log_difference(90)
+    0.092699609758302
+    >>> log_difference(379)
+    0.090368356648852
+
+    """
+
+    log_number = math.log(2, 10) * number
+    difference = round((log_number - int(log_number)), 15)
+
+    return difference
+
+
+def solution(number: int = 678910) -> int:
+    """
+    This function calculates the power of two which is the nth (n = number)
+    smallest value of power of 2
+    such that the starting digits of the 2^power is 123.
+
+    For example the powers of 2 for which starting digits is 123 are:
+    90, 379, 575, 864, 1060, 1545, 1741, 2030, 2226, 2515 and so on.
+    90 is the first power of 2 whose starting digits are 123,
+    379 is the second power of 2 whose starting digits are 123,
+    and so on.
+
+    So if number = 10, then solution returns 2515 as we observe from above series.
+
+    We will define a lowerbound and upperbound.
+    lowerbound = log(1.23), upperbound = log(1.24)
+    because we need to find the powers that yield 123 as starting digits.
+
+    log(1.23) = 0.08990511143939792, log(1.24) = 0.09342168516223506.
+    We use 1.23 and not 12.3 or 123, because log(1.23) yields only a decimal value
+    which is less than 1.
+    log(12.3) will be the same decimal value but with 1 added to it,
+    which is log(12.3) = 1.093421685162235.
+    We observe that the decimal value remains the same whether we use 1.23 or 12.3.
+    Since we use the function log_difference(),
+    which returns only the decimal part of the value, using 1.23 is logical.
+
+    If we see, 90*log(2) = 27.092699609758302,
+    decimal part = 0.092699609758302, which is inside the range of lowerbound
+    and upperbound.
+
+    If we compute the differences between successive powers which lead to 123
+    as starting digits, they are as follows:
+
+    379 - 90 = 289
+    575 - 379 = 196
+    864 - 575 = 289
+    1060 - 864 = 196
+
+    We see a pattern here. The difference is either 196 or 289 = 196 + 93.
+
+    Hence to optimize the algorithm we will increment by 196 or 93 depending upon the
+    log_difference() value.
+
+    Let's take, for example, 90.
+    Since 90 is the first power leading to starting digits as 123,
+    we will increment iterator by 196.
+    Because the difference between any two powers leading to 123
+    as starting digits is greater than or equal to 196.
+    After incrementing by 196 we get 286.
+
+    log_difference(286) = 0.09457875989861 which is greater than upperbound.
+    The next power is 379, and we need to add 93 to get there.
+    The iterator will now become 379,
+    which is the next power leading to 123 as starting digits.
+
+    Let's take 1060. We increment by 196, we get 1256.
+    log_difference(1256) = 0.09367455396034,
+    which is greater than upperbound, hence we increment by 93. Now iterator is 1349.
+    log_difference(1349) = 0.08946415071057 which is less than lowerbound.
+    The next power is 1545 and we need to add 196 to get 1545.
+
+    Conditions are as follows:
+
+    1) If we find a power, whose log_difference() is in the range of
+    lower and upperbound, we will increment by 196,
+    which implies that the power is a number which will lead to 123 as starting digits.
+    2) If we find a power, whose log_difference() is greater than or equal to upperbound,
+    we will increment by 93.
+    3) if log_difference() < lowerbound, we increment by 196.
+
+    Reference to the above logic:
+    https://math.stackexchange.com/questions/4093970/powers-of-2-starting-with-123-does-a-pattern-exist
+
+    >>> solution(1000)
+    284168
+
+    >>> solution(56000)
+    15924915
+
+    >>> solution(678910)
+    193060223
+
+    """
+
+    power_iterator = 90
+    position = 0
+
+    lower_limit = math.log(1.23, 10)
+    upper_limit = math.log(1.24, 10)
+    previous_power = 0
+
+    while position < number:
+        difference = log_difference(power_iterator)
+
+        if difference >= upper_limit:
+            power_iterator += 93
+
+        elif difference < lower_limit:
+            power_iterator += 196
+
+        else:
+            previous_power = power_iterator
+            power_iterator += 196
+            position += 1
+
+    return previous_power
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
+
+    print(f"{solution() = }")

From 582f57f41fb9d36ae8fe4d49c98775877b9013b7 Mon Sep 17 00:00:00 2001
From: Sabari Ganesh <64348740+SabariGanesh-K@users.noreply.github.com>
Date: Tue, 26 Oct 2021 23:25:41 +0530
Subject: [PATCH 0321/1543] Added physical pressure units (#5613)

* Added physical pressure units

This uses tuple pair which stores units required to be converted to respective
other units as mentioned. Available Units:- Pascal,Bar,Kilopascal,Megapascal,
psi(pound per square inch),inHg(in mercury column),torr,atm

* Formatted file

File was formatted as per repo rules

* Reformatted file :)

* Added more reference

* More reference added
---
 conversions/pressure_conversions.py | 85 +++++++++++++++++++++++++++++
 1 file changed, 85 insertions(+)
 create mode 100644 conversions/pressure_conversions.py

diff --git a/conversions/pressure_conversions.py b/conversions/pressure_conversions.py
new file mode 100644
index 000000000000..2018080b9327
--- /dev/null
+++ b/conversions/pressure_conversions.py
@@ -0,0 +1,85 @@
+"""
+Conversion of pressure units.
+Available Units:- Pascal,Bar,Kilopascal,Megapascal,psi(pound per square inch), +inHg(in mercury column),torr,atm +USAGE : +-> Import this file into their respective project. +-> Use the function pressure_conversion() for conversion of pressure units. +-> Parameters : + -> value : The number of from units you want to convert + -> from_type : From which type you want to convert + -> to_type : To which type you want to convert +REFERENCES : +-> Wikipedia reference: https://en.wikipedia.org/wiki/Pascal_(unit) +-> Wikipedia reference: https://en.wikipedia.org/wiki/Pound_per_square_inch +-> Wikipedia reference: https://en.wikipedia.org/wiki/Inch_of_mercury +-> Wikipedia reference: https://en.wikipedia.org/wiki/Torr +-> https://en.wikipedia.org/wiki/Standard_atmosphere_(unit) +-> https://msestudent.com/what-are-the-units-of-pressure/ +-> https://www.unitconverters.net/pressure-converter.html +""" + +from collections import namedtuple + +from_to = namedtuple("from_to", "from_ to") + +PRESSURE_CONVERSION = { + "atm": from_to(1, 1), + "pascal": from_to(0.0000098, 101325), + "bar": from_to(0.986923, 1.01325), + "kilopascal": from_to(0.00986923, 101.325), + "megapascal": from_to(9.86923, 0.101325), + "psi": from_to(0.068046, 14.6959), + "inHg": from_to(0.0334211, 29.9213), + "torr": from_to(0.00131579, 760), +} + + +def pressure_conversion(value: float, from_type: str, to_type: str) -> float: + """ + Conversion between pressure units. + >>> pressure_conversion(4, "atm", "pascal") + 405300 + >>> pressure_conversion(1, "pascal", "psi") + 0.00014401981999999998 + >>> pressure_conversion(1, "bar", "atm") + 0.986923 + >>> pressure_conversion(3, "kilopascal", "bar") + 0.029999991892499998 + >>> pressure_conversion(2, "megapascal", "psi") + 290.074434314 + >>> pressure_conversion(4, "psi", "torr") + 206.85984 + >>> pressure_conversion(1, "inHg", "atm") + 0.0334211 + >>> pressure_conversion(1, "torr", "psi") + 0.019336718261000002 + >>> pressure_conversion(4, "wrongUnit", "atm") + Traceback (most recent call last): + File "/usr/lib/python3.8/doctest.py", line 1336, in __run + exec(compile(example.source, filename, "single", + File "", line 1, in + pressure_conversion(4, "wrongUnit", "atm") + File "", line 67, in pressure_conversion + ValueError: Invalid 'from_type' value: 'wrongUnit' Supported values are: + atm, pascal, bar, kilopascal, megapascal, psi, inHg, torr + """ + if from_type not in PRESSURE_CONVERSION: + raise ValueError( + f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + + ", ".join(PRESSURE_CONVERSION) + ) + if to_type not in PRESSURE_CONVERSION: + raise ValueError( + f"Invalid 'to_type' value: {to_type!r}. 
Supported values are:\n" + + ", ".join(PRESSURE_CONVERSION) + ) + return ( + value * PRESSURE_CONVERSION[from_type].from_ * PRESSURE_CONVERSION[to_type].to + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c0ed031b3fcf47736f98dfd89e2588dbffceadde Mon Sep 17 00:00:00 2001 From: Edward Nuno Date: Tue, 26 Oct 2021 11:33:08 -0700 Subject: [PATCH 0322/1543] Fix type annotations for stack.py (#5566) --- data_structures/stacks/balanced_parentheses.py | 2 +- .../stacks/dijkstras_two_stack_algorithm.py | 4 ++-- .../stacks/infix_to_postfix_conversion.py | 2 +- data_structures/stacks/stack.py | 18 +++++++++++------- 4 files changed, 15 insertions(+), 11 deletions(-) diff --git a/data_structures/stacks/balanced_parentheses.py b/data_structures/stacks/balanced_parentheses.py index 674f7ea436ed..3c036c220e5c 100644 --- a/data_structures/stacks/balanced_parentheses.py +++ b/data_structures/stacks/balanced_parentheses.py @@ -14,7 +14,7 @@ def balanced_parentheses(parentheses: str) -> bool: >>> balanced_parentheses("") True """ - stack = Stack() + stack: Stack[str] = Stack() bracket_pairs = {"(": ")", "[": "]", "{": "}"} for bracket in parentheses: if bracket in bracket_pairs: diff --git a/data_structures/stacks/dijkstras_two_stack_algorithm.py b/data_structures/stacks/dijkstras_two_stack_algorithm.py index 8b4668f9f839..ba2ca92c7b5c 100644 --- a/data_structures/stacks/dijkstras_two_stack_algorithm.py +++ b/data_structures/stacks/dijkstras_two_stack_algorithm.py @@ -51,8 +51,8 @@ def dijkstras_two_stack_algorithm(equation: str) -> int: """ operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub} - operand_stack = Stack() - operator_stack = Stack() + operand_stack: Stack[int] = Stack() + operator_stack: Stack[str] = Stack() for i in equation: if i.isdigit(): diff --git a/data_structures/stacks/infix_to_postfix_conversion.py b/data_structures/stacks/infix_to_postfix_conversion.py index dedba8479ac8..b812d108e290 100644 --- a/data_structures/stacks/infix_to_postfix_conversion.py +++ b/data_structures/stacks/infix_to_postfix_conversion.py @@ -38,7 +38,7 @@ def infix_to_postfix(expression_str: str) -> str: """ if not balanced_parentheses(expression_str): raise ValueError("Mismatched parentheses") - stack = Stack() + stack: Stack[str] = Stack() postfix = [] for char in expression_str: if char.isalpha() or char.isdigit(): diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index 4bc032f72561..d1c73df43067 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -1,5 +1,9 @@ from __future__ import annotations +from typing import Generic, TypeVar + +T = TypeVar("T") + class StackOverflowError(BaseException): pass @@ -9,7 +13,7 @@ class StackUnderflowError(BaseException): pass -class Stack: +class Stack(Generic[T]): """A stack is an abstract data type that serves as a collection of elements with two principal operations: push() and pop(). 
push() adds an element to the top of the stack, and pop() removes an element from the top @@ -19,7 +23,7 @@ class Stack: """ def __init__(self, limit: int = 10): - self.stack: list[int] = [] + self.stack: list[T] = [] self.limit = limit def __bool__(self) -> bool: @@ -28,13 +32,13 @@ def __bool__(self) -> bool: def __str__(self) -> str: return str(self.stack) - def push(self, data): + def push(self, data: T) -> None: """Push an element to the top of the stack.""" if len(self.stack) >= self.limit: raise StackOverflowError self.stack.append(data) - def pop(self): + def pop(self) -> T: """ Pop an element off of the top of the stack. @@ -47,7 +51,7 @@ def pop(self): raise StackUnderflowError return self.stack.pop() - def peek(self): + def peek(self) -> T: """ Peek at the top-most element of the stack. @@ -71,7 +75,7 @@ def size(self) -> int: """Return the size of the stack.""" return len(self.stack) - def __contains__(self, item) -> bool: + def __contains__(self, item: T) -> bool: """Check if item is in stack""" return item in self.stack @@ -80,7 +84,7 @@ def test_stack() -> None: """ >>> test_stack() """ - stack = Stack(10) + stack: Stack[int] = Stack(10) assert bool(stack) is False assert stack.is_empty() is True assert stack.is_full() is False From 9a03919052276bfe83f90a7ba2fe258d20a104e9 Mon Sep 17 00:00:00 2001 From: Edward Nuno Date: Tue, 26 Oct 2021 12:12:46 -0700 Subject: [PATCH 0323/1543] [mypy] Fix type annotations for stack_using_dll.py (#5577) * Fix mypy annotations for stack_using_dll.py * Replace Optional with inline union type --- data_structures/stacks/stack_using_dll.py | 40 ++++++++++++++--------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/data_structures/stacks/stack_using_dll.py b/data_structures/stacks/stack_using_dll.py index 75e0cd20640d..a129665f209f 100644 --- a/data_structures/stacks/stack_using_dll.py +++ b/data_structures/stacks/stack_using_dll.py @@ -1,15 +1,21 @@ # A complete working Python program to demonstrate all # stack operations using a doubly linked list +from __future__ import annotations -class Node: - def __init__(self, data): +from typing import Generic, TypeVar + +T = TypeVar("T") + + +class Node(Generic[T]): + def __init__(self, data: T): self.data = data # Assign data - self.next = None # Initialize next as null - self.prev = None # Initialize prev as null + self.next: Node[T] | None = None # Initialize next as null + self.prev: Node[T] | None = None # Initialize prev as null -class Stack: +class Stack(Generic[T]): """ >>> stack = Stack() >>> stack.is_empty() @@ -35,10 +41,10 @@ class Stack: 2->1->0-> """ - def __init__(self): - self.head = None + def __init__(self) -> None: + self.head: Node[T] | None = None - def push(self, data): + def push(self, data: T) -> None: """add a Node to the stack""" if self.head is None: self.head = Node(data) @@ -49,21 +55,23 @@ def push(self, data): new_node.prev = None self.head = new_node - def pop(self): + def pop(self) -> T | None: """pop the top element off the stack""" if self.head is None: return None else: + assert self.head is not None temp = self.head.data self.head = self.head.next - self.head.prev = None + if self.head is not None: + self.head.prev = None return temp - def top(self): + def top(self) -> T | None: """return the top element of the stack""" - return self.head.data + return self.head.data if self.head is not None else None - def __len__(self): + def __len__(self) -> int: temp = self.head count = 0 while temp is not None: @@ -71,10 +79,10 @@ def __len__(self): temp = 
temp.next return count - def is_empty(self): + def is_empty(self) -> bool: return self.head is None - def print_stack(self): + def print_stack(self) -> None: print("stack elements are:") temp = self.head while temp is not None: @@ -86,7 +94,7 @@ def print_stack(self): if __name__ == "__main__": # Start with the empty stack - stack = Stack() + stack: Stack[int] = Stack() # Insert 4 at the beginning. So stack becomes 4->None print("Stack operations using Doubly LinkedList") From 4eb5c12727d7590d17a5cbefe34c6a255f69e670 Mon Sep 17 00:00:00 2001 From: Matteo Messmer <40521259+matteomessmer@users.noreply.github.com> Date: Wed, 27 Oct 2021 00:28:26 +0200 Subject: [PATCH 0324/1543] Sphere intersection and spherical cap volumes (#5579) * sphere intersection + spherical cap volume formulas * reformatted * Update volume.py Co-authored-by: Christian Clauss --- maths/volume.py | 52 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/maths/volume.py b/maths/volume.py index 51b2b9fc0334..fd24aa9eef54 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -20,6 +20,56 @@ def vol_cube(side_length: int | float) -> float: return pow(side_length, 3) +def vol_spherical_cap(height: float, radius: float) -> float: + """ + Calculate the Volume of the spherical cap. + :return 1/3 pi * height ^ 2 * (3 * radius - height) + + >>> vol_spherical_cap(1, 2) + 5.235987755982988 + """ + return 1 / 3 * pi * pow(height, 2) * (3 * radius - height) + + +def vol_spheres_intersect( + radius_1: float, radius_2: float, centers_distance: float +) -> float: + """ + Calculate the volume of the intersection of two spheres. + + The intersection is composed by two spherical caps and therefore its volume is the + sum of the volumes of the spherical caps. First it calculates the heights (h1, h2) + of the the spherical caps, then the two volumes and it returns the sum. + The height formulas are + h1 = (radius_1 - radius_2 + centers_distance) + * (radius_1 + radius_2 - centers_distance) + / (2 * centers_distance) + h2 = (radius_2 - radius_1 + centers_distance) + * (radius_2 + radius_1 - centers_distance) + / (2 * centers_distance) + if centers_distance is 0 then it returns the volume of the smallers sphere + :return vol_spherical_cap(h1, radius_2) + vol_spherical_cap(h2, radius_1) + + >>> vol_spheres_intersect(2, 2, 1) + 21.205750411731103 + """ + if centers_distance == 0: + return vol_sphere(min(radius_1, radius_2)) + + h1 = ( + (radius_1 - radius_2 + centers_distance) + * (radius_1 + radius_2 - centers_distance) + / (2 * centers_distance) + ) + h2 = ( + (radius_2 - radius_1 + centers_distance) + * (radius_2 + radius_1 - centers_distance) + / (2 * centers_distance) + ) + + return vol_spherical_cap(h1, radius_2) + vol_spherical_cap(h2, radius_1) + + def vol_cuboid(width: float, height: float, length: float) -> float: """ Calculate the Volume of a Cuboid. 
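# A quick numeric check of the vol_spheres_intersect() formula added in the
# hunk above, assuming the h1/h2 expressions quoted in its docstring, for
# radius_1 = radius_2 = 2 and centers_distance = 1:
#     h1 = h2 = (2 - 2 + 1) * (2 + 2 - 1) / (2 * 1) = 1.5
# so the intersection consists of two equal spherical caps and its volume is
#     2 * vol_spherical_cap(1.5, 2) = 2 * (1 / 3) * pi * 1.5 ** 2 * (3 * 2 - 1.5)
#                                   = 6.75 * pi
#                                   = 21.205750411731103 (approximately)
# which matches the doctest value for vol_spheres_intersect(2, 2, 1).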
@@ -127,6 +177,8 @@ def main(): print("Pyramid: " + str(vol_pyramid(2, 2))) # ~= 1.33 print("Sphere: " + str(vol_sphere(2))) # ~= 33.5 print("Circular Cylinder: " + str(vol_circular_cylinder(2, 2))) # ~= 25.1 + print("Spherical cap: " + str(vol_spherical_cap(1, 2))) # ~= 5.24 + print("Spheres intersetion: " + str(vol_spheres_intersect(2, 2, 1))) # ~= 21.21 if __name__ == "__main__": From 8285913e81fb8f46b90d0e19da233862964c07dc Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Wed, 27 Oct 2021 00:45:33 -0300 Subject: [PATCH 0325/1543] [mypy] Fix and add type annotations (#5618) --- data_structures/queue/double_ended_queue.py | 72 ++++++--------------- 1 file changed, 18 insertions(+), 54 deletions(-) diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 36106d8bc0d9..a4658d99759c 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -1,6 +1,8 @@ """ Implementation of double ended queue. """ +from __future__ import annotations + from dataclasses import dataclass from typing import Any, Iterable @@ -8,35 +10,23 @@ class Deque: """ Deque data structure. - Operations ---------- append(val: Any) -> None - appendleft(val: Any) -> None - extend(iter: Iterable) -> None - extendleft(iter: Iterable) -> None - pop() -> Any - popleft() -> Any - - Observers --------- is_empty() -> bool - - Attributes ---------- _front: _Node front of the deque a.k.a. the first element - _back: _Node back of the element a.k.a. the last element - _len: int the number of nodes """ @@ -51,13 +41,12 @@ class _Node: """ val: Any = None - next: "Deque._Node" = None - prev: "Deque._Node" = None + next: Deque._Node | None = None + prev: Deque._Node | None = None class _Iterator: """ Helper class for iteration. Will be used to implement iteration. - Attributes ---------- _cur: _Node @@ -66,10 +55,10 @@ class _Iterator: __slots__ = ["_cur"] - def __init__(self, cur: "Deque._Node") -> None: + def __init__(self, cur: Deque._Node | None) -> None: self._cur = cur - def __iter__(self) -> "Deque._Iterator": + def __iter__(self) -> Deque._Iterator: """ >>> our_deque = Deque([1, 2, 3]) >>> iterator = iter(our_deque) @@ -95,9 +84,10 @@ def __next__(self) -> Any: return val - def __init__(self, iterable: Iterable = None) -> None: - self._front = self._back = None - self._len = 0 + def __init__(self, iterable: Iterable[Any] | None = None) -> None: + self._front: Any = None + self._back: Any = None + self._len: int = 0 if iterable is not None: # append every value to the deque @@ -108,7 +98,6 @@ def append(self, val: Any) -> None: """ Adds val to the end of the deque. Time complexity: O(1) - >>> our_deque_1 = Deque([1, 2, 3]) >>> our_deque_1.append(4) >>> our_deque_1 @@ -117,7 +106,6 @@ def append(self, val: Any) -> None: >>> our_deque_2.append('c') >>> our_deque_2 ['a', 'b', 'c'] - >>> from collections import deque >>> deque_collections_1 = deque([1, 2, 3]) >>> deque_collections_1.append(4) @@ -127,7 +115,6 @@ def append(self, val: Any) -> None: >>> deque_collections_2.append('c') >>> deque_collections_2 deque(['a', 'b', 'c']) - >>> list(our_deque_1) == list(deque_collections_1) True >>> list(our_deque_2) == list(deque_collections_2) @@ -153,7 +140,6 @@ def appendleft(self, val: Any) -> None: """ Adds val to the beginning of the deque. 
Time complexity: O(1) - >>> our_deque_1 = Deque([2, 3]) >>> our_deque_1.appendleft(1) >>> our_deque_1 @@ -162,7 +148,6 @@ def appendleft(self, val: Any) -> None: >>> our_deque_2.appendleft('a') >>> our_deque_2 ['a', 'b', 'c'] - >>> from collections import deque >>> deque_collections_1 = deque([2, 3]) >>> deque_collections_1.appendleft(1) @@ -172,7 +157,6 @@ def appendleft(self, val: Any) -> None: >>> deque_collections_2.appendleft('a') >>> deque_collections_2 deque(['a', 'b', 'c']) - >>> list(our_deque_1) == list(deque_collections_1) True >>> list(our_deque_2) == list(deque_collections_2) @@ -194,11 +178,10 @@ def appendleft(self, val: Any) -> None: # make sure there were no errors assert not self.is_empty(), "Error on appending value." - def extend(self, iter: Iterable) -> None: + def extend(self, iter: Iterable[Any]) -> None: """ Appends every value of iter to the end of the deque. Time complexity: O(n) - >>> our_deque_1 = Deque([1, 2, 3]) >>> our_deque_1.extend([4, 5]) >>> our_deque_1 @@ -207,7 +190,6 @@ def extend(self, iter: Iterable) -> None: >>> our_deque_2.extend('cd') >>> our_deque_2 ['a', 'b', 'c', 'd'] - >>> from collections import deque >>> deque_collections_1 = deque([1, 2, 3]) >>> deque_collections_1.extend([4, 5]) @@ -217,7 +199,6 @@ def extend(self, iter: Iterable) -> None: >>> deque_collections_2.extend('cd') >>> deque_collections_2 deque(['a', 'b', 'c', 'd']) - >>> list(our_deque_1) == list(deque_collections_1) True >>> list(our_deque_2) == list(deque_collections_2) @@ -226,11 +207,10 @@ def extend(self, iter: Iterable) -> None: for val in iter: self.append(val) - def extendleft(self, iter: Iterable) -> None: + def extendleft(self, iter: Iterable[Any]) -> None: """ Appends every value of iter to the beginning of the deque. Time complexity: O(n) - >>> our_deque_1 = Deque([1, 2, 3]) >>> our_deque_1.extendleft([0, -1]) >>> our_deque_1 @@ -239,7 +219,6 @@ def extendleft(self, iter: Iterable) -> None: >>> our_deque_2.extendleft('ba') >>> our_deque_2 ['a', 'b', 'c', 'd'] - >>> from collections import deque >>> deque_collections_1 = deque([1, 2, 3]) >>> deque_collections_1.extendleft([0, -1]) @@ -249,7 +228,6 @@ def extendleft(self, iter: Iterable) -> None: >>> deque_collections_2.extendleft('ba') >>> deque_collections_2 deque(['a', 'b', 'c', 'd']) - >>> list(our_deque_1) == list(deque_collections_1) True >>> list(our_deque_2) == list(deque_collections_2) @@ -262,16 +240,13 @@ def pop(self) -> Any: """ Removes the last element of the deque and returns it. Time complexity: O(1) - @returns topop.val: the value of the node to pop. - >>> our_deque = Deque([1, 2, 3, 15182]) >>> our_popped = our_deque.pop() >>> our_popped 15182 >>> our_deque [1, 2, 3] - >>> from collections import deque >>> deque_collections = deque([1, 2, 3, 15182]) >>> collections_popped = deque_collections.pop() @@ -279,7 +254,6 @@ def pop(self) -> Any: 15182 >>> deque_collections deque([1, 2, 3]) - >>> list(our_deque) == list(deque_collections) True >>> our_popped == collections_popped @@ -302,16 +276,13 @@ def popleft(self) -> Any: """ Removes the first element of the deque and returns it. Time complexity: O(1) - @returns topop.val: the value of the node to pop. 
- >>> our_deque = Deque([15182, 1, 2, 3]) >>> our_popped = our_deque.popleft() >>> our_popped 15182 >>> our_deque [1, 2, 3] - >>> from collections import deque >>> deque_collections = deque([15182, 1, 2, 3]) >>> collections_popped = deque_collections.popleft() @@ -319,7 +290,6 @@ def popleft(self) -> Any: 15182 >>> deque_collections deque([1, 2, 3]) - >>> list(our_deque) == list(deque_collections) True >>> our_popped == collections_popped @@ -340,14 +310,12 @@ def is_empty(self) -> bool: """ Checks if the deque is empty. Time complexity: O(1) - >>> our_deque = Deque([1, 2, 3]) >>> our_deque.is_empty() False >>> our_empty_deque = Deque() >>> our_empty_deque.is_empty() True - >>> from collections import deque >>> empty_deque_collections = deque() >>> list(our_empty_deque) == list(empty_deque_collections) @@ -359,14 +327,12 @@ def __len__(self) -> int: """ Implements len() function. Returns the length of the deque. Time complexity: O(1) - >>> our_deque = Deque([1, 2, 3]) >>> len(our_deque) 3 >>> our_empty_deque = Deque() >>> len(our_empty_deque) 0 - >>> from collections import deque >>> deque_collections = deque([1, 2, 3]) >>> len(deque_collections) @@ -379,11 +345,10 @@ def __len__(self) -> int: """ return self._len - def __eq__(self, other: "Deque") -> bool: + def __eq__(self, other: object) -> bool: """ Implements "==" operator. Returns if *self* is equal to *other*. Time complexity: O(n) - >>> our_deque_1 = Deque([1, 2, 3]) >>> our_deque_2 = Deque([1, 2, 3]) >>> our_deque_1 == our_deque_2 @@ -391,7 +356,6 @@ def __eq__(self, other: "Deque") -> bool: >>> our_deque_3 = Deque([1, 2]) >>> our_deque_1 == our_deque_3 False - >>> from collections import deque >>> deque_collections_1 = deque([1, 2, 3]) >>> deque_collections_2 = deque([1, 2, 3]) @@ -400,12 +364,15 @@ def __eq__(self, other: "Deque") -> bool: >>> deque_collections_3 = deque([1, 2]) >>> deque_collections_1 == deque_collections_3 False - >>> (our_deque_1 == our_deque_2) == (deque_collections_1 == deque_collections_2) True >>> (our_deque_1 == our_deque_3) == (deque_collections_1 == deque_collections_3) True """ + + if not isinstance(other, Deque): + return NotImplemented + me = self._front oth = other._front @@ -422,18 +389,16 @@ def __eq__(self, other: "Deque") -> bool: return True - def __iter__(self) -> "_Iterator": + def __iter__(self) -> Deque._Iterator: """ Implements iteration. Time complexity: O(1) - >>> our_deque = Deque([1, 2, 3]) >>> for v in our_deque: ... print(v) 1 2 3 - >>> from collections import deque >>> deque_collections = deque([1, 2, 3]) >>> for v in deque_collections: @@ -449,7 +414,6 @@ def __repr__(self) -> str: Implements representation of the deque. Represents it as a list, with its values between '[' and ']'. Time complexity: O(n) - >>> our_deque = Deque([1, 2, 3]) >>> our_deque [1, 2, 3] From fe5c711ce68cb1d410d13d8c8a02ee7bfd49b1d3 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Wed, 27 Oct 2021 03:48:43 +0000 Subject: [PATCH 0326/1543] Rewrite parts of Vector and Matrix (#5362) * Rewrite parts of Vector and Matrix methods * Refactor determinant method and add unit tests Refactor determinant method to create separate minor and cofactor methods. Add respective unit tests for new methods. Rename methods using snake case to follow Python naming conventions. 
* Reorganize Vector and Matrix methods * Update linear_algebra/README.md Co-authored-by: John Law * Fix punctuation and wording * Apply suggestions from code review Co-authored-by: John Law Co-authored-by: John Law --- knapsack/README.md | 2 +- linear_algebra/README.md | 44 +-- linear_algebra/src/lib.py | 378 ++++++++++++---------- linear_algebra/src/test_linear_algebra.py | 96 ++++-- 4 files changed, 295 insertions(+), 225 deletions(-) diff --git a/knapsack/README.md b/knapsack/README.md index 6041c1e48eb8..f31e5f591412 100644 --- a/knapsack/README.md +++ b/knapsack/README.md @@ -17,7 +17,7 @@ The knapsack problem has been studied for more than a century, with early works ## Documentation This module uses docstrings to enable the use of Python's in-built `help(...)` function. -For instance, try `help(Vector)`, `help(unitBasisVector)`, and `help(CLASSNAME.METHODNAME)`. +For instance, try `help(Vector)`, `help(unit_basis_vector)`, and `help(CLASSNAME.METHODNAME)`. --- diff --git a/linear_algebra/README.md b/linear_algebra/README.md index dc6085090d02..35b50b5e0f0a 100644 --- a/linear_algebra/README.md +++ b/linear_algebra/README.md @@ -10,56 +10,56 @@ This module contains classes and functions for doing linear algebra. - - This class represents a vector of arbitrary size and related operations. - **Overview about the methods:** + **Overview of the methods:** - - constructor(components : list) : init the vector - - set(components : list) : changes the vector components. + - constructor(components) : init the vector + - set(components) : changes the vector components. - \_\_str\_\_() : toString method - - component(i : int): gets the i-th component (start by 0) + - component(i): gets the i-th component (0-indexed) - \_\_len\_\_() : gets the size / length of the vector (number of components) - - euclidLength() : returns the eulidean length of the vector. + - euclidean_length() : returns the eulidean length of the vector - operator + : vector addition - operator - : vector subtraction - operator * : scalar multiplication and dot product - - copy() : copies this vector and returns it. - - changeComponent(pos,value) : changes the specified component. + - copy() : copies this vector and returns it + - change_component(pos,value) : changes the specified component -- function zeroVector(dimension) +- function zero_vector(dimension) - returns a zero vector of 'dimension' -- function unitBasisVector(dimension,pos) - - returns a unit basis vector with a One at index 'pos' (indexing at 0) -- function axpy(scalar,vector1,vector2) +- function unit_basis_vector(dimension, pos) + - returns a unit basis vector with a one at index 'pos' (0-indexed) +- function axpy(scalar, vector1, vector2) - computes the axpy operation -- function randomVector(N,a,b) - - returns a random vector of size N, with random integer components between 'a' and 'b'. +- function random_vector(N, a, b) + - returns a random vector of size N, with random integer components between 'a' and 'b' inclusive ### class Matrix - - This class represents a matrix of arbitrary size and operations on it. - **Overview about the methods:** + **Overview of the methods:** - \_\_str\_\_() : returns a string representation - operator * : implements the matrix vector multiplication implements the matrix-scalar multiplication. - - changeComponent(x,y,value) : changes the specified component. - - component(x,y) : returns the specified component. + - change_component(x, y, value) : changes the specified component. 
+ - component(x, y) : returns the specified component. - width() : returns the width of the matrix - height() : returns the height of the matrix - - determinate() : returns the determinate of the matrix if it is square + - determinant() : returns the determinant of the matrix if it is square - operator + : implements the matrix-addition. - - operator - _ implements the matrix-subtraction + - operator - : implements the matrix-subtraction -- function squareZeroMatrix(N) +- function square_zero_matrix(N) - returns a square zero-matrix of dimension NxN -- function randomMatrix(W,H,a,b) - - returns a random matrix WxH with integer components between 'a' and 'b' +- function random_matrix(W, H, a, b) + - returns a random matrix WxH with integer components between 'a' and 'b' inclusive --- ## Documentation This module uses docstrings to enable the use of Python's in-built `help(...)` function. -For instance, try `help(Vector)`, `help(unitBasisVector)`, and `help(CLASSNAME.METHODNAME)`. +For instance, try `help(Vector)`, `help(unit_basis_vector)`, and `help(CLASSNAME.METHODNAME)`. --- diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 6a18df5e15c3..dad0a8c0a6a2 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -10,13 +10,13 @@ Overview: - class Vector -- function zeroVector(dimension) -- function unitBasisVector(dimension,pos) -- function axpy(scalar,vector1,vector2) -- function randomVector(N,a,b) +- function zero_vector(dimension) +- function unit_basis_vector(dimension, pos) +- function axpy(scalar, vector1, vector2) +- function random_vector(N, a, b) - class Matrix -- function squareZeroMatrix(N) -- function randomMatrix(W,H,a,b) +- function square_zero_matrix(N) +- function random_matrix(W, H, a, b) """ from __future__ import annotations @@ -30,20 +30,23 @@ class Vector: This class represents a vector of arbitrary size. You need to give the vector components. - Overview about the methods: - - constructor(components : list) : init the vector - set(components : list) : changes the vector components. - __str__() : toString method - component(i : int): gets the i-th component (start by 0) - __len__() : gets the size of the vector (number of components) - euclidLength() : returns the euclidean length of the vector. - operator + : vector addition - operator - : vector subtraction - operator * : scalar multiplication and dot product - copy() : copies this vector and returns it. - changeComponent(pos,value) : changes the specified component. 
- TODO: compare-operator + Overview of the methods: + + __init__(components: Collection[float] | None): init the vector + __len__(): gets the size of the vector (number of components) + __str__(): returns a string representation + __add__(other: Vector): vector addition + __sub__(other: Vector): vector subtraction + __mul__(other: float): scalar multiplication + __mul__(other: Vector): dot product + set(components: Collection[float]): changes the vector components + copy(): copies this vector and returns it + component(i): gets the i-th component (0-indexed) + change_component(pos: int, value: float): changes specified component + euclidean_length(): returns the euclidean length of the vector + magnitude(): returns the magnitude of the vector + angle(other: Vector, deg: bool): returns the angle between two vectors + TODO: compare-operator """ def __init__(self, components: Collection[float] | None = None) -> None: @@ -55,47 +58,17 @@ def __init__(self, components: Collection[float] | None = None) -> None: components = [] self.__components = list(components) - def set(self, components: Collection[float]) -> None: - """ - input: new components - changes the components of the vector. - replace the components with newer one. - """ - if len(components) > 0: - self.__components = list(components) - else: - raise Exception("please give any vector") - - def __str__(self) -> str: - """ - returns a string representation of the vector - """ - return "(" + ",".join(map(str, self.__components)) + ")" - - def component(self, i: int) -> float: - """ - input: index (start at 0) - output: the i-th component of the vector. - """ - if type(i) is int and -len(self.__components) <= i < len(self.__components): - return self.__components[i] - else: - raise Exception("index out of range") - def __len__(self) -> int: """ returns the size of the vector """ return len(self.__components) - def euclidLength(self) -> float: + def __str__(self) -> str: """ - returns the euclidean length of the vector + returns a string representation of the vector """ - summe: float = 0 - for c in self.__components: - summe += c ** 2 - return math.sqrt(summe) + return "(" + ",".join(map(str, self.__components)) + ")" def __add__(self, other: Vector) -> Vector: """ @@ -139,15 +112,57 @@ def __mul__(self, other: float | Vector) -> float | Vector: if isinstance(other, float) or isinstance(other, int): ans = [c * other for c in self.__components] return Vector(ans) - elif isinstance(other, Vector) and (len(self) == len(other)): + elif isinstance(other, Vector) and len(self) == len(other): size = len(self) - summe: float = 0 - for i in range(size): - summe += self.__components[i] * other.component(i) - return summe + prods = [self.__components[i] * other.component(i) for i in range(size)] + return sum(prods) else: # error case raise Exception("invalid operand!") + def set(self, components: Collection[float]) -> None: + """ + input: new components + changes the components of the vector. + replaces the components with newer one. + """ + if len(components) > 0: + self.__components = list(components) + else: + raise Exception("please give any vector") + + def copy(self) -> Vector: + """ + copies this vector and returns it. + """ + return Vector(self.__components) + + def component(self, i: int) -> float: + """ + input: index (0-indexed) + output: the i-th component of the vector. 
+ """ + if type(i) is int and -len(self.__components) <= i < len(self.__components): + return self.__components[i] + else: + raise Exception("index out of range") + + def change_component(self, pos: int, value: float) -> None: + """ + input: an index (pos) and a value + changes the specified component (pos) with the + 'value' + """ + # precondition + assert -len(self.__components) <= pos < len(self.__components) + self.__components[pos] = value + + def euclidean_length(self) -> float: + """ + returns the euclidean length of the vector + """ + squares = [c ** 2 for c in self.__components] + return math.sqrt(sum(squares)) + def magnitude(self) -> float: """ Magnitude of a Vector @@ -156,7 +171,8 @@ def magnitude(self) -> float: 5.385164807134504 """ - return sum([i ** 2 for i in self.__components]) ** (1 / 2) + squares = [c ** 2 for c in self.__components] + return math.sqrt(sum(squares)) def angle(self, other: Vector, deg: bool = False) -> float: """ @@ -178,24 +194,8 @@ def angle(self, other: Vector, deg: bool = False) -> float: else: return math.acos(num / den) - def copy(self) -> Vector: - """ - copies this vector and returns it. - """ - return Vector(self.__components) - - def changeComponent(self, pos: int, value: float) -> None: - """ - input: an index (pos) and a value - changes the specified component (pos) with the - 'value' - """ - # precondition - assert -len(self.__components) <= pos < len(self.__components) - self.__components[pos] = value - -def zeroVector(dimension: int) -> Vector: +def zero_vector(dimension: int) -> Vector: """ returns a zero-vector of size 'dimension' """ @@ -204,7 +204,7 @@ def zeroVector(dimension: int) -> Vector: return Vector([0] * dimension) -def unitBasisVector(dimension: int, pos: int) -> Vector: +def unit_basis_vector(dimension: int, pos: int) -> Vector: """ returns a unit basis vector with a One at index 'pos' (indexing at 0) @@ -225,13 +225,13 @@ def axpy(scalar: float, x: Vector, y: Vector) -> Vector: # precondition assert ( isinstance(x, Vector) - and (isinstance(y, Vector)) + and isinstance(y, Vector) and (isinstance(scalar, int) or isinstance(scalar, float)) ) return x * scalar + y -def randomVector(N: int, a: int, b: int) -> Vector: +def random_vector(n: int, a: int, b: int) -> Vector: """ input: size (N) of the vector. random range (a,b) @@ -239,26 +239,30 @@ def randomVector(N: int, a: int, b: int) -> Vector: random integer components between 'a' and 'b'. """ random.seed(None) - ans = [random.randint(a, b) for _ in range(N)] + ans = [random.randint(a, b) for _ in range(n)] return Vector(ans) class Matrix: """ class: Matrix - This class represents a arbitrary matrix. - - Overview about the methods: - - __str__() : returns a string representation - operator * : implements the matrix vector multiplication - implements the matrix-scalar multiplication. - changeComponent(x,y,value) : changes the specified component. - component(x,y) : returns the specified component. - width() : returns the width of the matrix - height() : returns the height of the matrix - operator + : implements the matrix-addition. - operator - _ implements the matrix-subtraction + This class represents an arbitrary matrix. 
+ + Overview of the methods: + + __init__(): + __str__(): returns a string representation + __add__(other: Matrix): matrix addition + __sub__(other: Matrix): matrix subtraction + __mul__(other: float): scalar multiplication + __mul__(other: Vector): vector multiplication + height() : returns height + width() : returns width + component(x: int, y: int): returns specified component + change_component(x: int, y: int, value: float): changes specified component + minor(x: int, y: int): returns minor along (x, y) + cofactor(x: int, y: int): returns cofactor along (x, y) + determinant() : returns determinant """ def __init__(self, matrix: list[list[float]], w: int, h: int) -> None: @@ -285,62 +289,37 @@ def __str__(self) -> str: ans += str(self.__matrix[i][j]) + "|\n" return ans - def changeComponent(self, x: int, y: int, value: float) -> None: - """ - changes the x-y component of this matrix - """ - if 0 <= x < self.__height and 0 <= y < self.__width: - self.__matrix[x][y] = value - else: - raise Exception("changeComponent: indices out of bounds") - - def component(self, x: int, y: int) -> float: + def __add__(self, other: Matrix) -> Matrix: """ - returns the specified (x,y) component + implements the matrix-addition. """ - if 0 <= x < self.__height and 0 <= y < self.__width: - return self.__matrix[x][y] + if self.__width == other.width() and self.__height == other.height(): + matrix = [] + for i in range(self.__height): + row = [ + self.__matrix[i][j] + other.component(i, j) + for j in range(self.__width) + ] + matrix.append(row) + return Matrix(matrix, self.__width, self.__height) else: - raise Exception("changeComponent: indices out of bounds") - - def width(self) -> int: - """ - getter for the width - """ - return self.__width + raise Exception("matrix must have the same dimension!") - def height(self) -> int: + def __sub__(self, other: Matrix) -> Matrix: """ - getter for the height + implements the matrix-subtraction. """ - return self.__height - - def determinate(self) -> float: - """ - returns the determinate of an nxn matrix using Laplace expansion - """ - if self.__height == self.__width and self.__width >= 2: - total = 0 - if self.__width > 2: - for x in range(0, self.__width): - for y in range(0, self.__height): - total += ( - self.__matrix[x][y] - * (-1) ** (x + y) - * Matrix( - self.__matrix[0:x] + self.__matrix[x + 1 :], - self.__width - 1, - self.__height - 1, - ).determinate() - ) - else: - return ( - self.__matrix[0][0] * self.__matrix[1][1] - - self.__matrix[0][1] * self.__matrix[1][0] - ) - return total + if self.__width == other.width() and self.__height == other.height(): + matrix = [] + for i in range(self.__height): + row = [ + self.__matrix[i][j] - other.component(i, j) + for j in range(self.__width) + ] + matrix.append(row) + return Matrix(matrix, self.__width, self.__height) else: - raise Exception("matrix is not square") + raise Exception("matrices must have the same dimension!") @overload def __mul__(self, other: float) -> Matrix: @@ -355,20 +334,20 @@ def __mul__(self, other: float | Vector) -> Vector | Matrix: implements the matrix-vector multiplication. 
implements the matrix-scalar multiplication """ - if isinstance(other, Vector): # vector-matrix + if isinstance(other, Vector): # matrix-vector if len(other) == self.__width: - ans = zeroVector(self.__height) + ans = zero_vector(self.__height) for i in range(self.__height): - summe: float = 0 - for j in range(self.__width): - summe += other.component(j) * self.__matrix[i][j] - ans.changeComponent(i, summe) - summe = 0 + prods = [ + self.__matrix[i][j] * other.component(j) + for j in range(self.__width) + ] + ans.change_component(i, sum(prods)) return ans else: raise Exception( "vector must have the same size as the " - + "number of columns of the matrix!" + "number of columns of the matrix!" ) elif isinstance(other, int) or isinstance(other, float): # matrix-scalar matrix = [ @@ -377,52 +356,95 @@ def __mul__(self, other: float | Vector) -> Vector | Matrix: ] return Matrix(matrix, self.__width, self.__height) - def __add__(self, other: Matrix) -> Matrix: + def height(self) -> int: """ - implements the matrix-addition. + getter for the height """ - if self.__width == other.width() and self.__height == other.height(): - matrix = [] - for i in range(self.__height): - row = [] - for j in range(self.__width): - row.append(self.__matrix[i][j] + other.component(i, j)) - matrix.append(row) - return Matrix(matrix, self.__width, self.__height) + return self.__height + + def width(self) -> int: + """ + getter for the width + """ + return self.__width + + def component(self, x: int, y: int) -> float: + """ + returns the specified (x,y) component + """ + if 0 <= x < self.__height and 0 <= y < self.__width: + return self.__matrix[x][y] else: - raise Exception("matrix must have the same dimension!") + raise Exception("change_component: indices out of bounds") - def __sub__(self, other: Matrix) -> Matrix: + def change_component(self, x: int, y: int, value: float) -> None: """ - implements the matrix-subtraction. 
+ changes the x-y component of this matrix """ - if self.__width == other.width() and self.__height == other.height(): - matrix = [] - for i in range(self.__height): - row = [] - for j in range(self.__width): - row.append(self.__matrix[i][j] - other.component(i, j)) - matrix.append(row) - return Matrix(matrix, self.__width, self.__height) + if 0 <= x < self.__height and 0 <= y < self.__width: + self.__matrix[x][y] = value else: - raise Exception("matrix must have the same dimension!") + raise Exception("change_component: indices out of bounds") + + def minor(self, x: int, y: int) -> float: + """ + returns the minor along (x, y) + """ + if self.__height != self.__width: + raise Exception("Matrix is not square") + minor = self.__matrix[:x] + self.__matrix[x + 1 :] + for i in range(len(minor)): + minor[i] = minor[i][:y] + minor[i][y + 1 :] + return Matrix(minor, self.__width - 1, self.__height - 1).determinant() + + def cofactor(self, x: int, y: int) -> float: + """ + returns the cofactor (signed minor) along (x, y) + """ + if self.__height != self.__width: + raise Exception("Matrix is not square") + if 0 <= x < self.__height and 0 <= y < self.__width: + return (-1) ** (x + y) * self.minor(x, y) + else: + raise Exception("Indices out of bounds") + + def determinant(self) -> float: + """ + returns the determinant of an nxn matrix using Laplace expansion + """ + if self.__height != self.__width: + raise Exception("Matrix is not square") + if self.__height < 1: + raise Exception("Matrix has no element") + elif self.__height == 1: + return self.__matrix[0][0] + elif self.__height == 2: + return ( + self.__matrix[0][0] * self.__matrix[1][1] + - self.__matrix[0][1] * self.__matrix[1][0] + ) + else: + cofactor_prods = [ + self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width) + ] + return sum(cofactor_prods) -def squareZeroMatrix(N: int) -> Matrix: +def square_zero_matrix(n: int) -> Matrix: """ returns a square zero-matrix of dimension NxN """ - ans: list[list[float]] = [[0] * N for _ in range(N)] - return Matrix(ans, N, N) + ans: list[list[float]] = [[0] * n for _ in range(n)] + return Matrix(ans, n, n) -def randomMatrix(W: int, H: int, a: int, b: int) -> Matrix: +def random_matrix(width: int, height: int, a: int, b: int) -> Matrix: """ returns a random matrix WxH with integer components between 'a' and 'b' """ random.seed(None) matrix: list[list[float]] = [ - [random.randint(a, b) for _ in range(W)] for _ in range(H) + [random.randint(a, b) for _ in range(width)] for _ in range(height) ] - return Matrix(matrix, W, H) + return Matrix(matrix, width, height) diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index 0954a2d932b7..de7041a17038 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -8,13 +8,20 @@ """ import unittest -from .lib import Matrix, Vector, axpy, squareZeroMatrix, unitBasisVector, zeroVector +from .lib import ( + Matrix, + Vector, + axpy, + square_zero_matrix, + unit_basis_vector, + zero_vector, +) class Test(unittest.TestCase): def test_component(self) -> None: """ - test for method component + test for method component() """ x = Vector([1, 2, 3]) self.assertEqual(x.component(0), 1) @@ -23,24 +30,24 @@ def test_component(self) -> None: def test_str(self) -> None: """ - test for toString() method + test for method toString() """ x = Vector([0, 0, 0, 0, 0, 1]) self.assertEqual(str(x), "(0,0,0,0,0,1)") def test_size(self) -> None: """ - test for size()-method + test 
for method size() """ x = Vector([1, 2, 3, 4]) self.assertEqual(len(x), 4) def test_euclidLength(self) -> None: """ - test for the eulidean length + test for method euclidean_length() """ x = Vector([1, 2]) - self.assertAlmostEqual(x.euclidLength(), 2.236, 3) + self.assertAlmostEqual(x.euclidean_length(), 2.236, 3) def test_add(self) -> None: """ @@ -67,26 +74,26 @@ def test_mul(self) -> None: test for * operator """ x = Vector([1, 2, 3]) - a = Vector([2, -1, 4]) # for test of dot-product + a = Vector([2, -1, 4]) # for test of dot product b = Vector([1, -2, -1]) self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)") self.assertEqual((a * b), 0) def test_zeroVector(self) -> None: """ - test for the global function zeroVector(...) + test for global function zero_vector() """ - self.assertTrue(str(zeroVector(10)).count("0") == 10) + self.assertTrue(str(zero_vector(10)).count("0") == 10) def test_unitBasisVector(self) -> None: """ - test for the global function unitBasisVector(...) + test for global function unit_basis_vector() """ - self.assertEqual(str(unitBasisVector(3, 1)), "(0,1,0)") + self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)") def test_axpy(self) -> None: """ - test for the global function axpy(...) (operation) + test for global function axpy() (operation) """ x = Vector([1, 2, 3]) y = Vector([1, 0, 1]) @@ -94,7 +101,7 @@ def test_axpy(self) -> None: def test_copy(self) -> None: """ - test for the copy()-method + test for method copy() """ x = Vector([1, 0, 0, 0, 0, 0]) y = x.copy() @@ -102,53 +109,94 @@ def test_copy(self) -> None: def test_changeComponent(self) -> None: """ - test for the changeComponent(...)-method + test for method change_component() """ x = Vector([1, 0, 0]) - x.changeComponent(0, 0) - x.changeComponent(1, 1) + x.change_component(0, 0) + x.change_component(1, 1) self.assertEqual(str(x), "(0,1,0)") def test_str_matrix(self) -> None: + """ + test for Matrix method str() + """ A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(A)) - def test_determinate(self) -> None: + def test_minor(self) -> None: + """ + test for Matrix method minor() + """ + A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] + for x in range(A.height()): + for y in range(A.width()): + self.assertEqual(minors[x][y], A.minor(x, y)) + + def test_cofactor(self) -> None: """ - test for determinate() + test for Matrix method cofactor() """ - A = Matrix([[1, 1, 4, 5], [3, 3, 3, 2], [5, 1, 9, 0], [9, 7, 7, 9]], 4, 4) - self.assertEqual(-376, A.determinate()) + A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] + for x in range(A.height()): + for y in range(A.width()): + self.assertEqual(cofactors[x][y], A.cofactor(x, y)) + + def test_determinant(self) -> None: + """ + test for Matrix method determinant() + """ + A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + self.assertEqual(-5, A.determinant()) def test__mul__matrix(self) -> None: + """ + test for Matrix * operator + """ A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3) x = Vector([1, 2, 3]) self.assertEqual("(14,32,50)", str(A * x)) self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(A * 2)) - def test_changeComponent_matrix(self) -> None: + def test_change_component_matrix(self) -> None: + """ + test for Matrix method change_component() + """ A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - A.changeComponent(0, 2, 5) + A.change_component(0, 2, 5) 
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(A)) def test_component_matrix(self) -> None: + """ + test for Matrix method component() + """ A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) self.assertEqual(7, A.component(2, 1), 0.01) def test__add__matrix(self) -> None: + """ + test for Matrix + operator + """ A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(A + B)) def test__sub__matrix(self) -> None: + """ + test for Matrix - operator + """ A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(A - B)) def test_squareZeroMatrix(self) -> None: + """ + test for global function square_zero_matrix() + """ self.assertEqual( - "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|" + "\n|0,0,0,0,0|\n", - str(squareZeroMatrix(5)), + "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", + str(square_zero_matrix(5)), ) From ce9a139b56735ee05fc21679b6a8e35940c7ca77 Mon Sep 17 00:00:00 2001 From: harshitkap00r <76745800+harshitkap00r@users.noreply.github.com> Date: Wed, 27 Oct 2021 09:55:48 +0530 Subject: [PATCH 0327/1543] Update binary_search.py (#4856) Take less time to calculate --- searches/binary_search.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/searches/binary_search.py b/searches/binary_search.py index 0966cd8de857..88fee47157c6 100644 --- a/searches/binary_search.py +++ b/searches/binary_search.py @@ -51,7 +51,7 @@ def bisect_left( hi = len(sorted_collection) while lo < hi: - mid = (lo + hi) // 2 + mid = lo + (hi - lo) // 2 if sorted_collection[mid] < item: lo = mid + 1 else: @@ -96,7 +96,7 @@ def bisect_right( hi = len(sorted_collection) while lo < hi: - mid = (lo + hi) // 2 + mid = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: lo = mid + 1 else: From 329feb492843129a285e8ec11b9d0c07c28579ba Mon Sep 17 00:00:00 2001 From: Prakhar Gurunani Date: Wed, 27 Oct 2021 14:49:04 +0530 Subject: [PATCH 0328/1543] Add Project Euler Problem 078 solution 01 (#5565) * Create sol1.py * updating DIRECTORY.md * Create __init__.py * Add docstring * Reformat with black * Fix flake8 issues * Add EOL * Fix formatting issues * Add docstring * Add func return type * Change return type * Remove test print statement * Reformat code * Fix return types * Break loop * Update doctest sol * Update project_euler/problem_078/sol1.py Co-authored-by: John Law * Added doctest and changed return type * Add int() * Fix flake8 issues * Use argument instead of fixed constant * Update sol1.py * fix sol1.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: John Law --- DIRECTORY.md | 2 + project_euler/problem_078/__init__.py | 0 project_euler/problem_078/sol1.py | 55 +++++++++++++++++++++++++++ 3 files changed, 57 insertions(+) create mode 100644 project_euler/problem_078/__init__.py create mode 100644 project_euler/problem_078/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 6fbd5e2cc1c5..c94fb78d6275 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -788,6 +788,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_076/sol1.py) * Problem 077 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_077/sol1.py) + * Problem 078 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_078/sol1.py) * Problem 080 * 
[Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_080/sol1.py) * Problem 081 diff --git a/project_euler/problem_078/__init__.py b/project_euler/problem_078/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_078/sol1.py b/project_euler/problem_078/sol1.py new file mode 100644 index 000000000000..f92cf0f4020c --- /dev/null +++ b/project_euler/problem_078/sol1.py @@ -0,0 +1,55 @@ +""" +Problem 78 +Url: https://projecteuler.net/problem=78 +Statement: +Let p(n) represent the number of different ways in which n coins +can be separated into piles. For example, five coins can be separated +into piles in exactly seven different ways, so p(5)=7. + + OOOOO + OOOO O + OOO OO + OOO O O + OO OO O + OO O O O + O O O O O +Find the least value of n for which p(n) is divisible by one million. +""" + +import itertools + + +def solution(number: int = 1000000) -> int: + """ + >>> solution() + 55374 + """ + partitions = [1] + + for i in itertools.count(len(partitions)): + item = 0 + for j in itertools.count(1): + sign = -1 if j % 2 == 0 else +1 + index = (j * j * 3 - j) // 2 + if index > i: + break + item += partitions[i - index] * sign + index += j + if index > i: + break + item += partitions[i - index] * sign + item %= number + + if item == 0: + return i + partitions.append(item) + + return 0 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + print(f"{solution() = }") From 615c428903602bf40d2e15dec44be914409fe804 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Wed, 27 Oct 2021 22:00:03 +0530 Subject: [PATCH 0329/1543] Add doctest for exception (#5629) * Add doctest for exception * Spelling correction --- maths/area.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/maths/area.py b/maths/area.py index 13c05af5f68e..7b39312cfaf0 100644 --- a/maths/area.py +++ b/maths/area.py @@ -203,6 +203,18 @@ def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float Traceback (most recent call last): ... ValueError: area_triangle_three_sides() only accepts non-negative values + >>> area_triangle_three_sides(2, 4, 7) + Traceback (most recent call last): + ... + ValueError: Given three sides do not form a triangle + >>> area_triangle_three_sides(2, 7, 4) + Traceback (most recent call last): + ... + ValueError: Given three sides do not form a triangle + >>> area_triangle_three_sides(7, 2, 4) + Traceback (most recent call last): + ... + ValueError: Given three sides do not form a triangle """ if side1 < 0 or side2 < 0 or side3 < 0: raise ValueError("area_triangle_three_sides() only accepts non-negative values") From 6b6762bde9e242c21bae147c0e0d56bd072ece96 Mon Sep 17 00:00:00 2001 From: "@im_8055" <38890773+Bhargavishnu@users.noreply.github.com> Date: Wed, 27 Oct 2021 22:48:21 +0530 Subject: [PATCH 0330/1543] Fix pull request template (#5633) The existing template uses * to apply bold font weight. As we already have the ### to markdown the text as heading, its redundant to have the *s. --- .github/pull_request_template.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 103ecf7c288a..4d2265968612 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,4 +1,4 @@ -### **Describe your change:** +### Describe your change: @@ -6,7 +6,7 @@ * [ ] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? 
-### **Checklist:** +### Checklist: * [ ] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [ ] This pull request is all my own work -- I have not plagiarized. * [ ] I know that pull requests will not be merged if they fail the automated tests. From bf6db32ec2fb04b6477722f0809c5efef0cad813 Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Thu, 28 Oct 2021 11:05:31 -0300 Subject: [PATCH 0331/1543] [mypy] Fix type annotations for binary tree traversals in data structures (#5556) * [mypy] Fix type annotations for binary tree traversals in data structures * Change variable name and update level_order_1 to use a deque Using a deque instead of a list here, because since we are removing from the beginning of the list, the deque will be more efficient. * remove duplicate function * Update data_structures/binary_tree/binary_tree_traversals.py Co-authored-by: John Law * fix function name at line 137 * Update data_structures/binary_tree/binary_tree_traversals.py Co-authored-by: John Law * Update data_structures/binary_tree/binary_tree_traversals.py Co-authored-by: John Law * Remove type alias and use the new syntax * Update data_structures/binary_tree/binary_tree_traversals.py Co-authored-by: John Law * Remove prints inside functions and return lists Co-authored-by: John Law --- .../binary_tree/binary_tree_traversals.py | 158 ++++++++++-------- 1 file changed, 92 insertions(+), 66 deletions(-) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index de9e9d60d272..9a62393914da 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -1,7 +1,9 @@ # https://en.wikipedia.org/wiki/Tree_traversal from __future__ import annotations +from collections import deque from dataclasses import dataclass +from typing import Any, Sequence @dataclass @@ -11,11 +13,11 @@ class Node: right: Node | None = None -def make_tree() -> Node: +def make_tree() -> Node | None: return Node(1, Node(2, Node(4), Node(5)), Node(3)) -def preorder(root: Node): +def preorder(root: Node | None) -> list[int]: """ Pre-order traversal visits root node, left subtree, right subtree. >>> preorder(make_tree()) @@ -24,7 +26,7 @@ def preorder(root: Node): return [root.data] + preorder(root.left) + preorder(root.right) if root else [] -def postorder(root: Node): +def postorder(root: Node | None) -> list[int]: """ Post-order traversal visits left subtree, right subtree, root node. >>> postorder(make_tree()) @@ -33,7 +35,7 @@ def postorder(root: Node): return postorder(root.left) + postorder(root.right) + [root.data] if root else [] -def inorder(root: Node): +def inorder(root: Node | None) -> list[int]: """ In-order traversal visits left subtree, root node, right subtree. >>> inorder(make_tree()) @@ -42,7 +44,7 @@ def inorder(root: Node): return inorder(root.left) + [root.data] + inorder(root.right) if root else [] -def height(root: Node): +def height(root: Node | None) -> int: """ Recursive function for calculating the height of the binary tree. >>> height(None) @@ -53,80 +55,99 @@ def height(root: Node): return (max(height(root.left), height(root.right)) + 1) if root else 0 -def level_order_1(root: Node): +def level_order(root: Node | None) -> Sequence[Node | None]: """ - Print whole binary tree in Level Order Traverse. + Returns a list of nodes value from a whole binary tree in Level Order Traverse. 
Level Order traverse: Visit nodes of the tree level-by-level. """ - if not root: - return - temp = root - que = [temp] - while len(que) > 0: - print(que[0].data, end=" ") - temp = que.pop(0) - if temp.left: - que.append(temp.left) - if temp.right: - que.append(temp.right) - return que + output: list[Any] = [] + if root is None: + return output -def level_order_2(root: Node, level: int): - """ - Level-wise traversal: Print all nodes present at the given level of the binary tree - """ - if not root: - return root - if level == 1: - print(root.data, end=" ") - elif level > 1: - level_order_2(root.left, level - 1) - level_order_2(root.right, level - 1) + process_queue = deque([root]) + + while process_queue: + node = process_queue.popleft() + output.append(node.data) + if node.left: + process_queue.append(node.left) + if node.right: + process_queue.append(node.right) + return output -def print_left_to_right(root: Node, level: int): + +def get_nodes_from_left_to_right( + root: Node | None, level: int +) -> Sequence[Node | None]: """ - Print elements on particular level from left to right direction of the binary tree. + Returns a list of nodes value from a particular level: + Left to right direction of the binary tree. """ - if not root: - return - if level == 1: - print(root.data, end=" ") - elif level > 1: - print_left_to_right(root.left, level - 1) - print_left_to_right(root.right, level - 1) + output: list[Any] = [] + + def populate_output(root: Node | None, level: int) -> None: + if not root: + return + if level == 1: + output.append(root.data) + elif level > 1: + populate_output(root.left, level - 1) + populate_output(root.right, level - 1) -def print_right_to_left(root: Node, level: int): + populate_output(root, level) + return output + + +def get_nodes_from_right_to_left( + root: Node | None, level: int +) -> Sequence[Node | None]: """ - Print elements on particular level from right to left direction of the binary tree. + Returns a list of nodes value from a particular level: + Right to left direction of the binary tree. """ - if not root: - return - if level == 1: - print(root.data, end=" ") - elif level > 1: - print_right_to_left(root.right, level - 1) - print_right_to_left(root.left, level - 1) + output: list[Any] = [] + + def populate_output(root: Node | None, level: int) -> None: + if root is None: + return + if level == 1: + output.append(root.data) + elif level > 1: + populate_output(root.right, level - 1) + populate_output(root.left, level - 1) + populate_output(root, level) + return output -def zigzag(root: Node): + +def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]: """ - ZigZag traverse: Print node left to right and right to left, alternatively. + ZigZag traverse: + Returns a list of nodes value from left to right and right to left, alternatively. """ + if root is None: + return [] + + output: list[Sequence[Node | None]] = [] + flag = 0 height_tree = height(root) + for h in range(1, height_tree + 1): - if flag == 0: - print_left_to_right(root, h) + if not flag: + output.append(get_nodes_from_left_to_right(root, h)) flag = 1 else: - print_right_to_left(root, h) + output.append(get_nodes_from_right_to_left(root, h)) flag = 0 + return output + -def main(): # Main function for testing. +def main() -> None: # Main function for testing. """ Create binary tree. """ @@ -134,18 +155,23 @@ def main(): # Main function for testing. 
""" All Traversals of the binary are as follows: """ - print(f" In-order Traversal is {inorder(root)}") - print(f" Pre-order Traversal is {preorder(root)}") - print(f"Post-order Traversal is {postorder(root)}") - print(f"Height of Tree is {height(root)}") - print("Complete Level Order Traversal is : ") - level_order_1(root) - print("\nLevel-wise order Traversal is : ") - for h in range(1, height(root) + 1): - level_order_2(root, h) - print("\nZigZag order Traversal is : ") - zigzag(root) - print() + + print(f"In-order Traversal: {inorder(root)}") + print(f"Pre-order Traversal: {preorder(root)}") + print(f"Post-order Traversal: {postorder(root)}", "\n") + + print(f"Height of Tree: {height(root)}", "\n") + + print("Complete Level Order Traversal: ") + print(level_order(root), "\n") + + print("Level-wise order Traversal: ") + + for level in range(1, height(root) + 1): + print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level)) + + print("\nZigZag order Traversal: ") + print(zigzag(root)) if __name__ == "__main__": From 70368a757e8d37b9f3dd96af4ca535275cb39580 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=ADctor=20A=2E=20Rodr=C3=ADguez?= Date: Thu, 28 Oct 2021 11:43:24 -0300 Subject: [PATCH 0332/1543] Implement Circular Queue using linked lists. Fixes TheAlgorithms#5361 (#5587) * CircularQueueLinkedList: empty list, trivial implementation TheAlgorithms#5361 * CircularQueueLinkedList: single element list TheAlgorithms#5361 * CircularQueueLinkedList: refactor, no que empty attribute TheAlgorithms#5361 * CircularQueueLinkedList: refactor TheAlgorithms#5361 * CircularQueueLinkedList: changed internal data structure to use double linked list TheAlgorithms#5361 * CircularQueueLinkedList: enqueue test cases added TheAlgorithms#5361 * CircularQueueLinkedList: track full queue TheAlgorithms#5361 * CircularQueueLinkedList: adding functions description TheAlgorithms#5361 * CircularQueueLinkedList: type hints TheAlgorithms#5361 * CircularQueueLinkedList: algorithm explanation TheAlgorithms#5361 * CircularQueueLinkedList: missing type hints TheAlgorithms#5361 * CircularQueueLinkedList: more missing type hints TheAlgorithms#5361 * Update data_structures/queue/circular_queue_linked_list.py Co-authored-by: John Law --- .../queue/circular_queue_linked_list.py | 150 ++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 data_structures/queue/circular_queue_linked_list.py diff --git a/data_structures/queue/circular_queue_linked_list.py b/data_structures/queue/circular_queue_linked_list.py new file mode 100644 index 000000000000..1878403bd2ef --- /dev/null +++ b/data_structures/queue/circular_queue_linked_list.py @@ -0,0 +1,150 @@ +# Implementation of Circular Queue using linked lists +# https://en.wikipedia.org/wiki/Circular_buffer + +from typing import Any + + +class CircularQueueLinkedList: + """ + Circular FIFO list with the given capacity (default queue length : 6) + + >>> cq = CircularQueueLinkedList(2) + >>> cq.enqueue('a') + >>> cq.enqueue('b') + >>> cq.enqueue('c') + Traceback (most recent call last): + ... 
+ Exception: Full Queue + """ + + def __init__(self, initial_capacity: int = 6) -> None: + self.front = None + self.rear = None + self.create_linked_list(initial_capacity) + + def create_linked_list(self, initial_capacity: int) -> None: + current_node = Node() + self.front = current_node + self.rear = current_node + previous_node = current_node + for i in range(1, initial_capacity): + current_node = Node() + previous_node.next = current_node + current_node.prev = previous_node + previous_node = current_node + previous_node.next = self.front + self.front.prev = previous_node + + def is_empty(self) -> bool: + """ + Checks where the queue is empty or not + >>> cq = CircularQueueLinkedList() + >>> cq.is_empty() + True + >>> cq.enqueue('a') + >>> cq.is_empty() + False + >>> cq.dequeue() + 'a' + >>> cq.is_empty() + True + """ + return self.front == self.rear and self.front.data is None + + def first(self) -> Any: + """ + Returns the first element of the queue + >>> cq = CircularQueueLinkedList() + >>> cq.first() + Traceback (most recent call last): + ... + Exception: Empty Queue + >>> cq.enqueue('a') + >>> cq.first() + 'a' + >>> cq.dequeue() + 'a' + >>> cq.first() + Traceback (most recent call last): + ... + Exception: Empty Queue + >>> cq.enqueue('b') + >>> cq.enqueue('c') + >>> cq.first() + 'b' + """ + self.check_can_perform_operation() + return self.front.data + + def enqueue(self, data: Any) -> None: + """ + Saves data at the end of the queue + + >>> cq = CircularQueueLinkedList() + >>> cq.enqueue('a') + >>> cq.enqueue('b') + >>> cq.dequeue() + 'a' + >>> cq.dequeue() + 'b' + >>> cq.dequeue() + Traceback (most recent call last): + ... + Exception: Empty Queue + """ + self.check_is_full() + if self.is_empty(): + self.rear.data = data + else: + self.rear = self.rear.next + self.rear.data = data + + def dequeue(self) -> Any: + """ + Removes and retrieves the first element of the queue + + >>> cq = CircularQueueLinkedList() + >>> cq.dequeue() + Traceback (most recent call last): + ... + Exception: Empty Queue + >>> cq.enqueue('a') + >>> cq.dequeue() + 'a' + >>> cq.dequeue() + Traceback (most recent call last): + ... 
+ Exception: Empty Queue + """ + self.check_can_perform_operation() + if self.front == self.rear: + data = self.front.data + self.front.data = None + return data + + old_front = self.front + self.front = old_front.next + data = old_front.data + old_front.data = None + return data + + def check_can_perform_operation(self) -> None: + if self.is_empty(): + raise Exception("Empty Queue") + + def check_is_full(self) -> None: + if self.rear.next == self.front: + raise Exception("Full Queue") + + +class Node: + def __init__(self) -> None: + self.data = None + self.next = None + self.prev = None + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 477cc3fe597fd931c742700284016b937c778fe1 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 28 Oct 2021 16:45:59 +0200 Subject: [PATCH 0333/1543] Add pyupgrade to pre-commit (#5638) * Add pyupgrade to pre-commit * Remove unused imports * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 ++++++ DIRECTORY.md | 3 +++ data_structures/binary_tree/merge_two_binary_trees.py | 4 +--- data_structures/linked_list/skip_list.py | 4 ++-- 4 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b666e88aa162..e60003051365 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,6 +22,12 @@ repos: - id: isort args: - --profile=black + - repo: https://github.com/asottile/pyupgrade + rev: v2.29.0 + hooks: + - id: pyupgrade + args: + - --py39-plus - repo: https://gitlab.com/pycqa/flake8 rev: 3.9.1 hooks: diff --git a/DIRECTORY.md b/DIRECTORY.md index c94fb78d6275..2acfff69dff4 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -126,6 +126,7 @@ * [Molecular Chemistry](https://github.com/TheAlgorithms/Python/blob/master/conversions/molecular_chemistry.py) * [Octal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/octal_to_decimal.py) * [Prefix Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions.py) + * [Pressure Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/pressure_conversions.py) * [Rgb Hsv Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/rgb_hsv_conversion.py) * [Roman Numerals](https://github.com/TheAlgorithms/Python/blob/master/conversions/roman_numerals.py) * [Temperature Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/temperature_conversions.py) @@ -860,6 +861,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_301/sol1.py) * Problem 551 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_551/sol1.py) + * Problem 686 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_686/sol1.py) ## Quantum * [Deutsch Jozsa](https://github.com/TheAlgorithms/Python/blob/master/quantum/deutsch_jozsa.py) diff --git a/data_structures/binary_tree/merge_two_binary_trees.py b/data_structures/binary_tree/merge_two_binary_trees.py index 7487268940d3..3380f8c5fb31 100644 --- a/data_structures/binary_tree/merge_two_binary_trees.py +++ b/data_structures/binary_tree/merge_two_binary_trees.py @@ -7,8 +7,6 @@ """ from __future__ import annotations -from typing import Optional - class Node: """ @@ -21,7 +19,7 @@ def __init__(self, value: int = 0) -> None: self.right: Node | None = None -def merge_two_binary_trees(tree1: Node | None, tree2: Node | None) -> 
Optional[Node]: +def merge_two_binary_trees(tree1: Node | None, tree2: Node | None) -> Node | None: """ Returns root node of the merged tree. diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index be30592ec77d..176049120aab 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -5,14 +5,14 @@ from __future__ import annotations from random import random -from typing import Generic, Optional, TypeVar, Union +from typing import Generic, TypeVar KT = TypeVar("KT") VT = TypeVar("VT") class Node(Generic[KT, VT]): - def __init__(self, key: Union[KT, str] = "root", value: Optional[VT] = None): + def __init__(self, key: KT | str = "root", value: VT | None = None): self.key = key self.value = value self.forward: list[Node[KT, VT]] = [] From 11a15cc5842bb44a81bc8ee56af8f25d92a74287 Mon Sep 17 00:00:00 2001 From: Naveen Namani Date: Thu, 28 Oct 2021 22:57:14 +0530 Subject: [PATCH 0334/1543] Add solution for Project Euler problem 67 (#5519) * New solution for Euler problem 67 A faster and memory efficient solution based on the template of sol1.py. Modified the solution to be more memory efficient while reading and generating the array and during the solution finding. No conditions and straightforward logic. * added return type hint * Update project_euler/problem_067/sol2.py Preferring comprehensions over map Co-authored-by: Christian Clauss * Update sol2.py Self explanatory variable names * Updated sol2 to problem 067 in directory * Update project_euler/problem_067/sol2.py Co-authored-by: Christian Clauss * Update project_euler/problem_067/sol2.py Co-authored-by: Christian Clauss * Fixed extra line Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + project_euler/problem_067/sol2.py | 39 +++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 project_euler/problem_067/sol2.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 2acfff69dff4..a8986f195901 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -771,6 +771,7 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_065/sol1.py) * Problem 067 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_067/sol1.py) + * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_067/sol2.py) * Problem 069 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_069/sol1.py) * Problem 070 diff --git a/project_euler/problem_067/sol2.py b/project_euler/problem_067/sol2.py new file mode 100644 index 000000000000..2e88a57170a8 --- /dev/null +++ b/project_euler/problem_067/sol2.py @@ -0,0 +1,39 @@ +""" +Problem Statement: +By starting at the top of the triangle below and moving to adjacent numbers on +the row below, the maximum total from top to bottom is 23. +3 +7 4 +2 4 6 +8 5 9 3 +That is, 3 + 7 + 4 + 9 = 23. +Find the maximum total from top to bottom in triangle.txt (right click and +'Save Link/Target As...'), a 15K text file containing a triangle with +one-hundred rows. +""" +import os + + +def solution() -> int: + """ + Finds the maximum total in a triangle as described by the problem statement + above. 
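As a quick check of the bottom-up folding used here: on the 4-row triangle from the statement, folding [8, 5, 9, 3] into [2, 4, 6] gives [10, 13, 15], folding that into [7, 4] gives [20, 19], and folding that into [3] gives [23], the stated maximum.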
+ >>> solution() + 7273 + """ + script_dir = os.path.dirname(os.path.realpath(__file__)) + triangle_path = os.path.join(script_dir, "triangle.txt") + + with open(triangle_path) as in_file: + triangle = [[int(i) for i in line.split()] for line in in_file] + + while len(triangle) != 1: + last_row = triangle.pop() + curr_row = triangle[-1] + for j in range(len(last_row) - 1): + curr_row[j] += max(last_row[j], last_row[j + 1]) + return triangle[0][0] + + +if __name__ == "__main__": + print(solution()) From 61e1dd27b0db00302cec75fca5365d08e81ab707 Mon Sep 17 00:00:00 2001 From: poloso Date: Thu, 28 Oct 2021 15:31:32 -0500 Subject: [PATCH 0335/1543] [mypy] Fix type annotation in euler_method.py (#5649) * [mypy] Fix type annotation in euler_method.py In line with issue #4052. * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + maths/euler_method.py | 27 +++++++++++++++++---------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index a8986f195901..434cddbfd32c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -186,6 +186,7 @@ * [Swap Nodes](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/swap_nodes.py) * Queue * [Circular Queue](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/circular_queue.py) + * [Circular Queue Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/circular_queue_linked_list.py) * [Double Ended Queue](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/double_ended_queue.py) * [Linked Queue](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/linked_queue.py) * [Priority Queue Using List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/priority_queue_using_list.py) diff --git a/maths/euler_method.py b/maths/euler_method.py index 7c780198602b..155ef28d1f49 100644 --- a/maths/euler_method.py +++ b/maths/euler_method.py @@ -1,18 +1,25 @@ +from typing import Callable + import numpy as np -def explicit_euler(ode_func, y0, x0, step_size, x_end): - """ - Calculate numeric solution at each step to an ODE using Euler's Method +def explicit_euler( + ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float +) -> np.ndarray: + """Calculate numeric solution at each step to an ODE using Euler's Method + + For reference to Euler's method refer to https://en.wikipedia.org/wiki/Euler_method. - https://en.wikipedia.org/wiki/Euler_method + Args: + ode_func (Callable): The ordinary differential equation + as a function of x and y. + y0 (float): The initial value for y. + x0 (float): The initial value for x. + step_size (float): The increment value for x. + x_end (float): The final value of x to be calculated. - Arguments: - ode_func -- The ode as a function of x and y - y0 -- the initial value for y - x0 -- the initial value for x - stepsize -- the increment value for x - x_end -- the end value for x + Returns: + np.ndarray: Solution of y for every step in x. 
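Each step applies the explicit update y[k + 1] = y[k] + step_size * ode_func(x[k], y[k]), starting from y[0] = y0 at x[0] = x0.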
>>> # the exact solution is math.exp(x) >>> def f(x, y): From 0590d736fa61833c8f8591f7aa3bbea88b8274f9 Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Thu, 28 Oct 2021 17:53:02 -0300 Subject: [PATCH 0336/1543] [mypy] Fix type annotations in `wavelet_tree.py` (#5641) * [mypy] Fix type annotations for wavelet_tree.py * fix a typo --- data_structures/binary_tree/wavelet_tree.py | 22 ++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/data_structures/binary_tree/wavelet_tree.py b/data_structures/binary_tree/wavelet_tree.py index 173a88ab7316..8d7145189018 100644 --- a/data_structures/binary_tree/wavelet_tree.py +++ b/data_structures/binary_tree/wavelet_tree.py @@ -31,7 +31,7 @@ def __repr__(self) -> str: return f"min_value: {self.minn}, max_value: {self.maxx}" -def build_tree(arr: list[int]) -> Node: +def build_tree(arr: list[int]) -> Node | None: """ Builds the tree for arr and returns the root of the constructed tree @@ -51,7 +51,10 @@ def build_tree(arr: list[int]) -> Node: then recursively build trees for left_arr and right_arr """ pivot = (root.minn + root.maxx) // 2 - left_arr, right_arr = [], [] + + left_arr: list[int] = [] + right_arr: list[int] = [] + for index, num in enumerate(arr): if num <= pivot: left_arr.append(num) @@ -63,7 +66,7 @@ def build_tree(arr: list[int]) -> Node: return root -def rank_till_index(node: Node, num: int, index: int) -> int: +def rank_till_index(node: Node | None, num: int, index: int) -> int: """ Returns the number of occurrences of num in interval [0, index] in the list @@ -79,7 +82,7 @@ def rank_till_index(node: Node, num: int, index: int) -> int: >>> rank_till_index(root, 0, 9) 1 """ - if index < 0: + if index < 0 or node is None: return 0 # Leaf node cases if node.minn == node.maxx: @@ -93,7 +96,7 @@ def rank_till_index(node: Node, num: int, index: int) -> int: return rank_till_index(node.right, num, index - node.map_left[index]) -def rank(node: Node, num: int, start: int, end: int) -> int: +def rank(node: Node | None, num: int, start: int, end: int) -> int: """ Returns the number of occurrences of num in interval [start, end] in the list @@ -114,7 +117,7 @@ def rank(node: Node, num: int, start: int, end: int) -> int: return rank_till_end - rank_before_start -def quantile(node: Node, index: int, start: int, end: int) -> int: +def quantile(node: Node | None, index: int, start: int, end: int) -> int: """ Returns the index'th smallest element in interval [start, end] in the list index is 0-indexed @@ -129,7 +132,7 @@ def quantile(node: Node, index: int, start: int, end: int) -> int: >>> quantile(root, 4, 2, 5) -1 """ - if index > (end - start) or start > end: + if index > (end - start) or start > end or node is None: return -1 # Leaf node case if node.minn == node.maxx: @@ -155,10 +158,10 @@ def quantile(node: Node, index: int, start: int, end: int) -> int: def range_counting( - node: Node, start: int, end: int, start_num: int, end_num: int + node: Node | None, start: int, end: int, start_num: int, end_num: int ) -> int: """ - Returns the number of elememts in range [start_num, end_num] + Returns the number of elements in range [start_num, end_num] in interval [start, end] in the list >>> root = build_tree(test_array) @@ -175,6 +178,7 @@ def range_counting( """ if ( start > end + or node is None or start_num > end_num or node.minn > end_num or node.maxx < start_num From 5c8a6c824723e248047ed6eddfad1a4305de7696 Mon Sep 17 00:00:00 2001 From: Marcus T Date: Thu, 28 Oct 2021 19:53:39 -0400 Subject: [PATCH 0337/1543] Add 
Pollard's Rho algorithm for integer factorization (#5598) --- maths/pollard_rho.py | 148 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 maths/pollard_rho.py diff --git a/maths/pollard_rho.py b/maths/pollard_rho.py new file mode 100644 index 000000000000..df020c63f2f9 --- /dev/null +++ b/maths/pollard_rho.py @@ -0,0 +1,148 @@ +from math import gcd +from typing import Union + + +def pollard_rho( + num: int, + seed: int = 2, + step: int = 1, + attempts: int = 3, +) -> Union[int, None]: + """ + Use Pollard's Rho algorithm to return a nontrivial factor of ``num``. + The returned factor may be composite and require further factorization. + If the algorithm will return None if it fails to find a factor within + the specified number of attempts or within the specified number of steps. + If ``num`` is prime, this algorithm is guaranteed to return None. + https://en.wikipedia.org/wiki/Pollard%27s_rho_algorithm + + >>> pollard_rho(18446744073709551617) + 274177 + >>> pollard_rho(97546105601219326301) + 9876543191 + >>> pollard_rho(100) + 2 + >>> pollard_rho(17) + >>> pollard_rho(17**3) + 17 + >>> pollard_rho(17**3, attempts=1) + >>> pollard_rho(3*5*7) + 21 + >>> pollard_rho(1) + Traceback (most recent call last): + ... + ValueError: The input value cannot be less than 2 + """ + # A value less than 2 can cause an infinite loop in the algorithm. + if num < 2: + raise ValueError("The input value cannot be less than 2") + + # Because of the relationship between ``f(f(x))`` and ``f(x)``, this + # algorithm struggles to find factors that are divisible by two. + # As a workaround, we specifically check for two and even inputs. + # See: https://math.stackexchange.com/a/2856214/165820 + if num > 2 and num % 2 == 0: + return 2 + + # Pollard's Rho algorithm requires a function that returns pseudorandom + # values between 0 <= X < ``num``. It doesn't need to be random in the + # sense that the output value is cryptographically secure or difficult + # to calculate, it only needs to be random in the sense that all output + # values should be equally likely to appear. + # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` + # However, the success of Pollard's algorithm isn't guaranteed and is + # determined in part by the initial seed and the chosen random function. + # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` + # where ``C`` is a value that we can modify between each attempt. + def rand_fn(value: int, step: int, modulus: int) -> int: + """ + Returns a pseudorandom value modulo ``modulus`` based on the + input ``value`` and attempt-specific ``step`` size. + + >>> rand_fn(0, 0, 0) + Traceback (most recent call last): + ... + ZeroDivisionError: integer division or modulo by zero + >>> rand_fn(1, 2, 3) + 0 + >>> rand_fn(0, 10, 7) + 3 + >>> rand_fn(1234, 1, 17) + 16 + """ + return (pow(value, 2) + step) % modulus + + for attempt in range(attempts): + # These track the position within the cycle detection logic. + tortoise = seed + hare = seed + + while True: + # At each iteration, the tortoise moves one step and the hare moves two. + tortoise = rand_fn(tortoise, step, num) + hare = rand_fn(hare, step, num) + hare = rand_fn(hare, step, num) + + # At some point both the tortoise and the hare will enter a cycle whose + # length ``p`` is a divisor of ``num``. Once in that cycle, at some point + # the tortoise and hare will end up on the same value modulo ``p``. 
+ # We can detect when this happens because the position difference between + # the tortoise and the hare will share a common divisor with ``num``. + divisor = gcd(hare - tortoise, num) + + if divisor == 1: + # No common divisor yet, just keep searching. + continue + else: + # We found a common divisor! + if divisor == num: + # Unfortunately, the divisor is ``num`` itself and is useless. + break + else: + # The divisor is a nontrivial factor of ``num``! + return divisor + + # If we made it here, then this attempt failed. + # We need to pick a new starting seed for the tortoise and hare + # in addition to a new step value for the random function. + # To keep this example implementation deterministic, the + # new values will be generated based on currently available + # values instead of using something like ``random.randint``. + + # We can use the hare's position as the new seed. + # This is actually what Richard Brent's the "optimized" variant does. + seed = hare + + # The new step value for the random function can just be incremented. + # At first the results will be similar to what the old function would + # have produced, but the value will quickly diverge after a bit. + step += 1 + + # We haven't found a divisor within the requested number of attempts. + # We were unlucky or ``num`` itself is actually prime. + return None + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "num", + type=int, + help="The value to find a divisor of", + ) + parser.add_argument( + "--attempts", + type=int, + default=3, + help="The number of attempts before giving up", + ) + args = parser.parse_args() + + divisor = pollard_rho(args.num, attempts=args.attempts) + if divisor is None: + print(f"{args.num} is probably prime") + else: + quotient = args.num // divisor + print(f"{args.num} = {divisor} * {quotient}") From 0fc24e86296c613f5aa24015518a9f187a2cdbb6 Mon Sep 17 00:00:00 2001 From: Andrew Grangaard Date: Thu, 28 Oct 2021 22:21:16 -0700 Subject: [PATCH 0338/1543] [mypy] Annotates other/scoring_algorithm (#5621) * scoring_algorithm: Moves doctest into function docstring so it will be run * [mypy] annotates other/scoring_algorithm * [mypy] renames temp var to unique value to work around mypy issue in other/scoring_algorithm reusing loop variables with the same name and different types gives this very confusing mypy error response. pyright correctly infers the types without issue. ``` scoring_algorithm.py:58: error: Incompatible types in assignment (expression has type "float", variable has type "List[float]") scoring_algorithm.py:60: error: Unsupported operand types for - ("List[float]" and "float") scoring_algorithm.py:65: error: Incompatible types in assignment (expression has type "float", variable has type "List[float]") scoring_algorithm.py:67: error: Unsupported operand types for - ("List[float]" and "float") Found 4 errors in 1 file (checked 1 source file) ``` * scoring_algorithm: uses enumeration instead of manual indexing on loop var * scoring_algorithm: sometimes we look before we leap. * clean-up: runs `black` to fix formatting --- other/scoring_algorithm.py | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index 77e614e2622c..cc1744012671 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -20,39 +20,38 @@ lowest mileage but newest registration year. 
Thus the weights for each column are as follows: [0, 0, 1] - ->>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1]) -[[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]] """ -def procentual_proximity(source_data: list, weights: list) -> list: +def procentual_proximity( + source_data: list[list[float]], weights: list[int] +) -> list[list[float]]: """ weights - int list possible values - 0 / 1 0 if lower values have higher weight in the data set 1 if higher values have higher weight in the data set + + >>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1]) + [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]] """ # getting data - data_lists = [] - for item in source_data: - for i in range(len(item)): - try: - data_lists[i].append(float(item[i])) - except IndexError: - # generate corresponding number of lists + data_lists: list[list[float]] = [] + for data in source_data: + for i, el in enumerate(data): + if len(data_lists) < i + 1: data_lists.append([]) - data_lists[i].append(float(item[i])) + data_lists[i].append(float(el)) - score_lists = [] + score_lists: list[list[float]] = [] # calculating each score for dlist, weight in zip(data_lists, weights): mind = min(dlist) maxd = max(dlist) - score = [] + score: list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: @@ -75,7 +74,7 @@ def procentual_proximity(source_data: list, weights: list) -> list: score_lists.append(score) # initialize final scores - final_scores = [0 for i in range(len(score_lists[0]))] + final_scores: list[float] = [0 for i in range(len(score_lists[0]))] # generate final scores for i, slist in enumerate(score_lists): From a281151a2c0140dda718186cf82329168e65cf96 Mon Sep 17 00:00:00 2001 From: Andrew Grangaard Date: Fri, 29 Oct 2021 00:22:57 -0700 Subject: [PATCH 0339/1543] Delete other/date_to_weekday.py as a how-to-use, not an algorithm (#5591) * [mypy] Fixes typing errors in other/date_to_weekday * [mypy] uses future annotation style for other/date_to_weekly * date_to_weekday: new implementation replaces buggy original * date_to_weekday: add examples from multiple of 100 years * clean-up: runs `black` to fix formatting * Delete date_to_weekday.py Co-authored-by: Christian Clauss --- other/date_to_weekday.py | 27 --------------------------- 1 file changed, 27 deletions(-) delete mode 100644 other/date_to_weekday.py diff --git a/other/date_to_weekday.py b/other/date_to_weekday.py deleted file mode 100644 index 9dc68666e3b4..000000000000 --- a/other/date_to_weekday.py +++ /dev/null @@ -1,27 +0,0 @@ -from calendar import day_name -from datetime import datetime - - -def date_to_weekday(inp_date: str) -> str: - """ - It returns the day name of the given date string. 
- :param inp_date: - :return: String - >>> date_to_weekday("7/8/2035") - 'Tuesday' - >>> date_to_weekday("7/8/2021") - 'Saturday' - >>> date_to_weekday("1/1/2021") - 'Friday' - """ - day, month, year = (int(x) for x in inp_date.split("/")) - if year % 100 == 0: - year = "00" - new_base_date: str = f"{day}/{month}/{year%100} 0:0:0" - date_time_obj: datetime.date = datetime.strptime(new_base_date, "%d/%m/%y %H:%M:%S") - out_put_day: int = date_time_obj.weekday() - return day_name[out_put_day] - - -if __name__ == "__main__": - print(date_to_weekday("1/1/2021"), end=" ") From 3a4cc7e31084e15cf2cce24038957c686d41a1b3 Mon Sep 17 00:00:00 2001 From: Shriyans Gandhi <41372639+shri30yans@users.noreply.github.com> Date: Fri, 29 Oct 2021 13:09:32 +0530 Subject: [PATCH 0340/1543] Hexagonal number sequence (#5640) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Hexagonal number sequence A hexagonal number sequence is a sequence of figurate numbers where the nth hexagonal number hₙ is the number of distinct dots in a pattern of dots consisting of the outlines of regular hexagons with sides up to n dots, when the hexagons are overlaid so that they share one vertex. This program returns the hexagonal number sequence of n length. * Update hexagonalnumbers.py * Update hexagonalnumbers.py * Update hexagonalnumbers.py * Update hexagonalnumbers.py * Update and rename hexagonalnumbers.py to hexagonal_numbers.py * Length must be a positive integer Co-authored-by: Christian Clauss --- maths/series/hexagonal_numbers.py | 42 +++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 maths/series/hexagonal_numbers.py diff --git a/maths/series/hexagonal_numbers.py b/maths/series/hexagonal_numbers.py new file mode 100644 index 000000000000..582b1989b7c6 --- /dev/null +++ b/maths/series/hexagonal_numbers.py @@ -0,0 +1,42 @@ +""" +A hexagonal number sequence is a sequence of figurate numbers +where the nth hexagonal number hₙ is the number of distinct dots +in a pattern of dots consisting of the outlines of regular +hexagons with sides up to n dots, when the hexagons are overlaid +so that they share one vertex. + + Calculates the hexagonal numbers sequence with a formula + hₙ = n(2n-1) + where: + hₙ --> is nth element of the sequence + n --> is the number of element in the sequence + reference-->"Hexagonal number" Wikipedia + +""" + + +def hexagonal_numbers(length: int) -> list[int]: + """ + :param len: max number of elements + :type len: int + :return: Hexagonal numbers as a list + + Tests: + >>> hexagonal_numbers(10) + [0, 1, 6, 15, 28, 45, 66, 91, 120, 153] + >>> hexagonal_numbers(5) + [0, 1, 6, 15, 28] + >>> hexagonal_numbers(0) + Traceback (most recent call last): + ... + ValueError: Length must be a positive integer. + """ + + if length <= 0 or not isinstance(length, int): + raise ValueError("Length must be a positive integer.") + return [n * (2 * n - 1) for n in range(length)] + + +if __name__ == "__main__": + print(hexagonal_numbers(length=5)) + print(hexagonal_numbers(length=10)) From e6cf13cc03475b3a5e7e3d3bf4723c37c3063dde Mon Sep 17 00:00:00 2001 From: Casper Rysgaard Date: Sat, 30 Oct 2021 13:06:25 +0200 Subject: [PATCH 0341/1543] Update queue implementation (#5388) * Update queue implementation Popping the first element of a list takes O(n) time. Using a cyclic queue takes O(1) time. 
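A minimal sketch of the difference (sample values are arbitrary; all three containers hand elements back in FIFO order, only the cost of removing the head differs):

```python
from collections import deque
from queue import Queue

items = [1, 2, 3, 4, 5]

as_list = list(items)
as_deque = deque(items)
fifo: Queue = Queue()
for item in items:
    fifo.put(item)

# All three return the head element first...
print(as_list.pop(0), as_deque.popleft(), fifo.get())  # 1 1 1
# ...but list.pop(0) shifts every remaining element, so it costs O(n) per
# removal, while deque.popleft() and Queue.get() remove the head in O(1).
```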
* Add queue changes from extra files * Update indentation * Add empty line between imports * Fix lines * Apply suggestions from code review Co-authored-by: John Law Co-authored-by: John Law --- graphs/breadth_first_search.py | 12 +++++++----- graphs/breadth_first_search_2.py | 11 +++++++---- graphs/check_bipartite_graph_bfs.py | 13 ++++++++----- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/graphs/breadth_first_search.py b/graphs/breadth_first_search.py index 7c626429e5c0..9264f57b41b2 100644 --- a/graphs/breadth_first_search.py +++ b/graphs/breadth_first_search.py @@ -3,6 +3,8 @@ """ Author: OMKAR PATHAK """ from __future__ import annotations +from queue import Queue + class Graph: def __init__(self) -> None: @@ -51,19 +53,19 @@ def bfs(self, start_vertex: int) -> set[int]: visited = set() # create a first in first out queue to store all the vertices for BFS - queue = [] + queue = Queue() # mark the source node as visited and enqueue it visited.add(start_vertex) - queue.append(start_vertex) + queue.put(start_vertex) - while queue: - vertex = queue.pop(0) + while not queue.empty(): + vertex = queue.get() # loop through all adjacent vertex and enqueue it if not yet visited for adjacent_vertex in self.vertices[vertex]: if adjacent_vertex not in visited: - queue.append(adjacent_vertex) + queue.put(adjacent_vertex) visited.add(adjacent_vertex) return visited diff --git a/graphs/breadth_first_search_2.py b/graphs/breadth_first_search_2.py index a90e963a4043..4c8b69faf656 100644 --- a/graphs/breadth_first_search_2.py +++ b/graphs/breadth_first_search_2.py @@ -14,6 +14,8 @@ """ from __future__ import annotations +from queue import Queue + G = { "A": ["B", "C"], "B": ["A", "D", "E"], @@ -30,13 +32,14 @@ def breadth_first_search(graph: dict, start: str) -> set[str]: 'ABCDEF' """ explored = {start} - queue = [start] - while queue: - v = queue.pop(0) # queue.popleft() + queue = Queue() + queue.put(start) + while not queue.empty(): + v = queue.get() for w in graph[v]: if w not in explored: explored.add(w) - queue.append(w) + queue.put(w) return explored diff --git a/graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py index 00b771649b5d..b5203b4c5c7d 100644 --- a/graphs/check_bipartite_graph_bfs.py +++ b/graphs/check_bipartite_graph_bfs.py @@ -6,14 +6,17 @@ # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. 
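# For example, the 4-cycle with adjacency list [[1, 3], [0, 2], [1, 3], [0, 2]]
# is bipartite (U = {0, 2}, V = {1, 3}), while the triangle [[1, 2], [0, 2], [0, 1]]
# is not, since no 2-colouring of an odd cycle can keep every edge between the
# two sets; checkBipartite below returns True for the first graph and False for
# the second.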
+from queue import Queue + + def checkBipartite(graph): - queue = [] + queue = Queue() visited = [False] * len(graph) color = [-1] * len(graph) def bfs(): - while queue: - u = queue.pop(0) + while not queue.empty(): + u = queue.get() visited[u] = True for neighbour in graph[u]: @@ -23,7 +26,7 @@ def bfs(): if color[neighbour] == -1: color[neighbour] = 1 - color[u] - queue.append(neighbour) + queue.put(neighbour) elif color[neighbour] == color[u]: return False @@ -32,7 +35,7 @@ def bfs(): for i in range(len(graph)): if not visited[i]: - queue.append(i) + queue.put(i) color[i] = 0 if bfs() is False: return False From e7565f8bfc276e0d58d609f6c39f39b80b92a4a2 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 30 Oct 2021 22:36:12 +0300 Subject: [PATCH 0342/1543] Improve Project Euler problem 070 solution 1 (#5166) * Change has_same_digits doctest * Improve has_same_digits function --- project_euler/problem_070/sol1.py | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/project_euler/problem_070/sol1.py b/project_euler/problem_070/sol1.py index e106800d5716..d42b017cc476 100644 --- a/project_euler/problem_070/sol1.py +++ b/project_euler/problem_070/sol1.py @@ -60,34 +60,16 @@ def has_same_digits(num1: int, num2: int) -> bool: Return True if num1 and num2 have the same frequency of every digit, False otherwise. - digits[] is a frequency table where the index represents the digit from - 0-9, and the element stores the number of appearances. Increment the - respective index every time you see the digit in num1, and decrement if in - num2. At the end, if the numbers have the same digits, every index must - contain 0. - >>> has_same_digits(123456789, 987654321) True - >>> has_same_digits(123, 12) + >>> has_same_digits(123, 23) False >>> has_same_digits(1234566, 123456) False """ - digits = [0] * 10 - - while num1 > 0 and num2 > 0: - digits[num1 % 10] += 1 - digits[num2 % 10] -= 1 - num1 //= 10 - num2 //= 10 - - for digit in digits: - if digit != 0: - return False - - return True + return sorted(str(num1)) == sorted(str(num2)) def solution(max: int = 10000000) -> int: From 678535b5c83302ee25b8a135b977c7e48b8f8668 Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Sat, 30 Oct 2021 16:43:48 -0300 Subject: [PATCH 0343/1543] [mypy] Fix type annotations in non_recursive_segment_tree (#5652) --- .../binary_tree/non_recursive_segment_tree.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index c914079e0a8d..b04a6e5cacb7 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -37,12 +37,12 @@ """ from __future__ import annotations -from typing import Callable, TypeVar +from typing import Any, Callable, Generic, TypeVar T = TypeVar("T") -class SegmentTree: +class SegmentTree(Generic[T]): def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None: """ Segment Tree constructor, it works just with commutative combiner. @@ -55,8 +55,10 @@ def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None: ... 
lambda a, b: (a[0] + b[0], a[1] + b[1])).query(0, 2) (6, 9) """ - self.N = len(arr) - self.st = [None for _ in range(len(arr))] + arr + any_type: Any | T = None + + self.N: int = len(arr) + self.st: list[T] = [any_type for _ in range(self.N)] + arr self.fn = fnc self.build() @@ -83,7 +85,7 @@ def update(self, p: int, v: T) -> None: p = p // 2 self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1]) - def query(self, l: int, r: int) -> T: # noqa: E741 + def query(self, l: int, r: int) -> T | None: # noqa: E741 """ Get range query value in log(N) time :param l: left element index @@ -101,7 +103,8 @@ def query(self, l: int, r: int) -> T: # noqa: E741 7 """ l, r = l + self.N, r + self.N # noqa: E741 - res = None + + res: T | None = None while l <= r: # noqa: E741 if l % 2 == 1: res = self.st[l] if res is None else self.fn(res, self.st[l]) @@ -135,7 +138,7 @@ def query(self, l: int, r: int) -> T: # noqa: E741 max_segment_tree = SegmentTree(test_array, max) sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b) - def test_all_segments(): + def test_all_segments() -> None: """ Test all possible segments """ From 359e0e795e7e0efa4212a3c94fb482e128bc63eb Mon Sep 17 00:00:00 2001 From: Mitheel <81575947+mitheelgajare@users.noreply.github.com> Date: Sun, 31 Oct 2021 01:18:50 +0530 Subject: [PATCH 0344/1543] Fixed grammatical errors in CONTRIBUTING.md (#5635) --- CONTRIBUTING.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f5c123674f4a..4df60ed3f296 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -67,7 +67,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: - Please write in Python 3.9+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. -- Please focus hard on naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. +- Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. - Please follow the [Python Naming Conventions](https://pep8.org/#prescriptive-naming-conventions) so variable_names and function_names should be lower_case, CONSTANTS in UPPERCASE, ClassNames should be CamelCase, etc. @@ -102,7 +102,7 @@ We want your work to be readable by others; therefore, we encourage you to note This is too trivial. Comments are expected to be explanatory. For comments, you can write them above, on or below a line of code, as long as you are consistent within the same piece of code. - We encourage you to put docstrings inside your functions but please pay attention to indentation of docstrings. The following is a good example: + We encourage you to put docstrings inside your functions but please pay attention to the indentation of docstrings. 
The following is a good example: ```python def sum_ab(a, b): @@ -160,7 +160,7 @@ We want your work to be readable by others; therefore, we encourage you to note - [__List comprehensions and generators__](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions) are preferred over the use of `lambda`, `map`, `filter`, `reduce` but the important thing is to demonstrate the power of Python in code that is easy to read and maintain. - Avoid importing external libraries for basic algorithms. Only use those libraries for complicated algorithms. -- If you need a third party module that is not in the file __requirements.txt__, please add it to that file as part of your submission. +- If you need a third-party module that is not in the file __requirements.txt__, please add it to that file as part of your submission. #### Other Requirements for Submissions - If you are submitting code in the `project_euler/` directory, please also read [the dedicated Guideline](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md) before contributing to our Project Euler library. @@ -172,7 +172,7 @@ We want your work to be readable by others; therefore, we encourage you to note - If you have modified/added documentation work, ensure your language is concise and contains no grammar errors. - Do not update the README.md or DIRECTORY.md file which will be periodically autogenerated by our Travis CI processes. - Add a corresponding explanation to [Algorithms-Explanation](https://github.com/TheAlgorithms/Algorithms-Explanation) (Optional but recommended). -- All submissions will be tested with [__mypy__](http://www.mypy-lang.org) so we encourage to add [__Python type hints__](https://docs.python.org/3/library/typing.html) where it makes sense to do so. +- All submissions will be tested with [__mypy__](http://www.mypy-lang.org) so we encourage you to add [__Python type hints__](https://docs.python.org/3/library/typing.html) where it makes sense to do so. - Most importantly, - __Be consistent in the use of these guidelines when submitting.__ From 21c99d2ae294d08893bd0160cd9aacc2ad3ca556 Mon Sep 17 00:00:00 2001 From: Navpreet Singh Devpuri Date: Sun, 31 Oct 2021 01:34:46 +0530 Subject: [PATCH 0345/1543] added is_contains_unique_chars() (#5701) * added is_contains_unique_chars() * added is_contains_unique_chars() * added stackoverflow reference --- strings/is_contains_unique_chars.py | 31 +++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 strings/is_contains_unique_chars.py diff --git a/strings/is_contains_unique_chars.py b/strings/is_contains_unique_chars.py new file mode 100644 index 000000000000..fdf7a02ff43f --- /dev/null +++ b/strings/is_contains_unique_chars.py @@ -0,0 +1,31 @@ +def is_contains_unique_chars(input_str: str) -> bool: + """ + Check if all characters in the string is unique or not. 
+ >>> is_contains_unique_chars("I_love.py") + True + >>> is_contains_unique_chars("I don't love Python") + False + + Time complexity: O(n) + Space compexity: O(1) 19320 bytes as we are having 144697 characters in unicode + """ + + # Each bit will represent each unicode character + # For example 65th bit representing 'A' + # https://stackoverflow.com/a/12811293 + bitmap = 0 + for ch in input_str: + ch_unicode = ord(ch) + ch_bit_index_on = pow(2, ch_unicode) + + # If we already turned on bit for current character's unicode + if bitmap >> ch_unicode & 1 == 1: + return False + bitmap |= ch_bit_index_on + return True + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 99983c91ca26e8cf592699dd320e0d58140cfe41 Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Sun, 31 Oct 2021 05:38:24 -0300 Subject: [PATCH 0346/1543] [mypy] Add/fix type annotations in `data_structures/heap/skew_heap.py` (#5634) * Add abstract base class Comparable * [mypy] Fix type annotations (strict mode) * Fix a typo * Remove Comparable class and set bound to bool --- data_structures/heap/skew_heap.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/data_structures/heap/skew_heap.py b/data_structures/heap/skew_heap.py index b59441389a91..16ddc5545e36 100644 --- a/data_structures/heap/skew_heap.py +++ b/data_structures/heap/skew_heap.py @@ -2,9 +2,9 @@ from __future__ import annotations -from typing import Generic, Iterable, Iterator, TypeVar +from typing import Any, Generic, Iterable, Iterator, TypeVar -T = TypeVar("T") +T = TypeVar("T", bound=bool) class SkewNode(Generic[T]): @@ -51,7 +51,7 @@ class SkewHeap(Generic[T]): values. Both operations take O(logN) time where N is the size of the structure. Wiki: https://en.wikipedia.org/wiki/Skew_heap - Visualisation: https://www.cs.usfca.edu/~galles/visualization/SkewHeap.html + Visualization: https://www.cs.usfca.edu/~galles/visualization/SkewHeap.html >>> list(SkewHeap([2, 3, 1, 5, 1, 7])) [1, 1, 2, 3, 5, 7] @@ -70,14 +70,16 @@ class SkewHeap(Generic[T]): """ def __init__(self, data: Iterable[T] | None = ()) -> None: + """ >>> sh = SkewHeap([3, 1, 3, 7]) >>> list(sh) [1, 3, 3, 7] """ self._root: SkewNode[T] | None = None - for item in data: - self.insert(item) + if data: + for item in data: + self.insert(item) def __bool__(self) -> bool: """ @@ -103,7 +105,7 @@ def __iter__(self) -> Iterator[T]: >>> list(sh) [1, 3, 3, 7] """ - result = [] + result: list[Any] = [] while self: result.append(self.pop()) @@ -127,7 +129,7 @@ def insert(self, value: T) -> None: """ self._root = SkewNode.merge(self._root, SkewNode(value)) - def pop(self) -> T: + def pop(self) -> T | None: """ Pop the smallest value from the heap and return it. @@ -146,7 +148,9 @@ def pop(self) -> T: IndexError: Can't get top element for the empty heap. """ result = self.top() - self._root = SkewNode.merge(self._root.left, self._root.right) + self._root = ( + SkewNode.merge(self._root.left, self._root.right) if self._root else None + ) return result @@ -172,7 +176,7 @@ def top(self) -> T: raise IndexError("Can't get top element for the empty heap.") return self._root.value - def clear(self): + def clear(self) -> None: """ Clear the heap. 
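A short usage sketch for the skew heap API above; the import path assumes the snippet is run from the repository root, and the sample values are arbitrary:

```python
from data_structures.heap.skew_heap import SkewHeap

heap = SkewHeap([3, 1, 4, 1, 5])
heap.insert(2)

print(heap.top())   # 1, the smallest value currently stored
print(heap.pop())   # 1, removed from the heap
print(list(heap))   # [1, 2, 3, 4, 5], remaining values smallest-first
```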
From a94c6214ff33def88d9363de935263e3145fc96a Mon Sep 17 00:00:00 2001 From: "@im_8055" <38890773+Bhargavishnu@users.noreply.github.com> Date: Sun, 31 Oct 2021 16:06:03 +0530 Subject: [PATCH 0347/1543] Fix spellings (#5710) --- strings/credit_card_validator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/strings/credit_card_validator.py b/strings/credit_card_validator.py index 3a08c4117a6b..78bf45740a63 100644 --- a/strings/credit_card_validator.py +++ b/strings/credit_card_validator.py @@ -71,7 +71,7 @@ def validate_credit_card_number(credit_card_number: str) -> bool: 36111111111111 is an invalid credit card number because of its first two digits. False >>> validate_credit_card_number('41111111111111') - 41111111111111 is an invalid credit card number because it fails the Lhun check. + 41111111111111 is an invalid credit card number because it fails the Luhn check. False """ error_message = f"{credit_card_number} is an invalid credit card number because" @@ -88,7 +88,7 @@ def validate_credit_card_number(credit_card_number: str) -> bool: return False if not luhn_validation(credit_card_number): - print(f"{error_message} it fails the Lhun check.") + print(f"{error_message} it fails the Luhn check.") return False print(f"{credit_card_number} is a valid credit card number.") From 965b1ff7dfac4806d61e39dfbbdfb6c5c165c0a7 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 31 Oct 2021 13:36:53 +0300 Subject: [PATCH 0348/1543] Improve Project Euler problem 078 solution 1 (#5708) * Add solution doctests * Improve solution function --- project_euler/problem_078/sol1.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/project_euler/problem_078/sol1.py b/project_euler/problem_078/sol1.py index f92cf0f4020c..7e5938c4c466 100644 --- a/project_euler/problem_078/sol1.py +++ b/project_euler/problem_078/sol1.py @@ -21,6 +21,12 @@ def solution(number: int = 1000000) -> int: """ + >>> solution(1) + 1 + + >>> solution(9) + 14 + >>> solution() 55374 """ @@ -34,6 +40,7 @@ def solution(number: int = 1000000) -> int: if index > i: break item += partitions[i - index] * sign + item %= number index += j if index > i: break From 568425dfd14f687aaa11c6405e4acf2556ce74f2 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 31 Oct 2021 13:37:46 +0300 Subject: [PATCH 0349/1543] Improve solution (#5705) --- project_euler/problem_072/sol1.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/project_euler/problem_072/sol1.py b/project_euler/problem_072/sol1.py index 846396ab0f9c..a2a0eeeb31c5 100644 --- a/project_euler/problem_072/sol1.py +++ b/project_euler/problem_072/sol1.py @@ -18,7 +18,7 @@ function, phi(n). So, the answer is simply the sum of phi(n) for 2 <= n <= 1,000,000 Sum of phi(d), for all d|n = n. This result can be used to find phi(n) using a sieve. 
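As a small sanity check of this approach: phi(2) through phi(8) are 1, 2, 2, 4, 2, 6, 4, whose sum is 21, matching the count of reduced proper fractions for d <= 8 given in the problem statement.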
-Time: 3.5 sec +Time: 1 sec """ @@ -36,8 +36,9 @@ def solution(limit: int = 1_000_000) -> int: phi = [i - 1 for i in range(limit + 1)] for i in range(2, limit + 1): - for j in range(2 * i, limit + 1, i): - phi[j] -= phi[i] + if phi[i] == i - 1: + for j in range(2 * i, limit + 1, i): + phi[j] -= phi[j] // i return sum(phi[2 : limit + 1]) From f92eac982dee9d4ea97e36cfda0f1fa19213b9f4 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 31 Oct 2021 13:38:28 +0300 Subject: [PATCH 0350/1543] Improve Project Euler problem 092 solution 1 (#5703) * Fix typos * Improve solution --- project_euler/problem_092/sol1.py | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/project_euler/problem_092/sol1.py b/project_euler/problem_092/sol1.py index dcda3a48679e..437a85badc57 100644 --- a/project_euler/problem_092/sol1.py +++ b/project_euler/problem_092/sol1.py @@ -12,11 +12,14 @@ """ +DIGITS_SQUARED = [digit ** 2 for digit in range(10)] + + def next_number(number: int) -> int: """ Returns the next number of the chain by adding the square of each digit - to form a neww number. - For example if number = 12, next_number() will return 1^2 + 2^2 = 5. + to form a new number. + For example, if number = 12, next_number() will return 1^2 + 2^2 = 5. Therefore, 5 is the next number of the chain. >>> next_number(44) 32 @@ -27,12 +30,15 @@ def next_number(number: int) -> int: """ sum_of_digits_squared = 0 while number: - sum_of_digits_squared += (number % 10) ** 2 + sum_of_digits_squared += DIGITS_SQUARED[number % 10] number //= 10 return sum_of_digits_squared +CHAINS = {1: True, 58: False} + + def chain(number: int) -> bool: """ The function generates the chain of numbers until the next number is 1 or 89. @@ -40,7 +46,7 @@ def chain(number: int) -> bool: following chain of numbers: 44 → 32 → 13 → 10 → 1 → 1. Once the next number generated is 1 or 89, the function returns whether - or not the the next number generated by next_number() is 1. + or not the next number generated by next_number() is 1. >>> chain(10) True >>> chain(58) @@ -48,10 +54,13 @@ def chain(number: int) -> bool: >>> chain(1) True """ - while number != 1 and number != 89: - number = next_number(number) + if number in CHAINS: + return CHAINS[number] + + number_chain = chain(next_number(number)) + CHAINS[number] = number_chain - return number == 1 + return number_chain def solution(number: int = 10000000) -> int: From 13fdf21c9c74d9a1d0cc573bb35711b790dc8010 Mon Sep 17 00:00:00 2001 From: SURYAPRATAP SINGH SURYAVANSHI <67123991+suryapratapsinghsuryavanshi@users.noreply.github.com> Date: Sun, 31 Oct 2021 16:10:32 +0530 Subject: [PATCH 0351/1543] Added alternative_list_arrange method (#4631) --- other/alternative_list_arrange.py | 34 +++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 other/alternative_list_arrange.py diff --git a/other/alternative_list_arrange.py b/other/alternative_list_arrange.py new file mode 100644 index 000000000000..84c5dd4293ad --- /dev/null +++ b/other/alternative_list_arrange.py @@ -0,0 +1,34 @@ +def alternative_list_arrange(first_input_list: list, second_input_list: list) -> list: + """ + The method arranges two lists as one list in alternative forms of the list elements. 
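The two inputs are interleaved element by element; once the shorter list runs out, the remaining elements of the longer list are appended in their original order.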
+ :param first_input_list: + :param second_input_list: + :return: List + >>> alternative_list_arrange([1, 2, 3, 4, 5], ["A", "B", "C"]) + [1, 'A', 2, 'B', 3, 'C', 4, 5] + >>> alternative_list_arrange(["A", "B", "C"], [1, 2, 3, 4, 5]) + ['A', 1, 'B', 2, 'C', 3, 4, 5] + >>> alternative_list_arrange(["X", "Y", "Z"], [9, 8, 7, 6]) + ['X', 9, 'Y', 8, 'Z', 7, 6] + >>> alternative_list_arrange([1, 2, 3, 4, 5], []) + [1, 2, 3, 4, 5] + """ + first_input_list_length: int = len(first_input_list) + second_input_list_length: int = len(second_input_list) + abs_length: int = ( + first_input_list_length + if first_input_list_length > second_input_list_length + else second_input_list_length + ) + output_result_list: list = [] + for char_count in range(abs_length): + if char_count < first_input_list_length: + output_result_list.append(first_input_list[char_count]) + if char_count < second_input_list_length: + output_result_list.append(second_input_list[char_count]) + + return output_result_list + + +if __name__ == "__main__": + print(alternative_list_arrange(["A", "B", "C"], [1, 2, 3, 4, 5]), end=" ") From 9ac94c09ebc4d25caf3527350ff61d3ba93431e2 Mon Sep 17 00:00:00 2001 From: Morteza Date: Sun, 31 Oct 2021 03:41:39 -0700 Subject: [PATCH 0352/1543] Improve checking anagrams in O(n) with dictionary (#4806) --- strings/check_anagrams.py | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/strings/check_anagrams.py b/strings/check_anagrams.py index 62a4441a0c00..938bf4c2abee 100644 --- a/strings/check_anagrams.py +++ b/strings/check_anagrams.py @@ -1,6 +1,7 @@ """ wiki: https://en.wikipedia.org/wiki/Anagram """ +from collections import defaultdict def check_anagrams(first_str: str, second_str: str) -> bool: @@ -16,10 +17,30 @@ def check_anagrams(first_str: str, second_str: str) -> bool: >>> check_anagrams('There', 'Their') False """ - return ( - "".join(sorted(first_str.lower())).strip() - == "".join(sorted(second_str.lower())).strip() - ) + first_str = first_str.lower().strip() + second_str = second_str.lower().strip() + + # Remove whitespace + first_str = first_str.replace(" ", "") + second_str = second_str.replace(" ", "") + + # Strings of different lengths are not anagrams + if len(first_str) != len(second_str): + return False + + # Default values for count should be 0 + count = defaultdict(int) + + # For each character in input strings, + # increment count in the corresponding + for i in range(len(first_str)): + count[first_str[i]] += 1 + count[second_str[i]] -= 1 + + for _count in count.values(): + if _count != 0: + return False + return True if __name__ == "__main__": From f4fd147d0306e5fe3dfdda8ef01ec9068c1c247a Mon Sep 17 00:00:00 2001 From: happiestbee <87628038+happiestbee@users.noreply.github.com> Date: Sun, 31 Oct 2021 06:46:31 -0400 Subject: [PATCH 0353/1543] Make decrypt_caesar_with_chi_squared work with upper case letters (#5379) * Fixes: #5323 * Fixes: #5323 --- ciphers/decrypt_caesar_with_chi_squared.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py index beac851b6c2a..6c36860207cd 100644 --- a/ciphers/decrypt_caesar_with_chi_squared.py +++ b/ciphers/decrypt_caesar_with_chi_squared.py @@ -6,7 +6,7 @@ def decrypt_caesar_with_chi_squared( ciphertext: str, cipher_alphabet: list[str] | None = None, frequencies_dict: dict[str, float] | None = None, - case_sensetive: bool = False, + case_sensitive: bool = False, ) -> 
tuple[int, float, str]: """ Basic Usage @@ -20,7 +20,7 @@ def decrypt_caesar_with_chi_squared( * frequencies_dict (dict): a dictionary of word frequencies where keys are the letters and values are a percentage representation of the frequency as a decimal/float - * case_sensetive (bool): a boolean value: True if the case matters during + * case_sensitive (bool): a boolean value: True if the case matters during decryption, False if it doesn't Returns: @@ -117,6 +117,9 @@ def decrypt_caesar_with_chi_squared( >>> decrypt_caesar_with_chi_squared('crybd cdbsxq') (10, 233.35343938980898, 'short string') + >>> decrypt_caesar_with_chi_squared('Crybd Cdbsxq', case_sensitive=True) + (10, 233.35343938980898, 'Short String') + >>> decrypt_caesar_with_chi_squared(12) Traceback (most recent call last): AttributeError: 'int' object has no attribute 'lower' @@ -158,7 +161,7 @@ def decrypt_caesar_with_chi_squared( # Custom frequencies dictionary frequencies = frequencies_dict - if not case_sensetive: + if not case_sensitive: ciphertext = ciphertext.lower() # Chi squared statistic values @@ -172,10 +175,14 @@ def decrypt_caesar_with_chi_squared( for letter in ciphertext: try: # Try to index the letter in the alphabet - new_key = (alphabet_letters.index(letter) - shift) % len( + new_key = (alphabet_letters.index(letter.lower()) - shift) % len( alphabet_letters ) - decrypted_with_shift += alphabet_letters[new_key] + decrypted_with_shift += ( + alphabet_letters[new_key].upper() + if case_sensitive and letter.isupper() + else alphabet_letters[new_key] + ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter @@ -184,10 +191,11 @@ def decrypt_caesar_with_chi_squared( # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: - if case_sensetive: + if case_sensitive: + letter = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message - occurrences = decrypted_with_shift.count(letter) + occurrences = decrypted_with_shift.lower().count(letter) # Get the excepcted amount of times the letter should appear based # on letter frequencies From 0f015fa034646fcacd812b429e47c684b44e5bd3 Mon Sep 17 00:00:00 2001 From: Simon Date: Sun, 31 Oct 2021 11:48:10 +0100 Subject: [PATCH 0354/1543] Added solution for euler problem 493 (#5573) * Added solution for problem 493 * fixed typo * return result as string --- project_euler/problem_493/__init__.py | 0 project_euler/problem_493/sol1.py | 53 +++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 project_euler/problem_493/__init__.py create mode 100644 project_euler/problem_493/sol1.py diff --git a/project_euler/problem_493/__init__.py b/project_euler/problem_493/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_493/sol1.py b/project_euler/problem_493/sol1.py new file mode 100644 index 000000000000..c9879a528230 --- /dev/null +++ b/project_euler/problem_493/sol1.py @@ -0,0 +1,53 @@ +""" +Project Euler Problem 493: https://projecteuler.net/problem=493 + +70 coloured balls are placed in an urn, 10 for each of the seven rainbow colours. +What is the expected number of distinct colours in 20 randomly picked balls? +Give your answer with nine digits after the decimal point (a.bcdefghij). + +----- + +This combinatorial problem can be solved by decomposing the problem into the +following steps: +1. 
Calculate the total number of possible picking cominations +[combinations := binom_coeff(70, 20)] +2. Calculate the number of combinations with one colour missing +[missing := binom_coeff(60, 20)] +3. Calculate the probability of one colour missing +[missing_prob := missing / combinations] +4. Calculate the probability of no colour missing +[no_missing_prob := 1 - missing_prob] +5. Calculate the expected number of distinct colours +[expected = 7 * no_missing_prob] + +References: +- https://en.wikipedia.org/wiki/Binomial_coefficient +""" + +import math + +BALLS_PER_COLOUR = 10 +NUM_COLOURS = 7 +NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS + + +def solution(num_picks: int = 20) -> str: + """ + Calculates the expected number of distinct colours + + >>> solution(10) + '5.669644129' + + >>> solution(30) + '6.985042712' + """ + total = math.comb(NUM_BALLS, num_picks) + missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picks) + + result = NUM_COLOURS * (1 - missing_colour / total) + + return f"{result:.9f}" + + +if __name__ == "__main__": + print(solution(20)) From 7488c5070e3f5a29a08f584644666494c420f834 Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Sun, 31 Oct 2021 07:49:34 -0300 Subject: [PATCH 0355/1543] Fix type annotations in randomized_heap.py (#5704) --- data_structures/heap/randomized_heap.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/data_structures/heap/randomized_heap.py b/data_structures/heap/randomized_heap.py index f584f5cb3342..bab4ec1b34c6 100644 --- a/data_structures/heap/randomized_heap.py +++ b/data_structures/heap/randomized_heap.py @@ -3,9 +3,9 @@ from __future__ import annotations import random -from typing import Generic, Iterable, TypeVar +from typing import Any, Generic, Iterable, TypeVar -T = TypeVar("T") +T = TypeVar("T", bound=bool) class RandomizedHeapNode(Generic[T]): @@ -76,8 +76,10 @@ def __init__(self, data: Iterable[T] | None = ()) -> None: [1, 3, 3, 7] """ self._root: RandomizedHeapNode[T] | None = None - for item in data: - self.insert(item) + + if data: + for item in data: + self.insert(item) def insert(self, value: T) -> None: """ @@ -93,7 +95,7 @@ def insert(self, value: T) -> None: """ self._root = RandomizedHeapNode.merge(self._root, RandomizedHeapNode(value)) - def pop(self) -> T: + def pop(self) -> T | None: """ Pop the smallest value from the heap and return it. @@ -111,7 +113,12 @@ def pop(self) -> T: ... IndexError: Can't get top element for the empty heap. """ + result = self.top() + + if self._root is None: + return None + self._root = RandomizedHeapNode.merge(self._root.left, self._root.right) return result @@ -138,7 +145,7 @@ def top(self) -> T: raise IndexError("Can't get top element for the empty heap.") return self._root.value - def clear(self): + def clear(self) -> None: """ Clear the heap. @@ -151,7 +158,7 @@ def clear(self): """ self._root = None - def to_sorted_list(self) -> list[T]: + def to_sorted_list(self) -> list[Any]: """ Returns sorted list containing all the values in the heap. 
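A minimal usage sketch of the RandomizedHeap interface annotated above may help readers skimming the type changes; it assumes the module shown in the diff, data_structures/heap/randomized_heap.py, is importable from the repository root. Note that pop() is now typed as returning T | None mainly to satisfy the checker: an empty heap still raises IndexError inside top() before that branch is reached, as the doctest shows.

# Illustrative sketch only (not part of the patch); exercises the annotated min-heap API.
from data_structures.heap.randomized_heap import RandomizedHeap

heap = RandomizedHeap([2, 3, 1, 5, 7])  # the constructor builds the heap via repeated insert()
heap.insert(4)

print(heap.top())             # 1 -- smallest value, nothing removed
print(heap.pop())             # 1 -- the same value, now removed from the heap
print(heap.to_sorted_list())  # [2, 3, 4, 5, 7]
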
From 508589e3fc3fa93312b131c30c77ecd61460a430 Mon Sep 17 00:00:00 2001 From: Venkatesh Tantravahi <64308188+venkateshtantravahi@users.noreply.github.com> Date: Sun, 31 Oct 2021 16:57:50 +0530 Subject: [PATCH 0356/1543] Local Weighted Learning (#5615) * Local Weighted Learning Added * Delete LWL directory * Local Weighted Learning Added * local weighted learning added * Delete LWL directory * Delete local_weighted_learning.py * rephrased code added * local weight learning updated * local weight learning updated * Updated dir * updated codespell * import modification * Doctests added * doctests updated * lcl updated * doctests updated * doctest values updated --- .../local_weighted_learning/__init__.py | 0 .../local_weighted_learning.md | 66 +++++++++ .../local_weighted_learning.py | 135 ++++++++++++++++++ 3 files changed, 201 insertions(+) create mode 100644 machine_learning/local_weighted_learning/__init__.py create mode 100644 machine_learning/local_weighted_learning/local_weighted_learning.md create mode 100644 machine_learning/local_weighted_learning/local_weighted_learning.py diff --git a/machine_learning/local_weighted_learning/__init__.py b/machine_learning/local_weighted_learning/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.md b/machine_learning/local_weighted_learning/local_weighted_learning.md new file mode 100644 index 000000000000..5c7895e75104 --- /dev/null +++ b/machine_learning/local_weighted_learning/local_weighted_learning.md @@ -0,0 +1,66 @@ +# Locally Weighted Linear Regression +It is a non-parametric ML algorithm that does not learn on a fixed set of parameters such as **linear regression**. \ +So, here comes a question of what is *linear regression*? \ +**Linear regression** is a supervised learning algorithm used for computing linear relationships between input (X) and output (Y). \ + +### Terminology Involved + +number_of_features(i) = Number of features involved. \ +number_of_training_examples(m) = Number of training examples. \ +output_sequence(y) = Output Sequence. \ +$\theta$ $^T$ x = predicted point. \ +J($\theta$) = COst function of point. + +The steps involved in ordinary linear regression are: + +Training phase: Compute \theta to minimize the cost. \ +J($\theta$) = $\sum_{i=1}^m$ (($\theta$)$^T$ $x^i$ - $y^i$)$^2$ + +Predict output: for given query point x, \ + return: ($\theta$)$^T$ x + +Linear Regression + +This training phase is possible when data points are linear, but there again comes a question can we predict non-linear relationship between x and y ? as shown below + +Non-linear Data +
+
+So, here comes the role of a non-parametric algorithm, which doesn't compute predictions based on a fixed set of parameters. Rather, the parameters $\theta$ are computed individually for each query point/data point x.
+
+
+While Computing $\theta$ , a higher "preferance" is given to points in the vicinity of x than points farther from x. + +Cost Function J($\theta$) = $\sum_{i=1}^m$ $w^i$ (($\theta$)$^T$ $x^i$ - $y^i$)$^2$ + +$w^i$ is non-negative weight associated to training point $x^i$. \ +$w^i$ is large fr $x^i$'s lying closer to query point $x_i$. \ +$w^i$ is small for $x^i$'s lying farther to query point $x_i$. + +A Typical weight can be computed using \ + +$w^i$ = $\exp$(-$\frac{(x^i-x)(x^i-x)^T}{2\tau^2}$) + +Where $\tau$ is the bandwidth parameter that controls $w^i$ distance from x. + +Let's look at a example : + +Suppose, we had a query point x=5.0 and training points $x^1$=4.9 and $x^2$=5.0 than we can calculate weights as : + +$w^i$ = $\exp$(-$\frac{(x^i-x)(x^i-x)^T}{2\tau^2}$) with $\tau$=0.5 + +$w^1$ = $\exp$(-$\frac{(4.9-5)^2}{2(0.5)^2}$) = 0.9802 + +$w^2$ = $\exp$(-$\frac{(3-5)^2}{2(0.5)^2}$) = 0.000335 + +So, J($\theta$) = 0.9802*($\theta$ $^T$ $x^1$ - $y^1$) + 0.000335*($\theta$ $^T$ $x^2$ - $y^2$) + +So, here by we can conclude that the weight fall exponentially as the distance between x & $x^i$ increases and So, does the contribution of error in prediction for $x^i$ to the cost. + +Steps involved in LWL are : \ +Compute \theta to minimize the cost. +J($\theta$) = $\sum_{i=1}^m$ $w^i$ (($\theta$)$^T$ $x^i$ - $y^i$)$^2$ \ +Predict Output: for given query point x, \ +return : $\theta$ $^T$ x + +LWL diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py new file mode 100644 index 000000000000..af8694bf8f82 --- /dev/null +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -0,0 +1,135 @@ +# Required imports to run this file +import matplotlib.pyplot as plt +import numpy as np + + +# weighted matrix +def weighted_matrix(point: np.mat, training_data_x: np.mat, bandwidth: float) -> np.mat: + """ + Calculate the weight for every point in the + data set. It takes training_point , query_point, and tau + Here Tau is not a fixed value it can be varied depends on output. + tau --> bandwidth + xmat -->Training data + point --> the x where we want to make predictions + >>> weighted_matrix(np.array([1., 1.]),np.mat([[16.99, 10.34], [21.01,23.68], + ... [24.59,25.69]]), 0.6) + matrix([[1.43807972e-207, 0.00000000e+000, 0.00000000e+000], + [0.00000000e+000, 0.00000000e+000, 0.00000000e+000], + [0.00000000e+000, 0.00000000e+000, 0.00000000e+000]]) + """ + # m is the number of training samples + m, n = np.shape(training_data_x) + # Initializing weights as identity matrix + weights = np.mat(np.eye(m)) + # calculating weights for all training examples [x(i)'s] + for j in range(m): + diff = point - training_data_x[j] + weights[j, j] = np.exp(diff * diff.T / (-2.0 * bandwidth ** 2)) + return weights + + +def local_weight( + point: np.mat, training_data_x: np.mat, training_data_y: np.mat, bandwidth: float +) -> np.mat: + """ + Calculate the local weights using the weight_matrix function on training data. + Return the weighted matrix. + >>> local_weight(np.array([1., 1.]),np.mat([[16.99, 10.34], [21.01,23.68], + ... 
[24.59,25.69]]),np.mat([[1.01, 1.66, 3.5]]), 0.6) + matrix([[0.00873174], + [0.08272556]]) + """ + weight = weighted_matrix(point, training_data_x, bandwidth) + W = (training_data_x.T * (weight * training_data_x)).I * ( + training_data_x.T * weight * training_data_y.T + ) + + return W + + +def local_weight_regression( + training_data_x: np.mat, training_data_y: np.mat, bandwidth: float +) -> np.mat: + """ + Calculate predictions for each data point on axis. + >>> local_weight_regression(np.mat([[16.99, 10.34], [21.01,23.68], + ... [24.59,25.69]]),np.mat([[1.01, 1.66, 3.5]]), 0.6) + array([1.07173261, 1.65970737, 3.50160179]) + """ + m, n = np.shape(training_data_x) + ypred = np.zeros(m) + + for i, item in enumerate(training_data_x): + ypred[i] = item * local_weight( + item, training_data_x, training_data_y, bandwidth + ) + + return ypred + + +def load_data(dataset_name: str, cola_name: str, colb_name: str) -> np.mat: + """ + Function used for loading data from the seaborn splitting into x and y points + >>> pass # this function has no doctest + """ + import seaborn as sns + + data = sns.load_dataset(dataset_name) + col_a = np.array(data[cola_name]) # total_bill + col_b = np.array(data[colb_name]) # tip + + mcol_a = np.mat(col_a) + mcol_b = np.mat(col_b) + + m = np.shape(mcol_b)[1] + one = np.ones((1, m), dtype=int) + + # horizontal stacking + training_data_x = np.hstack((one.T, mcol_a.T)) + + return training_data_x, mcol_b, col_a, col_b + + +def get_preds(training_data_x: np.mat, mcol_b: np.mat, tau: float) -> np.ndarray: + """ + Get predictions with minimum error for each training data + >>> get_preds(np.mat([[16.99, 10.34], [21.01,23.68], + ... [24.59,25.69]]),np.mat([[1.01, 1.66, 3.5]]), 0.6) + array([1.07173261, 1.65970737, 3.50160179]) + """ + ypred = local_weight_regression(training_data_x, mcol_b, tau) + return ypred + + +def plot_preds( + training_data_x: np.mat, + predictions: np.ndarray, + col_x: np.ndarray, + col_y: np.ndarray, + cola_name: str, + colb_name: str, +) -> plt.plot: + """ + This function used to plot predictions and display the graph + >>> pass #this function has no doctest + """ + xsort = training_data_x.copy() + xsort.sort(axis=0) + plt.scatter(col_x, col_y, color="blue") + plt.plot( + xsort[:, 1], + predictions[training_data_x[:, 1].argsort(0)], + color="yellow", + linewidth=5, + ) + plt.title("Local Weighted Regression") + plt.xlabel(cola_name) + plt.ylabel(colb_name) + plt.show() + + +if __name__ == "__main__": + training_data_x, mcol_b, col_a, col_b = load_data("tips", "total_bill", "tip") + predictions = get_preds(training_data_x, mcol_b, 0.5) + plot_preds(training_data_x, predictions, col_a, col_b, "total_bill", "tip") From a64c9f1e7cc9616c54296ca3983123e15ec486f1 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 31 Oct 2021 14:16:02 +0000 Subject: [PATCH 0357/1543] Deduplicate euclidean_length method in Vector (#5658) * Rewrite parts of Vector and Matrix methods * Refactor determinant method and add unit tests Refactor determinant method to create separate minor and cofactor methods. Add respective unit tests for new methods. Rename methods using snake case to follow Python naming conventions. 
* Reorganize Vector and Matrix methods * Update linear_algebra/README.md Co-authored-by: John Law * Fix punctuation and wording * Apply suggestions from code review Co-authored-by: John Law * Deduplicate euclidean length method for Vector * Add more unit tests for Euclidean length method * Fix bug in unit test for euclidean_length * Remove old comments for magnitude method Co-authored-by: John Law --- linear_algebra/src/lib.py | 33 +++++++++++------------ linear_algebra/src/test_linear_algebra.py | 8 +++++- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index dad0a8c0a6a2..85dc4b71c4a4 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -44,7 +44,6 @@ class Vector: component(i): gets the i-th component (0-indexed) change_component(pos: int, value: float): changes specified component euclidean_length(): returns the euclidean length of the vector - magnitude(): returns the magnitude of the vector angle(other: Vector, deg: bool): returns the angle between two vectors TODO: compare-operator """ @@ -159,18 +158,20 @@ def change_component(self, pos: int, value: float) -> None: def euclidean_length(self) -> float: """ returns the euclidean length of the vector - """ - squares = [c ** 2 for c in self.__components] - return math.sqrt(sum(squares)) - - def magnitude(self) -> float: - """ - Magnitude of a Vector - >>> Vector([2, 3, 4]).magnitude() + >>> Vector([2, 3, 4]).euclidean_length() 5.385164807134504 - + >>> Vector([1]).euclidean_length() + 1.0 + >>> Vector([0, -1, -2, -3, 4, 5, 6]).euclidean_length() + 9.539392014169456 + >>> Vector([]).euclidean_length() + Traceback (most recent call last): + ... + Exception: Vector is empty """ + if len(self.__components) == 0: + raise Exception("Vector is empty") squares = [c ** 2 for c in self.__components] return math.sqrt(sum(squares)) @@ -188,7 +189,7 @@ def angle(self, other: Vector, deg: bool = False) -> float: Exception: invalid operand! """ num = self * other - den = self.magnitude() * other.magnitude() + den = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den)) else: @@ -267,8 +268,7 @@ class Matrix: def __init__(self, matrix: list[list[float]], w: int, h: int) -> None: """ - simple constructor for initializing - the matrix with components. + simple constructor for initializing the matrix with components. """ self.__matrix = matrix self.__width = w @@ -276,8 +276,7 @@ def __init__(self, matrix: list[list[float]], w: int, h: int) -> None: def __str__(self) -> str: """ - returns a string representation of this - matrix. + returns a string representation of this matrix. """ ans = "" for i in range(self.__height): @@ -291,7 +290,7 @@ def __str__(self) -> str: def __add__(self, other: Matrix) -> Matrix: """ - implements the matrix-addition. + implements matrix addition. """ if self.__width == other.width() and self.__height == other.height(): matrix = [] @@ -307,7 +306,7 @@ def __add__(self, other: Matrix) -> Matrix: def __sub__(self, other: Matrix) -> Matrix: """ - implements the matrix-subtraction. + implements matrix subtraction. 
""" if self.__width == other.width() and self.__height == other.height(): matrix = [] diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index de7041a17038..724ceef2599a 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -42,12 +42,18 @@ def test_size(self) -> None: x = Vector([1, 2, 3, 4]) self.assertEqual(len(x), 4) - def test_euclidLength(self) -> None: + def test_euclidean_length(self) -> None: """ test for method euclidean_length() """ x = Vector([1, 2]) + y = Vector([1, 2, 3, 4, 5]) + z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + w = Vector([1, -1, 1, -1, 2, -3, 4, -5]) self.assertAlmostEqual(x.euclidean_length(), 2.236, 3) + self.assertAlmostEqual(y.euclidean_length(), 7.416, 3) + self.assertEqual(z.euclidean_length(), 0) + self.assertAlmostEqual(w.euclidean_length(), 7.616, 3) def test_add(self) -> None: """ From 868c2fa0a8e1f51ba32e7622990a9259a8740604 Mon Sep 17 00:00:00 2001 From: Maarten Date: Sun, 31 Oct 2021 15:19:44 +0100 Subject: [PATCH 0358/1543] Rewrite fibonacci.py (#5665) (#5677) * Removed doctest call * Removed 0 and 1 append to `fib_array` * Moved fibonacci sequence logic into `calculate` * Refactored `get` to generate missing numbers * Renamed `fib_array` to `sequence` * Renamed `number` to `index` * Refactored `get` to only return sequence to `index` * Moved main block into function * Added documentation to `get` * Added missing type hints * Fixed doctest error in `get` docstring * Moved calculate logic into get * Reformatted with black * Fixed wrong generation range --- dynamic_programming/fibonacci.py | 89 ++++++++++++++------------------ 1 file changed, 38 insertions(+), 51 deletions(-) diff --git a/dynamic_programming/fibonacci.py b/dynamic_programming/fibonacci.py index cab1358ddea1..4abc60d4f3cc 100644 --- a/dynamic_programming/fibonacci.py +++ b/dynamic_programming/fibonacci.py @@ -5,61 +5,48 @@ class Fibonacci: - def __init__(self, N=None): - self.fib_array = [] - if N: - N = int(N) - self.fib_array.append(0) - self.fib_array.append(1) - for i in range(2, N + 1): - self.fib_array.append(self.fib_array[i - 1] + self.fib_array[i - 2]) - elif N == 0: - self.fib_array.append(0) - print(self.fib_array) + def __init__(self) -> None: + self.sequence = [0, 1] - def get(self, sequence_no=None): + def get(self, index: int) -> list: """ - >>> Fibonacci(5).get(3) - [0, 1, 1, 2, 3, 5] - [0, 1, 1, 2] - >>> Fibonacci(5).get(6) - [0, 1, 1, 2, 3, 5] - Out of bound. - >>> Fibonacci(5).get(-1) - [0, 1, 1, 2, 3, 5] - [] + Get the Fibonacci number of `index`. If the number does not exist, + calculate all missing numbers leading up to the number of `index`. 
+ + >>> Fibonacci().get(10) + [0, 1, 1, 2, 3, 5, 8, 13, 21, 34] + >>> Fibonacci().get(5) + [0, 1, 1, 2, 3] """ - if sequence_no is not None: - if sequence_no < len(self.fib_array): - return print(self.fib_array[: sequence_no + 1]) - else: - print("Out of bound.") - else: - print("Please specify a value") + difference = index - (len(self.sequence) - 2) + if difference >= 1: + for _ in range(difference): + self.sequence.append(self.sequence[-1] + self.sequence[-2]) + return self.sequence[:index] -if __name__ == "__main__": - print("\n********* Fibonacci Series Using Dynamic Programming ************\n") - print("\n Enter the upper limit for the fibonacci sequence: ", end="") - try: - N = int(input().strip()) - fib = Fibonacci(N) - print( - "\n********* Enter different values to get the corresponding fibonacci " - "sequence, enter any negative number to exit. ************\n" - ) - while True: - try: - i = int(input("Enter value: ").strip()) - if i < 0: - print("\n********* Good Bye!! ************\n") - break - fib.get(i) - except NameError: - print("\nInvalid input, please try again.") - except NameError: - print("\n********* Invalid input, good bye!! ************\n") +def main(): + print( + "Fibonacci Series Using Dynamic Programming\n", + "Enter the index of the Fibonacci number you want to calculate ", + "in the prompt below. (To exit enter exit or Ctrl-C)\n", + sep="", + ) + fibonacci = Fibonacci() + + while True: + prompt: str = input(">> ") + if prompt in {"exit", "quit"}: + break - import doctest + try: + index: int = int(prompt) + except ValueError: + print("Enter a number or 'exit'") + continue - doctest.testmod() + print(fibonacci.get(index)) + + +if __name__ == "__main__": + main() From 94f38dd88c9f644d270a52f9cdd76c2e64e90c7c Mon Sep 17 00:00:00 2001 From: Edward Nuno Date: Sun, 31 Oct 2021 09:03:03 -0700 Subject: [PATCH 0359/1543] [mypy] Fix type annotations for linked_stack.py (#5576) * Fix type annotations for linked_stack.py * Replace Optional with inline union type * Rename linked_stack to stack_with_singly_linked_list * Rename stack_using_dll to stack_with_doubly_linked_list * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 4 +-- ...ll.py => stack_with_doubly_linked_list.py} | 0 ...ck.py => stack_with_singly_linked_list.py} | 29 ++++++++++--------- 3 files changed, 18 insertions(+), 15 deletions(-) rename data_structures/stacks/{stack_using_dll.py => stack_with_doubly_linked_list.py} (100%) rename data_structures/stacks/{linked_stack.py => stack_with_singly_linked_list.py} (83%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 434cddbfd32c..140dc632c931 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -198,12 +198,12 @@ * [Evaluate Postfix Notations](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/evaluate_postfix_notations.py) * [Infix To Postfix Conversion](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/infix_to_postfix_conversion.py) * [Infix To Prefix Conversion](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/infix_to_prefix_conversion.py) - * [Linked Stack](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/linked_stack.py) * [Next Greater Element](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/next_greater_element.py) * [Postfix Evaluation](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/postfix_evaluation.py) * [Prefix 
Evaluation](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/prefix_evaluation.py) * [Stack](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stack.py) - * [Stack Using Dll](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stack_using_dll.py) + * [Stack With Doubly Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stack_with_doubly_linked_list.py) + * [Stack With Singly Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stack_with_singly_linked_list.py) * [Stock Span Problem](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stock_span_problem.py) * Trie * [Trie](https://github.com/TheAlgorithms/Python/blob/master/data_structures/trie/trie.py) diff --git a/data_structures/stacks/stack_using_dll.py b/data_structures/stacks/stack_with_doubly_linked_list.py similarity index 100% rename from data_structures/stacks/stack_using_dll.py rename to data_structures/stacks/stack_with_doubly_linked_list.py diff --git a/data_structures/stacks/linked_stack.py b/data_structures/stacks/stack_with_singly_linked_list.py similarity index 83% rename from data_structures/stacks/linked_stack.py rename to data_structures/stacks/stack_with_singly_linked_list.py index 85b59a940e39..903ae39db4b5 100644 --- a/data_structures/stacks/linked_stack.py +++ b/data_structures/stacks/stack_with_singly_linked_list.py @@ -1,19 +1,22 @@ """ A Stack using a linked list like structure """ from __future__ import annotations -from typing import Any +from collections.abc import Iterator +from typing import Generic, TypeVar +T = TypeVar("T") -class Node: - def __init__(self, data): + +class Node(Generic[T]): + def __init__(self, data: T): self.data = data - self.next = None + self.next: Node[T] | None = None - def __str__(self): + def __str__(self) -> str: return f"{self.data}" -class LinkedStack: +class LinkedStack(Generic[T]): """ Linked List Stack implementing push (to top), pop (from top) and is_empty @@ -44,15 +47,15 @@ class LinkedStack: """ def __init__(self) -> None: - self.top: Node | None = None + self.top: Node[T] | None = None - def __iter__(self): + def __iter__(self) -> Iterator[T]: node = self.top while node: yield node.data node = node.next - def __str__(self): + def __str__(self) -> str: """ >>> stack = LinkedStack() >>> stack.push("c") @@ -63,7 +66,7 @@ def __str__(self): """ return "->".join([str(item) for item in self]) - def __len__(self): + def __len__(self) -> int: """ >>> stack = LinkedStack() >>> len(stack) == 0 @@ -87,7 +90,7 @@ def is_empty(self) -> bool: """ return self.top is None - def push(self, item: Any) -> None: + def push(self, item: T) -> None: """ >>> stack = LinkedStack() >>> stack.push("Python") @@ -101,7 +104,7 @@ def push(self, item: Any) -> None: node.next = self.top self.top = node - def pop(self) -> Any: + def pop(self) -> T: """ >>> stack = LinkedStack() >>> stack.pop() @@ -125,7 +128,7 @@ def pop(self) -> Any: self.top = self.top.next return pop_node.data - def peek(self) -> Any: + def peek(self) -> T: """ >>> stack = LinkedStack() >>> stack.push("Java") From 99cf2cc1c5a83585625a26b6f267ac8f25affdc7 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Mon, 1 Nov 2021 03:26:33 +0530 Subject: [PATCH 0360/1543] Fix build issues due to count (#5725) * Fix build issues due to count * Update check_anagrams.py --- strings/check_anagrams.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/strings/check_anagrams.py b/strings/check_anagrams.py index 938bf4c2abee..f652e2294db2 100644 --- a/strings/check_anagrams.py +++ b/strings/check_anagrams.py @@ -2,6 +2,7 @@ wiki: https://en.wikipedia.org/wiki/Anagram """ from collections import defaultdict +from typing import DefaultDict def check_anagrams(first_str: str, second_str: str) -> bool: @@ -29,7 +30,7 @@ def check_anagrams(first_str: str, second_str: str) -> bool: return False # Default values for count should be 0 - count = defaultdict(int) + count: DefaultDict[str, int] = defaultdict(int) # For each character in input strings, # increment count in the corresponding From 06ab650e0823d7b58d1fc22a6f7cc0cca818f973 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 1 Nov 2021 06:25:40 +0000 Subject: [PATCH 0361/1543] Merge maths/fibonacci.py and maths/fibonacci_sequence_recursion.py (#5738) * Rewrite parts of Vector and Matrix methods * Refactor determinant method and add unit tests Refactor determinant method to create separate minor and cofactor methods. Add respective unit tests for new methods. Rename methods using snake case to follow Python naming conventions. * Reorganize Vector and Matrix methods * Update linear_algebra/README.md Co-authored-by: John Law * Fix punctuation and wording * Apply suggestions from code review Co-authored-by: John Law * Deduplicate euclidean length method for Vector * Add more unit tests for Euclidean length method * Fix bug in unit test for euclidean_length * Remove old comments for magnitude method * Rewrite maths/fibonacci.py * Rewrite timer and add unit tests * Fix typos in fib_binet unit tests * Fix typos in fib_binet unit tests * Clean main method * Merge fibonacci.py and fibonacci_sequence_recursion.py * Fix fib_binet unit test Co-authored-by: John Law --- maths/fibonacci.py | 260 +++++++++++++------------- maths/fibonacci_sequence_recursion.py | 22 --- 2 files changed, 130 insertions(+), 152 deletions(-) delete mode 100644 maths/fibonacci_sequence_recursion.py diff --git a/maths/fibonacci.py b/maths/fibonacci.py index e6519035401e..b009ea9df38a 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -1,130 +1,130 @@ -# fibonacci.py -""" -1. Calculates the iterative fibonacci sequence - -2. Calculates the fibonacci sequence with a formula - an = [ Phin - (phi)n ]/Sqrt[5] - reference-->Su, Francis E., et al. "Fibonacci Number Formula." Math Fun Facts. 
- -""" -import functools -import math -import time -from decimal import Decimal, getcontext - -getcontext().prec = 100 - - -def timer_decorator(func): - @functools.wraps(func) - def timer_wrapper(*args, **kwargs): - start = time.time() - func(*args, **kwargs) - end = time.time() - if int(end - start) > 0: - print(f"Run time for {func.__name__}: {(end - start):0.2f}s") - else: - print(f"Run time for {func.__name__}: {(end - start)*1000:0.2f}ms") - return func(*args, **kwargs) - - return timer_wrapper - - -# define Python user-defined exceptions -class Error(Exception): - """Base class for other exceptions""" - - -class ValueTooLargeError(Error): - """Raised when the input value is too large""" - - -class ValueTooSmallError(Error): - """Raised when the input value is not greater than one""" - - -class ValueLessThanZero(Error): - """Raised when the input value is less than zero""" - - -def _check_number_input(n, min_thresh, max_thresh=None): - """ - :param n: single integer - :type n: int - :param min_thresh: min threshold, single integer - :type min_thresh: int - :param max_thresh: max threshold, single integer - :type max_thresh: int - :return: boolean - """ - try: - if n >= min_thresh and max_thresh is None: - return True - elif min_thresh <= n <= max_thresh: - return True - elif n < 0: - raise ValueLessThanZero - elif n < min_thresh: - raise ValueTooSmallError - elif n > max_thresh: - raise ValueTooLargeError - except ValueLessThanZero: - print("Incorrect Input: number must not be less than 0") - except ValueTooSmallError: - print( - f"Incorrect Input: input number must be > {min_thresh} for the recursive " - "calculation" - ) - except ValueTooLargeError: - print( - f"Incorrect Input: input number must be < {max_thresh} for the recursive " - "calculation" - ) - return False - - -@timer_decorator -def fib_iterative(n): - """ - :param n: calculate Fibonacci to the nth integer - :type n:int - :return: Fibonacci sequence as a list - """ - n = int(n) - if _check_number_input(n, 2): - seq_out = [0, 1] - a, b = 0, 1 - for _ in range(n - len(seq_out)): - a, b = b, a + b - seq_out.append(b) - return seq_out - - -@timer_decorator -def fib_formula(n): - """ - :param n: calculate Fibonacci to the nth integer - :type n:int - :return: Fibonacci sequence as a list - """ - seq_out = [0, 1] - n = int(n) - if _check_number_input(n, 2, 1000000): - sqrt = Decimal(math.sqrt(5)) - phi_1 = Decimal(1 + sqrt) / Decimal(2) - phi_2 = Decimal(1 - sqrt) / Decimal(2) - for i in range(2, n): - temp_out = ((phi_1 ** Decimal(i)) - (phi_2 ** Decimal(i))) * ( - Decimal(sqrt) ** Decimal(-1) - ) - seq_out.append(int(temp_out)) - return seq_out - - -if __name__ == "__main__": - num = 20 - # print(f'{fib_recursive(num)}\n') - # print(f'{fib_iterative(num)}\n') - # print(f'{fib_formula(num)}\n') - fib_iterative(num) - fib_formula(num) +# fibonacci.py +""" +Calculates the Fibonacci sequence using iteration, recursion, and a simplified +form of Binet's formula + +NOTE 1: the iterative and recursive functions are more accurate than the Binet's +formula function because the iterative function doesn't use floats + +NOTE 2: the Binet's formula function is much more limited in the size of inputs +that it can handle due to the size limitations of Python floats +""" + +from math import sqrt +from time import time + + +def time_func(func, *args, **kwargs): + """ + Times the execution of a function with parameters + """ + start = time() + output = func(*args, **kwargs) + end = time() + if int(end - start) > 0: + print(f"{func.__name__} 
runtime: {(end - start):0.4f} s") + else: + print(f"{func.__name__} runtime: {(end - start) * 1000:0.4f} ms") + return output + + +def fib_iterative(n: int) -> list[int]: + """ + Calculates the first n (0-indexed) Fibonacci numbers using iteration + >>> fib_iterative(0) + [0] + >>> fib_iterative(1) + [0, 1] + >>> fib_iterative(5) + [0, 1, 1, 2, 3, 5] + >>> fib_iterative(10) + [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] + >>> fib_iterative(-1) + Traceback (most recent call last): + ... + Exception: n is negative + """ + if n < 0: + raise Exception("n is negative") + if n == 0: + return [0] + fib = [0, 1] + for _ in range(n - 1): + fib.append(fib[-1] + fib[-2]) + return fib + + +def fib_recursive(n: int) -> list[int]: + """ + Calculates the first n (0-indexed) Fibonacci numbers using recursion + >>> fib_iterative(0) + [0] + >>> fib_iterative(1) + [0, 1] + >>> fib_iterative(5) + [0, 1, 1, 2, 3, 5] + >>> fib_iterative(10) + [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] + >>> fib_iterative(-1) + Traceback (most recent call last): + ... + Exception: n is negative + """ + + def fib_recursive_term(i: int) -> int: + """ + Calculates the i-th (0-indexed) Fibonacci number using recursion + """ + if i < 0: + raise Exception("n is negative") + if i < 2: + return i + return fib_recursive_term(i - 1) + fib_recursive_term(i - 2) + + if n < 0: + raise Exception("n is negative") + return [fib_recursive_term(i) for i in range(n + 1)] + + +def fib_binet(n: int) -> list[int]: + """ + Calculates the first n (0-indexed) Fibonacci numbers using a simplified form + of Binet's formula: + https://en.m.wikipedia.org/wiki/Fibonacci_number#Computation_by_rounding + + NOTE 1: this function diverges from fib_iterative at around n = 71, likely + due to compounding floating-point arithmetic errors + + NOTE 2: this function overflows on n >= 1475 because of the size limitations + of Python floats + >>> fib_binet(0) + [0] + >>> fib_binet(1) + [0, 1] + >>> fib_binet(5) + [0, 1, 1, 2, 3, 5] + >>> fib_binet(10) + [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] + >>> fib_binet(-1) + Traceback (most recent call last): + ... + Exception: n is negative + >>> fib_binet(1475) + Traceback (most recent call last): + ... 
+ Exception: n is too large + """ + if n < 0: + raise Exception("n is negative") + if n >= 1475: + raise Exception("n is too large") + sqrt_5 = sqrt(5) + phi = (1 + sqrt_5) / 2 + return [round(phi ** i / sqrt_5) for i in range(n + 1)] + + +if __name__ == "__main__": + num = 20 + time_func(fib_iterative, num) + time_func(fib_recursive, num) + time_func(fib_binet, num) diff --git a/maths/fibonacci_sequence_recursion.py b/maths/fibonacci_sequence_recursion.py deleted file mode 100644 index 794b9fc0bd3a..000000000000 --- a/maths/fibonacci_sequence_recursion.py +++ /dev/null @@ -1,22 +0,0 @@ -# Fibonacci Sequence Using Recursion - - -def recur_fibo(n: int) -> int: - """ - >>> [recur_fibo(i) for i in range(12)] - [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89] - """ - return n if n <= 1 else recur_fibo(n - 1) + recur_fibo(n - 2) - - -def main() -> None: - limit = int(input("How many terms to include in fibonacci series: ")) - if limit > 0: - print(f"The first {limit} terms of the fibonacci series are as follows:") - print([recur_fibo(n) for n in range(limit)]) - else: - print("Please enter a positive integer: ") - - -if __name__ == "__main__": - main() From 71ba3a1ad940bb42f773ac483da9d40b234ecb7f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 1 Nov 2021 09:27:19 +0300 Subject: [PATCH 0362/1543] Improve Project Euler problem 012 solution 1 (#5731) * Improve solution * Uncomment code that has been commented due to slow execution affecting Travis * Retest --- project_euler/problem_012/sol1.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/project_euler/problem_012/sol1.py b/project_euler/problem_012/sol1.py index 7e080c4e45a1..861d026ece5b 100644 --- a/project_euler/problem_012/sol1.py +++ b/project_euler/problem_012/sol1.py @@ -21,17 +21,20 @@ What is the value of the first triangle number to have over five hundred divisors? """ -from math import sqrt def count_divisors(n): - nDivisors = 0 - for i in range(1, int(sqrt(n)) + 1): - if n % i == 0: - nDivisors += 2 - # check if n is perfect square - if n ** 0.5 == int(n ** 0.5): - nDivisors -= 1 + nDivisors = 1 + i = 2 + while i * i <= n: + multiplicity = 0 + while n % i == 0: + n //= i + multiplicity += 1 + nDivisors *= multiplicity + 1 + i += 1 + if n > 1: + nDivisors *= 2 return nDivisors @@ -39,9 +42,8 @@ def solution(): """Returns the value of the first triangle number to have over five hundred divisors. - # The code below has been commented due to slow execution affecting Travis. 
- # >>> solution() - # 76576500 + >>> solution() + 76576500 """ tNum = 1 i = 1 From 68ca61ecb75be579f13c6783c2071cc3b063d21b Mon Sep 17 00:00:00 2001 From: Kelly Costa Date: Mon, 1 Nov 2021 10:36:18 -0300 Subject: [PATCH 0363/1543] Add search book via ISBN using openlibrary.org API (#5736) * Add search book via ISBN using openlibrary.org API * FIX: parameters type hints and isbn sizes * Add doctests * Update search_books_by_isbn.py Co-authored-by: Christian Clauss --- web_programming/search_books_by_isbn.py | 76 +++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 web_programming/search_books_by_isbn.py diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py new file mode 100644 index 000000000000..fcb8b0428b88 --- /dev/null +++ b/web_programming/search_books_by_isbn.py @@ -0,0 +1,76 @@ +""" +Get book and author data from https://openlibrary.org + +ISBN: https://en.wikipedia.org/wiki/International_Standard_Book_Number +""" +from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError + +import requests + + +def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict: + """ + Given an 'isbn/0140328726', return book data from Open Library as a Python dict. + Given an '/authors/OL34184A', return authors data as a Python dict. + This code must work for olids with or without a leading slash ('/'). + + # Comment out doctests if they take too long or have results that may change + # >>> get_openlibrary_data(olid='isbn/0140328726') # doctest: +ELLIPSIS + {'publishers': ['Puffin'], 'number_of_pages': 96, 'isbn_10': ['0140328726'], ... + # >>> get_openlibrary_data(olid='/authors/OL7353617A') # doctest: +ELLIPSIS + {'name': 'Adrian Brisku', 'created': {'type': '/type/datetime', ... + >>> pass # Placate https://github.com/apps/algorithms-keeper + """ + new_olid = olid.strip().strip("/") # Remove leading/trailing whitespace & slashes + if new_olid.count("/") != 1: + raise ValueError(f"{olid} is not a valid Open Library olid") + return requests.get(f"https://openlibrary.org/{new_olid}.json").json() + + +def summerize_book(ol_book_data: dict) -> dict: + """ + Given Open Library book data, return a summary as a Python dict. + + >>> pass # Placate TheAlgorithms @ + """ + desired_keys = { + "title": "Title", + "publish_date": "Publish date", + "authors": "Authors", + "number_of_pages": "Number of pages:", + "first_sentence": "First sentence", + "isbn_10": "ISBN (10)", + "isbn_13": "ISBN (13)", + } + data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} + data["Authors"] = [ + get_openlibrary_data(author["key"])["name"] for author in data["Authors"] + ] + data["First sentence"] = data["First sentence"]["value"] + for key, value in data.items(): + if isinstance(value, list): + data[key] = ", ".join(value) + return data + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + while True: + isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() + if isbn.lower() in ("", "q", "quit", "exit", "stop"): + break + + if len(isbn) not in (10, 13) or not isbn.isdigit(): + print(f"Sorry, {isbn} is not a valid ISBN. 
Please, input a valid ISBN.") + continue + + print(f"\nSearching Open Library for ISBN: {isbn}...\n") + + try: + book_summary = summerize_book(get_openlibrary_data(f"isbn/{isbn}")) + print("\n".join(f"{key}: {value}" for key, value in book_summary.items())) + except JSONDecodeError: # Workaround for requests.exceptions.RequestException: + print(f"Sorry, there are no results for ISBN: {isbn}.") From 84cca2119c5f493823cc65a3796fe37e4a9c643d Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 1 Nov 2021 17:06:35 +0000 Subject: [PATCH 0364/1543] Rewrite maths/fibonacci.py (#5734) * Rewrite parts of Vector and Matrix methods * Refactor determinant method and add unit tests Refactor determinant method to create separate minor and cofactor methods. Add respective unit tests for new methods. Rename methods using snake case to follow Python naming conventions. * Reorganize Vector and Matrix methods * Update linear_algebra/README.md Co-authored-by: John Law * Fix punctuation and wording * Apply suggestions from code review Co-authored-by: John Law * Deduplicate euclidean length method for Vector * Add more unit tests for Euclidean length method * Fix bug in unit test for euclidean_length * Remove old comments for magnitude method * Rewrite maths/fibonacci.py * Rewrite timer and add unit tests * Fix typos in fib_binet unit tests * Fix typos in fib_binet unit tests * Clean main method Co-authored-by: John Law --- maths/fibonacci.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index b009ea9df38a..9b193b74a827 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -95,8 +95,8 @@ def fib_binet(n: int) -> list[int]: NOTE 1: this function diverges from fib_iterative at around n = 71, likely due to compounding floating-point arithmetic errors - NOTE 2: this function overflows on n >= 1475 because of the size limitations - of Python floats + NOTE 2: this function doesn't accept n >= 1475 because it overflows + thereafter due to the size limitations of Python floats >>> fib_binet(0) [0] >>> fib_binet(1) From 74f496712628e5bc77ae9e11572d4207066214ba Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 1 Nov 2021 18:07:47 +0100 Subject: [PATCH 0365/1543] Fix comment (#5742) * Fix comment * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 11 +++++++++-- web_programming/search_books_by_isbn.py | 2 +- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 140dc632c931..e70c0aab64a7 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -420,6 +420,8 @@ * [Knn Sklearn](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/knn_sklearn.py) * [Linear Discriminant Analysis](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/linear_discriminant_analysis.py) * [Linear Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/linear_regression.py) + * Local Weighted Learning + * [Local Weighted Learning](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/local_weighted_learning/local_weighted_learning.py) * [Logistic Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/logistic_regression.py) * Lstm * [Lstm Prediction](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/lstm/lstm_prediction.py) @@ -476,7 +478,6 @@ * [Factors](https://github.com/TheAlgorithms/Python/blob/master/maths/factors.py) * [Fermat Little 
Theorem](https://github.com/TheAlgorithms/Python/blob/master/maths/fermat_little_theorem.py) * [Fibonacci](https://github.com/TheAlgorithms/Python/blob/master/maths/fibonacci.py) - * [Fibonacci Sequence Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/fibonacci_sequence_recursion.py) * [Find Max](https://github.com/TheAlgorithms/Python/blob/master/maths/find_max.py) * [Find Max Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/find_max_recursion.py) * [Find Min](https://github.com/TheAlgorithms/Python/blob/master/maths/find_min.py) @@ -517,6 +518,7 @@ * [Perfect Number](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_number.py) * [Perfect Square](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_square.py) * [Pi Monte Carlo Estimation](https://github.com/TheAlgorithms/Python/blob/master/maths/pi_monte_carlo_estimation.py) + * [Pollard Rho](https://github.com/TheAlgorithms/Python/blob/master/maths/pollard_rho.py) * [Polynomial Evaluation](https://github.com/TheAlgorithms/Python/blob/master/maths/polynomial_evaluation.py) * [Power Using Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/power_using_recursion.py) * [Prime Check](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_check.py) @@ -539,6 +541,7 @@ * [Geometric Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric_series.py) * [Harmonic](https://github.com/TheAlgorithms/Python/blob/master/maths/series/harmonic.py) * [Harmonic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/harmonic_series.py) + * [Hexagonal Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/series/hexagonal_numbers.py) * [P Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/p_series.py) * [Sieve Of Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/sieve_of_eratosthenes.py) * [Sigmoid](https://github.com/TheAlgorithms/Python/blob/master/maths/sigmoid.py) @@ -583,8 +586,8 @@ ## Other * [Activity Selection](https://github.com/TheAlgorithms/Python/blob/master/other/activity_selection.py) + * [Alternative List Arrange](https://github.com/TheAlgorithms/Python/blob/master/other/alternative_list_arrange.py) * [Check Strong Password](https://github.com/TheAlgorithms/Python/blob/master/other/check_strong_password.py) - * [Date To Weekday](https://github.com/TheAlgorithms/Python/blob/master/other/date_to_weekday.py) * [Davisb Putnamb Logemannb Loveland](https://github.com/TheAlgorithms/Python/blob/master/other/davisb_putnamb_logemannb_loveland.py) * [Dijkstra Bankers Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/dijkstra_bankers_algorithm.py) * [Doomsday](https://github.com/TheAlgorithms/Python/blob/master/other/doomsday.py) @@ -861,6 +864,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_234/sol1.py) * Problem 301 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_301/sol1.py) + * Problem 493 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_493/sol1.py) * Problem 551 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_551/sol1.py) * Problem 686 @@ -960,6 +965,7 @@ * [Detecting English Programmatically](https://github.com/TheAlgorithms/Python/blob/master/strings/detecting_english_programmatically.py) * [Frequency Finder](https://github.com/TheAlgorithms/Python/blob/master/strings/frequency_finder.py) * 
[Indian Phone Validator](https://github.com/TheAlgorithms/Python/blob/master/strings/indian_phone_validator.py) + * [Is Contains Unique Chars](https://github.com/TheAlgorithms/Python/blob/master/strings/is_contains_unique_chars.py) * [Is Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/is_palindrome.py) * [Jaro Winkler](https://github.com/TheAlgorithms/Python/blob/master/strings/jaro_winkler.py) * [Join](https://github.com/TheAlgorithms/Python/blob/master/strings/join.py) @@ -1008,6 +1014,7 @@ * [Nasa Data](https://github.com/TheAlgorithms/Python/blob/master/web_programming/nasa_data.py) * [Random Anime Character](https://github.com/TheAlgorithms/Python/blob/master/web_programming/random_anime_character.py) * [Recaptcha Verification](https://github.com/TheAlgorithms/Python/blob/master/web_programming/recaptcha_verification.py) + * [Search Books By Isbn](https://github.com/TheAlgorithms/Python/blob/master/web_programming/search_books_by_isbn.py) * [Slack Message](https://github.com/TheAlgorithms/Python/blob/master/web_programming/slack_message.py) * [Test Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/test_fetch_github_info.py) * [World Covid19 Stats](https://github.com/TheAlgorithms/Python/blob/master/web_programming/world_covid19_stats.py) diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py index fcb8b0428b88..a55110f3f5fc 100644 --- a/web_programming/search_books_by_isbn.py +++ b/web_programming/search_books_by_isbn.py @@ -31,7 +31,7 @@ def summerize_book(ol_book_data: dict) -> dict: """ Given Open Library book data, return a summary as a Python dict. - >>> pass # Placate TheAlgorithms @ + >>> pass # Placate https://github.com/apps/algorithms-keeper """ desired_keys = { "title": "Title", From dc6e77338c9cac607e58c06b178a232643f3e87d Mon Sep 17 00:00:00 2001 From: Brian Evans <53117772+mrbrianevans@users.noreply.github.com> Date: Mon, 1 Nov 2021 23:09:40 +0000 Subject: [PATCH 0366/1543] Add stone unit of measuring weight (#5730) * Add stone unit of measuring weight And some tests in the docs using an external calculator. Not yet tested if they pass. 
* Fix rounding descrepencies in doctests to pass tests --- conversions/weight_conversion.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py index c344416be5f5..18c4037317da 100644 --- a/conversions/weight_conversion.py +++ b/conversions/weight_conversion.py @@ -3,7 +3,7 @@ __author__ = "Anubhav Solanki" __license__ = "MIT" -__version__ = "1.0.0" +__version__ = "1.1.0" __maintainer__ = "Anubhav Solanki" __email__ = "anubhavsolanki0@gmail.com" @@ -27,6 +27,7 @@ -> Wikipedia reference: https://en.wikipedia.org/wiki/Ounce -> Wikipedia reference: https://en.wikipedia.org/wiki/Fineness#Karat -> Wikipedia reference: https://en.wikipedia.org/wiki/Dalton_(unit) +-> Wikipedia reference: https://en.wikipedia.org/wiki/Stone_(unit) """ KILOGRAM_CHART: dict[str, float] = { @@ -37,6 +38,7 @@ "long-ton": 0.0009842073, "short-ton": 0.0011023122, "pound": 2.2046244202, + "stone": 0.1574731728, "ounce": 35.273990723, "carrat": 5000, "atomic-mass-unit": 6.022136652e26, @@ -50,6 +52,7 @@ "long-ton": 1016.04608, "short-ton": 907.184, "pound": 0.453592, + "stone": 6.35029, "ounce": 0.0283495, "carrat": 0.0002, "atomic-mass-unit": 1.660540199e-27, @@ -67,6 +70,7 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float: "long-ton" : 0.0009842073, "short-ton" : 0.0011023122, "pound" : 2.2046244202, + "stone": 0.1574731728, "ounce" : 35.273990723, "carrat" : 5000, "atomic-mass-unit" : 6.022136652E+26 @@ -85,6 +89,8 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float: 0.0011023122 >>> weight_conversion("kilogram","pound",4) 8.8184976808 + >>> weight_conversion("kilogram","stone",5) + 0.7873658640000001 >>> weight_conversion("kilogram","ounce",4) 141.095962892 >>> weight_conversion("kilogram","carrat",3) @@ -105,6 +111,8 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float: 3.3069366000000003e-06 >>> weight_conversion("gram","pound",3) 0.0066138732606 + >>> weight_conversion("gram","stone",4) + 0.0006298926912000001 >>> weight_conversion("gram","ounce",1) 0.035273990723 >>> weight_conversion("gram","carrat",2) @@ -211,6 +219,24 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float: 2267.96 >>> weight_conversion("pound","atomic-mass-unit",4) 1.0926372033015936e+27 + >>> weight_conversion("stone","kilogram",5) + 31.751450000000002 + >>> weight_conversion("stone","gram",2) + 12700.58 + >>> weight_conversion("stone","milligram",3) + 19050870.0 + >>> weight_conversion("stone","metric-ton",3) + 0.01905087 + >>> weight_conversion("stone","long-ton",3) + 0.018750005325351003 + >>> weight_conversion("stone","short-ton",3) + 0.021000006421614002 + >>> weight_conversion("stone","pound",2) + 28.00000881870372 + >>> weight_conversion("stone","ounce",1) + 224.00007054835967 + >>> weight_conversion("stone","carrat",2) + 63502.9 >>> weight_conversion("ounce","kilogram",3) 0.0850485 >>> weight_conversion("ounce","gram",3) From 5910c3aa78e6fa53ef425ab9efa43348cc421e6c Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Tue, 2 Nov 2021 14:50:55 +0530 Subject: [PATCH 0367/1543] Typo (#5750) --- web_programming/search_books_by_isbn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py index a55110f3f5fc..22a31dcb1db4 100644 --- a/web_programming/search_books_by_isbn.py +++ b/web_programming/search_books_by_isbn.py @@ -27,7 +27,7 
@@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict: return requests.get(f"https://openlibrary.org/{new_olid}.json").json() -def summerize_book(ol_book_data: dict) -> dict: +def summarize_book(ol_book_data: dict) -> dict: """ Given Open Library book data, return a summary as a Python dict. @@ -70,7 +70,7 @@ def summerize_book(ol_book_data: dict) -> dict: print(f"\nSearching Open Library for ISBN: {isbn}...\n") try: - book_summary = summerize_book(get_openlibrary_data(f"isbn/{isbn}")) + book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}")) print("\n".join(f"{key}: {value}" for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(f"Sorry, there are no results for ISBN: {isbn}.") From 424c2008473b00a8d3f31a9ad043526d95c31e69 Mon Sep 17 00:00:00 2001 From: Mozartus <32893711+Mozartuss@users.noreply.github.com> Date: Tue, 2 Nov 2021 11:06:39 +0100 Subject: [PATCH 0368/1543] Add gabor filter (#5289) * add gabor_filter.py * Update gabor_filter.py * update gabor_filter.py * add doctest * change import order * Update digital_image_processing/filters/gabor_filter.py Co-authored-by: John Law * Update gabor_filter.py * fix gabor filter calculation Co-authored-by: John Law --- .../filters/gabor_filter.py | 85 +++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 digital_image_processing/filters/gabor_filter.py diff --git a/digital_image_processing/filters/gabor_filter.py b/digital_image_processing/filters/gabor_filter.py new file mode 100644 index 000000000000..90aa049c24a0 --- /dev/null +++ b/digital_image_processing/filters/gabor_filter.py @@ -0,0 +1,85 @@ +# Implementation of the Gaborfilter +# https://en.wikipedia.org/wiki/Gabor_filter +import numpy as np +from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey + + +def gabor_filter_kernel( + ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int +) -> np.ndarray: + """ + :param ksize: The kernelsize of the convolutional filter (ksize x ksize) + :param sigma: standard deviation of the gaussian bell curve + :param theta: The orientation of the normal to the parallel stripes + of Gabor function. + :param lambd: Wavelength of the sinusoidal component. + :param gamma: The spatial aspect ratio and specifies the ellipticity + of the support of Gabor function. + :param psi: The phase offset of the sinusoidal function. 
+ + >>> gabor_filter_kernel(3, 8, 0, 10, 0, 0).tolist() + [[0.8027212023735046, 1.0, 0.8027212023735046], [0.8027212023735046, 1.0, \ +0.8027212023735046], [0.8027212023735046, 1.0, 0.8027212023735046]] + + """ + + # prepare kernel + # the kernel size have to be odd + if (ksize % 2) == 0: + ksize = ksize + 1 + gabor = np.zeros((ksize, ksize), dtype=np.float32) + + # each value + for y in range(ksize): + for x in range(ksize): + # distance from center + px = x - ksize // 2 + py = y - ksize // 2 + + # degree to radiant + _theta = theta / 180 * np.pi + cos_theta = np.cos(_theta) + sin_theta = np.sin(_theta) + + # get kernel x + _x = cos_theta * px + sin_theta * py + + # get kernel y + _y = -sin_theta * px + cos_theta * py + + # fill kernel + gabor[y, x] = np.exp( + -(_x ** 2 + gamma ** 2 * _y ** 2) / (2 * sigma ** 2) + ) * np.cos(2 * np.pi * _x / lambd + psi) + + return gabor + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + # read original image + img = imread("../image_data/lena.jpg") + # turn image in gray scale value + gray = cvtColor(img, COLOR_BGR2GRAY) + + # Apply multiple Kernel to detect edges + out = np.zeros(gray.shape[:2]) + for theta in [0, 30, 60, 90, 120, 150]: + """ + ksize = 10 + sigma = 8 + lambd = 10 + gamma = 0 + psi = 0 + """ + kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0) + out += filter2D(gray, CV_8UC3, kernel_10) + out = out / out.max() * 255 + out = out.astype(np.uint8) + + imshow("Original", gray) + imshow("Gabor filter with 20x20 mask and 6 directions", out) + + waitKey(0) From 3c8fec1316aade09134255dd101060b4ec036241 Mon Sep 17 00:00:00 2001 From: Matthew Wisdom Date: Tue, 2 Nov 2021 03:07:36 -0700 Subject: [PATCH 0369/1543] Add Neville's algorithm for polynomial interpolation (#5447) * Added nevilles algorithm for polynomial interpolation * Added type hinting for neville_interpolate function arguments. * Added more descriptive names * Update nevilles_method.py * Fixed some linting issues * Fixed type hinting error * Fixed nevilles_method.py * Add ellipsis for doctest spanning multiple lines * Update nevilles_method.py Co-authored-by: John Law --- maths/nevilles_method.py | 56 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 maths/nevilles_method.py diff --git a/maths/nevilles_method.py b/maths/nevilles_method.py new file mode 100644 index 000000000000..5583e4269b32 --- /dev/null +++ b/maths/nevilles_method.py @@ -0,0 +1,56 @@ +""" + Python program to show how to interpolate and evaluate a polynomial + using Neville's method. + Neville’s method evaluates a polynomial that passes through a + given set of x and y points for a particular x value (x0) using the + Newton polynomial form. + Reference: + https://rpubs.com/aaronsc32/nevilles-method-polynomial-interpolation +""" + + +def neville_interpolate(x_points: list, y_points: list, x0: int) -> list: + """ + Interpolate and evaluate a polynomial using Neville's method. + Arguments: + x_points, y_points: Iterables of x and corresponding y points through + which the polynomial passes. + x0: The value of x to evaluate the polynomial for. + Return Value: A list of the approximated value and the Neville iterations + table respectively. 
+ >>> import pprint + >>> neville_interpolate((1,2,3,4,6), (6,7,8,9,11), 5)[0] + 10.0 + >>> pprint.pprint(neville_interpolate((1,2,3,4,6), (6,7,8,9,11), 99)[1]) + [[0, 6, 0, 0, 0], + [0, 7, 0, 0, 0], + [0, 8, 104.0, 0, 0], + [0, 9, 104.0, 104.0, 0], + [0, 11, 104.0, 104.0, 104.0]] + >>> neville_interpolate((1,2,3,4,6), (6,7,8,9,11), 99)[0] + 104.0 + >>> neville_interpolate((1,2,3,4,6), (6,7,8,9,11), '') + Traceback (most recent call last): + File "", line 1, in + ... + TypeError: unsupported operand type(s) for -: 'str' and 'int' + """ + n = len(x_points) + q = [[0] * n for i in range(n)] + for i in range(n): + q[i][1] = y_points[i] + + for i in range(2, n): + for j in range(i, n): + q[j][i] = ( + (x0 - x_points[j - i + 1]) * q[j][i - 1] + - (x0 - x_points[j]) * q[j - 1][i - 1] + ) / (x_points[j] - x_points[j - i + 1]) + + return [q[n - 1][n - 1], q] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 24731b078c0e30d0f9bdd4ed96749f33574860c6 Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Tue, 2 Nov 2021 07:09:46 -0300 Subject: [PATCH 0370/1543] [mypy] fix type annotations in `data_structures/queue/circular_queue_linked_list.py` (#5749) * [mypy] fix type annotations in circular_queue_linked_list * Remove 10 blank lines Co-authored-by: Christian Clauss --- .../queue/circular_queue_linked_list.py | 37 ++++++++++++------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/data_structures/queue/circular_queue_linked_list.py b/data_structures/queue/circular_queue_linked_list.py index 1878403bd2ef..e8c2b8bffc06 100644 --- a/data_structures/queue/circular_queue_linked_list.py +++ b/data_structures/queue/circular_queue_linked_list.py @@ -1,6 +1,8 @@ # Implementation of Circular Queue using linked lists # https://en.wikipedia.org/wiki/Circular_buffer +from __future__ import annotations + from typing import Any @@ -18,8 +20,8 @@ class CircularQueueLinkedList: """ def __init__(self, initial_capacity: int = 6) -> None: - self.front = None - self.rear = None + self.front: Node | None = None + self.rear: Node | None = None self.create_linked_list(initial_capacity) def create_linked_list(self, initial_capacity: int) -> None: @@ -27,7 +29,7 @@ def create_linked_list(self, initial_capacity: int) -> None: self.front = current_node self.rear = current_node previous_node = current_node - for i in range(1, initial_capacity): + for _ in range(1, initial_capacity): current_node = Node() previous_node.next = current_node current_node.prev = previous_node @@ -49,9 +51,14 @@ def is_empty(self) -> bool: >>> cq.is_empty() True """ - return self.front == self.rear and self.front.data is None - def first(self) -> Any: + return ( + self.front == self.rear + and self.front is not None + and self.front.data is None + ) + + def first(self) -> Any | None: """ Returns the first element of the queue >>> cq = CircularQueueLinkedList() @@ -74,7 +81,7 @@ def first(self) -> Any: 'b' """ self.check_can_perform_operation() - return self.front.data + return self.front.data if self.front else None def enqueue(self, data: Any) -> None: """ @@ -92,11 +99,13 @@ def enqueue(self, data: Any) -> None: ... 
Exception: Empty Queue """ + if self.rear is None: + return + self.check_is_full() - if self.is_empty(): - self.rear.data = data - else: + if not self.is_empty(): self.rear = self.rear.next + if self.rear: self.rear.data = data def dequeue(self) -> Any: @@ -117,6 +126,8 @@ def dequeue(self) -> Any: Exception: Empty Queue """ self.check_can_perform_operation() + if self.rear is None or self.front is None: + return if self.front == self.rear: data = self.front.data self.front.data = None @@ -133,15 +144,15 @@ def check_can_perform_operation(self) -> None: raise Exception("Empty Queue") def check_is_full(self) -> None: - if self.rear.next == self.front: + if self.rear and self.rear.next == self.front: raise Exception("Full Queue") class Node: def __init__(self) -> None: - self.data = None - self.next = None - self.prev = None + self.data: Any | None = None + self.next: Node | None = None + self.prev: Node | None = None if __name__ == "__main__": From bdd135d40343daf047a00abc9a7360db192793d9 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Tue, 2 Nov 2021 15:40:25 +0530 Subject: [PATCH 0371/1543] Split base85.py into functions, Add doctests (#5746) * Update base16.py * Rename base64_encoding.py to base64.py * Split into functions, Add doctests * Update base16.py --- ciphers/base16.py | 16 +++++------ ciphers/{base64_encoding.py => base64.py} | 0 ciphers/base85.py | 34 ++++++++++++++++++----- 3 files changed, 35 insertions(+), 15 deletions(-) rename ciphers/{base64_encoding.py => base64.py} (100%) diff --git a/ciphers/base16.py b/ciphers/base16.py index 1ef60868dc3f..a149a6d8c5bf 100644 --- a/ciphers/base16.py +++ b/ciphers/base16.py @@ -1,30 +1,30 @@ import base64 -def encode_to_b16(inp: str) -> bytes: +def base16_encode(inp: str) -> bytes: """ Encodes a given utf-8 string into base-16. - >>> encode_to_b16('Hello World!') + >>> base16_encode('Hello World!') b'48656C6C6F20576F726C6421' - >>> encode_to_b16('HELLO WORLD!') + >>> base16_encode('HELLO WORLD!') b'48454C4C4F20574F524C4421' - >>> encode_to_b16('') + >>> base16_encode('') b'' """ # encode the input into a bytes-like object and then encode b16encode that return base64.b16encode(inp.encode("utf-8")) -def decode_from_b16(b16encoded: bytes) -> str: +def base16_decode(b16encoded: bytes) -> str: """ Decodes from base-16 to a utf-8 string. - >>> decode_from_b16(b'48656C6C6F20576F726C6421') + >>> base16_decode(b'48656C6C6F20576F726C6421') 'Hello World!' - >>> decode_from_b16(b'48454C4C4F20574F524C4421') + >>> base16_decode(b'48454C4C4F20574F524C4421') 'HELLO WORLD!' 
- >>> decode_from_b16(b'') + >>> base16_decode(b'') '' """ # b16decode the input into bytes and decode that into a human readable string diff --git a/ciphers/base64_encoding.py b/ciphers/base64.py similarity index 100% rename from ciphers/base64_encoding.py rename to ciphers/base64.py diff --git a/ciphers/base85.py b/ciphers/base85.py index 9740299b9771..afd1aff79d11 100644 --- a/ciphers/base85.py +++ b/ciphers/base85.py @@ -1,13 +1,33 @@ import base64 -def main() -> None: - inp = input("->") - encoded = inp.encode("utf-8") # encoded the input (we need a bytes like object) - a85encoded = base64.a85encode(encoded) # a85encoded the encoded string - print(a85encoded) - print(base64.a85decode(a85encoded).decode("utf-8")) # decoded it +def base85_encode(string: str) -> bytes: + """ + >>> base85_encode("") + b'' + >>> base85_encode("12345") + b'0etOA2#' + >>> base85_encode("base 85") + b'@UX=h+?24' + """ + # encoded the input to a bytes-like object and then a85encode that + return base64.a85encode(string.encode("utf-8")) + + +def base85_decode(a85encoded: bytes) -> str: + """ + >>> base85_decode(b"") + '' + >>> base85_decode(b"0etOA2#") + '12345' + >>> base85_decode(b"@UX=h+?24") + 'base 85' + """ + # a85decode the input into bytes and decode that into a human readable string + return base64.a85decode(a85encoded).decode("utf-8") if __name__ == "__main__": - main() + import doctest + + doctest.testmod() From 0124b73484aeb6a35b036842f382f7445fcec258 Mon Sep 17 00:00:00 2001 From: krishchopra02 <77331421+krishchopra02@users.noreply.github.com> Date: Tue, 2 Nov 2021 15:43:49 +0530 Subject: [PATCH 0372/1543] Add a gray_code_sequence.py file to the bit_manipulation folder (#5038) * Added a gray_code_sequence.py file to the bit_manipulation folder * Added a descriptive name for variable n changing it to bit count * Update gray_code_sequence.py Co-authored-by: krishchopra02 Co-authored-by: John Law --- bit_manipulation/gray_code_sequence.py | 94 ++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 bit_manipulation/gray_code_sequence.py diff --git a/bit_manipulation/gray_code_sequence.py b/bit_manipulation/gray_code_sequence.py new file mode 100644 index 000000000000..636578d89754 --- /dev/null +++ b/bit_manipulation/gray_code_sequence.py @@ -0,0 +1,94 @@ +def gray_code(bit_count: int) -> list: + """ + Takes in an integer n and returns a n-bit + gray code sequence + An n-bit gray code sequence is a sequence of 2^n + integers where: + + a) Every integer is between [0,2^n -1] inclusive + b) The sequence begins with 0 + c) An integer appears at most one times in the sequence + d)The binary representation of every pair of integers differ + by exactly one bit + e) The binary representation of first and last bit also + differ by exactly one bit + + >>> gray_code(2) + [0, 1, 3, 2] + + >>> gray_code(1) + [0, 1] + + >>> gray_code(3) + [0, 1, 3, 2, 6, 7, 5, 4] + + >>> gray_code(-1) + Traceback (most recent call last): + ... + ValueError: The given input must be positive + + >>> gray_code(10.6) + Traceback (most recent call last): + ... + TypeError: unsupported operand type(s) for <<: 'int' and 'float' + """ + + # bit count represents no. 
of bits in the gray code + if bit_count < 0: + raise ValueError("The given input must be positive") + + # get the generated string sequence + sequence = gray_code_sequence_string(bit_count) + # + # convert them to integers + for i in range(len(sequence)): + sequence[i] = int(sequence[i], 2) + + return sequence + + +def gray_code_sequence_string(bit_count: int) -> list: + """ + Will output the n-bit grey sequence as a + string of bits + + >>> gray_code_sequence_string(2) + ['00', '01', '11', '10'] + + >>> gray_code_sequence_string(1) + ['0', '1'] + """ + + # The approach is a recursive one + # Base case achieved when either n = 0 or n=1 + if bit_count == 0: + return ["0"] + + if bit_count == 1: + return ["0", "1"] + + seq_len = 1 << bit_count # defines the length of the sequence + # 1<< n is equivalent to 2^n + + # recursive answer will generate answer for n-1 bits + smaller_sequence = gray_code_sequence_string(bit_count - 1) + + sequence = [] + + # append 0 to first half of the smaller sequence generated + for i in range(seq_len // 2): + generated_no = "0" + smaller_sequence[i] + sequence.append(generated_no) + + # append 1 to second half ... start from the end of the list + for i in reversed(range(seq_len // 2)): + generated_no = "1" + smaller_sequence[i] + sequence.append(generated_no) + + return sequence + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From dd19d8120df2eeba8f3173f952187be4e7656144 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Nov 2021 16:07:07 +0300 Subject: [PATCH 0373/1543] Uncomment code that has been commented due to slow execution affecting Travis (#5745) --- project_euler/problem_009/sol3.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/project_euler/problem_009/sol3.py b/project_euler/problem_009/sol3.py index 03aed4b70761..d299f821d4f6 100644 --- a/project_euler/problem_009/sol3.py +++ b/project_euler/problem_009/sol3.py @@ -24,9 +24,8 @@ def solution() -> int: 1. a**2 + b**2 = c**2 2. a + b + c = 1000 - # The code below has been commented due to slow execution affecting Travis. - # >>> solution() - # 31875000 + >>> solution() + 31875000 """ return [ From 60ad32920d92a6095b28aa6952a759b40e5759c7 Mon Sep 17 00:00:00 2001 From: Leoriem-code <73761711+Leoriem-code@users.noreply.github.com> Date: Tue, 2 Nov 2021 22:17:57 +0100 Subject: [PATCH 0374/1543] fixed typo for codespell (#5753) --- project_euler/problem_045/sol1.py | 2 +- web_programming/get_user_tweets.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/project_euler/problem_045/sol1.py b/project_euler/problem_045/sol1.py index cb30a4d97339..cdf5c14cf362 100644 --- a/project_euler/problem_045/sol1.py +++ b/project_euler/problem_045/sol1.py @@ -43,7 +43,7 @@ def is_pentagonal(n: int) -> bool: def solution(start: int = 144) -> int: """ - Returns the next number which is traingular, pentagonal and hexagonal. + Returns the next number which is triangular, pentagonal and hexagonal. 
>>> solution(144) 1533776805 """ diff --git a/web_programming/get_user_tweets.py b/web_programming/get_user_tweets.py index 0f70201dc311..28cf85541dc4 100644 --- a/web_programming/get_user_tweets.py +++ b/web_programming/get_user_tweets.py @@ -32,7 +32,7 @@ def get_all_tweets(screen_name: str) -> None: while len(new_tweets) > 0: print(f"getting tweets before {oldest}") - # all subsiquent requests use the max_id param to prevent duplicates + # all subsequent requests use the max_id param to prevent duplicates new_tweets = api.user_timeline( screen_name=screen_name, count=200, max_id=oldest ) From 37bc6bdebf159d395b559dd7094934a337d59c8a Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 3 Nov 2021 00:28:09 +0300 Subject: [PATCH 0375/1543] Replace Travis CI mentions with GitHub actions (#5751) --- CONTRIBUTING.md | 4 ++-- project_euler/README.md | 4 ++-- project_euler/problem_012/sol2.py | 5 ++--- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4df60ed3f296..c9525aa4080e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,7 +23,7 @@ __Improving comments__ and __writing proper tests__ are also highly welcome. We appreciate any contribution, from fixing a grammar mistake in a comment to implementing complex algorithms. Please read this section if you are contributing your work. -Your contribution will be tested by our [automated testing on Travis CI](https://travis-ci.org/TheAlgorithms/Python/pull_requests) to save time and mental energy. After you have submitted your pull request, you should see the Travis tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the Travis output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. +Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the GitHub Actions output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. Please help us keep our issue list small by adding fixes: #{$ISSUE_NO} to the commit message of pull requests that resolve open issues. GitHub will use this tag to auto-close the issue when the PR is merged. @@ -170,7 +170,7 @@ We want your work to be readable by others; therefore, we encourage you to note - If possible, follow the standard *within* the folder you are submitting to. - If you have modified/added code work, make sure the code compiles before submitting. - If you have modified/added documentation work, ensure your language is concise and contains no grammar errors. -- Do not update the README.md or DIRECTORY.md file which will be periodically autogenerated by our Travis CI processes. +- Do not update the README.md or DIRECTORY.md file which will be periodically autogenerated by our GitHub Actions processes. - Add a corresponding explanation to [Algorithms-Explanation](https://github.com/TheAlgorithms/Algorithms-Explanation) (Optional but recommended). 
- All submissions will be tested with [__mypy__](http://www.mypy-lang.org) so we encourage you to add [__Python type hints__](https://docs.python.org/3/library/typing.html) where it makes sense to do so. diff --git a/project_euler/README.md b/project_euler/README.md index c4c0a854472f..e3dc035eee5e 100644 --- a/project_euler/README.md +++ b/project_euler/README.md @@ -5,7 +5,7 @@ Problems are taken from https://projecteuler.net/, the Project Euler. [Problems Project Euler is a series of challenging mathematical/computer programming problems that require more than just mathematical insights to solve. Project Euler is ideal for mathematicians who are learning to code. -The solutions will be checked by our [automated testing on Travis CI](https://travis-ci.com/github/TheAlgorithms/Python/pull_requests) with the help of [this script](https://github.com/TheAlgorithms/Python/blob/master/scripts/validate_solutions.py). The efficiency of your code is also checked. You can view the top 10 slowest solutions on Travis CI logs (under `slowest 10 durations`) and open a pull request to improve those solutions. +The solutions will be checked by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) with the help of [this script](https://github.com/TheAlgorithms/Python/blob/master/scripts/validate_solutions.py). The efficiency of your code is also checked. You can view the top 10 slowest solutions on GitHub Actions logs (under `slowest 10 durations`) and open a pull request to improve those solutions. ## Solution Guidelines @@ -28,7 +28,7 @@ Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Befo * When the `solution` function is called without any arguments like so: `solution()`, it should return the answer to the problem. * Every function, which includes all the helper functions, if any, and the main solution function, should have `doctest` in the function docstring along with a brief statement mentioning what the function is about. - * There should not be a `doctest` for testing the answer as that is done by our Travis CI build using this [script](https://github.com/TheAlgorithms/Python/blob/master/scripts/validate_solutions.py). Keeping in mind the above example of [Problem 1](https://projecteuler.net/problem=1): + * There should not be a `doctest` for testing the answer as that is done by our GitHub Actions build using this [script](https://github.com/TheAlgorithms/Python/blob/master/scripts/validate_solutions.py). Keeping in mind the above example of [Problem 1](https://projecteuler.net/problem=1): ```python def solution(limit: int = 1000): diff --git a/project_euler/problem_012/sol2.py b/project_euler/problem_012/sol2.py index 7578caa98938..1cc79fc4cd75 100644 --- a/project_euler/problem_012/sol2.py +++ b/project_euler/problem_012/sol2.py @@ -36,9 +36,8 @@ def solution(): """Returns the value of the first triangle number to have over five hundred divisors. - # The code below has been commented due to slow execution affecting Travis. 
- # >>> solution() - # 76576500 + >>> solution() + 76576500 """ return next(i for i in triangle_number_generator() if count_divisors(i) > 500) From 85ee27687aee9723153bd2f32ed902fac7297757 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C9=AA=C9=B4=E1=B4=80=CA=8F=E1=B4=80=E1=B4=8B=20P?= =?UTF-8?q?=E1=B4=80=C9=B4=E1=B4=85=E1=B4=87=CA=8F?= <87496159+Harpia-Vieillot@users.noreply.github.com> Date: Wed, 3 Nov 2021 16:02:17 +0530 Subject: [PATCH 0376/1543] Add Hexagonal Numbers in directory (#5696) Yesterday hexagonal_numbers.py was created. Added that file in this list(maths/series/hexagonal_numbers.py) From 0ea5c734e13e40fcefdea336bc5f735536f133a0 Mon Sep 17 00:00:00 2001 From: Souvik Ghosh <42302494+SouvikGhosh05@users.noreply.github.com> Date: Thu, 4 Nov 2021 01:54:50 +0530 Subject: [PATCH 0377/1543] sock_merchant.py: Matching socks by color (#5761) * Python file for finding number of pairs * updating DIRECTORY.md * fixed iterative_pair.py * further fixed with type casting * fixed naming conventions * further fixed with naming convention * documented done * build issue fixed * updating DIRECTORY.md * Revert "documented done" This reverts commit 3be15ca374f3ea3f01f725912dba59b939b058b5. * Update canny.py * Update test_digital_image_processing.py * Update sobel_filter.py * requirements.txt fixed * keras<2.7.0 * Update sock_merchant.py * doctest with black fixed * Update sock_merchant.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 6 +++++- maths/sock_merchant.py | 20 ++++++++++++++++++++ requirements.txt | 2 +- 3 files changed, 26 insertions(+), 2 deletions(-) create mode 100644 maths/sock_merchant.py diff --git a/DIRECTORY.md b/DIRECTORY.md index e70c0aab64a7..fd164c92e11c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -39,6 +39,7 @@ * [Binary Xor Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_xor_operator.py) * [Count 1S Brian Kernighan Method](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/count_1s_brian_kernighan_method.py) * [Count Number Of One Bits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/count_number_of_one_bits.py) + * [Gray Code Sequence](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/gray_code_sequence.py) * [Reverse Bits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/single_bit_manipulation_operations.py) @@ -63,7 +64,7 @@ * [Baconian Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/baconian_cipher.py) * [Base16](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base16.py) * [Base32](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base32.py) - * [Base64 Encoding](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base64_encoding.py) + * [Base64](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base64.py) * [Base85](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base85.py) * [Beaufort Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/beaufort_cipher.py) * [Bifid](https://github.com/TheAlgorithms/Python/blob/master/ciphers/bifid.py) @@ -219,6 +220,7 @@ * Filters * [Bilateral Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/bilateral_filter.py) * 
[Convolve](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/convolve.py) + * [Gabor Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/gabor_filter.py) * [Gaussian Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/gaussian_filter.py) * [Median Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/median_filter.py) * [Sobel Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/sobel_filter.py) @@ -511,6 +513,7 @@ * [Modular Exponential](https://github.com/TheAlgorithms/Python/blob/master/maths/modular_exponential.py) * [Monte Carlo](https://github.com/TheAlgorithms/Python/blob/master/maths/monte_carlo.py) * [Monte Carlo Dice](https://github.com/TheAlgorithms/Python/blob/master/maths/monte_carlo_dice.py) + * [Nevilles Method](https://github.com/TheAlgorithms/Python/blob/master/maths/nevilles_method.py) * [Newton Raphson](https://github.com/TheAlgorithms/Python/blob/master/maths/newton_raphson.py) * [Number Of Digits](https://github.com/TheAlgorithms/Python/blob/master/maths/number_of_digits.py) * [Numerical Integration](https://github.com/TheAlgorithms/Python/blob/master/maths/numerical_integration.py) @@ -546,6 +549,7 @@ * [Sieve Of Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/sieve_of_eratosthenes.py) * [Sigmoid](https://github.com/TheAlgorithms/Python/blob/master/maths/sigmoid.py) * [Simpson Rule](https://github.com/TheAlgorithms/Python/blob/master/maths/simpson_rule.py) + * [Sock Merchant](https://github.com/TheAlgorithms/Python/blob/master/maths/sock_merchant.py) * [Softmax](https://github.com/TheAlgorithms/Python/blob/master/maths/softmax.py) * [Square Root](https://github.com/TheAlgorithms/Python/blob/master/maths/square_root.py) * [Sum Of Arithmetic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_arithmetic_series.py) diff --git a/maths/sock_merchant.py b/maths/sock_merchant.py new file mode 100644 index 000000000000..304efec9ba5e --- /dev/null +++ b/maths/sock_merchant.py @@ -0,0 +1,20 @@ +from collections import Counter + + +def sock_merchant(colors: list[int]) -> int: + """ + >>> sock_merchant([10, 20, 20, 10, 10, 30, 50, 10, 20]) + 3 + >>> sock_merchant([1, 1, 3, 3]) + 2 + """ + return sum(socks_by_color // 2 for socks_by_color in Counter(colors).values()) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + colors = [int(x) for x in input("Enter socks by color :").rstrip().split()] + print(f"sock_merchant({colors}) = {sock_merchant(colors)}") diff --git a/requirements.txt b/requirements.txt index c28238a0774f..ef4e18043905 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ beautifulsoup4 fake_useragent -keras +keras<2.7.0 lxml matplotlib numpy From 765be4581e66f1e8e92f24606c243a8cd45e4d3c Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 3 Nov 2021 23:32:10 +0300 Subject: [PATCH 0378/1543] Improve Project Euler problem 012 solution 2 (#5760) * Improve solution * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- project_euler/problem_012/sol2.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/project_euler/problem_012/sol2.py b/project_euler/problem_012/sol2.py index 1cc79fc4cd75..380a9b74bb97 100644 --- a/project_euler/problem_012/sol2.py +++ b/project_euler/problem_012/sol2.py @@ -29,7 +29,18 @@ def 
triangle_number_generator(): def count_divisors(n): - return sum(2 for i in range(1, int(n ** 0.5) + 1) if n % i == 0 and i * i != n) + divisors_count = 1 + i = 2 + while i * i <= n: + multiplicity = 0 + while n % i == 0: + n //= i + multiplicity += 1 + divisors_count *= multiplicity + 1 + i += 1 + if n > 1: + divisors_count *= 2 + return divisors_count def solution(): From 7954a3ae166db66ae6a43043c76417dda688a8e5 Mon Sep 17 00:00:00 2001 From: Andrew Grangaard Date: Wed, 3 Nov 2021 13:32:49 -0700 Subject: [PATCH 0379/1543] [mypy] Fixes typing errors in other/dpll (#5759) + As per usage examples, clause literals are a list of strings. + Note: symbols extracted from literals are expected to be exactly two characters. + self.literal boolean values are initialized to None, so must be optional + model values should be Booleans, but aren't guaranteed to be non-None in the code. + uses newer '... | None' annotation for Optional values + clauses are passed to the Formula initializer as both lists and sets, they are stored as lists. Returned clauses will always be lists. + use explicit tuple annotation from __future__ rather than using (..., ...) in return signatures + mapping returned by dpll_algorithm is optional per the documentation. --- other/davisb_putnamb_logemannb_loveland.py | 35 +++++++++++----------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/other/davisb_putnamb_logemannb_loveland.py b/other/davisb_putnamb_logemannb_loveland.py index 00068930b89e..031f0dbed404 100644 --- a/other/davisb_putnamb_logemannb_loveland.py +++ b/other/davisb_putnamb_logemannb_loveland.py @@ -11,6 +11,7 @@ from __future__ import annotations import random +from typing import Iterable class Clause: @@ -27,12 +28,12 @@ class Clause: True """ - def __init__(self, literals: list[int]) -> None: + def __init__(self, literals: list[str]) -> None: """ Represent the literals and an assignment in a clause." """ # Assign all literals to None initially - self.literals = {literal: None for literal in literals} + self.literals: dict[str, bool | None] = {literal: None for literal in literals} def __str__(self) -> str: """ @@ -52,7 +53,7 @@ def __len__(self) -> int: """ return len(self.literals) - def assign(self, model: dict[str, bool]) -> None: + def assign(self, model: dict[str, bool | None]) -> None: """ Assign values to literals of the clause as given by model. """ @@ -68,7 +69,7 @@ def assign(self, model: dict[str, bool]) -> None: value = not value self.literals[literal] = value - def evaluate(self, model: dict[str, bool]) -> bool: + def evaluate(self, model: dict[str, bool | None]) -> bool | None: """ Evaluates the clause with the assignments in model. This has the following steps: @@ -97,7 +98,7 @@ class Formula: {{A1, A2, A3'}, {A5', A2', A1}} is ((A1 v A2 v A3') and (A5' v A2' v A1)) """ - def __init__(self, clauses: list[Clause]) -> None: + def __init__(self, clauses: Iterable[Clause]) -> None: """ Represent the number of clauses and the clauses themselves. """ @@ -139,14 +140,14 @@ def generate_formula() -> Formula: """ Randomly generate a formula. """ - clauses = set() + clauses: set[Clause] = set() no_of_clauses = random.randint(1, 10) while len(clauses) < no_of_clauses: clauses.add(generate_clause()) - return Formula(set(clauses)) + return Formula(clauses) -def generate_parameters(formula: Formula) -> (list[Clause], list[str]): +def generate_parameters(formula: Formula) -> tuple[list[Clause], list[str]]: """ Return the clauses and symbols from a formula. 
A symbol is the uncomplemented form of a literal. @@ -173,8 +174,8 @@ def generate_parameters(formula: Formula) -> (list[Clause], list[str]): def find_pure_symbols( - clauses: list[Clause], symbols: list[str], model: dict[str, bool] -) -> (list[str], dict[str, bool]): + clauses: list[Clause], symbols: list[str], model: dict[str, bool | None] +) -> tuple[list[str], dict[str, bool | None]]: """ Return pure symbols and their values to satisfy clause. Pure symbols are symbols in a formula that exist only @@ -198,11 +199,11 @@ def find_pure_symbols( {'A1': True, 'A2': False, 'A3': True, 'A5': False} """ pure_symbols = [] - assignment = dict() + assignment: dict[str, bool | None] = dict() literals = [] for clause in clauses: - if clause.evaluate(model) is True: + if clause.evaluate(model): continue for literal in clause.literals: literals.append(literal) @@ -225,8 +226,8 @@ def find_pure_symbols( def find_unit_clauses( - clauses: list[Clause], model: dict[str, bool] -) -> (list[str], dict[str, bool]): + clauses: list[Clause], model: dict[str, bool | None] +) -> tuple[list[str], dict[str, bool | None]]: """ Returns the unit symbols and their values to satisfy clause. Unit symbols are symbols in a formula that are: @@ -263,7 +264,7 @@ def find_unit_clauses( Ncount += 1 if Fcount == len(clause) - 1 and Ncount == 1: unit_symbols.append(sym) - assignment = dict() + assignment: dict[str, bool | None] = dict() for i in unit_symbols: symbol = i[:2] assignment[symbol] = len(i) == 2 @@ -273,8 +274,8 @@ def find_unit_clauses( def dpll_algorithm( - clauses: list[Clause], symbols: list[str], model: dict[str, bool] -) -> (bool, dict[str, bool]): + clauses: list[Clause], symbols: list[str], model: dict[str, bool | None] +) -> tuple[bool | None, dict[str, bool | None] | None]: """ Returns the model if the formula is satisfiable, else None This has the following steps: From 331fe6d3bc075ac2d91af766cf2bdf7df09f1281 Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Wed, 3 Nov 2021 17:34:08 -0300 Subject: [PATCH 0380/1543] [mypy] Fix type annotations in `data_structures/binary_tree/lowest_common_ancestor.py` (#5757) * Fix type annotations in lowest_common_ancestor.py * Refactor line 53 in lowest_common_ancestor.py --- .../binary_tree/lowest_common_ancestor.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/data_structures/binary_tree/lowest_common_ancestor.py b/data_structures/binary_tree/lowest_common_ancestor.py index 2f1e893fcf99..651037703b95 100644 --- a/data_structures/binary_tree/lowest_common_ancestor.py +++ b/data_structures/binary_tree/lowest_common_ancestor.py @@ -3,7 +3,7 @@ from __future__ import annotations -import queue +from queue import Queue def swap(a: int, b: int) -> tuple[int, int]: @@ -37,7 +37,7 @@ def create_sparse(max_node: int, parent: list[list[int]]) -> list[list[int]]: # returns lca of node u,v def lowest_common_ancestor( u: int, v: int, level: list[int], parent: list[list[int]] -) -> list[list[int]]: +) -> int: # u must be deeper in the tree than v if level[u] < level[v]: u, v = swap(u, v) @@ -50,7 +50,7 @@ def lowest_common_ancestor( return u # moving both nodes upwards till lca in found for i in range(18, -1, -1): - if parent[i][u] != 0 and parent[i][u] != parent[i][v]: + if parent[i][u] not in [0, parent[i][v]]: u, v = parent[i][u], parent[i][v] # returning longest common ancestor of u,v return parent[0][u] @@ -61,8 +61,8 @@ def breadth_first_search( level: list[int], parent: list[list[int]], max_node: int, - graph: dict[int, int], - root=1, + graph: 
dict[int, list[int]], + root: int = 1, ) -> tuple[list[int], list[list[int]]]: """ sets every nodes direct parent @@ -70,7 +70,7 @@ def breadth_first_search( calculates depth of each node from root node """ level[root] = 0 - q = queue.Queue(maxsize=max_node) + q: Queue[int] = Queue(maxsize=max_node) q.put(root) while q.qsize() != 0: u = q.get() @@ -88,7 +88,7 @@ def main() -> None: parent = [[0 for _ in range(max_node + 10)] for _ in range(20)] # initializing with -1 which means every node is unvisited level = [-1 for _ in range(max_node + 10)] - graph = { + graph: dict[int, list[int]] = { 1: [2, 3, 4], 2: [5], 3: [6, 7], From 9655ec2a05f1ebbafb3c0ac5acf3d6278b498030 Mon Sep 17 00:00:00 2001 From: Divyesh Vishwakarma Date: Thu, 4 Nov 2021 16:18:57 +0530 Subject: [PATCH 0381/1543] Added newtons_second_law_of_motion.py (#5474) --- physics/newtons_second_law_of_motion.py | 81 +++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 physics/newtons_second_law_of_motion.py diff --git a/physics/newtons_second_law_of_motion.py b/physics/newtons_second_law_of_motion.py new file mode 100644 index 000000000000..cb53f8f6571f --- /dev/null +++ b/physics/newtons_second_law_of_motion.py @@ -0,0 +1,81 @@ +""" +Description : +Newton's second law of motion pertains to the behavior of objects for which +all existing forces are not balanced. +The second law states that the acceleration of an object is dependent upon two variables +- the net force acting upon the object and the mass of the object. +The acceleration of an object depends directly +upon the net force acting upon the object, +and inversely upon the mass of the object. +As the force acting upon an object is increased, +the acceleration of the object is increased. +As the mass of an object is increased, the acceleration of the object is decreased. +Source: https://www.physicsclassroom.com/class/newtlaws/Lesson-3/Newton-s-Second-Law +Formulation: Fnet = m • a +Diagrammatic Explanation: + Forces are unbalanced + | + | + | + V + There is acceleration + /\ + / \ + / \ + / \ + / \ + / \ + / \ + __________________ ____ ________________ + |The acceleration | |The acceleration | + |depends directly | |depends inversely | + |on the net Force | |upon the object's | + |_________________| |mass_______________| +Units: +1 Newton = 1 kg X meters / (seconds^2) +How to use? 
+Inputs: + ___________________________________________________ + |Name | Units | Type | + |-------------|-------------------------|-----------| + |mass | (in kgs) | float | + |-------------|-------------------------|-----------| + |acceleration | (in meters/(seconds^2)) | float | + |_____________|_________________________|___________| + +Output: + ___________________________________________________ + |Name | Units | Type | + |-------------|-------------------------|-----------| + |force | (in Newtons) | float | + |_____________|_________________________|___________| + +""" + + +def newtons_second_law_of_motion(mass: float, acceleration: float) -> float: + """ + >>> newtons_second_law_of_motion(10, 10) + 100 + >>> newtons_second_law_of_motion(2.0, 1) + 2.0 + """ + force = float() + try: + force = mass * acceleration + except Exception: + return -0.0 + return force + + +if __name__ == "__main__": + import doctest + + # run doctest + doctest.testmod() + + # demo + mass = 12.5 + acceleration = 10 + force = newtons_second_law_of_motion(mass, acceleration) + print("The force is ", force, "N") From 47dd31f4a1aaa371f3b822e178fd273c68f45962 Mon Sep 17 00:00:00 2001 From: Leoriem-code <73761711+Leoriem-code@users.noreply.github.com> Date: Thu, 4 Nov 2021 11:49:36 +0100 Subject: [PATCH 0382/1543] Add README files 1/7 (#5754) * Added 5 README files * corrected arithmetic_analysis README * Update audio_filters/README.md Co-authored-by: John Law * Update backtracking/README.md Co-authored-by: John Law * Update bit_manipulation/README.md Co-authored-by: John Law Co-authored-by: John Law --- arithmetic_analysis/README.md | 7 +++++++ audio_filters/README.md | 9 +++++++++ backtracking/README.md | 8 ++++++++ bit_manipulation/README.md | 17 +++++++++++------ boolean_algebra/README.md | 7 +++++++ 5 files changed, 42 insertions(+), 6 deletions(-) create mode 100644 arithmetic_analysis/README.md create mode 100644 audio_filters/README.md create mode 100644 backtracking/README.md create mode 100644 boolean_algebra/README.md diff --git a/arithmetic_analysis/README.md b/arithmetic_analysis/README.md new file mode 100644 index 000000000000..45cf321eb6ad --- /dev/null +++ b/arithmetic_analysis/README.md @@ -0,0 +1,7 @@ +# Arithmetic analysis + +Arithmetic analysis is a branch of mathematics that deals with solving linear equations. + +* +* +* diff --git a/audio_filters/README.md b/audio_filters/README.md new file mode 100644 index 000000000000..4419bd8bdbf9 --- /dev/null +++ b/audio_filters/README.md @@ -0,0 +1,9 @@ +# Audio Filter + +Audio filters work on the frequency of an audio signal to attenuate unwanted frequency and amplify wanted ones. +They are used within anything related to sound, whether it is radio communication or a hi-fi system. + +* +* +* +* diff --git a/backtracking/README.md b/backtracking/README.md new file mode 100644 index 000000000000..d4975dfb5ad7 --- /dev/null +++ b/backtracking/README.md @@ -0,0 +1,8 @@ +# Backtracking + +Backtracking is a way to speed up the search process by removing candidates when they can't be the solution of a problem. 
+ +* +* +* +* diff --git a/bit_manipulation/README.md b/bit_manipulation/README.md index e5f82a270e28..3f5e028beb8e 100644 --- a/bit_manipulation/README.md +++ b/bit_manipulation/README.md @@ -1,6 +1,11 @@ -* https://docs.python.org/3/reference/expressions.html#binary-bitwise-operations -* https://docs.python.org/3/reference/expressions.html#unary-arithmetic-and-bitwise-operations -* https://docs.python.org/3/library/stdtypes.html#bitwise-operations-on-integer-types -* https://wiki.python.org/moin/BitManipulation -* https://wiki.python.org/moin/BitwiseOperators -* https://www.tutorialspoint.com/python3/bitwise_operators_example.htm +# Bit manipulation + +Bit manipulation is the act of manipulating bits to detect errors (hamming code), encrypts and decrypts messages (more on that in the 'ciphers' folder) or just do anything at the lowest level of your computer. + +* +* +* +* +* +* +* diff --git a/boolean_algebra/README.md b/boolean_algebra/README.md new file mode 100644 index 000000000000..45969c855f9c --- /dev/null +++ b/boolean_algebra/README.md @@ -0,0 +1,7 @@ +# Boolean Algebra + +Boolean algebra is used to do arithmetic with bits of values True (1) or False (0). +There are three basic operations: 'and', 'or' and 'not'. + +* +* From 3815a97575a2ab3209fd82ac4077942020c2d9bd Mon Sep 17 00:00:00 2001 From: Sailesh Shrestha <34860977+werewolf-65@users.noreply.github.com> Date: Thu, 4 Nov 2021 21:03:38 +0545 Subject: [PATCH 0383/1543] Add all_construct dynamic programming implementation (#5626) * Add all_construct dynamic programming implementation * all_construct: remove the main function * all_construct: Add type hints * all_construct: changed map to list comprehension,fix mutable default arguments * all_construct: fixed type hints * all_construct: cleaner code for initializing word_bank argument * all_construct: added an import for annotations * all_construct: added None in the argument with word_bank * all_construct: fixed a type hint * all_construct: Fixed some more type hints --- dynamic_programming/all_construct.py | 58 ++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 dynamic_programming/all_construct.py diff --git a/dynamic_programming/all_construct.py b/dynamic_programming/all_construct.py new file mode 100644 index 000000000000..5ffed2caa182 --- /dev/null +++ b/dynamic_programming/all_construct.py @@ -0,0 +1,58 @@ +""" +Program to list all the ways a target string can be +constructed from the given list of substrings +""" +from __future__ import annotations + + +def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]: + """ + returns the list containing all the possible + combinations a string(target) can be constructed from + the given list of substrings(word_bank) + >>> all_construct("hello", ["he", "l", "o"]) + [['he', 'l', 'l', 'o']] + >>> all_construct("purple",["purp","p","ur","le","purpl"]) + [['purp', 'le'], ['p', 'ur', 'p', 'le']] + """ + + word_bank = word_bank or [] + # create a table + table_size: int = len(target) + 1 + + table: list[list[list[str]]] = [] + for i in range(table_size): + table.append([]) + # seed value + table[0] = [[]] # because empty string has empty combination + + # iterate through the indices + for i in range(table_size): + # condition + if table[i] != []: + for word in word_bank: + # slice condition + if target[i : i + len(word)] == word: + new_combinations: list[list[str]] = [ + [word] + way for way in table[i] + ] + # adds the word to every combination the current position holds + # 
now,push that combination to the table[i+len(word)] + table[i + len(word)] += new_combinations + + # combinations are in reverse order so reverse for better output + for combination in table[len(target)]: + combination.reverse() + + return table[len(target)] + + +if __name__ == "__main__": + print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"])) + print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"])) + print( + all_construct( + "hexagonosaurus", + ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"], + ) + ) From b6eb448e63a7eb8b145a600c368419e77872f134 Mon Sep 17 00:00:00 2001 From: Jaydeep Das Date: Thu, 4 Nov 2021 21:06:22 +0530 Subject: [PATCH 0384/1543] Added reddit.py to get data from reddit (#5698) * Rewritten reddit.py * Removed logging module import * Fixed minor bug which was causing extreme rate limiting * Update reddit.py * Update reddit.py * Update reddit.py Co-authored-by: Christian Clauss --- web_programming/reddit.py | 53 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 web_programming/reddit.py diff --git a/web_programming/reddit.py b/web_programming/reddit.py new file mode 100644 index 000000000000..672109f1399d --- /dev/null +++ b/web_programming/reddit.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +import requests + +valid_terms = set( + """approved_at_utc approved_by author_flair_background_color +author_flair_css_class author_flair_richtext author_flair_template_id author_fullname +author_premium can_mod_post category clicked content_categories created_utc downs +edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta +is_original_content is_reddit_media_domain is_video link_flair_css_class +link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title +name permalink pwls quarantine saved score secure_media secure_media_embed selftext +subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type +total_awards_received ups upvote_ratio url user_reports""".split() +) + + +def get_subreddit_data( + subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None +) -> dict: + """ + subreddit : Subreddit to query + limit : Number of posts to fetch + age : ["new", "top", "hot"] + wanted_data : Get only the required data in the list + + >>> pass + """ + wanted_data = wanted_data or [] + if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)): + raise ValueError(f"Invalid search term: {invalid_search_terms}") + response = requests.get( + f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", + headers={"User-agent": "A random string"}, + ) + if response.status_code == 429: + raise requests.HTTPError + + data = response.json() + if not wanted_data: + return {id_: data["data"]["children"][id_] for id_ in range(limit)} + + data_dict = {} + for id_ in range(limit): + data_dict[id_] = { + item: data["data"]["children"][id_]["data"][item] for item in wanted_data + } + return data_dict + + +if __name__ == "__main__": + # If you get Error 429, that means you are rate limited.Try after some time + print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"])) From e835e9685617198095ca44f5d9fda7dc02a3ec83 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 4 Nov 2021 18:37:47 +0300 Subject: [PATCH 0385/1543] Improve Project Euler problem 014 solution 1 (#5747) * Improve solution * Uncomment code that has been commented due to slow 
execution affecting Travis * Fix --- project_euler/problem_014/sol1.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/project_euler/problem_014/sol1.py b/project_euler/problem_014/sol1.py index 43aa4e726af2..0333495908de 100644 --- a/project_euler/problem_014/sol1.py +++ b/project_euler/problem_014/sol1.py @@ -25,9 +25,8 @@ def solution(n: int = 1000000) -> int: n → n/2 (n is even) n → 3n + 1 (n is odd) - # The code below has been commented due to slow execution affecting Travis. - # >>> solution(1000000) - # 837799 + >>> solution(1000000) + 837799 >>> solution(200) 171 >>> solution(5000) @@ -35,14 +34,18 @@ def solution(n: int = 1000000) -> int: >>> solution(15000) 13255 """ - largest_number = 0 - pre_counter = 0 + largest_number = 1 + pre_counter = 1 + counters = {1: 1} - for input1 in range(n): - counter = 1 + for input1 in range(2, n): + counter = 0 number = input1 - while number > 1: + while True: + if number in counters: + counter += counters[number] + break if number % 2 == 0: number //= 2 counter += 1 @@ -50,6 +53,9 @@ def solution(n: int = 1000000) -> int: number = (3 * number) + 1 counter += 1 + if input1 not in counters: + counters[input1] = counter + if counter > pre_counter: largest_number = input1 pre_counter = counter From 7a605766fe7fe79a00ba1f30447877be4b77a6f2 Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Thu, 4 Nov 2021 12:38:43 -0300 Subject: [PATCH 0386/1543] [mypy] Fix type annotations in `data_structures/binary_tree/red_black_tree.py` (#5739) * [mypy] Fix type annotations in red_black_tree.py * Remove blank lines * Update red_black_tree.py --- data_structures/binary_tree/red_black_tree.py | 121 ++++++++++-------- 1 file changed, 69 insertions(+), 52 deletions(-) diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index e27757f20062..35517f307fe1 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -51,6 +51,8 @@ def rotate_left(self) -> RedBlackTree: """ parent = self.parent right = self.right + if right is None: + return self self.right = right.left if self.right: self.right.parent = self @@ -69,6 +71,8 @@ def rotate_right(self) -> RedBlackTree: returns the new root to this subtree. Performing one rotation can be done in O(1). 
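+        The left child becomes the new root of this subtree and the current
+        node becomes the right child of that new root.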
""" + if self.left is None: + return self parent = self.parent left = self.left self.left = left.right @@ -123,23 +127,30 @@ def _insert_repair(self) -> None: if color(uncle) == 0: if self.is_left() and self.parent.is_right(): self.parent.rotate_right() - self.right._insert_repair() + if self.right: + self.right._insert_repair() elif self.is_right() and self.parent.is_left(): self.parent.rotate_left() - self.left._insert_repair() + if self.left: + self.left._insert_repair() elif self.is_left(): - self.grandparent.rotate_right() - self.parent.color = 0 - self.parent.right.color = 1 + if self.grandparent: + self.grandparent.rotate_right() + self.parent.color = 0 + if self.parent.right: + self.parent.right.color = 1 else: - self.grandparent.rotate_left() - self.parent.color = 0 - self.parent.left.color = 1 + if self.grandparent: + self.grandparent.rotate_left() + self.parent.color = 0 + if self.parent.left: + self.parent.left.color = 1 else: self.parent.color = 0 - uncle.color = 0 - self.grandparent.color = 1 - self.grandparent._insert_repair() + if uncle and self.grandparent: + uncle.color = 0 + self.grandparent.color = 1 + self.grandparent._insert_repair() def remove(self, label: int) -> RedBlackTree: """Remove label from this tree.""" @@ -149,8 +160,9 @@ def remove(self, label: int) -> RedBlackTree: # so we replace this node with the greatest one less than # it and remove that. value = self.left.get_max() - self.label = value - self.left.remove(value) + if value is not None: + self.label = value + self.left.remove(value) else: # This node has at most one non-None child, so we don't # need to replace @@ -160,10 +172,11 @@ def remove(self, label: int) -> RedBlackTree: # The only way this happens to a node with one child # is if both children are None leaves. # We can just remove this node and call it a day. - if self.is_left(): - self.parent.left = None - else: - self.parent.right = None + if self.parent: + if self.is_left(): + self.parent.left = None + else: + self.parent.right = None else: # The node is black if child is None: @@ -188,7 +201,7 @@ def remove(self, label: int) -> RedBlackTree: self.left.parent = self if self.right: self.right.parent = self - elif self.label > label: + elif self.label is not None and self.label > label: if self.left: self.left.remove(label) else: @@ -198,6 +211,13 @@ def remove(self, label: int) -> RedBlackTree: def _remove_repair(self) -> None: """Repair the coloring of the tree that may have been messed up.""" + if ( + self.parent is None + or self.sibling is None + or self.parent.sibling is None + or self.grandparent is None + ): + return if color(self.sibling) == 1: self.sibling.color = 0 self.parent.color = 1 @@ -231,7 +251,8 @@ def _remove_repair(self) -> None: ): self.sibling.rotate_right() self.sibling.color = 0 - self.sibling.right.color = 1 + if self.sibling.right: + self.sibling.right.color = 1 if ( self.is_right() and color(self.sibling) == 0 @@ -240,7 +261,8 @@ def _remove_repair(self) -> None: ): self.sibling.rotate_left() self.sibling.color = 0 - self.sibling.left.color = 1 + if self.sibling.left: + self.sibling.left.color = 1 if ( self.is_left() and color(self.sibling) == 0 @@ -275,21 +297,17 @@ def check_color_properties(self) -> bool: """ # I assume property 1 to hold because there is nothing that can # make the color be anything other than 0 or 1. - # Property 2 if self.color: # The root was red print("Property 2") return False - # Property 3 does not need to be checked, because None is assumed # to be black and is all the leaves. 
- # Property 4 if not self.check_coloring(): print("Property 4") return False - # Property 5 if self.black_height() is None: print("Property 5") @@ -297,7 +315,7 @@ def check_color_properties(self) -> bool: # All properties were met return True - def check_coloring(self) -> None: + def check_coloring(self) -> bool: """A helper function to recursively check Property 4 of a Red-Black Tree. See check_color_properties for more info. """ @@ -310,12 +328,12 @@ def check_coloring(self) -> None: return False return True - def black_height(self) -> int: + def black_height(self) -> int | None: """Returns the number of black nodes from this node to the leaves of the tree, or None if there isn't one such value (the tree is color incorrectly). """ - if self is None: + if self is None or self.left is None or self.right is None: # If we're already at a leaf, there is no path return 1 left = RedBlackTree.black_height(self.left) @@ -332,21 +350,21 @@ def black_height(self) -> int: # Here are functions which are general to all binary search trees - def __contains__(self, label) -> bool: + def __contains__(self, label: int) -> bool: """Search through the tree for label, returning True iff it is found somewhere in the tree. Guaranteed to run in O(log(n)) time. """ return self.search(label) is not None - def search(self, label: int) -> RedBlackTree: + def search(self, label: int) -> RedBlackTree | None: """Search through the tree for label, returning its node if it's found, and None otherwise. This method is guaranteed to run in O(log(n)) time. """ if self.label == label: return self - elif label > self.label: + elif self.label is not None and label > self.label: if self.right is None: return None else: @@ -357,12 +375,12 @@ def search(self, label: int) -> RedBlackTree: else: return self.left.search(label) - def floor(self, label: int) -> int: + def floor(self, label: int) -> int | None: """Returns the largest element in this tree which is at most label. This method is guaranteed to run in O(log(n)) time.""" if self.label == label: return self.label - elif self.label > label: + elif self.label is not None and self.label > label: if self.left: return self.left.floor(label) else: @@ -374,13 +392,13 @@ def floor(self, label: int) -> int: return attempt return self.label - def ceil(self, label: int) -> int: + def ceil(self, label: int) -> int | None: """Returns the smallest element in this tree which is at least label. This method is guaranteed to run in O(log(n)) time. """ if self.label == label: return self.label - elif self.label < label: + elif self.label is not None and self.label < label: if self.right: return self.right.ceil(label) else: @@ -392,7 +410,7 @@ def ceil(self, label: int) -> int: return attempt return self.label - def get_max(self) -> int: + def get_max(self) -> int | None: """Returns the largest element in this tree. This method is guaranteed to run in O(log(n)) time. """ @@ -402,7 +420,7 @@ def get_max(self) -> int: else: return self.label - def get_min(self) -> int: + def get_min(self) -> int | None: """Returns the smallest element in this tree. This method is guaranteed to run in O(log(n)) time. 
""" @@ -413,7 +431,7 @@ def get_min(self) -> int: return self.label @property - def grandparent(self) -> RedBlackTree: + def grandparent(self) -> RedBlackTree | None: """Get the current node's grandparent, or None if it doesn't exist.""" if self.parent is None: return None @@ -421,7 +439,7 @@ def grandparent(self) -> RedBlackTree: return self.parent.parent @property - def sibling(self) -> RedBlackTree: + def sibling(self) -> RedBlackTree | None: """Get the current node's sibling, or None if it doesn't exist.""" if self.parent is None: return None @@ -432,11 +450,15 @@ def sibling(self) -> RedBlackTree: def is_left(self) -> bool: """Returns true iff this node is the left child of its parent.""" - return self.parent and self.parent.left is self + if self.parent is None: + return False + return self.parent.left is self.parent.left is self def is_right(self) -> bool: """Returns true iff this node is the right child of its parent.""" - return self.parent and self.parent.right is self + if self.parent is None: + return False + return self.parent.right is self def __bool__(self) -> bool: return True @@ -452,21 +474,21 @@ def __len__(self) -> int: ln += len(self.right) return ln - def preorder_traverse(self) -> Iterator[int]: + def preorder_traverse(self) -> Iterator[int | None]: yield self.label if self.left: yield from self.left.preorder_traverse() if self.right: yield from self.right.preorder_traverse() - def inorder_traverse(self) -> Iterator[int]: + def inorder_traverse(self) -> Iterator[int | None]: if self.left: yield from self.left.inorder_traverse() yield self.label if self.right: yield from self.right.inorder_traverse() - def postorder_traverse(self) -> Iterator[int]: + def postorder_traverse(self) -> Iterator[int | None]: if self.left: yield from self.left.postorder_traverse() if self.right: @@ -488,15 +510,17 @@ def __repr__(self) -> str: indent=1, ) - def __eq__(self, other) -> bool: + def __eq__(self, other: object) -> bool: """Test if two trees are equal.""" + if not isinstance(other, RedBlackTree): + return NotImplemented if self.label == other.label: return self.left == other.left and self.right == other.right else: return False -def color(node) -> int: +def color(node: RedBlackTree | None) -> int: """Returns the color of a node, allowing for None leaves.""" if node is None: return 0 @@ -699,19 +723,12 @@ def main() -> None: >>> pytests() """ print_results("Rotating right and left", test_rotations()) - print_results("Inserting", test_insert()) - print_results("Searching", test_insert_and_search()) - print_results("Deleting", test_insert_delete()) - print_results("Floor and ceil", test_floor_ceil()) - print_results("Tree traversal", test_tree_traversal()) - print_results("Tree traversal", test_tree_chaining()) - print("Testing tree balancing...") print("This should only be a few seconds.") test_insertion_speed() From 729aaf64275c61b8bc864ef9138eed078dea9cb2 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 4 Nov 2021 19:01:21 +0300 Subject: [PATCH 0387/1543] Improve Project Euler problem 014 solution 2 (#5744) * Improve solution * Uncomment code that has been commented due to slow execution affecting Travis * Fix * scikit-fuzzy is causing broken builds * fuzz = None * Update fuzzy_operations.py Co-authored-by: Christian Clauss --- fuzzy_logic/fuzzy_operations.py | 9 +++++++-- project_euler/problem_014/sol2.py | 22 ++++++++++++---------- requirements.txt | 2 +- 3 files changed, 20 insertions(+), 13 deletions(-) diff --git a/fuzzy_logic/fuzzy_operations.py 
b/fuzzy_logic/fuzzy_operations.py index 0f573f158663..fbaca9421327 100644 --- a/fuzzy_logic/fuzzy_operations.py +++ b/fuzzy_logic/fuzzy_operations.py @@ -1,4 +1,5 @@ -"""README, Author - Jigyasa Gandhi(mailto:jigsgandhi97@gmail.com) +""" +README, Author - Jigyasa Gandhi(mailto:jigsgandhi97@gmail.com) Requirements: - scikit-fuzzy - numpy @@ -7,7 +8,11 @@ - 3.5 """ import numpy as np -import skfuzzy as fuzz + +try: + import skfuzzy as fuzz +except ImportError: + fuzz = None if __name__ == "__main__": # Create universe of discourse in Python using linspace () diff --git a/project_euler/problem_014/sol2.py b/project_euler/problem_014/sol2.py index 0a58f8d9a05a..d2a1d9f0e468 100644 --- a/project_euler/problem_014/sol2.py +++ b/project_euler/problem_014/sol2.py @@ -27,25 +27,27 @@ """ from __future__ import annotations +COLLATZ_SEQUENCE_LENGTHS = {1: 1} + def collatz_sequence_length(n: int) -> int: """Returns the Collatz sequence length for n.""" - sequence_length = 1 - while n != 1: - if n % 2 == 0: - n //= 2 - else: - n = 3 * n + 1 - sequence_length += 1 + if n in COLLATZ_SEQUENCE_LENGTHS: + return COLLATZ_SEQUENCE_LENGTHS[n] + if n % 2 == 0: + next_n = n // 2 + else: + next_n = 3 * n + 1 + sequence_length = collatz_sequence_length(next_n) + 1 + COLLATZ_SEQUENCE_LENGTHS[n] = sequence_length return sequence_length def solution(n: int = 1000000) -> int: """Returns the number under n that generates the longest Collatz sequence. - # The code below has been commented due to slow execution affecting Travis. - # >>> solution(1000000) - # 837799 + >>> solution(1000000) + 837799 >>> solution(200) 171 >>> solution(5000) diff --git a/requirements.txt b/requirements.txt index ef4e18043905..e01d87cffabe 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ pandas pillow qiskit requests -scikit-fuzzy +# scikit-fuzzy # Causing broken builds sklearn statsmodels sympy From 7390777f9aa4d60fcd87dc02b7628e61f92edc12 Mon Sep 17 00:00:00 2001 From: Snimerjot Singh Date: Thu, 4 Nov 2021 21:38:18 +0530 Subject: [PATCH 0388/1543] Added 2 shaped in volume.py (#5560) --- maths/volume.py | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/maths/volume.py b/maths/volume.py index fd24aa9eef54..b11995bab917 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -153,6 +153,21 @@ def vol_sphere(radius: float) -> float: return 4 / 3 * pi * pow(radius, 3) +def vol_hemisphere(radius: float): + """Calculate the volume of a hemisphere + Wikipedia reference: https://en.wikipedia.org/wiki/Hemisphere + Other references: https://www.cuemath.com/geometry/hemisphere + :return 2/3 * pi * radius^3 + + >>> vol_hemisphere(1) + 2.0943951023931953 + + >>> vol_hemisphere(7) + 718.3775201208659 + """ + return 2 / 3 * pi * pow(radius, 3) + + def vol_circular_cylinder(radius: float, height: float) -> float: """Calculate the Volume of a Circular Cylinder. Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder @@ -166,6 +181,26 @@ def vol_circular_cylinder(radius: float, height: float) -> float: return pi * pow(radius, 2) * height +def vol_conical_frustum(height: float, radius_1: float, radius_2: float): + """Calculate the Volume of a Conical Frustum. 
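The closed form coded just below is V = pi * height * (radius_1^2 + radius_2^2 + radius_1 * radius_2) / 3. As a stand-alone sanity check of the doctest values, computed directly with math.pi (frustum_volume is an illustrative helper written only for this check, not part of the patch):

# Recompute the conical-frustum volume straight from the formula above;
# the printed values should match the doctests of vol_conical_frustum.
from math import pi

def frustum_volume(height: float, radius_1: float, radius_2: float) -> float:
    return pi * height * (radius_1**2 + radius_2**2 + radius_1 * radius_2) / 3

if __name__ == "__main__":
    print(frustum_volume(1, 1, 2))    # 7.330382858376184  (= 7 * pi / 3)
    print(frustum_volume(45, 7, 28))  # 48490.482608158454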
+    Wikipedia reference: https://en.wikipedia.org/wiki/Frustum
+    :return 1/3 * pi * height * (radius_1^2 + radius_2^2 + radius_1 * radius_2)
+
+    >>> vol_conical_frustum(45, 7, 28)
+    48490.482608158454
+
+    >>> vol_conical_frustum(1, 1, 2)
+    7.330382858376184
+    """
+    return (
+        1
+        / 3
+        * pi
+        * height
+        * (pow(radius_1, 2) + pow(radius_2, 2) + radius_1 * radius_2)
+    )
+
+
 def main():
     """Print the Results of Various Volume Calculations."""
     print("Volumes:")
@@ -176,7 +211,9 @@ def main():
     print("Prism: " + str(vol_prism(2, 2)))  # = 4
     print("Pyramid: " + str(vol_pyramid(2, 2)))  # ~= 1.33
     print("Sphere: " + str(vol_sphere(2)))  # ~= 33.5
+    print("Hemisphere: " + str(vol_hemisphere(2)))  # ~= 16.75
     print("Circular Cylinder: " + str(vol_circular_cylinder(2, 2)))  # ~= 25.1
+    print("Conical Frustum: " + str(vol_conical_frustum(2, 2, 4)))  # ~= 58.6
     print("Spherical cap: " + str(vol_spherical_cap(1, 2)))  # ~= 5.24
     print("Spheres intersection: " + str(vol_spheres_intersect(2, 2, 1)))  # ~= 21.21

From dbddac74d3dbc0dc7c3d710ac3b42839685160a6 Mon Sep 17 00:00:00 2001
From: Boris Galochkin
Date: Thu, 4 Nov 2021 19:51:31 +0300
Subject: [PATCH 0389/1543] Fix `graphs/finding_bridges.py` algorithm + doctests (#5765)

* Fix finding_bridges algorithms + tests
* update type hints
* Better, more obvious condition fix
* fix prev commit + more tests
* Short explanation + url
* Update finding_bridges.py

Co-authored-by: Christian Clauss
---
 graphs/finding_bridges.py | 98 +++++++++++++++++++++++++++++++--------
 1 file changed, 78 insertions(+), 20 deletions(-)

diff --git a/graphs/finding_bridges.py b/graphs/finding_bridges.py
index 6555dd7bc29e..a877a97489be 100644
--- a/graphs/finding_bridges.py
+++ b/graphs/finding_bridges.py
@@ -1,5 +1,77 @@
-# Finding Bridges in Undirected Graph
-def computeBridges(graph):
+"""
+An edge is a bridge if, after removing it, the count of connected components in the
+graph increases by one. Bridges represent vulnerabilities in a connected network and are
+useful for designing reliable networks. For example, in a wired computer network, an
+articulation point indicates the critical computers and a bridge indicates the critical
+wires or connections.
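As a concrete illustration of that definition, a brute-force check simply deletes one edge at a time and counts connected components again. The sketch below is stand-alone with illustrative names; it is not the low-link based compute_bridges defined in this file, but it agrees with the definition on a small graph.

from __future__ import annotations

# Brute-force bridge check: an edge is a bridge iff removing it increases
# the number of connected components of the graph.
def count_components(graph: dict[int, list[int]], skip: tuple[int, int] | None = None) -> int:
    seen: set[int] = set()
    components = 0
    for start in graph:
        if start in seen:
            continue
        components += 1
        stack = [start]
        while stack:
            node = stack.pop()
            if node in seen:
                continue
            seen.add(node)
            for neighbour in graph[node]:
                if skip is not None and {node, neighbour} == set(skip):
                    continue  # pretend this edge has been removed
                stack.append(neighbour)
    return components

def naive_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    base = count_components(graph)
    edges = {tuple(sorted((u, v))) for u in graph for v in graph[u]}
    return sorted(edge for edge in edges if count_components(graph, skip=edge) > base)

if __name__ == "__main__":
    # A triangle 0-1-2 with a pendant vertex 3: only (2, 3) is a bridge.
    demo = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2]}
    print(naive_bridges(demo))  # [(2, 3)]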
+ +For more details, refer this article: +https://www.geeksforgeeks.org/bridge-in-a-graph/ +""" + + +def __get_demo_graph(index): + return [ + { + 0: [1, 2], + 1: [0, 2], + 2: [0, 1, 3, 5], + 3: [2, 4], + 4: [3], + 5: [2, 6, 8], + 6: [5, 7], + 7: [6, 8], + 8: [5, 7], + }, + { + 0: [6], + 1: [9], + 2: [4, 5], + 3: [4], + 4: [2, 3], + 5: [2], + 6: [0, 7], + 7: [6], + 8: [], + 9: [1], + }, + { + 0: [4], + 1: [6], + 2: [], + 3: [5, 6, 7], + 4: [0, 6], + 5: [3, 8, 9], + 6: [1, 3, 4, 7], + 7: [3, 6, 8, 9], + 8: [5, 7], + 9: [5, 7], + }, + { + 0: [1, 3], + 1: [0, 2, 4], + 2: [1, 3, 4], + 3: [0, 2, 4], + 4: [1, 2, 3], + }, + ][index] + + +def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]: + """ + Return the list of undirected graph bridges [(a1, b1), ..., (ak, bk)]; ai <= bi + >>> compute_bridges(__get_demo_graph(0)) + [(3, 4), (2, 3), (2, 5)] + >>> compute_bridges(__get_demo_graph(1)) + [(6, 7), (0, 6), (1, 9), (3, 4), (2, 4), (2, 5)] + >>> compute_bridges(__get_demo_graph(2)) + [(1, 6), (4, 6), (0, 4)] + >>> compute_bridges(__get_demo_graph(3)) + [] + >>> compute_bridges({}) + [] + """ + id = 0 n = len(graph) # No of vertices in graph low = [0] * n @@ -15,28 +87,14 @@ def dfs(at, parent, bridges, id): elif not visited[to]: dfs(to, at, bridges, id) low[at] = min(low[at], low[to]) - if at < low[to]: - bridges.append([at, to]) + if id <= low[to]: + bridges.append((at, to) if at < to else (to, at)) else: # This edge is a back edge and cannot be a bridge - low[at] = min(low[at], to) + low[at] = min(low[at], low[to]) bridges = [] for i in range(n): if not visited[i]: dfs(i, -1, bridges, id) - print(bridges) - - -graph = { - 0: [1, 2], - 1: [0, 2], - 2: [0, 1, 3, 5], - 3: [2, 4], - 4: [3], - 5: [2, 6, 8], - 6: [5, 7], - 7: [6, 8], - 8: [5, 7], -} -computeBridges(graph) + return bridges From 6b2b476f8633504e4c032dc948ab7dda04cd3d3f Mon Sep 17 00:00:00 2001 From: Leoriem-code <73761711+Leoriem-code@users.noreply.github.com> Date: Fri, 5 Nov 2021 06:06:37 +0100 Subject: [PATCH 0390/1543] fix typo on line 126 (#5768) --- scheduling/shortest_job_first.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index 17409108a34e..9372e9dbc3f4 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -123,7 +123,7 @@ def calculate_average_times( processes = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): - print("Enter the arrival time and brust time for process:--" + str(i + 1)) + print("Enter the arrival time and burst time for process:--" + str(i + 1)) arrival_time[i], burst_time[i] = map(int, input().split()) waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes) From 48960268a2629fe0549ce7a67619852be91baa5f Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Sat, 6 Nov 2021 01:13:52 +0530 Subject: [PATCH 0391/1543] Improve Project Euler Problem 10 Sol-1 (#5773) * Improve Project Euler Problem 10 Sol-1 * Name correction * psf/black formatting * More formatting --- project_euler/problem_010/sol1.py | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/project_euler/problem_010/sol1.py b/project_euler/problem_010/sol1.py index bd49b3523c97..e060761eecab 100644 --- a/project_euler/problem_010/sol1.py +++ b/project_euler/problem_010/sol1.py @@ -28,11 +28,11 @@ def is_prime(n: int) -> bool: True """ - for i in range(2, int(sqrt(n)) + 1): - if n % i == 0: - return False - - return True + if 1 < n < 
4: + return True + elif n < 2 or not n % 2: + return False + return not any(not n % i for i in range(3, int(sqrt(n) + 1), 2)) def solution(n: int = 2000000) -> int: @@ -49,16 +49,7 @@ def solution(n: int = 2000000) -> int: 10 """ - if n > 2: - sum_of_primes = 2 - else: - return 0 - - for i in range(3, n, 2): - if is_prime(i): - sum_of_primes += i - - return sum_of_primes + return sum(num for num in range(3, n, 2) if is_prime(num)) + 2 if n > 2 else 0 if __name__ == "__main__": From 1a43c92c77790632e7958df9c1048ed77aa36f68 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Fri, 5 Nov 2021 22:44:24 +0300 Subject: [PATCH 0392/1543] Improve Project Euler problem 043 solution 1 (#5772) * updating DIRECTORY.md * Fix typo * Improve solution Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 +++ project_euler/problem_043/sol1.py | 21 ++++++++++++++------- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index fd164c92e11c..32ca8cd3b256 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -253,6 +253,7 @@ ## Dynamic Programming * [Abbreviation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/abbreviation.py) + * [All Construct](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/all_construct.py) * [Bitmask](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/bitmask.py) * [Catalan Numbers](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/catalan_numbers.py) * [Climbing Stairs](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/climbing_stairs.py) @@ -612,6 +613,7 @@ ## Physics * [N Body Simulation](https://github.com/TheAlgorithms/Python/blob/master/physics/n_body_simulation.py) + * [Newtons Second Law Of Motion](https://github.com/TheAlgorithms/Python/blob/master/physics/newtons_second_law_of_motion.py) ## Project Euler * Problem 001 @@ -1018,6 +1020,7 @@ * [Nasa Data](https://github.com/TheAlgorithms/Python/blob/master/web_programming/nasa_data.py) * [Random Anime Character](https://github.com/TheAlgorithms/Python/blob/master/web_programming/random_anime_character.py) * [Recaptcha Verification](https://github.com/TheAlgorithms/Python/blob/master/web_programming/recaptcha_verification.py) + * [Reddit](https://github.com/TheAlgorithms/Python/blob/master/web_programming/reddit.py) * [Search Books By Isbn](https://github.com/TheAlgorithms/Python/blob/master/web_programming/search_books_by_isbn.py) * [Slack Message](https://github.com/TheAlgorithms/Python/blob/master/web_programming/slack_message.py) * [Test Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/test_fetch_github_info.py) diff --git a/project_euler/problem_043/sol1.py b/project_euler/problem_043/sol1.py index 1febe4a4d37f..c533f40da9c9 100644 --- a/project_euler/problem_043/sol1.py +++ b/project_euler/problem_043/sol1.py @@ -33,9 +33,18 @@ def is_substring_divisible(num: tuple) -> bool: >>> is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)) True """ - tests = [2, 3, 5, 7, 11, 13, 17] + if num[3] % 2 != 0: + return False + + if (num[2] + num[3] + num[4]) % 3 != 0: + return False + + if num[5] % 5 != 0: + return False + + tests = [7, 11, 13, 17] for i, test in enumerate(tests): - if (num[i + 1] * 100 + num[i + 2] * 10 + num[i + 3]) % test != 0: + if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True @@ -43,17 +52,15 @@ def is_substring_divisible(num: tuple) -> bool: def 
solution(n: int = 10) -> int: """ Returns the sum of all pandigital numbers which pass the - divisiility tests. + divisibility tests. >>> solution(10) 16695334890 """ - list_nums = [ + return sum( int("".join(map(str, num))) for num in permutations(range(n)) if is_substring_divisible(num) - ] - - return sum(list_nums) + ) if __name__ == "__main__": From e7381b513b526e2f3ca134022389832778bdf080 Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Fri, 5 Nov 2021 16:45:37 -0300 Subject: [PATCH 0393/1543] [mypy] Fix type annotations in `data_structures/stacks/next_greater_element.py` (#5763) * Fix type annotations in next_greater_element.py * Refactor next_greater_element.py --- .../stacks/next_greater_element.py | 35 ++++++++++--------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/data_structures/stacks/next_greater_element.py b/data_structures/stacks/next_greater_element.py index d8c7ed17317b..5bab7c609b67 100644 --- a/data_structures/stacks/next_greater_element.py +++ b/data_structures/stacks/next_greater_element.py @@ -1,8 +1,10 @@ +from __future__ import annotations + arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0] expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] -def next_greatest_element_slow(arr: list) -> list: +def next_greatest_element_slow(arr: list[float]) -> list[float]: """ Get the Next Greatest Element (NGE) for all elements in a list. Maximum element present after the current one which is also greater than the @@ -10,10 +12,13 @@ def next_greatest_element_slow(arr: list) -> list: >>> next_greatest_element_slow(arr) == expect True """ + result = [] - for i in range(0, len(arr), 1): - next = -1 - for j in range(i + 1, len(arr), 1): + arr_size = len(arr) + + for i in range(arr_size): + next: float = -1 + for j in range(i + 1, arr_size): if arr[i] < arr[j]: next = arr[j] break @@ -21,7 +26,7 @@ def next_greatest_element_slow(arr: list) -> list: return result -def next_greatest_element_fast(arr: list) -> list: +def next_greatest_element_fast(arr: list[float]) -> list[float]: """ Like next_greatest_element_slow() but changes the loops to use enumerate() instead of range(len()) for the outer loop and @@ -31,7 +36,7 @@ def next_greatest_element_fast(arr: list) -> list: """ result = [] for i, outer in enumerate(arr): - next = -1 + next: float = -1 for inner in arr[i + 1 :]: if outer < inner: next = inner @@ -40,7 +45,7 @@ def next_greatest_element_fast(arr: list) -> list: return result -def next_greatest_element(arr: list) -> list: +def next_greatest_element(arr: list[float]) -> list[float]: """ Get the Next Greatest Element (NGE) for all elements in a list. 
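To make the idea concrete: for [2, 1, 2, 4, 3] the next greater elements are [4, 2, 4, -1, -1]. A stand-alone monotonic-stack sketch with illustrative names (the file's own stack-based next_greatest_element is updated further down in this patch):

# Scan right to left; the stack keeps candidates increasing from top to bottom,
# so its top is the nearest greater element to the right of the current value.
def next_greater(values: list[int]) -> list[int]:
    result = [-1] * len(values)
    stack: list[int] = []
    for i in range(len(values) - 1, -1, -1):
        while stack and stack[-1] <= values[i]:
            stack.pop()
        if stack:
            result[i] = stack[-1]
        stack.append(values[i])
    return result

if __name__ == "__main__":
    print(next_greater([2, 1, 2, 4, 3]))  # [4, 2, 4, -1, -1]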
Maximum element present after the current one which is also greater than the @@ -53,21 +58,19 @@ def next_greatest_element(arr: list) -> list: >>> next_greatest_element(arr) == expect True """ - stack = [] - result = [-1] * len(arr) + arr_size = len(arr) + stack: list[float] = [] + result: list[float] = [-1] * arr_size - for index in reversed(range(len(arr))): - if len(stack): + for index in reversed(range(arr_size)): + if stack: while stack[-1] <= arr[index]: stack.pop() - if len(stack) == 0: + if not stack: break - - if len(stack) != 0: + if stack: result[index] = stack[-1] - stack.append(arr[index]) - return result From 8ac86f2ce559e323064b09a54267d35cf3c51ec6 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 6 Nov 2021 13:58:15 +0100 Subject: [PATCH 0394/1543] mypy: Exclude only 20 files that are still failing (#5608) * DRAFT: Run a mypy reality check Let's see what is required to finish #4052 * mypy --ignore-missing-imports --install-types --non-interactive * Check our progress... * Update build.yml * Update build.yml * Update build.yml * Update build.yml * mypy --exclude 20 files * --exclude with no `=` * Update build.yml * 558 character regex!!! * With quotes * mypy.ini: mega exclude * Update mypy.ini * Update build.yml * Update mypy.ini * Update build.yml * Update mypy.ini * .py --> .p* * Escape the dots!: `.` --> `\.` * Remove the comment * Leading slash * Update mypy.ini Co-authored-by: Dylan Buchi Co-authored-by: Dylan Buchi --- .github/workflows/build.yml | 2 +- mypy.ini | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f710e1e0ed54..e5f8d6b39a7b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,7 +21,7 @@ jobs: run: | python -m pip install --upgrade pip setuptools six wheel python -m pip install mypy pytest-cov -r requirements.txt - - run: mypy --install-types --non-interactive . + - run: mypy . # See `mypy.ini` for configuration settings. - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . 
- if: ${{ success() }} diff --git a/mypy.ini b/mypy.ini index ba552f878e30..1a2282c44846 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,5 +1,6 @@ [mypy] ignore_missing_imports = True +install_types = True +non_interactive = True +exclude = (data_structures/stacks/next_greater_element.py|graphs/boruvka.py|graphs/breadth_first_search.py|graphs/breadth_first_search_2.py|graphs/check_cycle.py|graphs/finding_bridges.py|graphs/greedy_min_vertex_cover.py|graphs/random_graph_generator.py|maths/average_mode.py|maths/gamma_recursive.py|maths/proth_number.py|maths/series/geometric_series.py|maths/series/p_series.py|matrix_operation.py|other/fischer_yates_shuffle.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py|searches/simulated_annealing.py|searches/ternary_search.py) -; FIXME: #4052 fix mypy errors in the exclude directories and remove them below -exclude = (data_structures|graphs|maths|matrix|other|searches)/$ From accee50cde961af501bd0c6f424cd07dc5d63269 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Sun, 7 Nov 2021 15:44:42 +0530 Subject: [PATCH 0395/1543] [mypy] Fix `other/fischer_yates_shuffle.py` (#5789) * [mypy] Fix `other/fischer_yates_shuffle.py` * Update mypy.ini --- mypy.ini | 2 +- other/fischer_yates_shuffle.py | 13 +++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/mypy.ini b/mypy.ini index 1a2282c44846..123ffae851a5 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,5 +2,5 @@ ignore_missing_imports = True install_types = True non_interactive = True -exclude = (data_structures/stacks/next_greater_element.py|graphs/boruvka.py|graphs/breadth_first_search.py|graphs/breadth_first_search_2.py|graphs/check_cycle.py|graphs/finding_bridges.py|graphs/greedy_min_vertex_cover.py|graphs/random_graph_generator.py|maths/average_mode.py|maths/gamma_recursive.py|maths/proth_number.py|maths/series/geometric_series.py|maths/series/p_series.py|matrix_operation.py|other/fischer_yates_shuffle.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py|searches/simulated_annealing.py|searches/ternary_search.py) +exclude = (data_structures/stacks/next_greater_element.py|graphs/boruvka.py|graphs/breadth_first_search.py|graphs/breadth_first_search_2.py|graphs/check_cycle.py|graphs/finding_bridges.py|graphs/greedy_min_vertex_cover.py|graphs/random_graph_generator.py|maths/average_mode.py|maths/gamma_recursive.py|maths/proth_number.py|maths/series/geometric_series.py|maths/series/p_series.py|matrix_operation.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py|searches/simulated_annealing.py|searches/ternary_search.py) diff --git a/other/fischer_yates_shuffle.py b/other/fischer_yates_shuffle.py index 035fcb482380..fa2f4dce9db0 100644 --- a/other/fischer_yates_shuffle.py +++ b/other/fischer_yates_shuffle.py @@ -6,14 +6,15 @@ wikipedia/Fischer-Yates-Shuffle. 
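For comparison, the textbook Fisher-Yates shuffle walks the list from the back and swaps each position with a uniformly chosen earlier (or same) index, which yields every permutation with equal probability; the repository function patched below instead swaps randomly chosen pairs. A stand-alone sketch of the classic form, illustrative only:

import random
from typing import Any

# Classic Fisher-Yates: for i = n-1 .. 1, swap item i with a random j in [0, i].
def classic_fisher_yates(data: list[Any]) -> list[Any]:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # randint is inclusive on both ends
        data[i], data[j] = data[j], data[i]
    return data

if __name__ == "__main__":
    print(classic_fisher_yates([1, 2, 3, 4, 5]))  # order is random, e.g. [3, 5, 1, 2, 4]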
""" import random +from typing import Any -def fisher_yates_shuffle(data: list) -> list: - for _ in range(len(list)): - a = random.randint(0, len(list) - 1) - b = random.randint(0, len(list) - 1) - list[a], list[b] = list[b], list[a] - return list +def fisher_yates_shuffle(data: list) -> list[Any]: + for _ in range(len(data)): + a = random.randint(0, len(data) - 1) + b = random.randint(0, len(data) - 1) + data[a], data[b] = data[b], data[a] + return data if __name__ == "__main__": From db5aa1d18890439e4108fa416679dbab5859f30c Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Sun, 7 Nov 2021 20:10:23 +0530 Subject: [PATCH 0396/1543] Add equated_monthly_installments.py in Financials (#5775) * Add equated_monthly_installments.py in Financials * Formatting * More formatting, Descriptive names * Errors with name change * Formatting * Formatting, Naming Error * dedent * Update DIRECTORY.md --- DIRECTORY.md | 1 + financial/equated_monthly_installments.py | 61 +++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 financial/equated_monthly_installments.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 32ca8cd3b256..f515277f403e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -298,6 +298,7 @@ ## Financial * [Interest](https://github.com/TheAlgorithms/Python/blob/master/financial/interest.py) + * [EMI Calculation](https://github.com/TheAlgorithms/Python/blob/master/financial/equated_monthly_installments.py) ## Fractals * [Julia Sets](https://github.com/TheAlgorithms/Python/blob/master/fractals/julia_sets.py) diff --git a/financial/equated_monthly_installments.py b/financial/equated_monthly_installments.py new file mode 100644 index 000000000000..3af9224930b5 --- /dev/null +++ b/financial/equated_monthly_installments.py @@ -0,0 +1,61 @@ +""" +Program to calculate the amortization amount per month, given +- Principal borrowed +- Rate of interest per annum +- Years to repay the loan + +Wikipedia Reference: https://en.wikipedia.org/wiki/Equated_monthly_installment +""" + + +def equated_monthly_installments( + principal: float, rate_per_annum: float, years_to_repay: int +) -> float: + """ + Formula for amortization amount per month: + A = p * r * (1 + r)^n / ((1 + r)^n - 1) + where p is the principal, r is the rate of interest per month + and n is the number of payments + + >>> equated_monthly_installments(25000, 0.12, 3) + 830.3577453212793 + >>> equated_monthly_installments(25000, 0.12, 10) + 358.67737100646826 + >>> equated_monthly_installments(0, 0.12, 3) + Traceback (most recent call last): + ... + Exception: Principal borrowed must be > 0 + >>> equated_monthly_installments(25000, -1, 3) + Traceback (most recent call last): + ... + Exception: Rate of interest must be >= 0 + >>> equated_monthly_installments(25000, 0.12, 0) + Traceback (most recent call last): + ... 
+ Exception: Years to repay must be an integer > 0 + """ + if principal <= 0: + raise Exception("Principal borrowed must be > 0") + if rate_per_annum < 0: + raise Exception("Rate of interest must be >= 0") + if years_to_repay <= 0 or not isinstance(years_to_repay, int): + raise Exception("Years to repay must be an integer > 0") + + # Yearly rate is divided by 12 to get monthly rate + rate_per_month = rate_per_annum / 12 + + # Years to repay is multiplied by 12 to get number of payments as payment is monthly + number_of_payments = years_to_repay * 12 + + return ( + principal + * rate_per_month + * (1 + rate_per_month) ** number_of_payments + / ((1 + rate_per_month) ** number_of_payments - 1) + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a98465230f21e6ece76332eeca1558613788c387 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Sun, 7 Nov 2021 20:43:58 +0530 Subject: [PATCH 0397/1543] [mypy] Fix type annotations for maths directory (#5782) * [mypy] Fix annotations in `maths/series/p_series.py` * Update p_series.py * Update p_series.py * Remove from excluded in mypy.ini * Type annotation for series * Annotate maths/proth_number.py (properly) * Remove from excluded in mypy.ini * Annotate average_mode.py * Update average_mode.py * Update average_mode.py * Update average_mode.py * Update average_mode.py * Remove from excluded in mypy.ini * Fix annotations in gamma_recursive.py * Remove from excluded in mypy.ini * Annotations for geometric_series.py * Update geometric_series.py * Update mypy.ini * Update average_mode.py * Update average_mode.py * Update average_mode.py * Update mypy.ini * Update mypy.ini * Update mypy.ini * Update average_mode.py * Update proth_number.py * Update average_mode.py * Update gamma_recursive.py * Update proth_number.py * Update mypy.ini * Update geometric_series.py * Update average_mode.py * Update proth_number.py * Update geometric_series.py * Update geometric_series.py * Update geometric_series.py * Update p_series.py * Update geometric_series.py * Update p_series.py * Update p_series.py * Update geometric_series.py * Update p_series.py * Update p_series.py * Remove data_structures/stacks/next_greater_element.py| Co-authored-by: Christian Clauss --- maths/average_mode.py | 31 ++++++++++------------ maths/gamma_recursive.py | 3 +-- maths/proth_number.py | 14 ++++------ maths/series/geometric_series.py | 44 ++++++++++++++++++++------------ maths/series/p_series.py | 32 +++++++++++++---------- mypy.ini | 3 +-- 6 files changed, 66 insertions(+), 61 deletions(-) diff --git a/maths/average_mode.py b/maths/average_mode.py index 83db820072bf..40f88f41f8ca 100644 --- a/maths/average_mode.py +++ b/maths/average_mode.py @@ -1,34 +1,29 @@ -def mode(input_list: list) -> list: # Defining function "mode." +from typing import Any + + +def mode(input_list: list) -> list[Any]: """This function returns the mode(Mode as in the measures of central tendency) of the input data. The input list may contain any Datastructure or any Datatype. 
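The mode(s) can equivalently be read off a frequency table; a stand-alone sketch with collections.Counter, given as an aside rather than the repository implementation:

from collections import Counter

# Keep every value whose count equals the maximum count, sorted for stable output.
def modes(values: list) -> list:
    if not values:
        return []
    counts = Counter(values)
    top = max(counts.values())
    return sorted(value for value, count in counts.items() if count == top)

if __name__ == "__main__":
    print(modes([3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 4, 2, 2, 4, 2]))  # [2, 4]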
- >>> input_list = [2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2] - >>> mode(input_list) + >>> mode([2, 3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 2, 2, 2]) [2] - >>> input_list = [3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 2, 2, 2] - >>> mode(input_list) + >>> mode([3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 2, 2, 2]) [2] - >>> input_list = [3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 4, 2, 2, 4, 2] - >>> mode(input_list) + >>> mode([3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4, 4, 2, 2, 4, 2]) [2, 4] - >>> input_list = ["x", "y", "y", "z"] - >>> mode(input_list) + >>> mode(["x", "y", "y", "z"]) ['y'] - >>> input_list = ["x", "x" , "y", "y", "z"] - >>> mode(input_list) + >>> mode(["x", "x" , "y", "y", "z"]) ['x', 'y'] """ - result = list() # Empty list to store the counts of elements in input_list - for x in input_list: - result.append(input_list.count(x)) - if not result: + if not input_list: return [] - y = max(result) # Gets the maximum value in the result list. + result = [input_list.count(value) for value in input_list] + y = max(result) # Gets the maximum count in the input list. # Gets values of modes - result = {input_list[i] for i, value in enumerate(result) if value == y} - return sorted(result) + return sorted({input_list[i] for i, value in enumerate(result) if value == y}) if __name__ == "__main__": diff --git a/maths/gamma_recursive.py b/maths/gamma_recursive.py index 683d7adb1aa8..3d6b8c5e8138 100644 --- a/maths/gamma_recursive.py +++ b/maths/gamma_recursive.py @@ -2,7 +2,6 @@ Gamma function is a very useful tool in math and physics. It helps calculating complex integral in a convenient way. for more info: https://en.wikipedia.org/wiki/Gamma_function - Python's Standard Library math.gamma() function overflows around gamma(171.624). """ from math import pi, sqrt @@ -71,7 +70,7 @@ def test_gamma() -> None: from doctest import testmod testmod() - num = 1 + num = 1.0 while num: num = float(input("Gamma of: ")) print(f"gamma({num}) = {gamma(num)}") diff --git a/maths/proth_number.py b/maths/proth_number.py index 065244ed7607..e175031435b0 100644 --- a/maths/proth_number.py +++ b/maths/proth_number.py @@ -1,6 +1,5 @@ """ Calculate the nth Proth number - Source: https://handwiki.org/wiki/Proth_number """ @@ -12,22 +11,17 @@ def proth(number: int) -> int: """ :param number: nth number to calculate in the sequence :return: the nth number in Proth number - Note: indexing starts at 1 i.e. proth(1) gives the first Proth number of 3 - >>> proth(6) 25 - >>> proth(0) Traceback (most recent call last): ... ValueError: Input value of [number=0] must be > 0 - >>> proth(-1) Traceback (most recent call last): ... ValueError: Input value of [number=-1] must be > 0 - >>> proth(6.0) Traceback (most recent call last): ... @@ -44,14 +38,12 @@ def proth(number: int) -> int: elif number == 2: return 5 else: - block_index = number // 3 """ +1 for binary starting at 0 i.e. 2^0, 2^1, etc. 
+1 to start the sequence at the 3rd Proth number Hence, we have a +2 in the below statement """ - block_index = math.log(block_index, 2) + 2 - block_index = int(block_index) + block_index = int(math.log(number // 3, 2)) + 2 proth_list = [3, 5] proth_index = 2 @@ -66,6 +58,10 @@ def proth(number: int) -> int: if __name__ == "__main__": + import doctest + + doctest.testmod() + for number in range(11): value = 0 try: diff --git a/maths/series/geometric_series.py b/maths/series/geometric_series.py index d12382e6d8c4..a875ab89a0c5 100644 --- a/maths/series/geometric_series.py +++ b/maths/series/geometric_series.py @@ -1,7 +1,6 @@ """ This is a pure Python implementation of the Geometric Series algorithm https://en.wikipedia.org/wiki/Geometric_series - Run the doctests with the following command: python3 -m doctest -v geometric_series.py or @@ -11,8 +10,17 @@ """ -def geometric_series(nth_term: int, start_term_a: int, common_ratio_r: int) -> list: - """Pure Python implementation of Geometric Series algorithm +from __future__ import annotations + + +def geometric_series( + nth_term: float | int, + start_term_a: float | int, + common_ratio_r: float | int, +) -> list[float | int]: + """ + Pure Python implementation of Geometric Series algorithm + :param nth_term: The last term (nth term of Geometric Series) :param start_term_a : The first term of Geometric Series :param common_ratio_r : The common ratio between all the terms @@ -20,15 +28,15 @@ def geometric_series(nth_term: int, start_term_a: int, common_ratio_r: int) -> l ration with first term with increase in power till last term (nth term) Examples: >>> geometric_series(4, 2, 2) - [2, '4.0', '8.0', '16.0'] + [2, 4.0, 8.0, 16.0] >>> geometric_series(4.0, 2.0, 2.0) - [2.0, '4.0', '8.0', '16.0'] + [2.0, 4.0, 8.0, 16.0] >>> geometric_series(4.1, 2.1, 2.1) - [2.1, '4.41', '9.261000000000001', '19.448100000000004'] + [2.1, 4.41, 9.261000000000001, 19.448100000000004] >>> geometric_series(4, 2, -2) - [2, '-4.0', '8.0', '-16.0'] + [2, -4.0, 8.0, -16.0] >>> geometric_series(4, -2, 2) - [-2, '-4.0', '-8.0', '-16.0'] + [-2, -4.0, -8.0, -16.0] >>> geometric_series(-4, 2, 2) [] >>> geometric_series(0, 100, 500) @@ -38,9 +46,9 @@ def geometric_series(nth_term: int, start_term_a: int, common_ratio_r: int) -> l >>> geometric_series(0, 0, 0) [] """ - if "" in (nth_term, start_term_a, common_ratio_r): - return "" - series = [] + if not all((nth_term, start_term_a, common_ratio_r)): + return [] + series: list[float | int] = [] power = 1 multiple = common_ratio_r for _ in range(int(nth_term)): @@ -48,16 +56,20 @@ def geometric_series(nth_term: int, start_term_a: int, common_ratio_r: int) -> l series.append(start_term_a) else: power += 1 - series.append(str(float(start_term_a) * float(multiple))) + series.append(float(start_term_a * multiple)) multiple = pow(float(common_ratio_r), power) return series if __name__ == "__main__": - nth_term = input("Enter the last number (n term) of the Geometric Series") - start_term_a = input("Enter the starting term (a) of the Geometric Series") - common_ratio_r = input( - "Enter the common ratio between two terms (r) of the Geometric Series" + import doctest + + doctest.testmod() + + nth_term = float(input("Enter the last number (n term) of the Geometric Series")) + start_term_a = float(input("Enter the starting term (a) of the Geometric Series")) + common_ratio_r = float( + input("Enter the common ratio between two terms (r) of the Geometric Series") ) print("Formula of Geometric Series => a + ar + ar^2 ... 
+ar^n") print(geometric_series(nth_term, start_term_a, common_ratio_r)) diff --git a/maths/series/p_series.py b/maths/series/p_series.py index 04019aed5a85..34fa3f2399af 100644 --- a/maths/series/p_series.py +++ b/maths/series/p_series.py @@ -1,48 +1,52 @@ """ This is a pure Python implementation of the P-Series algorithm https://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#P-series - For doctests run following command: python -m doctest -v p_series.py or python3 -m doctest -v p_series.py - For manual testing run: python3 p_series.py """ -def p_series(nth_term: int, power: int) -> list: - """Pure Python implementation of P-Series algorithm +from __future__ import annotations - :return: The P-Series starting from 1 to last (nth) term +def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]: + """ + Pure Python implementation of P-Series algorithm + :return: The P-Series starting from 1 to last (nth) term Examples: >>> p_series(5, 2) - [1, '1/4', '1/9', '1/16', '1/25'] + ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'] >>> p_series(-5, 2) [] >>> p_series(5, -2) - [1, '1/0.25', '1/0.1111111111111111', '1/0.0625', '1/0.04'] + ['1', '1 / 0.25', '1 / 0.1111111111111111', '1 / 0.0625', '1 / 0.04'] >>> p_series("", 1000) - '' + [''] >>> p_series(0, 0) [] >>> p_series(1, 1) - [1] + ['1'] """ if nth_term == "": - return nth_term + return [""] nth_term = int(nth_term) power = int(power) - series = [] + series: list[str] = [] for temp in range(int(nth_term)): - series.append(f"1/{pow(temp + 1, int(power))}" if series else 1) + series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1") return series if __name__ == "__main__": - nth_term = input("Enter the last number (nth term) of the P-Series") - power = input("Enter the power for P-Series") + import doctest + + doctest.testmod() + + nth_term = int(input("Enter the last number (nth term) of the P-Series")) + power = int(input("Enter the power for P-Series")) print("Formula of P-Series => 1+1/2^p+1/3^p ..... 
1/n^p") print(p_series(nth_term, power)) diff --git a/mypy.ini b/mypy.ini index 123ffae851a5..df69fa841cef 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,5 +2,4 @@ ignore_missing_imports = True install_types = True non_interactive = True -exclude = (data_structures/stacks/next_greater_element.py|graphs/boruvka.py|graphs/breadth_first_search.py|graphs/breadth_first_search_2.py|graphs/check_cycle.py|graphs/finding_bridges.py|graphs/greedy_min_vertex_cover.py|graphs/random_graph_generator.py|maths/average_mode.py|maths/gamma_recursive.py|maths/proth_number.py|maths/series/geometric_series.py|maths/series/p_series.py|matrix_operation.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py|searches/simulated_annealing.py|searches/ternary_search.py) - +exclude = (graphs/boruvka.py|graphs/breadth_first_search.py|graphs/breadth_first_search_2.py|graphs/check_cycle.py|graphs/finding_bridges.py|graphs/greedy_min_vertex_cover.py|graphs/random_graph_generator.py|matrix_operation.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py|searches/simulated_annealing.py|searches/ternary_search.py) From 2f6a7ae1fa44514f52f9a97f83d7bbb2b18e53f2 Mon Sep 17 00:00:00 2001 From: Khoi Vo Date: Mon, 8 Nov 2021 12:35:40 +0700 Subject: [PATCH 0398/1543] ADD the algorithms of image augmentation (#5792) * ADD the algorithms of image augmentation * ADD the algorithms of image augmentation * ADD the algorithms of image augmentation * ADD the algorithms of image augmentation * ADD the algorithms of image augmentation * ADD the algorithms of image augmentation * UPDATE format code * UPDATE format and recode structure * UPDATE format import library * UPDATE code structure * Fix all checks have failded * FIX variable format * FIX variable format * FIX variable format * FIX code structure * FIX code structure * FIX code structure * FIX code structure --- computer_vision/flip_augmentation.py | 131 +++++++++++++++++ computer_vision/mosaic_augmentation.py | 189 +++++++++++++++++++++++++ 2 files changed, 320 insertions(+) create mode 100644 computer_vision/flip_augmentation.py create mode 100644 computer_vision/mosaic_augmentation.py diff --git a/computer_vision/flip_augmentation.py b/computer_vision/flip_augmentation.py new file mode 100644 index 000000000000..1272357fd03e --- /dev/null +++ b/computer_vision/flip_augmentation.py @@ -0,0 +1,131 @@ +import glob +import os +import random +from string import ascii_lowercase, digits + +import cv2 + +""" +Flip image and bounding box for computer vision task +https://paperswithcode.com/method/randomhorizontalflip +""" + +# Params +LABEL_DIR = "" +IMAGE_DIR = "" +OUTPUT_DIR = "" +FLIP_TYPE = 1 # (0 is vertical, 1 is horizontal) + + +def main() -> None: + """ + Get images list and annotations list from input dir. + Update new images and annotations. + Save images and annotations in output dir. + >>> pass # A doctest is not possible for this function. 
+ """ + img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR) + print("Processing...") + new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE) + + for index, image in enumerate(new_images): + # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' + letter_code = random_chars(32) + file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0] + file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}" + cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85]) + print(f"Success {index+1}/{len(new_images)} with {file_name}") + annos_list = [] + for anno in new_annos[index]: + obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}" + annos_list.append(obj) + with open(f"/{file_root}.txt", "w") as outfile: + outfile.write("\n".join(line for line in annos_list)) + + +def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]: + """ + - label_dir : Path to label include annotation of images + - img_dir : Path to folder contain images + Return : List of images path and labels + >>> pass # A doctest is not possible for this function. + """ + img_paths = [] + labels = [] + for label_file in glob.glob(os.path.join(label_dir, "*.txt")): + label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0] + with open(label_file) as in_file: + obj_lists = in_file.readlines() + img_path = os.path.join(img_dir, f"{label_name}.jpg") + + boxes = [] + for obj_list in obj_lists: + obj = obj_list.rstrip("\n").split(" ") + boxes.append( + [ + int(obj[0]), + float(obj[1]), + float(obj[2]), + float(obj[3]), + float(obj[4]), + ] + ) + if not boxes: + continue + img_paths.append(img_path) + labels.append(boxes) + return img_paths, labels + + +def update_image_and_anno( + img_list: list, anno_list: list, flip_type: int = 1 +) -> tuple[list, list, list]: + """ + - img_list : list of all images + - anno_list : list of all annotations of specific image + - flip_type : 0 is vertical, 1 is horizontal + Return: + - new_imgs_list : image after resize + - new_annos_lists : list of new annotation after scale + - path_list : list the name of image file + >>> pass # A doctest is not possible for this function. + """ + new_annos_lists = [] + path_list = [] + new_imgs_list = [] + for idx in range(len(img_list)): + new_annos = [] + path = img_list[idx] + path_list.append(path) + img_annos = anno_list[idx] + img = cv2.imread(path) + if flip_type == 1: + new_img = cv2.flip(img, flip_type) + for bbox in img_annos: + x_center_new = 1 - bbox[1] + new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]]) + elif flip_type == 0: + new_img = cv2.flip(img, flip_type) + for bbox in img_annos: + y_center_new = 1 - bbox[2] + new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]]) + new_annos_lists.append(new_annos) + new_imgs_list.append(new_img) + return new_imgs_list, new_annos_lists, path_list + + +def random_chars(number_char: int = 32) -> str: + """ + Automatic generate random 32 characters. 
+    Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
+    >>> len(random_chars(32))
+    32
+    """
+    assert number_char > 1, "The number of characters should be greater than 1"
+    letter_code = ascii_lowercase + digits
+    return "".join(random.choice(letter_code) for _ in range(number_char))
+
+
+if __name__ == "__main__":
+    main()
+    print("DONE ✅")
diff --git a/computer_vision/mosaic_augmentation.py
new file mode 100644
index 000000000000..4fd81957ce2a
--- /dev/null
+++ b/computer_vision/mosaic_augmentation.py
@@ -0,0 +1,189 @@
+"""Source: https://github.com/jason9075/opencv-mosaic-data-aug"""
+
+import glob
+import os
+import random
+from string import ascii_lowercase, digits
+
+import cv2
+import numpy as np
+
+# Parameters
+OUTPUT_SIZE = (720, 1280)  # Height, Width
+SCALE_RANGE = (0.4, 0.6)  # if height or width is lower than this scale, drop it.
+FILTER_TINY_SCALE = 1 / 100
+LABEL_DIR = ""
+IMG_DIR = ""
+OUTPUT_DIR = ""
+NUMBER_IMAGES = 250
+
+
+def main() -> None:
+    """
+    Get the images list and annotations list from the input dir.
+    Update the new images and annotations.
+    Save the images and annotations in the output dir.
+    >>> pass # A doctest is not possible for this function.
+    """
+    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
+    for index in range(NUMBER_IMAGES):
+        idxs = random.sample(range(len(annos)), 4)
+        new_image, new_annos, path = update_image_and_anno(
+            img_paths,
+            annos,
+            idxs,
+            OUTPUT_SIZE,
+            SCALE_RANGE,
+            filter_scale=FILTER_TINY_SCALE,
+        )
+
+        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
+        letter_code = random_chars(32)
+        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
+        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
+        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
+        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
+        annos_list = []
+        for anno in new_annos:
+            width = anno[3] - anno[1]
+            height = anno[4] - anno[2]
+            x_center = anno[1] + width / 2
+            y_center = anno[2] + height / 2
+            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
+            annos_list.append(obj)
+        with open(f"{file_root}.txt", "w") as outfile:
+            outfile.write("\n".join(line for line in annos_list))
+
+
+def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
+    """
+    - label_dir : Path to the folder containing the image annotations (labels)
+    - img_dir : Path to the folder containing the images
+    Return : List of image paths and labels
+    >>> pass # A doctest is not possible for this function.
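The mosaic itself picks a split point (divid_point_x, divid_point_y) inside the output canvas from scale_x and scale_y, resizes one source image into each of the four quadrants, and then squeezes every box into its quadrant by rescaling its normalised corner coordinates, as the update_image_and_anno function below does. A stand-alone sketch of the corner math for the top-left and top-right quadrants (illustrative helper, not part of the patch):

# Rescale a normalised (label, xmin, ymin, xmax, ymax) box into a mosaic quadrant.
def to_quadrant(box: list[float], scale_x: float, scale_y: float, right: bool = False) -> list[float]:
    label, xmin, ymin, xmax, ymax = box
    if right:  # top-right quadrant occupies x in [scale_x, 1]
        return [label, scale_x + xmin * (1 - scale_x), ymin * scale_y,
                scale_x + xmax * (1 - scale_x), ymax * scale_y]
    return [label, xmin * scale_x, ymin * scale_y, xmax * scale_x, ymax * scale_y]

if __name__ == "__main__":
    box = [0, 0.5, 0.0, 1.0, 1.0]  # the right half of its source image
    print(to_quadrant(box, 0.5, 0.5))              # [0, 0.25, 0.0, 0.5, 0.5]
    print(to_quadrant(box, 0.5, 0.5, right=True))  # [0, 0.75, 0.0, 1.0, 0.5]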
+ """ + img_paths = [] + labels = [] + for label_file in glob.glob(os.path.join(label_dir, "*.txt")): + label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0] + with open(label_file) as in_file: + obj_lists = in_file.readlines() + img_path = os.path.join(img_dir, f"{label_name}.jpg") + + boxes = [] + for obj_list in obj_lists: + obj = obj_list.rstrip("\n").split(" ") + xmin = float(obj[1]) - float(obj[3]) / 2 + ymin = float(obj[2]) - float(obj[4]) / 2 + xmax = float(obj[1]) + float(obj[3]) / 2 + ymax = float(obj[2]) + float(obj[4]) / 2 + + boxes.append([int(obj[0]), xmin, ymin, xmax, ymax]) + if not boxes: + continue + img_paths.append(img_path) + labels.append(boxes) + return img_paths, labels + + +def update_image_and_anno( + all_img_list: list, + all_annos: list, + idxs: list[int], + output_size: tuple[int, int], + scale_range: tuple[float, float], + filter_scale: float = 0.0, +) -> tuple[list, list, str]: + """ + - all_img_list : list of all images + - all_annos : list of all annotations of specific image + - idxs : index of image in list + - output_size : size of output image (Height, Width) + - scale_range : range of scale image + - filter_scale : the condition of downscale image and bounding box + Return: + - output_img : image after resize + - new_anno : list of new annotation after scale + - path[0] : get the name of image file + >>> pass # A doctest is not possible for this function. + """ + output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8) + scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) + scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) + divid_point_x = int(scale_x * output_size[1]) + divid_point_y = int(scale_y * output_size[0]) + + new_anno = [] + path_list = [] + for i, index in enumerate(idxs): + path = all_img_list[index] + path_list.append(path) + img_annos = all_annos[index] + img = cv2.imread(path) + if i == 0: # top-left + img = cv2.resize(img, (divid_point_x, divid_point_y)) + output_img[:divid_point_y, :divid_point_x, :] = img + for bbox in img_annos: + xmin = bbox[1] * scale_x + ymin = bbox[2] * scale_y + xmax = bbox[3] * scale_x + ymax = bbox[4] * scale_y + new_anno.append([bbox[0], xmin, ymin, xmax, ymax]) + elif i == 1: # top-right + img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y)) + output_img[:divid_point_y, divid_point_x : output_size[1], :] = img + for bbox in img_annos: + xmin = scale_x + bbox[1] * (1 - scale_x) + ymin = bbox[2] * scale_y + xmax = scale_x + bbox[3] * (1 - scale_x) + ymax = bbox[4] * scale_y + new_anno.append([bbox[0], xmin, ymin, xmax, ymax]) + elif i == 2: # bottom-left + img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y)) + output_img[divid_point_y : output_size[0], :divid_point_x, :] = img + for bbox in img_annos: + xmin = bbox[1] * scale_x + ymin = scale_y + bbox[2] * (1 - scale_y) + xmax = bbox[3] * scale_x + ymax = scale_y + bbox[4] * (1 - scale_y) + new_anno.append([bbox[0], xmin, ymin, xmax, ymax]) + else: # bottom-right + img = cv2.resize( + img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y) + ) + output_img[ + divid_point_y : output_size[0], divid_point_x : output_size[1], : + ] = img + for bbox in img_annos: + xmin = scale_x + bbox[1] * (1 - scale_x) + ymin = scale_y + bbox[2] * (1 - scale_y) + xmax = scale_x + bbox[3] * (1 - scale_x) + ymax = scale_y + bbox[4] * (1 - scale_y) + new_anno.append([bbox[0], xmin, ymin, xmax, ymax]) + + # Remove bounding box small than scale of filter 
+ if 0 < filter_scale: + new_anno = [ + anno + for anno in new_anno + if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) + ] + + return output_img, new_anno, path_list[0] + + +def random_chars(number_char: int) -> str: + """ + Automatic generate random 32 characters. + Get random string code: '7b7ad245cdff75241935e4dd860f3bad' + >>> len(random_chars(32)) + 32 + """ + assert number_char > 1, "The number of character should greater than 1" + letter_code = ascii_lowercase + digits + return "".join(random.choice(letter_code) for _ in range(number_char)) + + +if __name__ == "__main__": + main() + print("DONE ✅") From ac4bdfd66dbbd4c7c92c73d894469aa4a5c3e5ab Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Mon, 8 Nov 2021 10:47:09 -0300 Subject: [PATCH 0399/1543] [mypy] Fix type annotations in `graphs/boruvka.py` (#5794) * Fix type annotations in boruvka.py * Remove graphs/boruvka.py| * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 4 +++- graphs/boruvka.py | 8 +++++--- mypy.ini | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index f515277f403e..228d95472a60 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -109,8 +109,10 @@ ## Computer Vision * [Cnn Classification](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/cnn_classification.py) + * [Flip Augmentation](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/flip_augmentation.py) * [Harris Corner](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/harris_corner.py) * [Mean Threshold](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/mean_threshold.py) + * [Mosaic Augmentation](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/mosaic_augmentation.py) ## Conversions * [Binary To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_decimal.py) @@ -297,8 +299,8 @@ * [Test Send File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/tests/test_send_file.py) ## Financial + * [Equated Monthly Installments](https://github.com/TheAlgorithms/Python/blob/master/financial/equated_monthly_installments.py) * [Interest](https://github.com/TheAlgorithms/Python/blob/master/financial/interest.py) - * [EMI Calculation](https://github.com/TheAlgorithms/Python/blob/master/financial/equated_monthly_installments.py) ## Fractals * [Julia Sets](https://github.com/TheAlgorithms/Python/blob/master/fractals/julia_sets.py) diff --git a/graphs/boruvka.py b/graphs/boruvka.py index eea0b0009941..2715a3085948 100644 --- a/graphs/boruvka.py +++ b/graphs/boruvka.py @@ -26,6 +26,8 @@ """ from __future__ import annotations +from typing import Any + class Graph: def __init__(self, num_of_nodes: int) -> None: @@ -62,7 +64,7 @@ def set_component(self, u_node: int) -> None: for k in self.m_component: self.m_component[k] = self.find_component(k) - def union(self, component_size: list, u_node: int, v_node: int) -> None: + def union(self, component_size: list[int], u_node: int, v_node: int) -> None: """Union finds the roots of components for two nodes, compares the components in terms of size, and attaches the smaller one to the larger one to form single component""" @@ -84,7 +86,7 @@ def boruvka(self) -> None: component_size = [] mst_weight = 0 - minimum_weight_edge: list[int] = [-1] * self.m_num_of_nodes + minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the 
nodes) for node in range(self.m_num_of_nodes): @@ -119,7 +121,7 @@ def boruvka(self) -> None: minimum_weight_edge[component] = [u, v, w] for edge in minimum_weight_edge: - if edge != -1: + if isinstance(edge, list): u, v, w = edge u_component = self.m_component[u] diff --git a/mypy.ini b/mypy.ini index df69fa841cef..16ca60c4dbcc 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,4 +2,4 @@ ignore_missing_imports = True install_types = True non_interactive = True -exclude = (graphs/boruvka.py|graphs/breadth_first_search.py|graphs/breadth_first_search_2.py|graphs/check_cycle.py|graphs/finding_bridges.py|graphs/greedy_min_vertex_cover.py|graphs/random_graph_generator.py|matrix_operation.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py|searches/simulated_annealing.py|searches/ternary_search.py) +exclude = (graphs/breadth_first_search.py|graphs/breadth_first_search_2.py|graphs/check_cycle.py|graphs/finding_bridges.py|graphs/greedy_min_vertex_cover.py|graphs/random_graph_generator.py|matrix_operation.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py|searches/simulated_annealing.py|searches/ternary_search.py) From a8aeabdf1891397a4a55988f33ac435ae0313c55 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Mon, 8 Nov 2021 22:48:33 +0530 Subject: [PATCH 0400/1543] [mypy] Type annotations for `graphs/finding_bridges.py` and `graphs/random_graph_generator.py` (#5795) * [mypy] Annotate `graphs/finding_bridges.py` * Remove from excluded in `mypy.ini` * Add doctest.testmod() * psf/black formatting * Annotations for `graphs/random_graph_generator.py` * Remove from excluded in `mypy.ini` * Resolve merge conflict * Resolve merge conflict * Update mypy.ini * Update mypy.ini * Remove from excluded --- graphs/finding_bridges.py | 8 +++++++- graphs/random_graph_generator.py | 2 +- mypy.ini | 2 +- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/graphs/finding_bridges.py b/graphs/finding_bridges.py index a877a97489be..3813c4ebbd2a 100644 --- a/graphs/finding_bridges.py +++ b/graphs/finding_bridges.py @@ -93,8 +93,14 @@ def dfs(at, parent, bridges, id): # This edge is a back edge and cannot be a bridge low[at] = min(low[at], low[to]) - bridges = [] + bridges: list[tuple[int, int]] = [] for i in range(n): if not visited[i]: dfs(i, -1, bridges, id) return bridges + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/graphs/random_graph_generator.py b/graphs/random_graph_generator.py index d7d5de8a37c0..15ccee5b399c 100644 --- a/graphs/random_graph_generator.py +++ b/graphs/random_graph_generator.py @@ -26,7 +26,7 @@ def random_graph( >>> random_graph(4, 0.5, True) {0: [1], 1: [2, 3], 2: [3], 3: []} """ - graph = {i: [] for i in range(vertices_number)} + graph: dict = {i: [] for i in range(vertices_number)} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: diff --git a/mypy.ini b/mypy.ini index 16ca60c4dbcc..429c6804daf5 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,4 +2,4 @@ ignore_missing_imports = True install_types = True non_interactive = True -exclude = (graphs/breadth_first_search.py|graphs/breadth_first_search_2.py|graphs/check_cycle.py|graphs/finding_bridges.py|graphs/greedy_min_vertex_cover.py|graphs/random_graph_generator.py|matrix_operation.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py|searches/simulated_annealing.py|searches/ternary_search.py) +exclude = 
(graphs/breadth_first_search.py|graphs/breadth_first_search_2.py|graphs/check_cycle.py|graphs/greedy_min_vertex_cover.py|matrix_operation.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py|searches/simulated_annealing.py|searches/ternary_search.py) From 4c9949f636248a547b9ff832ad18df372df57ed5 Mon Sep 17 00:00:00 2001 From: Leoriem-code <73761711+Leoriem-code@users.noreply.github.com> Date: Mon, 8 Nov 2021 18:58:15 +0100 Subject: [PATCH 0401/1543] edited strings/anagram.py (#5770) * rewrote anagrams.py, added doctests * corrected mistakes * add anagrams.txt * Update anagrams.py * Update strings/anagrams.py Co-authored-by: Christian Clauss Co-authored-by: Christian Clauss --- strings/anagrams.py | 55 +- strings/anagrams.txt | 33957 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 33989 insertions(+), 23 deletions(-) create mode 100644 strings/anagrams.txt diff --git a/strings/anagrams.py b/strings/anagrams.py index 1a7c675d6719..b671d3f3d531 100644 --- a/strings/anagrams.py +++ b/strings/anagrams.py @@ -1,35 +1,44 @@ +from __future__ import annotations + import collections -import os import pprint -import time +from pathlib import Path + + +def signature(word: str) -> str: + """Return a word sorted + >>> signature("test") + 'estt' + >>> signature("this is a test") + ' aehiisssttt' + >>> signature("finaltest") + 'aefilnstt' + """ + return "".join(sorted(word)) -start_time = time.time() -print("creating word list...") -path = os.path.split(os.path.realpath(__file__)) -with open(path[0] + "/words.txt") as f: - word_list = sorted(list({word.strip().lower() for word in f})) +def anagram(my_word: str) -> list[str]: + """Return every anagram of the given word + >>> anagram('test') + ['sett', 'stet', 'test'] + >>> anagram('this is a test') + [] + >>> anagram('final') + ['final'] + """ + return word_bysig[signature(my_word)] -def signature(word): - return "".join(sorted(word)) +data: str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8") +word_list = sorted({word.strip().lower() for word in data.splitlines()}) word_bysig = collections.defaultdict(list) for word in word_list: word_bysig[signature(word)].append(word) +if __name__ == "__main__": + all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} -def anagram(my_word): - return word_bysig[signature(my_word)] - - -print("finding anagrams...") -all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} - -print("writing anagrams to file...") -with open("anagrams.txt", "w") as file: - file.write("all_anagrams = ") - file.write(pprint.pformat(all_anagrams)) - -total_time = round(time.time() - start_time, 2) -print(("Done [", total_time, "seconds ]")) + with open("anagrams.txt", "w") as file: + file.write("all_anagrams = \n ") + file.write(pprint.pformat(all_anagrams)) diff --git a/strings/anagrams.txt b/strings/anagrams.txt new file mode 100644 index 000000000000..52a0fcf3e9a7 --- /dev/null +++ b/strings/anagrams.txt @@ -0,0 +1,33957 @@ +all_anagrams = + {'aal': ['aal', 'ala'], + 'aam': ['aam', 'ama'], + 'aaronic': ['aaronic', 'nicarao', 'ocarina'], + 'aaronite': ['aaronite', 'aeration'], + 'aaru': ['aaru', 'aura'], + 'ab': ['ab', 'ba'], + 'aba': ['aba', 'baa'], + 'abac': ['abac', 'caba'], + 'abactor': ['abactor', 'acrobat'], + 'abaft': ['abaft', 'bafta'], + 'abalone': ['abalone', 'balonea'], + 'abandoner': ['abandoner', 'reabandon'], + 'abanic': ['abanic', 'bianca'], + 'abaris': ['abaris', 'arabis'], + 'abas': ['abas', 'saba'], + 'abaser': 
['abaser', 'abrase'], + 'abate': ['abate', 'ateba', 'batea', 'beata'], + 'abater': ['abater', 'artabe', 'eartab', 'trabea'], + 'abb': ['abb', 'bab'], + 'abba': ['abba', 'baba'], + 'abbey': ['abbey', 'bebay'], + 'abby': ['abby', 'baby'], + 'abdat': ['abdat', 'batad'], + 'abdiel': ['abdiel', 'baldie'], + 'abdominovaginal': ['abdominovaginal', 'vaginoabdominal'], + 'abdominovesical': ['abdominovesical', 'vesicoabdominal'], + 'abe': ['abe', 'bae', 'bea'], + 'abed': ['abed', 'bade', 'bead'], + 'abel': ['abel', 'able', 'albe', 'bale', 'beal', 'bela', 'blae'], + 'abele': ['abele', 'albee'], + 'abelian': ['abelian', 'nebalia'], + 'abenteric': ['abenteric', 'bicrenate'], + 'aberia': ['aberia', 'baeria', 'baiera'], + 'abet': ['abet', 'bate', 'beat', 'beta'], + 'abetment': ['abetment', 'batement'], + 'abettor': ['abettor', 'taboret'], + 'abhorrent': ['abhorrent', 'earthborn'], + 'abhorrer': ['abhorrer', 'harborer'], + 'abider': ['abider', 'bardie'], + 'abies': ['abies', 'beisa'], + 'abilla': ['abilla', 'labial'], + 'abilo': ['abilo', 'aboil'], + 'abir': ['abir', 'bari', 'rabi'], + 'abiston': ['abiston', 'bastion'], + 'abiuret': ['abiuret', 'aubrite', 'biurate', 'rubiate'], + 'abkar': ['abkar', 'arkab'], + 'abkhas': ['abkhas', 'kasbah'], + 'ablactate': ['ablactate', 'cabaletta'], + 'ablare': ['ablare', 'arable', 'arbela'], + 'ablastemic': ['ablastemic', 'masticable'], + 'ablation': ['ablation', 'obtainal'], + 'ablaut': ['ablaut', 'tabula'], + 'able': ['abel', 'able', 'albe', 'bale', 'beal', 'bela', 'blae'], + 'ableness': ['ableness', 'blaeness', 'sensable'], + 'ablepsia': ['ablepsia', 'epibasal'], + 'abler': ['abler', 'baler', 'belar', 'blare', 'blear'], + 'ablest': ['ablest', 'stable', 'tables'], + 'abloom': ['abloom', 'mabolo'], + 'ablow': ['ablow', 'balow', 'bowla'], + 'ablude': ['ablude', 'belaud'], + 'abluent': ['abluent', 'tunable'], + 'ablution': ['ablution', 'abutilon'], + 'ably': ['ably', 'blay', 'yalb'], + 'abmho': ['abmho', 'abohm'], + 'abner': ['abner', 'arneb', 'reban'], + 'abnet': ['abnet', 'beant'], + 'abo': ['abo', 'boa'], + 'aboard': ['aboard', 'aborad', 'abroad'], + 'abode': ['abode', 'adobe'], + 'abohm': ['abmho', 'abohm'], + 'aboil': ['abilo', 'aboil'], + 'abolisher': ['abolisher', 'reabolish'], + 'abongo': ['abongo', 'gaboon'], + 'aborad': ['aboard', 'aborad', 'abroad'], + 'aboral': ['aboral', 'arbalo'], + 'abord': ['abord', 'bardo', 'board', 'broad', 'dobra', 'dorab'], + 'abort': ['abort', 'tabor'], + 'aborticide': ['aborticide', 'bacterioid'], + 'abortient': ['abortient', 'torbanite'], + 'abortin': ['abortin', 'taborin'], + 'abortion': ['abortion', 'robotian'], + 'abortive': ['abortive', 'bravoite'], + 'abouts': ['abouts', 'basuto'], + 'abram': ['abram', 'ambar'], + 'abramis': ['abramis', 'arabism'], + 'abrasax': ['abrasax', 'abraxas'], + 'abrase': ['abaser', 'abrase'], + 'abrasion': ['abrasion', 'sorabian'], + 'abrastol': ['abrastol', 'albatros'], + 'abraxas': ['abrasax', 'abraxas'], + 'abreact': ['abreact', 'bractea', 'cabaret'], + 'abret': ['abret', 'bater', 'berat'], + 'abridge': ['abridge', 'brigade'], + 'abrim': ['abrim', 'birma'], + 'abrin': ['abrin', 'bairn', 'brain', 'brian', 'rabin'], + 'abristle': ['abristle', 'libertas'], + 'abroad': ['aboard', 'aborad', 'abroad'], + 'abrotine': ['abrotine', 'baritone', 'obtainer', 'reobtain'], + 'abrus': ['abrus', 'bursa', 'subra'], + 'absalom': ['absalom', 'balsamo'], + 'abscise': ['abscise', 'scabies'], + 'absent': ['absent', 'basten'], + 'absenter': ['absenter', 'reabsent'], + 'absi': ['absi', 'bais', 'bias', 'isba'], + 'absit': 
['absit', 'batis'], + 'absmho': ['absmho', 'absohm'], + 'absohm': ['absmho', 'absohm'], + 'absorber': ['absorber', 'reabsorb'], + 'absorpt': ['absorpt', 'barpost'], + 'abthain': ['abthain', 'habitan'], + 'abulic': ['abulic', 'baculi'], + 'abut': ['abut', 'tabu', 'tuba'], + 'abuta': ['abuta', 'bauta'], + 'abutilon': ['ablution', 'abutilon'], + 'aby': ['aby', 'bay'], + 'abysmal': ['abysmal', 'balsamy'], + 'academite': ['academite', 'acetamide'], + 'acadie': ['acadie', 'acedia', 'adicea'], + 'acaleph': ['acaleph', 'acephal'], + 'acalepha': ['acalepha', 'acephala'], + 'acalephae': ['acalephae', 'apalachee'], + 'acalephan': ['acalephan', 'acephalan'], + 'acalyptrate': ['acalyptrate', 'calyptratae'], + 'acamar': ['acamar', 'camara', 'maraca'], + 'acanth': ['acanth', 'anchat', 'tanach'], + 'acanthia': ['acanthia', 'achatina'], + 'acanthial': ['acanthial', 'calathian'], + 'acanthin': ['acanthin', 'chinanta'], + 'acara': ['acara', 'araca'], + 'acardia': ['acardia', 'acarida', 'arcadia'], + 'acarian': ['acarian', 'acarina', 'acrania'], + 'acarid': ['acarid', 'cardia', 'carida'], + 'acarida': ['acardia', 'acarida', 'arcadia'], + 'acarina': ['acarian', 'acarina', 'acrania'], + 'acarine': ['acarine', 'acraein', 'arecain'], + 'acastus': ['acastus', 'astacus'], + 'acatholic': ['acatholic', 'chaotical'], + 'acaudate': ['acaudate', 'ecaudata'], + 'acca': ['acca', 'caca'], + 'accelerator': ['accelerator', 'retrocaecal'], + 'acception': ['acception', 'peccation'], + 'accessioner': ['accessioner', 'reaccession'], + 'accipitres': ['accipitres', 'preascitic'], + 'accite': ['accite', 'acetic'], + 'acclinate': ['acclinate', 'analectic'], + 'accoil': ['accoil', 'calico'], + 'accomplisher': ['accomplisher', 'reaccomplish'], + 'accompt': ['accompt', 'compact'], + 'accorder': ['accorder', 'reaccord'], + 'accoy': ['accoy', 'ccoya'], + 'accretion': ['accretion', 'anorectic', 'neoarctic'], + 'accrual': ['accrual', 'carucal'], + 'accurate': ['accurate', 'carucate'], + 'accurse': ['accurse', 'accuser'], + 'accusable': ['accusable', 'subcaecal'], + 'accused': ['accused', 'succade'], + 'accuser': ['accurse', 'accuser'], + 'acedia': ['acadie', 'acedia', 'adicea'], + 'acedy': ['acedy', 'decay'], + 'acentric': ['acentric', 'encratic', 'nearctic'], + 'acentrous': ['acentrous', 'courtesan', 'nectarous'], + 'acephal': ['acaleph', 'acephal'], + 'acephala': ['acalepha', 'acephala'], + 'acephalan': ['acalephan', 'acephalan'], + 'acephali': ['acephali', 'phacelia'], + 'acephalina': ['acephalina', 'phalaecian'], + 'acer': ['acer', 'acre', 'care', 'crea', 'race'], + 'aceraceae': ['aceraceae', 'arecaceae'], + 'aceraceous': ['aceraceous', 'arecaceous'], + 'acerb': ['acerb', 'brace', 'caber'], + 'acerbic': ['acerbic', 'breccia'], + 'acerdol': ['acerdol', 'coraled'], + 'acerin': ['acerin', 'cearin'], + 'acerous': ['acerous', 'carouse', 'euscaro'], + 'acervate': ['acervate', 'revacate'], + 'acervation': ['acervation', 'vacationer'], + 'acervuline': ['acervuline', 'avirulence'], + 'acetamide': ['academite', 'acetamide'], + 'acetamido': ['acetamido', 'coadamite'], + 'acetanilid': ['acetanilid', 'laciniated', 'teniacidal'], + 'acetanion': ['acetanion', 'antoecian'], + 'acetation': ['acetation', 'itaconate'], + 'acetic': ['accite', 'acetic'], + 'acetin': ['acetin', 'actine', 'enatic'], + 'acetmethylanilide': ['acetmethylanilide', 'methylacetanilide'], + 'acetoin': ['acetoin', 'aconite', 'anoetic', 'antoeci', 'cetonia'], + 'acetol': ['acetol', 'colate', 'locate'], + 'acetone': ['acetone', 'oceanet'], + 'acetonuria': ['acetonuria', 'aeronautic'], 
+ 'acetopyrin': ['acetopyrin', 'capernoity'], + 'acetous': ['acetous', 'outcase'], + 'acetum': ['acetum', 'tecuma'], + 'aceturic': ['aceturic', 'cruciate'], + 'ach': ['ach', 'cha'], + 'achar': ['achar', 'chara'], + 'achate': ['achate', 'chaeta'], + 'achatina': ['acanthia', 'achatina'], + 'ache': ['ache', 'each', 'haec'], + 'acheirus': ['acheirus', 'eucharis'], + 'achen': ['achen', 'chane', 'chena', 'hance'], + 'acher': ['acher', 'arche', 'chare', 'chera', 'rache', 'reach'], + 'acherontic': ['acherontic', 'anchoretic'], + 'acherontical': ['acherontical', 'anchoretical'], + 'achete': ['achete', 'hecate', 'teache', 'thecae'], + 'acheulean': ['acheulean', 'euchlaena'], + 'achill': ['achill', 'cahill', 'chilla'], + 'achillea': ['achillea', 'heliacal'], + 'acholia': ['acholia', 'alochia'], + 'achondrite': ['achondrite', 'ditrochean', 'ordanchite'], + 'achor': ['achor', 'chora', 'corah', 'orach', 'roach'], + 'achras': ['achras', 'charas'], + 'achromat': ['achromat', 'trachoma'], + 'achromatin': ['achromatin', 'chariotman', 'machinator'], + 'achromatinic': ['achromatinic', 'chromatician'], + 'achtel': ['achtel', 'chalet', 'thecal', 'thecla'], + 'achy': ['achy', 'chay'], + 'aciculated': ['aciculated', 'claudicate'], + 'acid': ['acid', 'cadi', 'caid'], + 'acidanthera': ['acidanthera', 'cantharidae'], + 'acider': ['acider', 'ericad'], + 'acidimeter': ['acidimeter', 'mediatrice'], + 'acidity': ['acidity', 'adicity'], + 'acidly': ['acidly', 'acidyl'], + 'acidometry': ['acidometry', 'medicatory', 'radiectomy'], + 'acidophilous': ['acidophilous', 'aphidicolous'], + 'acidyl': ['acidly', 'acidyl'], + 'acier': ['acier', 'aeric', 'ceria', 'erica'], + 'acieral': ['acieral', 'aerical'], + 'aciform': ['aciform', 'formica'], + 'acilius': ['acilius', 'iliacus'], + 'acinar': ['acinar', + 'arnica', + 'canari', + 'carian', + 'carina', + 'crania', + 'narica'], + 'acinic': ['acinic', 'incaic'], + 'aciniform': ['aciniform', 'formicina'], + 'acipenserid': ['acipenserid', 'presidencia'], + 'acis': ['acis', 'asci', 'saic'], + 'acker': ['acker', 'caker', 'crake', 'creak'], + 'ackey': ['ackey', 'cakey'], + 'acle': ['acle', 'alec', 'lace'], + 'acleistous': ['acleistous', 'ossiculate'], + 'aclemon': ['aclemon', 'cloamen'], + 'aclinal': ['aclinal', 'ancilla'], + 'aclys': ['aclys', 'scaly'], + 'acme': ['acme', 'came', 'mace'], + 'acmite': ['acmite', 'micate'], + 'acne': ['acne', 'cane', 'nace'], + 'acnemia': ['acnemia', 'anaemic'], + 'acnida': ['acnida', 'anacid', 'dacian'], + 'acnodal': ['acnodal', 'canadol', 'locanda'], + 'acnode': ['acnode', 'deacon'], + 'acoin': ['acoin', 'oncia'], + 'acoma': ['acoma', 'macao'], + 'acone': ['acone', 'canoe', 'ocean'], + 'aconital': ['aconital', 'actional', 'anatolic'], + 'aconite': ['acetoin', 'aconite', 'anoetic', 'antoeci', 'cetonia'], + 'aconitic': ['aconitic', 'cationic', 'itaconic'], + 'aconitin': ['aconitin', 'inaction', 'nicotian'], + 'aconitum': ['aconitum', 'acontium'], + 'acontias': ['acontias', 'tacsonia'], + 'acontium': ['aconitum', 'acontium'], + 'acontius': ['acontius', 'anticous'], + 'acopon': ['acopon', 'poonac'], + 'acor': ['acor', 'caro', 'cora', 'orca'], + 'acorn': ['acorn', 'acron', 'racon'], + 'acorus': ['acorus', 'soucar'], + 'acosmist': ['acosmist', 'massicot', 'somatics'], + 'acquest': ['acquest', 'casquet'], + 'acrab': ['acrab', 'braca'], + 'acraein': ['acarine', 'acraein', 'arecain'], + 'acrania': ['acarian', 'acarina', 'acrania'], + 'acraniate': ['acraniate', 'carinatae'], + 'acratia': ['acratia', 'cataria'], + 'acre': ['acer', 'acre', 'care', 'crea', 'race'], + 
'acream': ['acream', 'camera', 'mareca'], + 'acred': ['acred', 'cader', 'cadre', 'cedar'], + 'acrid': ['acrid', 'caird', 'carid', 'darci', 'daric', 'dirca'], + 'acridan': ['acridan', 'craniad'], + 'acridian': ['acridian', 'cnidaria'], + 'acrididae': ['acrididae', 'cardiidae', 'cidaridae'], + 'acridly': ['acridly', 'acridyl'], + 'acridonium': ['acridonium', 'dicoumarin'], + 'acridyl': ['acridly', 'acridyl'], + 'acrimonious': ['acrimonious', 'isocoumarin'], + 'acrisius': ['acrisius', 'sicarius'], + 'acrita': ['acrita', 'arctia'], + 'acritan': ['acritan', 'arctian'], + 'acrite': ['acrite', 'arcite', 'tercia', 'triace', 'tricae'], + 'acroa': ['acroa', 'caroa'], + 'acrobat': ['abactor', 'acrobat'], + 'acrocera': ['acrocera', 'caracore'], + 'acroclinium': ['acroclinium', 'alcicornium'], + 'acrodus': ['acrodus', 'crusado'], + 'acrogen': ['acrogen', 'cornage'], + 'acrolein': ['acrolein', + 'arecolin', + 'caroline', + 'colinear', + 'cornelia', + 'creolian', + 'lonicera'], + 'acrolith': ['acrolith', 'trochila'], + 'acron': ['acorn', 'acron', 'racon'], + 'acronical': ['acronical', 'alcoranic'], + 'acronym': ['acronym', 'romancy'], + 'acropetal': ['acropetal', 'cleopatra'], + 'acrose': ['acrose', 'coarse'], + 'acrostic': ['acrostic', 'sarcotic', 'socratic'], + 'acrostical': ['acrostical', 'socratical'], + 'acrostically': ['acrostically', 'socratically'], + 'acrosticism': ['acrosticism', 'socraticism'], + 'acrotic': ['acrotic', 'carotic'], + 'acrotism': ['acrotism', 'rotacism'], + 'acrotrophic': ['acrotrophic', 'prothoracic'], + 'acryl': ['acryl', 'caryl', 'clary'], + 'act': ['act', 'cat'], + 'actaeonidae': ['actaeonidae', 'donatiaceae'], + 'actian': ['actian', 'natica', 'tanica'], + 'actifier': ['actifier', 'artifice'], + 'actin': ['actin', 'antic'], + 'actinal': ['actinal', 'alantic', 'alicant', 'antical'], + 'actine': ['acetin', 'actine', 'enatic'], + 'actiniform': ['actiniform', 'naticiform'], + 'actinine': ['actinine', 'naticine'], + 'actinism': ['actinism', 'manistic'], + 'actinogram': ['actinogram', 'morganatic'], + 'actinoid': ['actinoid', 'diatonic', 'naticoid'], + 'actinon': ['actinon', 'cantion', 'contain'], + 'actinopteran': ['actinopteran', 'precantation'], + 'actinopteri': ['actinopteri', 'crepitation', 'precitation'], + 'actinost': ['actinost', 'oscitant'], + 'actinula': ['actinula', 'nautical'], + 'action': ['action', 'atonic', 'cation'], + 'actional': ['aconital', 'actional', 'anatolic'], + 'actioner': ['actioner', 'anerotic', 'ceration', 'creation', 'reaction'], + 'activable': ['activable', 'biclavate'], + 'activate': ['activate', 'cavitate'], + 'activation': ['activation', 'cavitation'], + 'activin': ['activin', 'civitan'], + 'actomyosin': ['actomyosin', 'inocystoma'], + 'acton': ['acton', 'canto', 'octan'], + 'actor': ['actor', 'corta', 'croat', 'rocta', 'taroc', 'troca'], + 'actorship': ['actorship', 'strophaic'], + 'acts': ['acts', 'cast', 'scat'], + 'actuator': ['actuator', 'autocrat'], + 'acture': ['acture', 'cauter', 'curate'], + 'acuan': ['acuan', 'aucan'], + 'acubens': ['acubens', 'benacus'], + 'acumen': ['acumen', 'cueman'], + 'acuminose': ['acuminose', 'mniaceous'], + 'acutenaculum': ['acutenaculum', 'unaccumulate'], + 'acuteness': ['acuteness', 'encaustes'], + 'acutorsion': ['acutorsion', 'octonarius'], + 'acyl': ['acyl', 'clay', 'lacy'], + 'acylation': ['acylation', 'claytonia'], + 'acylogen': ['acylogen', 'cynogale'], + 'ad': ['ad', 'da'], + 'adad': ['adad', 'adda', 'dada'], + 'adage': ['adage', 'agade'], + 'adam': ['adam', 'dama'], + 'adamic': ['adamic', 'cadmia'], + 
'adamine': ['adamine', 'manidae'], + 'adamite': ['adamite', 'amidate'], + 'adamsite': ['adamsite', 'diastema'], + 'adance': ['adance', 'ecanda'], + 'adapter': ['adapter', 'predata', 'readapt'], + 'adaption': ['adaption', 'adoptian'], + 'adaptionism': ['adaptionism', 'adoptianism'], + 'adar': ['adar', 'arad', 'raad', 'rada'], + 'adarme': ['adarme', 'adream'], + 'adat': ['adat', 'data'], + 'adawn': ['adawn', 'wadna'], + 'adays': ['adays', 'dasya'], + 'add': ['add', 'dad'], + 'adda': ['adad', 'adda', 'dada'], + 'addendum': ['addendum', 'unmadded'], + 'adder': ['adder', 'dread', 'readd'], + 'addicent': ['addicent', 'dedicant'], + 'addlings': ['addlings', 'saddling'], + 'addresser': ['addresser', 'readdress'], + 'addu': ['addu', 'dadu', 'daud', 'duad'], + 'addy': ['addy', 'dyad'], + 'ade': ['ade', 'dae'], + 'adeem': ['adeem', 'ameed', 'edema'], + 'adela': ['adela', 'dalea'], + 'adeline': ['adeline', 'daniele', 'delaine'], + 'adeling': ['adeling', 'dealing', 'leading'], + 'adelops': ['adelops', 'deposal'], + 'ademonist': ['ademonist', 'demoniast', 'staminode'], + 'ademption': ['ademption', 'tampioned'], + 'adendric': ['adendric', 'riddance'], + 'adenectopic': ['adenectopic', 'pentadecoic'], + 'adenia': ['adenia', 'idaean'], + 'adenochondroma': ['adenochondroma', 'chondroadenoma'], + 'adenocystoma': ['adenocystoma', 'cystoadenoma'], + 'adenofibroma': ['adenofibroma', 'fibroadenoma'], + 'adenolipoma': ['adenolipoma', 'palaemonoid'], + 'adenosarcoma': ['adenosarcoma', 'sarcoadenoma'], + 'adenylic': ['adenylic', 'lycaenid'], + 'adeptness': ['adeptness', 'pedantess'], + 'adequation': ['adequation', 'deaquation'], + 'adermia': ['adermia', 'madeira'], + 'adermin': ['adermin', 'amerind', 'dimeran'], + 'adet': ['adet', 'date', 'tade', 'tead', 'teda'], + 'adevism': ['adevism', 'vedaism'], + 'adhere': ['adhere', 'header', 'hedera', 'rehead'], + 'adherent': ['adherent', 'headrent', 'neatherd', 'threaden'], + 'adiaphon': ['adiaphon', 'aphodian'], + 'adib': ['adib', 'ibad'], + 'adicea': ['acadie', 'acedia', 'adicea'], + 'adicity': ['acidity', 'adicity'], + 'adiel': ['adiel', 'delia', 'ideal'], + 'adieux': ['adieux', 'exaudi'], + 'adighe': ['adighe', 'hidage'], + 'adin': ['adin', 'andi', 'dain', 'dani', 'dian', 'naid'], + 'adinole': ['adinole', 'idoneal'], + 'adion': ['adion', 'danio', 'doina', 'donia'], + 'adipocele': ['adipocele', 'cepolidae', 'ploceidae'], + 'adipocere': ['adipocere', 'percoidea'], + 'adipyl': ['adipyl', 'plaidy'], + 'adit': ['adit', 'dita'], + 'adital': ['adital', 'altaid'], + 'aditus': ['aditus', 'studia'], + 'adjuster': ['adjuster', 'readjust'], + 'adlai': ['adlai', 'alida'], + 'adlay': ['adlay', 'dayal'], + 'adlet': ['adlet', 'dealt', 'delta', 'lated', 'taled'], + 'adlumine': ['adlumine', 'unmailed'], + 'adman': ['adman', 'daman', 'namda'], + 'admi': ['admi', 'amid', 'madi', 'maid'], + 'adminicle': ['adminicle', 'medicinal'], + 'admire': ['admire', 'armied', 'damier', 'dimera', 'merida'], + 'admired': ['admired', 'diaderm'], + 'admirer': ['admirer', 'madrier', 'married'], + 'admissive': ['admissive', 'misadvise'], + 'admit': ['admit', 'atmid'], + 'admittee': ['admittee', 'meditate'], + 'admonisher': ['admonisher', 'rhamnoside'], + 'admonition': ['admonition', 'domination'], + 'admonitive': ['admonitive', 'dominative'], + 'admonitor': ['admonitor', 'dominator'], + 'adnascence': ['adnascence', 'ascendance'], + 'adnascent': ['adnascent', 'ascendant'], + 'adnate': ['adnate', 'entada'], + 'ado': ['ado', 'dao', 'oda'], + 'adobe': ['abode', 'adobe'], + 'adolph': ['adolph', 'pholad'], + 
'adonai': ['adonai', 'adonia'], + 'adonia': ['adonai', 'adonia'], + 'adonic': ['adonic', 'anodic'], + 'adonin': ['adonin', 'nanoid', 'nonaid'], + 'adoniram': ['adoniram', 'radioman'], + 'adonize': ['adonize', 'anodize'], + 'adopter': ['adopter', 'protead', 'readopt'], + 'adoptian': ['adaption', 'adoptian'], + 'adoptianism': ['adaptionism', 'adoptianism'], + 'adoptional': ['adoptional', 'aplodontia'], + 'adorability': ['adorability', 'roadability'], + 'adorable': ['adorable', 'roadable'], + 'adorant': ['adorant', 'ondatra'], + 'adore': ['adore', 'oared', 'oread'], + 'adorer': ['adorer', 'roader'], + 'adorn': ['adorn', 'donar', 'drona', 'radon'], + 'adorner': ['adorner', 'readorn'], + 'adpao': ['adpao', 'apoda'], + 'adpromission': ['adpromission', 'proadmission'], + 'adream': ['adarme', 'adream'], + 'adrenin': ['adrenin', 'nardine'], + 'adrenine': ['adrenine', 'adrienne'], + 'adrenolytic': ['adrenolytic', 'declinatory'], + 'adrenotropic': ['adrenotropic', 'incorporated'], + 'adrian': ['adrian', 'andira', 'andria', 'radian', 'randia'], + 'adrienne': ['adrenine', 'adrienne'], + 'adrip': ['adrip', 'rapid'], + 'adroitly': ['adroitly', 'dilatory', 'idolatry'], + 'adrop': ['adrop', 'pardo'], + 'adry': ['adry', 'dray', 'yard'], + 'adscendent': ['adscendent', 'descendant'], + 'adsmith': ['adsmith', 'mahdist'], + 'adular': ['adular', 'aludra', 'radula'], + 'adulation': ['adulation', 'laudation'], + 'adulator': ['adulator', 'laudator'], + 'adulatory': ['adulatory', 'laudatory'], + 'adult': ['adult', 'dulat'], + 'adulterine': ['adulterine', 'laurentide'], + 'adultness': ['adultness', 'dauntless'], + 'adustion': ['adustion', 'sudation'], + 'advene': ['advene', 'evadne'], + 'adventism': ['adventism', 'vedantism'], + 'adventist': ['adventist', 'vedantist'], + 'adventure': ['adventure', 'unaverted'], + 'advice': ['advice', 'vedaic'], + 'ady': ['ady', 'day', 'yad'], + 'adz': ['adz', 'zad'], + 'adze': ['adze', 'daze'], + 'adzer': ['adzer', 'zerda'], + 'ae': ['ae', 'ea'], + 'aecidioform': ['aecidioform', 'formicoidea'], + 'aedilian': ['aedilian', 'laniidae'], + 'aedilic': ['aedilic', 'elaidic'], + 'aedility': ['aedility', 'ideality'], + 'aegipan': ['aegipan', 'apinage'], + 'aegirine': ['aegirine', 'erigenia'], + 'aegirite': ['aegirite', 'ariegite'], + 'aegle': ['aegle', 'eagle', 'galee'], + 'aenean': ['aenean', 'enaena'], + 'aeolharmonica': ['aeolharmonica', 'chloroanaemia'], + 'aeolian': ['aeolian', 'aeolina', 'aeonial'], + 'aeolic': ['aeolic', 'coelia'], + 'aeolina': ['aeolian', 'aeolina', 'aeonial'], + 'aeolis': ['aeolis', 'laiose'], + 'aeolist': ['aeolist', 'isolate'], + 'aeolistic': ['aeolistic', 'socialite'], + 'aeon': ['aeon', 'eoan'], + 'aeonial': ['aeolian', 'aeolina', 'aeonial'], + 'aeonist': ['aeonist', 'asiento', 'satieno'], + 'aer': ['aer', 'are', 'ear', 'era', 'rea'], + 'aerage': ['aerage', 'graeae'], + 'aerarian': ['aerarian', 'arenaria'], + 'aeration': ['aaronite', 'aeration'], + 'aerial': ['aerial', 'aralie'], + 'aeric': ['acier', 'aeric', 'ceria', 'erica'], + 'aerical': ['acieral', 'aerical'], + 'aeried': ['aeried', 'dearie'], + 'aerogenic': ['aerogenic', 'recoinage'], + 'aerographer': ['aerographer', 'areographer'], + 'aerographic': ['aerographic', 'areographic'], + 'aerographical': ['aerographical', 'areographical'], + 'aerography': ['aerography', 'areography'], + 'aerologic': ['aerologic', 'areologic'], + 'aerological': ['aerological', 'areological'], + 'aerologist': ['aerologist', 'areologist'], + 'aerology': ['aerology', 'areology'], + 'aeromantic': ['aeromantic', 'cameration', 
'maceration', 'racemation'], + 'aerometer': ['aerometer', 'areometer'], + 'aerometric': ['aerometric', 'areometric'], + 'aerometry': ['aerometry', 'areometry'], + 'aeronautic': ['acetonuria', 'aeronautic'], + 'aeronautism': ['aeronautism', 'measuration'], + 'aerope': ['aerope', 'operae'], + 'aerophilic': ['aerophilic', 'epichorial'], + 'aerosol': ['aerosol', 'roseola'], + 'aerostatics': ['aerostatics', 'aortectasis'], + 'aery': ['aery', 'eyra', 'yare', 'year'], + 'aes': ['aes', 'ase', 'sea'], + 'aesthetic': ['aesthetic', 'chaetites'], + 'aethalioid': ['aethalioid', 'haliotidae'], + 'aetian': ['aetian', 'antiae', 'taenia'], + 'aetobatus': ['aetobatus', 'eastabout'], + 'afebrile': ['afebrile', 'balefire', 'fireable'], + 'afenil': ['afenil', 'finale'], + 'affair': ['affair', 'raffia'], + 'affecter': ['affecter', 'reaffect'], + 'affeer': ['affeer', 'raffee'], + 'affiance': ['affiance', 'caffeina'], + 'affirmer': ['affirmer', 'reaffirm'], + 'afflicter': ['afflicter', 'reafflict'], + 'affy': ['affy', 'yaff'], + 'afghan': ['afghan', 'hafgan'], + 'afield': ['afield', 'defial'], + 'afire': ['afire', 'feria'], + 'aflare': ['aflare', 'rafael'], + 'aflat': ['aflat', 'fatal'], + 'afresh': ['afresh', 'fasher', 'ferash'], + 'afret': ['afret', 'after'], + 'afric': ['afric', 'firca'], + 'afshar': ['afshar', 'ashraf'], + 'aft': ['aft', 'fat'], + 'after': ['afret', 'after'], + 'afteract': ['afteract', 'artefact', 'farcetta', 'farctate'], + 'afterage': ['afterage', 'fregatae'], + 'afterblow': ['afterblow', 'batfowler'], + 'aftercome': ['aftercome', 'forcemeat'], + 'aftercrop': ['aftercrop', 'prefactor'], + 'aftergo': ['aftergo', 'fagoter'], + 'afterguns': ['afterguns', 'transfuge'], + 'aftermath': ['aftermath', 'hamfatter'], + 'afterstate': ['afterstate', 'aftertaste'], + 'aftertaste': ['afterstate', 'aftertaste'], + 'afunctional': ['afunctional', 'unfactional'], + 'agade': ['adage', 'agade'], + 'agal': ['agal', 'agla', 'alga', 'gala'], + 'agalite': ['agalite', 'tailage', 'taliage'], + 'agalma': ['agalma', 'malaga'], + 'agama': ['agama', 'amaga'], + 'agamid': ['agamid', 'madiga'], + 'agapeti': ['agapeti', 'agpaite'], + 'agapornis': ['agapornis', 'sporangia'], + 'agar': ['agar', 'agra', 'gara', 'raga'], + 'agaricin': ['agaricin', 'garcinia'], + 'agau': ['agau', 'agua'], + 'aged': ['aged', 'egad', 'gade'], + 'ageless': ['ageless', 'eagless'], + 'agen': ['agen', 'gaen', 'gane', 'gean', 'gena'], + 'agenesic': ['agenesic', 'genesiac'], + 'agenesis': ['agenesis', 'assignee'], + 'agential': ['agential', 'alginate'], + 'agentive': ['agentive', 'negative'], + 'ager': ['ager', 'agre', 'gare', 'gear', 'rage'], + 'agger': ['agger', 'gager', 'regga'], + 'aggeration': ['aggeration', 'agregation'], + 'aggry': ['aggry', 'raggy'], + 'agialid': ['agialid', 'galidia'], + 'agib': ['agib', 'biga', 'gabi'], + 'agiel': ['agiel', 'agile', 'galei'], + 'agile': ['agiel', 'agile', 'galei'], + 'agileness': ['agileness', 'signalese'], + 'agistment': ['agistment', 'magnetist'], + 'agistor': ['agistor', 'agrotis', 'orgiast'], + 'agla': ['agal', 'agla', 'alga', 'gala'], + 'aglaos': ['aglaos', 'salago'], + 'aglare': ['aglare', 'alegar', 'galera', 'laager'], + 'aglet': ['aglet', 'galet'], + 'agley': ['agley', 'galey'], + 'agmatine': ['agmatine', 'agminate'], + 'agminate': ['agmatine', 'agminate'], + 'agminated': ['agminated', 'diamagnet'], + 'agnail': ['agnail', 'linaga'], + 'agname': ['agname', 'manage'], + 'agnel': ['agnel', 'angel', 'angle', 'galen', 'genal', 'glean', 'lagen'], + 'agnes': ['agnes', 'gesan'], + 'agnize': ['agnize', 
'ganzie'], + 'agnosis': ['agnosis', 'ganosis'], + 'agnostic': ['agnostic', 'coasting'], + 'agnus': ['agnus', 'angus', 'sugan'], + 'ago': ['ago', 'goa'], + 'agon': ['agon', 'ango', 'gaon', 'goan', 'gona'], + 'agonal': ['agonal', 'angola'], + 'agone': ['agone', 'genoa'], + 'agoniadin': ['agoniadin', 'anangioid', 'ganoidian'], + 'agonic': ['agonic', 'angico', 'gaonic', 'goniac'], + 'agonista': ['agonista', 'santiago'], + 'agonizer': ['agonizer', 'orangize', 'organize'], + 'agpaite': ['agapeti', 'agpaite'], + 'agra': ['agar', 'agra', 'gara', 'raga'], + 'agral': ['agral', 'argal'], + 'agrania': ['agrania', 'angaria', 'niagara'], + 'agre': ['ager', 'agre', 'gare', 'gear', 'rage'], + 'agree': ['agree', 'eager', 'eagre'], + 'agreed': ['agreed', 'geared'], + 'agregation': ['aggeration', 'agregation'], + 'agrege': ['agrege', 'raggee'], + 'agrestian': ['agrestian', 'gerastian', 'stangeria'], + 'agrestic': ['agrestic', 'ergastic'], + 'agria': ['agria', 'igara'], + 'agricolist': ['agricolist', 'algoristic'], + 'agrilus': ['agrilus', 'gularis'], + 'agrin': ['agrin', 'grain'], + 'agrito': ['agrito', 'ortiga'], + 'agroan': ['agroan', 'angora', 'anogra', 'arango', 'argoan', 'onagra'], + 'agrom': ['agrom', 'morga'], + 'agrotis': ['agistor', 'agrotis', 'orgiast'], + 'aground': ['aground', 'durango'], + 'agrufe': ['agrufe', 'gaufer', 'gaufre'], + 'agrypnia': ['agrypnia', 'paginary'], + 'agsam': ['agsam', 'magas'], + 'agua': ['agau', 'agua'], + 'ague': ['ague', 'auge'], + 'agush': ['agush', 'saugh'], + 'agust': ['agust', 'tsuga'], + 'agy': ['agy', 'gay'], + 'ah': ['ah', 'ha'], + 'ahem': ['ahem', 'haem', 'hame'], + 'ahet': ['ahet', 'haet', 'hate', 'heat', 'thea'], + 'ahey': ['ahey', 'eyah', 'yeah'], + 'ahind': ['ahind', 'dinah'], + 'ahint': ['ahint', 'hiant', 'tahin'], + 'ahir': ['ahir', 'hair'], + 'ahmed': ['ahmed', 'hemad'], + 'ahmet': ['ahmet', 'thema'], + 'aho': ['aho', 'hao'], + 'ahom': ['ahom', 'moha'], + 'ahong': ['ahong', 'hogan'], + 'ahorse': ['ahorse', 'ashore', 'hoarse', 'shorea'], + 'ahoy': ['ahoy', 'hoya'], + 'ahriman': ['ahriman', 'miranha'], + 'ahsan': ['ahsan', 'hansa', 'hasan'], + 'aht': ['aht', 'hat', 'tha'], + 'ahtena': ['ahtena', 'aneath', 'athena'], + 'ahu': ['ahu', 'auh', 'hau'], + 'ahum': ['ahum', 'huma'], + 'ahunt': ['ahunt', 'haunt', 'thuan', 'unhat'], + 'aid': ['aid', 'ida'], + 'aidance': ['aidance', 'canidae'], + 'aide': ['aide', 'idea'], + 'aidenn': ['aidenn', 'andine', 'dannie', 'indane'], + 'aider': ['aider', 'deair', 'irade', 'redia'], + 'aides': ['aides', 'aside', 'sadie'], + 'aiel': ['aiel', 'aile', 'elia'], + 'aiglet': ['aiglet', 'ligate', 'taigle', 'tailge'], + 'ail': ['ail', 'ila', 'lai'], + 'ailantine': ['ailantine', 'antialien'], + 'ailanto': ['ailanto', 'alation', 'laotian', 'notalia'], + 'aile': ['aiel', 'aile', 'elia'], + 'aileen': ['aileen', 'elaine'], + 'aileron': ['aileron', 'alienor'], + 'ailing': ['ailing', 'angili', 'nilgai'], + 'ailment': ['ailment', 'aliment'], + 'aim': ['aim', 'ami', 'ima'], + 'aimer': ['aimer', 'maire', 'marie', 'ramie'], + 'aimless': ['aimless', 'melissa', 'seismal'], + 'ainaleh': ['ainaleh', 'halenia'], + 'aint': ['aint', 'anti', 'tain', 'tina'], + 'aion': ['aion', 'naio'], + 'air': ['air', 'ira', 'ria'], + 'aira': ['aira', 'aria', 'raia'], + 'airan': ['airan', 'arain', 'arian'], + 'airdrome': ['airdrome', 'armoried'], + 'aire': ['aire', 'eria'], + 'airer': ['airer', 'arrie'], + 'airlike': ['airlike', 'kiliare'], + 'airman': ['airman', 'amarin', 'marian', 'marina', 'mirana'], + 'airplane': ['airplane', 'perianal'], + 'airplanist': 
['airplanist', 'triplasian'], + 'airt': ['airt', 'rita', 'tari', 'tiar'], + 'airy': ['airy', 'yair'], + 'aisle': ['aisle', 'elias'], + 'aisled': ['aisled', 'deasil', 'ladies', 'sailed'], + 'aisling': ['aisling', 'sailing'], + 'aissor': ['aissor', 'rissoa'], + 'ait': ['ait', 'ati', 'ita', 'tai'], + 'aitch': ['aitch', 'chait', 'chati', 'chita', 'taich', 'tchai'], + 'aition': ['aition', 'itonia'], + 'aizle': ['aizle', 'eliza'], + 'ajar': ['ajar', 'jara', 'raja'], + 'ajhar': ['ajhar', 'rajah'], + 'ajuga': ['ajuga', 'jagua'], + 'ak': ['ak', 'ka'], + 'akal': ['akal', 'kala'], + 'akali': ['akali', 'alaki'], + 'akan': ['akan', 'kana'], + 'ake': ['ake', 'kea'], + 'akebi': ['akebi', 'bakie'], + 'akha': ['akha', 'kaha'], + 'akim': ['akim', 'maki'], + 'akin': ['akin', 'kina', 'naik'], + 'akka': ['akka', 'kaka'], + 'aknee': ['aknee', 'ankee', 'keena'], + 'ako': ['ako', 'koa', 'oak', 'oka'], + 'akoasma': ['akoasma', 'amakosa'], + 'aku': ['aku', 'auk', 'kua'], + 'al': ['al', 'la'], + 'ala': ['aal', 'ala'], + 'alacritous': ['alacritous', 'lactarious', 'lactosuria'], + 'alain': ['alain', 'alani', 'liana'], + 'alaki': ['akali', 'alaki'], + 'alalite': ['alalite', 'tillaea'], + 'alamo': ['alamo', 'aloma'], + 'alan': ['alan', 'anal', 'lana'], + 'alangin': ['alangin', 'anginal', 'anglian', 'nagnail'], + 'alangine': ['alangine', 'angelina', 'galenian'], + 'alani': ['alain', 'alani', 'liana'], + 'alanine': ['alanine', 'linnaea'], + 'alans': ['alans', 'lanas', 'nasal'], + 'alantic': ['actinal', 'alantic', 'alicant', 'antical'], + 'alantolic': ['alantolic', 'allantoic'], + 'alanyl': ['alanyl', 'anally'], + 'alares': ['alares', 'arales'], + 'alaria': ['alaria', 'aralia'], + 'alaric': ['alaric', 'racial'], + 'alarm': ['alarm', 'malar', 'maral', 'marla', 'ramal'], + 'alarmable': ['alarmable', 'ambarella'], + 'alarmedly': ['alarmedly', 'medallary'], + 'alarming': ['alarming', 'marginal'], + 'alarmingly': ['alarmingly', 'marginally'], + 'alarmist': ['alarmist', 'alastrim'], + 'alas': ['alas', 'lasa'], + 'alastair': ['alastair', 'salariat'], + 'alaster': ['alaster', 'tarsale'], + 'alastrim': ['alarmist', 'alastrim'], + 'alatern': ['alatern', 'lateran'], + 'alaternus': ['alaternus', 'saturnale'], + 'alation': ['ailanto', 'alation', 'laotian', 'notalia'], + 'alb': ['alb', 'bal', 'lab'], + 'alba': ['alba', 'baal', 'bala'], + 'alban': ['alban', 'balan', 'banal', 'laban', 'nabal', 'nabla'], + 'albanite': ['albanite', 'balanite', 'nabalite'], + 'albardine': ['albardine', 'drainable'], + 'albarium': ['albarium', 'brumalia'], + 'albata': ['albata', 'atabal', 'balata'], + 'albatros': ['abrastol', 'albatros'], + 'albe': ['abel', 'able', 'albe', 'bale', 'beal', 'bela', 'blae'], + 'albedo': ['albedo', 'doable'], + 'albee': ['abele', 'albee'], + 'albeit': ['albeit', + 'albite', + 'baltei', + 'belait', + 'betail', + 'bletia', + 'libate'], + 'albert': ['albert', 'balter', 'labret', 'tabler'], + 'alberta': ['alberta', 'latebra', 'ratable'], + 'albertina': ['albertina', 'trainable'], + 'alberto': ['alberto', 'bloater', 'latrobe'], + 'albetad': ['albetad', 'datable'], + 'albi': ['albi', 'bail', 'bali'], + 'albian': ['albian', 'bilaan'], + 'albin': ['albin', 'binal', 'blain'], + 'albino': ['albino', 'albion', 'alboin', 'oliban'], + 'albion': ['albino', 'albion', 'alboin', 'oliban'], + 'albite': ['albeit', + 'albite', + 'baltei', + 'belait', + 'betail', + 'bletia', + 'libate'], + 'alboin': ['albino', 'albion', 'alboin', 'oliban'], + 'alboranite': ['alboranite', 'rationable'], + 'albronze': ['albronze', 'blazoner'], + 'albuca': ['albuca', 
'bacula'], + 'albuminate': ['albuminate', 'antelabium'], + 'alburnum': ['alburnum', 'laburnum'], + 'alcaic': ['alcaic', 'cicala'], + 'alcaide': ['alcaide', 'alcidae'], + 'alcamine': ['alcamine', 'analcime', 'calamine', 'camelina'], + 'alcantarines': ['alcantarines', 'lancasterian'], + 'alcedo': ['alcedo', 'dacelo'], + 'alces': ['alces', 'casel', 'scale'], + 'alchemic': ['alchemic', 'chemical'], + 'alchemistic': ['alchemistic', 'hemiclastic'], + 'alchera': ['alchera', 'archeal'], + 'alchitran': ['alchitran', 'clathrina'], + 'alcicornium': ['acroclinium', 'alcicornium'], + 'alcidae': ['alcaide', 'alcidae'], + 'alcidine': ['alcidine', 'danielic', 'lecaniid'], + 'alcine': ['alcine', 'ancile'], + 'alco': ['alco', 'coal', 'cola', 'loca'], + 'alcoate': ['alcoate', 'coelata'], + 'alcogel': ['alcogel', 'collage'], + 'alcor': ['alcor', 'calor', 'carlo', 'carol', 'claro', 'coral'], + 'alcoran': ['alcoran', 'ancoral', 'carolan'], + 'alcoranic': ['acronical', 'alcoranic'], + 'alcove': ['alcove', 'coeval', 'volcae'], + 'alcyon': ['alcyon', 'cyanol'], + 'alcyone': ['alcyone', 'cyanole'], + 'aldamine': ['aldamine', 'lamnidae'], + 'aldeament': ['aldeament', 'mandelate'], + 'alder': ['alder', 'daler', 'lader'], + 'aldermanry': ['aldermanry', 'marylander'], + 'aldern': ['aldern', + 'darnel', + 'enlard', + 'lander', + 'lenard', + 'randle', + 'reland'], + 'aldime': ['aldime', 'mailed', 'medial'], + 'aldine': ['aldine', 'daniel', 'delian', 'denial', 'enalid', 'leadin'], + 'aldus': ['aldus', 'sauld'], + 'ale': ['ale', 'lea'], + 'alec': ['acle', 'alec', 'lace'], + 'aleconner': ['aleconner', 'noncereal'], + 'alecost': ['alecost', 'lactose', 'scotale', 'talcose'], + 'alectoris': ['alectoris', 'sarcolite', 'sclerotia', 'sectorial'], + 'alectrion': ['alectrion', 'clarionet', 'crotaline', 'locarnite'], + 'alectryon': ['alectryon', 'tolerancy'], + 'alecup': ['alecup', 'clupea'], + 'alef': ['alef', 'feal', 'flea', 'leaf'], + 'aleft': ['aleft', 'alfet', 'fetal', 'fleta'], + 'alegar': ['aglare', 'alegar', 'galera', 'laager'], + 'alem': ['alem', 'alme', 'lame', 'leam', 'male', 'meal', 'mela'], + 'alemanni': ['alemanni', 'melanian'], + 'alemite': ['alemite', 'elamite'], + 'alen': ['alen', 'lane', 'lean', 'lena', 'nael', 'neal'], + 'aleph': ['aleph', 'pheal'], + 'alepot': ['alepot', 'pelota'], + 'alerce': ['alerce', 'cereal', 'relace'], + 'alerse': ['alerse', 'leaser', 'reales', 'resale', 'reseal', 'sealer'], + 'alert': ['alert', 'alter', 'artel', 'later', 'ratel', 'taler', 'telar'], + 'alertly': ['alertly', 'elytral'], + 'alestake': ['alestake', 'eastlake'], + 'aletap': ['aletap', 'palate', 'platea'], + 'aletris': ['aletris', 'alister', 'listera', 'realist', 'saltier'], + 'aleuronic': ['aleuronic', 'urceolina'], + 'aleut': ['aleut', 'atule'], + 'aleutic': ['aleutic', 'auletic', 'caulite', 'lutecia'], + 'alevin': ['alevin', 'alvine', 'valine', 'veinal', 'venial', 'vineal'], + 'alex': ['alex', 'axel', 'axle'], + 'alexandrian': ['alexandrian', 'alexandrina'], + 'alexandrina': ['alexandrian', 'alexandrina'], + 'alexin': ['alexin', 'xenial'], + 'aleyard': ['aleyard', 'already'], + 'alfenide': ['alfenide', 'enfilade'], + 'alfet': ['aleft', 'alfet', 'fetal', 'fleta'], + 'alfred': ['alfred', 'fardel'], + 'alfur': ['alfur', 'fural'], + 'alga': ['agal', 'agla', 'alga', 'gala'], + 'algae': ['algae', 'galea'], + 'algal': ['algal', 'galla'], + 'algebar': ['algebar', 'algebra'], + 'algebra': ['algebar', 'algebra'], + 'algedi': ['algedi', 'galeid'], + 'algedo': ['algedo', 'geodal'], + 'algedonic': ['algedonic', 'genocidal'], + 'algenib': 
['algenib', 'bealing', 'belgian', 'bengali'], + 'algerian': ['algerian', 'geranial', 'regalian'], + 'algernon': ['algernon', 'nonglare'], + 'algesia': ['algesia', 'sailage'], + 'algesis': ['algesis', 'glassie'], + 'algieba': ['algieba', 'bailage'], + 'algin': ['algin', 'align', 'langi', 'liang', 'linga'], + 'alginate': ['agential', 'alginate'], + 'algine': ['algine', 'genial', 'linage'], + 'algist': ['algist', 'gaslit'], + 'algometer': ['algometer', 'glomerate'], + 'algometric': ['algometric', 'melotragic'], + 'algomian': ['algomian', 'magnolia'], + 'algor': ['algor', 'argol', 'goral', 'largo'], + 'algoristic': ['agricolist', 'algoristic'], + 'algorithm': ['algorithm', 'logarithm'], + 'algorithmic': ['algorithmic', 'logarithmic'], + 'algraphic': ['algraphic', 'graphical'], + 'algum': ['algum', 'almug', 'glaum', 'gluma', 'mulga'], + 'alibility': ['alibility', 'liability'], + 'alible': ['alible', 'belial', 'labile', 'liable'], + 'alicant': ['actinal', 'alantic', 'alicant', 'antical'], + 'alice': ['alice', 'celia', 'ileac'], + 'alichel': ['alichel', 'challie', 'helical'], + 'alida': ['adlai', 'alida'], + 'alien': ['alien', 'aline', 'anile', 'elain', 'elian', 'laine', 'linea'], + 'alienation': ['alienation', 'alineation'], + 'alienator': ['alienator', 'rationale'], + 'alienism': ['alienism', 'milesian'], + 'alienor': ['aileron', 'alienor'], + 'alif': ['alif', 'fail'], + 'aligerous': ['aligerous', 'glaireous'], + 'align': ['algin', 'align', 'langi', 'liang', 'linga'], + 'aligner': ['aligner', 'engrail', 'realign', 'reginal'], + 'alignment': ['alignment', 'lamenting'], + 'alike': ['alike', 'lakie'], + 'alikeness': ['alikeness', 'leakiness'], + 'alima': ['alima', 'lamia'], + 'aliment': ['ailment', 'aliment'], + 'alimenter': ['alimenter', 'marteline'], + 'alimentic': ['alimentic', 'antilemic', 'melanitic', 'metanilic'], + 'alimonied': ['alimonied', 'maleinoid'], + 'alin': ['alin', 'anil', 'lain', 'lina', 'nail'], + 'aline': ['alien', 'aline', 'anile', 'elain', 'elian', 'laine', 'linea'], + 'alineation': ['alienation', 'alineation'], + 'aliped': ['aliped', 'elapid'], + 'aliptes': ['aliptes', 'pastile', 'talipes'], + 'aliptic': ['aliptic', 'aplitic'], + 'aliseptal': ['aliseptal', 'pallasite'], + 'alish': ['alish', 'hilsa'], + 'alisier': ['alisier', 'israeli'], + 'aliso': ['aliso', 'alois'], + 'alison': ['alison', 'anolis'], + 'alisp': ['alisp', 'lapsi'], + 'alist': ['alist', 'litas', 'slait', 'talis'], + 'alister': ['aletris', 'alister', 'listera', 'realist', 'saltier'], + 'alit': ['alit', 'tail', 'tali'], + 'alite': ['alite', 'laeti'], + 'aliunde': ['aliunde', 'unideal'], + 'aliveness': ['aliveness', 'vealiness'], + 'alix': ['alix', 'axil'], + 'alk': ['alk', 'lak'], + 'alkalizer': ['alkalizer', 'lazarlike'], + 'alkamin': ['alkamin', 'malakin'], + 'alkene': ['alkene', 'lekane'], + 'alkes': ['alkes', 'sakel', 'slake'], + 'alkine': ['alkine', 'ilkane', 'inlake', 'inleak'], + 'alky': ['alky', 'laky'], + 'alkylic': ['alkylic', 'lilacky'], + 'allagite': ['allagite', 'alligate', 'talliage'], + 'allah': ['allah', 'halal'], + 'allantoic': ['alantolic', 'allantoic'], + 'allay': ['allay', 'yalla'], + 'allayer': ['allayer', 'yallaer'], + 'allbone': ['allbone', 'bellona'], + 'alle': ['alle', 'ella', 'leal'], + 'allecret': ['allecret', 'cellaret'], + 'allegate': ['allegate', 'ellagate'], + 'allegorist': ['allegorist', 'legislator'], + 'allergen': ['allergen', 'generall'], + 'allergia': ['allergia', 'galleria'], + 'allergin': ['allergin', 'gralline'], + 'allergy': ['allergy', 'gallery', 'largely', 'regally'], + 
'alliable': ['alliable', 'labiella'], + 'alliably': ['alliably', 'labially'], + 'alliance': ['alliance', 'canaille'], + 'alliancer': ['alliancer', 'ralliance'], + 'allie': ['allie', 'leila', 'lelia'], + 'allies': ['allies', 'aselli'], + 'alligate': ['allagite', 'alligate', 'talliage'], + 'allium': ['allium', 'alulim', 'muilla'], + 'allocation': ['allocation', 'locational'], + 'allocute': ['allocute', 'loculate'], + 'allocution': ['allocution', 'loculation'], + 'allogenic': ['allogenic', 'collegian'], + 'allonym': ['allonym', 'malonyl'], + 'allopathy': ['allopathy', 'lalopathy'], + 'allopatric': ['allopatric', 'patrilocal'], + 'allot': ['allot', 'atoll'], + 'allothogenic': ['allothogenic', 'ethnological'], + 'allover': ['allover', 'overall'], + 'allower': ['allower', 'reallow'], + 'alloy': ['alloy', 'loyal'], + 'allude': ['allude', 'aludel'], + 'allure': ['allure', 'laurel'], + 'alma': ['alma', 'amla', 'lama', 'mala'], + 'almach': ['almach', 'chamal'], + 'almaciga': ['almaciga', 'macaglia'], + 'almain': ['almain', 'animal', 'lamina', 'manila'], + 'alman': ['alman', 'lamna', 'manal'], + 'almandite': ['almandite', 'laminated'], + 'alme': ['alem', 'alme', 'lame', 'leam', 'male', 'meal', 'mela'], + 'almerian': ['almerian', 'manerial'], + 'almoign': ['almoign', 'loaming'], + 'almon': ['almon', 'monal'], + 'almond': ['almond', 'dolman'], + 'almoner': ['almoner', 'moneral', 'nemoral'], + 'almonry': ['almonry', 'romanly'], + 'alms': ['alms', 'salm', 'slam'], + 'almuce': ['almuce', 'caelum', 'macule'], + 'almude': ['almude', 'maudle'], + 'almug': ['algum', 'almug', 'glaum', 'gluma', 'mulga'], + 'almuten': ['almuten', 'emulant'], + 'aln': ['aln', 'lan'], + 'alnage': ['alnage', 'angela', 'galena', 'lagena'], + 'alnico': ['alnico', 'cliona', 'oilcan'], + 'alnilam': ['alnilam', 'manilla'], + 'alnoite': ['alnoite', 'elation', 'toenail'], + 'alnuin': ['alnuin', 'unnail'], + 'alo': ['alo', 'lao', 'loa'], + 'alochia': ['acholia', 'alochia'], + 'alod': ['alod', 'dola', 'load', 'odal'], + 'aloe': ['aloe', 'olea'], + 'aloetic': ['aloetic', 'coalite'], + 'aloft': ['aloft', 'float', 'flota'], + 'alogian': ['alogian', 'logania'], + 'alogical': ['alogical', 'colalgia'], + 'aloid': ['aloid', 'dolia', 'idola'], + 'aloin': ['aloin', 'anoil', 'anoli'], + 'alois': ['aliso', 'alois'], + 'aloma': ['alamo', 'aloma'], + 'alone': ['alone', 'anole', 'olena'], + 'along': ['along', 'gonal', 'lango', 'longa', 'nogal'], + 'alonso': ['alonso', 'alsoon', 'saloon'], + 'alonzo': ['alonzo', 'zoonal'], + 'alop': ['alop', 'opal'], + 'alopecist': ['alopecist', 'altiscope', 'epicostal', 'scapolite'], + 'alosa': ['alosa', 'loasa', 'oasal'], + 'alose': ['alose', 'osela', 'solea'], + 'alow': ['alow', 'awol', 'lowa'], + 'aloxite': ['aloxite', 'oxalite'], + 'alp': ['alp', 'lap', 'pal'], + 'alpeen': ['alpeen', 'lenape', 'pelean'], + 'alpen': ['alpen', 'nepal', 'panel', 'penal', 'plane'], + 'alpestral': ['alpestral', 'palestral'], + 'alpestrian': ['alpestrian', 'palestrian', 'psalterian'], + 'alpestrine': ['alpestrine', 'episternal', 'interlapse', 'presential'], + 'alphenic': ['alphenic', 'cephalin'], + 'alphonse': ['alphonse', 'phenosal'], + 'alphos': ['alphos', 'pholas'], + 'alphosis': ['alphosis', 'haplosis'], + 'alpid': ['alpid', 'plaid'], + 'alpieu': ['alpieu', 'paulie'], + 'alpine': ['alpine', 'nepali', 'penial', 'pineal'], + 'alpinist': ['alpinist', 'antislip'], + 'alpist': ['alpist', 'pastil', 'spital'], + 'alraun': ['alraun', 'alruna', 'ranula'], + 'already': ['aleyard', 'already'], + 'alrighty': ['alrighty', 'arightly'], + 'alruna': 
['alraun', 'alruna', 'ranula'], + 'alsine': ['alsine', 'neslia', 'saline', 'selina', 'silane'], + 'also': ['also', 'sola'], + 'alsoon': ['alonso', 'alsoon', 'saloon'], + 'alstonidine': ['alstonidine', 'nonidealist'], + 'alstonine': ['alstonine', 'tensional'], + 'alt': ['alt', 'lat', 'tal'], + 'altaian': ['altaian', 'latania', 'natalia'], + 'altaic': ['altaic', 'altica'], + 'altaid': ['adital', 'altaid'], + 'altair': ['altair', 'atrail', 'atrial', 'lariat', 'latria', 'talari'], + 'altamira': ['altamira', 'matralia'], + 'altar': ['altar', 'artal', 'ratal', 'talar'], + 'altared': ['altared', 'laterad'], + 'altarist': ['altarist', 'striatal'], + 'alter': ['alert', 'alter', 'artel', 'later', 'ratel', 'taler', 'telar'], + 'alterability': ['alterability', 'bilaterality', 'relatability'], + 'alterable': ['alterable', 'relatable'], + 'alterant': ['alterant', 'tarletan'], + 'alterer': ['alterer', 'realter', 'relater'], + 'altern': ['altern', 'antler', 'learnt', 'rental', 'ternal'], + 'alterne': ['alterne', 'enteral', 'eternal', 'teleran', 'teneral'], + 'althea': ['althea', 'elatha'], + 'altho': ['altho', 'lhota', 'loath'], + 'althorn': ['althorn', 'anthrol', 'thronal'], + 'altica': ['altaic', 'altica'], + 'altin': ['altin', 'latin'], + 'altiscope': ['alopecist', 'altiscope', 'epicostal', 'scapolite'], + 'altitude': ['altitude', 'latitude'], + 'altitudinal': ['altitudinal', 'latitudinal'], + 'altitudinarian': ['altitudinarian', 'latitudinarian'], + 'alto': ['alto', 'lota'], + 'altrices': ['altrices', 'selictar'], + 'altruism': ['altruism', 'muralist', 'traulism', 'ultraism'], + 'altruist': ['altruist', 'ultraist'], + 'altruistic': ['altruistic', 'truistical', 'ultraistic'], + 'aludel': ['allude', 'aludel'], + 'aludra': ['adular', 'aludra', 'radula'], + 'alulet': ['alulet', 'luteal'], + 'alulim': ['allium', 'alulim', 'muilla'], + 'alum': ['alum', 'maul'], + 'aluminate': ['aluminate', 'alumniate'], + 'aluminide': ['aluminide', 'unimedial'], + 'aluminosilicate': ['aluminosilicate', 'silicoaluminate'], + 'alumna': ['alumna', 'manual'], + 'alumni': ['alumni', 'unmail'], + 'alumniate': ['aluminate', 'alumniate'], + 'alur': ['alur', 'laur', 'lura', 'raul', 'ural'], + 'alure': ['alure', 'ureal'], + 'alurgite': ['alurgite', 'ligature'], + 'aluta': ['aluta', 'taula'], + 'alvan': ['alvan', 'naval'], + 'alvar': ['alvar', 'arval', 'larva'], + 'alveus': ['alveus', 'avulse'], + 'alvin': ['alvin', 'anvil', 'nival', 'vinal'], + 'alvine': ['alevin', 'alvine', 'valine', 'veinal', 'venial', 'vineal'], + 'aly': ['aly', 'lay'], + 'alypin': ['alypin', 'pialyn'], + 'alytes': ['alytes', 'astely', 'lysate', 'stealy'], + 'am': ['am', 'ma'], + 'ama': ['aam', 'ama'], + 'amacrine': ['amacrine', 'american', 'camerina', 'cinerama'], + 'amadi': ['amadi', 'damia', 'madia', 'maida'], + 'amaethon': ['amaethon', 'thomaean'], + 'amaga': ['agama', 'amaga'], + 'amah': ['amah', 'maha'], + 'amain': ['amain', 'amani', 'amnia', 'anima', 'mania'], + 'amakosa': ['akoasma', 'amakosa'], + 'amalgam': ['amalgam', 'malagma'], + 'amang': ['amang', 'ganam', 'manga'], + 'amani': ['amain', 'amani', 'amnia', 'anima', 'mania'], + 'amanist': ['amanist', 'stamina'], + 'amanitin': ['amanitin', 'maintain'], + 'amanitine': ['amanitine', 'inanimate'], + 'amanori': ['amanori', 'moarian'], + 'amapa': ['amapa', 'apama'], + 'amar': ['amar', 'amra', 'mara', 'rama'], + 'amarin': ['airman', 'amarin', 'marian', 'marina', 'mirana'], + 'amaroid': ['amaroid', 'diorama'], + 'amarth': ['amarth', 'martha'], + 'amass': ['amass', 'assam', 'massa', 'samas'], + 'amasser': 
['amasser', 'reamass'], + 'amati': ['amati', 'amita', 'matai'], + 'amatorian': ['amatorian', 'inamorata'], + 'amaurosis': ['amaurosis', 'mosasauri'], + 'amazonite': ['amazonite', 'anatomize'], + 'amba': ['amba', 'maba'], + 'ambar': ['abram', 'ambar'], + 'ambarella': ['alarmable', 'ambarella'], + 'ambash': ['ambash', 'shamba'], + 'ambay': ['ambay', 'mbaya'], + 'ambeer': ['ambeer', 'beamer'], + 'amber': ['amber', 'bearm', 'bemar', 'bream', 'embar'], + 'ambier': ['ambier', 'bremia', 'embira'], + 'ambit': ['ambit', 'imbat'], + 'ambivert': ['ambivert', 'verbatim'], + 'amble': ['amble', 'belam', 'blame', 'mabel'], + 'ambler': ['ambler', 'blamer', 'lamber', 'marble', 'ramble'], + 'ambling': ['ambling', 'blaming'], + 'amblingly': ['amblingly', 'blamingly'], + 'ambo': ['ambo', 'boma'], + 'ambos': ['ambos', 'sambo'], + 'ambrein': ['ambrein', 'mirbane'], + 'ambrette': ['ambrette', 'tambreet'], + 'ambrose': ['ambrose', 'mesobar'], + 'ambrosia': ['ambrosia', 'saboraim'], + 'ambrosin': ['ambrosin', 'barosmin', 'sabromin'], + 'ambry': ['ambry', 'barmy'], + 'ambury': ['ambury', 'aumbry'], + 'ambush': ['ambush', 'shambu'], + 'amchoor': ['amchoor', 'ochroma'], + 'ame': ['ame', 'mae'], + 'ameed': ['adeem', 'ameed', 'edema'], + 'ameen': ['ameen', 'amene', 'enema'], + 'amelification': ['amelification', 'maleficiation'], + 'ameliorant': ['ameliorant', 'lomentaria'], + 'amellus': ['amellus', 'malleus'], + 'amelu': ['amelu', 'leuma', 'ulema'], + 'amelus': ['amelus', 'samuel'], + 'amen': ['amen', 'enam', 'mane', 'mean', 'name', 'nema'], + 'amenability': ['amenability', 'nameability'], + 'amenable': ['amenable', 'nameable'], + 'amend': ['amend', 'mande', 'maned'], + 'amende': ['amende', 'demean', 'meaned', 'nadeem'], + 'amender': ['amender', 'meander', 'reamend', 'reedman'], + 'amends': ['amends', 'desman'], + 'amene': ['ameen', 'amene', 'enema'], + 'amenia': ['amenia', 'anemia'], + 'amenism': ['amenism', 'immanes', 'misname'], + 'amenite': ['amenite', 'etamine', 'matinee'], + 'amenorrheal': ['amenorrheal', 'melanorrhea'], + 'ament': ['ament', 'meant', 'teman'], + 'amental': ['amental', 'leatman'], + 'amentia': ['amentia', 'aminate', 'anamite', 'animate'], + 'amerce': ['amerce', 'raceme'], + 'amercer': ['amercer', 'creamer'], + 'american': ['amacrine', 'american', 'camerina', 'cinerama'], + 'amerind': ['adermin', 'amerind', 'dimeran'], + 'amerism': ['amerism', 'asimmer', 'sammier'], + 'ameristic': ['ameristic', 'armistice', 'artemisic'], + 'amesite': ['amesite', 'mesitae', 'semitae'], + 'ametria': ['ametria', 'artemia', 'meratia', 'ramaite'], + 'ametrope': ['ametrope', 'metapore'], + 'amex': ['amex', 'exam', 'xema'], + 'amgarn': ['amgarn', 'mangar', 'marang', 'ragman'], + 'amhar': ['amhar', 'mahar', 'mahra'], + 'amherstite': ['amherstite', 'hemistater'], + 'amhran': ['amhran', 'harman', 'mahran'], + 'ami': ['aim', 'ami', 'ima'], + 'amia': ['amia', 'maia'], + 'amic': ['amic', 'mica'], + 'amical': ['amical', 'camail', 'lamaic'], + 'amiced': ['amiced', 'decima'], + 'amicicide': ['amicicide', 'cimicidae'], + 'amicron': ['amicron', 'marconi', 'minorca', 'romanic'], + 'amid': ['admi', 'amid', 'madi', 'maid'], + 'amidate': ['adamite', 'amidate'], + 'amide': ['amide', 'damie', 'media'], + 'amidide': ['amidide', 'diamide', 'mididae'], + 'amidin': ['amidin', 'damnii'], + 'amidine': ['amidine', 'diamine'], + 'amidism': ['amidism', 'maidism'], + 'amidist': ['amidist', 'dimatis'], + 'amidon': ['amidon', 'daimon', 'domain'], + 'amidophenol': ['amidophenol', 'monodelphia'], + 'amidst': ['amidst', 'datism'], + 'amigo': ['amigo', 
'imago'], + 'amiidae': ['amiidae', 'maiidae'], + 'amil': ['amil', 'amli', 'lima', 'mail', 'mali', 'mila'], + 'amiles': ['amiles', 'asmile', 'mesail', 'mesial', 'samiel'], + 'amimia': ['amimia', 'miamia'], + 'amimide': ['amimide', 'mimidae'], + 'amin': ['amin', 'main', 'mani', 'mian', 'mina', 'naim'], + 'aminate': ['amentia', 'aminate', 'anamite', 'animate'], + 'amination': ['amination', 'animation'], + 'amine': ['amine', 'anime', 'maine', 'manei'], + 'amini': ['amini', 'animi'], + 'aminize': ['aminize', 'animize', 'azimine'], + 'amino': ['amino', 'inoma', 'naomi', 'omani', 'omina'], + 'aminoplast': ['aminoplast', 'plasmation'], + 'amintor': ['amintor', 'tormina'], + 'amir': ['amir', 'irma', 'mari', 'mira', 'rami', 'rima'], + 'amiranha': ['amiranha', 'maharani'], + 'amiray': ['amiray', 'myaria'], + 'amissness': ['amissness', 'massiness'], + 'amita': ['amati', 'amita', 'matai'], + 'amitosis': ['amitosis', 'omasitis'], + 'amixia': ['amixia', 'ixiama'], + 'amla': ['alma', 'amla', 'lama', 'mala'], + 'amli': ['amil', 'amli', 'lima', 'mail', 'mali', 'mila'], + 'amlong': ['amlong', 'logman'], + 'amma': ['amma', 'maam'], + 'ammelin': ['ammelin', 'limeman'], + 'ammeline': ['ammeline', 'melamine'], + 'ammeter': ['ammeter', 'metamer'], + 'ammi': ['ammi', 'imam', 'maim', 'mima'], + 'ammine': ['ammine', 'immane'], + 'ammo': ['ammo', 'mamo'], + 'ammonic': ['ammonic', 'mocmain'], + 'ammonolytic': ['ammonolytic', 'commonality'], + 'ammunition': ['ammunition', 'antimonium'], + 'amnestic': ['amnestic', 'semantic'], + 'amnia': ['amain', 'amani', 'amnia', 'anima', 'mania'], + 'amniac': ['amniac', 'caiman', 'maniac'], + 'amnic': ['amnic', 'manic'], + 'amnion': ['amnion', 'minoan', 'nomina'], + 'amnionata': ['amnionata', 'anamniota'], + 'amnionate': ['amnionate', 'anamniote', 'emanation'], + 'amniota': ['amniota', 'itonama'], + 'amniote': ['amniote', 'anomite'], + 'amniotic': ['amniotic', 'mication'], + 'amniotome': ['amniotome', 'momotinae'], + 'amok': ['amok', 'mako'], + 'amole': ['amole', 'maleo'], + 'amomis': ['amomis', 'mimosa'], + 'among': ['among', 'mango'], + 'amor': ['amor', 'maro', 'mora', 'omar', 'roam'], + 'amores': ['amores', 'ramose', 'sorema'], + 'amoret': ['amoret', 'morate'], + 'amorist': ['amorist', 'aortism', 'miastor'], + 'amoritic': ['amoritic', 'microtia'], + 'amorpha': ['amorpha', 'amphora'], + 'amorphic': ['amorphic', 'amphoric'], + 'amorphous': ['amorphous', 'amphorous'], + 'amort': ['amort', 'morat', 'torma'], + 'amortize': ['amortize', 'atomizer'], + 'amos': ['amos', 'soam', 'soma'], + 'amotion': ['amotion', 'otomian'], + 'amount': ['amount', 'moutan', 'outman'], + 'amoy': ['amoy', 'mayo'], + 'ampelis': ['ampelis', 'lepisma'], + 'ampelite': ['ampelite', 'pimelate'], + 'ampelitic': ['ampelitic', 'implicate'], + 'amper': ['amper', 'remap'], + 'amperemeter': ['amperemeter', 'permeameter'], + 'amperian': ['amperian', 'paramine', 'pearmain'], + 'amphicyon': ['amphicyon', 'hypomanic'], + 'amphigenous': ['amphigenous', 'musophagine'], + 'amphiphloic': ['amphiphloic', 'amphophilic'], + 'amphipodous': ['amphipodous', 'hippodamous'], + 'amphitropous': ['amphitropous', 'pastophorium'], + 'amphophilic': ['amphiphloic', 'amphophilic'], + 'amphora': ['amorpha', 'amphora'], + 'amphore': ['amphore', 'morphea'], + 'amphorette': ['amphorette', 'haptometer'], + 'amphoric': ['amorphic', 'amphoric'], + 'amphorous': ['amorphous', 'amphorous'], + 'amphoteric': ['amphoteric', 'metaphoric'], + 'ample': ['ample', 'maple'], + 'ampliate': ['ampliate', 'palamite'], + 'amplification': ['amplification', 
'palmification'], + 'amply': ['amply', 'palmy'], + 'ampul': ['ampul', 'pluma'], + 'ampulla': ['ampulla', 'palmula'], + 'amra': ['amar', 'amra', 'mara', 'rama'], + 'amsath': ['amsath', 'asthma'], + 'amsel': ['amsel', 'melas', 'mesal', 'samel'], + 'amsonia': ['amsonia', 'anosmia'], + 'amt': ['amt', 'mat', 'tam'], + 'amulet': ['amulet', 'muleta'], + 'amunam': ['amunam', 'manuma'], + 'amuse': ['amuse', 'mesua'], + 'amused': ['amused', 'masdeu', 'medusa'], + 'amusee': ['amusee', 'saeume'], + 'amuser': ['amuser', 'mauser'], + 'amusgo': ['amusgo', 'sugamo'], + 'amvis': ['amvis', 'mavis'], + 'amy': ['amy', 'may', 'mya', 'yam'], + 'amyelic': ['amyelic', 'mycelia'], + 'amyl': ['amyl', 'lyam', 'myal'], + 'amylan': ['amylan', 'lamany', 'layman'], + 'amylenol': ['amylenol', 'myelonal'], + 'amylidene': ['amylidene', 'mydaleine'], + 'amylin': ['amylin', 'mainly'], + 'amylo': ['amylo', 'loamy'], + 'amylon': ['amylon', 'onymal'], + 'amyotrophy': ['amyotrophy', 'myoatrophy'], + 'amyrol': ['amyrol', 'molary'], + 'an': ['an', 'na'], + 'ana': ['ana', 'naa'], + 'anacara': ['anacara', 'aracana'], + 'anacard': ['anacard', 'caranda'], + 'anaces': ['anaces', 'scaean'], + 'anachorism': ['anachorism', 'chorasmian', 'maraschino'], + 'anacid': ['acnida', 'anacid', 'dacian'], + 'anacreontic': ['anacreontic', 'canceration'], + 'anacusis': ['anacusis', 'ascanius'], + 'anadem': ['anadem', 'maenad'], + 'anadenia': ['anadenia', 'danainae'], + 'anadrom': ['anadrom', 'madrona', 'mandora', 'monarda', 'roadman'], + 'anaemic': ['acnemia', 'anaemic'], + 'anaeretic': ['anaeretic', 'ecarinate'], + 'anal': ['alan', 'anal', 'lana'], + 'analcime': ['alcamine', 'analcime', 'calamine', 'camelina'], + 'analcite': ['analcite', 'anticlea', 'laitance'], + 'analcitite': ['analcitite', 'catalinite'], + 'analectic': ['acclinate', 'analectic'], + 'analeptical': ['analeptical', 'placentalia'], + 'anally': ['alanyl', 'anally'], + 'analogic': ['analogic', 'calinago'], + 'analogist': ['analogist', 'nostalgia'], + 'anam': ['anam', 'mana', 'naam', 'nama'], + 'anamesite': ['anamesite', 'seamanite'], + 'anamirta': ['anamirta', 'araminta'], + 'anamite': ['amentia', 'aminate', 'anamite', 'animate'], + 'anamniota': ['amnionata', 'anamniota'], + 'anamniote': ['amnionate', 'anamniote', 'emanation'], + 'anan': ['anan', 'anna', 'nana'], + 'ananda': ['ananda', 'danaan'], + 'anandria': ['anandria', 'andriana'], + 'anangioid': ['agoniadin', 'anangioid', 'ganoidian'], + 'ananism': ['ananism', 'samnani'], + 'ananite': ['ananite', 'anatine', 'taenian'], + 'anaphoric': ['anaphoric', 'pharaonic'], + 'anaphorical': ['anaphorical', 'pharaonical'], + 'anapnea': ['anapnea', 'napaean'], + 'anapsid': ['anapsid', 'sapinda'], + 'anapsida': ['anapsida', 'anaspida'], + 'anaptotic': ['anaptotic', 'captation'], + 'anarchic': ['anarchic', 'characin'], + 'anarchism': ['anarchism', 'arachnism'], + 'anarchist': ['anarchist', 'archsaint', 'cantharis'], + 'anarcotin': ['anarcotin', 'cantorian', 'carnation', 'narcotina'], + 'anaretic': ['anaretic', 'arcanite', 'carinate', 'craniate'], + 'anarthropod': ['anarthropod', 'arthropodan'], + 'anas': ['anas', 'ansa', 'saan'], + 'anasa': ['anasa', 'asana'], + 'anaspida': ['anapsida', 'anaspida'], + 'anastaltic': ['anastaltic', 'catalanist'], + 'anat': ['anat', 'anta', 'tana'], + 'anatidae': ['anatidae', 'taeniada'], + 'anatine': ['ananite', 'anatine', 'taenian'], + 'anatocism': ['anatocism', 'anosmatic'], + 'anatole': ['anatole', 'notaeal'], + 'anatolic': ['aconital', 'actional', 'anatolic'], + 'anatomicopathologic': ['anatomicopathologic', 
'pathologicoanatomic'], + 'anatomicopathological': ['anatomicopathological', 'pathologicoanatomical'], + 'anatomicophysiologic': ['anatomicophysiologic', 'physiologicoanatomic'], + 'anatomism': ['anatomism', 'nomismata'], + 'anatomize': ['amazonite', 'anatomize'], + 'anatum': ['anatum', 'mantua', 'tamanu'], + 'anay': ['anay', 'yana'], + 'anba': ['anba', 'bana'], + 'ancestor': ['ancestor', 'entosarc'], + 'ancestral': ['ancestral', 'lancaster'], + 'anchat': ['acanth', 'anchat', 'tanach'], + 'anchietin': ['anchietin', 'cathinine'], + 'anchistea': ['anchistea', 'hanseatic'], + 'anchor': ['anchor', 'archon', 'charon', 'rancho'], + 'anchored': ['anchored', 'rondache'], + 'anchorer': ['anchorer', 'ranchero', 'reanchor'], + 'anchoretic': ['acherontic', 'anchoretic'], + 'anchoretical': ['acherontical', 'anchoretical'], + 'anchoretism': ['anchoretism', 'trichomanes'], + 'anchorite': ['anchorite', 'antechoir', 'heatronic', 'hectorian'], + 'anchoritism': ['anchoritism', 'chiromantis', 'chrismation', 'harmonistic'], + 'ancile': ['alcine', 'ancile'], + 'ancilla': ['aclinal', 'ancilla'], + 'ancillary': ['ancillary', 'carlylian', 'cranially'], + 'ancon': ['ancon', 'canon'], + 'anconitis': ['anconitis', 'antiscion', 'onanistic'], + 'ancony': ['ancony', 'canyon'], + 'ancoral': ['alcoran', 'ancoral', 'carolan'], + 'ancylus': ['ancylus', 'unscaly'], + 'ancyrene': ['ancyrene', 'cerynean'], + 'and': ['and', 'dan'], + 'anda': ['anda', 'dana'], + 'andante': ['andante', 'dantean'], + 'ande': ['ande', 'dane', 'dean', 'edna'], + 'andesitic': ['andesitic', 'dianetics'], + 'andhra': ['andhra', 'dharna'], + 'andi': ['adin', 'andi', 'dain', 'dani', 'dian', 'naid'], + 'andian': ['andian', 'danian', 'nidana'], + 'andine': ['aidenn', 'andine', 'dannie', 'indane'], + 'andira': ['adrian', 'andira', 'andria', 'radian', 'randia'], + 'andorite': ['andorite', 'nadorite', 'ordinate', 'rodentia'], + 'andre': ['andre', 'arend', 'daren', 'redan'], + 'andrew': ['andrew', 'redawn', 'wander', 'warden'], + 'andria': ['adrian', 'andira', 'andria', 'radian', 'randia'], + 'andriana': ['anandria', 'andriana'], + 'andrias': ['andrias', 'sardian', 'sarinda'], + 'andric': ['andric', 'cardin', 'rancid'], + 'andries': ['andries', 'isander', 'sardine'], + 'androgynus': ['androgynus', 'gynandrous'], + 'androl': ['androl', 'arnold', 'lardon', 'roland', 'ronald'], + 'andronicus': ['andronicus', 'unsardonic'], + 'androtomy': ['androtomy', 'dynamotor'], + 'anear': ['anear', 'arean', 'arena'], + 'aneath': ['ahtena', 'aneath', 'athena'], + 'anecdota': ['anecdota', 'coadnate'], + 'anele': ['anele', 'elean'], + 'anematosis': ['anematosis', 'menostasia'], + 'anemia': ['amenia', 'anemia'], + 'anemic': ['anemic', 'cinema', 'iceman'], + 'anemograph': ['anemograph', 'phanerogam'], + 'anemographic': ['anemographic', 'phanerogamic'], + 'anemography': ['anemography', 'phanerogamy'], + 'anemone': ['anemone', 'monaene'], + 'anent': ['anent', 'annet', 'nenta'], + 'anepia': ['anepia', 'apinae'], + 'aneretic': ['aneretic', 'centiare', 'creatine', 'increate', 'iterance'], + 'anergic': ['anergic', 'garnice', 'garniec', 'geranic', 'grecian'], + 'anergy': ['anergy', 'rangey'], + 'anerly': ['anerly', 'nearly'], + 'aneroid': ['aneroid', 'arenoid'], + 'anerotic': ['actioner', 'anerotic', 'ceration', 'creation', 'reaction'], + 'anes': ['anes', 'sane', 'sean'], + 'anesis': ['anesis', 'anseis', 'sanies', 'sansei', 'sasine'], + 'aneuric': ['aneuric', 'rinceau'], + 'aneurin': ['aneurin', 'uranine'], + 'aneurism': ['aneurism', 'arsenium', 'sumerian'], + 'anew': ['anew', 'wane', 
'wean'], + 'angami': ['angami', 'magani', 'magian'], + 'angara': ['angara', 'aranga', 'nagara'], + 'angaria': ['agrania', 'angaria', 'niagara'], + 'angel': ['agnel', 'angel', 'angle', 'galen', 'genal', 'glean', 'lagen'], + 'angela': ['alnage', 'angela', 'galena', 'lagena'], + 'angeldom': ['angeldom', 'lodgeman'], + 'angelet': ['angelet', 'elegant'], + 'angelic': ['angelic', 'galenic'], + 'angelical': ['angelical', 'englacial', 'galenical'], + 'angelically': ['angelically', 'englacially'], + 'angelin': ['angelin', 'leaning'], + 'angelina': ['alangine', 'angelina', 'galenian'], + 'angelique': ['angelique', 'equiangle'], + 'angelo': ['angelo', 'engaol'], + 'angelot': ['angelot', 'tangelo'], + 'anger': ['anger', 'areng', 'grane', 'range'], + 'angerly': ['angerly', 'geranyl'], + 'angers': ['angers', 'sanger', 'serang'], + 'angico': ['agonic', 'angico', 'gaonic', 'goniac'], + 'angie': ['angie', 'gaine'], + 'angild': ['angild', 'lading'], + 'angili': ['ailing', 'angili', 'nilgai'], + 'angina': ['angina', 'inanga'], + 'anginal': ['alangin', 'anginal', 'anglian', 'nagnail'], + 'angiocholitis': ['angiocholitis', 'cholangioitis'], + 'angiochondroma': ['angiochondroma', 'chondroangioma'], + 'angiofibroma': ['angiofibroma', 'fibroangioma'], + 'angioid': ['angioid', 'gonidia'], + 'angiokeratoma': ['angiokeratoma', 'keratoangioma'], + 'angiometer': ['angiometer', 'ergotamine', 'geometrina'], + 'angka': ['angka', 'kanga'], + 'anglaise': ['anglaise', 'gelasian'], + 'angle': ['agnel', 'angel', 'angle', 'galen', 'genal', 'glean', 'lagen'], + 'angled': ['angled', 'dangle', 'englad', 'lagend'], + 'angler': ['angler', 'arleng', 'garnel', 'largen', 'rangle', 'regnal'], + 'angles': ['angles', 'gansel'], + 'angleworm': ['angleworm', 'lawmonger'], + 'anglian': ['alangin', 'anginal', 'anglian', 'nagnail'], + 'anglic': ['anglic', 'lacing'], + 'anglish': ['anglish', 'ashling'], + 'anglist': ['anglist', 'lasting', 'salting', 'slating', 'staling'], + 'angloid': ['angloid', 'loading'], + 'ango': ['agon', 'ango', 'gaon', 'goan', 'gona'], + 'angola': ['agonal', 'angola'], + 'angolar': ['angolar', 'organal'], + 'angor': ['angor', + 'argon', + 'goran', + 'grano', + 'groan', + 'nagor', + 'orang', + 'organ', + 'rogan', + 'ronga'], + 'angora': ['agroan', 'angora', 'anogra', 'arango', 'argoan', 'onagra'], + 'angriness': ['angriness', 'ranginess'], + 'angrite': ['angrite', 'granite', 'ingrate', 'tangier', 'tearing', 'tigrean'], + 'angry': ['angry', 'rangy'], + 'angst': ['angst', 'stang', 'tangs'], + 'angster': ['angster', 'garnets', 'nagster', 'strange'], + 'anguid': ['anguid', 'gaduin'], + 'anguine': ['anguine', 'guanine', 'guinean'], + 'angula': ['angula', 'nagual'], + 'angular': ['angular', 'granula'], + 'angulate': ['angulate', 'gaetulan'], + 'anguria': ['anguria', 'gaurian', 'guarani'], + 'angus': ['agnus', 'angus', 'sugan'], + 'anharmonic': ['anharmonic', 'monarchian'], + 'anhematosis': ['anhematosis', 'somasthenia'], + 'anhidrotic': ['anhidrotic', 'trachinoid'], + 'anhistous': ['anhistous', 'isanthous'], + 'anhydridize': ['anhydridize', 'hydrazidine'], + 'anhydrize': ['anhydrize', 'hydrazine'], + 'ani': ['ani', 'ian'], + 'anice': ['anice', 'eniac'], + 'aniconic': ['aniconic', 'ciconian'], + 'aniconism': ['aniconism', 'insomniac'], + 'anicular': ['anicular', 'caulinar'], + 'anicut': ['anicut', 'nautic', 'ticuna', 'tunica'], + 'anidian': ['anidian', 'indiana'], + 'aniente': ['aniente', 'itenean'], + 'anight': ['anight', 'athing'], + 'anil': ['alin', 'anil', 'lain', 'lina', 'nail'], + 'anile': ['alien', 'aline', 'anile', 
'elain', 'elian', 'laine', 'linea'], + 'anilic': ['anilic', 'clinia'], + 'anilid': ['anilid', 'dialin', 'dianil', 'inlaid'], + 'anilide': ['anilide', 'elaidin'], + 'anilidic': ['anilidic', 'indicial'], + 'anima': ['amain', 'amani', 'amnia', 'anima', 'mania'], + 'animable': ['animable', 'maniable'], + 'animal': ['almain', 'animal', 'lamina', 'manila'], + 'animalic': ['animalic', 'limacina'], + 'animate': ['amentia', 'aminate', 'anamite', 'animate'], + 'animated': ['animated', 'mandaite', 'mantidae'], + 'animater': ['animater', 'marinate'], + 'animating': ['animating', 'imaginant'], + 'animation': ['amination', 'animation'], + 'animator': ['animator', 'tamanoir'], + 'anime': ['amine', 'anime', 'maine', 'manei'], + 'animi': ['amini', 'animi'], + 'animist': ['animist', 'santimi'], + 'animize': ['aminize', 'animize', 'azimine'], + 'animus': ['animus', 'anisum', 'anusim', 'manius'], + 'anionic': ['anionic', 'iconian'], + 'anis': ['anis', 'nais', 'nasi', 'nias', 'sain', 'sina'], + 'anisal': ['anisal', 'nasial', 'salian', 'salina'], + 'anisate': ['anisate', 'entasia'], + 'anise': ['anise', 'insea', 'siena', 'sinae'], + 'anisette': ['anisette', 'atestine', 'settaine'], + 'anisic': ['anisic', 'sicani', 'sinaic'], + 'anisilic': ['anisilic', 'sicilian'], + 'anisodont': ['anisodont', 'sondation'], + 'anisometric': ['anisometric', + 'creationism', + 'miscreation', + 'ramisection', + 'reactionism'], + 'anisopod': ['anisopod', 'isopodan'], + 'anisoptera': ['anisoptera', 'asperation', 'separation'], + 'anisum': ['animus', 'anisum', 'anusim', 'manius'], + 'anisuria': ['anisuria', 'isaurian'], + 'anisyl': ['anisyl', 'snaily'], + 'anita': ['anita', 'niata', 'tania'], + 'anither': ['anither', 'inearth', 'naither'], + 'ankee': ['aknee', 'ankee', 'keena'], + 'anker': ['anker', 'karen', 'naker'], + 'ankh': ['ankh', 'hank', 'khan'], + 'anklet': ['anklet', 'lanket', 'tankle'], + 'ankoli': ['ankoli', 'kaolin'], + 'ankus': ['ankus', 'kusan'], + 'ankyroid': ['ankyroid', 'dikaryon'], + 'anlace': ['anlace', 'calean'], + 'ann': ['ann', 'nan'], + 'anna': ['anan', 'anna', 'nana'], + 'annale': ['annale', 'anneal'], + 'annaline': ['annaline', 'linnaean'], + 'annalist': ['annalist', 'santalin'], + 'annalize': ['annalize', 'zelanian'], + 'annam': ['annam', 'manna'], + 'annamite': ['annamite', 'manatine'], + 'annard': ['annard', 'randan'], + 'annat': ['annat', 'tanan'], + 'annates': ['annates', 'tannase'], + 'anne': ['anne', 'nane'], + 'anneal': ['annale', 'anneal'], + 'annealer': ['annealer', 'lernaean', 'reanneal'], + 'annelid': ['annelid', 'lindane'], + 'annelism': ['annelism', 'linesman'], + 'anneloid': ['anneloid', 'nonideal'], + 'annet': ['anent', 'annet', 'nenta'], + 'annexer': ['annexer', 'reannex'], + 'annie': ['annie', 'inane'], + 'annite': ['annite', 'innate', 'tinean'], + 'annotine': ['annotine', 'tenonian'], + 'annoy': ['annoy', 'nonya'], + 'annoyancer': ['annoyancer', 'rayonnance'], + 'annoyer': ['annoyer', 'reannoy'], + 'annualist': ['annualist', 'sultanian'], + 'annulation': ['annulation', 'unnational'], + 'annulet': ['annulet', 'nauntle'], + 'annulment': ['annulment', 'tunnelman'], + 'annuloid': ['annuloid', 'uninodal'], + 'anodic': ['adonic', 'anodic'], + 'anodize': ['adonize', 'anodize'], + 'anodynic': ['anodynic', 'cydonian'], + 'anoestrous': ['anoestrous', 'treasonous'], + 'anoestrum': ['anoestrum', 'neuromast'], + 'anoetic': ['acetoin', 'aconite', 'anoetic', 'antoeci', 'cetonia'], + 'anogenic': ['anogenic', 'canoeing'], + 'anogra': ['agroan', 'angora', 'anogra', 'arango', 'argoan', 'onagra'], + 'anoil': 
['aloin', 'anoil', 'anoli'], + 'anoint': ['anoint', 'nation'], + 'anointer': ['anointer', 'inornate', 'nonirate', 'reanoint'], + 'anole': ['alone', 'anole', 'olena'], + 'anoli': ['aloin', 'anoil', 'anoli'], + 'anolis': ['alison', 'anolis'], + 'anomaliped': ['anomaliped', 'palaemonid'], + 'anomalism': ['anomalism', 'malmaison'], + 'anomalist': ['anomalist', 'atonalism'], + 'anomiidae': ['anomiidae', 'maioidean'], + 'anomite': ['amniote', 'anomite'], + 'anomodont': ['anomodont', 'monodonta'], + 'anomural': ['anomural', 'monaural'], + 'anon': ['anon', 'nona', 'onan'], + 'anophyte': ['anophyte', 'typhoean'], + 'anopia': ['anopia', 'aponia', 'poiana'], + 'anorak': ['anorak', 'korana'], + 'anorchism': ['anorchism', 'harmonics'], + 'anorectic': ['accretion', 'anorectic', 'neoarctic'], + 'anorthic': ['anorthic', 'anthroic', 'tanchoir'], + 'anorthitite': ['anorthitite', 'trithionate'], + 'anorthose': ['anorthose', 'hoarstone'], + 'anosia': ['anosia', 'asonia'], + 'anosmatic': ['anatocism', 'anosmatic'], + 'anosmia': ['amsonia', 'anosmia'], + 'anosmic': ['anosmic', 'masonic'], + 'anoterite': ['anoterite', 'orientate'], + 'another': ['another', 'athenor', 'rheotan'], + 'anotia': ['anotia', 'atonia'], + 'anoxia': ['anoxia', 'axonia'], + 'anoxic': ['anoxic', 'oxanic'], + 'ansa': ['anas', 'ansa', 'saan'], + 'ansar': ['ansar', 'saran', 'sarna'], + 'ansation': ['ansation', 'sonatina'], + 'anseis': ['anesis', 'anseis', 'sanies', 'sansei', 'sasine'], + 'ansel': ['ansel', 'slane'], + 'anselm': ['anselm', 'mensal'], + 'anser': ['anser', 'nares', 'rasen', 'snare'], + 'anserine': ['anserine', 'reinsane'], + 'anserous': ['anserous', 'arsenous'], + 'ansu': ['ansu', 'anus'], + 'answerer': ['answerer', 'reanswer'], + 'ant': ['ant', 'nat', 'tan'], + 'anta': ['anat', 'anta', 'tana'], + 'antacrid': ['antacrid', 'cardiant', 'radicant', 'tridacna'], + 'antagonism': ['antagonism', 'montagnais'], + 'antagonist': ['antagonist', 'stagnation'], + 'antal': ['antal', 'natal'], + 'antanemic': ['antanemic', 'cinnamate'], + 'antar': ['antar', 'antra'], + 'antarchistical': ['antarchistical', 'charlatanistic'], + 'ante': ['ante', 'aten', 'etna', 'nate', 'neat', 'taen', 'tane', 'tean'], + 'anteact': ['anteact', 'cantate'], + 'anteal': ['anteal', 'lanate', 'teanal'], + 'antechoir': ['anchorite', 'antechoir', 'heatronic', 'hectorian'], + 'antecornu': ['antecornu', 'connature'], + 'antedate': ['antedate', 'edentata'], + 'antelabium': ['albuminate', 'antelabium'], + 'antelopian': ['antelopian', 'neapolitan', 'panelation'], + 'antelucan': ['antelucan', 'cannulate'], + 'antelude': ['antelude', 'unelated'], + 'anteluminary': ['anteluminary', 'unalimentary'], + 'antemedial': ['antemedial', 'delaminate'], + 'antenatal': ['antenatal', 'atlantean', 'tantalean'], + 'anteriad': ['anteriad', 'atridean', 'dentaria'], + 'anteroinferior': ['anteroinferior', 'inferoanterior'], + 'anterosuperior': ['anterosuperior', 'superoanterior'], + 'antes': ['antes', 'nates', 'stane', 'stean'], + 'anthela': ['anthela', 'ethanal'], + 'anthem': ['anthem', 'hetman', 'mentha'], + 'anthemwise': ['anthemwise', 'whitmanese'], + 'anther': ['anther', 'nather', 'tharen', 'thenar'], + 'anthericum': ['anthericum', 'narthecium'], + 'anthicidae': ['anthicidae', 'tachinidae'], + 'anthinae': ['anthinae', 'athenian'], + 'anthogenous': ['anthogenous', 'neognathous'], + 'anthonomus': ['anthonomus', 'monanthous'], + 'anthophile': ['anthophile', 'lithophane'], + 'anthracia': ['anthracia', 'antiarcha', 'catharina'], + 'anthroic': ['anorthic', 'anthroic', 'tanchoir'], + 'anthrol': 
['althorn', 'anthrol', 'thronal'], + 'anthropic': ['anthropic', 'rhapontic'], + 'anti': ['aint', 'anti', 'tain', 'tina'], + 'antiabrin': ['antiabrin', 'britannia'], + 'antiae': ['aetian', 'antiae', 'taenia'], + 'antiager': ['antiager', 'trainage'], + 'antialbumid': ['antialbumid', 'balantidium'], + 'antialien': ['ailantine', 'antialien'], + 'antiarcha': ['anthracia', 'antiarcha', 'catharina'], + 'antiaris': ['antiaris', 'intarsia'], + 'antibenzaldoxime': ['antibenzaldoxime', 'benzantialdoxime'], + 'antiblue': ['antiblue', 'nubilate'], + 'antic': ['actin', 'antic'], + 'antical': ['actinal', 'alantic', 'alicant', 'antical'], + 'anticaste': ['anticaste', 'ataentsic'], + 'anticlea': ['analcite', 'anticlea', 'laitance'], + 'anticlinorium': ['anticlinorium', 'inclinatorium'], + 'anticly': ['anticly', 'cantily'], + 'anticness': ['anticness', 'cantiness', 'incessant'], + 'anticor': ['anticor', 'carotin', 'cortina', 'ontaric'], + 'anticouncil': ['anticouncil', 'inculcation'], + 'anticourt': ['anticourt', 'curtation', 'ructation'], + 'anticous': ['acontius', 'anticous'], + 'anticreative': ['anticreative', 'antireactive'], + 'anticreep': ['anticreep', 'apenteric', 'increpate'], + 'antidote': ['antidote', 'tetanoid'], + 'antidotical': ['antidotical', 'dictational'], + 'antietam': ['antietam', 'manettia'], + 'antiextreme': ['antiextreme', 'exterminate'], + 'antifouler': ['antifouler', 'fluorinate', 'uniflorate'], + 'antifriction': ['antifriction', 'nitrifaction'], + 'antifungin': ['antifungin', 'unfainting'], + 'antigen': ['antigen', 'gentian'], + 'antigenic': ['antigenic', 'gentianic'], + 'antiglare': ['antiglare', 'raglanite'], + 'antigod': ['antigod', 'doating'], + 'antigone': ['antigone', 'negation'], + 'antiheroic': ['antiheroic', 'theorician'], + 'antilemic': ['alimentic', 'antilemic', 'melanitic', 'metanilic'], + 'antilia': ['antilia', 'italian'], + 'antilipase': ['antilipase', 'sapiential'], + 'antilope': ['antilope', 'antipole'], + 'antimallein': ['antimallein', 'inalimental'], + 'antimasque': ['antimasque', 'squamatine'], + 'antimeric': ['antimeric', 'carminite', 'criminate', 'metrician'], + 'antimerina': ['antimerina', 'maintainer', 'remaintain'], + 'antimeter': ['antimeter', 'attermine', 'interteam', 'terminate', 'tetramine'], + 'antimodel': ['antimodel', 'maldonite', 'monilated'], + 'antimodern': ['antimodern', 'ordainment'], + 'antimonial': ['antimonial', 'lamination'], + 'antimonic': ['antimonic', 'antinomic'], + 'antimonium': ['ammunition', 'antimonium'], + 'antimony': ['antimony', 'antinomy'], + 'antimoral': ['antimoral', 'tailorman'], + 'antimosquito': ['antimosquito', 'misquotation'], + 'antinegro': ['antinegro', 'argentino', 'argention'], + 'antinepotic': ['antinepotic', 'pectination'], + 'antineutral': ['antineutral', 'triannulate'], + 'antinial': ['antinial', 'latinian'], + 'antinome': ['antinome', 'nominate'], + 'antinomian': ['antinomian', 'innominata'], + 'antinomic': ['antimonic', 'antinomic'], + 'antinomy': ['antimony', 'antinomy'], + 'antinormal': ['antinormal', 'nonmarital', 'nonmartial'], + 'antiphonetic': ['antiphonetic', 'pentathionic'], + 'antiphonic': ['antiphonic', 'napthionic'], + 'antiphony': ['antiphony', 'typhonian'], + 'antiphrasis': ['antiphrasis', 'artisanship'], + 'antipodic': ['antipodic', 'diapnotic'], + 'antipole': ['antilope', 'antipole'], + 'antipolo': ['antipolo', 'antipool', 'optional'], + 'antipool': ['antipolo', 'antipool', 'optional'], + 'antipope': ['antipope', 'appointe'], + 'antiprotease': ['antiprotease', 'entoparasite'], + 'antipsoric': 
['antipsoric', 'ascription', 'crispation'], + 'antiptosis': ['antiptosis', 'panostitis'], + 'antiputrid': ['antiputrid', 'tripudiant'], + 'antipyretic': ['antipyretic', 'pertinacity'], + 'antique': ['antique', 'quinate'], + 'antiquer': ['antiquer', 'quartine'], + 'antirabies': ['antirabies', 'bestiarian'], + 'antiracer': ['antiracer', 'tarriance'], + 'antireactive': ['anticreative', 'antireactive'], + 'antired': ['antired', 'detrain', 'randite', 'trained'], + 'antireducer': ['antireducer', 'reincrudate', 'untraceried'], + 'antiroyalist': ['antiroyalist', 'stationarily'], + 'antirumor': ['antirumor', 'ruminator'], + 'antirun': ['antirun', 'untrain', 'urinant'], + 'antirust': ['antirust', 'naturist'], + 'antiscion': ['anconitis', 'antiscion', 'onanistic'], + 'antisepsin': ['antisepsin', 'paintiness'], + 'antisepsis': ['antisepsis', 'inspissate'], + 'antiseptic': ['antiseptic', 'psittacine'], + 'antiserum': ['antiserum', 'misaunter'], + 'antisi': ['antisi', 'isatin'], + 'antislip': ['alpinist', 'antislip'], + 'antisoporific': ['antisoporific', 'prosification', 'sporification'], + 'antispace': ['antispace', 'panaceist'], + 'antistes': ['antistes', 'titaness'], + 'antisun': ['antisun', 'unsaint', 'unsatin', 'unstain'], + 'antitheism': ['antitheism', 'themistian'], + 'antitonic': ['antitonic', 'nictation'], + 'antitorpedo': ['antitorpedo', 'deportation'], + 'antitrade': ['antitrade', 'attainder'], + 'antitrope': ['antitrope', 'patronite', 'tritanope'], + 'antitropic': ['antitropic', 'tritanopic'], + 'antitropical': ['antitropical', 'practitional'], + 'antivice': ['antivice', 'inactive', 'vineatic'], + 'antler': ['altern', 'antler', 'learnt', 'rental', 'ternal'], + 'antlia': ['antlia', 'latian', 'nalita'], + 'antliate': ['antliate', 'latinate'], + 'antlid': ['antlid', 'tindal'], + 'antling': ['antling', 'tanling'], + 'antoeci': ['acetoin', 'aconite', 'anoetic', 'antoeci', 'cetonia'], + 'antoecian': ['acetanion', 'antoecian'], + 'anton': ['anton', 'notan', 'tonna'], + 'antproof': ['antproof', 'tanproof'], + 'antra': ['antar', 'antra'], + 'antral': ['antral', 'tarnal'], + 'antre': ['antre', 'arent', 'retan', 'terna'], + 'antrocele': ['antrocele', 'coeternal', 'tolerance'], + 'antronasal': ['antronasal', 'nasoantral'], + 'antroscope': ['antroscope', 'contrapose'], + 'antroscopy': ['antroscopy', 'syncopator'], + 'antrotome': ['antrotome', 'nototrema'], + 'antrustion': ['antrustion', 'nasturtion'], + 'antu': ['antu', 'aunt', 'naut', 'taun', 'tuan', 'tuna'], + 'anubis': ['anubis', 'unbias'], + 'anura': ['anura', 'ruana'], + 'anuresis': ['anuresis', 'senarius'], + 'anuretic': ['anuretic', 'centauri', 'centuria', 'teucrian'], + 'anuria': ['anuria', 'urania'], + 'anuric': ['anuric', 'cinura', 'uranic'], + 'anurous': ['anurous', 'uranous'], + 'anury': ['anury', 'unary', 'unray'], + 'anus': ['ansu', 'anus'], + 'anusim': ['animus', 'anisum', 'anusim', 'manius'], + 'anvil': ['alvin', 'anvil', 'nival', 'vinal'], + 'any': ['any', 'nay', 'yan'], + 'aonach': ['aonach', 'choana'], + 'aoristic': ['aoristic', 'iscariot'], + 'aortal': ['aortal', 'rotala'], + 'aortectasis': ['aerostatics', 'aortectasis'], + 'aortism': ['amorist', 'aortism', 'miastor'], + 'aosmic': ['aosmic', 'mosaic'], + 'apachite': ['apachite', 'hepatica'], + 'apalachee': ['acalephae', 'apalachee'], + 'apama': ['amapa', 'apama'], + 'apanthropy': ['apanthropy', 'panatrophy'], + 'apar': ['apar', 'paar', 'para'], + 'apart': ['apart', 'trapa'], + 'apatetic': ['apatetic', 'capitate'], + 'ape': ['ape', 'pea'], + 'apedom': ['apedom', 'pomade'], + 'apeiron': 
['apeiron', 'peorian'], + 'apelet': ['apelet', 'ptelea'], + 'apelike': ['apelike', 'pealike'], + 'apeling': ['apeling', 'leaping'], + 'apenteric': ['anticreep', 'apenteric', 'increpate'], + 'apeptic': ['apeptic', 'catpipe'], + 'aper': ['aper', 'pare', 'pear', 'rape', 'reap'], + 'aperch': ['aperch', 'eparch', 'percha', 'preach'], + 'aperitive': ['aperitive', 'petiveria'], + 'apert': ['apert', 'pater', 'peart', 'prate', 'taper', 'terap'], + 'apertly': ['apertly', 'peartly', 'platery', 'pteryla', 'taperly'], + 'apertness': ['apertness', 'peartness', 'taperness'], + 'apertured': ['apertured', 'departure'], + 'apery': ['apery', 'payer', 'repay'], + 'aphanes': ['aphanes', 'saphena'], + 'aphasia': ['aphasia', 'asaphia'], + 'aphasic': ['aphasic', 'asaphic'], + 'aphemic': ['aphemic', 'impeach'], + 'aphetic': ['aphetic', 'caphite', 'hepatic'], + 'aphetism': ['aphetism', 'mateship', 'shipmate', 'spithame'], + 'aphetize': ['aphetize', 'hepatize'], + 'aphides': ['aphides', 'diphase'], + 'aphidicolous': ['acidophilous', 'aphidicolous'], + 'aphis': ['aphis', 'apish', 'hispa', 'saiph', 'spahi'], + 'aphodian': ['adiaphon', 'aphodian'], + 'aphonic': ['aphonic', 'phocian'], + 'aphorismical': ['aphorismical', 'parochialism'], + 'aphotic': ['aphotic', 'picotah'], + 'aphra': ['aphra', 'harpa', 'parah'], + 'aphrodistic': ['aphrodistic', 'diastrophic'], + 'aphrodite': ['aphrodite', 'atrophied', 'diaporthe'], + 'apian': ['apian', 'apina'], + 'apiator': ['apiator', 'atropia', 'parotia'], + 'apical': ['apical', 'palaic'], + 'apicular': ['apicular', 'piacular'], + 'apina': ['apian', 'apina'], + 'apinae': ['anepia', 'apinae'], + 'apinage': ['aegipan', 'apinage'], + 'apinch': ['apinch', 'chapin', 'phanic'], + 'aping': ['aping', 'ngapi', 'pangi'], + 'apiole': ['apiole', 'leipoa'], + 'apiolin': ['apiolin', 'pinolia'], + 'apionol': ['apionol', 'polonia'], + 'apiose': ['apiose', 'apoise'], + 'apis': ['apis', 'pais', 'pasi', 'saip'], + 'apish': ['aphis', 'apish', 'hispa', 'saiph', 'spahi'], + 'apishly': ['apishly', 'layship'], + 'apism': ['apism', 'sampi'], + 'apitpat': ['apitpat', 'pitapat'], + 'apivorous': ['apivorous', 'oviparous'], + 'aplenty': ['aplenty', 'penalty'], + 'aplite': ['aplite', 'pilate'], + 'aplitic': ['aliptic', 'aplitic'], + 'aplodontia': ['adoptional', 'aplodontia'], + 'aplome': ['aplome', 'malope'], + 'apnea': ['apnea', 'paean'], + 'apneal': ['apneal', 'panela'], + 'apochromat': ['apochromat', 'archoptoma'], + 'apocrita': ['apocrita', 'aproctia'], + 'apocryph': ['apocryph', 'hypocarp'], + 'apod': ['apod', 'dopa'], + 'apoda': ['adpao', 'apoda'], + 'apogon': ['apogon', 'poonga'], + 'apoise': ['apiose', 'apoise'], + 'apolaustic': ['apolaustic', 'autopsical'], + 'apolistan': ['apolistan', 'lapsation'], + 'apollo': ['apollo', 'palolo'], + 'aponia': ['anopia', 'aponia', 'poiana'], + 'aponic': ['aponic', 'ponica'], + 'aporetic': ['aporetic', 'capriote', 'operatic'], + 'aporetical': ['aporetical', 'operatical'], + 'aporia': ['aporia', 'piaroa'], + 'aport': ['aport', 'parto', 'porta'], + 'aportoise': ['aportoise', 'esotropia'], + 'aposporous': ['aposporous', 'aprosopous'], + 'apostil': ['apostil', 'topsail'], + 'apostle': ['apostle', 'aseptol'], + 'apostrophus': ['apostrophus', 'pastophorus'], + 'apothesine': ['apothesine', 'isoheptane'], + 'apout': ['apout', 'taupo'], + 'appall': ['appall', 'palpal'], + 'apparent': ['apparent', 'trappean'], + 'appealer': ['appealer', 'reappeal'], + 'appealing': ['appealing', 'lagniappe', 'panplegia'], + 'appearer': ['appearer', 'rapparee', 'reappear'], + 'append': ['append', 
'napped'], + 'applauder': ['applauder', 'reapplaud'], + 'applicator': ['applicator', 'procapital'], + 'applier': ['applier', 'aripple'], + 'appointe': ['antipope', 'appointe'], + 'appointer': ['appointer', 'reappoint'], + 'appointor': ['appointor', 'apportion'], + 'apportion': ['appointor', 'apportion'], + 'apportioner': ['apportioner', 'reapportion'], + 'appraisable': ['appraisable', 'parablepsia'], + 'apprehender': ['apprehender', 'reapprehend'], + 'approacher': ['approacher', 'reapproach'], + 'apricot': ['apricot', 'atropic', 'parotic', 'patrico'], + 'april': ['april', 'pilar', 'ripal'], + 'aprilis': ['aprilis', 'liparis'], + 'aproctia': ['apocrita', 'aproctia'], + 'apronless': ['apronless', 'responsal'], + 'aprosopous': ['aposporous', 'aprosopous'], + 'apse': ['apse', 'pesa', 'spae'], + 'apsidiole': ['apsidiole', 'episodial'], + 'apt': ['apt', 'pat', 'tap'], + 'aptal': ['aptal', 'palta', 'talpa'], + 'aptera': ['aptera', 'parate', 'patera'], + 'apterial': ['apterial', 'parietal'], + 'apteroid': ['apteroid', 'proteida'], + 'aptian': ['aptian', 'patina', 'taipan'], + 'aptly': ['aptly', 'patly', 'platy', 'typal'], + 'aptness': ['aptness', 'patness'], + 'aptote': ['aptote', 'optate', 'potate', 'teapot'], + 'apulian': ['apulian', 'paulian', 'paulina'], + 'apulse': ['apulse', 'upseal'], + 'apus': ['apus', 'supa', 'upas'], + 'aquabelle': ['aquabelle', 'equalable'], + 'aqueoigneous': ['aqueoigneous', 'igneoaqueous'], + 'aquicolous': ['aquicolous', 'loquacious'], + 'ar': ['ar', 'ra'], + 'arab': ['arab', 'arba', 'baar', 'bara'], + 'arabic': ['arabic', 'cairba'], + 'arabinic': ['arabinic', 'cabirian', 'carabini', 'cibarian'], + 'arabis': ['abaris', 'arabis'], + 'arabism': ['abramis', 'arabism'], + 'arabist': ['arabist', 'bartsia'], + 'arabit': ['arabit', 'tabira'], + 'arable': ['ablare', 'arable', 'arbela'], + 'araca': ['acara', 'araca'], + 'aracana': ['anacara', 'aracana'], + 'aracanga': ['aracanga', 'caragana'], + 'arachic': ['arachic', 'archaic'], + 'arachidonic': ['arachidonic', 'characinoid'], + 'arachis': ['arachis', 'asiarch', 'saharic'], + 'arachne': ['arachne', 'archean'], + 'arachnism': ['anarchism', 'arachnism'], + 'arachnitis': ['arachnitis', 'christiana'], + 'arad': ['adar', 'arad', 'raad', 'rada'], + 'arain': ['airan', 'arain', 'arian'], + 'arales': ['alares', 'arales'], + 'aralia': ['alaria', 'aralia'], + 'aralie': ['aerial', 'aralie'], + 'aramaic': ['aramaic', 'cariama'], + 'aramina': ['aramina', 'mariana'], + 'araminta': ['anamirta', 'araminta'], + 'aramus': ['aramus', 'asarum'], + 'araneid': ['araneid', 'ariadne', 'ranidae'], + 'aranein': ['aranein', 'raninae'], + 'aranga': ['angara', 'aranga', 'nagara'], + 'arango': ['agroan', 'angora', 'anogra', 'arango', 'argoan', 'onagra'], + 'arati': ['arati', 'atria', 'riata', 'tarai', 'tiara'], + 'aration': ['aration', 'otarian'], + 'arauan': ['arauan', 'arauna'], + 'arauna': ['arauan', 'arauna'], + 'arba': ['arab', 'arba', 'baar', 'bara'], + 'arbacin': ['arbacin', 'carabin', 'cariban'], + 'arbalester': ['arbalester', 'arbalestre', 'arrestable'], + 'arbalestre': ['arbalester', 'arbalestre', 'arrestable'], + 'arbalister': ['arbalister', 'breastrail'], + 'arbalo': ['aboral', 'arbalo'], + 'arbela': ['ablare', 'arable', 'arbela'], + 'arbiter': ['arbiter', 'rarebit'], + 'arbored': ['arbored', 'boarder', 'reboard'], + 'arboret': ['arboret', 'roberta', 'taborer'], + 'arboretum': ['arboretum', 'tambourer'], + 'arborist': ['arborist', 'ribroast'], + 'arbuscle': ['arbuscle', 'buscarle'], + 'arbutin': ['arbutin', 'tribuna'], + 'arc': ['arc', 'car'], 
+ 'arca': ['arca', 'cara'], + 'arcadia': ['acardia', 'acarida', 'arcadia'], + 'arcadic': ['arcadic', 'cardiac'], + 'arcane': ['arcane', 'carane'], + 'arcanite': ['anaretic', 'arcanite', 'carinate', 'craniate'], + 'arcate': ['arcate', 'cerata'], + 'arch': ['arch', 'char', 'rach'], + 'archae': ['archae', 'areach'], + 'archaic': ['arachic', 'archaic'], + 'archaism': ['archaism', 'charisma'], + 'archapostle': ['archapostle', 'thecasporal'], + 'archcount': ['archcount', 'crouchant'], + 'arche': ['acher', 'arche', 'chare', 'chera', 'rache', 'reach'], + 'archeal': ['alchera', 'archeal'], + 'archean': ['arachne', 'archean'], + 'archer': ['archer', 'charer', 'rechar'], + 'arches': ['arches', 'chaser', 'eschar', 'recash', 'search'], + 'archidome': ['archidome', 'chromidae'], + 'archil': ['archil', 'chiral'], + 'arching': ['arching', 'chagrin'], + 'architis': ['architis', 'rachitis'], + 'archocele': ['archocele', 'cochleare'], + 'archon': ['anchor', 'archon', 'charon', 'rancho'], + 'archontia': ['archontia', 'tocharian'], + 'archoptoma': ['apochromat', 'archoptoma'], + 'archpoet': ['archpoet', 'protheca'], + 'archprelate': ['archprelate', 'pretracheal'], + 'archsaint': ['anarchist', 'archsaint', 'cantharis'], + 'archsee': ['archsee', 'rechase'], + 'archsin': ['archsin', 'incrash'], + 'archy': ['archy', 'chary'], + 'arcidae': ['arcidae', 'caridea'], + 'arcing': ['arcing', 'racing'], + 'arcite': ['acrite', 'arcite', 'tercia', 'triace', 'tricae'], + 'arcked': ['arcked', 'dacker'], + 'arcking': ['arcking', 'carking', 'racking'], + 'arcos': ['arcos', 'crosa', 'oscar', 'sacro'], + 'arctia': ['acrita', 'arctia'], + 'arctian': ['acritan', 'arctian'], + 'arcticize': ['arcticize', 'cicatrize'], + 'arctiid': ['arctiid', 'triacid', 'triadic'], + 'arctoid': ['arctoid', 'carotid', 'dartoic'], + 'arctoidean': ['arctoidean', 'carotidean', 'cordaitean', 'dinocerata'], + 'arctomys': ['arctomys', 'costmary', 'mascotry'], + 'arctos': ['arctos', 'castor', 'costar', 'scrota'], + 'arcual': ['arcual', 'arcula'], + 'arcuale': ['arcuale', 'caurale'], + 'arcubalist': ['arcubalist', 'ultrabasic'], + 'arcula': ['arcual', 'arcula'], + 'arculite': ['arculite', 'cutleria', 'lucretia', 'reticula', 'treculia'], + 'ardea': ['ardea', 'aread'], + 'ardeb': ['ardeb', 'beard', 'bread', 'debar'], + 'ardelia': ['ardelia', 'laridae', 'radiale'], + 'ardella': ['ardella', 'dareall'], + 'ardency': ['ardency', 'dancery'], + 'ardish': ['ardish', 'radish'], + 'ardoise': ['ardoise', 'aroides', 'soredia'], + 'ardu': ['ardu', 'daur', 'dura'], + 'are': ['aer', 'are', 'ear', 'era', 'rea'], + 'areach': ['archae', 'areach'], + 'aread': ['ardea', 'aread'], + 'areal': ['areal', 'reaal'], + 'arean': ['anear', 'arean', 'arena'], + 'arecaceae': ['aceraceae', 'arecaceae'], + 'arecaceous': ['aceraceous', 'arecaceous'], + 'arecain': ['acarine', 'acraein', 'arecain'], + 'arecolin': ['acrolein', + 'arecolin', + 'caroline', + 'colinear', + 'cornelia', + 'creolian', + 'lonicera'], + 'arecoline': ['arecoline', 'arenicole'], + 'arecuna': ['arecuna', 'aucaner'], + 'ared': ['ared', 'daer', 'dare', 'dear', 'read'], + 'areel': ['areel', 'earle'], + 'arena': ['anear', 'arean', 'arena'], + 'arenaria': ['aerarian', 'arenaria'], + 'arend': ['andre', 'arend', 'daren', 'redan'], + 'areng': ['anger', 'areng', 'grane', 'range'], + 'arenga': ['arenga', 'argean'], + 'arenicole': ['arecoline', 'arenicole'], + 'arenicolite': ['arenicolite', 'ricinoleate'], + 'arenig': ['arenig', 'earing', 'gainer', 'reagin', 'regain'], + 'arenoid': ['aneroid', 'arenoid'], + 'arenose': ['arenose', 
'serenoa'], + 'arent': ['antre', 'arent', 'retan', 'terna'], + 'areographer': ['aerographer', 'areographer'], + 'areographic': ['aerographic', 'areographic'], + 'areographical': ['aerographical', 'areographical'], + 'areography': ['aerography', 'areography'], + 'areologic': ['aerologic', 'areologic'], + 'areological': ['aerological', 'areological'], + 'areologist': ['aerologist', 'areologist'], + 'areology': ['aerology', 'areology'], + 'areometer': ['aerometer', 'areometer'], + 'areometric': ['aerometric', 'areometric'], + 'areometry': ['aerometry', 'areometry'], + 'arete': ['arete', 'eater', 'teaer'], + 'argal': ['agral', 'argal'], + 'argali': ['argali', 'garial'], + 'argans': ['argans', 'sangar'], + 'argante': ['argante', 'granate', 'tanager'], + 'argas': ['argas', 'sagra'], + 'argean': ['arenga', 'argean'], + 'argeers': ['argeers', 'greaser', 'serrage'], + 'argel': ['argel', 'ergal', 'garle', 'glare', 'lager', 'large', 'regal'], + 'argenol': ['argenol', 'longear'], + 'argent': ['argent', 'garnet', 'garten', 'tanger'], + 'argentamid': ['argentamid', 'marginated'], + 'argenter': ['argenter', 'garneter'], + 'argenteum': ['argenteum', 'augmenter'], + 'argentic': ['argentic', 'citrange'], + 'argentide': ['argentide', 'denigrate', 'dinergate'], + 'argentiferous': ['argentiferous', 'garnetiferous'], + 'argentina': ['argentina', 'tanagrine'], + 'argentine': ['argentine', 'tangerine'], + 'argentino': ['antinegro', 'argentino', 'argention'], + 'argention': ['antinegro', 'argentino', 'argention'], + 'argentite': ['argentite', 'integrate'], + 'argentol': ['argentol', 'gerontal'], + 'argenton': ['argenton', 'negatron'], + 'argentous': ['argentous', 'neotragus'], + 'argentum': ['argentum', 'argument'], + 'arghan': ['arghan', 'hangar'], + 'argil': ['argil', 'glair', 'grail'], + 'arginine': ['arginine', 'nigerian'], + 'argive': ['argive', 'rivage'], + 'argo': ['argo', 'garo', 'gora'], + 'argoan': ['agroan', 'angora', 'anogra', 'arango', 'argoan', 'onagra'], + 'argol': ['algor', 'argol', 'goral', 'largo'], + 'argolet': ['argolet', 'gloater', 'legator'], + 'argolian': ['argolian', 'gloriana'], + 'argolic': ['argolic', 'cograil'], + 'argolid': ['argolid', 'goliard'], + 'argon': ['angor', + 'argon', + 'goran', + 'grano', + 'groan', + 'nagor', + 'orang', + 'organ', + 'rogan', + 'ronga'], + 'argot': ['argot', 'gator', 'gotra', 'groat'], + 'argue': ['argue', 'auger'], + 'argulus': ['argulus', 'lagurus'], + 'argument': ['argentum', 'argument'], + 'argus': ['argus', 'sugar'], + 'arguslike': ['arguslike', 'sugarlike'], + 'argute': ['argute', 'guetar', 'rugate', 'tuareg'], + 'argyle': ['argyle', 'gleary'], + 'arhar': ['arhar', 'arrah'], + 'arhat': ['arhat', 'artha', 'athar'], + 'aria': ['aira', 'aria', 'raia'], + 'ariadne': ['araneid', 'ariadne', 'ranidae'], + 'arian': ['airan', 'arain', 'arian'], + 'arianrhod': ['arianrhod', 'hordarian'], + 'aribine': ['aribine', 'bairnie', 'iberian'], + 'arician': ['arician', 'icarian'], + 'arid': ['arid', 'dari', 'raid'], + 'aridian': ['aridian', 'diarian'], + 'aridly': ['aridly', 'lyraid'], + 'ariegite': ['aegirite', 'ariegite'], + 'aries': ['aries', 'arise', 'raise', 'serai'], + 'arietid': ['arietid', 'iridate'], + 'arietta': ['arietta', 'ratitae'], + 'aright': ['aright', 'graith'], + 'arightly': ['alrighty', 'arightly'], + 'ariidae': ['ariidae', 'raiidae'], + 'aril': ['aril', 'lair', 'lari', 'liar', 'lira', 'rail', 'rial'], + 'ariled': ['ariled', 'derail', 'dialer'], + 'arillate': ['arillate', 'tiarella'], + 'arion': ['arion', 'noria'], + 'ariot': ['ariot', 'ratio'], + 
'aripple': ['applier', 'aripple'], + 'arise': ['aries', 'arise', 'raise', 'serai'], + 'arisen': ['arisen', 'arsine', 'resina', 'serian'], + 'arist': ['arist', + 'astir', + 'sitar', + 'stair', + 'stria', + 'tarsi', + 'tisar', + 'trias'], + 'arista': ['arista', 'tarsia'], + 'aristeas': ['aristeas', 'asterias'], + 'aristol': ['aristol', 'oralist', 'ortalis', 'striola'], + 'aristulate': ['aristulate', 'australite'], + 'arite': ['arite', 'artie', 'irate', 'retia', 'tarie'], + 'arithmic': ['arithmic', 'mithraic', 'mithriac'], + 'arius': ['arius', 'asuri'], + 'arizona': ['arizona', 'azorian', 'zonaria'], + 'ark': ['ark', 'kra'], + 'arkab': ['abkar', 'arkab'], + 'arkite': ['arkite', 'karite'], + 'arkose': ['arkose', 'resoak', 'soaker'], + 'arlene': ['arlene', 'leaner'], + 'arleng': ['angler', 'arleng', 'garnel', 'largen', 'rangle', 'regnal'], + 'arles': ['arles', 'arsle', 'laser', 'seral', 'slare'], + 'arline': ['arline', 'larine', 'linear', 'nailer', 'renail'], + 'arm': ['arm', 'mar', 'ram'], + 'armada': ['armada', 'damara', 'ramada'], + 'armangite': ['armangite', 'marginate'], + 'armata': ['armata', 'matara', 'tamara'], + 'armed': ['armed', 'derma', 'dream', 'ramed'], + 'armenian': ['armenian', 'marianne'], + 'armenic': ['armenic', 'carmine', 'ceriman', 'crimean', 'mercian'], + 'armer': ['armer', 'rearm'], + 'armeria': ['armeria', 'mararie'], + 'armet': ['armet', + 'mater', + 'merat', + 'metra', + 'ramet', + 'tamer', + 'terma', + 'trame', + 'trema'], + 'armful': ['armful', 'fulmar'], + 'armgaunt': ['armgaunt', 'granatum'], + 'armied': ['admire', 'armied', 'damier', 'dimera', 'merida'], + 'armiferous': ['armiferous', 'ramiferous'], + 'armigerous': ['armigerous', 'ramigerous'], + 'armil': ['armil', 'marli', 'rimal'], + 'armilla': ['armilla', 'marilla'], + 'armillated': ['armillated', 'malladrite', 'mallardite'], + 'arming': ['arming', 'ingram', 'margin'], + 'armistice': ['ameristic', 'armistice', 'artemisic'], + 'armlet': ['armlet', 'malter', 'martel'], + 'armonica': ['armonica', 'macaroni', 'marocain'], + 'armoried': ['airdrome', 'armoried'], + 'armpit': ['armpit', 'impart'], + 'armplate': ['armplate', 'malapert'], + 'arms': ['arms', 'mars'], + 'armscye': ['armscye', 'screamy'], + 'army': ['army', 'mary', 'myra', 'yarm'], + 'arn': ['arn', 'nar', 'ran'], + 'arna': ['arna', 'rana'], + 'arnaut': ['arnaut', 'arunta'], + 'arne': ['arne', 'earn', 'rane'], + 'arneb': ['abner', 'arneb', 'reban'], + 'arni': ['arni', 'iran', 'nair', 'rain', 'rani'], + 'arnica': ['acinar', + 'arnica', + 'canari', + 'carian', + 'carina', + 'crania', + 'narica'], + 'arnold': ['androl', 'arnold', 'lardon', 'roland', 'ronald'], + 'arnotta': ['arnotta', 'natator'], + 'arnotto': ['arnotto', 'notator'], + 'arnut': ['arnut', 'tuarn', 'untar'], + 'aro': ['aro', 'oar', 'ora'], + 'aroast': ['aroast', 'ostara'], + 'arock': ['arock', 'croak'], + 'aroid': ['aroid', 'doria', 'radio'], + 'aroides': ['ardoise', 'aroides', 'soredia'], + 'aroint': ['aroint', 'ration'], + 'aromatic': ['aromatic', 'macrotia'], + 'aroon': ['aroon', 'oraon'], + 'arose': ['arose', 'oreas'], + 'around': ['around', 'arundo'], + 'arpen': ['arpen', 'paren'], + 'arpent': ['arpent', + 'enrapt', + 'entrap', + 'panter', + 'parent', + 'pretan', + 'trepan'], + 'arrah': ['arhar', 'arrah'], + 'arras': ['arras', 'sarra'], + 'arrau': ['arrau', 'aurar'], + 'arrayer': ['arrayer', 'rearray'], + 'arrect': ['arrect', 'carter', 'crater', 'recart', 'tracer'], + 'arrector': ['arrector', 'carroter'], + 'arrent': ['arrent', 'errant', 'ranter', 'ternar'], + 'arrest': ['arrest', 'astrer', 
'raster', 'starer'], + 'arrestable': ['arbalester', 'arbalestre', 'arrestable'], + 'arrester': ['arrester', 'rearrest'], + 'arresting': ['arresting', 'astringer'], + 'arretine': ['arretine', 'eretrian', 'eritrean', 'retainer'], + 'arride': ['arride', 'raider'], + 'arrie': ['airer', 'arrie'], + 'arriet': ['arriet', 'tarrie'], + 'arrish': ['arrish', 'harris', 'rarish', 'sirrah'], + 'arrive': ['arrive', 'varier'], + 'arrogance': ['arrogance', 'coarrange'], + 'arrogant': ['arrogant', 'tarragon'], + 'arrogative': ['arrogative', 'variegator'], + 'arrowy': ['arrowy', 'yarrow'], + 'arry': ['arry', 'yarr'], + 'arsacid': ['arsacid', 'ascarid'], + 'arse': ['arse', 'rase', 'sare', 'sear', 'sera'], + 'arsedine': ['arsedine', 'arsenide', 'sedanier', 'siderean'], + 'arsenal': ['arsenal', 'ranales'], + 'arsenate': ['arsenate', 'serenata'], + 'arsenation': ['arsenation', 'senatorian', 'sonneratia'], + 'arseniate': ['arseniate', 'saernaite'], + 'arsenic': ['arsenic', 'cerasin', 'sarcine'], + 'arsenide': ['arsedine', 'arsenide', 'sedanier', 'siderean'], + 'arsenite': ['arsenite', 'resinate', 'teresian', 'teresina'], + 'arsenium': ['aneurism', 'arsenium', 'sumerian'], + 'arseniuret': ['arseniuret', 'uniserrate'], + 'arseno': ['arseno', 'reason'], + 'arsenopyrite': ['arsenopyrite', 'pyroarsenite'], + 'arsenous': ['anserous', 'arsenous'], + 'arses': ['arses', 'rasse'], + 'arshine': ['arshine', 'nearish', 'rhesian', 'sherani'], + 'arsine': ['arisen', 'arsine', 'resina', 'serian'], + 'arsino': ['arsino', 'rasion', 'sonrai'], + 'arsis': ['arsis', 'sarsi'], + 'arsle': ['arles', 'arsle', 'laser', 'seral', 'slare'], + 'arson': ['arson', 'saron', 'sonar'], + 'arsonic': ['arsonic', 'saronic'], + 'arsonite': ['arsonite', 'asterion', 'oestrian', 'rosinate', 'serotina'], + 'art': ['art', 'rat', 'tar', 'tra'], + 'artaba': ['artaba', 'batara'], + 'artabe': ['abater', 'artabe', 'eartab', 'trabea'], + 'artal': ['altar', 'artal', 'ratal', 'talar'], + 'artamus': ['artamus', 'sumatra'], + 'artarine': ['artarine', 'errantia'], + 'artefact': ['afteract', 'artefact', 'farcetta', 'farctate'], + 'artel': ['alert', 'alter', 'artel', 'later', 'ratel', 'taler', 'telar'], + 'artemas': ['artemas', 'astream'], + 'artemia': ['ametria', 'artemia', 'meratia', 'ramaite'], + 'artemis': ['artemis', 'maestri', 'misrate'], + 'artemisic': ['ameristic', 'armistice', 'artemisic'], + 'arterial': ['arterial', 'triareal'], + 'arterin': ['arterin', 'retrain', 'terrain', 'trainer'], + 'arterious': ['arterious', 'autoriser'], + 'artesian': ['artesian', 'asterina', 'asternia', 'erastian', 'seatrain'], + 'artgum': ['artgum', 'targum'], + 'artha': ['arhat', 'artha', 'athar'], + 'arthel': ['arthel', 'halter', 'lather', 'thaler'], + 'arthemis': ['arthemis', 'marshite', 'meharist'], + 'arthrochondritis': ['arthrochondritis', 'chondroarthritis'], + 'arthromere': ['arthromere', 'metrorrhea'], + 'arthropodan': ['anarthropod', 'arthropodan'], + 'arthrosteitis': ['arthrosteitis', 'ostearthritis'], + 'article': ['article', 'recital'], + 'articled': ['articled', 'lacertid'], + 'artie': ['arite', 'artie', 'irate', 'retia', 'tarie'], + 'artifice': ['actifier', 'artifice'], + 'artisan': ['artisan', 'astrain', 'sartain', 'tsarina'], + 'artisanship': ['antiphrasis', 'artisanship'], + 'artist': ['artist', 'strait', 'strati'], + 'artiste': ['artiste', 'striate'], + 'artlet': ['artlet', 'latter', 'rattle', 'tartle', 'tatler'], + 'artlike': ['artlike', 'ratlike', 'tarlike'], + 'arty': ['arty', 'atry', 'tray'], + 'aru': ['aru', 'rua', 'ura'], + 'aruac': ['aruac', 'carua'], + 
'arui': ['arui', 'uria'], + 'arum': ['arum', 'maru', 'mura'], + 'arundo': ['around', 'arundo'], + 'arunta': ['arnaut', 'arunta'], + 'arusa': ['arusa', 'saura', 'usara'], + 'arusha': ['arusha', 'aushar'], + 'arustle': ['arustle', 'estrual', 'saluter', 'saulter'], + 'arval': ['alvar', 'arval', 'larva'], + 'arvel': ['arvel', 'larve', 'laver', 'ravel', 'velar'], + 'arx': ['arx', 'rax'], + 'ary': ['ary', 'ray', 'yar'], + 'arya': ['arya', 'raya'], + 'aryan': ['aryan', 'nayar', 'rayan'], + 'aryl': ['aryl', 'lyra', 'ryal', 'yarl'], + 'as': ['as', 'sa'], + 'asa': ['asa', 'saa'], + 'asak': ['asak', 'kasa', 'saka'], + 'asana': ['anasa', 'asana'], + 'asaph': ['asaph', 'pasha'], + 'asaphia': ['aphasia', 'asaphia'], + 'asaphic': ['aphasic', 'asaphic'], + 'asaprol': ['asaprol', 'parasol'], + 'asarh': ['asarh', 'raash', 'sarah'], + 'asarite': ['asarite', 'asteria', 'atresia', 'setaria'], + 'asarum': ['aramus', 'asarum'], + 'asbest': ['asbest', 'basset'], + 'ascanius': ['anacusis', 'ascanius'], + 'ascare': ['ascare', 'caesar', 'resaca'], + 'ascarid': ['arsacid', 'ascarid'], + 'ascaris': ['ascaris', 'carissa'], + 'ascendance': ['adnascence', 'ascendance'], + 'ascendant': ['adnascent', 'ascendant'], + 'ascender': ['ascender', 'reascend'], + 'ascent': ['ascent', 'secant', 'stance'], + 'ascertain': ['ascertain', 'cartesian', 'cartisane', 'sectarian'], + 'ascertainer': ['ascertainer', 'reascertain', 'secretarian'], + 'ascetic': ['ascetic', 'castice', 'siccate'], + 'ascham': ['ascham', 'chasma'], + 'asci': ['acis', 'asci', 'saic'], + 'ascian': ['ascian', 'sacian', 'scania', 'sicana'], + 'ascidia': ['ascidia', 'diascia'], + 'ascii': ['ascii', 'isiac'], + 'ascites': ['ascites', 'ectasis'], + 'ascitic': ['ascitic', 'sciatic'], + 'ascitical': ['ascitical', 'sciatical'], + 'asclent': ['asclent', 'scantle'], + 'asclepian': ['asclepian', 'spalacine'], + 'ascolichen': ['ascolichen', 'chalcosine'], + 'ascon': ['ascon', 'canso', 'oscan'], + 'ascot': ['ascot', 'coast', 'costa', 'tacso', 'tasco'], + 'ascription': ['antipsoric', 'ascription', 'crispation'], + 'ascry': ['ascry', 'scary', 'scray'], + 'ascula': ['ascula', 'calusa', 'casual', 'casula', 'causal'], + 'asdic': ['asdic', 'sadic'], + 'ase': ['aes', 'ase', 'sea'], + 'asearch': ['asearch', 'eschara'], + 'aselli': ['allies', 'aselli'], + 'asem': ['asem', 'mesa', 'same', 'seam'], + 'asemia': ['asemia', 'saeima'], + 'aseptic': ['aseptic', 'spicate'], + 'aseptol': ['apostle', 'aseptol'], + 'ash': ['ash', 'sah', 'sha'], + 'ashanti': ['ashanti', 'sanhita', 'shaitan', 'thasian'], + 'ashen': ['ashen', 'hanse', 'shane', 'shean'], + 'asher': ['asher', 'share', 'shear'], + 'ashet': ['ashet', 'haste', 'sheat'], + 'ashimmer': ['ashimmer', 'haremism'], + 'ashir': ['ashir', 'shari'], + 'ashling': ['anglish', 'ashling'], + 'ashman': ['ashman', 'shaman'], + 'ashore': ['ahorse', 'ashore', 'hoarse', 'shorea'], + 'ashraf': ['afshar', 'ashraf'], + 'ashur': ['ashur', 'surah'], + 'ashy': ['ashy', 'shay'], + 'asian': ['asian', 'naias', 'sanai'], + 'asiarch': ['arachis', 'asiarch', 'saharic'], + 'aside': ['aides', 'aside', 'sadie'], + 'asideu': ['asideu', 'suidae'], + 'asiento': ['aeonist', 'asiento', 'satieno'], + 'asilid': ['asilid', 'sialid'], + 'asilidae': ['asilidae', 'sialidae'], + 'asilus': ['asilus', 'lasius'], + 'asimen': ['asimen', 'inseam', 'mesian'], + 'asimmer': ['amerism', 'asimmer', 'sammier'], + 'asiphonate': ['asiphonate', 'asthenopia'], + 'ask': ['ask', 'sak'], + 'asker': ['asker', 'reask', 'saker', 'sekar'], + 'askew': ['askew', 'wakes'], + 'askip': ['askip', 'spaik'], + 
'askr': ['askr', 'kras', 'sark'], + 'aslant': ['aslant', 'lansat', 'natals', 'santal'], + 'asleep': ['asleep', 'elapse', 'please'], + 'aslope': ['aslope', 'poales'], + 'asmalte': ['asmalte', 'maltase'], + 'asmile': ['amiles', 'asmile', 'mesail', 'mesial', 'samiel'], + 'asnort': ['asnort', 'satron'], + 'asoak': ['asoak', 'asoka'], + 'asok': ['asok', 'soak', 'soka'], + 'asoka': ['asoak', 'asoka'], + 'asonia': ['anosia', 'asonia'], + 'asop': ['asop', 'sapo', 'soap'], + 'asor': ['asor', 'rosa', 'soar', 'sora'], + 'asp': ['asp', 'sap', 'spa'], + 'aspartic': ['aspartic', 'satrapic'], + 'aspection': ['aspection', 'stenopaic'], + 'aspectual': ['aspectual', 'capsulate'], + 'aspen': ['aspen', 'panse', 'snape', 'sneap', 'spane', 'spean'], + 'asper': ['asper', 'parse', 'prase', 'spaer', 'spare', 'spear'], + 'asperate': ['asperate', 'separate'], + 'asperation': ['anisoptera', 'asperation', 'separation'], + 'asperge': ['asperge', 'presage'], + 'asperger': ['asperger', 'presager'], + 'aspergil': ['aspergil', 'splairge'], + 'asperite': ['asperite', 'parietes'], + 'aspermia': ['aspermia', 'sapremia'], + 'aspermic': ['aspermic', 'sapremic'], + 'asperser': ['asperser', 'repasser'], + 'asperulous': ['asperulous', 'pleasurous'], + 'asphalt': ['asphalt', 'spathal', 'taplash'], + 'aspic': ['aspic', 'spica'], + 'aspidinol': ['aspidinol', 'diplasion'], + 'aspirant': ['aspirant', 'partisan', 'spartina'], + 'aspirata': ['aspirata', 'parasita'], + 'aspirate': ['aspirate', 'parasite'], + 'aspire': ['aspire', 'paries', 'praise', 'sirpea', 'spirea'], + 'aspirer': ['aspirer', 'praiser', 'serpari'], + 'aspiring': ['aspiring', 'praising', 'singarip'], + 'aspiringly': ['aspiringly', 'praisingly'], + 'aspish': ['aspish', 'phasis'], + 'asporous': ['asporous', 'saporous'], + 'asport': ['asport', 'pastor', 'sproat'], + 'aspread': ['aspread', 'saperda'], + 'aspring': ['aspring', 'rasping', 'sparing'], + 'asquirm': ['asquirm', 'marquis'], + 'assagai': ['assagai', 'gaiassa'], + 'assailer': ['assailer', 'reassail'], + 'assam': ['amass', 'assam', 'massa', 'samas'], + 'assaulter': ['assaulter', 'reassault', 'saleratus'], + 'assayer': ['assayer', 'reassay'], + 'assemble': ['assemble', 'beamless'], + 'assent': ['assent', 'snaste'], + 'assenter': ['assenter', 'reassent', 'sarsenet'], + 'assentor': ['assentor', 'essorant', 'starnose'], + 'assert': ['assert', 'tasser'], + 'asserter': ['asserter', 'reassert'], + 'assertible': ['assertible', 'resistable'], + 'assertional': ['assertional', 'sensatorial'], + 'assertor': ['assertor', 'assorter', 'oratress', 'reassort'], + 'asset': ['asset', 'tasse'], + 'assets': ['assets', 'stases'], + 'assidean': ['assidean', 'nassidae'], + 'assiento': ['assiento', 'ossetian'], + 'assignee': ['agenesis', 'assignee'], + 'assigner': ['assigner', 'reassign'], + 'assist': ['assist', 'stasis'], + 'assister': ['assister', 'reassist'], + 'associationism': ['associationism', 'misassociation'], + 'assoilment': ['assoilment', 'salmonsite'], + 'assorter': ['assertor', 'assorter', 'oratress', 'reassort'], + 'assuage': ['assuage', 'sausage'], + 'assume': ['assume', 'seamus'], + 'assumer': ['assumer', 'erasmus', 'masseur'], + 'ast': ['ast', 'sat'], + 'astacus': ['acastus', 'astacus'], + 'astare': ['astare', 'satrae'], + 'astart': ['astart', 'strata'], + 'astartian': ['astartian', 'astrantia'], + 'astatine': ['astatine', 'sanitate'], + 'asteep': ['asteep', 'peseta'], + 'asteer': ['asteer', + 'easter', + 'eastre', + 'reseat', + 'saeter', + 'seater', + 'staree', + 'teaser', + 'teresa'], + 'astelic': ['astelic', 'elastic', 
'latices'], + 'astely': ['alytes', 'astely', 'lysate', 'stealy'], + 'aster': ['aster', 'serta', 'stare', 'strae', 'tarse', 'teras'], + 'asteria': ['asarite', 'asteria', 'atresia', 'setaria'], + 'asterias': ['aristeas', 'asterias'], + 'asterikos': ['asterikos', 'keratosis'], + 'asterin': ['asterin', 'eranist', 'restain', 'stainer', 'starnie', 'stearin'], + 'asterina': ['artesian', 'asterina', 'asternia', 'erastian', 'seatrain'], + 'asterion': ['arsonite', 'asterion', 'oestrian', 'rosinate', 'serotina'], + 'astern': ['astern', 'enstar', 'stenar', 'sterna'], + 'asternia': ['artesian', 'asterina', 'asternia', 'erastian', 'seatrain'], + 'asteroid': ['asteroid', 'troiades'], + 'asterope': ['asterope', 'protease'], + 'asthenopia': ['asiphonate', 'asthenopia'], + 'asthma': ['amsath', 'asthma'], + 'asthmogenic': ['asthmogenic', 'mesognathic'], + 'asthore': ['asthore', 'earshot'], + 'astian': ['astian', 'tasian'], + 'astigmism': ['astigmism', 'sigmatism'], + 'astilbe': ['astilbe', 'bestial', 'blastie', 'stabile'], + 'astint': ['astint', 'tanist'], + 'astir': ['arist', + 'astir', + 'sitar', + 'stair', + 'stria', + 'tarsi', + 'tisar', + 'trias'], + 'astomous': ['astomous', 'somatous'], + 'astonied': ['astonied', 'sedation'], + 'astonisher': ['astonisher', 'reastonish', 'treasonish'], + 'astor': ['astor', 'roast'], + 'astragali': ['astragali', 'tarsalgia'], + 'astrain': ['artisan', 'astrain', 'sartain', 'tsarina'], + 'astral': ['astral', 'tarsal'], + 'astrantia': ['astartian', 'astrantia'], + 'astream': ['artemas', 'astream'], + 'astrer': ['arrest', 'astrer', 'raster', 'starer'], + 'astrict': ['astrict', 'cartist', 'stratic'], + 'astride': ['astride', 'diaster', 'disrate', 'restiad', 'staired'], + 'astrier': ['astrier', 'tarsier'], + 'astringe': ['astringe', 'ganister', 'gantries'], + 'astringent': ['astringent', 'transigent'], + 'astringer': ['arresting', 'astringer'], + 'astrodome': ['astrodome', 'roomstead'], + 'astrofel': ['astrofel', 'forestal'], + 'astroite': ['astroite', 'ostraite', 'storiate'], + 'astrolabe': ['astrolabe', 'roastable'], + 'astrut': ['astrut', 'rattus', 'stuart'], + 'astur': ['astur', 'surat', 'sutra'], + 'asturian': ['asturian', 'austrian', 'saturnia'], + 'astute': ['astute', 'statue'], + 'astylar': ['astylar', 'saltary'], + 'asunder': ['asunder', 'drusean'], + 'asuri': ['arius', 'asuri'], + 'aswail': ['aswail', 'sawali'], + 'asweat': ['asweat', 'awaste'], + 'aswim': ['aswim', 'swami'], + 'aswing': ['aswing', 'sawing'], + 'asyla': ['asyla', 'salay', 'sayal'], + 'asyllabic': ['asyllabic', 'basically'], + 'asyndetic': ['asyndetic', 'cystidean', 'syndicate'], + 'asynergia': ['asynergia', 'gainsayer'], + 'at': ['at', 'ta'], + 'ata': ['ata', 'taa'], + 'atabal': ['albata', 'atabal', 'balata'], + 'atabrine': ['atabrine', 'rabatine'], + 'atacaman': ['atacaman', 'tamanaca'], + 'ataentsic': ['anticaste', 'ataentsic'], + 'atalan': ['atalan', 'tanala'], + 'atap': ['atap', 'pata', 'tapa'], + 'atazir': ['atazir', 'ziarat'], + 'atchison': ['atchison', 'chitosan'], + 'ate': ['ate', 'eat', 'eta', 'tae', 'tea'], + 'ateba': ['abate', 'ateba', 'batea', 'beata'], + 'atebrin': ['atebrin', 'rabinet'], + 'atechnic': ['atechnic', 'catechin', 'technica'], + 'atechny': ['atechny', 'chantey'], + 'ateeter': ['ateeter', 'treatee'], + 'atef': ['atef', 'fate', 'feat'], + 'ateles': ['ateles', 'saltee', 'sealet', 'stelae', 'teasel'], + 'atelets': ['atelets', 'tsatlee'], + 'atelier': ['atelier', 'tiralee'], + 'aten': ['ante', 'aten', 'etna', 'nate', 'neat', 'taen', 'tane', 'tean'], + 'atenism': ['atenism', 
'inmeats', 'insteam', 'samnite'], + 'atenist': ['atenist', 'instate', 'satient', 'steatin'], + 'ates': ['ates', 'east', 'eats', 'sate', 'seat', 'seta'], + 'atestine': ['anisette', 'atestine', 'settaine'], + 'athar': ['arhat', 'artha', 'athar'], + 'atheism': ['atheism', 'hamites'], + 'athena': ['ahtena', 'aneath', 'athena'], + 'athenian': ['anthinae', 'athenian'], + 'athenor': ['another', 'athenor', 'rheotan'], + 'athens': ['athens', 'hasten', 'snathe', 'sneath'], + 'atherine': ['atherine', 'herniate'], + 'atheris': ['atheris', 'sheriat'], + 'athermic': ['athermic', 'marchite', 'rhematic'], + 'athing': ['anight', 'athing'], + 'athirst': ['athirst', 'rattish', 'tartish'], + 'athletic': ['athletic', 'thetical'], + 'athletics': ['athletics', 'statelich'], + 'athort': ['athort', 'throat'], + 'athrive': ['athrive', 'hervati'], + 'ati': ['ait', 'ati', 'ita', 'tai'], + 'atik': ['atik', 'ikat'], + 'atimon': ['atimon', 'manito', 'montia'], + 'atingle': ['atingle', 'gelatin', 'genital', 'langite', 'telinga'], + 'atip': ['atip', 'pita'], + 'atis': ['atis', 'sita', 'tsia'], + 'atlantean': ['antenatal', 'atlantean', 'tantalean'], + 'atlantic': ['atlantic', 'tantalic'], + 'atlantid': ['atlantid', 'dilatant'], + 'atlantite': ['atlantite', 'tantalite'], + 'atlas': ['atlas', 'salat', 'salta'], + 'atle': ['atle', 'laet', 'late', 'leat', 'tael', 'tale', 'teal'], + 'atlee': ['atlee', 'elate'], + 'atloidean': ['atloidean', 'dealation'], + 'atma': ['atma', 'tama'], + 'atman': ['atman', 'manta'], + 'atmid': ['admit', 'atmid'], + 'atmo': ['atmo', 'atom', 'moat', 'toma'], + 'atmogenic': ['atmogenic', 'geomantic'], + 'atmos': ['atmos', 'stoma', 'tomas'], + 'atmosphere': ['atmosphere', 'shapometer'], + 'atmostea': ['atmostea', 'steatoma'], + 'atnah': ['atnah', 'tanha', 'thana'], + 'atocia': ['atocia', 'coaita'], + 'atokal': ['atokal', 'lakota'], + 'atoll': ['allot', 'atoll'], + 'atom': ['atmo', 'atom', 'moat', 'toma'], + 'atomic': ['atomic', 'matico'], + 'atomics': ['atomics', 'catoism', 'cosmati', 'osmatic', 'somatic'], + 'atomize': ['atomize', 'miaotze'], + 'atomizer': ['amortize', 'atomizer'], + 'atonal': ['atonal', 'latona'], + 'atonalism': ['anomalist', 'atonalism'], + 'atone': ['atone', 'oaten'], + 'atoner': ['atoner', 'norate', 'ornate'], + 'atonia': ['anotia', 'atonia'], + 'atonic': ['action', 'atonic', 'cation'], + 'atony': ['atony', 'ayont'], + 'atop': ['atop', 'pato'], + 'atopic': ['atopic', 'capito', 'copita'], + 'atorai': ['atorai', 'otaria'], + 'atrail': ['altair', 'atrail', 'atrial', 'lariat', 'latria', 'talari'], + 'atrepsy': ['atrepsy', 'yapster'], + 'atresia': ['asarite', 'asteria', 'atresia', 'setaria'], + 'atresic': ['atresic', 'stearic'], + 'atresy': ['atresy', 'estray', 'reasty', 'stayer'], + 'atretic': ['atretic', 'citrate'], + 'atria': ['arati', 'atria', 'riata', 'tarai', 'tiara'], + 'atrial': ['altair', 'atrail', 'atrial', 'lariat', 'latria', 'talari'], + 'atridean': ['anteriad', 'atridean', 'dentaria'], + 'atrip': ['atrip', 'tapir'], + 'atrocity': ['atrocity', 'citatory'], + 'atrophied': ['aphrodite', 'atrophied', 'diaporthe'], + 'atropia': ['apiator', 'atropia', 'parotia'], + 'atropic': ['apricot', 'atropic', 'parotic', 'patrico'], + 'atroscine': ['atroscine', 'certosina', 'ostracine', 'tinoceras', 'tricosane'], + 'atry': ['arty', 'atry', 'tray'], + 'attacco': ['attacco', 'toccata'], + 'attach': ['attach', 'chatta'], + 'attache': ['attache', 'thecata'], + 'attacher': ['attacher', 'reattach'], + 'attacker': ['attacker', 'reattack'], + 'attain': ['attain', 'tatian'], + 'attainder': 
['antitrade', 'attainder'], + 'attainer': ['attainer', 'reattain', 'tertiana'], + 'attar': ['attar', 'tatar'], + 'attempter': ['attempter', 'reattempt'], + 'attender': ['attender', 'nattered', 'reattend'], + 'attention': ['attention', 'tentation'], + 'attentive': ['attentive', 'tentative'], + 'attentively': ['attentively', 'tentatively'], + 'attentiveness': ['attentiveness', 'tentativeness'], + 'atter': ['atter', 'tater', 'teart', 'tetra', 'treat'], + 'attermine': ['antimeter', 'attermine', 'interteam', 'terminate', 'tetramine'], + 'attern': ['attern', 'natter', 'ratten', 'tarten'], + 'attery': ['attery', 'treaty', 'yatter'], + 'attester': ['attester', 'reattest'], + 'attic': ['attic', 'catti', 'tacit'], + 'attical': ['attical', 'cattail'], + 'attinge': ['attinge', 'tintage'], + 'attire': ['attire', 'ratite', 'tertia'], + 'attired': ['attired', 'tradite'], + 'attorn': ['attorn', 'ratton', 'rottan'], + 'attracter': ['attracter', 'reattract'], + 'attractor': ['attractor', 'tractator'], + 'attrite': ['attrite', 'titrate'], + 'attrition': ['attrition', 'titration'], + 'attune': ['attune', 'nutate', 'tauten'], + 'atule': ['aleut', 'atule'], + 'atumble': ['atumble', 'mutable'], + 'atwin': ['atwin', 'twain', 'witan'], + 'atypic': ['atypic', 'typica'], + 'aube': ['aube', 'beau'], + 'aubrite': ['abiuret', 'aubrite', 'biurate', 'rubiate'], + 'aucan': ['acuan', 'aucan'], + 'aucaner': ['arecuna', 'aucaner'], + 'auchlet': ['auchlet', 'cutheal', 'taluche'], + 'auction': ['auction', 'caution'], + 'auctionary': ['auctionary', 'cautionary'], + 'audiencier': ['audiencier', 'enicuridae'], + 'auge': ['ague', 'auge'], + 'augen': ['augen', 'genua'], + 'augend': ['augend', 'engaud', 'unaged'], + 'auger': ['argue', 'auger'], + 'augerer': ['augerer', 'reargue'], + 'augh': ['augh', 'guha'], + 'augmenter': ['argenteum', 'augmenter'], + 'augustan': ['augustan', 'guatusan'], + 'auh': ['ahu', 'auh', 'hau'], + 'auk': ['aku', 'auk', 'kua'], + 'auld': ['auld', 'dual', 'laud', 'udal'], + 'aulete': ['aulete', 'eluate'], + 'auletic': ['aleutic', 'auletic', 'caulite', 'lutecia'], + 'auletris': ['auletris', 'lisuarte'], + 'aulic': ['aulic', 'lucia'], + 'aulostoma': ['aulostoma', 'autosomal'], + 'aulu': ['aulu', 'ulua'], + 'aum': ['aum', 'mau'], + 'aumbry': ['ambury', 'aumbry'], + 'aumil': ['aumil', 'miaul'], + 'aumrie': ['aumrie', 'uremia'], + 'auncel': ['auncel', 'cuneal', 'lacune', 'launce', 'unlace'], + 'aunt': ['antu', 'aunt', 'naut', 'taun', 'tuan', 'tuna'], + 'auntie': ['auntie', 'uniate'], + 'auntish': ['auntish', 'inhaust'], + 'auntly': ['auntly', 'lutany'], + 'auntsary': ['auntsary', 'unastray'], + 'aura': ['aaru', 'aura'], + 'aural': ['aural', 'laura'], + 'aurar': ['arrau', 'aurar'], + 'auresca': ['auresca', 'caesura'], + 'aureus': ['aureus', 'uraeus'], + 'auricle': ['auricle', 'ciruela'], + 'auricled': ['auricled', 'radicule'], + 'auride': ['auride', 'rideau'], + 'aurin': ['aurin', 'urian'], + 'aurir': ['aurir', 'urari'], + 'auriscalp': ['auriscalp', 'spiracula'], + 'auscult': ['auscult', 'scutula'], + 'aushar': ['arusha', 'aushar'], + 'auster': ['auster', 'reatus'], + 'austere': ['austere', 'euaster'], + 'australian': ['australian', 'saturnalia'], + 'australic': ['australic', 'lactarius'], + 'australite': ['aristulate', 'australite'], + 'austrian': ['asturian', 'austrian', 'saturnia'], + 'aute': ['aute', 'etua'], + 'autecism': ['autecism', 'musicate'], + 'authotype': ['authotype', 'autophyte'], + 'autoclave': ['autoclave', 'vacuolate'], + 'autocrat': ['actuator', 'autocrat'], + 'autoheterosis': ['autoheterosis', 
'heteroousiast'], + 'autometric': ['autometric', 'tautomeric'], + 'autometry': ['autometry', 'tautomery'], + 'autophyte': ['authotype', 'autophyte'], + 'autoplast': ['autoplast', 'postulata'], + 'autopsic': ['autopsic', 'captious'], + 'autopsical': ['apolaustic', 'autopsical'], + 'autoradiograph': ['autoradiograph', 'radioautograph'], + 'autoradiographic': ['autoradiographic', 'radioautographic'], + 'autoradiography': ['autoradiography', 'radioautography'], + 'autoriser': ['arterious', 'autoriser'], + 'autosomal': ['aulostoma', 'autosomal'], + 'auxetic': ['auxetic', 'eutaxic'], + 'aval': ['aval', 'lava'], + 'avanti': ['avanti', 'vinata'], + 'avar': ['avar', 'vara'], + 'ave': ['ave', 'eva'], + 'avenge': ['avenge', 'geneva', 'vangee'], + 'avenger': ['avenger', 'engrave'], + 'avenin': ['avenin', 'vienna'], + 'aventine': ['aventine', 'venetian'], + 'aventurine': ['aventurine', 'uninervate'], + 'aver': ['aver', 'rave', 'vare', 'vera'], + 'avera': ['avera', 'erava'], + 'averil': ['averil', 'elvira'], + 'averin': ['averin', 'ravine'], + 'avert': ['avert', 'tarve', 'taver', 'trave'], + 'avertible': ['avertible', 'veritable'], + 'avertin': ['avertin', 'vitrean'], + 'aves': ['aves', 'save', 'vase'], + 'aviatic': ['aviatic', 'viatica'], + 'aviator': ['aviator', 'tovaria'], + 'avicular': ['avicular', 'varicula'], + 'avid': ['avid', 'diva'], + 'avidous': ['avidous', 'vaudois'], + 'avignonese': ['avignonese', 'ingaevones'], + 'avine': ['avine', 'naive', 'vinea'], + 'avirulence': ['acervuline', 'avirulence'], + 'avis': ['avis', 'siva', 'visa'], + 'avitic': ['avitic', 'viatic'], + 'avo': ['avo', 'ova'], + 'avocet': ['avocet', 'octave', 'vocate'], + 'avodire': ['avodire', 'avoider', 'reavoid'], + 'avoider': ['avodire', 'avoider', 'reavoid'], + 'avolation': ['avolation', 'ovational'], + 'avolitional': ['avolitional', 'violational'], + 'avoucher': ['avoucher', 'reavouch'], + 'avower': ['avower', 'reavow'], + 'avshar': ['avshar', 'varsha'], + 'avulse': ['alveus', 'avulse'], + 'aw': ['aw', 'wa'], + 'awag': ['awag', 'waag'], + 'awaiter': ['awaiter', 'reawait'], + 'awakener': ['awakener', 'reawaken'], + 'awarder': ['awarder', 'reaward'], + 'awash': ['awash', 'sawah'], + 'awaste': ['asweat', 'awaste'], + 'awat': ['awat', 'tawa'], + 'awd': ['awd', 'daw', 'wad'], + 'awe': ['awe', 'wae', 'wea'], + 'aweather': ['aweather', 'wheatear'], + 'aweek': ['aweek', 'keawe'], + 'awesome': ['awesome', 'waesome'], + 'awest': ['awest', 'sweat', 'tawse', 'waste'], + 'awfu': ['awfu', 'wauf'], + 'awful': ['awful', 'fulwa'], + 'awhet': ['awhet', 'wheat'], + 'awin': ['awin', 'wain'], + 'awing': ['awing', 'wigan'], + 'awl': ['awl', 'law'], + 'awn': ['awn', 'naw', 'wan'], + 'awned': ['awned', 'dewan', 'waned'], + 'awner': ['awner', 'newar'], + 'awning': ['awning', 'waning'], + 'awny': ['awny', 'wany', 'yawn'], + 'awol': ['alow', 'awol', 'lowa'], + 'awork': ['awork', 'korwa'], + 'awreck': ['awreck', 'wacker'], + 'awrong': ['awrong', 'growan'], + 'awry': ['awry', 'wary'], + 'axel': ['alex', 'axel', 'axle'], + 'axes': ['axes', 'saxe', 'seax'], + 'axil': ['alix', 'axil'], + 'axile': ['axile', 'lexia'], + 'axine': ['axine', 'xenia'], + 'axle': ['alex', 'axel', 'axle'], + 'axon': ['axon', 'noxa', 'oxan'], + 'axonal': ['axonal', 'oxalan'], + 'axonia': ['anoxia', 'axonia'], + 'ay': ['ay', 'ya'], + 'ayah': ['ayah', 'haya'], + 'aye': ['aye', 'yea'], + 'ayont': ['atony', 'ayont'], + 'azimine': ['aminize', 'animize', 'azimine'], + 'azo': ['azo', 'zoa'], + 'azole': ['azole', 'zoeal'], + 'azon': ['azon', 'onza', 'ozan'], + 'azorian': ['arizona', 
'azorian', 'zonaria'], + 'azorite': ['azorite', 'zoarite'], + 'azoxine': ['azoxine', 'oxazine'], + 'azteca': ['azteca', 'zacate'], + 'azurine': ['azurine', 'urazine'], + 'ba': ['ab', 'ba'], + 'baa': ['aba', 'baa'], + 'baal': ['alba', 'baal', 'bala'], + 'baalath': ['baalath', 'bathala'], + 'baalite': ['baalite', 'bialate', 'labiate'], + 'baalshem': ['baalshem', 'shamable'], + 'baar': ['arab', 'arba', 'baar', 'bara'], + 'bab': ['abb', 'bab'], + 'baba': ['abba', 'baba'], + 'babbler': ['babbler', 'blabber', 'brabble'], + 'babery': ['babery', 'yabber'], + 'babhan': ['babhan', 'habnab'], + 'babishly': ['babishly', 'shabbily'], + 'babishness': ['babishness', 'shabbiness'], + 'babite': ['babite', 'bebait'], + 'babu': ['babu', 'buba'], + 'babul': ['babul', 'bubal'], + 'baby': ['abby', 'baby'], + 'babylonish': ['babylonish', 'nabobishly'], + 'bac': ['bac', 'cab'], + 'bacao': ['bacao', 'caoba'], + 'bach': ['bach', 'chab'], + 'bache': ['bache', 'beach'], + 'bachel': ['bachel', 'bleach'], + 'bachelor': ['bachelor', 'crabhole'], + 'bacillar': ['bacillar', 'cabrilla'], + 'bacis': ['bacis', 'basic'], + 'backblow': ['backblow', 'blowback'], + 'backen': ['backen', 'neback'], + 'backer': ['backer', 'reback'], + 'backfall': ['backfall', 'fallback'], + 'backfire': ['backfire', 'fireback'], + 'backlog': ['backlog', 'gablock'], + 'backrun': ['backrun', 'runback'], + 'backsaw': ['backsaw', 'sawback'], + 'backset': ['backset', 'setback'], + 'backstop': ['backstop', 'stopback'], + 'backswing': ['backswing', 'swingback'], + 'backward': ['backward', 'drawback'], + 'backway': ['backway', 'wayback'], + 'bacon': ['bacon', 'banco'], + 'bacterial': ['bacterial', 'calibrate'], + 'bacteriform': ['bacteriform', 'bracteiform'], + 'bacterin': ['bacterin', 'centibar'], + 'bacterioid': ['aborticide', 'bacterioid'], + 'bacterium': ['bacterium', 'cumbraite'], + 'bactrian': ['bactrian', 'cantabri'], + 'bacula': ['albuca', 'bacula'], + 'baculi': ['abulic', 'baculi'], + 'baculite': ['baculite', 'cubitale'], + 'baculites': ['baculites', 'bisulcate'], + 'baculoid': ['baculoid', 'cuboidal'], + 'bad': ['bad', 'dab'], + 'badaga': ['badaga', 'dagaba', 'gadaba'], + 'badan': ['badan', 'banda'], + 'bade': ['abed', 'bade', 'bead'], + 'badge': ['badge', 'begad'], + 'badian': ['badian', 'indaba'], + 'badigeon': ['badigeon', 'gabioned'], + 'badly': ['badly', 'baldy', 'blady'], + 'badon': ['badon', 'bando'], + 'bae': ['abe', 'bae', 'bea'], + 'baeria': ['aberia', 'baeria', 'baiera'], + 'baetulus': ['baetulus', 'subulate'], + 'baetyl': ['baetyl', 'baylet', 'bleaty'], + 'baetylic': ['baetylic', 'biacetyl'], + 'bafta': ['abaft', 'bafta'], + 'bag': ['bag', 'gab'], + 'bagani': ['bagani', 'bangia', 'ibanag'], + 'bagel': ['bagel', 'belga', 'gable', 'gleba'], + 'bagger': ['bagger', 'beggar'], + 'bagnio': ['bagnio', 'gabion', 'gobian'], + 'bago': ['bago', 'boga'], + 'bagre': ['bagre', 'barge', 'begar', 'rebag'], + 'bahar': ['bahar', 'bhara'], + 'bahoe': ['bahoe', 'bohea', 'obeah'], + 'baht': ['baht', 'bath', 'bhat'], + 'baiera': ['aberia', 'baeria', 'baiera'], + 'baignet': ['baignet', 'beating'], + 'bail': ['albi', 'bail', 'bali'], + 'bailage': ['algieba', 'bailage'], + 'bailer': ['bailer', 'barile'], + 'baillone': ['baillone', 'bonellia'], + 'bailment': ['bailment', 'libament'], + 'bailor': ['bailor', 'bioral'], + 'bailsman': ['bailsman', 'balanism', 'nabalism'], + 'bain': ['bain', 'bani', 'iban'], + 'baioc': ['baioc', 'cabio', 'cobia'], + 'bairam': ['bairam', 'bramia'], + 'bairn': ['abrin', 'bairn', 'brain', 'brian', 'rabin'], + 'bairnie': ['aribine', 
'bairnie', 'iberian'], + 'bairnish': ['bairnish', 'bisharin'], + 'bais': ['absi', 'bais', 'bias', 'isba'], + 'baister': ['baister', 'tribase'], + 'baiter': ['baiter', 'barite', 'rebait', 'terbia'], + 'baith': ['baith', 'habit'], + 'bajocian': ['bajocian', 'jacobian'], + 'bakal': ['bakal', 'balak'], + 'bakatan': ['bakatan', 'batakan'], + 'bake': ['bake', 'beak'], + 'baker': ['baker', 'brake', 'break'], + 'bakerless': ['bakerless', 'brakeless', 'breakless'], + 'bakery': ['bakery', 'barkey'], + 'bakie': ['akebi', 'bakie'], + 'baku': ['baku', 'kuba'], + 'bal': ['alb', 'bal', 'lab'], + 'bala': ['alba', 'baal', 'bala'], + 'baladine': ['baladine', 'balaenid'], + 'balaenid': ['baladine', 'balaenid'], + 'balagan': ['balagan', 'bangala'], + 'balai': ['balai', 'labia'], + 'balak': ['bakal', 'balak'], + 'balan': ['alban', 'balan', 'banal', 'laban', 'nabal', 'nabla'], + 'balancer': ['balancer', 'barnacle'], + 'balangay': ['balangay', 'bangalay'], + 'balanic': ['balanic', 'caliban'], + 'balanid': ['balanid', 'banilad'], + 'balanism': ['bailsman', 'balanism', 'nabalism'], + 'balanite': ['albanite', 'balanite', 'nabalite'], + 'balanites': ['balanites', 'basaltine', 'stainable'], + 'balantidium': ['antialbumid', 'balantidium'], + 'balanus': ['balanus', 'nabalus', 'subanal'], + 'balas': ['balas', 'balsa', 'basal', 'sabal'], + 'balata': ['albata', 'atabal', 'balata'], + 'balatron': ['balatron', 'laborant'], + 'balaustine': ['balaustine', 'unsatiable'], + 'balaustre': ['balaustre', 'saturable'], + 'bald': ['bald', 'blad'], + 'balden': ['balden', 'bandle'], + 'balder': ['balder', 'bardel', 'bedlar', 'bedral', 'belard', 'blader'], + 'baldie': ['abdiel', 'baldie'], + 'baldish': ['baldish', 'bladish'], + 'baldmoney': ['baldmoney', 'molybdena'], + 'baldness': ['baldness', 'bandless'], + 'baldy': ['badly', 'baldy', 'blady'], + 'bale': ['abel', 'able', 'albe', 'bale', 'beal', 'bela', 'blae'], + 'balearic': ['balearic', 'cebalrai'], + 'baleen': ['baleen', 'enable'], + 'balefire': ['afebrile', 'balefire', 'fireable'], + 'baleise': ['baleise', 'besaiel'], + 'baler': ['abler', 'baler', 'belar', 'blare', 'blear'], + 'balete': ['balete', 'belate'], + 'bali': ['albi', 'bail', 'bali'], + 'baline': ['baline', 'blaine'], + 'balinger': ['balinger', 'ringable'], + 'balker': ['balker', 'barkle'], + 'ballaster': ['ballaster', 'reballast'], + 'ballate': ['ballate', 'tabella'], + 'balli': ['balli', 'billa'], + 'balloter': ['balloter', 'reballot'], + 'ballplayer': ['ballplayer', 'preallably'], + 'ballroom': ['ballroom', 'moorball'], + 'ballweed': ['ballweed', 'weldable'], + 'balm': ['balm', 'lamb'], + 'balminess': ['balminess', 'lambiness'], + 'balmlike': ['balmlike', 'lamblike'], + 'balmy': ['balmy', 'lamby'], + 'balolo': ['balolo', 'lobola'], + 'balonea': ['abalone', 'balonea'], + 'balor': ['balor', 'bolar', 'boral', 'labor', 'lobar'], + 'balow': ['ablow', 'balow', 'bowla'], + 'balsa': ['balas', 'balsa', 'basal', 'sabal'], + 'balsam': ['balsam', 'sambal'], + 'balsamic': ['balsamic', 'cabalism'], + 'balsamo': ['absalom', 'balsamo'], + 'balsamy': ['abysmal', 'balsamy'], + 'balt': ['balt', 'blat'], + 'baltei': ['albeit', + 'albite', + 'baltei', + 'belait', + 'betail', + 'bletia', + 'libate'], + 'balter': ['albert', 'balter', 'labret', 'tabler'], + 'balteus': ['balteus', 'sublate'], + 'baltis': ['baltis', 'bisalt'], + 'balu': ['balu', 'baul', 'bual', 'luba'], + 'balunda': ['balunda', 'bulanda'], + 'baluster': ['baluster', 'rustable'], + 'balut': ['balut', 'tubal'], + 'bam': ['bam', 'mab'], + 'ban': ['ban', 'nab'], + 'bana': ['anba', 
'bana'], + 'banak': ['banak', 'nabak'], + 'banal': ['alban', 'balan', 'banal', 'laban', 'nabal', 'nabla'], + 'banat': ['banat', 'batan'], + 'banca': ['banca', 'caban'], + 'bancal': ['bancal', 'blanca'], + 'banco': ['bacon', 'banco'], + 'banda': ['badan', 'banda'], + 'bandage': ['bandage', 'dagbane'], + 'bandar': ['bandar', 'raband'], + 'bandarlog': ['bandarlog', 'langobard'], + 'bande': ['bande', 'benda'], + 'bander': ['bander', 'brenda'], + 'banderma': ['banderma', 'breadman'], + 'banderole': ['banderole', 'bandoleer'], + 'bandhook': ['bandhook', 'handbook'], + 'bandle': ['balden', 'bandle'], + 'bandless': ['baldness', 'bandless'], + 'bando': ['badon', 'bando'], + 'bandoleer': ['banderole', 'bandoleer'], + 'bandor': ['bandor', 'bondar', 'roband'], + 'bandore': ['bandore', 'broaden'], + 'bane': ['bane', 'bean', 'bena'], + 'bangala': ['balagan', 'bangala'], + 'bangalay': ['balangay', 'bangalay'], + 'bangash': ['bangash', 'nashgab'], + 'banger': ['banger', 'engarb', 'graben'], + 'banghy': ['banghy', 'hangby'], + 'bangia': ['bagani', 'bangia', 'ibanag'], + 'bangle': ['bangle', 'bengal'], + 'bani': ['bain', 'bani', 'iban'], + 'banilad': ['balanid', 'banilad'], + 'banisher': ['banisher', 'rebanish'], + 'baniva': ['baniva', 'bavian'], + 'baniya': ['baniya', 'banyai'], + 'banjoist': ['banjoist', 'bostanji'], + 'bank': ['bank', 'knab', 'nabk'], + 'banker': ['banker', 'barken'], + 'banshee': ['banshee', 'benshea'], + 'bantam': ['bantam', 'batman'], + 'banteng': ['banteng', 'bentang'], + 'banyai': ['baniya', 'banyai'], + 'banzai': ['banzai', 'zabian'], + 'bar': ['bar', 'bra', 'rab'], + 'bara': ['arab', 'arba', 'baar', 'bara'], + 'barabra': ['barabra', 'barbara'], + 'barad': ['barad', 'draba'], + 'barb': ['barb', 'brab'], + 'barbara': ['barabra', 'barbara'], + 'barbe': ['barbe', 'bebar', 'breba', 'rebab'], + 'barbed': ['barbed', 'dabber'], + 'barbel': ['barbel', 'labber', 'rabble'], + 'barbet': ['barbet', 'rabbet', 'tabber'], + 'barbette': ['barbette', 'bebatter'], + 'barbion': ['barbion', 'rabboni'], + 'barbitone': ['barbitone', 'barbotine'], + 'barbone': ['barbone', 'bebaron'], + 'barbotine': ['barbitone', 'barbotine'], + 'barcella': ['barcella', 'caballer'], + 'barcoo': ['barcoo', 'baroco'], + 'bard': ['bard', 'brad', 'drab'], + 'bardel': ['balder', 'bardel', 'bedlar', 'bedral', 'belard', 'blader'], + 'bardie': ['abider', 'bardie'], + 'bardily': ['bardily', 'rabidly', 'ridably'], + 'bardiness': ['bardiness', 'rabidness'], + 'barding': ['barding', 'brigand'], + 'bardo': ['abord', 'bardo', 'board', 'broad', 'dobra', 'dorab'], + 'bardy': ['bardy', 'darby'], + 'bare': ['bare', 'bear', 'brae'], + 'barefaced': ['barefaced', 'facebread'], + 'barefoot': ['barefoot', 'bearfoot'], + 'barehanded': ['barehanded', 'bradenhead', 'headbander'], + 'barehead': ['barehead', 'braehead'], + 'barely': ['barely', 'barley', 'bleary'], + 'barer': ['barer', 'rebar'], + 'baretta': ['baretta', 'rabatte', 'tabaret'], + 'bargainer': ['bargainer', 'rebargain'], + 'barge': ['bagre', 'barge', 'begar', 'rebag'], + 'bargeer': ['bargeer', 'gerbera'], + 'bargeese': ['bargeese', 'begrease'], + 'bari': ['abir', 'bari', 'rabi'], + 'baric': ['baric', 'carib', 'rabic'], + 'barid': ['barid', 'bidar', 'braid', 'rabid'], + 'barie': ['barie', 'beira', 'erbia', 'rebia'], + 'barile': ['bailer', 'barile'], + 'baris': ['baris', 'sabir'], + 'barish': ['barish', 'shibar'], + 'barit': ['barit', 'ribat'], + 'barite': ['baiter', 'barite', 'rebait', 'terbia'], + 'baritone': ['abrotine', 'baritone', 'obtainer', 'reobtain'], + 'barken': ['banker', 
'barken'], + 'barker': ['barker', 'braker'], + 'barkey': ['bakery', 'barkey'], + 'barkle': ['balker', 'barkle'], + 'barky': ['barky', 'braky'], + 'barley': ['barely', 'barley', 'bleary'], + 'barling': ['barling', 'bringal'], + 'barm': ['barm', 'bram'], + 'barmbrack': ['barmbrack', 'brambrack'], + 'barmote': ['barmote', 'bromate'], + 'barmy': ['ambry', 'barmy'], + 'barn': ['barn', 'bran'], + 'barnabite': ['barnabite', 'rabbanite', 'rabbinate'], + 'barnacle': ['balancer', 'barnacle'], + 'barney': ['barney', 'nearby'], + 'barny': ['barny', 'bryan'], + 'baroco': ['barcoo', 'baroco'], + 'barolo': ['barolo', 'robalo'], + 'baron': ['baron', 'boran'], + 'baronet': ['baronet', 'reboant'], + 'barong': ['barong', 'brogan'], + 'barosmin': ['ambrosin', 'barosmin', 'sabromin'], + 'barothermograph': ['barothermograph', 'thermobarograph'], + 'barotse': ['barotse', 'boaster', 'reboast', 'sorbate'], + 'barpost': ['absorpt', 'barpost'], + 'barracan': ['barracan', 'barranca'], + 'barranca': ['barracan', 'barranca'], + 'barrelet': ['barrelet', 'terebral'], + 'barret': ['barret', 'barter'], + 'barrette': ['barrette', 'batterer'], + 'barrio': ['barrio', 'brairo'], + 'barsac': ['barsac', 'scarab'], + 'barse': ['barse', 'besra', 'saber', 'serab'], + 'bart': ['bart', 'brat'], + 'barter': ['barret', 'barter'], + 'barton': ['barton', 'brotan'], + 'bartsia': ['arabist', 'bartsia'], + 'barundi': ['barundi', 'unbraid'], + 'barvel': ['barvel', 'blaver', 'verbal'], + 'barwise': ['barwise', 'swarbie'], + 'barye': ['barye', 'beray', 'yerba'], + 'baryta': ['baryta', 'taryba'], + 'barytine': ['barytine', 'bryanite'], + 'baryton': ['baryton', 'brotany'], + 'bas': ['bas', 'sab'], + 'basal': ['balas', 'balsa', 'basal', 'sabal'], + 'basally': ['basally', 'salably'], + 'basaltic': ['basaltic', 'cabalist'], + 'basaltine': ['balanites', 'basaltine', 'stainable'], + 'base': ['base', 'besa', 'sabe', 'seba'], + 'basella': ['basella', 'sabella', 'salable'], + 'bash': ['bash', 'shab'], + 'basial': ['basial', 'blasia'], + 'basic': ['bacis', 'basic'], + 'basically': ['asyllabic', 'basically'], + 'basidium': ['basidium', 'diiambus'], + 'basil': ['basil', 'labis'], + 'basileus': ['basileus', 'issuable', 'suasible'], + 'basilweed': ['basilweed', 'bladewise'], + 'basinasal': ['basinasal', 'bassalian'], + 'basinet': ['basinet', 'besaint', 'bestain'], + 'basion': ['basion', 'bonsai', 'sabino'], + 'basiparachromatin': ['basiparachromatin', 'marsipobranchiata'], + 'basket': ['basket', 'betask'], + 'basketwork': ['basketwork', 'workbasket'], + 'basos': ['basos', 'basso'], + 'bassalian': ['basinasal', 'bassalian'], + 'bassanite': ['bassanite', 'sebastian'], + 'basset': ['asbest', 'basset'], + 'basso': ['basos', 'basso'], + 'bast': ['bast', 'bats', 'stab'], + 'basta': ['basta', 'staab'], + 'baste': ['baste', 'beast', 'tabes'], + 'basten': ['absent', 'basten'], + 'baster': ['baster', 'bestar', 'breast'], + 'bastille': ['bastille', 'listable'], + 'bastion': ['abiston', 'bastion'], + 'bastionet': ['bastionet', 'obstinate'], + 'bastite': ['bastite', 'batiste', 'bistate'], + 'basto': ['basto', 'boast', 'sabot'], + 'basuto': ['abouts', 'basuto'], + 'bat': ['bat', 'tab'], + 'batad': ['abdat', 'batad'], + 'batakan': ['bakatan', 'batakan'], + 'bataleur': ['bataleur', 'tabulare'], + 'batan': ['banat', 'batan'], + 'batara': ['artaba', 'batara'], + 'batcher': ['batcher', 'berchta', 'brachet'], + 'bate': ['abet', 'bate', 'beat', 'beta'], + 'batea': ['abate', 'ateba', 'batea', 'beata'], + 'batel': ['batel', 'blate', 'bleat', 'table'], + 'batement': ['abetment', 
'batement'], + 'bater': ['abret', 'bater', 'berat'], + 'batfowler': ['afterblow', 'batfowler'], + 'bath': ['baht', 'bath', 'bhat'], + 'bathala': ['baalath', 'bathala'], + 'bathe': ['bathe', 'beath'], + 'bather': ['bather', 'bertha', 'breath'], + 'bathonian': ['bathonian', 'nabothian'], + 'batik': ['batik', 'kitab'], + 'batino': ['batino', 'oatbin', 'obtain'], + 'batis': ['absit', 'batis'], + 'batiste': ['bastite', 'batiste', 'bistate'], + 'batling': ['batling', 'tabling'], + 'batman': ['bantam', 'batman'], + 'batophobia': ['batophobia', 'tabophobia'], + 'batrachia': ['batrachia', 'brachiata'], + 'batrachian': ['batrachian', 'branchiata'], + 'bats': ['bast', 'bats', 'stab'], + 'battel': ['battel', 'battle', 'tablet'], + 'batteler': ['batteler', 'berattle'], + 'battening': ['battening', 'bitangent'], + 'batter': ['batter', 'bertat', 'tabret', 'tarbet'], + 'batterer': ['barrette', 'batterer'], + 'battle': ['battel', 'battle', 'tablet'], + 'battler': ['battler', 'blatter', 'brattle'], + 'battue': ['battue', 'tubate'], + 'batule': ['batule', 'betula', 'tabule'], + 'batyphone': ['batyphone', 'hypnobate'], + 'batzen': ['batzen', 'bezant', 'tanzeb'], + 'baud': ['baud', 'buda', 'daub'], + 'baul': ['balu', 'baul', 'bual', 'luba'], + 'baun': ['baun', 'buna', 'nabu', 'nuba'], + 'bauta': ['abuta', 'bauta'], + 'bavian': ['baniva', 'bavian'], + 'baw': ['baw', 'wab'], + 'bawl': ['bawl', 'blaw'], + 'bawler': ['bawler', 'brelaw', 'rebawl', 'warble'], + 'bay': ['aby', 'bay'], + 'baya': ['baya', 'yaba'], + 'bayed': ['bayed', 'beady', 'beday'], + 'baylet': ['baetyl', 'baylet', 'bleaty'], + 'bayonet': ['bayonet', 'betoyan'], + 'baze': ['baze', 'ezba'], + 'bea': ['abe', 'bae', 'bea'], + 'beach': ['bache', 'beach'], + 'bead': ['abed', 'bade', 'bead'], + 'beaded': ['beaded', 'bedead'], + 'beader': ['beader', 'bedare'], + 'beadleism': ['beadleism', 'demisable'], + 'beadlet': ['beadlet', 'belated'], + 'beady': ['bayed', 'beady', 'beday'], + 'beagle': ['beagle', 'belage', 'belgae'], + 'beak': ['bake', 'beak'], + 'beaker': ['beaker', 'berake', 'rebake'], + 'beal': ['abel', 'able', 'albe', 'bale', 'beal', 'bela', 'blae'], + 'bealing': ['algenib', 'bealing', 'belgian', 'bengali'], + 'beam': ['beam', 'bema'], + 'beamer': ['ambeer', 'beamer'], + 'beamless': ['assemble', 'beamless'], + 'beamster': ['beamster', 'bemaster', 'bestream'], + 'beamwork': ['beamwork', 'bowmaker'], + 'beamy': ['beamy', 'embay', 'maybe'], + 'bean': ['bane', 'bean', 'bena'], + 'beanfield': ['beanfield', 'definable'], + 'beant': ['abnet', 'beant'], + 'bear': ['bare', 'bear', 'brae'], + 'bearance': ['bearance', 'carabeen'], + 'beard': ['ardeb', 'beard', 'bread', 'debar'], + 'beardless': ['beardless', 'breadless'], + 'beardlessness': ['beardlessness', 'breadlessness'], + 'bearer': ['bearer', 'rebear'], + 'bearess': ['bearess', 'bessera'], + 'bearfoot': ['barefoot', 'bearfoot'], + 'bearing': ['bearing', 'begrain', 'brainge', 'rigbane'], + 'bearlet': ['bearlet', 'bleater', 'elberta', 'retable'], + 'bearm': ['amber', 'bearm', 'bemar', 'bream', 'embar'], + 'beast': ['baste', 'beast', 'tabes'], + 'beastlily': ['beastlily', 'bestially'], + 'beat': ['abet', 'bate', 'beat', 'beta'], + 'beata': ['abate', 'ateba', 'batea', 'beata'], + 'beater': ['beater', 'berate', 'betear', 'rebate', 'rebeat'], + 'beath': ['bathe', 'beath'], + 'beating': ['baignet', 'beating'], + 'beau': ['aube', 'beau'], + 'bebait': ['babite', 'bebait'], + 'bebar': ['barbe', 'bebar', 'breba', 'rebab'], + 'bebaron': ['barbone', 'bebaron'], + 'bebaste': ['bebaste', 'bebeast'], + 'bebatter': 
['barbette', 'bebatter'], + 'bebay': ['abbey', 'bebay'], + 'bebeast': ['bebaste', 'bebeast'], + 'bebog': ['bebog', 'begob', 'gobbe'], + 'becard': ['becard', 'braced'], + 'becater': ['becater', 'betrace'], + 'because': ['because', 'besauce'], + 'becharm': ['becharm', 'brecham', 'chamber'], + 'becher': ['becher', 'breech'], + 'bechern': ['bechern', 'bencher'], + 'bechirp': ['bechirp', 'brephic'], + 'becker': ['becker', 'rebeck'], + 'beclad': ['beclad', 'cabled'], + 'beclart': ['beclart', 'crablet'], + 'becloud': ['becloud', 'obclude'], + 'becram': ['becram', 'camber', 'crambe'], + 'becrimson': ['becrimson', 'scombrine'], + 'becry': ['becry', 'bryce'], + 'bed': ['bed', 'deb'], + 'bedamn': ['bedamn', 'bedman'], + 'bedare': ['beader', 'bedare'], + 'bedark': ['bedark', 'debark'], + 'beday': ['bayed', 'beady', 'beday'], + 'bedead': ['beaded', 'bedead'], + 'bedel': ['bedel', 'bleed'], + 'beden': ['beden', 'deben', 'deneb'], + 'bedim': ['bedim', 'imbed'], + 'bedip': ['bedip', 'biped'], + 'bedismal': ['bedismal', 'semibald'], + 'bedlam': ['bedlam', 'beldam', 'blamed'], + 'bedlar': ['balder', 'bardel', 'bedlar', 'bedral', 'belard', 'blader'], + 'bedless': ['bedless', 'blessed'], + 'bedman': ['bedamn', 'bedman'], + 'bedoctor': ['bedoctor', 'codebtor'], + 'bedog': ['bedog', 'bodge'], + 'bedrail': ['bedrail', 'bridale', 'ridable'], + 'bedral': ['balder', 'bardel', 'bedlar', 'bedral', 'belard', 'blader'], + 'bedrid': ['bedrid', 'bidder'], + 'bedrip': ['bedrip', 'prebid'], + 'bedrock': ['bedrock', 'brocked'], + 'bedroom': ['bedroom', 'boerdom', 'boredom'], + 'bedrown': ['bedrown', 'browden'], + 'bedrug': ['bedrug', 'budger'], + 'bedsick': ['bedsick', 'sickbed'], + 'beduck': ['beduck', 'bucked'], + 'bedur': ['bedur', 'rebud', 'redub'], + 'bedusk': ['bedusk', 'busked'], + 'bedust': ['bedust', 'bestud', 'busted'], + 'beearn': ['beearn', 'berean'], + 'beeman': ['beeman', 'bemean', 'bename'], + 'been': ['been', 'bene', 'eben'], + 'beer': ['beer', 'bere', 'bree'], + 'beest': ['beest', 'beset'], + 'beeswing': ['beeswing', 'beswinge'], + 'befathered': ['befathered', 'featherbed'], + 'befile': ['befile', 'belief'], + 'befinger': ['befinger', 'befringe'], + 'beflea': ['beflea', 'beleaf'], + 'beflour': ['beflour', 'fourble'], + 'beflum': ['beflum', 'fumble'], + 'befret': ['befret', 'bereft'], + 'befringe': ['befinger', 'befringe'], + 'begad': ['badge', 'begad'], + 'begall': ['begall', 'glebal'], + 'begar': ['bagre', 'barge', 'begar', 'rebag'], + 'begash': ['begash', 'beshag'], + 'begat': ['begat', 'betag'], + 'begettal': ['begettal', 'gettable'], + 'beggar': ['bagger', 'beggar'], + 'beggarer': ['beggarer', 'rebeggar'], + 'begin': ['begin', 'being', 'binge'], + 'begird': ['begird', 'bridge'], + 'beglic': ['beglic', 'belgic'], + 'bego': ['bego', 'egbo'], + 'begob': ['bebog', 'begob', 'gobbe'], + 'begone': ['begone', 'engobe'], + 'begrain': ['bearing', 'begrain', 'brainge', 'rigbane'], + 'begrease': ['bargeese', 'begrease'], + 'behaviorism': ['behaviorism', 'misbehavior'], + 'behears': ['behears', 'beshear'], + 'behint': ['behint', 'henbit'], + 'beholder': ['beholder', 'rebehold'], + 'behorn': ['behorn', 'brehon'], + 'beid': ['beid', 'bide', 'debi', 'dieb'], + 'being': ['begin', 'being', 'binge'], + 'beira': ['barie', 'beira', 'erbia', 'rebia'], + 'beisa': ['abies', 'beisa'], + 'bel': ['bel', 'elb'], + 'bela': ['abel', 'able', 'albe', 'bale', 'beal', 'bela', 'blae'], + 'belabor': ['belabor', 'borable'], + 'belaced': ['belaced', 'debacle'], + 'belage': ['beagle', 'belage', 'belgae'], + 'belait': ['albeit', + 'albite', + 
'baltei', + 'belait', + 'betail', + 'bletia', + 'libate'], + 'belam': ['amble', 'belam', 'blame', 'mabel'], + 'belar': ['abler', 'baler', 'belar', 'blare', 'blear'], + 'belard': ['balder', 'bardel', 'bedlar', 'bedral', 'belard', 'blader'], + 'belate': ['balete', 'belate'], + 'belated': ['beadlet', 'belated'], + 'belaud': ['ablude', 'belaud'], + 'beldam': ['bedlam', 'beldam', 'blamed'], + 'beleaf': ['beflea', 'beleaf'], + 'beleap': ['beleap', 'bepale'], + 'belga': ['bagel', 'belga', 'gable', 'gleba'], + 'belgae': ['beagle', 'belage', 'belgae'], + 'belgian': ['algenib', 'bealing', 'belgian', 'bengali'], + 'belgic': ['beglic', 'belgic'], + 'belial': ['alible', 'belial', 'labile', 'liable'], + 'belief': ['befile', 'belief'], + 'belili': ['belili', 'billie'], + 'belite': ['belite', 'beltie', 'bietle'], + 'belitter': ['belitter', 'tribelet'], + 'belive': ['belive', 'beveil'], + 'bella': ['bella', 'label'], + 'bellied': ['bellied', 'delible'], + 'bellona': ['allbone', 'bellona'], + 'bellonian': ['bellonian', 'nonliable'], + 'bellote': ['bellote', 'lobelet'], + 'bellower': ['bellower', 'rebellow'], + 'belltail': ['belltail', 'bletilla', 'tillable'], + 'bellyer': ['bellyer', 'rebelly'], + 'bellypinch': ['bellypinch', 'pinchbelly'], + 'beloid': ['beloid', 'boiled', 'bolide'], + 'belonger': ['belonger', 'rebelong'], + 'belonid': ['belonid', 'boldine'], + 'belord': ['belord', 'bordel', 'rebold'], + 'below': ['below', 'bowel', 'elbow'], + 'belt': ['belt', 'blet'], + 'beltane': ['beltane', 'tenable'], + 'belter': ['belter', 'elbert', 'treble'], + 'beltie': ['belite', 'beltie', 'bietle'], + 'beltine': ['beltine', 'tenible'], + 'beltir': ['beltir', 'riblet'], + 'beltman': ['beltman', 'lambent'], + 'belve': ['belve', 'bevel'], + 'bema': ['beam', 'bema'], + 'bemail': ['bemail', 'lambie'], + 'beman': ['beman', 'nambe'], + 'bemar': ['amber', 'bearm', 'bemar', 'bream', 'embar'], + 'bemaster': ['beamster', 'bemaster', 'bestream'], + 'bemaul': ['bemaul', 'blumea'], + 'bemeal': ['bemeal', 'meable'], + 'bemean': ['beeman', 'bemean', 'bename'], + 'bemire': ['bemire', 'bireme'], + 'bemitred': ['bemitred', 'timbered'], + 'bemoil': ['bemoil', 'mobile'], + 'bemole': ['bemole', 'embole'], + 'bemusk': ['bemusk', 'embusk'], + 'ben': ['ben', 'neb'], + 'bena': ['bane', 'bean', 'bena'], + 'benacus': ['acubens', 'benacus'], + 'bename': ['beeman', 'bemean', 'bename'], + 'benami': ['benami', 'bimane'], + 'bencher': ['bechern', 'bencher'], + 'benchwork': ['benchwork', 'workbench'], + 'benda': ['bande', 'benda'], + 'bender': ['bender', 'berend', 'rebend'], + 'bene': ['been', 'bene', 'eben'], + 'benedight': ['benedight', 'benighted'], + 'benefiter': ['benefiter', 'rebenefit'], + 'bengal': ['bangle', 'bengal'], + 'bengali': ['algenib', 'bealing', 'belgian', 'bengali'], + 'beni': ['beni', 'bien', 'bine', 'inbe'], + 'benighted': ['benedight', 'benighted'], + 'beno': ['beno', 'bone', 'ebon'], + 'benote': ['benote', 'betone'], + 'benshea': ['banshee', 'benshea'], + 'benshee': ['benshee', 'shebeen'], + 'bentang': ['banteng', 'bentang'], + 'benton': ['benton', 'bonnet'], + 'benu': ['benu', 'unbe'], + 'benward': ['benward', 'brawned'], + 'benzantialdoxime': ['antibenzaldoxime', 'benzantialdoxime'], + 'benzein': ['benzein', 'benzine'], + 'benzine': ['benzein', 'benzine'], + 'benzo': ['benzo', 'bonze'], + 'benzofluorene': ['benzofluorene', 'fluorobenzene'], + 'benzonitrol': ['benzonitrol', 'nitrobenzol'], + 'bepale': ['beleap', 'bepale'], + 'bepart': ['bepart', 'berapt', 'betrap'], + 'bepaste': ['bepaste', 'bespate'], + 'bepester': 
['bepester', 'prebeset'], + 'beplaster': ['beplaster', 'prestable'], + 'ber': ['ber', 'reb'], + 'berake': ['beaker', 'berake', 'rebake'], + 'berapt': ['bepart', 'berapt', 'betrap'], + 'berat': ['abret', 'bater', 'berat'], + 'berate': ['beater', 'berate', 'betear', 'rebate', 'rebeat'], + 'berattle': ['batteler', 'berattle'], + 'beraunite': ['beraunite', 'unebriate'], + 'beray': ['barye', 'beray', 'yerba'], + 'berberi': ['berberi', 'rebribe'], + 'berchta': ['batcher', 'berchta', 'brachet'], + 'bere': ['beer', 'bere', 'bree'], + 'berean': ['beearn', 'berean'], + 'bereft': ['befret', 'bereft'], + 'berend': ['bender', 'berend', 'rebend'], + 'berg': ['berg', 'gerb'], + 'bergama': ['bergama', 'megabar'], + 'bergamo': ['bergamo', 'embargo'], + 'beri': ['beri', 'bier', 'brei', 'ribe'], + 'beringed': ['beringed', 'breeding'], + 'berinse': ['berinse', 'besiren'], + 'berley': ['berley', 'bleery'], + 'berlinite': ['berlinite', 'libertine'], + 'bermudite': ['bermudite', 'demibrute'], + 'bernard': ['bernard', 'brander', 'rebrand'], + 'bernese': ['bernese', 'besneer'], + 'beroe': ['beroe', 'boree'], + 'beroida': ['beroida', 'boreiad'], + 'beroll': ['beroll', 'boller'], + 'berossos': ['berossos', 'obsessor'], + 'beround': ['beround', 'bounder', 'rebound', 'unbored', 'unorbed', 'unrobed'], + 'berri': ['berri', 'brier'], + 'berried': ['berried', 'briered'], + 'berrybush': ['berrybush', 'shrubbery'], + 'bersil': ['bersil', 'birsle'], + 'bert': ['bert', 'bret'], + 'bertat': ['batter', 'bertat', 'tabret', 'tarbet'], + 'berth': ['berth', 'breth'], + 'bertha': ['bather', 'bertha', 'breath'], + 'berther': ['berther', 'herbert'], + 'berthing': ['berthing', 'brighten'], + 'bertie': ['bertie', 'betire', 'rebite'], + 'bertolonia': ['bertolonia', 'borolanite'], + 'berust': ['berust', 'buster', 'stuber'], + 'bervie': ['bervie', 'brieve'], + 'beryllia': ['beryllia', 'reliably'], + 'besa': ['base', 'besa', 'sabe', 'seba'], + 'besaiel': ['baleise', 'besaiel'], + 'besaint': ['basinet', 'besaint', 'bestain'], + 'besauce': ['because', 'besauce'], + 'bescour': ['bescour', 'buceros', 'obscure'], + 'beset': ['beest', 'beset'], + 'beshadow': ['beshadow', 'bodewash'], + 'beshag': ['begash', 'beshag'], + 'beshear': ['behears', 'beshear'], + 'beshod': ['beshod', 'debosh'], + 'besiren': ['berinse', 'besiren'], + 'besit': ['besit', 'betis'], + 'beslaver': ['beslaver', 'servable', 'versable'], + 'beslime': ['beslime', 'besmile'], + 'beslings': ['beslings', 'blessing', 'glibness'], + 'beslow': ['beslow', 'bowels'], + 'besmile': ['beslime', 'besmile'], + 'besneer': ['bernese', 'besneer'], + 'besoot': ['besoot', 'bootes'], + 'besot': ['besot', 'betso'], + 'besoul': ['besoul', 'blouse', 'obelus'], + 'besour': ['besour', 'boreus', 'bourse', 'bouser'], + 'bespate': ['bepaste', 'bespate'], + 'besra': ['barse', 'besra', 'saber', 'serab'], + 'bessera': ['bearess', 'bessera'], + 'bestain': ['basinet', 'besaint', 'bestain'], + 'bestar': ['baster', 'bestar', 'breast'], + 'besteer': ['besteer', 'rebeset'], + 'bestial': ['astilbe', 'bestial', 'blastie', 'stabile'], + 'bestially': ['beastlily', 'bestially'], + 'bestiarian': ['antirabies', 'bestiarian'], + 'bestiary': ['bestiary', 'sybarite'], + 'bestir': ['bestir', 'bister'], + 'bestorm': ['bestorm', 'mobster'], + 'bestowal': ['bestowal', 'stowable'], + 'bestower': ['bestower', 'rebestow'], + 'bestraw': ['bestraw', 'wabster'], + 'bestream': ['beamster', 'bemaster', 'bestream'], + 'bestrew': ['bestrew', 'webster'], + 'bestride': ['bestride', 'bistered'], + 'bestud': ['bedust', 'bestud', 'busted'], + 
'beswinge': ['beeswing', 'beswinge'], + 'beta': ['abet', 'bate', 'beat', 'beta'], + 'betag': ['begat', 'betag'], + 'betail': ['albeit', + 'albite', + 'baltei', + 'belait', + 'betail', + 'bletia', + 'libate'], + 'betailor': ['betailor', 'laborite', 'orbitale'], + 'betask': ['basket', 'betask'], + 'betear': ['beater', 'berate', 'betear', 'rebate', 'rebeat'], + 'beth': ['beth', 'theb'], + 'betire': ['bertie', 'betire', 'rebite'], + 'betis': ['besit', 'betis'], + 'betone': ['benote', 'betone'], + 'betoss': ['betoss', 'bosset'], + 'betoya': ['betoya', 'teaboy'], + 'betoyan': ['bayonet', 'betoyan'], + 'betrace': ['becater', 'betrace'], + 'betrail': ['betrail', 'librate', 'triable', 'trilabe'], + 'betrap': ['bepart', 'berapt', 'betrap'], + 'betrayal': ['betrayal', 'tearably'], + 'betrayer': ['betrayer', 'eatberry', 'rebetray', 'teaberry'], + 'betread': ['betread', 'debater'], + 'betrim': ['betrim', 'timber', 'timbre'], + 'betso': ['besot', 'betso'], + 'betta': ['betta', 'tabet'], + 'bettina': ['bettina', 'tabinet', 'tibetan'], + 'betula': ['batule', 'betula', 'tabule'], + 'betulin': ['betulin', 'bluntie'], + 'beturbaned': ['beturbaned', 'unrabbeted'], + 'beveil': ['belive', 'beveil'], + 'bevel': ['belve', 'bevel'], + 'bever': ['bever', 'breve'], + 'bewailer': ['bewailer', 'rebewail'], + 'bework': ['bework', 'bowker'], + 'bey': ['bey', 'bye'], + 'beydom': ['beydom', 'embody'], + 'bezant': ['batzen', 'bezant', 'tanzeb'], + 'bezzo': ['bezzo', 'bozze'], + 'bhakti': ['bhakti', 'khatib'], + 'bhandari': ['bhandari', 'hairband'], + 'bhar': ['bhar', 'harb'], + 'bhara': ['bahar', 'bhara'], + 'bhat': ['baht', 'bath', 'bhat'], + 'bhima': ['bhima', 'biham'], + 'bhotia': ['bhotia', 'tobiah'], + 'bhutani': ['bhutani', 'unhabit'], + 'biacetyl': ['baetylic', 'biacetyl'], + 'bialate': ['baalite', 'bialate', 'labiate'], + 'bialveolar': ['bialveolar', 'labiovelar'], + 'bianca': ['abanic', 'bianca'], + 'bianco': ['bianco', 'bonaci'], + 'biangular': ['biangular', 'bulgarian'], + 'bias': ['absi', 'bais', 'bias', 'isba'], + 'biatomic': ['biatomic', 'moabitic'], + 'bible': ['bible', 'blibe'], + 'bicarpellary': ['bicarpellary', 'prebacillary'], + 'bickern': ['bickern', 'bricken'], + 'biclavate': ['activable', 'biclavate'], + 'bicorn': ['bicorn', 'bicron'], + 'bicornate': ['bicornate', 'carbonite', 'reboantic'], + 'bicrenate': ['abenteric', 'bicrenate'], + 'bicron': ['bicorn', 'bicron'], + 'bicrural': ['bicrural', 'rubrical'], + 'bid': ['bid', 'dib'], + 'bidar': ['barid', 'bidar', 'braid', 'rabid'], + 'bidder': ['bedrid', 'bidder'], + 'bide': ['beid', 'bide', 'debi', 'dieb'], + 'bident': ['bident', 'indebt'], + 'bidented': ['bidented', 'indebted'], + 'bider': ['bider', 'bredi', 'bride', 'rebid'], + 'bidet': ['bidet', 'debit'], + 'biduous': ['biduous', 'dubious'], + 'bien': ['beni', 'bien', 'bine', 'inbe'], + 'bier': ['beri', 'bier', 'brei', 'ribe'], + 'bietle': ['belite', 'beltie', 'bietle'], + 'bifer': ['bifer', 'brief', 'fiber'], + 'big': ['big', 'gib'], + 'biga': ['agib', 'biga', 'gabi'], + 'bigamous': ['bigamous', 'subimago'], + 'bigener': ['bigener', 'rebegin'], + 'bigential': ['bigential', 'tangibile'], + 'biggin': ['biggin', 'gibing'], + 'bigoted': ['bigoted', 'dogbite'], + 'biham': ['bhima', 'biham'], + 'bihari': ['bihari', 'habiri'], + 'bike': ['bike', 'kibe'], + 'bikram': ['bikram', 'imbark'], + 'bilaan': ['albian', 'bilaan'], + 'bilaterality': ['alterability', 'bilaterality', 'relatability'], + 'bilati': ['bilati', 'tibial'], + 'bilby': ['bilby', 'libby'], + 'bildar': ['bildar', 'bridal', 'ribald'], + 'bilge': 
['bilge', 'gibel'], + 'biliate': ['biliate', 'tibiale'], + 'bilinear': ['bilinear', 'liberian'], + 'billa': ['balli', 'billa'], + 'billboard': ['billboard', 'broadbill'], + 'biller': ['biller', 'rebill'], + 'billeter': ['billeter', 'rebillet'], + 'billie': ['belili', 'billie'], + 'bilo': ['bilo', 'boil'], + 'bilobated': ['bilobated', 'bobtailed'], + 'biltong': ['biltong', 'bolting'], + 'bim': ['bim', 'mib'], + 'bimane': ['benami', 'bimane'], + 'bimodality': ['bimodality', 'myliobatid'], + 'bimotors': ['bimotors', 'robotism'], + 'bin': ['bin', 'nib'], + 'binal': ['albin', 'binal', 'blain'], + 'binary': ['binary', 'brainy'], + 'binder': ['binder', 'inbred', 'rebind'], + 'bindwood': ['bindwood', 'woodbind'], + 'bine': ['beni', 'bien', 'bine', 'inbe'], + 'binge': ['begin', 'being', 'binge'], + 'bino': ['bino', 'bion', 'boni'], + 'binocular': ['binocular', 'caliburno', 'colubrina'], + 'binomial': ['binomial', 'mobilian'], + 'binuclear': ['binuclear', 'incurable'], + 'biod': ['biod', 'boid'], + 'bion': ['bino', 'bion', 'boni'], + 'biopsychological': ['biopsychological', 'psychobiological'], + 'biopsychology': ['biopsychology', 'psychobiology'], + 'bioral': ['bailor', 'bioral'], + 'biorgan': ['biorgan', 'grobian'], + 'bios': ['bios', 'bois'], + 'biosociological': ['biosociological', 'sociobiological'], + 'biota': ['biota', 'ibota'], + 'biotics': ['biotics', 'cobitis'], + 'bipartile': ['bipartile', 'pretibial'], + 'biped': ['bedip', 'biped'], + 'bipedal': ['bipedal', 'piebald'], + 'bipersonal': ['bipersonal', 'prisonable'], + 'bipolar': ['bipolar', 'parboil'], + 'biracial': ['biracial', 'cibarial'], + 'birchen': ['birchen', 'brichen'], + 'bird': ['bird', 'drib'], + 'birdeen': ['birdeen', 'inbreed'], + 'birdlet': ['birdlet', 'driblet'], + 'birdling': ['birdling', 'bridling', 'lingbird'], + 'birdman': ['birdman', 'manbird'], + 'birdseed': ['birdseed', 'seedbird'], + 'birdstone': ['birdstone', 'stonebird'], + 'bireme': ['bemire', 'bireme'], + 'biretta': ['biretta', 'brattie', 'ratbite'], + 'birle': ['birle', 'liber'], + 'birma': ['abrim', 'birma'], + 'birn': ['birn', 'brin'], + 'birny': ['birny', 'briny'], + 'biron': ['biron', 'inorb', 'robin'], + 'birse': ['birse', 'ribes'], + 'birsle': ['bersil', 'birsle'], + 'birth': ['birth', 'brith'], + 'bis': ['bis', 'sib'], + 'bisalt': ['baltis', 'bisalt'], + 'bisaltae': ['bisaltae', 'satiable'], + 'bisharin': ['bairnish', 'bisharin'], + 'bistate': ['bastite', 'batiste', 'bistate'], + 'bister': ['bestir', 'bister'], + 'bistered': ['bestride', 'bistered'], + 'bisti': ['bisti', 'bitis'], + 'bisulcate': ['baculites', 'bisulcate'], + 'bit': ['bit', 'tib'], + 'bitangent': ['battening', 'bitangent'], + 'bitemporal': ['bitemporal', 'importable'], + 'biter': ['biter', 'tribe'], + 'bitis': ['bisti', 'bitis'], + 'bito': ['bito', 'obit'], + 'bitonality': ['bitonality', 'notability'], + 'bittern': ['bittern', 'britten'], + 'bitumed': ['bitumed', 'budtime'], + 'biurate': ['abiuret', 'aubrite', 'biurate', 'rubiate'], + 'biwa': ['biwa', 'wabi'], + 'bizarre': ['bizarre', 'brazier'], + 'bizet': ['bizet', 'zibet'], + 'blabber': ['babbler', 'blabber', 'brabble'], + 'blackacre': ['blackacre', 'crackable'], + 'blad': ['bald', 'blad'], + 'blader': ['balder', 'bardel', 'bedlar', 'bedral', 'belard', 'blader'], + 'bladewise': ['basilweed', 'bladewise'], + 'bladish': ['baldish', 'bladish'], + 'blady': ['badly', 'baldy', 'blady'], + 'blae': ['abel', 'able', 'albe', 'bale', 'beal', 'bela', 'blae'], + 'blaeberry': ['blaeberry', 'bleaberry'], + 'blaeness': ['ableness', 'blaeness', 
'sensable'], + 'blain': ['albin', 'binal', 'blain'], + 'blaine': ['baline', 'blaine'], + 'blair': ['blair', 'brail', 'libra'], + 'blake': ['blake', 'bleak', 'kabel'], + 'blame': ['amble', 'belam', 'blame', 'mabel'], + 'blamed': ['bedlam', 'beldam', 'blamed'], + 'blamer': ['ambler', 'blamer', 'lamber', 'marble', 'ramble'], + 'blaming': ['ambling', 'blaming'], + 'blamingly': ['amblingly', 'blamingly'], + 'blanca': ['bancal', 'blanca'], + 'blare': ['abler', 'baler', 'belar', 'blare', 'blear'], + 'blarina': ['blarina', 'branial'], + 'blarney': ['blarney', 'renably'], + 'blas': ['blas', 'slab'], + 'blase': ['blase', 'sable'], + 'blasia': ['basial', 'blasia'], + 'blastema': ['blastema', 'lambaste'], + 'blastemic': ['blastemic', 'cembalist'], + 'blaster': ['blaster', 'reblast', 'stabler'], + 'blastie': ['astilbe', 'bestial', 'blastie', 'stabile'], + 'blasting': ['blasting', 'stabling'], + 'blastoderm': ['blastoderm', 'dermoblast'], + 'blastogenic': ['blastogenic', 'genoblastic'], + 'blastomeric': ['blastomeric', 'meroblastic'], + 'blastomycetic': ['blastomycetic', 'cytoblastemic'], + 'blastomycetous': ['blastomycetous', 'cytoblastemous'], + 'blasty': ['blasty', 'stably'], + 'blat': ['balt', 'blat'], + 'blate': ['batel', 'blate', 'bleat', 'table'], + 'blather': ['blather', 'halbert'], + 'blatter': ['battler', 'blatter', 'brattle'], + 'blaver': ['barvel', 'blaver', 'verbal'], + 'blaw': ['bawl', 'blaw'], + 'blay': ['ably', 'blay', 'yalb'], + 'blazoner': ['albronze', 'blazoner'], + 'bleaberry': ['blaeberry', 'bleaberry'], + 'bleach': ['bachel', 'bleach'], + 'bleacher': ['bleacher', 'rebleach'], + 'bleak': ['blake', 'bleak', 'kabel'], + 'bleaky': ['bleaky', 'kabyle'], + 'blear': ['abler', 'baler', 'belar', 'blare', 'blear'], + 'bleared': ['bleared', 'reblade'], + 'bleary': ['barely', 'barley', 'bleary'], + 'bleat': ['batel', 'blate', 'bleat', 'table'], + 'bleater': ['bearlet', 'bleater', 'elberta', 'retable'], + 'bleating': ['bleating', 'tangible'], + 'bleaty': ['baetyl', 'baylet', 'bleaty'], + 'bleed': ['bedel', 'bleed'], + 'bleery': ['berley', 'bleery'], + 'blender': ['blender', 'reblend'], + 'blendure': ['blendure', 'rebundle'], + 'blennoid': ['blennoid', 'blondine'], + 'blennoma': ['blennoma', 'nobleman'], + 'bleo': ['bleo', 'bole', 'lobe'], + 'blepharocera': ['blepharocera', 'reproachable'], + 'blessed': ['bedless', 'blessed'], + 'blesser': ['blesser', 'rebless'], + 'blessing': ['beslings', 'blessing', 'glibness'], + 'blet': ['belt', 'blet'], + 'bletia': ['albeit', + 'albite', + 'baltei', + 'belait', + 'betail', + 'bletia', + 'libate'], + 'bletilla': ['belltail', 'bletilla', 'tillable'], + 'blibe': ['bible', 'blibe'], + 'blighter': ['blighter', 'therblig'], + 'blimy': ['blimy', 'limby'], + 'blister': ['blister', 'bristle'], + 'blisterwort': ['blisterwort', 'bristlewort'], + 'blitter': ['blitter', 'brittle', 'triblet'], + 'blo': ['blo', 'lob'], + 'bloated': ['bloated', 'lobated'], + 'bloater': ['alberto', 'bloater', 'latrobe'], + 'bloating': ['bloating', 'obligant'], + 'blocker': ['blocker', 'brockle', 'reblock'], + 'blonde': ['blonde', 'bolden'], + 'blondine': ['blennoid', 'blondine'], + 'blood': ['blood', 'boldo'], + 'bloodleaf': ['bloodleaf', 'floodable'], + 'bloomer': ['bloomer', 'rebloom'], + 'bloomy': ['bloomy', 'lomboy'], + 'blore': ['blore', 'roble'], + 'blosmy': ['blosmy', 'symbol'], + 'blot': ['blot', 'bolt'], + 'blotless': ['blotless', 'boltless'], + 'blotter': ['blotter', 'bottler'], + 'blotting': ['blotting', 'bottling'], + 'blouse': ['besoul', 'blouse', 'obelus'], + 'blow': ['blow', 
'bowl'], + 'blowback': ['backblow', 'blowback'], + 'blower': ['blower', 'bowler', 'reblow', 'worble'], + 'blowfly': ['blowfly', 'flyblow'], + 'blowing': ['blowing', 'bowling'], + 'blowout': ['blowout', 'outblow', 'outbowl'], + 'blowup': ['blowup', 'upblow'], + 'blowy': ['blowy', 'bowly'], + 'blub': ['blub', 'bulb'], + 'blubber': ['blubber', 'bubbler'], + 'blue': ['blue', 'lube'], + 'bluegill': ['bluegill', 'gullible'], + 'bluenose': ['bluenose', 'nebulose'], + 'bluer': ['bluer', 'brule', 'burel', 'ruble'], + 'blues': ['blues', 'bulse'], + 'bluffer': ['bluffer', 'rebluff'], + 'bluishness': ['bluishness', 'blushiness'], + 'bluism': ['bluism', 'limbus'], + 'blumea': ['bemaul', 'blumea'], + 'blunder': ['blunder', 'bundler'], + 'blunderer': ['blunderer', 'reblunder'], + 'blunge': ['blunge', 'bungle'], + 'blunger': ['blunger', 'bungler'], + 'bluntie': ['betulin', 'bluntie'], + 'blur': ['blur', 'burl'], + 'blushiness': ['bluishness', 'blushiness'], + 'bluster': ['bluster', 'brustle', 'bustler'], + 'boa': ['abo', 'boa'], + 'boar': ['boar', 'bora'], + 'board': ['abord', 'bardo', 'board', 'broad', 'dobra', 'dorab'], + 'boarder': ['arbored', 'boarder', 'reboard'], + 'boardly': ['boardly', 'broadly'], + 'boardy': ['boardy', 'boyard', 'byroad'], + 'boast': ['basto', 'boast', 'sabot'], + 'boaster': ['barotse', 'boaster', 'reboast', 'sorbate'], + 'boasting': ['boasting', 'bostangi'], + 'boat': ['boat', 'bota', 'toba'], + 'boater': ['boater', 'borate', 'rebato'], + 'boathouse': ['boathouse', 'houseboat'], + 'bobac': ['bobac', 'cabob'], + 'bobfly': ['bobfly', 'flobby'], + 'bobo': ['bobo', 'boob'], + 'bobtailed': ['bilobated', 'bobtailed'], + 'bocardo': ['bocardo', 'cordoba'], + 'boccale': ['boccale', 'cabocle'], + 'bocher': ['bocher', 'broche'], + 'bocking': ['bocking', 'kingcob'], + 'bod': ['bod', 'dob'], + 'bode': ['bode', 'dobe'], + 'boden': ['boden', 'boned'], + 'boder': ['boder', 'orbed'], + 'bodewash': ['beshadow', 'bodewash'], + 'bodge': ['bedog', 'bodge'], + 'bodhi': ['bodhi', 'dhobi'], + 'bodice': ['bodice', 'ceboid'], + 'bodier': ['bodier', 'boride', 'brodie'], + 'bodle': ['bodle', 'boled', 'lobed'], + 'bodo': ['bodo', 'bood', 'doob'], + 'body': ['body', 'boyd', 'doby'], + 'boer': ['boer', 'bore', 'robe'], + 'boerdom': ['bedroom', 'boerdom', 'boredom'], + 'boethian': ['boethian', 'nebaioth'], + 'bog': ['bog', 'gob'], + 'boga': ['bago', 'boga'], + 'bogan': ['bogan', 'goban'], + 'bogeyman': ['bogeyman', 'moneybag'], + 'boggler': ['boggler', 'broggle'], + 'boglander': ['boglander', 'longbeard'], + 'bogle': ['bogle', 'globe'], + 'boglet': ['boglet', 'goblet'], + 'bogo': ['bogo', 'gobo'], + 'bogue': ['bogue', 'bouge'], + 'bogum': ['bogum', 'gumbo'], + 'bogy': ['bogy', 'bygo', 'goby'], + 'bohea': ['bahoe', 'bohea', 'obeah'], + 'boho': ['boho', 'hobo'], + 'bohor': ['bohor', 'rohob'], + 'boid': ['biod', 'boid'], + 'boil': ['bilo', 'boil'], + 'boiled': ['beloid', 'boiled', 'bolide'], + 'boiler': ['boiler', 'reboil'], + 'boilover': ['boilover', 'overboil'], + 'bois': ['bios', 'bois'], + 'bojo': ['bojo', 'jobo'], + 'bolar': ['balor', 'bolar', 'boral', 'labor', 'lobar'], + 'bolden': ['blonde', 'bolden'], + 'bolderian': ['bolderian', 'ordinable'], + 'boldine': ['belonid', 'boldine'], + 'boldness': ['boldness', 'bondless'], + 'boldo': ['blood', 'boldo'], + 'bole': ['bleo', 'bole', 'lobe'], + 'boled': ['bodle', 'boled', 'lobed'], + 'bolelia': ['bolelia', 'lobelia', 'obelial'], + 'bolide': ['beloid', 'boiled', 'bolide'], + 'boller': ['beroll', 'boller'], + 'bolo': ['bolo', 'bool', 'lobo', 'obol'], + 'bolster': 
['bolster', 'lobster'], + 'bolt': ['blot', 'bolt'], + 'boltage': ['boltage', 'globate'], + 'bolter': ['bolter', 'orblet', 'reblot', 'rebolt'], + 'bolthead': ['bolthead', 'theobald'], + 'bolting': ['biltong', 'bolting'], + 'boltless': ['blotless', 'boltless'], + 'boltonia': ['boltonia', 'lobation', 'oblation'], + 'bom': ['bom', 'mob'], + 'boma': ['ambo', 'boma'], + 'bombable': ['bombable', 'mobbable'], + 'bombacaceae': ['bombacaceae', 'cabombaceae'], + 'bomber': ['bomber', 'mobber'], + 'bon': ['bon', 'nob'], + 'bonaci': ['bianco', 'bonaci'], + 'bonair': ['bonair', 'borani'], + 'bondage': ['bondage', 'dogbane'], + 'bondar': ['bandor', 'bondar', 'roband'], + 'bondless': ['boldness', 'bondless'], + 'bone': ['beno', 'bone', 'ebon'], + 'boned': ['boden', 'boned'], + 'bonefish': ['bonefish', 'fishbone'], + 'boneless': ['boneless', 'noblesse'], + 'bonellia': ['baillone', 'bonellia'], + 'boner': ['boner', 'borne'], + 'boney': ['boney', 'ebony'], + 'boni': ['bino', 'bion', 'boni'], + 'bonitary': ['bonitary', 'trainboy'], + 'bonk': ['bonk', 'knob'], + 'bonnet': ['benton', 'bonnet'], + 'bonsai': ['basion', 'bonsai', 'sabino'], + 'bonus': ['bonus', 'bosun'], + 'bony': ['bony', 'byon'], + 'bonze': ['benzo', 'bonze'], + 'bonzer': ['bonzer', 'bronze'], + 'boob': ['bobo', 'boob'], + 'bood': ['bodo', 'bood', 'doob'], + 'booger': ['booger', 'goober'], + 'bookcase': ['bookcase', 'casebook'], + 'booker': ['booker', 'brooke', 'rebook'], + 'bookland': ['bookland', 'landbook'], + 'bookshop': ['bookshop', 'shopbook'], + 'bookward': ['bookward', 'woodbark'], + 'bookwork': ['bookwork', 'workbook'], + 'bool': ['bolo', 'bool', 'lobo', 'obol'], + 'booly': ['booly', 'looby'], + 'boomingly': ['boomingly', 'myoglobin'], + 'boopis': ['boopis', 'obispo'], + 'boor': ['boor', 'boro', 'broo'], + 'boort': ['boort', 'robot'], + 'boost': ['boost', 'boots'], + 'bootes': ['besoot', 'bootes'], + 'boother': ['boother', 'theorbo'], + 'boots': ['boost', 'boots'], + 'bop': ['bop', 'pob'], + 'bor': ['bor', 'orb', 'rob'], + 'bora': ['boar', 'bora'], + 'borable': ['belabor', 'borable'], + 'boracic': ['boracic', 'braccio'], + 'boral': ['balor', 'bolar', 'boral', 'labor', 'lobar'], + 'boran': ['baron', 'boran'], + 'borani': ['bonair', 'borani'], + 'borate': ['boater', 'borate', 'rebato'], + 'bord': ['bord', 'brod'], + 'bordel': ['belord', 'bordel', 'rebold'], + 'bordello': ['bordello', 'doorbell'], + 'border': ['border', 'roberd'], + 'borderer': ['borderer', 'broderer'], + 'bordure': ['bordure', 'bourder'], + 'bore': ['boer', 'bore', 'robe'], + 'boredom': ['bedroom', 'boerdom', 'boredom'], + 'boree': ['beroe', 'boree'], + 'boreen': ['boreen', 'enrobe', 'neebor', 'rebone'], + 'boreiad': ['beroida', 'boreiad'], + 'boreism': ['boreism', 'semiorb'], + 'borer': ['borer', 'rerob', 'rober'], + 'boreus': ['besour', 'boreus', 'bourse', 'bouser'], + 'borg': ['borg', 'brog', 'gorb'], + 'boric': ['boric', 'cribo', 'orbic'], + 'boride': ['bodier', 'boride', 'brodie'], + 'boring': ['boring', 'robing'], + 'boringly': ['boringly', 'goblinry'], + 'borlase': ['borlase', 'labrose', 'rosabel'], + 'borne': ['boner', 'borne'], + 'borneo': ['borneo', 'oberon'], + 'bornite': ['bornite', 'robinet'], + 'boro': ['boor', 'boro', 'broo'], + 'borocaine': ['borocaine', 'coenobiar'], + 'borofluohydric': ['borofluohydric', 'hydrofluoboric'], + 'borolanite': ['bertolonia', 'borolanite'], + 'boron': ['boron', 'broon'], + 'boronic': ['boronic', 'cobiron'], + 'borrower': ['borrower', 'reborrow'], + 'borscht': ['borscht', 'bortsch'], + 'bort': ['bort', 'brot'], + 'bortsch': 
['borscht', 'bortsch'], + 'bos': ['bos', 'sob'], + 'bosc': ['bosc', 'scob'], + 'boser': ['boser', 'brose', 'sober'], + 'bosn': ['bosn', 'nobs', 'snob'], + 'bosselation': ['bosselation', 'eosinoblast'], + 'bosset': ['betoss', 'bosset'], + 'bostangi': ['boasting', 'bostangi'], + 'bostanji': ['banjoist', 'bostanji'], + 'bosun': ['bonus', 'bosun'], + 'bota': ['boat', 'bota', 'toba'], + 'botanical': ['botanical', 'catabolin'], + 'botanophilist': ['botanophilist', 'philobotanist'], + 'bote': ['bote', 'tobe'], + 'botein': ['botein', 'tobine'], + 'both': ['both', 'thob'], + 'bottler': ['blotter', 'bottler'], + 'bottling': ['blotting', 'bottling'], + 'bouge': ['bogue', 'bouge'], + 'bouget': ['bouget', 'outbeg'], + 'bouk': ['bouk', 'kobu'], + 'boulder': ['boulder', 'doubler'], + 'bouldering': ['bouldering', 'redoubling'], + 'boulter': ['boulter', 'trouble'], + 'bounden': ['bounden', 'unboned'], + 'bounder': ['beround', 'bounder', 'rebound', 'unbored', 'unorbed', 'unrobed'], + 'bounding': ['bounding', 'unboding'], + 'bourder': ['bordure', 'bourder'], + 'bourn': ['bourn', 'bruno'], + 'bourse': ['besour', 'boreus', 'bourse', 'bouser'], + 'bouser': ['besour', 'boreus', 'bourse', 'bouser'], + 'bousy': ['bousy', 'byous'], + 'bow': ['bow', 'wob'], + 'bowel': ['below', 'bowel', 'elbow'], + 'boweled': ['boweled', 'elbowed'], + 'bowels': ['beslow', 'bowels'], + 'bowery': ['bowery', 'bowyer', 'owerby'], + 'bowie': ['bowie', 'woibe'], + 'bowker': ['bework', 'bowker'], + 'bowl': ['blow', 'bowl'], + 'bowla': ['ablow', 'balow', 'bowla'], + 'bowler': ['blower', 'bowler', 'reblow', 'worble'], + 'bowling': ['blowing', 'bowling'], + 'bowly': ['blowy', 'bowly'], + 'bowmaker': ['beamwork', 'bowmaker'], + 'bowyer': ['bowery', 'bowyer', 'owerby'], + 'boxer': ['boxer', 'rebox'], + 'boxwork': ['boxwork', 'workbox'], + 'boyang': ['boyang', 'yagnob'], + 'boyard': ['boardy', 'boyard', 'byroad'], + 'boyd': ['body', 'boyd', 'doby'], + 'boyship': ['boyship', 'shipboy'], + 'bozo': ['bozo', 'zobo'], + 'bozze': ['bezzo', 'bozze'], + 'bra': ['bar', 'bra', 'rab'], + 'brab': ['barb', 'brab'], + 'brabble': ['babbler', 'blabber', 'brabble'], + 'braca': ['acrab', 'braca'], + 'braccio': ['boracic', 'braccio'], + 'brace': ['acerb', 'brace', 'caber'], + 'braced': ['becard', 'braced'], + 'braceleted': ['braceleted', 'celebrated'], + 'bracer': ['bracer', 'craber'], + 'braces': ['braces', 'scrabe'], + 'brachet': ['batcher', 'berchta', 'brachet'], + 'brachiata': ['batrachia', 'brachiata'], + 'brachiofacial': ['brachiofacial', 'faciobrachial'], + 'brachiopode': ['brachiopode', 'cardiophobe'], + 'bracon': ['bracon', 'carbon', 'corban'], + 'bractea': ['abreact', 'bractea', 'cabaret'], + 'bracteal': ['bracteal', 'cartable'], + 'bracteiform': ['bacteriform', 'bracteiform'], + 'bracteose': ['bracteose', 'obsecrate'], + 'brad': ['bard', 'brad', 'drab'], + 'bradenhead': ['barehanded', 'bradenhead', 'headbander'], + 'brae': ['bare', 'bear', 'brae'], + 'braehead': ['barehead', 'braehead'], + 'brag': ['brag', 'garb', 'grab'], + 'bragi': ['bragi', 'girba'], + 'bragless': ['bragless', 'garbless'], + 'brahmi': ['brahmi', 'mihrab'], + 'brahui': ['brahui', 'habiru'], + 'braid': ['barid', 'bidar', 'braid', 'rabid'], + 'braider': ['braider', 'rebraid'], + 'brail': ['blair', 'brail', 'libra'], + 'braille': ['braille', 'liberal'], + 'brain': ['abrin', 'bairn', 'brain', 'brian', 'rabin'], + 'brainache': ['brainache', 'branchiae'], + 'brainge': ['bearing', 'begrain', 'brainge', 'rigbane'], + 'brainwater': ['brainwater', 'waterbrain'], + 'brainy': ['binary', 
'brainy'], + 'braird': ['braird', 'briard'], + 'brairo': ['barrio', 'brairo'], + 'braise': ['braise', 'rabies', 'rebias'], + 'brake': ['baker', 'brake', 'break'], + 'brakeage': ['brakeage', 'breakage'], + 'brakeless': ['bakerless', 'brakeless', 'breakless'], + 'braker': ['barker', 'braker'], + 'braky': ['barky', 'braky'], + 'bram': ['barm', 'bram'], + 'brambrack': ['barmbrack', 'brambrack'], + 'bramia': ['bairam', 'bramia'], + 'bran': ['barn', 'bran'], + 'brancher': ['brancher', 'rebranch'], + 'branchiae': ['brainache', 'branchiae'], + 'branchiata': ['batrachian', 'branchiata'], + 'branchiopoda': ['branchiopoda', 'podobranchia'], + 'brander': ['bernard', 'brander', 'rebrand'], + 'brandi': ['brandi', 'riband'], + 'brandisher': ['brandisher', 'rebrandish'], + 'branial': ['blarina', 'branial'], + 'brankie': ['brankie', 'inbreak'], + 'brash': ['brash', 'shrab'], + 'brasiletto': ['brasiletto', 'strobilate'], + 'brassie': ['brassie', 'rebasis'], + 'brat': ['bart', 'brat'], + 'brattie': ['biretta', 'brattie', 'ratbite'], + 'brattle': ['battler', 'blatter', 'brattle'], + 'braunite': ['braunite', 'urbanite', 'urbinate'], + 'brave': ['brave', 'breva'], + 'bravoite': ['abortive', 'bravoite'], + 'brawler': ['brawler', 'warbler'], + 'brawling': ['brawling', 'warbling'], + 'brawlingly': ['brawlingly', 'warblingly'], + 'brawly': ['brawly', 'byrlaw', 'warbly'], + 'brawned': ['benward', 'brawned'], + 'bray': ['bray', 'yarb'], + 'braza': ['braza', 'zabra'], + 'braze': ['braze', 'zebra'], + 'brazier': ['bizarre', 'brazier'], + 'bread': ['ardeb', 'beard', 'bread', 'debar'], + 'breadless': ['beardless', 'breadless'], + 'breadlessness': ['beardlessness', 'breadlessness'], + 'breadman': ['banderma', 'breadman'], + 'breadnut': ['breadnut', 'turbaned'], + 'breaghe': ['breaghe', 'herbage'], + 'break': ['baker', 'brake', 'break'], + 'breakage': ['brakeage', 'breakage'], + 'breakless': ['bakerless', 'brakeless', 'breakless'], + 'breakout': ['breakout', 'outbreak'], + 'breakover': ['breakover', 'overbreak'], + 'breakstone': ['breakstone', 'stonebreak'], + 'breakup': ['breakup', 'upbreak'], + 'breakwind': ['breakwind', 'windbreak'], + 'bream': ['amber', 'bearm', 'bemar', 'bream', 'embar'], + 'breast': ['baster', 'bestar', 'breast'], + 'breasting': ['breasting', 'brigantes'], + 'breastpin': ['breastpin', 'stepbairn'], + 'breastrail': ['arbalister', 'breastrail'], + 'breastweed': ['breastweed', 'sweetbread'], + 'breath': ['bather', 'bertha', 'breath'], + 'breathe': ['breathe', 'rebathe'], + 'breba': ['barbe', 'bebar', 'breba', 'rebab'], + 'breccia': ['acerbic', 'breccia'], + 'brecham': ['becharm', 'brecham', 'chamber'], + 'brede': ['brede', 'breed', 'rebed'], + 'bredi': ['bider', 'bredi', 'bride', 'rebid'], + 'bree': ['beer', 'bere', 'bree'], + 'breech': ['becher', 'breech'], + 'breed': ['brede', 'breed', 'rebed'], + 'breeder': ['breeder', 'rebreed'], + 'breeding': ['beringed', 'breeding'], + 'brehon': ['behorn', 'brehon'], + 'brei': ['beri', 'bier', 'brei', 'ribe'], + 'brelaw': ['bawler', 'brelaw', 'rebawl', 'warble'], + 'breme': ['breme', 'ember'], + 'bremia': ['ambier', 'bremia', 'embira'], + 'brenda': ['bander', 'brenda'], + 'brephic': ['bechirp', 'brephic'], + 'bret': ['bert', 'bret'], + 'breth': ['berth', 'breth'], + 'breva': ['brave', 'breva'], + 'breve': ['bever', 'breve'], + 'brewer': ['brewer', 'rebrew'], + 'brey': ['brey', 'byre', 'yerb'], + 'brian': ['abrin', 'bairn', 'brain', 'brian', 'rabin'], + 'briard': ['braird', 'briard'], + 'briber': ['briber', 'ribber'], + 'brichen': ['birchen', 'brichen'], + 
'brickel': ['brickel', 'brickle'], + 'bricken': ['bickern', 'bricken'], + 'brickle': ['brickel', 'brickle'], + 'bricole': ['bricole', 'corbeil', 'orbicle'], + 'bridal': ['bildar', 'bridal', 'ribald'], + 'bridale': ['bedrail', 'bridale', 'ridable'], + 'bridally': ['bridally', 'ribaldly'], + 'bride': ['bider', 'bredi', 'bride', 'rebid'], + 'bridelace': ['bridelace', 'calibered'], + 'bridge': ['begird', 'bridge'], + 'bridgeward': ['bridgeward', 'drawbridge'], + 'bridling': ['birdling', 'bridling', 'lingbird'], + 'brief': ['bifer', 'brief', 'fiber'], + 'briefless': ['briefless', 'fiberless', 'fibreless'], + 'brier': ['berri', 'brier'], + 'briered': ['berried', 'briered'], + 'brieve': ['bervie', 'brieve'], + 'brigade': ['abridge', 'brigade'], + 'brigand': ['barding', 'brigand'], + 'brigantes': ['breasting', 'brigantes'], + 'brighten': ['berthing', 'brighten'], + 'brin': ['birn', 'brin'], + 'brine': ['brine', 'enrib'], + 'bringal': ['barling', 'bringal'], + 'bringer': ['bringer', 'rebring'], + 'briny': ['birny', 'briny'], + 'bristle': ['blister', 'bristle'], + 'bristlewort': ['blisterwort', 'bristlewort'], + 'brisure': ['brisure', 'bruiser'], + 'britannia': ['antiabrin', 'britannia'], + 'brith': ['birth', 'brith'], + 'brither': ['brither', 'rebirth'], + 'britten': ['bittern', 'britten'], + 'brittle': ['blitter', 'brittle', 'triblet'], + 'broacher': ['broacher', 'rebroach'], + 'broad': ['abord', 'bardo', 'board', 'broad', 'dobra', 'dorab'], + 'broadbill': ['billboard', 'broadbill'], + 'broadcaster': ['broadcaster', 'rebroadcast'], + 'broaden': ['bandore', 'broaden'], + 'broadhead': ['broadhead', 'headboard'], + 'broadly': ['boardly', 'broadly'], + 'broadside': ['broadside', 'sideboard'], + 'broadspread': ['broadspread', 'spreadboard'], + 'broadtail': ['broadtail', 'tailboard'], + 'brochan': ['brochan', 'charbon'], + 'broche': ['bocher', 'broche'], + 'brocho': ['brocho', 'brooch'], + 'brocked': ['bedrock', 'brocked'], + 'brockle': ['blocker', 'brockle', 'reblock'], + 'brod': ['bord', 'brod'], + 'broderer': ['borderer', 'broderer'], + 'brodie': ['bodier', 'boride', 'brodie'], + 'brog': ['borg', 'brog', 'gorb'], + 'brogan': ['barong', 'brogan'], + 'broggle': ['boggler', 'broggle'], + 'brolga': ['brolga', 'gorbal'], + 'broma': ['broma', 'rambo'], + 'bromate': ['barmote', 'bromate'], + 'brome': ['brome', 'omber'], + 'brominate': ['brominate', 'tribonema'], + 'bromohydrate': ['bromohydrate', 'hydrobromate'], + 'bronze': ['bonzer', 'bronze'], + 'broo': ['boor', 'boro', 'broo'], + 'brooch': ['brocho', 'brooch'], + 'brooke': ['booker', 'brooke', 'rebook'], + 'broon': ['boron', 'broon'], + 'brose': ['boser', 'brose', 'sober'], + 'brot': ['bort', 'brot'], + 'brotan': ['barton', 'brotan'], + 'brotany': ['baryton', 'brotany'], + 'broth': ['broth', 'throb'], + 'brothelry': ['brothelry', 'brotherly'], + 'brotherly': ['brothelry', 'brotherly'], + 'browden': ['bedrown', 'browden'], + 'browner': ['browner', 'rebrown'], + 'browntail': ['browntail', 'wrainbolt'], + 'bruce': ['bruce', 'cebur', 'cuber'], + 'brucina': ['brucina', 'rubican'], + 'bruckle': ['bruckle', 'buckler'], + 'brugh': ['brugh', 'burgh'], + 'bruin': ['bruin', 'burin', 'inrub'], + 'bruiser': ['brisure', 'bruiser'], + 'bruke': ['bruke', 'burke'], + 'brule': ['bluer', 'brule', 'burel', 'ruble'], + 'brulee': ['brulee', 'burele', 'reblue'], + 'brumal': ['brumal', 'labrum', 'lumbar', 'umbral'], + 'brumalia': ['albarium', 'brumalia'], + 'brume': ['brume', 'umber'], + 'brumous': ['brumous', 'umbrous'], + 'brunellia': ['brunellia', 'unliberal'], + 'brunet': 
['brunet', 'bunter', 'burnet'], + 'bruno': ['bourn', 'bruno'], + 'brunt': ['brunt', 'burnt'], + 'brush': ['brush', 'shrub'], + 'brushed': ['brushed', 'subherd'], + 'brusher': ['brusher', 'rebrush'], + 'brushland': ['brushland', 'shrubland'], + 'brushless': ['brushless', 'shrubless'], + 'brushlet': ['brushlet', 'shrublet'], + 'brushlike': ['brushlike', 'shrublike'], + 'brushwood': ['brushwood', 'shrubwood'], + 'brustle': ['bluster', 'brustle', 'bustler'], + 'brut': ['brut', 'burt', 'trub', 'turb'], + 'bruta': ['bruta', 'tubar'], + 'brute': ['brute', 'buret', 'rebut', 'tuber'], + 'brutely': ['brutely', 'butlery'], + 'bryan': ['barny', 'bryan'], + 'bryanite': ['barytine', 'bryanite'], + 'bryce': ['becry', 'bryce'], + 'bual': ['balu', 'baul', 'bual', 'luba'], + 'buba': ['babu', 'buba'], + 'bubal': ['babul', 'bubal'], + 'bubbler': ['blubber', 'bubbler'], + 'buccocervical': ['buccocervical', 'cervicobuccal'], + 'bucconasal': ['bucconasal', 'nasobuccal'], + 'buceros': ['bescour', 'buceros', 'obscure'], + 'buckbush': ['buckbush', 'bushbuck'], + 'bucked': ['beduck', 'bucked'], + 'buckler': ['bruckle', 'buckler'], + 'bucksaw': ['bucksaw', 'sawbuck'], + 'bucrane': ['bucrane', 'unbrace'], + 'bud': ['bud', 'dub'], + 'buda': ['baud', 'buda', 'daub'], + 'budder': ['budder', 'redbud'], + 'budger': ['bedrug', 'budger'], + 'budgeter': ['budgeter', 'rebudget'], + 'budtime': ['bitumed', 'budtime'], + 'buffer': ['buffer', 'rebuff'], + 'buffeter': ['buffeter', 'rebuffet'], + 'bugan': ['bugan', 'bunga', 'unbag'], + 'bughouse': ['bughouse', 'housebug'], + 'bugi': ['bugi', 'guib'], + 'bugle': ['bugle', 'bulge'], + 'bugler': ['bugler', 'bulger', 'burgle'], + 'bugre': ['bugre', 'gebur'], + 'builder': ['builder', 'rebuild'], + 'buildup': ['buildup', 'upbuild'], + 'buirdly': ['buirdly', 'ludibry'], + 'bulanda': ['balunda', 'bulanda'], + 'bulb': ['blub', 'bulb'], + 'bulgarian': ['biangular', 'bulgarian'], + 'bulge': ['bugle', 'bulge'], + 'bulger': ['bugler', 'bulger', 'burgle'], + 'bulimic': ['bulimic', 'umbilic'], + 'bulimiform': ['bulimiform', 'umbiliform'], + 'bulker': ['bulker', 'rebulk'], + 'bulla': ['bulla', 'lulab'], + 'bullace': ['bullace', 'cueball'], + 'bulletin': ['bulletin', 'unbillet'], + 'bullfeast': ['bullfeast', 'stableful'], + 'bulse': ['blues', 'bulse'], + 'bulter': ['bulter', 'burlet', 'butler'], + 'bummler': ['bummler', 'mumbler'], + 'bun': ['bun', 'nub'], + 'buna': ['baun', 'buna', 'nabu', 'nuba'], + 'buncal': ['buncal', 'lucban'], + 'buncher': ['buncher', 'rebunch'], + 'bunder': ['bunder', 'burden', 'burned', 'unbred'], + 'bundle': ['bundle', 'unbled'], + 'bundler': ['blunder', 'bundler'], + 'bundu': ['bundu', 'unbud', 'undub'], + 'bunga': ['bugan', 'bunga', 'unbag'], + 'bungle': ['blunge', 'bungle'], + 'bungler': ['blunger', 'bungler'], + 'bungo': ['bungo', 'unbog'], + 'bunk': ['bunk', 'knub'], + 'bunter': ['brunet', 'bunter', 'burnet'], + 'bunty': ['bunty', 'butyn'], + 'bunya': ['bunya', 'unbay'], + 'bur': ['bur', 'rub'], + 'buran': ['buran', 'unbar', 'urban'], + 'burble': ['burble', 'lubber', 'rubble'], + 'burbler': ['burbler', 'rubbler'], + 'burbly': ['burbly', 'rubbly'], + 'burd': ['burd', 'drub'], + 'burdalone': ['burdalone', 'unlabored'], + 'burden': ['bunder', 'burden', 'burned', 'unbred'], + 'burdener': ['burdener', 'reburden'], + 'burdie': ['burdie', 'buried', 'rubied'], + 'bure': ['bure', 'reub', 'rube'], + 'burel': ['bluer', 'brule', 'burel', 'ruble'], + 'burele': ['brulee', 'burele', 'reblue'], + 'buret': ['brute', 'buret', 'rebut', 'tuber'], + 'burfish': ['burfish', 'furbish'], + 
'burg': ['burg', 'grub'], + 'burgh': ['brugh', 'burgh'], + 'burgle': ['bugler', 'bulger', 'burgle'], + 'burian': ['burian', 'urbian'], + 'buried': ['burdie', 'buried', 'rubied'], + 'burin': ['bruin', 'burin', 'inrub'], + 'burke': ['bruke', 'burke'], + 'burl': ['blur', 'burl'], + 'burler': ['burler', 'burrel'], + 'burlet': ['bulter', 'burlet', 'butler'], + 'burletta': ['burletta', 'rebuttal'], + 'burmite': ['burmite', 'imbrute', 'terbium'], + 'burned': ['bunder', 'burden', 'burned', 'unbred'], + 'burner': ['burner', 'reburn'], + 'burnet': ['brunet', 'bunter', 'burnet'], + 'burnfire': ['burnfire', 'fireburn'], + 'burnie': ['burnie', 'rubine'], + 'burnisher': ['burnisher', 'reburnish'], + 'burnout': ['burnout', 'outburn'], + 'burnover': ['burnover', 'overburn'], + 'burnsides': ['burnsides', 'sideburns'], + 'burnt': ['brunt', 'burnt'], + 'burny': ['burny', 'runby'], + 'buro': ['buro', 'roub'], + 'burrel': ['burler', 'burrel'], + 'burro': ['burro', 'robur', 'rubor'], + 'bursa': ['abrus', 'bursa', 'subra'], + 'bursal': ['bursal', 'labrus'], + 'bursate': ['bursate', 'surbate'], + 'burse': ['burse', 'rebus', 'suber'], + 'burst': ['burst', 'strub'], + 'burster': ['burster', 'reburst'], + 'burt': ['brut', 'burt', 'trub', 'turb'], + 'burut': ['burut', 'trubu'], + 'bury': ['bury', 'ruby'], + 'bus': ['bus', 'sub'], + 'buscarle': ['arbuscle', 'buscarle'], + 'bushbuck': ['buckbush', 'bushbuck'], + 'busher': ['busher', 'rebush'], + 'bushwood': ['bushwood', 'woodbush'], + 'busied': ['busied', 'subdie'], + 'busked': ['bedusk', 'busked'], + 'busman': ['busman', 'subman'], + 'bust': ['bust', 'stub'], + 'busted': ['bedust', 'bestud', 'busted'], + 'buster': ['berust', 'buster', 'stuber'], + 'bustic': ['bustic', 'cubist'], + 'bustle': ['bustle', 'sublet', 'subtle'], + 'bustler': ['bluster', 'brustle', 'bustler'], + 'but': ['but', 'tub'], + 'bute': ['bute', 'tebu', 'tube'], + 'butea': ['butea', 'taube', 'tubae'], + 'butein': ['butein', 'butine', 'intube'], + 'butic': ['butic', 'cubit'], + 'butine': ['butein', 'butine', 'intube'], + 'butler': ['bulter', 'burlet', 'butler'], + 'butleress': ['butleress', 'tuberless'], + 'butlery': ['brutely', 'butlery'], + 'buttle': ['buttle', 'tublet'], + 'buttoner': ['buttoner', 'rebutton'], + 'butyn': ['bunty', 'butyn'], + 'buyer': ['buyer', 'rebuy'], + 'bye': ['bey', 'bye'], + 'byeman': ['byeman', 'byname'], + 'byerite': ['byerite', 'ebriety'], + 'bygo': ['bogy', 'bygo', 'goby'], + 'byname': ['byeman', 'byname'], + 'byon': ['bony', 'byon'], + 'byous': ['bousy', 'byous'], + 'byre': ['brey', 'byre', 'yerb'], + 'byrlaw': ['brawly', 'byrlaw', 'warbly'], + 'byroad': ['boardy', 'boyard', 'byroad'], + 'cab': ['bac', 'cab'], + 'caba': ['abac', 'caba'], + 'cabaan': ['cabaan', 'cabana', 'canaba'], + 'cabala': ['cabala', 'calaba'], + 'cabaletta': ['ablactate', 'cabaletta'], + 'cabalism': ['balsamic', 'cabalism'], + 'cabalist': ['basaltic', 'cabalist'], + 'caballer': ['barcella', 'caballer'], + 'caban': ['banca', 'caban'], + 'cabana': ['cabaan', 'cabana', 'canaba'], + 'cabaret': ['abreact', 'bractea', 'cabaret'], + 'cabbler': ['cabbler', 'clabber'], + 'caber': ['acerb', 'brace', 'caber'], + 'cabio': ['baioc', 'cabio', 'cobia'], + 'cabiri': ['cabiri', 'caribi'], + 'cabirian': ['arabinic', 'cabirian', 'carabini', 'cibarian'], + 'cable': ['cable', 'caleb'], + 'cabled': ['beclad', 'cabled'], + 'cabob': ['bobac', 'cabob'], + 'cabocle': ['boccale', 'cabocle'], + 'cabombaceae': ['bombacaceae', 'cabombaceae'], + 'cabrilla': ['bacillar', 'cabrilla'], + 'caca': ['acca', 'caca'], + 'cachet': 
['cachet', 'chacte'], + 'cachou': ['cachou', 'caucho'], + 'cackler': ['cackler', 'clacker', 'crackle'], + 'cacodaemonic': ['cacodaemonic', 'cacodemoniac'], + 'cacodemoniac': ['cacodaemonic', 'cacodemoniac'], + 'cacomistle': ['cacomistle', 'cosmetical'], + 'cacoxenite': ['cacoxenite', 'excecation'], + 'cactaceae': ['cactaceae', 'taccaceae'], + 'cactaceous': ['cactaceous', 'taccaceous'], + 'cacti': ['cacti', 'ticca'], + 'cactoid': ['cactoid', 'octadic'], + 'caddice': ['caddice', 'decadic'], + 'caddie': ['caddie', 'eddaic'], + 'cade': ['cade', 'dace', 'ecad'], + 'cadent': ['cadent', 'canted', 'decant'], + 'cadential': ['cadential', 'dancalite'], + 'cader': ['acred', 'cader', 'cadre', 'cedar'], + 'cadet': ['cadet', 'ectad'], + 'cadge': ['cadge', 'caged'], + 'cadger': ['cadger', 'cradge'], + 'cadi': ['acid', 'cadi', 'caid'], + 'cadinene': ['cadinene', 'decennia', 'enneadic'], + 'cadmia': ['adamic', 'cadmia'], + 'cados': ['cados', 'scoad'], + 'cadre': ['acred', 'cader', 'cadre', 'cedar'], + 'cadua': ['cadua', 'cauda'], + 'caduac': ['caduac', 'caduca'], + 'caduca': ['caduac', 'caduca'], + 'cadus': ['cadus', 'dacus'], + 'caeciliae': ['caeciliae', 'ilicaceae'], + 'caedmonian': ['caedmonian', 'macedonian'], + 'caedmonic': ['caedmonic', 'macedonic'], + 'caelum': ['almuce', 'caelum', 'macule'], + 'caelus': ['caelus', 'caules', 'clause'], + 'caesar': ['ascare', 'caesar', 'resaca'], + 'caesarist': ['caesarist', 'staircase'], + 'caesura': ['auresca', 'caesura'], + 'caffeina': ['affiance', 'caffeina'], + 'caged': ['cadge', 'caged'], + 'cageling': ['cageling', 'glaceing'], + 'cager': ['cager', 'garce', 'grace'], + 'cahill': ['achill', 'cahill', 'chilla'], + 'cahita': ['cahita', 'ithaca'], + 'cahnite': ['cahnite', 'cathine'], + 'caid': ['acid', 'cadi', 'caid'], + 'caiman': ['amniac', 'caiman', 'maniac'], + 'caimito': ['caimito', 'comitia'], + 'cain': ['cain', 'inca'], + 'cainism': ['cainism', 'misniac'], + 'cairba': ['arabic', 'cairba'], + 'caird': ['acrid', 'caird', 'carid', 'darci', 'daric', 'dirca'], + 'cairene': ['cairene', 'cinerea'], + 'cairn': ['cairn', 'crain', 'naric'], + 'cairned': ['cairned', 'candier'], + 'cairny': ['cairny', 'riancy'], + 'cairo': ['cairo', 'oaric'], + 'caisson': ['caisson', 'cassino'], + 'cajoler': ['cajoler', 'jecoral'], + 'caker': ['acker', 'caker', 'crake', 'creak'], + 'cakey': ['ackey', 'cakey'], + 'cal': ['cal', 'lac'], + 'calaba': ['cabala', 'calaba'], + 'calamine': ['alcamine', 'analcime', 'calamine', 'camelina'], + 'calamint': ['calamint', 'claimant'], + 'calamitean': ['calamitean', 'catamenial'], + 'calander': ['calander', 'calendar'], + 'calandrinae': ['calandrinae', 'calendarian'], + 'calas': ['calas', 'casal', 'scala'], + 'calash': ['calash', 'lachsa'], + 'calathian': ['acanthial', 'calathian'], + 'calaverite': ['calaverite', 'lacerative'], + 'calcareocorneous': ['calcareocorneous', 'corneocalcareous'], + 'calcareosiliceous': ['calcareosiliceous', 'siliceocalcareous'], + 'calciner': ['calciner', 'larcenic'], + 'calculary': ['calculary', 'calycular'], + 'calculative': ['calculative', 'claviculate'], + 'calden': ['calden', 'candle', 'lanced'], + 'calean': ['anlace', 'calean'], + 'caleb': ['cable', 'caleb'], + 'caledonia': ['caledonia', 'laodicean'], + 'caledonite': ['caledonite', 'celadonite'], + 'calendar': ['calander', 'calendar'], + 'calendarial': ['calendarial', 'dalecarlian'], + 'calendarian': ['calandrinae', 'calendarian'], + 'calender': ['calender', 'encradle'], + 'calenture': ['calenture', 'crenulate'], + 'calepin': ['calepin', 'capelin', 'panicle', 'pelican', 
'pinacle'], + 'calfkill': ['calfkill', 'killcalf'], + 'caliban': ['balanic', 'caliban'], + 'caliber': ['caliber', 'calibre'], + 'calibered': ['bridelace', 'calibered'], + 'calibrate': ['bacterial', 'calibrate'], + 'calibre': ['caliber', 'calibre'], + 'caliburno': ['binocular', 'caliburno', 'colubrina'], + 'calico': ['accoil', 'calico'], + 'calidity': ['calidity', 'dialytic'], + 'caliga': ['caliga', 'cigala'], + 'calinago': ['analogic', 'calinago'], + 'calinut': ['calinut', 'lunatic'], + 'caliper': ['caliper', 'picarel', 'replica'], + 'calipers': ['calipers', 'spiracle'], + 'caliphate': ['caliphate', 'hepatical'], + 'calite': ['calite', 'laetic', 'tecali'], + 'caliver': ['caliver', 'caviler', 'claiver', 'clavier', 'valeric', 'velaric'], + 'calk': ['calk', 'lack'], + 'calker': ['calker', 'lacker', 'rackle', 'recalk', 'reckla'], + 'callboy': ['callboy', 'collyba'], + 'caller': ['caller', 'cellar', 'recall'], + 'calli': ['calli', 'lilac'], + 'calligraphy': ['calligraphy', 'graphically'], + 'calliopsis': ['calliopsis', 'lipoclasis'], + 'callisection': ['callisection', 'clinoclasite'], + 'callitype': ['callitype', 'plicately'], + 'callo': ['callo', 'colla', 'local'], + 'callosal': ['callosal', 'scallola'], + 'callose': ['callose', 'oscella'], + 'callosity': ['callosity', 'stoically'], + 'callosum': ['callosum', 'mollusca'], + 'calluna': ['calluna', 'lacunal'], + 'callus': ['callus', 'sulcal'], + 'calm': ['calm', 'clam'], + 'calmant': ['calmant', 'clamant'], + 'calmative': ['calmative', 'clamative'], + 'calmer': ['calmer', 'carmel', 'clamer', 'marcel', 'mercal'], + 'calmierer': ['calmierer', 'reclaimer'], + 'calomba': ['calomba', 'cambalo'], + 'calonectria': ['calonectria', 'ectocranial'], + 'calor': ['alcor', 'calor', 'carlo', 'carol', 'claro', 'coral'], + 'calorie': ['calorie', 'cariole'], + 'calorist': ['calorist', 'coralist'], + 'calorite': ['calorite', 'erotical', 'loricate'], + 'calorize': ['calorize', 'coalizer'], + 'calotermes': ['calotermes', 'mesorectal', 'metacresol'], + 'calotermitid': ['calotermitid', 'dilatometric'], + 'calp': ['calp', 'clap'], + 'caltha': ['caltha', 'chalta'], + 'caltrop': ['caltrop', 'proctal'], + 'calusa': ['ascula', 'calusa', 'casual', 'casula', 'causal'], + 'calvaria': ['calvaria', 'clavaria'], + 'calvary': ['calvary', 'cavalry'], + 'calve': ['calve', 'cavel', 'clave'], + 'calver': ['calver', 'carvel', 'claver'], + 'calves': ['calves', 'scavel'], + 'calycular': ['calculary', 'calycular'], + 'calyptratae': ['acalyptrate', 'calyptratae'], + 'cam': ['cam', 'mac'], + 'camaca': ['camaca', 'macaca'], + 'camail': ['amical', 'camail', 'lamaic'], + 'caman': ['caman', 'macan'], + 'camara': ['acamar', 'camara', 'maraca'], + 'cambalo': ['calomba', 'cambalo'], + 'camber': ['becram', 'camber', 'crambe'], + 'cambrel': ['cambrel', 'clamber', 'cramble'], + 'came': ['acme', 'came', 'mace'], + 'cameist': ['cameist', 'etacism', 'sematic'], + 'camel': ['camel', 'clame', 'cleam', 'macle'], + 'camelid': ['camelid', 'decimal', 'declaim', 'medical'], + 'camelina': ['alcamine', 'analcime', 'calamine', 'camelina'], + 'camelish': ['camelish', 'schalmei'], + 'camellus': ['camellus', 'sacellum'], + 'cameloid': ['cameloid', 'comedial', 'melodica'], + 'cameograph': ['cameograph', 'macrophage'], + 'camera': ['acream', 'camera', 'mareca'], + 'cameral': ['cameral', 'caramel', 'carmela', 'ceramal', 'reclama'], + 'camerate': ['camerate', 'macerate', 'racemate'], + 'camerated': ['camerated', 'demarcate'], + 'cameration': ['aeromantic', 'cameration', 'maceration', 'racemation'], + 'camerina': 
['amacrine', 'american', 'camerina', 'cinerama'], + 'camerist': ['camerist', 'ceramist', 'matrices'], + 'camion': ['camion', 'conima', 'manioc', 'monica'], + 'camisado': ['camisado', 'caodaism'], + 'camise': ['camise', 'macies'], + 'campaign': ['campaign', 'pangamic'], + 'campaigner': ['campaigner', 'recampaign'], + 'camphire': ['camphire', 'hemicarp'], + 'campine': ['campine', 'pemican'], + 'campoo': ['campoo', 'capomo'], + 'camptonite': ['camptonite', 'pentatomic'], + 'camus': ['camus', 'musca', 'scaum', 'sumac'], + 'camused': ['camused', 'muscade'], + 'canaba': ['cabaan', 'cabana', 'canaba'], + 'canadol': ['acnodal', 'canadol', 'locanda'], + 'canaille': ['alliance', 'canaille'], + 'canape': ['canape', 'panace'], + 'canari': ['acinar', + 'arnica', + 'canari', + 'carian', + 'carina', + 'crania', + 'narica'], + 'canarin': ['canarin', 'cranian'], + 'canariote': ['canariote', 'ceratonia'], + 'canary': ['canary', 'cynara'], + 'canaut': ['canaut', 'tucana'], + 'canceler': ['canceler', 'clarence', 'recancel'], + 'cancer': ['cancer', 'crance'], + 'cancerate': ['cancerate', 'reactance'], + 'canceration': ['anacreontic', 'canceration'], + 'cancri': ['cancri', 'carnic', 'cranic'], + 'cancroid': ['cancroid', 'draconic'], + 'candela': ['candela', 'decanal'], + 'candier': ['cairned', 'candier'], + 'candiru': ['candiru', 'iracund'], + 'candle': ['calden', 'candle', 'lanced'], + 'candor': ['candor', 'cardon', 'conrad'], + 'candroy': ['candroy', 'dacryon'], + 'cane': ['acne', 'cane', 'nace'], + 'canel': ['canel', 'clean', 'lance', 'lenca'], + 'canelo': ['canelo', 'colane'], + 'canephor': ['canephor', 'chaperno', 'chaperon'], + 'canephore': ['canephore', 'chaperone'], + 'canephroi': ['canephroi', 'parochine'], + 'caner': ['caner', 'crane', 'crena', 'nacre', 'rance'], + 'canful': ['canful', 'flucan'], + 'cangle': ['cangle', 'glance'], + 'cangler': ['cangler', 'glancer', 'reclang'], + 'cangue': ['cangue', 'uncage'], + 'canicola': ['canicola', 'laconica'], + 'canid': ['canid', 'cnida', 'danic'], + 'canidae': ['aidance', 'canidae'], + 'canine': ['canine', 'encina', 'neanic'], + 'canis': ['canis', 'scian'], + 'canister': ['canister', 'cestrian', 'cisterna', 'irascent'], + 'canker': ['canker', 'neckar'], + 'cankerworm': ['cankerworm', 'crownmaker'], + 'cannel': ['cannel', 'lencan'], + 'cannot': ['cannot', 'canton', 'conant', 'nonact'], + 'cannulate': ['antelucan', 'cannulate'], + 'canny': ['canny', 'nancy'], + 'canoe': ['acone', 'canoe', 'ocean'], + 'canoeing': ['anogenic', 'canoeing'], + 'canoeist': ['canoeist', 'cotesian'], + 'canon': ['ancon', 'canon'], + 'canonist': ['canonist', 'sanction', 'sonantic'], + 'canoodler': ['canoodler', 'coronaled'], + 'canroy': ['canroy', 'crayon', 'cyrano', 'nyroca'], + 'canso': ['ascon', 'canso', 'oscan'], + 'cantabri': ['bactrian', 'cantabri'], + 'cantala': ['cantala', 'catalan', 'lantaca'], + 'cantalite': ['cantalite', 'lactinate', 'tetanical'], + 'cantara': ['cantara', 'nacarat'], + 'cantaro': ['cantaro', 'croatan'], + 'cantate': ['anteact', 'cantate'], + 'canted': ['cadent', 'canted', 'decant'], + 'canteen': ['canteen', 'centena'], + 'canter': ['canter', + 'creant', + 'cretan', + 'nectar', + 'recant', + 'tanrec', + 'trance'], + 'canterer': ['canterer', 'recanter', 'recreant', 'terrance'], + 'cantharidae': ['acidanthera', 'cantharidae'], + 'cantharis': ['anarchist', 'archsaint', 'cantharis'], + 'canthus': ['canthus', 'staunch'], + 'cantico': ['cantico', 'catonic', 'taconic'], + 'cantilena': ['cantilena', 'lancinate'], + 'cantilever': ['cantilever', 'trivalence'], + 
'cantily': ['anticly', 'cantily'], + 'cantina': ['cantina', 'tannaic'], + 'cantiness': ['anticness', 'cantiness', 'incessant'], + 'cantion': ['actinon', 'cantion', 'contain'], + 'cantle': ['cantle', 'cental', 'lancet', 'tancel'], + 'canto': ['acton', 'canto', 'octan'], + 'canton': ['cannot', 'canton', 'conant', 'nonact'], + 'cantonal': ['cantonal', 'connatal'], + 'cantor': ['cantor', 'carton', 'contra'], + 'cantorian': ['anarcotin', 'cantorian', 'carnation', 'narcotina'], + 'cantoris': ['cantoris', 'castorin', 'corsaint'], + 'cantred': ['cantred', 'centrad', 'tranced'], + 'cantus': ['cantus', 'tuscan', 'uncast'], + 'canun': ['canun', 'cunan'], + 'cany': ['cany', 'cyan'], + 'canyon': ['ancony', 'canyon'], + 'caoba': ['bacao', 'caoba'], + 'caodaism': ['camisado', 'caodaism'], + 'cap': ['cap', 'pac'], + 'capable': ['capable', 'pacable'], + 'caparison': ['caparison', 'paranosic'], + 'cape': ['cape', 'cepa', 'pace'], + 'caped': ['caped', 'decap', 'paced'], + 'capel': ['capel', 'place'], + 'capelin': ['calepin', 'capelin', 'panicle', 'pelican', 'pinacle'], + 'capeline': ['capeline', 'pelecani'], + 'caper': ['caper', 'crape', 'pacer', 'perca', 'recap'], + 'capernaite': ['capernaite', 'paraenetic'], + 'capernoited': ['capernoited', 'deprecation'], + 'capernoity': ['acetopyrin', 'capernoity'], + 'capes': ['capes', 'scape', 'space'], + 'caph': ['caph', 'chap'], + 'caphite': ['aphetic', 'caphite', 'hepatic'], + 'caphtor': ['caphtor', 'toparch'], + 'capias': ['capias', 'pisaca'], + 'capillament': ['capillament', 'implacental'], + 'capillarity': ['capillarity', 'piratically'], + 'capital': ['capital', 'palatic'], + 'capitan': ['capitan', 'captain'], + 'capitate': ['apatetic', 'capitate'], + 'capitellar': ['capitellar', 'prelatical'], + 'capito': ['atopic', 'capito', 'copita'], + 'capitol': ['capitol', 'coalpit', 'optical', 'topical'], + 'capomo': ['campoo', 'capomo'], + 'capon': ['capon', 'ponca'], + 'caponier': ['caponier', 'coprinae', 'procaine'], + 'capot': ['capot', 'coapt'], + 'capote': ['capote', 'toecap'], + 'capreol': ['capreol', 'polacre'], + 'capri': ['capri', 'picra', 'rapic'], + 'caprid': ['caprid', 'carpid', 'picard'], + 'capriote': ['aporetic', 'capriote', 'operatic'], + 'capsian': ['capsian', 'caspian', 'nascapi', 'panisca'], + 'capstone': ['capstone', 'opencast'], + 'capsula': ['capsula', 'pascual', 'scapula'], + 'capsular': ['capsular', 'scapular'], + 'capsulate': ['aspectual', 'capsulate'], + 'capsulated': ['capsulated', 'scapulated'], + 'capsule': ['capsule', 'specula', 'upscale'], + 'capsulectomy': ['capsulectomy', 'scapulectomy'], + 'capsuler': ['capsuler', 'specular'], + 'captain': ['capitan', 'captain'], + 'captation': ['anaptotic', 'captation'], + 'caption': ['caption', 'paction'], + 'captious': ['autopsic', 'captious'], + 'captor': ['captor', 'copart'], + 'capture': ['capture', 'uptrace'], + 'car': ['arc', 'car'], + 'cara': ['arca', 'cara'], + 'carabeen': ['bearance', 'carabeen'], + 'carabin': ['arbacin', 'carabin', 'cariban'], + 'carabini': ['arabinic', 'cabirian', 'carabini', 'cibarian'], + 'caracoli': ['caracoli', 'coracial'], + 'caracore': ['acrocera', 'caracore'], + 'caragana': ['aracanga', 'caragana'], + 'caramel': ['cameral', 'caramel', 'carmela', 'ceramal', 'reclama'], + 'caranda': ['anacard', 'caranda'], + 'carandas': ['carandas', 'sandarac'], + 'carane': ['arcane', 'carane'], + 'carangid': ['carangid', 'cardigan'], + 'carapine': ['carapine', 'carpaine'], + 'caravel': ['caravel', 'lavacre'], + 'carbamide': ['carbamide', 'crambidae'], + 'carbamine': ['carbamine', 
'crambinae'], + 'carbamino': ['carbamino', 'macrobian'], + 'carbeen': ['carbeen', 'carbene'], + 'carbene': ['carbeen', 'carbene'], + 'carbo': ['carbo', 'carob', 'coarb', 'cobra'], + 'carbohydride': ['carbohydride', 'hydrocarbide'], + 'carbon': ['bracon', 'carbon', 'corban'], + 'carbonite': ['bicornate', 'carbonite', 'reboantic'], + 'carcel': ['carcel', 'cercal'], + 'carcinoma': ['carcinoma', 'macaronic'], + 'carcinosarcoma': ['carcinosarcoma', 'sarcocarcinoma'], + 'carcoon': ['carcoon', 'raccoon'], + 'cardel': ['cardel', 'cradle'], + 'cardia': ['acarid', 'cardia', 'carida'], + 'cardiac': ['arcadic', 'cardiac'], + 'cardial': ['cardial', 'radical'], + 'cardiant': ['antacrid', 'cardiant', 'radicant', 'tridacna'], + 'cardigan': ['carangid', 'cardigan'], + 'cardiidae': ['acrididae', 'cardiidae', 'cidaridae'], + 'cardin': ['andric', 'cardin', 'rancid'], + 'cardinal': ['cardinal', 'clarinda'], + 'cardioid': ['cardioid', 'caridoid'], + 'cardiophobe': ['brachiopode', 'cardiophobe'], + 'cardo': ['cardo', 'draco'], + 'cardon': ['candor', 'cardon', 'conrad'], + 'cardoon': ['cardoon', 'coronad'], + 'care': ['acer', 'acre', 'care', 'crea', 'race'], + 'careen': ['careen', 'carene', 'enrace'], + 'carene': ['careen', 'carene', 'enrace'], + 'carer': ['carer', 'crare', 'racer'], + 'carest': ['carest', 'caster', 'recast'], + 'caret': ['caret', + 'carte', + 'cater', + 'crate', + 'creat', + 'creta', + 'react', + 'recta', + 'trace'], + 'caretta': ['caretta', 'teacart', 'tearcat'], + 'carful': ['carful', 'furcal'], + 'carhop': ['carhop', 'paroch'], + 'cariama': ['aramaic', 'cariama'], + 'carian': ['acinar', + 'arnica', + 'canari', + 'carian', + 'carina', + 'crania', + 'narica'], + 'carib': ['baric', 'carib', 'rabic'], + 'cariban': ['arbacin', 'carabin', 'cariban'], + 'caribi': ['cabiri', 'caribi'], + 'carid': ['acrid', 'caird', 'carid', 'darci', 'daric', 'dirca'], + 'carida': ['acarid', 'cardia', 'carida'], + 'caridea': ['arcidae', 'caridea'], + 'caridean': ['caridean', 'dircaean', 'radiance'], + 'caridoid': ['cardioid', 'caridoid'], + 'carina': ['acinar', + 'arnica', + 'canari', + 'carian', + 'carina', + 'crania', + 'narica'], + 'carinal': ['carinal', 'carlina', 'clarain', 'cranial'], + 'carinatae': ['acraniate', 'carinatae'], + 'carinate': ['anaretic', 'arcanite', 'carinate', 'craniate'], + 'carinated': ['carinated', 'eradicant'], + 'cariole': ['calorie', 'cariole'], + 'carious': ['carious', 'curiosa'], + 'carisa': ['carisa', 'sciara'], + 'carissa': ['ascaris', 'carissa'], + 'cark': ['cark', 'rack'], + 'carking': ['arcking', 'carking', 'racking'], + 'carkingly': ['carkingly', 'rackingly'], + 'carless': ['carless', 'classer', 'reclass'], + 'carlet': ['carlet', 'cartel', 'claret', 'rectal', 'talcer'], + 'carlie': ['carlie', 'claire', 'eclair', 'erical'], + 'carlin': ['carlin', 'clarin', 'crinal'], + 'carlina': ['carinal', 'carlina', 'clarain', 'cranial'], + 'carlist': ['carlist', 'clarist'], + 'carlo': ['alcor', 'calor', 'carlo', 'carol', 'claro', 'coral'], + 'carlot': ['carlot', 'crotal'], + 'carlylian': ['ancillary', 'carlylian', 'cranially'], + 'carman': ['carman', 'marcan'], + 'carmel': ['calmer', 'carmel', 'clamer', 'marcel', 'mercal'], + 'carmela': ['cameral', 'caramel', 'carmela', 'ceramal', 'reclama'], + 'carmele': ['carmele', 'cleamer'], + 'carmelite': ['carmelite', 'melicerta'], + 'carmeloite': ['carmeloite', 'ectromelia', 'meteorical'], + 'carmine': ['armenic', 'carmine', 'ceriman', 'crimean', 'mercian'], + 'carminette': ['carminette', 'remittance'], + 'carminite': ['antimeric', 'carminite', 
'criminate', 'metrician'], + 'carmot': ['carmot', 'comart'], + 'carnage': ['carnage', 'cranage', 'garance'], + 'carnalite': ['carnalite', 'claretian', 'lacertian', 'nectarial'], + 'carnate': ['carnate', 'cateran'], + 'carnation': ['anarcotin', 'cantorian', 'carnation', 'narcotina'], + 'carnationed': ['carnationed', 'dinoceratan'], + 'carnelian': ['carnelian', 'encranial'], + 'carneol': ['carneol', 'corneal'], + 'carneous': ['carneous', 'nacreous'], + 'carney': ['carney', 'craney'], + 'carnic': ['cancri', 'carnic', 'cranic'], + 'carniolan': ['carniolan', 'nonracial'], + 'carnose': ['carnose', 'coarsen', 'narcose'], + 'carnosity': ['carnosity', 'crayonist'], + 'carnotite': ['carnotite', 'cortinate'], + 'carnous': ['carnous', 'nacrous', 'narcous'], + 'caro': ['acor', 'caro', 'cora', 'orca'], + 'caroa': ['acroa', 'caroa'], + 'carob': ['carbo', 'carob', 'coarb', 'cobra'], + 'caroche': ['caroche', 'coacher', 'recoach'], + 'caroid': ['caroid', 'cordia'], + 'carol': ['alcor', 'calor', 'carlo', 'carol', 'claro', 'coral'], + 'carolan': ['alcoran', 'ancoral', 'carolan'], + 'carole': ['carole', 'coaler', 'coelar', 'oracle', 'recoal'], + 'carolean': ['carolean', 'lecanora'], + 'caroler': ['caroler', 'correal'], + 'caroli': ['caroli', 'corial', 'lorica'], + 'carolin': ['carolin', 'clarion', 'colarin', 'locrian'], + 'carolina': ['carolina', 'conarial'], + 'caroline': ['acrolein', + 'arecolin', + 'caroline', + 'colinear', + 'cornelia', + 'creolian', + 'lonicera'], + 'carolingian': ['carolingian', 'inorganical'], + 'carolus': ['carolus', 'oscular'], + 'carom': ['carom', 'coram', 'macro', 'marco'], + 'carone': ['carone', 'cornea'], + 'caroon': ['caroon', 'corona', 'racoon'], + 'carotenoid': ['carotenoid', 'coronadite', 'decoration'], + 'carotic': ['acrotic', 'carotic'], + 'carotid': ['arctoid', 'carotid', 'dartoic'], + 'carotidean': ['arctoidean', 'carotidean', 'cordaitean', 'dinocerata'], + 'carotin': ['anticor', 'carotin', 'cortina', 'ontaric'], + 'carouse': ['acerous', 'carouse', 'euscaro'], + 'carp': ['carp', 'crap'], + 'carpaine': ['carapine', 'carpaine'], + 'carpel': ['carpel', 'parcel', 'placer'], + 'carpellary': ['carpellary', 'parcellary'], + 'carpellate': ['carpellate', 'parcellate', 'prelacteal'], + 'carpent': ['carpent', 'precant'], + 'carpet': ['carpet', 'peract', 'preact'], + 'carpholite': ['carpholite', 'proethical'], + 'carpid': ['caprid', 'carpid', 'picard'], + 'carpiodes': ['carpiodes', 'scorpidae'], + 'carpocerite': ['carpocerite', 'reciprocate'], + 'carpogonial': ['carpogonial', 'coprolagnia'], + 'carpolite': ['carpolite', 'petricola'], + 'carpolith': ['carpolith', 'politarch', 'trophical'], + 'carposperm': ['carposperm', 'spermocarp'], + 'carrot': ['carrot', 'trocar'], + 'carroter': ['arrector', 'carroter'], + 'carse': ['carse', 'caser', 'ceras', 'scare', 'scrae'], + 'carsmith': ['carsmith', 'chartism'], + 'cartable': ['bracteal', 'cartable'], + 'carte': ['caret', + 'carte', + 'cater', + 'crate', + 'creat', + 'creta', + 'react', + 'recta', + 'trace'], + 'cartel': ['carlet', 'cartel', 'claret', 'rectal', 'talcer'], + 'cartelize': ['cartelize', 'zelatrice'], + 'carter': ['arrect', 'carter', 'crater', 'recart', 'tracer'], + 'cartesian': ['ascertain', 'cartesian', 'cartisane', 'sectarian'], + 'cartesianism': ['cartesianism', 'sectarianism'], + 'cartier': ['cartier', 'cirrate', 'erratic'], + 'cartilage': ['cartilage', 'rectalgia'], + 'cartisane': ['ascertain', 'cartesian', 'cartisane', 'sectarian'], + 'cartist': ['astrict', 'cartist', 'stratic'], + 'carton': ['cantor', 'carton', 'contra'], + 
'cartoon': ['cartoon', 'coranto'], + 'cartoonist': ['cartoonist', 'scortation'], + 'carty': ['carty', 'tracy'], + 'carua': ['aruac', 'carua'], + 'carucal': ['accrual', 'carucal'], + 'carucate': ['accurate', 'carucate'], + 'carum': ['carum', 'cumar'], + 'carve': ['carve', 'crave', 'varec'], + 'carvel': ['calver', 'carvel', 'claver'], + 'carven': ['carven', 'cavern', 'craven'], + 'carver': ['carver', 'craver'], + 'carving': ['carving', 'craving'], + 'cary': ['cary', 'racy'], + 'caryl': ['acryl', 'caryl', 'clary'], + 'casabe': ['casabe', 'sabeca'], + 'casal': ['calas', 'casal', 'scala'], + 'cascade': ['cascade', 'saccade'], + 'case': ['case', 'esca'], + 'casebook': ['bookcase', 'casebook'], + 'caseful': ['caseful', 'fucales'], + 'casein': ['casein', 'incase'], + 'casel': ['alces', 'casel', 'scale'], + 'caser': ['carse', 'caser', 'ceras', 'scare', 'scrae'], + 'casern': ['casern', 'rescan'], + 'cashable': ['cashable', 'chasable'], + 'cashel': ['cashel', 'laches', 'sealch'], + 'cask': ['cask', 'sack'], + 'casket': ['casket', 'tesack'], + 'casking': ['casking', 'sacking'], + 'casklike': ['casklike', 'sacklike'], + 'casper': ['casper', 'escarp', 'parsec', 'scrape', 'secpar', 'spacer'], + 'caspian': ['capsian', 'caspian', 'nascapi', 'panisca'], + 'casque': ['casque', 'sacque'], + 'casquet': ['acquest', 'casquet'], + 'casse': ['casse', 'scase'], + 'cassian': ['cassian', 'cassina'], + 'cassina': ['cassian', 'cassina'], + 'cassino': ['caisson', 'cassino'], + 'cassock': ['cassock', 'cossack'], + 'cast': ['acts', 'cast', 'scat'], + 'castalia': ['castalia', 'sacalait'], + 'castalian': ['castalian', 'satanical'], + 'caste': ['caste', 'sceat'], + 'castelet': ['castelet', 'telecast'], + 'caster': ['carest', 'caster', 'recast'], + 'castice': ['ascetic', 'castice', 'siccate'], + 'castle': ['castle', 'sclate'], + 'castoff': ['castoff', 'offcast'], + 'castor': ['arctos', 'castor', 'costar', 'scrota'], + 'castores': ['castores', 'coassert'], + 'castoreum': ['castoreum', 'outscream'], + 'castoridae': ['castoridae', 'cestodaria'], + 'castorin': ['cantoris', 'castorin', 'corsaint'], + 'castra': ['castra', 'tarasc'], + 'casual': ['ascula', 'calusa', 'casual', 'casula', 'causal'], + 'casuality': ['casuality', 'causality'], + 'casually': ['casually', 'causally'], + 'casula': ['ascula', 'calusa', 'casual', 'casula', 'causal'], + 'cat': ['act', 'cat'], + 'catabolin': ['botanical', 'catabolin'], + 'catalan': ['cantala', 'catalan', 'lantaca'], + 'catalanist': ['anastaltic', 'catalanist'], + 'catalase': ['catalase', 'salaceta'], + 'catalinite': ['analcitite', 'catalinite'], + 'catalogue': ['catalogue', 'coagulate'], + 'catalyte': ['catalyte', 'cattleya'], + 'catamenial': ['calamitean', 'catamenial'], + 'catapultier': ['catapultier', 'particulate'], + 'cataria': ['acratia', 'cataria'], + 'catcher': ['catcher', 'recatch'], + 'catchup': ['catchup', 'upcatch'], + 'cate': ['cate', 'teca'], + 'catechin': ['atechnic', 'catechin', 'technica'], + 'catechism': ['catechism', 'schematic'], + 'catechol': ['catechol', 'coachlet'], + 'categoric': ['categoric', 'geocratic'], + 'catella': ['catella', 'lacteal'], + 'catenated': ['catenated', 'decantate'], + 'cater': ['caret', + 'carte', + 'cater', + 'crate', + 'creat', + 'creta', + 'react', + 'recta', + 'trace'], + 'cateran': ['carnate', 'cateran'], + 'caterer': ['caterer', 'recrate', 'retrace', 'terrace'], + 'cateress': ['cateress', 'cerastes'], + 'catfish': ['catfish', 'factish'], + 'cathari': ['cathari', 'chirata', 'cithara'], + 'catharina': ['anthracia', 'antiarcha', 'catharina'], + 
'cathartae': ['cathartae', 'tracheata'], + 'cathepsin': ['cathepsin', 'stephanic'], + 'catherine': ['catherine', 'heritance'], + 'catheter': ['catheter', 'charette'], + 'cathine': ['cahnite', 'cathine'], + 'cathinine': ['anchietin', 'cathinine'], + 'cathion': ['cathion', 'chatino'], + 'cathograph': ['cathograph', 'tachograph'], + 'cathole': ['cathole', 'cholate'], + 'cathro': ['cathro', 'orchat'], + 'cathryn': ['cathryn', 'chantry'], + 'cathy': ['cathy', 'cyath', 'yacht'], + 'cation': ['action', 'atonic', 'cation'], + 'cationic': ['aconitic', 'cationic', 'itaconic'], + 'catkin': ['catkin', 'natick'], + 'catlin': ['catlin', 'tincal'], + 'catlinite': ['catlinite', 'intactile'], + 'catmalison': ['catmalison', 'monastical'], + 'catoism': ['atomics', 'catoism', 'cosmati', 'osmatic', 'somatic'], + 'catonian': ['catonian', 'taconian'], + 'catonic': ['cantico', 'catonic', 'taconic'], + 'catonism': ['catonism', 'monastic'], + 'catoptric': ['catoptric', 'protactic'], + 'catpipe': ['apeptic', 'catpipe'], + 'catstone': ['catstone', 'constate'], + 'catsup': ['catsup', 'upcast'], + 'cattail': ['attical', 'cattail'], + 'catti': ['attic', 'catti', 'tacit'], + 'cattily': ['cattily', 'tacitly'], + 'cattiness': ['cattiness', 'tacitness'], + 'cattle': ['cattle', 'tectal'], + 'cattleya': ['catalyte', 'cattleya'], + 'catvine': ['catvine', 'venatic'], + 'caucho': ['cachou', 'caucho'], + 'cauda': ['cadua', 'cauda'], + 'caudle': ['caudle', 'cedula', 'claude'], + 'caudodorsal': ['caudodorsal', 'dorsocaudal'], + 'caudofemoral': ['caudofemoral', 'femorocaudal'], + 'caudolateral': ['caudolateral', 'laterocaudal'], + 'caul': ['caul', 'ucal'], + 'cauld': ['cauld', 'ducal'], + 'caules': ['caelus', 'caules', 'clause'], + 'cauliform': ['cauliform', 'formulaic', 'fumarolic'], + 'caulinar': ['anicular', 'caulinar'], + 'caulis': ['caulis', 'clusia', 'sicula'], + 'caulite': ['aleutic', 'auletic', 'caulite', 'lutecia'], + 'caulome': ['caulome', 'leucoma'], + 'caulomic': ['caulomic', 'coumalic'], + 'caulote': ['caulote', 'colutea', 'oculate'], + 'caunch': ['caunch', 'cuchan'], + 'caurale': ['arcuale', 'caurale'], + 'causal': ['ascula', 'calusa', 'casual', 'casula', 'causal'], + 'causality': ['casuality', 'causality'], + 'causally': ['casually', 'causally'], + 'cause': ['cause', 'sauce'], + 'causeless': ['causeless', 'sauceless'], + 'causer': ['causer', 'saucer'], + 'causey': ['causey', 'cayuse'], + 'cautelous': ['cautelous', 'lutaceous'], + 'cauter': ['acture', 'cauter', 'curate'], + 'caution': ['auction', 'caution'], + 'cautionary': ['auctionary', 'cautionary'], + 'cautioner': ['cautioner', 'cointreau'], + 'caval': ['caval', 'clava'], + 'cavalry': ['calvary', 'cavalry'], + 'cavate': ['cavate', 'caveat', 'vacate'], + 'caveat': ['cavate', 'caveat', 'vacate'], + 'cavel': ['calve', 'cavel', 'clave'], + 'cavern': ['carven', 'cavern', 'craven'], + 'cavil': ['cavil', 'lavic'], + 'caviler': ['caliver', 'caviler', 'claiver', 'clavier', 'valeric', 'velaric'], + 'cavillation': ['cavillation', 'vacillation'], + 'cavitate': ['activate', 'cavitate'], + 'cavitation': ['activation', 'cavitation'], + 'cavitied': ['cavitied', 'vaticide'], + 'caw': ['caw', 'wac'], + 'cawk': ['cawk', 'wack'], + 'cawky': ['cawky', 'wacky'], + 'cayapa': ['cayapa', 'pacaya'], + 'cayuse': ['causey', 'cayuse'], + 'ccoya': ['accoy', 'ccoya'], + 'ceanothus': ['ceanothus', 'oecanthus'], + 'cearin': ['acerin', 'cearin'], + 'cebalrai': ['balearic', 'cebalrai'], + 'ceboid': ['bodice', 'ceboid'], + 'cebur': ['bruce', 'cebur', 'cuber'], + 'cecily': ['cecily', 'cicely'], + 
'cedar': ['acred', 'cader', 'cadre', 'cedar'], + 'cedarn': ['cedarn', 'dancer', 'nacred'], + 'cedent': ['cedent', 'decent'], + 'ceder': ['ceder', 'cedre', 'cered', 'creed'], + 'cedrat': ['cedrat', 'decart', 'redact'], + 'cedrate': ['cedrate', 'cerated'], + 'cedre': ['ceder', 'cedre', 'cered', 'creed'], + 'cedrela': ['cedrela', 'creedal', 'declare'], + 'cedrin': ['cedrin', 'cinder', 'crined'], + 'cedriret': ['cedriret', 'directer', 'recredit', 'redirect'], + 'cedrol': ['cedrol', 'colder', 'cordel'], + 'cedron': ['cedron', 'conred'], + 'cedrus': ['cedrus', 'cursed'], + 'cedry': ['cedry', 'decry'], + 'cedula': ['caudle', 'cedula', 'claude'], + 'ceilinged': ['ceilinged', 'diligence'], + 'celadonite': ['caledonite', 'celadonite'], + 'celandine': ['celandine', 'decennial'], + 'celarent': ['celarent', 'centrale', 'enclaret'], + 'celature': ['celature', 'ulcerate'], + 'celebrate': ['celebrate', 'erectable'], + 'celebrated': ['braceleted', 'celebrated'], + 'celemin': ['celemin', 'melenic'], + 'celia': ['alice', 'celia', 'ileac'], + 'cellar': ['caller', 'cellar', 'recall'], + 'cellaret': ['allecret', 'cellaret'], + 'celloid': ['celloid', 'codille', 'collide', 'collied'], + 'celloidin': ['celloidin', 'collidine', 'decillion'], + 'celsian': ['celsian', 'escalin', 'sanicle', 'secalin'], + 'celtiberi': ['celtiberi', 'terebilic'], + 'celtis': ['celtis', 'clites'], + 'cembalist': ['blastemic', 'cembalist'], + 'cementer': ['cementer', 'cerement', 'recement'], + 'cendre': ['cendre', 'decern'], + 'cenosity': ['cenosity', 'cytosine'], + 'cense': ['cense', 'scene', 'sence'], + 'censer': ['censer', 'scerne', 'screen', 'secern'], + 'censerless': ['censerless', 'screenless'], + 'censorial': ['censorial', 'sarcoline'], + 'censual': ['censual', 'unscale'], + 'censureless': ['censureless', 'recluseness'], + 'cental': ['cantle', 'cental', 'lancet', 'tancel'], + 'centare': ['centare', 'crenate'], + 'centaur': ['centaur', 'untrace'], + 'centauri': ['anuretic', 'centauri', 'centuria', 'teucrian'], + 'centaury': ['centaury', 'cyanuret'], + 'centena': ['canteen', 'centena'], + 'centenar': ['centenar', 'entrance'], + 'centenier': ['centenier', 'renitence'], + 'center': ['center', 'recent', 'tenrec'], + 'centered': ['centered', 'decenter', 'decentre', 'recedent'], + 'centerer': ['centerer', 'recenter', 'recentre', 'terrence'], + 'centermost': ['centermost', 'escortment'], + 'centesimal': ['centesimal', 'lemniscate'], + 'centiar': ['centiar', 'certain', 'citrean', 'nacrite', 'nectria'], + 'centiare': ['aneretic', 'centiare', 'creatine', 'increate', 'iterance'], + 'centibar': ['bacterin', 'centibar'], + 'centimeter': ['centimeter', 'recitement', 'remittence'], + 'centimo': ['centimo', 'entomic', 'tecomin'], + 'centimolar': ['centimolar', 'melicraton'], + 'centinormal': ['centinormal', 'conterminal', 'nonmetrical'], + 'cento': ['cento', 'conte', 'tecon'], + 'centrad': ['cantred', 'centrad', 'tranced'], + 'centrale': ['celarent', 'centrale', 'enclaret'], + 'centranth': ['centranth', 'trenchant'], + 'centraxonia': ['centraxonia', 'excarnation'], + 'centriole': ['centriole', 'electrion', 'relection'], + 'centrodorsal': ['centrodorsal', 'dorsocentral'], + 'centroid': ['centroid', 'doctrine'], + 'centrolineal': ['centrolineal', 'crenellation'], + 'centunculus': ['centunculus', 'unsucculent'], + 'centuria': ['anuretic', 'centauri', 'centuria', 'teucrian'], + 'centurial': ['centurial', 'lucretian', 'ultranice'], + 'centuried': ['centuried', 'unrecited'], + 'centurion': ['centurion', 'continuer', 'cornutine'], + 'cepa': ['cape', 
'cepa', 'pace'], + 'cephalin': ['alphenic', 'cephalin'], + 'cephalina': ['cephalina', 'epilachna'], + 'cephaloid': ['cephaloid', 'pholcidae'], + 'cephalomeningitis': ['cephalomeningitis', 'meningocephalitis'], + 'cephalometric': ['cephalometric', 'petrochemical'], + 'cephalopodous': ['cephalopodous', 'podocephalous'], + 'cephas': ['cephas', 'pesach'], + 'cepolidae': ['adipocele', 'cepolidae', 'ploceidae'], + 'ceps': ['ceps', 'spec'], + 'ceptor': ['ceptor', 'copter'], + 'ceral': ['ceral', 'clare', 'clear', 'lacer'], + 'ceramal': ['cameral', 'caramel', 'carmela', 'ceramal', 'reclama'], + 'ceramic': ['ceramic', 'racemic'], + 'ceramist': ['camerist', 'ceramist', 'matrices'], + 'ceras': ['carse', 'caser', 'ceras', 'scare', 'scrae'], + 'cerasein': ['cerasein', 'increase'], + 'cerasin': ['arsenic', 'cerasin', 'sarcine'], + 'cerastes': ['cateress', 'cerastes'], + 'cerata': ['arcate', 'cerata'], + 'cerate': ['cerate', 'create', 'ecarte'], + 'cerated': ['cedrate', 'cerated'], + 'ceratiid': ['ceratiid', 'raticide'], + 'ceration': ['actioner', 'anerotic', 'ceration', 'creation', 'reaction'], + 'ceratium': ['ceratium', 'muricate'], + 'ceratonia': ['canariote', 'ceratonia'], + 'ceratosa': ['ceratosa', 'ostracea'], + 'ceratothecal': ['ceratothecal', 'chloracetate'], + 'cerberic': ['cerberic', 'cerebric'], + 'cercal': ['carcel', 'cercal'], + 'cerci': ['cerci', 'ceric', 'cicer', 'circe'], + 'cercus': ['cercus', 'cruces'], + 'cerdonian': ['cerdonian', 'ordinance'], + 'cere': ['cere', 'cree'], + 'cereal': ['alerce', 'cereal', 'relace'], + 'cerealin': ['cerealin', 'cinereal', 'reliance'], + 'cerebra': ['cerebra', 'rebrace'], + 'cerebric': ['cerberic', 'cerebric'], + 'cerebroma': ['cerebroma', 'embraceor'], + 'cerebromeningitis': ['cerebromeningitis', 'meningocerebritis'], + 'cerebrum': ['cerebrum', 'cumberer'], + 'cered': ['ceder', 'cedre', 'cered', 'creed'], + 'cerement': ['cementer', 'cerement', 'recement'], + 'ceremonial': ['ceremonial', 'neomiracle'], + 'ceresin': ['ceresin', 'sincere'], + 'cereus': ['cereus', 'ceruse', 'recuse', 'rescue', 'secure'], + 'cerevis': ['cerevis', 'scrieve', 'service'], + 'ceria': ['acier', 'aeric', 'ceria', 'erica'], + 'ceric': ['cerci', 'ceric', 'cicer', 'circe'], + 'ceride': ['ceride', 'deicer'], + 'cerillo': ['cerillo', 'colleri', 'collier'], + 'ceriman': ['armenic', 'carmine', 'ceriman', 'crimean', 'mercian'], + 'cerin': ['cerin', 'crine'], + 'cerion': ['cerion', 'coiner', 'neroic', 'orcein', 'recoin'], + 'ceriops': ['ceriops', 'persico'], + 'cerite': ['cerite', 'certie', 'recite', 'tierce'], + 'cerium': ['cerium', 'uremic'], + 'cernuous': ['cernuous', 'coenurus'], + 'cero': ['cero', 'core'], + 'ceroma': ['ceroma', 'corema'], + 'ceroplast': ['ceroplast', 'precostal'], + 'ceroplastic': ['ceroplastic', 'cleistocarp', 'coreplastic'], + 'ceroplasty': ['ceroplasty', 'coreplasty'], + 'cerotic': ['cerotic', 'orectic'], + 'cerotin': ['cerotin', 'cointer', 'cotrine', 'cretion', 'noticer', 'rection'], + 'cerous': ['cerous', 'course', 'crouse', 'source'], + 'certain': ['centiar', 'certain', 'citrean', 'nacrite', 'nectria'], + 'certhia': ['certhia', 'rhaetic', 'theriac'], + 'certie': ['cerite', 'certie', 'recite', 'tierce'], + 'certifiable': ['certifiable', 'rectifiable'], + 'certification': ['certification', 'cretification', 'rectification'], + 'certificative': ['certificative', 'rectificative'], + 'certificator': ['certificator', 'rectificator'], + 'certificatory': ['certificatory', 'rectificatory'], + 'certified': ['certified', 'rectified'], + 'certifier': ['certifier', 
'rectifier'], + 'certify': ['certify', 'cretify', 'rectify'], + 'certis': ['certis', 'steric'], + 'certitude': ['certitude', 'rectitude'], + 'certosina': ['atroscine', 'certosina', 'ostracine', 'tinoceras', 'tricosane'], + 'certosino': ['certosino', 'cortisone', 'socotrine'], + 'cerulean': ['cerulean', 'laurence'], + 'ceruminal': ['ceruminal', 'melanuric', 'numerical'], + 'ceruse': ['cereus', 'ceruse', 'recuse', 'rescue', 'secure'], + 'cervicobuccal': ['buccocervical', 'cervicobuccal'], + 'cervicodorsal': ['cervicodorsal', 'dorsocervical'], + 'cervicodynia': ['cervicodynia', 'corycavidine'], + 'cervicofacial': ['cervicofacial', 'faciocervical'], + 'cervicolabial': ['cervicolabial', 'labiocervical'], + 'cervicovesical': ['cervicovesical', 'vesicocervical'], + 'cervoid': ['cervoid', 'divorce'], + 'cervuline': ['cervuline', 'virulence'], + 'ceryl': ['ceryl', 'clyer'], + 'cerynean': ['ancyrene', 'cerynean'], + 'cesare': ['cesare', 'crease', 'recase', 'searce'], + 'cesarolite': ['cesarolite', 'esoterical'], + 'cesium': ['cesium', 'miscue'], + 'cesser': ['cesser', 'recess'], + 'cession': ['cession', 'oscines'], + 'cessor': ['cessor', 'crosse', 'scorse'], + 'cest': ['cest', 'sect'], + 'cestodaria': ['castoridae', 'cestodaria'], + 'cestrian': ['canister', 'cestrian', 'cisterna', 'irascent'], + 'cetane': ['cetane', 'tenace'], + 'cetene': ['cetene', 'ectene'], + 'ceti': ['ceti', 'cite', 'tice'], + 'cetid': ['cetid', 'edict'], + 'cetomorphic': ['cetomorphic', 'chemotropic', 'ectomorphic'], + 'cetonia': ['acetoin', 'aconite', 'anoetic', 'antoeci', 'cetonia'], + 'cetonian': ['cetonian', 'enaction'], + 'cetorhinus': ['cetorhinus', 'urosthenic'], + 'cetus': ['cetus', 'scute'], + 'cevenol': ['cevenol', 'clovene'], + 'cevine': ['cevine', 'evince', 'venice'], + 'cha': ['ach', 'cha'], + 'chab': ['bach', 'chab'], + 'chabouk': ['chabouk', 'chakobu'], + 'chaco': ['chaco', 'choca', 'coach'], + 'chacte': ['cachet', 'chacte'], + 'chaenolobus': ['chaenolobus', 'unchoosable'], + 'chaeta': ['achate', 'chaeta'], + 'chaetites': ['aesthetic', 'chaetites'], + 'chaetognath': ['chaetognath', 'gnathotheca'], + 'chaetopod': ['chaetopod', 'podotheca'], + 'chafer': ['chafer', 'frache'], + 'chagan': ['chagan', 'changa'], + 'chagrin': ['arching', 'chagrin'], + 'chai': ['chai', 'chia'], + 'chain': ['chain', 'chian', 'china'], + 'chained': ['chained', 'echidna'], + 'chainer': ['chainer', 'enchair', 'rechain'], + 'chainlet': ['chainlet', 'ethnical'], + 'chainman': ['chainman', 'chinaman'], + 'chair': ['chair', 'chria'], + 'chairer': ['chairer', 'charier'], + 'chait': ['aitch', 'chait', 'chati', 'chita', 'taich', 'tchai'], + 'chakar': ['chakar', 'chakra', 'charka'], + 'chakari': ['chakari', 'chikara', 'kachari'], + 'chakobu': ['chabouk', 'chakobu'], + 'chakra': ['chakar', 'chakra', 'charka'], + 'chalcon': ['chalcon', 'clochan', 'conchal'], + 'chalcosine': ['ascolichen', 'chalcosine'], + 'chaldron': ['chaldron', 'chlordan', 'chondral'], + 'chalet': ['achtel', 'chalet', 'thecal', 'thecla'], + 'chalker': ['chalker', 'hackler'], + 'chalky': ['chalky', 'hackly'], + 'challie': ['alichel', 'challie', 'helical'], + 'chalmer': ['chalmer', 'charmel'], + 'chalon': ['chalon', 'lochan'], + 'chalone': ['chalone', 'cholane'], + 'chalta': ['caltha', 'chalta'], + 'chamal': ['almach', 'chamal'], + 'chamar': ['chamar', 'machar'], + 'chamber': ['becharm', 'brecham', 'chamber'], + 'chamberer': ['chamberer', 'rechamber'], + 'chamian': ['chamian', 'mahican'], + 'chamisal': ['chamisal', 'chiasmal'], + 'chamiso': ['chamiso', 'chamois'], + 'chamite': 
['chamite', 'hematic'], + 'chamois': ['chamiso', 'chamois'], + 'champa': ['champa', 'mapach'], + 'champain': ['champain', 'chinampa'], + 'chancer': ['chancer', 'chancre'], + 'chanchito': ['chanchito', 'nachitoch'], + 'chanco': ['chanco', 'concha'], + 'chancre': ['chancer', 'chancre'], + 'chandu': ['chandu', 'daunch'], + 'chane': ['achen', 'chane', 'chena', 'hance'], + 'chang': ['chang', 'ganch'], + 'changa': ['chagan', 'changa'], + 'changer': ['changer', 'genarch'], + 'chanidae': ['chanidae', 'hacienda'], + 'channeler': ['channeler', 'encharnel'], + 'chanst': ['chanst', 'snatch', 'stanch'], + 'chant': ['chant', 'natch'], + 'chanter': ['chanter', 'rechant'], + 'chantey': ['atechny', 'chantey'], + 'chantry': ['cathryn', 'chantry'], + 'chaos': ['chaos', 'oshac'], + 'chaotical': ['acatholic', 'chaotical'], + 'chap': ['caph', 'chap'], + 'chaparro': ['chaparro', 'parachor'], + 'chape': ['chape', 'cheap', 'peach'], + 'chaped': ['chaped', 'phecda'], + 'chapel': ['chapel', 'lepcha', 'pleach'], + 'chapelet': ['chapelet', 'peachlet'], + 'chapelmaster': ['chapelmaster', 'spermathecal'], + 'chaperno': ['canephor', 'chaperno', 'chaperon'], + 'chaperon': ['canephor', 'chaperno', 'chaperon'], + 'chaperone': ['canephore', 'chaperone'], + 'chaperonless': ['chaperonless', 'proseneschal'], + 'chapin': ['apinch', 'chapin', 'phanic'], + 'chapiter': ['chapiter', 'phreatic'], + 'chaps': ['chaps', 'pasch'], + 'chapt': ['chapt', 'pacht', 'patch'], + 'chapter': ['chapter', 'patcher', 'repatch'], + 'char': ['arch', 'char', 'rach'], + 'chara': ['achar', 'chara'], + 'charac': ['charac', 'charca'], + 'characin': ['anarchic', 'characin'], + 'characinoid': ['arachidonic', 'characinoid'], + 'charadrii': ['charadrii', 'richardia'], + 'charas': ['achras', 'charas'], + 'charbon': ['brochan', 'charbon'], + 'charca': ['charac', 'charca'], + 'chare': ['acher', 'arche', 'chare', 'chera', 'rache', 'reach'], + 'charer': ['archer', 'charer', 'rechar'], + 'charette': ['catheter', 'charette'], + 'charge': ['charge', 'creagh'], + 'charier': ['chairer', 'charier'], + 'chariot': ['chariot', 'haricot'], + 'charioted': ['charioted', 'trochidae'], + 'chariotman': ['achromatin', 'chariotman', 'machinator'], + 'charism': ['charism', 'chrisma'], + 'charisma': ['archaism', 'charisma'], + 'chark': ['chark', 'karch'], + 'charka': ['chakar', 'chakra', 'charka'], + 'charlatanistic': ['antarchistical', 'charlatanistic'], + 'charleen': ['charleen', 'charlene'], + 'charlene': ['charleen', 'charlene'], + 'charles': ['charles', 'clasher'], + 'charm': ['charm', 'march'], + 'charmel': ['chalmer', 'charmel'], + 'charmer': ['charmer', 'marcher', 'remarch'], + 'charnel': ['charnel', 'larchen'], + 'charon': ['anchor', 'archon', 'charon', 'rancho'], + 'charpoy': ['charpoy', 'corypha'], + 'chart': ['chart', 'ratch'], + 'charter': ['charter', 'ratcher'], + 'charterer': ['charterer', 'recharter'], + 'charting': ['charting', 'ratching'], + 'chartism': ['carsmith', 'chartism'], + 'charuk': ['charuk', 'chukar'], + 'chary': ['archy', 'chary'], + 'chasable': ['cashable', 'chasable'], + 'chaser': ['arches', 'chaser', 'eschar', 'recash', 'search'], + 'chasma': ['ascham', 'chasma'], + 'chaste': ['chaste', 'sachet', 'scathe', 'scheat'], + 'chasten': ['chasten', 'sanetch'], + 'chastener': ['chastener', 'rechasten'], + 'chastity': ['chastity', 'yachtist'], + 'chasuble': ['chasuble', 'subchela'], + 'chat': ['chat', 'tach'], + 'chatelainry': ['chatelainry', 'trachylinae'], + 'chati': ['aitch', 'chait', 'chati', 'chita', 'taich', 'tchai'], + 'chatino': ['cathion', 'chatino'], 
+ 'chatsome': ['chatsome', 'moschate'], + 'chatta': ['attach', 'chatta'], + 'chattation': ['chattation', 'thanatotic'], + 'chattel': ['chattel', 'latchet'], + 'chatter': ['chatter', 'ratchet'], + 'chattery': ['chattery', 'ratchety', 'trachyte'], + 'chatti': ['chatti', 'hattic'], + 'chatty': ['chatty', 'tatchy'], + 'chatwood': ['chatwood', 'woodchat'], + 'chaute': ['chaute', 'chueta'], + 'chawan': ['chawan', 'chwana', 'wachna'], + 'chawer': ['chawer', 'rechaw'], + 'chawk': ['chawk', 'whack'], + 'chay': ['achy', 'chay'], + 'cheap': ['chape', 'cheap', 'peach'], + 'cheapen': ['cheapen', 'peachen'], + 'cheapery': ['cheapery', 'peachery'], + 'cheapside': ['cheapside', 'sphecidae'], + 'cheat': ['cheat', 'tache', 'teach', 'theca'], + 'cheatable': ['cheatable', 'teachable'], + 'cheatableness': ['cheatableness', 'teachableness'], + 'cheater': ['cheater', 'hectare', 'recheat', 'reteach', 'teacher'], + 'cheatery': ['cheatery', 'cytherea', 'teachery'], + 'cheating': ['cheating', 'teaching'], + 'cheatingly': ['cheatingly', 'teachingly'], + 'cheatrie': ['cheatrie', 'hetaeric'], + 'checker': ['checker', 'recheck'], + 'chee': ['chee', 'eche'], + 'cheek': ['cheek', 'cheke', 'keech'], + 'cheerer': ['cheerer', 'recheer'], + 'cheerly': ['cheerly', 'lechery'], + 'cheery': ['cheery', 'reechy'], + 'cheet': ['cheet', 'hecte'], + 'cheir': ['cheir', 'rheic'], + 'cheiropodist': ['cheiropodist', 'coeditorship'], + 'cheka': ['cheka', 'keach'], + 'cheke': ['cheek', 'cheke', 'keech'], + 'chela': ['chela', 'lache', 'leach'], + 'chelide': ['chelide', 'heliced'], + 'chelidon': ['chelidon', 'chelonid', 'delichon'], + 'chelidonate': ['chelidonate', 'endothecial'], + 'chelodina': ['chelodina', 'hedonical'], + 'chelone': ['chelone', 'echelon'], + 'chelonid': ['chelidon', 'chelonid', 'delichon'], + 'cheloniid': ['cheloniid', 'lichenoid'], + 'chemiatrist': ['chemiatrist', 'chrismatite', 'theatricism'], + 'chemical': ['alchemic', 'chemical'], + 'chemicomechanical': ['chemicomechanical', 'mechanicochemical'], + 'chemicophysical': ['chemicophysical', 'physicochemical'], + 'chemicovital': ['chemicovital', 'vitochemical'], + 'chemiloon': ['chemiloon', 'homocline'], + 'chemotaxy': ['chemotaxy', 'myxotheca'], + 'chemotropic': ['cetomorphic', 'chemotropic', 'ectomorphic'], + 'chena': ['achen', 'chane', 'chena', 'hance'], + 'chenica': ['chenica', 'chicane'], + 'chenille': ['chenille', 'hellenic'], + 'chenopod': ['chenopod', 'ponchoed'], + 'chera': ['acher', 'arche', 'chare', 'chera', 'rache', 'reach'], + 'chermes': ['chermes', 'schemer'], + 'chert': ['chert', 'retch'], + 'cherte': ['cherte', 'etcher'], + 'chervil': ['chervil', 'chilver'], + 'cheson': ['cheson', 'chosen', 'schone'], + 'chest': ['chest', 'stech'], + 'chestily': ['chestily', 'lecythis'], + 'chesty': ['chesty', 'scythe'], + 'chet': ['chet', 'etch', 'tche', 'tech'], + 'chettik': ['chettik', 'thicket'], + 'chetty': ['chetty', 'tetchy'], + 'chewer': ['chewer', 'rechew'], + 'chewink': ['chewink', 'whicken'], + 'chi': ['chi', 'hic', 'ich'], + 'chia': ['chai', 'chia'], + 'chiam': ['chiam', 'machi', 'micah'], + 'chian': ['chain', 'chian', 'china'], + 'chiasmal': ['chamisal', 'chiasmal'], + 'chiastolite': ['chiastolite', 'heliostatic'], + 'chicane': ['chenica', 'chicane'], + 'chicle': ['chicle', 'cliche'], + 'chid': ['chid', 'dich'], + 'chider': ['chider', 'herdic'], + 'chidra': ['chidra', 'diarch'], + 'chief': ['chief', 'fiche'], + 'chield': ['chield', 'childe'], + 'chien': ['chien', 'chine', 'niche'], + 'chikara': ['chakari', 'chikara', 'kachari'], + 'chil': ['chil', 'lich'], + 
'childe': ['chield', 'childe'], + 'chilean': ['chilean', 'echinal', 'nichael'], + 'chili': ['chili', 'lichi'], + 'chiliasm': ['chiliasm', 'hilasmic', 'machilis'], + 'chilla': ['achill', 'cahill', 'chilla'], + 'chiloma': ['chiloma', 'malicho'], + 'chilopod': ['chilopod', 'pholcoid'], + 'chilopoda': ['chilopoda', 'haplodoci'], + 'chilostome': ['chilostome', 'schooltime'], + 'chilver': ['chervil', 'chilver'], + 'chimane': ['chimane', 'machine'], + 'chime': ['chime', 'hemic', 'miche'], + 'chimer': ['chimer', 'mechir', 'micher'], + 'chimera': ['chimera', 'hermaic'], + 'chimney': ['chimney', 'hymenic'], + 'chimu': ['chimu', 'humic'], + 'chin': ['chin', 'inch'], + 'china': ['chain', 'chian', 'china'], + 'chinaman': ['chainman', 'chinaman'], + 'chinampa': ['champain', 'chinampa'], + 'chinanta': ['acanthin', 'chinanta'], + 'chinar': ['chinar', 'inarch'], + 'chine': ['chien', 'chine', 'niche'], + 'chined': ['chined', 'inched'], + 'chink': ['chink', 'kinch'], + 'chinkle': ['chinkle', 'kelchin'], + 'chinks': ['chinks', 'skinch'], + 'chinoa': ['chinoa', 'noahic'], + 'chinotti': ['chinotti', 'tithonic'], + 'chint': ['chint', 'nitch'], + 'chiolite': ['chiolite', 'eolithic'], + 'chionididae': ['chionididae', 'onchidiidae'], + 'chiral': ['archil', 'chiral'], + 'chirapsia': ['chirapsia', 'pharisaic'], + 'chirata': ['cathari', 'chirata', 'cithara'], + 'chiro': ['chiro', 'choir', 'ichor'], + 'chiromancist': ['chiromancist', 'monarchistic'], + 'chiromant': ['chiromant', 'chromatin'], + 'chiromantic': ['chiromantic', 'chromatinic'], + 'chiromantis': ['anchoritism', 'chiromantis', 'chrismation', 'harmonistic'], + 'chirometer': ['chirometer', 'rheometric'], + 'chiroplasty': ['chiroplasty', 'polyarchist'], + 'chiropter': ['chiropter', 'peritroch'], + 'chirosophist': ['chirosophist', 'opisthorchis'], + 'chirotes': ['chirotes', 'theorics'], + 'chirotype': ['chirotype', 'hypocrite'], + 'chirp': ['chirp', 'prich'], + 'chirper': ['chirper', 'prerich'], + 'chiseler': ['chiseler', 'rechisel'], + 'chit': ['chit', 'itch', 'tchi'], + 'chita': ['aitch', 'chait', 'chati', 'chita', 'taich', 'tchai'], + 'chital': ['chital', 'claith'], + 'chitinoid': ['chitinoid', 'dithionic'], + 'chitosan': ['atchison', 'chitosan'], + 'chitose': ['chitose', 'echoist'], + 'chloe': ['chloe', 'choel'], + 'chloracetate': ['ceratothecal', 'chloracetate'], + 'chloranthy': ['chloranthy', 'rhynchotal'], + 'chlorate': ['chlorate', 'trochlea'], + 'chlordan': ['chaldron', 'chlordan', 'chondral'], + 'chlore': ['chlore', 'choler', 'orchel'], + 'chloremia': ['chloremia', 'homerical'], + 'chlorinate': ['chlorinate', 'ectorhinal', 'tornachile'], + 'chlorite': ['chlorite', 'clothier'], + 'chloritic': ['chloritic', 'trochilic'], + 'chloroamine': ['chloroamine', 'melanochroi'], + 'chloroanaemia': ['aeolharmonica', 'chloroanaemia'], + 'chloroanemia': ['chloroanemia', 'choleromania'], + 'chloroiodide': ['chloroiodide', 'iodochloride'], + 'chloroplatinic': ['chloroplatinic', 'platinochloric'], + 'cho': ['cho', 'och'], + 'choana': ['aonach', 'choana'], + 'choca': ['chaco', 'choca', 'coach'], + 'choco': ['choco', 'hocco'], + 'choel': ['chloe', 'choel'], + 'choenix': ['choenix', 'hexonic'], + 'choes': ['choes', 'chose'], + 'choiak': ['choiak', 'kochia'], + 'choice': ['choice', 'echoic'], + 'choil': ['choil', 'choli', 'olchi'], + 'choir': ['chiro', 'choir', 'ichor'], + 'choirman': ['choirman', 'harmonic', 'omniarch'], + 'choker': ['choker', 'hocker'], + 'choky': ['choky', 'hocky'], + 'chol': ['chol', 'loch'], + 'chola': ['chola', 'loach', 'olcha'], + 'cholane': 
['chalone', 'cholane'], + 'cholangioitis': ['angiocholitis', 'cholangioitis'], + 'cholanic': ['cholanic', 'colchian'], + 'cholate': ['cathole', 'cholate'], + 'cholecystoduodenostomy': ['cholecystoduodenostomy', 'duodenocholecystostomy'], + 'choler': ['chlore', 'choler', 'orchel'], + 'cholera': ['cholera', 'choreal'], + 'cholerine': ['cholerine', 'rhinocele'], + 'choleromania': ['chloroanemia', 'choleromania'], + 'cholesteremia': ['cholesteremia', 'heteroecismal'], + 'choli': ['choil', 'choli', 'olchi'], + 'choline': ['choline', 'helicon'], + 'cholo': ['cholo', 'cohol'], + 'chondral': ['chaldron', 'chlordan', 'chondral'], + 'chondrite': ['chondrite', 'threnodic'], + 'chondroadenoma': ['adenochondroma', 'chondroadenoma'], + 'chondroangioma': ['angiochondroma', 'chondroangioma'], + 'chondroarthritis': ['arthrochondritis', 'chondroarthritis'], + 'chondrocostal': ['chondrocostal', 'costochondral'], + 'chondrofibroma': ['chondrofibroma', 'fibrochondroma'], + 'chondrolipoma': ['chondrolipoma', 'lipochondroma'], + 'chondromyxoma': ['chondromyxoma', 'myxochondroma'], + 'chondromyxosarcoma': ['chondromyxosarcoma', 'myxochondrosarcoma'], + 'choop': ['choop', 'pooch'], + 'chopa': ['chopa', 'phoca', 'poach'], + 'chopin': ['chopin', 'phonic'], + 'chopine': ['chopine', 'phocine'], + 'chora': ['achor', 'chora', 'corah', 'orach', 'roach'], + 'choral': ['choral', 'lorcha'], + 'chorasmian': ['anachorism', 'chorasmian', 'maraschino'], + 'chordal': ['chordal', 'dorlach'], + 'chorditis': ['chorditis', 'orchidist'], + 'chordotonal': ['chordotonal', 'notochordal'], + 'chore': ['chore', 'ocher'], + 'chorea': ['chorea', 'ochrea', 'rochea'], + 'choreal': ['cholera', 'choreal'], + 'choree': ['choree', 'cohere', 'echoer'], + 'choreoid': ['choreoid', 'ochidore'], + 'choreus': ['choreus', 'chouser', 'rhoecus'], + 'choric': ['choric', 'orchic'], + 'choriocele': ['choriocele', 'orchiocele'], + 'chorioidoretinitis': ['chorioidoretinitis', 'retinochorioiditis'], + 'chorism': ['chorism', 'chrisom'], + 'chorist': ['chorist', 'ostrich'], + 'choristate': ['choristate', 'rheostatic'], + 'chorization': ['chorization', 'rhizoctonia', 'zonotrichia'], + 'choroid': ['choroid', 'ochroid'], + 'chort': ['chort', 'rotch', 'torch'], + 'chorten': ['chorten', 'notcher'], + 'chorti': ['chorti', 'orthic', 'thoric', 'trochi'], + 'chose': ['choes', 'chose'], + 'chosen': ['cheson', 'chosen', 'schone'], + 'chou': ['chou', 'ouch'], + 'chough': ['chough', 'hughoc'], + 'choup': ['choup', 'pouch'], + 'chous': ['chous', 'hocus'], + 'chouser': ['choreus', 'chouser', 'rhoecus'], + 'chowder': ['chowder', 'cowherd'], + 'chria': ['chair', 'chria'], + 'chrism': ['chrism', 'smirch'], + 'chrisma': ['charism', 'chrisma'], + 'chrismation': ['anchoritism', 'chiromantis', 'chrismation', 'harmonistic'], + 'chrismatite': ['chemiatrist', 'chrismatite', 'theatricism'], + 'chrisom': ['chorism', 'chrisom'], + 'christ': ['christ', 'strich'], + 'christen': ['christen', 'snitcher'], + 'christener': ['christener', 'rechristen'], + 'christian': ['christian', 'christina'], + 'christiana': ['arachnitis', 'christiana'], + 'christina': ['christian', 'christina'], + 'christophe': ['christophe', 'hectorship'], + 'chromatician': ['achromatinic', 'chromatician'], + 'chromatid': ['chromatid', 'dichromat'], + 'chromatin': ['chiromant', 'chromatin'], + 'chromatinic': ['chiromantic', 'chromatinic'], + 'chromatocyte': ['chromatocyte', 'thoracectomy'], + 'chromatoid': ['chromatoid', 'tichodroma'], + 'chromatone': ['chromatone', 'enomotarch'], + 'chromid': ['chromid', 'richdom'], + 
'chromidae': ['archidome', 'chromidae'], + 'chromite': ['chromite', 'trichome'], + 'chromocyte': ['chromocyte', 'cytochrome'], + 'chromolithography': ['chromolithography', 'lithochromography'], + 'chromophotography': ['chromophotography', 'photochromography'], + 'chromophotolithograph': ['chromophotolithograph', 'photochromolithograph'], + 'chromopsia': ['chromopsia', 'isocamphor'], + 'chromotype': ['chromotype', 'cormophyte', 'ectomorphy'], + 'chromotypic': ['chromotypic', 'cormophytic', 'mycotrophic'], + 'chronophotograph': ['chronophotograph', 'photochronograph'], + 'chronophotographic': ['chronophotographic', 'photochronographic'], + 'chronophotography': ['chronophotography', 'photochronography'], + 'chrysography': ['chrysography', 'psychorrhagy'], + 'chrysolite': ['chrysolite', 'chrysotile'], + 'chrysopid': ['chrysopid', 'dysphoric'], + 'chrysotile': ['chrysolite', 'chrysotile'], + 'chucker': ['chucker', 'rechuck'], + 'chueta': ['chaute', 'chueta'], + 'chukar': ['charuk', 'chukar'], + 'chulan': ['chulan', 'launch', 'nuchal'], + 'chum': ['chum', 'much'], + 'chumpish': ['chumpish', 'chumship'], + 'chumship': ['chumpish', 'chumship'], + 'chunari': ['chunari', 'unchair'], + 'chunnia': ['chunnia', 'unchain'], + 'chuprassie': ['chuprassie', 'haruspices'], + 'churl': ['churl', 'lurch'], + 'churn': ['churn', 'runch'], + 'chut': ['chut', 'tchu', 'utch'], + 'chwana': ['chawan', 'chwana', 'wachna'], + 'chyak': ['chyak', 'hacky'], + 'chylous': ['chylous', 'slouchy'], + 'cibarial': ['biracial', 'cibarial'], + 'cibarian': ['arabinic', 'cabirian', 'carabini', 'cibarian'], + 'cibolan': ['cibolan', 'coalbin'], + 'cicala': ['alcaic', 'cicala'], + 'cicatrize': ['arcticize', 'cicatrize'], + 'cicely': ['cecily', 'cicely'], + 'cicer': ['cerci', 'ceric', 'cicer', 'circe'], + 'cicerone': ['cicerone', 'croceine'], + 'cicindela': ['cicindela', 'cinclidae', 'icelandic'], + 'ciclatoun': ['ciclatoun', 'noctiluca'], + 'ciconian': ['aniconic', 'ciconian'], + 'ciconine': ['ciconine', 'conicine'], + 'cidaridae': ['acrididae', 'cardiidae', 'cidaridae'], + 'cidaris': ['cidaris', 'sciarid'], + 'cider': ['cider', 'cried', 'deric', 'dicer'], + 'cigala': ['caliga', 'cigala'], + 'cigar': ['cigar', 'craig'], + 'cilia': ['cilia', 'iliac'], + 'ciliation': ['ciliation', 'coinitial'], + 'cilice': ['cilice', 'icicle'], + 'cimbia': ['cimbia', 'iambic'], + 'cimicidae': ['amicicide', 'cimicidae'], + 'cinchonine': ['cinchonine', 'conchinine'], + 'cinclidae': ['cicindela', 'cinclidae', 'icelandic'], + 'cinder': ['cedrin', 'cinder', 'crined'], + 'cinderous': ['cinderous', 'decursion'], + 'cindie': ['cindie', 'incide'], + 'cine': ['cine', 'nice'], + 'cinel': ['cinel', 'cline'], + 'cinema': ['anemic', 'cinema', 'iceman'], + 'cinematographer': ['cinematographer', 'megachiropteran'], + 'cinene': ['cinene', 'nicene'], + 'cineole': ['cineole', 'coeline'], + 'cinerama': ['amacrine', 'american', 'camerina', 'cinerama'], + 'cineration': ['cineration', 'inceration'], + 'cinerea': ['cairene', 'cinerea'], + 'cinereal': ['cerealin', 'cinereal', 'reliance'], + 'cingulum': ['cingulum', 'glucinum'], + 'cinnamate': ['antanemic', 'cinnamate'], + 'cinnamol': ['cinnamol', 'nonclaim'], + 'cinnamon': ['cinnamon', 'mannonic'], + 'cinnamoned': ['cinnamoned', 'demicannon'], + 'cinque': ['cinque', 'quince'], + 'cinter': ['cinter', 'cretin', 'crinet'], + 'cinura': ['anuric', 'cinura', 'uranic'], + 'cion': ['cion', 'coin', 'icon'], + 'cipher': ['cipher', 'rechip'], + 'cipo': ['cipo', 'pico'], + 'circe': ['cerci', 'ceric', 'cicer', 'circe'], + 'circle': ['circle', 
'cleric'], + 'cirrate': ['cartier', 'cirrate', 'erratic'], + 'cirrated': ['cirrated', 'craterid'], + 'cirrhotic': ['cirrhotic', 'trichroic'], + 'cirrose': ['cirrose', 'crosier'], + 'cirsectomy': ['cirsectomy', 'citromyces'], + 'cirsoid': ['cirsoid', 'soricid'], + 'ciruela': ['auricle', 'ciruela'], + 'cise': ['cise', 'sice'], + 'cisplatine': ['cisplatine', 'plasticine'], + 'cispontine': ['cispontine', 'inspection'], + 'cissoidal': ['cissoidal', 'dissocial'], + 'cistern': ['cistern', 'increst'], + 'cisterna': ['canister', 'cestrian', 'cisterna', 'irascent'], + 'cisternal': ['cisternal', 'larcenist'], + 'cistvaen': ['cistvaen', 'vesicant'], + 'cit': ['cit', 'tic'], + 'citadel': ['citadel', 'deltaic', 'dialect', 'edictal', 'lactide'], + 'citatory': ['atrocity', 'citatory'], + 'cite': ['ceti', 'cite', 'tice'], + 'citer': ['citer', 'recti', 'ticer', 'trice'], + 'cithara': ['cathari', 'chirata', 'cithara'], + 'citharist': ['citharist', 'trachitis'], + 'citharoedic': ['citharoedic', 'diachoretic'], + 'cither': ['cither', 'thrice'], + 'citied': ['citied', 'dietic'], + 'citizen': ['citizen', 'zincite'], + 'citral': ['citral', 'rictal'], + 'citramide': ['citramide', 'diametric', 'matricide'], + 'citrange': ['argentic', 'citrange'], + 'citrate': ['atretic', 'citrate'], + 'citrated': ['citrated', 'tetracid', 'tetradic'], + 'citrean': ['centiar', 'certain', 'citrean', 'nacrite', 'nectria'], + 'citrene': ['citrene', 'enteric', 'enticer', 'tercine'], + 'citreous': ['citreous', 'urticose'], + 'citric': ['citric', 'critic'], + 'citrin': ['citrin', 'nitric'], + 'citrination': ['citrination', 'intrication'], + 'citrine': ['citrine', 'crinite', 'inciter', 'neritic'], + 'citromyces': ['cirsectomy', 'citromyces'], + 'citron': ['citron', 'cortin', 'crotin'], + 'citronade': ['citronade', 'endaortic', 'redaction'], + 'citronella': ['citronella', 'interlocal'], + 'citrus': ['citrus', 'curtis', 'rictus', 'rustic'], + 'cive': ['cive', 'vice'], + 'civet': ['civet', 'evict'], + 'civetone': ['civetone', 'evection'], + 'civitan': ['activin', 'civitan'], + 'cixo': ['cixo', 'coix'], + 'clabber': ['cabbler', 'clabber'], + 'clacker': ['cackler', 'clacker', 'crackle'], + 'cladine': ['cladine', 'decalin', 'iceland'], + 'cladonia': ['cladonia', 'condalia', 'diaconal'], + 'cladophyll': ['cladophyll', 'phylloclad'], + 'claim': ['claim', 'clima', 'malic'], + 'claimant': ['calamint', 'claimant'], + 'claimer': ['claimer', 'miracle', 'reclaim'], + 'clairce': ['clairce', 'clarice'], + 'claire': ['carlie', 'claire', 'eclair', 'erical'], + 'claith': ['chital', 'claith'], + 'claiver': ['caliver', 'caviler', 'claiver', 'clavier', 'valeric', 'velaric'], + 'clam': ['calm', 'clam'], + 'clamant': ['calmant', 'clamant'], + 'clamative': ['calmative', 'clamative'], + 'clamatores': ['clamatores', 'scleromata'], + 'clamber': ['cambrel', 'clamber', 'cramble'], + 'clame': ['camel', 'clame', 'cleam', 'macle'], + 'clamer': ['calmer', 'carmel', 'clamer', 'marcel', 'mercal'], + 'clamor': ['clamor', 'colmar'], + 'clamorist': ['clamorist', 'crotalism'], + 'clangingly': ['clangingly', 'glancingly'], + 'clap': ['calp', 'clap'], + 'clapper': ['clapper', 'crapple'], + 'claquer': ['claquer', 'lacquer'], + 'clarain': ['carinal', 'carlina', 'clarain', 'cranial'], + 'clare': ['ceral', 'clare', 'clear', 'lacer'], + 'clarence': ['canceler', 'clarence', 'recancel'], + 'claret': ['carlet', 'cartel', 'claret', 'rectal', 'talcer'], + 'claretian': ['carnalite', 'claretian', 'lacertian', 'nectarial'], + 'clarice': ['clairce', 'clarice'], + 'clarin': ['carlin', 'clarin', 
'crinal'], + 'clarinda': ['cardinal', 'clarinda'], + 'clarion': ['carolin', 'clarion', 'colarin', 'locrian'], + 'clarionet': ['alectrion', 'clarionet', 'crotaline', 'locarnite'], + 'clarist': ['carlist', 'clarist'], + 'claro': ['alcor', 'calor', 'carlo', 'carol', 'claro', 'coral'], + 'clary': ['acryl', 'caryl', 'clary'], + 'clasher': ['charles', 'clasher'], + 'clasp': ['clasp', 'scalp'], + 'clasper': ['clasper', 'reclasp', 'scalper'], + 'clasping': ['clasping', 'scalping'], + 'classed': ['classed', 'declass'], + 'classer': ['carless', 'classer', 'reclass'], + 'classism': ['classism', 'misclass'], + 'classwork': ['classwork', 'crosswalk'], + 'clat': ['clat', 'talc'], + 'clathrina': ['alchitran', 'clathrina'], + 'clathrose': ['clathrose', 'searcloth'], + 'clatterer': ['clatterer', 'craterlet'], + 'claude': ['caudle', 'cedula', 'claude'], + 'claudian': ['claudian', 'dulciana'], + 'claudicate': ['aciculated', 'claudicate'], + 'clause': ['caelus', 'caules', 'clause'], + 'claustral': ['claustral', 'lacustral'], + 'clava': ['caval', 'clava'], + 'clavacin': ['clavacin', 'vaccinal'], + 'clavaria': ['calvaria', 'clavaria'], + 'clave': ['calve', 'cavel', 'clave'], + 'claver': ['calver', 'carvel', 'claver'], + 'claviculate': ['calculative', 'claviculate'], + 'clavier': ['caliver', 'caviler', 'claiver', 'clavier', 'valeric', 'velaric'], + 'clavis': ['clavis', 'slavic'], + 'clay': ['acyl', 'clay', 'lacy'], + 'clayer': ['clayer', 'lacery'], + 'claytonia': ['acylation', 'claytonia'], + 'clead': ['clead', 'decal', 'laced'], + 'cleam': ['camel', 'clame', 'cleam', 'macle'], + 'cleamer': ['carmele', 'cleamer'], + 'clean': ['canel', 'clean', 'lance', 'lenca'], + 'cleaner': ['cleaner', 'reclean'], + 'cleanly': ['cleanly', 'lancely'], + 'cleanout': ['cleanout', 'outlance'], + 'cleanse': ['cleanse', 'scalene'], + 'cleanup': ['cleanup', 'unplace'], + 'clear': ['ceral', 'clare', 'clear', 'lacer'], + 'clearable': ['clearable', 'lacerable'], + 'clearer': ['clearer', 'reclear'], + 'cleat': ['cleat', 'eclat', 'ectal', 'lacet', 'tecla'], + 'clefted': ['clefted', 'deflect'], + 'cleistocarp': ['ceroplastic', 'cleistocarp', 'coreplastic'], + 'cleistogeny': ['cleistogeny', 'lysogenetic'], + 'cleoid': ['cleoid', 'coiled', 'docile'], + 'cleopatra': ['acropetal', 'cleopatra'], + 'cleric': ['circle', 'cleric'], + 'clericature': ['clericature', 'recirculate'], + 'clerkess': ['clerkess', 'reckless'], + 'clerking': ['clerking', 'reckling'], + 'clerus': ['clerus', 'cruels'], + 'clethra': ['clethra', 'latcher', 'ratchel', 'relatch', 'talcher', 'trachle'], + 'cleveite': ['cleveite', 'elective'], + 'cliche': ['chicle', 'cliche'], + 'clicker': ['clicker', 'crickle'], + 'clidastes': ['clidastes', 'discastle'], + 'clientage': ['clientage', 'genetical'], + 'cliented': ['cliented', 'denticle'], + 'cliftonia': ['cliftonia', 'fictional'], + 'clima': ['claim', 'clima', 'malic'], + 'climber': ['climber', 'reclimb'], + 'clime': ['clime', 'melic'], + 'cline': ['cinel', 'cline'], + 'clinger': ['clinger', 'cringle'], + 'clinia': ['anilic', 'clinia'], + 'clinicopathological': ['clinicopathological', 'pathologicoclinical'], + 'clinium': ['clinium', 'ulminic'], + 'clinker': ['clinker', 'crinkle'], + 'clinoclase': ['clinoclase', 'oscillance'], + 'clinoclasite': ['callisection', 'clinoclasite'], + 'clinodome': ['clinodome', 'melodicon', 'monocleid'], + 'clinology': ['clinology', 'coolingly'], + 'clinometer': ['clinometer', 'recoilment'], + 'clinospore': ['clinospore', 'necropolis'], + 'clio': ['clio', 'coil', 'coli', 'loci'], + 'cliona': ['alnico', 
'cliona', 'oilcan'], + 'clione': ['clione', 'coelin', 'encoil', 'enolic'], + 'clipeus': ['clipeus', 'spicule'], + 'clipper': ['clipper', 'cripple'], + 'clipse': ['clipse', 'splice'], + 'clipsome': ['clipsome', 'polemics'], + 'clite': ['clite', 'telic'], + 'clites': ['celtis', 'clites'], + 'clitia': ['clitia', 'italic'], + 'clition': ['clition', 'nilotic'], + 'clitoria': ['clitoria', 'loricati'], + 'clitoridean': ['clitoridean', 'directional'], + 'clitoris': ['clitoris', 'coistril'], + 'clive': ['clive', 'velic'], + 'cloacinal': ['cloacinal', 'cocillana'], + 'cloam': ['cloam', 'comal'], + 'cloamen': ['aclemon', 'cloamen'], + 'clobber': ['clobber', 'cobbler'], + 'clochan': ['chalcon', 'clochan', 'conchal'], + 'clocked': ['clocked', 'cockled'], + 'clocker': ['clocker', 'cockler'], + 'clod': ['clod', 'cold'], + 'clodder': ['clodder', 'coddler'], + 'cloggy': ['cloggy', 'coggly'], + 'cloister': ['cloister', 'coistrel'], + 'cloisteral': ['cloisteral', 'sclerotial'], + 'cloit': ['cloit', 'lotic'], + 'clonicotonic': ['clonicotonic', 'tonicoclonic'], + 'clonus': ['clonus', 'consul'], + 'clop': ['clop', 'colp'], + 'close': ['close', 'socle'], + 'closer': ['closer', 'cresol', 'escrol'], + 'closter': ['closter', 'costrel'], + 'closterium': ['closterium', 'sclerotium'], + 'clot': ['clot', 'colt'], + 'clothier': ['chlorite', 'clothier'], + 'clotho': ['clotho', 'coolth'], + 'clotter': ['clotter', 'crottle'], + 'cloture': ['cloture', 'clouter'], + 'cloud': ['cloud', 'could'], + 'clouter': ['cloture', 'clouter'], + 'clovene': ['cevenol', 'clovene'], + 'clow': ['clow', 'cowl'], + 'cloy': ['cloy', 'coly'], + 'clue': ['clue', 'luce'], + 'clumse': ['clumse', 'muscle'], + 'clumsily': ['clumsily', 'scyllium'], + 'clumsy': ['clumsy', 'muscly'], + 'clunist': ['clunist', 'linctus'], + 'clupea': ['alecup', 'clupea'], + 'clupeine': ['clupeine', 'pulicene'], + 'clusia': ['caulis', 'clusia', 'sicula'], + 'clutch': ['clutch', 'cultch'], + 'clutter': ['clutter', 'cuttler'], + 'clyde': ['clyde', 'decyl'], + 'clyer': ['ceryl', 'clyer'], + 'clymenia': ['clymenia', 'mycelian'], + 'clypeolate': ['clypeolate', 'ptyalocele'], + 'clysis': ['clysis', 'lyssic'], + 'clysmic': ['clysmic', 'cyclism'], + 'cnemial': ['cnemial', 'melanic'], + 'cnemis': ['cnemis', 'mnesic'], + 'cneorum': ['cneorum', 'corneum'], + 'cnicus': ['cnicus', 'succin'], + 'cnida': ['canid', 'cnida', 'danic'], + 'cnidaria': ['acridian', 'cnidaria'], + 'cnidian': ['cnidian', 'indican'], + 'cnidophore': ['cnidophore', 'princehood'], + 'coach': ['chaco', 'choca', 'coach'], + 'coacher': ['caroche', 'coacher', 'recoach'], + 'coachlet': ['catechol', 'coachlet'], + 'coactor': ['coactor', 'tarocco'], + 'coadamite': ['acetamido', 'coadamite'], + 'coadnate': ['anecdota', 'coadnate'], + 'coadunite': ['coadunite', 'education', 'noctuidae'], + 'coagent': ['coagent', 'cognate'], + 'coagulate': ['catalogue', 'coagulate'], + 'coaita': ['atocia', 'coaita'], + 'coal': ['alco', 'coal', 'cola', 'loca'], + 'coalbin': ['cibolan', 'coalbin'], + 'coaler': ['carole', 'coaler', 'coelar', 'oracle', 'recoal'], + 'coalite': ['aloetic', 'coalite'], + 'coalition': ['coalition', 'lociation'], + 'coalitionist': ['coalitionist', 'solicitation'], + 'coalizer': ['calorize', 'coalizer'], + 'coalpit': ['capitol', 'coalpit', 'optical', 'topical'], + 'coaltitude': ['coaltitude', 'colatitude'], + 'coaming': ['coaming', 'macigno'], + 'coan': ['coan', 'onca'], + 'coapt': ['capot', 'coapt'], + 'coarb': ['carbo', 'carob', 'coarb', 'cobra'], + 'coarrange': ['arrogance', 'coarrange'], + 'coarse': ['acrose', 
'coarse'], + 'coarsen': ['carnose', 'coarsen', 'narcose'], + 'coassert': ['castores', 'coassert'], + 'coast': ['ascot', 'coast', 'costa', 'tacso', 'tasco'], + 'coastal': ['coastal', 'salacot'], + 'coaster': ['coaster', 'recoast'], + 'coasting': ['agnostic', 'coasting'], + 'coated': ['coated', 'decoat'], + 'coater': ['coater', 'recoat'], + 'coating': ['coating', 'cotinga'], + 'coatroom': ['coatroom', 'morocota'], + 'coax': ['coax', 'coxa'], + 'cobbler': ['clobber', 'cobbler'], + 'cobia': ['baioc', 'cabio', 'cobia'], + 'cobiron': ['boronic', 'cobiron'], + 'cobitis': ['biotics', 'cobitis'], + 'cobra': ['carbo', 'carob', 'coarb', 'cobra'], + 'cocaine': ['cocaine', 'oceanic'], + 'cocainist': ['cocainist', 'siccation'], + 'cocama': ['cocama', 'macaco'], + 'cocamine': ['cocamine', 'comacine'], + 'cochleare': ['archocele', 'cochleare'], + 'cochleitis': ['cochleitis', 'ochlesitic'], + 'cocillana': ['cloacinal', 'cocillana'], + 'cocker': ['cocker', 'recock'], + 'cockily': ['cockily', 'colicky'], + 'cockled': ['clocked', 'cockled'], + 'cockler': ['clocker', 'cockler'], + 'cockup': ['cockup', 'upcock'], + 'cocreditor': ['cocreditor', 'codirector'], + 'cocurrent': ['cocurrent', 'occurrent', 'uncorrect'], + 'cod': ['cod', 'doc'], + 'codamine': ['codamine', 'comedian', 'daemonic', 'demoniac'], + 'codder': ['codder', 'corded'], + 'coddler': ['clodder', 'coddler'], + 'code': ['code', 'coed'], + 'codebtor': ['bedoctor', 'codebtor'], + 'coder': ['coder', 'cored', 'credo'], + 'coderive': ['coderive', 'divorcee'], + 'codicil': ['codicil', 'dicolic'], + 'codille': ['celloid', 'codille', 'collide', 'collied'], + 'codirector': ['cocreditor', 'codirector'], + 'codium': ['codium', 'mucoid'], + 'coed': ['code', 'coed'], + 'coeditorship': ['cheiropodist', 'coeditorship'], + 'coelar': ['carole', 'coaler', 'coelar', 'oracle', 'recoal'], + 'coelata': ['alcoate', 'coelata'], + 'coelection': ['coelection', 'entocoelic'], + 'coelia': ['aeolic', 'coelia'], + 'coelin': ['clione', 'coelin', 'encoil', 'enolic'], + 'coeline': ['cineole', 'coeline'], + 'coelogyne': ['coelogyne', 'gonyocele'], + 'coenactor': ['coenactor', 'croconate'], + 'coenobiar': ['borocaine', 'coenobiar'], + 'coenurus': ['cernuous', 'coenurus'], + 'coestate': ['coestate', 'ecostate'], + 'coeternal': ['antrocele', 'coeternal', 'tolerance'], + 'coeval': ['alcove', 'coeval', 'volcae'], + 'cofaster': ['cofaster', 'forecast'], + 'coferment': ['coferment', 'forcement'], + 'cogeneric': ['cogeneric', 'concierge'], + 'coggly': ['cloggy', 'coggly'], + 'cognate': ['coagent', 'cognate'], + 'cognatical': ['cognatical', 'galactonic'], + 'cognation': ['cognation', 'contagion'], + 'cognition': ['cognition', 'incognito'], + 'cognominal': ['cognominal', 'gnomonical'], + 'cogon': ['cogon', 'congo'], + 'cograil': ['argolic', 'cograil'], + 'coheir': ['coheir', 'heroic'], + 'cohen': ['cohen', 'enoch'], + 'cohere': ['choree', 'cohere', 'echoer'], + 'cohol': ['cholo', 'cohol'], + 'cohune': ['cohune', 'hounce'], + 'coif': ['coif', 'fico', 'foci'], + 'coign': ['coign', 'incog'], + 'coil': ['clio', 'coil', 'coli', 'loci'], + 'coiled': ['cleoid', 'coiled', 'docile'], + 'coiler': ['coiler', 'recoil'], + 'coin': ['cion', 'coin', 'icon'], + 'coinclude': ['coinclude', 'undecolic'], + 'coiner': ['cerion', 'coiner', 'neroic', 'orcein', 'recoin'], + 'coinfer': ['coinfer', 'conifer'], + 'coinherence': ['coinherence', 'incoherence'], + 'coinherent': ['coinherent', 'incoherent'], + 'coinitial': ['ciliation', 'coinitial'], + 'coinmate': ['coinmate', 'maconite'], + 'coinspire': ['coinspire', 
'precision'], + 'coinsure': ['coinsure', 'corineus', 'cusinero'], + 'cointer': ['cerotin', 'cointer', 'cotrine', 'cretion', 'noticer', 'rection'], + 'cointreau': ['cautioner', 'cointreau'], + 'coistrel': ['cloister', 'coistrel'], + 'coistril': ['clitoris', 'coistril'], + 'coix': ['cixo', 'coix'], + 'coker': ['coker', 'corke', 'korec'], + 'coky': ['coky', 'yock'], + 'cola': ['alco', 'coal', 'cola', 'loca'], + 'colalgia': ['alogical', 'colalgia'], + 'colan': ['colan', 'conal'], + 'colane': ['canelo', 'colane'], + 'colarin': ['carolin', 'clarion', 'colarin', 'locrian'], + 'colate': ['acetol', 'colate', 'locate'], + 'colation': ['colation', 'coontail', 'location'], + 'colatitude': ['coaltitude', 'colatitude'], + 'colchian': ['cholanic', 'colchian'], + 'colcine': ['colcine', 'concile', 'conicle'], + 'colcothar': ['colcothar', 'ochlocrat'], + 'cold': ['clod', 'cold'], + 'colder': ['cedrol', 'colder', 'cordel'], + 'colectomy': ['colectomy', 'cyclotome'], + 'colemanite': ['colemanite', 'melaconite'], + 'coleur': ['coleur', 'colure'], + 'coleus': ['coleus', 'oscule'], + 'coli': ['clio', 'coil', 'coli', 'loci'], + 'colias': ['colias', 'scolia', 'social'], + 'colicky': ['cockily', 'colicky'], + 'colima': ['colima', 'olamic'], + 'colin': ['colin', 'nicol'], + 'colinear': ['acrolein', + 'arecolin', + 'caroline', + 'colinear', + 'cornelia', + 'creolian', + 'lonicera'], + 'colitis': ['colitis', 'solicit'], + 'colk': ['colk', 'lock'], + 'colla': ['callo', 'colla', 'local'], + 'collage': ['alcogel', 'collage'], + 'collare': ['collare', 'corella', 'ocellar'], + 'collaret': ['collaret', 'corallet'], + 'collarino': ['collarino', 'coronilla'], + 'collatee': ['collatee', 'ocellate'], + 'collationer': ['collationer', 'recollation'], + 'collectioner': ['collectioner', 'recollection'], + 'collegial': ['collegial', 'gallicole'], + 'collegian': ['allogenic', 'collegian'], + 'colleri': ['cerillo', 'colleri', 'collier'], + 'colleter': ['colleter', 'coteller', 'coterell', 'recollet'], + 'colletia': ['colletia', 'teocalli'], + 'collide': ['celloid', 'codille', 'collide', 'collied'], + 'collidine': ['celloidin', 'collidine', 'decillion'], + 'collie': ['collie', 'ocelli'], + 'collied': ['celloid', 'codille', 'collide', 'collied'], + 'collier': ['cerillo', 'colleri', 'collier'], + 'colligate': ['colligate', 'cotillage'], + 'colline': ['colline', 'lioncel'], + 'collinear': ['collinear', 'coralline'], + 'collinsia': ['collinsia', 'isoclinal'], + 'collotypy': ['collotypy', 'polycotyl'], + 'collusive': ['collusive', 'colluvies'], + 'colluvies': ['collusive', 'colluvies'], + 'collyba': ['callboy', 'collyba'], + 'colmar': ['clamor', 'colmar'], + 'colobus': ['colobus', 'subcool'], + 'coloenteritis': ['coloenteritis', 'enterocolitis'], + 'colombian': ['colombian', 'colombina'], + 'colombina': ['colombian', 'colombina'], + 'colonalgia': ['colonalgia', 'naological'], + 'colonate': ['colonate', 'ecotonal'], + 'colonialist': ['colonialist', 'oscillation'], + 'coloproctitis': ['coloproctitis', 'proctocolitis'], + 'color': ['color', 'corol', 'crool'], + 'colored': ['colored', 'croodle', 'decolor'], + 'colorer': ['colorer', 'recolor'], + 'colorin': ['colorin', 'orcinol'], + 'colorman': ['colorman', 'conormal'], + 'colp': ['clop', 'colp'], + 'colpeurynter': ['colpeurynter', 'counterreply'], + 'colpitis': ['colpitis', 'politics', 'psilotic'], + 'colporrhagia': ['colporrhagia', 'orographical'], + 'colt': ['clot', 'colt'], + 'colter': ['colter', 'lector', 'torcel'], + 'coltskin': ['coltskin', 'linstock'], + 'colubrina': ['binocular', 
'caliburno', 'colubrina'], + 'columbo': ['columbo', 'coulomb'], + 'columbotitanate': ['columbotitanate', 'titanocolumbate'], + 'columnated': ['columnated', 'documental'], + 'columned': ['columned', 'uncledom'], + 'colunar': ['colunar', 'cornual', 'courlan'], + 'colure': ['coleur', 'colure'], + 'colutea': ['caulote', 'colutea', 'oculate'], + 'coly': ['cloy', 'coly'], + 'coma': ['coma', 'maco'], + 'comacine': ['cocamine', 'comacine'], + 'comal': ['cloam', 'comal'], + 'coman': ['coman', 'macon', 'manoc'], + 'comart': ['carmot', 'comart'], + 'comate': ['comate', 'metoac', 'tecoma'], + 'combat': ['combat', 'tombac'], + 'comber': ['comber', 'recomb'], + 'combinedly': ['combinedly', 'molybdenic'], + 'comedial': ['cameloid', 'comedial', 'melodica'], + 'comedian': ['codamine', 'comedian', 'daemonic', 'demoniac'], + 'comediant': ['comediant', 'metaconid'], + 'comedist': ['comedist', 'demotics', 'docetism', 'domestic'], + 'comedown': ['comedown', 'downcome'], + 'comeliness': ['comeliness', 'incomeless'], + 'comeling': ['comeling', 'comingle'], + 'comenic': ['comenic', 'encomic', 'meconic'], + 'comer': ['comer', 'crome'], + 'comforter': ['comforter', 'recomfort'], + 'comid': ['comid', 'domic'], + 'coming': ['coming', 'gnomic'], + 'comingle': ['comeling', 'comingle'], + 'comintern': ['comintern', 'nonmetric'], + 'comitia': ['caimito', 'comitia'], + 'comitragedy': ['comitragedy', 'tragicomedy'], + 'comity': ['comity', 'myotic'], + 'commander': ['commander', 'recommand'], + 'commation': ['commation', 'monatomic'], + 'commelina': ['commelina', 'melomanic'], + 'commender': ['commender', 'recommend'], + 'commentarial': ['commentarial', 'manometrical'], + 'commination': ['commination', 'monamniotic'], + 'commissioner': ['commissioner', 'recommission'], + 'commonality': ['ammonolytic', 'commonality'], + 'commorient': ['commorient', 'metronomic', 'monometric'], + 'compact': ['accompt', 'compact'], + 'compacter': ['compacter', 'recompact'], + 'company': ['company', 'copyman'], + 'compare': ['compare', 'compear'], + 'comparition': ['comparition', 'proamniotic'], + 'compasser': ['compasser', 'recompass'], + 'compear': ['compare', 'compear'], + 'compenetrate': ['compenetrate', 'contemperate'], + 'competitioner': ['competitioner', 'recompetition'], + 'compile': ['compile', 'polemic'], + 'compiler': ['compiler', 'complier'], + 'complainer': ['complainer', 'procnemial', 'recomplain'], + 'complaint': ['complaint', 'compliant'], + 'complanate': ['complanate', 'placentoma'], + 'compliant': ['complaint', 'compliant'], + 'complier': ['compiler', 'complier'], + 'compounder': ['compounder', 'recompound'], + 'comprehender': ['comprehender', 'recomprehend'], + 'compressed': ['compressed', 'decompress'], + 'comprise': ['comprise', 'perosmic'], + 'compromission': ['compromission', 'procommission'], + 'conal': ['colan', 'conal'], + 'conamed': ['conamed', 'macedon'], + 'conant': ['cannot', 'canton', 'conant', 'nonact'], + 'conarial': ['carolina', 'conarial'], + 'conarium': ['conarium', 'coumarin'], + 'conative': ['conative', 'invocate'], + 'concealer': ['concealer', 'reconceal'], + 'concent': ['concent', 'connect'], + 'concenter': ['concenter', 'reconnect'], + 'concentive': ['concentive', 'connective'], + 'concertize': ['concertize', 'concretize'], + 'concessioner': ['concessioner', 'reconcession'], + 'concha': ['chanco', 'concha'], + 'conchal': ['chalcon', 'clochan', 'conchal'], + 'conchinine': ['cinchonine', 'conchinine'], + 'concierge': ['cogeneric', 'concierge'], + 'concile': ['colcine', 'concile', 'conicle'], + 'concocter': 
['concocter', 'reconcoct'], + 'concreter': ['concreter', 'reconcert'], + 'concretize': ['concertize', 'concretize'], + 'concretor': ['concretor', 'conrector'], + 'condalia': ['cladonia', 'condalia', 'diaconal'], + 'condemner': ['condemner', 'recondemn'], + 'condite': ['condite', 'ctenoid'], + 'conditioner': ['conditioner', 'recondition'], + 'condor': ['condor', 'cordon'], + 'conduit': ['conduit', 'duction', 'noctuid'], + 'condylomatous': ['condylomatous', 'monodactylous'], + 'cone': ['cone', 'once'], + 'conepate': ['conepate', 'tepecano'], + 'coner': ['coner', 'crone', 'recon'], + 'cones': ['cones', 'scone'], + 'confesser': ['confesser', 'reconfess'], + 'configurationism': ['configurationism', 'misconfiguration'], + 'confirmer': ['confirmer', 'reconfirm'], + 'confirmor': ['confirmor', 'corniform'], + 'conflate': ['conflate', 'falconet'], + 'conformer': ['conformer', 'reconform'], + 'confounder': ['confounder', 'reconfound'], + 'confrere': ['confrere', 'enforcer', 'reconfer'], + 'confronter': ['confronter', 'reconfront'], + 'congealer': ['congealer', 'recongeal'], + 'congeneric': ['congeneric', 'necrogenic'], + 'congenerous': ['congenerous', 'necrogenous'], + 'congenial': ['congenial', 'goclenian'], + 'congo': ['cogon', 'congo'], + 'congreet': ['congreet', 'coregent'], + 'congreve': ['congreve', 'converge'], + 'conical': ['conical', 'laconic'], + 'conicine': ['ciconine', 'conicine'], + 'conicle': ['colcine', 'concile', 'conicle'], + 'conicoid': ['conicoid', 'conoidic'], + 'conidium': ['conidium', 'mucinoid', 'oncidium'], + 'conifer': ['coinfer', 'conifer'], + 'conima': ['camion', 'conima', 'manioc', 'monica'], + 'conin': ['conin', 'nonic', 'oncin'], + 'conine': ['conine', 'connie', 'ennoic'], + 'conjoiner': ['conjoiner', 'reconjoin'], + 'conk': ['conk', 'nock'], + 'conker': ['conker', 'reckon'], + 'conkers': ['conkers', 'snocker'], + 'connarite': ['connarite', 'container', 'cotarnine', 'crenation', 'narcotine'], + 'connatal': ['cantonal', 'connatal'], + 'connation': ['connation', 'nonaction'], + 'connature': ['antecornu', 'connature'], + 'connect': ['concent', 'connect'], + 'connectival': ['connectival', 'conventical'], + 'connective': ['concentive', 'connective'], + 'connie': ['conine', 'connie', 'ennoic'], + 'connoissance': ['connoissance', 'nonaccession'], + 'conoidal': ['conoidal', 'dolciano'], + 'conoidic': ['conicoid', 'conoidic'], + 'conor': ['conor', 'croon', 'ronco'], + 'conormal': ['colorman', 'conormal'], + 'conoy': ['conoy', 'coony'], + 'conrad': ['candor', 'cardon', 'conrad'], + 'conrector': ['concretor', 'conrector'], + 'conred': ['cedron', 'conred'], + 'conringia': ['conringia', 'inorganic'], + 'consenter': ['consenter', 'nonsecret', 'reconsent'], + 'conservable': ['conservable', 'conversable'], + 'conservancy': ['conservancy', 'conversancy'], + 'conservant': ['conservant', 'conversant'], + 'conservation': ['conservation', 'conversation'], + 'conservational': ['conservational', 'conversational'], + 'conservationist': ['conservationist', 'conversationist'], + 'conservative': ['conservative', 'conversative'], + 'conserve': ['conserve', 'converse'], + 'conserver': ['conserver', 'converser'], + 'considerate': ['considerate', 'desecration'], + 'considerative': ['considerative', 'devisceration'], + 'considered': ['considered', 'deconsider'], + 'considerer': ['considerer', 'reconsider'], + 'consigner': ['consigner', 'reconsign'], + 'conspiracy': ['conspiracy', 'snipocracy'], + 'conspire': ['conspire', 'incorpse'], + 'constate': ['catstone', 'constate'], + 'constitutionalism': 
['constitutionalism', 'misconstitutional'], + 'constitutioner': ['constitutioner', 'reconstitution'], + 'constrain': ['constrain', 'transonic'], + 'constructer': ['constructer', 'reconstruct'], + 'constructionism': ['constructionism', 'misconstruction'], + 'consul': ['clonus', 'consul'], + 'consulage': ['consulage', 'glucosane'], + 'consulary': ['consulary', 'cynosural'], + 'consulter': ['consulter', 'reconsult'], + 'consume': ['consume', 'muscone'], + 'consumer': ['consumer', 'mucrones'], + 'consute': ['consute', 'contuse'], + 'contagion': ['cognation', 'contagion'], + 'contain': ['actinon', 'cantion', 'contain'], + 'container': ['connarite', 'container', 'cotarnine', 'crenation', 'narcotine'], + 'conte': ['cento', 'conte', 'tecon'], + 'contemperate': ['compenetrate', 'contemperate'], + 'contender': ['contender', 'recontend'], + 'conter': ['conter', 'cornet', 'cronet', 'roncet'], + 'conterminal': ['centinormal', 'conterminal', 'nonmetrical'], + 'contester': ['contester', 'recontest'], + 'continual': ['continual', 'inoculant', 'unctional'], + 'continued': ['continued', 'unnoticed'], + 'continuer': ['centurion', 'continuer', 'cornutine'], + 'contise': ['contise', 'noetics', 'section'], + 'contline': ['contline', 'nonlicet'], + 'contortae': ['contortae', 'crotonate'], + 'contour': ['contour', 'cornuto', 'countor', 'crouton'], + 'contra': ['cantor', 'carton', 'contra'], + 'contracter': ['contracter', 'correctant', 'recontract'], + 'contrapose': ['antroscope', 'contrapose'], + 'contravene': ['contravene', 'covenanter'], + 'contrite': ['contrite', 'tetronic'], + 'contrive': ['contrive', 'invector'], + 'conturbation': ['conturbation', 'obtruncation'], + 'contuse': ['consute', 'contuse'], + 'conure': ['conure', 'rounce', 'uncore'], + 'conventical': ['connectival', 'conventical'], + 'conventioner': ['conventioner', 'reconvention'], + 'converge': ['congreve', 'converge'], + 'conversable': ['conservable', 'conversable'], + 'conversancy': ['conservancy', 'conversancy'], + 'conversant': ['conservant', 'conversant'], + 'conversation': ['conservation', 'conversation'], + 'conversational': ['conservational', 'conversational'], + 'conversationist': ['conservationist', 'conversationist'], + 'conversative': ['conservative', 'conversative'], + 'converse': ['conserve', 'converse'], + 'converser': ['conserver', 'converser'], + 'converter': ['converter', 'reconvert'], + 'convertise': ['convertise', 'ventricose'], + 'conveyer': ['conveyer', 'reconvey'], + 'conycatcher': ['conycatcher', 'technocracy'], + 'conyrine': ['conyrine', 'corynine'], + 'cooker': ['cooker', 'recook'], + 'cool': ['cool', 'loco'], + 'coolant': ['coolant', 'octonal'], + 'cooler': ['cooler', 'recool'], + 'coolingly': ['clinology', 'coolingly'], + 'coolth': ['clotho', 'coolth'], + 'coolweed': ['coolweed', 'locoweed'], + 'cooly': ['cooly', 'coyol'], + 'coonroot': ['coonroot', 'octoroon'], + 'coontail': ['colation', 'coontail', 'location'], + 'coony': ['conoy', 'coony'], + 'coop': ['coop', 'poco'], + 'coos': ['coos', 'soco'], + 'coost': ['coost', 'scoot'], + 'coot': ['coot', 'coto', 'toco'], + 'copa': ['copa', 'paco'], + 'copable': ['copable', 'placebo'], + 'copalite': ['copalite', 'poetical'], + 'coparent': ['coparent', 'portance'], + 'copart': ['captor', 'copart'], + 'copartner': ['copartner', 'procreant'], + 'copatain': ['copatain', 'pacation'], + 'copehan': ['copehan', 'panoche', 'phocean'], + 'copen': ['copen', 'ponce'], + 'coperta': ['coperta', 'pectora', 'porcate'], + 'copied': ['copied', 'epodic'], + 'copis': ['copis', 'pisco'], + 
'copist': ['copist', 'coptis', 'optics', 'postic'], + 'copita': ['atopic', 'capito', 'copita'], + 'coplanar': ['coplanar', 'procanal'], + 'copleased': ['copleased', 'escaloped'], + 'copperer': ['copperer', 'recopper'], + 'coppery': ['coppery', 'precopy'], + 'copr': ['copr', 'corp', 'crop'], + 'coprinae': ['caponier', 'coprinae', 'procaine'], + 'coprinus': ['coprinus', 'poncirus'], + 'coprolagnia': ['carpogonial', 'coprolagnia'], + 'coprophagist': ['coprophagist', 'topographics'], + 'coprose': ['coprose', 'scooper'], + 'copse': ['copse', 'pecos', 'scope'], + 'copter': ['ceptor', 'copter'], + 'coptis': ['copist', 'coptis', 'optics', 'postic'], + 'copula': ['copula', 'cupola'], + 'copular': ['copular', 'croupal', 'cupolar', 'porcula'], + 'copulate': ['copulate', 'outplace'], + 'copulation': ['copulation', 'poculation'], + 'copus': ['copus', 'scoup'], + 'copyman': ['company', 'copyman'], + 'copyrighter': ['copyrighter', 'recopyright'], + 'coque': ['coque', 'ocque'], + 'coquitlam': ['coquitlam', 'quamoclit'], + 'cor': ['cor', 'cro', 'orc', 'roc'], + 'cora': ['acor', 'caro', 'cora', 'orca'], + 'coraciae': ['coraciae', 'icacorea'], + 'coracial': ['caracoli', 'coracial'], + 'coracias': ['coracias', 'rascacio'], + 'coradicate': ['coradicate', 'ectocardia'], + 'corah': ['achor', 'chora', 'corah', 'orach', 'roach'], + 'coraise': ['coraise', 'scoriae'], + 'coral': ['alcor', 'calor', 'carlo', 'carol', 'claro', 'coral'], + 'coraled': ['acerdol', 'coraled'], + 'coralist': ['calorist', 'coralist'], + 'corallet': ['collaret', 'corallet'], + 'corallian': ['corallian', 'corallina'], + 'corallina': ['corallian', 'corallina'], + 'coralline': ['collinear', 'coralline'], + 'corallite': ['corallite', 'lectorial'], + 'coram': ['carom', 'coram', 'macro', 'marco'], + 'coranto': ['cartoon', 'coranto'], + 'corban': ['bracon', 'carbon', 'corban'], + 'corbeil': ['bricole', 'corbeil', 'orbicle'], + 'cordaitean': ['arctoidean', 'carotidean', 'cordaitean', 'dinocerata'], + 'cordate': ['cordate', 'decator', 'redcoat'], + 'corded': ['codder', 'corded'], + 'cordel': ['cedrol', 'colder', 'cordel'], + 'corder': ['corder', 'record'], + 'cordia': ['caroid', 'cordia'], + 'cordial': ['cordial', 'dorical'], + 'cordicole': ['cordicole', 'crocodile'], + 'cordierite': ['cordierite', 'directoire'], + 'cordoba': ['bocardo', 'cordoba'], + 'cordon': ['condor', 'cordon'], + 'core': ['cero', 'core'], + 'cored': ['coder', 'cored', 'credo'], + 'coregent': ['congreet', 'coregent'], + 'coreless': ['coreless', 'sclerose'], + 'corella': ['collare', 'corella', 'ocellar'], + 'corema': ['ceroma', 'corema'], + 'coreplastic': ['ceroplastic', 'cleistocarp', 'coreplastic'], + 'coreplasty': ['ceroplasty', 'coreplasty'], + 'corer': ['corer', 'crore'], + 'coresidual': ['coresidual', 'radiculose'], + 'coresign': ['coresign', 'cosigner'], + 'corge': ['corge', 'gorce'], + 'corgi': ['corgi', 'goric', 'orgic'], + 'corial': ['caroli', 'corial', 'lorica'], + 'coriamyrtin': ['coriamyrtin', 'criminatory'], + 'corin': ['corin', 'noric', 'orcin'], + 'corindon': ['corindon', 'nodicorn'], + 'corineus': ['coinsure', 'corineus', 'cusinero'], + 'corinna': ['corinna', 'cronian'], + 'corinne': ['corinne', 'cornein', 'neronic'], + 'cork': ['cork', 'rock'], + 'corke': ['coker', 'corke', 'korec'], + 'corked': ['corked', 'docker', 'redock'], + 'corker': ['corker', 'recork', 'rocker'], + 'corkiness': ['corkiness', 'rockiness'], + 'corking': ['corking', 'rocking'], + 'corkish': ['corkish', 'rockish'], + 'corkwood': ['corkwood', 'rockwood', 'woodrock'], + 'corky': ['corky', 
'rocky'], + 'corm': ['corm', 'crom'], + 'cormophyte': ['chromotype', 'cormophyte', 'ectomorphy'], + 'cormophytic': ['chromotypic', 'cormophytic', 'mycotrophic'], + 'cornage': ['acrogen', 'cornage'], + 'cornea': ['carone', 'cornea'], + 'corneal': ['carneol', 'corneal'], + 'cornein': ['corinne', 'cornein', 'neronic'], + 'cornelia': ['acrolein', + 'arecolin', + 'caroline', + 'colinear', + 'cornelia', + 'creolian', + 'lonicera'], + 'cornelius': ['cornelius', 'inclosure', 'reclusion'], + 'corneocalcareous': ['calcareocorneous', 'corneocalcareous'], + 'cornet': ['conter', 'cornet', 'cronet', 'roncet'], + 'corneum': ['cneorum', 'corneum'], + 'cornic': ['cornic', 'crocin'], + 'cornice': ['cornice', 'crocein'], + 'corniform': ['confirmor', 'corniform'], + 'cornin': ['cornin', 'rincon'], + 'cornish': ['cornish', 'cronish', 'sorchin'], + 'cornual': ['colunar', 'cornual', 'courlan'], + 'cornuate': ['cornuate', 'courante', 'cuneator', 'outrance'], + 'cornuated': ['cornuated', 'undercoat'], + 'cornucopiate': ['cornucopiate', 'reoccupation'], + 'cornulites': ['cornulites', 'uncloister'], + 'cornute': ['cornute', 'counter', 'recount', 'trounce'], + 'cornutine': ['centurion', 'continuer', 'cornutine'], + 'cornuto': ['contour', 'cornuto', 'countor', 'crouton'], + 'corny': ['corny', 'crony'], + 'corol': ['color', 'corol', 'crool'], + 'corollated': ['corollated', 'decollator'], + 'corona': ['caroon', 'corona', 'racoon'], + 'coronad': ['cardoon', 'coronad'], + 'coronadite': ['carotenoid', 'coronadite', 'decoration'], + 'coronal': ['coronal', 'locarno'], + 'coronaled': ['canoodler', 'coronaled'], + 'coronate': ['coronate', 'octonare', 'otocrane'], + 'coronated': ['coronated', 'creodonta'], + 'coroner': ['coroner', 'crooner', 'recroon'], + 'coronilla': ['collarino', 'coronilla'], + 'corp': ['copr', 'corp', 'crop'], + 'corporealist': ['corporealist', 'prosectorial'], + 'corradiate': ['corradiate', 'cortaderia', 'eradicator'], + 'correal': ['caroler', 'correal'], + 'correctant': ['contracter', 'correctant', 'recontract'], + 'correctioner': ['correctioner', 'recorrection'], + 'corrente': ['corrente', 'terceron'], + 'correption': ['correption', 'porrection'], + 'corrodentia': ['corrodentia', 'recordation'], + 'corrupter': ['corrupter', 'recorrupt'], + 'corsage': ['corsage', 'socager'], + 'corsaint': ['cantoris', 'castorin', 'corsaint'], + 'corse': ['corse', 'score'], + 'corselet': ['corselet', 'sclerote', 'selector'], + 'corset': ['corset', 'cortes', 'coster', 'escort', 'scoter', 'sector'], + 'corta': ['actor', 'corta', 'croat', 'rocta', 'taroc', 'troca'], + 'cortaderia': ['corradiate', 'cortaderia', 'eradicator'], + 'cortes': ['corset', 'cortes', 'coster', 'escort', 'scoter', 'sector'], + 'cortical': ['cortical', 'crotalic'], + 'cortices': ['cortices', 'cresotic'], + 'corticose': ['corticose', 'creosotic'], + 'cortin': ['citron', 'cortin', 'crotin'], + 'cortina': ['anticor', 'carotin', 'cortina', 'ontaric'], + 'cortinate': ['carnotite', 'cortinate'], + 'cortisone': ['certosino', 'cortisone', 'socotrine'], + 'corton': ['corton', 'croton'], + 'corvinae': ['corvinae', 'veronica'], + 'cory': ['cory', 'croy'], + 'corycavidine': ['cervicodynia', 'corycavidine'], + 'corydon': ['corydon', 'croydon'], + 'corynine': ['conyrine', 'corynine'], + 'corypha': ['charpoy', 'corypha'], + 'coryphene': ['coryphene', 'hypercone'], + 'cos': ['cos', 'osc', 'soc'], + 'cosalite': ['cosalite', 'societal'], + 'coset': ['coset', 'estoc', 'scote'], + 'cosh': ['cosh', 'scho'], + 'cosharer': ['cosharer', 'horsecar'], + 'cosigner': ['coresign', 
'cosigner'], + 'cosine': ['cosine', 'oscine'], + 'cosmati': ['atomics', 'catoism', 'cosmati', 'osmatic', 'somatic'], + 'cosmetical': ['cacomistle', 'cosmetical'], + 'cosmetician': ['cosmetician', 'encomiastic'], + 'cosmist': ['cosmist', 'scotism'], + 'cossack': ['cassock', 'cossack'], + 'cosse': ['cosse', 'secos'], + 'cost': ['cost', 'scot'], + 'costa': ['ascot', 'coast', 'costa', 'tacso', 'tasco'], + 'costar': ['arctos', 'castor', 'costar', 'scrota'], + 'costean': ['costean', 'tsoneca'], + 'coster': ['corset', 'cortes', 'coster', 'escort', 'scoter', 'sector'], + 'costing': ['costing', 'gnostic'], + 'costispinal': ['costispinal', 'pansciolist'], + 'costmary': ['arctomys', 'costmary', 'mascotry'], + 'costochondral': ['chondrocostal', 'costochondral'], + 'costosternal': ['costosternal', 'sternocostal'], + 'costovertebral': ['costovertebral', 'vertebrocostal'], + 'costrel': ['closter', 'costrel'], + 'costula': ['costula', 'locusta', 'talcous'], + 'costumer': ['costumer', 'customer'], + 'cosurety': ['cosurety', 'courtesy'], + 'cosustain': ['cosustain', 'scusation'], + 'cotarius': ['cotarius', 'octarius', 'suctoria'], + 'cotarnine': ['connarite', 'container', 'cotarnine', 'crenation', 'narcotine'], + 'cote': ['cote', 'teco'], + 'coteline': ['coteline', 'election'], + 'coteller': ['colleter', 'coteller', 'coterell', 'recollet'], + 'coterell': ['colleter', 'coteller', 'coterell', 'recollet'], + 'cotesian': ['canoeist', 'cotesian'], + 'coth': ['coth', 'ocht'], + 'cotidal': ['cotidal', 'lactoid', 'talcoid'], + 'cotillage': ['colligate', 'cotillage'], + 'cotillion': ['cotillion', 'octillion'], + 'cotinga': ['coating', 'cotinga'], + 'cotinus': ['cotinus', 'suction', 'unstoic'], + 'cotise': ['cotise', 'oecist'], + 'coto': ['coot', 'coto', 'toco'], + 'cotranspire': ['cotranspire', 'pornerastic'], + 'cotrine': ['cerotin', 'cointer', 'cotrine', 'cretion', 'noticer', 'rection'], + 'cotripper': ['cotripper', 'periproct'], + 'cotte': ['cotte', 'octet'], + 'cotylosaur': ['cotylosaur', 'osculatory'], + 'cotype': ['cotype', 'ectopy'], + 'coude': ['coude', 'douce'], + 'could': ['cloud', 'could'], + 'coulisse': ['coulisse', 'leucosis', 'ossicule'], + 'coulomb': ['columbo', 'coulomb'], + 'coumalic': ['caulomic', 'coumalic'], + 'coumarin': ['conarium', 'coumarin'], + 'counsel': ['counsel', 'unclose'], + 'counter': ['cornute', 'counter', 'recount', 'trounce'], + 'counteracter': ['counteracter', 'countercarte'], + 'countercarte': ['counteracter', 'countercarte'], + 'countercharm': ['countercharm', 'countermarch'], + 'counterguard': ['counterguard', 'uncorrugated'], + 'counteridea': ['counteridea', 'decurionate'], + 'countermarch': ['countercharm', 'countermarch'], + 'counterpaled': ['counterpaled', 'counterplead', 'unpercolated'], + 'counterpaly': ['counterpaly', 'counterplay'], + 'counterplay': ['counterpaly', 'counterplay'], + 'counterplead': ['counterpaled', 'counterplead', 'unpercolated'], + 'counterreply': ['colpeurynter', 'counterreply'], + 'countersale': ['countersale', 'counterseal'], + 'countersea': ['countersea', 'nectareous'], + 'counterseal': ['countersale', 'counterseal'], + 'countershade': ['countershade', 'decantherous'], + 'counterstand': ['counterstand', 'uncontrasted'], + 'countertail': ['countertail', 'reluctation'], + 'countertrades': ['countertrades', 'unstercorated'], + 'countervail': ['countervail', 'involucrate'], + 'countervair': ['countervair', 'overcurtain', 'recurvation'], + 'countor': ['contour', 'cornuto', 'countor', 'crouton'], + 'coupe': ['coupe', 'pouce'], + 'couper': ['couper', 
'croupe', 'poucer', 'recoup'], + 'couplement': ['couplement', 'uncomplete'], + 'couplet': ['couplet', 'octuple'], + 'coupon': ['coupon', 'uncoop'], + 'couponed': ['couponed', 'uncooped'], + 'courante': ['cornuate', 'courante', 'cuneator', 'outrance'], + 'courbaril': ['courbaril', 'orbicular'], + 'courlan': ['colunar', 'cornual', 'courlan'], + 'cours': ['cours', 'scour'], + 'course': ['cerous', 'course', 'crouse', 'source'], + 'coursed': ['coursed', 'scoured'], + 'courser': ['courser', 'scourer'], + 'coursing': ['coursing', 'scouring'], + 'court': ['court', 'crout', 'turco'], + 'courtesan': ['acentrous', 'courtesan', 'nectarous'], + 'courtesy': ['cosurety', 'courtesy'], + 'courtier': ['courtier', 'outcrier'], + 'courtiership': ['courtiership', 'peritrichous'], + 'courtin': ['courtin', 'ruction'], + 'courtman': ['courtman', 'turcoman'], + 'couter': ['couter', 'croute'], + 'couth': ['couth', 'thuoc', 'touch'], + 'couthily': ['couthily', 'touchily'], + 'couthiness': ['couthiness', 'touchiness'], + 'couthless': ['couthless', 'touchless'], + 'coutil': ['coutil', 'toluic'], + 'covenanter': ['contravene', 'covenanter'], + 'coverer': ['coverer', 'recover'], + 'coversine': ['coversine', 'vernicose'], + 'covert': ['covert', 'vector'], + 'covisit': ['covisit', 'ovistic'], + 'cowardy': ['cowardy', 'cowyard'], + 'cowherd': ['chowder', 'cowherd'], + 'cowl': ['clow', 'cowl'], + 'cowyard': ['cowardy', 'cowyard'], + 'coxa': ['coax', 'coxa'], + 'coxite': ['coxite', 'exotic'], + 'coyness': ['coyness', 'sycones'], + 'coyol': ['cooly', 'coyol'], + 'coyote': ['coyote', 'oocyte'], + 'craber': ['bracer', 'craber'], + 'crabhole': ['bachelor', 'crabhole'], + 'crablet': ['beclart', 'crablet'], + 'crackable': ['blackacre', 'crackable'], + 'crackle': ['cackler', 'clacker', 'crackle'], + 'crackmans': ['crackmans', 'cracksman'], + 'cracksman': ['crackmans', 'cracksman'], + 'cradge': ['cadger', 'cradge'], + 'cradle': ['cardel', 'cradle'], + 'cradlemate': ['cradlemate', 'malcreated'], + 'craig': ['cigar', 'craig'], + 'crain': ['cairn', 'crain', 'naric'], + 'crake': ['acker', 'caker', 'crake', 'creak'], + 'cram': ['cram', 'marc'], + 'cramasie': ['cramasie', 'mesaraic'], + 'crambe': ['becram', 'camber', 'crambe'], + 'crambidae': ['carbamide', 'crambidae'], + 'crambinae': ['carbamine', 'crambinae'], + 'cramble': ['cambrel', 'clamber', 'cramble'], + 'cramper': ['cramper', 'recramp'], + 'crampon': ['crampon', 'cropman'], + 'cranage': ['carnage', 'cranage', 'garance'], + 'crance': ['cancer', 'crance'], + 'crane': ['caner', 'crane', 'crena', 'nacre', 'rance'], + 'craner': ['craner', 'rancer'], + 'craney': ['carney', 'craney'], + 'crania': ['acinar', + 'arnica', + 'canari', + 'carian', + 'carina', + 'crania', + 'narica'], + 'craniad': ['acridan', 'craniad'], + 'cranial': ['carinal', 'carlina', 'clarain', 'cranial'], + 'cranially': ['ancillary', 'carlylian', 'cranially'], + 'cranian': ['canarin', 'cranian'], + 'craniate': ['anaretic', 'arcanite', 'carinate', 'craniate'], + 'cranic': ['cancri', 'carnic', 'cranic'], + 'craniectomy': ['craniectomy', 'cyanometric'], + 'craniognomy': ['craniognomy', 'organonymic'], + 'craniota': ['craniota', 'croatian', 'narcotia', 'raincoat'], + 'cranker': ['cranker', 'recrank'], + 'crap': ['carp', 'crap'], + 'crape': ['caper', 'crape', 'pacer', 'perca', 'recap'], + 'crappie': ['crappie', 'epicarp'], + 'crapple': ['clapper', 'crapple'], + 'crappo': ['crappo', 'croppa'], + 'craps': ['craps', 'scarp', 'scrap'], + 'crapulous': ['crapulous', 'opuscular'], + 'crare': ['carer', 'crare', 'racer'], + 'crate': 
['caret', + 'carte', + 'cater', + 'crate', + 'creat', + 'creta', + 'react', + 'recta', + 'trace'], + 'crateful': ['crateful', 'fulcrate'], + 'crater': ['arrect', 'carter', 'crater', 'recart', 'tracer'], + 'craterid': ['cirrated', 'craterid'], + 'crateriform': ['crateriform', 'terraciform'], + 'crateris': ['crateris', 'serratic'], + 'craterlet': ['clatterer', 'craterlet'], + 'craterous': ['craterous', 'recusator'], + 'cratinean': ['cratinean', 'incarnate', 'nectarian'], + 'cratometric': ['cratometric', 'metrocratic'], + 'crave': ['carve', 'crave', 'varec'], + 'craven': ['carven', 'cavern', 'craven'], + 'craver': ['carver', 'craver'], + 'craving': ['carving', 'craving'], + 'crayon': ['canroy', 'crayon', 'cyrano', 'nyroca'], + 'crayonist': ['carnosity', 'crayonist'], + 'crea': ['acer', 'acre', 'care', 'crea', 'race'], + 'creagh': ['charge', 'creagh'], + 'creak': ['acker', 'caker', 'crake', 'creak'], + 'cream': ['cream', 'macer'], + 'creamer': ['amercer', 'creamer'], + 'creant': ['canter', + 'creant', + 'cretan', + 'nectar', + 'recant', + 'tanrec', + 'trance'], + 'crease': ['cesare', 'crease', 'recase', 'searce'], + 'creaser': ['creaser', 'searcer'], + 'creasing': ['creasing', 'scirenga'], + 'creat': ['caret', + 'carte', + 'cater', + 'crate', + 'creat', + 'creta', + 'react', + 'recta', + 'trace'], + 'creatable': ['creatable', 'traceable'], + 'create': ['cerate', 'create', 'ecarte'], + 'creatine': ['aneretic', 'centiare', 'creatine', 'increate', 'iterance'], + 'creatinine': ['creatinine', 'incinerate'], + 'creation': ['actioner', 'anerotic', 'ceration', 'creation', 'reaction'], + 'creational': ['creational', 'crotalinae', 'laceration', 'reactional'], + 'creationary': ['creationary', 'reactionary'], + 'creationism': ['anisometric', + 'creationism', + 'miscreation', + 'ramisection', + 'reactionism'], + 'creationist': ['creationist', 'reactionist'], + 'creative': ['creative', 'reactive'], + 'creatively': ['creatively', 'reactively'], + 'creativeness': ['creativeness', 'reactiveness'], + 'creativity': ['creativity', 'reactivity'], + 'creator': ['creator', 'reactor'], + 'crebrous': ['crebrous', 'obscurer'], + 'credential': ['credential', 'interlaced', 'reclinated'], + 'credit': ['credit', 'direct'], + 'creditable': ['creditable', 'directable'], + 'creditive': ['creditive', 'directive'], + 'creditor': ['creditor', 'director'], + 'creditorship': ['creditorship', 'directorship'], + 'creditress': ['creditress', 'directress'], + 'creditrix': ['creditrix', 'directrix'], + 'crednerite': ['crednerite', 'interceder'], + 'credo': ['coder', 'cored', 'credo'], + 'cree': ['cere', 'cree'], + 'creed': ['ceder', 'cedre', 'cered', 'creed'], + 'creedal': ['cedrela', 'creedal', 'declare'], + 'creedalism': ['creedalism', 'misdeclare'], + 'creedist': ['creedist', 'desertic', 'discreet', 'discrete'], + 'creep': ['creep', 'crepe'], + 'creepered': ['creepered', 'predecree'], + 'creepie': ['creepie', 'repiece'], + 'cremation': ['cremation', 'manticore'], + 'cremator': ['cremator', 'mercator'], + 'crematorial': ['crematorial', 'mercatorial'], + 'cremor': ['cremor', 'cromer'], + 'crena': ['caner', 'crane', 'crena', 'nacre', 'rance'], + 'crenate': ['centare', 'crenate'], + 'crenated': ['crenated', 'decanter', 'nectared'], + 'crenation': ['connarite', 'container', 'cotarnine', 'crenation', 'narcotine'], + 'crenelate': ['crenelate', 'lanceteer'], + 'crenelation': ['crenelation', 'intolerance'], + 'crenele': ['crenele', 'encreel'], + 'crenellation': ['centrolineal', 'crenellation'], + 'crenitic': ['crenitic', 'cretinic'], + 
'crenology': ['crenology', 'necrology'], + 'crenula': ['crenula', 'lucarne', 'nuclear', 'unclear'], + 'crenulate': ['calenture', 'crenulate'], + 'creodonta': ['coronated', 'creodonta'], + 'creolian': ['acrolein', + 'arecolin', + 'caroline', + 'colinear', + 'cornelia', + 'creolian', + 'lonicera'], + 'creolin': ['creolin', 'licorne', 'locrine'], + 'creosotic': ['corticose', 'creosotic'], + 'crepe': ['creep', 'crepe'], + 'crepidula': ['crepidula', 'pedicular'], + 'crepine': ['crepine', 'increep'], + 'crepiness': ['crepiness', 'princesse'], + 'crepis': ['crepis', 'cripes', 'persic', 'precis', 'spicer'], + 'crepitant': ['crepitant', 'pittancer'], + 'crepitation': ['actinopteri', 'crepitation', 'precitation'], + 'crepitous': ['crepitous', 'euproctis', 'uroseptic'], + 'crepitus': ['crepitus', 'piecrust'], + 'crepon': ['crepon', 'procne'], + 'crepy': ['crepy', 'cypre', 'percy'], + 'cresol': ['closer', 'cresol', 'escrol'], + 'cresolin': ['cresolin', 'licensor'], + 'cresotic': ['cortices', 'cresotic'], + 'cresson': ['cresson', 'crosnes'], + 'crestline': ['crestline', 'stenciler'], + 'crestmoreite': ['crestmoreite', 'stereometric'], + 'creta': ['caret', + 'carte', + 'cater', + 'crate', + 'creat', + 'creta', + 'react', + 'recta', + 'trace'], + 'cretan': ['canter', + 'creant', + 'cretan', + 'nectar', + 'recant', + 'tanrec', + 'trance'], + 'crete': ['crete', 'erect'], + 'cretification': ['certification', 'cretification', 'rectification'], + 'cretify': ['certify', 'cretify', 'rectify'], + 'cretin': ['cinter', 'cretin', 'crinet'], + 'cretinic': ['crenitic', 'cretinic'], + 'cretinoid': ['cretinoid', 'direction'], + 'cretion': ['cerotin', 'cointer', 'cotrine', 'cretion', 'noticer', 'rection'], + 'cretism': ['cretism', 'metrics'], + 'crewer': ['crewer', 'recrew'], + 'cribo': ['boric', 'cribo', 'orbic'], + 'crickle': ['clicker', 'crickle'], + 'cricothyroid': ['cricothyroid', 'thyrocricoid'], + 'cried': ['cider', 'cried', 'deric', 'dicer'], + 'crier': ['crier', 'ricer'], + 'criey': ['criey', 'ricey'], + 'crile': ['crile', 'elric', 'relic'], + 'crimean': ['armenic', 'carmine', 'ceriman', 'crimean', 'mercian'], + 'crimeful': ['crimeful', 'merciful'], + 'crimeless': ['crimeless', 'merciless'], + 'crimelessness': ['crimelessness', 'mercilessness'], + 'criminalese': ['criminalese', 'misreliance'], + 'criminate': ['antimeric', 'carminite', 'criminate', 'metrician'], + 'criminatory': ['coriamyrtin', 'criminatory'], + 'crimpage': ['crimpage', 'pergamic'], + 'crinal': ['carlin', 'clarin', 'crinal'], + 'crinanite': ['crinanite', 'natricine'], + 'crinated': ['crinated', 'dicentra'], + 'crine': ['cerin', 'crine'], + 'crined': ['cedrin', 'cinder', 'crined'], + 'crinet': ['cinter', 'cretin', 'crinet'], + 'cringle': ['clinger', 'cringle'], + 'crinite': ['citrine', 'crinite', 'inciter', 'neritic'], + 'crinkle': ['clinker', 'crinkle'], + 'cripes': ['crepis', 'cripes', 'persic', 'precis', 'spicer'], + 'cripple': ['clipper', 'cripple'], + 'crisp': ['crisp', 'scrip'], + 'crispation': ['antipsoric', 'ascription', 'crispation'], + 'crisped': ['crisped', 'discerp'], + 'crispy': ['crispy', 'cypris'], + 'crista': ['crista', 'racist'], + 'cristopher': ['cristopher', 'rectorship'], + 'criteria': ['criteria', 'triceria'], + 'criterion': ['criterion', 'tricerion'], + 'criterium': ['criterium', 'tricerium'], + 'crith': ['crith', 'richt'], + 'critic': ['citric', 'critic'], + 'cro': ['cor', 'cro', 'orc', 'roc'], + 'croak': ['arock', 'croak'], + 'croat': ['actor', 'corta', 'croat', 'rocta', 'taroc', 'troca'], + 'croatan': ['cantaro', 
'croatan'], + 'croatian': ['craniota', 'croatian', 'narcotia', 'raincoat'], + 'crocein': ['cornice', 'crocein'], + 'croceine': ['cicerone', 'croceine'], + 'crocetin': ['crocetin', 'necrotic'], + 'crocidolite': ['crocidolite', 'crocodilite'], + 'crocin': ['cornic', 'crocin'], + 'crocodile': ['cordicole', 'crocodile'], + 'crocodilite': ['crocidolite', 'crocodilite'], + 'croconate': ['coenactor', 'croconate'], + 'crocus': ['crocus', 'succor'], + 'crom': ['corm', 'crom'], + 'crome': ['comer', 'crome'], + 'cromer': ['cremor', 'cromer'], + 'crone': ['coner', 'crone', 'recon'], + 'cronet': ['conter', 'cornet', 'cronet', 'roncet'], + 'cronian': ['corinna', 'cronian'], + 'cronish': ['cornish', 'cronish', 'sorchin'], + 'crony': ['corny', 'crony'], + 'croodle': ['colored', 'croodle', 'decolor'], + 'crool': ['color', 'corol', 'crool'], + 'croon': ['conor', 'croon', 'ronco'], + 'crooner': ['coroner', 'crooner', 'recroon'], + 'crop': ['copr', 'corp', 'crop'], + 'cropman': ['crampon', 'cropman'], + 'croppa': ['crappo', 'croppa'], + 'crore': ['corer', 'crore'], + 'crosa': ['arcos', 'crosa', 'oscar', 'sacro'], + 'crosier': ['cirrose', 'crosier'], + 'crosnes': ['cresson', 'crosnes'], + 'crosse': ['cessor', 'crosse', 'scorse'], + 'crosser': ['crosser', 'recross'], + 'crossite': ['crossite', 'crosstie'], + 'crossover': ['crossover', 'overcross'], + 'crosstie': ['crossite', 'crosstie'], + 'crosstied': ['crosstied', 'dissector'], + 'crosstree': ['crosstree', 'rectoress'], + 'crosswalk': ['classwork', 'crosswalk'], + 'crotal': ['carlot', 'crotal'], + 'crotalic': ['cortical', 'crotalic'], + 'crotalinae': ['creational', 'crotalinae', 'laceration', 'reactional'], + 'crotaline': ['alectrion', 'clarionet', 'crotaline', 'locarnite'], + 'crotalism': ['clamorist', 'crotalism'], + 'crotalo': ['crotalo', 'locator'], + 'crotaloid': ['crotaloid', 'doctorial'], + 'crotin': ['citron', 'cortin', 'crotin'], + 'croton': ['corton', 'croton'], + 'crotonate': ['contortae', 'crotonate'], + 'crottle': ['clotter', 'crottle'], + 'crouchant': ['archcount', 'crouchant'], + 'croupal': ['copular', 'croupal', 'cupolar', 'porcula'], + 'croupe': ['couper', 'croupe', 'poucer', 'recoup'], + 'croupily': ['croupily', 'polyuric'], + 'croupiness': ['croupiness', 'percussion', 'supersonic'], + 'crouse': ['cerous', 'course', 'crouse', 'source'], + 'crout': ['court', 'crout', 'turco'], + 'croute': ['couter', 'croute'], + 'crouton': ['contour', 'cornuto', 'countor', 'crouton'], + 'crowder': ['crowder', 'recrowd'], + 'crowned': ['crowned', 'decrown'], + 'crowner': ['crowner', 'recrown'], + 'crownmaker': ['cankerworm', 'crownmaker'], + 'croy': ['cory', 'croy'], + 'croydon': ['corydon', 'croydon'], + 'cruces': ['cercus', 'cruces'], + 'cruciate': ['aceturic', 'cruciate'], + 'crudwort': ['crudwort', 'curdwort'], + 'cruel': ['cruel', 'lucre', 'ulcer'], + 'cruels': ['clerus', 'cruels'], + 'cruelty': ['cruelty', 'cutlery'], + 'cruet': ['cruet', 'eruct', 'recut', 'truce'], + 'cruise': ['cruise', 'crusie'], + 'cruisken': ['cruisken', 'unsicker'], + 'crunode': ['crunode', 'uncored'], + 'crureus': ['crureus', 'surcrue'], + 'crurogenital': ['crurogenital', 'genitocrural'], + 'cruroinguinal': ['cruroinguinal', 'inguinocrural'], + 'crus': ['crus', 'scur'], + 'crusado': ['acrodus', 'crusado'], + 'crusca': ['crusca', 'curcas'], + 'cruse': ['cruse', 'curse', 'sucre'], + 'crusher': ['crusher', 'recrush'], + 'crusie': ['cruise', 'crusie'], + 'crust': ['crust', 'curst'], + 'crustate': ['crustate', 'scrutate'], + 'crustation': ['crustation', 'scrutation'], + 'crustily': 
['crustily', 'rusticly'], + 'crustiness': ['crustiness', 'rusticness'], + 'crusty': ['crusty', 'curtsy'], + 'cruth': ['cruth', 'rutch'], + 'cryosel': ['cryosel', 'scroyle'], + 'cryptodire': ['cryptodire', 'predictory'], + 'cryptomeria': ['cryptomeria', 'imprecatory'], + 'cryptostomate': ['cryptostomate', 'prostatectomy'], + 'ctenidial': ['ctenidial', 'identical'], + 'ctenoid': ['condite', 'ctenoid'], + 'ctenolium': ['ctenolium', 'monticule'], + 'ctenophore': ['ctenophore', 'nectophore'], + 'ctetology': ['ctetology', 'tectology'], + 'cuailnge': ['cuailnge', 'glaucine'], + 'cuarteron': ['cuarteron', 'raconteur'], + 'cubanite': ['cubanite', 'incubate'], + 'cuber': ['bruce', 'cebur', 'cuber'], + 'cubist': ['bustic', 'cubist'], + 'cubit': ['butic', 'cubit'], + 'cubitale': ['baculite', 'cubitale'], + 'cuboidal': ['baculoid', 'cuboidal'], + 'cuchan': ['caunch', 'cuchan'], + 'cueball': ['bullace', 'cueball'], + 'cueman': ['acumen', 'cueman'], + 'cuir': ['cuir', 'uric'], + 'culebra': ['culebra', 'curable'], + 'culet': ['culet', 'lucet'], + 'culinary': ['culinary', 'uranylic'], + 'culmy': ['culmy', 'cumyl'], + 'culpose': ['culpose', 'ploceus', 'upclose'], + 'cultch': ['clutch', 'cultch'], + 'cultivar': ['cultivar', 'curvital'], + 'culturine': ['culturine', 'inculture'], + 'cumaean': ['cumaean', 'encauma'], + 'cumar': ['carum', 'cumar'], + 'cumber': ['cumber', 'cumbre'], + 'cumberer': ['cerebrum', 'cumberer'], + 'cumbraite': ['bacterium', 'cumbraite'], + 'cumbre': ['cumber', 'cumbre'], + 'cumic': ['cumic', 'mucic'], + 'cumin': ['cumin', 'mucin'], + 'cumol': ['cumol', 'locum'], + 'cumulite': ['cumulite', 'lutecium'], + 'cumyl': ['culmy', 'cumyl'], + 'cuna': ['cuna', 'unca'], + 'cunan': ['canun', 'cunan'], + 'cuneal': ['auncel', 'cuneal', 'lacune', 'launce', 'unlace'], + 'cuneator': ['cornuate', 'courante', 'cuneator', 'outrance'], + 'cunila': ['cunila', 'lucian', 'lucina', 'uncial'], + 'cuon': ['cuon', 'unco'], + 'cuorin': ['cuorin', 'uronic'], + 'cupid': ['cupid', 'pudic'], + 'cupidity': ['cupidity', 'pudicity'], + 'cupidone': ['cupidone', 'uncopied'], + 'cupola': ['copula', 'cupola'], + 'cupolar': ['copular', 'croupal', 'cupolar', 'porcula'], + 'cupreous': ['cupreous', 'upcourse'], + 'cuprite': ['cuprite', 'picture'], + 'curable': ['culebra', 'curable'], + 'curate': ['acture', 'cauter', 'curate'], + 'curateship': ['curateship', 'pasticheur'], + 'curation': ['curation', 'nocturia'], + 'curatory': ['curatory', 'outcarry'], + 'curcas': ['crusca', 'curcas'], + 'curdle': ['curdle', 'curled'], + 'curdwort': ['crudwort', 'curdwort'], + 'cure': ['cure', 'ecru', 'eruc'], + 'curer': ['curer', 'recur'], + 'curial': ['curial', 'lauric', 'uracil', 'uralic'], + 'curialist': ['curialist', 'rusticial'], + 'curie': ['curie', 'ureic'], + 'curin': ['curin', 'incur', 'runic'], + 'curine': ['curine', 'erucin', 'neuric'], + 'curiosa': ['carious', 'curiosa'], + 'curite': ['curite', 'teucri', 'uretic'], + 'curled': ['curdle', 'curled'], + 'curler': ['curler', 'recurl'], + 'cursa': ['cursa', 'scaur'], + 'cursal': ['cursal', 'sulcar'], + 'curse': ['cruse', 'curse', 'sucre'], + 'cursed': ['cedrus', 'cursed'], + 'curst': ['crust', 'curst'], + 'cursus': ['cursus', 'ruscus'], + 'curtail': ['curtail', 'trucial'], + 'curtailer': ['curtailer', 'recruital', 'reticular'], + 'curtain': ['curtain', 'turacin', 'turcian'], + 'curtation': ['anticourt', 'curtation', 'ructation'], + 'curtilage': ['curtilage', 'cutigeral', 'graticule'], + 'curtis': ['citrus', 'curtis', 'rictus', 'rustic'], + 'curtise': ['curtise', 'icterus'], + 'curtsy': 
['crusty', 'curtsy'], + 'curvital': ['cultivar', 'curvital'], + 'cush': ['cush', 'such'], + 'cushionless': ['cushionless', 'slouchiness'], + 'cusinero': ['coinsure', 'corineus', 'cusinero'], + 'cusk': ['cusk', 'suck'], + 'cusp': ['cusp', 'scup'], + 'cuspal': ['cuspal', 'placus'], + 'custom': ['custom', 'muscot'], + 'customer': ['costumer', 'customer'], + 'cutheal': ['auchlet', 'cutheal', 'taluche'], + 'cutigeral': ['curtilage', 'cutigeral', 'graticule'], + 'cutin': ['cutin', 'incut', 'tunic'], + 'cutis': ['cutis', 'ictus'], + 'cutler': ['cutler', 'reluct'], + 'cutleress': ['cutleress', 'lecturess', 'truceless'], + 'cutleria': ['arculite', 'cutleria', 'lucretia', 'reticula', 'treculia'], + 'cutlery': ['cruelty', 'cutlery'], + 'cutlet': ['cutlet', 'cuttle'], + 'cutoff': ['cutoff', 'offcut'], + 'cutout': ['cutout', 'outcut'], + 'cutover': ['cutover', 'overcut'], + 'cuttle': ['cutlet', 'cuttle'], + 'cuttler': ['clutter', 'cuttler'], + 'cutup': ['cutup', 'upcut'], + 'cuya': ['cuya', 'yuca'], + 'cyamus': ['cyamus', 'muysca'], + 'cyan': ['cany', 'cyan'], + 'cyanidine': ['cyanidine', 'dicyanine'], + 'cyanol': ['alcyon', 'cyanol'], + 'cyanole': ['alcyone', 'cyanole'], + 'cyanometric': ['craniectomy', 'cyanometric'], + 'cyanophycin': ['cyanophycin', 'phycocyanin'], + 'cyanuret': ['centaury', 'cyanuret'], + 'cyath': ['cathy', 'cyath', 'yacht'], + 'cyclamine': ['cyclamine', 'macilency'], + 'cyclian': ['cyclian', 'cynical'], + 'cyclide': ['cyclide', 'decylic', 'dicycle'], + 'cyclism': ['clysmic', 'cyclism'], + 'cyclotome': ['colectomy', 'cyclotome'], + 'cydonian': ['anodynic', 'cydonian'], + 'cylindrite': ['cylindrite', 'indirectly'], + 'cylix': ['cylix', 'xylic'], + 'cymation': ['cymation', 'myatonic', 'onymatic'], + 'cymoid': ['cymoid', 'mycoid'], + 'cymometer': ['cymometer', 'mecometry'], + 'cymose': ['cymose', 'mycose'], + 'cymule': ['cymule', 'lyceum'], + 'cynara': ['canary', 'cynara'], + 'cynaroid': ['cynaroid', 'dicaryon'], + 'cynical': ['cyclian', 'cynical'], + 'cynogale': ['acylogen', 'cynogale'], + 'cynophilic': ['cynophilic', 'philocynic'], + 'cynosural': ['consulary', 'cynosural'], + 'cyphonism': ['cyphonism', 'symphonic'], + 'cypre': ['crepy', 'cypre', 'percy'], + 'cypria': ['cypria', 'picary', 'piracy'], + 'cyprian': ['cyprian', 'cyprina'], + 'cyprina': ['cyprian', 'cyprina'], + 'cyprine': ['cyprine', 'pyrenic'], + 'cypris': ['crispy', 'cypris'], + 'cyrano': ['canroy', 'crayon', 'cyrano', 'nyroca'], + 'cyril': ['cyril', 'lyric'], + 'cyrilla': ['cyrilla', 'lyrical'], + 'cyrtopia': ['cyrtopia', 'poticary'], + 'cyst': ['cyst', 'scyt'], + 'cystidean': ['asyndetic', 'cystidean', 'syndicate'], + 'cystitis': ['cystitis', 'scytitis'], + 'cystoadenoma': ['adenocystoma', 'cystoadenoma'], + 'cystofibroma': ['cystofibroma', 'fibrocystoma'], + 'cystolith': ['cystolith', 'lithocyst'], + 'cystomyxoma': ['cystomyxoma', 'myxocystoma'], + 'cystonephrosis': ['cystonephrosis', 'nephrocystosis'], + 'cystopyelitis': ['cystopyelitis', 'pyelocystitis'], + 'cystotome': ['cystotome', 'cytostome', 'ostectomy'], + 'cystourethritis': ['cystourethritis', 'urethrocystitis'], + 'cytase': ['cytase', 'stacey'], + 'cytherea': ['cheatery', 'cytherea', 'teachery'], + 'cytherean': ['cytherean', 'enchytrae'], + 'cytisine': ['cytisine', 'syenitic'], + 'cytoblastemic': ['blastomycetic', 'cytoblastemic'], + 'cytoblastemous': ['blastomycetous', 'cytoblastemous'], + 'cytochrome': ['chromocyte', 'cytochrome'], + 'cytoid': ['cytoid', 'docity'], + 'cytomere': ['cytomere', 'merocyte'], + 'cytophil': ['cytophil', 'phycitol'], + 
'cytosine': ['cenosity', 'cytosine'], + 'cytosome': ['cytosome', 'otomyces'], + 'cytost': ['cytost', 'scotty'], + 'cytostome': ['cystotome', 'cytostome', 'ostectomy'], + 'czarian': ['czarian', 'czarina'], + 'czarina': ['czarian', 'czarina'], + 'da': ['ad', 'da'], + 'dab': ['bad', 'dab'], + 'dabber': ['barbed', 'dabber'], + 'dabbler': ['dabbler', 'drabble'], + 'dabitis': ['dabitis', 'dibatis'], + 'dablet': ['dablet', 'tabled'], + 'dace': ['cade', 'dace', 'ecad'], + 'dacelo': ['alcedo', 'dacelo'], + 'dacian': ['acnida', 'anacid', 'dacian'], + 'dacker': ['arcked', 'dacker'], + 'dacryolith': ['dacryolith', 'hydrotical'], + 'dacryon': ['candroy', 'dacryon'], + 'dactylonomy': ['dactylonomy', 'monodactyly'], + 'dactylopteridae': ['dactylopteridae', 'pterodactylidae'], + 'dactylopterus': ['dactylopterus', 'pterodactylus'], + 'dacus': ['cadus', 'dacus'], + 'dad': ['add', 'dad'], + 'dada': ['adad', 'adda', 'dada'], + 'dadap': ['dadap', 'padda'], + 'dade': ['dade', 'dead', 'edda'], + 'dadu': ['addu', 'dadu', 'daud', 'duad'], + 'dae': ['ade', 'dae'], + 'daemon': ['daemon', 'damone', 'modena'], + 'daemonic': ['codamine', 'comedian', 'daemonic', 'demoniac'], + 'daer': ['ared', 'daer', 'dare', 'dear', 'read'], + 'dag': ['dag', 'gad'], + 'dagaba': ['badaga', 'dagaba', 'gadaba'], + 'dagame': ['dagame', 'damage'], + 'dagbane': ['bandage', 'dagbane'], + 'dagestan': ['dagestan', 'standage'], + 'dagger': ['dagger', 'gadger', 'ragged'], + 'daggers': ['daggers', 'seggard'], + 'daggle': ['daggle', 'lagged'], + 'dago': ['dago', 'goad'], + 'dagomba': ['dagomba', 'gambado'], + 'dags': ['dags', 'sgad'], + 'dah': ['dah', 'dha', 'had'], + 'daidle': ['daidle', 'laddie'], + 'daikon': ['daikon', 'nodiak'], + 'dail': ['dail', 'dali', 'dial', 'laid', 'lida'], + 'daily': ['daily', 'lydia'], + 'daimen': ['daimen', 'damine', 'maiden', 'median', 'medina'], + 'daimio': ['daimio', 'maioid'], + 'daimon': ['amidon', 'daimon', 'domain'], + 'dain': ['adin', 'andi', 'dain', 'dani', 'dian', 'naid'], + 'dairi': ['dairi', 'darii', 'radii'], + 'dairy': ['dairy', 'diary', 'yaird'], + 'dais': ['dais', 'dasi', 'disa', 'said', 'sida'], + 'daisy': ['daisy', 'sayid'], + 'daker': ['daker', 'drake', 'kedar', 'radek'], + 'dal': ['dal', 'lad'], + 'dale': ['dale', 'deal', 'lade', 'lead', 'leda'], + 'dalea': ['adela', 'dalea'], + 'dalecarlian': ['calendarial', 'dalecarlian'], + 'daleman': ['daleman', 'lademan', 'leadman'], + 'daler': ['alder', 'daler', 'lader'], + 'dalesman': ['dalesman', 'leadsman'], + 'dali': ['dail', 'dali', 'dial', 'laid', 'lida'], + 'dalle': ['dalle', 'della', 'ladle'], + 'dallying': ['dallying', 'ladyling'], + 'dalt': ['dalt', 'tald'], + 'dalteen': ['dalteen', 'dentale', 'edental'], + 'dam': ['dam', 'mad'], + 'dama': ['adam', 'dama'], + 'damage': ['dagame', 'damage'], + 'daman': ['adman', 'daman', 'namda'], + 'damara': ['armada', 'damara', 'ramada'], + 'dame': ['dame', 'made', 'mead'], + 'damewort': ['damewort', 'wardmote'], + 'damia': ['amadi', 'damia', 'madia', 'maida'], + 'damie': ['amide', 'damie', 'media'], + 'damier': ['admire', 'armied', 'damier', 'dimera', 'merida'], + 'damine': ['daimen', 'damine', 'maiden', 'median', 'medina'], + 'dammer': ['dammer', 'dramme'], + 'dammish': ['dammish', 'mahdism'], + 'damn': ['damn', 'mand'], + 'damnation': ['damnation', 'mandation'], + 'damnatory': ['damnatory', 'mandatory'], + 'damned': ['damned', 'demand', 'madden'], + 'damner': ['damner', 'manred', 'randem', 'remand'], + 'damnii': ['amidin', 'damnii'], + 'damnous': ['damnous', 'osmunda'], + 'damon': ['damon', 'monad', 'nomad'], + 
'damone': ['daemon', 'damone', 'modena'], + 'damonico': ['damonico', 'monoacid'], + 'dampen': ['dampen', 'madnep'], + 'damper': ['damper', 'ramped'], + 'dampish': ['dampish', 'madship', 'phasmid'], + 'dan': ['and', 'dan'], + 'dana': ['anda', 'dana'], + 'danaan': ['ananda', 'danaan'], + 'danai': ['danai', 'diana', 'naiad'], + 'danainae': ['anadenia', 'danainae'], + 'danakil': ['danakil', 'dankali', 'kaldani', 'ladakin'], + 'danalite': ['danalite', 'detainal'], + 'dancalite': ['cadential', 'dancalite'], + 'dance': ['dance', 'decan'], + 'dancer': ['cedarn', 'dancer', 'nacred'], + 'dancery': ['ardency', 'dancery'], + 'dander': ['dander', 'darned', 'nadder'], + 'dandle': ['dandle', 'landed'], + 'dandler': ['dandler', 'dendral'], + 'dane': ['ande', 'dane', 'dean', 'edna'], + 'danewort': ['danewort', 'teardown'], + 'danger': ['danger', 'gander', 'garden', 'ranged'], + 'dangerful': ['dangerful', 'gardenful'], + 'dangerless': ['dangerless', 'gardenless'], + 'dangle': ['angled', 'dangle', 'englad', 'lagend'], + 'dangler': ['dangler', 'gnarled'], + 'danglin': ['danglin', 'landing'], + 'dani': ['adin', 'andi', 'dain', 'dani', 'dian', 'naid'], + 'danian': ['andian', 'danian', 'nidana'], + 'danic': ['canid', 'cnida', 'danic'], + 'daniel': ['aldine', 'daniel', 'delian', 'denial', 'enalid', 'leadin'], + 'daniele': ['adeline', 'daniele', 'delaine'], + 'danielic': ['alcidine', 'danielic', 'lecaniid'], + 'danio': ['adion', 'danio', 'doina', 'donia'], + 'danish': ['danish', 'sandhi'], + 'danism': ['danism', 'disman'], + 'danite': ['danite', 'detain'], + 'dankali': ['danakil', 'dankali', 'kaldani', 'ladakin'], + 'danli': ['danli', 'ladin', 'linda', 'nidal'], + 'dannie': ['aidenn', 'andine', 'dannie', 'indane'], + 'danseuse': ['danseuse', 'sudanese'], + 'dantean': ['andante', 'dantean'], + 'dantist': ['dantist', 'distant'], + 'danuri': ['danuri', 'diurna', 'dunair', 'durain', 'durani', 'durian'], + 'dao': ['ado', 'dao', 'oda'], + 'daoine': ['daoine', 'oneida'], + 'dap': ['dap', 'pad'], + 'daphnis': ['daphnis', 'dishpan'], + 'dapicho': ['dapicho', 'phacoid'], + 'dapple': ['dapple', 'lapped', 'palped'], + 'dar': ['dar', 'rad'], + 'daraf': ['daraf', 'farad'], + 'darby': ['bardy', 'darby'], + 'darci': ['acrid', 'caird', 'carid', 'darci', 'daric', 'dirca'], + 'dare': ['ared', 'daer', 'dare', 'dear', 'read'], + 'dareall': ['ardella', 'dareall'], + 'daren': ['andre', 'arend', 'daren', 'redan'], + 'darer': ['darer', 'drear'], + 'darg': ['darg', 'drag', 'grad'], + 'darger': ['darger', 'gerard', 'grader', 'redrag', 'regard'], + 'dargo': ['dargo', 'dogra', 'drago'], + 'dargsman': ['dargsman', 'dragsman'], + 'dari': ['arid', 'dari', 'raid'], + 'daric': ['acrid', 'caird', 'carid', 'darci', 'daric', 'dirca'], + 'darien': ['darien', 'draine'], + 'darii': ['dairi', 'darii', 'radii'], + 'darin': ['darin', 'dinar', 'drain', 'indra', 'nadir', 'ranid'], + 'daring': ['daring', 'dingar', 'gradin'], + 'darius': ['darius', 'radius'], + 'darken': ['darken', 'kanred', 'ranked'], + 'darkener': ['darkener', 'redarken'], + 'darn': ['darn', 'nard', 'rand'], + 'darned': ['dander', 'darned', 'nadder'], + 'darnel': ['aldern', + 'darnel', + 'enlard', + 'lander', + 'lenard', + 'randle', + 'reland'], + 'darner': ['darner', 'darren', 'errand', 'rander', 'redarn'], + 'darning': ['darning', 'randing'], + 'darrein': ['darrein', 'drainer'], + 'darren': ['darner', 'darren', 'errand', 'rander', 'redarn'], + 'darshana': ['darshana', 'shardana'], + 'darst': ['darst', 'darts', 'strad'], + 'dart': ['dart', 'drat'], + 'darter': ['darter', + 'dartre', + 
'redart', + 'retard', + 'retrad', + 'tarred', + 'trader'], + 'darting': ['darting', 'trading'], + 'dartle': ['dartle', 'tardle'], + 'dartoic': ['arctoid', 'carotid', 'dartoic'], + 'dartre': ['darter', + 'dartre', + 'redart', + 'retard', + 'retrad', + 'tarred', + 'trader'], + 'dartrose': ['dartrose', 'roadster'], + 'darts': ['darst', 'darts', 'strad'], + 'daryl': ['daryl', 'lardy', 'lyard'], + 'das': ['das', 'sad'], + 'dash': ['dash', 'sadh', 'shad'], + 'dashed': ['dashed', 'shaded'], + 'dasheen': ['dasheen', 'enshade'], + 'dasher': ['dasher', 'shader', 'sheard'], + 'dashing': ['dashing', 'shading'], + 'dashnak': ['dashnak', 'shadkan'], + 'dashy': ['dashy', 'shady'], + 'dasi': ['dais', 'dasi', 'disa', 'said', 'sida'], + 'dasnt': ['dasnt', 'stand'], + 'dasturi': ['dasturi', 'rudista'], + 'dasya': ['adays', 'dasya'], + 'dasyurine': ['dasyurine', 'dysneuria'], + 'data': ['adat', 'data'], + 'datable': ['albetad', 'datable'], + 'dataria': ['dataria', 'radiata'], + 'date': ['adet', 'date', 'tade', 'tead', 'teda'], + 'dateless': ['dateless', 'detassel'], + 'dater': ['dater', 'derat', 'detar', 'drate', 'rated', 'trade', 'tread'], + 'datil': ['datil', 'dital', 'tidal', 'tilda'], + 'datism': ['amidst', 'datism'], + 'daub': ['baud', 'buda', 'daub'], + 'dauber': ['dauber', 'redaub'], + 'daubster': ['daubster', 'subtread'], + 'daud': ['addu', 'dadu', 'daud', 'duad'], + 'daunch': ['chandu', 'daunch'], + 'daunter': ['daunter', 'unarted', 'unrated', 'untread'], + 'dauntless': ['adultness', 'dauntless'], + 'daur': ['ardu', 'daur', 'dura'], + 'dave': ['dave', 'deva', 'vade', 'veda'], + 'daven': ['daven', 'vaned'], + 'davy': ['davy', 'vady'], + 'daw': ['awd', 'daw', 'wad'], + 'dawdler': ['dawdler', 'waddler'], + 'dawdling': ['dawdling', 'waddling'], + 'dawdlingly': ['dawdlingly', 'waddlingly'], + 'dawdy': ['dawdy', 'waddy'], + 'dawn': ['dawn', 'wand'], + 'dawnlike': ['dawnlike', 'wandlike'], + 'dawny': ['dawny', 'wandy'], + 'day': ['ady', 'day', 'yad'], + 'dayal': ['adlay', 'dayal'], + 'dayfly': ['dayfly', 'ladyfy'], + 'days': ['days', 'dyas'], + 'daysman': ['daysman', 'mandyas'], + 'daytime': ['daytime', 'maytide'], + 'daywork': ['daywork', 'workday'], + 'daze': ['adze', 'daze'], + 'de': ['de', 'ed'], + 'deacon': ['acnode', 'deacon'], + 'deaconship': ['deaconship', 'endophasic'], + 'dead': ['dade', 'dead', 'edda'], + 'deadborn': ['deadborn', 'endboard'], + 'deadener': ['deadener', 'endeared'], + 'deadlock': ['deadlock', 'deckload'], + 'deaf': ['deaf', 'fade'], + 'deair': ['aider', 'deair', 'irade', 'redia'], + 'deal': ['dale', 'deal', 'lade', 'lead', 'leda'], + 'dealable': ['dealable', 'leadable'], + 'dealation': ['atloidean', 'dealation'], + 'dealer': ['dealer', 'leader', 'redeal', 'relade', 'relead'], + 'dealership': ['dealership', 'leadership'], + 'dealing': ['adeling', 'dealing', 'leading'], + 'dealt': ['adlet', 'dealt', 'delta', 'lated', 'taled'], + 'deaminase': ['deaminase', 'mesadenia'], + 'dean': ['ande', 'dane', 'dean', 'edna'], + 'deaner': ['deaner', 'endear'], + 'deaness': ['deaness', 'edessan'], + 'deaquation': ['adequation', 'deaquation'], + 'dear': ['ared', 'daer', 'dare', 'dear', 'read'], + 'dearie': ['aeried', 'dearie'], + 'dearth': ['dearth', 'hatred', 'rathed', 'thread'], + 'deary': ['deary', 'deray', 'rayed', 'ready', 'yeard'], + 'deash': ['deash', 'hades', 'sadhe', 'shade'], + 'deasil': ['aisled', 'deasil', 'ladies', 'sailed'], + 'deave': ['deave', 'eaved', 'evade'], + 'deb': ['bed', 'deb'], + 'debacle': ['belaced', 'debacle'], + 'debar': ['ardeb', 'beard', 'bread', 'debar'], + 'debark': 
['bedark', 'debark'], + 'debaser': ['debaser', 'sabered'], + 'debater': ['betread', 'debater'], + 'deben': ['beden', 'deben', 'deneb'], + 'debi': ['beid', 'bide', 'debi', 'dieb'], + 'debile': ['debile', 'edible'], + 'debit': ['bidet', 'debit'], + 'debosh': ['beshod', 'debosh'], + 'debrief': ['debrief', 'defiber', 'fibered'], + 'debutant': ['debutant', 'unbatted'], + 'debutante': ['debutante', 'unabetted'], + 'decachord': ['decachord', 'dodecarch'], + 'decadic': ['caddice', 'decadic'], + 'decal': ['clead', 'decal', 'laced'], + 'decalin': ['cladine', 'decalin', 'iceland'], + 'decaliter': ['decaliter', 'decalitre'], + 'decalitre': ['decaliter', 'decalitre'], + 'decameter': ['decameter', 'decametre'], + 'decametre': ['decameter', 'decametre'], + 'decan': ['dance', 'decan'], + 'decanal': ['candela', 'decanal'], + 'decani': ['decani', 'decian'], + 'decant': ['cadent', 'canted', 'decant'], + 'decantate': ['catenated', 'decantate'], + 'decanter': ['crenated', 'decanter', 'nectared'], + 'decantherous': ['countershade', 'decantherous'], + 'decap': ['caped', 'decap', 'paced'], + 'decart': ['cedrat', 'decart', 'redact'], + 'decastere': ['decastere', 'desecrate'], + 'decator': ['cordate', 'decator', 'redcoat'], + 'decay': ['acedy', 'decay'], + 'deceiver': ['deceiver', 'received'], + 'decennia': ['cadinene', 'decennia', 'enneadic'], + 'decennial': ['celandine', 'decennial'], + 'decent': ['cedent', 'decent'], + 'decenter': ['centered', 'decenter', 'decentre', 'recedent'], + 'decentre': ['centered', 'decenter', 'decentre', 'recedent'], + 'decern': ['cendre', 'decern'], + 'decian': ['decani', 'decian'], + 'deciatine': ['deciatine', 'diacetine', 'taenicide', 'teniacide'], + 'decider': ['decider', 'decried'], + 'decillion': ['celloidin', 'collidine', 'decillion'], + 'decima': ['amiced', 'decima'], + 'decimal': ['camelid', 'decimal', 'declaim', 'medical'], + 'decimally': ['decimally', 'medically'], + 'decimate': ['decimate', 'medicate'], + 'decimation': ['decimation', 'medication'], + 'decimator': ['decimator', 'medicator', 'mordicate'], + 'decimestrial': ['decimestrial', 'sedimetrical'], + 'decimosexto': ['decimosexto', 'sextodecimo'], + 'deckel': ['deckel', 'deckle'], + 'decker': ['decker', 'redeck'], + 'deckle': ['deckel', 'deckle'], + 'deckload': ['deadlock', 'deckload'], + 'declaim': ['camelid', 'decimal', 'declaim', 'medical'], + 'declaimer': ['declaimer', 'demiracle'], + 'declaration': ['declaration', 'redactional'], + 'declare': ['cedrela', 'creedal', 'declare'], + 'declass': ['classed', 'declass'], + 'declinate': ['declinate', 'encitadel'], + 'declinatory': ['adrenolytic', 'declinatory'], + 'decoat': ['coated', 'decoat'], + 'decollate': ['decollate', 'ocellated'], + 'decollator': ['corollated', 'decollator'], + 'decolor': ['colored', 'croodle', 'decolor'], + 'decompress': ['compressed', 'decompress'], + 'deconsider': ['considered', 'deconsider'], + 'decorate': ['decorate', 'ocreated'], + 'decoration': ['carotenoid', 'coronadite', 'decoration'], + 'decorist': ['decorist', 'sectroid'], + 'decream': ['decream', 'racemed'], + 'decree': ['decree', 'recede'], + 'decreer': ['decreer', 'receder'], + 'decreet': ['decreet', 'decrete'], + 'decrepit': ['decrepit', 'depicter', 'precited'], + 'decrete': ['decreet', 'decrete'], + 'decretist': ['decretist', 'trisected'], + 'decrial': ['decrial', 'radicel', 'radicle'], + 'decried': ['decider', 'decried'], + 'decrown': ['crowned', 'decrown'], + 'decry': ['cedry', 'decry'], + 'decurionate': ['counteridea', 'decurionate'], + 'decurrency': ['decurrency', 'recrudency'], + 
'decursion': ['cinderous', 'decursion'], + 'decus': ['decus', 'duces'], + 'decyl': ['clyde', 'decyl'], + 'decylic': ['cyclide', 'decylic', 'dicycle'], + 'dedan': ['dedan', 'denda'], + 'dedicant': ['addicent', 'dedicant'], + 'dedo': ['dedo', 'dode', 'eddo'], + 'deduce': ['deduce', 'deuced'], + 'deduct': ['deduct', 'ducted'], + 'deem': ['deem', 'deme', 'mede', 'meed'], + 'deemer': ['deemer', 'meered', 'redeem', 'remede'], + 'deep': ['deep', 'peed'], + 'deer': ['deer', 'dere', 'dree', 'rede', 'reed'], + 'deerhair': ['deerhair', 'dehairer'], + 'deerhorn': ['deerhorn', 'dehorner'], + 'deerwood': ['deerwood', 'doorweed'], + 'defat': ['defat', 'fated'], + 'defaulter': ['defaulter', 'redefault'], + 'defeater': ['defeater', 'federate', 'redefeat'], + 'defensor': ['defensor', 'foresend'], + 'defer': ['defer', 'freed'], + 'defial': ['afield', 'defial'], + 'defiber': ['debrief', 'defiber', 'fibered'], + 'defile': ['defile', 'fidele'], + 'defiled': ['defiled', 'fielded'], + 'defiler': ['defiler', 'fielder'], + 'definable': ['beanfield', 'definable'], + 'define': ['define', 'infeed'], + 'definer': ['definer', 'refined'], + 'deflect': ['clefted', 'deflect'], + 'deflesh': ['deflesh', 'fleshed'], + 'deflex': ['deflex', 'flexed'], + 'deflower': ['deflower', 'flowered'], + 'defluent': ['defluent', 'unfelted'], + 'defog': ['defog', 'fodge'], + 'deforciant': ['deforciant', 'fornicated'], + 'deforest': ['deforest', 'forested'], + 'deform': ['deform', 'formed'], + 'deformer': ['deformer', 'reformed'], + 'defray': ['defray', 'frayed'], + 'defrost': ['defrost', 'frosted'], + 'deg': ['deg', 'ged'], + 'degarnish': ['degarnish', 'garnished'], + 'degasser': ['degasser', 'dressage'], + 'degelation': ['degelation', 'delegation'], + 'degrain': ['degrain', 'deraign', 'deringa', 'gradine', 'grained', 'reading'], + 'degu': ['degu', 'gude'], + 'dehair': ['dehair', 'haired'], + 'dehairer': ['deerhair', 'dehairer'], + 'dehorn': ['dehorn', 'horned'], + 'dehorner': ['deerhorn', 'dehorner'], + 'dehors': ['dehors', 'rhodes', 'shoder', 'shored'], + 'dehortation': ['dehortation', 'theriodonta'], + 'dehusk': ['dehusk', 'husked'], + 'deicer': ['ceride', 'deicer'], + 'deictical': ['deictical', 'dialectic'], + 'deification': ['deification', 'edification'], + 'deificatory': ['deificatory', 'edificatory'], + 'deifier': ['deifier', 'edifier'], + 'deify': ['deify', 'edify'], + 'deign': ['deign', 'dinge', 'nidge'], + 'deino': ['deino', 'dione', 'edoni'], + 'deinocephalia': ['deinocephalia', 'palaeechinoid'], + 'deinos': ['deinos', 'donsie', 'inodes', 'onside'], + 'deipara': ['deipara', 'paridae'], + 'deirdre': ['deirdre', 'derider', 'derride', 'ridered'], + 'deism': ['deism', 'disme'], + 'deist': ['deist', 'steid'], + 'deistic': ['deistic', 'dietics'], + 'deistically': ['deistically', 'dialystelic'], + 'deity': ['deity', 'tydie'], + 'deityship': ['deityship', 'diphysite'], + 'del': ['del', 'eld', 'led'], + 'delaine': ['adeline', 'daniele', 'delaine'], + 'delaminate': ['antemedial', 'delaminate'], + 'delapse': ['delapse', 'sepaled'], + 'delate': ['delate', 'elated'], + 'delater': ['delater', 'related', 'treadle'], + 'delator': ['delator', 'leotard'], + 'delawn': ['delawn', 'lawned', 'wandle'], + 'delay': ['delay', 'leady'], + 'delayer': ['delayer', 'layered', 'redelay'], + 'delayful': ['delayful', 'feudally'], + 'dele': ['dele', 'lede', 'leed'], + 'delead': ['delead', 'leaded'], + 'delegation': ['degelation', 'delegation'], + 'delegatory': ['delegatory', 'derogately'], + 'delete': ['delete', 'teedle'], + 'delf': ['delf', 'fled'], + 'delhi': 
['delhi', 'hield'], + 'delia': ['adiel', 'delia', 'ideal'], + 'delian': ['aldine', 'daniel', 'delian', 'denial', 'enalid', 'leadin'], + 'delible': ['bellied', 'delible'], + 'delicateness': ['delicateness', 'delicatessen'], + 'delicatessen': ['delicateness', 'delicatessen'], + 'delichon': ['chelidon', 'chelonid', 'delichon'], + 'delict': ['delict', 'deltic'], + 'deligation': ['deligation', 'gadolinite', 'gelatinoid'], + 'delignate': ['delignate', 'gelatined'], + 'delimit': ['delimit', 'limited'], + 'delimitation': ['delimitation', 'mniotiltidae'], + 'delineator': ['delineator', 'rondeletia'], + 'delint': ['delint', 'dentil'], + 'delirament': ['delirament', 'derailment'], + 'deliriant': ['deliriant', 'draintile', 'interlaid'], + 'deliver': ['deliver', 'deviler', 'livered'], + 'deliverer': ['deliverer', 'redeliver'], + 'della': ['dalle', 'della', 'ladle'], + 'deloul': ['deloul', 'duello'], + 'delphinius': ['delphinius', 'sulphinide'], + 'delta': ['adlet', 'dealt', 'delta', 'lated', 'taled'], + 'deltaic': ['citadel', 'deltaic', 'dialect', 'edictal', 'lactide'], + 'deltic': ['delict', 'deltic'], + 'deluding': ['deluding', 'ungilded'], + 'delusion': ['delusion', 'unsoiled'], + 'delusionist': ['delusionist', 'indissolute'], + 'deluster': ['deluster', 'ulstered'], + 'demal': ['demal', 'medal'], + 'demand': ['damned', 'demand', 'madden'], + 'demander': ['demander', 'redemand'], + 'demanding': ['demanding', 'maddening'], + 'demandingly': ['demandingly', 'maddeningly'], + 'demantoid': ['demantoid', 'dominated'], + 'demarcate': ['camerated', 'demarcate'], + 'demarcation': ['demarcation', 'democratian'], + 'demark': ['demark', 'marked'], + 'demast': ['demast', 'masted'], + 'deme': ['deem', 'deme', 'mede', 'meed'], + 'demean': ['amende', 'demean', 'meaned', 'nadeem'], + 'demeanor': ['demeanor', 'enamored'], + 'dementia': ['dementia', 'mendaite'], + 'demerit': ['demerit', 'dimeter', 'merited', 'mitered'], + 'demerol': ['demerol', 'modeler', 'remodel'], + 'demetrian': ['demetrian', 'dermatine', 'meandrite', 'minareted'], + 'demi': ['demi', 'diem', 'dime', 'mide'], + 'demibrute': ['bermudite', 'demibrute'], + 'demicannon': ['cinnamoned', 'demicannon'], + 'demicanon': ['demicanon', 'dominance'], + 'demidog': ['demidog', 'demigod'], + 'demigod': ['demidog', 'demigod'], + 'demiluster': ['demiluster', 'demilustre'], + 'demilustre': ['demiluster', 'demilustre'], + 'demiparallel': ['demiparallel', 'imparalleled'], + 'demipronation': ['demipronation', 'preadmonition', 'predomination'], + 'demiracle': ['declaimer', 'demiracle'], + 'demiram': ['demiram', 'mermaid'], + 'demirep': ['demirep', 'epiderm', 'impeder', 'remiped'], + 'demirobe': ['demirobe', 'embodier'], + 'demisable': ['beadleism', 'demisable'], + 'demise': ['demise', 'diseme'], + 'demit': ['demit', 'timed'], + 'demiturned': ['demiturned', 'undertimed'], + 'demob': ['demob', 'mobed'], + 'democratian': ['demarcation', 'democratian'], + 'demolisher': ['demolisher', 'redemolish'], + 'demoniac': ['codamine', 'comedian', 'daemonic', 'demoniac'], + 'demoniacism': ['demoniacism', 'seminomadic'], + 'demonial': ['demonial', 'melanoid'], + 'demoniast': ['ademonist', 'demoniast', 'staminode'], + 'demonish': ['demonish', 'hedonism'], + 'demonism': ['demonism', 'medimnos', 'misnomed'], + 'demotics': ['comedist', 'demotics', 'docetism', 'domestic'], + 'demotion': ['demotion', 'entomoid', 'moontide'], + 'demount': ['demount', 'mounted'], + 'demurrer': ['demurrer', 'murderer'], + 'demurring': ['demurring', 'murdering'], + 'demurringly': ['demurringly', 'murderingly'], + 
'demy': ['demy', 'emyd'], + 'den': ['den', 'end', 'ned'], + 'denarius': ['denarius', 'desaurin', 'unraised'], + 'denaro': ['denaro', 'orenda'], + 'denary': ['denary', 'yander'], + 'denat': ['denat', 'entad'], + 'denature': ['denature', 'undereat'], + 'denda': ['dedan', 'denda'], + 'dendral': ['dandler', 'dendral'], + 'dendrite': ['dendrite', 'tindered'], + 'dendrites': ['dendrites', 'distender', 'redistend'], + 'dene': ['dene', 'eden', 'need'], + 'deneb': ['beden', 'deben', 'deneb'], + 'dengue': ['dengue', 'unedge'], + 'denial': ['aldine', 'daniel', 'delian', 'denial', 'enalid', 'leadin'], + 'denier': ['denier', 'nereid'], + 'denierer': ['denierer', 'reindeer'], + 'denigrate': ['argentide', 'denigrate', 'dinergate'], + 'denim': ['denim', 'mendi'], + 'denis': ['denis', 'snide'], + 'denominate': ['denominate', 'emendation'], + 'denotable': ['denotable', 'detonable'], + 'denotation': ['denotation', 'detonation'], + 'denotative': ['denotative', 'detonative'], + 'denotive': ['denotive', 'devonite'], + 'denouncer': ['denouncer', 'unencored'], + 'dense': ['dense', 'needs'], + 'denshare': ['denshare', 'seerhand'], + 'denshire': ['denshire', 'drisheen'], + 'density': ['density', 'destiny'], + 'dent': ['dent', 'tend'], + 'dental': ['dental', 'tandle'], + 'dentale': ['dalteen', 'dentale', 'edental'], + 'dentalism': ['dentalism', 'dismantle'], + 'dentaria': ['anteriad', 'atridean', 'dentaria'], + 'dentatoserrate': ['dentatoserrate', 'serratodentate'], + 'dentatosinuate': ['dentatosinuate', 'sinuatodentate'], + 'denter': ['denter', 'rented', 'tender'], + 'dentex': ['dentex', 'extend'], + 'denticle': ['cliented', 'denticle'], + 'denticular': ['denticular', 'unarticled'], + 'dentil': ['delint', 'dentil'], + 'dentilingual': ['dentilingual', 'indulgential', 'linguidental'], + 'dentin': ['dentin', 'indent', 'intend', 'tinned'], + 'dentinal': ['dentinal', 'teinland', 'tendinal'], + 'dentine': ['dentine', 'nineted'], + 'dentinitis': ['dentinitis', 'tendinitis'], + 'dentinoma': ['dentinoma', 'nominated'], + 'dentist': ['dentist', 'distent', 'stinted'], + 'dentolabial': ['dentolabial', 'labiodental'], + 'dentolingual': ['dentolingual', 'linguodental'], + 'denture': ['denture', 'untreed'], + 'denudative': ['denudative', 'undeviated'], + 'denude': ['denude', 'dudeen'], + 'denumeral': ['denumeral', 'undermeal', 'unrealmed'], + 'denunciator': ['denunciator', 'underaction'], + 'deny': ['deny', 'dyne'], + 'deoppilant': ['deoppilant', 'pentaploid'], + 'deota': ['deota', 'todea'], + 'depa': ['depa', 'peda'], + 'depaint': ['depaint', 'inadept', 'painted', 'patined'], + 'depart': ['depart', 'parted', 'petard'], + 'departition': ['departition', 'partitioned', 'trepidation'], + 'departure': ['apertured', 'departure'], + 'depas': ['depas', 'sepad', 'spade'], + 'depencil': ['depencil', 'penciled', 'pendicle'], + 'depender': ['depender', 'redepend'], + 'depetticoat': ['depetticoat', 'petticoated'], + 'depicter': ['decrepit', 'depicter', 'precited'], + 'depiction': ['depiction', 'pectinoid'], + 'depilate': ['depilate', 'leptidae', 'pileated'], + 'depletion': ['depletion', 'diplotene'], + 'deploration': ['deploration', 'periodontal'], + 'deploy': ['deploy', 'podley'], + 'depoh': ['depoh', 'ephod', 'hoped'], + 'depolish': ['depolish', 'polished'], + 'deport': ['deport', 'ported', 'redtop'], + 'deportation': ['antitorpedo', 'deportation'], + 'deposal': ['adelops', 'deposal'], + 'deposer': ['deposer', 'reposed'], + 'deposit': ['deposit', 'topside'], + 'deposition': ['deposition', 'positioned'], + 'depositional': ['depositional', 
'despoliation'], + 'depositure': ['depositure', 'pterideous'], + 'deprave': ['deprave', 'pervade'], + 'depraver': ['depraver', 'pervader'], + 'depravingly': ['depravingly', 'pervadingly'], + 'deprecable': ['deprecable', 'precedable'], + 'deprecation': ['capernoited', 'deprecation'], + 'depreciation': ['depreciation', 'predeication'], + 'depressant': ['depressant', 'partedness'], + 'deprint': ['deprint', 'printed'], + 'deprival': ['deprival', 'prevalid'], + 'deprivate': ['deprivate', 'predative'], + 'deprive': ['deprive', 'previde'], + 'depriver': ['depriver', 'predrive'], + 'depurant': ['depurant', 'unparted'], + 'depuration': ['depuration', 'portunidae'], + 'deraign': ['degrain', 'deraign', 'deringa', 'gradine', 'grained', 'reading'], + 'derail': ['ariled', 'derail', 'dialer'], + 'derailment': ['delirament', 'derailment'], + 'derange': ['derange', 'enraged', 'gardeen', 'gerenda', 'grandee', 'grenade'], + 'deranged': ['deranged', 'gardened'], + 'deranger': ['deranger', 'gardener'], + 'derat': ['dater', 'derat', 'detar', 'drate', 'rated', 'trade', 'tread'], + 'derate': ['derate', 'redate'], + 'derater': ['derater', 'retrade', 'retread', 'treader'], + 'deray': ['deary', 'deray', 'rayed', 'ready', 'yeard'], + 'dere': ['deer', 'dere', 'dree', 'rede', 'reed'], + 'deregister': ['deregister', 'registered'], + 'derelict': ['derelict', 'relicted'], + 'deric': ['cider', 'cried', 'deric', 'dicer'], + 'derider': ['deirdre', 'derider', 'derride', 'ridered'], + 'deringa': ['degrain', 'deraign', 'deringa', 'gradine', 'grained', 'reading'], + 'derision': ['derision', 'ironside', 'resinoid', 'sirenoid'], + 'derivation': ['derivation', 'ordinative'], + 'derivational': ['derivational', 'revalidation'], + 'derive': ['derive', 'redive'], + 'deriver': ['deriver', 'redrive', 'rivered'], + 'derma': ['armed', 'derma', 'dream', 'ramed'], + 'dermad': ['dermad', 'madder'], + 'dermal': ['dermal', 'marled', 'medlar'], + 'dermatic': ['dermatic', 'timecard'], + 'dermatine': ['demetrian', 'dermatine', 'meandrite', 'minareted'], + 'dermatoneurosis': ['dermatoneurosis', 'neurodermatosis'], + 'dermatophone': ['dermatophone', 'herpetomonad'], + 'dermoblast': ['blastoderm', 'dermoblast'], + 'dermol': ['dermol', 'molder', 'remold'], + 'dermosclerite': ['dermosclerite', 'sclerodermite'], + 'dern': ['dern', 'rend'], + 'derogately': ['delegatory', 'derogately'], + 'derogation': ['derogation', 'trogonidae'], + 'derout': ['derout', 'detour', 'douter'], + 'derride': ['deirdre', 'derider', 'derride', 'ridered'], + 'derries': ['derries', 'desirer', 'resider', 'serried'], + 'derringer': ['derringer', 'regrinder'], + 'derry': ['derry', 'redry', 'ryder'], + 'derust': ['derust', 'duster'], + 'desalt': ['desalt', 'salted'], + 'desand': ['desand', 'sadden', 'sanded'], + 'desaurin': ['denarius', 'desaurin', 'unraised'], + 'descendant': ['adscendent', 'descendant'], + 'descender': ['descender', 'redescend'], + 'descent': ['descent', 'scented'], + 'description': ['description', 'discerption'], + 'desecrate': ['decastere', 'desecrate'], + 'desecration': ['considerate', 'desecration'], + 'deseed': ['deseed', 'seeded'], + 'desertic': ['creedist', 'desertic', 'discreet', 'discrete'], + 'desertion': ['desertion', 'detersion'], + 'deserver': ['deserver', 'reserved', 'reversed'], + 'desex': ['desex', 'sexed'], + 'deshabille': ['deshabille', 'shieldable'], + 'desi': ['desi', 'ides', 'seid', 'side'], + 'desiccation': ['desiccation', 'discoactine'], + 'desight': ['desight', 'sighted'], + 'design': ['design', 'singed'], + 'designer': ['designer', 
'redesign', 'resigned'], + 'desilver': ['desilver', 'silvered'], + 'desirable': ['desirable', 'redisable'], + 'desire': ['desire', 'reside'], + 'desirer': ['derries', 'desirer', 'resider', 'serried'], + 'desirous': ['desirous', 'siderous'], + 'desition': ['desition', 'sedition'], + 'desma': ['desma', 'mesad'], + 'desman': ['amends', 'desman'], + 'desmopathy': ['desmopathy', 'phymatodes'], + 'desorption': ['desorption', 'priodontes'], + 'despair': ['despair', 'pardesi'], + 'despairing': ['despairing', 'spinigrade'], + 'desperation': ['desperation', 'esperantido'], + 'despise': ['despise', 'pedesis'], + 'despiser': ['despiser', 'disperse'], + 'despoil': ['despoil', 'soliped', 'spoiled'], + 'despoiler': ['despoiler', 'leprosied'], + 'despoliation': ['depositional', 'despoliation'], + 'despot': ['despot', 'posted'], + 'despotat': ['despotat', 'postdate'], + 'dessert': ['dessert', 'tressed'], + 'destain': ['destain', 'instead', 'sainted', 'satined'], + 'destine': ['destine', 'edestin'], + 'destinism': ['destinism', 'timidness'], + 'destiny': ['density', 'destiny'], + 'desugar': ['desugar', 'sugared'], + 'detail': ['detail', 'dietal', 'dilate', 'edital', 'tailed'], + 'detailer': ['detailer', 'elaterid'], + 'detain': ['danite', 'detain'], + 'detainal': ['danalite', 'detainal'], + 'detar': ['dater', 'derat', 'detar', 'drate', 'rated', 'trade', 'tread'], + 'detassel': ['dateless', 'detassel'], + 'detax': ['detax', 'taxed'], + 'detecter': ['detecter', 'redetect'], + 'detent': ['detent', 'netted', 'tented'], + 'deter': ['deter', 'treed'], + 'determinant': ['determinant', 'detrainment'], + 'detersion': ['desertion', 'detersion'], + 'detest': ['detest', 'tested'], + 'dethrone': ['dethrone', 'threnode'], + 'detin': ['detin', 'teind', 'tined'], + 'detinet': ['detinet', 'dinette'], + 'detonable': ['denotable', 'detonable'], + 'detonation': ['denotation', 'detonation'], + 'detonative': ['denotative', 'detonative'], + 'detonator': ['detonator', 'tetraodon'], + 'detour': ['derout', 'detour', 'douter'], + 'detracter': ['detracter', 'retracted'], + 'detraction': ['detraction', 'doctrinate', 'tetarconid'], + 'detrain': ['antired', 'detrain', 'randite', 'trained'], + 'detrainment': ['determinant', 'detrainment'], + 'detrusion': ['detrusion', 'tinderous', 'unstoried'], + 'detrusive': ['detrusive', 'divesture', 'servitude'], + 'deuce': ['deuce', 'educe'], + 'deuced': ['deduce', 'deuced'], + 'deul': ['deul', 'duel', 'leud'], + 'deva': ['dave', 'deva', 'vade', 'veda'], + 'devance': ['devance', 'vendace'], + 'develin': ['develin', 'endevil'], + 'developer': ['developer', 'redevelop'], + 'devil': ['devil', 'divel', 'lived'], + 'deviler': ['deliver', 'deviler', 'livered'], + 'devisceration': ['considerative', 'devisceration'], + 'deviser': ['deviser', 'diverse', 'revised'], + 'devitrify': ['devitrify', 'fervidity'], + 'devoid': ['devoid', 'voided'], + 'devoir': ['devoir', 'voider'], + 'devonite': ['denotive', 'devonite'], + 'devourer': ['devourer', 'overdure', 'overrude'], + 'devow': ['devow', 'vowed'], + 'dew': ['dew', 'wed'], + 'dewan': ['awned', 'dewan', 'waned'], + 'dewater': ['dewater', 'tarweed', 'watered'], + 'dewer': ['dewer', 'ewder', 'rewed'], + 'dewey': ['dewey', 'weedy'], + 'dewily': ['dewily', 'widely', 'wieldy'], + 'dewiness': ['dewiness', 'wideness'], + 'dewool': ['dewool', 'elwood', 'wooled'], + 'deworm': ['deworm', 'wormed'], + 'dewy': ['dewy', 'wyde'], + 'dextraural': ['dextraural', 'extradural'], + 'dextrosinistral': ['dextrosinistral', 'sinistrodextral'], + 'dey': ['dey', 'dye', 'yed'], + 
'deyhouse': ['deyhouse', 'dyehouse'], + 'deyship': ['deyship', 'diphyes'], + 'dezinc': ['dezinc', 'zendic'], + 'dha': ['dah', 'dha', 'had'], + 'dhamnoo': ['dhamnoo', 'hoodman', 'manhood'], + 'dhan': ['dhan', 'hand'], + 'dharna': ['andhra', 'dharna'], + 'dheri': ['dheri', 'hider', 'hired'], + 'dhobi': ['bodhi', 'dhobi'], + 'dhoon': ['dhoon', 'hondo'], + 'dhu': ['dhu', 'hud'], + 'di': ['di', 'id'], + 'diabolist': ['diabolist', 'idioblast'], + 'diacetin': ['diacetin', 'indicate'], + 'diacetine': ['deciatine', 'diacetine', 'taenicide', 'teniacide'], + 'diacetyl': ['diacetyl', 'lyctidae'], + 'diachoretic': ['citharoedic', 'diachoretic'], + 'diaclase': ['diaclase', 'sidalcea'], + 'diaconal': ['cladonia', 'condalia', 'diaconal'], + 'diact': ['diact', 'dicta'], + 'diadem': ['diadem', 'mediad'], + 'diaderm': ['admired', 'diaderm'], + 'diaeretic': ['diaeretic', 'icteridae'], + 'diagenetic': ['diagenetic', 'digenetica'], + 'diageotropism': ['diageotropism', 'geodiatropism'], + 'diagonal': ['diagonal', 'ganoidal', 'gonadial'], + 'dial': ['dail', 'dali', 'dial', 'laid', 'lida'], + 'dialect': ['citadel', 'deltaic', 'dialect', 'edictal', 'lactide'], + 'dialectic': ['deictical', 'dialectic'], + 'dialector': ['dialector', 'lacertoid'], + 'dialer': ['ariled', 'derail', 'dialer'], + 'dialin': ['anilid', 'dialin', 'dianil', 'inlaid'], + 'dialing': ['dialing', 'gliadin'], + 'dialister': ['dialister', 'trailside'], + 'diallelon': ['diallelon', 'llandeilo'], + 'dialogism': ['dialogism', 'sigmoidal'], + 'dialystelic': ['deistically', 'dialystelic'], + 'dialytic': ['calidity', 'dialytic'], + 'diamagnet': ['agminated', 'diamagnet'], + 'diamantine': ['diamantine', 'inanimated'], + 'diameter': ['diameter', 'diatreme'], + 'diametric': ['citramide', 'diametric', 'matricide'], + 'diamide': ['amidide', 'diamide', 'mididae'], + 'diamine': ['amidine', 'diamine'], + 'diamorphine': ['diamorphine', 'phronimidae'], + 'dian': ['adin', 'andi', 'dain', 'dani', 'dian', 'naid'], + 'diana': ['danai', 'diana', 'naiad'], + 'diander': ['diander', 'drained'], + 'diane': ['diane', 'idean'], + 'dianetics': ['andesitic', 'dianetics'], + 'dianil': ['anilid', 'dialin', 'dianil', 'inlaid'], + 'diapensia': ['diapensia', 'diaspinae'], + 'diaper': ['diaper', 'paired'], + 'diaphote': ['diaphote', 'hepatoid'], + 'diaphtherin': ['diaphtherin', 'diphtherian'], + 'diapnoic': ['diapnoic', 'pinacoid'], + 'diapnotic': ['antipodic', 'diapnotic'], + 'diaporthe': ['aphrodite', 'atrophied', 'diaporthe'], + 'diarch': ['chidra', 'diarch'], + 'diarchial': ['diarchial', 'rachidial'], + 'diarchy': ['diarchy', 'hyracid'], + 'diarian': ['aridian', 'diarian'], + 'diary': ['dairy', 'diary', 'yaird'], + 'diascia': ['ascidia', 'diascia'], + 'diascope': ['diascope', 'psocidae', 'scopidae'], + 'diaspinae': ['diapensia', 'diaspinae'], + 'diastem': ['diastem', 'misdate'], + 'diastema': ['adamsite', 'diastema'], + 'diaster': ['astride', 'diaster', 'disrate', 'restiad', 'staired'], + 'diastole': ['diastole', 'isolated', 'sodalite', 'solidate'], + 'diastrophic': ['aphrodistic', 'diastrophic'], + 'diastrophy': ['diastrophy', 'dystrophia'], + 'diatomales': ['diatomales', 'mastoidale', 'mastoideal'], + 'diatomean': ['diatomean', 'mantoidea'], + 'diatomin': ['diatomin', 'domitian'], + 'diatonic': ['actinoid', 'diatonic', 'naticoid'], + 'diatreme': ['diameter', 'diatreme'], + 'diatropism': ['diatropism', 'prismatoid'], + 'dib': ['bid', 'dib'], + 'dibatis': ['dabitis', 'dibatis'], + 'dibber': ['dibber', 'ribbed'], + 'dibbler': ['dibbler', 'dribble'], + 'dibrom': ['dibrom', 
'morbid'], + 'dicaryon': ['cynaroid', 'dicaryon'], + 'dicast': ['dicast', 'stadic'], + 'dice': ['dice', 'iced'], + 'dicentra': ['crinated', 'dicentra'], + 'dicer': ['cider', 'cried', 'deric', 'dicer'], + 'diceras': ['diceras', 'radices', 'sidecar'], + 'dich': ['chid', 'dich'], + 'dichroite': ['dichroite', 'erichtoid', 'theriodic'], + 'dichromat': ['chromatid', 'dichromat'], + 'dichter': ['dichter', 'ditcher'], + 'dicolic': ['codicil', 'dicolic'], + 'dicolon': ['dicolon', 'dolcino'], + 'dicoumarin': ['acridonium', 'dicoumarin'], + 'dicta': ['diact', 'dicta'], + 'dictaphone': ['dictaphone', 'endopathic'], + 'dictational': ['antidotical', 'dictational'], + 'dictionary': ['dictionary', 'indicatory'], + 'dicyanine': ['cyanidine', 'dicyanine'], + 'dicycle': ['cyclide', 'decylic', 'dicycle'], + 'dicyema': ['dicyema', 'mediacy'], + 'diddle': ['diddle', 'lidded'], + 'diddler': ['diddler', 'driddle'], + 'didym': ['didym', 'middy'], + 'die': ['die', 'ide'], + 'dieb': ['beid', 'bide', 'debi', 'dieb'], + 'diego': ['diego', 'dogie', 'geoid'], + 'dielytra': ['dielytra', 'tileyard'], + 'diem': ['demi', 'diem', 'dime', 'mide'], + 'dier': ['dier', 'dire', 'reid', 'ride'], + 'diesel': ['diesel', 'sedile', 'seidel'], + 'diet': ['diet', 'dite', 'edit', 'tide', 'tied'], + 'dietal': ['detail', 'dietal', 'dilate', 'edital', 'tailed'], + 'dieter': ['dieter', 'tiered'], + 'dietic': ['citied', 'dietic'], + 'dietics': ['deistic', 'dietics'], + 'dig': ['dig', 'gid'], + 'digenetica': ['diagenetic', 'digenetica'], + 'digeny': ['digeny', 'dyeing'], + 'digester': ['digester', 'redigest'], + 'digitalein': ['digitalein', 'diligentia'], + 'digitation': ['digitation', 'goniatitid'], + 'digitonin': ['digitonin', 'indigotin'], + 'digredient': ['digredient', 'reddingite'], + 'dihalo': ['dihalo', 'haloid'], + 'diiambus': ['basidium', 'diiambus'], + 'dika': ['dika', 'kaid'], + 'dikaryon': ['ankyroid', 'dikaryon'], + 'dike': ['dike', 'keid'], + 'dilacerate': ['dilacerate', 'lacertidae'], + 'dilatant': ['atlantid', 'dilatant'], + 'dilate': ['detail', 'dietal', 'dilate', 'edital', 'tailed'], + 'dilater': ['dilater', 'lardite', 'redtail'], + 'dilatometric': ['calotermitid', 'dilatometric'], + 'dilator': ['dilator', 'ortalid'], + 'dilatory': ['adroitly', 'dilatory', 'idolatry'], + 'diligence': ['ceilinged', 'diligence'], + 'diligentia': ['digitalein', 'diligentia'], + 'dillue': ['dillue', 'illude'], + 'dilluer': ['dilluer', 'illuder'], + 'dilo': ['dilo', 'diol', 'doli', 'idol', 'olid'], + 'diluent': ['diluent', 'untiled'], + 'dilute': ['dilute', 'dultie'], + 'diluted': ['diluted', 'luddite'], + 'dilutent': ['dilutent', 'untilted', 'untitled'], + 'diluvian': ['diluvian', 'induvial'], + 'dim': ['dim', 'mid'], + 'dimatis': ['amidist', 'dimatis'], + 'dimble': ['dimble', 'limbed'], + 'dime': ['demi', 'diem', 'dime', 'mide'], + 'dimer': ['dimer', 'mider'], + 'dimera': ['admire', 'armied', 'damier', 'dimera', 'merida'], + 'dimeran': ['adermin', 'amerind', 'dimeran'], + 'dimerous': ['dimerous', 'soredium'], + 'dimeter': ['demerit', 'dimeter', 'merited', 'mitered'], + 'dimetria': ['dimetria', 'mitridae', 'tiremaid', 'triamide'], + 'diminisher': ['diminisher', 'rediminish'], + 'dimit': ['dimit', 'timid'], + 'dimmer': ['dimmer', 'immerd', 'rimmed'], + 'dimna': ['dimna', 'manid'], + 'dimyarian': ['dimyarian', 'myrianida'], + 'din': ['din', 'ind', 'nid'], + 'dinah': ['ahind', 'dinah'], + 'dinar': ['darin', 'dinar', 'drain', 'indra', 'nadir', 'ranid'], + 'dinder': ['dinder', 'ridden', 'rinded'], + 'dindle': ['dindle', 'niddle'], + 'dine': ['dine', 
'enid', 'inde', 'nide'], + 'diner': ['diner', 'riden', 'rinde'], + 'dinergate': ['argentide', 'denigrate', 'dinergate'], + 'dinero': ['dinero', 'dorine'], + 'dinette': ['detinet', 'dinette'], + 'dineuric': ['dineuric', 'eurindic'], + 'dingar': ['daring', 'dingar', 'gradin'], + 'dinge': ['deign', 'dinge', 'nidge'], + 'dingle': ['dingle', 'elding', 'engild', 'gilden'], + 'dingo': ['dingo', 'doing', 'gondi', 'gonid'], + 'dingwall': ['dingwall', 'windgall'], + 'dingy': ['dingy', 'dying'], + 'dinheiro': ['dinheiro', 'hernioid'], + 'dinic': ['dinic', 'indic'], + 'dining': ['dining', 'indign', 'niding'], + 'dink': ['dink', 'kind'], + 'dinkey': ['dinkey', 'kidney'], + 'dinocerata': ['arctoidean', 'carotidean', 'cordaitean', 'dinocerata'], + 'dinoceratan': ['carnationed', 'dinoceratan'], + 'dinomic': ['dinomic', 'dominic'], + 'dint': ['dint', 'tind'], + 'dinus': ['dinus', 'indus', 'nidus'], + 'dioeciopolygamous': ['dioeciopolygamous', 'polygamodioecious'], + 'diogenite': ['diogenite', 'gideonite'], + 'diol': ['dilo', 'diol', 'doli', 'idol', 'olid'], + 'dion': ['dion', 'nodi', 'odin'], + 'dione': ['deino', 'dione', 'edoni'], + 'diopter': ['diopter', 'peridot', 'proetid', 'protide', 'pteroid'], + 'dioptra': ['dioptra', 'parotid'], + 'dioptral': ['dioptral', 'tripodal'], + 'dioptric': ['dioptric', 'tripodic'], + 'dioptrical': ['dioptrical', 'tripodical'], + 'dioptry': ['dioptry', 'tripody'], + 'diorama': ['amaroid', 'diorama'], + 'dioramic': ['dioramic', 'dromicia'], + 'dioscorein': ['dioscorein', 'dioscorine'], + 'dioscorine': ['dioscorein', 'dioscorine'], + 'dioscuri': ['dioscuri', 'sciuroid'], + 'diose': ['diose', 'idose', 'oside'], + 'diosmin': ['diosmin', 'odinism'], + 'diosmotic': ['diosmotic', 'sodomitic'], + 'diparentum': ['diparentum', 'unimparted'], + 'dipetto': ['dipetto', 'diptote'], + 'diphase': ['aphides', 'diphase'], + 'diphaser': ['diphaser', 'parished', 'raphides', 'sephardi'], + 'diphosphate': ['diphosphate', 'phosphatide'], + 'diphtherian': ['diaphtherin', 'diphtherian'], + 'diphyes': ['deyship', 'diphyes'], + 'diphysite': ['deityship', 'diphysite'], + 'dipicrate': ['dipicrate', 'patricide', 'pediatric'], + 'diplanar': ['diplanar', 'prandial'], + 'diplasion': ['aspidinol', 'diplasion'], + 'dipleura': ['dipleura', 'epidural'], + 'dipleural': ['dipleural', 'preludial'], + 'diplocephalus': ['diplocephalus', 'pseudophallic'], + 'diploe': ['diploe', 'dipole'], + 'diploetic': ['diploetic', 'lepidotic'], + 'diplotene': ['depletion', 'diplotene'], + 'dipnoan': ['dipnoan', 'nonpaid', 'pandion'], + 'dipolar': ['dipolar', 'polarid'], + 'dipole': ['diploe', 'dipole'], + 'dipsaceous': ['dipsaceous', 'spadiceous'], + 'dipter': ['dipter', 'trepid'], + 'dipteraceous': ['dipteraceous', 'epiceratodus'], + 'dipteral': ['dipteral', 'tripedal'], + 'dipterological': ['dipterological', 'pteridological'], + 'dipterologist': ['dipterologist', 'pteridologist'], + 'dipterology': ['dipterology', 'pteridology'], + 'dipteros': ['dipteros', 'portside'], + 'diptote': ['dipetto', 'diptote'], + 'dirca': ['acrid', 'caird', 'carid', 'darci', 'daric', 'dirca'], + 'dircaean': ['caridean', 'dircaean', 'radiance'], + 'dire': ['dier', 'dire', 'reid', 'ride'], + 'direct': ['credit', 'direct'], + 'directable': ['creditable', 'directable'], + 'directer': ['cedriret', 'directer', 'recredit', 'redirect'], + 'direction': ['cretinoid', 'direction'], + 'directional': ['clitoridean', 'directional'], + 'directive': ['creditive', 'directive'], + 'directly': ['directly', 'tridecyl'], + 'directoire': ['cordierite', 'directoire'], + 
'director': ['creditor', 'director'], + 'directorship': ['creditorship', 'directorship'], + 'directress': ['creditress', 'directress'], + 'directrix': ['creditrix', 'directrix'], + 'direly': ['direly', 'idyler'], + 'direption': ['direption', 'perdition', 'tropidine'], + 'dirge': ['dirge', 'gride', 'redig', 'ridge'], + 'dirgelike': ['dirgelike', 'ridgelike'], + 'dirgeman': ['dirgeman', 'margined', 'midrange'], + 'dirgler': ['dirgler', 'girdler'], + 'dirten': ['dirten', 'rident', 'tinder'], + 'dis': ['dis', 'sid'], + 'disa': ['dais', 'dasi', 'disa', 'said', 'sida'], + 'disadventure': ['disadventure', 'unadvertised'], + 'disappearer': ['disappearer', 'redisappear'], + 'disarmed': ['disarmed', 'misdread'], + 'disastimeter': ['disastimeter', 'semistriated'], + 'disattire': ['disattire', 'distraite'], + 'disbud': ['disbud', 'disdub'], + 'disburse': ['disburse', 'subsider'], + 'discastle': ['clidastes', 'discastle'], + 'discern': ['discern', 'rescind'], + 'discerner': ['discerner', 'rescinder'], + 'discernment': ['discernment', 'rescindment'], + 'discerp': ['crisped', 'discerp'], + 'discerption': ['description', 'discerption'], + 'disclike': ['disclike', 'sicklied'], + 'discoactine': ['desiccation', 'discoactine'], + 'discoid': ['discoid', 'disodic'], + 'discontinuer': ['discontinuer', 'undiscretion'], + 'discounter': ['discounter', 'rediscount'], + 'discoverer': ['discoverer', 'rediscover'], + 'discreate': ['discreate', 'sericated'], + 'discreet': ['creedist', 'desertic', 'discreet', 'discrete'], + 'discreetly': ['discreetly', 'discretely'], + 'discreetness': ['discreetness', 'discreteness'], + 'discrepate': ['discrepate', 'pederastic'], + 'discrete': ['creedist', 'desertic', 'discreet', 'discrete'], + 'discretely': ['discreetly', 'discretely'], + 'discreteness': ['discreetness', 'discreteness'], + 'discretion': ['discretion', 'soricident'], + 'discriminator': ['discriminator', 'doctrinairism'], + 'disculpate': ['disculpate', 'spiculated'], + 'discusser': ['discusser', 'rediscuss'], + 'discutable': ['discutable', 'subdeltaic', 'subdialect'], + 'disdub': ['disbud', 'disdub'], + 'disease': ['disease', 'seaside'], + 'diseme': ['demise', 'diseme'], + 'disenact': ['disenact', 'distance'], + 'disendow': ['disendow', 'downside'], + 'disentwine': ['disentwine', 'indentwise'], + 'disharmony': ['disharmony', 'hydramnios'], + 'dishearten': ['dishearten', 'intershade'], + 'dished': ['dished', 'eddish'], + 'disherent': ['disherent', 'hinderest', 'tenderish'], + 'dishling': ['dishling', 'hidlings'], + 'dishonor': ['dishonor', 'ironshod'], + 'dishorn': ['dishorn', 'dronish'], + 'dishpan': ['daphnis', 'dishpan'], + 'disilicate': ['disilicate', 'idealistic'], + 'disimprove': ['disimprove', 'misprovide'], + 'disk': ['disk', 'kids', 'skid'], + 'dislocate': ['dislocate', 'lactoside'], + 'disman': ['danism', 'disman'], + 'dismantle': ['dentalism', 'dismantle'], + 'disme': ['deism', 'disme'], + 'dismemberer': ['dismemberer', 'disremember'], + 'disnature': ['disnature', 'sturnidae', 'truandise'], + 'disnest': ['disnest', 'dissent'], + 'disodic': ['discoid', 'disodic'], + 'disparage': ['disparage', 'grapsidae'], + 'disparation': ['disparation', 'tridiapason'], + 'dispatcher': ['dispatcher', 'redispatch'], + 'dispensable': ['dispensable', 'piebaldness'], + 'dispense': ['dispense', 'piedness'], + 'disperse': ['despiser', 'disperse'], + 'dispetal': ['dispetal', 'pedalist'], + 'dispireme': ['dispireme', 'epidermis'], + 'displayer': ['displayer', 'redisplay'], + 'displeaser': ['displeaser', 'pearlsides'], + 'disponee': 
['disponee', 'openside'], + 'disporum': ['disporum', 'misproud'], + 'disprepare': ['disprepare', 'predespair'], + 'disrate': ['astride', 'diaster', 'disrate', 'restiad', 'staired'], + 'disremember': ['dismemberer', 'disremember'], + 'disrepute': ['disrepute', 'redispute'], + 'disrespect': ['disrespect', 'disscepter'], + 'disrupt': ['disrupt', 'prudist'], + 'disscepter': ['disrespect', 'disscepter'], + 'disseat': ['disseat', 'sestiad'], + 'dissector': ['crosstied', 'dissector'], + 'dissent': ['disnest', 'dissent'], + 'dissenter': ['dissenter', 'tiredness'], + 'dissertate': ['dissertate', 'statesider'], + 'disserve': ['disserve', 'dissever'], + 'dissever': ['disserve', 'dissever'], + 'dissocial': ['cissoidal', 'dissocial'], + 'dissolve': ['dissolve', 'voidless'], + 'dissoul': ['dissoul', 'dulosis', 'solidus'], + 'distale': ['distale', 'salited'], + 'distance': ['disenact', 'distance'], + 'distant': ['dantist', 'distant'], + 'distater': ['distater', 'striated'], + 'distender': ['dendrites', 'distender', 'redistend'], + 'distent': ['dentist', 'distent', 'stinted'], + 'distich': ['distich', 'stichid'], + 'distillage': ['distillage', 'sigillated'], + 'distiller': ['distiller', 'redistill'], + 'distinguisher': ['distinguisher', 'redistinguish'], + 'distoma': ['distoma', 'mastoid'], + 'distome': ['distome', 'modiste'], + 'distrainer': ['distrainer', 'redistrain'], + 'distrait': ['distrait', 'triadist'], + 'distraite': ['disattire', 'distraite'], + 'disturber': ['disturber', 'redisturb'], + 'disulphone': ['disulphone', 'unpolished'], + 'disuniform': ['disuniform', 'indusiform'], + 'dit': ['dit', 'tid'], + 'dita': ['adit', 'dita'], + 'dital': ['datil', 'dital', 'tidal', 'tilda'], + 'ditcher': ['dichter', 'ditcher'], + 'dite': ['diet', 'dite', 'edit', 'tide', 'tied'], + 'diter': ['diter', 'tired', 'tried'], + 'dithionic': ['chitinoid', 'dithionic'], + 'ditone': ['ditone', 'intoed'], + 'ditrochean': ['achondrite', 'ditrochean', 'ordanchite'], + 'diuranate': ['diuranate', 'untiaraed'], + 'diurna': ['danuri', 'diurna', 'dunair', 'durain', 'durani', 'durian'], + 'diurnation': ['diurnation', 'induration'], + 'diurne': ['diurne', 'inured', 'ruined', 'unride'], + 'diva': ['avid', 'diva'], + 'divan': ['divan', 'viand'], + 'divata': ['divata', 'dvaita'], + 'divel': ['devil', 'divel', 'lived'], + 'diver': ['diver', 'drive'], + 'diverge': ['diverge', 'grieved'], + 'diverse': ['deviser', 'diverse', 'revised'], + 'diverter': ['diverter', 'redivert', 'verditer'], + 'divest': ['divest', 'vedist'], + 'divesture': ['detrusive', 'divesture', 'servitude'], + 'divisionism': ['divisionism', 'misdivision'], + 'divorce': ['cervoid', 'divorce'], + 'divorcee': ['coderive', 'divorcee'], + 'do': ['do', 'od'], + 'doable': ['albedo', 'doable'], + 'doarium': ['doarium', 'uramido'], + 'doat': ['doat', 'toad', 'toda'], + 'doater': ['doater', 'toader'], + 'doating': ['antigod', 'doating'], + 'doatish': ['doatish', 'toadish'], + 'dob': ['bod', 'dob'], + 'dobe': ['bode', 'dobe'], + 'dobra': ['abord', 'bardo', 'board', 'broad', 'dobra', 'dorab'], + 'dobrao': ['dobrao', 'doorba'], + 'doby': ['body', 'boyd', 'doby'], + 'doc': ['cod', 'doc'], + 'docetism': ['comedist', 'demotics', 'docetism', 'domestic'], + 'docile': ['cleoid', 'coiled', 'docile'], + 'docity': ['cytoid', 'docity'], + 'docker': ['corked', 'docker', 'redock'], + 'doctorial': ['crotaloid', 'doctorial'], + 'doctorship': ['doctorship', 'trophodisc'], + 'doctrinairism': ['discriminator', 'doctrinairism'], + 'doctrinate': ['detraction', 'doctrinate', 'tetarconid'], + 'doctrine': 
['centroid', 'doctrine'], + 'documental': ['columnated', 'documental'], + 'dod': ['dod', 'odd'], + 'dode': ['dedo', 'dode', 'eddo'], + 'dodecarch': ['decachord', 'dodecarch'], + 'dodlet': ['dodlet', 'toddle'], + 'dodman': ['dodman', 'oddman'], + 'doe': ['doe', 'edo', 'ode'], + 'doeg': ['doeg', 'doge', 'gode'], + 'doer': ['doer', 'redo', 'rode', 'roed'], + 'does': ['does', 'dose'], + 'doesnt': ['doesnt', 'stoned'], + 'dog': ['dog', 'god'], + 'dogate': ['dogate', 'dotage', 'togaed'], + 'dogbane': ['bondage', 'dogbane'], + 'dogbite': ['bigoted', 'dogbite'], + 'doge': ['doeg', 'doge', 'gode'], + 'dogger': ['dogger', 'gorged'], + 'doghead': ['doghead', 'godhead'], + 'doghood': ['doghood', 'godhood'], + 'dogie': ['diego', 'dogie', 'geoid'], + 'dogless': ['dogless', 'glossed', 'godless'], + 'doglike': ['doglike', 'godlike'], + 'dogly': ['dogly', 'godly', 'goldy'], + 'dogra': ['dargo', 'dogra', 'drago'], + 'dogship': ['dogship', 'godship'], + 'dogstone': ['dogstone', 'stegodon'], + 'dogwatch': ['dogwatch', 'watchdog'], + 'doina': ['adion', 'danio', 'doina', 'donia'], + 'doing': ['dingo', 'doing', 'gondi', 'gonid'], + 'doko': ['doko', 'dook'], + 'dol': ['dol', 'lod', 'old'], + 'dola': ['alod', 'dola', 'load', 'odal'], + 'dolcian': ['dolcian', 'nodical'], + 'dolciano': ['conoidal', 'dolciano'], + 'dolcino': ['dicolon', 'dolcino'], + 'dole': ['dole', 'elod', 'lode', 'odel'], + 'dolesman': ['dolesman', 'lodesman'], + 'doless': ['doless', 'dossel'], + 'doli': ['dilo', 'diol', 'doli', 'idol', 'olid'], + 'dolia': ['aloid', 'dolia', 'idola'], + 'dolina': ['dolina', 'ladino'], + 'doline': ['doline', 'indole', 'leonid', 'loined', 'olenid'], + 'dolium': ['dolium', 'idolum'], + 'dolly': ['dolly', 'lloyd'], + 'dolman': ['almond', 'dolman'], + 'dolor': ['dolor', 'drool'], + 'dolose': ['dolose', 'oodles', 'soodle'], + 'dolphin': ['dolphin', 'pinhold'], + 'dolt': ['dolt', 'told'], + 'dom': ['dom', 'mod'], + 'domain': ['amidon', 'daimon', 'domain'], + 'domainal': ['domainal', 'domanial'], + 'domal': ['domal', 'modal'], + 'domanial': ['domainal', 'domanial'], + 'dome': ['dome', 'mode', 'moed'], + 'domer': ['domer', 'drome'], + 'domestic': ['comedist', 'demotics', 'docetism', 'domestic'], + 'domic': ['comid', 'domic'], + 'domical': ['domical', 'lacmoid'], + 'dominance': ['demicanon', 'dominance'], + 'dominate': ['dominate', 'nematoid'], + 'dominated': ['demantoid', 'dominated'], + 'domination': ['admonition', 'domination'], + 'dominative': ['admonitive', 'dominative'], + 'dominator': ['admonitor', 'dominator'], + 'domine': ['domine', 'domnei', 'emodin', 'medino'], + 'dominial': ['dominial', 'imolinda', 'limoniad'], + 'dominic': ['dinomic', 'dominic'], + 'domino': ['domino', 'monoid'], + 'domitian': ['diatomin', 'domitian'], + 'domnei': ['domine', 'domnei', 'emodin', 'medino'], + 'don': ['don', 'nod'], + 'donal': ['donal', 'nodal'], + 'donar': ['adorn', 'donar', 'drona', 'radon'], + 'donated': ['donated', 'nodated'], + 'donatiaceae': ['actaeonidae', 'donatiaceae'], + 'donatism': ['donatism', 'saintdom'], + 'donator': ['donator', 'odorant', 'tornado'], + 'done': ['done', 'node'], + 'donet': ['donet', 'noted', 'toned'], + 'dong': ['dong', 'gond'], + 'donga': ['donga', 'gonad'], + 'dongola': ['dongola', 'gondola'], + 'dongon': ['dongon', 'nongod'], + 'donia': ['adion', 'danio', 'doina', 'donia'], + 'donna': ['donna', 'nonda'], + 'donnert': ['donnert', 'tendron'], + 'donnie': ['donnie', 'indone', 'ondine'], + 'donor': ['donor', 'rondo'], + 'donorship': ['donorship', 'rhodopsin'], + 'donsie': ['deinos', 'donsie', 
'inodes', 'onside'], + 'donum': ['donum', 'mound'], + 'doob': ['bodo', 'bood', 'doob'], + 'dook': ['doko', 'dook'], + 'dool': ['dool', 'lood'], + 'dooli': ['dooli', 'iodol'], + 'doom': ['doom', 'mood'], + 'doomer': ['doomer', 'mooder', 'redoom', 'roomed'], + 'dooms': ['dooms', 'sodom'], + 'door': ['door', 'odor', 'oord', 'rood'], + 'doorba': ['dobrao', 'doorba'], + 'doorbell': ['bordello', 'doorbell'], + 'doored': ['doored', 'odored'], + 'doorframe': ['doorframe', 'reformado'], + 'doorless': ['doorless', 'odorless'], + 'doorplate': ['doorplate', 'leptodora'], + 'doorpost': ['doorpost', 'doorstop'], + 'doorstone': ['doorstone', 'roodstone'], + 'doorstop': ['doorpost', 'doorstop'], + 'doorweed': ['deerwood', 'doorweed'], + 'dop': ['dop', 'pod'], + 'dopa': ['apod', 'dopa'], + 'doper': ['doper', 'pedro', 'pored'], + 'dopplerite': ['dopplerite', 'lepidopter'], + 'dor': ['dor', 'rod'], + 'dora': ['dora', 'orad', 'road'], + 'dorab': ['abord', 'bardo', 'board', 'broad', 'dobra', 'dorab'], + 'doree': ['doree', 'erode'], + 'dori': ['dori', 'roid'], + 'doria': ['aroid', 'doria', 'radio'], + 'dorian': ['dorian', 'inroad', 'ordain'], + 'dorical': ['cordial', 'dorical'], + 'dorine': ['dinero', 'dorine'], + 'dorlach': ['chordal', 'dorlach'], + 'dormancy': ['dormancy', 'mordancy'], + 'dormant': ['dormant', 'mordant'], + 'dormer': ['dormer', 'remord'], + 'dormie': ['dormie', 'moider'], + 'dorn': ['dorn', 'rond'], + 'dornic': ['dornic', 'nordic'], + 'dorothea': ['dorothea', 'theodora'], + 'dorp': ['dorp', 'drop', 'prod'], + 'dorsel': ['dorsel', 'seldor', 'solder'], + 'dorsoapical': ['dorsoapical', 'prosodiacal'], + 'dorsocaudal': ['caudodorsal', 'dorsocaudal'], + 'dorsocentral': ['centrodorsal', 'dorsocentral'], + 'dorsocervical': ['cervicodorsal', 'dorsocervical'], + 'dorsolateral': ['dorsolateral', 'laterodorsal'], + 'dorsomedial': ['dorsomedial', 'mediodorsal'], + 'dorsosacral': ['dorsosacral', 'sacrodorsal'], + 'dorsoventrad': ['dorsoventrad', 'ventrodorsad'], + 'dorsoventral': ['dorsoventral', 'ventrodorsal'], + 'dorsoventrally': ['dorsoventrally', 'ventrodorsally'], + 'dos': ['dos', 'ods', 'sod'], + 'dosa': ['dosa', 'sado', 'soda'], + 'dosage': ['dosage', 'seadog'], + 'dose': ['does', 'dose'], + 'doser': ['doser', 'rosed'], + 'dosimetric': ['dosimetric', 'mediocrist'], + 'dossel': ['doless', 'dossel'], + 'dosser': ['dosser', 'sordes'], + 'dot': ['dot', 'tod'], + 'dotage': ['dogate', 'dotage', 'togaed'], + 'dote': ['dote', 'tode', 'toed'], + 'doter': ['doter', 'tored', 'trode'], + 'doty': ['doty', 'tody'], + 'doubler': ['boulder', 'doubler'], + 'doubter': ['doubter', 'obtrude', 'outbred', 'redoubt'], + 'douc': ['douc', 'duco'], + 'douce': ['coude', 'douce'], + 'doum': ['doum', 'moud', 'odum'], + 'doup': ['doup', 'updo'], + 'dour': ['dour', 'duro', 'ordu', 'roud'], + 'dourine': ['dourine', 'neuroid'], + 'dourly': ['dourly', 'lourdy'], + 'douser': ['douser', 'soured'], + 'douter': ['derout', 'detour', 'douter'], + 'dover': ['dover', 'drove', 'vedro'], + 'dow': ['dow', 'owd', 'wod'], + 'dowager': ['dowager', 'wordage'], + 'dower': ['dower', 'rowed'], + 'dowl': ['dowl', 'wold'], + 'dowlas': ['dowlas', 'oswald'], + 'downbear': ['downbear', 'rawboned'], + 'downcome': ['comedown', 'downcome'], + 'downer': ['downer', 'wonder', 'worden'], + 'downingia': ['downingia', 'godwinian'], + 'downset': ['downset', 'setdown'], + 'downside': ['disendow', 'downside'], + 'downtake': ['downtake', 'takedown'], + 'downthrow': ['downthrow', 'throwdown'], + 'downturn': ['downturn', 'turndown'], + 'downward': ['downward', 
'drawdown'], + 'dowry': ['dowry', 'rowdy', 'wordy'], + 'dowser': ['dowser', 'drowse'], + 'doxa': ['doxa', 'odax'], + 'doyle': ['doyle', 'yodel'], + 'dozen': ['dozen', 'zoned'], + 'drab': ['bard', 'brad', 'drab'], + 'draba': ['barad', 'draba'], + 'drabble': ['dabbler', 'drabble'], + 'draco': ['cardo', 'draco'], + 'draconic': ['cancroid', 'draconic'], + 'draconis': ['draconis', 'sardonic'], + 'dracontian': ['dracontian', 'octandrian'], + 'drafter': ['drafter', 'redraft'], + 'drag': ['darg', 'drag', 'grad'], + 'draggle': ['draggle', 'raggled'], + 'dragline': ['dragline', 'reginald', 'ringlead'], + 'dragman': ['dragman', 'grandam', 'grandma'], + 'drago': ['dargo', 'dogra', 'drago'], + 'dragoman': ['dragoman', 'garamond', 'ondagram'], + 'dragonize': ['dragonize', 'organized'], + 'dragoon': ['dragoon', 'gadroon'], + 'dragoonage': ['dragoonage', 'gadroonage'], + 'dragsman': ['dargsman', 'dragsman'], + 'drail': ['drail', 'laird', 'larid', 'liard'], + 'drain': ['darin', 'dinar', 'drain', 'indra', 'nadir', 'ranid'], + 'drainable': ['albardine', 'drainable'], + 'drainage': ['drainage', 'gardenia'], + 'draine': ['darien', 'draine'], + 'drained': ['diander', 'drained'], + 'drainer': ['darrein', 'drainer'], + 'drainman': ['drainman', 'mandarin'], + 'draintile': ['deliriant', 'draintile', 'interlaid'], + 'drake': ['daker', 'drake', 'kedar', 'radek'], + 'dramme': ['dammer', 'dramme'], + 'drang': ['drang', 'grand'], + 'drape': ['drape', 'padre'], + 'drat': ['dart', 'drat'], + 'drate': ['dater', 'derat', 'detar', 'drate', 'rated', 'trade', 'tread'], + 'draw': ['draw', 'ward'], + 'drawable': ['drawable', 'wardable'], + 'drawback': ['backward', 'drawback'], + 'drawbore': ['drawbore', 'wardrobe'], + 'drawbridge': ['bridgeward', 'drawbridge'], + 'drawdown': ['downward', 'drawdown'], + 'drawee': ['drawee', 'rewade'], + 'drawer': ['drawer', 'redraw', 'reward', 'warder'], + 'drawers': ['drawers', 'resward'], + 'drawfile': ['drawfile', 'lifeward'], + 'drawgate': ['drawgate', 'gateward'], + 'drawhead': ['drawhead', 'headward'], + 'drawhorse': ['drawhorse', 'shoreward'], + 'drawing': ['drawing', 'ginward', 'warding'], + 'drawoff': ['drawoff', 'offward'], + 'drawout': ['drawout', 'outdraw', 'outward'], + 'drawsheet': ['drawsheet', 'watershed'], + 'drawstop': ['drawstop', 'postward'], + 'dray': ['adry', 'dray', 'yard'], + 'drayage': ['drayage', 'yardage'], + 'drayman': ['drayman', 'yardman'], + 'dread': ['adder', 'dread', 'readd'], + 'dreadly': ['dreadly', 'laddery'], + 'dream': ['armed', 'derma', 'dream', 'ramed'], + 'dreamage': ['dreamage', 'redamage'], + 'dreamer': ['dreamer', 'redream'], + 'dreamhole': ['dreamhole', 'heloderma'], + 'dreamish': ['dreamish', 'semihard'], + 'dreamland': ['dreamland', 'raddleman'], + 'drear': ['darer', 'drear'], + 'dreary': ['dreary', 'yarder'], + 'dredge': ['dredge', 'gedder'], + 'dree': ['deer', 'dere', 'dree', 'rede', 'reed'], + 'dreiling': ['dreiling', 'gridelin'], + 'dressage': ['degasser', 'dressage'], + 'dresser': ['dresser', 'redress'], + 'drib': ['bird', 'drib'], + 'dribble': ['dibbler', 'dribble'], + 'driblet': ['birdlet', 'driblet'], + 'driddle': ['diddler', 'driddle'], + 'drier': ['drier', 'rider'], + 'driest': ['driest', 'stride'], + 'driller': ['driller', 'redrill'], + 'drillman': ['drillman', 'mandrill'], + 'dringle': ['dringle', 'grindle'], + 'drisheen': ['denshire', 'drisheen'], + 'drive': ['diver', 'drive'], + 'driven': ['driven', 'nervid', 'verdin'], + 'drivescrew': ['drivescrew', 'screwdrive'], + 'drogue': ['drogue', 'gourde'], + 'drolly': ['drolly', 'lordly'], + 
'drome': ['domer', 'drome'], + 'dromicia': ['dioramic', 'dromicia'], + 'drona': ['adorn', 'donar', 'drona', 'radon'], + 'drone': ['drone', 'ronde'], + 'drongo': ['drongo', 'gordon'], + 'dronish': ['dishorn', 'dronish'], + 'drool': ['dolor', 'drool'], + 'drop': ['dorp', 'drop', 'prod'], + 'dropsy': ['dropsy', 'dryops'], + 'drossel': ['drossel', 'rodless'], + 'drove': ['dover', 'drove', 'vedro'], + 'drow': ['drow', 'word'], + 'drowse': ['dowser', 'drowse'], + 'drub': ['burd', 'drub'], + 'drugger': ['drugger', 'grudger'], + 'druggery': ['druggery', 'grudgery'], + 'drungar': ['drungar', 'gurnard'], + 'drupe': ['drupe', 'duper', 'perdu', 'prude', 'pured'], + 'drusean': ['asunder', 'drusean'], + 'dryops': ['dropsy', 'dryops'], + 'duad': ['addu', 'dadu', 'daud', 'duad'], + 'dual': ['auld', 'dual', 'laud', 'udal'], + 'duali': ['duali', 'dulia'], + 'dualin': ['dualin', 'ludian', 'unlaid'], + 'dualism': ['dualism', 'laudism'], + 'dualist': ['dualist', 'laudist'], + 'dub': ['bud', 'dub'], + 'dubber': ['dubber', 'rubbed'], + 'dubious': ['biduous', 'dubious'], + 'dubitate': ['dubitate', 'tabitude'], + 'ducal': ['cauld', 'ducal'], + 'duces': ['decus', 'duces'], + 'duckstone': ['duckstone', 'unstocked'], + 'duco': ['douc', 'duco'], + 'ducted': ['deduct', 'ducted'], + 'duction': ['conduit', 'duction', 'noctuid'], + 'duculinae': ['duculinae', 'nuculidae'], + 'dudeen': ['denude', 'dudeen'], + 'dudler': ['dudler', 'ruddle'], + 'duel': ['deul', 'duel', 'leud'], + 'dueler': ['dueler', 'eluder'], + 'dueling': ['dueling', 'indulge'], + 'duello': ['deloul', 'duello'], + 'duenna': ['duenna', 'undean'], + 'duer': ['duer', 'dure', 'rude', 'urde'], + 'duffer': ['duffer', 'ruffed'], + 'dufter': ['dufter', 'turfed'], + 'dug': ['dug', 'gud'], + 'duim': ['duim', 'muid'], + 'dukery': ['dukery', 'duyker'], + 'dulat': ['adult', 'dulat'], + 'dulcian': ['dulcian', 'incudal', 'lucanid', 'lucinda'], + 'dulciana': ['claudian', 'dulciana'], + 'duler': ['duler', 'urled'], + 'dulia': ['duali', 'dulia'], + 'dullify': ['dullify', 'fluidly'], + 'dulosis': ['dissoul', 'dulosis', 'solidus'], + 'dulseman': ['dulseman', 'unalmsed'], + 'dultie': ['dilute', 'dultie'], + 'dum': ['dum', 'mud'], + 'duma': ['duma', 'maud'], + 'dumaist': ['dumaist', 'stadium'], + 'dumontite': ['dumontite', 'unomitted'], + 'dumple': ['dumple', 'plumed'], + 'dunair': ['danuri', 'diurna', 'dunair', 'durain', 'durani', 'durian'], + 'dunal': ['dunal', 'laund', 'lunda', 'ulnad'], + 'dunderpate': ['dunderpate', 'undeparted'], + 'dune': ['dune', 'nude', 'unde'], + 'dungaree': ['dungaree', 'guardeen', 'unagreed', 'underage', 'ungeared'], + 'dungeon': ['dungeon', 'negundo'], + 'dunger': ['dunger', 'gerund', 'greund', 'nudger'], + 'dungol': ['dungol', 'ungold'], + 'dungy': ['dungy', 'gundy'], + 'dunite': ['dunite', 'united', 'untied'], + 'dunlap': ['dunlap', 'upland'], + 'dunne': ['dunne', 'unden'], + 'dunner': ['dunner', 'undern'], + 'dunpickle': ['dunpickle', 'unpickled'], + 'dunstable': ['dunstable', 'unblasted', 'unstabled'], + 'dunt': ['dunt', 'tund'], + 'duny': ['duny', 'undy'], + 'duo': ['duo', 'udo'], + 'duodenal': ['duodenal', 'unloaded'], + 'duodenocholecystostomy': ['cholecystoduodenostomy', 'duodenocholecystostomy'], + 'duodenojejunal': ['duodenojejunal', 'jejunoduodenal'], + 'duodenopancreatectomy': ['duodenopancreatectomy', 'pancreatoduodenectomy'], + 'dup': ['dup', 'pud'], + 'duper': ['drupe', 'duper', 'perdu', 'prude', 'pured'], + 'dupion': ['dupion', 'unipod'], + 'dupla': ['dupla', 'plaud'], + 'duplone': ['duplone', 'unpoled'], + 'dura': ['ardu', 'daur', 
'dura'], + 'durain': ['danuri', 'diurna', 'dunair', 'durain', 'durani', 'durian'], + 'duramen': ['duramen', 'maunder', 'unarmed'], + 'durance': ['durance', 'redunca', 'unraced'], + 'durango': ['aground', 'durango'], + 'durani': ['danuri', 'diurna', 'dunair', 'durain', 'durani', 'durian'], + 'durant': ['durant', 'tundra'], + 'durban': ['durban', 'undrab'], + 'durdenite': ['durdenite', 'undertide'], + 'dure': ['duer', 'dure', 'rude', 'urde'], + 'durene': ['durene', 'endure'], + 'durenol': ['durenol', 'lounder', 'roundel'], + 'durgan': ['durgan', 'undrag'], + 'durian': ['danuri', 'diurna', 'dunair', 'durain', 'durani', 'durian'], + 'during': ['during', 'ungird'], + 'durity': ['durity', 'rudity'], + 'durmast': ['durmast', 'mustard'], + 'duro': ['dour', 'duro', 'ordu', 'roud'], + 'dusken': ['dusken', 'sundek'], + 'dust': ['dust', 'stud'], + 'duster': ['derust', 'duster'], + 'dustin': ['dustin', 'nudist'], + 'dustpan': ['dustpan', 'upstand'], + 'dusty': ['dusty', 'study'], + 'duyker': ['dukery', 'duyker'], + 'dvaita': ['divata', 'dvaita'], + 'dwale': ['dwale', 'waled', 'weald'], + 'dwine': ['dwine', 'edwin', 'wendi', 'widen', 'wined'], + 'dyad': ['addy', 'dyad'], + 'dyas': ['days', 'dyas'], + 'dye': ['dey', 'dye', 'yed'], + 'dyehouse': ['deyhouse', 'dyehouse'], + 'dyeing': ['digeny', 'dyeing'], + 'dyer': ['dyer', 'yerd'], + 'dying': ['dingy', 'dying'], + 'dynamo': ['dynamo', 'monday'], + 'dynamoelectric': ['dynamoelectric', 'electrodynamic'], + 'dynamoelectrical': ['dynamoelectrical', 'electrodynamical'], + 'dynamotor': ['androtomy', 'dynamotor'], + 'dyne': ['deny', 'dyne'], + 'dyophone': ['dyophone', 'honeypod'], + 'dysluite': ['dysluite', 'sedulity'], + 'dysneuria': ['dasyurine', 'dysneuria'], + 'dysphoric': ['chrysopid', 'dysphoric'], + 'dysphrenia': ['dysphrenia', 'sphyraenid', 'sphyrnidae'], + 'dystome': ['dystome', 'modesty'], + 'dystrophia': ['diastrophy', 'dystrophia'], + 'ea': ['ae', 'ea'], + 'each': ['ache', 'each', 'haec'], + 'eager': ['agree', 'eager', 'eagre'], + 'eagle': ['aegle', 'eagle', 'galee'], + 'eagless': ['ageless', 'eagless'], + 'eaglet': ['eaglet', 'legate', 'teagle', 'telega'], + 'eagre': ['agree', 'eager', 'eagre'], + 'ean': ['ean', 'nae', 'nea'], + 'ear': ['aer', 'are', 'ear', 'era', 'rea'], + 'eared': ['eared', 'erade'], + 'earful': ['earful', 'farleu', 'ferula'], + 'earing': ['arenig', 'earing', 'gainer', 'reagin', 'regain'], + 'earl': ['earl', 'eral', 'lear', 'real'], + 'earlap': ['earlap', 'parale'], + 'earle': ['areel', 'earle'], + 'earlet': ['earlet', 'elater', 'relate'], + 'earliness': ['earliness', 'naileress'], + 'earlship': ['earlship', 'pearlish'], + 'early': ['early', 'layer', 'relay'], + 'earn': ['arne', 'earn', 'rane'], + 'earner': ['earner', 'ranere'], + 'earnest': ['earnest', 'eastern', 'nearest'], + 'earnestly': ['earnestly', 'easternly'], + 'earnful': ['earnful', 'funeral'], + 'earning': ['earning', 'engrain'], + 'earplug': ['earplug', 'graupel', 'plaguer'], + 'earring': ['earring', 'grainer'], + 'earringed': ['earringed', 'grenadier'], + 'earshot': ['asthore', 'earshot'], + 'eartab': ['abater', 'artabe', 'eartab', 'trabea'], + 'earth': ['earth', 'hater', 'heart', 'herat', 'rathe'], + 'earthborn': ['abhorrent', 'earthborn'], + 'earthed': ['earthed', 'hearted'], + 'earthen': ['earthen', 'enheart', 'hearten', 'naether', 'teheran', 'traheen'], + 'earthian': ['earthian', 'rhaetian'], + 'earthiness': ['earthiness', 'heartiness'], + 'earthless': ['earthless', 'heartless'], + 'earthling': ['earthling', 'heartling'], + 'earthly': ['earthly', 'heartly', 
'lathery', 'rathely'], + 'earthnut': ['earthnut', 'heartnut'], + 'earthpea': ['earthpea', 'heartpea'], + 'earthquake': ['earthquake', 'heartquake'], + 'earthward': ['earthward', 'heartward'], + 'earthy': ['earthy', 'hearty', 'yearth'], + 'earwig': ['earwig', 'grewia'], + 'earwitness': ['earwitness', 'wateriness'], + 'easel': ['easel', 'lease'], + 'easement': ['easement', 'estamene'], + 'easer': ['easer', 'erase'], + 'easily': ['easily', 'elysia'], + 'easing': ['easing', 'sangei'], + 'east': ['ates', 'east', 'eats', 'sate', 'seat', 'seta'], + 'eastabout': ['aetobatus', 'eastabout'], + 'eastbound': ['eastbound', 'unboasted'], + 'easter': ['asteer', + 'easter', + 'eastre', + 'reseat', + 'saeter', + 'seater', + 'staree', + 'teaser', + 'teresa'], + 'easterling': ['easterling', 'generalist'], + 'eastern': ['earnest', 'eastern', 'nearest'], + 'easternly': ['earnestly', 'easternly'], + 'easting': ['easting', + 'gainset', + 'genista', + 'ingesta', + 'seating', + 'signate', + 'teasing'], + 'eastlake': ['alestake', 'eastlake'], + 'eastre': ['asteer', + 'easter', + 'eastre', + 'reseat', + 'saeter', + 'seater', + 'staree', + 'teaser', + 'teresa'], + 'easy': ['easy', 'eyas'], + 'eat': ['ate', 'eat', 'eta', 'tae', 'tea'], + 'eatberry': ['betrayer', 'eatberry', 'rebetray', 'teaberry'], + 'eaten': ['eaten', 'enate'], + 'eater': ['arete', 'eater', 'teaer'], + 'eating': ['eating', 'ingate', 'tangie'], + 'eats': ['ates', 'east', 'eats', 'sate', 'seat', 'seta'], + 'eave': ['eave', 'evea'], + 'eaved': ['deave', 'eaved', 'evade'], + 'eaver': ['eaver', 'reave'], + 'eaves': ['eaves', 'evase', 'seave'], + 'eben': ['been', 'bene', 'eben'], + 'ebenales': ['ebenales', 'lebanese'], + 'ebon': ['beno', 'bone', 'ebon'], + 'ebony': ['boney', 'ebony'], + 'ebriety': ['byerite', 'ebriety'], + 'eburna': ['eburna', 'unbare', 'unbear', 'urbane'], + 'eburnated': ['eburnated', 'underbeat', 'unrebated'], + 'eburnian': ['eburnian', 'inurbane'], + 'ecad': ['cade', 'dace', 'ecad'], + 'ecanda': ['adance', 'ecanda'], + 'ecardinal': ['ecardinal', 'lardacein'], + 'ecarinate': ['anaeretic', 'ecarinate'], + 'ecarte': ['cerate', 'create', 'ecarte'], + 'ecaudata': ['acaudate', 'ecaudata'], + 'ecclesiasticism': ['ecclesiasticism', 'misecclesiastic'], + 'eche': ['chee', 'eche'], + 'echelon': ['chelone', 'echelon'], + 'echeveria': ['echeveria', 'reachieve'], + 'echidna': ['chained', 'echidna'], + 'echinal': ['chilean', 'echinal', 'nichael'], + 'echinate': ['echinate', 'hecatine'], + 'echinital': ['echinital', 'inethical'], + 'echis': ['echis', 'shice'], + 'echoer': ['choree', 'cohere', 'echoer'], + 'echoic': ['choice', 'echoic'], + 'echoist': ['chitose', 'echoist'], + 'eciton': ['eciton', 'noetic', 'notice', 'octine'], + 'eckehart': ['eckehart', 'hacktree'], + 'eclair': ['carlie', 'claire', 'eclair', 'erical'], + 'eclat': ['cleat', 'eclat', 'ectal', 'lacet', 'tecla'], + 'eclipsable': ['eclipsable', 'spliceable'], + 'eclipser': ['eclipser', 'pericles', 'resplice'], + 'economics': ['economics', 'neocosmic'], + 'economism': ['economism', 'monoecism', 'monosemic'], + 'economist': ['economist', 'mesotonic'], + 'ecorticate': ['ecorticate', 'octaeteric'], + 'ecostate': ['coestate', 'ecostate'], + 'ecotonal': ['colonate', 'ecotonal'], + 'ecotype': ['ecotype', 'ocypete'], + 'ecrasite': ['ecrasite', 'sericate'], + 'ecru': ['cure', 'ecru', 'eruc'], + 'ectad': ['cadet', 'ectad'], + 'ectal': ['cleat', 'eclat', 'ectal', 'lacet', 'tecla'], + 'ectasis': ['ascites', 'ectasis'], + 'ectene': ['cetene', 'ectene'], + 'ectental': ['ectental', 'tentacle'], + 
'ectiris': ['ectiris', 'eristic'], + 'ectocardia': ['coradicate', 'ectocardia'], + 'ectocranial': ['calonectria', 'ectocranial'], + 'ectoglia': ['ectoglia', 'geotical', 'goetical'], + 'ectomorph': ['ectomorph', 'topchrome'], + 'ectomorphic': ['cetomorphic', 'chemotropic', 'ectomorphic'], + 'ectomorphy': ['chromotype', 'cormophyte', 'ectomorphy'], + 'ectopia': ['ectopia', 'opacite'], + 'ectopy': ['cotype', 'ectopy'], + 'ectorhinal': ['chlorinate', 'ectorhinal', 'tornachile'], + 'ectosarc': ['ectosarc', 'reaccost'], + 'ectrogenic': ['ectrogenic', 'egocentric', 'geocentric'], + 'ectromelia': ['carmeloite', 'ectromelia', 'meteorical'], + 'ectropion': ['ectropion', 'neotropic'], + 'ed': ['de', 'ed'], + 'edda': ['dade', 'dead', 'edda'], + 'eddaic': ['caddie', 'eddaic'], + 'eddish': ['dished', 'eddish'], + 'eddo': ['dedo', 'dode', 'eddo'], + 'edema': ['adeem', 'ameed', 'edema'], + 'eden': ['dene', 'eden', 'need'], + 'edental': ['dalteen', 'dentale', 'edental'], + 'edentata': ['antedate', 'edentata'], + 'edessan': ['deaness', 'edessan'], + 'edestan': ['edestan', 'standee'], + 'edestin': ['destine', 'edestin'], + 'edgar': ['edgar', 'grade'], + 'edger': ['edger', 'greed'], + 'edgerman': ['edgerman', 'gendarme'], + 'edgrew': ['edgrew', 'wedger'], + 'edible': ['debile', 'edible'], + 'edict': ['cetid', 'edict'], + 'edictal': ['citadel', 'deltaic', 'dialect', 'edictal', 'lactide'], + 'edification': ['deification', 'edification'], + 'edificatory': ['deificatory', 'edificatory'], + 'edifier': ['deifier', 'edifier'], + 'edify': ['deify', 'edify'], + 'edit': ['diet', 'dite', 'edit', 'tide', 'tied'], + 'edital': ['detail', 'dietal', 'dilate', 'edital', 'tailed'], + 'edith': ['edith', 'ethid'], + 'edition': ['edition', 'odinite', 'otidine', 'tineoid'], + 'editor': ['editor', 'triode'], + 'editorial': ['editorial', 'radiolite'], + 'edmund': ['edmund', 'mudden'], + 'edna': ['ande', 'dane', 'dean', 'edna'], + 'edo': ['doe', 'edo', 'ode'], + 'edoni': ['deino', 'dione', 'edoni'], + 'education': ['coadunite', 'education', 'noctuidae'], + 'educe': ['deuce', 'educe'], + 'edward': ['edward', 'wadder', 'warded'], + 'edwin': ['dwine', 'edwin', 'wendi', 'widen', 'wined'], + 'eel': ['eel', 'lee'], + 'eelgrass': ['eelgrass', 'gearless', 'rageless'], + 'eelpot': ['eelpot', 'opelet'], + 'eelspear': ['eelspear', 'prelease'], + 'eely': ['eely', 'yeel'], + 'eer': ['eer', 'ere', 'ree'], + 'efik': ['efik', 'fike'], + 'eft': ['eft', 'fet'], + 'egad': ['aged', 'egad', 'gade'], + 'egba': ['egba', 'gabe'], + 'egbo': ['bego', 'egbo'], + 'egeran': ['egeran', 'enrage', 'ergane', 'genear', 'genera'], + 'egest': ['egest', 'geest', 'geste'], + 'egger': ['egger', 'grege'], + 'egghot': ['egghot', 'hogget'], + 'eggler': ['eggler', 'legger'], + 'eggy': ['eggy', 'yegg'], + 'eglantine': ['eglantine', 'inelegant', 'legantine'], + 'eglatere': ['eglatere', 'regelate', 'relegate'], + 'egma': ['egma', 'game', 'mage'], + 'ego': ['ego', 'geo'], + 'egocentric': ['ectrogenic', 'egocentric', 'geocentric'], + 'egoist': ['egoist', 'stogie'], + 'egol': ['egol', 'goel', 'loge', 'ogle', 'oleg'], + 'egotheism': ['egotheism', 'eightsome'], + 'egret': ['egret', 'greet', 'reget'], + 'eh': ['eh', 'he'], + 'ehretia': ['ehretia', 'etheria'], + 'eident': ['eident', 'endite'], + 'eidograph': ['eidograph', 'ideograph'], + 'eidology': ['eidology', 'ideology'], + 'eighth': ['eighth', 'height'], + 'eightsome': ['egotheism', 'eightsome'], + 'eigne': ['eigne', 'genie'], + 'eileen': ['eileen', 'lienee'], + 'ekaha': ['ekaha', 'hakea'], + 'eke': ['eke', 'kee'], + 'eker': 
['eker', 'reek'], + 'ekoi': ['ekoi', 'okie'], + 'ekron': ['ekron', 'krone'], + 'ektene': ['ektene', 'ketene'], + 'elabrate': ['elabrate', 'tearable'], + 'elaidic': ['aedilic', 'elaidic'], + 'elaidin': ['anilide', 'elaidin'], + 'elain': ['alien', 'aline', 'anile', 'elain', 'elian', 'laine', 'linea'], + 'elaine': ['aileen', 'elaine'], + 'elamite': ['alemite', 'elamite'], + 'elance': ['elance', 'enlace'], + 'eland': ['eland', 'laden', 'lenad'], + 'elanet': ['elanet', 'lanete', 'lateen'], + 'elanus': ['elanus', 'unseal'], + 'elaphomyces': ['elaphomyces', 'mesocephaly'], + 'elaphurus': ['elaphurus', 'sulphurea'], + 'elapid': ['aliped', 'elapid'], + 'elapoid': ['elapoid', 'oedipal'], + 'elaps': ['elaps', + 'lapse', + 'lepas', + 'pales', + 'salep', + 'saple', + 'sepal', + 'slape', + 'spale', + 'speal'], + 'elapse': ['asleep', 'elapse', 'please'], + 'elastic': ['astelic', 'elastic', 'latices'], + 'elasticin': ['elasticin', 'inelastic', 'sciential'], + 'elastin': ['elastin', 'salient', 'saltine', 'slainte'], + 'elastomer': ['elastomer', 'salometer'], + 'elate': ['atlee', 'elate'], + 'elated': ['delate', 'elated'], + 'elater': ['earlet', 'elater', 'relate'], + 'elaterid': ['detailer', 'elaterid'], + 'elaterin': ['elaterin', 'entailer', 'treenail'], + 'elatha': ['althea', 'elatha'], + 'elatine': ['elatine', 'lineate'], + 'elation': ['alnoite', 'elation', 'toenail'], + 'elator': ['elator', 'lorate'], + 'elb': ['bel', 'elb'], + 'elbert': ['belter', 'elbert', 'treble'], + 'elberta': ['bearlet', 'bleater', 'elberta', 'retable'], + 'elbow': ['below', 'bowel', 'elbow'], + 'elbowed': ['boweled', 'elbowed'], + 'eld': ['del', 'eld', 'led'], + 'eldin': ['eldin', 'lined'], + 'elding': ['dingle', 'elding', 'engild', 'gilden'], + 'elean': ['anele', 'elean'], + 'election': ['coteline', 'election'], + 'elective': ['cleveite', 'elective'], + 'elector': ['elector', 'electro'], + 'electoral': ['electoral', 'recollate'], + 'electra': ['electra', 'treacle'], + 'electragy': ['electragy', 'glycerate'], + 'electret': ['electret', 'tercelet'], + 'electric': ['electric', 'lectrice'], + 'electrion': ['centriole', 'electrion', 'relection'], + 'electro': ['elector', 'electro'], + 'electrodynamic': ['dynamoelectric', 'electrodynamic'], + 'electrodynamical': ['dynamoelectrical', 'electrodynamical'], + 'electromagnetic': ['electromagnetic', 'magnetoelectric'], + 'electromagnetical': ['electromagnetical', 'magnetoelectrical'], + 'electrothermic': ['electrothermic', 'thermoelectric'], + 'electrothermometer': ['electrothermometer', 'thermoelectrometer'], + 'elegant': ['angelet', 'elegant'], + 'elegiambus': ['elegiambus', 'iambelegus'], + 'elegiast': ['elegiast', 'selagite'], + 'elemi': ['elemi', 'meile'], + 'elemin': ['elemin', 'meline'], + 'elephantic': ['elephantic', 'plancheite'], + 'elettaria': ['elettaria', 'retaliate'], + 'eleut': ['eleut', 'elute'], + 'elevator': ['elevator', 'overlate'], + 'elfin': ['elfin', 'nifle'], + 'elfishness': ['elfishness', 'fleshiness'], + 'elfkin': ['elfkin', 'finkel'], + 'elfwort': ['elfwort', 'felwort'], + 'eli': ['eli', 'lei', 'lie'], + 'elia': ['aiel', 'aile', 'elia'], + 'elian': ['alien', 'aline', 'anile', 'elain', 'elian', 'laine', 'linea'], + 'elias': ['aisle', 'elias'], + 'elicitor': ['elicitor', 'trioleic'], + 'eliminand': ['eliminand', 'mindelian'], + 'elinor': ['elinor', 'lienor', 'lorien', 'noiler'], + 'elinvar': ['elinvar', 'ravelin', 'reanvil', 'valerin'], + 'elisha': ['elisha', 'hailse', 'sheila'], + 'elisor': ['elisor', 'resoil'], + 'elissa': ['elissa', 'lassie'], + 'elite': 
['elite', 'telei'], + 'eliza': ['aizle', 'eliza'], + 'elk': ['elk', 'lek'], + 'ella': ['alle', 'ella', 'leal'], + 'ellagate': ['allegate', 'ellagate'], + 'ellenyard': ['ellenyard', 'learnedly'], + 'ellick': ['ellick', 'illeck'], + 'elliot': ['elliot', 'oillet'], + 'elm': ['elm', 'mel'], + 'elmer': ['elmer', 'merel', 'merle'], + 'elmy': ['elmy', 'yelm'], + 'eloah': ['eloah', 'haole'], + 'elod': ['dole', 'elod', 'lode', 'odel'], + 'eloge': ['eloge', 'golee'], + 'elohimic': ['elohimic', 'hemiolic'], + 'elohist': ['elohist', 'hostile'], + 'eloign': ['eloign', 'gileno', 'legion'], + 'eloigner': ['eloigner', 'legioner'], + 'eloignment': ['eloignment', 'omnilegent'], + 'elon': ['elon', 'enol', 'leno', 'leon', 'lone', 'noel'], + 'elonite': ['elonite', 'leonite'], + 'elops': ['elops', 'slope', 'spole'], + 'elric': ['crile', 'elric', 'relic'], + 'els': ['els', 'les'], + 'elsa': ['elsa', 'sale', 'seal', 'slae'], + 'else': ['else', 'lees', 'seel', 'sele', 'slee'], + 'elsin': ['elsin', 'lenis', 'niels', 'silen', 'sline'], + 'elt': ['elt', 'let'], + 'eluate': ['aulete', 'eluate'], + 'eluder': ['dueler', 'eluder'], + 'elusion': ['elusion', 'luiseno'], + 'elusory': ['elusory', 'yoursel'], + 'elute': ['eleut', 'elute'], + 'elution': ['elution', 'outline'], + 'elutor': ['elutor', 'louter', 'outler'], + 'elvan': ['elvan', 'navel', 'venal'], + 'elvanite': ['elvanite', 'lavenite'], + 'elver': ['elver', 'lever', 'revel'], + 'elvet': ['elvet', 'velte'], + 'elvira': ['averil', 'elvira'], + 'elvis': ['elvis', 'levis', 'slive'], + 'elwood': ['dewool', 'elwood', 'wooled'], + 'elymi': ['elymi', 'emily', 'limey'], + 'elysia': ['easily', 'elysia'], + 'elytral': ['alertly', 'elytral'], + 'elytrin': ['elytrin', 'inertly', 'trinely'], + 'elytroposis': ['elytroposis', 'proteolysis'], + 'elytrous': ['elytrous', 'urostyle'], + 'em': ['em', 'me'], + 'emanate': ['emanate', 'manatee'], + 'emanation': ['amnionate', 'anamniote', 'emanation'], + 'emanatist': ['emanatist', 'staminate', 'tasmanite'], + 'embalmer': ['embalmer', 'emmarble'], + 'embar': ['amber', 'bearm', 'bemar', 'bream', 'embar'], + 'embargo': ['bergamo', 'embargo'], + 'embark': ['embark', 'markeb'], + 'embay': ['beamy', 'embay', 'maybe'], + 'ember': ['breme', 'ember'], + 'embind': ['embind', 'nimbed'], + 'embira': ['ambier', 'bremia', 'embira'], + 'embodier': ['demirobe', 'embodier'], + 'embody': ['beydom', 'embody'], + 'embole': ['bemole', 'embole'], + 'embraceor': ['cerebroma', 'embraceor'], + 'embrail': ['embrail', 'mirabel'], + 'embryoid': ['embryoid', 'reimbody'], + 'embus': ['embus', 'sebum'], + 'embusk': ['bemusk', 'embusk'], + 'emcee': ['emcee', 'meece'], + 'emeership': ['emeership', 'ephemeris'], + 'emend': ['emend', 'mende'], + 'emendation': ['denominate', 'emendation'], + 'emendator': ['emendator', 'ondameter'], + 'emerita': ['emerita', 'emirate'], + 'emerse': ['emerse', 'seemer'], + 'emersion': ['emersion', 'meriones'], + 'emersonian': ['emersonian', 'mansioneer'], + 'emesa': ['emesa', 'mease'], + 'emigrate': ['emigrate', 'remigate'], + 'emigration': ['emigration', 'remigation'], + 'emil': ['emil', 'lime', 'mile'], + 'emilia': ['emilia', 'mailie'], + 'emily': ['elymi', 'emily', 'limey'], + 'emim': ['emim', 'mime'], + 'emir': ['emir', 'imer', 'mire', 'reim', 'remi', 'riem', 'rime'], + 'emirate': ['emerita', 'emirate'], + 'emirship': ['emirship', 'imperish'], + 'emissary': ['emissary', 'missayer'], + 'emit': ['emit', 'item', 'mite', 'time'], + 'emitter': ['emitter', 'termite'], + 'emm': ['emm', 'mem'], + 'emmarble': ['embalmer', 'emmarble'], + 'emodin': 
['domine', 'domnei', 'emodin', 'medino'], + 'emotion': ['emotion', 'moonite'], + 'empanel': ['empanel', 'emplane', 'peelman'], + 'empathic': ['empathic', 'emphatic'], + 'empathically': ['empathically', 'emphatically'], + 'emphasis': ['emphasis', 'misshape'], + 'emphatic': ['empathic', 'emphatic'], + 'emphatically': ['empathically', 'emphatically'], + 'empire': ['empire', 'epimer'], + 'empiricist': ['empiricist', 'empiristic'], + 'empiristic': ['empiricist', 'empiristic'], + 'emplane': ['empanel', 'emplane', 'peelman'], + 'employer': ['employer', 'polymere'], + 'emporia': ['emporia', 'meropia'], + 'emporial': ['emporial', 'proemial'], + 'emporium': ['emporium', 'pomerium', 'proemium'], + 'emprise': ['emprise', 'imprese', 'premise', 'spireme'], + 'empt': ['empt', 'temp'], + 'emptier': ['emptier', 'impetre'], + 'emption': ['emption', 'pimento'], + 'emptional': ['emptional', 'palmitone'], + 'emptor': ['emptor', 'trompe'], + 'empyesis': ['empyesis', 'pyemesis'], + 'emu': ['emu', 'ume'], + 'emulant': ['almuten', 'emulant'], + 'emulation': ['emulation', 'laumonite'], + 'emulsion': ['emulsion', 'solenium'], + 'emundation': ['emundation', 'mountained'], + 'emyd': ['demy', 'emyd'], + 'en': ['en', 'ne'], + 'enable': ['baleen', 'enable'], + 'enabler': ['enabler', 'renable'], + 'enaction': ['cetonian', 'enaction'], + 'enactor': ['enactor', 'necator', 'orcanet'], + 'enactory': ['enactory', 'octenary'], + 'enaena': ['aenean', 'enaena'], + 'enalid': ['aldine', 'daniel', 'delian', 'denial', 'enalid', 'leadin'], + 'enaliornis': ['enaliornis', 'rosaniline'], + 'enaluron': ['enaluron', 'neuronal'], + 'enam': ['amen', 'enam', 'mane', 'mean', 'name', 'nema'], + 'enamel': ['enamel', 'melena'], + 'enameling': ['enameling', 'malengine', 'meningeal'], + 'enamor': ['enamor', 'monera', 'oreman', 'romane'], + 'enamored': ['demeanor', 'enamored'], + 'enanthem': ['enanthem', 'menthane'], + 'enantiomer': ['enantiomer', 'renominate'], + 'enapt': ['enapt', 'paten', 'penta', 'tapen'], + 'enarch': ['enarch', 'ranche'], + 'enarm': ['enarm', 'namer', 'reman'], + 'enarme': ['enarme', 'meaner', 'rename'], + 'enarthrosis': ['enarthrosis', 'nearthrosis'], + 'enate': ['eaten', 'enate'], + 'enatic': ['acetin', 'actine', 'enatic'], + 'enation': ['enation', 'etonian'], + 'enbrave': ['enbrave', 'verbena'], + 'encapsule': ['encapsule', 'pelecanus'], + 'encase': ['encase', 'seance', 'seneca'], + 'encash': ['encash', 'sanche'], + 'encauma': ['cumaean', 'encauma'], + 'encaustes': ['acuteness', 'encaustes'], + 'encaustic': ['encaustic', 'succinate'], + 'encephalomeningitis': ['encephalomeningitis', 'meningoencephalitis'], + 'encephalomeningocele': ['encephalomeningocele', 'meningoencephalocele'], + 'encephalomyelitis': ['encephalomyelitis', 'myeloencephalitis'], + 'enchair': ['chainer', 'enchair', 'rechain'], + 'encharge': ['encharge', 'rechange'], + 'encharnel': ['channeler', 'encharnel'], + 'enchytrae': ['cytherean', 'enchytrae'], + 'encina': ['canine', 'encina', 'neanic'], + 'encinillo': ['encinillo', 'linolenic'], + 'encist': ['encist', 'incest', 'insect', 'scient'], + 'encitadel': ['declinate', 'encitadel'], + 'enclaret': ['celarent', 'centrale', 'enclaret'], + 'enclasp': ['enclasp', 'spancel'], + 'enclave': ['enclave', 'levance', 'valence'], + 'enclosure': ['enclosure', 'recounsel'], + 'encoignure': ['encoignure', 'neurogenic'], + 'encoil': ['clione', 'coelin', 'encoil', 'enolic'], + 'encomiastic': ['cosmetician', 'encomiastic'], + 'encomic': ['comenic', 'encomic', 'meconic'], + 'encomium': ['encomium', 'meconium'], + 'encoronal': 
['encoronal', 'olecranon'], + 'encoronate': ['encoronate', 'entocornea'], + 'encradle': ['calender', 'encradle'], + 'encranial': ['carnelian', 'encranial'], + 'encratic': ['acentric', 'encratic', 'nearctic'], + 'encratism': ['encratism', 'miscreant'], + 'encraty': ['encraty', 'nectary'], + 'encreel': ['crenele', 'encreel'], + 'encrinital': ['encrinital', 'tricennial'], + 'encrisp': ['encrisp', 'pincers'], + 'encrust': ['encrust', 'uncrest'], + 'encurl': ['encurl', 'lucern'], + 'encurtain': ['encurtain', 'runcinate', 'uncertain'], + 'encyrtidae': ['encyrtidae', 'nycteridae'], + 'end': ['den', 'end', 'ned'], + 'endaortic': ['citronade', 'endaortic', 'redaction'], + 'endboard': ['deadborn', 'endboard'], + 'endear': ['deaner', 'endear'], + 'endeared': ['deadener', 'endeared'], + 'endearing': ['endearing', 'engrained', 'grenadine'], + 'endearingly': ['endearingly', 'engrainedly'], + 'endemial': ['endemial', 'madeline'], + 'endere': ['endere', 'needer', 'reeden'], + 'enderonic': ['enderonic', 'endocrine'], + 'endevil': ['develin', 'endevil'], + 'endew': ['endew', 'wende'], + 'ending': ['ending', 'ginned'], + 'endite': ['eident', 'endite'], + 'endive': ['endive', 'envied', 'veined'], + 'endoarteritis': ['endoarteritis', 'sideronatrite'], + 'endocline': ['endocline', 'indolence'], + 'endocrine': ['enderonic', 'endocrine'], + 'endome': ['endome', 'omened'], + 'endopathic': ['dictaphone', 'endopathic'], + 'endophasic': ['deaconship', 'endophasic'], + 'endoral': ['endoral', 'ladrone', 'leonard'], + 'endosarc': ['endosarc', 'secondar'], + 'endosome': ['endosome', 'moonseed'], + 'endosporium': ['endosporium', 'imponderous'], + 'endosteal': ['endosteal', 'leadstone'], + 'endothecial': ['chelidonate', 'endothecial'], + 'endothelia': ['endothelia', 'ethanediol', 'ethenoidal'], + 'endow': ['endow', 'nowed'], + 'endura': ['endura', 'neurad', 'undear', 'unread'], + 'endurably': ['endurably', 'undryable'], + 'endure': ['durene', 'endure'], + 'endurer': ['endurer', 'underer'], + 'enduring': ['enduring', 'unringed'], + 'enduringly': ['enduringly', 'underlying'], + 'endwise': ['endwise', 'sinewed'], + 'enema': ['ameen', 'amene', 'enema'], + 'enemy': ['enemy', 'yemen'], + 'energesis': ['energesis', 'regenesis'], + 'energeticist': ['energeticist', 'energetistic'], + 'energetistic': ['energeticist', 'energetistic'], + 'energic': ['energic', 'generic'], + 'energical': ['energical', 'generical'], + 'energid': ['energid', 'reeding'], + 'energist': ['energist', 'steering'], + 'energy': ['energy', 'greeny', 'gyrene'], + 'enervate': ['enervate', 'venerate'], + 'enervation': ['enervation', 'veneration'], + 'enervative': ['enervative', 'venerative'], + 'enervator': ['enervator', 'renovater', 'venerator'], + 'enfilade': ['alfenide', 'enfilade'], + 'enfile': ['enfile', 'enlief', 'enlife', 'feline'], + 'enflesh': ['enflesh', 'fleshen'], + 'enfoil': ['enfoil', 'olefin'], + 'enfold': ['enfold', 'folden', 'fondle'], + 'enforcer': ['confrere', 'enforcer', 'reconfer'], + 'enframe': ['enframe', 'freeman'], + 'engaol': ['angelo', 'engaol'], + 'engarb': ['banger', 'engarb', 'graben'], + 'engaud': ['augend', 'engaud', 'unaged'], + 'engild': ['dingle', 'elding', 'engild', 'gilden'], + 'engird': ['engird', 'ringed'], + 'engirdle': ['engirdle', 'reedling'], + 'engirt': ['engirt', 'tinger'], + 'englacial': ['angelical', 'englacial', 'galenical'], + 'englacially': ['angelically', 'englacially'], + 'englad': ['angled', 'dangle', 'englad', 'lagend'], + 'englander': ['englander', 'greenland'], + 'english': ['english', 'shingle'], + 
'englisher': ['englisher', 'reshingle'], + 'englut': ['englut', 'gluten', 'ungelt'], + 'engobe': ['begone', 'engobe'], + 'engold': ['engold', 'golden'], + 'engrail': ['aligner', 'engrail', 'realign', 'reginal'], + 'engrailed': ['engrailed', 'geraldine'], + 'engrailment': ['engrailment', 'realignment'], + 'engrain': ['earning', 'engrain'], + 'engrained': ['endearing', 'engrained', 'grenadine'], + 'engrainedly': ['endearingly', 'engrainedly'], + 'engram': ['engram', 'german', 'manger'], + 'engraphic': ['engraphic', 'preaching'], + 'engrave': ['avenger', 'engrave'], + 'engross': ['engross', 'grossen'], + 'enhat': ['enhat', 'ethan', 'nathe', 'neath', 'thane'], + 'enheart': ['earthen', 'enheart', 'hearten', 'naether', 'teheran', 'traheen'], + 'enherit': ['enherit', 'etherin', 'neither', 'therein'], + 'enhydra': ['enhydra', 'henyard'], + 'eniac': ['anice', 'eniac'], + 'enicuridae': ['audiencier', 'enicuridae'], + 'enid': ['dine', 'enid', 'inde', 'nide'], + 'enif': ['enif', 'fine', 'neif', 'nife'], + 'enisle': ['enisle', 'ensile', 'senile', 'silene'], + 'enlace': ['elance', 'enlace'], + 'enlard': ['aldern', + 'darnel', + 'enlard', + 'lander', + 'lenard', + 'randle', + 'reland'], + 'enlarge': ['enlarge', 'general', 'gleaner'], + 'enleaf': ['enleaf', 'leafen'], + 'enlief': ['enfile', 'enlief', 'enlife', 'feline'], + 'enlife': ['enfile', 'enlief', 'enlife', 'feline'], + 'enlight': ['enlight', 'lighten'], + 'enlist': ['enlist', 'listen', 'silent', 'tinsel'], + 'enlisted': ['enlisted', 'lintseed'], + 'enlister': ['enlister', 'esterlin', 'listener', 'relisten'], + 'enmass': ['enmass', 'maness', 'messan'], + 'enneadic': ['cadinene', 'decennia', 'enneadic'], + 'ennobler': ['ennobler', 'nonrebel'], + 'ennoic': ['conine', 'connie', 'ennoic'], + 'ennomic': ['ennomic', 'meconin'], + 'enoch': ['cohen', 'enoch'], + 'enocyte': ['enocyte', 'neocyte'], + 'enodal': ['enodal', 'loaden'], + 'enoil': ['enoil', 'ileon', 'olein'], + 'enol': ['elon', 'enol', 'leno', 'leon', 'lone', 'noel'], + 'enolic': ['clione', 'coelin', 'encoil', 'enolic'], + 'enomania': ['enomania', 'maeonian'], + 'enomotarch': ['chromatone', 'enomotarch'], + 'enorganic': ['enorganic', 'ignorance'], + 'enorm': ['enorm', 'moner', 'morne'], + 'enormous': ['enormous', 'unmorose'], + 'enos': ['enos', 'nose'], + 'enostosis': ['enostosis', 'sootiness'], + 'enow': ['enow', 'owen', 'wone'], + 'enphytotic': ['enphytotic', 'entophytic'], + 'enrace': ['careen', 'carene', 'enrace'], + 'enrage': ['egeran', 'enrage', 'ergane', 'genear', 'genera'], + 'enraged': ['derange', 'enraged', 'gardeen', 'gerenda', 'grandee', 'grenade'], + 'enragedly': ['enragedly', 'legendary'], + 'enrapt': ['arpent', + 'enrapt', + 'entrap', + 'panter', + 'parent', + 'pretan', + 'trepan'], + 'enravish': ['enravish', 'ravenish', 'vanisher'], + 'enray': ['enray', 'yearn'], + 'enrib': ['brine', 'enrib'], + 'enrich': ['enrich', 'nicher', 'richen'], + 'enring': ['enring', 'ginner'], + 'enrive': ['enrive', 'envier', 'veiner', 'verine'], + 'enrobe': ['boreen', 'enrobe', 'neebor', 'rebone'], + 'enrol': ['enrol', 'loren'], + 'enrolled': ['enrolled', 'rondelle'], + 'enrough': ['enrough', 'roughen'], + 'enruin': ['enruin', 'neurin', 'unrein'], + 'enrut': ['enrut', 'tuner', 'urent'], + 'ens': ['ens', 'sen'], + 'ensaint': ['ensaint', 'stanine'], + 'ensate': ['ensate', 'enseat', 'santee', 'sateen', 'senate'], + 'ense': ['ense', 'esne', 'nese', 'seen', 'snee'], + 'enseam': ['enseam', 'semnae'], + 'enseat': ['ensate', 'enseat', 'santee', 'sateen', 'senate'], + 'ensepulcher': ['ensepulcher', 
'ensepulchre'], + 'ensepulchre': ['ensepulcher', 'ensepulchre'], + 'enshade': ['dasheen', 'enshade'], + 'enshroud': ['enshroud', 'unshored'], + 'ensigncy': ['ensigncy', 'syngenic'], + 'ensilage': ['ensilage', 'genesial', 'signalee'], + 'ensile': ['enisle', 'ensile', 'senile', 'silene'], + 'ensilver': ['ensilver', 'sniveler'], + 'ensmall': ['ensmall', 'smallen'], + 'ensoul': ['ensoul', 'olenus', 'unsole'], + 'enspirit': ['enspirit', 'pristine'], + 'enstar': ['astern', 'enstar', 'stenar', 'sterna'], + 'enstatite': ['enstatite', 'intestate', 'satinette'], + 'enstool': ['enstool', 'olonets'], + 'enstore': ['enstore', 'estrone', 'storeen', 'tornese'], + 'ensue': ['ensue', 'seenu', 'unsee'], + 'ensuer': ['ensuer', 'ensure'], + 'ensure': ['ensuer', 'ensure'], + 'entablature': ['entablature', 'untreatable'], + 'entach': ['entach', 'netcha'], + 'entad': ['denat', 'entad'], + 'entada': ['adnate', 'entada'], + 'entail': ['entail', 'tineal'], + 'entailer': ['elaterin', 'entailer', 'treenail'], + 'ental': ['ental', 'laten', 'leant'], + 'entasia': ['anisate', 'entasia'], + 'entasis': ['entasis', 'sestian', 'sestina'], + 'entelam': ['entelam', 'leetman'], + 'enter': ['enter', 'neter', 'renet', 'terne', 'treen'], + 'enteral': ['alterne', 'enteral', 'eternal', 'teleran', 'teneral'], + 'enterer': ['enterer', 'terrene'], + 'enteria': ['enteria', 'trainee', 'triaene'], + 'enteric': ['citrene', 'enteric', 'enticer', 'tercine'], + 'enterocolitis': ['coloenteritis', 'enterocolitis'], + 'enterogastritis': ['enterogastritis', 'gastroenteritis'], + 'enteroid': ['enteroid', 'orendite'], + 'enteron': ['enteron', 'tenoner'], + 'enteropexy': ['enteropexy', 'oxyterpene'], + 'entertain': ['entertain', 'tarentine', 'terentian'], + 'entheal': ['entheal', 'lethean'], + 'enthraldom': ['enthraldom', 'motherland'], + 'enthuse': ['enthuse', 'unsheet'], + 'entia': ['entia', 'teian', 'tenai', 'tinea'], + 'enticer': ['citrene', 'enteric', 'enticer', 'tercine'], + 'entincture': ['entincture', 'unreticent'], + 'entire': ['entire', 'triene'], + 'entirely': ['entirely', 'lientery'], + 'entirety': ['entirety', 'eternity'], + 'entity': ['entity', 'tinety'], + 'entocoelic': ['coelection', 'entocoelic'], + 'entocornea': ['encoronate', 'entocornea'], + 'entohyal': ['entohyal', 'ethanoyl'], + 'entoil': ['entoil', 'lionet'], + 'entomeric': ['entomeric', 'intercome', 'morencite'], + 'entomic': ['centimo', 'entomic', 'tecomin'], + 'entomical': ['entomical', 'melanotic'], + 'entomion': ['entomion', 'noontime'], + 'entomoid': ['demotion', 'entomoid', 'moontide'], + 'entomophily': ['entomophily', 'monophylite'], + 'entomotomy': ['entomotomy', 'omentotomy'], + 'entoparasite': ['antiprotease', 'entoparasite'], + 'entophyte': ['entophyte', 'tenophyte'], + 'entophytic': ['enphytotic', 'entophytic'], + 'entopic': ['entopic', 'nepotic', 'pentoic'], + 'entoplastic': ['entoplastic', 'spinotectal', 'tectospinal', 'tenoplastic'], + 'entoretina': ['entoretina', 'tetraonine'], + 'entosarc': ['ancestor', 'entosarc'], + 'entotic': ['entotic', 'tonetic'], + 'entozoa': ['entozoa', 'ozonate'], + 'entozoic': ['entozoic', 'enzootic'], + 'entrail': ['entrail', + 'latiner', + 'latrine', + 'ratline', + 'reliant', + 'retinal', + 'trenail'], + 'entrain': ['entrain', 'teriann'], + 'entrance': ['centenar', 'entrance'], + 'entrap': ['arpent', + 'enrapt', + 'entrap', + 'panter', + 'parent', + 'pretan', + 'trepan'], + 'entreat': ['entreat', 'ratteen', 'tarente', 'ternate', 'tetrane'], + 'entreating': ['entreating', 'interagent'], + 'entree': ['entree', 'rentee', 'retene'], + 
'entrepas': ['entrepas', 'septenar'], + 'entropion': ['entropion', 'pontonier', 'prenotion'], + 'entropium': ['entropium', 'importune'], + 'entrust': ['entrust', 'stunter', 'trusten'], + 'enumeration': ['enumeration', 'mountaineer'], + 'enunciation': ['enunciation', 'incuneation'], + 'enunciator': ['enunciator', 'uncreation'], + 'enure': ['enure', 'reune'], + 'envelope': ['envelope', 'ovenpeel'], + 'enverdure': ['enverdure', 'unrevered'], + 'envied': ['endive', 'envied', 'veined'], + 'envier': ['enrive', 'envier', 'veiner', 'verine'], + 'envious': ['envious', 'niveous', 'veinous'], + 'envoy': ['envoy', 'nevoy', 'yoven'], + 'enwood': ['enwood', 'wooden'], + 'enwound': ['enwound', 'unowned'], + 'enwrap': ['enwrap', 'pawner', 'repawn'], + 'enwrite': ['enwrite', 'retwine'], + 'enzootic': ['entozoic', 'enzootic'], + 'eoan': ['aeon', 'eoan'], + 'eogaean': ['eogaean', 'neogaea'], + 'eolithic': ['chiolite', 'eolithic'], + 'eon': ['eon', 'neo', 'one'], + 'eonism': ['eonism', 'mesion', 'oneism', 'simeon'], + 'eophyton': ['eophyton', 'honeypot'], + 'eosaurus': ['eosaurus', 'rousseau'], + 'eosin': ['eosin', 'noise'], + 'eosinoblast': ['bosselation', 'eosinoblast'], + 'epacrid': ['epacrid', 'peracid', 'preacid'], + 'epacris': ['epacris', 'scrapie', 'serapic'], + 'epactal': ['epactal', 'placate'], + 'eparch': ['aperch', 'eparch', 'percha', 'preach'], + 'eparchial': ['eparchial', 'raphaelic'], + 'eparchy': ['eparchy', 'preachy'], + 'epha': ['epha', 'heap'], + 'epharmonic': ['epharmonic', 'pinachrome'], + 'ephemeris': ['emeership', 'ephemeris'], + 'ephod': ['depoh', 'ephod', 'hoped'], + 'ephor': ['ephor', 'hoper'], + 'ephorus': ['ephorus', 'orpheus', 'upshore'], + 'epibasal': ['ablepsia', 'epibasal'], + 'epibole': ['epibole', 'epilobe'], + 'epic': ['epic', 'pice'], + 'epical': ['epical', 'piacle', 'plaice'], + 'epicarp': ['crappie', 'epicarp'], + 'epicentral': ['epicentral', 'parentelic'], + 'epiceratodus': ['dipteraceous', 'epiceratodus'], + 'epichorial': ['aerophilic', 'epichorial'], + 'epicly': ['epicly', 'pyelic'], + 'epicostal': ['alopecist', 'altiscope', 'epicostal', 'scapolite'], + 'epicotyl': ['epicotyl', 'lipocyte'], + 'epicranial': ['epicranial', 'periacinal'], + 'epiderm': ['demirep', 'epiderm', 'impeder', 'remiped'], + 'epiderma': ['epiderma', 'premedia'], + 'epidermal': ['epidermal', 'impleader', 'premedial'], + 'epidermis': ['dispireme', 'epidermis'], + 'epididymovasostomy': ['epididymovasostomy', 'vasoepididymostomy'], + 'epidural': ['dipleura', 'epidural'], + 'epigram': ['epigram', 'primage'], + 'epilabrum': ['epilabrum', 'impuberal'], + 'epilachna': ['cephalina', 'epilachna'], + 'epilate': ['epilate', 'epitela', 'pileate'], + 'epilation': ['epilation', 'polianite'], + 'epilatory': ['epilatory', 'petiolary'], + 'epilobe': ['epibole', 'epilobe'], + 'epimer': ['empire', 'epimer'], + 'epiotic': ['epiotic', 'poietic'], + 'epipactis': ['epipactis', 'epipastic'], + 'epipastic': ['epipactis', 'epipastic'], + 'epiplasm': ['epiplasm', 'palmipes'], + 'epiploic': ['epiploic', 'epipolic'], + 'epipolic': ['epiploic', 'epipolic'], + 'epirotic': ['epirotic', 'periotic'], + 'episclera': ['episclera', 'periclase'], + 'episematic': ['episematic', 'septicemia'], + 'episodal': ['episodal', 'lapidose', 'sepaloid'], + 'episodial': ['apsidiole', 'episodial'], + 'epistatic': ['epistatic', 'pistacite'], + 'episternal': ['alpestrine', 'episternal', 'interlapse', 'presential'], + 'episternum': ['episternum', 'uprisement'], + 'epistlar': ['epistlar', 'pilaster', 'plaister', 'priestal'], + 'epistle': ['epistle', 
'septile'], + 'epistler': ['epistler', 'spirelet'], + 'epistoler': ['epistoler', 'peristole', 'perseitol', 'pistoleer'], + 'epistoma': ['epistoma', 'metopias'], + 'epistome': ['epistome', 'epsomite'], + 'epistroma': ['epistroma', 'peristoma'], + 'epitela': ['epilate', 'epitela', 'pileate'], + 'epithecal': ['epithecal', 'petechial', 'phacelite'], + 'epithecate': ['epithecate', 'petechiate'], + 'epithet': ['epithet', 'heptite'], + 'epithyme': ['epithyme', 'hemitype'], + 'epitomizer': ['epitomizer', 'peritomize'], + 'epizoal': ['epizoal', 'lopezia', 'opalize'], + 'epoch': ['epoch', 'poche'], + 'epodic': ['copied', 'epodic'], + 'epornitic': ['epornitic', 'proteinic'], + 'epos': ['epos', 'peso', 'pose', 'sope'], + 'epsilon': ['epsilon', 'sinople'], + 'epsomite': ['epistome', 'epsomite'], + 'epulis': ['epulis', 'pileus'], + 'epulo': ['epulo', 'loupe'], + 'epuloid': ['epuloid', 'euploid'], + 'epulosis': ['epulosis', 'pelusios'], + 'epulotic': ['epulotic', 'poultice'], + 'epural': ['epural', 'perula', 'pleura'], + 'epuration': ['epuration', 'eupatorin'], + 'equal': ['equal', 'quale', 'queal'], + 'equalable': ['aquabelle', 'equalable'], + 'equiangle': ['angelique', 'equiangle'], + 'equinity': ['equinity', 'inequity'], + 'equip': ['equip', 'pique'], + 'equitable': ['equitable', 'quietable'], + 'equitist': ['equitist', 'quietist'], + 'equus': ['equus', 'usque'], + 'er': ['er', 're'], + 'era': ['aer', 'are', 'ear', 'era', 'rea'], + 'erade': ['eared', 'erade'], + 'eradicant': ['carinated', 'eradicant'], + 'eradicator': ['corradiate', 'cortaderia', 'eradicator'], + 'eral': ['earl', 'eral', 'lear', 'real'], + 'eranist': ['asterin', 'eranist', 'restain', 'stainer', 'starnie', 'stearin'], + 'erase': ['easer', 'erase'], + 'erased': ['erased', 'reseda', 'seared'], + 'eraser': ['eraser', 'searer'], + 'erasmian': ['erasmian', 'raiseman'], + 'erasmus': ['assumer', 'erasmus', 'masseur'], + 'erastian': ['artesian', 'asterina', 'asternia', 'erastian', 'seatrain'], + 'erastus': ['erastus', 'ressaut'], + 'erava': ['avera', 'erava'], + 'erbia': ['barie', 'beira', 'erbia', 'rebia'], + 'erbium': ['erbium', 'imbrue'], + 'erd': ['erd', 'red'], + 'ere': ['eer', 'ere', 'ree'], + 'erect': ['crete', 'erect'], + 'erectable': ['celebrate', 'erectable'], + 'erecting': ['erecting', 'gentrice'], + 'erection': ['erection', 'neoteric', 'nocerite', 'renotice'], + 'eremic': ['eremic', 'merice'], + 'eremital': ['eremital', 'materiel'], + 'erept': ['erept', 'peter', 'petre'], + 'ereptic': ['ereptic', 'precite', 'receipt'], + 'ereption': ['ereption', 'tropeine'], + 'erethic': ['erethic', 'etheric', 'heretic', 'heteric', 'teicher'], + 'erethism': ['erethism', 'etherism', 'heterism'], + 'erethismic': ['erethismic', 'hetericism'], + 'erethistic': ['erethistic', 'hetericist'], + 'eretrian': ['arretine', 'eretrian', 'eritrean', 'retainer'], + 'erg': ['erg', 'ger', 'reg'], + 'ergal': ['argel', 'ergal', 'garle', 'glare', 'lager', 'large', 'regal'], + 'ergamine': ['ergamine', 'merginae'], + 'ergane': ['egeran', 'enrage', 'ergane', 'genear', 'genera'], + 'ergastic': ['agrestic', 'ergastic'], + 'ergates': ['ergates', 'gearset', 'geaster'], + 'ergoism': ['ergoism', 'ogreism'], + 'ergomaniac': ['ergomaniac', 'grecomania'], + 'ergon': ['ergon', 'genro', 'goner', 'negro'], + 'ergot': ['ergot', 'rotge'], + 'ergotamine': ['angiometer', 'ergotamine', 'geometrina'], + 'ergotin': ['ergotin', 'genitor', 'negrito', 'ogtiern', 'trigone'], + 'ergusia': ['ergusia', 'gerusia', 'sarigue'], + 'eria': ['aire', 'eria'], + 'erian': ['erian', 'irena', 'reina'], + 
'eric': ['eric', 'rice'], + 'erica': ['acier', 'aeric', 'ceria', 'erica'], + 'ericad': ['acider', 'ericad'], + 'erical': ['carlie', 'claire', 'eclair', 'erical'], + 'erichtoid': ['dichroite', 'erichtoid', 'theriodic'], + 'erigenia': ['aegirine', 'erigenia'], + 'erigeron': ['erigeron', 'reignore'], + 'erik': ['erik', 'kier', 'reki'], + 'erineum': ['erineum', 'unireme'], + 'erinose': ['erinose', 'roseine'], + 'eristalis': ['eristalis', 'serialist'], + 'eristic': ['ectiris', 'eristic'], + 'eristical': ['eristical', 'realistic'], + 'erithacus': ['erithacus', 'eucharist'], + 'eritrean': ['arretine', 'eretrian', 'eritrean', 'retainer'], + 'erma': ['erma', 'mare', 'rame', 'ream'], + 'ermani': ['ermani', 'marine', 'remain'], + 'ermines': ['ermines', 'inermes'], + 'erne': ['erne', 'neer', 'reen'], + 'ernest': ['ernest', 'nester', 'resent', 'streen'], + 'ernie': ['ernie', 'ierne', 'irene'], + 'ernst': ['ernst', 'stern'], + 'erode': ['doree', 'erode'], + 'eros': ['eros', 'rose', 'sero', 'sore'], + 'erose': ['erose', 'soree'], + 'erotesis': ['erotesis', 'isostere'], + 'erotic': ['erotic', 'tercio'], + 'erotical': ['calorite', 'erotical', 'loricate'], + 'eroticism': ['eroticism', 'isometric', 'meroistic', 'trioecism'], + 'erotism': ['erotism', 'mortise', 'trisome'], + 'erotogenic': ['erotogenic', 'geocronite', 'orogenetic'], + 'errabund': ['errabund', 'unbarred'], + 'errand': ['darner', 'darren', 'errand', 'rander', 'redarn'], + 'errant': ['arrent', 'errant', 'ranter', 'ternar'], + 'errantia': ['artarine', 'errantia'], + 'erratic': ['cartier', 'cirrate', 'erratic'], + 'erratum': ['erratum', 'maturer'], + 'erring': ['erring', 'rering', 'ringer'], + 'errite': ['errite', 'reiter', 'retier', 'retire', 'tierer'], + 'ers': ['ers', 'ser'], + 'ersar': ['ersar', 'raser', 'serra'], + 'erse': ['erse', 'rees', 'seer', 'sere'], + 'erthen': ['erthen', 'henter', 'nether', 'threne'], + 'eruc': ['cure', 'ecru', 'eruc'], + 'eruciform': ['eruciform', 'urceiform'], + 'erucin': ['curine', 'erucin', 'neuric'], + 'erucivorous': ['erucivorous', 'overcurious'], + 'eruct': ['cruet', 'eruct', 'recut', 'truce'], + 'eruction': ['eruction', 'neurotic'], + 'erugate': ['erugate', 'guetare'], + 'erumpent': ['erumpent', 'untemper'], + 'eruption': ['eruption', 'unitrope'], + 'erwin': ['erwin', 'rewin', 'winer'], + 'eryngium': ['eryngium', 'gynerium'], + 'eryon': ['eryon', 'onery'], + 'eryops': ['eryops', 'osprey'], + 'erythea': ['erythea', 'hetaery', 'yeather'], + 'erythrin': ['erythrin', 'tyrrheni'], + 'erythrophage': ['erythrophage', 'heterography'], + 'erythrophyllin': ['erythrophyllin', 'phylloerythrin'], + 'erythropia': ['erythropia', 'pyrotheria'], + 'es': ['es', 'se'], + 'esca': ['case', 'esca'], + 'escalan': ['escalan', 'scalena'], + 'escalin': ['celsian', 'escalin', 'sanicle', 'secalin'], + 'escaloped': ['copleased', 'escaloped'], + 'escapement': ['escapement', 'espacement'], + 'escaper': ['escaper', 'respace'], + 'escarp': ['casper', 'escarp', 'parsec', 'scrape', 'secpar', 'spacer'], + 'eschar': ['arches', 'chaser', 'eschar', 'recash', 'search'], + 'eschara': ['asearch', 'eschara'], + 'escheator': ['escheator', 'tocharese'], + 'escobilla': ['escobilla', 'obeliscal'], + 'escolar': ['escolar', 'solacer'], + 'escort': ['corset', 'cortes', 'coster', 'escort', 'scoter', 'sector'], + 'escortment': ['centermost', 'escortment'], + 'escrol': ['closer', 'cresol', 'escrol'], + 'escropulo': ['escropulo', 'supercool'], + 'esculent': ['esculent', 'unselect'], + 'esculin': ['esculin', 'incluse'], + 'esere': ['esere', 'reese', 'resee'], + 
'esexual': ['esexual', 'sexuale'], + 'eshin': ['eshin', 'shine'], + 'esiphonal': ['esiphonal', 'phaseolin'], + 'esker': ['esker', 'keres', 'reesk', 'seker', 'skeer', 'skere'], + 'eskualdun': ['eskualdun', 'euskaldun'], + 'eskuara': ['eskuara', 'euskara'], + 'esne': ['ense', 'esne', 'nese', 'seen', 'snee'], + 'esophagogastrostomy': ['esophagogastrostomy', 'gastroesophagostomy'], + 'esopus': ['esopus', 'spouse'], + 'esoterical': ['cesarolite', 'esoterical'], + 'esoterist': ['esoterist', 'trisetose'], + 'esotrope': ['esotrope', 'proteose'], + 'esotropia': ['aportoise', 'esotropia'], + 'espacement': ['escapement', 'espacement'], + 'espadon': ['espadon', 'spadone'], + 'esparto': ['esparto', 'petrosa', 'seaport'], + 'esperantic': ['esperantic', 'interspace'], + 'esperantido': ['desperation', 'esperantido'], + 'esperantism': ['esperantism', 'strepsinema'], + 'esperanto': ['esperanto', 'personate'], + 'espial': ['espial', 'lipase', 'pelias'], + 'espier': ['espier', 'peiser'], + 'espinal': ['espinal', 'pinales', 'spaniel'], + 'espino': ['espino', 'sepion'], + 'espringal': ['espringal', 'presignal', 'relapsing'], + 'esquire': ['esquire', 'risquee'], + 'essence': ['essence', 'senesce'], + 'essenism': ['essenism', 'messines'], + 'essie': ['essie', 'seise'], + 'essling': ['essling', 'singles'], + 'essoin': ['essoin', 'ossein'], + 'essonite': ['essonite', 'ossetine'], + 'essorant': ['assentor', 'essorant', 'starnose'], + 'estamene': ['easement', 'estamene'], + 'esteem': ['esteem', 'mestee'], + 'estella': ['estella', 'sellate'], + 'ester': ['ester', + 'estre', + 'reest', + 'reset', + 'steer', + 'stere', + 'stree', + 'terse', + 'tsere'], + 'esterlin': ['enlister', 'esterlin', 'listener', 'relisten'], + 'esterling': ['esterling', 'steerling'], + 'estevin': ['estevin', 'tensive'], + 'esth': ['esth', 'hest', 'seth'], + 'esther': ['esther', 'hester', 'theres'], + 'estivage': ['estivage', 'vegasite'], + 'estoc': ['coset', 'estoc', 'scote'], + 'estonian': ['estonian', 'nasonite'], + 'estop': ['estop', 'stoep', 'stope'], + 'estradiol': ['estradiol', 'idolaster'], + 'estrange': ['estrange', 'segreant', 'sergeant', 'sternage'], + 'estray': ['atresy', 'estray', 'reasty', 'stayer'], + 'estre': ['ester', + 'estre', + 'reest', + 'reset', + 'steer', + 'stere', + 'stree', + 'terse', + 'tsere'], + 'estreat': ['estreat', 'restate', 'retaste'], + 'estrepe': ['estrepe', 'resteep', 'steeper'], + 'estriate': ['estriate', 'treatise'], + 'estrin': ['estrin', 'insert', 'sinter', 'sterin', 'triens'], + 'estriol': ['estriol', 'torsile'], + 'estrogen': ['estrogen', 'gerontes'], + 'estrone': ['enstore', 'estrone', 'storeen', 'tornese'], + 'estrous': ['estrous', 'oestrus', 'sestuor', 'tussore'], + 'estrual': ['arustle', 'estrual', 'saluter', 'saulter'], + 'estufa': ['estufa', 'fusate'], + 'eta': ['ate', 'eat', 'eta', 'tae', 'tea'], + 'etacism': ['cameist', 'etacism', 'sematic'], + 'etacist': ['etacist', 'statice'], + 'etalon': ['etalon', 'tolane'], + 'etamin': ['etamin', 'inmate', 'taimen', 'tamein'], + 'etamine': ['amenite', 'etamine', 'matinee'], + 'etch': ['chet', 'etch', 'tche', 'tech'], + 'etcher': ['cherte', 'etcher'], + 'eternal': ['alterne', 'enteral', 'eternal', 'teleran', 'teneral'], + 'eternalism': ['eternalism', 'streamline'], + 'eternity': ['entirety', 'eternity'], + 'etesian': ['etesian', 'senaite'], + 'ethal': ['ethal', 'lathe', 'leath'], + 'ethan': ['enhat', 'ethan', 'nathe', 'neath', 'thane'], + 'ethanal': ['anthela', 'ethanal'], + 'ethane': ['ethane', 'taheen'], + 'ethanediol': ['endothelia', 'ethanediol', 
'ethenoidal'], + 'ethanim': ['ethanim', 'hematin'], + 'ethanoyl': ['entohyal', 'ethanoyl'], + 'ethel': ['ethel', 'lethe'], + 'ethenoidal': ['endothelia', 'ethanediol', 'ethenoidal'], + 'ether': ['ether', 'rethe', 'theer', 'there', 'three'], + 'etheria': ['ehretia', 'etheria'], + 'etheric': ['erethic', 'etheric', 'heretic', 'heteric', 'teicher'], + 'etherin': ['enherit', 'etherin', 'neither', 'therein'], + 'etherion': ['etherion', 'hereinto', 'heronite'], + 'etherism': ['erethism', 'etherism', 'heterism'], + 'etherization': ['etherization', 'heterization'], + 'etherize': ['etherize', 'heterize'], + 'ethicism': ['ethicism', 'shemitic'], + 'ethicist': ['ethicist', 'thecitis', 'theistic'], + 'ethics': ['ethics', 'sethic'], + 'ethid': ['edith', 'ethid'], + 'ethine': ['ethine', 'theine'], + 'ethiop': ['ethiop', 'ophite', 'peitho'], + 'ethmoidal': ['ethmoidal', 'oldhamite'], + 'ethmosphenoid': ['ethmosphenoid', 'sphenoethmoid'], + 'ethmosphenoidal': ['ethmosphenoidal', 'sphenoethmoidal'], + 'ethnal': ['ethnal', 'hantle', 'lathen', 'thenal'], + 'ethnical': ['chainlet', 'ethnical'], + 'ethnological': ['allothogenic', 'ethnological'], + 'ethnos': ['ethnos', 'honest'], + 'ethography': ['ethography', 'hyetograph'], + 'ethologic': ['ethologic', 'theologic'], + 'ethological': ['ethological', 'lethologica', 'theological'], + 'ethology': ['ethology', 'theology'], + 'ethos': ['ethos', 'shote', 'those'], + 'ethylic': ['ethylic', 'techily'], + 'ethylin': ['ethylin', 'thienyl'], + 'etna': ['ante', 'aten', 'etna', 'nate', 'neat', 'taen', 'tane', 'tean'], + 'etnean': ['etnean', 'neaten'], + 'etonian': ['enation', 'etonian'], + 'etruscan': ['etruscan', 'recusant'], + 'etta': ['etta', 'tate', 'teat'], + 'ettarre': ['ettarre', 'retreat', 'treater'], + 'ettle': ['ettle', 'tetel'], + 'etua': ['aute', 'etua'], + 'euaster': ['austere', 'euaster'], + 'eucalypteol': ['eucalypteol', 'eucalyptole'], + 'eucalyptole': ['eucalypteol', 'eucalyptole'], + 'eucatropine': ['eucatropine', 'neurectopia'], + 'eucharis': ['acheirus', 'eucharis'], + 'eucharist': ['erithacus', 'eucharist'], + 'euchlaena': ['acheulean', 'euchlaena'], + 'eulogism': ['eulogism', 'uglisome'], + 'eumolpus': ['eumolpus', 'plumeous'], + 'eunomia': ['eunomia', 'moineau'], + 'eunomy': ['eunomy', 'euonym'], + 'euonym': ['eunomy', 'euonym'], + 'eupatorin': ['epuration', 'eupatorin'], + 'euplastic': ['euplastic', 'spiculate'], + 'euploid': ['epuloid', 'euploid'], + 'euproctis': ['crepitous', 'euproctis', 'uroseptic'], + 'eurindic': ['dineuric', 'eurindic'], + 'eurus': ['eurus', 'usure'], + 'euscaro': ['acerous', 'carouse', 'euscaro'], + 'euskaldun': ['eskualdun', 'euskaldun'], + 'euskara': ['eskuara', 'euskara'], + 'eusol': ['eusol', 'louse'], + 'eutannin': ['eutannin', 'uninnate'], + 'eutaxic': ['auxetic', 'eutaxic'], + 'eutheria': ['eutheria', 'hauerite'], + 'eutropic': ['eutropic', 'outprice'], + 'eva': ['ave', 'eva'], + 'evade': ['deave', 'eaved', 'evade'], + 'evader': ['evader', 'verdea'], + 'evadne': ['advene', 'evadne'], + 'evan': ['evan', 'nave', 'vane'], + 'evanish': ['evanish', 'inshave'], + 'evase': ['eaves', 'evase', 'seave'], + 'eve': ['eve', 'vee'], + 'evea': ['eave', 'evea'], + 'evection': ['civetone', 'evection'], + 'evejar': ['evejar', 'rajeev'], + 'evelyn': ['evelyn', 'evenly'], + 'even': ['even', 'neve', 'veen'], + 'evener': ['evener', 'veneer'], + 'evenly': ['evelyn', 'evenly'], + 'evens': ['evens', 'seven'], + 'eveque': ['eveque', 'queeve'], + 'ever': ['ever', 'reve', 'veer'], + 'evert': ['evert', 'revet'], + 'everwhich': ['everwhich', 
'whichever'], + 'everwho': ['everwho', 'however', 'whoever'], + 'every': ['every', 'veery'], + 'evestar': ['evestar', 'versate'], + 'evict': ['civet', 'evict'], + 'evil': ['evil', 'levi', 'live', 'veil', 'vile', 'vlei'], + 'evildoer': ['evildoer', 'overidle'], + 'evilhearted': ['evilhearted', 'vilehearted'], + 'evilly': ['evilly', 'lively', 'vilely'], + 'evilness': ['evilness', 'liveness', 'veinless', 'vileness', 'vineless'], + 'evince': ['cevine', 'evince', 'venice'], + 'evisite': ['evisite', 'visitee'], + 'evitation': ['evitation', 'novitiate'], + 'evocator': ['evocator', 'overcoat'], + 'evodia': ['evodia', 'ovidae'], + 'evoker': ['evoker', 'revoke'], + 'evolver': ['evolver', 'revolve'], + 'ewder': ['dewer', 'ewder', 'rewed'], + 'ewe': ['ewe', 'wee'], + 'ewer': ['ewer', 'were'], + 'exacter': ['exacter', 'excreta'], + 'exalt': ['exalt', 'latex'], + 'exam': ['amex', 'exam', 'xema'], + 'examinate': ['examinate', 'exanimate', 'metaxenia'], + 'examination': ['examination', 'exanimation'], + 'exanimate': ['examinate', 'exanimate', 'metaxenia'], + 'exanimation': ['examination', 'exanimation'], + 'exasperation': ['exasperation', 'xenoparasite'], + 'exaudi': ['adieux', 'exaudi'], + 'excarnation': ['centraxonia', 'excarnation'], + 'excecation': ['cacoxenite', 'excecation'], + 'except': ['except', 'expect'], + 'exceptant': ['exceptant', 'expectant'], + 'exceptive': ['exceptive', 'expective'], + 'excitation': ['excitation', 'intoxicate'], + 'excitor': ['excitor', 'xerotic'], + 'excreta': ['exacter', 'excreta'], + 'excurse': ['excurse', 'excuser'], + 'excuser': ['excurse', 'excuser'], + 'exert': ['exert', 'exter'], + 'exhilarate': ['exhilarate', 'heteraxial'], + 'exist': ['exist', 'sixte'], + 'exocarp': ['exocarp', 'praecox'], + 'exon': ['exon', 'oxen'], + 'exordia': ['exordia', 'exradio'], + 'exotic': ['coxite', 'exotic'], + 'expatiater': ['expatiater', 'expatriate'], + 'expatriate': ['expatiater', 'expatriate'], + 'expect': ['except', 'expect'], + 'expectant': ['exceptant', 'expectant'], + 'expective': ['exceptive', 'expective'], + 'expirator': ['expirator', 'operatrix'], + 'expiree': ['expiree', 'peixere'], + 'explicator': ['explicator', 'extropical'], + 'expressionism': ['expressionism', 'misexpression'], + 'exradio': ['exordia', 'exradio'], + 'extend': ['dentex', 'extend'], + 'exter': ['exert', 'exter'], + 'exterminate': ['antiextreme', 'exterminate'], + 'extirpationist': ['extirpationist', 'sextipartition'], + 'extra': ['extra', 'retax', 'taxer'], + 'extradural': ['dextraural', 'extradural'], + 'extropical': ['explicator', 'extropical'], + 'exultancy': ['exultancy', 'unexactly'], + 'ey': ['ey', 'ye'], + 'eyah': ['ahey', 'eyah', 'yeah'], + 'eyas': ['easy', 'eyas'], + 'eye': ['eye', 'yee'], + 'eyed': ['eyed', 'yede'], + 'eyen': ['eyen', 'eyne'], + 'eyer': ['eyer', 'eyre', 'yere'], + 'eyn': ['eyn', 'nye', 'yen'], + 'eyne': ['eyen', 'eyne'], + 'eyot': ['eyot', 'yote'], + 'eyra': ['aery', 'eyra', 'yare', 'year'], + 'eyre': ['eyer', 'eyre', 'yere'], + 'ezba': ['baze', 'ezba'], + 'ezra': ['ezra', 'raze'], + 'facebread': ['barefaced', 'facebread'], + 'facer': ['facer', 'farce'], + 'faciend': ['faciend', 'fancied'], + 'facile': ['facile', 'filace'], + 'faciobrachial': ['brachiofacial', 'faciobrachial'], + 'faciocervical': ['cervicofacial', 'faciocervical'], + 'factable': ['factable', 'labefact'], + 'factional': ['factional', 'falcation'], + 'factish': ['catfish', 'factish'], + 'facture': ['facture', 'furcate'], + 'facula': ['facula', 'faucal'], + 'fade': ['deaf', 'fade'], + 'fader': ['fader', 'farde'], 
+ 'faery': ['faery', 'freya'], + 'fagoter': ['aftergo', 'fagoter'], + 'faience': ['faience', 'fiancee'], + 'fail': ['alif', 'fail'], + 'fain': ['fain', 'naif'], + 'fainly': ['fainly', 'naifly'], + 'faint': ['faint', 'fanti'], + 'fair': ['fair', 'fiar', 'raif'], + 'fake': ['fake', 'feak'], + 'faker': ['faker', 'freak'], + 'fakery': ['fakery', 'freaky'], + 'fakir': ['fakir', 'fraik', 'kafir', 'rafik'], + 'falcation': ['factional', 'falcation'], + 'falco': ['falco', 'focal'], + 'falconet': ['conflate', 'falconet'], + 'fallback': ['backfall', 'fallback'], + 'faller': ['faller', 'refall'], + 'fallfish': ['fallfish', 'fishfall'], + 'fallible': ['fallible', 'fillable'], + 'falling': ['falling', 'fingall'], + 'falser': ['falser', 'flaser'], + 'faltboat': ['faltboat', 'flatboat'], + 'falutin': ['falutin', 'flutina'], + 'falx': ['falx', 'flax'], + 'fameless': ['fameless', 'selfsame'], + 'famelessness': ['famelessness', 'selfsameness'], + 'famine': ['famine', 'infame'], + 'fancied': ['faciend', 'fancied'], + 'fangle': ['fangle', 'flange'], + 'fannia': ['fannia', 'fianna'], + 'fanti': ['faint', 'fanti'], + 'far': ['far', 'fra'], + 'farad': ['daraf', 'farad'], + 'farce': ['facer', 'farce'], + 'farcetta': ['afteract', 'artefact', 'farcetta', 'farctate'], + 'farctate': ['afteract', 'artefact', 'farcetta', 'farctate'], + 'farde': ['fader', 'farde'], + 'fardel': ['alfred', 'fardel'], + 'fare': ['fare', 'fear', 'frae', 'rafe'], + 'farfel': ['farfel', 'raffle'], + 'faring': ['faring', 'frangi'], + 'farl': ['farl', 'ralf'], + 'farleu': ['earful', 'farleu', 'ferula'], + 'farm': ['farm', 'fram'], + 'farmable': ['farmable', 'framable'], + 'farmer': ['farmer', 'framer'], + 'farming': ['farming', 'framing'], + 'farnesol': ['farnesol', 'forensal'], + 'faro': ['faro', 'fora'], + 'farolito': ['farolito', 'footrail'], + 'farse': ['farse', 'frase'], + 'farset': ['farset', 'faster', 'strafe'], + 'farsi': ['farsi', 'sarif'], + 'fascio': ['fascio', 'fiasco'], + 'fasher': ['afresh', 'fasher', 'ferash'], + 'fashioner': ['fashioner', 'refashion'], + 'fast': ['fast', 'saft'], + 'fasten': ['fasten', 'nefast', 'stefan'], + 'fastener': ['fastener', 'fenestra', 'refasten'], + 'faster': ['farset', 'faster', 'strafe'], + 'fasthold': ['fasthold', 'holdfast'], + 'fastland': ['fastland', 'landfast'], + 'fat': ['aft', 'fat'], + 'fatal': ['aflat', 'fatal'], + 'fate': ['atef', 'fate', 'feat'], + 'fated': ['defat', 'fated'], + 'father': ['father', 'freath', 'hafter'], + 'faucal': ['facula', 'faucal'], + 'faucet': ['faucet', 'fucate'], + 'faulter': ['faulter', 'refutal', 'tearful'], + 'faultfind': ['faultfind', 'findfault'], + 'faunish': ['faunish', 'nusfiah'], + 'faunist': ['faunist', 'fustian', 'infaust'], + 'favorer': ['favorer', 'overfar', 'refavor'], + 'fayles': ['fayles', 'safely'], + 'feague': ['feague', 'feuage'], + 'feak': ['fake', 'feak'], + 'feal': ['alef', 'feal', 'flea', 'leaf'], + 'fealty': ['fealty', 'featly'], + 'fear': ['fare', 'fear', 'frae', 'rafe'], + 'feastful': ['feastful', 'sufflate'], + 'feat': ['atef', 'fate', 'feat'], + 'featherbed': ['befathered', 'featherbed'], + 'featherer': ['featherer', 'hereafter'], + 'featly': ['fealty', 'featly'], + 'feckly': ['feckly', 'flecky'], + 'fecundate': ['fecundate', 'unfaceted'], + 'fecundator': ['fecundator', 'unfactored'], + 'federate': ['defeater', 'federate', 'redefeat'], + 'feeder': ['feeder', 'refeed'], + 'feeding': ['feeding', 'feigned'], + 'feel': ['feel', 'flee'], + 'feeler': ['feeler', 'refeel', 'reflee'], + 'feer': ['feer', 'free', 'reef'], + 'feering': ['feering', 
'feigner', 'freeing', 'reefing', 'refeign'], + 'feetless': ['feetless', 'feteless'], + 'fei': ['fei', 'fie', 'ife'], + 'feif': ['feif', 'fife'], + 'feigned': ['feeding', 'feigned'], + 'feigner': ['feering', 'feigner', 'freeing', 'reefing', 'refeign'], + 'feil': ['feil', 'file', 'leif', 'lief', 'life'], + 'feint': ['feint', 'fient'], + 'feis': ['feis', 'fise', 'sife'], + 'feist': ['feist', 'stife'], + 'felapton': ['felapton', 'pantofle'], + 'felid': ['felid', 'field'], + 'feline': ['enfile', 'enlief', 'enlife', 'feline'], + 'felinity': ['felinity', 'finitely'], + 'fels': ['fels', 'self'], + 'felt': ['felt', 'flet', 'left'], + 'felter': ['felter', 'telfer', 'trefle'], + 'felting': ['felting', 'neftgil'], + 'feltness': ['feltness', 'leftness'], + 'felwort': ['elfwort', 'felwort'], + 'feminal': ['feminal', 'inflame'], + 'femora': ['femora', 'foamer'], + 'femorocaudal': ['caudofemoral', 'femorocaudal'], + 'femorotibial': ['femorotibial', 'tibiofemoral'], + 'femur': ['femur', 'fumer'], + 'fen': ['fen', 'nef'], + 'fender': ['fender', 'ferned'], + 'fenestra': ['fastener', 'fenestra', 'refasten'], + 'feodary': ['feodary', 'foreday'], + 'feral': ['feral', 'flare'], + 'ferash': ['afresh', 'fasher', 'ferash'], + 'feria': ['afire', 'feria'], + 'ferine': ['ferine', 'refine'], + 'ferison': ['ferison', 'foresin'], + 'ferity': ['ferity', 'freity'], + 'ferk': ['ferk', 'kerf'], + 'ferling': ['ferling', 'flinger', 'refling'], + 'ferly': ['ferly', 'flyer', 'refly'], + 'fermail': ['fermail', 'fermila'], + 'fermenter': ['fermenter', 'referment'], + 'fermila': ['fermail', 'fermila'], + 'ferned': ['fender', 'ferned'], + 'ferri': ['ferri', 'firer', 'freir', 'frier'], + 'ferrihydrocyanic': ['ferrihydrocyanic', 'hydroferricyanic'], + 'ferrohydrocyanic': ['ferrohydrocyanic', 'hydroferrocyanic'], + 'ferry': ['ferry', 'freyr', 'fryer'], + 'fertil': ['fertil', 'filter', 'lifter', 'relift', 'trifle'], + 'ferula': ['earful', 'farleu', 'ferula'], + 'ferule': ['ferule', 'fueler', 'refuel'], + 'ferulic': ['ferulic', 'lucifer'], + 'fervidity': ['devitrify', 'fervidity'], + 'festination': ['festination', 'infestation', 'sinfonietta'], + 'fet': ['eft', 'fet'], + 'fetal': ['aleft', 'alfet', 'fetal', 'fleta'], + 'fetcher': ['fetcher', 'refetch'], + 'feteless': ['feetless', 'feteless'], + 'fetial': ['fetial', 'filate', 'lafite', 'leafit'], + 'fetish': ['fetish', 'fishet'], + 'fetor': ['fetor', 'forte', 'ofter'], + 'fetter': ['fetter', 'frette'], + 'feuage': ['feague', 'feuage'], + 'feudalism': ['feudalism', 'sulfamide'], + 'feudally': ['delayful', 'feudally'], + 'feulamort': ['feulamort', 'formulate'], + 'fi': ['fi', 'if'], + 'fiance': ['fiance', 'inface'], + 'fiancee': ['faience', 'fiancee'], + 'fianna': ['fannia', 'fianna'], + 'fiar': ['fair', 'fiar', 'raif'], + 'fiard': ['fiard', 'fraid'], + 'fiasco': ['fascio', 'fiasco'], + 'fiber': ['bifer', 'brief', 'fiber'], + 'fibered': ['debrief', 'defiber', 'fibered'], + 'fiberless': ['briefless', 'fiberless', 'fibreless'], + 'fiberware': ['fiberware', 'fibreware'], + 'fibreless': ['briefless', 'fiberless', 'fibreless'], + 'fibreware': ['fiberware', 'fibreware'], + 'fibroadenoma': ['adenofibroma', 'fibroadenoma'], + 'fibroangioma': ['angiofibroma', 'fibroangioma'], + 'fibrochondroma': ['chondrofibroma', 'fibrochondroma'], + 'fibrocystoma': ['cystofibroma', 'fibrocystoma'], + 'fibrolipoma': ['fibrolipoma', 'lipofibroma'], + 'fibromucous': ['fibromucous', 'mucofibrous'], + 'fibromyoma': ['fibromyoma', 'myofibroma'], + 'fibromyxoma': ['fibromyxoma', 'myxofibroma'], + 'fibromyxosarcoma': 
['fibromyxosarcoma', 'myxofibrosarcoma'], + 'fibroneuroma': ['fibroneuroma', 'neurofibroma'], + 'fibroserous': ['fibroserous', 'serofibrous'], + 'fiche': ['chief', 'fiche'], + 'fickleness': ['fickleness', 'fleckiness'], + 'fickly': ['fickly', 'flicky'], + 'fico': ['coif', 'fico', 'foci'], + 'fictional': ['cliftonia', 'fictional'], + 'ficula': ['ficula', 'fulica'], + 'fiddler': ['fiddler', 'flidder'], + 'fidele': ['defile', 'fidele'], + 'fidget': ['fidget', 'gifted'], + 'fidicula': ['fidicula', 'fiducial'], + 'fiducial': ['fidicula', 'fiducial'], + 'fie': ['fei', 'fie', 'ife'], + 'fiedlerite': ['fiedlerite', 'friedelite'], + 'field': ['felid', 'field'], + 'fielded': ['defiled', 'fielded'], + 'fielder': ['defiler', 'fielder'], + 'fieldman': ['fieldman', 'inflamed'], + 'fiendish': ['fiendish', 'finished'], + 'fient': ['feint', 'fient'], + 'fiery': ['fiery', 'reify'], + 'fife': ['feif', 'fife'], + 'fifteener': ['fifteener', 'teneriffe'], + 'fifty': ['fifty', 'tiffy'], + 'fig': ['fig', 'gif'], + 'fighter': ['fighter', 'freight', 'refight'], + 'figurate': ['figurate', 'fruitage'], + 'fike': ['efik', 'fike'], + 'filace': ['facile', 'filace'], + 'filago': ['filago', 'gifola'], + 'filao': ['filao', 'folia'], + 'filar': ['filar', 'flair', 'frail'], + 'filate': ['fetial', 'filate', 'lafite', 'leafit'], + 'file': ['feil', 'file', 'leif', 'lief', 'life'], + 'filelike': ['filelike', 'lifelike'], + 'filer': ['filer', 'flier', 'lifer', 'rifle'], + 'filet': ['filet', 'flite'], + 'fillable': ['fallible', 'fillable'], + 'filler': ['filler', 'refill'], + 'filo': ['filo', 'foil', 'lifo'], + 'filter': ['fertil', 'filter', 'lifter', 'relift', 'trifle'], + 'filterer': ['filterer', 'refilter'], + 'filthless': ['filthless', 'shelflist'], + 'filtrable': ['filtrable', 'flirtable'], + 'filtration': ['filtration', 'flirtation'], + 'finale': ['afenil', 'finale'], + 'finder': ['finder', 'friend', 'redfin', 'refind'], + 'findfault': ['faultfind', 'findfault'], + 'fine': ['enif', 'fine', 'neif', 'nife'], + 'finely': ['finely', 'lenify'], + 'finer': ['finer', 'infer'], + 'finesser': ['finesser', 'rifeness'], + 'fingall': ['falling', 'fingall'], + 'finger': ['finger', 'fringe'], + 'fingerer': ['fingerer', 'refinger'], + 'fingerflower': ['fingerflower', 'fringeflower'], + 'fingerless': ['fingerless', 'fringeless'], + 'fingerlet': ['fingerlet', 'fringelet'], + 'fingu': ['fingu', 'fungi'], + 'finical': ['finical', 'lanific'], + 'finished': ['fiendish', 'finished'], + 'finisher': ['finisher', 'refinish'], + 'finitely': ['felinity', 'finitely'], + 'finkel': ['elfkin', 'finkel'], + 'finlet': ['finlet', 'infelt'], + 'finner': ['finner', 'infern'], + 'firca': ['afric', 'firca'], + 'fire': ['fire', 'reif', 'rife'], + 'fireable': ['afebrile', 'balefire', 'fireable'], + 'firearm': ['firearm', 'marfire'], + 'fireback': ['backfire', 'fireback'], + 'fireburn': ['burnfire', 'fireburn'], + 'fired': ['fired', 'fried'], + 'fireplug': ['fireplug', 'gripeful'], + 'firer': ['ferri', 'firer', 'freir', 'frier'], + 'fireshaft': ['fireshaft', 'tasheriff'], + 'firestone': ['firestone', 'forestine'], + 'firetop': ['firetop', 'potifer'], + 'firm': ['firm', 'frim'], + 'first': ['first', 'frist'], + 'firth': ['firth', 'frith'], + 'fise': ['feis', 'fise', 'sife'], + 'fishbone': ['bonefish', 'fishbone'], + 'fisheater': ['fisheater', 'sherifate'], + 'fisher': ['fisher', 'sherif'], + 'fishery': ['fishery', 'sherify'], + 'fishet': ['fetish', 'fishet'], + 'fishfall': ['fallfish', 'fishfall'], + 'fishlet': ['fishlet', 'leftish'], + 'fishpond': ['fishpond', 
'pondfish'], + 'fishpool': ['fishpool', 'foolship'], + 'fishwood': ['fishwood', 'woodfish'], + 'fissury': ['fissury', 'russify'], + 'fist': ['fist', 'sift'], + 'fisted': ['fisted', 'sifted'], + 'fister': ['fister', 'resift', 'sifter', 'strife'], + 'fisting': ['fisting', 'sifting'], + 'fitout': ['fitout', 'outfit'], + 'fitter': ['fitter', 'tifter'], + 'fixer': ['fixer', 'refix'], + 'flageolet': ['flageolet', 'folletage'], + 'flair': ['filar', 'flair', 'frail'], + 'flamant': ['flamant', 'flatman'], + 'flame': ['flame', 'fleam'], + 'flamed': ['flamed', 'malfed'], + 'flandowser': ['flandowser', 'sandflower'], + 'flange': ['fangle', 'flange'], + 'flare': ['feral', 'flare'], + 'flaser': ['falser', 'flaser'], + 'flasher': ['flasher', 'reflash'], + 'flatboat': ['faltboat', 'flatboat'], + 'flatman': ['flamant', 'flatman'], + 'flatwise': ['flatwise', 'saltwife'], + 'flaunt': ['flaunt', 'unflat'], + 'flax': ['falx', 'flax'], + 'flea': ['alef', 'feal', 'flea', 'leaf'], + 'fleam': ['flame', 'fleam'], + 'fleay': ['fleay', 'leafy'], + 'fleche': ['fleche', 'fleech'], + 'flecker': ['flecker', 'freckle'], + 'fleckiness': ['fickleness', 'fleckiness'], + 'flecky': ['feckly', 'flecky'], + 'fled': ['delf', 'fled'], + 'flee': ['feel', 'flee'], + 'fleech': ['fleche', 'fleech'], + 'fleer': ['fleer', 'refel'], + 'flemish': ['flemish', 'himself'], + 'flenser': ['flenser', 'fresnel'], + 'flesh': ['flesh', 'shelf'], + 'fleshed': ['deflesh', 'fleshed'], + 'fleshen': ['enflesh', 'fleshen'], + 'flesher': ['flesher', 'herself'], + 'fleshful': ['fleshful', 'shelfful'], + 'fleshiness': ['elfishness', 'fleshiness'], + 'fleshy': ['fleshy', 'shelfy'], + 'flet': ['felt', 'flet', 'left'], + 'fleta': ['aleft', 'alfet', 'fetal', 'fleta'], + 'fleuret': ['fleuret', 'treeful'], + 'flew': ['flew', 'welf'], + 'flexed': ['deflex', 'flexed'], + 'flexured': ['flexured', 'refluxed'], + 'flicky': ['fickly', 'flicky'], + 'flidder': ['fiddler', 'flidder'], + 'flier': ['filer', 'flier', 'lifer', 'rifle'], + 'fligger': ['fligger', 'friggle'], + 'flinger': ['ferling', 'flinger', 'refling'], + 'flingy': ['flingy', 'flying'], + 'flirtable': ['filtrable', 'flirtable'], + 'flirtation': ['filtration', 'flirtation'], + 'flirter': ['flirter', 'trifler'], + 'flirting': ['flirting', 'trifling'], + 'flirtingly': ['flirtingly', 'triflingly'], + 'flit': ['flit', 'lift'], + 'flite': ['filet', 'flite'], + 'fliting': ['fliting', 'lifting'], + 'flitter': ['flitter', 'triflet'], + 'flo': ['flo', 'lof'], + 'float': ['aloft', 'float', 'flota'], + 'floater': ['floater', 'florate', 'refloat'], + 'flobby': ['bobfly', 'flobby'], + 'flodge': ['flodge', 'fodgel'], + 'floe': ['floe', 'fole'], + 'flog': ['flog', 'golf'], + 'flogger': ['flogger', 'frogleg'], + 'floodable': ['bloodleaf', 'floodable'], + 'flooder': ['flooder', 'reflood'], + 'floodwater': ['floodwater', 'toadflower', 'waterflood'], + 'floorer': ['floorer', 'refloor'], + 'florate': ['floater', 'florate', 'refloat'], + 'florentine': ['florentine', 'nonfertile'], + 'floret': ['floret', 'forlet', 'lofter', 'torfel'], + 'floria': ['floria', 'foliar'], + 'floriate': ['floriate', 'foralite'], + 'florican': ['florican', 'fornical'], + 'floridan': ['floridan', 'florinda'], + 'florinda': ['floridan', 'florinda'], + 'flot': ['flot', 'loft'], + 'flota': ['aloft', 'float', 'flota'], + 'flounder': ['flounder', 'reunfold', 'unfolder'], + 'flour': ['flour', 'fluor'], + 'flourisher': ['flourisher', 'reflourish'], + 'flouting': ['flouting', 'outfling'], + 'flow': ['flow', 'fowl', 'wolf'], + 'flower': ['flower', 'fowler', 
'reflow', 'wolfer'], + 'flowered': ['deflower', 'flowered'], + 'flowerer': ['flowerer', 'reflower'], + 'flowery': ['flowery', 'fowlery'], + 'flowing': ['flowing', 'fowling'], + 'floyd': ['floyd', 'foldy'], + 'fluavil': ['fluavil', 'fluvial', 'vialful'], + 'flucan': ['canful', 'flucan'], + 'fluctuant': ['fluctuant', 'untactful'], + 'flue': ['flue', 'fuel'], + 'fluent': ['fluent', 'netful', 'unfelt', 'unleft'], + 'fluidly': ['dullify', 'fluidly'], + 'flukewort': ['flukewort', 'flutework'], + 'fluor': ['flour', 'fluor'], + 'fluorate': ['fluorate', 'outflare'], + 'fluorinate': ['antifouler', 'fluorinate', 'uniflorate'], + 'fluorine': ['fluorine', 'neurofil'], + 'fluorobenzene': ['benzofluorene', 'fluorobenzene'], + 'flusher': ['flusher', 'reflush'], + 'flushing': ['flushing', 'lungfish'], + 'fluster': ['fluster', 'restful'], + 'flustra': ['flustra', 'starful'], + 'flutework': ['flukewort', 'flutework'], + 'flutina': ['falutin', 'flutina'], + 'fluvial': ['fluavil', 'fluvial', 'vialful'], + 'fluxer': ['fluxer', 'reflux'], + 'flyblow': ['blowfly', 'flyblow'], + 'flyer': ['ferly', 'flyer', 'refly'], + 'flying': ['flingy', 'flying'], + 'fo': ['fo', 'of'], + 'foal': ['foal', 'loaf', 'olaf'], + 'foamer': ['femora', 'foamer'], + 'focal': ['falco', 'focal'], + 'foci': ['coif', 'fico', 'foci'], + 'focuser': ['focuser', 'refocus'], + 'fodge': ['defog', 'fodge'], + 'fodgel': ['flodge', 'fodgel'], + 'fogeater': ['fogeater', 'foregate'], + 'fogo': ['fogo', 'goof'], + 'foil': ['filo', 'foil', 'lifo'], + 'foister': ['foister', 'forties'], + 'folden': ['enfold', 'folden', 'fondle'], + 'folder': ['folder', 'refold'], + 'foldy': ['floyd', 'foldy'], + 'fole': ['floe', 'fole'], + 'folia': ['filao', 'folia'], + 'foliar': ['floria', 'foliar'], + 'foliature': ['foliature', 'toluifera'], + 'folletage': ['flageolet', 'folletage'], + 'fomenter': ['fomenter', 'refoment'], + 'fondle': ['enfold', 'folden', 'fondle'], + 'fondu': ['fondu', 'found'], + 'foo': ['foo', 'ofo'], + 'fool': ['fool', 'loof', 'olof'], + 'foolship': ['fishpool', 'foolship'], + 'footer': ['footer', 'refoot'], + 'foothot': ['foothot', 'hotfoot'], + 'footler': ['footler', 'rooflet'], + 'footpad': ['footpad', 'padfoot'], + 'footrail': ['farolito', 'footrail'], + 'foots': ['foots', 'sfoot', 'stoof'], + 'footsore': ['footsore', 'sorefoot'], + 'foppish': ['foppish', 'fopship'], + 'fopship': ['foppish', 'fopship'], + 'for': ['for', 'fro', 'orf'], + 'fora': ['faro', 'fora'], + 'foralite': ['floriate', 'foralite'], + 'foramen': ['foramen', 'foreman'], + 'forcemeat': ['aftercome', 'forcemeat'], + 'forcement': ['coferment', 'forcement'], + 'fore': ['fore', 'froe', 'ofer'], + 'forecast': ['cofaster', 'forecast'], + 'forecaster': ['forecaster', 'reforecast'], + 'forecover': ['forecover', 'overforce'], + 'foreday': ['feodary', 'foreday'], + 'forefit': ['forefit', 'forfeit'], + 'foregate': ['fogeater', 'foregate'], + 'foregirth': ['foregirth', 'foreright'], + 'forego': ['forego', 'goofer'], + 'forel': ['forel', 'rolfe'], + 'forelive': ['forelive', 'overfile'], + 'foreman': ['foramen', 'foreman'], + 'foremean': ['foremean', 'forename'], + 'forename': ['foremean', 'forename'], + 'forensal': ['farnesol', 'forensal'], + 'forensic': ['forensic', 'forinsec'], + 'forepart': ['forepart', 'prefator'], + 'foreright': ['foregirth', 'foreright'], + 'foresend': ['defensor', 'foresend'], + 'foresign': ['foresign', 'foresing'], + 'foresin': ['ferison', 'foresin'], + 'foresing': ['foresign', 'foresing'], + 'forest': ['forest', 'forset', 'foster'], + 'forestage': ['forestage', 
'fosterage'], + 'forestal': ['astrofel', 'forestal'], + 'forestate': ['forestate', 'foretaste'], + 'forested': ['deforest', 'forested'], + 'forestem': ['forestem', 'fretsome'], + 'forester': ['forester', 'fosterer', 'reforest'], + 'forestine': ['firestone', 'forestine'], + 'foretaste': ['forestate', 'foretaste'], + 'foreutter': ['foreutter', 'outferret'], + 'forfeit': ['forefit', 'forfeit'], + 'forfeiter': ['forfeiter', 'reforfeit'], + 'forgeman': ['forgeman', 'formagen'], + 'forinsec': ['forensic', 'forinsec'], + 'forint': ['forint', 'fortin'], + 'forlet': ['floret', 'forlet', 'lofter', 'torfel'], + 'form': ['form', 'from'], + 'formagen': ['forgeman', 'formagen'], + 'formalin': ['formalin', 'informal', 'laniform'], + 'formally': ['formally', 'formylal'], + 'formed': ['deform', 'formed'], + 'former': ['former', 'reform'], + 'formica': ['aciform', 'formica'], + 'formicina': ['aciniform', 'formicina'], + 'formicoidea': ['aecidioform', 'formicoidea'], + 'formin': ['formin', 'inform'], + 'forminate': ['forminate', 'fremontia', 'taeniform'], + 'formulae': ['formulae', 'fumarole'], + 'formulaic': ['cauliform', 'formulaic', 'fumarolic'], + 'formulate': ['feulamort', 'formulate'], + 'formulator': ['formulator', 'torulaform'], + 'formylal': ['formally', 'formylal'], + 'fornical': ['florican', 'fornical'], + 'fornicated': ['deforciant', 'fornicated'], + 'forpit': ['forpit', 'profit'], + 'forritsome': ['forritsome', 'ostreiform'], + 'forrue': ['forrue', 'fourer', 'fourre', 'furore'], + 'forset': ['forest', 'forset', 'foster'], + 'forst': ['forst', 'frost'], + 'fort': ['fort', 'frot'], + 'forte': ['fetor', 'forte', 'ofter'], + 'forth': ['forth', 'froth'], + 'forthcome': ['forthcome', 'homecroft'], + 'forthy': ['forthy', 'frothy'], + 'forties': ['foister', 'forties'], + 'fortin': ['forint', 'fortin'], + 'forward': ['forward', 'froward'], + 'forwarder': ['forwarder', 'reforward'], + 'forwardly': ['forwardly', 'frowardly'], + 'forwardness': ['forwardness', 'frowardness'], + 'foster': ['forest', 'forset', 'foster'], + 'fosterage': ['forestage', 'fosterage'], + 'fosterer': ['forester', 'fosterer', 'reforest'], + 'fot': ['fot', 'oft'], + 'fou': ['fou', 'ouf'], + 'fouler': ['fouler', 'furole'], + 'found': ['fondu', 'found'], + 'foundationer': ['foundationer', 'refoundation'], + 'founder': ['founder', 'refound'], + 'foundling': ['foundling', 'unfolding'], + 'fourble': ['beflour', 'fourble'], + 'fourer': ['forrue', 'fourer', 'fourre', 'furore'], + 'fourre': ['forrue', 'fourer', 'fourre', 'furore'], + 'fowl': ['flow', 'fowl', 'wolf'], + 'fowler': ['flower', 'fowler', 'reflow', 'wolfer'], + 'fowlery': ['flowery', 'fowlery'], + 'fowling': ['flowing', 'fowling'], + 'fra': ['far', 'fra'], + 'frache': ['chafer', 'frache'], + 'frae': ['fare', 'fear', 'frae', 'rafe'], + 'fraghan': ['fraghan', 'harfang'], + 'fraid': ['fiard', 'fraid'], + 'fraik': ['fakir', 'fraik', 'kafir', 'rafik'], + 'frail': ['filar', 'flair', 'frail'], + 'fraiser': ['fraiser', 'frasier'], + 'fram': ['farm', 'fram'], + 'framable': ['farmable', 'framable'], + 'frame': ['frame', 'fream'], + 'framer': ['farmer', 'framer'], + 'framing': ['farming', 'framing'], + 'frangi': ['faring', 'frangi'], + 'frantic': ['frantic', 'infarct', 'infract'], + 'frase': ['farse', 'frase'], + 'frasier': ['fraiser', 'frasier'], + 'frat': ['frat', 'raft'], + 'fratcheous': ['fratcheous', 'housecraft'], + 'frater': ['frater', 'rafter'], + 'frayed': ['defray', 'frayed'], + 'freak': ['faker', 'freak'], + 'freaky': ['fakery', 'freaky'], + 'fream': ['frame', 'fream'], + 'freath': 
['father', 'freath', 'hafter'], + 'freckle': ['flecker', 'freckle'], + 'free': ['feer', 'free', 'reef'], + 'freed': ['defer', 'freed'], + 'freeing': ['feering', 'feigner', 'freeing', 'reefing', 'refeign'], + 'freeman': ['enframe', 'freeman'], + 'freer': ['freer', 'refer'], + 'fregata': ['fregata', 'raftage'], + 'fregatae': ['afterage', 'fregatae'], + 'freight': ['fighter', 'freight', 'refight'], + 'freir': ['ferri', 'firer', 'freir', 'frier'], + 'freit': ['freit', 'refit'], + 'freity': ['ferity', 'freity'], + 'fremontia': ['forminate', 'fremontia', 'taeniform'], + 'frenetic': ['frenetic', 'infecter', 'reinfect'], + 'freshener': ['freshener', 'refreshen'], + 'fresnel': ['flenser', 'fresnel'], + 'fret': ['fret', 'reft', 'tref'], + 'fretful': ['fretful', 'truffle'], + 'fretsome': ['forestem', 'fretsome'], + 'frette': ['fetter', 'frette'], + 'freya': ['faery', 'freya'], + 'freyr': ['ferry', 'freyr', 'fryer'], + 'fried': ['fired', 'fried'], + 'friedelite': ['fiedlerite', 'friedelite'], + 'friend': ['finder', 'friend', 'redfin', 'refind'], + 'frier': ['ferri', 'firer', 'freir', 'frier'], + 'friesic': ['friesic', 'serific'], + 'friggle': ['fligger', 'friggle'], + 'frightener': ['frightener', 'refrighten'], + 'frigolabile': ['frigolabile', 'glorifiable'], + 'frike': ['frike', 'kefir'], + 'frim': ['firm', 'frim'], + 'fringe': ['finger', 'fringe'], + 'fringeflower': ['fingerflower', 'fringeflower'], + 'fringeless': ['fingerless', 'fringeless'], + 'fringelet': ['fingerlet', 'fringelet'], + 'frist': ['first', 'frist'], + 'frit': ['frit', 'rift'], + 'frith': ['firth', 'frith'], + 'friulian': ['friulian', 'unifilar'], + 'fro': ['for', 'fro', 'orf'], + 'froe': ['fore', 'froe', 'ofer'], + 'frogleg': ['flogger', 'frogleg'], + 'from': ['form', 'from'], + 'fronter': ['fronter', 'refront'], + 'frontonasal': ['frontonasal', 'nasofrontal'], + 'frontooccipital': ['frontooccipital', 'occipitofrontal'], + 'frontoorbital': ['frontoorbital', 'orbitofrontal'], + 'frontoparietal': ['frontoparietal', 'parietofrontal'], + 'frontotemporal': ['frontotemporal', 'temporofrontal'], + 'frontpiece': ['frontpiece', 'perfection'], + 'frost': ['forst', 'frost'], + 'frosted': ['defrost', 'frosted'], + 'frot': ['fort', 'frot'], + 'froth': ['forth', 'froth'], + 'frothy': ['forthy', 'frothy'], + 'froward': ['forward', 'froward'], + 'frowardly': ['forwardly', 'frowardly'], + 'frowardness': ['forwardness', 'frowardness'], + 'fruitage': ['figurate', 'fruitage'], + 'fruitless': ['fruitless', 'resistful'], + 'frush': ['frush', 'shurf'], + 'frustule': ['frustule', 'sulfuret'], + 'fruticulose': ['fruticulose', 'luctiferous'], + 'fryer': ['ferry', 'freyr', 'fryer'], + 'fucales': ['caseful', 'fucales'], + 'fucate': ['faucet', 'fucate'], + 'fuel': ['flue', 'fuel'], + 'fueler': ['ferule', 'fueler', 'refuel'], + 'fuerte': ['fuerte', 'refute'], + 'fuirena': ['fuirena', 'unafire'], + 'fulcrate': ['crateful', 'fulcrate'], + 'fulica': ['ficula', 'fulica'], + 'fulmar': ['armful', 'fulmar'], + 'fulminatory': ['fulminatory', 'unformality'], + 'fulminous': ['fulminous', 'sulfonium'], + 'fulwa': ['awful', 'fulwa'], + 'fumarole': ['formulae', 'fumarole'], + 'fumarolic': ['cauliform', 'formulaic', 'fumarolic'], + 'fumble': ['beflum', 'fumble'], + 'fumer': ['femur', 'fumer'], + 'fundable': ['fundable', 'unfabled'], + 'funder': ['funder', 'refund'], + 'funebrial': ['funebrial', 'unfriable'], + 'funeral': ['earnful', 'funeral'], + 'fungal': ['fungal', 'unflag'], + 'fungi': ['fingu', 'fungi'], + 'funori': ['funori', 'furoin'], + 'fur': ['fur', 'urf'], + 
'fural': ['alfur', 'fural'], + 'furan': ['furan', 'unfar'], + 'furbish': ['burfish', 'furbish'], + 'furbisher': ['furbisher', 'refurbish'], + 'furcal': ['carful', 'furcal'], + 'furcate': ['facture', 'furcate'], + 'furler': ['furler', 'refurl'], + 'furnish': ['furnish', 'runfish'], + 'furnisher': ['furnisher', 'refurnish'], + 'furoin': ['funori', 'furoin'], + 'furole': ['fouler', 'furole'], + 'furore': ['forrue', 'fourer', 'fourre', 'furore'], + 'furstone': ['furstone', 'unforest'], + 'fusate': ['estufa', 'fusate'], + 'fusteric': ['fusteric', 'scutifer'], + 'fustian': ['faunist', 'fustian', 'infaust'], + 'gab': ['bag', 'gab'], + 'gabbler': ['gabbler', 'grabble'], + 'gabe': ['egba', 'gabe'], + 'gabelle': ['gabelle', 'gelable'], + 'gabelled': ['gabelled', 'geldable'], + 'gabi': ['agib', 'biga', 'gabi'], + 'gabion': ['bagnio', 'gabion', 'gobian'], + 'gabioned': ['badigeon', 'gabioned'], + 'gable': ['bagel', 'belga', 'gable', 'gleba'], + 'gablock': ['backlog', 'gablock'], + 'gaboon': ['abongo', 'gaboon'], + 'gad': ['dag', 'gad'], + 'gadaba': ['badaga', 'dagaba', 'gadaba'], + 'gadder': ['gadder', 'graded'], + 'gaddi': ['gaddi', 'gadid'], + 'gade': ['aged', 'egad', 'gade'], + 'gadger': ['dagger', 'gadger', 'ragged'], + 'gadget': ['gadget', 'tagged'], + 'gadid': ['gaddi', 'gadid'], + 'gadinine': ['gadinine', 'indigena'], + 'gadolinite': ['deligation', 'gadolinite', 'gelatinoid'], + 'gadroon': ['dragoon', 'gadroon'], + 'gadroonage': ['dragoonage', 'gadroonage'], + 'gaduin': ['anguid', 'gaduin'], + 'gael': ['gael', 'gale', 'geal'], + 'gaen': ['agen', 'gaen', 'gane', 'gean', 'gena'], + 'gaet': ['gaet', 'gate', 'geat', 'geta'], + 'gaetulan': ['angulate', 'gaetulan'], + 'gager': ['agger', 'gager', 'regga'], + 'gahnite': ['gahnite', 'heating'], + 'gahrwali': ['gahrwali', 'garhwali'], + 'gaiassa': ['assagai', 'gaiassa'], + 'gail': ['gail', 'gali', 'gila', 'glia'], + 'gain': ['gain', 'inga', 'naig', 'ngai'], + 'gaincall': ['gaincall', 'gallican'], + 'gaine': ['angie', 'gaine'], + 'gainer': ['arenig', 'earing', 'gainer', 'reagin', 'regain'], + 'gainless': ['gainless', 'glassine'], + 'gainly': ['gainly', 'laying'], + 'gainsayer': ['asynergia', 'gainsayer'], + 'gainset': ['easting', + 'gainset', + 'genista', + 'ingesta', + 'seating', + 'signate', + 'teasing'], + 'gainstrive': ['gainstrive', 'vinegarist'], + 'gainturn': ['gainturn', 'naturing'], + 'gaiter': ['gaiter', 'tairge', 'triage'], + 'gaize': ['gaize', 'ziega'], + 'gaj': ['gaj', 'jag'], + 'gal': ['gal', 'lag'], + 'gala': ['agal', 'agla', 'alga', 'gala'], + 'galactonic': ['cognatical', 'galactonic'], + 'galatae': ['galatae', 'galatea'], + 'galatea': ['galatae', 'galatea'], + 'gale': ['gael', 'gale', 'geal'], + 'galea': ['algae', 'galea'], + 'galee': ['aegle', 'eagle', 'galee'], + 'galei': ['agiel', 'agile', 'galei'], + 'galeid': ['algedi', 'galeid'], + 'galen': ['agnel', 'angel', 'angle', 'galen', 'genal', 'glean', 'lagen'], + 'galena': ['alnage', 'angela', 'galena', 'lagena'], + 'galenian': ['alangine', 'angelina', 'galenian'], + 'galenic': ['angelic', 'galenic'], + 'galenical': ['angelical', 'englacial', 'galenical'], + 'galenist': ['galenist', 'genitals', 'stealing'], + 'galenite': ['galenite', 'legatine'], + 'galeoid': ['galeoid', 'geoidal'], + 'galera': ['aglare', 'alegar', 'galera', 'laager'], + 'galet': ['aglet', 'galet'], + 'galewort': ['galewort', 'waterlog'], + 'galey': ['agley', 'galey'], + 'galga': ['galga', 'glaga'], + 'gali': ['gail', 'gali', 'gila', 'glia'], + 'galidia': ['agialid', 'galidia'], + 'galik': ['galik', 'glaik'], + 'galilean': 
['galilean', 'gallinae'], + 'galiot': ['galiot', 'latigo'], + 'galla': ['algal', 'galla'], + 'gallate': ['gallate', 'tallage'], + 'gallein': ['gallein', 'galline', 'nigella'], + 'galleria': ['allergia', 'galleria'], + 'gallery': ['allergy', 'gallery', 'largely', 'regally'], + 'galli': ['galli', 'glial'], + 'gallican': ['gaincall', 'gallican'], + 'gallicole': ['collegial', 'gallicole'], + 'gallinae': ['galilean', 'gallinae'], + 'galline': ['gallein', 'galline', 'nigella'], + 'gallnut': ['gallnut', 'nutgall'], + 'galloper': ['galloper', 'regallop'], + 'gallotannate': ['gallotannate', 'tannogallate'], + 'gallotannic': ['gallotannic', 'tannogallic'], + 'gallstone': ['gallstone', 'stonegall'], + 'gallybagger': ['gallybagger', 'gallybeggar'], + 'gallybeggar': ['gallybagger', 'gallybeggar'], + 'galore': ['galore', 'gaoler'], + 'galtonia': ['galtonia', 'notalgia'], + 'galvanopsychic': ['galvanopsychic', 'psychogalvanic'], + 'galvanothermometer': ['galvanothermometer', 'thermogalvanometer'], + 'gam': ['gam', 'mag'], + 'gamaliel': ['gamaliel', 'melalgia'], + 'gamashes': ['gamashes', 'smashage'], + 'gamasid': ['gamasid', 'magadis'], + 'gambado': ['dagomba', 'gambado'], + 'gambier': ['gambier', 'imbarge'], + 'gambler': ['gambler', 'gambrel'], + 'gambrel': ['gambler', 'gambrel'], + 'game': ['egma', 'game', 'mage'], + 'gamely': ['gamely', 'gleamy', 'mygale'], + 'gamene': ['gamene', 'manege', 'menage'], + 'gamete': ['gamete', 'metage'], + 'gametogenic': ['gametogenic', 'gamogenetic', 'geomagnetic'], + 'gamic': ['gamic', 'magic'], + 'gamin': ['gamin', 'mangi'], + 'gaming': ['gaming', 'gigman'], + 'gamma': ['gamma', 'magma'], + 'gammer': ['gammer', 'gramme'], + 'gamogenetic': ['gametogenic', 'gamogenetic', 'geomagnetic'], + 'gamori': ['gamori', 'gomari', 'gromia'], + 'gan': ['gan', 'nag'], + 'ganam': ['amang', 'ganam', 'manga'], + 'ganch': ['chang', 'ganch'], + 'gander': ['danger', 'gander', 'garden', 'ranged'], + 'gandul': ['gandul', 'unglad'], + 'gane': ['agen', 'gaen', 'gane', 'gean', 'gena'], + 'gangan': ['gangan', 'nagnag'], + 'ganger': ['ganger', 'grange', 'nagger'], + 'ganging': ['ganging', 'nagging'], + 'gangism': ['gangism', 'gigsman'], + 'ganglioneuron': ['ganglioneuron', 'neuroganglion'], + 'gangly': ['gangly', 'naggly'], + 'ganguela': ['ganguela', 'language'], + 'gangway': ['gangway', 'waygang'], + 'ganister': ['astringe', 'ganister', 'gantries'], + 'ganoidal': ['diagonal', 'ganoidal', 'gonadial'], + 'ganoidean': ['ganoidean', 'indogaean'], + 'ganoidian': ['agoniadin', 'anangioid', 'ganoidian'], + 'ganosis': ['agnosis', 'ganosis'], + 'gansel': ['angles', 'gansel'], + 'gant': ['gant', 'gnat', 'tang'], + 'ganta': ['ganta', 'tanga'], + 'ganton': ['ganton', 'tongan'], + 'gantries': ['astringe', 'ganister', 'gantries'], + 'gantry': ['gantry', 'gyrant'], + 'ganymede': ['ganymede', 'megadyne'], + 'ganzie': ['agnize', 'ganzie'], + 'gaol': ['gaol', 'goal', 'gola', 'olga'], + 'gaoler': ['galore', 'gaoler'], + 'gaon': ['agon', 'ango', 'gaon', 'goan', 'gona'], + 'gaonic': ['agonic', 'angico', 'gaonic', 'goniac'], + 'gapa': ['gapa', 'paga'], + 'gape': ['gape', 'page', 'peag', 'pega'], + 'gaper': ['gaper', 'grape', 'pager', 'parge'], + 'gar': ['gar', 'gra', 'rag'], + 'gara': ['agar', 'agra', 'gara', 'raga'], + 'garamond': ['dragoman', 'garamond', 'ondagram'], + 'garance': ['carnage', 'cranage', 'garance'], + 'garb': ['brag', 'garb', 'grab'], + 'garbel': ['garbel', 'garble'], + 'garble': ['garbel', 'garble'], + 'garbless': ['bragless', 'garbless'], + 'garce': ['cager', 'garce', 'grace'], + 'garcinia': 
['agaricin', 'garcinia'], + 'gardeen': ['derange', 'enraged', 'gardeen', 'gerenda', 'grandee', 'grenade'], + 'garden': ['danger', 'gander', 'garden', 'ranged'], + 'gardened': ['deranged', 'gardened'], + 'gardener': ['deranger', 'gardener'], + 'gardenful': ['dangerful', 'gardenful'], + 'gardenia': ['drainage', 'gardenia'], + 'gardenin': ['gardenin', 'grenadin'], + 'gardenless': ['dangerless', 'gardenless'], + 'gare': ['ager', 'agre', 'gare', 'gear', 'rage'], + 'gareh': ['gareh', 'gerah'], + 'garetta': ['garetta', 'rattage', 'regatta'], + 'garewaite': ['garewaite', 'waiterage'], + 'garfish': ['garfish', 'ragfish'], + 'garget': ['garget', 'tagger'], + 'gargety': ['gargety', 'raggety'], + 'gargle': ['gargle', 'gregal', 'lagger', 'raggle'], + 'garhwali': ['gahrwali', 'garhwali'], + 'garial': ['argali', 'garial'], + 'garle': ['argel', 'ergal', 'garle', 'glare', 'lager', 'large', 'regal'], + 'garment': ['garment', 'margent'], + 'garmenture': ['garmenture', 'reargument'], + 'garn': ['garn', 'gnar', 'rang'], + 'garnel': ['angler', 'arleng', 'garnel', 'largen', 'rangle', 'regnal'], + 'garner': ['garner', 'ranger'], + 'garnet': ['argent', 'garnet', 'garten', 'tanger'], + 'garneter': ['argenter', 'garneter'], + 'garnetiferous': ['argentiferous', 'garnetiferous'], + 'garnets': ['angster', 'garnets', 'nagster', 'strange'], + 'garnett': ['garnett', 'gnatter', 'gratten', 'tergant'], + 'garnice': ['anergic', 'garnice', 'garniec', 'geranic', 'grecian'], + 'garniec': ['anergic', 'garnice', 'garniec', 'geranic', 'grecian'], + 'garnish': ['garnish', 'rashing'], + 'garnished': ['degarnish', 'garnished'], + 'garnisher': ['garnisher', 'regarnish'], + 'garo': ['argo', 'garo', 'gora'], + 'garran': ['garran', 'ragnar'], + 'garret': ['garret', 'garter', 'grater', 'targer'], + 'garreted': ['garreted', 'gartered'], + 'garroter': ['garroter', 'regrator'], + 'garten': ['argent', 'garnet', 'garten', 'tanger'], + 'garter': ['garret', 'garter', 'grater', 'targer'], + 'gartered': ['garreted', 'gartered'], + 'gartering': ['gartering', 'regrating'], + 'garum': ['garum', 'murga'], + 'gary': ['gary', 'gray'], + 'gas': ['gas', 'sag'], + 'gasan': ['gasan', 'sanga'], + 'gash': ['gash', 'shag'], + 'gasless': ['gasless', 'glasses', 'sagless'], + 'gaslit': ['algist', 'gaslit'], + 'gasoliner': ['gasoliner', 'seignoral'], + 'gasper': ['gasper', 'sparge'], + 'gast': ['gast', 'stag'], + 'gaster': ['gaster', 'stager'], + 'gastrin': ['gastrin', 'staring'], + 'gastroenteritis': ['enterogastritis', 'gastroenteritis'], + 'gastroesophagostomy': ['esophagogastrostomy', 'gastroesophagostomy'], + 'gastrohepatic': ['gastrohepatic', 'hepatogastric'], + 'gastronomic': ['gastronomic', 'monogastric'], + 'gastropathic': ['gastropathic', 'graphostatic'], + 'gastrophrenic': ['gastrophrenic', 'nephrogastric', 'phrenogastric'], + 'gastrular': ['gastrular', 'stragular'], + 'gat': ['gat', 'tag'], + 'gate': ['gaet', 'gate', 'geat', 'geta'], + 'gateman': ['gateman', 'magenta', 'magnate', 'magneta'], + 'gater': ['gater', 'grate', 'great', 'greta', 'retag', 'targe'], + 'gateward': ['drawgate', 'gateward'], + 'gateway': ['gateway', 'getaway', 'waygate'], + 'gatherer': ['gatherer', 'regather'], + 'gator': ['argot', 'gator', 'gotra', 'groat'], + 'gatter': ['gatter', 'target'], + 'gaucho': ['gaucho', 'guacho'], + 'gaufer': ['agrufe', 'gaufer', 'gaufre'], + 'gauffer': ['gauffer', 'gauffre'], + 'gauffre': ['gauffer', 'gauffre'], + 'gaufre': ['agrufe', 'gaufer', 'gaufre'], + 'gaul': ['gaul', 'gula'], + 'gaulin': ['gaulin', 'lingua'], + 'gaulter': ['gaulter', 'tegular'], 
+ 'gaum': ['gaum', 'muga'], + 'gaun': ['gaun', 'guan', 'guna', 'uang'], + 'gaunt': ['gaunt', 'tunga'], + 'gaur': ['gaur', 'guar', 'ruga'], + 'gaura': ['gaura', 'guara'], + 'gaurian': ['anguria', 'gaurian', 'guarani'], + 'gave': ['gave', 'vage', 'vega'], + 'gavyuti': ['gavyuti', 'vaguity'], + 'gaw': ['gaw', 'wag'], + 'gawn': ['gawn', 'gnaw', 'wang'], + 'gay': ['agy', 'gay'], + 'gaz': ['gaz', 'zag'], + 'gazel': ['gazel', 'glaze'], + 'gazer': ['gazer', 'graze'], + 'gazon': ['gazon', 'zogan'], + 'gazy': ['gazy', 'zyga'], + 'geal': ['gael', 'gale', 'geal'], + 'gean': ['agen', 'gaen', 'gane', 'gean', 'gena'], + 'gear': ['ager', 'agre', 'gare', 'gear', 'rage'], + 'geared': ['agreed', 'geared'], + 'gearless': ['eelgrass', 'gearless', 'rageless'], + 'gearman': ['gearman', 'manager'], + 'gearset': ['ergates', 'gearset', 'geaster'], + 'geaster': ['ergates', 'gearset', 'geaster'], + 'geat': ['gaet', 'gate', 'geat', 'geta'], + 'gebur': ['bugre', 'gebur'], + 'ged': ['deg', 'ged'], + 'gedder': ['dredge', 'gedder'], + 'geest': ['egest', 'geest', 'geste'], + 'gegger': ['gegger', 'gregge'], + 'geheimrat': ['geheimrat', 'hermitage'], + 'gein': ['gein', 'gien'], + 'geira': ['geira', 'regia'], + 'geison': ['geison', 'isogen'], + 'geissospermine': ['geissospermine', 'spermiogenesis'], + 'gel': ['gel', 'leg'], + 'gelable': ['gabelle', 'gelable'], + 'gelasian': ['anglaise', 'gelasian'], + 'gelastic': ['gelastic', 'gestical'], + 'gelatin': ['atingle', 'gelatin', 'genital', 'langite', 'telinga'], + 'gelatinate': ['gelatinate', 'nagatelite'], + 'gelatined': ['delignate', 'gelatined'], + 'gelatinizer': ['gelatinizer', 'integralize'], + 'gelatinoid': ['deligation', 'gadolinite', 'gelatinoid'], + 'gelation': ['gelation', 'lagonite', 'legation'], + 'gelatose': ['gelatose', 'segolate'], + 'geldable': ['gabelled', 'geldable'], + 'gelder': ['gelder', 'ledger', 'redleg'], + 'gelding': ['gelding', 'ledging'], + 'gelid': ['gelid', 'glide'], + 'gelidness': ['gelidness', 'glideness'], + 'gelosin': ['gelosin', 'lignose'], + 'gem': ['gem', 'meg'], + 'gemara': ['gemara', 'ramage'], + 'gemaric': ['gemaric', 'grimace', 'megaric'], + 'gemarist': ['gemarist', 'magister', 'sterigma'], + 'gematria': ['gematria', 'maritage'], + 'gemul': ['gemul', 'glume'], + 'gena': ['agen', 'gaen', 'gane', 'gean', 'gena'], + 'genal': ['agnel', 'angel', 'angle', 'galen', 'genal', 'glean', 'lagen'], + 'genarch': ['changer', 'genarch'], + 'gendarme': ['edgerman', 'gendarme'], + 'genear': ['egeran', 'enrage', 'ergane', 'genear', 'genera'], + 'geneat': ['geneat', 'negate', 'tegean'], + 'genera': ['egeran', 'enrage', 'ergane', 'genear', 'genera'], + 'generable': ['generable', 'greenable'], + 'general': ['enlarge', 'general', 'gleaner'], + 'generalist': ['easterling', 'generalist'], + 'generall': ['allergen', 'generall'], + 'generation': ['generation', 'renegation'], + 'generic': ['energic', 'generic'], + 'generical': ['energical', 'generical'], + 'genesiac': ['agenesic', 'genesiac'], + 'genesial': ['ensilage', 'genesial', 'signalee'], + 'genetical': ['clientage', 'genetical'], + 'genetta': ['genetta', 'tentage'], + 'geneura': ['geneura', 'uneager'], + 'geneva': ['avenge', 'geneva', 'vangee'], + 'genial': ['algine', 'genial', 'linage'], + 'genicular': ['genicular', 'neuralgic'], + 'genie': ['eigne', 'genie'], + 'genion': ['genion', 'inogen'], + 'genipa': ['genipa', 'piegan'], + 'genista': ['easting', + 'gainset', + 'genista', + 'ingesta', + 'seating', + 'signate', + 'teasing'], + 'genistein': ['genistein', 'gentisein'], + 'genital': ['atingle', 'gelatin', 
'genital', 'langite', 'telinga'], + 'genitals': ['galenist', 'genitals', 'stealing'], + 'genitival': ['genitival', 'vigilante'], + 'genitocrural': ['crurogenital', 'genitocrural'], + 'genitor': ['ergotin', 'genitor', 'negrito', 'ogtiern', 'trigone'], + 'genitorial': ['genitorial', 'religation'], + 'genitory': ['genitory', 'ortygine'], + 'genitourinary': ['genitourinary', 'urinogenitary'], + 'geniture': ['geniture', 'guerinet'], + 'genizero': ['genizero', 'negroize'], + 'genoa': ['agone', 'genoa'], + 'genoblastic': ['blastogenic', 'genoblastic'], + 'genocidal': ['algedonic', 'genocidal'], + 'genom': ['genom', 'gnome'], + 'genotypical': ['genotypical', 'ptyalogenic'], + 'genre': ['genre', 'green', 'neger', 'reneg'], + 'genro': ['ergon', 'genro', 'goner', 'negro'], + 'gent': ['gent', 'teng'], + 'gentes': ['gentes', 'gesten'], + 'genthite': ['genthite', 'teething'], + 'gentian': ['antigen', 'gentian'], + 'gentianic': ['antigenic', 'gentianic'], + 'gentisein': ['genistein', 'gentisein'], + 'gentle': ['gentle', 'telegn'], + 'gentrice': ['erecting', 'gentrice'], + 'genua': ['augen', 'genua'], + 'genual': ['genual', 'leguan'], + 'genuine': ['genuine', 'ingenue'], + 'genus': ['genus', 'negus'], + 'geo': ['ego', 'geo'], + 'geocentric': ['ectrogenic', 'egocentric', 'geocentric'], + 'geocratic': ['categoric', 'geocratic'], + 'geocronite': ['erotogenic', 'geocronite', 'orogenetic'], + 'geodal': ['algedo', 'geodal'], + 'geode': ['geode', 'ogeed'], + 'geodiatropism': ['diageotropism', 'geodiatropism'], + 'geoduck': ['geoduck', 'goeduck'], + 'geohydrology': ['geohydrology', 'hydrogeology'], + 'geoid': ['diego', 'dogie', 'geoid'], + 'geoidal': ['galeoid', 'geoidal'], + 'geoisotherm': ['geoisotherm', 'isogeotherm'], + 'geomagnetic': ['gametogenic', 'gamogenetic', 'geomagnetic'], + 'geomant': ['geomant', 'magneto', 'megaton', 'montage'], + 'geomantic': ['atmogenic', 'geomantic'], + 'geometrical': ['geometrical', 'glaciometer'], + 'geometrina': ['angiometer', 'ergotamine', 'geometrina'], + 'geon': ['geon', 'gone'], + 'geonim': ['geonim', 'imogen'], + 'georama': ['georama', 'roamage'], + 'geotectonic': ['geotectonic', 'tocogenetic'], + 'geotic': ['geotic', 'goetic'], + 'geotical': ['ectoglia', 'geotical', 'goetical'], + 'geotonic': ['geotonic', 'otogenic'], + 'geoty': ['geoty', 'goety'], + 'ger': ['erg', 'ger', 'reg'], + 'gerah': ['gareh', 'gerah'], + 'geraldine': ['engrailed', 'geraldine'], + 'geranial': ['algerian', 'geranial', 'regalian'], + 'geranic': ['anergic', 'garnice', 'garniec', 'geranic', 'grecian'], + 'geraniol': ['geraniol', 'regional'], + 'geranomorph': ['geranomorph', 'monographer', 'nomographer'], + 'geranyl': ['angerly', 'geranyl'], + 'gerard': ['darger', 'gerard', 'grader', 'redrag', 'regard'], + 'gerastian': ['agrestian', 'gerastian', 'stangeria'], + 'geraty': ['geraty', 'gyrate'], + 'gerb': ['berg', 'gerb'], + 'gerbe': ['gerbe', 'grebe', 'rebeg'], + 'gerbera': ['bargeer', 'gerbera'], + 'gerenda': ['derange', 'enraged', 'gardeen', 'gerenda', 'grandee', 'grenade'], + 'gerendum': ['gerendum', 'unmerged'], + 'gerent': ['gerent', 'regent'], + 'gerenuk': ['gerenuk', 'greenuk'], + 'gerim': ['gerim', 'grime'], + 'gerip': ['gerip', 'gripe'], + 'german': ['engram', 'german', 'manger'], + 'germania': ['germania', 'megarian'], + 'germanics': ['germanics', 'screaming'], + 'germanification': ['germanification', 'remagnification'], + 'germanify': ['germanify', 'remagnify'], + 'germanious': ['germanious', 'gramineous', 'marigenous'], + 'germanist': ['germanist', 'streaming'], + 'germanite': ['germanite', 
'germinate', 'gramenite', 'mangerite'], + 'germanly': ['germanly', 'germanyl'], + 'germanyl': ['germanly', 'germanyl'], + 'germinal': ['germinal', 'maligner', 'malinger'], + 'germinant': ['germinant', 'minargent'], + 'germinate': ['germanite', 'germinate', 'gramenite', 'mangerite'], + 'germon': ['germon', 'monger', 'morgen'], + 'geronomite': ['geronomite', 'goniometer'], + 'geront': ['geront', 'tonger'], + 'gerontal': ['argentol', 'gerontal'], + 'gerontes': ['estrogen', 'gerontes'], + 'gerontic': ['gerontic', 'negrotic'], + 'gerontism': ['gerontism', 'monergist'], + 'gerres': ['gerres', 'serger'], + 'gersum': ['gersum', 'mergus'], + 'gerund': ['dunger', 'gerund', 'greund', 'nudger'], + 'gerundive': ['gerundive', 'ungrieved'], + 'gerusia': ['ergusia', 'gerusia', 'sarigue'], + 'gervas': ['gervas', 'graves'], + 'gervase': ['gervase', 'greaves', 'servage'], + 'ges': ['ges', 'seg'], + 'gesan': ['agnes', 'gesan'], + 'gesith': ['gesith', 'steigh'], + 'gesning': ['gesning', 'ginseng'], + 'gest': ['gest', 'steg'], + 'gestapo': ['gestapo', 'postage'], + 'gestate': ['gestate', 'tagetes'], + 'geste': ['egest', 'geest', 'geste'], + 'gesten': ['gentes', 'gesten'], + 'gestical': ['gelastic', 'gestical'], + 'gesticular': ['gesticular', 'scutigeral'], + 'gesture': ['gesture', 'guester'], + 'get': ['get', 'teg'], + 'geta': ['gaet', 'gate', 'geat', 'geta'], + 'getaway': ['gateway', 'getaway', 'waygate'], + 'gettable': ['begettal', 'gettable'], + 'getup': ['getup', 'upget'], + 'geyerite': ['geyerite', 'tigereye'], + 'ghaist': ['ghaist', 'tagish'], + 'ghent': ['ghent', 'thegn'], + 'ghosty': ['ghosty', 'hogsty'], + 'ghoul': ['ghoul', 'lough'], + 'giansar': ['giansar', 'sarangi'], + 'giant': ['giant', 'tangi', 'tiang'], + 'gib': ['big', 'gib'], + 'gibbon': ['gibbon', 'gobbin'], + 'gibel': ['bilge', 'gibel'], + 'gibing': ['biggin', 'gibing'], + 'gid': ['dig', 'gid'], + 'gideonite': ['diogenite', 'gideonite'], + 'gien': ['gein', 'gien'], + 'gienah': ['gienah', 'hangie'], + 'gif': ['fig', 'gif'], + 'gifola': ['filago', 'gifola'], + 'gifted': ['fidget', 'gifted'], + 'gigman': ['gaming', 'gigman'], + 'gigsman': ['gangism', 'gigsman'], + 'gila': ['gail', 'gali', 'gila', 'glia'], + 'gilaki': ['gilaki', 'giliak'], + 'gilbertese': ['gilbertese', 'selbergite'], + 'gilden': ['dingle', 'elding', 'engild', 'gilden'], + 'gilder': ['gilder', 'girdle', 'glider', 'regild', 'ridgel'], + 'gilding': ['gilding', 'gliding'], + 'gileno': ['eloign', 'gileno', 'legion'], + 'giles': ['giles', 'gilse'], + 'giliak': ['gilaki', 'giliak'], + 'giller': ['giller', 'grille', 'regill'], + 'gilo': ['gilo', 'goli'], + 'gilpy': ['gilpy', 'pigly'], + 'gilse': ['giles', 'gilse'], + 'gim': ['gim', 'mig'], + 'gimel': ['gimel', 'glime'], + 'gimmer': ['gimmer', 'grimme', 'megrim'], + 'gimper': ['gimper', 'impreg'], + 'gin': ['gin', 'ing', 'nig'], + 'ginger': ['ginger', 'nigger'], + 'gingery': ['gingery', 'niggery'], + 'ginglymodi': ['ginglymodi', 'ginglymoid'], + 'ginglymoid': ['ginglymodi', 'ginglymoid'], + 'gink': ['gink', 'king'], + 'ginned': ['ending', 'ginned'], + 'ginner': ['enring', 'ginner'], + 'ginney': ['ginney', 'nignye'], + 'ginseng': ['gesning', 'ginseng'], + 'ginward': ['drawing', 'ginward', 'warding'], + 'gio': ['gio', 'goi'], + 'giornata': ['giornata', 'gratiano'], + 'giornatate': ['giornatate', 'tetragonia'], + 'gip': ['gip', 'pig'], + 'gipper': ['gipper', 'grippe'], + 'girandole': ['girandole', 'negroidal'], + 'girasole': ['girasole', 'seraglio'], + 'girba': ['bragi', 'girba'], + 'gird': ['gird', 'grid'], + 'girder': ['girder', 
'ridger'], + 'girding': ['girding', 'ridging'], + 'girdingly': ['girdingly', 'ridgingly'], + 'girdle': ['gilder', 'girdle', 'glider', 'regild', 'ridgel'], + 'girdler': ['dirgler', 'girdler'], + 'girdling': ['girdling', 'ridgling'], + 'girling': ['girling', 'rigling'], + 'girn': ['girn', 'grin', 'ring'], + 'girny': ['girny', 'ringy'], + 'girondin': ['girondin', 'nonrigid'], + 'girsle': ['girsle', 'gisler', 'glires', 'grilse'], + 'girt': ['girt', 'grit', 'trig'], + 'girth': ['girth', 'grith', 'right'], + 'gish': ['gish', 'sigh'], + 'gisla': ['gisla', 'ligas', 'sigla'], + 'gisler': ['girsle', 'gisler', 'glires', 'grilse'], + 'git': ['git', 'tig'], + 'gitalin': ['gitalin', 'tailing'], + 'gith': ['gith', 'thig'], + 'gitksan': ['gitksan', 'skating', 'takings'], + 'gittern': ['gittern', 'gritten', 'retting'], + 'giustina': ['giustina', 'ignatius'], + 'giver': ['giver', 'vergi'], + 'glaceing': ['cageling', 'glaceing'], + 'glacier': ['glacier', 'gracile'], + 'glaciometer': ['geometrical', 'glaciometer'], + 'gladdener': ['gladdener', 'glandered', 'regladden'], + 'glaga': ['galga', 'glaga'], + 'glaik': ['galik', 'glaik'], + 'glaiket': ['glaiket', 'taglike'], + 'glair': ['argil', 'glair', 'grail'], + 'glaireous': ['aligerous', 'glaireous'], + 'glaister': ['glaister', 'regalist'], + 'glaive': ['glaive', 'vagile'], + 'glance': ['cangle', 'glance'], + 'glancer': ['cangler', 'glancer', 'reclang'], + 'glancingly': ['clangingly', 'glancingly'], + 'glandered': ['gladdener', 'glandered', 'regladden'], + 'glans': ['glans', 'slang'], + 'glare': ['argel', 'ergal', 'garle', 'glare', 'lager', 'large', 'regal'], + 'glariness': ['glariness', 'grainless'], + 'glary': ['glary', 'gyral'], + 'glasser': ['glasser', 'largess'], + 'glasses': ['gasless', 'glasses', 'sagless'], + 'glassie': ['algesis', 'glassie'], + 'glassine': ['gainless', 'glassine'], + 'glaucin': ['glaucin', 'glucina'], + 'glaucine': ['cuailnge', 'glaucine'], + 'glaum': ['algum', 'almug', 'glaum', 'gluma', 'mulga'], + 'glaur': ['glaur', 'gular'], + 'glaury': ['glaury', 'raguly'], + 'glaver': ['glaver', 'gravel'], + 'glaze': ['gazel', 'glaze'], + 'glazy': ['glazy', 'zygal'], + 'gleamy': ['gamely', 'gleamy', 'mygale'], + 'glean': ['agnel', 'angel', 'angle', 'galen', 'genal', 'glean', 'lagen'], + 'gleaner': ['enlarge', 'general', 'gleaner'], + 'gleary': ['argyle', 'gleary'], + 'gleba': ['bagel', 'belga', 'gable', 'gleba'], + 'glebal': ['begall', 'glebal'], + 'glede': ['glede', 'gleed', 'ledge'], + 'gledy': ['gledy', 'ledgy'], + 'gleed': ['glede', 'gleed', 'ledge'], + 'gleeman': ['gleeman', 'melange'], + 'glia': ['gail', 'gali', 'gila', 'glia'], + 'gliadin': ['dialing', 'gliadin'], + 'glial': ['galli', 'glial'], + 'glibness': ['beslings', 'blessing', 'glibness'], + 'glidder': ['glidder', 'griddle'], + 'glide': ['gelid', 'glide'], + 'glideness': ['gelidness', 'glideness'], + 'glider': ['gilder', 'girdle', 'glider', 'regild', 'ridgel'], + 'gliding': ['gilding', 'gliding'], + 'glime': ['gimel', 'glime'], + 'glink': ['glink', 'kling'], + 'glires': ['girsle', 'gisler', 'glires', 'grilse'], + 'glisten': ['glisten', 'singlet'], + 'glister': ['glister', 'gristle'], + 'glitnir': ['glitnir', 'ritling'], + 'glitter': ['glitter', 'grittle'], + 'gloater': ['argolet', 'gloater', 'legator'], + 'gloating': ['gloating', 'goatling'], + 'globate': ['boltage', 'globate'], + 'globe': ['bogle', 'globe'], + 'globin': ['globin', 'goblin', 'lobing'], + 'gloea': ['gloea', 'legoa'], + 'glome': ['glome', 'golem', 'molge'], + 'glomerate': ['algometer', 'glomerate'], + 'glore': ['glore', 
'ogler'], + 'gloria': ['gloria', 'larigo', 'logria'], + 'gloriana': ['argolian', 'gloriana'], + 'gloriette': ['gloriette', 'rigolette'], + 'glorifiable': ['frigolabile', 'glorifiable'], + 'glossed': ['dogless', 'glossed', 'godless'], + 'glosser': ['glosser', 'regloss'], + 'glossitic': ['glossitic', 'logistics'], + 'glossohyal': ['glossohyal', 'hyoglossal'], + 'glossolabial': ['glossolabial', 'labioglossal'], + 'glossolabiolaryngeal': ['glossolabiolaryngeal', 'labioglossolaryngeal'], + 'glossolabiopharyngeal': ['glossolabiopharyngeal', 'labioglossopharyngeal'], + 'glottid': ['glottid', 'goldtit'], + 'glover': ['glover', 'grovel'], + 'gloveress': ['gloveress', 'groveless'], + 'glow': ['glow', 'gowl'], + 'glower': ['glower', 'reglow'], + 'gloy': ['gloy', 'logy'], + 'glucemia': ['glucemia', 'mucilage'], + 'glucina': ['glaucin', 'glucina'], + 'glucine': ['glucine', 'lucigen'], + 'glucinum': ['cingulum', 'glucinum'], + 'glucosane': ['consulage', 'glucosane'], + 'glue': ['glue', 'gule', 'luge'], + 'gluer': ['gluer', 'gruel', 'luger'], + 'gluma': ['algum', 'almug', 'glaum', 'gluma', 'mulga'], + 'glume': ['gemul', 'glume'], + 'glumose': ['glumose', 'lugsome'], + 'gluten': ['englut', 'gluten', 'ungelt'], + 'glutin': ['glutin', 'luting', 'ungilt'], + 'glutter': ['glutter', 'guttler'], + 'glycerate': ['electragy', 'glycerate'], + 'glycerinize': ['glycerinize', 'glycerizine'], + 'glycerizine': ['glycerinize', 'glycerizine'], + 'glycerophosphate': ['glycerophosphate', 'phosphoglycerate'], + 'glycocin': ['glycocin', 'glyconic'], + 'glyconic': ['glycocin', 'glyconic'], + 'glycosine': ['glycosine', 'lysogenic'], + 'glycosuria': ['glycosuria', 'graciously'], + 'gnaeus': ['gnaeus', 'unsage'], + 'gnaphalium': ['gnaphalium', 'phalangium'], + 'gnar': ['garn', 'gnar', 'rang'], + 'gnarled': ['dangler', 'gnarled'], + 'gnash': ['gnash', 'shang'], + 'gnat': ['gant', 'gnat', 'tang'], + 'gnatho': ['gnatho', 'thonga'], + 'gnathotheca': ['chaetognath', 'gnathotheca'], + 'gnatling': ['gnatling', 'tangling'], + 'gnatter': ['garnett', 'gnatter', 'gratten', 'tergant'], + 'gnaw': ['gawn', 'gnaw', 'wang'], + 'gnetum': ['gnetum', 'nutmeg'], + 'gnome': ['genom', 'gnome'], + 'gnomic': ['coming', 'gnomic'], + 'gnomist': ['gnomist', 'mosting'], + 'gnomonic': ['gnomonic', 'oncoming'], + 'gnomonical': ['cognominal', 'gnomonical'], + 'gnostic': ['costing', 'gnostic'], + 'gnostical': ['gnostical', 'nostalgic'], + 'gnu': ['gnu', 'gun'], + 'go': ['go', 'og'], + 'goa': ['ago', 'goa'], + 'goad': ['dago', 'goad'], + 'goal': ['gaol', 'goal', 'gola', 'olga'], + 'goan': ['agon', 'ango', 'gaon', 'goan', 'gona'], + 'goat': ['goat', 'toag', 'toga'], + 'goatee': ['goatee', 'goetae'], + 'goatlike': ['goatlike', 'togalike'], + 'goatling': ['gloating', 'goatling'], + 'goatly': ['goatly', 'otalgy'], + 'gob': ['bog', 'gob'], + 'goban': ['bogan', 'goban'], + 'gobbe': ['bebog', 'begob', 'gobbe'], + 'gobbin': ['gibbon', 'gobbin'], + 'gobelin': ['gobelin', 'gobline', 'ignoble', 'inglobe'], + 'gobian': ['bagnio', 'gabion', 'gobian'], + 'goblet': ['boglet', 'goblet'], + 'goblin': ['globin', 'goblin', 'lobing'], + 'gobline': ['gobelin', 'gobline', 'ignoble', 'inglobe'], + 'goblinry': ['boringly', 'goblinry'], + 'gobo': ['bogo', 'gobo'], + 'goby': ['bogy', 'bygo', 'goby'], + 'goclenian': ['congenial', 'goclenian'], + 'god': ['dog', 'god'], + 'goddam': ['goddam', 'mogdad'], + 'gode': ['doeg', 'doge', 'gode'], + 'godhead': ['doghead', 'godhead'], + 'godhood': ['doghood', 'godhood'], + 'godless': ['dogless', 'glossed', 'godless'], + 'godlike': ['doglike', 
'godlike'], + 'godling': ['godling', 'lodging'], + 'godly': ['dogly', 'godly', 'goldy'], + 'godship': ['dogship', 'godship'], + 'godwinian': ['downingia', 'godwinian'], + 'goeduck': ['geoduck', 'goeduck'], + 'goel': ['egol', 'goel', 'loge', 'ogle', 'oleg'], + 'goer': ['goer', 'gore', 'ogre'], + 'goes': ['goes', 'sego'], + 'goetae': ['goatee', 'goetae'], + 'goetic': ['geotic', 'goetic'], + 'goetical': ['ectoglia', 'geotical', 'goetical'], + 'goety': ['geoty', 'goety'], + 'goglet': ['goglet', 'toggel', 'toggle'], + 'goi': ['gio', 'goi'], + 'goidel': ['goidel', 'goldie'], + 'goitral': ['goitral', 'larigot', 'ligator'], + 'gol': ['gol', 'log'], + 'gola': ['gaol', 'goal', 'gola', 'olga'], + 'golden': ['engold', 'golden'], + 'goldenmouth': ['goldenmouth', 'longmouthed'], + 'golder': ['golder', 'lodger'], + 'goldie': ['goidel', 'goldie'], + 'goldtit': ['glottid', 'goldtit'], + 'goldy': ['dogly', 'godly', 'goldy'], + 'golee': ['eloge', 'golee'], + 'golem': ['glome', 'golem', 'molge'], + 'golf': ['flog', 'golf'], + 'golfer': ['golfer', 'reflog'], + 'goli': ['gilo', 'goli'], + 'goliard': ['argolid', 'goliard'], + 'golo': ['golo', 'gool'], + 'goma': ['goma', 'ogam'], + 'gomari': ['gamori', 'gomari', 'gromia'], + 'gomart': ['gomart', 'margot'], + 'gomphrena': ['gomphrena', 'nephogram'], + 'gon': ['gon', 'nog'], + 'gona': ['agon', 'ango', 'gaon', 'goan', 'gona'], + 'gonad': ['donga', 'gonad'], + 'gonadial': ['diagonal', 'ganoidal', 'gonadial'], + 'gonal': ['along', 'gonal', 'lango', 'longa', 'nogal'], + 'gond': ['dong', 'gond'], + 'gondi': ['dingo', 'doing', 'gondi', 'gonid'], + 'gondola': ['dongola', 'gondola'], + 'gondolier': ['gondolier', 'negroloid'], + 'gone': ['geon', 'gone'], + 'goner': ['ergon', 'genro', 'goner', 'negro'], + 'gonesome': ['gonesome', 'osmogene'], + 'gongoresque': ['gongoresque', 'gorgonesque'], + 'gonia': ['gonia', 'ngaio', 'nogai'], + 'goniac': ['agonic', 'angico', 'gaonic', 'goniac'], + 'goniale': ['goniale', 'noilage'], + 'goniaster': ['goniaster', 'orangeist'], + 'goniatitid': ['digitation', 'goniatitid'], + 'gonid': ['dingo', 'doing', 'gondi', 'gonid'], + 'gonidia': ['angioid', 'gonidia'], + 'gonidiferous': ['gonidiferous', 'indigoferous'], + 'goniometer': ['geronomite', 'goniometer'], + 'gonomery': ['gonomery', 'merogony'], + 'gonosome': ['gonosome', 'mongoose'], + 'gonyocele': ['coelogyne', 'gonyocele'], + 'gonys': ['gonys', 'songy'], + 'goober': ['booger', 'goober'], + 'goodyear': ['goodyear', 'goodyera'], + 'goodyera': ['goodyear', 'goodyera'], + 'goof': ['fogo', 'goof'], + 'goofer': ['forego', 'goofer'], + 'gool': ['golo', 'gool'], + 'gools': ['gools', 'logos'], + 'goop': ['goop', 'pogo'], + 'gor': ['gor', 'rog'], + 'gora': ['argo', 'garo', 'gora'], + 'goral': ['algor', 'argol', 'goral', 'largo'], + 'goran': ['angor', + 'argon', + 'goran', + 'grano', + 'groan', + 'nagor', + 'orang', + 'organ', + 'rogan', + 'ronga'], + 'gorb': ['borg', 'brog', 'gorb'], + 'gorbal': ['brolga', 'gorbal'], + 'gorce': ['corge', 'gorce'], + 'gordian': ['gordian', 'idorgan', 'roading'], + 'gordon': ['drongo', 'gordon'], + 'gordonia': ['gordonia', 'organoid', 'rigadoon'], + 'gore': ['goer', 'gore', 'ogre'], + 'gorer': ['gorer', 'roger'], + 'gorge': ['gorge', 'grego'], + 'gorged': ['dogger', 'gorged'], + 'gorger': ['gorger', 'gregor'], + 'gorgerin': ['gorgerin', 'ringgoer'], + 'gorgonesque': ['gongoresque', 'gorgonesque'], + 'goric': ['corgi', 'goric', 'orgic'], + 'goring': ['goring', 'gringo'], + 'gorse': ['gorse', 'soger'], + 'gortonian': ['gortonian', 'organotin'], + 'gory': ['gory', 'gyro', 
'orgy'], + 'gos': ['gos', 'sog'], + 'gosain': ['gosain', 'isagon', 'sagoin'], + 'gosh': ['gosh', 'shog'], + 'gospel': ['gospel', 'spogel'], + 'gossipry': ['gossipry', 'gryposis'], + 'got': ['got', 'tog'], + 'gotra': ['argot', 'gator', 'gotra', 'groat'], + 'goup': ['goup', 'ogpu', 'upgo'], + 'gourde': ['drogue', 'gourde'], + 'gout': ['gout', 'toug'], + 'goutish': ['goutish', 'outsigh'], + 'gowan': ['gowan', 'wagon', 'wonga'], + 'gowdnie': ['gowdnie', 'widgeon'], + 'gowl': ['glow', 'gowl'], + 'gown': ['gown', 'wong'], + 'goyin': ['goyin', 'yogin'], + 'gra': ['gar', 'gra', 'rag'], + 'grab': ['brag', 'garb', 'grab'], + 'grabble': ['gabbler', 'grabble'], + 'graben': ['banger', 'engarb', 'graben'], + 'grace': ['cager', 'garce', 'grace'], + 'gracile': ['glacier', 'gracile'], + 'graciously': ['glycosuria', 'graciously'], + 'grad': ['darg', 'drag', 'grad'], + 'gradation': ['gradation', 'indagator', 'tanagroid'], + 'grade': ['edgar', 'grade'], + 'graded': ['gadder', 'graded'], + 'grader': ['darger', 'gerard', 'grader', 'redrag', 'regard'], + 'gradient': ['gradient', 'treading'], + 'gradienter': ['gradienter', 'intergrade'], + 'gradientia': ['gradientia', 'grantiidae'], + 'gradin': ['daring', 'dingar', 'gradin'], + 'gradine': ['degrain', 'deraign', 'deringa', 'gradine', 'grained', 'reading'], + 'grading': ['grading', 'niggard'], + 'graeae': ['aerage', 'graeae'], + 'graeme': ['graeme', 'meager', 'meagre'], + 'grafter': ['grafter', 'regraft'], + 'graian': ['graian', 'nagari'], + 'grail': ['argil', 'glair', 'grail'], + 'grailer': ['grailer', 'reglair'], + 'grain': ['agrin', 'grain'], + 'grained': ['degrain', 'deraign', 'deringa', 'gradine', 'grained', 'reading'], + 'grainer': ['earring', 'grainer'], + 'grainless': ['glariness', 'grainless'], + 'graith': ['aright', 'graith'], + 'grallina': ['grallina', 'granilla'], + 'gralline': ['allergin', 'gralline'], + 'grame': ['grame', 'marge', 'regma'], + 'gramenite': ['germanite', 'germinate', 'gramenite', 'mangerite'], + 'gramineous': ['germanious', 'gramineous', 'marigenous'], + 'graminiform': ['graminiform', 'marginiform'], + 'graminous': ['graminous', 'ignoramus'], + 'gramme': ['gammer', 'gramme'], + 'gramophonic': ['gramophonic', 'monographic', 'nomographic', 'phonogramic'], + 'gramophonical': ['gramophonical', 'monographical', 'nomographical'], + 'gramophonically': ['gramophonically', + 'monographically', + 'nomographically', + 'phonogramically'], + 'gramophonist': ['gramophonist', 'monographist'], + 'granadine': ['granadine', 'grenadian'], + 'granate': ['argante', 'granate', 'tanager'], + 'granatum': ['armgaunt', 'granatum'], + 'grand': ['drang', 'grand'], + 'grandam': ['dragman', 'grandam', 'grandma'], + 'grandee': ['derange', 'enraged', 'gardeen', 'gerenda', 'grandee', 'grenade'], + 'grandeeism': ['grandeeism', 'renegadism'], + 'grandeur': ['grandeur', 'unregard'], + 'grandeval': ['grandeval', 'landgrave'], + 'grandiose': ['grandiose', 'sargonide'], + 'grandma': ['dragman', 'grandam', 'grandma'], + 'grandparental': ['grandparental', 'grandpaternal'], + 'grandpaternal': ['grandparental', 'grandpaternal'], + 'grane': ['anger', 'areng', 'grane', 'range'], + 'grange': ['ganger', 'grange', 'nagger'], + 'grangousier': ['grangousier', 'gregarinous'], + 'granilla': ['grallina', 'granilla'], + 'granite': ['angrite', 'granite', 'ingrate', 'tangier', 'tearing', 'tigrean'], + 'granivore': ['granivore', 'overgrain'], + 'grano': ['angor', + 'argon', + 'goran', + 'grano', + 'groan', + 'nagor', + 'orang', + 'organ', + 'rogan', + 'ronga'], + 'granophyre': ['granophyre', 
'renography'], + 'grantee': ['grantee', 'greaten', 'reagent', 'rentage'], + 'granter': ['granter', 'regrant'], + 'granth': ['granth', 'thrang'], + 'grantiidae': ['gradientia', 'grantiidae'], + 'granula': ['angular', 'granula'], + 'granule': ['granule', 'unlarge', 'unregal'], + 'granulite': ['granulite', 'traguline'], + 'grape': ['gaper', 'grape', 'pager', 'parge'], + 'graperoot': ['graperoot', 'prorogate'], + 'graphical': ['algraphic', 'graphical'], + 'graphically': ['calligraphy', 'graphically'], + 'graphologic': ['graphologic', 'logographic'], + 'graphological': ['graphological', 'logographical'], + 'graphology': ['graphology', 'logography'], + 'graphometer': ['graphometer', 'meteorgraph'], + 'graphophonic': ['graphophonic', 'phonographic'], + 'graphostatic': ['gastropathic', 'graphostatic'], + 'graphotypic': ['graphotypic', 'pictography', 'typographic'], + 'grapsidae': ['disparage', 'grapsidae'], + 'grasp': ['grasp', 'sprag'], + 'grasper': ['grasper', 'regrasp', 'sparger'], + 'grasser': ['grasser', 'regrass'], + 'grasshopper': ['grasshopper', 'hoppergrass'], + 'grassman': ['grassman', 'mangrass'], + 'grat': ['grat', 'trag'], + 'grate': ['gater', 'grate', 'great', 'greta', 'retag', 'targe'], + 'grateman': ['grateman', 'mangrate', 'mentagra', 'targeman'], + 'grater': ['garret', 'garter', 'grater', 'targer'], + 'gratiano': ['giornata', 'gratiano'], + 'graticule': ['curtilage', 'cutigeral', 'graticule'], + 'gratiolin': ['gratiolin', 'largition', 'tailoring'], + 'gratis': ['gratis', 'striga'], + 'gratten': ['garnett', 'gnatter', 'gratten', 'tergant'], + 'graupel': ['earplug', 'graupel', 'plaguer'], + 'gravamen': ['gravamen', 'graveman'], + 'gravel': ['glaver', 'gravel'], + 'graveman': ['gravamen', 'graveman'], + 'graves': ['gervas', 'graves'], + 'gravure': ['gravure', 'verruga'], + 'gray': ['gary', 'gray'], + 'grayling': ['grayling', 'ragingly'], + 'graze': ['gazer', 'graze'], + 'greaser': ['argeers', 'greaser', 'serrage'], + 'great': ['gater', 'grate', 'great', 'greta', 'retag', 'targe'], + 'greaten': ['grantee', 'greaten', 'reagent', 'rentage'], + 'greater': ['greater', 'regrate', 'terrage'], + 'greaves': ['gervase', 'greaves', 'servage'], + 'grebe': ['gerbe', 'grebe', 'rebeg'], + 'grecian': ['anergic', 'garnice', 'garniec', 'geranic', 'grecian'], + 'grecomania': ['ergomaniac', 'grecomania'], + 'greed': ['edger', 'greed'], + 'green': ['genre', 'green', 'neger', 'reneg'], + 'greenable': ['generable', 'greenable'], + 'greener': ['greener', 'regreen', 'reneger'], + 'greenish': ['greenish', 'sheering'], + 'greenland': ['englander', 'greenland'], + 'greenuk': ['gerenuk', 'greenuk'], + 'greeny': ['energy', 'greeny', 'gyrene'], + 'greet': ['egret', 'greet', 'reget'], + 'greeter': ['greeter', 'regreet'], + 'gregal': ['gargle', 'gregal', 'lagger', 'raggle'], + 'gregarian': ['gregarian', 'gregarina'], + 'gregarina': ['gregarian', 'gregarina'], + 'gregarinous': ['grangousier', 'gregarinous'], + 'grege': ['egger', 'grege'], + 'gregge': ['gegger', 'gregge'], + 'grego': ['gorge', 'grego'], + 'gregor': ['gorger', 'gregor'], + 'greige': ['greige', 'reggie'], + 'grein': ['grein', 'inger', 'nigre', 'regin', 'reign', 'ringe'], + 'gremial': ['gremial', 'lamiger'], + 'gremlin': ['gremlin', 'mingler'], + 'grenade': ['derange', 'enraged', 'gardeen', 'gerenda', 'grandee', 'grenade'], + 'grenadian': ['granadine', 'grenadian'], + 'grenadier': ['earringed', 'grenadier'], + 'grenadin': ['gardenin', 'grenadin'], + 'grenadine': ['endearing', 'engrained', 'grenadine'], + 'greta': ['gater', 'grate', 'great', 'greta', 
'retag', 'targe'], + 'gretel': ['gretel', 'reglet'], + 'greund': ['dunger', 'gerund', 'greund', 'nudger'], + 'grewia': ['earwig', 'grewia'], + 'grey': ['grey', 'gyre'], + 'grid': ['gird', 'grid'], + 'griddle': ['glidder', 'griddle'], + 'gride': ['dirge', 'gride', 'redig', 'ridge'], + 'gridelin': ['dreiling', 'gridelin'], + 'grieve': ['grieve', 'regive'], + 'grieved': ['diverge', 'grieved'], + 'grille': ['giller', 'grille', 'regill'], + 'grilse': ['girsle', 'gisler', 'glires', 'grilse'], + 'grimace': ['gemaric', 'grimace', 'megaric'], + 'grime': ['gerim', 'grime'], + 'grimme': ['gimmer', 'grimme', 'megrim'], + 'grin': ['girn', 'grin', 'ring'], + 'grinder': ['grinder', 'regrind'], + 'grindle': ['dringle', 'grindle'], + 'gringo': ['goring', 'gringo'], + 'grip': ['grip', 'prig'], + 'gripe': ['gerip', 'gripe'], + 'gripeful': ['fireplug', 'gripeful'], + 'griper': ['griper', 'regrip'], + 'gripman': ['gripman', 'prigman', 'ramping'], + 'grippe': ['gipper', 'grippe'], + 'grisounite': ['grisounite', 'grisoutine', 'integrious'], + 'grisoutine': ['grisounite', 'grisoutine', 'integrious'], + 'grist': ['grist', 'grits', 'strig'], + 'gristle': ['glister', 'gristle'], + 'grit': ['girt', 'grit', 'trig'], + 'grith': ['girth', 'grith', 'right'], + 'grits': ['grist', 'grits', 'strig'], + 'gritten': ['gittern', 'gritten', 'retting'], + 'grittle': ['glitter', 'grittle'], + 'grivna': ['grivna', 'raving'], + 'grizzel': ['grizzel', 'grizzle'], + 'grizzle': ['grizzel', 'grizzle'], + 'groan': ['angor', + 'argon', + 'goran', + 'grano', + 'groan', + 'nagor', + 'orang', + 'organ', + 'rogan', + 'ronga'], + 'groaner': ['groaner', 'oranger', 'organer'], + 'groaning': ['groaning', 'organing'], + 'groat': ['argot', 'gator', 'gotra', 'groat'], + 'grobian': ['biorgan', 'grobian'], + 'groined': ['groined', 'negroid'], + 'gromia': ['gamori', 'gomari', 'gromia'], + 'groove': ['groove', 'overgo'], + 'grope': ['grope', 'porge'], + 'groper': ['groper', 'porger'], + 'groset': ['groset', 'storge'], + 'grossen': ['engross', 'grossen'], + 'grot': ['grot', 'trog'], + 'grotian': ['grotian', 'trigona'], + 'grotto': ['grotto', 'torgot'], + 'grounded': ['grounded', 'underdog', 'undergod'], + 'grouper': ['grouper', 'regroup'], + 'grouse': ['grouse', 'rugose'], + 'grousy': ['grousy', 'gyrous'], + 'grovel': ['glover', 'grovel'], + 'groveless': ['gloveress', 'groveless'], + 'growan': ['awrong', 'growan'], + 'grower': ['grower', 'regrow'], + 'grown': ['grown', 'wrong'], + 'grub': ['burg', 'grub'], + 'grudge': ['grudge', 'rugged'], + 'grudger': ['drugger', 'grudger'], + 'grudgery': ['druggery', 'grudgery'], + 'grue': ['grue', 'urge'], + 'gruel': ['gluer', 'gruel', 'luger'], + 'gruelly': ['gruelly', 'gullery'], + 'grues': ['grues', 'surge'], + 'grun': ['grun', 'rung'], + 'grush': ['grush', 'shrug'], + 'grusinian': ['grusinian', 'unarising'], + 'grutten': ['grutten', 'turgent'], + 'gryposis': ['gossipry', 'gryposis'], + 'guacho': ['gaucho', 'guacho'], + 'guan': ['gaun', 'guan', 'guna', 'uang'], + 'guanamine': ['guanamine', 'guineaman'], + 'guanine': ['anguine', 'guanine', 'guinean'], + 'guar': ['gaur', 'guar', 'ruga'], + 'guara': ['gaura', 'guara'], + 'guarani': ['anguria', 'gaurian', 'guarani'], + 'guarantorship': ['guarantorship', 'uranographist'], + 'guardeen': ['dungaree', 'guardeen', 'unagreed', 'underage', 'ungeared'], + 'guarder': ['guarder', 'reguard'], + 'guatusan': ['augustan', 'guatusan'], + 'gud': ['dug', 'gud'], + 'gude': ['degu', 'gude'], + 'guenon': ['guenon', 'ungone'], + 'guepard': ['guepard', 'upgrade'], + 'guerdon': ['guerdon', 
'undergo', 'ungored'], + 'guerdoner': ['guerdoner', 'reundergo', 'undergoer', 'undergore'], + 'guerinet': ['geniture', 'guerinet'], + 'guester': ['gesture', 'guester'], + 'guetar': ['argute', 'guetar', 'rugate', 'tuareg'], + 'guetare': ['erugate', 'guetare'], + 'guha': ['augh', 'guha'], + 'guiana': ['guiana', 'iguana'], + 'guib': ['bugi', 'guib'], + 'guineaman': ['guanamine', 'guineaman'], + 'guinean': ['anguine', 'guanine', 'guinean'], + 'guiser': ['guiser', 'sergiu'], + 'gul': ['gul', 'lug'], + 'gula': ['gaul', 'gula'], + 'gulae': ['gulae', 'legua'], + 'gular': ['glaur', 'gular'], + 'gularis': ['agrilus', 'gularis'], + 'gulden': ['gulden', 'lunged'], + 'gule': ['glue', 'gule', 'luge'], + 'gules': ['gules', 'gusle'], + 'gullery': ['gruelly', 'gullery'], + 'gullible': ['bluegill', 'gullible'], + 'gulonic': ['gulonic', 'unlogic'], + 'gulp': ['gulp', 'plug'], + 'gulpin': ['gulpin', 'puling'], + 'gum': ['gum', 'mug'], + 'gumbo': ['bogum', 'gumbo'], + 'gumshoe': ['gumshoe', 'hugsome'], + 'gumweed': ['gumweed', 'mugweed'], + 'gun': ['gnu', 'gun'], + 'guna': ['gaun', 'guan', 'guna', 'uang'], + 'gunate': ['gunate', 'tangue'], + 'gundi': ['gundi', 'undig'], + 'gundy': ['dungy', 'gundy'], + 'gunk': ['gunk', 'kung'], + 'gunl': ['gunl', 'lung'], + 'gunnership': ['gunnership', 'unsphering'], + 'gunreach': ['gunreach', 'uncharge'], + 'gunsel': ['gunsel', 'selung', 'slunge'], + 'gunshot': ['gunshot', 'shotgun', 'uhtsong'], + 'gunster': ['gunster', 'surgent'], + 'gunter': ['gunter', 'gurnet', 'urgent'], + 'gup': ['gup', 'pug'], + 'gur': ['gur', 'rug'], + 'gurgeon': ['gurgeon', 'ungorge'], + 'gurgle': ['gurgle', 'lugger', 'ruggle'], + 'gurian': ['gurian', 'ugrian'], + 'guric': ['guric', 'ugric'], + 'gurl': ['gurl', 'lurg'], + 'gurnard': ['drungar', 'gurnard'], + 'gurnet': ['gunter', 'gurnet', 'urgent'], + 'gurt': ['gurt', 'trug'], + 'gush': ['gush', 'shug', 'sugh'], + 'gusher': ['gusher', 'regush'], + 'gusle': ['gules', 'gusle'], + 'gust': ['gust', 'stug'], + 'gut': ['gut', 'tug'], + 'gutless': ['gutless', 'tugless'], + 'gutlike': ['gutlike', 'tuglike'], + 'gutnish': ['gutnish', 'husting', 'unsight'], + 'guttler': ['glutter', 'guttler'], + 'guttular': ['guttular', 'guttural'], + 'guttural': ['guttular', 'guttural'], + 'gweed': ['gweed', 'wedge'], + 'gymnasic': ['gymnasic', 'syngamic'], + 'gymnastic': ['gymnastic', 'nystagmic'], + 'gynandrous': ['androgynus', 'gynandrous'], + 'gynerium': ['eryngium', 'gynerium'], + 'gynospore': ['gynospore', 'sporogeny'], + 'gypsine': ['gypsine', 'pigsney'], + 'gyral': ['glary', 'gyral'], + 'gyrant': ['gantry', 'gyrant'], + 'gyrate': ['geraty', 'gyrate'], + 'gyration': ['gyration', 'organity', 'ortygian'], + 'gyre': ['grey', 'gyre'], + 'gyrene': ['energy', 'greeny', 'gyrene'], + 'gyro': ['gory', 'gyro', 'orgy'], + 'gyroma': ['gyroma', 'morgay'], + 'gyromitra': ['gyromitra', 'migratory'], + 'gyrophora': ['gyrophora', 'orography'], + 'gyrous': ['grousy', 'gyrous'], + 'gyrus': ['gyrus', 'surgy'], + 'ha': ['ah', 'ha'], + 'haberdine': ['haberdine', 'hebridean'], + 'habile': ['habile', 'halebi'], + 'habiri': ['bihari', 'habiri'], + 'habiru': ['brahui', 'habiru'], + 'habit': ['baith', 'habit'], + 'habitan': ['abthain', 'habitan'], + 'habitat': ['habitat', 'tabitha'], + 'habited': ['habited', 'thebaid'], + 'habitus': ['habitus', 'ushabti'], + 'habnab': ['babhan', 'habnab'], + 'hacienda': ['chanidae', 'hacienda'], + 'hackin': ['hackin', 'kachin'], + 'hackle': ['hackle', 'lekach'], + 'hackler': ['chalker', 'hackler'], + 'hackly': ['chalky', 'hackly'], + 'hacktree': ['eckehart', 
'hacktree'], + 'hackwood': ['hackwood', 'woodhack'], + 'hacky': ['chyak', 'hacky'], + 'had': ['dah', 'dha', 'had'], + 'hadden': ['hadden', 'handed'], + 'hade': ['hade', 'head'], + 'hades': ['deash', 'hades', 'sadhe', 'shade'], + 'hadji': ['hadji', 'jihad'], + 'haec': ['ache', 'each', 'haec'], + 'haem': ['ahem', 'haem', 'hame'], + 'haet': ['ahet', 'haet', 'hate', 'heat', 'thea'], + 'hafgan': ['afghan', 'hafgan'], + 'hafter': ['father', 'freath', 'hafter'], + 'hageen': ['hageen', 'hangee'], + 'hailse': ['elisha', 'hailse', 'sheila'], + 'hainan': ['hainan', 'nahani'], + 'hair': ['ahir', 'hair'], + 'hairband': ['bhandari', 'hairband'], + 'haired': ['dehair', 'haired'], + 'hairen': ['hairen', 'hernia'], + 'hairlet': ['hairlet', 'therial'], + 'hairstone': ['hairstone', 'hortensia'], + 'hairup': ['hairup', 'rupiah'], + 'hak': ['hak', 'kha'], + 'hakam': ['hakam', 'makah'], + 'hakea': ['ekaha', 'hakea'], + 'hakim': ['hakim', 'khami'], + 'haku': ['haku', 'kahu'], + 'halal': ['allah', 'halal'], + 'halbert': ['blather', 'halbert'], + 'hale': ['hale', 'heal', 'leah'], + 'halebi': ['habile', 'halebi'], + 'halenia': ['ainaleh', 'halenia'], + 'halesome': ['halesome', 'healsome'], + 'halicore': ['halicore', 'heroical'], + 'haliotidae': ['aethalioid', 'haliotidae'], + 'hallan': ['hallan', 'nallah'], + 'hallower': ['hallower', 'rehallow'], + 'halma': ['halma', 'hamal'], + 'halogeton': ['halogeton', 'theogonal'], + 'haloid': ['dihalo', 'haloid'], + 'halophile': ['halophile', 'philohela'], + 'halophytism': ['halophytism', 'hylopathism'], + 'hals': ['hals', 'lash'], + 'halse': ['halse', 'leash', 'selah', 'shale', 'sheal', 'shela'], + 'halsen': ['halsen', 'hansel', 'lanseh'], + 'halt': ['halt', 'lath'], + 'halter': ['arthel', 'halter', 'lather', 'thaler'], + 'halterbreak': ['halterbreak', 'leatherbark'], + 'halting': ['halting', 'lathing', 'thingal'], + 'halve': ['halve', 'havel'], + 'halver': ['halver', 'lavehr'], + 'ham': ['ham', 'mah'], + 'hamal': ['halma', 'hamal'], + 'hame': ['ahem', 'haem', 'hame'], + 'hameil': ['hameil', 'hiemal'], + 'hamel': ['hamel', 'hemal'], + 'hamfatter': ['aftermath', 'hamfatter'], + 'hami': ['hami', 'hima', 'mahi'], + 'hamital': ['hamital', 'thalami'], + 'hamites': ['atheism', 'hamites'], + 'hamlet': ['hamlet', 'malthe'], + 'hammerer': ['hammerer', 'rehammer'], + 'hamsa': ['hamsa', 'masha', 'shama'], + 'hamulites': ['hamulites', 'shulamite'], + 'hamus': ['hamus', 'musha'], + 'hanaster': ['hanaster', 'sheratan'], + 'hance': ['achen', 'chane', 'chena', 'hance'], + 'hand': ['dhan', 'hand'], + 'handbook': ['bandhook', 'handbook'], + 'handed': ['hadden', 'handed'], + 'hander': ['hander', 'harden'], + 'handicapper': ['handicapper', 'prehandicap'], + 'handscrape': ['handscrape', 'scaphander'], + 'handstone': ['handstone', 'stonehand'], + 'handwork': ['handwork', 'workhand'], + 'hangar': ['arghan', 'hangar'], + 'hangby': ['banghy', 'hangby'], + 'hangee': ['hageen', 'hangee'], + 'hanger': ['hanger', 'rehang'], + 'hangie': ['gienah', 'hangie'], + 'hangnail': ['hangnail', 'langhian'], + 'hangout': ['hangout', 'tohunga'], + 'hank': ['ankh', 'hank', 'khan'], + 'hano': ['hano', 'noah'], + 'hans': ['hans', 'nash', 'shan'], + 'hansa': ['ahsan', 'hansa', 'hasan'], + 'hanse': ['ashen', 'hanse', 'shane', 'shean'], + 'hanseatic': ['anchistea', 'hanseatic'], + 'hansel': ['halsen', 'hansel', 'lanseh'], + 'hant': ['hant', 'tanh', 'than'], + 'hantle': ['ethnal', 'hantle', 'lathen', 'thenal'], + 'hao': ['aho', 'hao'], + 'haole': ['eloah', 'haole'], + 'haoma': ['haoma', 'omaha'], + 'haori': ['haori', 
'iroha'], + 'hap': ['hap', 'pah'], + 'hapalotis': ['hapalotis', 'sapotilha'], + 'hapi': ['hapi', 'pahi'], + 'haplodoci': ['chilopoda', 'haplodoci'], + 'haplont': ['haplont', 'naphtol'], + 'haplosis': ['alphosis', 'haplosis'], + 'haply': ['haply', 'phyla'], + 'happiest': ['happiest', 'peatship'], + 'haptene': ['haptene', 'heptane', 'phenate'], + 'haptenic': ['haptenic', 'pantheic', 'pithecan'], + 'haptere': ['haptere', 'preheat'], + 'haptic': ['haptic', 'pathic'], + 'haptics': ['haptics', 'spathic'], + 'haptometer': ['amphorette', 'haptometer'], + 'haptophoric': ['haptophoric', 'pathophoric'], + 'haptophorous': ['haptophorous', 'pathophorous'], + 'haptotropic': ['haptotropic', 'protopathic'], + 'hapu': ['hapu', 'hupa'], + 'harass': ['harass', 'hassar'], + 'harb': ['bhar', 'harb'], + 'harborer': ['abhorrer', 'harborer'], + 'harden': ['hander', 'harden'], + 'hardener': ['hardener', 'reharden'], + 'hardenite': ['hardenite', 'herniated'], + 'hardtail': ['hardtail', 'thaliard'], + 'hardy': ['hardy', 'hydra'], + 'hare': ['hare', 'hear', 'rhea'], + 'harebrain': ['harebrain', 'herbarian'], + 'harem': ['harem', 'herma', 'rhema'], + 'haremism': ['ashimmer', 'haremism'], + 'harfang': ['fraghan', 'harfang'], + 'haricot': ['chariot', 'haricot'], + 'hark': ['hark', 'khar', 'rakh'], + 'harka': ['harka', 'kahar'], + 'harlot': ['harlot', 'orthal', 'thoral'], + 'harmala': ['harmala', 'marhala'], + 'harman': ['amhran', 'harman', 'mahran'], + 'harmer': ['harmer', 'reharm'], + 'harmine': ['harmine', 'hireman'], + 'harmonic': ['choirman', 'harmonic', 'omniarch'], + 'harmonical': ['harmonical', 'monarchial'], + 'harmonics': ['anorchism', 'harmonics'], + 'harmonistic': ['anchoritism', 'chiromantis', 'chrismation', 'harmonistic'], + 'harnesser': ['harnesser', 'reharness'], + 'harold': ['harold', 'holard'], + 'harpa': ['aphra', 'harpa', 'parah'], + 'harpings': ['harpings', 'phrasing'], + 'harpist': ['harpist', 'traship'], + 'harpless': ['harpless', 'splasher'], + 'harris': ['arrish', 'harris', 'rarish', 'sirrah'], + 'harrower': ['harrower', 'reharrow'], + 'hart': ['hart', 'rath', 'tahr', 'thar', 'trah'], + 'hartin': ['hartin', 'thrain'], + 'hartite': ['hartite', 'rathite'], + 'haruspices': ['chuprassie', 'haruspices'], + 'harvester': ['harvester', 'reharvest'], + 'hasan': ['ahsan', 'hansa', 'hasan'], + 'hash': ['hash', 'sahh', 'shah'], + 'hasher': ['hasher', 'rehash'], + 'hasidic': ['hasidic', 'sahidic'], + 'hasidim': ['hasidim', 'maidish'], + 'hasky': ['hasky', 'shaky'], + 'haslet': ['haslet', 'lesath', 'shelta'], + 'hasp': ['hasp', 'pash', 'psha', 'shap'], + 'hassar': ['harass', 'hassar'], + 'hassel': ['hassel', 'hassle'], + 'hassle': ['hassel', 'hassle'], + 'haste': ['ashet', 'haste', 'sheat'], + 'hasten': ['athens', 'hasten', 'snathe', 'sneath'], + 'haster': ['haster', 'hearst', 'hearts'], + 'hastilude': ['hastilude', 'lustihead'], + 'hastler': ['hastler', 'slather'], + 'hasty': ['hasty', 'yasht'], + 'hat': ['aht', 'hat', 'tha'], + 'hatchery': ['hatchery', 'thearchy'], + 'hate': ['ahet', 'haet', 'hate', 'heat', 'thea'], + 'hateable': ['hateable', 'heatable'], + 'hateful': ['hateful', 'heatful'], + 'hateless': ['hateless', 'heatless'], + 'hater': ['earth', 'hater', 'heart', 'herat', 'rathe'], + 'hati': ['hati', 'thai'], + 'hatred': ['dearth', 'hatred', 'rathed', 'thread'], + 'hatress': ['hatress', 'shaster'], + 'hatt': ['hatt', 'tath', 'that'], + 'hattemist': ['hattemist', 'thematist'], + 'hatter': ['hatter', 'threat'], + 'hattery': ['hattery', 'theatry'], + 'hattic': ['chatti', 'hattic'], + 'hattock': 
['hattock', 'totchka'], + 'hau': ['ahu', 'auh', 'hau'], + 'hauerite': ['eutheria', 'hauerite'], + 'haul': ['haul', 'hula'], + 'hauler': ['hauler', 'rehaul'], + 'haunt': ['ahunt', 'haunt', 'thuan', 'unhat'], + 'haunter': ['haunter', 'nauther', 'unearth', 'unheart', 'urethan'], + 'hauntingly': ['hauntingly', 'unhatingly'], + 'haurient': ['haurient', 'huterian'], + 'havel': ['halve', 'havel'], + 'havers': ['havers', 'shaver', 'shrave'], + 'haw': ['haw', 'hwa', 'wah', 'wha'], + 'hawer': ['hawer', 'whare'], + 'hawm': ['hawm', 'wham'], + 'hawse': ['hawse', 'shewa', 'whase'], + 'hawser': ['hawser', 'rewash', 'washer'], + 'hay': ['hay', 'yah'], + 'haya': ['ayah', 'haya'], + 'hayz': ['hayz', 'hazy'], + 'hazarder': ['hazarder', 'rehazard'], + 'hazel': ['hazel', 'hazle'], + 'hazle': ['hazel', 'hazle'], + 'hazy': ['hayz', 'hazy'], + 'he': ['eh', 'he'], + 'head': ['hade', 'head'], + 'headbander': ['barehanded', 'bradenhead', 'headbander'], + 'headboard': ['broadhead', 'headboard'], + 'header': ['adhere', 'header', 'hedera', 'rehead'], + 'headily': ['headily', 'hylidae'], + 'headlight': ['headlight', 'lighthead'], + 'headlong': ['headlong', 'longhead'], + 'headman': ['headman', 'manhead'], + 'headmaster': ['headmaster', 'headstream', 'streamhead'], + 'headnote': ['headnote', 'notehead'], + 'headrail': ['headrail', 'railhead'], + 'headrent': ['adherent', 'headrent', 'neatherd', 'threaden'], + 'headring': ['headring', 'ringhead'], + 'headset': ['headset', 'sethead'], + 'headskin': ['headskin', 'nakedish', 'sinkhead'], + 'headspring': ['headspring', 'springhead'], + 'headstone': ['headstone', 'stonehead'], + 'headstream': ['headmaster', 'headstream', 'streamhead'], + 'headstrong': ['headstrong', 'stronghead'], + 'headward': ['drawhead', 'headward'], + 'headwater': ['headwater', 'waterhead'], + 'heal': ['hale', 'heal', 'leah'], + 'healer': ['healer', 'rehale', 'reheal'], + 'healsome': ['halesome', 'healsome'], + 'heap': ['epha', 'heap'], + 'heaper': ['heaper', 'reheap'], + 'heaps': ['heaps', 'pesah', 'phase', 'shape'], + 'hear': ['hare', 'hear', 'rhea'], + 'hearer': ['hearer', 'rehear'], + 'hearken': ['hearken', 'kenareh'], + 'hearst': ['haster', 'hearst', 'hearts'], + 'heart': ['earth', 'hater', 'heart', 'herat', 'rathe'], + 'heartdeep': ['heartdeep', 'preheated'], + 'hearted': ['earthed', 'hearted'], + 'heartedness': ['heartedness', 'neatherdess'], + 'hearten': ['earthen', 'enheart', 'hearten', 'naether', 'teheran', 'traheen'], + 'heartener': ['heartener', 'rehearten'], + 'heartiness': ['earthiness', 'heartiness'], + 'hearting': ['hearting', 'ingather'], + 'heartless': ['earthless', 'heartless'], + 'heartling': ['earthling', 'heartling'], + 'heartly': ['earthly', 'heartly', 'lathery', 'rathely'], + 'heartnut': ['earthnut', 'heartnut'], + 'heartpea': ['earthpea', 'heartpea'], + 'heartquake': ['earthquake', 'heartquake'], + 'hearts': ['haster', 'hearst', 'hearts'], + 'heartsome': ['heartsome', 'samothere'], + 'heartward': ['earthward', 'heartward'], + 'heartweed': ['heartweed', 'weathered'], + 'hearty': ['earthy', 'hearty', 'yearth'], + 'heat': ['ahet', 'haet', 'hate', 'heat', 'thea'], + 'heatable': ['hateable', 'heatable'], + 'heater': ['heater', 'hereat', 'reheat'], + 'heatful': ['hateful', 'heatful'], + 'heath': ['heath', 'theah'], + 'heating': ['gahnite', 'heating'], + 'heatless': ['hateless', 'heatless'], + 'heatronic': ['anchorite', 'antechoir', 'heatronic', 'hectorian'], + 'heave': ['heave', 'hevea'], + 'hebraizer': ['hebraizer', 'herbarize'], + 'hebridean': ['haberdine', 'hebridean'], + 'hecate': 
['achete', 'hecate', 'teache', 'thecae'], + 'hecatine': ['echinate', 'hecatine'], + 'heckle': ['heckle', 'kechel'], + 'hectare': ['cheater', 'hectare', 'recheat', 'reteach', 'teacher'], + 'hecte': ['cheet', 'hecte'], + 'hector': ['hector', 'rochet', 'tocher', 'troche'], + 'hectorian': ['anchorite', 'antechoir', 'heatronic', 'hectorian'], + 'hectorship': ['christophe', 'hectorship'], + 'hedera': ['adhere', 'header', 'hedera', 'rehead'], + 'hedonical': ['chelodina', 'hedonical'], + 'hedonism': ['demonish', 'hedonism'], + 'heehaw': ['heehaw', 'wahehe'], + 'heel': ['heel', 'hele'], + 'heeler': ['heeler', 'reheel'], + 'heelpost': ['heelpost', 'pesthole'], + 'heer': ['heer', 'here'], + 'hegari': ['hegari', 'hegira'], + 'hegemonic': ['hegemonic', 'hemogenic'], + 'hegira': ['hegari', 'hegira'], + 'hei': ['hei', 'hie'], + 'height': ['eighth', 'height'], + 'heightener': ['heightener', 'reheighten'], + 'heintzite': ['heintzite', 'hintzeite'], + 'heinz': ['heinz', 'hienz'], + 'heir': ['heir', 'hire'], + 'heirdom': ['heirdom', 'homerid'], + 'heirless': ['heirless', 'hireless'], + 'hejazi': ['hejazi', 'jeziah'], + 'helcosis': ['helcosis', 'ochlesis'], + 'helcotic': ['helcotic', 'lochetic', 'ochletic'], + 'hele': ['heel', 'hele'], + 'heliacal': ['achillea', 'heliacal'], + 'heliast': ['heliast', 'thesial'], + 'helical': ['alichel', 'challie', 'helical'], + 'heliced': ['chelide', 'heliced'], + 'helicon': ['choline', 'helicon'], + 'heling': ['heling', 'hingle'], + 'heliophotography': ['heliophotography', 'photoheliography'], + 'helios': ['helios', 'isohel'], + 'heliostatic': ['chiastolite', 'heliostatic'], + 'heliotactic': ['heliotactic', 'thiolacetic'], + 'helium': ['helium', 'humlie'], + 'hellcat': ['hellcat', 'tellach'], + 'helleborein': ['helleborein', 'helleborine'], + 'helleborine': ['helleborein', 'helleborine'], + 'hellenic': ['chenille', 'hellenic'], + 'helleri': ['helleri', 'hellier'], + 'hellicat': ['hellicat', 'lecithal'], + 'hellier': ['helleri', 'hellier'], + 'helm': ['helm', 'heml'], + 'heloderma': ['dreamhole', 'heloderma'], + 'helot': ['helot', 'hotel', 'thole'], + 'helotize': ['helotize', 'hotelize'], + 'helpmeet': ['helpmeet', 'meethelp'], + 'hemad': ['ahmed', 'hemad'], + 'hemal': ['hamel', 'hemal'], + 'hemapod': ['hemapod', 'mophead'], + 'hematic': ['chamite', 'hematic'], + 'hematin': ['ethanim', 'hematin'], + 'hematinic': ['hematinic', 'minchiate'], + 'hematolin': ['hematolin', 'maholtine'], + 'hematonic': ['hematonic', 'methanoic'], + 'hematosin': ['hematosin', 'thomasine'], + 'hematoxic': ['hematoxic', 'hexatomic'], + 'hematuric': ['hematuric', 'rheumatic'], + 'hemiasci': ['hemiasci', 'ischemia'], + 'hemiatrophy': ['hemiatrophy', 'hypothermia'], + 'hemic': ['chime', 'hemic', 'miche'], + 'hemicarp': ['camphire', 'hemicarp'], + 'hemicatalepsy': ['hemicatalepsy', 'mesaticephaly'], + 'hemiclastic': ['alchemistic', 'hemiclastic'], + 'hemicrany': ['hemicrany', 'machinery'], + 'hemiholohedral': ['hemiholohedral', 'holohemihedral'], + 'hemiolic': ['elohimic', 'hemiolic'], + 'hemiparesis': ['hemiparesis', 'phariseeism'], + 'hemistater': ['amherstite', 'hemistater'], + 'hemiterata': ['hemiterata', 'metatheria'], + 'hemitype': ['epithyme', 'hemitype'], + 'heml': ['helm', 'heml'], + 'hemogenic': ['hegemonic', 'hemogenic'], + 'hemol': ['hemol', 'mohel'], + 'hemologist': ['hemologist', 'theologism'], + 'hemopneumothorax': ['hemopneumothorax', 'pneumohemothorax'], + 'henbit': ['behint', 'henbit'], + 'hent': ['hent', 'neth', 'then'], + 'henter': ['erthen', 'henter', 'nether', 'threne'], + 'henyard': 
['enhydra', 'henyard'], + 'hepar': ['hepar', 'phare', 'raphe'], + 'heparin': ['heparin', 'nephria'], + 'hepatic': ['aphetic', 'caphite', 'hepatic'], + 'hepatica': ['apachite', 'hepatica'], + 'hepatical': ['caliphate', 'hepatical'], + 'hepatize': ['aphetize', 'hepatize'], + 'hepatocolic': ['hepatocolic', 'otocephalic'], + 'hepatogastric': ['gastrohepatic', 'hepatogastric'], + 'hepatoid': ['diaphote', 'hepatoid'], + 'hepatomegalia': ['hepatomegalia', 'megalohepatia'], + 'hepatonephric': ['hepatonephric', 'phrenohepatic'], + 'hepatostomy': ['hepatostomy', 'somatophyte'], + 'hepialid': ['hepialid', 'phialide'], + 'heptace': ['heptace', 'tepache'], + 'heptad': ['heptad', 'pathed'], + 'heptagon': ['heptagon', 'pathogen'], + 'heptameron': ['heptameron', 'promethean'], + 'heptane': ['haptene', 'heptane', 'phenate'], + 'heptaploidy': ['heptaploidy', 'typhlopidae'], + 'hepteris': ['hepteris', 'treeship'], + 'heptine': ['heptine', 'nephite'], + 'heptite': ['epithet', 'heptite'], + 'heptorite': ['heptorite', 'tephroite'], + 'heptylic': ['heptylic', 'phyletic'], + 'her': ['her', 'reh', 'rhe'], + 'heraclid': ['heraclid', 'heraldic'], + 'heraldic': ['heraclid', 'heraldic'], + 'heraldist': ['heraldist', 'tehsildar'], + 'herat': ['earth', 'hater', 'heart', 'herat', 'rathe'], + 'herbage': ['breaghe', 'herbage'], + 'herbarian': ['harebrain', 'herbarian'], + 'herbarism': ['herbarism', 'shambrier'], + 'herbarize': ['hebraizer', 'herbarize'], + 'herbert': ['berther', 'herbert'], + 'herbous': ['herbous', 'subhero'], + 'herdic': ['chider', 'herdic'], + 'here': ['heer', 'here'], + 'hereafter': ['featherer', 'hereafter'], + 'hereat': ['heater', 'hereat', 'reheat'], + 'herein': ['herein', 'inhere'], + 'hereinto': ['etherion', 'hereinto', 'heronite'], + 'herem': ['herem', 'rheme'], + 'heretic': ['erethic', 'etheric', 'heretic', 'heteric', 'teicher'], + 'heretically': ['heretically', 'heterically'], + 'heretication': ['heretication', 'theoretician'], + 'hereto': ['hereto', 'hetero'], + 'heritance': ['catherine', 'heritance'], + 'herl': ['herl', 'hler', 'lehr'], + 'herma': ['harem', 'herma', 'rhema'], + 'hermaic': ['chimera', 'hermaic'], + 'hermitage': ['geheimrat', 'hermitage'], + 'hermo': ['hermo', 'homer', 'horme'], + 'herne': ['herne', 'rheen'], + 'hernia': ['hairen', 'hernia'], + 'hernial': ['hernial', 'inhaler'], + 'herniate': ['atherine', 'herniate'], + 'herniated': ['hardenite', 'herniated'], + 'hernioid': ['dinheiro', 'hernioid'], + 'hero': ['hero', 'hoer'], + 'herodian': ['herodian', 'ironhead'], + 'heroic': ['coheir', 'heroic'], + 'heroical': ['halicore', 'heroical'], + 'heroin': ['heroin', 'hieron', 'hornie'], + 'heroism': ['heroism', 'moreish'], + 'heronite': ['etherion', 'hereinto', 'heronite'], + 'herophile': ['herophile', 'rheophile'], + 'herpes': ['herpes', 'hesper', 'sphere'], + 'herpetism': ['herpetism', 'metership', 'metreship', 'temperish'], + 'herpetological': ['herpetological', 'pretheological'], + 'herpetomonad': ['dermatophone', 'herpetomonad'], + 'hers': ['hers', 'resh', 'sher'], + 'herse': ['herse', 'sereh', 'sheer', 'shree'], + 'hersed': ['hersed', 'sheder'], + 'herself': ['flesher', 'herself'], + 'hersir': ['hersir', 'sherri'], + 'herulian': ['herulian', 'inhauler'], + 'hervati': ['athrive', 'hervati'], + 'hesitater': ['hesitater', 'hetaerist'], + 'hesper': ['herpes', 'hesper', 'sphere'], + 'hespera': ['hespera', 'rephase', 'reshape'], + 'hesperia': ['hesperia', 'pharisee'], + 'hesperian': ['hesperian', 'phrenesia', 'seraphine'], + 'hesperid': ['hesperid', 'perished'], + 'hesperinon': 
['hesperinon', 'prehension'], + 'hesperis': ['hesperis', 'seership'], + 'hest': ['esth', 'hest', 'seth'], + 'hester': ['esther', 'hester', 'theres'], + 'het': ['het', 'the'], + 'hetaeric': ['cheatrie', 'hetaeric'], + 'hetaerist': ['hesitater', 'hetaerist'], + 'hetaery': ['erythea', 'hetaery', 'yeather'], + 'heteratomic': ['heteratomic', 'theorematic'], + 'heteraxial': ['exhilarate', 'heteraxial'], + 'heteric': ['erethic', 'etheric', 'heretic', 'heteric', 'teicher'], + 'heterically': ['heretically', 'heterically'], + 'hetericism': ['erethismic', 'hetericism'], + 'hetericist': ['erethistic', 'hetericist'], + 'heterism': ['erethism', 'etherism', 'heterism'], + 'heterization': ['etherization', 'heterization'], + 'heterize': ['etherize', 'heterize'], + 'hetero': ['hereto', 'hetero'], + 'heterocarpus': ['heterocarpus', 'urethrascope'], + 'heteroclite': ['heteroclite', 'heterotelic'], + 'heterodromy': ['heterodromy', 'hydrometeor'], + 'heteroecismal': ['cholesteremia', 'heteroecismal'], + 'heterography': ['erythrophage', 'heterography'], + 'heterogynous': ['heterogynous', 'thyreogenous'], + 'heterology': ['heterology', 'thereology'], + 'heteromeri': ['heteromeri', 'moerithere'], + 'heteroousiast': ['autoheterosis', 'heteroousiast'], + 'heteropathy': ['heteropathy', 'theotherapy'], + 'heteropodal': ['heteropodal', 'prelatehood'], + 'heterotelic': ['heteroclite', 'heterotelic'], + 'heterotic': ['heterotic', 'theoretic'], + 'hetman': ['anthem', 'hetman', 'mentha'], + 'hetmanate': ['hetmanate', 'methanate'], + 'hetter': ['hetter', 'tether'], + 'hevea': ['heave', 'hevea'], + 'hevi': ['hevi', 'hive'], + 'hewel': ['hewel', 'wheel'], + 'hewer': ['hewer', 'wheer', 'where'], + 'hewn': ['hewn', 'when'], + 'hewt': ['hewt', 'thew', 'whet'], + 'hexacid': ['hexacid', 'hexadic'], + 'hexadic': ['hexacid', 'hexadic'], + 'hexakisoctahedron': ['hexakisoctahedron', 'octakishexahedron'], + 'hexakistetrahedron': ['hexakistetrahedron', 'tetrakishexahedron'], + 'hexatetrahedron': ['hexatetrahedron', 'tetrahexahedron'], + 'hexatomic': ['hematoxic', 'hexatomic'], + 'hexonic': ['choenix', 'hexonic'], + 'hiant': ['ahint', 'hiant', 'tahin'], + 'hiatal': ['hiatal', 'thalia'], + 'hibernate': ['hibernate', 'inbreathe'], + 'hic': ['chi', 'hic', 'ich'], + 'hickwall': ['hickwall', 'wallhick'], + 'hidage': ['adighe', 'hidage'], + 'hider': ['dheri', 'hider', 'hired'], + 'hidling': ['hidling', 'hilding'], + 'hidlings': ['dishling', 'hidlings'], + 'hidrotic': ['hidrotic', 'trichoid'], + 'hie': ['hei', 'hie'], + 'hield': ['delhi', 'hield'], + 'hiemal': ['hameil', 'hiemal'], + 'hienz': ['heinz', 'hienz'], + 'hieron': ['heroin', 'hieron', 'hornie'], + 'hieros': ['hieros', 'hosier'], + 'hight': ['hight', 'thigh'], + 'higuero': ['higuero', 'roughie'], + 'hilasmic': ['chiliasm', 'hilasmic', 'machilis'], + 'hilding': ['hidling', 'hilding'], + 'hillside': ['hillside', 'sidehill'], + 'hilsa': ['alish', 'hilsa'], + 'hilt': ['hilt', 'lith'], + 'hima': ['hami', 'hima', 'mahi'], + 'himself': ['flemish', 'himself'], + 'hinderest': ['disherent', 'hinderest', 'tenderish'], + 'hindu': ['hindu', 'hundi', 'unhid'], + 'hing': ['hing', 'nigh'], + 'hinge': ['hinge', 'neigh'], + 'hingle': ['heling', 'hingle'], + 'hint': ['hint', 'thin'], + 'hinter': ['hinter', 'nither', 'theirn'], + 'hintproof': ['hintproof', 'hoofprint'], + 'hintzeite': ['heintzite', 'hintzeite'], + 'hip': ['hip', 'phi'], + 'hipbone': ['hipbone', 'hopbine'], + 'hippodamous': ['amphipodous', 'hippodamous'], + 'hippolyte': ['hippolyte', 'typophile'], + 'hippus': ['hippus', 'uppish'], + 
'hiram': ['hiram', 'ihram', 'mahri'], + 'hircine': ['hircine', 'rheinic'], + 'hire': ['heir', 'hire'], + 'hired': ['dheri', 'hider', 'hired'], + 'hireless': ['heirless', 'hireless'], + 'hireman': ['harmine', 'hireman'], + 'hiren': ['hiren', 'rhein', 'rhine'], + 'hirmos': ['hirmos', 'romish'], + 'hirse': ['hirse', 'shier', 'shire'], + 'hirsel': ['hirsel', 'hirsle', 'relish'], + 'hirsle': ['hirsel', 'hirsle', 'relish'], + 'his': ['his', 'hsi', 'shi'], + 'hish': ['hish', 'shih'], + 'hisn': ['hisn', 'shin', 'sinh'], + 'hispa': ['aphis', 'apish', 'hispa', 'saiph', 'spahi'], + 'hispanist': ['hispanist', 'saintship'], + 'hiss': ['hiss', 'sish'], + 'hist': ['hist', 'sith', 'this', 'tshi'], + 'histamine': ['histamine', 'semihiant'], + 'histie': ['histie', 'shiite'], + 'histioid': ['histioid', 'idiotish'], + 'histon': ['histon', 'shinto', 'tonish'], + 'histonal': ['histonal', 'toshnail'], + 'historic': ['historic', 'orchitis'], + 'historics': ['historics', 'trichosis'], + 'history': ['history', 'toryish'], + 'hittable': ['hittable', 'tithable'], + 'hitter': ['hitter', 'tither'], + 'hive': ['hevi', 'hive'], + 'hives': ['hives', 'shive'], + 'hler': ['herl', 'hler', 'lehr'], + 'ho': ['ho', 'oh'], + 'hoar': ['hoar', 'hora'], + 'hoard': ['hoard', 'rhoda'], + 'hoarse': ['ahorse', 'ashore', 'hoarse', 'shorea'], + 'hoarstone': ['anorthose', 'hoarstone'], + 'hoast': ['hoast', 'hosta', 'shoat'], + 'hobbism': ['hobbism', 'mobbish'], + 'hobo': ['boho', 'hobo'], + 'hocco': ['choco', 'hocco'], + 'hock': ['hock', 'koch'], + 'hocker': ['choker', 'hocker'], + 'hocky': ['choky', 'hocky'], + 'hocus': ['chous', 'hocus'], + 'hodiernal': ['hodiernal', 'rhodaline'], + 'hoer': ['hero', 'hoer'], + 'hogan': ['ahong', 'hogan'], + 'hogget': ['egghot', 'hogget'], + 'hogmanay': ['hogmanay', 'mahogany'], + 'hognut': ['hognut', 'nought'], + 'hogsty': ['ghosty', 'hogsty'], + 'hoister': ['hoister', 'rehoist'], + 'hoit': ['hoit', 'hoti', 'thio'], + 'holard': ['harold', 'holard'], + 'holconoti': ['holconoti', 'holotonic'], + 'holcus': ['holcus', 'lochus', 'slouch'], + 'holdfast': ['fasthold', 'holdfast'], + 'holdout': ['holdout', 'outhold'], + 'holdup': ['holdup', 'uphold'], + 'holeman': ['holeman', 'manhole'], + 'holey': ['holey', 'hoyle'], + 'holiday': ['holiday', 'hyaloid', 'hyoidal'], + 'hollandite': ['hollandite', 'hollantide'], + 'hollantide': ['hollandite', 'hollantide'], + 'hollower': ['hollower', 'rehollow'], + 'holmia': ['holmia', 'maholi'], + 'holocentrid': ['holocentrid', 'lechriodont'], + 'holohemihedral': ['hemiholohedral', 'holohemihedral'], + 'holosteric': ['holosteric', 'thiocresol'], + 'holotonic': ['holconoti', 'holotonic'], + 'holster': ['holster', 'hostler'], + 'homage': ['homage', 'ohmage'], + 'homarine': ['homarine', 'homerian'], + 'homecroft': ['forthcome', 'homecroft'], + 'homeogenous': ['homeogenous', 'homogeneous'], + 'homeotypic': ['homeotypic', 'mythopoeic'], + 'homeotypical': ['homeotypical', 'polymetochia'], + 'homer': ['hermo', 'homer', 'horme'], + 'homerian': ['homarine', 'homerian'], + 'homeric': ['homeric', 'moriche'], + 'homerical': ['chloremia', 'homerical'], + 'homerid': ['heirdom', 'homerid'], + 'homerist': ['homerist', 'isotherm', 'otherism', 'theorism'], + 'homiletics': ['homiletics', 'mesolithic'], + 'homo': ['homo', 'moho'], + 'homocline': ['chemiloon', 'homocline'], + 'homogeneous': ['homeogenous', 'homogeneous'], + 'homopolic': ['homopolic', 'lophocomi'], + 'homopteran': ['homopteran', 'trophonema'], + 'homrai': ['homrai', 'mahori', 'mohair'], + 'hondo': ['dhoon', 'hondo'], + 'honest': 
['ethnos', 'honest'], + 'honeypod': ['dyophone', 'honeypod'], + 'honeypot': ['eophyton', 'honeypot'], + 'honorer': ['honorer', 'rehonor'], + 'hontous': ['hontous', 'nothous'], + 'hoodman': ['dhamnoo', 'hoodman', 'manhood'], + 'hoofprint': ['hintproof', 'hoofprint'], + 'hooker': ['hooker', 'rehook'], + 'hookweed': ['hookweed', 'weedhook'], + 'hoop': ['hoop', 'phoo', 'pooh'], + 'hooper': ['hooper', 'rehoop'], + 'hoot': ['hoot', 'thoo', 'toho'], + 'hop': ['hop', 'pho', 'poh'], + 'hopbine': ['hipbone', 'hopbine'], + 'hopcalite': ['hopcalite', 'phacolite'], + 'hope': ['hope', 'peho'], + 'hoped': ['depoh', 'ephod', 'hoped'], + 'hoper': ['ephor', 'hoper'], + 'hoplite': ['hoplite', 'pithole'], + 'hoppergrass': ['grasshopper', 'hoppergrass'], + 'hoppers': ['hoppers', 'shopper'], + 'hora': ['hoar', 'hora'], + 'horal': ['horal', 'lohar'], + 'hordarian': ['arianrhod', 'hordarian'], + 'horizontal': ['horizontal', 'notorhizal'], + 'horme': ['hermo', 'homer', 'horme'], + 'horned': ['dehorn', 'horned'], + 'hornet': ['hornet', 'nother', 'theron', 'throne'], + 'hornie': ['heroin', 'hieron', 'hornie'], + 'hornpipe': ['hornpipe', 'porphine'], + 'horopteric': ['horopteric', 'rheotropic', 'trichopore'], + 'horrent': ['horrent', 'norther'], + 'horse': ['horse', 'shoer', 'shore'], + 'horsecar': ['cosharer', 'horsecar'], + 'horseless': ['horseless', 'shoreless'], + 'horseman': ['horseman', 'rhamnose', 'shoreman'], + 'horser': ['horser', 'shorer'], + 'horsetail': ['horsetail', 'isotheral'], + 'horseweed': ['horseweed', 'shoreweed'], + 'horsewhip': ['horsewhip', 'whoreship'], + 'horsewood': ['horsewood', 'woodhorse'], + 'horsing': ['horsing', 'shoring'], + 'horst': ['horst', 'short'], + 'hortensia': ['hairstone', 'hortensia'], + 'hortite': ['hortite', 'orthite', 'thorite'], + 'hose': ['hose', 'shoe'], + 'hosed': ['hosed', 'shode'], + 'hosel': ['hosel', 'sheol', 'shole'], + 'hoseless': ['hoseless', 'shoeless'], + 'hoseman': ['hoseman', 'shoeman'], + 'hosier': ['hieros', 'hosier'], + 'hospitaler': ['hospitaler', 'trophesial'], + 'host': ['host', 'shot', 'thos', 'tosh'], + 'hosta': ['hoast', 'hosta', 'shoat'], + 'hostager': ['hostager', 'shortage'], + 'hoster': ['hoster', 'tosher'], + 'hostile': ['elohist', 'hostile'], + 'hosting': ['hosting', 'onsight'], + 'hostler': ['holster', 'hostler'], + 'hostless': ['hostless', 'shotless'], + 'hostly': ['hostly', 'toshly'], + 'hot': ['hot', 'tho'], + 'hotel': ['helot', 'hotel', 'thole'], + 'hotelize': ['helotize', 'hotelize'], + 'hotfoot': ['foothot', 'hotfoot'], + 'hoti': ['hoit', 'hoti', 'thio'], + 'hotter': ['hotter', 'tother'], + 'hounce': ['cohune', 'hounce'], + 'houseboat': ['boathouse', 'houseboat'], + 'housebug': ['bughouse', 'housebug'], + 'housecraft': ['fratcheous', 'housecraft'], + 'housetop': ['housetop', 'pothouse'], + 'housewarm': ['housewarm', 'warmhouse'], + 'housewear': ['housewear', 'warehouse'], + 'housework': ['housework', 'workhouse'], + 'hovering': ['hovering', 'overnigh'], + 'how': ['how', 'who'], + 'howel': ['howel', 'whole'], + 'however': ['everwho', 'however', 'whoever'], + 'howlet': ['howlet', 'thowel'], + 'howso': ['howso', 'woosh'], + 'howsomever': ['howsomever', 'whomsoever', 'whosomever'], + 'hoya': ['ahoy', 'hoya'], + 'hoyle': ['holey', 'hoyle'], + 'hsi': ['his', 'hsi', 'shi'], + 'huari': ['huari', 'uriah'], + 'hubert': ['hubert', 'turbeh'], + 'hud': ['dhu', 'hud'], + 'hudsonite': ['hudsonite', 'unhoisted'], + 'huer': ['huer', 'hure'], + 'hug': ['hug', 'ugh'], + 'hughes': ['hughes', 'sheugh'], + 'hughoc': ['chough', 'hughoc'], + 'hugo': ['hugo', 
'ough'], + 'hugsome': ['gumshoe', 'hugsome'], + 'huk': ['huk', 'khu'], + 'hula': ['haul', 'hula'], + 'hulsean': ['hulsean', 'unleash'], + 'hulster': ['hulster', 'hustler', 'sluther'], + 'huma': ['ahum', 'huma'], + 'human': ['human', 'nahum'], + 'humane': ['humane', 'humean'], + 'humanics': ['humanics', 'inasmuch'], + 'humean': ['humane', 'humean'], + 'humeroradial': ['humeroradial', 'radiohumeral'], + 'humic': ['chimu', 'humic'], + 'humidor': ['humidor', 'rhodium'], + 'humlie': ['helium', 'humlie'], + 'humor': ['humor', 'mohur'], + 'humoralistic': ['humoralistic', 'humoristical'], + 'humoristical': ['humoralistic', 'humoristical'], + 'hump': ['hump', 'umph'], + 'hundi': ['hindu', 'hundi', 'unhid'], + 'hunger': ['hunger', 'rehung'], + 'hunterian': ['hunterian', 'ruthenian'], + 'hup': ['hup', 'phu'], + 'hupa': ['hapu', 'hupa'], + 'hurdis': ['hurdis', 'rudish'], + 'hurdle': ['hurdle', 'hurled'], + 'hure': ['huer', 'hure'], + 'hurled': ['hurdle', 'hurled'], + 'huron': ['huron', 'rohun'], + 'hurst': ['hurst', 'trush'], + 'hurt': ['hurt', 'ruth'], + 'hurter': ['hurter', 'ruther'], + 'hurtful': ['hurtful', 'ruthful'], + 'hurtfully': ['hurtfully', 'ruthfully'], + 'hurtfulness': ['hurtfulness', 'ruthfulness'], + 'hurting': ['hurting', 'ungirth', 'unright'], + 'hurtingest': ['hurtingest', 'shuttering'], + 'hurtle': ['hurtle', 'luther'], + 'hurtless': ['hurtless', 'ruthless'], + 'hurtlessly': ['hurtlessly', 'ruthlessly'], + 'hurtlessness': ['hurtlessness', 'ruthlessness'], + 'husbander': ['husbander', 'shabunder'], + 'husked': ['dehusk', 'husked'], + 'huso': ['huso', 'shou'], + 'huspil': ['huspil', 'pulish'], + 'husting': ['gutnish', 'husting', 'unsight'], + 'hustle': ['hustle', 'sleuth'], + 'hustler': ['hulster', 'hustler', 'sluther'], + 'huterian': ['haurient', 'huterian'], + 'hwa': ['haw', 'hwa', 'wah', 'wha'], + 'hyaloid': ['holiday', 'hyaloid', 'hyoidal'], + 'hydra': ['hardy', 'hydra'], + 'hydramnios': ['disharmony', 'hydramnios'], + 'hydrate': ['hydrate', 'thready'], + 'hydrazidine': ['anhydridize', 'hydrazidine'], + 'hydrazine': ['anhydrize', 'hydrazine'], + 'hydriodate': ['hydriodate', 'iodhydrate'], + 'hydriodic': ['hydriodic', 'iodhydric'], + 'hydriote': ['hydriote', 'thyreoid'], + 'hydrobromate': ['bromohydrate', 'hydrobromate'], + 'hydrocarbide': ['carbohydride', 'hydrocarbide'], + 'hydrocharis': ['hydrocharis', 'hydrorachis'], + 'hydroferricyanic': ['ferrihydrocyanic', 'hydroferricyanic'], + 'hydroferrocyanic': ['ferrohydrocyanic', 'hydroferrocyanic'], + 'hydrofluoboric': ['borofluohydric', 'hydrofluoboric'], + 'hydrogeology': ['geohydrology', 'hydrogeology'], + 'hydroiodic': ['hydroiodic', 'iodohydric'], + 'hydrometeor': ['heterodromy', 'hydrometeor'], + 'hydromotor': ['hydromotor', 'orthodromy'], + 'hydronephrosis': ['hydronephrosis', 'nephrohydrosis'], + 'hydropneumopericardium': ['hydropneumopericardium', 'pneumohydropericardium'], + 'hydropneumothorax': ['hydropneumothorax', 'pneumohydrothorax'], + 'hydrorachis': ['hydrocharis', 'hydrorachis'], + 'hydrosulphate': ['hydrosulphate', 'sulphohydrate'], + 'hydrotical': ['dacryolith', 'hydrotical'], + 'hydrous': ['hydrous', 'shroudy'], + 'hyetograph': ['ethography', 'hyetograph'], + 'hylidae': ['headily', 'hylidae'], + 'hylist': ['hylist', 'slithy'], + 'hyllus': ['hyllus', 'lushly'], + 'hylopathism': ['halophytism', 'hylopathism'], + 'hymenic': ['chimney', 'hymenic'], + 'hymettic': ['hymettic', 'thymetic'], + 'hymnologist': ['hymnologist', 'smoothingly'], + 'hyoglossal': ['glossohyal', 'hyoglossal'], + 'hyoidal': ['holiday', 'hyaloid', 
'hyoidal'], + 'hyothyreoid': ['hyothyreoid', 'thyreohyoid'], + 'hyothyroid': ['hyothyroid', 'thyrohyoid'], + 'hypaethron': ['hypaethron', 'hypothenar'], + 'hypercone': ['coryphene', 'hypercone'], + 'hypergamous': ['hypergamous', 'museography'], + 'hypertoxic': ['hypertoxic', 'xerophytic'], + 'hypnobate': ['batyphone', 'hypnobate'], + 'hypnoetic': ['hypnoetic', 'neophytic'], + 'hypnotic': ['hypnotic', 'phytonic', 'pythonic', 'typhonic'], + 'hypnotism': ['hypnotism', 'pythonism'], + 'hypnotist': ['hypnotist', 'pythonist'], + 'hypnotize': ['hypnotize', 'pythonize'], + 'hypnotoid': ['hypnotoid', 'pythonoid'], + 'hypobole': ['hypobole', 'lyophobe'], + 'hypocarp': ['apocryph', 'hypocarp'], + 'hypocrite': ['chirotype', 'hypocrite'], + 'hypodorian': ['hypodorian', 'radiophony'], + 'hypoglottis': ['hypoglottis', 'phytologist'], + 'hypomanic': ['amphicyon', 'hypomanic'], + 'hypopteron': ['hypopteron', 'phonotyper'], + 'hyporadius': ['hyporadius', 'suprahyoid'], + 'hyposcleral': ['hyposcleral', 'phylloceras'], + 'hyposmia': ['hyposmia', 'phymosia'], + 'hypostomatic': ['hypostomatic', 'somatophytic'], + 'hypothec': ['hypothec', 'photechy'], + 'hypothenar': ['hypaethron', 'hypothenar'], + 'hypothermia': ['hemiatrophy', 'hypothermia'], + 'hypsiloid': ['hypsiloid', 'syphiloid'], + 'hyracid': ['diarchy', 'hyracid'], + 'hyssop': ['hyssop', 'phossy', 'sposhy'], + 'hysteresial': ['hysteresial', 'hysteriales'], + 'hysteria': ['hysteria', 'sheriyat'], + 'hysteriales': ['hysteresial', 'hysteriales'], + 'hysterolaparotomy': ['hysterolaparotomy', 'laparohysterotomy'], + 'hysteromyomectomy': ['hysteromyomectomy', 'myomohysterectomy'], + 'hysteropathy': ['hysteropathy', 'hysterophyta'], + 'hysterophyta': ['hysteropathy', 'hysterophyta'], + 'iamb': ['iamb', 'mabi'], + 'iambelegus': ['elegiambus', 'iambelegus'], + 'iambic': ['cimbia', 'iambic'], + 'ian': ['ani', 'ian'], + 'ianus': ['ianus', 'suina'], + 'iatraliptics': ['iatraliptics', 'partialistic'], + 'iatric': ['iatric', 'tricia'], + 'ibad': ['adib', 'ibad'], + 'iban': ['bain', 'bani', 'iban'], + 'ibanag': ['bagani', 'bangia', 'ibanag'], + 'iberian': ['aribine', 'bairnie', 'iberian'], + 'ibo': ['ibo', 'obi'], + 'ibota': ['biota', 'ibota'], + 'icacorea': ['coraciae', 'icacorea'], + 'icarian': ['arician', 'icarian'], + 'icecap': ['icecap', 'ipecac'], + 'iced': ['dice', 'iced'], + 'iceland': ['cladine', 'decalin', 'iceland'], + 'icelandic': ['cicindela', 'cinclidae', 'icelandic'], + 'iceman': ['anemic', 'cinema', 'iceman'], + 'ich': ['chi', 'hic', 'ich'], + 'ichnolite': ['ichnolite', 'neolithic'], + 'ichor': ['chiro', 'choir', 'ichor'], + 'icicle': ['cilice', 'icicle'], + 'icon': ['cion', 'coin', 'icon'], + 'iconian': ['anionic', 'iconian'], + 'iconism': ['iconism', 'imsonic', 'miscoin'], + 'iconolater': ['iconolater', 'relocation'], + 'iconomania': ['iconomania', 'oniomaniac'], + 'iconometrical': ['iconometrical', 'intracoelomic'], + 'icteridae': ['diaeretic', 'icteridae'], + 'icterine': ['icterine', 'reincite'], + 'icterus': ['curtise', 'icterus'], + 'ictonyx': ['ictonyx', 'oxyntic'], + 'ictus': ['cutis', 'ictus'], + 'id': ['di', 'id'], + 'ida': ['aid', 'ida'], + 'idaean': ['adenia', 'idaean'], + 'ide': ['die', 'ide'], + 'idea': ['aide', 'idea'], + 'ideal': ['adiel', 'delia', 'ideal'], + 'idealism': ['idealism', 'lamiides'], + 'idealistic': ['disilicate', 'idealistic'], + 'ideality': ['aedility', 'ideality'], + 'idealness': ['idealness', 'leadiness'], + 'idean': ['diane', 'idean'], + 'ideation': ['ideation', 'iodinate', 'taenioid'], + 'identical': ['ctenidial', 
'identical'], + 'ideograph': ['eidograph', 'ideograph'], + 'ideology': ['eidology', 'ideology'], + 'ideoplasty': ['ideoplasty', 'stylopidae'], + 'ides': ['desi', 'ides', 'seid', 'side'], + 'idiasm': ['idiasm', 'simiad'], + 'idioblast': ['diabolist', 'idioblast'], + 'idiomology': ['idiomology', 'oligomyoid'], + 'idioretinal': ['idioretinal', 'litorinidae'], + 'idiotish': ['histioid', 'idiotish'], + 'idle': ['idle', 'lide', 'lied'], + 'idleman': ['idleman', 'melinda'], + 'idleset': ['idleset', 'isleted'], + 'idlety': ['idlety', 'lydite', 'tidely', 'tidley'], + 'idly': ['idly', 'idyl'], + 'idocrase': ['idocrase', 'radicose'], + 'idoism': ['idoism', 'iodism'], + 'idol': ['dilo', 'diol', 'doli', 'idol', 'olid'], + 'idola': ['aloid', 'dolia', 'idola'], + 'idolaster': ['estradiol', 'idolaster'], + 'idolatry': ['adroitly', 'dilatory', 'idolatry'], + 'idolum': ['dolium', 'idolum'], + 'idoneal': ['adinole', 'idoneal'], + 'idorgan': ['gordian', 'idorgan', 'roading'], + 'idose': ['diose', 'idose', 'oside'], + 'idotea': ['idotea', 'iodate', 'otidae'], + 'idryl': ['idryl', 'lyrid'], + 'idyl': ['idly', 'idyl'], + 'idyler': ['direly', 'idyler'], + 'ierne': ['ernie', 'ierne', 'irene'], + 'if': ['fi', 'if'], + 'ife': ['fei', 'fie', 'ife'], + 'igara': ['agria', 'igara'], + 'igdyr': ['igdyr', 'ridgy'], + 'igloo': ['igloo', 'logoi'], + 'ignatius': ['giustina', 'ignatius'], + 'igneoaqueous': ['aqueoigneous', 'igneoaqueous'], + 'ignicolist': ['ignicolist', 'soliciting'], + 'igniter': ['igniter', 'ringite', 'tigrine'], + 'ignitor': ['ignitor', 'rioting'], + 'ignoble': ['gobelin', 'gobline', 'ignoble', 'inglobe'], + 'ignoramus': ['graminous', 'ignoramus'], + 'ignorance': ['enorganic', 'ignorance'], + 'ignorant': ['ignorant', 'tongrian'], + 'ignore': ['ignore', 'region'], + 'ignorement': ['ignorement', 'omnigerent'], + 'iguana': ['guiana', 'iguana'], + 'ihlat': ['ihlat', 'tahil'], + 'ihram': ['hiram', 'ihram', 'mahri'], + 'ijma': ['ijma', 'jami'], + 'ikat': ['atik', 'ikat'], + 'ikona': ['ikona', 'konia'], + 'ikra': ['ikra', 'kari', 'raki'], + 'ila': ['ail', 'ila', 'lai'], + 'ileac': ['alice', 'celia', 'ileac'], + 'ileon': ['enoil', 'ileon', 'olein'], + 'iliac': ['cilia', 'iliac'], + 'iliacus': ['acilius', 'iliacus'], + 'ilian': ['ilian', 'inial'], + 'ilicaceae': ['caeciliae', 'ilicaceae'], + 'ilioischiac': ['ilioischiac', 'ischioiliac'], + 'iliosacral': ['iliosacral', 'oscillaria'], + 'ilk': ['ilk', 'kil'], + 'ilka': ['ilka', 'kail', 'kali'], + 'ilkane': ['alkine', 'ilkane', 'inlake', 'inleak'], + 'illative': ['illative', 'veiltail'], + 'illaudatory': ['illaudatory', 'laudatorily'], + 'illeck': ['ellick', 'illeck'], + 'illinois': ['illinois', 'illision'], + 'illision': ['illinois', 'illision'], + 'illium': ['illium', 'lilium'], + 'illoricated': ['illoricated', 'lacertiloid'], + 'illth': ['illth', 'thill'], + 'illude': ['dillue', 'illude'], + 'illuder': ['dilluer', 'illuder'], + 'illy': ['illy', 'lily', 'yill'], + 'ilmenite': ['ilmenite', 'melinite', 'menilite'], + 'ilongot': ['ilongot', 'tooling'], + 'ilot': ['ilot', 'toil'], + 'ilya': ['ilya', 'yali'], + 'ima': ['aim', 'ami', 'ima'], + 'imager': ['imager', 'maigre', 'margie', 'mirage'], + 'imaginant': ['animating', 'imaginant'], + 'imaginer': ['imaginer', 'migraine'], + 'imagist': ['imagist', 'stigmai'], + 'imago': ['amigo', 'imago'], + 'imam': ['ammi', 'imam', 'maim', 'mima'], + 'imaret': ['imaret', 'metria', 'mirate', 'rimate'], + 'imbarge': ['gambier', 'imbarge'], + 'imbark': ['bikram', 'imbark'], + 'imbat': ['ambit', 'imbat'], + 'imbed': ['bedim', 'imbed'], + 
'imbrue': ['erbium', 'imbrue'], + 'imbrute': ['burmite', 'imbrute', 'terbium'], + 'imer': ['emir', 'imer', 'mire', 'reim', 'remi', 'riem', 'rime'], + 'imerina': ['imerina', 'inermia'], + 'imitancy': ['imitancy', 'intimacy', 'minacity'], + 'immane': ['ammine', 'immane'], + 'immanes': ['amenism', 'immanes', 'misname'], + 'immaterials': ['immaterials', 'materialism'], + 'immerd': ['dimmer', 'immerd', 'rimmed'], + 'immersible': ['immersible', 'semilimber'], + 'immersion': ['immersion', 'semiminor'], + 'immi': ['immi', 'mimi'], + 'imogen': ['geonim', 'imogen'], + 'imolinda': ['dominial', 'imolinda', 'limoniad'], + 'imp': ['imp', 'pim'], + 'impaction': ['impaction', 'ptomainic'], + 'impages': ['impages', 'mispage'], + 'impaint': ['impaint', 'timpani'], + 'impair': ['impair', 'pamiri'], + 'impala': ['impala', 'malapi'], + 'impaler': ['impaler', 'impearl', 'lempira', 'premial'], + 'impalsy': ['impalsy', 'misplay'], + 'impane': ['impane', 'pieman'], + 'impanel': ['impanel', 'maniple'], + 'impar': ['impar', 'pamir', 'prima'], + 'imparalleled': ['demiparallel', 'imparalleled'], + 'imparl': ['imparl', 'primal'], + 'impart': ['armpit', 'impart'], + 'imparter': ['imparter', 'reimpart'], + 'impartial': ['impartial', 'primatial'], + 'impaste': ['impaste', 'pastime'], + 'impasture': ['impasture', 'septarium'], + 'impeach': ['aphemic', 'impeach'], + 'impearl': ['impaler', 'impearl', 'lempira', 'premial'], + 'impeder': ['demirep', 'epiderm', 'impeder', 'remiped'], + 'impedient': ['impedient', 'mendipite'], + 'impenetrable': ['impenetrable', 'intemperable'], + 'impenetrably': ['impenetrably', 'intemperably'], + 'impenetrate': ['impenetrate', 'intemperate'], + 'imperant': ['imperant', 'pairment', 'partimen', 'premiant', 'tripeman'], + 'imperate': ['imperate', 'premiate'], + 'imperish': ['emirship', 'imperish'], + 'imperscriptible': ['imperscriptible', 'imprescriptible'], + 'impersonate': ['impersonate', 'proseminate'], + 'impersonation': ['impersonation', 'prosemination', 'semipronation'], + 'impeticos': ['impeticos', 'poeticism'], + 'impetre': ['emptier', 'impetre'], + 'impetus': ['impetus', 'upsmite'], + 'imphee': ['imphee', 'phemie'], + 'implacental': ['capillament', 'implacental'], + 'implanter': ['implanter', 'reimplant'], + 'implate': ['implate', 'palmite'], + 'impleader': ['epidermal', 'impleader', 'premedial'], + 'implicate': ['ampelitic', 'implicate'], + 'impling': ['impling', 'limping'], + 'imply': ['imply', 'limpy', 'pilmy'], + 'impollute': ['impollute', 'multipole'], + 'imponderous': ['endosporium', 'imponderous'], + 'imponent': ['imponent', 'pimenton'], + 'importable': ['bitemporal', 'importable'], + 'importancy': ['importancy', 'patronymic', 'pyromantic'], + 'importer': ['importer', 'promerit', 'reimport'], + 'importunance': ['importunance', 'unimportance'], + 'importunate': ['importunate', 'permutation'], + 'importune': ['entropium', 'importune'], + 'imposal': ['imposal', 'spiloma'], + 'imposer': ['imposer', 'promise', 'semipro'], + 'imposter': ['imposter', 'tripsome'], + 'imposure': ['imposure', 'premious'], + 'imprecatory': ['cryptomeria', 'imprecatory'], + 'impreg': ['gimper', 'impreg'], + 'imprescriptible': ['imperscriptible', 'imprescriptible'], + 'imprese': ['emprise', 'imprese', 'premise', 'spireme'], + 'impress': ['impress', 'persism', 'premiss'], + 'impresser': ['impresser', 'reimpress'], + 'impressibility': ['impressibility', 'permissibility'], + 'impressible': ['impressible', 'permissible'], + 'impressibleness': ['impressibleness', 'permissibleness'], + 'impressibly': ['impressibly', 
'permissibly'], + 'impression': ['impression', 'permission'], + 'impressionism': ['impressionism', 'misimpression'], + 'impressive': ['impressive', 'permissive'], + 'impressively': ['impressively', 'permissively'], + 'impressiveness': ['impressiveness', 'permissiveness'], + 'impressure': ['impressure', 'presurmise'], + 'imprinter': ['imprinter', 'reimprint'], + 'imprisoner': ['imprisoner', 'reimprison'], + 'improcreant': ['improcreant', 'preromantic'], + 'impship': ['impship', 'pimpish'], + 'impuberal': ['epilabrum', 'impuberal'], + 'impugnable': ['impugnable', 'plumbagine'], + 'impure': ['impure', 'umpire'], + 'impuritan': ['impuritan', 'partinium'], + 'imputer': ['imputer', 'trumpie'], + 'imsonic': ['iconism', 'imsonic', 'miscoin'], + 'in': ['in', 'ni'], + 'inaction': ['aconitin', 'inaction', 'nicotian'], + 'inactivate': ['inactivate', 'vaticinate'], + 'inactivation': ['inactivation', 'vaticination'], + 'inactive': ['antivice', 'inactive', 'vineatic'], + 'inadept': ['depaint', 'inadept', 'painted', 'patined'], + 'inaja': ['inaja', 'jaina'], + 'inalimental': ['antimallein', 'inalimental'], + 'inamorata': ['amatorian', 'inamorata'], + 'inane': ['annie', 'inane'], + 'inanga': ['angina', 'inanga'], + 'inanimate': ['amanitine', 'inanimate'], + 'inanimated': ['diamantine', 'inanimated'], + 'inapt': ['inapt', 'paint', 'pinta'], + 'inaptly': ['inaptly', 'planity', 'ptyalin'], + 'inarch': ['chinar', 'inarch'], + 'inarm': ['inarm', 'minar'], + 'inasmuch': ['humanics', 'inasmuch'], + 'inaurate': ['inaurate', 'ituraean'], + 'inbe': ['beni', 'bien', 'bine', 'inbe'], + 'inbreak': ['brankie', 'inbreak'], + 'inbreathe': ['hibernate', 'inbreathe'], + 'inbred': ['binder', 'inbred', 'rebind'], + 'inbreed': ['birdeen', 'inbreed'], + 'inca': ['cain', 'inca'], + 'incaic': ['acinic', 'incaic'], + 'incarnate': ['cratinean', 'incarnate', 'nectarian'], + 'incase': ['casein', 'incase'], + 'incast': ['incast', 'nastic'], + 'incensation': ['incensation', 'inscenation'], + 'incept': ['incept', 'pectin'], + 'inceptor': ['inceptor', 'pretonic'], + 'inceration': ['cineration', 'inceration'], + 'incessant': ['anticness', 'cantiness', 'incessant'], + 'incest': ['encist', 'incest', 'insect', 'scient'], + 'inch': ['chin', 'inch'], + 'inched': ['chined', 'inched'], + 'inchoate': ['inchoate', 'noachite'], + 'incide': ['cindie', 'incide'], + 'incinerate': ['creatinine', 'incinerate'], + 'incisal': ['incisal', 'salicin'], + 'incision': ['incision', 'inosinic'], + 'incisure': ['incisure', 'sciurine'], + 'inciter': ['citrine', 'crinite', 'inciter', 'neritic'], + 'inclinatorium': ['anticlinorium', 'inclinatorium'], + 'inclosure': ['cornelius', 'inclosure', 'reclusion'], + 'include': ['include', 'nuclide'], + 'incluse': ['esculin', 'incluse'], + 'incog': ['coign', 'incog'], + 'incognito': ['cognition', 'incognito'], + 'incoherence': ['coinherence', 'incoherence'], + 'incoherent': ['coinherent', 'incoherent'], + 'incomeless': ['comeliness', 'incomeless'], + 'incomer': ['incomer', 'moneric'], + 'incomputable': ['incomputable', 'uncompatible'], + 'incondite': ['incondite', 'nicotined'], + 'inconglomerate': ['inconglomerate', 'nongeometrical'], + 'inconsistent': ['inconsistent', 'nonscientist'], + 'inconsonant': ['inconsonant', 'nonsanction'], + 'incontrovertibility': ['incontrovertibility', 'introconvertibility'], + 'incontrovertible': ['incontrovertible', 'introconvertible'], + 'incorporate': ['incorporate', 'procreation'], + 'incorporated': ['adrenotropic', 'incorporated'], + 'incorpse': ['conspire', 'incorpse'], + 'incrash': 
['archsin', 'incrash'], + 'increase': ['cerasein', 'increase'], + 'increate': ['aneretic', 'centiare', 'creatine', 'increate', 'iterance'], + 'incredited': ['incredited', 'indirected'], + 'increep': ['crepine', 'increep'], + 'increpate': ['anticreep', 'apenteric', 'increpate'], + 'increst': ['cistern', 'increst'], + 'incruental': ['incruental', 'unicentral'], + 'incrustant': ['incrustant', 'scrutinant'], + 'incrustate': ['incrustate', 'scaturient', 'scrutinate'], + 'incubate': ['cubanite', 'incubate'], + 'incudal': ['dulcian', 'incudal', 'lucanid', 'lucinda'], + 'incudomalleal': ['incudomalleal', 'malleoincudal'], + 'inculcation': ['anticouncil', 'inculcation'], + 'inculture': ['culturine', 'inculture'], + 'incuneation': ['enunciation', 'incuneation'], + 'incur': ['curin', 'incur', 'runic'], + 'incurable': ['binuclear', 'incurable'], + 'incus': ['incus', 'usnic'], + 'incut': ['cutin', 'incut', 'tunic'], + 'ind': ['din', 'ind', 'nid'], + 'indaba': ['badian', 'indaba'], + 'indagator': ['gradation', 'indagator', 'tanagroid'], + 'indan': ['indan', 'nandi'], + 'indane': ['aidenn', 'andine', 'dannie', 'indane'], + 'inde': ['dine', 'enid', 'inde', 'nide'], + 'indebt': ['bident', 'indebt'], + 'indebted': ['bidented', 'indebted'], + 'indefinitude': ['indefinitude', 'unidentified'], + 'indent': ['dentin', 'indent', 'intend', 'tinned'], + 'indented': ['indented', 'intended'], + 'indentedly': ['indentedly', 'intendedly'], + 'indenter': ['indenter', 'intender', 'reintend'], + 'indentment': ['indentment', 'intendment'], + 'indentured': ['indentured', 'underntide'], + 'indentwise': ['disentwine', 'indentwise'], + 'indeprivable': ['indeprivable', 'predivinable'], + 'indesert': ['indesert', 'inserted', 'resident'], + 'indiana': ['anidian', 'indiana'], + 'indic': ['dinic', 'indic'], + 'indican': ['cnidian', 'indican'], + 'indicate': ['diacetin', 'indicate'], + 'indicatory': ['dictionary', 'indicatory'], + 'indicial': ['anilidic', 'indicial'], + 'indicter': ['indicter', 'indirect', 'reindict'], + 'indies': ['indies', 'inside'], + 'indigena': ['gadinine', 'indigena'], + 'indigitate': ['indigitate', 'tingitidae'], + 'indign': ['dining', 'indign', 'niding'], + 'indigoferous': ['gonidiferous', 'indigoferous'], + 'indigotin': ['digitonin', 'indigotin'], + 'indirect': ['indicter', 'indirect', 'reindict'], + 'indirected': ['incredited', 'indirected'], + 'indirectly': ['cylindrite', 'indirectly'], + 'indiscreet': ['indiscreet', 'indiscrete', 'iridescent'], + 'indiscreetly': ['indiscreetly', 'indiscretely', 'iridescently'], + 'indiscrete': ['indiscreet', 'indiscrete', 'iridescent'], + 'indiscretely': ['indiscreetly', 'indiscretely', 'iridescently'], + 'indissolute': ['delusionist', 'indissolute'], + 'indite': ['indite', 'tineid'], + 'inditer': ['inditer', 'nitride'], + 'indogaean': ['ganoidean', 'indogaean'], + 'indole': ['doline', 'indole', 'leonid', 'loined', 'olenid'], + 'indolence': ['endocline', 'indolence'], + 'indoles': ['indoles', 'sondeli'], + 'indologist': ['indologist', 'nidologist'], + 'indology': ['indology', 'nidology'], + 'indone': ['donnie', 'indone', 'ondine'], + 'indoors': ['indoors', 'sordino'], + 'indorse': ['indorse', 'ordines', 'siredon', 'sordine'], + 'indra': ['darin', 'dinar', 'drain', 'indra', 'nadir', 'ranid'], + 'indrawn': ['indrawn', 'winnard'], + 'induce': ['induce', 'uniced'], + 'inducer': ['inducer', 'uncried'], + 'indulge': ['dueling', 'indulge'], + 'indulgential': ['dentilingual', 'indulgential', 'linguidental'], + 'indulger': ['indulger', 'ungirdle'], + 'indument': ['indument', 
'unminted'], + 'indurable': ['indurable', 'unbrailed', 'unridable'], + 'indurate': ['indurate', 'turdinae'], + 'induration': ['diurnation', 'induration'], + 'indus': ['dinus', 'indus', 'nidus'], + 'indusiform': ['disuniform', 'indusiform'], + 'induviae': ['induviae', 'viduinae'], + 'induvial': ['diluvian', 'induvial'], + 'inearth': ['anither', 'inearth', 'naither'], + 'inelastic': ['elasticin', 'inelastic', 'sciential'], + 'inelegant': ['eglantine', 'inelegant', 'legantine'], + 'ineludible': ['ineludible', 'unelidible'], + 'inept': ['inept', 'pinte'], + 'inequity': ['equinity', 'inequity'], + 'inerm': ['inerm', 'miner'], + 'inermes': ['ermines', 'inermes'], + 'inermia': ['imerina', 'inermia'], + 'inermous': ['inermous', 'monsieur'], + 'inert': ['inert', 'inter', 'niter', 'retin', 'trine'], + 'inertance': ['inertance', 'nectarine'], + 'inertial': ['inertial', 'linarite'], + 'inertly': ['elytrin', 'inertly', 'trinely'], + 'inethical': ['echinital', 'inethical'], + 'ineunt': ['ineunt', 'untine'], + 'inez': ['inez', 'zein'], + 'inface': ['fiance', 'inface'], + 'infame': ['famine', 'infame'], + 'infamy': ['infamy', 'manify'], + 'infarct': ['frantic', 'infarct', 'infract'], + 'infarction': ['infarction', 'infraction'], + 'infaust': ['faunist', 'fustian', 'infaust'], + 'infecter': ['frenetic', 'infecter', 'reinfect'], + 'infeed': ['define', 'infeed'], + 'infelt': ['finlet', 'infelt'], + 'infer': ['finer', 'infer'], + 'inferable': ['inferable', 'refinable'], + 'infern': ['finner', 'infern'], + 'inferoanterior': ['anteroinferior', 'inferoanterior'], + 'inferoposterior': ['inferoposterior', 'posteroinferior'], + 'infestation': ['festination', 'infestation', 'sinfonietta'], + 'infester': ['infester', 'reinfest'], + 'infidel': ['infidel', 'infield'], + 'infield': ['infidel', 'infield'], + 'inflame': ['feminal', 'inflame'], + 'inflamed': ['fieldman', 'inflamed'], + 'inflamer': ['inflamer', 'rifleman'], + 'inflatus': ['inflatus', 'stainful'], + 'inflicter': ['inflicter', 'reinflict'], + 'inform': ['formin', 'inform'], + 'informal': ['formalin', 'informal', 'laniform'], + 'informer': ['informer', 'reinform', 'reniform'], + 'infra': ['infra', 'irfan'], + 'infract': ['frantic', 'infarct', 'infract'], + 'infraction': ['infarction', 'infraction'], + 'infringe': ['infringe', 'refining'], + 'ing': ['gin', 'ing', 'nig'], + 'inga': ['gain', 'inga', 'naig', 'ngai'], + 'ingaevones': ['avignonese', 'ingaevones'], + 'ingate': ['eating', 'ingate', 'tangie'], + 'ingather': ['hearting', 'ingather'], + 'ingenue': ['genuine', 'ingenue'], + 'ingenuous': ['ingenuous', 'unigenous'], + 'inger': ['grein', 'inger', 'nigre', 'regin', 'reign', 'ringe'], + 'ingest': ['ingest', 'signet', 'stinge'], + 'ingesta': ['easting', + 'gainset', + 'genista', + 'ingesta', + 'seating', + 'signate', + 'teasing'], + 'ingle': ['ingle', 'ligne', 'linge', 'nigel'], + 'inglobe': ['gobelin', 'gobline', 'ignoble', 'inglobe'], + 'ingomar': ['ingomar', 'moringa', 'roaming'], + 'ingram': ['arming', 'ingram', 'margin'], + 'ingrate': ['angrite', 'granite', 'ingrate', 'tangier', 'tearing', 'tigrean'], + 'ingrow': ['ingrow', 'rowing'], + 'ingrowth': ['ingrowth', 'throwing'], + 'inguinal': ['inguinal', 'unailing'], + 'inguinocrural': ['cruroinguinal', 'inguinocrural'], + 'inhabiter': ['inhabiter', 'reinhabit'], + 'inhaler': ['hernial', 'inhaler'], + 'inhauler': ['herulian', 'inhauler'], + 'inhaust': ['auntish', 'inhaust'], + 'inhere': ['herein', 'inhere'], + 'inhumer': ['inhumer', 'rhenium'], + 'inial': ['ilian', 'inial'], + 'ink': ['ink', 'kin'], + 'inkle': 
['inkle', 'liken'], + 'inkless': ['inkless', 'kinless'], + 'inkling': ['inkling', 'linking'], + 'inknot': ['inknot', 'tonkin'], + 'inkra': ['inkra', 'krina', 'nakir', 'rinka'], + 'inks': ['inks', 'sink', 'skin'], + 'inlaid': ['anilid', 'dialin', 'dianil', 'inlaid'], + 'inlake': ['alkine', 'ilkane', 'inlake', 'inleak'], + 'inlaut': ['inlaut', 'unital'], + 'inlaw': ['inlaw', 'liwan'], + 'inlay': ['inlay', 'naily'], + 'inlayer': ['inlayer', 'nailery'], + 'inleak': ['alkine', 'ilkane', 'inlake', 'inleak'], + 'inlet': ['inlet', 'linet'], + 'inlook': ['inlook', 'koilon'], + 'inly': ['inly', 'liny'], + 'inmate': ['etamin', 'inmate', 'taimen', 'tamein'], + 'inmeats': ['atenism', 'inmeats', 'insteam', 'samnite'], + 'inmost': ['inmost', 'monist', 'omnist'], + 'innate': ['annite', 'innate', 'tinean'], + 'innative': ['innative', 'invinate'], + 'innatural': ['innatural', 'triannual'], + 'inner': ['inner', 'renin'], + 'innerve': ['innerve', 'nervine', 'vernine'], + 'innest': ['innest', 'sennit', 'sinnet', 'tennis'], + 'innet': ['innet', 'tinne'], + 'innominata': ['antinomian', 'innominata'], + 'innovate': ['innovate', 'venation'], + 'innovationist': ['innovationist', 'nonvisitation'], + 'ino': ['ino', 'ion'], + 'inobtainable': ['inobtainable', 'nonbilabiate'], + 'inocarpus': ['inocarpus', 'unprosaic'], + 'inoculant': ['continual', 'inoculant', 'unctional'], + 'inocystoma': ['actomyosin', 'inocystoma'], + 'inodes': ['deinos', 'donsie', 'inodes', 'onside'], + 'inogen': ['genion', 'inogen'], + 'inoma': ['amino', 'inoma', 'naomi', 'omani', 'omina'], + 'inomyxoma': ['inomyxoma', 'myxoinoma'], + 'inone': ['inone', 'oenin'], + 'inoperculata': ['inoperculata', 'precautional'], + 'inorb': ['biron', 'inorb', 'robin'], + 'inorganic': ['conringia', 'inorganic'], + 'inorganical': ['carolingian', 'inorganical'], + 'inornate': ['anointer', 'inornate', 'nonirate', 'reanoint'], + 'inosic': ['inosic', 'sinico'], + 'inosinic': ['incision', 'inosinic'], + 'inosite': ['inosite', 'sionite'], + 'inphase': ['inphase', 'phineas'], + 'inpush': ['inpush', 'punish', 'unship'], + 'input': ['input', 'punti'], + 'inquiet': ['inquiet', 'quinite'], + 'inreality': ['inreality', 'linearity'], + 'inro': ['inro', 'iron', 'noir', 'nori'], + 'inroad': ['dorian', 'inroad', 'ordain'], + 'inroader': ['inroader', 'ordainer', 'reordain'], + 'inrub': ['bruin', 'burin', 'inrub'], + 'inrun': ['inrun', 'inurn'], + 'insane': ['insane', 'sienna'], + 'insatiably': ['insatiably', 'sanability'], + 'inscenation': ['incensation', 'inscenation'], + 'inscient': ['inscient', 'nicenist'], + 'insculp': ['insculp', 'sculpin'], + 'insea': ['anise', 'insea', 'siena', 'sinae'], + 'inseam': ['asimen', 'inseam', 'mesian'], + 'insect': ['encist', 'incest', 'insect', 'scient'], + 'insectan': ['insectan', 'instance'], + 'insectile': ['insectile', 'selenitic'], + 'insectivora': ['insectivora', 'visceration'], + 'insecure': ['insecure', 'sinecure'], + 'insee': ['insee', 'seine'], + 'inseer': ['inseer', 'nereis', 'seiner', 'serine', 'sirene'], + 'insert': ['estrin', 'insert', 'sinter', 'sterin', 'triens'], + 'inserted': ['indesert', 'inserted', 'resident'], + 'inserter': ['inserter', 'reinsert'], + 'insessor': ['insessor', 'rosiness'], + 'inset': ['inset', 'neist', 'snite', 'stein', 'stine', 'tsine'], + 'insetter': ['insetter', 'interest', 'interset', 'sternite'], + 'inshave': ['evanish', 'inshave'], + 'inshoot': ['inshoot', 'insooth'], + 'inside': ['indies', 'inside'], + 'insider': ['insider', 'siderin'], + 'insistent': ['insistent', 'tintiness'], + 'insister': 
['insister', 'reinsist', 'sinister', 'sisterin'], + 'insole': ['insole', 'leonis', 'lesion', 'selion'], + 'insomnia': ['insomnia', 'simonian'], + 'insomniac': ['aniconism', 'insomniac'], + 'insooth': ['inshoot', 'insooth'], + 'insorb': ['insorb', 'sorbin'], + 'insoul': ['insoul', 'linous', 'nilous', 'unsoil'], + 'inspection': ['cispontine', 'inspection'], + 'inspiriter': ['inspiriter', 'reinspirit'], + 'inspissate': ['antisepsis', 'inspissate'], + 'inspreith': ['inspreith', 'nephritis', 'phrenitis'], + 'installer': ['installer', 'reinstall'], + 'instance': ['insectan', 'instance'], + 'instanter': ['instanter', 'transient'], + 'instar': ['instar', 'santir', 'strain'], + 'instate': ['atenist', 'instate', 'satient', 'steatin'], + 'instead': ['destain', 'instead', 'sainted', 'satined'], + 'insteam': ['atenism', 'inmeats', 'insteam', 'samnite'], + 'instep': ['instep', 'spinet'], + 'instiller': ['instiller', 'reinstill'], + 'instructer': ['instructer', 'intercrust', 'reinstruct'], + 'instructional': ['instructional', 'nonaltruistic'], + 'insula': ['insula', 'lanius', 'lusian'], + 'insulant': ['insulant', 'sultanin'], + 'insulse': ['insulse', 'silenus'], + 'insult': ['insult', 'sunlit', 'unlist', 'unslit'], + 'insulter': ['insulter', 'lustrine', 'reinsult'], + 'insunk': ['insunk', 'unskin'], + 'insurable': ['insurable', 'sublinear'], + 'insurance': ['insurance', 'nuisancer'], + 'insurant': ['insurant', 'unstrain'], + 'insure': ['insure', 'rusine', 'ursine'], + 'insurge': ['insurge', 'resuing'], + 'insurgent': ['insurgent', 'unresting'], + 'intactile': ['catlinite', 'intactile'], + 'intaglio': ['intaglio', 'ligation'], + 'intake': ['intake', 'kentia'], + 'intaker': ['intaker', 'katrine', 'keratin'], + 'intarsia': ['antiaris', 'intarsia'], + 'intarsiate': ['intarsiate', 'nestiatria'], + 'integral': ['integral', 'teraglin', 'triangle'], + 'integralize': ['gelatinizer', 'integralize'], + 'integrate': ['argentite', 'integrate'], + 'integrative': ['integrative', 'vertiginate', 'vinaigrette'], + 'integrious': ['grisounite', 'grisoutine', 'integrious'], + 'intemperable': ['impenetrable', 'intemperable'], + 'intemperably': ['impenetrably', 'intemperably'], + 'intemperate': ['impenetrate', 'intemperate'], + 'intemporal': ['intemporal', 'trampoline'], + 'intend': ['dentin', 'indent', 'intend', 'tinned'], + 'intended': ['indented', 'intended'], + 'intendedly': ['indentedly', 'intendedly'], + 'intender': ['indenter', 'intender', 'reintend'], + 'intendment': ['indentment', 'intendment'], + 'intense': ['intense', 'sennite'], + 'intent': ['intent', 'tinnet'], + 'intently': ['intently', 'nitently'], + 'inter': ['inert', 'inter', 'niter', 'retin', 'trine'], + 'interactional': ['interactional', 'intercalation'], + 'interagent': ['entreating', 'interagent'], + 'interally': ['interally', 'reliantly'], + 'interastral': ['interastral', 'intertarsal'], + 'intercalation': ['interactional', 'intercalation'], + 'intercale': ['intercale', 'interlace', 'lacertine', 'reclinate'], + 'intercede': ['intercede', 'tridecene'], + 'interceder': ['crednerite', 'interceder'], + 'intercession': ['intercession', 'recensionist'], + 'intercome': ['entomeric', 'intercome', 'morencite'], + 'interconal': ['interconal', 'nonrecital'], + 'intercrust': ['instructer', 'intercrust', 'reinstruct'], + 'interdome': ['interdome', 'mordenite', 'nemertoid'], + 'intereat': ['intereat', 'tinetare'], + 'interest': ['insetter', 'interest', 'interset', 'sternite'], + 'interester': ['interester', 'reinterest'], + 'interfering': ['interfering', 
'interfinger'], + 'interfinger': ['interfering', 'interfinger'], + 'intergrade': ['gradienter', 'intergrade'], + 'interim': ['interim', 'termini'], + 'interimistic': ['interimistic', 'trimesitinic'], + 'interlace': ['intercale', 'interlace', 'lacertine', 'reclinate'], + 'interlaced': ['credential', 'interlaced', 'reclinated'], + 'interlaid': ['deliriant', 'draintile', 'interlaid'], + 'interlap': ['interlap', 'repliant', 'triplane'], + 'interlapse': ['alpestrine', 'episternal', 'interlapse', 'presential'], + 'interlay': ['interlay', 'lyterian'], + 'interleaf': ['interleaf', 'reinflate'], + 'interleaver': ['interleaver', 'reverential'], + 'interlocal': ['citronella', 'interlocal'], + 'interlope': ['interlope', 'interpole', 'repletion', 'terpineol'], + 'interlot': ['interlot', 'trotline'], + 'intermat': ['intermat', 'martinet', 'tetramin'], + 'intermatch': ['intermatch', 'thermantic'], + 'intermine': ['intermine', 'nemertini', 'terminine'], + 'intermorainic': ['intermorainic', 'recrimination'], + 'intermutual': ['intermutual', 'ultraminute'], + 'intern': ['intern', 'tinner'], + 'internality': ['internality', 'itinerantly'], + 'internecive': ['internecive', 'reincentive'], + 'internee': ['internee', 'retinene'], + 'interoceptor': ['interoceptor', 'reprotection'], + 'interpause': ['interpause', 'resupinate'], + 'interpave': ['interpave', 'prenative'], + 'interpeal': ['interpeal', 'interplea'], + 'interpellate': ['interpellate', 'pantellerite'], + 'interpellation': ['interpellation', 'interpollinate'], + 'interphone': ['interphone', 'pinnothere'], + 'interplay': ['interplay', 'painterly'], + 'interplea': ['interpeal', 'interplea'], + 'interplead': ['interplead', 'peridental'], + 'interpolar': ['interpolar', 'reniportal'], + 'interpolate': ['interpolate', 'triantelope'], + 'interpole': ['interlope', 'interpole', 'repletion', 'terpineol'], + 'interpollinate': ['interpellation', 'interpollinate'], + 'interpone': ['interpone', 'peritenon', 'pinnotere', 'preintone'], + 'interposal': ['interposal', 'psalterion'], + 'interposure': ['interposure', 'neuropteris'], + 'interpreter': ['interpreter', 'reinterpret'], + 'interproduce': ['interproduce', 'prereduction'], + 'interroom': ['interroom', 'remontoir'], + 'interrupter': ['interrupter', 'reinterrupt'], + 'intersale': ['intersale', 'larsenite'], + 'intersectional': ['intersectional', 'intraselection'], + 'interset': ['insetter', 'interest', 'interset', 'sternite'], + 'intershade': ['dishearten', 'intershade'], + 'intersituate': ['intersituate', 'tenuistriate'], + 'intersocial': ['intersocial', 'orleanistic', 'sclerotinia'], + 'interspace': ['esperantic', 'interspace'], + 'interspecific': ['interspecific', 'prescientific'], + 'interspiration': ['interspiration', 'repristination'], + 'intersporal': ['intersporal', 'tripersonal'], + 'interstation': ['interstation', 'strontianite'], + 'intertalk': ['intertalk', 'latterkin'], + 'intertarsal': ['interastral', 'intertarsal'], + 'interteam': ['antimeter', 'attermine', 'interteam', 'terminate', 'tetramine'], + 'intertie': ['intertie', 'retinite'], + 'intertone': ['intertone', 'retention'], + 'intervascular': ['intervascular', 'vernacularist'], + 'intervention': ['intervention', 'introvenient'], + 'interverbal': ['interverbal', 'invertebral'], + 'interviewer': ['interviewer', 'reinterview'], + 'interwed': ['interwed', 'wintered'], + 'interwish': ['interwish', 'winterish'], + 'interwork': ['interwork', 'tinworker'], + 'interwove': ['interwove', 'overtwine'], + 'intestate': ['enstatite', 'intestate', 'satinette'], + 
'intestinovesical': ['intestinovesical', 'vesicointestinal'], + 'inthrong': ['inthrong', 'northing'], + 'intima': ['intima', 'timani'], + 'intimacy': ['imitancy', 'intimacy', 'minacity'], + 'intimater': ['intimater', 'traintime'], + 'into': ['into', 'nito', 'oint', 'tino'], + 'intoed': ['ditone', 'intoed'], + 'intolerance': ['crenelation', 'intolerance'], + 'intolerating': ['intolerating', 'nitrogelatin'], + 'intonate': ['intonate', 'totanine'], + 'intonator': ['intonator', 'tortonian'], + 'intone': ['intone', 'tenino'], + 'intonement': ['intonement', 'omnitenent'], + 'intoner': ['intoner', 'ternion'], + 'intort': ['intort', 'tornit', 'triton'], + 'intoxicate': ['excitation', 'intoxicate'], + 'intracoelomic': ['iconometrical', 'intracoelomic'], + 'intracosmic': ['intracosmic', 'narcoticism'], + 'intracostal': ['intracostal', 'stratonical'], + 'intractile': ['intractile', 'triclinate'], + 'intrada': ['intrada', 'radiant'], + 'intraselection': ['intersectional', 'intraselection'], + 'intraseptal': ['intraseptal', 'paternalist', 'prenatalist'], + 'intraspinal': ['intraspinal', 'pinnitarsal'], + 'intreat': ['intreat', 'iterant', 'nitrate', 'tertian'], + 'intrencher': ['intrencher', 'reintrench'], + 'intricate': ['intricate', 'triactine'], + 'intrication': ['citrination', 'intrication'], + 'intrigue': ['intrigue', 'tigurine'], + 'introconvertibility': ['incontrovertibility', 'introconvertibility'], + 'introconvertible': ['incontrovertible', 'introconvertible'], + 'introduce': ['introduce', 'reduction'], + 'introit': ['introit', 'nitriot'], + 'introitus': ['introitus', 'routinist'], + 'introvenient': ['intervention', 'introvenient'], + 'intrude': ['intrude', 'turdine', 'untired', 'untried'], + 'intruse': ['intruse', 'sturine'], + 'intrust': ['intrust', 'sturtin'], + 'intube': ['butein', 'butine', 'intube'], + 'intue': ['intue', 'unite', 'untie'], + 'inula': ['inula', 'luian', 'uinal'], + 'inurbane': ['eburnian', 'inurbane'], + 'inure': ['inure', 'urine'], + 'inured': ['diurne', 'inured', 'ruined', 'unride'], + 'inurn': ['inrun', 'inurn'], + 'inustion': ['inustion', 'unionist'], + 'invader': ['invader', 'ravined', 'viander'], + 'invaluable': ['invaluable', 'unvailable'], + 'invar': ['invar', 'ravin', 'vanir'], + 'invector': ['contrive', 'invector'], + 'inveigler': ['inveigler', 'relieving'], + 'inventer': ['inventer', 'reinvent', 'ventrine', 'vintener'], + 'inventress': ['inventress', 'vintneress'], + 'inverness': ['inverness', 'nerviness'], + 'inversatile': ['inversatile', 'serviential'], + 'inverse': ['inverse', 'versine'], + 'invert': ['invert', 'virent'], + 'invertase': ['invertase', 'servetian'], + 'invertebral': ['interverbal', 'invertebral'], + 'inverter': ['inverter', 'reinvert', 'trinerve'], + 'investigation': ['investigation', 'tenovaginitis'], + 'invinate': ['innative', 'invinate'], + 'inviter': ['inviter', 'vitrine'], + 'invocate': ['conative', 'invocate'], + 'invoker': ['invoker', 'overink'], + 'involucrate': ['countervail', 'involucrate'], + 'involucre': ['involucre', 'volucrine'], + 'inwards': ['inwards', 'sinward'], + 'inwith': ['inwith', 'within'], + 'iodate': ['idotea', 'iodate', 'otidae'], + 'iodhydrate': ['hydriodate', 'iodhydrate'], + 'iodhydric': ['hydriodic', 'iodhydric'], + 'iodinate': ['ideation', 'iodinate', 'taenioid'], + 'iodinium': ['iodinium', 'ionidium'], + 'iodism': ['idoism', 'iodism'], + 'iodite': ['iodite', 'teioid'], + 'iodo': ['iodo', 'ooid'], + 'iodocasein': ['iodocasein', 'oniscoidea'], + 'iodochloride': ['chloroiodide', 'iodochloride'], + 'iodohydric': 
['hydroiodic', 'iodohydric'], + 'iodol': ['dooli', 'iodol'], + 'iodothyrin': ['iodothyrin', 'thyroiodin'], + 'iodous': ['iodous', 'odious'], + 'ion': ['ino', 'ion'], + 'ionidium': ['iodinium', 'ionidium'], + 'ionizer': ['ionizer', 'ironize'], + 'iota': ['iota', 'tiao'], + 'iotacist': ['iotacist', 'taoistic'], + 'ipecac': ['icecap', 'ipecac'], + 'ipil': ['ipil', 'pili'], + 'ipseand': ['ipseand', 'panside', 'pansied'], + 'ira': ['air', 'ira', 'ria'], + 'iracund': ['candiru', 'iracund'], + 'irade': ['aider', 'deair', 'irade', 'redia'], + 'iran': ['arni', 'iran', 'nair', 'rain', 'rani'], + 'irani': ['irani', 'irian'], + 'iranism': ['iranism', 'sirmian'], + 'iranist': ['iranist', 'istrian'], + 'irascent': ['canister', 'cestrian', 'cisterna', 'irascent'], + 'irate': ['arite', 'artie', 'irate', 'retia', 'tarie'], + 'irately': ['irately', 'reality'], + 'ire': ['ire', 'rie'], + 'irena': ['erian', 'irena', 'reina'], + 'irene': ['ernie', 'ierne', 'irene'], + 'irenic': ['irenic', 'ricine'], + 'irenics': ['irenics', 'resinic', 'sericin', 'sirenic'], + 'irenicum': ['irenicum', 'muricine'], + 'iresine': ['iresine', 'iserine'], + 'irfan': ['infra', 'irfan'], + 'irgun': ['irgun', 'ruing', 'unrig'], + 'irian': ['irani', 'irian'], + 'iridal': ['iridal', 'lariid'], + 'iridate': ['arietid', 'iridate'], + 'iridectomy': ['iridectomy', 'mediocrity'], + 'irides': ['irides', 'irised'], + 'iridescent': ['indiscreet', 'indiscrete', 'iridescent'], + 'iridescently': ['indiscreetly', 'indiscretely', 'iridescently'], + 'iridosmium': ['iridosmium', 'osmiridium'], + 'irised': ['irides', 'irised'], + 'irish': ['irish', 'rishi', 'sirih'], + 'irk': ['irk', 'rik'], + 'irma': ['amir', 'irma', 'mari', 'mira', 'rami', 'rima'], + 'iroha': ['haori', 'iroha'], + 'irok': ['irok', 'kori'], + 'iron': ['inro', 'iron', 'noir', 'nori'], + 'ironclad': ['ironclad', 'rolandic'], + 'irone': ['irone', 'norie'], + 'ironhead': ['herodian', 'ironhead'], + 'ironice': ['ironice', 'oneiric'], + 'ironize': ['ionizer', 'ironize'], + 'ironshod': ['dishonor', 'ironshod'], + 'ironside': ['derision', 'ironside', 'resinoid', 'sirenoid'], + 'irradiant': ['irradiant', 'triandria'], + 'irrationable': ['irrationable', 'orbitelarian'], + 'irredenta': ['irredenta', 'retainder'], + 'irrelate': ['irrelate', 'retailer'], + 'irrepentance': ['irrepentance', 'pretercanine'], + 'irving': ['irving', 'riving', 'virgin'], + 'irvingiana': ['irvingiana', 'viraginian'], + 'is': ['is', 'si'], + 'isabel': ['isabel', 'lesbia'], + 'isabella': ['isabella', 'sailable'], + 'isagogical': ['isagogical', 'sialagogic'], + 'isagon': ['gosain', 'isagon', 'sagoin'], + 'isander': ['andries', 'isander', 'sardine'], + 'isanthous': ['anhistous', 'isanthous'], + 'isatate': ['isatate', 'satiate', 'taetsia'], + 'isatic': ['isatic', 'saitic'], + 'isatin': ['antisi', 'isatin'], + 'isatinic': ['isatinic', 'sinaitic'], + 'isaurian': ['anisuria', 'isaurian'], + 'isawa': ['isawa', 'waasi'], + 'isba': ['absi', 'bais', 'bias', 'isba'], + 'iscariot': ['aoristic', 'iscariot'], + 'ischemia': ['hemiasci', 'ischemia'], + 'ischioiliac': ['ilioischiac', 'ischioiliac'], + 'ischiorectal': ['ischiorectal', 'sciotherical'], + 'iserine': ['iresine', 'iserine'], + 'iseum': ['iseum', 'musie'], + 'isiac': ['ascii', 'isiac'], + 'isidore': ['isidore', 'osiride'], + 'isis': ['isis', 'sisi'], + 'islam': ['islam', 'ismal', 'simal'], + 'islamic': ['islamic', 'laicism', 'silicam'], + 'islamitic': ['islamitic', 'italicism'], + 'islandy': ['islandy', 'lindsay'], + 'islay': ['islay', 'saily'], + 'isle': ['isle', 'lise', 
'sile'], + 'islet': ['islet', 'istle', 'slite', 'stile'], + 'isleta': ['isleta', 'litsea', 'salite', 'stelai'], + 'isleted': ['idleset', 'isleted'], + 'ism': ['ism', 'sim'], + 'ismal': ['islam', 'ismal', 'simal'], + 'ismatic': ['ismatic', 'itacism'], + 'ismatical': ['ismatical', 'lamaistic'], + 'isocamphor': ['chromopsia', 'isocamphor'], + 'isoclinal': ['collinsia', 'isoclinal'], + 'isocline': ['isocline', 'silicone'], + 'isocoumarin': ['acrimonious', 'isocoumarin'], + 'isodulcite': ['isodulcite', 'solicitude'], + 'isogen': ['geison', 'isogen'], + 'isogeotherm': ['geoisotherm', 'isogeotherm'], + 'isogon': ['isogon', 'songoi'], + 'isogram': ['isogram', 'orgiasm'], + 'isohel': ['helios', 'isohel'], + 'isoheptane': ['apothesine', 'isoheptane'], + 'isolate': ['aeolist', 'isolate'], + 'isolated': ['diastole', 'isolated', 'sodalite', 'solidate'], + 'isolative': ['isolative', 'soliative'], + 'isolde': ['isolde', 'soiled'], + 'isomer': ['isomer', 'rimose'], + 'isometric': ['eroticism', 'isometric', 'meroistic', 'trioecism'], + 'isomorph': ['isomorph', 'moorship'], + 'isonitrile': ['isonitrile', 'resilition'], + 'isonym': ['isonym', 'myosin', 'simony'], + 'isophthalyl': ['isophthalyl', 'lithophysal'], + 'isopodan': ['anisopod', 'isopodan'], + 'isoptera': ['isoptera', 'septoria'], + 'isosaccharic': ['isosaccharic', 'sacroischiac'], + 'isostere': ['erotesis', 'isostere'], + 'isotac': ['isotac', 'scotia'], + 'isotheral': ['horsetail', 'isotheral'], + 'isotherm': ['homerist', 'isotherm', 'otherism', 'theorism'], + 'isotria': ['isotria', 'oaritis'], + 'isotron': ['isotron', 'torsion'], + 'isotrope': ['isotrope', 'portoise'], + 'isotropism': ['isotropism', 'promitosis'], + 'isotropy': ['isotropy', 'porosity'], + 'israel': ['israel', 'relais', 'resail', 'sailer', 'serail', 'serial'], + 'israeli': ['alisier', 'israeli'], + 'israelite': ['israelite', 'resiliate'], + 'issuable': ['basileus', 'issuable', 'suasible'], + 'issuant': ['issuant', 'sustain'], + 'issue': ['issue', 'susie'], + 'issuer': ['issuer', 'uresis'], + 'ist': ['ist', 'its', 'sit'], + 'isthmi': ['isthmi', 'timish'], + 'isthmian': ['isthmian', 'smithian'], + 'isthmoid': ['isthmoid', 'thomisid'], + 'istle': ['islet', 'istle', 'slite', 'stile'], + 'istrian': ['iranist', 'istrian'], + 'isuret': ['isuret', 'resuit'], + 'it': ['it', 'ti'], + 'ita': ['ait', 'ati', 'ita', 'tai'], + 'itacism': ['ismatic', 'itacism'], + 'itaconate': ['acetation', 'itaconate'], + 'itaconic': ['aconitic', 'cationic', 'itaconic'], + 'itali': ['itali', 'tilia'], + 'italian': ['antilia', 'italian'], + 'italic': ['clitia', 'italic'], + 'italicism': ['islamitic', 'italicism'], + 'italite': ['italite', 'letitia', 'tilaite'], + 'italon': ['italon', 'lation', 'talion'], + 'itaves': ['itaves', 'stevia'], + 'itch': ['chit', 'itch', 'tchi'], + 'item': ['emit', 'item', 'mite', 'time'], + 'iten': ['iten', 'neti', 'tien', 'tine'], + 'itenean': ['aniente', 'itenean'], + 'iter': ['iter', 'reit', 'rite', 'teri', 'tier', 'tire'], + 'iterable': ['iterable', 'liberate'], + 'iterance': ['aneretic', 'centiare', 'creatine', 'increate', 'iterance'], + 'iterant': ['intreat', 'iterant', 'nitrate', 'tertian'], + 'ithaca': ['cahita', 'ithaca'], + 'ithacan': ['ithacan', 'tachina'], + 'ither': ['ither', 'their'], + 'itinerant': ['itinerant', 'nitratine'], + 'itinerantly': ['internality', 'itinerantly'], + 'itmo': ['itmo', 'moit', 'omit', 'timo'], + 'ito': ['ito', 'toi'], + 'itoism': ['itoism', 'omitis'], + 'itoist': ['itoist', 'otitis'], + 'itoland': ['itoland', 'talonid', 'tindalo'], + 'itonama': 
['amniota', 'itonama'], + 'itonia': ['aition', 'itonia'], + 'its': ['ist', 'its', 'sit'], + 'itself': ['itself', 'stifle'], + 'ituraean': ['inaurate', 'ituraean'], + 'itza': ['itza', 'tiza', 'zati'], + 'iva': ['iva', 'vai', 'via'], + 'ivan': ['ivan', 'vain', 'vina'], + 'ivorist': ['ivorist', 'visitor'], + 'iwaiwa': ['iwaiwa', 'waiwai'], + 'ixiama': ['amixia', 'ixiama'], + 'ixodic': ['ixodic', 'oxidic'], + 'iyo': ['iyo', 'yoi'], + 'izar': ['izar', 'zira'], + 'jacami': ['jacami', 'jicama'], + 'jacobian': ['bajocian', 'jacobian'], + 'jag': ['gaj', 'jag'], + 'jagir': ['jagir', 'jirga'], + 'jagua': ['ajuga', 'jagua'], + 'jail': ['jail', 'lija'], + 'jailer': ['jailer', 'rejail'], + 'jaime': ['jaime', 'jamie'], + 'jain': ['jain', 'jina'], + 'jaina': ['inaja', 'jaina'], + 'jalouse': ['jalouse', 'jealous'], + 'jama': ['jama', 'maja'], + 'jamesian': ['jamesian', 'jamesina'], + 'jamesina': ['jamesian', 'jamesina'], + 'jami': ['ijma', 'jami'], + 'jamie': ['jaime', 'jamie'], + 'jane': ['jane', 'jean'], + 'janos': ['janos', 'jason', 'jonas', 'sonja'], + 'jantu': ['jantu', 'jaunt', 'junta'], + 'januslike': ['januslike', 'seljukian'], + 'japonism': ['japonism', 'pajonism'], + 'jar': ['jar', 'raj'], + 'jara': ['ajar', 'jara', 'raja'], + 'jarmo': ['jarmo', 'major'], + 'jarnut': ['jarnut', 'jurant'], + 'jason': ['janos', 'jason', 'jonas', 'sonja'], + 'jat': ['jat', 'taj'], + 'jatki': ['jatki', 'tajik'], + 'jato': ['jato', 'jota'], + 'jaun': ['jaun', 'juan'], + 'jaunt': ['jantu', 'jaunt', 'junta'], + 'jaup': ['jaup', 'puja'], + 'jealous': ['jalouse', 'jealous'], + 'jean': ['jane', 'jean'], + 'jebusitical': ['jebusitical', 'justiciable'], + 'jecoral': ['cajoler', 'jecoral'], + 'jeffery': ['jeffery', 'jeffrey'], + 'jeffrey': ['jeffery', 'jeffrey'], + 'jejunoduodenal': ['duodenojejunal', 'jejunoduodenal'], + 'jenine': ['jenine', 'jennie'], + 'jennie': ['jenine', 'jennie'], + 'jerker': ['jerker', 'rejerk'], + 'jerkin': ['jerkin', 'jinker'], + 'jeziah': ['hejazi', 'jeziah'], + 'jicama': ['jacami', 'jicama'], + 'jihad': ['hadji', 'jihad'], + 'jina': ['jain', 'jina'], + 'jingoist': ['jingoist', 'joisting'], + 'jinker': ['jerkin', 'jinker'], + 'jirga': ['jagir', 'jirga'], + 'jobo': ['bojo', 'jobo'], + 'johan': ['johan', 'jonah'], + 'join': ['join', 'joni'], + 'joinant': ['joinant', 'jotnian'], + 'joiner': ['joiner', 'rejoin'], + 'jointless': ['jointless', 'joltiness'], + 'joisting': ['jingoist', 'joisting'], + 'jolter': ['jolter', 'rejolt'], + 'joltiness': ['jointless', 'joltiness'], + 'jonah': ['johan', 'jonah'], + 'jonas': ['janos', 'jason', 'jonas', 'sonja'], + 'joni': ['join', 'joni'], + 'joom': ['joom', 'mojo'], + 'joshi': ['joshi', 'shoji'], + 'jota': ['jato', 'jota'], + 'jotnian': ['joinant', 'jotnian'], + 'journeyer': ['journeyer', 'rejourney'], + 'joust': ['joust', 'justo'], + 'juan': ['jaun', 'juan'], + 'judaic': ['judaic', 'judica'], + 'judica': ['judaic', 'judica'], + 'jujitsu': ['jujitsu', 'jujuist'], + 'jujuist': ['jujitsu', 'jujuist'], + 'junta': ['jantu', 'jaunt', 'junta'], + 'jurant': ['jarnut', 'jurant'], + 'justiciable': ['jebusitical', 'justiciable'], + 'justo': ['joust', 'justo'], + 'jute': ['jute', 'teju'], + 'ka': ['ak', 'ka'], + 'kabel': ['blake', 'bleak', 'kabel'], + 'kaberu': ['kaberu', 'kubera'], + 'kabuli': ['kabuli', 'kiluba'], + 'kabyle': ['bleaky', 'kabyle'], + 'kachari': ['chakari', 'chikara', 'kachari'], + 'kachin': ['hackin', 'kachin'], + 'kafir': ['fakir', 'fraik', 'kafir', 'rafik'], + 'kaha': ['akha', 'kaha'], + 'kahar': ['harka', 'kahar'], + 'kahu': ['haku', 'kahu'], + 'kaid': 
['dika', 'kaid'], + 'kaik': ['kaik', 'kaki'], + 'kail': ['ilka', 'kail', 'kali'], + 'kainga': ['kainga', 'kanagi'], + 'kaiwi': ['kaiwi', 'kiwai'], + 'kaka': ['akka', 'kaka'], + 'kaki': ['kaik', 'kaki'], + 'kala': ['akal', 'kala'], + 'kalamian': ['kalamian', 'malikana'], + 'kaldani': ['danakil', 'dankali', 'kaldani', 'ladakin'], + 'kale': ['kale', 'lake', 'leak'], + 'kali': ['ilka', 'kail', 'kali'], + 'kalo': ['kalo', 'kola', 'loka'], + 'kamansi': ['kamansi', 'kamasin'], + 'kamares': ['kamares', 'seamark'], + 'kamasin': ['kamansi', 'kamasin'], + 'kame': ['kame', 'make', 'meak'], + 'kamel': ['kamel', 'kemal'], + 'kamiya': ['kamiya', 'yakima'], + 'kan': ['kan', 'nak'], + 'kana': ['akan', 'kana'], + 'kanagi': ['kainga', 'kanagi'], + 'kanap': ['kanap', 'panak'], + 'kanat': ['kanat', 'tanak', 'tanka'], + 'kande': ['kande', 'knead', 'naked'], + 'kang': ['kang', 'knag'], + 'kanga': ['angka', 'kanga'], + 'kangani': ['kangani', 'kiangan'], + 'kangli': ['kangli', 'laking'], + 'kanred': ['darken', 'kanred', 'ranked'], + 'kans': ['kans', 'sank'], + 'kaolin': ['ankoli', 'kaolin'], + 'karch': ['chark', 'karch'], + 'karel': ['karel', 'laker'], + 'karen': ['anker', 'karen', 'naker'], + 'kari': ['ikra', 'kari', 'raki'], + 'karite': ['arkite', 'karite'], + 'karl': ['karl', 'kral', 'lark'], + 'karling': ['karling', 'larking'], + 'karma': ['karma', 'krama', 'marka'], + 'karo': ['karo', 'kora', 'okra', 'roka'], + 'karree': ['karree', 'rerake'], + 'karst': ['karst', 'skart', 'stark'], + 'karstenite': ['karstenite', 'kersantite'], + 'kartel': ['kartel', 'retalk', 'talker'], + 'kasa': ['asak', 'kasa', 'saka'], + 'kasbah': ['abkhas', 'kasbah'], + 'kasha': ['kasha', 'khasa', 'sakha', 'shaka'], + 'kashan': ['kashan', 'sankha'], + 'kasher': ['kasher', 'shaker'], + 'kashi': ['kashi', 'khasi'], + 'kasm': ['kasm', 'mask'], + 'katar': ['katar', 'takar'], + 'kate': ['kate', 'keta', 'take', 'teak'], + 'kath': ['kath', 'khat'], + 'katharsis': ['katharsis', 'shastraik'], + 'katie': ['katie', 'keita'], + 'katik': ['katik', 'tikka'], + 'katrine': ['intaker', 'katrine', 'keratin'], + 'katy': ['katy', 'kyat', 'taky'], + 'kavass': ['kavass', 'vakass'], + 'kavi': ['kavi', 'kiva'], + 'kay': ['kay', 'yak'], + 'kayak': ['kayak', 'yakka'], + 'kayan': ['kayan', 'yakan'], + 'kayo': ['kayo', 'oaky'], + 'kea': ['ake', 'kea'], + 'keach': ['cheka', 'keach'], + 'keawe': ['aweek', 'keawe'], + 'kechel': ['heckle', 'kechel'], + 'kedar': ['daker', 'drake', 'kedar', 'radek'], + 'kee': ['eke', 'kee'], + 'keech': ['cheek', 'cheke', 'keech'], + 'keel': ['keel', 'kele', 'leek'], + 'keen': ['keen', 'knee'], + 'keena': ['aknee', 'ankee', 'keena'], + 'keep': ['keep', 'peek'], + 'keepership': ['keepership', 'shipkeeper'], + 'kees': ['kees', 'seek', 'skee'], + 'keest': ['keest', 'skeet', 'skete', 'steek'], + 'kefir': ['frike', 'kefir'], + 'keid': ['dike', 'keid'], + 'keita': ['katie', 'keita'], + 'keith': ['keith', 'kithe'], + 'keitloa': ['keitloa', 'oatlike'], + 'kelchin': ['chinkle', 'kelchin'], + 'kele': ['keel', 'kele', 'leek'], + 'kelima': ['kelima', 'mikael'], + 'kelpie': ['kelpie', 'pelike'], + 'kelty': ['kelty', 'ketyl'], + 'kemal': ['kamel', 'kemal'], + 'kemalist': ['kemalist', 'mastlike'], + 'kenareh': ['hearken', 'kenareh'], + 'kennel': ['kennel', 'nelken'], + 'kenotic': ['kenotic', 'ketonic'], + 'kent': ['kent', 'knet'], + 'kentia': ['intake', 'kentia'], + 'kenton': ['kenton', 'nekton'], + 'kepi': ['kepi', 'kipe', 'pike'], + 'keralite': ['keralite', 'tearlike'], + 'kerasin': ['kerasin', 'sarkine'], + 'kerat': ['kerat', 'taker'], + 'keratin': 
['intaker', 'katrine', 'keratin'], + 'keratoangioma': ['angiokeratoma', 'keratoangioma'], + 'keratosis': ['asterikos', 'keratosis'], + 'keres': ['esker', 'keres', 'reesk', 'seker', 'skeer', 'skere'], + 'keresan': ['keresan', 'sneaker'], + 'kerewa': ['kerewa', 'rewake'], + 'kerf': ['ferk', 'kerf'], + 'kern': ['kern', 'renk'], + 'kersantite': ['karstenite', 'kersantite'], + 'kersey': ['kersey', 'skeery'], + 'kestrel': ['kestrel', 'skelter'], + 'keta': ['kate', 'keta', 'take', 'teak'], + 'ketene': ['ektene', 'ketene'], + 'keto': ['keto', 'oket', 'toke'], + 'ketol': ['ketol', 'loket'], + 'ketonic': ['kenotic', 'ketonic'], + 'ketu': ['ketu', 'teuk', 'tuke'], + 'ketupa': ['ketupa', 'uptake'], + 'ketyl': ['kelty', 'ketyl'], + 'keup': ['keup', 'puke'], + 'keuper': ['keuper', 'peruke'], + 'kevan': ['kevan', 'knave'], + 'kha': ['hak', 'kha'], + 'khami': ['hakim', 'khami'], + 'khan': ['ankh', 'hank', 'khan'], + 'khar': ['hark', 'khar', 'rakh'], + 'khasa': ['kasha', 'khasa', 'sakha', 'shaka'], + 'khasi': ['kashi', 'khasi'], + 'khat': ['kath', 'khat'], + 'khatib': ['bhakti', 'khatib'], + 'khila': ['khila', 'kilah'], + 'khu': ['huk', 'khu'], + 'khula': ['khula', 'kulah'], + 'kiangan': ['kangani', 'kiangan'], + 'kibe': ['bike', 'kibe'], + 'kicker': ['kicker', 'rekick'], + 'kickout': ['kickout', 'outkick'], + 'kidney': ['dinkey', 'kidney'], + 'kids': ['disk', 'kids', 'skid'], + 'kiel': ['kiel', 'like'], + 'kier': ['erik', 'kier', 'reki'], + 'kiku': ['kiku', 'kuki'], + 'kikumon': ['kikumon', 'kokumin'], + 'kil': ['ilk', 'kil'], + 'kilah': ['khila', 'kilah'], + 'kiliare': ['airlike', 'kiliare'], + 'killcalf': ['calfkill', 'killcalf'], + 'killer': ['killer', 'rekill'], + 'kiln': ['kiln', 'link'], + 'kilnman': ['kilnman', 'linkman'], + 'kilo': ['kilo', 'koil', 'koli'], + 'kilp': ['kilp', 'klip'], + 'kilter': ['kilter', 'kirtle'], + 'kilting': ['kilting', 'kitling'], + 'kiluba': ['kabuli', 'kiluba'], + 'kimberlite': ['kimberlite', 'timberlike'], + 'kimnel': ['kimnel', 'milken'], + 'kin': ['ink', 'kin'], + 'kina': ['akin', 'kina', 'naik'], + 'kinase': ['kinase', 'sekani'], + 'kinch': ['chink', 'kinch'], + 'kind': ['dink', 'kind'], + 'kindle': ['kindle', 'linked'], + 'kinetomer': ['kinetomer', 'konimeter'], + 'king': ['gink', 'king'], + 'kingcob': ['bocking', 'kingcob'], + 'kingpin': ['kingpin', 'pinking'], + 'kingrow': ['kingrow', 'working'], + 'kinless': ['inkless', 'kinless'], + 'kinship': ['kinship', 'pinkish'], + 'kioko': ['kioko', 'kokio'], + 'kip': ['kip', 'pik'], + 'kipe': ['kepi', 'kipe', 'pike'], + 'kirk': ['kirk', 'rikk'], + 'kirktown': ['kirktown', 'knitwork'], + 'kirn': ['kirn', 'rink'], + 'kirsten': ['kirsten', 'kristen', 'stinker'], + 'kirsty': ['kirsty', 'skirty'], + 'kirtle': ['kilter', 'kirtle'], + 'kirve': ['kirve', 'kiver'], + 'kish': ['kish', 'shik', 'sikh'], + 'kishen': ['kishen', 'neskhi'], + 'kisra': ['kisra', 'sikar', 'skair'], + 'kissar': ['kissar', 'krasis'], + 'kisser': ['kisser', 'rekiss'], + 'kist': ['kist', 'skit'], + 'kistful': ['kistful', 'lutfisk'], + 'kitab': ['batik', 'kitab'], + 'kitan': ['kitan', 'takin'], + 'kitar': ['kitar', 'krait', 'rakit', 'traik'], + 'kitchen': ['kitchen', 'thicken'], + 'kitchener': ['kitchener', 'rethicken', 'thickener'], + 'kithe': ['keith', 'kithe'], + 'kitling': ['kilting', 'kitling'], + 'kitlope': ['kitlope', 'potlike', 'toplike'], + 'kittel': ['kittel', 'kittle'], + 'kittle': ['kittel', 'kittle'], + 'kittles': ['kittles', 'skittle'], + 'kiva': ['kavi', 'kiva'], + 'kiver': ['kirve', 'kiver'], + 'kiwai': ['kaiwi', 'kiwai'], + 'klan': ['klan', 
'lank'], + 'klanism': ['klanism', 'silkman'], + 'klaus': ['klaus', 'lukas', 'sulka'], + 'kleistian': ['kleistian', 'saintlike', 'satinlike'], + 'klendusic': ['klendusic', 'unsickled'], + 'kling': ['glink', 'kling'], + 'klip': ['kilp', 'klip'], + 'klop': ['klop', 'polk'], + 'knab': ['bank', 'knab', 'nabk'], + 'knag': ['kang', 'knag'], + 'knap': ['knap', 'pank'], + 'knape': ['knape', 'pekan'], + 'knar': ['knar', 'kran', 'nark', 'rank'], + 'knave': ['kevan', 'knave'], + 'knawel': ['knawel', 'wankle'], + 'knead': ['kande', 'knead', 'naked'], + 'knee': ['keen', 'knee'], + 'knet': ['kent', 'knet'], + 'knit': ['knit', 'tink'], + 'knitter': ['knitter', 'trinket'], + 'knitwork': ['kirktown', 'knitwork'], + 'knob': ['bonk', 'knob'], + 'knot': ['knot', 'tonk'], + 'knottiness': ['knottiness', 'stinkstone'], + 'knower': ['knower', 'reknow', 'wroken'], + 'knub': ['bunk', 'knub'], + 'knurly': ['knurly', 'runkly'], + 'knut': ['knut', 'tunk'], + 'knute': ['knute', 'unket'], + 'ko': ['ko', 'ok'], + 'koa': ['ako', 'koa', 'oak', 'oka'], + 'koali': ['koali', 'koila'], + 'kobu': ['bouk', 'kobu'], + 'koch': ['hock', 'koch'], + 'kochia': ['choiak', 'kochia'], + 'koel': ['koel', 'loke'], + 'koi': ['koi', 'oki'], + 'koil': ['kilo', 'koil', 'koli'], + 'koila': ['koali', 'koila'], + 'koilon': ['inlook', 'koilon'], + 'kokan': ['kokan', 'konak'], + 'kokio': ['kioko', 'kokio'], + 'kokumin': ['kikumon', 'kokumin'], + 'kola': ['kalo', 'kola', 'loka'], + 'koli': ['kilo', 'koil', 'koli'], + 'kolo': ['kolo', 'look'], + 'kome': ['kome', 'moke'], + 'komi': ['komi', 'moki'], + 'kona': ['kona', 'nako'], + 'konak': ['kokan', 'konak'], + 'kongo': ['kongo', 'ngoko'], + 'kongoni': ['kongoni', 'nooking'], + 'konia': ['ikona', 'konia'], + 'konimeter': ['kinetomer', 'konimeter'], + 'kor': ['kor', 'rok'], + 'kora': ['karo', 'kora', 'okra', 'roka'], + 'korait': ['korait', 'troika'], + 'koran': ['koran', 'krona'], + 'korana': ['anorak', 'korana'], + 'kore': ['kore', 'roke'], + 'korec': ['coker', 'corke', 'korec'], + 'korero': ['korero', 'rooker'], + 'kori': ['irok', 'kori'], + 'korimako': ['korimako', 'koromika'], + 'koromika': ['korimako', 'koromika'], + 'korwa': ['awork', 'korwa'], + 'kory': ['kory', 'roky', 'york'], + 'kos': ['kos', 'sok'], + 'koso': ['koso', 'skoo', 'sook'], + 'kotar': ['kotar', 'tarok'], + 'koto': ['koto', 'toko', 'took'], + 'kra': ['ark', 'kra'], + 'krait': ['kitar', 'krait', 'rakit', 'traik'], + 'kraken': ['kraken', 'nekkar'], + 'kral': ['karl', 'kral', 'lark'], + 'krama': ['karma', 'krama', 'marka'], + 'kran': ['knar', 'kran', 'nark', 'rank'], + 'kras': ['askr', 'kras', 'sark'], + 'krasis': ['kissar', 'krasis'], + 'kraut': ['kraut', 'tukra'], + 'kreis': ['kreis', 'skier'], + 'kreistle': ['kreistle', 'triskele'], + 'krepi': ['krepi', 'piker'], + 'krina': ['inkra', 'krina', 'nakir', 'rinka'], + 'kris': ['kris', 'risk'], + 'krishna': ['krishna', 'rankish'], + 'kristen': ['kirsten', 'kristen', 'stinker'], + 'krona': ['koran', 'krona'], + 'krone': ['ekron', 'krone'], + 'kroo': ['kroo', 'rook'], + 'krosa': ['krosa', 'oskar'], + 'kua': ['aku', 'auk', 'kua'], + 'kuar': ['kuar', 'raku', 'rauk'], + 'kuba': ['baku', 'kuba'], + 'kubera': ['kaberu', 'kubera'], + 'kuki': ['kiku', 'kuki'], + 'kulah': ['khula', 'kulah'], + 'kulimit': ['kulimit', 'tilikum'], + 'kulm': ['kulm', 'mulk'], + 'kuman': ['kuman', 'naumk'], + 'kumhar': ['kumhar', 'kumrah'], + 'kumrah': ['kumhar', 'kumrah'], + 'kunai': ['kunai', 'nikau'], + 'kuneste': ['kuneste', 'netsuke'], + 'kung': ['gunk', 'kung'], + 'kurmi': ['kurmi', 'mukri'], + 'kurt': ['kurt', 
'turk'], + 'kurus': ['kurus', 'ursuk'], + 'kusa': ['kusa', 'skua'], + 'kusam': ['kusam', 'sumak'], + 'kusan': ['ankus', 'kusan'], + 'kusha': ['kusha', 'shaku', 'ushak'], + 'kutchin': ['kutchin', 'unthick'], + 'kutenai': ['kutenai', 'unakite'], + 'kyar': ['kyar', 'yark'], + 'kyat': ['katy', 'kyat', 'taky'], + 'kyle': ['kyle', 'yelk'], + 'kylo': ['kylo', 'yolk'], + 'kyte': ['kyte', 'tyke'], + 'la': ['al', 'la'], + 'laager': ['aglare', 'alegar', 'galera', 'laager'], + 'laang': ['laang', 'lagan', 'lagna'], + 'lab': ['alb', 'bal', 'lab'], + 'laban': ['alban', 'balan', 'banal', 'laban', 'nabal', 'nabla'], + 'labber': ['barbel', 'labber', 'rabble'], + 'labefact': ['factable', 'labefact'], + 'label': ['bella', 'label'], + 'labeler': ['labeler', 'relabel'], + 'labia': ['balai', 'labia'], + 'labial': ['abilla', 'labial'], + 'labially': ['alliably', 'labially'], + 'labiate': ['baalite', 'bialate', 'labiate'], + 'labiella': ['alliable', 'labiella'], + 'labile': ['alible', 'belial', 'labile', 'liable'], + 'labiocervical': ['cervicolabial', 'labiocervical'], + 'labiodental': ['dentolabial', 'labiodental'], + 'labioglossal': ['glossolabial', 'labioglossal'], + 'labioglossolaryngeal': ['glossolabiolaryngeal', 'labioglossolaryngeal'], + 'labioglossopharyngeal': ['glossolabiopharyngeal', 'labioglossopharyngeal'], + 'labiomental': ['labiomental', 'mentolabial'], + 'labionasal': ['labionasal', 'nasolabial'], + 'labiovelar': ['bialveolar', 'labiovelar'], + 'labis': ['basil', 'labis'], + 'labor': ['balor', 'bolar', 'boral', 'labor', 'lobar'], + 'laborant': ['balatron', 'laborant'], + 'laborism': ['laborism', 'mislabor'], + 'laborist': ['laborist', 'strobila'], + 'laborite': ['betailor', 'laborite', 'orbitale'], + 'labrador': ['labrador', 'larboard'], + 'labret': ['albert', 'balter', 'labret', 'tabler'], + 'labridae': ['labridae', 'radiable'], + 'labrose': ['borlase', 'labrose', 'rosabel'], + 'labrum': ['brumal', 'labrum', 'lumbar', 'umbral'], + 'labrus': ['bursal', 'labrus'], + 'laburnum': ['alburnum', 'laburnum'], + 'lac': ['cal', 'lac'], + 'lace': ['acle', 'alec', 'lace'], + 'laced': ['clead', 'decal', 'laced'], + 'laceman': ['laceman', 'manacle'], + 'lacepod': ['lacepod', 'pedocal', 'placode'], + 'lacer': ['ceral', 'clare', 'clear', 'lacer'], + 'lacerable': ['clearable', 'lacerable'], + 'lacerate': ['lacerate', 'lacertae'], + 'laceration': ['creational', 'crotalinae', 'laceration', 'reactional'], + 'lacerative': ['calaverite', 'lacerative'], + 'lacertae': ['lacerate', 'lacertae'], + 'lacertian': ['carnalite', 'claretian', 'lacertian', 'nectarial'], + 'lacertid': ['articled', 'lacertid'], + 'lacertidae': ['dilacerate', 'lacertidae'], + 'lacertiloid': ['illoricated', 'lacertiloid'], + 'lacertine': ['intercale', 'interlace', 'lacertine', 'reclinate'], + 'lacertoid': ['dialector', 'lacertoid'], + 'lacery': ['clayer', 'lacery'], + 'lacet': ['cleat', 'eclat', 'ectal', 'lacet', 'tecla'], + 'lache': ['chela', 'lache', 'leach'], + 'laches': ['cashel', 'laches', 'sealch'], + 'lachrymonasal': ['lachrymonasal', 'nasolachrymal'], + 'lachsa': ['calash', 'lachsa'], + 'laciness': ['laciness', 'sensical'], + 'lacing': ['anglic', 'lacing'], + 'lacinia': ['lacinia', 'licania'], + 'laciniated': ['acetanilid', 'laciniated', 'teniacidal'], + 'lacis': ['lacis', 'salic'], + 'lack': ['calk', 'lack'], + 'lacker': ['calker', 'lacker', 'rackle', 'recalk', 'reckla'], + 'lacmoid': ['domical', 'lacmoid'], + 'laconic': ['conical', 'laconic'], + 'laconica': ['canicola', 'laconica'], + 'laconizer': ['laconizer', 'locarnize'], + 'lacquer': 
['claquer', 'lacquer'], + 'lacquerer': ['lacquerer', 'relacquer'], + 'lactarene': ['lactarene', 'nectareal'], + 'lactarious': ['alacritous', 'lactarious', 'lactosuria'], + 'lactarium': ['lactarium', 'matricula'], + 'lactarius': ['australic', 'lactarius'], + 'lacteal': ['catella', 'lacteal'], + 'lacteous': ['lacteous', 'osculate'], + 'lactide': ['citadel', 'deltaic', 'dialect', 'edictal', 'lactide'], + 'lactinate': ['cantalite', 'lactinate', 'tetanical'], + 'lacto': ['lacto', 'tlaco'], + 'lactoid': ['cotidal', 'lactoid', 'talcoid'], + 'lactoprotein': ['lactoprotein', 'protectional'], + 'lactose': ['alecost', 'lactose', 'scotale', 'talcose'], + 'lactoside': ['dislocate', 'lactoside'], + 'lactosuria': ['alacritous', 'lactarious', 'lactosuria'], + 'lacunal': ['calluna', 'lacunal'], + 'lacune': ['auncel', 'cuneal', 'lacune', 'launce', 'unlace'], + 'lacustral': ['claustral', 'lacustral'], + 'lacwork': ['lacwork', 'warlock'], + 'lacy': ['acyl', 'clay', 'lacy'], + 'lad': ['dal', 'lad'], + 'ladakin': ['danakil', 'dankali', 'kaldani', 'ladakin'], + 'ladanum': ['ladanum', 'udalman'], + 'ladder': ['ladder', 'raddle'], + 'laddery': ['dreadly', 'laddery'], + 'laddie': ['daidle', 'laddie'], + 'lade': ['dale', 'deal', 'lade', 'lead', 'leda'], + 'lademan': ['daleman', 'lademan', 'leadman'], + 'laden': ['eland', 'laden', 'lenad'], + 'lader': ['alder', 'daler', 'lader'], + 'ladies': ['aisled', 'deasil', 'ladies', 'sailed'], + 'ladin': ['danli', 'ladin', 'linda', 'nidal'], + 'lading': ['angild', 'lading'], + 'ladino': ['dolina', 'ladino'], + 'ladle': ['dalle', 'della', 'ladle'], + 'ladrone': ['endoral', 'ladrone', 'leonard'], + 'ladyfy': ['dayfly', 'ladyfy'], + 'ladyish': ['ladyish', 'shadily'], + 'ladyling': ['dallying', 'ladyling'], + 'laet': ['atle', 'laet', 'late', 'leat', 'tael', 'tale', 'teal'], + 'laeti': ['alite', 'laeti'], + 'laetic': ['calite', 'laetic', 'tecali'], + 'lafite': ['fetial', 'filate', 'lafite', 'leafit'], + 'lag': ['gal', 'lag'], + 'lagan': ['laang', 'lagan', 'lagna'], + 'lagen': ['agnel', 'angel', 'angle', 'galen', 'genal', 'glean', 'lagen'], + 'lagena': ['alnage', 'angela', 'galena', 'lagena'], + 'lagend': ['angled', 'dangle', 'englad', 'lagend'], + 'lager': ['argel', 'ergal', 'garle', 'glare', 'lager', 'large', 'regal'], + 'lagetto': ['lagetto', 'tagetol'], + 'lagged': ['daggle', 'lagged'], + 'laggen': ['laggen', 'naggle'], + 'lagger': ['gargle', 'gregal', 'lagger', 'raggle'], + 'lagna': ['laang', 'lagan', 'lagna'], + 'lagniappe': ['appealing', 'lagniappe', 'panplegia'], + 'lagonite': ['gelation', 'lagonite', 'legation'], + 'lagunero': ['lagunero', 'organule', 'uroglena'], + 'lagurus': ['argulus', 'lagurus'], + 'lai': ['ail', 'ila', 'lai'], + 'laicism': ['islamic', 'laicism', 'silicam'], + 'laid': ['dail', 'dali', 'dial', 'laid', 'lida'], + 'lain': ['alin', 'anil', 'lain', 'lina', 'nail'], + 'laine': ['alien', 'aline', 'anile', 'elain', 'elian', 'laine', 'linea'], + 'laiose': ['aeolis', 'laiose'], + 'lair': ['aril', 'lair', 'lari', 'liar', 'lira', 'rail', 'rial'], + 'lairage': ['lairage', 'railage', 'regalia'], + 'laird': ['drail', 'laird', 'larid', 'liard'], + 'lairless': ['lairless', 'railless'], + 'lairman': ['lairman', 'laminar', 'malarin', 'railman'], + 'lairstone': ['lairstone', 'orleanist', 'serotinal'], + 'lairy': ['lairy', 'riyal'], + 'laitance': ['analcite', 'anticlea', 'laitance'], + 'laity': ['laity', 'taily'], + 'lak': ['alk', 'lak'], + 'lake': ['kale', 'lake', 'leak'], + 'lakeless': ['lakeless', 'leakless'], + 'laker': ['karel', 'laker'], + 'lakie': ['alike', 'lakie'], 
+ 'laking': ['kangli', 'laking'], + 'lakish': ['lakish', 'shakil'], + 'lakota': ['atokal', 'lakota'], + 'laky': ['alky', 'laky'], + 'lalo': ['lalo', 'lola', 'olla'], + 'lalopathy': ['allopathy', 'lalopathy'], + 'lam': ['lam', 'mal'], + 'lama': ['alma', 'amla', 'lama', 'mala'], + 'lamaic': ['amical', 'camail', 'lamaic'], + 'lamaism': ['lamaism', 'miasmal'], + 'lamaist': ['lamaist', 'lamista'], + 'lamaistic': ['ismatical', 'lamaistic'], + 'lamanite': ['lamanite', 'laminate'], + 'lamany': ['amylan', 'lamany', 'layman'], + 'lamb': ['balm', 'lamb'], + 'lambaste': ['blastema', 'lambaste'], + 'lambent': ['beltman', 'lambent'], + 'lamber': ['ambler', 'blamer', 'lamber', 'marble', 'ramble'], + 'lambie': ['bemail', 'lambie'], + 'lambiness': ['balminess', 'lambiness'], + 'lamblike': ['balmlike', 'lamblike'], + 'lamby': ['balmy', 'lamby'], + 'lame': ['alem', 'alme', 'lame', 'leam', 'male', 'meal', 'mela'], + 'lamella': ['lamella', 'malella', 'malleal'], + 'lamellose': ['lamellose', 'semolella'], + 'lamely': ['lamely', 'mellay'], + 'lameness': ['lameness', 'maleness', 'maneless', 'nameless'], + 'lament': ['lament', 'manlet', 'mantel', 'mantle', 'mental'], + 'lamenter': ['lamenter', 'relament', 'remantle'], + 'lamenting': ['alignment', 'lamenting'], + 'lameter': ['lameter', 'metaler', 'remetal'], + 'lamia': ['alima', 'lamia'], + 'lamiger': ['gremial', 'lamiger'], + 'lamiides': ['idealism', 'lamiides'], + 'lamin': ['lamin', 'liman', 'milan'], + 'lamina': ['almain', 'animal', 'lamina', 'manila'], + 'laminae': ['laminae', 'melania'], + 'laminar': ['lairman', 'laminar', 'malarin', 'railman'], + 'laminarin': ['laminarin', 'linamarin'], + 'laminarite': ['laminarite', 'terminalia'], + 'laminate': ['lamanite', 'laminate'], + 'laminated': ['almandite', 'laminated'], + 'lamination': ['antimonial', 'lamination'], + 'laminboard': ['laminboard', 'lombardian'], + 'laminectomy': ['laminectomy', 'metonymical'], + 'laminose': ['laminose', 'lemonias', 'semolina'], + 'lamish': ['lamish', 'shimal'], + 'lamista': ['lamaist', 'lamista'], + 'lamiter': ['lamiter', 'marlite'], + 'lammer': ['lammer', 'rammel'], + 'lammy': ['lammy', 'malmy'], + 'lamna': ['alman', 'lamna', 'manal'], + 'lamnid': ['lamnid', 'mandil'], + 'lamnidae': ['aldamine', 'lamnidae'], + 'lamp': ['lamp', 'palm'], + 'lampad': ['lampad', 'palmad'], + 'lampas': ['lampas', 'plasma'], + 'lamper': ['lamper', 'palmer', 'relamp'], + 'lampers': ['lampers', 'sampler'], + 'lampful': ['lampful', 'palmful'], + 'lampist': ['lampist', 'palmist'], + 'lampistry': ['lampistry', 'palmistry'], + 'lampoon': ['lampoon', 'pomonal'], + 'lamprey': ['lamprey', 'palmery'], + 'lampyridae': ['lampyridae', 'pyramidale'], + 'lamus': ['lamus', 'malus', 'musal', 'slaum'], + 'lamut': ['lamut', 'tamul'], + 'lan': ['aln', 'lan'], + 'lana': ['alan', 'anal', 'lana'], + 'lanas': ['alans', 'lanas', 'nasal'], + 'lanate': ['anteal', 'lanate', 'teanal'], + 'lancaster': ['ancestral', 'lancaster'], + 'lancasterian': ['alcantarines', 'lancasterian'], + 'lance': ['canel', 'clean', 'lance', 'lenca'], + 'lanced': ['calden', 'candle', 'lanced'], + 'lancely': ['cleanly', 'lancely'], + 'lanceolar': ['lanceolar', 'olecranal'], + 'lancer': ['lancer', 'rancel'], + 'lances': ['lances', 'senlac'], + 'lancet': ['cantle', 'cental', 'lancet', 'tancel'], + 'lanceteer': ['crenelate', 'lanceteer'], + 'lancinate': ['cantilena', 'lancinate'], + 'landbook': ['bookland', 'landbook'], + 'landed': ['dandle', 'landed'], + 'lander': ['aldern', + 'darnel', + 'enlard', + 'lander', + 'lenard', + 'randle', + 'reland'], + 'landfast': 
['fastland', 'landfast'], + 'landgrave': ['grandeval', 'landgrave'], + 'landimere': ['landimere', 'madrilene'], + 'landing': ['danglin', 'landing'], + 'landlubber': ['landlubber', 'lubberland'], + 'landreeve': ['landreeve', 'reeveland'], + 'landstorm': ['landstorm', 'transmold'], + 'landwash': ['landwash', 'washland'], + 'lane': ['alen', 'lane', 'lean', 'lena', 'nael', 'neal'], + 'lanete': ['elanet', 'lanete', 'lateen'], + 'laney': ['laney', 'layne'], + 'langhian': ['hangnail', 'langhian'], + 'langi': ['algin', 'align', 'langi', 'liang', 'linga'], + 'langite': ['atingle', 'gelatin', 'genital', 'langite', 'telinga'], + 'lango': ['along', 'gonal', 'lango', 'longa', 'nogal'], + 'langobard': ['bandarlog', 'langobard'], + 'language': ['ganguela', 'language'], + 'laniate': ['laniate', 'natalie', 'taenial'], + 'lanific': ['finical', 'lanific'], + 'laniform': ['formalin', 'informal', 'laniform'], + 'laniidae': ['aedilian', 'laniidae'], + 'lanista': ['lanista', 'santali'], + 'lanius': ['insula', 'lanius', 'lusian'], + 'lank': ['klan', 'lank'], + 'lanket': ['anklet', 'lanket', 'tankle'], + 'lanner': ['lanner', 'rannel'], + 'lansat': ['aslant', 'lansat', 'natals', 'santal'], + 'lanseh': ['halsen', 'hansel', 'lanseh'], + 'lantaca': ['cantala', 'catalan', 'lantaca'], + 'lanum': ['lanum', 'manul'], + 'lao': ['alo', 'lao', 'loa'], + 'laodicean': ['caledonia', 'laodicean'], + 'laotian': ['ailanto', 'alation', 'laotian', 'notalia'], + 'lap': ['alp', 'lap', 'pal'], + 'laparohysterotomy': ['hysterolaparotomy', 'laparohysterotomy'], + 'laparosplenotomy': ['laparosplenotomy', 'splenolaparotomy'], + 'lapidarist': ['lapidarist', 'triapsidal'], + 'lapidate': ['lapidate', 'talpidae'], + 'lapideon': ['lapideon', 'palinode', 'pedalion'], + 'lapidose': ['episodal', 'lapidose', 'sepaloid'], + 'lapith': ['lapith', 'tilpah'], + 'lapon': ['lapon', 'nopal'], + 'lapp': ['lapp', 'palp', 'plap'], + 'lappa': ['lappa', 'papal'], + 'lapped': ['dapple', 'lapped', 'palped'], + 'lapper': ['lapper', 'rappel'], + 'lappish': ['lappish', 'shiplap'], + 'lapsation': ['apolistan', 'lapsation'], + 'lapse': ['elaps', + 'lapse', + 'lepas', + 'pales', + 'salep', + 'saple', + 'sepal', + 'slape', + 'spale', + 'speal'], + 'lapsi': ['alisp', 'lapsi'], + 'lapsing': ['lapsing', 'sapling'], + 'lapstone': ['lapstone', 'pleonast'], + 'larboard': ['labrador', 'larboard'], + 'larcenic': ['calciner', 'larcenic'], + 'larcenist': ['cisternal', 'larcenist'], + 'larcenous': ['larcenous', 'senocular'], + 'larchen': ['charnel', 'larchen'], + 'lardacein': ['ecardinal', 'lardacein'], + 'lardite': ['dilater', 'lardite', 'redtail'], + 'lardon': ['androl', 'arnold', 'lardon', 'roland', 'ronald'], + 'lardy': ['daryl', 'lardy', 'lyard'], + 'large': ['argel', 'ergal', 'garle', 'glare', 'lager', 'large', 'regal'], + 'largely': ['allergy', 'gallery', 'largely', 'regally'], + 'largen': ['angler', 'arleng', 'garnel', 'largen', 'rangle', 'regnal'], + 'largeness': ['largeness', 'rangeless', 'regalness'], + 'largess': ['glasser', 'largess'], + 'largition': ['gratiolin', 'largition', 'tailoring'], + 'largo': ['algor', 'argol', 'goral', 'largo'], + 'lari': ['aril', 'lair', 'lari', 'liar', 'lira', 'rail', 'rial'], + 'lariat': ['altair', 'atrail', 'atrial', 'lariat', 'latria', 'talari'], + 'larid': ['drail', 'laird', 'larid', 'liard'], + 'laridae': ['ardelia', 'laridae', 'radiale'], + 'larigo': ['gloria', 'larigo', 'logria'], + 'larigot': ['goitral', 'larigot', 'ligator'], + 'lariid': ['iridal', 'lariid'], + 'larine': ['arline', 'larine', 'linear', 'nailer', 'renail'], + 
'lark': ['karl', 'kral', 'lark'], + 'larking': ['karling', 'larking'], + 'larsenite': ['intersale', 'larsenite'], + 'larus': ['larus', 'sural', 'ursal'], + 'larva': ['alvar', 'arval', 'larva'], + 'larval': ['larval', 'vallar'], + 'larvate': ['larvate', 'lavaret', 'travale'], + 'larve': ['arvel', 'larve', 'laver', 'ravel', 'velar'], + 'larvicide': ['larvicide', 'veridical'], + 'laryngopharyngeal': ['laryngopharyngeal', 'pharyngolaryngeal'], + 'laryngopharyngitis': ['laryngopharyngitis', 'pharyngolaryngitis'], + 'laryngotome': ['laryngotome', 'maternology'], + 'laryngotracheotomy': ['laryngotracheotomy', 'tracheolaryngotomy'], + 'las': ['las', 'sal', 'sla'], + 'lasa': ['alas', 'lasa'], + 'lascar': ['lascar', 'rascal', 'sacral', 'scalar'], + 'laser': ['arles', 'arsle', 'laser', 'seral', 'slare'], + 'lash': ['hals', 'lash'], + 'lasi': ['lasi', 'lias', 'lisa', 'sail', 'sial'], + 'lasius': ['asilus', 'lasius'], + 'lask': ['lask', 'skal'], + 'lasket': ['lasket', 'sklate'], + 'laspring': ['laspring', 'sparling', 'springal'], + 'lasque': ['lasque', 'squeal'], + 'lasset': ['lasset', 'tassel'], + 'lassie': ['elissa', 'lassie'], + 'lasso': ['lasso', 'ossal'], + 'lassoer': ['lassoer', 'oarless', 'rosales'], + 'last': ['last', 'salt', 'slat'], + 'laster': ['laster', + 'lastre', + 'rastle', + 'relast', + 'resalt', + 'salter', + 'slater', + 'stelar'], + 'lasting': ['anglist', 'lasting', 'salting', 'slating', 'staling'], + 'lastly': ['lastly', 'saltly'], + 'lastness': ['lastness', 'saltness'], + 'lastre': ['laster', + 'lastre', + 'rastle', + 'relast', + 'resalt', + 'salter', + 'slater', + 'stelar'], + 'lasty': ['lasty', 'salty', 'slaty'], + 'lat': ['alt', 'lat', 'tal'], + 'lata': ['lata', 'taal', 'tala'], + 'latania': ['altaian', 'latania', 'natalia'], + 'latcher': ['clethra', 'latcher', 'ratchel', 'relatch', 'talcher', 'trachle'], + 'latchet': ['chattel', 'latchet'], + 'late': ['atle', 'laet', 'late', 'leat', 'tael', 'tale', 'teal'], + 'latebra': ['alberta', 'latebra', 'ratable'], + 'lated': ['adlet', 'dealt', 'delta', 'lated', 'taled'], + 'lateen': ['elanet', 'lanete', 'lateen'], + 'lately': ['lately', 'lealty'], + 'laten': ['ental', 'laten', 'leant'], + 'latent': ['latent', 'latten', 'nattle', 'talent', 'tantle'], + 'latentness': ['latentness', 'tenantless'], + 'later': ['alert', 'alter', 'artel', 'later', 'ratel', 'taler', 'telar'], + 'latera': ['latera', 'relata'], + 'laterad': ['altared', 'laterad'], + 'lateralis': ['lateralis', 'stellaria'], + 'lateran': ['alatern', 'lateran'], + 'laterite': ['laterite', 'literate', 'teretial'], + 'laterocaudal': ['caudolateral', 'laterocaudal'], + 'laterodorsal': ['dorsolateral', 'laterodorsal'], + 'lateroventral': ['lateroventral', 'ventrolateral'], + 'latest': ['latest', 'sattle', 'taslet'], + 'latex': ['exalt', 'latex'], + 'lath': ['halt', 'lath'], + 'lathe': ['ethal', 'lathe', 'leath'], + 'latheman': ['latheman', 'methanal'], + 'lathen': ['ethnal', 'hantle', 'lathen', 'thenal'], + 'lather': ['arthel', 'halter', 'lather', 'thaler'], + 'lathery': ['earthly', 'heartly', 'lathery', 'rathely'], + 'lathing': ['halting', 'lathing', 'thingal'], + 'latian': ['antlia', 'latian', 'nalita'], + 'latibulize': ['latibulize', 'utilizable'], + 'latices': ['astelic', 'elastic', 'latices'], + 'laticlave': ['laticlave', 'vacillate'], + 'latigo': ['galiot', 'latigo'], + 'latimeria': ['latimeria', 'marialite'], + 'latin': ['altin', 'latin'], + 'latinate': ['antliate', 'latinate'], + 'latiner': ['entrail', + 'latiner', + 'latrine', + 'ratline', + 'reliant', + 'retinal', + 'trenail'], 
+ 'latinesque': ['latinesque', 'sequential'], + 'latinian': ['antinial', 'latinian'], + 'latinizer': ['latinizer', 'trinalize'], + 'latinus': ['latinus', 'tulisan', 'unalist'], + 'lation': ['italon', 'lation', 'talion'], + 'latirostres': ['latirostres', 'setirostral'], + 'latirus': ['latirus', 'trisula'], + 'latish': ['latish', 'tahsil'], + 'latite': ['latite', 'tailet', 'tailte', 'talite'], + 'latitude': ['altitude', 'latitude'], + 'latitudinal': ['altitudinal', 'latitudinal'], + 'latitudinarian': ['altitudinarian', 'latitudinarian'], + 'latomy': ['latomy', 'tyloma'], + 'latona': ['atonal', 'latona'], + 'latonian': ['latonian', 'nataloin', 'national'], + 'latria': ['altair', 'atrail', 'atrial', 'lariat', 'latria', 'talari'], + 'latrine': ['entrail', + 'latiner', + 'latrine', + 'ratline', + 'reliant', + 'retinal', + 'trenail'], + 'latris': ['latris', 'strial'], + 'latro': ['latro', 'rotal', 'toral'], + 'latrobe': ['alberto', 'bloater', 'latrobe'], + 'latrobite': ['latrobite', 'trilobate'], + 'latrocinium': ['latrocinium', 'tourmalinic'], + 'latron': ['latron', 'lontar', 'tornal'], + 'latten': ['latent', 'latten', 'nattle', 'talent', 'tantle'], + 'latter': ['artlet', 'latter', 'rattle', 'tartle', 'tatler'], + 'latterkin': ['intertalk', 'latterkin'], + 'lattice': ['lattice', 'tactile'], + 'latticinio': ['latticinio', 'licitation'], + 'latuka': ['latuka', 'taluka'], + 'latus': ['latus', 'sault', 'talus'], + 'latvian': ['latvian', 'valiant'], + 'laubanite': ['laubanite', 'unlabiate'], + 'laud': ['auld', 'dual', 'laud', 'udal'], + 'laudation': ['adulation', 'laudation'], + 'laudator': ['adulator', 'laudator'], + 'laudatorily': ['illaudatory', 'laudatorily'], + 'laudatory': ['adulatory', 'laudatory'], + 'lauder': ['lauder', 'udaler'], + 'laudism': ['dualism', 'laudism'], + 'laudist': ['dualist', 'laudist'], + 'laumonite': ['emulation', 'laumonite'], + 'laun': ['laun', 'luna', 'ulna', 'unal'], + 'launce': ['auncel', 'cuneal', 'lacune', 'launce', 'unlace'], + 'launch': ['chulan', 'launch', 'nuchal'], + 'launcher': ['launcher', 'relaunch'], + 'laund': ['dunal', 'laund', 'lunda', 'ulnad'], + 'launder': ['launder', 'rundale'], + 'laur': ['alur', 'laur', 'lura', 'raul', 'ural'], + 'laura': ['aural', 'laura'], + 'laurel': ['allure', 'laurel'], + 'laureled': ['laureled', 'reallude'], + 'laurence': ['cerulean', 'laurence'], + 'laurent': ['laurent', 'neutral', 'unalert'], + 'laurentide': ['adulterine', 'laurentide'], + 'lauric': ['curial', 'lauric', 'uracil', 'uralic'], + 'laurin': ['laurin', 'urinal'], + 'laurite': ['laurite', 'uralite'], + 'laurus': ['laurus', 'ursula'], + 'lava': ['aval', 'lava'], + 'lavacre': ['caravel', 'lavacre'], + 'lavaret': ['larvate', 'lavaret', 'travale'], + 'lave': ['lave', 'vale', 'veal', 'vela'], + 'laveer': ['laveer', 'leaver', 'reveal', 'vealer'], + 'lavehr': ['halver', 'lavehr'], + 'lavenite': ['elvanite', 'lavenite'], + 'laver': ['arvel', 'larve', 'laver', 'ravel', 'velar'], + 'laverania': ['laverania', 'valeriana'], + 'lavic': ['cavil', 'lavic'], + 'lavinia': ['lavinia', 'vinalia'], + 'lavish': ['lavish', 'vishal'], + 'lavisher': ['lavisher', 'shrieval'], + 'lavolta': ['lavolta', 'vallota'], + 'law': ['awl', 'law'], + 'lawing': ['lawing', 'waling'], + 'lawk': ['lawk', 'walk'], + 'lawmonger': ['angleworm', 'lawmonger'], + 'lawned': ['delawn', 'lawned', 'wandle'], + 'lawner': ['lawner', 'warnel'], + 'lawny': ['lawny', 'wanly'], + 'lawrie': ['lawrie', 'wailer'], + 'lawter': ['lawter', 'walter'], + 'lawyer': ['lawyer', 'yawler'], + 'laxism': ['laxism', 'smilax'], + 'lay': 
['aly', 'lay'], + 'layer': ['early', 'layer', 'relay'], + 'layered': ['delayer', 'layered', 'redelay'], + 'layery': ['layery', 'yearly'], + 'laying': ['gainly', 'laying'], + 'layman': ['amylan', 'lamany', 'layman'], + 'layne': ['laney', 'layne'], + 'layout': ['layout', 'lutayo', 'outlay'], + 'layover': ['layover', 'overlay'], + 'layship': ['apishly', 'layship'], + 'lazarlike': ['alkalizer', 'lazarlike'], + 'laze': ['laze', 'zeal'], + 'lea': ['ale', 'lea'], + 'leach': ['chela', 'lache', 'leach'], + 'leachman': ['leachman', 'mechanal'], + 'lead': ['dale', 'deal', 'lade', 'lead', 'leda'], + 'leadable': ['dealable', 'leadable'], + 'leaded': ['delead', 'leaded'], + 'leader': ['dealer', 'leader', 'redeal', 'relade', 'relead'], + 'leadership': ['dealership', 'leadership'], + 'leadin': ['aldine', 'daniel', 'delian', 'denial', 'enalid', 'leadin'], + 'leadiness': ['idealness', 'leadiness'], + 'leading': ['adeling', 'dealing', 'leading'], + 'leadman': ['daleman', 'lademan', 'leadman'], + 'leads': ['leads', 'slade'], + 'leadsman': ['dalesman', 'leadsman'], + 'leadstone': ['endosteal', 'leadstone'], + 'leady': ['delay', 'leady'], + 'leaf': ['alef', 'feal', 'flea', 'leaf'], + 'leafen': ['enleaf', 'leafen'], + 'leafit': ['fetial', 'filate', 'lafite', 'leafit'], + 'leafy': ['fleay', 'leafy'], + 'leah': ['hale', 'heal', 'leah'], + 'leak': ['kale', 'lake', 'leak'], + 'leakiness': ['alikeness', 'leakiness'], + 'leakless': ['lakeless', 'leakless'], + 'leal': ['alle', 'ella', 'leal'], + 'lealty': ['lately', 'lealty'], + 'leam': ['alem', 'alme', 'lame', 'leam', 'male', 'meal', 'mela'], + 'leamer': ['leamer', 'mealer'], + 'lean': ['alen', 'lane', 'lean', 'lena', 'nael', 'neal'], + 'leander': ['leander', 'learned', 'reladen'], + 'leaner': ['arlene', 'leaner'], + 'leaning': ['angelin', 'leaning'], + 'leant': ['ental', 'laten', 'leant'], + 'leap': ['leap', 'lepa', 'pale', 'peal', 'plea'], + 'leaper': ['leaper', 'releap', 'repale', 'repeal'], + 'leaping': ['apeling', 'leaping'], + 'leapt': ['leapt', + 'palet', + 'patel', + 'pelta', + 'petal', + 'plate', + 'pleat', + 'tepal'], + 'lear': ['earl', 'eral', 'lear', 'real'], + 'learn': ['learn', 'renal'], + 'learned': ['leander', 'learned', 'reladen'], + 'learnedly': ['ellenyard', 'learnedly'], + 'learner': ['learner', 'relearn'], + 'learnt': ['altern', 'antler', 'learnt', 'rental', 'ternal'], + 'leasable': ['leasable', 'sealable'], + 'lease': ['easel', 'lease'], + 'leaser': ['alerse', 'leaser', 'reales', 'resale', 'reseal', 'sealer'], + 'leash': ['halse', 'leash', 'selah', 'shale', 'sheal', 'shela'], + 'leasing': ['leasing', 'sealing'], + 'least': ['least', 'setal', 'slate', 'stale', 'steal', 'stela', 'tales'], + 'leat': ['atle', 'laet', 'late', 'leat', 'tael', 'tale', 'teal'], + 'leath': ['ethal', 'lathe', 'leath'], + 'leather': ['leather', 'tarheel'], + 'leatherbark': ['halterbreak', 'leatherbark'], + 'leatherer': ['leatherer', 'releather', 'tarheeler'], + 'leatman': ['amental', 'leatman'], + 'leaver': ['laveer', 'leaver', 'reveal', 'vealer'], + 'leaves': ['leaves', 'sleave'], + 'leaving': ['leaving', 'vangeli'], + 'leavy': ['leavy', 'vealy'], + 'leban': ['leban', 'nable'], + 'lebanese': ['ebenales', 'lebanese'], + 'lebensraum': ['lebensraum', 'mensurable'], + 'lecaniid': ['alcidine', 'danielic', 'lecaniid'], + 'lecanora': ['carolean', 'lecanora'], + 'lecanoroid': ['lecanoroid', 'olecranoid'], + 'lechery': ['cheerly', 'lechery'], + 'lechriodont': ['holocentrid', 'lechriodont'], + 'lecithal': ['hellicat', 'lecithal'], + 'lecontite': ['lecontite', 'nicolette'], + 
'lector': ['colter', 'lector', 'torcel'], + 'lectorial': ['corallite', 'lectorial'], + 'lectorship': ['lectorship', 'leptorchis'], + 'lectrice': ['electric', 'lectrice'], + 'lecturess': ['cutleress', 'lecturess', 'truceless'], + 'lecyth': ['lecyth', 'letchy'], + 'lecythis': ['chestily', 'lecythis'], + 'led': ['del', 'eld', 'led'], + 'leda': ['dale', 'deal', 'lade', 'lead', 'leda'], + 'lede': ['dele', 'lede', 'leed'], + 'leden': ['leden', 'neeld'], + 'ledge': ['glede', 'gleed', 'ledge'], + 'ledger': ['gelder', 'ledger', 'redleg'], + 'ledging': ['gelding', 'ledging'], + 'ledgy': ['gledy', 'ledgy'], + 'lee': ['eel', 'lee'], + 'leed': ['dele', 'lede', 'leed'], + 'leek': ['keel', 'kele', 'leek'], + 'leep': ['leep', 'peel', 'pele'], + 'leepit': ['leepit', 'pelite', 'pielet'], + 'leer': ['leer', 'reel'], + 'leeringly': ['leeringly', 'reelingly'], + 'leerness': ['leerness', 'lessener'], + 'lees': ['else', 'lees', 'seel', 'sele', 'slee'], + 'leet': ['leet', 'lete', 'teel', 'tele'], + 'leetman': ['entelam', 'leetman'], + 'leewan': ['leewan', 'weanel'], + 'left': ['felt', 'flet', 'left'], + 'leftish': ['fishlet', 'leftish'], + 'leftness': ['feltness', 'leftness'], + 'leg': ['gel', 'leg'], + 'legalist': ['legalist', 'stillage'], + 'legantine': ['eglantine', 'inelegant', 'legantine'], + 'legate': ['eaglet', 'legate', 'teagle', 'telega'], + 'legatine': ['galenite', 'legatine'], + 'legation': ['gelation', 'lagonite', 'legation'], + 'legative': ['legative', 'levigate'], + 'legator': ['argolet', 'gloater', 'legator'], + 'legendary': ['enragedly', 'legendary'], + 'leger': ['leger', 'regle'], + 'legger': ['eggler', 'legger'], + 'legion': ['eloign', 'gileno', 'legion'], + 'legioner': ['eloigner', 'legioner'], + 'legionry': ['legionry', 'yeorling'], + 'legislator': ['allegorist', 'legislator'], + 'legman': ['legman', 'mangel', 'mangle'], + 'legoa': ['gloea', 'legoa'], + 'legua': ['gulae', 'legua'], + 'leguan': ['genual', 'leguan'], + 'lehr': ['herl', 'hler', 'lehr'], + 'lei': ['eli', 'lei', 'lie'], + 'leif': ['feil', 'file', 'leif', 'lief', 'life'], + 'leila': ['allie', 'leila', 'lelia'], + 'leipoa': ['apiole', 'leipoa'], + 'leisten': ['leisten', 'setline', 'tensile'], + 'leister': ['leister', 'sterile'], + 'leith': ['leith', 'lithe'], + 'leitneria': ['leitneria', 'lienteria'], + 'lek': ['elk', 'lek'], + 'lekach': ['hackle', 'lekach'], + 'lekane': ['alkene', 'lekane'], + 'lelia': ['allie', 'leila', 'lelia'], + 'leman': ['leman', 'lemna'], + 'lemma': ['lemma', 'melam'], + 'lemna': ['leman', 'lemna'], + 'lemnad': ['lemnad', 'menald'], + 'lemnian': ['lemnian', 'lineman', 'melanin'], + 'lemniscate': ['centesimal', 'lemniscate'], + 'lemon': ['lemon', 'melon', 'monel'], + 'lemonias': ['laminose', 'lemonias', 'semolina'], + 'lemonlike': ['lemonlike', 'melonlike'], + 'lemony': ['lemony', 'myelon'], + 'lemosi': ['lemosi', 'limose', 'moiles'], + 'lempira': ['impaler', 'impearl', 'lempira', 'premial'], + 'lemuria': ['lemuria', 'miauler'], + 'lemurian': ['lemurian', 'malurine', 'rumelian'], + 'lemurinae': ['lemurinae', 'neurilema'], + 'lemurine': ['lemurine', 'meruline', 'relumine'], + 'lena': ['alen', 'lane', 'lean', 'lena', 'nael', 'neal'], + 'lenad': ['eland', 'laden', 'lenad'], + 'lenape': ['alpeen', 'lenape', 'pelean'], + 'lenard': ['aldern', + 'darnel', + 'enlard', + 'lander', + 'lenard', + 'randle', + 'reland'], + 'lenca': ['canel', 'clean', 'lance', 'lenca'], + 'lencan': ['cannel', 'lencan'], + 'lendee': ['lendee', 'needle'], + 'lender': ['lender', 'relend'], + 'lendu': ['lendu', 'unled'], + 'lengthy': ['lengthy', 
'thegnly'], + 'lenient': ['lenient', 'tenline'], + 'lenify': ['finely', 'lenify'], + 'lenis': ['elsin', 'lenis', 'niels', 'silen', 'sline'], + 'lenity': ['lenity', 'yetlin'], + 'lenny': ['lenny', 'lynne'], + 'leno': ['elon', 'enol', 'leno', 'leon', 'lone', 'noel'], + 'lenora': ['lenora', 'loaner', 'orlean', 'reloan'], + 'lenticel': ['lenticel', 'lenticle'], + 'lenticle': ['lenticel', 'lenticle'], + 'lentil': ['lentil', 'lintel'], + 'lentisc': ['lentisc', 'scintle', 'stencil'], + 'lentisco': ['lentisco', 'telsonic'], + 'lentiscus': ['lentiscus', 'tunicless'], + 'lento': ['lento', 'olent'], + 'lentous': ['lentous', 'sultone'], + 'lenvoy': ['lenvoy', 'ovenly'], + 'leo': ['leo', 'ole'], + 'leon': ['elon', 'enol', 'leno', 'leon', 'lone', 'noel'], + 'leonard': ['endoral', 'ladrone', 'leonard'], + 'leonhardite': ['leonhardite', 'lionhearted'], + 'leonid': ['doline', 'indole', 'leonid', 'loined', 'olenid'], + 'leonines': ['leonines', 'selenion'], + 'leonis': ['insole', 'leonis', 'lesion', 'selion'], + 'leonist': ['leonist', 'onliest'], + 'leonite': ['elonite', 'leonite'], + 'leonotis': ['leonotis', 'oilstone'], + 'leoparde': ['leoparde', 'reapdole'], + 'leopardite': ['leopardite', 'protelidae'], + 'leotard': ['delator', 'leotard'], + 'lepa': ['leap', 'lepa', 'pale', 'peal', 'plea'], + 'lepanto': ['lepanto', 'nepotal', 'petalon', 'polenta'], + 'lepas': ['elaps', + 'lapse', + 'lepas', + 'pales', + 'salep', + 'saple', + 'sepal', + 'slape', + 'spale', + 'speal'], + 'lepcha': ['chapel', 'lepcha', 'pleach'], + 'leper': ['leper', 'perle', 'repel'], + 'leperdom': ['leperdom', 'premodel'], + 'lepidopter': ['dopplerite', 'lepidopter'], + 'lepidosauria': ['lepidosauria', 'pliosauridae'], + 'lepidote': ['lepidote', 'petioled'], + 'lepidotic': ['diploetic', 'lepidotic'], + 'lepisma': ['ampelis', 'lepisma'], + 'leporid': ['leporid', 'leproid'], + 'leporis': ['leporis', 'spoiler'], + 'lepra': ['lepra', 'paler', 'parel', 'parle', 'pearl', 'perla', 'relap'], + 'leproid': ['leporid', 'leproid'], + 'leproma': ['leproma', 'palermo', 'pleroma', 'polearm'], + 'leprosied': ['despoiler', 'leprosied'], + 'leprosis': ['leprosis', 'plerosis'], + 'leprous': ['leprous', 'pelorus', 'sporule'], + 'leptandra': ['leptandra', 'peltandra'], + 'leptidae': ['depilate', 'leptidae', 'pileated'], + 'leptiform': ['leptiform', 'peltiform'], + 'leptodora': ['doorplate', 'leptodora'], + 'leptome': ['leptome', 'poemlet'], + 'lepton': ['lepton', 'pentol'], + 'leptonema': ['leptonema', 'ptolemean'], + 'leptorchis': ['lectorship', 'leptorchis'], + 'lepus': ['lepus', 'pulse'], + 'ler': ['ler', 'rel'], + 'lernaean': ['annealer', 'lernaean', 'reanneal'], + 'lerot': ['lerot', 'orlet', 'relot'], + 'lerwa': ['lerwa', 'waler'], + 'les': ['els', 'les'], + 'lesath': ['haslet', 'lesath', 'shelta'], + 'lesbia': ['isabel', 'lesbia'], + 'lesche': ['lesche', 'sleech'], + 'lesion': ['insole', 'leonis', 'lesion', 'selion'], + 'lesional': ['lesional', 'solenial'], + 'leslie': ['leslie', 'sellie'], + 'lessener': ['leerness', 'lessener'], + 'lest': ['lest', 'selt'], + 'lester': ['lester', 'selter', 'streel'], + 'let': ['elt', 'let'], + 'letchy': ['lecyth', 'letchy'], + 'lete': ['leet', 'lete', 'teel', 'tele'], + 'lethargus': ['lethargus', 'slaughter'], + 'lethe': ['ethel', 'lethe'], + 'lethean': ['entheal', 'lethean'], + 'lethologica': ['ethological', 'lethologica', 'theological'], + 'letitia': ['italite', 'letitia', 'tilaite'], + 'leto': ['leto', 'lote', 'tole'], + 'letoff': ['letoff', 'offlet'], + 'lett': ['lett', 'telt'], + 'letten': ['letten', 'nettle'], + 
'letterer': ['letterer', 'reletter'], + 'lettish': ['lettish', 'thistle'], + 'lettrin': ['lettrin', 'trintle'], + 'leu': ['leu', 'lue', 'ule'], + 'leucadian': ['leucadian', 'lucanidae'], + 'leucocism': ['leucocism', 'muscicole'], + 'leucoma': ['caulome', 'leucoma'], + 'leucosis': ['coulisse', 'leucosis', 'ossicule'], + 'leud': ['deul', 'duel', 'leud'], + 'leuk': ['leuk', 'luke'], + 'leuma': ['amelu', 'leuma', 'ulema'], + 'leung': ['leung', 'lunge'], + 'levance': ['enclave', 'levance', 'valence'], + 'levant': ['levant', 'valent'], + 'levanter': ['levanter', 'relevant', 'revelant'], + 'levantine': ['levantine', 'valentine'], + 'leveler': ['leveler', 'relevel'], + 'lever': ['elver', 'lever', 'revel'], + 'leverer': ['leverer', 'reveler'], + 'levi': ['evil', 'levi', 'live', 'veil', 'vile', 'vlei'], + 'levier': ['levier', 'relive', 'reveil', 'revile', 'veiler'], + 'levigate': ['legative', 'levigate'], + 'levin': ['levin', 'liven'], + 'levining': ['levining', 'nievling'], + 'levir': ['levir', 'liver', 'livre', 'rivel'], + 'levirate': ['levirate', 'relative'], + 'levis': ['elvis', 'levis', 'slive'], + 'levitation': ['levitation', 'tonalitive', 'velitation'], + 'levo': ['levo', 'love', 'velo', 'vole'], + 'levyist': ['levyist', 'sylvite'], + 'lewd': ['lewd', 'weld'], + 'lewis': ['lewis', 'swile'], + 'lexia': ['axile', 'lexia'], + 'ley': ['ley', 'lye'], + 'lhota': ['altho', 'lhota', 'loath'], + 'liability': ['alibility', 'liability'], + 'liable': ['alible', 'belial', 'labile', 'liable'], + 'liana': ['alain', 'alani', 'liana'], + 'liang': ['algin', 'align', 'langi', 'liang', 'linga'], + 'liar': ['aril', 'lair', 'lari', 'liar', 'lira', 'rail', 'rial'], + 'liard': ['drail', 'laird', 'larid', 'liard'], + 'lias': ['lasi', 'lias', 'lisa', 'sail', 'sial'], + 'liatris': ['liatris', 'trilisa'], + 'libament': ['bailment', 'libament'], + 'libate': ['albeit', + 'albite', + 'baltei', + 'belait', + 'betail', + 'bletia', + 'libate'], + 'libationer': ['libationer', 'liberation'], + 'libber': ['libber', 'ribble'], + 'libby': ['bilby', 'libby'], + 'libellary': ['libellary', 'liberally'], + 'liber': ['birle', 'liber'], + 'liberal': ['braille', 'liberal'], + 'liberally': ['libellary', 'liberally'], + 'liberate': ['iterable', 'liberate'], + 'liberation': ['libationer', 'liberation'], + 'liberator': ['liberator', 'orbitelar'], + 'liberian': ['bilinear', 'liberian'], + 'libertas': ['abristle', 'libertas'], + 'libertine': ['berlinite', 'libertine'], + 'libra': ['blair', 'brail', 'libra'], + 'librate': ['betrail', 'librate', 'triable', 'trilabe'], + 'licania': ['lacinia', 'licania'], + 'license': ['license', 'selenic', 'silence'], + 'licensed': ['licensed', 'silenced'], + 'licenser': ['licenser', 'silencer'], + 'licensor': ['cresolin', 'licensor'], + 'lich': ['chil', 'lich'], + 'lichanos': ['lichanos', 'nicholas'], + 'lichenoid': ['cheloniid', 'lichenoid'], + 'lichi': ['chili', 'lichi'], + 'licitation': ['latticinio', 'licitation'], + 'licker': ['licker', 'relick', 'rickle'], + 'lickspit': ['lickspit', 'lipstick'], + 'licorne': ['creolin', 'licorne', 'locrine'], + 'lida': ['dail', 'dali', 'dial', 'laid', 'lida'], + 'lidded': ['diddle', 'lidded'], + 'lidder': ['lidder', 'riddel', 'riddle'], + 'lide': ['idle', 'lide', 'lied'], + 'lie': ['eli', 'lei', 'lie'], + 'lied': ['idle', 'lide', 'lied'], + 'lief': ['feil', 'file', 'leif', 'lief', 'life'], + 'lien': ['lien', 'line', 'neil', 'nile'], + 'lienal': ['lienal', 'lineal'], + 'lienee': ['eileen', 'lienee'], + 'lienor': ['elinor', 'lienor', 'lorien', 'noiler'], + 'lienteria': 
['leitneria', 'lienteria'], + 'lientery': ['entirely', 'lientery'], + 'lier': ['lier', 'lire', 'rile'], + 'lierne': ['lierne', 'reline'], + 'lierre': ['lierre', 'relier'], + 'liesh': ['liesh', 'shiel'], + 'lievaart': ['lievaart', 'varietal'], + 'life': ['feil', 'file', 'leif', 'lief', 'life'], + 'lifelike': ['filelike', 'lifelike'], + 'lifer': ['filer', 'flier', 'lifer', 'rifle'], + 'lifeward': ['drawfile', 'lifeward'], + 'lifo': ['filo', 'foil', 'lifo'], + 'lift': ['flit', 'lift'], + 'lifter': ['fertil', 'filter', 'lifter', 'relift', 'trifle'], + 'lifting': ['fliting', 'lifting'], + 'ligament': ['ligament', 'metaling', 'tegminal'], + 'ligas': ['gisla', 'ligas', 'sigla'], + 'ligate': ['aiglet', 'ligate', 'taigle', 'tailge'], + 'ligation': ['intaglio', 'ligation'], + 'ligator': ['goitral', 'larigot', 'ligator'], + 'ligature': ['alurgite', 'ligature'], + 'lighten': ['enlight', 'lighten'], + 'lightener': ['lightener', 'relighten', 'threeling'], + 'lighter': ['lighter', 'relight', 'rightle'], + 'lighthead': ['headlight', 'lighthead'], + 'lightness': ['lightness', 'nightless', 'thingless'], + 'ligne': ['ingle', 'ligne', 'linge', 'nigel'], + 'lignin': ['lignin', 'lining'], + 'lignitic': ['lignitic', 'tiglinic'], + 'lignose': ['gelosin', 'lignose'], + 'ligroine': ['ligroine', 'religion'], + 'ligure': ['ligure', 'reguli'], + 'lija': ['jail', 'lija'], + 'like': ['kiel', 'like'], + 'liken': ['inkle', 'liken'], + 'likewise': ['likewise', 'wiselike'], + 'lilac': ['calli', 'lilac'], + 'lilacky': ['alkylic', 'lilacky'], + 'lilium': ['illium', 'lilium'], + 'lilt': ['lilt', 'till'], + 'lily': ['illy', 'lily', 'yill'], + 'lim': ['lim', 'mil'], + 'lima': ['amil', 'amli', 'lima', 'mail', 'mali', 'mila'], + 'limacina': ['animalic', 'limacina'], + 'limacon': ['limacon', 'malonic'], + 'liman': ['lamin', 'liman', 'milan'], + 'limation': ['limation', 'miltonia'], + 'limbat': ['limbat', 'timbal'], + 'limbate': ['limbate', 'timable', 'timbale'], + 'limbed': ['dimble', 'limbed'], + 'limbus': ['bluism', 'limbus'], + 'limby': ['blimy', 'limby'], + 'lime': ['emil', 'lime', 'mile'], + 'limean': ['limean', 'maline', 'melian', 'menial'], + 'limeman': ['ammelin', 'limeman'], + 'limer': ['limer', 'meril', 'miler'], + 'limes': ['limes', 'miles', 'slime', 'smile'], + 'limestone': ['limestone', 'melonites', 'milestone'], + 'limey': ['elymi', 'emily', 'limey'], + 'liminess': ['liminess', 'senilism'], + 'limitary': ['limitary', 'military'], + 'limitate': ['limitate', 'militate'], + 'limitation': ['limitation', 'militation'], + 'limited': ['delimit', 'limited'], + 'limiter': ['limiter', 'relimit'], + 'limitless': ['limitless', 'semistill'], + 'limner': ['limner', 'merlin', 'milner'], + 'limnetic': ['limnetic', 'milicent'], + 'limoniad': ['dominial', 'imolinda', 'limoniad'], + 'limosa': ['limosa', 'somali'], + 'limose': ['lemosi', 'limose', 'moiles'], + 'limp': ['limp', 'pilm', 'plim'], + 'limper': ['limper', 'prelim', 'rimple'], + 'limping': ['impling', 'limping'], + 'limpsy': ['limpsy', 'simply'], + 'limpy': ['imply', 'limpy', 'pilmy'], + 'limsy': ['limsy', 'slimy', 'smily'], + 'lin': ['lin', 'nil'], + 'lina': ['alin', 'anil', 'lain', 'lina', 'nail'], + 'linaga': ['agnail', 'linaga'], + 'linage': ['algine', 'genial', 'linage'], + 'linamarin': ['laminarin', 'linamarin'], + 'linarite': ['inertial', 'linarite'], + 'linchet': ['linchet', 'tinchel'], + 'linctus': ['clunist', 'linctus'], + 'linda': ['danli', 'ladin', 'linda', 'nidal'], + 'lindane': ['annelid', 'lindane'], + 'linder': ['linder', 'rindle'], + 'lindoite': ['lindoite', 
'tolidine'], + 'lindsay': ['islandy', 'lindsay'], + 'line': ['lien', 'line', 'neil', 'nile'], + 'linea': ['alien', 'aline', 'anile', 'elain', 'elian', 'laine', 'linea'], + 'lineal': ['lienal', 'lineal'], + 'lineament': ['lineament', 'manteline'], + 'linear': ['arline', 'larine', 'linear', 'nailer', 'renail'], + 'linearity': ['inreality', 'linearity'], + 'lineate': ['elatine', 'lineate'], + 'lineature': ['lineature', 'rutelinae'], + 'linecut': ['linecut', 'tunicle'], + 'lined': ['eldin', 'lined'], + 'lineman': ['lemnian', 'lineman', 'melanin'], + 'linen': ['linen', 'linne'], + 'linesman': ['annelism', 'linesman'], + 'linet': ['inlet', 'linet'], + 'linga': ['algin', 'align', 'langi', 'liang', 'linga'], + 'lingbird': ['birdling', 'bridling', 'lingbird'], + 'linge': ['ingle', 'ligne', 'linge', 'nigel'], + 'linger': ['linger', 'ringle'], + 'lingo': ['lingo', 'login'], + 'lingtow': ['lingtow', 'twoling'], + 'lingua': ['gaulin', 'lingua'], + 'lingual': ['lingual', 'lingula'], + 'linguidental': ['dentilingual', 'indulgential', 'linguidental'], + 'lingula': ['lingual', 'lingula'], + 'linguodental': ['dentolingual', 'linguodental'], + 'lingy': ['lingy', 'lying'], + 'linha': ['linha', 'nihal'], + 'lining': ['lignin', 'lining'], + 'link': ['kiln', 'link'], + 'linked': ['kindle', 'linked'], + 'linker': ['linker', 'relink'], + 'linking': ['inkling', 'linking'], + 'linkman': ['kilnman', 'linkman'], + 'links': ['links', 'slink'], + 'linnaea': ['alanine', 'linnaea'], + 'linnaean': ['annaline', 'linnaean'], + 'linne': ['linen', 'linne'], + 'linnet': ['linnet', 'linten'], + 'lino': ['lino', 'lion', 'loin', 'noil'], + 'linolenic': ['encinillo', 'linolenic'], + 'linometer': ['linometer', 'nilometer'], + 'linopteris': ['linopteris', 'prosilient'], + 'linous': ['insoul', 'linous', 'nilous', 'unsoil'], + 'linsey': ['linsey', 'lysine'], + 'linstock': ['coltskin', 'linstock'], + 'lintel': ['lentil', 'lintel'], + 'linten': ['linnet', 'linten'], + 'lintseed': ['enlisted', 'lintseed'], + 'linum': ['linum', 'ulmin'], + 'linus': ['linus', 'sunil'], + 'liny': ['inly', 'liny'], + 'lion': ['lino', 'lion', 'loin', 'noil'], + 'lioncel': ['colline', 'lioncel'], + 'lionel': ['lionel', 'niello'], + 'lionet': ['entoil', 'lionet'], + 'lionhearted': ['leonhardite', 'lionhearted'], + 'lipa': ['lipa', 'pail', 'pali', 'pial'], + 'lipan': ['lipan', 'pinal', 'plain'], + 'liparis': ['aprilis', 'liparis'], + 'liparite': ['liparite', 'reptilia'], + 'liparous': ['liparous', 'pliosaur'], + 'lipase': ['espial', 'lipase', 'pelias'], + 'lipin': ['lipin', 'pilin'], + 'liplet': ['liplet', 'pillet'], + 'lipochondroma': ['chondrolipoma', 'lipochondroma'], + 'lipoclasis': ['calliopsis', 'lipoclasis'], + 'lipocyte': ['epicotyl', 'lipocyte'], + 'lipofibroma': ['fibrolipoma', 'lipofibroma'], + 'lipolytic': ['lipolytic', 'politicly'], + 'lipoma': ['lipoma', 'pimola', 'ploima'], + 'lipomyoma': ['lipomyoma', 'myolipoma'], + 'lipomyxoma': ['lipomyxoma', 'myxolipoma'], + 'liposis': ['liposis', 'pilosis'], + 'lipotype': ['lipotype', 'polypite'], + 'lippen': ['lippen', 'nipple'], + 'lipper': ['lipper', 'ripple'], + 'lippia': ['lippia', 'pilpai'], + 'lipsanotheca': ['lipsanotheca', 'sphacelation'], + 'lipstick': ['lickspit', 'lipstick'], + 'liquate': ['liquate', 'tequila'], + 'liquidate': ['liquidate', 'qualitied'], + 'lira': ['aril', 'lair', 'lari', 'liar', 'lira', 'rail', 'rial'], + 'lirate': ['lirate', 'retail', 'retial', 'tailer'], + 'liration': ['liration', 'litorina'], + 'lire': ['lier', 'lire', 'rile'], + 'lis': ['lis', 'sil'], + 'lisa': ['lasi', 
'lias', 'lisa', 'sail', 'sial'], + 'lise': ['isle', 'lise', 'sile'], + 'lisere': ['lisere', 'resile'], + 'lisk': ['lisk', 'silk', 'skil'], + 'lisle': ['lisle', 'selli'], + 'lisp': ['lisp', 'slip'], + 'lisper': ['lisper', 'pliers', 'sirple', 'spiler'], + 'list': ['list', 'silt', 'slit'], + 'listable': ['bastille', 'listable'], + 'listen': ['enlist', 'listen', 'silent', 'tinsel'], + 'listener': ['enlister', 'esterlin', 'listener', 'relisten'], + 'lister': ['lister', 'relist'], + 'listera': ['aletris', 'alister', 'listera', 'realist', 'saltier'], + 'listerian': ['listerian', 'trisilane'], + 'listerine': ['listerine', 'resilient'], + 'listerize': ['listerize', 'sterilize'], + 'listing': ['listing', 'silting'], + 'listless': ['listless', 'slitless'], + 'lisuarte': ['auletris', 'lisuarte'], + 'lit': ['lit', 'til'], + 'litas': ['alist', 'litas', 'slait', 'talis'], + 'litchi': ['litchi', 'lithic'], + 'lite': ['lite', 'teil', 'teli', 'tile'], + 'liter': ['liter', 'tiler'], + 'literal': ['literal', 'tallier'], + 'literary': ['literary', 'trailery'], + 'literate': ['laterite', 'literate', 'teretial'], + 'literose': ['literose', 'roselite', 'tirolese'], + 'lith': ['hilt', 'lith'], + 'litharge': ['litharge', 'thirlage'], + 'lithe': ['leith', 'lithe'], + 'lithectomy': ['lithectomy', 'methylotic'], + 'lithic': ['litchi', 'lithic'], + 'litho': ['litho', 'thiol', 'tholi'], + 'lithochromography': ['chromolithography', 'lithochromography'], + 'lithocyst': ['cystolith', 'lithocyst'], + 'lithonephria': ['lithonephria', 'philotherian'], + 'lithonephrotomy': ['lithonephrotomy', 'nephrolithotomy'], + 'lithophane': ['anthophile', 'lithophane'], + 'lithophone': ['lithophone', 'thiophenol'], + 'lithophotography': ['lithophotography', 'photolithography'], + 'lithophysal': ['isophthalyl', 'lithophysal'], + 'lithopone': ['lithopone', 'phonolite'], + 'lithous': ['lithous', 'loutish'], + 'litigate': ['litigate', 'tagilite'], + 'litmus': ['litmus', 'tilmus'], + 'litorina': ['liration', 'litorina'], + 'litorinidae': ['idioretinal', 'litorinidae'], + 'litra': ['litra', 'trail', 'trial'], + 'litsea': ['isleta', 'litsea', 'salite', 'stelai'], + 'litster': ['litster', 'slitter', 'stilter', 'testril'], + 'litten': ['litten', 'tinlet'], + 'litter': ['litter', 'tilter', 'titler'], + 'littery': ['littery', 'tritely'], + 'littoral': ['littoral', 'tortilla'], + 'lituiform': ['lituiform', 'trifolium'], + 'litus': ['litus', 'sluit', 'tulsi'], + 'live': ['evil', 'levi', 'live', 'veil', 'vile', 'vlei'], + 'lived': ['devil', 'divel', 'lived'], + 'livedo': ['livedo', 'olived'], + 'liveliness': ['liveliness', 'villeiness'], + 'livelong': ['livelong', 'loveling'], + 'lively': ['evilly', 'lively', 'vilely'], + 'liven': ['levin', 'liven'], + 'liveness': ['evilness', 'liveness', 'veinless', 'vileness', 'vineless'], + 'liver': ['levir', 'liver', 'livre', 'rivel'], + 'livered': ['deliver', 'deviler', 'livered'], + 'livery': ['livery', 'verily'], + 'livier': ['livier', 'virile'], + 'livonian': ['livonian', 'violanin'], + 'livre': ['levir', 'liver', 'livre', 'rivel'], + 'liwan': ['inlaw', 'liwan'], + 'llandeilo': ['diallelon', 'llandeilo'], + 'llew': ['llew', 'well'], + 'lloyd': ['dolly', 'lloyd'], + 'loa': ['alo', 'lao', 'loa'], + 'loach': ['chola', 'loach', 'olcha'], + 'load': ['alod', 'dola', 'load', 'odal'], + 'loaden': ['enodal', 'loaden'], + 'loader': ['loader', 'ordeal', 'reload'], + 'loading': ['angloid', 'loading'], + 'loaf': ['foal', 'loaf', 'olaf'], + 'loam': ['loam', 'loma', 'malo', 'mola', 'olam'], + 'loaminess': ['loaminess', 
'melanosis'], + 'loaming': ['almoign', 'loaming'], + 'loamy': ['amylo', 'loamy'], + 'loaner': ['lenora', 'loaner', 'orlean', 'reloan'], + 'loasa': ['alosa', 'loasa', 'oasal'], + 'loath': ['altho', 'lhota', 'loath'], + 'loather': ['loather', 'rathole'], + 'loathly': ['loathly', 'tallyho'], + 'lob': ['blo', 'lob'], + 'lobar': ['balor', 'bolar', 'boral', 'labor', 'lobar'], + 'lobate': ['lobate', 'oblate'], + 'lobated': ['bloated', 'lobated'], + 'lobately': ['lobately', 'oblately'], + 'lobation': ['boltonia', 'lobation', 'oblation'], + 'lobe': ['bleo', 'bole', 'lobe'], + 'lobed': ['bodle', 'boled', 'lobed'], + 'lobelet': ['bellote', 'lobelet'], + 'lobelia': ['bolelia', 'lobelia', 'obelial'], + 'lobing': ['globin', 'goblin', 'lobing'], + 'lobo': ['bolo', 'bool', 'lobo', 'obol'], + 'lobola': ['balolo', 'lobola'], + 'lobscourse': ['lobscourse', 'lobscouser'], + 'lobscouser': ['lobscourse', 'lobscouser'], + 'lobster': ['bolster', 'lobster'], + 'loca': ['alco', 'coal', 'cola', 'loca'], + 'local': ['callo', 'colla', 'local'], + 'locanda': ['acnodal', 'canadol', 'locanda'], + 'locarnite': ['alectrion', 'clarionet', 'crotaline', 'locarnite'], + 'locarnize': ['laconizer', 'locarnize'], + 'locarno': ['coronal', 'locarno'], + 'locate': ['acetol', 'colate', 'locate'], + 'location': ['colation', 'coontail', 'location'], + 'locational': ['allocation', 'locational'], + 'locator': ['crotalo', 'locator'], + 'loch': ['chol', 'loch'], + 'lochan': ['chalon', 'lochan'], + 'lochetic': ['helcotic', 'lochetic', 'ochletic'], + 'lochus': ['holcus', 'lochus', 'slouch'], + 'loci': ['clio', 'coil', 'coli', 'loci'], + 'lociation': ['coalition', 'lociation'], + 'lock': ['colk', 'lock'], + 'locker': ['locker', 'relock'], + 'lockpin': ['lockpin', 'pinlock'], + 'lockram': ['lockram', 'marlock'], + 'lockspit': ['lockspit', 'lopstick'], + 'lockup': ['lockup', 'uplock'], + 'loco': ['cool', 'loco'], + 'locoweed': ['coolweed', 'locoweed'], + 'locrian': ['carolin', 'clarion', 'colarin', 'locrian'], + 'locrine': ['creolin', 'licorne', 'locrine'], + 'loculate': ['allocute', 'loculate'], + 'loculation': ['allocution', 'loculation'], + 'locum': ['cumol', 'locum'], + 'locusta': ['costula', 'locusta', 'talcous'], + 'lod': ['dol', 'lod', 'old'], + 'lode': ['dole', 'elod', 'lode', 'odel'], + 'lodesman': ['dolesman', 'lodesman'], + 'lodgeman': ['angeldom', 'lodgeman'], + 'lodger': ['golder', 'lodger'], + 'lodging': ['godling', 'lodging'], + 'loess': ['loess', 'soles'], + 'loessic': ['loessic', 'ossicle'], + 'lof': ['flo', 'lof'], + 'loft': ['flot', 'loft'], + 'lofter': ['floret', 'forlet', 'lofter', 'torfel'], + 'lofty': ['lofty', 'oftly'], + 'log': ['gol', 'log'], + 'logania': ['alogian', 'logania'], + 'logarithm': ['algorithm', 'logarithm'], + 'logarithmic': ['algorithmic', 'logarithmic'], + 'loge': ['egol', 'goel', 'loge', 'ogle', 'oleg'], + 'logger': ['logger', 'roggle'], + 'logicalist': ['logicalist', 'logistical'], + 'logicist': ['logicist', 'logistic'], + 'login': ['lingo', 'login'], + 'logistic': ['logicist', 'logistic'], + 'logistical': ['logicalist', 'logistical'], + 'logistics': ['glossitic', 'logistics'], + 'logman': ['amlong', 'logman'], + 'logographic': ['graphologic', 'logographic'], + 'logographical': ['graphological', 'logographical'], + 'logography': ['graphology', 'logography'], + 'logoi': ['igloo', 'logoi'], + 'logometrical': ['logometrical', 'metrological'], + 'logos': ['gools', 'logos'], + 'logotypy': ['logotypy', 'typology'], + 'logria': ['gloria', 'larigo', 'logria'], + 'logy': ['gloy', 'logy'], + 'lohar': ['horal', 
'lohar'], + 'loin': ['lino', 'lion', 'loin', 'noil'], + 'loined': ['doline', 'indole', 'leonid', 'loined', 'olenid'], + 'loir': ['loir', 'lori', 'roil'], + 'lois': ['lois', 'silo', 'siol', 'soil', 'soli'], + 'loiter': ['loiter', 'toiler', 'triole'], + 'loka': ['kalo', 'kola', 'loka'], + 'lokao': ['lokao', 'oolak'], + 'loke': ['koel', 'loke'], + 'loket': ['ketol', 'loket'], + 'lola': ['lalo', 'lola', 'olla'], + 'loma': ['loam', 'loma', 'malo', 'mola', 'olam'], + 'lomatine': ['lomatine', 'tolamine'], + 'lombardian': ['laminboard', 'lombardian'], + 'lomboy': ['bloomy', 'lomboy'], + 'loment': ['loment', 'melton', 'molten'], + 'lomentaria': ['ameliorant', 'lomentaria'], + 'lomita': ['lomita', 'tomial'], + 'lone': ['elon', 'enol', 'leno', 'leon', 'lone', 'noel'], + 'longa': ['along', 'gonal', 'lango', 'longa', 'nogal'], + 'longanimous': ['longanimous', 'longimanous'], + 'longbeard': ['boglander', 'longbeard'], + 'longear': ['argenol', 'longear'], + 'longhead': ['headlong', 'longhead'], + 'longimanous': ['longanimous', 'longimanous'], + 'longimetry': ['longimetry', 'mongrelity'], + 'longmouthed': ['goldenmouth', 'longmouthed'], + 'longue': ['longue', 'lounge'], + 'lonicera': ['acrolein', + 'arecolin', + 'caroline', + 'colinear', + 'cornelia', + 'creolian', + 'lonicera'], + 'lontar': ['latron', 'lontar', 'tornal'], + 'looby': ['booly', 'looby'], + 'lood': ['dool', 'lood'], + 'loof': ['fool', 'loof', 'olof'], + 'look': ['kolo', 'look'], + 'looker': ['looker', 'relook'], + 'lookout': ['lookout', 'outlook'], + 'loom': ['loom', 'mool'], + 'loon': ['loon', 'nolo'], + 'loop': ['loop', 'polo', 'pool'], + 'looper': ['looper', 'pooler'], + 'loopist': ['loopist', 'poloist', 'topsoil'], + 'loopy': ['loopy', 'pooly'], + 'loosing': ['loosing', 'sinolog'], + 'loot': ['loot', 'tool'], + 'looter': ['looter', 'retool', 'rootle', 'tooler'], + 'lootie': ['lootie', 'oolite'], + 'lop': ['lop', 'pol'], + 'lope': ['lope', 'olpe', 'pole'], + 'loper': ['loper', 'poler'], + 'lopezia': ['epizoal', 'lopezia', 'opalize'], + 'lophine': ['lophine', 'pinhole'], + 'lophocomi': ['homopolic', 'lophocomi'], + 'loppet': ['loppet', 'topple'], + 'loppy': ['loppy', 'polyp'], + 'lopstick': ['lockspit', 'lopstick'], + 'loquacious': ['aquicolous', 'loquacious'], + 'lora': ['lora', 'oral'], + 'lorandite': ['lorandite', 'rodential'], + 'lorate': ['elator', 'lorate'], + 'lorcha': ['choral', 'lorcha'], + 'lordly': ['drolly', 'lordly'], + 'lore': ['lore', 'orle', 'role'], + 'lored': ['lored', 'older'], + 'loren': ['enrol', 'loren'], + 'lori': ['loir', 'lori', 'roil'], + 'lorica': ['caroli', 'corial', 'lorica'], + 'loricate': ['calorite', 'erotical', 'loricate'], + 'loricati': ['clitoria', 'loricati'], + 'lorien': ['elinor', 'lienor', 'lorien', 'noiler'], + 'loro': ['loro', 'olor', 'orlo', 'rool'], + 'lose': ['lose', 'sloe', 'sole'], + 'loser': ['loser', 'orsel', 'rosel', 'soler'], + 'lost': ['lost', 'lots', 'slot'], + 'lot': ['lot', 'tol'], + 'lota': ['alto', 'lota'], + 'lotase': ['lotase', 'osteal', 'solate', 'stolae', 'talose'], + 'lote': ['leto', 'lote', 'tole'], + 'lotic': ['cloit', 'lotic'], + 'lotrite': ['lotrite', 'tortile', 'triolet'], + 'lots': ['lost', 'lots', 'slot'], + 'lotta': ['lotta', 'total'], + 'lotter': ['lotter', 'rottle', 'tolter'], + 'lottie': ['lottie', 'toilet', 'tolite'], + 'lou': ['lou', 'luo'], + 'loud': ['loud', 'ludo'], + 'louden': ['louden', 'nodule'], + 'lough': ['ghoul', 'lough'], + 'lounder': ['durenol', 'lounder', 'roundel'], + 'lounge': ['longue', 'lounge'], + 'loupe': ['epulo', 'loupe'], + 'lourdy': ['dourly', 
'lourdy'], + 'louse': ['eusol', 'louse'], + 'lousy': ['lousy', 'souly'], + 'lout': ['lout', 'tolu'], + 'louter': ['elutor', 'louter', 'outler'], + 'loutish': ['lithous', 'loutish'], + 'louty': ['louty', 'outly'], + 'louvar': ['louvar', 'ovular'], + 'louver': ['louver', 'louvre'], + 'louvre': ['louver', 'louvre'], + 'lovable': ['lovable', 'volable'], + 'lovage': ['lovage', 'volage'], + 'love': ['levo', 'love', 'velo', 'vole'], + 'loveling': ['livelong', 'loveling'], + 'lovely': ['lovely', 'volley'], + 'lovering': ['lovering', 'overling'], + 'low': ['low', 'lwo', 'owl'], + 'lowa': ['alow', 'awol', 'lowa'], + 'lowder': ['lowder', 'weldor', 'wordle'], + 'lower': ['lower', 'owler', 'rowel'], + 'lowerer': ['lowerer', 'relower'], + 'lowery': ['lowery', 'owlery', 'rowley', 'yowler'], + 'lowish': ['lowish', 'owlish'], + 'lowishly': ['lowishly', 'owlishly', 'sillyhow'], + 'lowishness': ['lowishness', 'owlishness'], + 'lowy': ['lowy', 'owly', 'yowl'], + 'loyal': ['alloy', 'loyal'], + 'loyalism': ['loyalism', 'lysiloma'], + 'loyd': ['loyd', 'odyl'], + 'luba': ['balu', 'baul', 'bual', 'luba'], + 'lubber': ['burble', 'lubber', 'rubble'], + 'lubberland': ['landlubber', 'lubberland'], + 'lube': ['blue', 'lube'], + 'lucan': ['lucan', 'nucal'], + 'lucania': ['lucania', 'luciana'], + 'lucanid': ['dulcian', 'incudal', 'lucanid', 'lucinda'], + 'lucanidae': ['leucadian', 'lucanidae'], + 'lucarne': ['crenula', 'lucarne', 'nuclear', 'unclear'], + 'lucban': ['buncal', 'lucban'], + 'luce': ['clue', 'luce'], + 'luceres': ['luceres', 'recluse'], + 'lucern': ['encurl', 'lucern'], + 'lucernal': ['lucernal', 'nucellar', 'uncellar'], + 'lucet': ['culet', 'lucet'], + 'lucia': ['aulic', 'lucia'], + 'lucian': ['cunila', 'lucian', 'lucina', 'uncial'], + 'luciana': ['lucania', 'luciana'], + 'lucifer': ['ferulic', 'lucifer'], + 'lucigen': ['glucine', 'lucigen'], + 'lucina': ['cunila', 'lucian', 'lucina', 'uncial'], + 'lucinda': ['dulcian', 'incudal', 'lucanid', 'lucinda'], + 'lucinoid': ['lucinoid', 'oculinid'], + 'lucite': ['lucite', 'luetic', 'uletic'], + 'lucrative': ['lucrative', 'revictual', 'victualer'], + 'lucre': ['cruel', 'lucre', 'ulcer'], + 'lucretia': ['arculite', 'cutleria', 'lucretia', 'reticula', 'treculia'], + 'lucretian': ['centurial', 'lucretian', 'ultranice'], + 'luctiferous': ['fruticulose', 'luctiferous'], + 'lucubrate': ['lucubrate', 'tubercula'], + 'ludden': ['ludden', 'nuddle'], + 'luddite': ['diluted', 'luddite'], + 'ludian': ['dualin', 'ludian', 'unlaid'], + 'ludibry': ['buirdly', 'ludibry'], + 'ludicroserious': ['ludicroserious', 'serioludicrous'], + 'ludo': ['loud', 'ludo'], + 'lue': ['leu', 'lue', 'ule'], + 'lues': ['lues', 'slue'], + 'luetic': ['lucite', 'luetic', 'uletic'], + 'lug': ['gul', 'lug'], + 'luge': ['glue', 'gule', 'luge'], + 'luger': ['gluer', 'gruel', 'luger'], + 'lugger': ['gurgle', 'lugger', 'ruggle'], + 'lugnas': ['lugnas', 'salung'], + 'lugsome': ['glumose', 'lugsome'], + 'luian': ['inula', 'luian', 'uinal'], + 'luiseno': ['elusion', 'luiseno'], + 'luite': ['luite', 'utile'], + 'lukas': ['klaus', 'lukas', 'sulka'], + 'luke': ['leuk', 'luke'], + 'lula': ['lula', 'ulla'], + 'lulab': ['bulla', 'lulab'], + 'lumbar': ['brumal', 'labrum', 'lumbar', 'umbral'], + 'lumber': ['lumber', 'rumble', 'umbrel'], + 'lumbosacral': ['lumbosacral', 'sacrolumbal'], + 'lumine': ['lumine', 'unlime'], + 'lump': ['lump', 'plum'], + 'lumper': ['lumper', 'plumer', 'replum', 'rumple'], + 'lumpet': ['lumpet', 'plumet'], + 'lumpiness': ['lumpiness', 'pluminess'], + 'lumpy': ['lumpy', 'plumy'], + 'luna': 
['laun', 'luna', 'ulna', 'unal'], + 'lunacy': ['lunacy', 'unclay'], + 'lunar': ['lunar', 'ulnar', 'urnal'], + 'lunare': ['lunare', 'neural', 'ulnare', 'unreal'], + 'lunaria': ['lunaria', 'ulnaria', 'uralian'], + 'lunary': ['lunary', 'uranyl'], + 'lunatic': ['calinut', 'lunatic'], + 'lunation': ['lunation', 'ultonian'], + 'lunda': ['dunal', 'laund', 'lunda', 'ulnad'], + 'lung': ['gunl', 'lung'], + 'lunge': ['leung', 'lunge'], + 'lunged': ['gulden', 'lunged'], + 'lungfish': ['flushing', 'lungfish'], + 'lungsick': ['lungsick', 'suckling'], + 'lunisolar': ['lunisolar', 'solilunar'], + 'luo': ['lou', 'luo'], + 'lupe': ['lupe', 'pelu', 'peul', 'pule'], + 'luperci': ['luperci', 'pleuric'], + 'lupicide': ['lupicide', 'pediculi', 'pulicide'], + 'lupinaster': ['lupinaster', 'palustrine'], + 'lupine': ['lupine', 'unpile', 'upline'], + 'lupinus': ['lupinus', 'pinulus'], + 'lupis': ['lupis', 'pilus'], + 'lupous': ['lupous', 'opulus'], + 'lura': ['alur', 'laur', 'lura', 'raul', 'ural'], + 'lurch': ['churl', 'lurch'], + 'lure': ['lure', 'rule'], + 'lurer': ['lurer', 'ruler'], + 'lurg': ['gurl', 'lurg'], + 'luringly': ['luringly', 'rulingly'], + 'luscinia': ['luscinia', 'siculian'], + 'lush': ['lush', 'shlu', 'shul'], + 'lusher': ['lusher', 'shuler'], + 'lushly': ['hyllus', 'lushly'], + 'lushness': ['lushness', 'shunless'], + 'lusian': ['insula', 'lanius', 'lusian'], + 'lusk': ['lusk', 'sulk'], + 'lusky': ['lusky', 'sulky'], + 'lusory': ['lusory', 'sourly'], + 'lust': ['lust', 'slut'], + 'luster': ['luster', 'result', 'rustle', 'sutler', 'ulster'], + 'lusterless': ['lusterless', 'lustreless', 'resultless'], + 'lustihead': ['hastilude', 'lustihead'], + 'lustreless': ['lusterless', 'lustreless', 'resultless'], + 'lustrine': ['insulter', 'lustrine', 'reinsult'], + 'lustring': ['lustring', 'rustling'], + 'lusty': ['lusty', 'tylus'], + 'lutaceous': ['cautelous', 'lutaceous'], + 'lutany': ['auntly', 'lutany'], + 'lutayo': ['layout', 'lutayo', 'outlay'], + 'lute': ['lute', 'tule'], + 'luteal': ['alulet', 'luteal'], + 'lutecia': ['aleutic', 'auletic', 'caulite', 'lutecia'], + 'lutecium': ['cumulite', 'lutecium'], + 'lutein': ['lutein', 'untile'], + 'lutfisk': ['kistful', 'lutfisk'], + 'luther': ['hurtle', 'luther'], + 'lutheran': ['lutheran', 'unhalter'], + 'lutianoid': ['lutianoid', 'nautiloid'], + 'lutianus': ['lutianus', 'nautilus', 'ustulina'], + 'luting': ['glutin', 'luting', 'ungilt'], + 'lutose': ['lutose', 'solute', 'tousle'], + 'lutra': ['lutra', 'ultra'], + 'lutrinae': ['lutrinae', 'retinula', 'rutelian', 'tenurial'], + 'luxe': ['luxe', 'ulex'], + 'lwo': ['low', 'lwo', 'owl'], + 'lyam': ['amyl', 'lyam', 'myal'], + 'lyard': ['daryl', 'lardy', 'lyard'], + 'lyas': ['lyas', 'slay'], + 'lycaenid': ['adenylic', 'lycaenid'], + 'lyceum': ['cymule', 'lyceum'], + 'lycopodium': ['lycopodium', 'polycodium'], + 'lyctidae': ['diacetyl', 'lyctidae'], + 'lyddite': ['lyddite', 'tiddley'], + 'lydia': ['daily', 'lydia'], + 'lydite': ['idlety', 'lydite', 'tidely', 'tidley'], + 'lye': ['ley', 'lye'], + 'lying': ['lingy', 'lying'], + 'lymnaeid': ['lymnaeid', 'maidenly', 'medianly'], + 'lymphadenia': ['lymphadenia', 'nymphalidae'], + 'lymphectasia': ['lymphectasia', 'metaphysical'], + 'lymphopenia': ['lymphopenia', 'polyphemian'], + 'lynne': ['lenny', 'lynne'], + 'lyon': ['lyon', 'only'], + 'lyophobe': ['hypobole', 'lyophobe'], + 'lyra': ['aryl', 'lyra', 'ryal', 'yarl'], + 'lyraid': ['aridly', 'lyraid'], + 'lyrate': ['lyrate', 'raylet', 'realty', 'telary'], + 'lyre': ['lyre', 'rely'], + 'lyric': ['cyril', 'lyric'], + 
'lyrical': ['cyrilla', 'lyrical'], + 'lyrid': ['idryl', 'lyrid'], + 'lys': ['lys', 'sly'], + 'lysander': ['lysander', 'synedral'], + 'lysate': ['alytes', 'astely', 'lysate', 'stealy'], + 'lyse': ['lyse', 'sley'], + 'lysiloma': ['loyalism', 'lysiloma'], + 'lysine': ['linsey', 'lysine'], + 'lysogenetic': ['cleistogeny', 'lysogenetic'], + 'lysogenic': ['glycosine', 'lysogenic'], + 'lyssic': ['clysis', 'lyssic'], + 'lyterian': ['interlay', 'lyterian'], + 'lyxose': ['lyxose', 'xylose'], + 'ma': ['am', 'ma'], + 'maam': ['amma', 'maam'], + 'mab': ['bam', 'mab'], + 'maba': ['amba', 'maba'], + 'mabel': ['amble', 'belam', 'blame', 'mabel'], + 'mabi': ['iamb', 'mabi'], + 'mabolo': ['abloom', 'mabolo'], + 'mac': ['cam', 'mac'], + 'macaca': ['camaca', 'macaca'], + 'macaco': ['cocama', 'macaco'], + 'macaglia': ['almaciga', 'macaglia'], + 'macan': ['caman', 'macan'], + 'macanese': ['macanese', 'maecenas'], + 'macao': ['acoma', 'macao'], + 'macarism': ['macarism', 'marasmic'], + 'macaroni': ['armonica', 'macaroni', 'marocain'], + 'macaronic': ['carcinoma', 'macaronic'], + 'mace': ['acme', 'came', 'mace'], + 'macedon': ['conamed', 'macedon'], + 'macedonian': ['caedmonian', 'macedonian'], + 'macedonic': ['caedmonic', 'macedonic'], + 'macer': ['cream', 'macer'], + 'macerate': ['camerate', 'macerate', 'racemate'], + 'maceration': ['aeromantic', 'cameration', 'maceration', 'racemation'], + 'machar': ['chamar', 'machar'], + 'machi': ['chiam', 'machi', 'micah'], + 'machilis': ['chiliasm', 'hilasmic', 'machilis'], + 'machinator': ['achromatin', 'chariotman', 'machinator'], + 'machine': ['chimane', 'machine'], + 'machinery': ['hemicrany', 'machinery'], + 'macies': ['camise', 'macies'], + 'macigno': ['coaming', 'macigno'], + 'macilency': ['cyclamine', 'macilency'], + 'macle': ['camel', 'clame', 'cleam', 'macle'], + 'maclura': ['maclura', 'macular'], + 'maco': ['coma', 'maco'], + 'macon': ['coman', 'macon', 'manoc'], + 'maconite': ['coinmate', 'maconite'], + 'macro': ['carom', 'coram', 'macro', 'marco'], + 'macrobian': ['carbamino', 'macrobian'], + 'macromazia': ['macromazia', 'macrozamia'], + 'macrophage': ['cameograph', 'macrophage'], + 'macrophotograph': ['macrophotograph', 'photomacrograph'], + 'macrotia': ['aromatic', 'macrotia'], + 'macrotin': ['macrotin', 'romantic'], + 'macrourid': ['macrourid', 'macruroid'], + 'macrourus': ['macrourus', 'macrurous'], + 'macrozamia': ['macromazia', 'macrozamia'], + 'macruroid': ['macrourid', 'macruroid'], + 'macrurous': ['macrourus', 'macrurous'], + 'mactra': ['mactra', 'tarmac'], + 'macular': ['maclura', 'macular'], + 'macule': ['almuce', 'caelum', 'macule'], + 'maculose': ['maculose', 'somacule'], + 'mad': ['dam', 'mad'], + 'madden': ['damned', 'demand', 'madden'], + 'maddening': ['demanding', 'maddening'], + 'maddeningly': ['demandingly', 'maddeningly'], + 'madder': ['dermad', 'madder'], + 'made': ['dame', 'made', 'mead'], + 'madeira': ['adermia', 'madeira'], + 'madeiran': ['madeiran', 'marinade'], + 'madeline': ['endemial', 'madeline'], + 'madi': ['admi', 'amid', 'madi', 'maid'], + 'madia': ['amadi', 'damia', 'madia', 'maida'], + 'madiga': ['agamid', 'madiga'], + 'madman': ['madman', 'nammad'], + 'madnep': ['dampen', 'madnep'], + 'madrid': ['madrid', 'riddam'], + 'madrier': ['admirer', 'madrier', 'married'], + 'madrilene': ['landimere', 'madrilene'], + 'madrona': ['anadrom', 'madrona', 'mandora', 'monarda', 'roadman'], + 'madship': ['dampish', 'madship', 'phasmid'], + 'madurese': ['madurese', 'measured'], + 'mae': ['ame', 'mae'], + 'maecenas': ['macanese', 'maecenas'], + 
'maenad': ['anadem', 'maenad'], + 'maenadism': ['maenadism', 'mandaeism'], + 'maeonian': ['enomania', 'maeonian'], + 'maestri': ['artemis', 'maestri', 'misrate'], + 'maestro': ['maestro', 'tarsome'], + 'mag': ['gam', 'mag'], + 'magadis': ['gamasid', 'magadis'], + 'magani': ['angami', 'magani', 'magian'], + 'magas': ['agsam', 'magas'], + 'mage': ['egma', 'game', 'mage'], + 'magenta': ['gateman', 'magenta', 'magnate', 'magneta'], + 'magian': ['angami', 'magani', 'magian'], + 'magic': ['gamic', 'magic'], + 'magister': ['gemarist', 'magister', 'sterigma'], + 'magistrate': ['magistrate', 'sterigmata'], + 'magma': ['gamma', 'magma'], + 'magnate': ['gateman', 'magenta', 'magnate', 'magneta'], + 'magnes': ['magnes', 'semang'], + 'magneta': ['gateman', 'magenta', 'magnate', 'magneta'], + 'magnetist': ['agistment', 'magnetist'], + 'magneto': ['geomant', 'magneto', 'megaton', 'montage'], + 'magnetod': ['magnetod', 'megadont'], + 'magnetoelectric': ['electromagnetic', 'magnetoelectric'], + 'magnetoelectrical': ['electromagnetical', 'magnetoelectrical'], + 'magnolia': ['algomian', 'magnolia'], + 'magnus': ['magnus', 'musang'], + 'magpie': ['magpie', 'piemag'], + 'magyar': ['magyar', 'margay'], + 'mah': ['ham', 'mah'], + 'maha': ['amah', 'maha'], + 'mahar': ['amhar', 'mahar', 'mahra'], + 'maharani': ['amiranha', 'maharani'], + 'mahdism': ['dammish', 'mahdism'], + 'mahdist': ['adsmith', 'mahdist'], + 'mahi': ['hami', 'hima', 'mahi'], + 'mahican': ['chamian', 'mahican'], + 'mahogany': ['hogmanay', 'mahogany'], + 'maholi': ['holmia', 'maholi'], + 'maholtine': ['hematolin', 'maholtine'], + 'mahori': ['homrai', 'mahori', 'mohair'], + 'mahra': ['amhar', 'mahar', 'mahra'], + 'mahran': ['amhran', 'harman', 'mahran'], + 'mahri': ['hiram', 'ihram', 'mahri'], + 'maia': ['amia', 'maia'], + 'maid': ['admi', 'amid', 'madi', 'maid'], + 'maida': ['amadi', 'damia', 'madia', 'maida'], + 'maiden': ['daimen', 'damine', 'maiden', 'median', 'medina'], + 'maidenism': ['maidenism', 'medianism'], + 'maidenly': ['lymnaeid', 'maidenly', 'medianly'], + 'maidish': ['hasidim', 'maidish'], + 'maidism': ['amidism', 'maidism'], + 'maigre': ['imager', 'maigre', 'margie', 'mirage'], + 'maiidae': ['amiidae', 'maiidae'], + 'mail': ['amil', 'amli', 'lima', 'mail', 'mali', 'mila'], + 'mailed': ['aldime', 'mailed', 'medial'], + 'mailer': ['mailer', 'remail'], + 'mailie': ['emilia', 'mailie'], + 'maim': ['ammi', 'imam', 'maim', 'mima'], + 'main': ['amin', 'main', 'mani', 'mian', 'mina', 'naim'], + 'maine': ['amine', 'anime', 'maine', 'manei'], + 'mainly': ['amylin', 'mainly'], + 'mainour': ['mainour', 'uramino'], + 'mainpast': ['mainpast', 'mantispa', 'panamist', 'stampian'], + 'mainprise': ['mainprise', 'presimian'], + 'mains': ['mains', 'manis'], + 'maint': ['maint', 'matin'], + 'maintain': ['amanitin', 'maintain'], + 'maintainer': ['antimerina', 'maintainer', 'remaintain'], + 'maintop': ['maintop', 'ptomain', 'tampion', 'timpano'], + 'maioid': ['daimio', 'maioid'], + 'maioidean': ['anomiidae', 'maioidean'], + 'maire': ['aimer', 'maire', 'marie', 'ramie'], + 'maja': ['jama', 'maja'], + 'majoon': ['majoon', 'moonja'], + 'major': ['jarmo', 'major'], + 'makah': ['hakam', 'makah'], + 'makassar': ['makassar', 'samskara'], + 'make': ['kame', 'make', 'meak'], + 'maker': ['maker', 'marek', 'merak'], + 'maki': ['akim', 'maki'], + 'mako': ['amok', 'mako'], + 'mal': ['lam', 'mal'], + 'mala': ['alma', 'amla', 'lama', 'mala'], + 'malacologist': ['malacologist', 'mastological'], + 'malaga': ['agalma', 'malaga'], + 'malagma': ['amalgam', 'malagma'], + 
'malakin': ['alkamin', 'malakin'], + 'malanga': ['malanga', 'nagmaal'], + 'malapert': ['armplate', 'malapert'], + 'malapi': ['impala', 'malapi'], + 'malar': ['alarm', 'malar', 'maral', 'marla', 'ramal'], + 'malarin': ['lairman', 'laminar', 'malarin', 'railman'], + 'malate': ['malate', 'meatal', 'tamale'], + 'malcreated': ['cradlemate', 'malcreated'], + 'maldonite': ['antimodel', 'maldonite', 'monilated'], + 'male': ['alem', 'alme', 'lame', 'leam', 'male', 'meal', 'mela'], + 'maleficiation': ['amelification', 'maleficiation'], + 'maleic': ['maleic', 'malice', 'melica'], + 'maleinoid': ['alimonied', 'maleinoid'], + 'malella': ['lamella', 'malella', 'malleal'], + 'maleness': ['lameness', 'maleness', 'maneless', 'nameless'], + 'malengine': ['enameling', 'malengine', 'meningeal'], + 'maleo': ['amole', 'maleo'], + 'malfed': ['flamed', 'malfed'], + 'malhonest': ['malhonest', 'mashelton'], + 'mali': ['amil', 'amli', 'lima', 'mail', 'mali', 'mila'], + 'malic': ['claim', 'clima', 'malic'], + 'malice': ['maleic', 'malice', 'melica'], + 'malicho': ['chiloma', 'malicho'], + 'maligner': ['germinal', 'maligner', 'malinger'], + 'malikana': ['kalamian', 'malikana'], + 'maline': ['limean', 'maline', 'melian', 'menial'], + 'malines': ['malines', 'salmine', 'selamin', 'seminal'], + 'malinger': ['germinal', 'maligner', 'malinger'], + 'malison': ['malison', 'manolis', 'osmanli', 'somnial'], + 'malladrite': ['armillated', 'malladrite', 'mallardite'], + 'mallardite': ['armillated', 'malladrite', 'mallardite'], + 'malleal': ['lamella', 'malella', 'malleal'], + 'mallein': ['mallein', 'manille'], + 'malleoincudal': ['incudomalleal', 'malleoincudal'], + 'malleus': ['amellus', 'malleus'], + 'malmaison': ['anomalism', 'malmaison'], + 'malmy': ['lammy', 'malmy'], + 'malo': ['loam', 'loma', 'malo', 'mola', 'olam'], + 'malonic': ['limacon', 'malonic'], + 'malonyl': ['allonym', 'malonyl'], + 'malope': ['aplome', 'malope'], + 'malpoise': ['malpoise', 'semiopal'], + 'malposed': ['malposed', 'plasmode'], + 'maltase': ['asmalte', 'maltase'], + 'malter': ['armlet', 'malter', 'martel'], + 'maltese': ['maltese', 'seamlet'], + 'malthe': ['hamlet', 'malthe'], + 'malurinae': ['malurinae', 'melanuria'], + 'malurine': ['lemurian', 'malurine', 'rumelian'], + 'malurus': ['malurus', 'ramulus'], + 'malus': ['lamus', 'malus', 'musal', 'slaum'], + 'mamers': ['mamers', 'sammer'], + 'mamo': ['ammo', 'mamo'], + 'man': ['man', 'nam'], + 'mana': ['anam', 'mana', 'naam', 'nama'], + 'manacle': ['laceman', 'manacle'], + 'manacus': ['manacus', 'samucan'], + 'manage': ['agname', 'manage'], + 'manager': ['gearman', 'manager'], + 'manal': ['alman', 'lamna', 'manal'], + 'manas': ['manas', 'saman'], + 'manatee': ['emanate', 'manatee'], + 'manatine': ['annamite', 'manatine'], + 'manbird': ['birdman', 'manbird'], + 'manchester': ['manchester', 'searchment'], + 'mand': ['damn', 'mand'], + 'mandaeism': ['maenadism', 'mandaeism'], + 'mandaite': ['animated', 'mandaite', 'mantidae'], + 'mandarin': ['drainman', 'mandarin'], + 'mandation': ['damnation', 'mandation'], + 'mandatory': ['damnatory', 'mandatory'], + 'mande': ['amend', 'mande', 'maned'], + 'mandelate': ['aldeament', 'mandelate'], + 'mandil': ['lamnid', 'mandil'], + 'mandola': ['mandola', 'odalman'], + 'mandora': ['anadrom', 'madrona', 'mandora', 'monarda', 'roadman'], + 'mandra': ['mandra', 'radman'], + 'mandrill': ['drillman', 'mandrill'], + 'mandyas': ['daysman', 'mandyas'], + 'mane': ['amen', 'enam', 'mane', 'mean', 'name', 'nema'], + 'maned': ['amend', 'mande', 'maned'], + 'manege': ['gamene', 
'manege', 'menage'], + 'manei': ['amine', 'anime', 'maine', 'manei'], + 'maneless': ['lameness', 'maleness', 'maneless', 'nameless'], + 'manent': ['manent', 'netman'], + 'manerial': ['almerian', 'manerial'], + 'manes': ['manes', 'manse', 'mensa', 'samen', 'senam'], + 'maness': ['enmass', 'maness', 'messan'], + 'manettia': ['antietam', 'manettia'], + 'maney': ['maney', 'yamen'], + 'manga': ['amang', 'ganam', 'manga'], + 'mangar': ['amgarn', 'mangar', 'marang', 'ragman'], + 'mangel': ['legman', 'mangel', 'mangle'], + 'mangelin': ['mangelin', 'nameling'], + 'manger': ['engram', 'german', 'manger'], + 'mangerite': ['germanite', 'germinate', 'gramenite', 'mangerite'], + 'mangi': ['gamin', 'mangi'], + 'mangle': ['legman', 'mangel', 'mangle'], + 'mango': ['among', 'mango'], + 'mangrass': ['grassman', 'mangrass'], + 'mangrate': ['grateman', 'mangrate', 'mentagra', 'targeman'], + 'mangue': ['mangue', 'maunge'], + 'manhead': ['headman', 'manhead'], + 'manhole': ['holeman', 'manhole'], + 'manhood': ['dhamnoo', 'hoodman', 'manhood'], + 'mani': ['amin', 'main', 'mani', 'mian', 'mina', 'naim'], + 'mania': ['amain', 'amani', 'amnia', 'anima', 'mania'], + 'maniable': ['animable', 'maniable'], + 'maniac': ['amniac', 'caiman', 'maniac'], + 'manic': ['amnic', 'manic'], + 'manid': ['dimna', 'manid'], + 'manidae': ['adamine', 'manidae'], + 'manify': ['infamy', 'manify'], + 'manila': ['almain', 'animal', 'lamina', 'manila'], + 'manilla': ['alnilam', 'manilla'], + 'manille': ['mallein', 'manille'], + 'manioc': ['camion', 'conima', 'manioc', 'monica'], + 'maniple': ['impanel', 'maniple'], + 'manipuri': ['manipuri', 'unimpair'], + 'manis': ['mains', 'manis'], + 'manist': ['manist', 'mantis', 'matins', 'stamin'], + 'manistic': ['actinism', 'manistic'], + 'manito': ['atimon', 'manito', 'montia'], + 'maniu': ['maniu', 'munia', 'unami'], + 'manius': ['animus', 'anisum', 'anusim', 'manius'], + 'maniva': ['maniva', 'vimana'], + 'manlet': ['lament', 'manlet', 'mantel', 'mantle', 'mental'], + 'manna': ['annam', 'manna'], + 'mannite': ['mannite', 'tineman'], + 'mannonic': ['cinnamon', 'mannonic'], + 'mano': ['mano', 'moan', 'mona', 'noam', 'noma', 'oman'], + 'manoc': ['coman', 'macon', 'manoc'], + 'manolis': ['malison', 'manolis', 'osmanli', 'somnial'], + 'manometrical': ['commentarial', 'manometrical'], + 'manometry': ['manometry', 'momentary'], + 'manor': ['manor', 'moran', 'norma', 'ramon', 'roman'], + 'manorial': ['manorial', 'morainal'], + 'manorship': ['manorship', 'orphanism'], + 'manoscope': ['manoscope', 'moonscape'], + 'manred': ['damner', 'manred', 'randem', 'remand'], + 'manrent': ['manrent', 'remnant'], + 'manrope': ['manrope', 'ropeman'], + 'manse': ['manes', 'manse', 'mensa', 'samen', 'senam'], + 'manship': ['manship', 'shipman'], + 'mansion': ['mansion', 'onanism'], + 'mansioneer': ['emersonian', 'mansioneer'], + 'manslaughter': ['manslaughter', 'slaughterman'], + 'manso': ['manso', 'mason', 'monas'], + 'manta': ['atman', 'manta'], + 'mantel': ['lament', 'manlet', 'mantel', 'mantle', 'mental'], + 'manteline': ['lineament', 'manteline'], + 'manter': ['manter', 'marten', 'rament'], + 'mantes': ['mantes', 'stamen'], + 'manticore': ['cremation', 'manticore'], + 'mantidae': ['animated', 'mandaite', 'mantidae'], + 'mantis': ['manist', 'mantis', 'matins', 'stamin'], + 'mantispa': ['mainpast', 'mantispa', 'panamist', 'stampian'], + 'mantissa': ['mantissa', 'satanism'], + 'mantle': ['lament', 'manlet', 'mantel', 'mantle', 'mental'], + 'manto': ['manto', 'toman'], + 'mantodea': ['mantodea', 'nematoda'], + 
'mantoidea': ['diatomean', 'mantoidea'], + 'mantra': ['mantra', 'tarman'], + 'mantrap': ['mantrap', 'rampant'], + 'mantua': ['anatum', 'mantua', 'tamanu'], + 'manual': ['alumna', 'manual'], + 'manualism': ['manualism', 'musalmani'], + 'manualiter': ['manualiter', 'unmaterial'], + 'manuel': ['manuel', 'unlame'], + 'manul': ['lanum', 'manul'], + 'manuma': ['amunam', 'manuma'], + 'manure': ['manure', 'menura'], + 'manward': ['manward', 'wardman'], + 'manwards': ['manwards', 'wardsman'], + 'manway': ['manway', 'wayman'], + 'manwise': ['manwise', 'wiseman'], + 'many': ['many', 'myna'], + 'mao': ['mao', 'oam'], + 'maori': ['maori', 'mario', 'moira'], + 'map': ['map', 'pam'], + 'mapach': ['champa', 'mapach'], + 'maple': ['ample', 'maple'], + 'mapper': ['mapper', 'pamper', 'pampre'], + 'mar': ['arm', 'mar', 'ram'], + 'mara': ['amar', 'amra', 'mara', 'rama'], + 'marabout': ['marabout', 'marabuto', 'tamboura'], + 'marabuto': ['marabout', 'marabuto', 'tamboura'], + 'maraca': ['acamar', 'camara', 'maraca'], + 'maral': ['alarm', 'malar', 'maral', 'marla', 'ramal'], + 'marang': ['amgarn', 'mangar', 'marang', 'ragman'], + 'mararie': ['armeria', 'mararie'], + 'marasca': ['marasca', 'mascara'], + 'maraschino': ['anachorism', 'chorasmian', 'maraschino'], + 'marasmic': ['macarism', 'marasmic'], + 'marbelize': ['marbelize', 'marbleize'], + 'marble': ['ambler', 'blamer', 'lamber', 'marble', 'ramble'], + 'marbleize': ['marbelize', 'marbleize'], + 'marbler': ['marbler', 'rambler'], + 'marbling': ['marbling', 'rambling'], + 'marc': ['cram', 'marc'], + 'marcan': ['carman', 'marcan'], + 'marcel': ['calmer', 'carmel', 'clamer', 'marcel', 'mercal'], + 'marcescent': ['marcescent', 'scarcement'], + 'march': ['charm', 'march'], + 'marcher': ['charmer', 'marcher', 'remarch'], + 'marchite': ['athermic', 'marchite', 'rhematic'], + 'marchpane': ['marchpane', 'preachman'], + 'marci': ['marci', 'mirac'], + 'marcionist': ['marcionist', 'romanistic'], + 'marcionite': ['marcionite', 'microtinae', 'remication'], + 'marco': ['carom', 'coram', 'macro', 'marco'], + 'marconi': ['amicron', 'marconi', 'minorca', 'romanic'], + 'mare': ['erma', 'mare', 'rame', 'ream'], + 'mareca': ['acream', 'camera', 'mareca'], + 'marek': ['maker', 'marek', 'merak'], + 'marengo': ['marengo', 'megaron'], + 'mareotid': ['mareotid', 'mediator'], + 'marfik': ['marfik', 'mirfak'], + 'marfire': ['firearm', 'marfire'], + 'margay': ['magyar', 'margay'], + 'marge': ['grame', 'marge', 'regma'], + 'margeline': ['margeline', 'regimenal'], + 'margent': ['garment', 'margent'], + 'margie': ['imager', 'maigre', 'margie', 'mirage'], + 'margin': ['arming', 'ingram', 'margin'], + 'marginal': ['alarming', 'marginal'], + 'marginally': ['alarmingly', 'marginally'], + 'marginate': ['armangite', 'marginate'], + 'marginated': ['argentamid', 'marginated'], + 'margined': ['dirgeman', 'margined', 'midrange'], + 'marginiform': ['graminiform', 'marginiform'], + 'margot': ['gomart', 'margot'], + 'marhala': ['harmala', 'marhala'], + 'mari': ['amir', 'irma', 'mari', 'mira', 'rami', 'rima'], + 'marialite': ['latimeria', 'marialite'], + 'marian': ['airman', 'amarin', 'marian', 'marina', 'mirana'], + 'mariana': ['aramina', 'mariana'], + 'marianne': ['armenian', 'marianne'], + 'marie': ['aimer', 'maire', 'marie', 'ramie'], + 'marigenous': ['germanious', 'gramineous', 'marigenous'], + 'marilla': ['armilla', 'marilla'], + 'marina': ['airman', 'amarin', 'marian', 'marina', 'mirana'], + 'marinade': ['madeiran', 'marinade'], + 'marinate': ['animater', 'marinate'], + 'marine': ['ermani', 
'marine', 'remain'], + 'marinist': ['marinist', 'mistrain'], + 'mario': ['maori', 'mario', 'moira'], + 'marion': ['marion', 'romain'], + 'mariou': ['mariou', 'oarium'], + 'maris': ['maris', 'marsi', 'samir', 'simar'], + 'marish': ['marish', 'shamir'], + 'marishness': ['marishness', 'marshiness'], + 'marist': ['marist', 'matris', 'ramist'], + 'maritage': ['gematria', 'maritage'], + 'marital': ['marital', 'martial'], + 'maritality': ['maritality', 'martiality'], + 'maritally': ['maritally', 'martially'], + 'marka': ['karma', 'krama', 'marka'], + 'markeb': ['embark', 'markeb'], + 'marked': ['demark', 'marked'], + 'marker': ['marker', 'remark'], + 'marketable': ['marketable', 'tablemaker'], + 'marketeer': ['marketeer', 'treemaker'], + 'marketer': ['marketer', 'remarket'], + 'marko': ['marko', 'marok'], + 'marla': ['alarm', 'malar', 'maral', 'marla', 'ramal'], + 'marled': ['dermal', 'marled', 'medlar'], + 'marli': ['armil', 'marli', 'rimal'], + 'marline': ['marline', 'mineral', 'ramline'], + 'marlite': ['lamiter', 'marlite'], + 'marlock': ['lockram', 'marlock'], + 'maro': ['amor', 'maro', 'mora', 'omar', 'roam'], + 'marocain': ['armonica', 'macaroni', 'marocain'], + 'marok': ['marko', 'marok'], + 'maronian': ['maronian', 'romanian'], + 'maronist': ['maronist', 'romanist'], + 'maronite': ['maronite', 'martinoe', 'minorate', 'morenita', 'romanite'], + 'marquesan': ['marquesan', 'squareman'], + 'marquis': ['asquirm', 'marquis'], + 'marree': ['marree', 'reamer'], + 'married': ['admirer', 'madrier', 'married'], + 'marrot': ['marrot', 'mortar'], + 'marrowed': ['marrowed', 'romeward'], + 'marryer': ['marryer', 'remarry'], + 'mars': ['arms', 'mars'], + 'marsh': ['marsh', 'shram'], + 'marshaler': ['marshaler', 'remarshal'], + 'marshiness': ['marishness', 'marshiness'], + 'marshite': ['arthemis', 'marshite', 'meharist'], + 'marsi': ['maris', 'marsi', 'samir', 'simar'], + 'marsipobranchiata': ['basiparachromatin', 'marsipobranchiata'], + 'mart': ['mart', 'tram'], + 'martel': ['armlet', 'malter', 'martel'], + 'marteline': ['alimenter', 'marteline'], + 'marten': ['manter', 'marten', 'rament'], + 'martes': ['martes', 'master', 'remast', 'stream'], + 'martha': ['amarth', 'martha'], + 'martial': ['marital', 'martial'], + 'martiality': ['maritality', 'martiality'], + 'martially': ['maritally', 'martially'], + 'martian': ['martian', 'tamarin'], + 'martinet': ['intermat', 'martinet', 'tetramin'], + 'martinico': ['martinico', 'mortician'], + 'martinoe': ['maronite', 'martinoe', 'minorate', 'morenita', 'romanite'], + 'martite': ['martite', 'mitrate'], + 'martius': ['martius', 'matsuri', 'maurist'], + 'martu': ['martu', 'murat', 'turma'], + 'marty': ['marty', 'tryma'], + 'maru': ['arum', 'maru', 'mura'], + 'mary': ['army', 'mary', 'myra', 'yarm'], + 'marylander': ['aldermanry', 'marylander'], + 'marysole': ['marysole', 'ramosely'], + 'mas': ['mas', 'sam', 'sma'], + 'mascara': ['marasca', 'mascara'], + 'mascotry': ['arctomys', 'costmary', 'mascotry'], + 'masculine': ['masculine', 'semuncial', 'simulance'], + 'masculist': ['masculist', 'simulcast'], + 'masdeu': ['amused', 'masdeu', 'medusa'], + 'mash': ['mash', 'samh', 'sham'], + 'masha': ['hamsa', 'masha', 'shama'], + 'mashal': ['mashal', 'shamal'], + 'mashelton': ['malhonest', 'mashelton'], + 'masher': ['masher', 'ramesh', 'shamer'], + 'mashy': ['mashy', 'shyam'], + 'mask': ['kasm', 'mask'], + 'masker': ['masker', 'remask'], + 'mason': ['manso', 'mason', 'monas'], + 'masoner': ['masoner', 'romanes'], + 'masonic': ['anosmic', 'masonic'], + 'masonite': ['masonite', 
'misatone'], + 'maspiter': ['maspiter', 'pastimer', 'primates'], + 'masque': ['masque', 'squame', 'squeam'], + 'massa': ['amass', 'assam', 'massa', 'samas'], + 'masse': ['masse', 'sesma'], + 'masser': ['masser', 'remass'], + 'masseter': ['masseter', 'seamster'], + 'masseur': ['assumer', 'erasmus', 'masseur'], + 'massicot': ['acosmist', 'massicot', 'somatics'], + 'massiness': ['amissness', 'massiness'], + 'masskanne': ['masskanne', 'sneaksman'], + 'mast': ['mast', 'mats', 'stam'], + 'masted': ['demast', 'masted'], + 'master': ['martes', 'master', 'remast', 'stream'], + 'masterate': ['masterate', 'metatarse'], + 'masterer': ['masterer', 'restream', 'streamer'], + 'masterful': ['masterful', 'streamful'], + 'masterless': ['masterless', 'streamless'], + 'masterlike': ['masterlike', 'streamlike'], + 'masterling': ['masterling', 'streamling'], + 'masterly': ['masterly', 'myrtales'], + 'mastership': ['mastership', 'shipmaster'], + 'masterwork': ['masterwork', 'workmaster'], + 'masterwort': ['masterwort', 'streamwort'], + 'mastery': ['mastery', 'streamy'], + 'mastic': ['mastic', 'misact'], + 'masticable': ['ablastemic', 'masticable'], + 'mastiche': ['mastiche', 'misteach'], + 'mastlike': ['kemalist', 'mastlike'], + 'mastoid': ['distoma', 'mastoid'], + 'mastoidale': ['diatomales', 'mastoidale', 'mastoideal'], + 'mastoideal': ['diatomales', 'mastoidale', 'mastoideal'], + 'mastological': ['malacologist', 'mastological'], + 'mastomenia': ['mastomenia', 'seminomata'], + 'mastotomy': ['mastotomy', 'stomatomy'], + 'masu': ['masu', 'musa', 'saum'], + 'mat': ['amt', 'mat', 'tam'], + 'matacan': ['matacan', 'tamanac'], + 'matai': ['amati', 'amita', 'matai'], + 'matar': ['matar', 'matra', 'trama'], + 'matara': ['armata', 'matara', 'tamara'], + 'matcher': ['matcher', 'rematch'], + 'mate': ['mate', 'meat', 'meta', 'tame', 'team', 'tema'], + 'mateless': ['mateless', 'meatless', 'tameless', 'teamless'], + 'matelessness': ['matelessness', 'tamelessness'], + 'mately': ['mately', 'tamely'], + 'mater': ['armet', + 'mater', + 'merat', + 'metra', + 'ramet', + 'tamer', + 'terma', + 'trame', + 'trema'], + 'materialism': ['immaterials', 'materialism'], + 'materiel': ['eremital', 'materiel'], + 'maternal': ['maternal', 'ramental'], + 'maternology': ['laryngotome', 'maternology'], + 'mateship': ['aphetism', 'mateship', 'shipmate', 'spithame'], + 'matey': ['matey', 'meaty'], + 'mathesis': ['mathesis', 'thamesis'], + 'mathetic': ['mathetic', 'thematic'], + 'matico': ['atomic', 'matico'], + 'matin': ['maint', 'matin'], + 'matinee': ['amenite', 'etamine', 'matinee'], + 'matins': ['manist', 'mantis', 'matins', 'stamin'], + 'matra': ['matar', 'matra', 'trama'], + 'matral': ['matral', 'tramal'], + 'matralia': ['altamira', 'matralia'], + 'matrices': ['camerist', 'ceramist', 'matrices'], + 'matricide': ['citramide', 'diametric', 'matricide'], + 'matricula': ['lactarium', 'matricula'], + 'matricular': ['matricular', 'trimacular'], + 'matris': ['marist', 'matris', 'ramist'], + 'matrocliny': ['matrocliny', 'romanticly'], + 'matronism': ['matronism', 'romantism'], + 'mats': ['mast', 'mats', 'stam'], + 'matsu': ['matsu', 'tamus', 'tsuma'], + 'matsuri': ['martius', 'matsuri', 'maurist'], + 'matter': ['matter', 'mettar'], + 'mattoir': ['mattoir', 'tritoma'], + 'maturable': ['maturable', 'metabular'], + 'maturation': ['maturation', 'natatorium'], + 'maturer': ['erratum', 'maturer'], + 'mau': ['aum', 'mau'], + 'maud': ['duma', 'maud'], + 'maudle': ['almude', 'maudle'], + 'mauger': ['mauger', 'murage'], + 'maul': ['alum', 'maul'], + 'mauler': 
['mauler', 'merula', 'ramule'], + 'maun': ['maun', 'numa'], + 'maund': ['maund', 'munda', 'numda', 'undam', 'unmad'], + 'maunder': ['duramen', 'maunder', 'unarmed'], + 'maunderer': ['maunderer', 'underream'], + 'maunge': ['mangue', 'maunge'], + 'maureen': ['maureen', 'menurae'], + 'maurice': ['maurice', 'uraemic'], + 'maurist': ['martius', 'matsuri', 'maurist'], + 'mauser': ['amuser', 'mauser'], + 'mavis': ['amvis', 'mavis'], + 'maw': ['maw', 'mwa'], + 'mawp': ['mawp', 'wamp'], + 'may': ['amy', 'may', 'mya', 'yam'], + 'maybe': ['beamy', 'embay', 'maybe'], + 'mayer': ['mayer', 'reamy'], + 'maylike': ['maylike', 'yamilke'], + 'mayo': ['amoy', 'mayo'], + 'mayor': ['mayor', 'moray'], + 'maypoling': ['maypoling', 'pygmalion'], + 'maysin': ['maysin', 'minyas', 'mysian'], + 'maytide': ['daytime', 'maytide'], + 'mazer': ['mazer', 'zerma'], + 'mazur': ['mazur', 'murza'], + 'mbaya': ['ambay', 'mbaya'], + 'me': ['em', 'me'], + 'meable': ['bemeal', 'meable'], + 'mead': ['dame', 'made', 'mead'], + 'meader': ['meader', 'remade'], + 'meager': ['graeme', 'meager', 'meagre'], + 'meagre': ['graeme', 'meager', 'meagre'], + 'meak': ['kame', 'make', 'meak'], + 'meal': ['alem', 'alme', 'lame', 'leam', 'male', 'meal', 'mela'], + 'mealer': ['leamer', 'mealer'], + 'mealiness': ['mealiness', 'messaline'], + 'mealy': ['mealy', 'yamel'], + 'mean': ['amen', 'enam', 'mane', 'mean', 'name', 'nema'], + 'meander': ['amender', 'meander', 'reamend', 'reedman'], + 'meandrite': ['demetrian', 'dermatine', 'meandrite', 'minareted'], + 'meandrous': ['meandrous', 'roundseam'], + 'meaned': ['amende', 'demean', 'meaned', 'nadeem'], + 'meaner': ['enarme', 'meaner', 'rename'], + 'meanly': ['meanly', 'namely'], + 'meant': ['ament', 'meant', 'teman'], + 'mease': ['emesa', 'mease'], + 'measly': ['measly', 'samely'], + 'measuration': ['aeronautism', 'measuration'], + 'measure': ['measure', 'reamuse'], + 'measured': ['madurese', 'measured'], + 'meat': ['mate', 'meat', 'meta', 'tame', 'team', 'tema'], + 'meatal': ['malate', 'meatal', 'tamale'], + 'meatless': ['mateless', 'meatless', 'tameless', 'teamless'], + 'meatman': ['meatman', 'teamman'], + 'meatus': ['meatus', 'mutase'], + 'meaty': ['matey', 'meaty'], + 'mechanal': ['leachman', 'mechanal'], + 'mechanicochemical': ['chemicomechanical', 'mechanicochemical'], + 'mechanics': ['mechanics', 'mischance'], + 'mechir': ['chimer', 'mechir', 'micher'], + 'mecometry': ['cymometer', 'mecometry'], + 'meconic': ['comenic', 'encomic', 'meconic'], + 'meconin': ['ennomic', 'meconin'], + 'meconioid': ['meconioid', 'monoeidic'], + 'meconium': ['encomium', 'meconium'], + 'mecopteron': ['mecopteron', 'protocneme'], + 'medal': ['demal', 'medal'], + 'medallary': ['alarmedly', 'medallary'], + 'mede': ['deem', 'deme', 'mede', 'meed'], + 'media': ['amide', 'damie', 'media'], + 'mediacy': ['dicyema', 'mediacy'], + 'mediad': ['diadem', 'mediad'], + 'medial': ['aldime', 'mailed', 'medial'], + 'median': ['daimen', 'damine', 'maiden', 'median', 'medina'], + 'medianism': ['maidenism', 'medianism'], + 'medianly': ['lymnaeid', 'maidenly', 'medianly'], + 'mediator': ['mareotid', 'mediator'], + 'mediatress': ['mediatress', 'streamside'], + 'mediatrice': ['acidimeter', 'mediatrice'], + 'medical': ['camelid', 'decimal', 'declaim', 'medical'], + 'medically': ['decimally', 'medically'], + 'medicate': ['decimate', 'medicate'], + 'medication': ['decimation', 'medication'], + 'medicator': ['decimator', 'medicator', 'mordicate'], + 'medicatory': ['acidometry', 'medicatory', 'radiectomy'], + 'medicinal': ['adminicle', 
'medicinal'], + 'medicophysical': ['medicophysical', 'physicomedical'], + 'medimnos': ['demonism', 'medimnos', 'misnomed'], + 'medina': ['daimen', 'damine', 'maiden', 'median', 'medina'], + 'medino': ['domine', 'domnei', 'emodin', 'medino'], + 'mediocrist': ['dosimetric', 'mediocrist'], + 'mediocrity': ['iridectomy', 'mediocrity'], + 'mediodorsal': ['dorsomedial', 'mediodorsal'], + 'medioventral': ['medioventral', 'ventromedial'], + 'meditate': ['admittee', 'meditate'], + 'meditator': ['meditator', 'trematoid'], + 'medlar': ['dermal', 'marled', 'medlar'], + 'medusa': ['amused', 'masdeu', 'medusa'], + 'medusan': ['medusan', 'sudamen'], + 'meece': ['emcee', 'meece'], + 'meed': ['deem', 'deme', 'mede', 'meed'], + 'meeks': ['meeks', 'smeek'], + 'meered': ['deemer', 'meered', 'redeem', 'remede'], + 'meet': ['meet', 'mete', 'teem'], + 'meeter': ['meeter', 'remeet', 'teemer'], + 'meethelp': ['helpmeet', 'meethelp'], + 'meeting': ['meeting', 'teeming', 'tegmine'], + 'meg': ['gem', 'meg'], + 'megabar': ['bergama', 'megabar'], + 'megachiropteran': ['cinematographer', 'megachiropteran'], + 'megadont': ['magnetod', 'megadont'], + 'megadyne': ['ganymede', 'megadyne'], + 'megaera': ['megaera', 'reamage'], + 'megalodon': ['megalodon', 'moonglade'], + 'megalohepatia': ['hepatomegalia', 'megalohepatia'], + 'megalophonous': ['megalophonous', 'omphalogenous'], + 'megalosplenia': ['megalosplenia', 'splenomegalia'], + 'megapod': ['megapod', 'pagedom'], + 'megapodius': ['megapodius', 'pseudimago'], + 'megarian': ['germania', 'megarian'], + 'megaric': ['gemaric', 'grimace', 'megaric'], + 'megaron': ['marengo', 'megaron'], + 'megaton': ['geomant', 'magneto', 'megaton', 'montage'], + 'megmho': ['megmho', 'megohm'], + 'megohm': ['megmho', 'megohm'], + 'megrim': ['gimmer', 'grimme', 'megrim'], + 'mehari': ['mehari', 'meriah'], + 'meharist': ['arthemis', 'marshite', 'meharist'], + 'meile': ['elemi', 'meile'], + 'mein': ['mein', 'mien', 'mine'], + 'meio': ['meio', 'oime'], + 'mel': ['elm', 'mel'], + 'mela': ['alem', 'alme', 'lame', 'leam', 'male', 'meal', 'mela'], + 'melaconite': ['colemanite', 'melaconite'], + 'melalgia': ['gamaliel', 'melalgia'], + 'melam': ['lemma', 'melam'], + 'melamine': ['ammeline', 'melamine'], + 'melange': ['gleeman', 'melange'], + 'melania': ['laminae', 'melania'], + 'melanian': ['alemanni', 'melanian'], + 'melanic': ['cnemial', 'melanic'], + 'melanilin': ['melanilin', 'millennia'], + 'melanin': ['lemnian', 'lineman', 'melanin'], + 'melanism': ['melanism', 'slimeman'], + 'melanite': ['melanite', 'meletian', 'metaline', 'nemalite'], + 'melanitic': ['alimentic', 'antilemic', 'melanitic', 'metanilic'], + 'melanochroi': ['chloroamine', 'melanochroi'], + 'melanogen': ['melanogen', 'melongena'], + 'melanoid': ['demonial', 'melanoid'], + 'melanorrhea': ['amenorrheal', 'melanorrhea'], + 'melanosis': ['loaminess', 'melanosis'], + 'melanotic': ['entomical', 'melanotic'], + 'melanuria': ['malurinae', 'melanuria'], + 'melanuric': ['ceruminal', 'melanuric', 'numerical'], + 'melas': ['amsel', 'melas', 'mesal', 'samel'], + 'melastoma': ['melastoma', 'metasomal'], + 'meldrop': ['meldrop', 'premold'], + 'melena': ['enamel', 'melena'], + 'melenic': ['celemin', 'melenic'], + 'meletian': ['melanite', 'meletian', 'metaline', 'nemalite'], + 'meletski': ['meletski', 'stemlike'], + 'melian': ['limean', 'maline', 'melian', 'menial'], + 'meliatin': ['meliatin', 'timaline'], + 'melic': ['clime', 'melic'], + 'melica': ['maleic', 'malice', 'melica'], + 'melicerta': ['carmelite', 'melicerta'], + 'melicraton': 
['centimolar', 'melicraton'], + 'melinda': ['idleman', 'melinda'], + 'meline': ['elemin', 'meline'], + 'melinite': ['ilmenite', 'melinite', 'menilite'], + 'meliorant': ['meliorant', 'mentorial'], + 'melissa': ['aimless', 'melissa', 'seismal'], + 'melitose': ['melitose', 'mesolite'], + 'mellay': ['lamely', 'mellay'], + 'mellit': ['mellit', 'millet'], + 'melodia': ['melodia', 'molidae'], + 'melodica': ['cameloid', 'comedial', 'melodica'], + 'melodicon': ['clinodome', 'melodicon', 'monocleid'], + 'melodist': ['melodist', 'modelist'], + 'melomanic': ['commelina', 'melomanic'], + 'melon': ['lemon', 'melon', 'monel'], + 'melongena': ['melanogen', 'melongena'], + 'melonist': ['melonist', 'telonism'], + 'melonites': ['limestone', 'melonites', 'milestone'], + 'melonlike': ['lemonlike', 'melonlike'], + 'meloplasty': ['meloplasty', 'myeloplast'], + 'melosa': ['melosa', 'salome', 'semola'], + 'melotragic': ['algometric', 'melotragic'], + 'melotrope': ['melotrope', 'metropole'], + 'melter': ['melter', 'remelt'], + 'melters': ['melters', 'resmelt', 'smelter'], + 'melton': ['loment', 'melton', 'molten'], + 'melungeon': ['melungeon', 'nonlegume'], + 'mem': ['emm', 'mem'], + 'memnon': ['memnon', 'mennom'], + 'memo': ['memo', 'mome'], + 'memorandist': ['memorandist', 'moderantism', 'semidormant'], + 'menage': ['gamene', 'manege', 'menage'], + 'menald': ['lemnad', 'menald'], + 'menaspis': ['menaspis', 'semispan'], + 'mendaite': ['dementia', 'mendaite'], + 'mende': ['emend', 'mende'], + 'mender': ['mender', 'remend'], + 'mendi': ['denim', 'mendi'], + 'mendipite': ['impedient', 'mendipite'], + 'menial': ['limean', 'maline', 'melian', 'menial'], + 'menic': ['menic', 'mince'], + 'menilite': ['ilmenite', 'melinite', 'menilite'], + 'meningeal': ['enameling', 'malengine', 'meningeal'], + 'meningocephalitis': ['cephalomeningitis', 'meningocephalitis'], + 'meningocerebritis': ['cerebromeningitis', 'meningocerebritis'], + 'meningoencephalitis': ['encephalomeningitis', 'meningoencephalitis'], + 'meningoencephalocele': ['encephalomeningocele', 'meningoencephalocele'], + 'meningomyelitis': ['meningomyelitis', 'myelomeningitis'], + 'meningomyelocele': ['meningomyelocele', 'myelomeningocele'], + 'mennom': ['memnon', 'mennom'], + 'menostasia': ['anematosis', 'menostasia'], + 'mensa': ['manes', 'manse', 'mensa', 'samen', 'senam'], + 'mensal': ['anselm', 'mensal'], + 'mense': ['mense', 'mesne', 'semen'], + 'menstrual': ['menstrual', 'ulsterman'], + 'mensurable': ['lebensraum', 'mensurable'], + 'mentagra': ['grateman', 'mangrate', 'mentagra', 'targeman'], + 'mental': ['lament', 'manlet', 'mantel', 'mantle', 'mental'], + 'mentalis': ['mentalis', 'smaltine', 'stileman'], + 'mentalize': ['mentalize', 'mentzelia'], + 'mentation': ['mentation', 'montanite'], + 'mentha': ['anthem', 'hetman', 'mentha'], + 'menthane': ['enanthem', 'menthane'], + 'mentigerous': ['mentigerous', 'tergeminous'], + 'mentolabial': ['labiomental', 'mentolabial'], + 'mentor': ['mentor', 'merton', 'termon', 'tormen'], + 'mentorial': ['meliorant', 'mentorial'], + 'mentzelia': ['mentalize', 'mentzelia'], + 'menura': ['manure', 'menura'], + 'menurae': ['maureen', 'menurae'], + 'menyie': ['menyie', 'yemeni'], + 'meo': ['meo', 'moe'], + 'mephisto': ['mephisto', 'pithsome'], + 'merak': ['maker', 'marek', 'merak'], + 'merat': ['armet', + 'mater', + 'merat', + 'metra', + 'ramet', + 'tamer', + 'terma', + 'trame', + 'trema'], + 'meratia': ['ametria', 'artemia', 'meratia', 'ramaite'], + 'mercal': ['calmer', 'carmel', 'clamer', 'marcel', 'mercal'], + 'mercator': 
['cremator', 'mercator'], + 'mercatorial': ['crematorial', 'mercatorial'], + 'mercian': ['armenic', 'carmine', 'ceriman', 'crimean', 'mercian'], + 'merciful': ['crimeful', 'merciful'], + 'merciless': ['crimeless', 'merciless'], + 'mercilessness': ['crimelessness', 'mercilessness'], + 'mere': ['mere', 'reem'], + 'merel': ['elmer', 'merel', 'merle'], + 'merely': ['merely', 'yelmer'], + 'merginae': ['ergamine', 'merginae'], + 'mergus': ['gersum', 'mergus'], + 'meriah': ['mehari', 'meriah'], + 'merice': ['eremic', 'merice'], + 'merida': ['admire', 'armied', 'damier', 'dimera', 'merida'], + 'meril': ['limer', 'meril', 'miler'], + 'meriones': ['emersion', 'meriones'], + 'merism': ['merism', 'mermis', 'simmer'], + 'merist': ['merist', 'mister', 'smiter'], + 'meristem': ['meristem', 'mimester'], + 'meristic': ['meristic', 'trimesic', 'trisemic'], + 'merit': ['merit', 'miter', 'mitre', 'remit', 'timer'], + 'merited': ['demerit', 'dimeter', 'merited', 'mitered'], + 'meriter': ['meriter', 'miterer', 'trireme'], + 'merle': ['elmer', 'merel', 'merle'], + 'merlin': ['limner', 'merlin', 'milner'], + 'mermaid': ['demiram', 'mermaid'], + 'mermis': ['merism', 'mermis', 'simmer'], + 'mero': ['mero', 'more', 'omer', 'rome'], + 'meroblastic': ['blastomeric', 'meroblastic'], + 'merocyte': ['cytomere', 'merocyte'], + 'merogony': ['gonomery', 'merogony'], + 'meroistic': ['eroticism', 'isometric', 'meroistic', 'trioecism'], + 'merop': ['merop', 'moper', 'proem', 'remop'], + 'meropia': ['emporia', 'meropia'], + 'meros': ['meros', 'mores', 'morse', 'sermo', 'smore'], + 'merosthenic': ['merosthenic', 'microsthene'], + 'merostome': ['merostome', 'osmometer'], + 'merrow': ['merrow', 'wormer'], + 'merse': ['merse', 'smeer'], + 'merton': ['mentor', 'merton', 'termon', 'tormen'], + 'merula': ['mauler', 'merula', 'ramule'], + 'meruline': ['lemurine', 'meruline', 'relumine'], + 'mesa': ['asem', 'mesa', 'same', 'seam'], + 'mesad': ['desma', 'mesad'], + 'mesadenia': ['deaminase', 'mesadenia'], + 'mesail': ['amiles', 'asmile', 'mesail', 'mesial', 'samiel'], + 'mesal': ['amsel', 'melas', 'mesal', 'samel'], + 'mesalike': ['mesalike', 'seamlike'], + 'mesaraic': ['cramasie', 'mesaraic'], + 'mesaticephaly': ['hemicatalepsy', 'mesaticephaly'], + 'mese': ['mese', 'seem', 'seme', 'smee'], + 'meshech': ['meshech', 'shechem'], + 'mesial': ['amiles', 'asmile', 'mesail', 'mesial', 'samiel'], + 'mesian': ['asimen', 'inseam', 'mesian'], + 'mesic': ['mesic', 'semic'], + 'mesion': ['eonism', 'mesion', 'oneism', 'simeon'], + 'mesitae': ['amesite', 'mesitae', 'semitae'], + 'mesne': ['mense', 'mesne', 'semen'], + 'meso': ['meso', 'mose', 'some'], + 'mesobar': ['ambrose', 'mesobar'], + 'mesocephaly': ['elaphomyces', 'mesocephaly'], + 'mesognathic': ['asthmogenic', 'mesognathic'], + 'mesohepar': ['mesohepar', 'semaphore'], + 'mesolite': ['melitose', 'mesolite'], + 'mesolithic': ['homiletics', 'mesolithic'], + 'mesological': ['mesological', 'semological'], + 'mesology': ['mesology', 'semology'], + 'mesomeric': ['mesomeric', 'microseme', 'semicrome'], + 'mesonotum': ['mesonotum', 'momentous'], + 'mesorectal': ['calotermes', 'mesorectal', 'metacresol'], + 'mesotonic': ['economist', 'mesotonic'], + 'mesoventral': ['mesoventral', 'ventromesal'], + 'mespil': ['mespil', 'simple'], + 'mesropian': ['mesropian', 'promnesia', 'spironema'], + 'messalian': ['messalian', 'seminasal'], + 'messaline': ['mealiness', 'messaline'], + 'messan': ['enmass', 'maness', 'messan'], + 'messelite': ['messelite', 'semisteel', 'teleseism'], + 'messines': ['essenism', 
'messines'], + 'messor': ['messor', 'mosser', 'somers'], + 'mestee': ['esteem', 'mestee'], + 'mester': ['mester', 'restem', 'temser', 'termes'], + 'mesua': ['amuse', 'mesua'], + 'meta': ['mate', 'meat', 'meta', 'tame', 'team', 'tema'], + 'metabular': ['maturable', 'metabular'], + 'metaconid': ['comediant', 'metaconid'], + 'metacresol': ['calotermes', 'mesorectal', 'metacresol'], + 'metage': ['gamete', 'metage'], + 'metaler': ['lameter', 'metaler', 'remetal'], + 'metaline': ['melanite', 'meletian', 'metaline', 'nemalite'], + 'metaling': ['ligament', 'metaling', 'tegminal'], + 'metalist': ['metalist', 'smaltite'], + 'metallism': ['metallism', 'smalltime'], + 'metamer': ['ammeter', 'metamer'], + 'metanilic': ['alimentic', 'antilemic', 'melanitic', 'metanilic'], + 'metaphor': ['metaphor', 'trophema'], + 'metaphoric': ['amphoteric', 'metaphoric'], + 'metaphorical': ['metaphorical', 'pharmacolite'], + 'metaphysical': ['lymphectasia', 'metaphysical'], + 'metaplastic': ['metaplastic', 'palmatisect'], + 'metapore': ['ametrope', 'metapore'], + 'metasomal': ['melastoma', 'metasomal'], + 'metatarse': ['masterate', 'metatarse'], + 'metatheria': ['hemiterata', 'metatheria'], + 'metatrophic': ['metatrophic', 'metropathic'], + 'metaxenia': ['examinate', 'exanimate', 'metaxenia'], + 'mete': ['meet', 'mete', 'teem'], + 'meteor': ['meteor', 'remote'], + 'meteorgraph': ['graphometer', 'meteorgraph'], + 'meteorical': ['carmeloite', 'ectromelia', 'meteorical'], + 'meteoristic': ['meteoristic', 'meteoritics'], + 'meteoritics': ['meteoristic', 'meteoritics'], + 'meteoroid': ['meteoroid', 'odiometer'], + 'meter': ['meter', 'retem'], + 'meterless': ['meterless', 'metreless'], + 'metership': ['herpetism', 'metership', 'metreship', 'temperish'], + 'methanal': ['latheman', 'methanal'], + 'methanate': ['hetmanate', 'methanate'], + 'methanoic': ['hematonic', 'methanoic'], + 'mether': ['mether', 'themer'], + 'method': ['method', 'mothed'], + 'methylacetanilide': ['acetmethylanilide', 'methylacetanilide'], + 'methylic': ['methylic', 'thymelic'], + 'methylotic': ['lithectomy', 'methylotic'], + 'metier': ['metier', 'retime', 'tremie'], + 'metin': ['metin', 'temin', 'timne'], + 'metis': ['metis', 'smite', 'stime', 'times'], + 'metoac': ['comate', 'metoac', 'tecoma'], + 'metol': ['metol', 'motel'], + 'metonymical': ['laminectomy', 'metonymical'], + 'metope': ['metope', 'poemet'], + 'metopias': ['epistoma', 'metopias'], + 'metosteon': ['metosteon', 'tomentose'], + 'metra': ['armet', + 'mater', + 'merat', + 'metra', + 'ramet', + 'tamer', + 'terma', + 'trame', + 'trema'], + 'metrectasia': ['metrectasia', 'remasticate'], + 'metreless': ['meterless', 'metreless'], + 'metreship': ['herpetism', 'metership', 'metreship', 'temperish'], + 'metria': ['imaret', 'metria', 'mirate', 'rimate'], + 'metrician': ['antimeric', 'carminite', 'criminate', 'metrician'], + 'metrics': ['cretism', 'metrics'], + 'metrocratic': ['cratometric', 'metrocratic'], + 'metrological': ['logometrical', 'metrological'], + 'metronome': ['metronome', 'monometer', 'monotreme'], + 'metronomic': ['commorient', 'metronomic', 'monometric'], + 'metronomical': ['metronomical', 'monometrical'], + 'metropathic': ['metatrophic', 'metropathic'], + 'metrophlebitis': ['metrophlebitis', 'phlebometritis'], + 'metropole': ['melotrope', 'metropole'], + 'metroptosia': ['metroptosia', 'prostomiate'], + 'metrorrhea': ['arthromere', 'metrorrhea'], + 'metrostyle': ['metrostyle', 'stylometer'], + 'mettar': ['matter', 'mettar'], + 'metusia': ['metusia', 'suimate', 'timaeus'], + 'mew': 
['mew', 'wem'], + 'meward': ['meward', 'warmed'], + 'mho': ['mho', 'ohm'], + 'mhometer': ['mhometer', 'ohmmeter'], + 'miamia': ['amimia', 'miamia'], + 'mian': ['amin', 'main', 'mani', 'mian', 'mina', 'naim'], + 'miaotse': ['miaotse', 'ostemia'], + 'miaotze': ['atomize', 'miaotze'], + 'mias': ['mias', 'saim', 'siam', 'sima'], + 'miasmal': ['lamaism', 'miasmal'], + 'miastor': ['amorist', 'aortism', 'miastor'], + 'miaul': ['aumil', 'miaul'], + 'miauler': ['lemuria', 'miauler'], + 'mib': ['bim', 'mib'], + 'mica': ['amic', 'mica'], + 'micah': ['chiam', 'machi', 'micah'], + 'micate': ['acmite', 'micate'], + 'mication': ['amniotic', 'mication'], + 'micellar': ['micellar', 'millrace'], + 'michael': ['michael', 'micheal'], + 'miche': ['chime', 'hemic', 'miche'], + 'micheal': ['michael', 'micheal'], + 'micher': ['chimer', 'mechir', 'micher'], + 'micht': ['micht', 'mitch'], + 'micranthropos': ['micranthropos', 'promonarchist'], + 'micro': ['micro', 'moric', 'romic'], + 'microcephal': ['microcephal', 'prochemical'], + 'microcephaly': ['microcephaly', 'pyrochemical'], + 'microcinema': ['microcinema', 'microcnemia'], + 'microcnemia': ['microcinema', 'microcnemia'], + 'microcrith': ['microcrith', 'trichromic'], + 'micropetalous': ['micropetalous', 'somatopleuric'], + 'microphagy': ['microphagy', 'myographic'], + 'microphone': ['microphone', 'neomorphic'], + 'microphot': ['microphot', 'morphotic'], + 'microphotograph': ['microphotograph', 'photomicrograph'], + 'microphotographic': ['microphotographic', 'photomicrographic'], + 'microphotography': ['microphotography', 'photomicrography'], + 'microphotoscope': ['microphotoscope', 'photomicroscope'], + 'micropterous': ['micropterous', 'prosectorium'], + 'micropyle': ['micropyle', 'polymeric'], + 'microradiometer': ['microradiometer', 'radiomicrometer'], + 'microseme': ['mesomeric', 'microseme', 'semicrome'], + 'microspectroscope': ['microspectroscope', 'spectromicroscope'], + 'microstat': ['microstat', 'stromatic'], + 'microsthene': ['merosthenic', 'microsthene'], + 'microstome': ['microstome', 'osmometric'], + 'microtia': ['amoritic', 'microtia'], + 'microtinae': ['marcionite', 'microtinae', 'remication'], + 'mid': ['dim', 'mid'], + 'midden': ['midden', 'minded'], + 'middler': ['middler', 'mildred'], + 'middy': ['didym', 'middy'], + 'mide': ['demi', 'diem', 'dime', 'mide'], + 'mider': ['dimer', 'mider'], + 'mididae': ['amidide', 'diamide', 'mididae'], + 'midrange': ['dirgeman', 'margined', 'midrange'], + 'midstory': ['midstory', 'modistry'], + 'miek': ['miek', 'mike'], + 'mien': ['mein', 'mien', 'mine'], + 'mig': ['gim', 'mig'], + 'migraine': ['imaginer', 'migraine'], + 'migrate': ['migrate', 'ragtime'], + 'migratory': ['gyromitra', 'migratory'], + 'mihrab': ['brahmi', 'mihrab'], + 'mikael': ['kelima', 'mikael'], + 'mike': ['miek', 'mike'], + 'mil': ['lim', 'mil'], + 'mila': ['amil', 'amli', 'lima', 'mail', 'mali', 'mila'], + 'milan': ['lamin', 'liman', 'milan'], + 'milden': ['milden', 'mindel'], + 'mildness': ['mildness', 'mindless'], + 'mildred': ['middler', 'mildred'], + 'mile': ['emil', 'lime', 'mile'], + 'milepost': ['milepost', 'polemist'], + 'miler': ['limer', 'meril', 'miler'], + 'miles': ['limes', 'miles', 'slime', 'smile'], + 'milesian': ['alienism', 'milesian'], + 'milestone': ['limestone', 'melonites', 'milestone'], + 'milicent': ['limnetic', 'milicent'], + 'military': ['limitary', 'military'], + 'militate': ['limitate', 'militate'], + 'militation': ['limitation', 'militation'], + 'milken': ['kimnel', 'milken'], + 'millennia': ['melanilin', 
'millennia'], + 'miller': ['miller', 'remill'], + 'millet': ['mellit', 'millet'], + 'milliare': ['milliare', 'ramillie'], + 'millrace': ['micellar', 'millrace'], + 'milner': ['limner', 'merlin', 'milner'], + 'milo': ['milo', 'moil'], + 'milsie': ['milsie', 'simile'], + 'miltonia': ['limation', 'miltonia'], + 'mima': ['ammi', 'imam', 'maim', 'mima'], + 'mime': ['emim', 'mime'], + 'mimester': ['meristem', 'mimester'], + 'mimi': ['immi', 'mimi'], + 'mimidae': ['amimide', 'mimidae'], + 'mimosa': ['amomis', 'mimosa'], + 'min': ['min', 'nim'], + 'mina': ['amin', 'main', 'mani', 'mian', 'mina', 'naim'], + 'minacity': ['imitancy', 'intimacy', 'minacity'], + 'minar': ['inarm', 'minar'], + 'minaret': ['minaret', 'raiment', 'tireman'], + 'minareted': ['demetrian', 'dermatine', 'meandrite', 'minareted'], + 'minargent': ['germinant', 'minargent'], + 'minatory': ['minatory', 'romanity'], + 'mince': ['menic', 'mince'], + 'minchiate': ['hematinic', 'minchiate'], + 'mincopie': ['mincopie', 'poimenic'], + 'minded': ['midden', 'minded'], + 'mindel': ['milden', 'mindel'], + 'mindelian': ['eliminand', 'mindelian'], + 'minder': ['minder', 'remind'], + 'mindless': ['mildness', 'mindless'], + 'mine': ['mein', 'mien', 'mine'], + 'miner': ['inerm', 'miner'], + 'mineral': ['marline', 'mineral', 'ramline'], + 'minerva': ['minerva', 'vermian'], + 'minerval': ['minerval', 'verminal'], + 'mingler': ['gremlin', 'mingler'], + 'miniator': ['miniator', 'triamino'], + 'minish': ['minish', 'nimshi'], + 'minister': ['minister', 'misinter'], + 'ministry': ['ministry', 'myristin'], + 'minkish': ['minkish', 'nimkish'], + 'minnetaree': ['minnetaree', 'nemertinea'], + 'minoan': ['amnion', 'minoan', 'nomina'], + 'minometer': ['minometer', 'omnimeter'], + 'minor': ['minor', 'morin'], + 'minorate': ['maronite', 'martinoe', 'minorate', 'morenita', 'romanite'], + 'minorca': ['amicron', 'marconi', 'minorca', 'romanic'], + 'minos': ['minos', 'osmin', 'simon'], + 'minot': ['minot', 'timon', 'tomin'], + 'mintage': ['mintage', 'teaming', 'tegmina'], + 'minter': ['minter', 'remint', 'termin'], + 'minuend': ['minuend', 'unmined'], + 'minuet': ['minuet', 'minute'], + 'minute': ['minuet', 'minute'], + 'minutely': ['minutely', 'untimely'], + 'minuter': ['minuter', 'unmiter'], + 'minyas': ['maysin', 'minyas', 'mysian'], + 'mir': ['mir', 'rim'], + 'mira': ['amir', 'irma', 'mari', 'mira', 'rami', 'rima'], + 'mirabel': ['embrail', 'mirabel'], + 'mirac': ['marci', 'mirac'], + 'miracle': ['claimer', 'miracle', 'reclaim'], + 'mirage': ['imager', 'maigre', 'margie', 'mirage'], + 'mirana': ['airman', 'amarin', 'marian', 'marina', 'mirana'], + 'miranha': ['ahriman', 'miranha'], + 'mirate': ['imaret', 'metria', 'mirate', 'rimate'], + 'mirbane': ['ambrein', 'mirbane'], + 'mire': ['emir', 'imer', 'mire', 'reim', 'remi', 'riem', 'rime'], + 'mirfak': ['marfik', 'mirfak'], + 'mirounga': ['mirounga', 'moringua', 'origanum'], + 'miry': ['miry', 'rimy', 'yirm'], + 'mirza': ['mirza', 'mizar'], + 'misact': ['mastic', 'misact'], + 'misadvise': ['admissive', 'misadvise'], + 'misagent': ['misagent', 'steaming'], + 'misaim': ['misaim', 'misima'], + 'misandry': ['misandry', 'myrsinad'], + 'misassociation': ['associationism', 'misassociation'], + 'misatone': ['masonite', 'misatone'], + 'misattend': ['misattend', 'tandemist'], + 'misaunter': ['antiserum', 'misaunter'], + 'misbehavior': ['behaviorism', 'misbehavior'], + 'mischance': ['mechanics', 'mischance'], + 'misclass': ['classism', 'misclass'], + 'miscoin': ['iconism', 'imsonic', 'miscoin'], + 'misconfiguration': 
['configurationism', 'misconfiguration'], + 'misconstitutional': ['constitutionalism', 'misconstitutional'], + 'misconstruction': ['constructionism', 'misconstruction'], + 'miscreant': ['encratism', 'miscreant'], + 'miscreation': ['anisometric', + 'creationism', + 'miscreation', + 'ramisection', + 'reactionism'], + 'miscue': ['cesium', 'miscue'], + 'misdate': ['diastem', 'misdate'], + 'misdaub': ['misdaub', 'submaid'], + 'misdeal': ['misdeal', 'mislead'], + 'misdealer': ['misdealer', 'misleader', 'misleared'], + 'misdeclare': ['creedalism', 'misdeclare'], + 'misdiet': ['misdiet', 'misedit', 'mistide'], + 'misdivision': ['divisionism', 'misdivision'], + 'misdread': ['disarmed', 'misdread'], + 'mise': ['mise', 'semi', 'sime'], + 'misease': ['misease', 'siamese'], + 'misecclesiastic': ['ecclesiasticism', 'misecclesiastic'], + 'misedit': ['misdiet', 'misedit', 'mistide'], + 'misexpression': ['expressionism', 'misexpression'], + 'mishmash': ['mishmash', 'shammish'], + 'misima': ['misaim', 'misima'], + 'misimpression': ['impressionism', 'misimpression'], + 'misinter': ['minister', 'misinter'], + 'mislabel': ['mislabel', 'semiball'], + 'mislabor': ['laborism', 'mislabor'], + 'mislead': ['misdeal', 'mislead'], + 'misleader': ['misdealer', 'misleader', 'misleared'], + 'mislear': ['mislear', 'realism'], + 'misleared': ['misdealer', 'misleader', 'misleared'], + 'misname': ['amenism', 'immanes', 'misname'], + 'misniac': ['cainism', 'misniac'], + 'misnomed': ['demonism', 'medimnos', 'misnomed'], + 'misoneism': ['misoneism', 'simeonism'], + 'mispage': ['impages', 'mispage'], + 'misperception': ['misperception', 'perceptionism'], + 'misperform': ['misperform', 'preformism'], + 'misphrase': ['misphrase', 'seraphism'], + 'misplay': ['impalsy', 'misplay'], + 'misplead': ['misplead', 'pedalism'], + 'misprisal': ['misprisal', 'spiralism'], + 'misproud': ['disporum', 'misproud'], + 'misprovide': ['disimprove', 'misprovide'], + 'misput': ['misput', 'sumpit'], + 'misquotation': ['antimosquito', 'misquotation'], + 'misrate': ['artemis', 'maestri', 'misrate'], + 'misread': ['misread', 'sidearm'], + 'misreform': ['misreform', 'reformism'], + 'misrelate': ['misrelate', 'salimeter'], + 'misrelation': ['misrelation', 'orientalism', 'relationism'], + 'misreliance': ['criminalese', 'misreliance'], + 'misreporter': ['misreporter', 'reporterism'], + 'misrepresentation': ['misrepresentation', 'representationism'], + 'misrepresenter': ['misrepresenter', 'remisrepresent'], + 'misrepute': ['misrepute', 'septerium'], + 'misrhyme': ['misrhyme', 'shimmery'], + 'misrule': ['misrule', 'simuler'], + 'missal': ['missal', 'salmis'], + 'missayer': ['emissary', 'missayer'], + 'misset': ['misset', 'tmesis'], + 'misshape': ['emphasis', 'misshape'], + 'missioner': ['missioner', 'remission'], + 'misspell': ['misspell', 'psellism'], + 'missuggestion': ['missuggestion', 'suggestionism'], + 'missy': ['missy', 'mysis'], + 'mist': ['mist', 'smit', 'stim'], + 'misteach': ['mastiche', 'misteach'], + 'mister': ['merist', 'mister', 'smiter'], + 'mistide': ['misdiet', 'misedit', 'mistide'], + 'mistle': ['mistle', 'smilet'], + 'mistone': ['mistone', 'moisten'], + 'mistradition': ['mistradition', 'traditionism'], + 'mistrain': ['marinist', 'mistrain'], + 'mistreat': ['mistreat', 'teratism'], + 'mistrial': ['mistrial', 'trialism'], + 'mistutor': ['mistutor', 'tutorism'], + 'misty': ['misty', 'stimy'], + 'misunderstander': ['misunderstander', 'remisunderstand'], + 'misura': ['misura', 'ramusi'], + 'misuser': ['misuser', 'surmise'], + 'mitannish': 
['mitannish', 'sminthian'], + 'mitch': ['micht', 'mitch'], + 'mite': ['emit', 'item', 'mite', 'time'], + 'mitella': ['mitella', 'tellima'], + 'miteproof': ['miteproof', 'timeproof'], + 'miter': ['merit', 'miter', 'mitre', 'remit', 'timer'], + 'mitered': ['demerit', 'dimeter', 'merited', 'mitered'], + 'miterer': ['meriter', 'miterer', 'trireme'], + 'mithraic': ['arithmic', 'mithraic', 'mithriac'], + 'mithraicist': ['mithraicist', 'mithraistic'], + 'mithraistic': ['mithraicist', 'mithraistic'], + 'mithriac': ['arithmic', 'mithraic', 'mithriac'], + 'mitra': ['mitra', 'tarmi', 'timar', 'tirma'], + 'mitral': ['mitral', 'ramtil'], + 'mitrate': ['martite', 'mitrate'], + 'mitre': ['merit', 'miter', 'mitre', 'remit', 'timer'], + 'mitrer': ['mitrer', 'retrim', 'trimer'], + 'mitridae': ['dimetria', 'mitridae', 'tiremaid', 'triamide'], + 'mixer': ['mixer', 'remix'], + 'mizar': ['mirza', 'mizar'], + 'mnesic': ['cnemis', 'mnesic'], + 'mniaceous': ['acuminose', 'mniaceous'], + 'mniotiltidae': ['delimitation', 'mniotiltidae'], + 'mnium': ['mnium', 'nummi'], + 'mo': ['mo', 'om'], + 'moabitic': ['biatomic', 'moabitic'], + 'moan': ['mano', 'moan', 'mona', 'noam', 'noma', 'oman'], + 'moarian': ['amanori', 'moarian'], + 'moat': ['atmo', 'atom', 'moat', 'toma'], + 'mob': ['bom', 'mob'], + 'mobbable': ['bombable', 'mobbable'], + 'mobber': ['bomber', 'mobber'], + 'mobbish': ['hobbism', 'mobbish'], + 'mobed': ['demob', 'mobed'], + 'mobile': ['bemoil', 'mobile'], + 'mobilian': ['binomial', 'mobilian'], + 'mobocrat': ['mobocrat', 'motorcab'], + 'mobship': ['mobship', 'phobism'], + 'mobster': ['bestorm', 'mobster'], + 'mocker': ['mocker', 'remock'], + 'mocmain': ['ammonic', 'mocmain'], + 'mod': ['dom', 'mod'], + 'modal': ['domal', 'modal'], + 'mode': ['dome', 'mode', 'moed'], + 'modeler': ['demerol', 'modeler', 'remodel'], + 'modelist': ['melodist', 'modelist'], + 'modena': ['daemon', 'damone', 'modena'], + 'modenese': ['modenese', 'needsome'], + 'moderant': ['moderant', 'normated'], + 'moderantism': ['memorandist', 'moderantism', 'semidormant'], + 'modern': ['modern', 'morned'], + 'modernistic': ['modernistic', 'monstricide'], + 'modestly': ['modestly', 'styledom'], + 'modesty': ['dystome', 'modesty'], + 'modiste': ['distome', 'modiste'], + 'modistry': ['midstory', 'modistry'], + 'modius': ['modius', 'sodium'], + 'moe': ['meo', 'moe'], + 'moed': ['dome', 'mode', 'moed'], + 'moerithere': ['heteromeri', 'moerithere'], + 'mogdad': ['goddam', 'mogdad'], + 'moha': ['ahom', 'moha'], + 'mohair': ['homrai', 'mahori', 'mohair'], + 'mohel': ['hemol', 'mohel'], + 'mohican': ['mohican', 'monachi'], + 'moho': ['homo', 'moho'], + 'mohur': ['humor', 'mohur'], + 'moider': ['dormie', 'moider'], + 'moieter': ['moieter', 'romeite'], + 'moiety': ['moiety', 'moyite'], + 'moil': ['milo', 'moil'], + 'moiles': ['lemosi', 'limose', 'moiles'], + 'moineau': ['eunomia', 'moineau'], + 'moira': ['maori', 'mario', 'moira'], + 'moisten': ['mistone', 'moisten'], + 'moistener': ['moistener', 'neoterism'], + 'moisture': ['moisture', 'semitour'], + 'moit': ['itmo', 'moit', 'omit', 'timo'], + 'mojo': ['joom', 'mojo'], + 'moke': ['kome', 'moke'], + 'moki': ['komi', 'moki'], + 'mola': ['loam', 'loma', 'malo', 'mola', 'olam'], + 'molar': ['molar', 'moral', 'romal'], + 'molarity': ['molarity', 'morality'], + 'molary': ['amyrol', 'molary'], + 'molder': ['dermol', 'molder', 'remold'], + 'moler': ['moler', 'morel'], + 'molge': ['glome', 'golem', 'molge'], + 'molidae': ['melodia', 'molidae'], + 'molinia': ['molinia', 'monilia'], + 'mollusca': ['callosum', 
'mollusca'], + 'moloid': ['moloid', 'oildom'], + 'molten': ['loment', 'melton', 'molten'], + 'molybdena': ['baldmoney', 'molybdena'], + 'molybdenic': ['combinedly', 'molybdenic'], + 'mome': ['memo', 'mome'], + 'moment': ['moment', 'montem'], + 'momentary': ['manometry', 'momentary'], + 'momentous': ['mesonotum', 'momentous'], + 'momotinae': ['amniotome', 'momotinae'], + 'mona': ['mano', 'moan', 'mona', 'noam', 'noma', 'oman'], + 'monachi': ['mohican', 'monachi'], + 'monactin': ['monactin', 'montanic'], + 'monad': ['damon', 'monad', 'nomad'], + 'monadic': ['monadic', 'nomadic'], + 'monadical': ['monadical', 'nomadical'], + 'monadically': ['monadically', 'nomadically'], + 'monadina': ['monadina', 'nomadian'], + 'monadism': ['monadism', 'nomadism'], + 'monaene': ['anemone', 'monaene'], + 'monal': ['almon', 'monal'], + 'monamniotic': ['commination', 'monamniotic'], + 'monanthous': ['anthonomus', 'monanthous'], + 'monarch': ['monarch', 'nomarch', 'onmarch'], + 'monarchial': ['harmonical', 'monarchial'], + 'monarchian': ['anharmonic', 'monarchian'], + 'monarchistic': ['chiromancist', 'monarchistic'], + 'monarchy': ['monarchy', 'nomarchy'], + 'monarda': ['anadrom', 'madrona', 'mandora', 'monarda', 'roadman'], + 'monas': ['manso', 'mason', 'monas'], + 'monasa': ['monasa', 'samoan'], + 'monase': ['monase', 'nosema'], + 'monaster': ['monaster', 'monstera', 'nearmost', 'storeman'], + 'monastery': ['monastery', 'oysterman'], + 'monastic': ['catonism', 'monastic'], + 'monastical': ['catmalison', 'monastical'], + 'monatomic': ['commation', 'monatomic'], + 'monaural': ['anomural', 'monaural'], + 'monday': ['dynamo', 'monday'], + 'mone': ['mone', 'nome', 'omen'], + 'monel': ['lemon', 'melon', 'monel'], + 'moner': ['enorm', 'moner', 'morne'], + 'monera': ['enamor', 'monera', 'oreman', 'romane'], + 'moneral': ['almoner', 'moneral', 'nemoral'], + 'monergist': ['gerontism', 'monergist'], + 'moneric': ['incomer', 'moneric'], + 'monesia': ['monesia', 'osamine', 'osmanie'], + 'monetary': ['monetary', 'myronate', 'naometry'], + 'money': ['money', 'moyen'], + 'moneybag': ['bogeyman', 'moneybag'], + 'moneyless': ['moneyless', 'moyenless'], + 'monger': ['germon', 'monger', 'morgen'], + 'mongler': ['mongler', 'mongrel'], + 'mongoose': ['gonosome', 'mongoose'], + 'mongrel': ['mongler', 'mongrel'], + 'mongrelity': ['longimetry', 'mongrelity'], + 'monial': ['monial', 'nomial', 'oilman'], + 'monias': ['monias', 'osamin', 'osmina'], + 'monica': ['camion', 'conima', 'manioc', 'monica'], + 'monilated': ['antimodel', 'maldonite', 'monilated'], + 'monilia': ['molinia', 'monilia'], + 'monism': ['monism', 'nomism', 'simmon'], + 'monist': ['inmost', 'monist', 'omnist'], + 'monistic': ['monistic', 'nicotism', 'nomistic'], + 'monitory': ['monitory', 'moronity'], + 'monitress': ['monitress', 'sermonist'], + 'mono': ['mono', 'moon'], + 'monoacid': ['damonico', 'monoacid'], + 'monoazo': ['monoazo', 'monozoa'], + 'monocleid': ['clinodome', 'melodicon', 'monocleid'], + 'monoclinous': ['monoclinous', 'monoclonius'], + 'monoclonius': ['monoclinous', 'monoclonius'], + 'monocracy': ['monocracy', 'nomocracy'], + 'monocystidae': ['monocystidae', 'monocystidea'], + 'monocystidea': ['monocystidae', 'monocystidea'], + 'monodactylous': ['condylomatous', 'monodactylous'], + 'monodactyly': ['dactylonomy', 'monodactyly'], + 'monodelphia': ['amidophenol', 'monodelphia'], + 'monodonta': ['anomodont', 'monodonta'], + 'monodram': ['monodram', 'romandom'], + 'monoecian': ['monoecian', 'neocomian'], + 'monoecism': ['economism', 'monoecism', 'monosemic'], 
+ 'monoeidic': ['meconioid', 'monoeidic'], + 'monogastric': ['gastronomic', 'monogastric'], + 'monogenist': ['monogenist', 'nomogenist'], + 'monogenous': ['monogenous', 'nomogenous'], + 'monogeny': ['monogeny', 'nomogeny'], + 'monogram': ['monogram', 'nomogram'], + 'monograph': ['monograph', 'nomograph', 'phonogram'], + 'monographer': ['geranomorph', 'monographer', 'nomographer'], + 'monographic': ['gramophonic', 'monographic', 'nomographic', 'phonogramic'], + 'monographical': ['gramophonical', 'monographical', 'nomographical'], + 'monographically': ['gramophonically', + 'monographically', + 'nomographically', + 'phonogramically'], + 'monographist': ['gramophonist', 'monographist'], + 'monography': ['monography', 'nomography'], + 'monoid': ['domino', 'monoid'], + 'monological': ['monological', 'nomological'], + 'monologist': ['monologist', 'nomologist', 'ontologism'], + 'monology': ['monology', 'nomology'], + 'monometer': ['metronome', 'monometer', 'monotreme'], + 'monometric': ['commorient', 'metronomic', 'monometric'], + 'monometrical': ['metronomical', 'monometrical'], + 'monomorphic': ['monomorphic', 'morphonomic'], + 'monont': ['monont', 'monton'], + 'monopathy': ['monopathy', 'pathonomy'], + 'monopersulphuric': ['monopersulphuric', 'permonosulphuric'], + 'monophote': ['monophote', 'motophone'], + 'monophylite': ['entomophily', 'monophylite'], + 'monophyllous': ['monophyllous', 'nomophyllous'], + 'monoplanist': ['monoplanist', 'postnominal'], + 'monopsychism': ['monopsychism', 'psychomonism'], + 'monopteral': ['monopteral', 'protonemal'], + 'monosemic': ['economism', 'monoecism', 'monosemic'], + 'monosodium': ['monosodium', 'omnimodous', 'onosmodium'], + 'monotheism': ['monotheism', 'nomotheism'], + 'monotheist': ['monotheist', 'thomsonite'], + 'monothetic': ['monothetic', 'nomothetic'], + 'monotreme': ['metronome', 'monometer', 'monotreme'], + 'monotypal': ['monotypal', 'toponymal'], + 'monotypic': ['monotypic', 'toponymic'], + 'monotypical': ['monotypical', 'toponymical'], + 'monozoa': ['monoazo', 'monozoa'], + 'monozoic': ['monozoic', 'zoonomic'], + 'monroeism': ['monroeism', 'semimoron'], + 'monsieur': ['inermous', 'monsieur'], + 'monstera': ['monaster', 'monstera', 'nearmost', 'storeman'], + 'monstricide': ['modernistic', 'monstricide'], + 'montage': ['geomant', 'magneto', 'megaton', 'montage'], + 'montagnais': ['antagonism', 'montagnais'], + 'montanic': ['monactin', 'montanic'], + 'montanite': ['mentation', 'montanite'], + 'montem': ['moment', 'montem'], + 'montes': ['montes', 'ostmen'], + 'montia': ['atimon', 'manito', 'montia'], + 'monticule': ['ctenolium', 'monticule'], + 'monton': ['monont', 'monton'], + 'montu': ['montu', 'mount', 'notum'], + 'monture': ['monture', 'mounter', 'remount'], + 'monumentary': ['monumentary', 'unmomentary'], + 'mood': ['doom', 'mood'], + 'mooder': ['doomer', 'mooder', 'redoom', 'roomed'], + 'mool': ['loom', 'mool'], + 'mools': ['mools', 'sloom'], + 'moon': ['mono', 'moon'], + 'moonglade': ['megalodon', 'moonglade'], + 'moonite': ['emotion', 'moonite'], + 'moonja': ['majoon', 'moonja'], + 'moonscape': ['manoscope', 'moonscape'], + 'moonseed': ['endosome', 'moonseed'], + 'moontide': ['demotion', 'entomoid', 'moontide'], + 'moop': ['moop', 'pomo'], + 'moor': ['moor', 'moro', 'room'], + 'moorage': ['moorage', 'roomage'], + 'moorball': ['ballroom', 'moorball'], + 'moore': ['moore', 'romeo'], + 'moorn': ['moorn', 'moron'], + 'moorship': ['isomorph', 'moorship'], + 'moorup': ['moorup', 'uproom'], + 'moorwort': ['moorwort', 'rootworm', 'tomorrow', 
'wormroot'], + 'moory': ['moory', 'roomy'], + 'moost': ['moost', 'smoot'], + 'moot': ['moot', 'toom'], + 'mooth': ['mooth', 'thoom'], + 'mootstead': ['mootstead', 'stomatode'], + 'mop': ['mop', 'pom'], + 'mopane': ['mopane', 'pomane'], + 'mope': ['mope', 'poem', 'pome'], + 'moper': ['merop', 'moper', 'proem', 'remop'], + 'mophead': ['hemapod', 'mophead'], + 'mopish': ['mopish', 'ophism'], + 'mopla': ['mopla', 'palmo'], + 'mopsy': ['mopsy', 'myops'], + 'mora': ['amor', 'maro', 'mora', 'omar', 'roam'], + 'morainal': ['manorial', 'morainal'], + 'moraine': ['moraine', 'romaine'], + 'moral': ['molar', 'moral', 'romal'], + 'morality': ['molarity', 'morality'], + 'morals': ['morals', 'morsal'], + 'moran': ['manor', 'moran', 'norma', 'ramon', 'roman'], + 'morat': ['amort', 'morat', 'torma'], + 'morate': ['amoret', 'morate'], + 'moray': ['mayor', 'moray'], + 'morbid': ['dibrom', 'morbid'], + 'mordancy': ['dormancy', 'mordancy'], + 'mordant': ['dormant', 'mordant'], + 'mordenite': ['interdome', 'mordenite', 'nemertoid'], + 'mordicate': ['decimator', 'medicator', 'mordicate'], + 'more': ['mero', 'more', 'omer', 'rome'], + 'moreish': ['heroism', 'moreish'], + 'morel': ['moler', 'morel'], + 'morencite': ['entomeric', 'intercome', 'morencite'], + 'morenita': ['maronite', 'martinoe', 'minorate', 'morenita', 'romanite'], + 'moreote': ['moreote', 'oometer'], + 'mores': ['meros', 'mores', 'morse', 'sermo', 'smore'], + 'morga': ['agrom', 'morga'], + 'morganatic': ['actinogram', 'morganatic'], + 'morgay': ['gyroma', 'morgay'], + 'morgen': ['germon', 'monger', 'morgen'], + 'moribund': ['moribund', 'unmorbid'], + 'moric': ['micro', 'moric', 'romic'], + 'moriche': ['homeric', 'moriche'], + 'morin': ['minor', 'morin'], + 'moringa': ['ingomar', 'moringa', 'roaming'], + 'moringua': ['mirounga', 'moringua', 'origanum'], + 'morn': ['morn', 'norm'], + 'morne': ['enorm', 'moner', 'morne'], + 'morned': ['modern', 'morned'], + 'mornless': ['mornless', 'normless'], + 'moro': ['moor', 'moro', 'room'], + 'morocota': ['coatroom', 'morocota'], + 'moron': ['moorn', 'moron'], + 'moronic': ['moronic', 'omicron'], + 'moronity': ['monitory', 'moronity'], + 'morphea': ['amphore', 'morphea'], + 'morphonomic': ['monomorphic', 'morphonomic'], + 'morphotic': ['microphot', 'morphotic'], + 'morphotropic': ['morphotropic', 'protomorphic'], + 'morrisean': ['morrisean', 'rosmarine'], + 'morsal': ['morals', 'morsal'], + 'morse': ['meros', 'mores', 'morse', 'sermo', 'smore'], + 'mortacious': ['mortacious', 'urosomatic'], + 'mortar': ['marrot', 'mortar'], + 'mortician': ['martinico', 'mortician'], + 'mortise': ['erotism', 'mortise', 'trisome'], + 'morton': ['morton', 'tomorn'], + 'mortuarian': ['mortuarian', 'muratorian'], + 'mortuary': ['mortuary', 'outmarry'], + 'mortuous': ['mortuous', 'tumorous'], + 'morus': ['morus', 'mosur'], + 'mosaic': ['aosmic', 'mosaic'], + 'mosandrite': ['mosandrite', 'tarsonemid'], + 'mosasauri': ['amaurosis', 'mosasauri'], + 'moschate': ['chatsome', 'moschate'], + 'mose': ['meso', 'mose', 'some'], + 'mosker': ['mosker', 'smoker'], + 'mosser': ['messor', 'mosser', 'somers'], + 'moste': ['moste', 'smote'], + 'mosting': ['gnomist', 'mosting'], + 'mosul': ['mosul', 'mouls', 'solum'], + 'mosur': ['morus', 'mosur'], + 'mot': ['mot', 'tom'], + 'mote': ['mote', 'tome'], + 'motel': ['metol', 'motel'], + 'motet': ['motet', 'motte', 'totem'], + 'mothed': ['method', 'mothed'], + 'mother': ['mother', 'thermo'], + 'motherland': ['enthraldom', 'motherland'], + 'motherward': ['motherward', 'threadworm'], + 'motograph': 
['motograph', 'photogram'], + 'motographic': ['motographic', 'tomographic'], + 'motophone': ['monophote', 'motophone'], + 'motorcab': ['mobocrat', 'motorcab'], + 'motte': ['motet', 'motte', 'totem'], + 'moud': ['doum', 'moud', 'odum'], + 'moudy': ['moudy', 'yomud'], + 'moul': ['moul', 'ulmo'], + 'mouls': ['mosul', 'mouls', 'solum'], + 'mound': ['donum', 'mound'], + 'mount': ['montu', 'mount', 'notum'], + 'mountained': ['emundation', 'mountained'], + 'mountaineer': ['enumeration', 'mountaineer'], + 'mounted': ['demount', 'mounted'], + 'mounter': ['monture', 'mounter', 'remount'], + 'mousery': ['mousery', 'seymour'], + 'mousoni': ['mousoni', 'ominous'], + 'mousse': ['mousse', 'smouse'], + 'moutan': ['amount', 'moutan', 'outman'], + 'mouther': ['mouther', 'theorum'], + 'mover': ['mover', 'vomer'], + 'moy': ['moy', 'yom'], + 'moyen': ['money', 'moyen'], + 'moyenless': ['moneyless', 'moyenless'], + 'moyite': ['moiety', 'moyite'], + 'mru': ['mru', 'rum'], + 'mu': ['mu', 'um'], + 'muang': ['muang', 'munga'], + 'much': ['chum', 'much'], + 'mucic': ['cumic', 'mucic'], + 'mucilage': ['glucemia', 'mucilage'], + 'mucin': ['cumin', 'mucin'], + 'mucinoid': ['conidium', 'mucinoid', 'oncidium'], + 'mucofibrous': ['fibromucous', 'mucofibrous'], + 'mucoid': ['codium', 'mucoid'], + 'muconic': ['muconic', 'uncomic'], + 'mucor': ['mucor', 'mucro'], + 'mucoserous': ['mucoserous', 'seromucous'], + 'mucro': ['mucor', 'mucro'], + 'mucrones': ['consumer', 'mucrones'], + 'mud': ['dum', 'mud'], + 'mudar': ['mudar', 'mudra'], + 'mudden': ['edmund', 'mudden'], + 'mudir': ['mudir', 'murid'], + 'mudra': ['mudar', 'mudra'], + 'mudstone': ['mudstone', 'unmodest'], + 'mug': ['gum', 'mug'], + 'muga': ['gaum', 'muga'], + 'muggles': ['muggles', 'smuggle'], + 'mugweed': ['gumweed', 'mugweed'], + 'muid': ['duim', 'muid'], + 'muilla': ['allium', 'alulim', 'muilla'], + 'muir': ['muir', 'rimu'], + 'muishond': ['muishond', 'unmodish'], + 'muist': ['muist', 'tuism'], + 'mukri': ['kurmi', 'mukri'], + 'muleta': ['amulet', 'muleta'], + 'mulga': ['algum', 'almug', 'glaum', 'gluma', 'mulga'], + 'mulier': ['mulier', 'muriel'], + 'mulita': ['mulita', 'ultima'], + 'mulk': ['kulm', 'mulk'], + 'multani': ['multani', 'talinum'], + 'multinervose': ['multinervose', 'volunteerism'], + 'multipole': ['impollute', 'multipole'], + 'mumbler': ['bummler', 'mumbler'], + 'munda': ['maund', 'munda', 'numda', 'undam', 'unmad'], + 'mundane': ['mundane', 'unamend', 'unmaned', 'unnamed'], + 'munga': ['muang', 'munga'], + 'mungo': ['mungo', 'muong'], + 'munia': ['maniu', 'munia', 'unami'], + 'munity': ['munity', 'mutiny'], + 'muong': ['mungo', 'muong'], + 'mura': ['arum', 'maru', 'mura'], + 'murage': ['mauger', 'murage'], + 'mural': ['mural', 'rumal'], + 'muralist': ['altruism', 'muralist', 'traulism', 'ultraism'], + 'muran': ['muran', 'ruman', 'unarm', 'unram', 'urman'], + 'murat': ['martu', 'murat', 'turma'], + 'muratorian': ['mortuarian', 'muratorian'], + 'murderer': ['demurrer', 'murderer'], + 'murdering': ['demurring', 'murdering'], + 'murderingly': ['demurringly', 'murderingly'], + 'murex': ['murex', 'rumex'], + 'murga': ['garum', 'murga'], + 'muricate': ['ceratium', 'muricate'], + 'muricine': ['irenicum', 'muricine'], + 'murid': ['mudir', 'murid'], + 'muriel': ['mulier', 'muriel'], + 'murine': ['murine', 'nerium'], + 'murly': ['murly', 'rumly'], + 'murmurer': ['murmurer', 'remurmur'], + 'murrain': ['murrain', 'murrina'], + 'murrina': ['murrain', 'murrina'], + 'murut': ['murut', 'utrum'], + 'murza': ['mazur', 'murza'], + 'mus': ['mus', 'sum'], + 'musa': 
['masu', 'musa', 'saum'], + 'musal': ['lamus', 'malus', 'musal', 'slaum'], + 'musalmani': ['manualism', 'musalmani'], + 'musang': ['magnus', 'musang'], + 'musar': ['musar', 'ramus', 'rusma', 'surma'], + 'musca': ['camus', 'musca', 'scaum', 'sumac'], + 'muscade': ['camused', 'muscade'], + 'muscarine': ['muscarine', 'sucramine'], + 'musci': ['musci', 'music'], + 'muscicole': ['leucocism', 'muscicole'], + 'muscinae': ['muscinae', 'semuncia'], + 'muscle': ['clumse', 'muscle'], + 'muscly': ['clumsy', 'muscly'], + 'muscone': ['consume', 'muscone'], + 'muscot': ['custom', 'muscot'], + 'mused': ['mused', 'sedum'], + 'museography': ['hypergamous', 'museography'], + 'muser': ['muser', 'remus', 'serum'], + 'musha': ['hamus', 'musha'], + 'music': ['musci', 'music'], + 'musicate': ['autecism', 'musicate'], + 'musico': ['musico', 'suomic'], + 'musie': ['iseum', 'musie'], + 'musing': ['musing', 'signum'], + 'muslined': ['muslined', 'unmisled', 'unsmiled'], + 'musophagine': ['amphigenous', 'musophagine'], + 'mussaenda': ['mussaenda', 'unamassed'], + 'must': ['must', 'smut', 'stum'], + 'mustang': ['mustang', 'stagnum'], + 'mustard': ['durmast', 'mustard'], + 'muster': ['muster', 'sertum', 'stumer'], + 'musterer': ['musterer', 'remuster'], + 'mustily': ['mustily', 'mytilus'], + 'muta': ['muta', 'taum'], + 'mutable': ['atumble', 'mutable'], + 'mutant': ['mutant', 'tantum', 'tutman'], + 'mutase': ['meatus', 'mutase'], + 'mute': ['mute', 'tume'], + 'muteness': ['muteness', 'tenesmus'], + 'mutescence': ['mutescence', 'tumescence'], + 'mutilate': ['mutilate', 'ultimate'], + 'mutilation': ['mutilation', 'ultimation'], + 'mutiny': ['munity', 'mutiny'], + 'mutism': ['mutism', 'summit'], + 'mutual': ['mutual', 'umlaut'], + 'mutulary': ['mutulary', 'tumulary'], + 'muysca': ['cyamus', 'muysca'], + 'mwa': ['maw', 'mwa'], + 'my': ['my', 'ym'], + 'mya': ['amy', 'may', 'mya', 'yam'], + 'myal': ['amyl', 'lyam', 'myal'], + 'myaria': ['amiray', 'myaria'], + 'myatonic': ['cymation', 'myatonic', 'onymatic'], + 'mycelia': ['amyelic', 'mycelia'], + 'mycelian': ['clymenia', 'mycelian'], + 'mycoid': ['cymoid', 'mycoid'], + 'mycophagist': ['mycophagist', 'phagocytism'], + 'mycose': ['cymose', 'mycose'], + 'mycosterol': ['mycosterol', 'sclerotomy'], + 'mycotrophic': ['chromotypic', 'cormophytic', 'mycotrophic'], + 'mycterism': ['mycterism', 'symmetric'], + 'myctodera': ['myctodera', 'radectomy'], + 'mydaleine': ['amylidene', 'mydaleine'], + 'myeloencephalitis': ['encephalomyelitis', 'myeloencephalitis'], + 'myelomeningitis': ['meningomyelitis', 'myelomeningitis'], + 'myelomeningocele': ['meningomyelocele', 'myelomeningocele'], + 'myelon': ['lemony', 'myelon'], + 'myelonal': ['amylenol', 'myelonal'], + 'myeloneuritis': ['myeloneuritis', 'neuromyelitis'], + 'myeloplast': ['meloplasty', 'myeloplast'], + 'mygale': ['gamely', 'gleamy', 'mygale'], + 'myitis': ['myitis', 'simity'], + 'myliobatid': ['bimodality', 'myliobatid'], + 'mymar': ['mymar', 'rammy'], + 'myna': ['many', 'myna'], + 'myoatrophy': ['amyotrophy', 'myoatrophy'], + 'myocolpitis': ['myocolpitis', 'polysomitic'], + 'myofibroma': ['fibromyoma', 'myofibroma'], + 'myoglobin': ['boomingly', 'myoglobin'], + 'myographic': ['microphagy', 'myographic'], + 'myographist': ['myographist', 'pythagorism'], + 'myolipoma': ['lipomyoma', 'myolipoma'], + 'myomohysterectomy': ['hysteromyomectomy', 'myomohysterectomy'], + 'myope': ['myope', 'pomey'], + 'myoplastic': ['myoplastic', 'polymastic'], + 'myoplasty': ['myoplasty', 'polymasty'], + 'myopolar': ['myopolar', 'playroom'], + 'myops': 
['mopsy', 'myops'], + 'myosin': ['isonym', 'myosin', 'simony'], + 'myosote': ['myosote', 'toysome'], + 'myotenotomy': ['myotenotomy', 'tenomyotomy'], + 'myotic': ['comity', 'myotic'], + 'myra': ['army', 'mary', 'myra', 'yarm'], + 'myrcia': ['myrcia', 'myrica'], + 'myrialiter': ['myrialiter', 'myrialitre'], + 'myrialitre': ['myrialiter', 'myrialitre'], + 'myriameter': ['myriameter', 'myriametre'], + 'myriametre': ['myriameter', 'myriametre'], + 'myrianida': ['dimyarian', 'myrianida'], + 'myrica': ['myrcia', 'myrica'], + 'myristate': ['myristate', 'tasimetry'], + 'myristin': ['ministry', 'myristin'], + 'myristone': ['myristone', 'smyrniote'], + 'myronate': ['monetary', 'myronate', 'naometry'], + 'myrsinad': ['misandry', 'myrsinad'], + 'myrtales': ['masterly', 'myrtales'], + 'myrtle': ['myrtle', 'termly'], + 'mysell': ['mysell', 'smelly'], + 'mysian': ['maysin', 'minyas', 'mysian'], + 'mysis': ['missy', 'mysis'], + 'mysterial': ['mysterial', 'salimetry'], + 'mystes': ['mystes', 'system'], + 'mythographer': ['mythographer', 'thermography'], + 'mythogreen': ['mythogreen', 'thermogeny'], + 'mythologer': ['mythologer', 'thermology'], + 'mythopoeic': ['homeotypic', 'mythopoeic'], + 'mythus': ['mythus', 'thymus'], + 'mytilid': ['mytilid', 'timidly'], + 'mytilus': ['mustily', 'mytilus'], + 'myxochondroma': ['chondromyxoma', 'myxochondroma'], + 'myxochondrosarcoma': ['chondromyxosarcoma', 'myxochondrosarcoma'], + 'myxocystoma': ['cystomyxoma', 'myxocystoma'], + 'myxofibroma': ['fibromyxoma', 'myxofibroma'], + 'myxofibrosarcoma': ['fibromyxosarcoma', 'myxofibrosarcoma'], + 'myxoinoma': ['inomyxoma', 'myxoinoma'], + 'myxolipoma': ['lipomyxoma', 'myxolipoma'], + 'myxotheca': ['chemotaxy', 'myxotheca'], + 'na': ['an', 'na'], + 'naa': ['ana', 'naa'], + 'naam': ['anam', 'mana', 'naam', 'nama'], + 'nab': ['ban', 'nab'], + 'nabak': ['banak', 'nabak'], + 'nabal': ['alban', 'balan', 'banal', 'laban', 'nabal', 'nabla'], + 'nabalism': ['bailsman', 'balanism', 'nabalism'], + 'nabalite': ['albanite', 'balanite', 'nabalite'], + 'nabalus': ['balanus', 'nabalus', 'subanal'], + 'nabk': ['bank', 'knab', 'nabk'], + 'nabla': ['alban', 'balan', 'banal', 'laban', 'nabal', 'nabla'], + 'nable': ['leban', 'nable'], + 'nabobishly': ['babylonish', 'nabobishly'], + 'nabothian': ['bathonian', 'nabothian'], + 'nabs': ['nabs', 'snab'], + 'nabu': ['baun', 'buna', 'nabu', 'nuba'], + 'nacarat': ['cantara', 'nacarat'], + 'nace': ['acne', 'cane', 'nace'], + 'nachitoch': ['chanchito', 'nachitoch'], + 'nacre': ['caner', 'crane', 'crena', 'nacre', 'rance'], + 'nacred': ['cedarn', 'dancer', 'nacred'], + 'nacreous': ['carneous', 'nacreous'], + 'nacrite': ['centiar', 'certain', 'citrean', 'nacrite', 'nectria'], + 'nacrous': ['carnous', 'nacrous', 'narcous'], + 'nadder': ['dander', 'darned', 'nadder'], + 'nadeem': ['amende', 'demean', 'meaned', 'nadeem'], + 'nadir': ['darin', 'dinar', 'drain', 'indra', 'nadir', 'ranid'], + 'nadorite': ['andorite', 'nadorite', 'ordinate', 'rodentia'], + 'nae': ['ean', 'nae', 'nea'], + 'nael': ['alen', 'lane', 'lean', 'lena', 'nael', 'neal'], + 'naether': ['earthen', 'enheart', 'hearten', 'naether', 'teheran', 'traheen'], + 'nag': ['gan', 'nag'], + 'nagara': ['angara', 'aranga', 'nagara'], + 'nagari': ['graian', 'nagari'], + 'nagatelite': ['gelatinate', 'nagatelite'], + 'nagger': ['ganger', 'grange', 'nagger'], + 'nagging': ['ganging', 'nagging'], + 'naggle': ['laggen', 'naggle'], + 'naggly': ['gangly', 'naggly'], + 'nagmaal': ['malanga', 'nagmaal'], + 'nagnag': ['gangan', 'nagnag'], + 'nagnail': ['alangin', 
'anginal', 'anglian', 'nagnail'], + 'nagor': ['angor', + 'argon', + 'goran', + 'grano', + 'groan', + 'nagor', + 'orang', + 'organ', + 'rogan', + 'ronga'], + 'nagster': ['angster', 'garnets', 'nagster', 'strange'], + 'nagual': ['angula', 'nagual'], + 'nahani': ['hainan', 'nahani'], + 'nahor': ['nahor', 'norah', 'rohan'], + 'nahum': ['human', 'nahum'], + 'naiad': ['danai', 'diana', 'naiad'], + 'naiant': ['naiant', 'tainan'], + 'naias': ['asian', 'naias', 'sanai'], + 'naid': ['adin', 'andi', 'dain', 'dani', 'dian', 'naid'], + 'naif': ['fain', 'naif'], + 'naifly': ['fainly', 'naifly'], + 'naig': ['gain', 'inga', 'naig', 'ngai'], + 'naik': ['akin', 'kina', 'naik'], + 'nail': ['alin', 'anil', 'lain', 'lina', 'nail'], + 'nailer': ['arline', 'larine', 'linear', 'nailer', 'renail'], + 'naileress': ['earliness', 'naileress'], + 'nailery': ['inlayer', 'nailery'], + 'nailless': ['nailless', 'sensilla'], + 'nailrod': ['nailrod', 'ordinal', 'rinaldo', 'rodinal'], + 'nailshop': ['nailshop', 'siphonal'], + 'naily': ['inlay', 'naily'], + 'naim': ['amin', 'main', 'mani', 'mian', 'mina', 'naim'], + 'nain': ['nain', 'nina'], + 'naio': ['aion', 'naio'], + 'nair': ['arni', 'iran', 'nair', 'rain', 'rani'], + 'nairy': ['nairy', 'rainy'], + 'nais': ['anis', 'nais', 'nasi', 'nias', 'sain', 'sina'], + 'naish': ['naish', 'shina'], + 'naither': ['anither', 'inearth', 'naither'], + 'naive': ['avine', 'naive', 'vinea'], + 'naivete': ['naivete', 'nieveta'], + 'nak': ['kan', 'nak'], + 'naked': ['kande', 'knead', 'naked'], + 'nakedish': ['headskin', 'nakedish', 'sinkhead'], + 'naker': ['anker', 'karen', 'naker'], + 'nakir': ['inkra', 'krina', 'nakir', 'rinka'], + 'nako': ['kona', 'nako'], + 'nalita': ['antlia', 'latian', 'nalita'], + 'nallah': ['hallan', 'nallah'], + 'nam': ['man', 'nam'], + 'nama': ['anam', 'mana', 'naam', 'nama'], + 'namaz': ['namaz', 'zaman'], + 'nambe': ['beman', 'nambe'], + 'namda': ['adman', 'daman', 'namda'], + 'name': ['amen', 'enam', 'mane', 'mean', 'name', 'nema'], + 'nameability': ['amenability', 'nameability'], + 'nameable': ['amenable', 'nameable'], + 'nameless': ['lameness', 'maleness', 'maneless', 'nameless'], + 'nameling': ['mangelin', 'nameling'], + 'namely': ['meanly', 'namely'], + 'namer': ['enarm', 'namer', 'reman'], + 'nammad': ['madman', 'nammad'], + 'nan': ['ann', 'nan'], + 'nana': ['anan', 'anna', 'nana'], + 'nanaimo': ['nanaimo', 'omniana'], + 'nancy': ['canny', 'nancy'], + 'nandi': ['indan', 'nandi'], + 'nane': ['anne', 'nane'], + 'nanes': ['nanes', 'senna'], + 'nanoid': ['adonin', 'nanoid', 'nonaid'], + 'nanosomia': ['nanosomia', 'nosomania'], + 'nanpie': ['nanpie', 'pennia', 'pinnae'], + 'naological': ['colonalgia', 'naological'], + 'naometry': ['monetary', 'myronate', 'naometry'], + 'naomi': ['amino', 'inoma', 'naomi', 'omani', 'omina'], + 'naoto': ['naoto', 'toona'], + 'nap': ['nap', 'pan'], + 'napaean': ['anapnea', 'napaean'], + 'nape': ['nape', 'neap', 'nepa', 'pane', 'pean'], + 'napead': ['napead', 'panade'], + 'napery': ['napery', 'pyrena'], + 'naphthalize': ['naphthalize', 'phthalazine'], + 'naphtol': ['haplont', 'naphtol'], + 'napkin': ['napkin', 'pankin'], + 'napped': ['append', 'napped'], + 'napper': ['napper', 'papern'], + 'napron': ['napron', 'nonpar'], + 'napthionic': ['antiphonic', 'napthionic'], + 'napu': ['napu', 'puan', 'puna'], + 'nar': ['arn', 'nar', 'ran'], + 'narcaciontes': ['narcaciontes', 'transoceanic'], + 'narcose': ['carnose', 'coarsen', 'narcose'], + 'narcotia': ['craniota', 'croatian', 'narcotia', 'raincoat'], + 'narcoticism': ['intracosmic', 
'narcoticism'], + 'narcotina': ['anarcotin', 'cantorian', 'carnation', 'narcotina'], + 'narcotine': ['connarite', 'container', 'cotarnine', 'crenation', 'narcotine'], + 'narcotism': ['narcotism', 'romancist'], + 'narcotist': ['narcotist', 'stratonic'], + 'narcotize': ['narcotize', 'zirconate'], + 'narcous': ['carnous', 'nacrous', 'narcous'], + 'nard': ['darn', 'nard', 'rand'], + 'nardine': ['adrenin', 'nardine'], + 'nardus': ['nardus', 'sundar', 'sundra'], + 'nares': ['anser', 'nares', 'rasen', 'snare'], + 'nargil': ['nargil', 'raglin'], + 'naric': ['cairn', 'crain', 'naric'], + 'narica': ['acinar', + 'arnica', + 'canari', + 'carian', + 'carina', + 'crania', + 'narica'], + 'nariform': ['nariform', 'raniform'], + 'narine': ['narine', 'ranine'], + 'nark': ['knar', 'kran', 'nark', 'rank'], + 'narration': ['narration', 'tornarian'], + 'narthecium': ['anthericum', 'narthecium'], + 'nary': ['nary', 'yarn'], + 'nasab': ['nasab', 'saban'], + 'nasal': ['alans', 'lanas', 'nasal'], + 'nasalism': ['nasalism', 'sailsman'], + 'nasard': ['nasard', 'sandra'], + 'nascapi': ['capsian', 'caspian', 'nascapi', 'panisca'], + 'nash': ['hans', 'nash', 'shan'], + 'nashgab': ['bangash', 'nashgab'], + 'nasi': ['anis', 'nais', 'nasi', 'nias', 'sain', 'sina'], + 'nasial': ['anisal', 'nasial', 'salian', 'salina'], + 'nasitis': ['nasitis', 'sistani'], + 'nasoantral': ['antronasal', 'nasoantral'], + 'nasobuccal': ['bucconasal', 'nasobuccal'], + 'nasofrontal': ['frontonasal', 'nasofrontal'], + 'nasolabial': ['labionasal', 'nasolabial'], + 'nasolachrymal': ['lachrymonasal', 'nasolachrymal'], + 'nasonite': ['estonian', 'nasonite'], + 'nasoorbital': ['nasoorbital', 'orbitonasal'], + 'nasopalatal': ['nasopalatal', 'palatonasal'], + 'nasoseptal': ['nasoseptal', 'septonasal'], + 'nassa': ['nassa', 'sasan'], + 'nassidae': ['assidean', 'nassidae'], + 'nast': ['nast', 'sant', 'stan'], + 'nastic': ['incast', 'nastic'], + 'nastily': ['nastily', 'saintly', 'staynil'], + 'nasturtion': ['antrustion', 'nasturtion'], + 'nasty': ['nasty', 'styan', 'tansy'], + 'nasua': ['nasua', 'sauna'], + 'nasus': ['nasus', 'susan'], + 'nasute': ['nasute', 'nauset', 'unseat'], + 'nat': ['ant', 'nat', 'tan'], + 'nataka': ['nataka', 'tanaka'], + 'natal': ['antal', 'natal'], + 'natalia': ['altaian', 'latania', 'natalia'], + 'natalie': ['laniate', 'natalie', 'taenial'], + 'nataloin': ['latonian', 'nataloin', 'national'], + 'natals': ['aslant', 'lansat', 'natals', 'santal'], + 'natator': ['arnotta', 'natator'], + 'natatorium': ['maturation', 'natatorium'], + 'natch': ['chant', 'natch'], + 'nate': ['ante', 'aten', 'etna', 'nate', 'neat', 'taen', 'tane', 'tean'], + 'nates': ['antes', 'nates', 'stane', 'stean'], + 'nathan': ['nathan', 'thanan'], + 'nathe': ['enhat', 'ethan', 'nathe', 'neath', 'thane'], + 'nather': ['anther', 'nather', 'tharen', 'thenar'], + 'natica': ['actian', 'natica', 'tanica'], + 'naticiform': ['actiniform', 'naticiform'], + 'naticine': ['actinine', 'naticine'], + 'natick': ['catkin', 'natick'], + 'naticoid': ['actinoid', 'diatonic', 'naticoid'], + 'nation': ['anoint', 'nation'], + 'national': ['latonian', 'nataloin', 'national'], + 'native': ['native', 'navite'], + 'natively': ['natively', 'venality'], + 'nativist': ['nativist', 'visitant'], + 'natr': ['natr', 'rant', 'tarn', 'tran'], + 'natricinae': ['natricinae', 'nectarinia'], + 'natricine': ['crinanite', 'natricine'], + 'natrolite': ['natrolite', 'tentorial'], + 'natter': ['attern', 'natter', 'ratten', 'tarten'], + 'nattered': ['attender', 'nattered', 'reattend'], + 'nattily': 
['nattily', 'titanyl'], + 'nattle': ['latent', 'latten', 'nattle', 'talent', 'tantle'], + 'naturalistic': ['naturalistic', 'unartistical'], + 'naturing': ['gainturn', 'naturing'], + 'naturism': ['naturism', 'sturmian', 'turanism'], + 'naturist': ['antirust', 'naturist'], + 'naturistic': ['naturistic', 'unartistic'], + 'naturistically': ['naturistically', 'unartistically'], + 'nauger': ['nauger', 'raunge', 'ungear'], + 'naumk': ['kuman', 'naumk'], + 'naunt': ['naunt', 'tunna'], + 'nauntle': ['annulet', 'nauntle'], + 'nauplius': ['nauplius', 'paulinus'], + 'nauset': ['nasute', 'nauset', 'unseat'], + 'naut': ['antu', 'aunt', 'naut', 'taun', 'tuan', 'tuna'], + 'nauther': ['haunter', 'nauther', 'unearth', 'unheart', 'urethan'], + 'nautic': ['anicut', 'nautic', 'ticuna', 'tunica'], + 'nautical': ['actinula', 'nautical'], + 'nautiloid': ['lutianoid', 'nautiloid'], + 'nautilus': ['lutianus', 'nautilus', 'ustulina'], + 'naval': ['alvan', 'naval'], + 'navalist': ['navalist', 'salivant'], + 'navar': ['navar', 'varan', 'varna'], + 'nave': ['evan', 'nave', 'vane'], + 'navel': ['elvan', 'navel', 'venal'], + 'naviculare': ['naviculare', 'uncavalier'], + 'navigant': ['navigant', 'vaginant'], + 'navigate': ['navigate', 'vaginate'], + 'navite': ['native', 'navite'], + 'naw': ['awn', 'naw', 'wan'], + 'nawt': ['nawt', 'tawn', 'want'], + 'nay': ['any', 'nay', 'yan'], + 'nayar': ['aryan', 'nayar', 'rayan'], + 'nazarite': ['nazarite', 'nazirate', 'triazane'], + 'nazi': ['nazi', 'zain'], + 'nazim': ['nazim', 'nizam'], + 'nazirate': ['nazarite', 'nazirate', 'triazane'], + 'nazirite': ['nazirite', 'triazine'], + 'ne': ['en', 'ne'], + 'nea': ['ean', 'nae', 'nea'], + 'neal': ['alen', 'lane', 'lean', 'lena', 'nael', 'neal'], + 'neanic': ['canine', 'encina', 'neanic'], + 'neap': ['nape', 'neap', 'nepa', 'pane', 'pean'], + 'neapolitan': ['antelopian', 'neapolitan', 'panelation'], + 'nearby': ['barney', 'nearby'], + 'nearctic': ['acentric', 'encratic', 'nearctic'], + 'nearest': ['earnest', 'eastern', 'nearest'], + 'nearish': ['arshine', 'nearish', 'rhesian', 'sherani'], + 'nearly': ['anerly', 'nearly'], + 'nearmost': ['monaster', 'monstera', 'nearmost', 'storeman'], + 'nearthrosis': ['enarthrosis', 'nearthrosis'], + 'neat': ['ante', 'aten', 'etna', 'nate', 'neat', 'taen', 'tane', 'tean'], + 'neaten': ['etnean', 'neaten'], + 'neath': ['enhat', 'ethan', 'nathe', 'neath', 'thane'], + 'neatherd': ['adherent', 'headrent', 'neatherd', 'threaden'], + 'neatherdess': ['heartedness', 'neatherdess'], + 'neb': ['ben', 'neb'], + 'neback': ['backen', 'neback'], + 'nebaioth': ['boethian', 'nebaioth'], + 'nebalia': ['abelian', 'nebalia'], + 'nebelist': ['nebelist', 'stilbene', 'tensible'], + 'nebula': ['nebula', 'unable', 'unbale'], + 'nebulose': ['bluenose', 'nebulose'], + 'necator': ['enactor', 'necator', 'orcanet'], + 'necessarian': ['necessarian', 'renaissance'], + 'neckar': ['canker', 'neckar'], + 'necrogenic': ['congeneric', 'necrogenic'], + 'necrogenous': ['congenerous', 'necrogenous'], + 'necrology': ['crenology', 'necrology'], + 'necropoles': ['necropoles', 'preconsole'], + 'necropolis': ['clinospore', 'necropolis'], + 'necrotic': ['crocetin', 'necrotic'], + 'necrotomic': ['necrotomic', 'oncometric'], + 'necrotomy': ['necrotomy', 'normocyte', 'oncometry'], + 'nectar': ['canter', + 'creant', + 'cretan', + 'nectar', + 'recant', + 'tanrec', + 'trance'], + 'nectareal': ['lactarene', 'nectareal'], + 'nectared': ['crenated', 'decanter', 'nectared'], + 'nectareous': ['countersea', 'nectareous'], + 'nectarial': ['carnalite', 
'claretian', 'lacertian', 'nectarial'], + 'nectarian': ['cratinean', 'incarnate', 'nectarian'], + 'nectaried': ['nectaried', 'tridecane'], + 'nectarine': ['inertance', 'nectarine'], + 'nectarinia': ['natricinae', 'nectarinia'], + 'nectarious': ['nectarious', 'recusation'], + 'nectarlike': ['nectarlike', 'trancelike'], + 'nectarous': ['acentrous', 'courtesan', 'nectarous'], + 'nectary': ['encraty', 'nectary'], + 'nectophore': ['ctenophore', 'nectophore'], + 'nectria': ['centiar', 'certain', 'citrean', 'nacrite', 'nectria'], + 'ned': ['den', 'end', 'ned'], + 'nedder': ['nedder', 'redden'], + 'neebor': ['boreen', 'enrobe', 'neebor', 'rebone'], + 'need': ['dene', 'eden', 'need'], + 'needer': ['endere', 'needer', 'reeden'], + 'needfire': ['needfire', 'redefine'], + 'needily': ['needily', 'yielden'], + 'needle': ['lendee', 'needle'], + 'needless': ['needless', 'seldseen'], + 'needs': ['dense', 'needs'], + 'needsome': ['modenese', 'needsome'], + 'neeger': ['neeger', 'reenge', 'renege'], + 'neeld': ['leden', 'neeld'], + 'neep': ['neep', 'peen'], + 'neepour': ['neepour', 'neurope'], + 'neer': ['erne', 'neer', 'reen'], + 'neet': ['neet', 'nete', 'teen'], + 'neetup': ['neetup', 'petune'], + 'nef': ['fen', 'nef'], + 'nefast': ['fasten', 'nefast', 'stefan'], + 'neftgil': ['felting', 'neftgil'], + 'negate': ['geneat', 'negate', 'tegean'], + 'negation': ['antigone', 'negation'], + 'negative': ['agentive', 'negative'], + 'negativism': ['negativism', 'timesaving'], + 'negator': ['negator', 'tronage'], + 'negatron': ['argenton', 'negatron'], + 'neger': ['genre', 'green', 'neger', 'reneg'], + 'neglecter': ['neglecter', 'reneglect'], + 'negritian': ['negritian', 'retaining'], + 'negrito': ['ergotin', 'genitor', 'negrito', 'ogtiern', 'trigone'], + 'negritoid': ['negritoid', 'rodingite'], + 'negro': ['ergon', 'genro', 'goner', 'negro'], + 'negroid': ['groined', 'negroid'], + 'negroidal': ['girandole', 'negroidal'], + 'negroize': ['genizero', 'negroize'], + 'negroloid': ['gondolier', 'negroloid'], + 'negrotic': ['gerontic', 'negrotic'], + 'negundo': ['dungeon', 'negundo'], + 'negus': ['genus', 'negus'], + 'neif': ['enif', 'fine', 'neif', 'nife'], + 'neigh': ['hinge', 'neigh'], + 'neil': ['lien', 'line', 'neil', 'nile'], + 'neiper': ['neiper', 'perine', 'pirene', 'repine'], + 'neist': ['inset', 'neist', 'snite', 'stein', 'stine', 'tsine'], + 'neither': ['enherit', 'etherin', 'neither', 'therein'], + 'nekkar': ['kraken', 'nekkar'], + 'nekton': ['kenton', 'nekton'], + 'nelken': ['kennel', 'nelken'], + 'nelsonite': ['nelsonite', 'solentine'], + 'nelumbian': ['nelumbian', 'unminable'], + 'nema': ['amen', 'enam', 'mane', 'mean', 'name', 'nema'], + 'nemalite': ['melanite', 'meletian', 'metaline', 'nemalite'], + 'nematoda': ['mantodea', 'nematoda'], + 'nematoid': ['dominate', 'nematoid'], + 'nematophyton': ['nematophyton', 'tenontophyma'], + 'nemertinea': ['minnetaree', 'nemertinea'], + 'nemertini': ['intermine', 'nemertini', 'terminine'], + 'nemertoid': ['interdome', 'mordenite', 'nemertoid'], + 'nemoral': ['almoner', 'moneral', 'nemoral'], + 'nenta': ['anent', 'annet', 'nenta'], + 'neo': ['eon', 'neo', 'one'], + 'neoarctic': ['accretion', 'anorectic', 'neoarctic'], + 'neocomian': ['monoecian', 'neocomian'], + 'neocosmic': ['economics', 'neocosmic'], + 'neocyte': ['enocyte', 'neocyte'], + 'neogaea': ['eogaean', 'neogaea'], + 'neogenesis': ['neogenesis', 'noegenesis'], + 'neogenetic': ['neogenetic', 'noegenetic'], + 'neognathous': ['anthogenous', 'neognathous'], + 'neolatry': ['neolatry', 'ornately', 'tyrolean'], + 
'neolithic': ['ichnolite', 'neolithic'], + 'neomiracle': ['ceremonial', 'neomiracle'], + 'neomorphic': ['microphone', 'neomorphic'], + 'neon': ['neon', 'none'], + 'neophilism': ['neophilism', 'philoneism'], + 'neophytic': ['hypnoetic', 'neophytic'], + 'neoplasm': ['neoplasm', 'pleonasm', 'polesman', 'splenoma'], + 'neoplastic': ['neoplastic', 'pleonastic'], + 'neorama': ['neorama', 'romaean'], + 'neornithes': ['neornithes', 'rhinestone'], + 'neossin': ['neossin', 'sension'], + 'neoteric': ['erection', 'neoteric', 'nocerite', 'renotice'], + 'neoterism': ['moistener', 'neoterism'], + 'neotragus': ['argentous', 'neotragus'], + 'neotropic': ['ectropion', 'neotropic'], + 'neotropical': ['neotropical', 'percolation'], + 'neoza': ['neoza', 'ozena'], + 'nep': ['nep', 'pen'], + 'nepa': ['nape', 'neap', 'nepa', 'pane', 'pean'], + 'nepal': ['alpen', 'nepal', 'panel', 'penal', 'plane'], + 'nepali': ['alpine', 'nepali', 'penial', 'pineal'], + 'neper': ['neper', 'preen', 'repen'], + 'nepheloscope': ['nepheloscope', 'phonelescope'], + 'nephite': ['heptine', 'nephite'], + 'nephogram': ['gomphrena', 'nephogram'], + 'nephological': ['nephological', 'phenological'], + 'nephologist': ['nephologist', 'phenologist'], + 'nephology': ['nephology', 'phenology'], + 'nephria': ['heparin', 'nephria'], + 'nephric': ['nephric', 'phrenic', 'pincher'], + 'nephrite': ['nephrite', 'prehnite', 'trephine'], + 'nephritic': ['nephritic', 'phrenitic', 'prehnitic'], + 'nephritis': ['inspreith', 'nephritis', 'phrenitis'], + 'nephrocardiac': ['nephrocardiac', 'phrenocardiac'], + 'nephrocolic': ['nephrocolic', 'phrenocolic'], + 'nephrocystosis': ['cystonephrosis', 'nephrocystosis'], + 'nephrogastric': ['gastrophrenic', 'nephrogastric', 'phrenogastric'], + 'nephrohydrosis': ['hydronephrosis', 'nephrohydrosis'], + 'nephrolithotomy': ['lithonephrotomy', 'nephrolithotomy'], + 'nephrologist': ['nephrologist', 'phrenologist'], + 'nephrology': ['nephrology', 'phrenology'], + 'nephropathic': ['nephropathic', 'phrenopathic'], + 'nephropathy': ['nephropathy', 'phrenopathy'], + 'nephropsidae': ['nephropsidae', 'praesphenoid'], + 'nephroptosia': ['nephroptosia', 'prosiphonate'], + 'nephropyelitis': ['nephropyelitis', 'pyelonephritis'], + 'nephropyosis': ['nephropyosis', 'pyonephrosis'], + 'nephrosis': ['nephrosis', 'phronesis'], + 'nephrostoma': ['nephrostoma', 'strophomena'], + 'nephrotome': ['nephrotome', 'phonometer'], + 'nephrotomy': ['nephrotomy', 'phonometry'], + 'nepman': ['nepman', 'penman'], + 'nepotal': ['lepanto', 'nepotal', 'petalon', 'polenta'], + 'nepote': ['nepote', 'pontee', 'poteen'], + 'nepotic': ['entopic', 'nepotic', 'pentoic'], + 'nereid': ['denier', 'nereid'], + 'nereis': ['inseer', 'nereis', 'seiner', 'serine', 'sirene'], + 'neri': ['neri', 'rein', 'rine'], + 'nerita': ['nerita', 'ratine', 'retain', 'retina', 'tanier'], + 'neritic': ['citrine', 'crinite', 'inciter', 'neritic'], + 'neritina': ['neritina', 'retinian'], + 'neritoid': ['neritoid', 'retinoid'], + 'nerium': ['murine', 'nerium'], + 'neroic': ['cerion', 'coiner', 'neroic', 'orcein', 'recoin'], + 'neronic': ['corinne', 'cornein', 'neronic'], + 'nerval': ['nerval', 'vernal'], + 'nervate': ['nervate', 'veteran'], + 'nervation': ['nervation', 'vernation'], + 'nerve': ['nerve', 'never'], + 'nervid': ['driven', 'nervid', 'verdin'], + 'nervine': ['innerve', 'nervine', 'vernine'], + 'nerviness': ['inverness', 'nerviness'], + 'nervish': ['nervish', 'shriven'], + 'nervulose': ['nervulose', 'unresolve', 'vulnerose'], + 'nese': ['ense', 'esne', 'nese', 'seen', 'snee'], + 
'nesh': ['nesh', 'shen'], + 'nesiot': ['nesiot', 'ostein'], + 'neskhi': ['kishen', 'neskhi'], + 'neslia': ['alsine', 'neslia', 'saline', 'selina', 'silane'], + 'nest': ['nest', 'sent', 'sten'], + 'nester': ['ernest', 'nester', 'resent', 'streen'], + 'nestiatria': ['intarsiate', 'nestiatria'], + 'nestlike': ['nestlike', 'skeletin'], + 'nestor': ['nestor', 'sterno', 'stoner', 'strone', 'tensor'], + 'net': ['net', 'ten'], + 'netcha': ['entach', 'netcha'], + 'nete': ['neet', 'nete', 'teen'], + 'neter': ['enter', 'neter', 'renet', 'terne', 'treen'], + 'netful': ['fluent', 'netful', 'unfelt', 'unleft'], + 'neth': ['hent', 'neth', 'then'], + 'nether': ['erthen', 'henter', 'nether', 'threne'], + 'neti': ['iten', 'neti', 'tien', 'tine'], + 'netman': ['manent', 'netman'], + 'netsuke': ['kuneste', 'netsuke'], + 'nettable': ['nettable', 'tentable'], + 'nettapus': ['nettapus', 'stepaunt'], + 'netted': ['detent', 'netted', 'tented'], + 'netter': ['netter', 'retent', 'tenter'], + 'nettion': ['nettion', 'tention', 'tontine'], + 'nettle': ['letten', 'nettle'], + 'nettler': ['nettler', 'ternlet'], + 'netty': ['netty', 'tenty'], + 'neurad': ['endura', 'neurad', 'undear', 'unread'], + 'neural': ['lunare', 'neural', 'ulnare', 'unreal'], + 'neuralgic': ['genicular', 'neuralgic'], + 'neuralist': ['neuralist', 'ulsterian', 'unrealist'], + 'neurectopia': ['eucatropine', 'neurectopia'], + 'neuric': ['curine', 'erucin', 'neuric'], + 'neurilema': ['lemurinae', 'neurilema'], + 'neurin': ['enruin', 'neurin', 'unrein'], + 'neurism': ['neurism', 'semiurn'], + 'neurite': ['neurite', 'retinue', 'reunite', 'uterine'], + 'neuroblast': ['neuroblast', 'unsortable'], + 'neurodermatosis': ['dermatoneurosis', 'neurodermatosis'], + 'neurofibroma': ['fibroneuroma', 'neurofibroma'], + 'neurofil': ['fluorine', 'neurofil'], + 'neuroganglion': ['ganglioneuron', 'neuroganglion'], + 'neurogenic': ['encoignure', 'neurogenic'], + 'neuroid': ['dourine', 'neuroid'], + 'neurolysis': ['neurolysis', 'resinously'], + 'neuromast': ['anoestrum', 'neuromast'], + 'neuromyelitis': ['myeloneuritis', 'neuromyelitis'], + 'neuronal': ['enaluron', 'neuronal'], + 'neurope': ['neepour', 'neurope'], + 'neuropsychological': ['neuropsychological', 'psychoneurological'], + 'neuropsychosis': ['neuropsychosis', 'psychoneurosis'], + 'neuropteris': ['interposure', 'neuropteris'], + 'neurosis': ['neurosis', 'resinous'], + 'neurotic': ['eruction', 'neurotic'], + 'neurotripsy': ['neurotripsy', 'tripyrenous'], + 'neustrian': ['neustrian', 'saturnine', 'sturninae'], + 'neuter': ['neuter', 'retune', 'runtee', 'tenure', 'tureen'], + 'neuterly': ['neuterly', 'rutylene'], + 'neutral': ['laurent', 'neutral', 'unalert'], + 'neutralism': ['neutralism', 'trimensual'], + 'neutrally': ['neutrally', 'unalertly'], + 'neutralness': ['neutralness', 'unalertness'], + 'nevada': ['nevada', 'vedana', 'venada'], + 'neve': ['even', 'neve', 'veen'], + 'never': ['nerve', 'never'], + 'nevo': ['nevo', 'oven'], + 'nevoy': ['envoy', 'nevoy', 'yoven'], + 'nevus': ['nevus', 'venus'], + 'new': ['new', 'wen'], + 'newar': ['awner', 'newar'], + 'newari': ['newari', 'wainer'], + 'news': ['news', 'sewn', 'snew'], + 'newt': ['newt', 'went'], + 'nexus': ['nexus', 'unsex'], + 'ngai': ['gain', 'inga', 'naig', 'ngai'], + 'ngaio': ['gonia', 'ngaio', 'nogai'], + 'ngapi': ['aping', 'ngapi', 'pangi'], + 'ngoko': ['kongo', 'ngoko'], + 'ni': ['in', 'ni'], + 'niagara': ['agrania', 'angaria', 'niagara'], + 'nias': ['anis', 'nais', 'nasi', 'nias', 'sain', 'sina'], + 'niata': ['anita', 'niata', 'tania'], + 'nib': 
['bin', 'nib'], + 'nibs': ['nibs', 'snib'], + 'nibsome': ['nibsome', 'nimbose'], + 'nicarao': ['aaronic', 'nicarao', 'ocarina'], + 'niccolous': ['niccolous', 'occlusion'], + 'nice': ['cine', 'nice'], + 'nicene': ['cinene', 'nicene'], + 'nicenist': ['inscient', 'nicenist'], + 'nicesome': ['nicesome', 'semicone'], + 'nichael': ['chilean', 'echinal', 'nichael'], + 'niche': ['chien', 'chine', 'niche'], + 'nicher': ['enrich', 'nicher', 'richen'], + 'nicholas': ['lichanos', 'nicholas'], + 'nickel': ['nickel', 'nickle'], + 'nickle': ['nickel', 'nickle'], + 'nicol': ['colin', 'nicol'], + 'nicolas': ['nicolas', 'scaloni'], + 'nicolette': ['lecontite', 'nicolette'], + 'nicotian': ['aconitin', 'inaction', 'nicotian'], + 'nicotianin': ['nicotianin', 'nicotinian'], + 'nicotined': ['incondite', 'nicotined'], + 'nicotinian': ['nicotianin', 'nicotinian'], + 'nicotism': ['monistic', 'nicotism', 'nomistic'], + 'nicotize': ['nicotize', 'tonicize'], + 'nictate': ['nictate', 'tetanic'], + 'nictation': ['antitonic', 'nictation'], + 'nid': ['din', 'ind', 'nid'], + 'nidal': ['danli', 'ladin', 'linda', 'nidal'], + 'nidana': ['andian', 'danian', 'nidana'], + 'nidation': ['nidation', 'notidani'], + 'niddle': ['dindle', 'niddle'], + 'nide': ['dine', 'enid', 'inde', 'nide'], + 'nidge': ['deign', 'dinge', 'nidge'], + 'nidget': ['nidget', 'tinged'], + 'niding': ['dining', 'indign', 'niding'], + 'nidologist': ['indologist', 'nidologist'], + 'nidology': ['indology', 'nidology'], + 'nidularia': ['nidularia', 'uniradial'], + 'nidulate': ['nidulate', 'untailed'], + 'nidus': ['dinus', 'indus', 'nidus'], + 'niello': ['lionel', 'niello'], + 'niels': ['elsin', 'lenis', 'niels', 'silen', 'sline'], + 'nieve': ['nieve', 'venie'], + 'nieveta': ['naivete', 'nieveta'], + 'nievling': ['levining', 'nievling'], + 'nife': ['enif', 'fine', 'neif', 'nife'], + 'nifle': ['elfin', 'nifle'], + 'nig': ['gin', 'ing', 'nig'], + 'nigel': ['ingle', 'ligne', 'linge', 'nigel'], + 'nigella': ['gallein', 'galline', 'nigella'], + 'nigerian': ['arginine', 'nigerian'], + 'niggard': ['grading', 'niggard'], + 'nigger': ['ginger', 'nigger'], + 'niggery': ['gingery', 'niggery'], + 'nigh': ['hing', 'nigh'], + 'night': ['night', 'thing'], + 'nightless': ['lightness', 'nightless', 'thingless'], + 'nightlike': ['nightlike', 'thinglike'], + 'nightly': ['nightly', 'thingly'], + 'nightman': ['nightman', 'thingman'], + 'nignye': ['ginney', 'nignye'], + 'nigori': ['nigori', 'origin'], + 'nigre': ['grein', 'inger', 'nigre', 'regin', 'reign', 'ringe'], + 'nigrous': ['nigrous', 'rousing', 'souring'], + 'nihal': ['linha', 'nihal'], + 'nikau': ['kunai', 'nikau'], + 'nil': ['lin', 'nil'], + 'nile': ['lien', 'line', 'neil', 'nile'], + 'nilgai': ['ailing', 'angili', 'nilgai'], + 'nilometer': ['linometer', 'nilometer'], + 'niloscope': ['niloscope', 'scopoline'], + 'nilotic': ['clition', 'nilotic'], + 'nilous': ['insoul', 'linous', 'nilous', 'unsoil'], + 'nim': ['min', 'nim'], + 'nimbed': ['embind', 'nimbed'], + 'nimbose': ['nibsome', 'nimbose'], + 'nimkish': ['minkish', 'nimkish'], + 'nimshi': ['minish', 'nimshi'], + 'nina': ['nain', 'nina'], + 'ninescore': ['ninescore', 'recension'], + 'nineted': ['dentine', 'nineted'], + 'ninevite': ['ninevite', 'nivenite'], + 'ningpo': ['ningpo', 'pignon'], + 'nintu': ['nintu', 'ninut', 'untin'], + 'ninut': ['nintu', 'ninut', 'untin'], + 'niota': ['niota', 'taino'], + 'nip': ['nip', 'pin'], + 'nipa': ['nipa', 'pain', 'pani', 'pian', 'pina'], + 'nippers': ['nippers', 'snipper'], + 'nipple': ['lippen', 'nipple'], + 'nipter': ['nipter', 
'terpin'], + 'nisaean': ['nisaean', 'sinaean'], + 'nisqualli': ['nisqualli', 'squillian'], + 'nisus': ['nisus', 'sinus'], + 'nit': ['nit', 'tin'], + 'nitch': ['chint', 'nitch'], + 'nitella': ['nitella', 'tellina'], + 'nitently': ['intently', 'nitently'], + 'niter': ['inert', 'inter', 'niter', 'retin', 'trine'], + 'nitered': ['nitered', 'redient', 'teinder'], + 'nither': ['hinter', 'nither', 'theirn'], + 'nito': ['into', 'nito', 'oint', 'tino'], + 'niton': ['niton', 'noint'], + 'nitrate': ['intreat', 'iterant', 'nitrate', 'tertian'], + 'nitratine': ['itinerant', 'nitratine'], + 'nitric': ['citrin', 'nitric'], + 'nitride': ['inditer', 'nitride'], + 'nitrifaction': ['antifriction', 'nitrifaction'], + 'nitriot': ['introit', 'nitriot'], + 'nitrobenzol': ['benzonitrol', 'nitrobenzol'], + 'nitrogelatin': ['intolerating', 'nitrogelatin'], + 'nitrosate': ['nitrosate', 'stationer'], + 'nitrous': ['nitrous', 'trusion'], + 'nitter': ['nitter', 'tinter'], + 'nitty': ['nitty', 'tinty'], + 'niue': ['niue', 'unie'], + 'nival': ['alvin', 'anvil', 'nival', 'vinal'], + 'nivenite': ['ninevite', 'nivenite'], + 'niveous': ['envious', 'niveous', 'veinous'], + 'nivosity': ['nivosity', 'vinosity'], + 'nizam': ['nazim', 'nizam'], + 'no': ['no', 'on'], + 'noa': ['noa', 'ona'], + 'noachite': ['inchoate', 'noachite'], + 'noah': ['hano', 'noah'], + 'noahic': ['chinoa', 'noahic'], + 'noam': ['mano', 'moan', 'mona', 'noam', 'noma', 'oman'], + 'nob': ['bon', 'nob'], + 'nobleman': ['blennoma', 'nobleman'], + 'noblesse': ['boneless', 'noblesse'], + 'nobs': ['bosn', 'nobs', 'snob'], + 'nocardia': ['nocardia', 'orcadian'], + 'nocent': ['nocent', 'nocten'], + 'nocerite': ['erection', 'neoteric', 'nocerite', 'renotice'], + 'nock': ['conk', 'nock'], + 'nocten': ['nocent', 'nocten'], + 'noctiluca': ['ciclatoun', 'noctiluca'], + 'noctuid': ['conduit', 'duction', 'noctuid'], + 'noctuidae': ['coadunite', 'education', 'noctuidae'], + 'nocturia': ['curation', 'nocturia'], + 'nod': ['don', 'nod'], + 'nodal': ['donal', 'nodal'], + 'nodated': ['donated', 'nodated'], + 'node': ['done', 'node'], + 'nodi': ['dion', 'nodi', 'odin'], + 'nodiak': ['daikon', 'nodiak'], + 'nodical': ['dolcian', 'nodical'], + 'nodicorn': ['corindon', 'nodicorn'], + 'nodule': ['louden', 'nodule'], + 'nodus': ['nodus', 'ounds', 'sound'], + 'noegenesis': ['neogenesis', 'noegenesis'], + 'noegenetic': ['neogenetic', 'noegenetic'], + 'noel': ['elon', 'enol', 'leno', 'leon', 'lone', 'noel'], + 'noetic': ['eciton', 'noetic', 'notice', 'octine'], + 'noetics': ['contise', 'noetics', 'section'], + 'nog': ['gon', 'nog'], + 'nogai': ['gonia', 'ngaio', 'nogai'], + 'nogal': ['along', 'gonal', 'lango', 'longa', 'nogal'], + 'noil': ['lino', 'lion', 'loin', 'noil'], + 'noilage': ['goniale', 'noilage'], + 'noiler': ['elinor', 'lienor', 'lorien', 'noiler'], + 'noint': ['niton', 'noint'], + 'noir': ['inro', 'iron', 'noir', 'nori'], + 'noise': ['eosin', 'noise'], + 'noiseless': ['noiseless', 'selenosis'], + 'noisette': ['noisette', 'teosinte'], + 'nolo': ['loon', 'nolo'], + 'noma': ['mano', 'moan', 'mona', 'noam', 'noma', 'oman'], + 'nomad': ['damon', 'monad', 'nomad'], + 'nomadian': ['monadina', 'nomadian'], + 'nomadic': ['monadic', 'nomadic'], + 'nomadical': ['monadical', 'nomadical'], + 'nomadically': ['monadically', 'nomadically'], + 'nomadism': ['monadism', 'nomadism'], + 'nomarch': ['monarch', 'nomarch', 'onmarch'], + 'nomarchy': ['monarchy', 'nomarchy'], + 'nome': ['mone', 'nome', 'omen'], + 'nomeus': ['nomeus', 'unsome'], + 'nomial': ['monial', 'nomial', 'oilman'], + 'nomina': 
['amnion', 'minoan', 'nomina'], + 'nominate': ['antinome', 'nominate'], + 'nominated': ['dentinoma', 'nominated'], + 'nominature': ['nominature', 'numeration'], + 'nomism': ['monism', 'nomism', 'simmon'], + 'nomismata': ['anatomism', 'nomismata'], + 'nomistic': ['monistic', 'nicotism', 'nomistic'], + 'nomocracy': ['monocracy', 'nomocracy'], + 'nomogenist': ['monogenist', 'nomogenist'], + 'nomogenous': ['monogenous', 'nomogenous'], + 'nomogeny': ['monogeny', 'nomogeny'], + 'nomogram': ['monogram', 'nomogram'], + 'nomograph': ['monograph', 'nomograph', 'phonogram'], + 'nomographer': ['geranomorph', 'monographer', 'nomographer'], + 'nomographic': ['gramophonic', 'monographic', 'nomographic', 'phonogramic'], + 'nomographical': ['gramophonical', 'monographical', 'nomographical'], + 'nomographically': ['gramophonically', + 'monographically', + 'nomographically', + 'phonogramically'], + 'nomography': ['monography', 'nomography'], + 'nomological': ['monological', 'nomological'], + 'nomologist': ['monologist', 'nomologist', 'ontologism'], + 'nomology': ['monology', 'nomology'], + 'nomophyllous': ['monophyllous', 'nomophyllous'], + 'nomotheism': ['monotheism', 'nomotheism'], + 'nomothetic': ['monothetic', 'nomothetic'], + 'nona': ['anon', 'nona', 'onan'], + 'nonaccession': ['connoissance', 'nonaccession'], + 'nonact': ['cannot', 'canton', 'conant', 'nonact'], + 'nonaction': ['connation', 'nonaction'], + 'nonagent': ['nonagent', 'tannogen'], + 'nonaid': ['adonin', 'nanoid', 'nonaid'], + 'nonaltruistic': ['instructional', 'nonaltruistic'], + 'nonanimal': ['nonanimal', 'nonmanila'], + 'nonbilabiate': ['inobtainable', 'nonbilabiate'], + 'noncaste': ['noncaste', 'tsonecan'], + 'noncereal': ['aleconner', 'noncereal'], + 'noncertified': ['noncertified', 'nonrectified'], + 'nonclaim': ['cinnamol', 'nonclaim'], + 'noncreation': ['noncreation', 'nonreaction'], + 'noncreative': ['noncreative', 'nonreactive'], + 'noncurantist': ['noncurantist', 'unconstraint'], + 'nonda': ['donna', 'nonda'], + 'nondesecration': ['nondesecration', 'recondensation'], + 'none': ['neon', 'none'], + 'nonempirical': ['nonempirical', 'prenominical'], + 'nonerudite': ['nonerudite', 'unoriented'], + 'nonesuch': ['nonesuch', 'unchosen'], + 'nonet': ['nonet', 'tenon'], + 'nonfertile': ['florentine', 'nonfertile'], + 'nongeometrical': ['inconglomerate', 'nongeometrical'], + 'nonglare': ['algernon', 'nonglare'], + 'nongod': ['dongon', 'nongod'], + 'nonhepatic': ['nonhepatic', 'pantheonic'], + 'nonic': ['conin', 'nonic', 'oncin'], + 'nonideal': ['anneloid', 'nonideal'], + 'nonidealist': ['alstonidine', 'nonidealist'], + 'nonirate': ['anointer', 'inornate', 'nonirate', 'reanoint'], + 'nonius': ['nonius', 'unison'], + 'nonlegato': ['nonlegato', 'ontogenal'], + 'nonlegume': ['melungeon', 'nonlegume'], + 'nonliable': ['bellonian', 'nonliable'], + 'nonlicet': ['contline', 'nonlicet'], + 'nonly': ['nonly', 'nonyl', 'nylon'], + 'nonmanila': ['nonanimal', 'nonmanila'], + 'nonmarital': ['antinormal', 'nonmarital', 'nonmartial'], + 'nonmartial': ['antinormal', 'nonmarital', 'nonmartial'], + 'nonmatter': ['nonmatter', 'remontant'], + 'nonmetric': ['comintern', 'nonmetric'], + 'nonmetrical': ['centinormal', 'conterminal', 'nonmetrical'], + 'nonmolar': ['nonmolar', 'nonmoral'], + 'nonmoral': ['nonmolar', 'nonmoral'], + 'nonnat': ['nonnat', 'nontan'], + 'nonoriental': ['nonoriental', 'nonrelation'], + 'nonpaid': ['dipnoan', 'nonpaid', 'pandion'], + 'nonpar': ['napron', 'nonpar'], + 'nonparental': ['nonparental', 'nonpaternal'], + 'nonpaternal': 
['nonparental', 'nonpaternal'], + 'nonpearlitic': ['nonpearlitic', 'pratincoline'], + 'nonpenal': ['nonpenal', 'nonplane'], + 'nonplane': ['nonpenal', 'nonplane'], + 'nonracial': ['carniolan', 'nonracial'], + 'nonrated': ['nonrated', 'nontrade'], + 'nonreaction': ['noncreation', 'nonreaction'], + 'nonreactive': ['noncreative', 'nonreactive'], + 'nonrebel': ['ennobler', 'nonrebel'], + 'nonrecital': ['interconal', 'nonrecital'], + 'nonrectified': ['noncertified', 'nonrectified'], + 'nonrelation': ['nonoriental', 'nonrelation'], + 'nonreserve': ['nonreserve', 'nonreverse'], + 'nonreverse': ['nonreserve', 'nonreverse'], + 'nonrigid': ['girondin', 'nonrigid'], + 'nonsanction': ['inconsonant', 'nonsanction'], + 'nonscientist': ['inconsistent', 'nonscientist'], + 'nonsecret': ['consenter', 'nonsecret', 'reconsent'], + 'nontan': ['nonnat', 'nontan'], + 'nontrade': ['nonrated', 'nontrade'], + 'nonunited': ['nonunited', 'unintoned'], + 'nonuse': ['nonuse', 'unnose'], + 'nonvaginal': ['nonvaginal', 'novanglian'], + 'nonvisitation': ['innovationist', 'nonvisitation'], + 'nonya': ['annoy', 'nonya'], + 'nonyl': ['nonly', 'nonyl', 'nylon'], + 'nooking': ['kongoni', 'nooking'], + 'noontide': ['noontide', 'notioned'], + 'noontime': ['entomion', 'noontime'], + 'noop': ['noop', 'poon'], + 'noose': ['noose', 'osone'], + 'nooser': ['nooser', 'seroon', 'sooner'], + 'nopal': ['lapon', 'nopal'], + 'nope': ['nope', 'open', 'peon', 'pone'], + 'nor': ['nor', 'ron'], + 'nora': ['nora', 'orna', 'roan'], + 'norah': ['nahor', 'norah', 'rohan'], + 'norate': ['atoner', 'norate', 'ornate'], + 'noration': ['noration', 'ornation', 'orotinan'], + 'nordic': ['dornic', 'nordic'], + 'nordicity': ['nordicity', 'tyrocidin'], + 'noreast': ['noreast', 'rosetan', 'seatron', 'senator', 'treason'], + 'nori': ['inro', 'iron', 'noir', 'nori'], + 'noria': ['arion', 'noria'], + 'noric': ['corin', 'noric', 'orcin'], + 'norie': ['irone', 'norie'], + 'norite': ['norite', 'orient'], + 'norm': ['morn', 'norm'], + 'norma': ['manor', 'moran', 'norma', 'ramon', 'roman'], + 'normality': ['normality', 'trionymal'], + 'normated': ['moderant', 'normated'], + 'normless': ['mornless', 'normless'], + 'normocyte': ['necrotomy', 'normocyte', 'oncometry'], + 'norse': ['norse', 'noser', 'seron', 'snore'], + 'norsk': ['norsk', 'snork'], + 'north': ['north', 'thorn'], + 'norther': ['horrent', 'norther'], + 'northing': ['inthrong', 'northing'], + 'nosairi': ['nosairi', 'osirian'], + 'nose': ['enos', 'nose'], + 'nosean': ['nosean', 'oannes'], + 'noseless': ['noseless', 'soleness'], + 'noselite': ['noselite', 'solenite'], + 'nosema': ['monase', 'nosema'], + 'noser': ['norse', 'noser', 'seron', 'snore'], + 'nosesmart': ['nosesmart', 'storesman'], + 'nosism': ['nosism', 'simson'], + 'nosomania': ['nanosomia', 'nosomania'], + 'nostalgia': ['analogist', 'nostalgia'], + 'nostalgic': ['gnostical', 'nostalgic'], + 'nostic': ['nostic', 'sintoc', 'tocsin'], + 'nostoc': ['nostoc', 'oncost'], + 'nosu': ['nosu', 'nous', 'onus'], + 'not': ['not', 'ton'], + 'notability': ['bitonality', 'notability'], + 'notaeal': ['anatole', 'notaeal'], + 'notaeum': ['notaeum', 'outname'], + 'notal': ['notal', 'ontal', 'talon', 'tolan', 'tonal'], + 'notalgia': ['galtonia', 'notalgia'], + 'notalia': ['ailanto', 'alation', 'laotian', 'notalia'], + 'notan': ['anton', 'notan', 'tonna'], + 'notarial': ['notarial', 'rational', 'rotalian'], + 'notarially': ['notarially', 'rationally'], + 'notariate': ['notariate', 'rationate'], + 'notation': ['notation', 'tonation'], + 'notator': ['arnotto', 
'notator'], + 'notcher': ['chorten', 'notcher'], + 'note': ['note', 'tone'], + 'noted': ['donet', 'noted', 'toned'], + 'notehead': ['headnote', 'notehead'], + 'noteless': ['noteless', 'toneless'], + 'notelessly': ['notelessly', 'tonelessly'], + 'notelessness': ['notelessness', 'tonelessness'], + 'noter': ['noter', 'tenor', 'toner', 'trone'], + 'nother': ['hornet', 'nother', 'theron', 'throne'], + 'nothous': ['hontous', 'nothous'], + 'notice': ['eciton', 'noetic', 'notice', 'octine'], + 'noticer': ['cerotin', 'cointer', 'cotrine', 'cretion', 'noticer', 'rection'], + 'notidani': ['nidation', 'notidani'], + 'notify': ['notify', 'tonify'], + 'notioned': ['noontide', 'notioned'], + 'notochordal': ['chordotonal', 'notochordal'], + 'notopterus': ['notopterus', 'portentous'], + 'notorhizal': ['horizontal', 'notorhizal'], + 'nototrema': ['antrotome', 'nototrema'], + 'notour': ['notour', 'unroot'], + 'notropis': ['notropis', 'positron', 'sorption'], + 'notum': ['montu', 'mount', 'notum'], + 'notus': ['notus', 'snout', 'stoun', 'tonus'], + 'nought': ['hognut', 'nought'], + 'noup': ['noup', 'puno', 'upon'], + 'nourisher': ['nourisher', 'renourish'], + 'nous': ['nosu', 'nous', 'onus'], + 'novalia': ['novalia', 'valonia'], + 'novanglian': ['nonvaginal', 'novanglian'], + 'novem': ['novem', 'venom'], + 'novitiate': ['evitation', 'novitiate'], + 'now': ['now', 'own', 'won'], + 'nowanights': ['nowanights', 'washington'], + 'nowed': ['endow', 'nowed'], + 'nowhere': ['nowhere', 'whereon'], + 'nowise': ['nowise', 'snowie'], + 'nowness': ['nowness', 'ownness'], + 'nowt': ['nowt', 'town', 'wont'], + 'noxa': ['axon', 'noxa', 'oxan'], + 'noy': ['noy', 'yon'], + 'nozi': ['nozi', 'zion'], + 'nu': ['nu', 'un'], + 'nub': ['bun', 'nub'], + 'nuba': ['baun', 'buna', 'nabu', 'nuba'], + 'nubian': ['nubian', 'unbain'], + 'nubilate': ['antiblue', 'nubilate'], + 'nubile': ['nubile', 'unible'], + 'nucal': ['lucan', 'nucal'], + 'nucellar': ['lucernal', 'nucellar', 'uncellar'], + 'nuchal': ['chulan', 'launch', 'nuchal'], + 'nuciferous': ['nuciferous', 'unciferous'], + 'nuciform': ['nuciform', 'unciform'], + 'nuclear': ['crenula', 'lucarne', 'nuclear', 'unclear'], + 'nucleator': ['nucleator', 'recountal'], + 'nucleoid': ['nucleoid', 'uncoiled'], + 'nuclide': ['include', 'nuclide'], + 'nuculid': ['nuculid', 'unlucid'], + 'nuculidae': ['duculinae', 'nuculidae'], + 'nudate': ['nudate', 'undate'], + 'nuddle': ['ludden', 'nuddle'], + 'nude': ['dune', 'nude', 'unde'], + 'nudeness': ['nudeness', 'unsensed'], + 'nudger': ['dunger', 'gerund', 'greund', 'nudger'], + 'nudist': ['dustin', 'nudist'], + 'nudity': ['nudity', 'untidy'], + 'nuisancer': ['insurance', 'nuisancer'], + 'numa': ['maun', 'numa'], + 'numberer': ['numberer', 'renumber'], + 'numda': ['maund', 'munda', 'numda', 'undam', 'unmad'], + 'numeration': ['nominature', 'numeration'], + 'numerical': ['ceruminal', 'melanuric', 'numerical'], + 'numerist': ['numerist', 'terminus'], + 'numida': ['numida', 'unmaid'], + 'numidae': ['numidae', 'unaimed'], + 'nummi': ['mnium', 'nummi'], + 'nunciate': ['nunciate', 'uncinate'], + 'nuncio': ['nuncio', 'uncoin'], + 'nuncioship': ['nuncioship', 'pincushion'], + 'nunki': ['nunki', 'unkin'], + 'nunlet': ['nunlet', 'tunnel', 'unlent'], + 'nunlike': ['nunlike', 'unliken'], + 'nunnated': ['nunnated', 'untanned'], + 'nunni': ['nunni', 'uninn'], + 'nuptial': ['nuptial', 'unplait'], + 'nurse': ['nurse', 'resun'], + 'nusfiah': ['faunish', 'nusfiah'], + 'nut': ['nut', 'tun'], + 'nutarian': ['nutarian', 'turanian'], + 'nutate': ['attune', 'nutate', 
'tauten'], + 'nutgall': ['gallnut', 'nutgall'], + 'nuthatch': ['nuthatch', 'unthatch'], + 'nutlike': ['nutlike', 'tunlike'], + 'nutmeg': ['gnetum', 'nutmeg'], + 'nutramin': ['nutramin', 'ruminant'], + 'nutrice': ['nutrice', 'teucrin'], + 'nycteridae': ['encyrtidae', 'nycteridae'], + 'nycterine': ['nycterine', 'renitency'], + 'nycteris': ['nycteris', 'stycerin'], + 'nycturia': ['nycturia', 'tunicary'], + 'nye': ['eyn', 'nye', 'yen'], + 'nylast': ['nylast', 'stanly'], + 'nylon': ['nonly', 'nonyl', 'nylon'], + 'nymphalidae': ['lymphadenia', 'nymphalidae'], + 'nyroca': ['canroy', 'crayon', 'cyrano', 'nyroca'], + 'nystagmic': ['gymnastic', 'nystagmic'], + 'oak': ['ako', 'koa', 'oak', 'oka'], + 'oaky': ['kayo', 'oaky'], + 'oam': ['mao', 'oam'], + 'oannes': ['nosean', 'oannes'], + 'oar': ['aro', 'oar', 'ora'], + 'oared': ['adore', 'oared', 'oread'], + 'oaric': ['cairo', 'oaric'], + 'oaritis': ['isotria', 'oaritis'], + 'oarium': ['mariou', 'oarium'], + 'oarless': ['lassoer', 'oarless', 'rosales'], + 'oarman': ['oarman', 'ramona'], + 'oasal': ['alosa', 'loasa', 'oasal'], + 'oasis': ['oasis', 'sosia'], + 'oast': ['oast', 'stoa', 'taos'], + 'oat': ['oat', 'tao', 'toa'], + 'oatbin': ['batino', 'oatbin', 'obtain'], + 'oaten': ['atone', 'oaten'], + 'oatlike': ['keitloa', 'oatlike'], + 'obclude': ['becloud', 'obclude'], + 'obeah': ['bahoe', 'bohea', 'obeah'], + 'obeisant': ['obeisant', 'sabotine'], + 'obelial': ['bolelia', 'lobelia', 'obelial'], + 'obeliscal': ['escobilla', 'obeliscal'], + 'obelus': ['besoul', 'blouse', 'obelus'], + 'oberon': ['borneo', 'oberon'], + 'obi': ['ibo', 'obi'], + 'obispo': ['boopis', 'obispo'], + 'obit': ['bito', 'obit'], + 'objectative': ['objectative', 'objectivate'], + 'objectivate': ['objectative', 'objectivate'], + 'oblate': ['lobate', 'oblate'], + 'oblately': ['lobately', 'oblately'], + 'oblation': ['boltonia', 'lobation', 'oblation'], + 'obligant': ['bloating', 'obligant'], + 'obliviality': ['obliviality', 'violability'], + 'obol': ['bolo', 'bool', 'lobo', 'obol'], + 'obscurant': ['obscurant', 'subcantor'], + 'obscurantic': ['obscurantic', 'subnarcotic'], + 'obscurantist': ['obscurantist', 'substraction'], + 'obscure': ['bescour', 'buceros', 'obscure'], + 'obscurer': ['crebrous', 'obscurer'], + 'obsecrate': ['bracteose', 'obsecrate'], + 'observe': ['observe', 'obverse', 'verbose'], + 'obsessor': ['berossos', 'obsessor'], + 'obstinate': ['bastionet', 'obstinate'], + 'obtain': ['batino', 'oatbin', 'obtain'], + 'obtainal': ['ablation', 'obtainal'], + 'obtainer': ['abrotine', 'baritone', 'obtainer', 'reobtain'], + 'obtrude': ['doubter', 'obtrude', 'outbred', 'redoubt'], + 'obtruncation': ['conturbation', 'obtruncation'], + 'obturate': ['obturate', 'tabouret'], + 'obverse': ['observe', 'obverse', 'verbose'], + 'obversely': ['obversely', 'verbosely'], + 'ocarina': ['aaronic', 'nicarao', 'ocarina'], + 'occasioner': ['occasioner', 'reoccasion'], + 'occipitofrontal': ['frontooccipital', 'occipitofrontal'], + 'occipitotemporal': ['occipitotemporal', 'temporooccipital'], + 'occlusion': ['niccolous', 'occlusion'], + 'occurrent': ['cocurrent', 'occurrent', 'uncorrect'], + 'ocean': ['acone', 'canoe', 'ocean'], + 'oceanet': ['acetone', 'oceanet'], + 'oceanic': ['cocaine', 'oceanic'], + 'ocellar': ['collare', 'corella', 'ocellar'], + 'ocellate': ['collatee', 'ocellate'], + 'ocellated': ['decollate', 'ocellated'], + 'ocelli': ['collie', 'ocelli'], + 'och': ['cho', 'och'], + 'ocher': ['chore', 'ocher'], + 'ocherous': ['ocherous', 'ochreous'], + 'ochidore': ['choreoid', 'ochidore'], + 
'ochlesis': ['helcosis', 'ochlesis'], + 'ochlesitic': ['cochleitis', 'ochlesitic'], + 'ochletic': ['helcotic', 'lochetic', 'ochletic'], + 'ochlocrat': ['colcothar', 'ochlocrat'], + 'ochrea': ['chorea', 'ochrea', 'rochea'], + 'ochreous': ['ocherous', 'ochreous'], + 'ochroid': ['choroid', 'ochroid'], + 'ochroma': ['amchoor', 'ochroma'], + 'ocht': ['coth', 'ocht'], + 'ocque': ['coque', 'ocque'], + 'ocreated': ['decorate', 'ocreated'], + 'octadic': ['cactoid', 'octadic'], + 'octaeteric': ['ecorticate', 'octaeteric'], + 'octakishexahedron': ['hexakisoctahedron', 'octakishexahedron'], + 'octan': ['acton', 'canto', 'octan'], + 'octandrian': ['dracontian', 'octandrian'], + 'octarius': ['cotarius', 'octarius', 'suctoria'], + 'octastrophic': ['octastrophic', 'postthoracic'], + 'octave': ['avocet', 'octave', 'vocate'], + 'octavian': ['octavian', 'octavina', 'vacation'], + 'octavina': ['octavian', 'octavina', 'vacation'], + 'octenary': ['enactory', 'octenary'], + 'octet': ['cotte', 'octet'], + 'octillion': ['cotillion', 'octillion'], + 'octine': ['eciton', 'noetic', 'notice', 'octine'], + 'octometer': ['octometer', 'rectotome', 'tocometer'], + 'octonal': ['coolant', 'octonal'], + 'octonare': ['coronate', 'octonare', 'otocrane'], + 'octonarius': ['acutorsion', 'octonarius'], + 'octoroon': ['coonroot', 'octoroon'], + 'octuple': ['couplet', 'octuple'], + 'ocularist': ['ocularist', 'suctorial'], + 'oculate': ['caulote', 'colutea', 'oculate'], + 'oculinid': ['lucinoid', 'oculinid'], + 'ocypete': ['ecotype', 'ocypete'], + 'od': ['do', 'od'], + 'oda': ['ado', 'dao', 'oda'], + 'odal': ['alod', 'dola', 'load', 'odal'], + 'odalman': ['mandola', 'odalman'], + 'odax': ['doxa', 'odax'], + 'odd': ['dod', 'odd'], + 'oddman': ['dodman', 'oddman'], + 'ode': ['doe', 'edo', 'ode'], + 'odel': ['dole', 'elod', 'lode', 'odel'], + 'odin': ['dion', 'nodi', 'odin'], + 'odinism': ['diosmin', 'odinism'], + 'odinite': ['edition', 'odinite', 'otidine', 'tineoid'], + 'odiometer': ['meteoroid', 'odiometer'], + 'odious': ['iodous', 'odious'], + 'odor': ['door', 'odor', 'oord', 'rood'], + 'odorant': ['donator', 'odorant', 'tornado'], + 'odored': ['doored', 'odored'], + 'odorless': ['doorless', 'odorless'], + 'ods': ['dos', 'ods', 'sod'], + 'odum': ['doum', 'moud', 'odum'], + 'odyl': ['loyd', 'odyl'], + 'odylist': ['odylist', 'styloid'], + 'oecanthus': ['ceanothus', 'oecanthus'], + 'oecist': ['cotise', 'oecist'], + 'oedipal': ['elapoid', 'oedipal'], + 'oenin': ['inone', 'oenin'], + 'oenocarpus': ['oenocarpus', 'uranoscope'], + 'oer': ['oer', 'ore', 'roe'], + 'oes': ['oes', 'ose', 'soe'], + 'oestrian': ['arsonite', 'asterion', 'oestrian', 'rosinate', 'serotina'], + 'oestrid': ['oestrid', 'steroid', 'storied'], + 'oestridae': ['oestridae', 'ostreidae', 'sorediate'], + 'oestrin': ['oestrin', 'tersion'], + 'oestriol': ['oestriol', 'rosolite'], + 'oestroid': ['oestroid', 'ordosite', 'ostreoid'], + 'oestrual': ['oestrual', 'rosulate'], + 'oestrum': ['oestrum', 'rosetum'], + 'oestrus': ['estrous', 'oestrus', 'sestuor', 'tussore'], + 'of': ['fo', 'of'], + 'ofer': ['fore', 'froe', 'ofer'], + 'offcast': ['castoff', 'offcast'], + 'offcut': ['cutoff', 'offcut'], + 'offender': ['offender', 'reoffend'], + 'offerer': ['offerer', 'reoffer'], + 'offlet': ['letoff', 'offlet'], + 'offset': ['offset', 'setoff'], + 'offuscate': ['offuscate', 'suffocate'], + 'offuscation': ['offuscation', 'suffocation'], + 'offward': ['drawoff', 'offward'], + 'ofo': ['foo', 'ofo'], + 'oft': ['fot', 'oft'], + 'oftens': ['oftens', 'soften'], + 'ofter': ['fetor', 'forte', 
'ofter'], + 'oftly': ['lofty', 'oftly'], + 'og': ['go', 'og'], + 'ogam': ['goma', 'ogam'], + 'ogeed': ['geode', 'ogeed'], + 'ogle': ['egol', 'goel', 'loge', 'ogle', 'oleg'], + 'ogler': ['glore', 'ogler'], + 'ogpu': ['goup', 'ogpu', 'upgo'], + 'ogre': ['goer', 'gore', 'ogre'], + 'ogreism': ['ergoism', 'ogreism'], + 'ogtiern': ['ergotin', 'genitor', 'negrito', 'ogtiern', 'trigone'], + 'oh': ['ho', 'oh'], + 'ohm': ['mho', 'ohm'], + 'ohmage': ['homage', 'ohmage'], + 'ohmmeter': ['mhometer', 'ohmmeter'], + 'oilcan': ['alnico', 'cliona', 'oilcan'], + 'oilcup': ['oilcup', 'upcoil'], + 'oildom': ['moloid', 'oildom'], + 'oiler': ['oiler', 'oriel', 'reoil'], + 'oillet': ['elliot', 'oillet'], + 'oilman': ['monial', 'nomial', 'oilman'], + 'oilstone': ['leonotis', 'oilstone'], + 'oime': ['meio', 'oime'], + 'oinomania': ['oinomania', 'oniomania'], + 'oint': ['into', 'nito', 'oint', 'tino'], + 'oireachtas': ['oireachtas', 'theocrasia'], + 'ok': ['ko', 'ok'], + 'oka': ['ako', 'koa', 'oak', 'oka'], + 'oket': ['keto', 'oket', 'toke'], + 'oki': ['koi', 'oki'], + 'okie': ['ekoi', 'okie'], + 'okra': ['karo', 'kora', 'okra', 'roka'], + 'olaf': ['foal', 'loaf', 'olaf'], + 'olam': ['loam', 'loma', 'malo', 'mola', 'olam'], + 'olamic': ['colima', 'olamic'], + 'olcha': ['chola', 'loach', 'olcha'], + 'olchi': ['choil', 'choli', 'olchi'], + 'old': ['dol', 'lod', 'old'], + 'older': ['lored', 'older'], + 'oldhamite': ['ethmoidal', 'oldhamite'], + 'ole': ['leo', 'ole'], + 'olea': ['aloe', 'olea'], + 'olecranal': ['lanceolar', 'olecranal'], + 'olecranoid': ['lecanoroid', 'olecranoid'], + 'olecranon': ['encoronal', 'olecranon'], + 'olefin': ['enfoil', 'olefin'], + 'oleg': ['egol', 'goel', 'loge', 'ogle', 'oleg'], + 'olein': ['enoil', 'ileon', 'olein'], + 'olena': ['alone', 'anole', 'olena'], + 'olenid': ['doline', 'indole', 'leonid', 'loined', 'olenid'], + 'olent': ['lento', 'olent'], + 'olenus': ['ensoul', 'olenus', 'unsole'], + 'oleosity': ['oleosity', 'otiosely'], + 'olga': ['gaol', 'goal', 'gola', 'olga'], + 'oliban': ['albino', 'albion', 'alboin', 'oliban'], + 'olibanum': ['olibanum', 'umbonial'], + 'olid': ['dilo', 'diol', 'doli', 'idol', 'olid'], + 'oligoclase': ['oligoclase', 'sociolegal'], + 'oligomyoid': ['idiomology', 'oligomyoid'], + 'oligonephria': ['oligonephria', 'oligophrenia'], + 'oligonephric': ['oligonephric', 'oligophrenic'], + 'oligophrenia': ['oligonephria', 'oligophrenia'], + 'oligophrenic': ['oligonephric', 'oligophrenic'], + 'oliprance': ['oliprance', 'porcelain'], + 'oliva': ['oliva', 'viola'], + 'olivaceous': ['olivaceous', 'violaceous'], + 'olive': ['olive', 'ovile', 'voile'], + 'olived': ['livedo', 'olived'], + 'oliver': ['oliver', 'violer', 'virole'], + 'olivescent': ['olivescent', 'violescent'], + 'olivet': ['olivet', 'violet'], + 'olivetan': ['olivetan', 'velation'], + 'olivette': ['olivette', 'violette'], + 'olivine': ['olivine', 'violine'], + 'olla': ['lalo', 'lola', 'olla'], + 'olof': ['fool', 'loof', 'olof'], + 'olonets': ['enstool', 'olonets'], + 'olor': ['loro', 'olor', 'orlo', 'rool'], + 'olpe': ['lope', 'olpe', 'pole'], + 'olson': ['olson', 'solon'], + 'olympian': ['olympian', 'polymnia'], + 'om': ['mo', 'om'], + 'omaha': ['haoma', 'omaha'], + 'oman': ['mano', 'moan', 'mona', 'noam', 'noma', 'oman'], + 'omani': ['amino', 'inoma', 'naomi', 'omani', 'omina'], + 'omar': ['amor', 'maro', 'mora', 'omar', 'roam'], + 'omasitis': ['amitosis', 'omasitis'], + 'omber': ['brome', 'omber'], + 'omelet': ['omelet', 'telome'], + 'omen': ['mone', 'nome', 'omen'], + 'omened': ['endome', 'omened'], + 
'omental': ['omental', 'telamon'], + 'omentotomy': ['entomotomy', 'omentotomy'], + 'omer': ['mero', 'more', 'omer', 'rome'], + 'omicron': ['moronic', 'omicron'], + 'omina': ['amino', 'inoma', 'naomi', 'omani', 'omina'], + 'ominous': ['mousoni', 'ominous'], + 'omit': ['itmo', 'moit', 'omit', 'timo'], + 'omitis': ['itoism', 'omitis'], + 'omniana': ['nanaimo', 'omniana'], + 'omniarch': ['choirman', 'harmonic', 'omniarch'], + 'omnigerent': ['ignorement', 'omnigerent'], + 'omnilegent': ['eloignment', 'omnilegent'], + 'omnimeter': ['minometer', 'omnimeter'], + 'omnimodous': ['monosodium', 'omnimodous', 'onosmodium'], + 'omnist': ['inmost', 'monist', 'omnist'], + 'omnitenent': ['intonement', 'omnitenent'], + 'omphalogenous': ['megalophonous', 'omphalogenous'], + 'on': ['no', 'on'], + 'ona': ['noa', 'ona'], + 'onager': ['onager', 'orange'], + 'onagra': ['agroan', 'angora', 'anogra', 'arango', 'argoan', 'onagra'], + 'onan': ['anon', 'nona', 'onan'], + 'onanism': ['mansion', 'onanism'], + 'onanistic': ['anconitis', 'antiscion', 'onanistic'], + 'onca': ['coan', 'onca'], + 'once': ['cone', 'once'], + 'oncetta': ['oncetta', 'tectona'], + 'onchidiidae': ['chionididae', 'onchidiidae'], + 'oncia': ['acoin', 'oncia'], + 'oncidium': ['conidium', 'mucinoid', 'oncidium'], + 'oncin': ['conin', 'nonic', 'oncin'], + 'oncometric': ['necrotomic', 'oncometric'], + 'oncometry': ['necrotomy', 'normocyte', 'oncometry'], + 'oncoming': ['gnomonic', 'oncoming'], + 'oncosimeter': ['oncosimeter', 'semicoronet'], + 'oncost': ['nostoc', 'oncost'], + 'ondagram': ['dragoman', 'garamond', 'ondagram'], + 'ondameter': ['emendator', 'ondameter'], + 'ondatra': ['adorant', 'ondatra'], + 'ondine': ['donnie', 'indone', 'ondine'], + 'ondy': ['ondy', 'yond'], + 'one': ['eon', 'neo', 'one'], + 'oneida': ['daoine', 'oneida'], + 'oneiric': ['ironice', 'oneiric'], + 'oneism': ['eonism', 'mesion', 'oneism', 'simeon'], + 'oneness': ['oneness', 'senones'], + 'oner': ['oner', 'rone'], + 'onery': ['eryon', 'onery'], + 'oniomania': ['oinomania', 'oniomania'], + 'oniomaniac': ['iconomania', 'oniomaniac'], + 'oniscidae': ['oniscidae', 'oscinidae', 'sciaenoid'], + 'onisciform': ['onisciform', 'somnorific'], + 'oniscoidea': ['iodocasein', 'oniscoidea'], + 'onkos': ['onkos', 'snook'], + 'onlepy': ['onlepy', 'openly'], + 'onliest': ['leonist', 'onliest'], + 'only': ['lyon', 'only'], + 'onmarch': ['monarch', 'nomarch', 'onmarch'], + 'onosmodium': ['monosodium', 'omnimodous', 'onosmodium'], + 'ons': ['ons', 'son'], + 'onset': ['onset', 'seton', 'steno', 'stone'], + 'onshore': ['onshore', 'sorehon'], + 'onside': ['deinos', 'donsie', 'inodes', 'onside'], + 'onsight': ['hosting', 'onsight'], + 'ontal': ['notal', 'ontal', 'talon', 'tolan', 'tonal'], + 'ontaric': ['anticor', 'carotin', 'cortina', 'ontaric'], + 'onto': ['onto', 'oont', 'toon'], + 'ontogenal': ['nonlegato', 'ontogenal'], + 'ontological': ['ontological', 'tonological'], + 'ontologism': ['monologist', 'nomologist', 'ontologism'], + 'ontology': ['ontology', 'tonology'], + 'onus': ['nosu', 'nous', 'onus'], + 'onymal': ['amylon', 'onymal'], + 'onymatic': ['cymation', 'myatonic', 'onymatic'], + 'onza': ['azon', 'onza', 'ozan'], + 'oocyte': ['coyote', 'oocyte'], + 'oodles': ['dolose', 'oodles', 'soodle'], + 'ooid': ['iodo', 'ooid'], + 'oolak': ['lokao', 'oolak'], + 'oolite': ['lootie', 'oolite'], + 'oometer': ['moreote', 'oometer'], + 'oons': ['oons', 'soon'], + 'oont': ['onto', 'oont', 'toon'], + 'oopak': ['oopak', 'pooka'], + 'oord': ['door', 'odor', 'oord', 'rood'], + 'opacate': ['opacate', 
'peacoat'], + 'opacite': ['ectopia', 'opacite'], + 'opah': ['opah', 'paho', 'poha'], + 'opal': ['alop', 'opal'], + 'opalina': ['opalina', 'pianola'], + 'opalinine': ['opalinine', 'pleionian'], + 'opalize': ['epizoal', 'lopezia', 'opalize'], + 'opata': ['opata', 'patao', 'tapoa'], + 'opdalite': ['opdalite', 'petaloid'], + 'ope': ['ope', 'poe'], + 'opelet': ['eelpot', 'opelet'], + 'open': ['nope', 'open', 'peon', 'pone'], + 'opencast': ['capstone', 'opencast'], + 'opener': ['opener', 'reopen', 'repone'], + 'openly': ['onlepy', 'openly'], + 'openside': ['disponee', 'openside'], + 'operable': ['operable', 'ropeable'], + 'operae': ['aerope', 'operae'], + 'operant': ['operant', 'pronate', 'protean'], + 'operatic': ['aporetic', 'capriote', 'operatic'], + 'operatical': ['aporetical', 'operatical'], + 'operating': ['operating', 'pignorate'], + 'operatrix': ['expirator', 'operatrix'], + 'opercular': ['opercular', 'preocular'], + 'ophidion': ['ophidion', 'ophionid'], + 'ophionid': ['ophidion', 'ophionid'], + 'ophism': ['mopish', 'ophism'], + 'ophite': ['ethiop', 'ophite', 'peitho'], + 'opinant': ['opinant', 'pintano'], + 'opinator': ['opinator', 'tropaion'], + 'opiner': ['opiner', 'orpine', 'ponier'], + 'opiniaster': ['opiniaster', 'opiniastre'], + 'opiniastre': ['opiniaster', 'opiniastre'], + 'opiniatrety': ['opiniatrety', 'petitionary'], + 'opisometer': ['opisometer', 'opsiometer'], + 'opisthenar': ['opisthenar', 'spheration'], + 'opisthorchis': ['chirosophist', 'opisthorchis'], + 'oppian': ['oppian', 'papion', 'popian'], + 'opposer': ['opposer', 'propose'], + 'oppugn': ['oppugn', 'popgun'], + 'opsiometer': ['opisometer', 'opsiometer'], + 'opsonic': ['opsonic', 'pocosin'], + 'opsy': ['opsy', 'posy'], + 'opt': ['opt', 'pot', 'top'], + 'optable': ['optable', 'potable'], + 'optableness': ['optableness', 'potableness'], + 'optate': ['aptote', 'optate', 'potate', 'teapot'], + 'optation': ['optation', 'potation'], + 'optative': ['optative', 'potative'], + 'optic': ['optic', 'picot', 'topic'], + 'optical': ['capitol', 'coalpit', 'optical', 'topical'], + 'optically': ['optically', 'topically'], + 'optics': ['copist', 'coptis', 'optics', 'postic'], + 'optimal': ['optimal', 'palmito'], + 'option': ['option', 'potion'], + 'optional': ['antipolo', 'antipool', 'optional'], + 'optography': ['optography', 'topography'], + 'optological': ['optological', 'topological'], + 'optologist': ['optologist', 'topologist'], + 'optology': ['optology', 'topology'], + 'optometer': ['optometer', 'potometer'], + 'optophone': ['optophone', 'topophone'], + 'optotype': ['optotype', 'topotype'], + 'opulaster': ['opulaster', 'sportulae', 'sporulate'], + 'opulus': ['lupous', 'opulus'], + 'opuntia': ['opuntia', 'utopian'], + 'opus': ['opus', 'soup'], + 'opuscular': ['crapulous', 'opuscular'], + 'or': ['or', 'ro'], + 'ora': ['aro', 'oar', 'ora'], + 'orach': ['achor', 'chora', 'corah', 'orach', 'roach'], + 'oracle': ['carole', 'coaler', 'coelar', 'oracle', 'recoal'], + 'orad': ['dora', 'orad', 'road'], + 'oral': ['lora', 'oral'], + 'oralist': ['aristol', 'oralist', 'ortalis', 'striola'], + 'orality': ['orality', 'tailory'], + 'orang': ['angor', + 'argon', + 'goran', + 'grano', + 'groan', + 'nagor', + 'orang', + 'organ', + 'rogan', + 'ronga'], + 'orange': ['onager', 'orange'], + 'orangeist': ['goniaster', 'orangeist'], + 'oranger': ['groaner', 'oranger', 'organer'], + 'orangism': ['orangism', 'organism', 'sinogram'], + 'orangist': ['orangist', 'organist', 'roasting', 'signator'], + 'orangize': ['agonizer', 'orangize', 'organize'], + 
'orant': ['orant', 'rotan', 'toran', 'trona'], + 'oraon': ['aroon', 'oraon'], + 'oratress': ['assertor', 'assorter', 'oratress', 'reassort'], + 'orb': ['bor', 'orb', 'rob'], + 'orbed': ['boder', 'orbed'], + 'orbic': ['boric', 'cribo', 'orbic'], + 'orbicle': ['bricole', 'corbeil', 'orbicle'], + 'orbicular': ['courbaril', 'orbicular'], + 'orbitale': ['betailor', 'laborite', 'orbitale'], + 'orbitelar': ['liberator', 'orbitelar'], + 'orbitelarian': ['irrationable', 'orbitelarian'], + 'orbitofrontal': ['frontoorbital', 'orbitofrontal'], + 'orbitonasal': ['nasoorbital', 'orbitonasal'], + 'orblet': ['bolter', 'orblet', 'reblot', 'rebolt'], + 'orbulina': ['orbulina', 'unilobar'], + 'orc': ['cor', 'cro', 'orc', 'roc'], + 'orca': ['acor', 'caro', 'cora', 'orca'], + 'orcadian': ['nocardia', 'orcadian'], + 'orcanet': ['enactor', 'necator', 'orcanet'], + 'orcein': ['cerion', 'coiner', 'neroic', 'orcein', 'recoin'], + 'orchat': ['cathro', 'orchat'], + 'orchel': ['chlore', 'choler', 'orchel'], + 'orchester': ['orchester', 'orchestre'], + 'orchestre': ['orchester', 'orchestre'], + 'orchic': ['choric', 'orchic'], + 'orchid': ['orchid', 'rhodic'], + 'orchidist': ['chorditis', 'orchidist'], + 'orchiocele': ['choriocele', 'orchiocele'], + 'orchitis': ['historic', 'orchitis'], + 'orcin': ['corin', 'noric', 'orcin'], + 'orcinol': ['colorin', 'orcinol'], + 'ordain': ['dorian', 'inroad', 'ordain'], + 'ordainer': ['inroader', 'ordainer', 'reordain'], + 'ordainment': ['antimodern', 'ordainment'], + 'ordanchite': ['achondrite', 'ditrochean', 'ordanchite'], + 'ordeal': ['loader', 'ordeal', 'reload'], + 'orderer': ['orderer', 'reorder'], + 'ordinable': ['bolderian', 'ordinable'], + 'ordinal': ['nailrod', 'ordinal', 'rinaldo', 'rodinal'], + 'ordinance': ['cerdonian', 'ordinance'], + 'ordinate': ['andorite', 'nadorite', 'ordinate', 'rodentia'], + 'ordinative': ['derivation', 'ordinative'], + 'ordinator': ['ordinator', 'radiotron'], + 'ordines': ['indorse', 'ordines', 'siredon', 'sordine'], + 'ordosite': ['oestroid', 'ordosite', 'ostreoid'], + 'ordu': ['dour', 'duro', 'ordu', 'roud'], + 'ore': ['oer', 'ore', 'roe'], + 'oread': ['adore', 'oared', 'oread'], + 'oreas': ['arose', 'oreas'], + 'orectic': ['cerotic', 'orectic'], + 'oreman': ['enamor', 'monera', 'oreman', 'romane'], + 'orenda': ['denaro', 'orenda'], + 'orendite': ['enteroid', 'orendite'], + 'orestean': ['orestean', 'resonate', 'stearone'], + 'orf': ['for', 'fro', 'orf'], + 'organ': ['angor', + 'argon', + 'goran', + 'grano', + 'groan', + 'nagor', + 'orang', + 'organ', + 'rogan', + 'ronga'], + 'organal': ['angolar', 'organal'], + 'organer': ['groaner', 'oranger', 'organer'], + 'organicism': ['organicism', 'organismic'], + 'organicist': ['organicist', 'organistic'], + 'organing': ['groaning', 'organing'], + 'organism': ['orangism', 'organism', 'sinogram'], + 'organismic': ['organicism', 'organismic'], + 'organist': ['orangist', 'organist', 'roasting', 'signator'], + 'organistic': ['organicist', 'organistic'], + 'organity': ['gyration', 'organity', 'ortygian'], + 'organize': ['agonizer', 'orangize', 'organize'], + 'organized': ['dragonize', 'organized'], + 'organoid': ['gordonia', 'organoid', 'rigadoon'], + 'organonymic': ['craniognomy', 'organonymic'], + 'organotin': ['gortonian', 'organotin'], + 'organule': ['lagunero', 'organule', 'uroglena'], + 'orgiasm': ['isogram', 'orgiasm'], + 'orgiast': ['agistor', 'agrotis', 'orgiast'], + 'orgic': ['corgi', 'goric', 'orgic'], + 'orgue': ['orgue', 'rogue', 'rouge'], + 'orgy': ['gory', 'gyro', 'orgy'], + 'oriel': ['oiler', 
'oriel', 'reoil'], + 'orient': ['norite', 'orient'], + 'oriental': ['oriental', 'relation', 'tirolean'], + 'orientalism': ['misrelation', 'orientalism', 'relationism'], + 'orientalist': ['orientalist', 'relationist'], + 'orientate': ['anoterite', 'orientate'], + 'origanum': ['mirounga', 'moringua', 'origanum'], + 'origin': ['nigori', 'origin'], + 'orle': ['lore', 'orle', 'role'], + 'orlean': ['lenora', 'loaner', 'orlean', 'reloan'], + 'orleanist': ['lairstone', 'orleanist', 'serotinal'], + 'orleanistic': ['intersocial', 'orleanistic', 'sclerotinia'], + 'orlet': ['lerot', 'orlet', 'relot'], + 'orlo': ['loro', 'olor', 'orlo', 'rool'], + 'orna': ['nora', 'orna', 'roan'], + 'ornamenter': ['ornamenter', 'reornament'], + 'ornate': ['atoner', 'norate', 'ornate'], + 'ornately': ['neolatry', 'ornately', 'tyrolean'], + 'ornation': ['noration', 'ornation', 'orotinan'], + 'ornis': ['ornis', 'rosin'], + 'orniscopic': ['orniscopic', 'scorpionic'], + 'ornithomantic': ['ornithomantic', 'orthantimonic'], + 'ornithoptera': ['ornithoptera', 'prototherian'], + 'orogenetic': ['erotogenic', 'geocronite', 'orogenetic'], + 'orographical': ['colporrhagia', 'orographical'], + 'orography': ['gyrophora', 'orography'], + 'orotinan': ['noration', 'ornation', 'orotinan'], + 'orotund': ['orotund', 'rotundo'], + 'orphanism': ['manorship', 'orphanism'], + 'orpheon': ['orpheon', 'phorone'], + 'orpheus': ['ephorus', 'orpheus', 'upshore'], + 'orphical': ['orphical', 'rhopalic'], + 'orphism': ['orphism', 'rompish'], + 'orphize': ['orphize', 'phiroze'], + 'orpine': ['opiner', 'orpine', 'ponier'], + 'orsel': ['loser', 'orsel', 'rosel', 'soler'], + 'orselle': ['orselle', 'roselle'], + 'ort': ['ort', 'rot', 'tor'], + 'ortalid': ['dilator', 'ortalid'], + 'ortalis': ['aristol', 'oralist', 'ortalis', 'striola'], + 'ortet': ['ortet', 'otter', 'toter'], + 'orthal': ['harlot', 'orthal', 'thoral'], + 'orthantimonic': ['ornithomantic', 'orthantimonic'], + 'orthian': ['orthian', 'thorina'], + 'orthic': ['chorti', 'orthic', 'thoric', 'trochi'], + 'orthite': ['hortite', 'orthite', 'thorite'], + 'ortho': ['ortho', 'thoro'], + 'orthodromy': ['hydromotor', 'orthodromy'], + 'orthogamy': ['orthogamy', 'othygroma'], + 'orthogonial': ['orthogonial', 'orthologian'], + 'orthologian': ['orthogonial', 'orthologian'], + 'orthose': ['orthose', 'reshoot', 'shooter', 'soother'], + 'ortiga': ['agrito', 'ortiga'], + 'ortstein': ['ortstein', 'tenorist'], + 'ortygian': ['gyration', 'organity', 'ortygian'], + 'ortygine': ['genitory', 'ortygine'], + 'ory': ['ory', 'roy', 'yor'], + 'oryx': ['oryx', 'roxy'], + 'os': ['os', 'so'], + 'osamin': ['monias', 'osamin', 'osmina'], + 'osamine': ['monesia', 'osamine', 'osmanie'], + 'osc': ['cos', 'osc', 'soc'], + 'oscan': ['ascon', 'canso', 'oscan'], + 'oscar': ['arcos', 'crosa', 'oscar', 'sacro'], + 'oscella': ['callose', 'oscella'], + 'oscheal': ['oscheal', 'scholae'], + 'oscillance': ['clinoclase', 'oscillance'], + 'oscillaria': ['iliosacral', 'oscillaria'], + 'oscillation': ['colonialist', 'oscillation'], + 'oscin': ['oscin', 'scion', 'sonic'], + 'oscine': ['cosine', 'oscine'], + 'oscines': ['cession', 'oscines'], + 'oscinian': ['oscinian', 'socinian'], + 'oscinidae': ['oniscidae', 'oscinidae', 'sciaenoid'], + 'oscitant': ['actinost', 'oscitant'], + 'oscular': ['carolus', 'oscular'], + 'osculate': ['lacteous', 'osculate'], + 'osculatory': ['cotylosaur', 'osculatory'], + 'oscule': ['coleus', 'oscule'], + 'ose': ['oes', 'ose', 'soe'], + 'osela': ['alose', 'osela', 'solea'], + 'oshac': ['chaos', 'oshac'], + 'oside': 
['diose', 'idose', 'oside'], + 'osier': ['osier', 'serio'], + 'osirian': ['nosairi', 'osirian'], + 'osiride': ['isidore', 'osiride'], + 'oskar': ['krosa', 'oskar'], + 'osmanie': ['monesia', 'osamine', 'osmanie'], + 'osmanli': ['malison', 'manolis', 'osmanli', 'somnial'], + 'osmatic': ['atomics', 'catoism', 'cosmati', 'osmatic', 'somatic'], + 'osmatism': ['osmatism', 'somatism'], + 'osmerus': ['osmerus', 'smouser'], + 'osmin': ['minos', 'osmin', 'simon'], + 'osmina': ['monias', 'osamin', 'osmina'], + 'osmiridium': ['iridosmium', 'osmiridium'], + 'osmogene': ['gonesome', 'osmogene'], + 'osmometer': ['merostome', 'osmometer'], + 'osmometric': ['microstome', 'osmometric'], + 'osmophore': ['osmophore', 'sophomore'], + 'osmotactic': ['osmotactic', 'scotomatic'], + 'osmunda': ['damnous', 'osmunda'], + 'osone': ['noose', 'osone'], + 'osprey': ['eryops', 'osprey'], + 'ossal': ['lasso', 'ossal'], + 'ossein': ['essoin', 'ossein'], + 'osselet': ['osselet', 'sestole', 'toeless'], + 'ossetian': ['assiento', 'ossetian'], + 'ossetine': ['essonite', 'ossetine'], + 'ossicle': ['loessic', 'ossicle'], + 'ossiculate': ['acleistous', 'ossiculate'], + 'ossicule': ['coulisse', 'leucosis', 'ossicule'], + 'ossuary': ['ossuary', 'suasory'], + 'ostara': ['aroast', 'ostara'], + 'osteal': ['lotase', 'osteal', 'solate', 'stolae', 'talose'], + 'ostearthritis': ['arthrosteitis', 'ostearthritis'], + 'ostectomy': ['cystotome', 'cytostome', 'ostectomy'], + 'ostein': ['nesiot', 'ostein'], + 'ostemia': ['miaotse', 'ostemia'], + 'ostent': ['ostent', 'teston'], + 'ostentation': ['ostentation', 'tionontates'], + 'ostentous': ['ostentous', 'sostenuto'], + 'osteometric': ['osteometric', 'stereotomic'], + 'osteometrical': ['osteometrical', 'stereotomical'], + 'osteometry': ['osteometry', 'stereotomy'], + 'ostic': ['ostic', 'sciot', 'stoic'], + 'ostmen': ['montes', 'ostmen'], + 'ostracea': ['ceratosa', 'ostracea'], + 'ostracean': ['ostracean', 'socratean'], + 'ostracine': ['atroscine', 'certosina', 'ostracine', 'tinoceras', 'tricosane'], + 'ostracism': ['ostracism', 'socratism'], + 'ostracize': ['ostracize', 'socratize'], + 'ostracon': ['ostracon', 'socotran'], + 'ostraite': ['astroite', 'ostraite', 'storiate'], + 'ostreidae': ['oestridae', 'ostreidae', 'sorediate'], + 'ostreiform': ['forritsome', 'ostreiform'], + 'ostreoid': ['oestroid', 'ordosite', 'ostreoid'], + 'ostrich': ['chorist', 'ostrich'], + 'oswald': ['dowlas', 'oswald'], + 'otalgy': ['goatly', 'otalgy'], + 'otaria': ['atorai', 'otaria'], + 'otarian': ['aration', 'otarian'], + 'otarine': ['otarine', 'torenia'], + 'other': ['other', 'thore', 'throe', 'toher'], + 'otherism': ['homerist', 'isotherm', 'otherism', 'theorism'], + 'otherist': ['otherist', 'theorist'], + 'othygroma': ['orthogamy', 'othygroma'], + 'otiant': ['otiant', 'titano'], + 'otidae': ['idotea', 'iodate', 'otidae'], + 'otidine': ['edition', 'odinite', 'otidine', 'tineoid'], + 'otiosely': ['oleosity', 'otiosely'], + 'otitis': ['itoist', 'otitis'], + 'oto': ['oto', 'too'], + 'otocephalic': ['hepatocolic', 'otocephalic'], + 'otocrane': ['coronate', 'octonare', 'otocrane'], + 'otogenic': ['geotonic', 'otogenic'], + 'otomian': ['amotion', 'otomian'], + 'otomyces': ['cytosome', 'otomyces'], + 'ottar': ['ottar', 'tarot', 'torta', 'troat'], + 'otter': ['ortet', 'otter', 'toter'], + 'otto': ['otto', 'toot', 'toto'], + 'otus': ['otus', 'oust', 'suto'], + 'otyak': ['otyak', 'tokay'], + 'ouch': ['chou', 'ouch'], + 'ouf': ['fou', 'ouf'], + 'ough': ['hugo', 'ough'], + 'ought': ['ought', 'tough'], + 'oughtness': 
['oughtness', 'toughness'], + 'ounds': ['nodus', 'ounds', 'sound'], + 'our': ['our', 'uro'], + 'ours': ['ours', 'sour'], + 'oust': ['otus', 'oust', 'suto'], + 'ouster': ['ouster', 'souter', 'touser', 'trouse'], + 'out': ['out', 'tou'], + 'outarde': ['outarde', 'outdare', 'outread'], + 'outban': ['outban', 'unboat'], + 'outbar': ['outbar', 'rubato', 'tabour'], + 'outbeg': ['bouget', 'outbeg'], + 'outblow': ['blowout', 'outblow', 'outbowl'], + 'outblunder': ['outblunder', 'untroubled'], + 'outbowl': ['blowout', 'outblow', 'outbowl'], + 'outbreak': ['breakout', 'outbreak'], + 'outbred': ['doubter', 'obtrude', 'outbred', 'redoubt'], + 'outburn': ['burnout', 'outburn'], + 'outburst': ['outburst', 'subtutor'], + 'outbustle': ['outbustle', 'outsubtle'], + 'outcarol': ['outcarol', 'taurocol'], + 'outcarry': ['curatory', 'outcarry'], + 'outcase': ['acetous', 'outcase'], + 'outcharm': ['outcharm', 'outmarch'], + 'outcrier': ['courtier', 'outcrier'], + 'outcut': ['cutout', 'outcut'], + 'outdance': ['outdance', 'uncoated'], + 'outdare': ['outarde', 'outdare', 'outread'], + 'outdraw': ['drawout', 'outdraw', 'outward'], + 'outer': ['outer', 'outre', 'route'], + 'outerness': ['outerness', 'outreness'], + 'outferret': ['foreutter', 'outferret'], + 'outfit': ['fitout', 'outfit'], + 'outflare': ['fluorate', 'outflare'], + 'outfling': ['flouting', 'outfling'], + 'outfly': ['outfly', 'toyful'], + 'outgoer': ['outgoer', 'rougeot'], + 'outgrin': ['outgrin', 'outring', 'routing', 'touring'], + 'outhire': ['outhire', 'routhie'], + 'outhold': ['holdout', 'outhold'], + 'outkick': ['kickout', 'outkick'], + 'outlance': ['cleanout', 'outlance'], + 'outlay': ['layout', 'lutayo', 'outlay'], + 'outleap': ['outleap', 'outpeal'], + 'outler': ['elutor', 'louter', 'outler'], + 'outlet': ['outlet', 'tutelo'], + 'outline': ['elution', 'outline'], + 'outlinear': ['outlinear', 'uranolite'], + 'outlined': ['outlined', 'untoiled'], + 'outlook': ['lookout', 'outlook'], + 'outly': ['louty', 'outly'], + 'outman': ['amount', 'moutan', 'outman'], + 'outmarch': ['outcharm', 'outmarch'], + 'outmarry': ['mortuary', 'outmarry'], + 'outmaster': ['outmaster', 'outstream'], + 'outname': ['notaeum', 'outname'], + 'outpaint': ['outpaint', 'putation'], + 'outpass': ['outpass', 'passout'], + 'outpay': ['outpay', 'tapuyo'], + 'outpeal': ['outleap', 'outpeal'], + 'outpitch': ['outpitch', 'pitchout'], + 'outplace': ['copulate', 'outplace'], + 'outprice': ['eutropic', 'outprice'], + 'outpromise': ['outpromise', 'peritomous'], + 'outrance': ['cornuate', 'courante', 'cuneator', 'outrance'], + 'outrate': ['outrate', 'outtear', 'torteau'], + 'outre': ['outer', 'outre', 'route'], + 'outread': ['outarde', 'outdare', 'outread'], + 'outremer': ['outremer', 'urometer'], + 'outreness': ['outerness', 'outreness'], + 'outring': ['outgrin', 'outring', 'routing', 'touring'], + 'outrun': ['outrun', 'runout'], + 'outsaint': ['outsaint', 'titanous'], + 'outscream': ['castoreum', 'outscream'], + 'outsell': ['outsell', 'sellout'], + 'outset': ['outset', 'setout'], + 'outshake': ['outshake', 'shakeout'], + 'outshape': ['outshape', 'taphouse'], + 'outshine': ['outshine', 'tinhouse'], + 'outshut': ['outshut', 'shutout'], + 'outside': ['outside', 'tedious'], + 'outsideness': ['outsideness', 'tediousness'], + 'outsigh': ['goutish', 'outsigh'], + 'outsin': ['outsin', 'ustion'], + 'outslide': ['outslide', 'solitude'], + 'outsnore': ['outsnore', 'urosteon'], + 'outsoler': ['outsoler', 'torulose'], + 'outspend': ['outspend', 'unposted'], + 'outspit': ['outspit', 'utopist'], + 
'outspring': ['outspring', 'sprouting'], + 'outspurn': ['outspurn', 'portunus'], + 'outstair': ['outstair', 'ratitous'], + 'outstand': ['outstand', 'standout'], + 'outstate': ['outstate', 'outtaste'], + 'outstream': ['outmaster', 'outstream'], + 'outstreet': ['outstreet', 'tetterous'], + 'outsubtle': ['outbustle', 'outsubtle'], + 'outtaste': ['outstate', 'outtaste'], + 'outtear': ['outrate', 'outtear', 'torteau'], + 'outthrough': ['outthrough', 'throughout'], + 'outthrow': ['outthrow', 'outworth', 'throwout'], + 'outtrail': ['outtrail', 'tutorial'], + 'outturn': ['outturn', 'turnout'], + 'outturned': ['outturned', 'untutored'], + 'outwalk': ['outwalk', 'walkout'], + 'outward': ['drawout', 'outdraw', 'outward'], + 'outwash': ['outwash', 'washout'], + 'outwatch': ['outwatch', 'watchout'], + 'outwith': ['outwith', 'without'], + 'outwork': ['outwork', 'workout'], + 'outworth': ['outthrow', 'outworth', 'throwout'], + 'ova': ['avo', 'ova'], + 'ovaloid': ['ovaloid', 'ovoidal'], + 'ovarial': ['ovarial', 'variola'], + 'ovariotubal': ['ovariotubal', 'tuboovarial'], + 'ovational': ['avolation', 'ovational'], + 'oven': ['nevo', 'oven'], + 'ovenly': ['lenvoy', 'ovenly'], + 'ovenpeel': ['envelope', 'ovenpeel'], + 'over': ['over', 'rove'], + 'overaction': ['overaction', 'revocation'], + 'overactive': ['overactive', 'revocative'], + 'overall': ['allover', 'overall'], + 'overblame': ['overblame', 'removable'], + 'overblow': ['overblow', 'overbowl'], + 'overboil': ['boilover', 'overboil'], + 'overbowl': ['overblow', 'overbowl'], + 'overbreak': ['breakover', 'overbreak'], + 'overburden': ['overburden', 'overburned'], + 'overburn': ['burnover', 'overburn'], + 'overburned': ['overburden', 'overburned'], + 'overcall': ['overcall', 'vocaller'], + 'overcare': ['overcare', 'overrace'], + 'overcirculate': ['overcirculate', 'uterocervical'], + 'overcoat': ['evocator', 'overcoat'], + 'overcross': ['crossover', 'overcross'], + 'overcup': ['overcup', 'upcover'], + 'overcurious': ['erucivorous', 'overcurious'], + 'overcurtain': ['countervair', 'overcurtain', 'recurvation'], + 'overcut': ['cutover', 'overcut'], + 'overdamn': ['overdamn', 'ravendom'], + 'overdare': ['overdare', 'overdear', 'overread'], + 'overdeal': ['overdeal', 'overlade', 'overlead'], + 'overdear': ['overdare', 'overdear', 'overread'], + 'overdraw': ['overdraw', 'overward'], + 'overdrawer': ['overdrawer', 'overreward'], + 'overdrip': ['overdrip', 'provider'], + 'overdure': ['devourer', 'overdure', 'overrude'], + 'overdust': ['overdust', 'overstud'], + 'overedit': ['overedit', 'overtide'], + 'overfar': ['favorer', 'overfar', 'refavor'], + 'overfile': ['forelive', 'overfile'], + 'overfilm': ['overfilm', 'veliform'], + 'overflower': ['overflower', 'reoverflow'], + 'overforce': ['forecover', 'overforce'], + 'overgaiter': ['overgaiter', 'revigorate'], + 'overglint': ['overglint', 'revolting'], + 'overgo': ['groove', 'overgo'], + 'overgrain': ['granivore', 'overgrain'], + 'overhate': ['overhate', 'overheat'], + 'overheat': ['overhate', 'overheat'], + 'overheld': ['overheld', 'verdelho'], + 'overidle': ['evildoer', 'overidle'], + 'overink': ['invoker', 'overink'], + 'overinsist': ['overinsist', 'versionist'], + 'overkeen': ['overkeen', 'overknee'], + 'overknee': ['overkeen', 'overknee'], + 'overlade': ['overdeal', 'overlade', 'overlead'], + 'overlast': ['overlast', 'oversalt'], + 'overlate': ['elevator', 'overlate'], + 'overlay': ['layover', 'overlay'], + 'overlead': ['overdeal', 'overlade', 'overlead'], + 'overlean': ['overlean', 'valerone'], + 'overleg': 
['overleg', 'reglove'], + 'overlie': ['overlie', 'relievo'], + 'overling': ['lovering', 'overling'], + 'overlisten': ['overlisten', 'oversilent'], + 'overlive': ['overlive', 'overveil'], + 'overly': ['overly', 'volery'], + 'overmantel': ['overmantel', 'overmantle'], + 'overmantle': ['overmantel', 'overmantle'], + 'overmaster': ['overmaster', 'overstream'], + 'overmean': ['overmean', 'overname'], + 'overmerit': ['overmerit', 'overtimer'], + 'overname': ['overmean', 'overname'], + 'overneat': ['overneat', 'renovate'], + 'overnew': ['overnew', 'rewoven'], + 'overnigh': ['hovering', 'overnigh'], + 'overpaint': ['overpaint', 'pronative'], + 'overpass': ['overpass', 'passover'], + 'overpet': ['overpet', 'preveto', 'prevote'], + 'overpick': ['overpick', 'pickover'], + 'overplain': ['overplain', 'parvoline'], + 'overply': ['overply', 'plovery'], + 'overpointed': ['overpointed', 'predevotion'], + 'overpot': ['overpot', 'overtop'], + 'overrace': ['overcare', 'overrace'], + 'overrate': ['overrate', 'overtare'], + 'overread': ['overdare', 'overdear', 'overread'], + 'overreward': ['overdrawer', 'overreward'], + 'overrude': ['devourer', 'overdure', 'overrude'], + 'overrun': ['overrun', 'runover'], + 'oversad': ['oversad', 'savored'], + 'oversale': ['oversale', 'overseal'], + 'oversalt': ['overlast', 'oversalt'], + 'oversauciness': ['oversauciness', 'veraciousness'], + 'overseal': ['oversale', 'overseal'], + 'overseen': ['overseen', 'veronese'], + 'overset': ['overset', 'setover'], + 'oversilent': ['overlisten', 'oversilent'], + 'overslip': ['overslip', 'slipover'], + 'overspread': ['overspread', 'spreadover'], + 'overstain': ['overstain', 'servation', 'versation'], + 'overstir': ['overstir', 'servitor'], + 'overstrain': ['overstrain', 'traversion'], + 'overstream': ['overmaster', 'overstream'], + 'overstrew': ['overstrew', 'overwrest'], + 'overstud': ['overdust', 'overstud'], + 'overt': ['overt', 'rovet', 'torve', 'trove', 'voter'], + 'overtare': ['overrate', 'overtare'], + 'overthrow': ['overthrow', 'overwroth'], + 'overthwart': ['overthwart', 'thwartover'], + 'overtide': ['overedit', 'overtide'], + 'overtime': ['overtime', 'remotive'], + 'overtimer': ['overmerit', 'overtimer'], + 'overtip': ['overtip', 'pivoter'], + 'overtop': ['overpot', 'overtop'], + 'overtrade': ['overtrade', 'overtread'], + 'overtread': ['overtrade', 'overtread'], + 'overtrue': ['overtrue', 'overture', 'trouvere'], + 'overture': ['overtrue', 'overture', 'trouvere'], + 'overturn': ['overturn', 'turnover'], + 'overtwine': ['interwove', 'overtwine'], + 'overveil': ['overlive', 'overveil'], + 'overwalk': ['overwalk', 'walkover'], + 'overward': ['overdraw', 'overward'], + 'overwrest': ['overstrew', 'overwrest'], + 'overwroth': ['overthrow', 'overwroth'], + 'ovest': ['ovest', 'stove'], + 'ovidae': ['evodia', 'ovidae'], + 'ovidian': ['ovidian', 'vidonia'], + 'ovile': ['olive', 'ovile', 'voile'], + 'ovillus': ['ovillus', 'villous'], + 'oviparous': ['apivorous', 'oviparous'], + 'ovist': ['ovist', 'visto'], + 'ovistic': ['covisit', 'ovistic'], + 'ovoidal': ['ovaloid', 'ovoidal'], + 'ovular': ['louvar', 'ovular'], + 'ow': ['ow', 'wo'], + 'owd': ['dow', 'owd', 'wod'], + 'owe': ['owe', 'woe'], + 'owen': ['enow', 'owen', 'wone'], + 'owenism': ['owenism', 'winsome'], + 'ower': ['ower', 'wore'], + 'owerby': ['bowery', 'bowyer', 'owerby'], + 'owl': ['low', 'lwo', 'owl'], + 'owler': ['lower', 'owler', 'rowel'], + 'owlery': ['lowery', 'owlery', 'rowley', 'yowler'], + 'owlet': ['owlet', 'towel'], + 'owlish': ['lowish', 'owlish'], + 'owlishly': 
['lowishly', 'owlishly', 'sillyhow'], + 'owlishness': ['lowishness', 'owlishness'], + 'owly': ['lowy', 'owly', 'yowl'], + 'own': ['now', 'own', 'won'], + 'owner': ['owner', 'reown', 'rowen'], + 'ownership': ['ownership', 'shipowner'], + 'ownness': ['nowness', 'ownness'], + 'owser': ['owser', 'resow', 'serow', 'sower', 'swore', 'worse'], + 'oxalan': ['axonal', 'oxalan'], + 'oxalite': ['aloxite', 'oxalite'], + 'oxan': ['axon', 'noxa', 'oxan'], + 'oxanic': ['anoxic', 'oxanic'], + 'oxazine': ['azoxine', 'oxazine'], + 'oxen': ['exon', 'oxen'], + 'oxidic': ['ixodic', 'oxidic'], + 'oximate': ['oximate', 'toxemia'], + 'oxy': ['oxy', 'yox'], + 'oxyntic': ['ictonyx', 'oxyntic'], + 'oxyphenol': ['oxyphenol', 'xylophone'], + 'oxyterpene': ['enteropexy', 'oxyterpene'], + 'oyer': ['oyer', 'roey', 'yore'], + 'oyster': ['oyster', 'rosety'], + 'oysterish': ['oysterish', 'thyreosis'], + 'oysterman': ['monastery', 'oysterman'], + 'ozan': ['azon', 'onza', 'ozan'], + 'ozena': ['neoza', 'ozena'], + 'ozonate': ['entozoa', 'ozonate'], + 'ozonic': ['ozonic', 'zoonic'], + 'ozotype': ['ozotype', 'zootype'], + 'paal': ['paal', 'pala'], + 'paar': ['apar', 'paar', 'para'], + 'pablo': ['pablo', 'polab'], + 'pac': ['cap', 'pac'], + 'pacable': ['capable', 'pacable'], + 'pacation': ['copatain', 'pacation'], + 'pacaya': ['cayapa', 'pacaya'], + 'pace': ['cape', 'cepa', 'pace'], + 'paced': ['caped', 'decap', 'paced'], + 'pacer': ['caper', 'crape', 'pacer', 'perca', 'recap'], + 'pachnolite': ['pachnolite', 'phonetical'], + 'pachometer': ['pachometer', 'phacometer'], + 'pacht': ['chapt', 'pacht', 'patch'], + 'pachylosis': ['pachylosis', 'phacolysis'], + 'pacificist': ['pacificist', 'pacifistic'], + 'pacifistic': ['pacificist', 'pacifistic'], + 'packer': ['packer', 'repack'], + 'paco': ['copa', 'paco'], + 'pacolet': ['pacolet', 'polecat'], + 'paction': ['caption', 'paction'], + 'pactional': ['pactional', 'pactolian', 'placation'], + 'pactionally': ['pactionally', 'polyactinal'], + 'pactolian': ['pactional', 'pactolian', 'placation'], + 'pad': ['dap', 'pad'], + 'padda': ['dadap', 'padda'], + 'padder': ['padder', 'parded'], + 'padfoot': ['footpad', 'padfoot'], + 'padle': ['padle', 'paled', 'pedal', 'plead'], + 'padre': ['drape', 'padre'], + 'padtree': ['padtree', 'predate', 'tapered'], + 'paean': ['apnea', 'paean'], + 'paeanism': ['paeanism', 'spanemia'], + 'paegel': ['paegel', 'paegle', 'pelage'], + 'paegle': ['paegel', 'paegle', 'pelage'], + 'paga': ['gapa', 'paga'], + 'page': ['gape', 'page', 'peag', 'pega'], + 'pagedom': ['megapod', 'pagedom'], + 'pager': ['gaper', 'grape', 'pager', 'parge'], + 'pageship': ['pageship', 'shippage'], + 'paginary': ['agrypnia', 'paginary'], + 'paguridae': ['paguridae', 'paguridea'], + 'paguridea': ['paguridae', 'paguridea'], + 'pagurine': ['pagurine', 'perugian'], + 'pah': ['hap', 'pah'], + 'pahari': ['pahari', 'pariah', 'raphia'], + 'pahi': ['hapi', 'pahi'], + 'paho': ['opah', 'paho', 'poha'], + 'paigle': ['paigle', 'pilage'], + 'paik': ['paik', 'pika'], + 'pail': ['lipa', 'pail', 'pali', 'pial'], + 'paillasse': ['paillasse', 'palliasse'], + 'pain': ['nipa', 'pain', 'pani', 'pian', 'pina'], + 'painless': ['painless', 'spinales'], + 'paint': ['inapt', 'paint', 'pinta'], + 'painted': ['depaint', 'inadept', 'painted', 'patined'], + 'painter': ['painter', 'pertain', 'pterian', 'repaint'], + 'painterly': ['interplay', 'painterly'], + 'paintiness': ['antisepsin', 'paintiness'], + 'paip': ['paip', 'pipa'], + 'pair': ['pair', 'pari', 'pria', 'ripa'], + 'paired': ['diaper', 'paired'], + 'pairer': 
['pairer', 'rapier', 'repair'], + 'pairment': ['imperant', 'pairment', 'partimen', 'premiant', 'tripeman'], + 'pais': ['apis', 'pais', 'pasi', 'saip'], + 'pajonism': ['japonism', 'pajonism'], + 'pal': ['alp', 'lap', 'pal'], + 'pala': ['paal', 'pala'], + 'palaeechinoid': ['deinocephalia', 'palaeechinoid'], + 'palaemonid': ['anomaliped', 'palaemonid'], + 'palaemonoid': ['adenolipoma', 'palaemonoid'], + 'palaeornis': ['palaeornis', 'personalia'], + 'palaestrics': ['palaestrics', 'paracelsist'], + 'palaic': ['apical', 'palaic'], + 'palaite': ['palaite', 'petalia', 'pileata'], + 'palame': ['palame', 'palmae', 'pamela'], + 'palamite': ['ampliate', 'palamite'], + 'palas': ['palas', 'salpa'], + 'palate': ['aletap', 'palate', 'platea'], + 'palatial': ['palatial', 'palliata'], + 'palatic': ['capital', 'palatic'], + 'palation': ['palation', 'talapoin'], + 'palatonasal': ['nasopalatal', 'palatonasal'], + 'palau': ['palau', 'paula'], + 'palay': ['palay', 'playa'], + 'pale': ['leap', 'lepa', 'pale', 'peal', 'plea'], + 'paled': ['padle', 'paled', 'pedal', 'plead'], + 'paleness': ['paleness', 'paneless'], + 'paleolithy': ['paleolithy', 'polyhalite', 'polythelia'], + 'paler': ['lepra', 'paler', 'parel', 'parle', 'pearl', 'perla', 'relap'], + 'palermitan': ['palermitan', 'parliament'], + 'palermo': ['leproma', 'palermo', 'pleroma', 'polearm'], + 'pales': ['elaps', + 'lapse', + 'lepas', + 'pales', + 'salep', + 'saple', + 'sepal', + 'slape', + 'spale', + 'speal'], + 'palestral': ['alpestral', 'palestral'], + 'palestrian': ['alpestrian', 'palestrian', 'psalterian'], + 'palet': ['leapt', + 'palet', + 'patel', + 'pelta', + 'petal', + 'plate', + 'pleat', + 'tepal'], + 'palette': ['palette', 'peltate'], + 'pali': ['lipa', 'pail', 'pali', 'pial'], + 'palification': ['palification', 'pontificalia'], + 'palinode': ['lapideon', 'palinode', 'pedalion'], + 'palinodist': ['palinodist', 'plastinoid'], + 'palisade': ['palisade', 'salpidae'], + 'palish': ['palish', 'silpha'], + 'pallasite': ['aliseptal', 'pallasite'], + 'pallette': ['pallette', 'platelet'], + 'palliasse': ['paillasse', 'palliasse'], + 'palliata': ['palatial', 'palliata'], + 'pallone': ['pallone', 'pleonal'], + 'palluites': ['palluites', 'pulsatile'], + 'palm': ['lamp', 'palm'], + 'palmad': ['lampad', 'palmad'], + 'palmae': ['palame', 'palmae', 'pamela'], + 'palmary': ['palmary', 'palmyra'], + 'palmatilobed': ['palmatilobed', 'palmilobated'], + 'palmatisect': ['metaplastic', 'palmatisect'], + 'palmer': ['lamper', 'palmer', 'relamp'], + 'palmery': ['lamprey', 'palmery'], + 'palmette': ['palmette', 'template'], + 'palmful': ['lampful', 'palmful'], + 'palmification': ['amplification', 'palmification'], + 'palmilobated': ['palmatilobed', 'palmilobated'], + 'palmipes': ['epiplasm', 'palmipes'], + 'palmist': ['lampist', 'palmist'], + 'palmister': ['palmister', 'prelatism'], + 'palmistry': ['lampistry', 'palmistry'], + 'palmite': ['implate', 'palmite'], + 'palmito': ['optimal', 'palmito'], + 'palmitone': ['emptional', 'palmitone'], + 'palmo': ['mopla', 'palmo'], + 'palmula': ['ampulla', 'palmula'], + 'palmy': ['amply', 'palmy'], + 'palmyra': ['palmary', 'palmyra'], + 'palolo': ['apollo', 'palolo'], + 'palp': ['lapp', 'palp', 'plap'], + 'palpal': ['appall', 'palpal'], + 'palpatory': ['palpatory', 'papolatry'], + 'palped': ['dapple', 'lapped', 'palped'], + 'palpi': ['palpi', 'pipal'], + 'palster': ['palster', 'persalt', 'plaster', 'psalter', 'spartle', 'stapler'], + 'palsy': ['palsy', 'splay'], + 'palt': ['palt', 'plat'], + 'palta': ['aptal', 'palta', 'talpa'], + 
'palter': ['palter', 'plater'], + 'palterer': ['palterer', 'platerer'], + 'paltry': ['paltry', 'partly', 'raptly'], + 'paludian': ['paludian', 'paludina'], + 'paludic': ['paludic', 'pudical'], + 'paludina': ['paludian', 'paludina'], + 'palus': ['palus', 'pasul'], + 'palustral': ['palustral', 'plaustral'], + 'palustrine': ['lupinaster', 'palustrine'], + 'paly': ['paly', 'play', 'pyal', 'pyla'], + 'pam': ['map', 'pam'], + 'pamela': ['palame', 'palmae', 'pamela'], + 'pamir': ['impar', 'pamir', 'prima'], + 'pamiri': ['impair', 'pamiri'], + 'pamper': ['mapper', 'pamper', 'pampre'], + 'pampre': ['mapper', 'pamper', 'pampre'], + 'pan': ['nap', 'pan'], + 'panace': ['canape', 'panace'], + 'panaceist': ['antispace', 'panaceist'], + 'panade': ['napead', 'panade'], + 'panak': ['kanap', 'panak'], + 'panamist': ['mainpast', 'mantispa', 'panamist', 'stampian'], + 'panary': ['panary', 'panyar'], + 'panatela': ['panatela', 'plataean'], + 'panatrophy': ['apanthropy', 'panatrophy'], + 'pancreatoduodenectomy': ['duodenopancreatectomy', 'pancreatoduodenectomy'], + 'pandean': ['pandean', 'pannade'], + 'pandemia': ['pandemia', 'pedimana'], + 'pander': ['pander', 'repand'], + 'panderly': ['panderly', 'repandly'], + 'pandermite': ['pandermite', 'pentamerid'], + 'panderous': ['panderous', 'repandous'], + 'pandion': ['dipnoan', 'nonpaid', 'pandion'], + 'pandour': ['pandour', 'poduran'], + 'pane': ['nape', 'neap', 'nepa', 'pane', 'pean'], + 'paned': ['paned', 'penda'], + 'panel': ['alpen', 'nepal', 'panel', 'penal', 'plane'], + 'panela': ['apneal', 'panela'], + 'panelation': ['antelopian', 'neapolitan', 'panelation'], + 'paneler': ['paneler', 'repanel', 'replane'], + 'paneless': ['paleness', 'paneless'], + 'panelist': ['panelist', 'pantelis', 'penalist', 'plastein'], + 'pangamic': ['campaign', 'pangamic'], + 'pangane': ['pangane', 'pannage'], + 'pangen': ['pangen', 'penang'], + 'pangene': ['pangene', 'pennage'], + 'pangi': ['aping', 'ngapi', 'pangi'], + 'pani': ['nipa', 'pain', 'pani', 'pian', 'pina'], + 'panicle': ['calepin', 'capelin', 'panicle', 'pelican', 'pinacle'], + 'paniculitis': ['paniculitis', 'paulinistic'], + 'panisca': ['capsian', 'caspian', 'nascapi', 'panisca'], + 'panisic': ['panisic', 'piscian', 'piscina', 'sinapic'], + 'pank': ['knap', 'pank'], + 'pankin': ['napkin', 'pankin'], + 'panman': ['panman', 'pannam'], + 'panmug': ['panmug', 'pugman'], + 'pannade': ['pandean', 'pannade'], + 'pannage': ['pangane', 'pannage'], + 'pannam': ['panman', 'pannam'], + 'panne': ['panne', 'penna'], + 'pannicle': ['pannicle', 'pinnacle'], + 'pannus': ['pannus', 'sannup', 'unsnap', 'unspan'], + 'panoche': ['copehan', 'panoche', 'phocean'], + 'panoistic': ['panoistic', 'piscation'], + 'panornithic': ['panornithic', 'rhaponticin'], + 'panostitis': ['antiptosis', 'panostitis'], + 'panplegia': ['appealing', 'lagniappe', 'panplegia'], + 'pansciolist': ['costispinal', 'pansciolist'], + 'panse': ['aspen', 'panse', 'snape', 'sneap', 'spane', 'spean'], + 'panside': ['ipseand', 'panside', 'pansied'], + 'pansied': ['ipseand', 'panside', 'pansied'], + 'pansy': ['pansy', 'snapy'], + 'pantaleon': ['pantaleon', 'pantalone'], + 'pantalone': ['pantaleon', 'pantalone'], + 'pantarchy': ['pantarchy', 'pyracanth'], + 'pantelis': ['panelist', 'pantelis', 'penalist', 'plastein'], + 'pantellerite': ['interpellate', 'pantellerite'], + 'panter': ['arpent', + 'enrapt', + 'entrap', + 'panter', + 'parent', + 'pretan', + 'trepan'], + 'pantheic': ['haptenic', 'pantheic', 'pithecan'], + 'pantheonic': ['nonhepatic', 'pantheonic'], + 'pantie': 
['pantie', 'patine'], + 'panties': ['panties', 'sapient', 'spinate'], + 'pantile': ['pantile', 'pentail', 'platine', 'talpine'], + 'pantle': ['pantle', 'planet', 'platen'], + 'pantler': ['pantler', 'planter', 'replant'], + 'pantofle': ['felapton', 'pantofle'], + 'pantry': ['pantry', 'trypan'], + 'panyar': ['panary', 'panyar'], + 'papabot': ['papabot', 'papboat'], + 'papal': ['lappa', 'papal'], + 'papalistic': ['papalistic', 'papistical'], + 'papboat': ['papabot', 'papboat'], + 'paper': ['paper', 'rappe'], + 'papered': ['papered', 'pradeep'], + 'paperer': ['paperer', 'perpera', 'prepare', 'repaper'], + 'papern': ['napper', 'papern'], + 'papery': ['papery', 'prepay', 'yapper'], + 'papillote': ['papillote', 'popliteal'], + 'papion': ['oppian', 'papion', 'popian'], + 'papisher': ['papisher', 'sapphire'], + 'papistical': ['papalistic', 'papistical'], + 'papless': ['papless', 'sapples'], + 'papolatry': ['palpatory', 'papolatry'], + 'papule': ['papule', 'upleap'], + 'par': ['par', 'rap'], + 'para': ['apar', 'paar', 'para'], + 'parablepsia': ['appraisable', 'parablepsia'], + 'paracelsist': ['palaestrics', 'paracelsist'], + 'parachor': ['chaparro', 'parachor'], + 'paracolitis': ['paracolitis', 'piscatorial'], + 'paradisaic': ['paradisaic', 'paradisiac'], + 'paradisaically': ['paradisaically', 'paradisiacally'], + 'paradise': ['paradise', 'sparidae'], + 'paradisiac': ['paradisaic', 'paradisiac'], + 'paradisiacally': ['paradisaically', 'paradisiacally'], + 'parado': ['parado', 'pardao'], + 'paraenetic': ['capernaite', 'paraenetic'], + 'paragrapher': ['paragrapher', 'reparagraph'], + 'parah': ['aphra', 'harpa', 'parah'], + 'parale': ['earlap', 'parale'], + 'param': ['param', 'parma', 'praam'], + 'paramine': ['amperian', 'paramine', 'pearmain'], + 'paranephric': ['paranephric', 'paraphrenic'], + 'paranephritis': ['paranephritis', 'paraphrenitis'], + 'paranosic': ['caparison', 'paranosic'], + 'paraphrenic': ['paranephric', 'paraphrenic'], + 'paraphrenitis': ['paranephritis', 'paraphrenitis'], + 'parasita': ['aspirata', 'parasita'], + 'parasite': ['aspirate', 'parasite'], + 'parasol': ['asaprol', 'parasol'], + 'parasuchian': ['parasuchian', 'unpharasaic'], + 'parasyntheton': ['parasyntheton', 'thysanopteran'], + 'parate': ['aptera', 'parate', 'patera'], + 'parathion': ['parathion', 'phanariot'], + 'parazoan': ['parazoan', 'zaparoan'], + 'parboil': ['bipolar', 'parboil'], + 'parcel': ['carpel', 'parcel', 'placer'], + 'parcellary': ['carpellary', 'parcellary'], + 'parcellate': ['carpellate', 'parcellate', 'prelacteal'], + 'parchesi': ['parchesi', 'seraphic'], + 'pard': ['pard', 'prad'], + 'pardao': ['parado', 'pardao'], + 'parded': ['padder', 'parded'], + 'pardesi': ['despair', 'pardesi'], + 'pardo': ['adrop', 'pardo'], + 'pardoner': ['pardoner', 'preadorn'], + 'pare': ['aper', 'pare', 'pear', 'rape', 'reap'], + 'parel': ['lepra', 'paler', 'parel', 'parle', 'pearl', 'perla', 'relap'], + 'paren': ['arpen', 'paren'], + 'parent': ['arpent', + 'enrapt', + 'entrap', + 'panter', + 'parent', + 'pretan', + 'trepan'], + 'parental': ['parental', 'paternal', 'prenatal'], + 'parentalia': ['parentalia', 'planetaria'], + 'parentalism': ['parentalism', 'paternalism'], + 'parentality': ['parentality', 'paternality'], + 'parentally': ['parentally', 'paternally', 'prenatally'], + 'parentelic': ['epicentral', 'parentelic'], + 'parenticide': ['parenticide', 'preindicate'], + 'parer': ['parer', 'raper'], + 'paresis': ['paresis', 'serapis'], + 'paretic': ['paretic', 'patrice', 'picrate'], + 'parge': ['gaper', 'grape', 'pager', 
'parge'], + 'pari': ['pair', 'pari', 'pria', 'ripa'], + 'pariah': ['pahari', 'pariah', 'raphia'], + 'paridae': ['deipara', 'paridae'], + 'paries': ['aspire', 'paries', 'praise', 'sirpea', 'spirea'], + 'parietal': ['apterial', 'parietal'], + 'parietes': ['asperite', 'parietes'], + 'parietofrontal': ['frontoparietal', 'parietofrontal'], + 'parietosquamosal': ['parietosquamosal', 'squamosoparietal'], + 'parietotemporal': ['parietotemporal', 'temporoparietal'], + 'parietovisceral': ['parietovisceral', 'visceroparietal'], + 'parine': ['parine', 'rapine'], + 'paring': ['paring', 'raping'], + 'paris': ['paris', 'parsi', 'sarip'], + 'parish': ['parish', 'raphis', 'rhapis'], + 'parished': ['diphaser', 'parished', 'raphides', 'sephardi'], + 'parison': ['parison', 'soprani'], + 'parity': ['parity', 'piraty'], + 'parkee': ['parkee', 'peaker'], + 'parker': ['parker', 'repark'], + 'parlatory': ['parlatory', 'portrayal'], + 'parle': ['lepra', 'paler', 'parel', 'parle', 'pearl', 'perla', 'relap'], + 'parley': ['parley', 'pearly', 'player', 'replay'], + 'parliament': ['palermitan', 'parliament'], + 'parly': ['parly', 'pylar', 'pyral'], + 'parma': ['param', 'parma', 'praam'], + 'parmesan': ['parmesan', 'spearman'], + 'parnel': ['parnel', 'planer', 'replan'], + 'paroch': ['carhop', 'paroch'], + 'parochialism': ['aphorismical', 'parochialism'], + 'parochine': ['canephroi', 'parochine'], + 'parodic': ['parodic', 'picador'], + 'paroecism': ['paroecism', 'premosaic'], + 'parol': ['parol', 'polar', 'poral', 'proal'], + 'parosela': ['parosela', 'psoralea'], + 'parosteal': ['parosteal', 'pastorale'], + 'parostotic': ['parostotic', 'postaortic'], + 'parotia': ['apiator', 'atropia', 'parotia'], + 'parotic': ['apricot', 'atropic', 'parotic', 'patrico'], + 'parotid': ['dioptra', 'parotid'], + 'parotitic': ['parotitic', 'patriotic'], + 'parotitis': ['parotitis', 'topiarist'], + 'parous': ['parous', 'upsoar'], + 'parovarium': ['parovarium', 'vaporarium'], + 'parrot': ['parrot', 'raptor'], + 'parroty': ['parroty', 'portray', 'tropary'], + 'parsable': ['parsable', 'prebasal', 'sparable'], + 'parse': ['asper', 'parse', 'prase', 'spaer', 'spare', 'spear'], + 'parsec': ['casper', 'escarp', 'parsec', 'scrape', 'secpar', 'spacer'], + 'parsee': ['parsee', 'persae', 'persea', 'serape'], + 'parser': ['parser', 'rasper', 'sparer'], + 'parsi': ['paris', 'parsi', 'sarip'], + 'parsley': ['parsley', 'pyrales', 'sparely', 'splayer'], + 'parsoned': ['parsoned', 'spadrone'], + 'parsonese': ['parsonese', 'preseason'], + 'parsonic': ['parsonic', 'scoparin'], + 'part': ['part', 'prat', 'rapt', 'tarp', 'trap'], + 'partan': ['partan', 'tarpan'], + 'parted': ['depart', 'parted', 'petard'], + 'partedness': ['depressant', 'partedness'], + 'parter': ['parter', 'prater'], + 'parthian': ['parthian', 'taphrina'], + 'partial': ['partial', 'patrial'], + 'partialistic': ['iatraliptics', 'partialistic'], + 'particle': ['particle', 'plicater', 'prelatic'], + 'particulate': ['catapultier', 'particulate'], + 'partigen': ['partigen', 'tapering'], + 'partile': ['partile', 'plaiter', 'replait'], + 'partimen': ['imperant', 'pairment', 'partimen', 'premiant', 'tripeman'], + 'partinium': ['impuritan', 'partinium'], + 'partisan': ['aspirant', 'partisan', 'spartina'], + 'partite': ['partite', 'tearpit'], + 'partitioned': ['departition', 'partitioned', 'trepidation'], + 'partitioner': ['partitioner', 'repartition'], + 'partlet': ['partlet', 'platter', 'prattle'], + 'partly': ['paltry', 'partly', 'raptly'], + 'parto': ['aport', 'parto', 'porta'], + 'parture': 
['parture', 'rapture'], + 'party': ['party', 'trypa'], + 'parulis': ['parulis', 'spirula', 'uprisal'], + 'parure': ['parure', 'uprear'], + 'parvoline': ['overplain', 'parvoline'], + 'pasan': ['pasan', 'sapan'], + 'pasch': ['chaps', 'pasch'], + 'pascha': ['pascha', 'scapha'], + 'paschite': ['paschite', 'pastiche', 'pistache', 'scaphite'], + 'pascual': ['capsula', 'pascual', 'scapula'], + 'pash': ['hasp', 'pash', 'psha', 'shap'], + 'pasha': ['asaph', 'pasha'], + 'pashm': ['pashm', 'phasm'], + 'pashto': ['pashto', 'pathos', 'potash'], + 'pasi': ['apis', 'pais', 'pasi', 'saip'], + 'passer': ['passer', 'repass', 'sparse'], + 'passional': ['passional', 'sponsalia'], + 'passo': ['passo', 'psoas'], + 'passout': ['outpass', 'passout'], + 'passover': ['overpass', 'passover'], + 'past': ['past', 'spat', 'stap', 'taps'], + 'paste': ['paste', 'septa', 'spate'], + 'pastel': ['pastel', 'septal', 'staple'], + 'paster': ['paster', 'repast', 'trapes'], + 'pasterer': ['pasterer', 'strepera'], + 'pasteur': ['pasteur', 'pasture', 'upstare'], + 'pastiche': ['paschite', 'pastiche', 'pistache', 'scaphite'], + 'pasticheur': ['curateship', 'pasticheur'], + 'pastil': ['alpist', 'pastil', 'spital'], + 'pastile': ['aliptes', 'pastile', 'talipes'], + 'pastime': ['impaste', 'pastime'], + 'pastimer': ['maspiter', 'pastimer', 'primates'], + 'pastophorium': ['amphitropous', 'pastophorium'], + 'pastophorus': ['apostrophus', 'pastophorus'], + 'pastor': ['asport', 'pastor', 'sproat'], + 'pastoral': ['pastoral', 'proatlas'], + 'pastorale': ['parosteal', 'pastorale'], + 'pastose': ['pastose', 'petasos'], + 'pastural': ['pastural', 'spatular'], + 'pasture': ['pasteur', 'pasture', 'upstare'], + 'pasty': ['pasty', 'patsy'], + 'pasul': ['palus', 'pasul'], + 'pat': ['apt', 'pat', 'tap'], + 'pata': ['atap', 'pata', 'tapa'], + 'patao': ['opata', 'patao', 'tapoa'], + 'patarin': ['patarin', 'tarapin'], + 'patarine': ['patarine', 'tarpeian'], + 'patas': ['patas', 'tapas'], + 'patch': ['chapt', 'pacht', 'patch'], + 'patcher': ['chapter', 'patcher', 'repatch'], + 'patchery': ['patchery', 'petchary'], + 'pate': ['pate', 'peat', 'tape', 'teap'], + 'patel': ['leapt', + 'palet', + 'patel', + 'pelta', + 'petal', + 'plate', + 'pleat', + 'tepal'], + 'paten': ['enapt', 'paten', 'penta', 'tapen'], + 'patener': ['patener', 'pearten', 'petrean', 'terpane'], + 'patent': ['patent', 'patten', 'tapnet'], + 'pater': ['apert', 'pater', 'peart', 'prate', 'taper', 'terap'], + 'patera': ['aptera', 'parate', 'patera'], + 'paternal': ['parental', 'paternal', 'prenatal'], + 'paternalism': ['parentalism', 'paternalism'], + 'paternalist': ['intraseptal', 'paternalist', 'prenatalist'], + 'paternality': ['parentality', 'paternality'], + 'paternally': ['parentally', 'paternally', 'prenatally'], + 'paternoster': ['paternoster', 'prosternate', 'transportee'], + 'patesi': ['patesi', 'pietas'], + 'pathed': ['heptad', 'pathed'], + 'pathic': ['haptic', 'pathic'], + 'pathlet': ['pathlet', 'telpath'], + 'pathogen': ['heptagon', 'pathogen'], + 'pathologicoanatomic': ['anatomicopathologic', 'pathologicoanatomic'], + 'pathologicoanatomical': ['anatomicopathological', 'pathologicoanatomical'], + 'pathologicoclinical': ['clinicopathological', 'pathologicoclinical'], + 'pathonomy': ['monopathy', 'pathonomy'], + 'pathophoric': ['haptophoric', 'pathophoric'], + 'pathophorous': ['haptophorous', 'pathophorous'], + 'pathos': ['pashto', 'pathos', 'potash'], + 'pathy': ['pathy', 'typha'], + 'patiently': ['patiently', 'platynite'], + 'patina': ['aptian', 'patina', 'taipan'], + 'patine': 
['pantie', 'patine'], + 'patined': ['depaint', 'inadept', 'painted', 'patined'], + 'patio': ['patio', 'taipo', 'topia'], + 'patly': ['aptly', 'patly', 'platy', 'typal'], + 'patness': ['aptness', 'patness'], + 'pato': ['atop', 'pato'], + 'patola': ['patola', 'tapalo'], + 'patrial': ['partial', 'patrial'], + 'patriarch': ['patriarch', 'phratriac'], + 'patrice': ['paretic', 'patrice', 'picrate'], + 'patricide': ['dipicrate', 'patricide', 'pediatric'], + 'patrico': ['apricot', 'atropic', 'parotic', 'patrico'], + 'patrilocal': ['allopatric', 'patrilocal'], + 'patriotic': ['parotitic', 'patriotic'], + 'patroclinous': ['patroclinous', 'pratincolous'], + 'patrol': ['patrol', 'portal', 'tropal'], + 'patron': ['patron', 'tarpon'], + 'patroness': ['patroness', 'transpose'], + 'patronite': ['antitrope', 'patronite', 'tritanope'], + 'patronymic': ['importancy', 'patronymic', 'pyromantic'], + 'patsy': ['pasty', 'patsy'], + 'patte': ['patte', 'tapet'], + 'pattee': ['pattee', 'tapete'], + 'patten': ['patent', 'patten', 'tapnet'], + 'pattener': ['pattener', 'repatent'], + 'patterer': ['patterer', 'pretreat'], + 'pattern': ['pattern', 'reptant'], + 'patterner': ['patterner', 'repattern'], + 'patu': ['patu', 'paut', 'tapu'], + 'patulent': ['patulent', 'petulant'], + 'pau': ['pau', 'pua'], + 'paul': ['paul', 'upla'], + 'paula': ['palau', 'paula'], + 'paulian': ['apulian', 'paulian', 'paulina'], + 'paulie': ['alpieu', 'paulie'], + 'paulin': ['paulin', 'pulian'], + 'paulina': ['apulian', 'paulian', 'paulina'], + 'paulinistic': ['paniculitis', 'paulinistic'], + 'paulinus': ['nauplius', 'paulinus'], + 'paulist': ['paulist', 'stipula'], + 'paup': ['paup', 'pupa'], + 'paut': ['patu', 'paut', 'tapu'], + 'paver': ['paver', 'verpa'], + 'pavid': ['pavid', 'vapid'], + 'pavidity': ['pavidity', 'vapidity'], + 'pavier': ['pavier', 'vipera'], + 'pavisor': ['pavisor', 'proavis'], + 'paw': ['paw', 'wap'], + 'pawner': ['enwrap', 'pawner', 'repawn'], + 'pay': ['pay', 'pya', 'yap'], + 'payer': ['apery', 'payer', 'repay'], + 'payroll': ['payroll', 'polarly'], + 'pea': ['ape', 'pea'], + 'peach': ['chape', 'cheap', 'peach'], + 'peachen': ['cheapen', 'peachen'], + 'peachery': ['cheapery', 'peachery'], + 'peachlet': ['chapelet', 'peachlet'], + 'peacoat': ['opacate', 'peacoat'], + 'peag': ['gape', 'page', 'peag', 'pega'], + 'peaker': ['parkee', 'peaker'], + 'peal': ['leap', 'lepa', 'pale', 'peal', 'plea'], + 'pealike': ['apelike', 'pealike'], + 'pean': ['nape', 'neap', 'nepa', 'pane', 'pean'], + 'pear': ['aper', 'pare', 'pear', 'rape', 'reap'], + 'pearl': ['lepra', 'paler', 'parel', 'parle', 'pearl', 'perla', 'relap'], + 'pearled': ['pearled', 'pedaler', 'pleader', 'replead'], + 'pearlet': ['pearlet', 'pleater', 'prelate', 'ptereal', 'replate', 'repleat'], + 'pearlin': ['pearlin', 'plainer', 'praline'], + 'pearlish': ['earlship', 'pearlish'], + 'pearlsides': ['displeaser', 'pearlsides'], + 'pearly': ['parley', 'pearly', 'player', 'replay'], + 'pearmain': ['amperian', 'paramine', 'pearmain'], + 'peart': ['apert', 'pater', 'peart', 'prate', 'taper', 'terap'], + 'pearten': ['patener', 'pearten', 'petrean', 'terpane'], + 'peartly': ['apertly', 'peartly', 'platery', 'pteryla', 'taperly'], + 'peartness': ['apertness', 'peartness', 'taperness'], + 'peasantry': ['peasantry', 'synaptera'], + 'peat': ['pate', 'peat', 'tape', 'teap'], + 'peatman': ['peatman', 'tapeman'], + 'peatship': ['happiest', 'peatship'], + 'peccation': ['acception', 'peccation'], + 'peckerwood': ['peckerwood', 'woodpecker'], + 'pecos': ['copse', 'pecos', 'scope'], + 
'pectin': ['incept', 'pectin'], + 'pectinate': ['pectinate', 'pencatite'], + 'pectination': ['antinepotic', 'pectination'], + 'pectinatopinnate': ['pectinatopinnate', 'pinnatopectinate'], + 'pectinoid': ['depiction', 'pectinoid'], + 'pectora': ['coperta', 'pectora', 'porcate'], + 'pecunious': ['pecunious', 'puniceous'], + 'peda': ['depa', 'peda'], + 'pedal': ['padle', 'paled', 'pedal', 'plead'], + 'pedaler': ['pearled', 'pedaler', 'pleader', 'replead'], + 'pedalier': ['pedalier', 'perlidae'], + 'pedalion': ['lapideon', 'palinode', 'pedalion'], + 'pedalism': ['misplead', 'pedalism'], + 'pedalist': ['dispetal', 'pedalist'], + 'pedaliter': ['pedaliter', 'predetail'], + 'pedant': ['pedant', 'pentad'], + 'pedantess': ['adeptness', 'pedantess'], + 'pedantic': ['pedantic', 'pentacid'], + 'pedary': ['pedary', 'preday'], + 'pederastic': ['discrepate', 'pederastic'], + 'pedes': ['pedes', 'speed'], + 'pedesis': ['despise', 'pedesis'], + 'pedestrial': ['pedestrial', 'pilastered'], + 'pediatric': ['dipicrate', 'patricide', 'pediatric'], + 'pedicel': ['pedicel', 'pedicle'], + 'pedicle': ['pedicel', 'pedicle'], + 'pedicular': ['crepidula', 'pedicular'], + 'pediculi': ['lupicide', 'pediculi', 'pulicide'], + 'pedimana': ['pandemia', 'pedimana'], + 'pedocal': ['lacepod', 'pedocal', 'placode'], + 'pedometrician': ['pedometrician', 'premedication'], + 'pedrail': ['pedrail', 'predial'], + 'pedro': ['doper', 'pedro', 'pored'], + 'peed': ['deep', 'peed'], + 'peek': ['keep', 'peek'], + 'peel': ['leep', 'peel', 'pele'], + 'peelman': ['empanel', 'emplane', 'peelman'], + 'peen': ['neep', 'peen'], + 'peerly': ['peerly', 'yelper'], + 'pega': ['gape', 'page', 'peag', 'pega'], + 'peho': ['hope', 'peho'], + 'peiser': ['espier', 'peiser'], + 'peitho': ['ethiop', 'ophite', 'peitho'], + 'peixere': ['expiree', 'peixere'], + 'pekan': ['knape', 'pekan'], + 'pelage': ['paegel', 'paegle', 'pelage'], + 'pelasgoi': ['pelasgoi', 'spoilage'], + 'pele': ['leep', 'peel', 'pele'], + 'pelean': ['alpeen', 'lenape', 'pelean'], + 'pelecani': ['capeline', 'pelecani'], + 'pelecanus': ['encapsule', 'pelecanus'], + 'pelias': ['espial', 'lipase', 'pelias'], + 'pelican': ['calepin', 'capelin', 'panicle', 'pelican', 'pinacle'], + 'pelick': ['pelick', 'pickle'], + 'pelides': ['pelides', 'seedlip'], + 'pelidnota': ['pelidnota', 'planetoid'], + 'pelike': ['kelpie', 'pelike'], + 'pelisse': ['pelisse', 'pieless'], + 'pelite': ['leepit', 'pelite', 'pielet'], + 'pellation': ['pellation', 'pollinate'], + 'pellotine': ['pellotine', 'pollenite'], + 'pelmet': ['pelmet', 'temple'], + 'pelon': ['pelon', 'pleon'], + 'pelops': ['pelops', 'peplos'], + 'pelorian': ['pelorian', 'peronial', 'proalien'], + 'peloric': ['peloric', 'precoil'], + 'pelorus': ['leprous', 'pelorus', 'sporule'], + 'pelota': ['alepot', 'pelota'], + 'pelta': ['leapt', + 'palet', + 'patel', + 'pelta', + 'petal', + 'plate', + 'pleat', + 'tepal'], + 'peltandra': ['leptandra', 'peltandra'], + 'peltast': ['peltast', 'spattle'], + 'peltate': ['palette', 'peltate'], + 'peltation': ['peltation', 'potential'], + 'pelter': ['pelter', 'petrel'], + 'peltiform': ['leptiform', 'peltiform'], + 'pelting': ['pelting', 'petling'], + 'peltry': ['peltry', 'pertly'], + 'pelu': ['lupe', 'pelu', 'peul', 'pule'], + 'pelusios': ['epulosis', 'pelusios'], + 'pelycography': ['pelycography', 'pyrgocephaly'], + 'pemican': ['campine', 'pemican'], + 'pen': ['nep', 'pen'], + 'penal': ['alpen', 'nepal', 'panel', 'penal', 'plane'], + 'penalist': ['panelist', 'pantelis', 'penalist', 'plastein'], + 'penalty': ['aplenty', 
'penalty'], + 'penang': ['pangen', 'penang'], + 'penates': ['penates', 'septane'], + 'pencatite': ['pectinate', 'pencatite'], + 'penciled': ['depencil', 'penciled', 'pendicle'], + 'pencilry': ['pencilry', 'princely'], + 'penda': ['paned', 'penda'], + 'pendicle': ['depencil', 'penciled', 'pendicle'], + 'pendulant': ['pendulant', 'unplanted'], + 'pendular': ['pendular', 'underlap', 'uplander'], + 'pendulate': ['pendulate', 'unpleated'], + 'pendulation': ['pendulation', 'pennatuloid'], + 'pendulum': ['pendulum', 'unlumped', 'unplumed'], + 'penetrable': ['penetrable', 'repentable'], + 'penetrance': ['penetrance', 'repentance'], + 'penetrant': ['penetrant', 'repentant'], + 'penial': ['alpine', 'nepali', 'penial', 'pineal'], + 'penis': ['penis', 'snipe', 'spine'], + 'penitencer': ['penitencer', 'pertinence'], + 'penman': ['nepman', 'penman'], + 'penna': ['panne', 'penna'], + 'pennage': ['pangene', 'pennage'], + 'pennate': ['pennate', 'pentane'], + 'pennatulid': ['pennatulid', 'pinnulated'], + 'pennatuloid': ['pendulation', 'pennatuloid'], + 'pennia': ['nanpie', 'pennia', 'pinnae'], + 'pennisetum': ['pennisetum', 'septennium'], + 'pensioner': ['pensioner', 'repension'], + 'pensive': ['pensive', 'vespine'], + 'penster': ['penster', 'present', 'serpent', 'strepen'], + 'penta': ['enapt', 'paten', 'penta', 'tapen'], + 'pentace': ['pentace', 'tepanec'], + 'pentacid': ['pedantic', 'pentacid'], + 'pentad': ['pedant', 'pentad'], + 'pentadecoic': ['adenectopic', 'pentadecoic'], + 'pentail': ['pantile', 'pentail', 'platine', 'talpine'], + 'pentamerid': ['pandermite', 'pentamerid'], + 'pentameroid': ['pentameroid', 'predominate'], + 'pentane': ['pennate', 'pentane'], + 'pentaploid': ['deoppilant', 'pentaploid'], + 'pentathionic': ['antiphonetic', 'pentathionic'], + 'pentatomic': ['camptonite', 'pentatomic'], + 'pentitol': ['pentitol', 'pointlet'], + 'pentoic': ['entopic', 'nepotic', 'pentoic'], + 'pentol': ['lepton', 'pentol'], + 'pentose': ['pentose', 'posteen'], + 'pentyl': ['pentyl', 'plenty'], + 'penult': ['penult', 'punlet', 'puntel'], + 'peon': ['nope', 'open', 'peon', 'pone'], + 'peony': ['peony', 'poney'], + 'peopler': ['peopler', 'popeler'], + 'peorian': ['apeiron', 'peorian'], + 'peplos': ['pelops', 'peplos'], + 'peplum': ['peplum', 'pumple'], + 'peplus': ['peplus', 'supple'], + 'pepo': ['pepo', 'pope'], + 'per': ['per', 'rep'], + 'peracid': ['epacrid', 'peracid', 'preacid'], + 'peract': ['carpet', 'peract', 'preact'], + 'peracute': ['peracute', 'preacute'], + 'peradventure': ['peradventure', 'preadventure'], + 'perakim': ['perakim', 'permiak', 'rampike'], + 'peramble': ['peramble', 'preamble'], + 'perambulate': ['perambulate', 'preambulate'], + 'perambulation': ['perambulation', 'preambulation'], + 'perambulatory': ['perambulatory', 'preambulatory'], + 'perates': ['perates', 'repaste', 'sperate'], + 'perbend': ['perbend', 'prebend'], + 'perborate': ['perborate', 'prorebate', 'reprobate'], + 'perca': ['caper', 'crape', 'pacer', 'perca', 'recap'], + 'percale': ['percale', 'replace'], + 'percaline': ['percaline', 'periclean'], + 'percent': ['percent', 'precent'], + 'percept': ['percept', 'precept'], + 'perception': ['perception', 'preception'], + 'perceptionism': ['misperception', 'perceptionism'], + 'perceptive': ['perceptive', 'preceptive'], + 'perceptively': ['perceptively', 'preceptively'], + 'perceptual': ['perceptual', 'preceptual'], + 'perceptually': ['perceptually', 'preceptually'], + 'percha': ['aperch', 'eparch', 'percha', 'preach'], + 'perchloric': ['perchloric', 'prechloric'], + 
'percid': ['percid', 'priced'], + 'perclose': ['perclose', 'preclose'], + 'percoidea': ['adipocere', 'percoidea'], + 'percolate': ['percolate', 'prelocate'], + 'percolation': ['neotropical', 'percolation'], + 'percompound': ['percompound', 'precompound'], + 'percontation': ['percontation', 'pernoctation'], + 'perculsion': ['perculsion', 'preclusion'], + 'perculsive': ['perculsive', 'preclusive'], + 'percurrent': ['percurrent', 'precurrent'], + 'percursory': ['percursory', 'precursory'], + 'percussion': ['croupiness', 'percussion', 'supersonic'], + 'percussioner': ['percussioner', 'repercussion'], + 'percussor': ['percussor', 'procuress'], + 'percy': ['crepy', 'cypre', 'percy'], + 'perdicine': ['perdicine', 'recipiend'], + 'perdition': ['direption', 'perdition', 'tropidine'], + 'perdu': ['drupe', 'duper', 'perdu', 'prude', 'pured'], + 'peregrina': ['peregrina', 'pregainer'], + 'pereion': ['pereion', 'pioneer'], + 'perendure': ['perendure', 'underpeer'], + 'peres': ['peres', 'perse', 'speer', 'spree'], + 'perfect': ['perfect', 'prefect'], + 'perfected': ['perfected', 'predefect'], + 'perfection': ['frontpiece', 'perfection'], + 'perfectly': ['perfectly', 'prefectly'], + 'perfervid': ['perfervid', 'prefervid'], + 'perfoliation': ['perfoliation', 'prefoliation'], + 'perforative': ['perforative', 'prefavorite'], + 'perform': ['perform', 'preform'], + 'performant': ['performant', 'preformant'], + 'performative': ['performative', 'preformative'], + 'performer': ['performer', 'prereform', 'reperform'], + 'pergamic': ['crimpage', 'pergamic'], + 'perhaps': ['perhaps', 'prehaps'], + 'perhazard': ['perhazard', 'prehazard'], + 'peri': ['peri', 'pier', 'ripe'], + 'periacinal': ['epicranial', 'periacinal'], + 'perianal': ['airplane', 'perianal'], + 'perianth': ['perianth', 'triphane'], + 'periapt': ['periapt', 'rappite'], + 'periaster': ['periaster', 'sparterie'], + 'pericardiacophrenic': ['pericardiacophrenic', 'phrenicopericardiac'], + 'pericardiopleural': ['pericardiopleural', 'pleuropericardial'], + 'perichete': ['perichete', 'perithece'], + 'periclase': ['episclera', 'periclase'], + 'periclean': ['percaline', 'periclean'], + 'pericles': ['eclipser', 'pericles', 'resplice'], + 'pericopal': ['pericopal', 'periploca'], + 'periculant': ['periculant', 'unprelatic'], + 'peridental': ['interplead', 'peridental'], + 'peridiastolic': ['peridiastolic', 'periodicalist', 'proidealistic'], + 'peridot': ['diopter', 'peridot', 'proetid', 'protide', 'pteroid'], + 'perigone': ['perigone', 'pigeoner'], + 'peril': ['peril', 'piler', 'plier'], + 'perilous': ['perilous', 'uropsile'], + 'perimeter': ['perimeter', 'peritreme'], + 'perine': ['neiper', 'perine', 'pirene', 'repine'], + 'perineovaginal': ['perineovaginal', 'vaginoperineal'], + 'periodate': ['periodate', 'proetidae', 'proteidae'], + 'periodicalist': ['peridiastolic', 'periodicalist', 'proidealistic'], + 'periodontal': ['deploration', 'periodontal'], + 'periost': ['periost', 'porites', 'reposit', 'riposte'], + 'periosteal': ['periosteal', 'praseolite'], + 'periotic': ['epirotic', 'periotic'], + 'peripatetic': ['peripatetic', 'precipitate'], + 'peripatidae': ['peripatidae', 'peripatidea'], + 'peripatidea': ['peripatidae', 'peripatidea'], + 'periplaneta': ['periplaneta', 'prepalatine'], + 'periploca': ['pericopal', 'periploca'], + 'periplus': ['periplus', 'supplier'], + 'periportal': ['periportal', 'peritropal'], + 'periproct': ['cotripper', 'periproct'], + 'peripterous': ['peripterous', 'prepositure'], + 'perique': ['perique', 'repique'], + 'perirectal': 
['perirectal', 'prerecital'], + 'periscian': ['periscian', 'precisian'], + 'periscopal': ['periscopal', 'sapropelic'], + 'perish': ['perish', 'reship'], + 'perished': ['hesperid', 'perished'], + 'perishment': ['perishment', 'reshipment'], + 'perisomal': ['perisomal', 'semipolar'], + 'perisome': ['perisome', 'promisee', 'reimpose'], + 'perispome': ['perispome', 'preimpose'], + 'peristole': ['epistoler', 'peristole', 'perseitol', 'pistoleer'], + 'peristoma': ['epistroma', 'peristoma'], + 'peristomal': ['peristomal', 'prestomial'], + 'peristylos': ['peristylos', 'pterylosis'], + 'perit': ['perit', 'retip', 'tripe'], + 'perite': ['perite', 'petrie', 'pieter'], + 'peritenon': ['interpone', 'peritenon', 'pinnotere', 'preintone'], + 'perithece': ['perichete', 'perithece'], + 'peritomize': ['epitomizer', 'peritomize'], + 'peritomous': ['outpromise', 'peritomous'], + 'peritreme': ['perimeter', 'peritreme'], + 'peritrichous': ['courtiership', 'peritrichous'], + 'peritroch': ['chiropter', 'peritroch'], + 'peritropal': ['periportal', 'peritropal'], + 'peritropous': ['peritropous', 'proprietous'], + 'perkin': ['perkin', 'pinker'], + 'perknite': ['perknite', 'peterkin'], + 'perla': ['lepra', 'paler', 'parel', 'parle', 'pearl', 'perla', 'relap'], + 'perle': ['leper', 'perle', 'repel'], + 'perlection': ['perlection', 'prelection'], + 'perlidae': ['pedalier', 'perlidae'], + 'perlingual': ['perlingual', 'prelingual'], + 'perlite': ['perlite', 'reptile'], + 'perlitic': ['perlitic', 'triplice'], + 'permeameter': ['amperemeter', 'permeameter'], + 'permeance': ['permeance', 'premenace'], + 'permeant': ['permeant', 'peterman'], + 'permeation': ['permeation', 'preominate'], + 'permiak': ['perakim', 'permiak', 'rampike'], + 'permissibility': ['impressibility', 'permissibility'], + 'permissible': ['impressible', 'permissible'], + 'permissibleness': ['impressibleness', 'permissibleness'], + 'permissibly': ['impressibly', 'permissibly'], + 'permission': ['impression', 'permission'], + 'permissive': ['impressive', 'permissive'], + 'permissively': ['impressively', 'permissively'], + 'permissiveness': ['impressiveness', 'permissiveness'], + 'permitter': ['permitter', 'pretermit'], + 'permixture': ['permixture', 'premixture'], + 'permonosulphuric': ['monopersulphuric', 'permonosulphuric'], + 'permutation': ['importunate', 'permutation'], + 'pernasal': ['pernasal', 'prenasal'], + 'pernis': ['pernis', 'respin', 'sniper'], + 'pernoctation': ['percontation', 'pernoctation'], + 'pernor': ['pernor', 'perron'], + 'pernyi': ['pernyi', 'pinery'], + 'peronial': ['pelorian', 'peronial', 'proalien'], + 'peropus': ['peropus', 'purpose'], + 'peroral': ['peroral', 'preoral'], + 'perorally': ['perorally', 'preorally'], + 'perorate': ['perorate', 'retepora'], + 'perosmate': ['perosmate', 'sematrope'], + 'perosmic': ['comprise', 'perosmic'], + 'perotic': ['perotic', 'proteic', 'tropeic'], + 'perpera': ['paperer', 'perpera', 'prepare', 'repaper'], + 'perpetualist': ['perpetualist', 'pluriseptate'], + 'perplexer': ['perplexer', 'reperplex'], + 'perron': ['pernor', 'perron'], + 'perry': ['perry', 'pryer'], + 'persae': ['parsee', 'persae', 'persea', 'serape'], + 'persalt': ['palster', 'persalt', 'plaster', 'psalter', 'spartle', 'stapler'], + 'perscribe': ['perscribe', 'prescribe'], + 'perse': ['peres', 'perse', 'speer', 'spree'], + 'persea': ['parsee', 'persae', 'persea', 'serape'], + 'perseid': ['perseid', 'preside'], + 'perseitol': ['epistoler', 'peristole', 'perseitol', 'pistoleer'], + 'perseity': ['perseity', 'speerity'], + 'persian': 
['persian', 'prasine', 'saprine'], + 'persic': ['crepis', 'cripes', 'persic', 'precis', 'spicer'], + 'persico': ['ceriops', 'persico'], + 'persism': ['impress', 'persism', 'premiss'], + 'persist': ['persist', 'spriest'], + 'persistent': ['persistent', 'presentist', 'prettiness'], + 'personalia': ['palaeornis', 'personalia'], + 'personalistic': ['personalistic', 'pictorialness'], + 'personate': ['esperanto', 'personate'], + 'personed': ['personed', 'responde'], + 'pert': ['pert', 'petr', 'terp'], + 'pertain': ['painter', 'pertain', 'pterian', 'repaint'], + 'perten': ['perten', 'repent'], + 'perthite': ['perthite', 'tephrite'], + 'perthitic': ['perthitic', 'tephritic'], + 'pertinacity': ['antipyretic', 'pertinacity'], + 'pertinence': ['penitencer', 'pertinence'], + 'pertly': ['peltry', 'pertly'], + 'perturbational': ['perturbational', 'protuberantial'], + 'pertussal': ['pertussal', 'supersalt'], + 'perty': ['perty', 'typer'], + 'peru': ['peru', 'prue', 'pure'], + 'perugian': ['pagurine', 'perugian'], + 'peruke': ['keuper', 'peruke'], + 'perula': ['epural', 'perula', 'pleura'], + 'perun': ['perun', 'prune'], + 'perusable': ['perusable', 'superable'], + 'perusal': ['perusal', 'serpula'], + 'peruse': ['peruse', 'respue'], + 'pervade': ['deprave', 'pervade'], + 'pervader': ['depraver', 'pervader'], + 'pervadingly': ['depravingly', 'pervadingly'], + 'perverse': ['perverse', 'preserve'], + 'perversion': ['perversion', 'preversion'], + 'pervious': ['pervious', 'previous', 'viperous'], + 'perviously': ['perviously', 'previously', 'viperously'], + 'perviousness': ['perviousness', 'previousness', 'viperousness'], + 'pesa': ['apse', 'pesa', 'spae'], + 'pesach': ['cephas', 'pesach'], + 'pesah': ['heaps', 'pesah', 'phase', 'shape'], + 'peseta': ['asteep', 'peseta'], + 'peso': ['epos', 'peso', 'pose', 'sope'], + 'pess': ['pess', 'seps'], + 'pessoner': ['pessoner', 'response'], + 'pest': ['pest', 'sept', 'spet', 'step'], + 'peste': ['peste', 'steep'], + 'pester': ['pester', 'preset', 'restep', 'streep'], + 'pesthole': ['heelpost', 'pesthole'], + 'pesticidal': ['pesticidal', 'septicidal'], + 'pestiferous': ['pestiferous', 'septiferous'], + 'pestle': ['pestle', 'spleet'], + 'petal': ['leapt', + 'palet', + 'patel', + 'pelta', + 'petal', + 'plate', + 'pleat', + 'tepal'], + 'petalia': ['palaite', 'petalia', 'pileata'], + 'petaline': ['petaline', 'tapeline'], + 'petalism': ['petalism', 'septimal'], + 'petalless': ['petalless', 'plateless', 'pleatless'], + 'petallike': ['petallike', 'platelike'], + 'petaloid': ['opdalite', 'petaloid'], + 'petalon': ['lepanto', 'nepotal', 'petalon', 'polenta'], + 'petard': ['depart', 'parted', 'petard'], + 'petary': ['petary', 'pratey'], + 'petasos': ['pastose', 'petasos'], + 'petchary': ['patchery', 'petchary'], + 'petechial': ['epithecal', 'petechial', 'phacelite'], + 'petechiate': ['epithecate', 'petechiate'], + 'peteman': ['peteman', 'tempean'], + 'peter': ['erept', 'peter', 'petre'], + 'peterkin': ['perknite', 'peterkin'], + 'peterman': ['permeant', 'peterman'], + 'petiolary': ['epilatory', 'petiolary'], + 'petiole': ['petiole', 'pilotee'], + 'petioled': ['lepidote', 'petioled'], + 'petitionary': ['opiniatrety', 'petitionary'], + 'petitioner': ['petitioner', 'repetition'], + 'petiveria': ['aperitive', 'petiveria'], + 'petling': ['pelting', 'petling'], + 'peto': ['peto', 'poet', 'pote', 'tope'], + 'petr': ['pert', 'petr', 'terp'], + 'petre': ['erept', 'peter', 'petre'], + 'petrea': ['petrea', 'repeat', 'retape'], + 'petrean': ['patener', 'pearten', 'petrean', 'terpane'], + 
'petrel': ['pelter', 'petrel'], + 'petricola': ['carpolite', 'petricola'], + 'petrie': ['perite', 'petrie', 'pieter'], + 'petrine': ['petrine', 'terpine'], + 'petrochemical': ['cephalometric', 'petrochemical'], + 'petrogale': ['petrogale', 'petrolage', 'prolegate'], + 'petrographer': ['petrographer', 'pterographer'], + 'petrographic': ['petrographic', 'pterographic'], + 'petrographical': ['petrographical', 'pterographical'], + 'petrographically': ['petrographically', 'pterylographical'], + 'petrography': ['petrography', 'pterography', 'typographer'], + 'petrol': ['petrol', 'replot'], + 'petrolage': ['petrogale', 'petrolage', 'prolegate'], + 'petrolean': ['petrolean', 'rantepole'], + 'petrolic': ['petrolic', 'plerotic'], + 'petrologically': ['petrologically', 'pterylological'], + 'petrosa': ['esparto', 'petrosa', 'seaport'], + 'petrosal': ['petrosal', 'polestar'], + 'petrosquamosal': ['petrosquamosal', 'squamopetrosal'], + 'petrous': ['petrous', 'posture', 'proetus', 'proteus', 'septuor', 'spouter'], + 'petticoated': ['depetticoat', 'petticoated'], + 'petulant': ['patulent', 'petulant'], + 'petune': ['neetup', 'petune'], + 'peul': ['lupe', 'pelu', 'peul', 'pule'], + 'pewy': ['pewy', 'wype'], + 'peyote': ['peyote', 'poteye'], + 'peyotl': ['peyotl', 'poetly'], + 'phacelia': ['acephali', 'phacelia'], + 'phacelite': ['epithecal', 'petechial', 'phacelite'], + 'phacoid': ['dapicho', 'phacoid'], + 'phacolite': ['hopcalite', 'phacolite'], + 'phacolysis': ['pachylosis', 'phacolysis'], + 'phacometer': ['pachometer', 'phacometer'], + 'phaethonic': ['phaethonic', 'theophanic'], + 'phaeton': ['phaeton', 'phonate'], + 'phagocytism': ['mycophagist', 'phagocytism'], + 'phalaecian': ['acephalina', 'phalaecian'], + 'phalangium': ['gnaphalium', 'phalangium'], + 'phalera': ['phalera', 'raphael'], + 'phanariot': ['parathion', 'phanariot'], + 'phanerogam': ['anemograph', 'phanerogam'], + 'phanerogamic': ['anemographic', 'phanerogamic'], + 'phanerogamy': ['anemography', 'phanerogamy'], + 'phanic': ['apinch', 'chapin', 'phanic'], + 'phano': ['phano', 'pohna'], + 'pharaonic': ['anaphoric', 'pharaonic'], + 'pharaonical': ['anaphorical', 'pharaonical'], + 'phare': ['hepar', 'phare', 'raphe'], + 'pharian': ['pharian', 'piranha'], + 'pharisaic': ['chirapsia', 'pharisaic'], + 'pharisean': ['pharisean', 'seraphina'], + 'pharisee': ['hesperia', 'pharisee'], + 'phariseeism': ['hemiparesis', 'phariseeism'], + 'pharmacolite': ['metaphorical', 'pharmacolite'], + 'pharyngolaryngeal': ['laryngopharyngeal', 'pharyngolaryngeal'], + 'pharyngolaryngitis': ['laryngopharyngitis', 'pharyngolaryngitis'], + 'pharyngorhinitis': ['pharyngorhinitis', 'rhinopharyngitis'], + 'phase': ['heaps', 'pesah', 'phase', 'shape'], + 'phaseless': ['phaseless', 'shapeless'], + 'phaseolin': ['esiphonal', 'phaseolin'], + 'phasis': ['aspish', 'phasis'], + 'phasm': ['pashm', 'phasm'], + 'phasmid': ['dampish', 'madship', 'phasmid'], + 'phasmoid': ['phasmoid', 'shopmaid'], + 'pheal': ['aleph', 'pheal'], + 'pheasant': ['pheasant', 'stephana'], + 'phecda': ['chaped', 'phecda'], + 'phemie': ['imphee', 'phemie'], + 'phenacite': ['phenacite', 'phenicate'], + 'phenate': ['haptene', 'heptane', 'phenate'], + 'phenetole': ['phenetole', 'telephone'], + 'phenic': ['phenic', 'pinche'], + 'phenicate': ['phenacite', 'phenicate'], + 'phenolic': ['phenolic', 'pinochle'], + 'phenological': ['nephological', 'phenological'], + 'phenologist': ['nephologist', 'phenologist'], + 'phenology': ['nephology', 'phenology'], + 'phenosal': ['alphonse', 'phenosal'], + 'pheon': ['pheon', 
'phone'], + 'phi': ['hip', 'phi'], + 'phialide': ['hepialid', 'phialide'], + 'philobotanist': ['botanophilist', 'philobotanist'], + 'philocynic': ['cynophilic', 'philocynic'], + 'philohela': ['halophile', 'philohela'], + 'philoneism': ['neophilism', 'philoneism'], + 'philopoet': ['philopoet', 'photopile'], + 'philotheist': ['philotheist', 'theophilist'], + 'philotherian': ['lithonephria', 'philotherian'], + 'philozoic': ['philozoic', 'zoophilic'], + 'philozoist': ['philozoist', 'zoophilist'], + 'philter': ['philter', 'thripel'], + 'phineas': ['inphase', 'phineas'], + 'phiroze': ['orphize', 'phiroze'], + 'phit': ['phit', 'pith'], + 'phlebometritis': ['metrophlebitis', 'phlebometritis'], + 'phleum': ['phleum', 'uphelm'], + 'phloretic': ['phloretic', 'plethoric'], + 'pho': ['hop', 'pho', 'poh'], + 'phobism': ['mobship', 'phobism'], + 'phoca': ['chopa', 'phoca', 'poach'], + 'phocaean': ['phocaean', 'phocaena'], + 'phocaena': ['phocaean', 'phocaena'], + 'phocaenine': ['phocaenine', 'phoenicean'], + 'phocean': ['copehan', 'panoche', 'phocean'], + 'phocian': ['aphonic', 'phocian'], + 'phocine': ['chopine', 'phocine'], + 'phoenicean': ['phocaenine', 'phoenicean'], + 'pholad': ['adolph', 'pholad'], + 'pholas': ['alphos', 'pholas'], + 'pholcidae': ['cephaloid', 'pholcidae'], + 'pholcoid': ['chilopod', 'pholcoid'], + 'phonate': ['phaeton', 'phonate'], + 'phone': ['pheon', 'phone'], + 'phonelescope': ['nepheloscope', 'phonelescope'], + 'phonetical': ['pachnolite', 'phonetical'], + 'phonetics': ['phonetics', 'sphenotic'], + 'phoniatry': ['phoniatry', 'thiopyran'], + 'phonic': ['chopin', 'phonic'], + 'phonogram': ['monograph', 'nomograph', 'phonogram'], + 'phonogramic': ['gramophonic', 'monographic', 'nomographic', 'phonogramic'], + 'phonogramically': ['gramophonically', + 'monographically', + 'nomographically', + 'phonogramically'], + 'phonographic': ['graphophonic', 'phonographic'], + 'phonolite': ['lithopone', 'phonolite'], + 'phonometer': ['nephrotome', 'phonometer'], + 'phonometry': ['nephrotomy', 'phonometry'], + 'phonophote': ['phonophote', 'photophone'], + 'phonotyper': ['hypopteron', 'phonotyper'], + 'phoo': ['hoop', 'phoo', 'pooh'], + 'phorone': ['orpheon', 'phorone'], + 'phoronidea': ['phoronidea', 'radiophone'], + 'phos': ['phos', 'posh', 'shop', 'soph'], + 'phosphatide': ['diphosphate', 'phosphatide'], + 'phosphoglycerate': ['glycerophosphate', 'phosphoglycerate'], + 'phossy': ['hyssop', 'phossy', 'sposhy'], + 'phot': ['phot', 'toph'], + 'photechy': ['hypothec', 'photechy'], + 'photochromography': ['chromophotography', 'photochromography'], + 'photochromolithograph': ['chromophotolithograph', 'photochromolithograph'], + 'photochronograph': ['chronophotograph', 'photochronograph'], + 'photochronographic': ['chronophotographic', 'photochronographic'], + 'photochronography': ['chronophotography', 'photochronography'], + 'photogram': ['motograph', 'photogram'], + 'photographer': ['photographer', 'rephotograph'], + 'photoheliography': ['heliophotography', 'photoheliography'], + 'photolithography': ['lithophotography', 'photolithography'], + 'photomacrograph': ['macrophotograph', 'photomacrograph'], + 'photometer': ['photometer', 'prototheme'], + 'photomicrograph': ['microphotograph', 'photomicrograph'], + 'photomicrographic': ['microphotographic', 'photomicrographic'], + 'photomicrography': ['microphotography', 'photomicrography'], + 'photomicroscope': ['microphotoscope', 'photomicroscope'], + 'photophone': ['phonophote', 'photophone'], + 'photopile': ['philopoet', 'photopile'], + 
'photostereograph': ['photostereograph', 'stereophotograph'], + 'phototelegraph': ['phototelegraph', 'telephotograph'], + 'phototelegraphic': ['phototelegraphic', 'telephotographic'], + 'phototelegraphy': ['phototelegraphy', 'telephotography'], + 'phototypography': ['phototypography', 'phytotopography'], + 'phrase': ['phrase', 'seraph', 'shaper', 'sherpa'], + 'phraser': ['phraser', 'sharper'], + 'phrasing': ['harpings', 'phrasing'], + 'phrasy': ['phrasy', 'sharpy'], + 'phratriac': ['patriarch', 'phratriac'], + 'phreatic': ['chapiter', 'phreatic'], + 'phrenesia': ['hesperian', 'phrenesia', 'seraphine'], + 'phrenic': ['nephric', 'phrenic', 'pincher'], + 'phrenicopericardiac': ['pericardiacophrenic', 'phrenicopericardiac'], + 'phrenics': ['phrenics', 'pinscher'], + 'phrenitic': ['nephritic', 'phrenitic', 'prehnitic'], + 'phrenitis': ['inspreith', 'nephritis', 'phrenitis'], + 'phrenocardiac': ['nephrocardiac', 'phrenocardiac'], + 'phrenocolic': ['nephrocolic', 'phrenocolic'], + 'phrenocostal': ['phrenocostal', 'plastochrone'], + 'phrenogastric': ['gastrophrenic', 'nephrogastric', 'phrenogastric'], + 'phrenohepatic': ['hepatonephric', 'phrenohepatic'], + 'phrenologist': ['nephrologist', 'phrenologist'], + 'phrenology': ['nephrology', 'phrenology'], + 'phrenopathic': ['nephropathic', 'phrenopathic'], + 'phrenopathy': ['nephropathy', 'phrenopathy'], + 'phrenosplenic': ['phrenosplenic', 'splenonephric', 'splenophrenic'], + 'phronesis': ['nephrosis', 'phronesis'], + 'phronimidae': ['diamorphine', 'phronimidae'], + 'phthalazine': ['naphthalize', 'phthalazine'], + 'phu': ['hup', 'phu'], + 'phycitol': ['cytophil', 'phycitol'], + 'phycocyanin': ['cyanophycin', 'phycocyanin'], + 'phyla': ['haply', 'phyla'], + 'phyletic': ['heptylic', 'phyletic'], + 'phylloceras': ['hyposcleral', 'phylloceras'], + 'phylloclad': ['cladophyll', 'phylloclad'], + 'phyllodial': ['phyllodial', 'phylloidal'], + 'phylloerythrin': ['erythrophyllin', 'phylloerythrin'], + 'phylloidal': ['phyllodial', 'phylloidal'], + 'phyllopodous': ['phyllopodous', 'podophyllous'], + 'phyma': ['phyma', 'yamph'], + 'phymatodes': ['desmopathy', 'phymatodes'], + 'phymosia': ['hyposmia', 'phymosia'], + 'physa': ['physa', 'shapy'], + 'physalite': ['physalite', 'styphelia'], + 'physic': ['physic', 'scyphi'], + 'physicochemical': ['chemicophysical', 'physicochemical'], + 'physicomedical': ['medicophysical', 'physicomedical'], + 'physiocrat': ['physiocrat', 'psychotria'], + 'physiologicoanatomic': ['anatomicophysiologic', 'physiologicoanatomic'], + 'physiopsychological': ['physiopsychological', 'psychophysiological'], + 'physiopsychology': ['physiopsychology', 'psychophysiology'], + 'phytic': ['phytic', 'pitchy', 'pythic', 'typhic'], + 'phytogenesis': ['phytogenesis', 'pythogenesis'], + 'phytogenetic': ['phytogenetic', 'pythogenetic'], + 'phytogenic': ['phytogenic', 'pythogenic', 'typhogenic'], + 'phytogenous': ['phytogenous', 'pythogenous'], + 'phytoid': ['phytoid', 'typhoid'], + 'phytologist': ['hypoglottis', 'phytologist'], + 'phytometer': ['phytometer', 'thermotype'], + 'phytometric': ['phytometric', 'thermotypic'], + 'phytometry': ['phytometry', 'thermotypy'], + 'phytomonas': ['phytomonas', 'somnopathy'], + 'phyton': ['phyton', 'python'], + 'phytonic': ['hypnotic', 'phytonic', 'pythonic', 'typhonic'], + 'phytosis': ['phytosis', 'typhosis'], + 'phytotopography': ['phototypography', 'phytotopography'], + 'phytozoa': ['phytozoa', 'zoopathy', 'zoophyta'], + 'piacle': ['epical', 'piacle', 'plaice'], + 'piacular': ['apicular', 'piacular'], + 'pial': 
['lipa', 'pail', 'pali', 'pial'], + 'pialyn': ['alypin', 'pialyn'], + 'pian': ['nipa', 'pain', 'pani', 'pian', 'pina'], + 'pianiste': ['pianiste', 'pisanite'], + 'piannet': ['piannet', 'pinnate'], + 'pianola': ['opalina', 'pianola'], + 'piaroa': ['aporia', 'piaroa'], + 'piast': ['piast', 'stipa', 'tapis'], + 'piaster': ['piaster', 'piastre', 'raspite', 'spirate', 'traipse'], + 'piastre': ['piaster', 'piastre', 'raspite', 'spirate', 'traipse'], + 'picador': ['parodic', 'picador'], + 'picae': ['picae', 'picea'], + 'pical': ['pical', 'plica'], + 'picard': ['caprid', 'carpid', 'picard'], + 'picarel': ['caliper', 'picarel', 'replica'], + 'picary': ['cypria', 'picary', 'piracy'], + 'pice': ['epic', 'pice'], + 'picea': ['picae', 'picea'], + 'picene': ['picene', 'piecen'], + 'picker': ['picker', 'repick'], + 'pickle': ['pelick', 'pickle'], + 'pickler': ['pickler', 'prickle'], + 'pickover': ['overpick', 'pickover'], + 'picktooth': ['picktooth', 'toothpick'], + 'pico': ['cipo', 'pico'], + 'picot': ['optic', 'picot', 'topic'], + 'picotah': ['aphotic', 'picotah'], + 'picra': ['capri', 'picra', 'rapic'], + 'picrate': ['paretic', 'patrice', 'picrate'], + 'pictography': ['graphotypic', 'pictography', 'typographic'], + 'pictorialness': ['personalistic', 'pictorialness'], + 'picture': ['cuprite', 'picture'], + 'picudilla': ['picudilla', 'pulicidal'], + 'pidan': ['pidan', 'pinda'], + 'piebald': ['bipedal', 'piebald'], + 'piebaldness': ['dispensable', 'piebaldness'], + 'piecen': ['picene', 'piecen'], + 'piecer': ['piecer', 'pierce', 'recipe'], + 'piecework': ['piecework', 'workpiece'], + 'piecrust': ['crepitus', 'piecrust'], + 'piedness': ['dispense', 'piedness'], + 'piegan': ['genipa', 'piegan'], + 'pieless': ['pelisse', 'pieless'], + 'pielet': ['leepit', 'pelite', 'pielet'], + 'piemag': ['magpie', 'piemag'], + 'pieman': ['impane', 'pieman'], + 'pien': ['pien', 'pine'], + 'piend': ['piend', 'pined'], + 'pier': ['peri', 'pier', 'ripe'], + 'pierce': ['piecer', 'pierce', 'recipe'], + 'piercent': ['piercent', 'prentice'], + 'piercer': ['piercer', 'reprice'], + 'pierlike': ['pierlike', 'ripelike'], + 'piet': ['piet', 'tipe'], + 'pietas': ['patesi', 'pietas'], + 'pieter': ['perite', 'petrie', 'pieter'], + 'pig': ['gip', 'pig'], + 'pigeoner': ['perigone', 'pigeoner'], + 'pigeontail': ['pigeontail', 'plagionite'], + 'pigly': ['gilpy', 'pigly'], + 'pignon': ['ningpo', 'pignon'], + 'pignorate': ['operating', 'pignorate'], + 'pigskin': ['pigskin', 'spiking'], + 'pigsney': ['gypsine', 'pigsney'], + 'pik': ['kip', 'pik'], + 'pika': ['paik', 'pika'], + 'pike': ['kepi', 'kipe', 'pike'], + 'pikel': ['pikel', 'pikle'], + 'piker': ['krepi', 'piker'], + 'pikle': ['pikel', 'pikle'], + 'pilage': ['paigle', 'pilage'], + 'pilar': ['april', 'pilar', 'ripal'], + 'pilaster': ['epistlar', 'pilaster', 'plaister', 'priestal'], + 'pilastered': ['pedestrial', 'pilastered'], + 'pilastric': ['pilastric', 'triplasic'], + 'pilate': ['aplite', 'pilate'], + 'pileata': ['palaite', 'petalia', 'pileata'], + 'pileate': ['epilate', 'epitela', 'pileate'], + 'pileated': ['depilate', 'leptidae', 'pileated'], + 'piled': ['piled', 'plied'], + 'piler': ['peril', 'piler', 'plier'], + 'piles': ['piles', 'plies', 'slipe', 'spiel', 'spile'], + 'pileus': ['epulis', 'pileus'], + 'pili': ['ipil', 'pili'], + 'pilin': ['lipin', 'pilin'], + 'pillarist': ['pillarist', 'pistillar'], + 'pillet': ['liplet', 'pillet'], + 'pilm': ['limp', 'pilm', 'plim'], + 'pilmy': ['imply', 'limpy', 'pilmy'], + 'pilosis': ['liposis', 'pilosis'], + 'pilotee': ['petiole', 'pilotee'], + 
'pilpai': ['lippia', 'pilpai'], + 'pilus': ['lupis', 'pilus'], + 'pim': ['imp', 'pim'], + 'pimelate': ['ampelite', 'pimelate'], + 'pimento': ['emption', 'pimento'], + 'pimenton': ['imponent', 'pimenton'], + 'pimola': ['lipoma', 'pimola', 'ploima'], + 'pimpish': ['impship', 'pimpish'], + 'pimplous': ['pimplous', 'pompilus', 'populism'], + 'pin': ['nip', 'pin'], + 'pina': ['nipa', 'pain', 'pani', 'pian', 'pina'], + 'pinaces': ['pinaces', 'pincase'], + 'pinachrome': ['epharmonic', 'pinachrome'], + 'pinacle': ['calepin', 'capelin', 'panicle', 'pelican', 'pinacle'], + 'pinacoid': ['diapnoic', 'pinacoid'], + 'pinal': ['lipan', 'pinal', 'plain'], + 'pinales': ['espinal', 'pinales', 'spaniel'], + 'pinaster': ['pinaster', 'pristane'], + 'pincase': ['pinaces', 'pincase'], + 'pincer': ['pincer', 'prince'], + 'pincerlike': ['pincerlike', 'princelike'], + 'pincers': ['encrisp', 'pincers'], + 'pinchbelly': ['bellypinch', 'pinchbelly'], + 'pinche': ['phenic', 'pinche'], + 'pincher': ['nephric', 'phrenic', 'pincher'], + 'pincushion': ['nuncioship', 'pincushion'], + 'pinda': ['pidan', 'pinda'], + 'pindari': ['pindari', 'pridian'], + 'pine': ['pien', 'pine'], + 'pineal': ['alpine', 'nepali', 'penial', 'pineal'], + 'pined': ['piend', 'pined'], + 'piner': ['piner', 'prine', 'repin', 'ripen'], + 'pinery': ['pernyi', 'pinery'], + 'pingler': ['pingler', 'pringle'], + 'pinhold': ['dolphin', 'pinhold'], + 'pinhole': ['lophine', 'pinhole'], + 'pinite': ['pinite', 'tiepin'], + 'pinker': ['perkin', 'pinker'], + 'pinking': ['kingpin', 'pinking'], + 'pinkish': ['kinship', 'pinkish'], + 'pinlock': ['lockpin', 'pinlock'], + 'pinnacle': ['pannicle', 'pinnacle'], + 'pinnae': ['nanpie', 'pennia', 'pinnae'], + 'pinnate': ['piannet', 'pinnate'], + 'pinnatopectinate': ['pectinatopinnate', 'pinnatopectinate'], + 'pinnet': ['pinnet', 'tenpin'], + 'pinnitarsal': ['intraspinal', 'pinnitarsal'], + 'pinnotere': ['interpone', 'peritenon', 'pinnotere', 'preintone'], + 'pinnothere': ['interphone', 'pinnothere'], + 'pinnula': ['pinnula', 'unplain'], + 'pinnulated': ['pennatulid', 'pinnulated'], + 'pinochle': ['phenolic', 'pinochle'], + 'pinole': ['pinole', 'pleion'], + 'pinolia': ['apiolin', 'pinolia'], + 'pinscher': ['phrenics', 'pinscher'], + 'pinta': ['inapt', 'paint', 'pinta'], + 'pintail': ['pintail', 'tailpin'], + 'pintano': ['opinant', 'pintano'], + 'pinte': ['inept', 'pinte'], + 'pinto': ['pinto', 'point'], + 'pintura': ['pintura', 'puritan', 'uptrain'], + 'pinulus': ['lupinus', 'pinulus'], + 'piny': ['piny', 'pyin'], + 'pinyl': ['pinyl', 'pliny'], + 'pioneer': ['pereion', 'pioneer'], + 'pioted': ['pioted', 'podite'], + 'pipa': ['paip', 'pipa'], + 'pipal': ['palpi', 'pipal'], + 'piperno': ['piperno', 'propine'], + 'pique': ['equip', 'pique'], + 'pir': ['pir', 'rip'], + 'piracy': ['cypria', 'picary', 'piracy'], + 'piranha': ['pharian', 'piranha'], + 'piratess': ['piratess', 'serapist', 'tarsipes'], + 'piratically': ['capillarity', 'piratically'], + 'piraty': ['parity', 'piraty'], + 'pirene': ['neiper', 'perine', 'pirene', 'repine'], + 'pirssonite': ['pirssonite', 'trispinose'], + 'pisaca': ['capias', 'pisaca'], + 'pisan': ['pisan', 'sapin', 'spina'], + 'pisanite': ['pianiste', 'pisanite'], + 'piscation': ['panoistic', 'piscation'], + 'piscatorial': ['paracolitis', 'piscatorial'], + 'piscian': ['panisic', 'piscian', 'piscina', 'sinapic'], + 'pisciferous': ['pisciferous', 'spiciferous'], + 'pisciform': ['pisciform', 'spiciform'], + 'piscina': ['panisic', 'piscian', 'piscina', 'sinapic'], + 'pisco': ['copis', 'pisco'], + 'pise': 
['pise', 'sipe'], + 'pish': ['pish', 'ship'], + 'pisk': ['pisk', 'skip'], + 'pisky': ['pisky', 'spiky'], + 'pismire': ['pismire', 'primsie'], + 'pisonia': ['pisonia', 'sinopia'], + 'pist': ['pist', 'spit'], + 'pistache': ['paschite', 'pastiche', 'pistache', 'scaphite'], + 'pistacite': ['epistatic', 'pistacite'], + 'pistareen': ['pistareen', 'sparteine'], + 'pistillar': ['pillarist', 'pistillar'], + 'pistillary': ['pistillary', 'spiritally'], + 'pistle': ['pistle', 'stipel'], + 'pistol': ['pistol', 'postil', 'spoilt'], + 'pistoleer': ['epistoler', 'peristole', 'perseitol', 'pistoleer'], + 'pit': ['pit', 'tip'], + 'pita': ['atip', 'pita'], + 'pitapat': ['apitpat', 'pitapat'], + 'pitarah': ['pitarah', 'taphria'], + 'pitcher': ['pitcher', 'repitch'], + 'pitchout': ['outpitch', 'pitchout'], + 'pitchy': ['phytic', 'pitchy', 'pythic', 'typhic'], + 'pith': ['phit', 'pith'], + 'pithecan': ['haptenic', 'pantheic', 'pithecan'], + 'pithole': ['hoplite', 'pithole'], + 'pithsome': ['mephisto', 'pithsome'], + 'pitless': ['pitless', 'tipless'], + 'pitman': ['pitman', 'tampin', 'tipman'], + 'pittancer': ['crepitant', 'pittancer'], + 'pittoid': ['pittoid', 'poditti'], + 'pityroid': ['pityroid', 'pyritoid'], + 'pivoter': ['overtip', 'pivoter'], + 'placate': ['epactal', 'placate'], + 'placation': ['pactional', 'pactolian', 'placation'], + 'place': ['capel', 'place'], + 'placebo': ['copable', 'placebo'], + 'placentalia': ['analeptical', 'placentalia'], + 'placentoma': ['complanate', 'placentoma'], + 'placer': ['carpel', 'parcel', 'placer'], + 'placode': ['lacepod', 'pedocal', 'placode'], + 'placoid': ['placoid', 'podalic', 'podical'], + 'placus': ['cuspal', 'placus'], + 'plagionite': ['pigeontail', 'plagionite'], + 'plague': ['plague', 'upgale'], + 'plaguer': ['earplug', 'graupel', 'plaguer'], + 'plaice': ['epical', 'piacle', 'plaice'], + 'plaid': ['alpid', 'plaid'], + 'plaidy': ['adipyl', 'plaidy'], + 'plain': ['lipan', 'pinal', 'plain'], + 'plainer': ['pearlin', 'plainer', 'praline'], + 'plaint': ['plaint', 'pliant'], + 'plaister': ['epistlar', 'pilaster', 'plaister', 'priestal'], + 'plaited': ['plaited', 'taliped'], + 'plaiter': ['partile', 'plaiter', 'replait'], + 'planate': ['planate', 'planeta', 'plantae', 'platane'], + 'planation': ['planation', 'platonian'], + 'plancheite': ['elephantic', 'plancheite'], + 'plane': ['alpen', 'nepal', 'panel', 'penal', 'plane'], + 'planer': ['parnel', 'planer', 'replan'], + 'planera': ['planera', 'preanal'], + 'planet': ['pantle', 'planet', 'platen'], + 'planeta': ['planate', 'planeta', 'plantae', 'platane'], + 'planetabler': ['planetabler', 'replantable'], + 'planetaria': ['parentalia', 'planetaria'], + 'planetoid': ['pelidnota', 'planetoid'], + 'planity': ['inaptly', 'planity', 'ptyalin'], + 'planker': ['planker', 'prankle'], + 'planta': ['planta', 'platan'], + 'plantae': ['planate', 'planeta', 'plantae', 'platane'], + 'planter': ['pantler', 'planter', 'replant'], + 'plap': ['lapp', 'palp', 'plap'], + 'plasher': ['plasher', 'spheral'], + 'plasm': ['plasm', 'psalm', 'slamp'], + 'plasma': ['lampas', 'plasma'], + 'plasmation': ['aminoplast', 'plasmation'], + 'plasmic': ['plasmic', 'psalmic'], + 'plasmode': ['malposed', 'plasmode'], + 'plasmodial': ['plasmodial', 'psalmodial'], + 'plasmodic': ['plasmodic', 'psalmodic'], + 'plasson': ['plasson', 'sponsal'], + 'plastein': ['panelist', 'pantelis', 'penalist', 'plastein'], + 'plaster': ['palster', 'persalt', 'plaster', 'psalter', 'spartle', 'stapler'], + 'plasterer': ['plasterer', 'replaster'], + 'plastery': ['plastery', 
'psaltery'], + 'plasticine': ['cisplatine', 'plasticine'], + 'plastidome': ['plastidome', 'postmedial'], + 'plastinoid': ['palinodist', 'plastinoid'], + 'plastochrone': ['phrenocostal', 'plastochrone'], + 'plat': ['palt', 'plat'], + 'plataean': ['panatela', 'plataean'], + 'platan': ['planta', 'platan'], + 'platane': ['planate', 'planeta', 'plantae', 'platane'], + 'plate': ['leapt', + 'palet', + 'patel', + 'pelta', + 'petal', + 'plate', + 'pleat', + 'tepal'], + 'platea': ['aletap', 'palate', 'platea'], + 'plateless': ['petalless', 'plateless', 'pleatless'], + 'platelet': ['pallette', 'platelet'], + 'platelike': ['petallike', 'platelike'], + 'platen': ['pantle', 'planet', 'platen'], + 'plater': ['palter', 'plater'], + 'platerer': ['palterer', 'platerer'], + 'platery': ['apertly', 'peartly', 'platery', 'pteryla', 'taperly'], + 'platine': ['pantile', 'pentail', 'platine', 'talpine'], + 'platinochloric': ['chloroplatinic', 'platinochloric'], + 'platinous': ['platinous', 'pulsation'], + 'platode': ['platode', 'tadpole'], + 'platoid': ['platoid', 'talpoid'], + 'platonian': ['planation', 'platonian'], + 'platter': ['partlet', 'platter', 'prattle'], + 'platy': ['aptly', 'patly', 'platy', 'typal'], + 'platynite': ['patiently', 'platynite'], + 'platysternal': ['platysternal', 'transeptally'], + 'plaud': ['dupla', 'plaud'], + 'plaustral': ['palustral', 'plaustral'], + 'play': ['paly', 'play', 'pyal', 'pyla'], + 'playa': ['palay', 'playa'], + 'player': ['parley', 'pearly', 'player', 'replay'], + 'playgoer': ['playgoer', 'pylagore'], + 'playroom': ['myopolar', 'playroom'], + 'plea': ['leap', 'lepa', 'pale', 'peal', 'plea'], + 'pleach': ['chapel', 'lepcha', 'pleach'], + 'plead': ['padle', 'paled', 'pedal', 'plead'], + 'pleader': ['pearled', 'pedaler', 'pleader', 'replead'], + 'please': ['asleep', 'elapse', 'please'], + 'pleaser': ['pleaser', 'preseal', 'relapse'], + 'pleasure': ['pleasure', 'serpulae'], + 'pleasurer': ['pleasurer', 'reperusal'], + 'pleasurous': ['asperulous', 'pleasurous'], + 'pleat': ['leapt', + 'palet', + 'patel', + 'pelta', + 'petal', + 'plate', + 'pleat', + 'tepal'], + 'pleater': ['pearlet', 'pleater', 'prelate', 'ptereal', 'replate', 'repleat'], + 'pleatless': ['petalless', 'plateless', 'pleatless'], + 'plectre': ['plectre', 'prelect'], + 'pleion': ['pinole', 'pleion'], + 'pleionian': ['opalinine', 'pleionian'], + 'plenilunar': ['plenilunar', 'plurennial'], + 'plenty': ['pentyl', 'plenty'], + 'pleomorph': ['pleomorph', 'prophloem'], + 'pleon': ['pelon', 'pleon'], + 'pleonal': ['pallone', 'pleonal'], + 'pleonasm': ['neoplasm', 'pleonasm', 'polesman', 'splenoma'], + 'pleonast': ['lapstone', 'pleonast'], + 'pleonastic': ['neoplastic', 'pleonastic'], + 'pleroma': ['leproma', 'palermo', 'pleroma', 'polearm'], + 'plerosis': ['leprosis', 'plerosis'], + 'plerotic': ['petrolic', 'plerotic'], + 'plessor': ['plessor', 'preloss'], + 'plethora': ['plethora', 'traphole'], + 'plethoric': ['phloretic', 'plethoric'], + 'pleura': ['epural', 'perula', 'pleura'], + 'pleuric': ['luperci', 'pleuric'], + 'pleuropericardial': ['pericardiopleural', 'pleuropericardial'], + 'pleurotoma': ['pleurotoma', 'tropaeolum'], + 'pleurovisceral': ['pleurovisceral', 'visceropleural'], + 'pliancy': ['pliancy', 'pycnial'], + 'pliant': ['plaint', 'pliant'], + 'plica': ['pical', 'plica'], + 'plicately': ['callitype', 'plicately'], + 'plicater': ['particle', 'plicater', 'prelatic'], + 'plicator': ['plicator', 'tropical'], + 'plied': ['piled', 'plied'], + 'plier': ['peril', 'piler', 'plier'], + 'pliers': ['lisper', 'pliers', 
'sirple', 'spiler'], + 'plies': ['piles', 'plies', 'slipe', 'spiel', 'spile'], + 'plighter': ['plighter', 'replight'], + 'plim': ['limp', 'pilm', 'plim'], + 'pliny': ['pinyl', 'pliny'], + 'pliosaur': ['liparous', 'pliosaur'], + 'pliosauridae': ['lepidosauria', 'pliosauridae'], + 'ploceidae': ['adipocele', 'cepolidae', 'ploceidae'], + 'ploceus': ['culpose', 'ploceus', 'upclose'], + 'plodder': ['plodder', 'proddle'], + 'ploima': ['lipoma', 'pimola', 'ploima'], + 'ploration': ['ploration', 'portional', 'prolation'], + 'plot': ['plot', 'polt'], + 'plotful': ['plotful', 'topfull'], + 'plotted': ['plotted', 'pottled'], + 'plotter': ['plotter', 'portlet'], + 'plousiocracy': ['plousiocracy', 'procaciously'], + 'plout': ['plout', 'pluto', 'poult'], + 'plouter': ['plouter', 'poulter'], + 'plovery': ['overply', 'plovery'], + 'plower': ['plower', 'replow'], + 'ploy': ['ploy', 'poly'], + 'plucker': ['plucker', 'puckrel'], + 'plug': ['gulp', 'plug'], + 'plum': ['lump', 'plum'], + 'pluma': ['ampul', 'pluma'], + 'plumbagine': ['impugnable', 'plumbagine'], + 'plumbic': ['plumbic', 'upclimb'], + 'plumed': ['dumple', 'plumed'], + 'plumeous': ['eumolpus', 'plumeous'], + 'plumer': ['lumper', 'plumer', 'replum', 'rumple'], + 'plumet': ['lumpet', 'plumet'], + 'pluminess': ['lumpiness', 'pluminess'], + 'plumy': ['lumpy', 'plumy'], + 'plunderer': ['plunderer', 'replunder'], + 'plunge': ['plunge', 'pungle'], + 'plup': ['plup', 'pulp'], + 'plurennial': ['plenilunar', 'plurennial'], + 'pluriseptate': ['perpetualist', 'pluriseptate'], + 'plutean': ['plutean', 'unpetal', 'unpleat'], + 'pluteus': ['pluteus', 'pustule'], + 'pluto': ['plout', 'pluto', 'poult'], + 'pluvine': ['pluvine', 'vulpine'], + 'plyer': ['plyer', 'reply'], + 'pneumatophony': ['pneumatophony', 'pneumonopathy'], + 'pneumohemothorax': ['hemopneumothorax', 'pneumohemothorax'], + 'pneumohydropericardium': ['hydropneumopericardium', 'pneumohydropericardium'], + 'pneumohydrothorax': ['hydropneumothorax', 'pneumohydrothorax'], + 'pneumonopathy': ['pneumatophony', 'pneumonopathy'], + 'pneumopyothorax': ['pneumopyothorax', 'pyopneumothorax'], + 'poach': ['chopa', 'phoca', 'poach'], + 'poachy': ['poachy', 'pochay'], + 'poales': ['aslope', 'poales'], + 'pob': ['bop', 'pob'], + 'pochay': ['poachy', 'pochay'], + 'poche': ['epoch', 'poche'], + 'pocketer': ['pocketer', 'repocket'], + 'poco': ['coop', 'poco'], + 'pocosin': ['opsonic', 'pocosin'], + 'poculation': ['copulation', 'poculation'], + 'pod': ['dop', 'pod'], + 'podalic': ['placoid', 'podalic', 'podical'], + 'podarthritis': ['podarthritis', 'traditorship'], + 'podial': ['podial', 'poliad'], + 'podical': ['placoid', 'podalic', 'podical'], + 'podiceps': ['podiceps', 'scopiped'], + 'podite': ['pioted', 'podite'], + 'poditti': ['pittoid', 'poditti'], + 'podler': ['podler', 'polder', 'replod'], + 'podley': ['deploy', 'podley'], + 'podobranchia': ['branchiopoda', 'podobranchia'], + 'podocephalous': ['cephalopodous', 'podocephalous'], + 'podophyllous': ['phyllopodous', 'podophyllous'], + 'podoscaph': ['podoscaph', 'scaphopod'], + 'podostomata': ['podostomata', 'stomatopoda'], + 'podostomatous': ['podostomatous', 'stomatopodous'], + 'podotheca': ['chaetopod', 'podotheca'], + 'podura': ['podura', 'uproad'], + 'poduran': ['pandour', 'poduran'], + 'poe': ['ope', 'poe'], + 'poem': ['mope', 'poem', 'pome'], + 'poemet': ['metope', 'poemet'], + 'poemlet': ['leptome', 'poemlet'], + 'poesy': ['poesy', 'posey', 'sepoy'], + 'poet': ['peto', 'poet', 'pote', 'tope'], + 'poetastrical': ['poetastrical', 'spectatorial'], + 
'poetical': ['copalite', 'poetical'], + 'poeticism': ['impeticos', 'poeticism'], + 'poetics': ['poetics', 'septoic'], + 'poetly': ['peyotl', 'poetly'], + 'poetryless': ['poetryless', 'presystole'], + 'pogo': ['goop', 'pogo'], + 'poh': ['hop', 'pho', 'poh'], + 'poha': ['opah', 'paho', 'poha'], + 'pohna': ['phano', 'pohna'], + 'poiana': ['anopia', 'aponia', 'poiana'], + 'poietic': ['epiotic', 'poietic'], + 'poimenic': ['mincopie', 'poimenic'], + 'poinder': ['poinder', 'ponerid'], + 'point': ['pinto', 'point'], + 'pointel': ['pointel', 'pontile', 'topline'], + 'pointer': ['pointer', 'protein', 'pterion', 'repoint', 'tropine'], + 'pointlet': ['pentitol', 'pointlet'], + 'pointrel': ['pointrel', 'terpinol'], + 'poisonless': ['poisonless', 'solenopsis'], + 'poker': ['poker', 'proke'], + 'pol': ['lop', 'pol'], + 'polab': ['pablo', 'polab'], + 'polacre': ['capreol', 'polacre'], + 'polander': ['polander', 'ponderal', 'prenodal'], + 'polar': ['parol', 'polar', 'poral', 'proal'], + 'polarid': ['dipolar', 'polarid'], + 'polarimetry': ['polarimetry', 'premorality', 'temporarily'], + 'polaristic': ['polaristic', 'poristical', 'saprolitic'], + 'polarly': ['payroll', 'polarly'], + 'polder': ['podler', 'polder', 'replod'], + 'pole': ['lope', 'olpe', 'pole'], + 'polearm': ['leproma', 'palermo', 'pleroma', 'polearm'], + 'polecat': ['pacolet', 'polecat'], + 'polemic': ['compile', 'polemic'], + 'polemics': ['clipsome', 'polemics'], + 'polemist': ['milepost', 'polemist'], + 'polenta': ['lepanto', 'nepotal', 'petalon', 'polenta'], + 'poler': ['loper', 'poler'], + 'polesman': ['neoplasm', 'pleonasm', 'polesman', 'splenoma'], + 'polestar': ['petrosal', 'polestar'], + 'poliad': ['podial', 'poliad'], + 'polianite': ['epilation', 'polianite'], + 'policyholder': ['policyholder', 'polychloride'], + 'polio': ['polio', 'pooli'], + 'polis': ['polis', 'spoil'], + 'polished': ['depolish', 'polished'], + 'polisher': ['polisher', 'repolish'], + 'polistes': ['polistes', 'telopsis'], + 'politarch': ['carpolith', 'politarch', 'trophical'], + 'politicly': ['lipolytic', 'politicly'], + 'politics': ['colpitis', 'politics', 'psilotic'], + 'polk': ['klop', 'polk'], + 'pollenite': ['pellotine', 'pollenite'], + 'poller': ['poller', 'repoll'], + 'pollinate': ['pellation', 'pollinate'], + 'polo': ['loop', 'polo', 'pool'], + 'poloist': ['loopist', 'poloist', 'topsoil'], + 'polonia': ['apionol', 'polonia'], + 'polos': ['polos', 'sloop', 'spool'], + 'polt': ['plot', 'polt'], + 'poly': ['ploy', 'poly'], + 'polyacid': ['polyacid', 'polyadic'], + 'polyactinal': ['pactionally', 'polyactinal'], + 'polyadic': ['polyacid', 'polyadic'], + 'polyarchist': ['chiroplasty', 'polyarchist'], + 'polychloride': ['policyholder', 'polychloride'], + 'polychroism': ['polychroism', 'polyorchism'], + 'polycitral': ['polycitral', 'tropically'], + 'polycodium': ['lycopodium', 'polycodium'], + 'polycotyl': ['collotypy', 'polycotyl'], + 'polyeidic': ['polyeidic', 'polyideic'], + 'polyeidism': ['polyeidism', 'polyideism'], + 'polyester': ['polyester', 'proselyte'], + 'polygamodioecious': ['dioeciopolygamous', 'polygamodioecious'], + 'polyhalite': ['paleolithy', 'polyhalite', 'polythelia'], + 'polyideic': ['polyeidic', 'polyideic'], + 'polyideism': ['polyeidism', 'polyideism'], + 'polymastic': ['myoplastic', 'polymastic'], + 'polymasty': ['myoplasty', 'polymasty'], + 'polymere': ['employer', 'polymere'], + 'polymeric': ['micropyle', 'polymeric'], + 'polymetochia': ['homeotypical', 'polymetochia'], + 'polymnia': ['olympian', 'polymnia'], + 'polymyodi': ['polymyodi', 
'polymyoid'], + 'polymyoid': ['polymyodi', 'polymyoid'], + 'polyorchism': ['polychroism', 'polyorchism'], + 'polyp': ['loppy', 'polyp'], + 'polyphemian': ['lymphopenia', 'polyphemian'], + 'polyphonist': ['polyphonist', 'psilophyton'], + 'polypite': ['lipotype', 'polypite'], + 'polyprene': ['polyprene', 'propylene'], + 'polypterus': ['polypterus', 'suppletory'], + 'polysomitic': ['myocolpitis', 'polysomitic'], + 'polyspore': ['polyspore', 'prosopyle'], + 'polythelia': ['paleolithy', 'polyhalite', 'polythelia'], + 'polythene': ['polythene', 'telephony'], + 'polyuric': ['croupily', 'polyuric'], + 'pom': ['mop', 'pom'], + 'pomade': ['apedom', 'pomade'], + 'pomane': ['mopane', 'pomane'], + 'pome': ['mope', 'poem', 'pome'], + 'pomeranian': ['pomeranian', 'praenomina'], + 'pomerium': ['emporium', 'pomerium', 'proemium'], + 'pomey': ['myope', 'pomey'], + 'pomo': ['moop', 'pomo'], + 'pomonal': ['lampoon', 'pomonal'], + 'pompilus': ['pimplous', 'pompilus', 'populism'], + 'pomster': ['pomster', 'stomper'], + 'ponca': ['capon', 'ponca'], + 'ponce': ['copen', 'ponce'], + 'ponchoed': ['chenopod', 'ponchoed'], + 'poncirus': ['coprinus', 'poncirus'], + 'ponderal': ['polander', 'ponderal', 'prenodal'], + 'ponderate': ['ponderate', 'predonate'], + 'ponderation': ['ponderation', 'predonation'], + 'ponderer': ['ponderer', 'reponder'], + 'pondfish': ['fishpond', 'pondfish'], + 'pone': ['nope', 'open', 'peon', 'pone'], + 'ponerid': ['poinder', 'ponerid'], + 'poneroid': ['poneroid', 'porodine'], + 'poney': ['peony', 'poney'], + 'ponica': ['aponic', 'ponica'], + 'ponier': ['opiner', 'orpine', 'ponier'], + 'pontederia': ['pontederia', 'proteidean'], + 'pontee': ['nepote', 'pontee', 'poteen'], + 'pontes': ['pontes', 'posnet'], + 'ponticular': ['ponticular', 'untropical'], + 'pontificalia': ['palification', 'pontificalia'], + 'pontile': ['pointel', 'pontile', 'topline'], + 'pontonier': ['entropion', 'pontonier', 'prenotion'], + 'pontus': ['pontus', 'unspot', 'unstop'], + 'pooch': ['choop', 'pooch'], + 'pooh': ['hoop', 'phoo', 'pooh'], + 'pooka': ['oopak', 'pooka'], + 'pool': ['loop', 'polo', 'pool'], + 'pooler': ['looper', 'pooler'], + 'pooli': ['polio', 'pooli'], + 'pooly': ['loopy', 'pooly'], + 'poon': ['noop', 'poon'], + 'poonac': ['acopon', 'poonac'], + 'poonga': ['apogon', 'poonga'], + 'poor': ['poor', 'proo'], + 'poot': ['poot', 'toop', 'topo'], + 'pope': ['pepo', 'pope'], + 'popeler': ['peopler', 'popeler'], + 'popery': ['popery', 'pyrope'], + 'popgun': ['oppugn', 'popgun'], + 'popian': ['oppian', 'papion', 'popian'], + 'popish': ['popish', 'shippo'], + 'popliteal': ['papillote', 'popliteal'], + 'poppel': ['poppel', 'popple'], + 'popple': ['poppel', 'popple'], + 'populism': ['pimplous', 'pompilus', 'populism'], + 'populus': ['populus', 'pulpous'], + 'poral': ['parol', 'polar', 'poral', 'proal'], + 'porcate': ['coperta', 'pectora', 'porcate'], + 'porcelain': ['oliprance', 'porcelain'], + 'porcelanite': ['porcelanite', 'praelection'], + 'porcula': ['copular', 'croupal', 'cupolar', 'porcula'], + 'pore': ['pore', 'rope'], + 'pored': ['doper', 'pedro', 'pored'], + 'porelike': ['porelike', 'ropelike'], + 'porer': ['porer', 'prore', 'roper'], + 'porge': ['grope', 'porge'], + 'porger': ['groper', 'porger'], + 'poriness': ['poriness', 'pression', 'ropiness'], + 'poring': ['poring', 'roping'], + 'poristical': ['polaristic', 'poristical', 'saprolitic'], + 'porites': ['periost', 'porites', 'reposit', 'riposte'], + 'poritidae': ['poritidae', 'triopidae'], + 'porker': ['porker', 'proker'], + 'pornerastic': ['cotranspire', 
'pornerastic'], + 'porodine': ['poneroid', 'porodine'], + 'poros': ['poros', 'proso', 'sopor', 'spoor'], + 'porosity': ['isotropy', 'porosity'], + 'porotic': ['porotic', 'portico'], + 'porpentine': ['porpentine', 'prepontine'], + 'porphine': ['hornpipe', 'porphine'], + 'porphyrous': ['porphyrous', 'pyrophorus'], + 'porrection': ['correption', 'porrection'], + 'porret': ['porret', 'porter', 'report', 'troper'], + 'porta': ['aport', 'parto', 'porta'], + 'portail': ['portail', 'toprail'], + 'portal': ['patrol', 'portal', 'tropal'], + 'portance': ['coparent', 'portance'], + 'ported': ['deport', 'ported', 'redtop'], + 'portend': ['portend', 'protend'], + 'porteno': ['porteno', 'protone'], + 'portension': ['portension', 'protension'], + 'portent': ['portent', 'torpent'], + 'portentous': ['notopterus', 'portentous'], + 'porter': ['porret', 'porter', 'report', 'troper'], + 'porterage': ['porterage', 'reportage'], + 'portership': ['portership', 'pretorship'], + 'portfire': ['portfire', 'profiter'], + 'portia': ['portia', 'tapiro'], + 'portico': ['porotic', 'portico'], + 'portify': ['portify', 'torpify'], + 'portional': ['ploration', 'portional', 'prolation'], + 'portioner': ['portioner', 'reportion'], + 'portlet': ['plotter', 'portlet'], + 'portly': ['portly', 'protyl', 'tropyl'], + 'porto': ['porto', 'proto', 'troop'], + 'portoise': ['isotrope', 'portoise'], + 'portolan': ['portolan', 'pronotal'], + 'portor': ['portor', 'torpor'], + 'portray': ['parroty', 'portray', 'tropary'], + 'portrayal': ['parlatory', 'portrayal'], + 'portside': ['dipteros', 'portside'], + 'portsider': ['portsider', 'postrider'], + 'portunidae': ['depuration', 'portunidae'], + 'portunus': ['outspurn', 'portunus'], + 'pory': ['pory', 'pyro', 'ropy'], + 'posca': ['posca', 'scopa'], + 'pose': ['epos', 'peso', 'pose', 'sope'], + 'poser': ['poser', 'prose', 'ropes', 'spore'], + 'poseur': ['poseur', 'pouser', 'souper', 'uprose'], + 'posey': ['poesy', 'posey', 'sepoy'], + 'posh': ['phos', 'posh', 'shop', 'soph'], + 'posingly': ['posingly', 'spongily'], + 'position': ['position', 'sopition'], + 'positional': ['positional', 'spoilation', 'spoliation'], + 'positioned': ['deposition', 'positioned'], + 'positioner': ['positioner', 'reposition'], + 'positron': ['notropis', 'positron', 'sorption'], + 'positum': ['positum', 'utopism'], + 'posnet': ['pontes', 'posnet'], + 'posse': ['posse', 'speos'], + 'possessioner': ['possessioner', 'repossession'], + 'post': ['post', 'spot', 'stop', 'tops'], + 'postage': ['gestapo', 'postage'], + 'postaortic': ['parostotic', 'postaortic'], + 'postdate': ['despotat', 'postdate'], + 'posted': ['despot', 'posted'], + 'posteen': ['pentose', 'posteen'], + 'poster': ['poster', 'presto', 'repost', 'respot', 'stoper'], + 'posterial': ['posterial', 'saprolite'], + 'posterior': ['posterior', 'repositor'], + 'posterish': ['posterish', 'prothesis', 'sophister', 'storeship', 'tephrosis'], + 'posteroinferior': ['inferoposterior', 'posteroinferior'], + 'posterosuperior': ['posterosuperior', 'superoposterior'], + 'postgenial': ['postgenial', 'spangolite'], + 'posthaste': ['posthaste', 'tosephtas'], + 'postic': ['copist', 'coptis', 'optics', 'postic'], + 'postical': ['postical', 'slipcoat'], + 'postil': ['pistol', 'postil', 'spoilt'], + 'posting': ['posting', 'stoping'], + 'postischial': ['postischial', 'sophistical'], + 'postless': ['postless', 'spotless', 'stopless'], + 'postlike': ['postlike', 'spotlike'], + 'postman': ['postman', 'topsman'], + 'postmedial': ['plastidome', 'postmedial'], + 'postnaris': ['postnaris', 
'sopranist'], + 'postnominal': ['monoplanist', 'postnominal'], + 'postrider': ['portsider', 'postrider'], + 'postsacral': ['postsacral', 'sarcoplast'], + 'postsign': ['postsign', 'signpost'], + 'postthoracic': ['octastrophic', 'postthoracic'], + 'postulata': ['autoplast', 'postulata'], + 'postural': ['postural', 'pulsator', 'sportula'], + 'posture': ['petrous', 'posture', 'proetus', 'proteus', 'septuor', 'spouter'], + 'posturer': ['posturer', 'resprout', 'sprouter'], + 'postuterine': ['postuterine', 'pretentious'], + 'postwar': ['postwar', 'sapwort'], + 'postward': ['drawstop', 'postward'], + 'postwoman': ['postwoman', 'womanpost'], + 'posy': ['opsy', 'posy'], + 'pot': ['opt', 'pot', 'top'], + 'potable': ['optable', 'potable'], + 'potableness': ['optableness', 'potableness'], + 'potash': ['pashto', 'pathos', 'potash'], + 'potass': ['potass', 'topass'], + 'potate': ['aptote', 'optate', 'potate', 'teapot'], + 'potation': ['optation', 'potation'], + 'potative': ['optative', 'potative'], + 'potator': ['potator', 'taproot'], + 'pote': ['peto', 'poet', 'pote', 'tope'], + 'poteen': ['nepote', 'pontee', 'poteen'], + 'potential': ['peltation', 'potential'], + 'poter': ['poter', 'prote', 'repot', 'tepor', 'toper', 'trope'], + 'poteye': ['peyote', 'poteye'], + 'pothouse': ['housetop', 'pothouse'], + 'poticary': ['cyrtopia', 'poticary'], + 'potifer': ['firetop', 'potifer'], + 'potion': ['option', 'potion'], + 'potlatch': ['potlatch', 'tolpatch'], + 'potlike': ['kitlope', 'potlike', 'toplike'], + 'potmaker': ['potmaker', 'topmaker'], + 'potmaking': ['potmaking', 'topmaking'], + 'potman': ['potman', 'tampon', 'topman'], + 'potometer': ['optometer', 'potometer'], + 'potstick': ['potstick', 'tipstock'], + 'potstone': ['potstone', 'topstone'], + 'pottled': ['plotted', 'pottled'], + 'pouce': ['coupe', 'pouce'], + 'poucer': ['couper', 'croupe', 'poucer', 'recoup'], + 'pouch': ['choup', 'pouch'], + 'poulpe': ['poulpe', 'pupelo'], + 'poult': ['plout', 'pluto', 'poult'], + 'poulter': ['plouter', 'poulter'], + 'poultice': ['epulotic', 'poultice'], + 'pounce': ['pounce', 'uncope'], + 'pounder': ['pounder', 'repound', 'unroped'], + 'pour': ['pour', 'roup'], + 'pourer': ['pourer', 'repour', 'rouper'], + 'pouser': ['poseur', 'pouser', 'souper', 'uprose'], + 'pout': ['pout', 'toup'], + 'pouter': ['pouter', 'roupet', 'troupe'], + 'pow': ['pow', 'wop'], + 'powder': ['powder', 'prowed'], + 'powderer': ['powderer', 'repowder'], + 'powerful': ['powerful', 'upflower'], + 'praam': ['param', 'parma', 'praam'], + 'practitional': ['antitropical', 'practitional'], + 'prad': ['pard', 'prad'], + 'pradeep': ['papered', 'pradeep'], + 'praecox': ['exocarp', 'praecox'], + 'praelabrum': ['praelabrum', 'preambular'], + 'praelection': ['porcelanite', 'praelection'], + 'praelector': ['praelector', 'receptoral'], + 'praenomina': ['pomeranian', 'praenomina'], + 'praepostor': ['praepostor', 'pterospora'], + 'praesphenoid': ['nephropsidae', 'praesphenoid'], + 'praetor': ['praetor', 'prorate'], + 'praetorian': ['praetorian', 'reparation'], + 'praise': ['aspire', 'paries', 'praise', 'sirpea', 'spirea'], + 'praiser': ['aspirer', 'praiser', 'serpari'], + 'praising': ['aspiring', 'praising', 'singarip'], + 'praisingly': ['aspiringly', 'praisingly'], + 'praline': ['pearlin', 'plainer', 'praline'], + 'pram': ['pram', 'ramp'], + 'prandial': ['diplanar', 'prandial'], + 'prankle': ['planker', 'prankle'], + 'prase': ['asper', 'parse', 'prase', 'spaer', 'spare', 'spear'], + 'praseolite': ['periosteal', 'praseolite'], + 'prasine': ['persian', 'prasine', 
'saprine'], + 'prasinous': ['prasinous', 'psaronius'], + 'prasoid': ['prasoid', 'sparoid'], + 'prasophagous': ['prasophagous', 'saprophagous'], + 'prat': ['part', 'prat', 'rapt', 'tarp', 'trap'], + 'prate': ['apert', 'pater', 'peart', 'prate', 'taper', 'terap'], + 'prater': ['parter', 'prater'], + 'pratey': ['petary', 'pratey'], + 'pratfall': ['pratfall', 'trapfall'], + 'pratincoline': ['nonpearlitic', 'pratincoline'], + 'pratincolous': ['patroclinous', 'pratincolous'], + 'prattle': ['partlet', 'platter', 'prattle'], + 'prau': ['prau', 'rupa'], + 'prawner': ['prawner', 'prewarn'], + 'prayer': ['prayer', 'repray'], + 'preach': ['aperch', 'eparch', 'percha', 'preach'], + 'preacher': ['preacher', 'repreach'], + 'preaching': ['engraphic', 'preaching'], + 'preachman': ['marchpane', 'preachman'], + 'preachy': ['eparchy', 'preachy'], + 'preacid': ['epacrid', 'peracid', 'preacid'], + 'preact': ['carpet', 'peract', 'preact'], + 'preaction': ['preaction', 'precation', 'recaption'], + 'preactive': ['preactive', 'precative'], + 'preactively': ['preactively', 'precatively'], + 'preacute': ['peracute', 'preacute'], + 'preadmonition': ['demipronation', 'preadmonition', 'predomination'], + 'preadorn': ['pardoner', 'preadorn'], + 'preadventure': ['peradventure', 'preadventure'], + 'preallably': ['ballplayer', 'preallably'], + 'preallow': ['preallow', 'walloper'], + 'preamble': ['peramble', 'preamble'], + 'preambular': ['praelabrum', 'preambular'], + 'preambulate': ['perambulate', 'preambulate'], + 'preambulation': ['perambulation', 'preambulation'], + 'preambulatory': ['perambulatory', 'preambulatory'], + 'preanal': ['planera', 'preanal'], + 'prearm': ['prearm', 'ramper'], + 'preascitic': ['accipitres', 'preascitic'], + 'preauditory': ['preauditory', 'repudiatory'], + 'prebacillary': ['bicarpellary', 'prebacillary'], + 'prebasal': ['parsable', 'prebasal', 'sparable'], + 'prebend': ['perbend', 'prebend'], + 'prebeset': ['bepester', 'prebeset'], + 'prebid': ['bedrip', 'prebid'], + 'precant': ['carpent', 'precant'], + 'precantation': ['actinopteran', 'precantation'], + 'precast': ['precast', 'spectra'], + 'precation': ['preaction', 'precation', 'recaption'], + 'precative': ['preactive', 'precative'], + 'precatively': ['preactively', 'precatively'], + 'precaution': ['precaution', 'unoperatic'], + 'precautional': ['inoperculata', 'precautional'], + 'precedable': ['deprecable', 'precedable'], + 'preceder': ['preceder', 'precreed'], + 'precent': ['percent', 'precent'], + 'precept': ['percept', 'precept'], + 'preception': ['perception', 'preception'], + 'preceptive': ['perceptive', 'preceptive'], + 'preceptively': ['perceptively', 'preceptively'], + 'preceptual': ['perceptual', 'preceptual'], + 'preceptually': ['perceptually', 'preceptually'], + 'prechloric': ['perchloric', 'prechloric'], + 'prechoose': ['prechoose', 'rheoscope'], + 'precipitate': ['peripatetic', 'precipitate'], + 'precipitator': ['precipitator', 'prepatriotic'], + 'precis': ['crepis', 'cripes', 'persic', 'precis', 'spicer'], + 'precise': ['precise', 'scripee'], + 'precisian': ['periscian', 'precisian'], + 'precision': ['coinspire', 'precision'], + 'precitation': ['actinopteri', 'crepitation', 'precitation'], + 'precite': ['ereptic', 'precite', 'receipt'], + 'precited': ['decrepit', 'depicter', 'precited'], + 'preclose': ['perclose', 'preclose'], + 'preclusion': ['perculsion', 'preclusion'], + 'preclusive': ['perculsive', 'preclusive'], + 'precoil': ['peloric', 'precoil'], + 'precompound': ['percompound', 'precompound'], + 'precondense': 
['precondense', 'respondence'], + 'preconnubial': ['preconnubial', 'pronunciable'], + 'preconsole': ['necropoles', 'preconsole'], + 'preconsultor': ['preconsultor', 'supercontrol'], + 'precontest': ['precontest', 'torpescent'], + 'precopy': ['coppery', 'precopy'], + 'precostal': ['ceroplast', 'precostal'], + 'precredit': ['precredit', 'predirect', 'repredict'], + 'precreditor': ['precreditor', 'predirector'], + 'precreed': ['preceder', 'precreed'], + 'precurrent': ['percurrent', 'precurrent'], + 'precursory': ['percursory', 'precursory'], + 'precyst': ['precyst', 'sceptry', 'spectry'], + 'predata': ['adapter', 'predata', 'readapt'], + 'predate': ['padtree', 'predate', 'tapered'], + 'predatism': ['predatism', 'spermatid'], + 'predative': ['deprivate', 'predative'], + 'predator': ['predator', 'protrade', 'teardrop'], + 'preday': ['pedary', 'preday'], + 'predealer': ['predealer', 'repleader'], + 'predecree': ['creepered', 'predecree'], + 'predefect': ['perfected', 'predefect'], + 'predeication': ['depreciation', 'predeication'], + 'prederive': ['prederive', 'redeprive'], + 'predespair': ['disprepare', 'predespair'], + 'predestine': ['predestine', 'presidente'], + 'predetail': ['pedaliter', 'predetail'], + 'predevotion': ['overpointed', 'predevotion'], + 'predial': ['pedrail', 'predial'], + 'prediastolic': ['prediastolic', 'psiloceratid'], + 'predication': ['predication', 'procidentia'], + 'predictory': ['cryptodire', 'predictory'], + 'predirect': ['precredit', 'predirect', 'repredict'], + 'predirector': ['precreditor', 'predirector'], + 'prediscretion': ['prediscretion', 'redescription'], + 'predislike': ['predislike', 'spiderlike'], + 'predivinable': ['indeprivable', 'predivinable'], + 'predominate': ['pentameroid', 'predominate'], + 'predomination': ['demipronation', 'preadmonition', 'predomination'], + 'predonate': ['ponderate', 'predonate'], + 'predonation': ['ponderation', 'predonation'], + 'predoubter': ['predoubter', 'preobtrude'], + 'predrive': ['depriver', 'predrive'], + 'preen': ['neper', 'preen', 'repen'], + 'prefactor': ['aftercrop', 'prefactor'], + 'prefator': ['forepart', 'prefator'], + 'prefavorite': ['perforative', 'prefavorite'], + 'prefect': ['perfect', 'prefect'], + 'prefectly': ['perfectly', 'prefectly'], + 'prefervid': ['perfervid', 'prefervid'], + 'prefiction': ['prefiction', 'proficient'], + 'prefoliation': ['perfoliation', 'prefoliation'], + 'preform': ['perform', 'preform'], + 'preformant': ['performant', 'preformant'], + 'preformative': ['performative', 'preformative'], + 'preformism': ['misperform', 'preformism'], + 'pregainer': ['peregrina', 'pregainer'], + 'prehandicap': ['handicapper', 'prehandicap'], + 'prehaps': ['perhaps', 'prehaps'], + 'prehazard': ['perhazard', 'prehazard'], + 'preheal': ['preheal', 'rephael'], + 'preheat': ['haptere', 'preheat'], + 'preheated': ['heartdeep', 'preheated'], + 'prehension': ['hesperinon', 'prehension'], + 'prehnite': ['nephrite', 'prehnite', 'trephine'], + 'prehnitic': ['nephritic', 'phrenitic', 'prehnitic'], + 'prehuman': ['prehuman', 'unhamper'], + 'preimpose': ['perispome', 'preimpose'], + 'preindicate': ['parenticide', 'preindicate'], + 'preinduce': ['preinduce', 'unpierced'], + 'preintone': ['interpone', 'peritenon', 'pinnotere', 'preintone'], + 'prelabrum': ['prelabrum', 'prelumbar'], + 'prelacteal': ['carpellate', 'parcellate', 'prelacteal'], + 'prelate': ['pearlet', 'pleater', 'prelate', 'ptereal', 'replate', 'repleat'], + 'prelatehood': ['heteropodal', 'prelatehood'], + 'prelatic': ['particle', 'plicater', 
'prelatic'], + 'prelatical': ['capitellar', 'prelatical'], + 'prelation': ['prelation', 'rantipole'], + 'prelatism': ['palmister', 'prelatism'], + 'prelease': ['eelspear', 'prelease'], + 'prelect': ['plectre', 'prelect'], + 'prelection': ['perlection', 'prelection'], + 'prelim': ['limper', 'prelim', 'rimple'], + 'prelingual': ['perlingual', 'prelingual'], + 'prelocate': ['percolate', 'prelocate'], + 'preloss': ['plessor', 'preloss'], + 'preludial': ['dipleural', 'preludial'], + 'prelumbar': ['prelabrum', 'prelumbar'], + 'prelusion': ['prelusion', 'repulsion'], + 'prelusive': ['prelusive', 'repulsive'], + 'prelusively': ['prelusively', 'repulsively'], + 'prelusory': ['prelusory', 'repulsory'], + 'premate': ['premate', 'tempera'], + 'premedia': ['epiderma', 'premedia'], + 'premedial': ['epidermal', 'impleader', 'premedial'], + 'premedication': ['pedometrician', 'premedication'], + 'premenace': ['permeance', 'premenace'], + 'premerit': ['premerit', 'preremit', 'repermit'], + 'premial': ['impaler', 'impearl', 'lempira', 'premial'], + 'premiant': ['imperant', 'pairment', 'partimen', 'premiant', 'tripeman'], + 'premiate': ['imperate', 'premiate'], + 'premier': ['premier', 'reprime'], + 'premious': ['imposure', 'premious'], + 'premise': ['emprise', 'imprese', 'premise', 'spireme'], + 'premiss': ['impress', 'persism', 'premiss'], + 'premixture': ['permixture', 'premixture'], + 'premodel': ['leperdom', 'premodel'], + 'premolar': ['premolar', 'premoral'], + 'premold': ['meldrop', 'premold'], + 'premonetary': ['premonetary', 'pyranometer'], + 'premoral': ['premolar', 'premoral'], + 'premorality': ['polarimetry', 'premorality', 'temporarily'], + 'premosaic': ['paroecism', 'premosaic'], + 'premover': ['premover', 'prevomer'], + 'premusical': ['premusical', 'superclaim'], + 'prenasal': ['pernasal', 'prenasal'], + 'prenatal': ['parental', 'paternal', 'prenatal'], + 'prenatalist': ['intraseptal', 'paternalist', 'prenatalist'], + 'prenatally': ['parentally', 'paternally', 'prenatally'], + 'prenative': ['interpave', 'prenative'], + 'prender': ['prender', 'prendre'], + 'prendre': ['prender', 'prendre'], + 'prenodal': ['polander', 'ponderal', 'prenodal'], + 'prenominical': ['nonempirical', 'prenominical'], + 'prenotice': ['prenotice', 'reception'], + 'prenotion': ['entropion', 'pontonier', 'prenotion'], + 'prentice': ['piercent', 'prentice'], + 'preobtrude': ['predoubter', 'preobtrude'], + 'preocular': ['opercular', 'preocular'], + 'preominate': ['permeation', 'preominate'], + 'preopen': ['preopen', 'propene'], + 'preopinion': ['preopinion', 'prionopine'], + 'preoption': ['preoption', 'protopine'], + 'preoral': ['peroral', 'preoral'], + 'preorally': ['perorally', 'preorally'], + 'prep': ['prep', 'repp'], + 'prepalatine': ['periplaneta', 'prepalatine'], + 'prepare': ['paperer', 'perpera', 'prepare', 'repaper'], + 'prepatriotic': ['precipitator', 'prepatriotic'], + 'prepay': ['papery', 'prepay', 'yapper'], + 'preplot': ['preplot', 'toppler'], + 'prepollent': ['prepollent', 'propellent'], + 'prepontine': ['porpentine', 'prepontine'], + 'prepositure': ['peripterous', 'prepositure'], + 'prepuce': ['prepuce', 'upcreep'], + 'prerational': ['prerational', 'proletarian'], + 'prerealization': ['prerealization', 'proletarianize'], + 'prereceive': ['prereceive', 'reperceive'], + 'prerecital': ['perirectal', 'prerecital'], + 'prereduction': ['interproduce', 'prereduction'], + 'prerefer': ['prerefer', 'reprefer'], + 'prereform': ['performer', 'prereform', 'reperform'], + 'preremit': ['premerit', 'preremit', 'repermit'], + 
'prerental': ['prerental', 'replanter'], + 'prerich': ['chirper', 'prerich'], + 'preromantic': ['improcreant', 'preromantic'], + 'presage': ['asperge', 'presage'], + 'presager': ['asperger', 'presager'], + 'presay': ['presay', 'speary'], + 'prescapular': ['prescapular', 'supercarpal'], + 'prescient': ['prescient', 'reinspect'], + 'prescientific': ['interspecific', 'prescientific'], + 'prescribe': ['perscribe', 'prescribe'], + 'prescutal': ['prescutal', 'scalpture'], + 'preseal': ['pleaser', 'preseal', 'relapse'], + 'preseason': ['parsonese', 'preseason'], + 'presell': ['presell', 'respell', 'speller'], + 'present': ['penster', 'present', 'serpent', 'strepen'], + 'presenter': ['presenter', 'represent'], + 'presential': ['alpestrine', 'episternal', 'interlapse', 'presential'], + 'presentist': ['persistent', 'presentist', 'prettiness'], + 'presentive': ['presentive', 'pretensive', 'vespertine'], + 'presentively': ['presentively', 'pretensively'], + 'presentiveness': ['presentiveness', 'pretensiveness'], + 'presently': ['presently', 'serpently'], + 'preserve': ['perverse', 'preserve'], + 'preset': ['pester', 'preset', 'restep', 'streep'], + 'preshare': ['preshare', 'rephrase'], + 'preship': ['preship', 'shipper'], + 'preshortage': ['preshortage', 'stereograph'], + 'preside': ['perseid', 'preside'], + 'presidencia': ['acipenserid', 'presidencia'], + 'president': ['president', 'serpentid'], + 'presidente': ['predestine', 'presidente'], + 'presider': ['presider', 'serriped'], + 'presign': ['presign', 'springe'], + 'presignal': ['espringal', 'presignal', 'relapsing'], + 'presimian': ['mainprise', 'presimian'], + 'presley': ['presley', 'sleepry'], + 'presser': ['presser', 'repress'], + 'pression': ['poriness', 'pression', 'ropiness'], + 'pressive': ['pressive', 'viperess'], + 'prest': ['prest', 'spret'], + 'prestable': ['beplaster', 'prestable'], + 'prestant': ['prestant', 'transept'], + 'prestate': ['prestate', 'pretaste'], + 'presto': ['poster', 'presto', 'repost', 'respot', 'stoper'], + 'prestock': ['prestock', 'sprocket'], + 'prestomial': ['peristomal', 'prestomial'], + 'prestrain': ['prestrain', 'transpire'], + 'presume': ['presume', 'supreme'], + 'presurmise': ['impressure', 'presurmise'], + 'presustain': ['presustain', 'puritaness', 'supersaint'], + 'presystole': ['poetryless', 'presystole'], + 'pretan': ['arpent', + 'enrapt', + 'entrap', + 'panter', + 'parent', + 'pretan', + 'trepan'], + 'pretaste': ['prestate', 'pretaste'], + 'pretensive': ['presentive', 'pretensive', 'vespertine'], + 'pretensively': ['presentively', 'pretensively'], + 'pretensiveness': ['presentiveness', 'pretensiveness'], + 'pretentious': ['postuterine', 'pretentious'], + 'pretercanine': ['irrepentance', 'pretercanine'], + 'preterient': ['preterient', 'triterpene'], + 'pretermit': ['permitter', 'pretermit'], + 'pretheological': ['herpetological', 'pretheological'], + 'pretibial': ['bipartile', 'pretibial'], + 'pretonic': ['inceptor', 'pretonic'], + 'pretorship': ['portership', 'pretorship'], + 'pretrace': ['pretrace', 'recarpet'], + 'pretracheal': ['archprelate', 'pretracheal'], + 'pretrain': ['pretrain', 'terrapin'], + 'pretransmission': ['pretransmission', 'transimpression'], + 'pretreat': ['patterer', 'pretreat'], + 'prettiness': ['persistent', 'presentist', 'prettiness'], + 'prevailer': ['prevailer', 'reprieval'], + 'prevalid': ['deprival', 'prevalid'], + 'prevention': ['prevention', 'provenient'], + 'preversion': ['perversion', 'preversion'], + 'preveto': ['overpet', 'preveto', 'prevote'], + 'previde': ['deprive', 
'previde'], + 'previous': ['pervious', 'previous', 'viperous'], + 'previously': ['perviously', 'previously', 'viperously'], + 'previousness': ['perviousness', 'previousness', 'viperousness'], + 'prevoid': ['prevoid', 'provide'], + 'prevomer': ['premover', 'prevomer'], + 'prevote': ['overpet', 'preveto', 'prevote'], + 'prewar': ['prewar', 'rewrap', 'warper'], + 'prewarn': ['prawner', 'prewarn'], + 'prewhip': ['prewhip', 'whipper'], + 'prewrap': ['prewrap', 'wrapper'], + 'prexy': ['prexy', 'pyrex'], + 'prey': ['prey', 'pyre', 'rype'], + 'pria': ['pair', 'pari', 'pria', 'ripa'], + 'price': ['price', 'repic'], + 'priced': ['percid', 'priced'], + 'prich': ['chirp', 'prich'], + 'prickfoot': ['prickfoot', 'tickproof'], + 'prickle': ['pickler', 'prickle'], + 'pride': ['pride', 'pried', 'redip'], + 'pridian': ['pindari', 'pridian'], + 'pried': ['pride', 'pried', 'redip'], + 'prier': ['prier', 'riper'], + 'priest': ['priest', 'pteris', 'sprite', 'stripe'], + 'priestal': ['epistlar', 'pilaster', 'plaister', 'priestal'], + 'priesthood': ['priesthood', 'spritehood'], + 'priestless': ['priestless', 'stripeless'], + 'prig': ['grip', 'prig'], + 'prigman': ['gripman', 'prigman', 'ramping'], + 'prima': ['impar', 'pamir', 'prima'], + 'primage': ['epigram', 'primage'], + 'primal': ['imparl', 'primal'], + 'primates': ['maspiter', 'pastimer', 'primates'], + 'primatial': ['impartial', 'primatial'], + 'primely': ['primely', 'reimply'], + 'primeness': ['primeness', 'spenerism'], + 'primost': ['primost', 'tropism'], + 'primrose': ['primrose', 'promiser'], + 'primsie': ['pismire', 'primsie'], + 'primus': ['primus', 'purism'], + 'prince': ['pincer', 'prince'], + 'princehood': ['cnidophore', 'princehood'], + 'princeite': ['princeite', 'recipient'], + 'princelike': ['pincerlike', 'princelike'], + 'princely': ['pencilry', 'princely'], + 'princesse': ['crepiness', 'princesse'], + 'prine': ['piner', 'prine', 'repin', 'ripen'], + 'pringle': ['pingler', 'pringle'], + 'printed': ['deprint', 'printed'], + 'printer': ['printer', 'reprint'], + 'priodontes': ['desorption', 'priodontes'], + 'prionopine': ['preopinion', 'prionopine'], + 'prisage': ['prisage', 'spairge'], + 'prisal': ['prisal', 'spiral'], + 'prismatoid': ['diatropism', 'prismatoid'], + 'prisometer': ['prisometer', 'spirometer'], + 'prisonable': ['bipersonal', 'prisonable'], + 'pristane': ['pinaster', 'pristane'], + 'pristine': ['enspirit', 'pristine'], + 'pristis': ['pristis', 'tripsis'], + 'prius': ['prius', 'sirup'], + 'proadmission': ['adpromission', 'proadmission'], + 'proal': ['parol', 'polar', 'poral', 'proal'], + 'proalien': ['pelorian', 'peronial', 'proalien'], + 'proamniotic': ['comparition', 'proamniotic'], + 'proathletic': ['proathletic', 'prothetical'], + 'proatlas': ['pastoral', 'proatlas'], + 'proavis': ['pavisor', 'proavis'], + 'probationer': ['probationer', 'reprobation'], + 'probe': ['probe', 'rebop'], + 'procaciously': ['plousiocracy', 'procaciously'], + 'procaine': ['caponier', 'coprinae', 'procaine'], + 'procanal': ['coplanar', 'procanal'], + 'procapital': ['applicator', 'procapital'], + 'procedure': ['procedure', 'reproduce'], + 'proceeder': ['proceeder', 'reproceed'], + 'procellas': ['procellas', 'scalloper'], + 'procerite': ['procerite', 'receiptor'], + 'procession': ['procession', 'scorpiones'], + 'prochemical': ['microcephal', 'prochemical'], + 'procidentia': ['predication', 'procidentia'], + 'proclaimer': ['proclaimer', 'reproclaim'], + 'procne': ['crepon', 'procne'], + 'procnemial': ['complainer', 'procnemial', 'recomplain'], + 
'procommission': ['compromission', 'procommission'], + 'procreant': ['copartner', 'procreant'], + 'procreate': ['procreate', 'pterocera'], + 'procreation': ['incorporate', 'procreation'], + 'proctal': ['caltrop', 'proctal'], + 'proctitis': ['proctitis', 'protistic', 'tropistic'], + 'proctocolitis': ['coloproctitis', 'proctocolitis'], + 'procuress': ['percussor', 'procuress'], + 'prod': ['dorp', 'drop', 'prod'], + 'proddle': ['plodder', 'proddle'], + 'proem': ['merop', 'moper', 'proem', 'remop'], + 'proemial': ['emporial', 'proemial'], + 'proemium': ['emporium', 'pomerium', 'proemium'], + 'proethical': ['carpholite', 'proethical'], + 'proetid': ['diopter', 'peridot', 'proetid', 'protide', 'pteroid'], + 'proetidae': ['periodate', 'proetidae', 'proteidae'], + 'proetus': ['petrous', 'posture', 'proetus', 'proteus', 'septuor', 'spouter'], + 'proficient': ['prefiction', 'proficient'], + 'profit': ['forpit', 'profit'], + 'profiter': ['portfire', 'profiter'], + 'progeny': ['progeny', 'pyrogen'], + 'prohibiter': ['prohibiter', 'reprohibit'], + 'proidealistic': ['peridiastolic', 'periodicalist', 'proidealistic'], + 'proke': ['poker', 'proke'], + 'proker': ['porker', 'proker'], + 'prolacrosse': ['prolacrosse', 'sclerospora'], + 'prolapse': ['prolapse', 'sapropel'], + 'prolation': ['ploration', 'portional', 'prolation'], + 'prolegate': ['petrogale', 'petrolage', 'prolegate'], + 'proletarian': ['prerational', 'proletarian'], + 'proletarianize': ['prerealization', 'proletarianize'], + 'proletariat': ['proletariat', 'reptatorial'], + 'proletary': ['proletary', 'pyrolater'], + 'prolicense': ['prolicense', 'proselenic'], + 'prolongate': ['prolongate', 'protogenal'], + 'promerit': ['importer', 'promerit', 'reimport'], + 'promethean': ['heptameron', 'promethean'], + 'promise': ['imposer', 'promise', 'semipro'], + 'promisee': ['perisome', 'promisee', 'reimpose'], + 'promiser': ['primrose', 'promiser'], + 'promitosis': ['isotropism', 'promitosis'], + 'promnesia': ['mesropian', 'promnesia', 'spironema'], + 'promonarchist': ['micranthropos', 'promonarchist'], + 'promote': ['promote', 'protome'], + 'pronaos': ['pronaos', 'soprano'], + 'pronate': ['operant', 'pronate', 'protean'], + 'pronative': ['overpaint', 'pronative'], + 'proneur': ['proneur', 'purrone'], + 'pronotal': ['portolan', 'pronotal'], + 'pronto': ['pronto', 'proton'], + 'pronunciable': ['preconnubial', 'pronunciable'], + 'proo': ['poor', 'proo'], + 'proofer': ['proofer', 'reproof'], + 'prop': ['prop', 'ropp'], + 'propellent': ['prepollent', 'propellent'], + 'propene': ['preopen', 'propene'], + 'prophloem': ['pleomorph', 'prophloem'], + 'propine': ['piperno', 'propine'], + 'propinoic': ['propinoic', 'propionic'], + 'propionic': ['propinoic', 'propionic'], + 'propitial': ['propitial', 'triplopia'], + 'proportioner': ['proportioner', 'reproportion'], + 'propose': ['opposer', 'propose'], + 'propraetorial': ['propraetorial', 'protoperlaria'], + 'proprietous': ['peritropous', 'proprietous'], + 'propylene': ['polyprene', 'propylene'], + 'propyne': ['propyne', 'pyropen'], + 'prorate': ['praetor', 'prorate'], + 'proration': ['proration', 'troparion'], + 'prore': ['porer', 'prore', 'roper'], + 'prorebate': ['perborate', 'prorebate', 'reprobate'], + 'proreduction': ['proreduction', 'reproduction'], + 'prorevision': ['prorevision', 'provisioner', 'reprovision'], + 'prorogate': ['graperoot', 'prorogate'], + 'prosaist': ['prosaist', 'protasis'], + 'prosateur': ['prosateur', 'pterosaur'], + 'prose': ['poser', 'prose', 'ropes', 'spore'], + 'prosecretin': 
['prosecretin', 'reinspector'], + 'prosectorial': ['corporealist', 'prosectorial'], + 'prosectorium': ['micropterous', 'prosectorium'], + 'proselenic': ['prolicense', 'proselenic'], + 'proselyte': ['polyester', 'proselyte'], + 'proseminate': ['impersonate', 'proseminate'], + 'prosemination': ['impersonation', 'prosemination', 'semipronation'], + 'proseneschal': ['chaperonless', 'proseneschal'], + 'prosification': ['antisoporific', 'prosification', 'sporification'], + 'prosilient': ['linopteris', 'prosilient'], + 'prosiphonate': ['nephroptosia', 'prosiphonate'], + 'proso': ['poros', 'proso', 'sopor', 'spoor'], + 'prosodiacal': ['dorsoapical', 'prosodiacal'], + 'prosopyle': ['polyspore', 'prosopyle'], + 'prossy': ['prossy', 'spyros'], + 'prostatectomy': ['cryptostomate', 'prostatectomy'], + 'prosternate': ['paternoster', 'prosternate', 'transportee'], + 'prostomiate': ['metroptosia', 'prostomiate'], + 'protactic': ['catoptric', 'protactic'], + 'protasis': ['prosaist', 'protasis'], + 'prote': ['poter', 'prote', 'repot', 'tepor', 'toper', 'trope'], + 'protead': ['adopter', 'protead', 'readopt'], + 'protean': ['operant', 'pronate', 'protean'], + 'protease': ['asterope', 'protease'], + 'protectional': ['lactoprotein', 'protectional'], + 'proteic': ['perotic', 'proteic', 'tropeic'], + 'proteida': ['apteroid', 'proteida'], + 'proteidae': ['periodate', 'proetidae', 'proteidae'], + 'proteidean': ['pontederia', 'proteidean'], + 'protein': ['pointer', 'protein', 'pterion', 'repoint', 'tropine'], + 'proteinic': ['epornitic', 'proteinic'], + 'proteles': ['proteles', 'serpolet'], + 'protelidae': ['leopardite', 'protelidae'], + 'protend': ['portend', 'protend'], + 'protension': ['portension', 'protension'], + 'proteolysis': ['elytroposis', 'proteolysis'], + 'proteose': ['esotrope', 'proteose'], + 'protest': ['protest', 'spotter'], + 'protester': ['protester', 'reprotest'], + 'proteus': ['petrous', 'posture', 'proetus', 'proteus', 'septuor', 'spouter'], + 'protheca': ['archpoet', 'protheca'], + 'prothesis': ['posterish', 'prothesis', 'sophister', 'storeship', 'tephrosis'], + 'prothetical': ['proathletic', 'prothetical'], + 'prothoracic': ['acrotrophic', 'prothoracic'], + 'protide': ['diopter', 'peridot', 'proetid', 'protide', 'pteroid'], + 'protist': ['protist', 'tropist'], + 'protistic': ['proctitis', 'protistic', 'tropistic'], + 'proto': ['porto', 'proto', 'troop'], + 'protocneme': ['mecopteron', 'protocneme'], + 'protogenal': ['prolongate', 'protogenal'], + 'protoma': ['protoma', 'taproom'], + 'protomagnesium': ['protomagnesium', 'spermatogonium'], + 'protome': ['promote', 'protome'], + 'protomorphic': ['morphotropic', 'protomorphic'], + 'proton': ['pronto', 'proton'], + 'protone': ['porteno', 'protone'], + 'protonemal': ['monopteral', 'protonemal'], + 'protoparent': ['protoparent', 'protopteran'], + 'protopathic': ['haptotropic', 'protopathic'], + 'protopathy': ['protopathy', 'protophyta'], + 'protoperlaria': ['propraetorial', 'protoperlaria'], + 'protophyta': ['protopathy', 'protophyta'], + 'protophyte': ['protophyte', 'tropophyte'], + 'protophytic': ['protophytic', 'tropophytic'], + 'protopine': ['preoption', 'protopine'], + 'protopteran': ['protoparent', 'protopteran'], + 'protore': ['protore', 'trooper'], + 'protosulphide': ['protosulphide', 'sulphoproteid'], + 'prototheme': ['photometer', 'prototheme'], + 'prototherian': ['ornithoptera', 'prototherian'], + 'prototrophic': ['prototrophic', 'trophotropic'], + 'protrade': ['predator', 'protrade', 'teardrop'], + 'protreaty': ['protreaty', 
'reptatory'], + 'protuberantial': ['perturbational', 'protuberantial'], + 'protyl': ['portly', 'protyl', 'tropyl'], + 'provenient': ['prevention', 'provenient'], + 'provide': ['prevoid', 'provide'], + 'provider': ['overdrip', 'provider'], + 'provisioner': ['prorevision', 'provisioner', 'reprovision'], + 'prowed': ['powder', 'prowed'], + 'prude': ['drupe', 'duper', 'perdu', 'prude', 'pured'], + 'prudent': ['prudent', 'prunted', 'uptrend'], + 'prudential': ['prudential', 'putredinal'], + 'prudist': ['disrupt', 'prudist'], + 'prudy': ['prudy', 'purdy', 'updry'], + 'prue': ['peru', 'prue', 'pure'], + 'prune': ['perun', 'prune'], + 'prunted': ['prudent', 'prunted', 'uptrend'], + 'prussic': ['prussic', 'scirpus'], + 'prut': ['prut', 'turp'], + 'pry': ['pry', 'pyr'], + 'pryer': ['perry', 'pryer'], + 'pryse': ['pryse', 'spyer'], + 'psalm': ['plasm', 'psalm', 'slamp'], + 'psalmic': ['plasmic', 'psalmic'], + 'psalmister': ['psalmister', 'spermalist'], + 'psalmodial': ['plasmodial', 'psalmodial'], + 'psalmodic': ['plasmodic', 'psalmodic'], + 'psaloid': ['psaloid', 'salpoid'], + 'psalter': ['palster', 'persalt', 'plaster', 'psalter', 'spartle', 'stapler'], + 'psalterian': ['alpestrian', 'palestrian', 'psalterian'], + 'psalterion': ['interposal', 'psalterion'], + 'psaltery': ['plastery', 'psaltery'], + 'psaltress': ['psaltress', 'strapless'], + 'psaronius': ['prasinous', 'psaronius'], + 'psedera': ['psedera', 'respade'], + 'psellism': ['misspell', 'psellism'], + 'pseudelytron': ['pseudelytron', 'unproselyted'], + 'pseudimago': ['megapodius', 'pseudimago'], + 'pseudophallic': ['diplocephalus', 'pseudophallic'], + 'psha': ['hasp', 'pash', 'psha', 'shap'], + 'psi': ['psi', 'sip'], + 'psiloceratid': ['prediastolic', 'psiloceratid'], + 'psilophyton': ['polyphonist', 'psilophyton'], + 'psilotic': ['colpitis', 'politics', 'psilotic'], + 'psittacine': ['antiseptic', 'psittacine'], + 'psoadic': ['psoadic', 'scapoid', 'sciapod'], + 'psoas': ['passo', 'psoas'], + 'psocidae': ['diascope', 'psocidae', 'scopidae'], + 'psocine': ['psocine', 'scopine'], + 'psora': ['psora', 'sapor', 'sarpo'], + 'psoralea': ['parosela', 'psoralea'], + 'psoroid': ['psoroid', 'sporoid'], + 'psorous': ['psorous', 'soursop', 'sporous'], + 'psychobiological': ['biopsychological', 'psychobiological'], + 'psychobiology': ['biopsychology', 'psychobiology'], + 'psychogalvanic': ['galvanopsychic', 'psychogalvanic'], + 'psychomancy': ['psychomancy', 'scyphomancy'], + 'psychomonism': ['monopsychism', 'psychomonism'], + 'psychoneurological': ['neuropsychological', 'psychoneurological'], + 'psychoneurosis': ['neuropsychosis', 'psychoneurosis'], + 'psychophysiological': ['physiopsychological', 'psychophysiological'], + 'psychophysiology': ['physiopsychology', 'psychophysiology'], + 'psychorrhagy': ['chrysography', 'psychorrhagy'], + 'psychosomatic': ['psychosomatic', 'somatopsychic'], + 'psychotechnology': ['psychotechnology', 'technopsychology'], + 'psychotheism': ['psychotheism', 'theopsychism'], + 'psychotria': ['physiocrat', 'psychotria'], + 'ptelea': ['apelet', 'ptelea'], + 'ptereal': ['pearlet', 'pleater', 'prelate', 'ptereal', 'replate', 'repleat'], + 'pterian': ['painter', 'pertain', 'pterian', 'repaint'], + 'pterideous': ['depositure', 'pterideous'], + 'pteridological': ['dipterological', 'pteridological'], + 'pteridologist': ['dipterologist', 'pteridologist'], + 'pteridology': ['dipterology', 'pteridology'], + 'pterion': ['pointer', 'protein', 'pterion', 'repoint', 'tropine'], + 'pteris': ['priest', 'pteris', 'sprite', 'stripe'], + 
'pterocera': ['procreate', 'pterocera'], + 'pterodactylidae': ['dactylopteridae', 'pterodactylidae'], + 'pterodactylus': ['dactylopterus', 'pterodactylus'], + 'pterographer': ['petrographer', 'pterographer'], + 'pterographic': ['petrographic', 'pterographic'], + 'pterographical': ['petrographical', 'pterographical'], + 'pterography': ['petrography', 'pterography', 'typographer'], + 'pteroid': ['diopter', 'peridot', 'proetid', 'protide', 'pteroid'], + 'pteroma': ['pteroma', 'tempora'], + 'pteropus': ['pteropus', 'stoppeur'], + 'pterosaur': ['prosateur', 'pterosaur'], + 'pterospora': ['praepostor', 'pterospora'], + 'pteryla': ['apertly', 'peartly', 'platery', 'pteryla', 'taperly'], + 'pterylographical': ['petrographically', 'pterylographical'], + 'pterylological': ['petrologically', 'pterylological'], + 'pterylosis': ['peristylos', 'pterylosis'], + 'ptilota': ['ptilota', 'talipot', 'toptail'], + 'ptinus': ['ptinus', 'unspit'], + 'ptolemean': ['leptonema', 'ptolemean'], + 'ptomain': ['maintop', 'ptomain', 'tampion', 'timpano'], + 'ptomainic': ['impaction', 'ptomainic'], + 'ptyalin': ['inaptly', 'planity', 'ptyalin'], + 'ptyalocele': ['clypeolate', 'ptyalocele'], + 'ptyalogenic': ['genotypical', 'ptyalogenic'], + 'pu': ['pu', 'up'], + 'pua': ['pau', 'pua'], + 'puan': ['napu', 'puan', 'puna'], + 'publisher': ['publisher', 'republish'], + 'puckball': ['puckball', 'pullback'], + 'puckrel': ['plucker', 'puckrel'], + 'pud': ['dup', 'pud'], + 'pudendum': ['pudendum', 'undumped'], + 'pudent': ['pudent', 'uptend'], + 'pudic': ['cupid', 'pudic'], + 'pudical': ['paludic', 'pudical'], + 'pudicity': ['cupidity', 'pudicity'], + 'puerer': ['puerer', 'purree'], + 'puffer': ['puffer', 'repuff'], + 'pug': ['gup', 'pug'], + 'pugman': ['panmug', 'pugman'], + 'puisne': ['puisne', 'supine'], + 'puist': ['puist', 'upsit'], + 'puja': ['jaup', 'puja'], + 'puke': ['keup', 'puke'], + 'pule': ['lupe', 'pelu', 'peul', 'pule'], + 'pulian': ['paulin', 'pulian'], + 'pulicene': ['clupeine', 'pulicene'], + 'pulicidal': ['picudilla', 'pulicidal'], + 'pulicide': ['lupicide', 'pediculi', 'pulicide'], + 'puling': ['gulpin', 'puling'], + 'pulish': ['huspil', 'pulish'], + 'pullback': ['puckball', 'pullback'], + 'pulp': ['plup', 'pulp'], + 'pulper': ['pulper', 'purple'], + 'pulpiter': ['pulpiter', 'repulpit'], + 'pulpous': ['populus', 'pulpous'], + 'pulpstone': ['pulpstone', 'unstopple'], + 'pulsant': ['pulsant', 'upslant'], + 'pulsate': ['pulsate', 'spatule', 'upsteal'], + 'pulsatile': ['palluites', 'pulsatile'], + 'pulsation': ['platinous', 'pulsation'], + 'pulsator': ['postural', 'pulsator', 'sportula'], + 'pulse': ['lepus', 'pulse'], + 'pulsion': ['pulsion', 'unspoil', 'upsilon'], + 'pulvic': ['pulvic', 'vulpic'], + 'pumper': ['pumper', 'repump'], + 'pumple': ['peplum', 'pumple'], + 'puna': ['napu', 'puan', 'puna'], + 'puncher': ['puncher', 'unperch'], + 'punctilio': ['punctilio', 'unpolitic'], + 'puncturer': ['puncturer', 'upcurrent'], + 'punger': ['punger', 'repugn'], + 'pungle': ['plunge', 'pungle'], + 'puniceous': ['pecunious', 'puniceous'], + 'punish': ['inpush', 'punish', 'unship'], + 'punisher': ['punisher', 'repunish'], + 'punishment': ['punishment', 'unshipment'], + 'punlet': ['penult', 'punlet', 'puntel'], + 'punnet': ['punnet', 'unpent'], + 'puno': ['noup', 'puno', 'upon'], + 'punta': ['punta', 'unapt', 'untap'], + 'puntal': ['puntal', 'unplat'], + 'puntel': ['penult', 'punlet', 'puntel'], + 'punti': ['input', 'punti'], + 'punto': ['punto', 'unpot', 'untop'], + 'pupa': ['paup', 'pupa'], + 'pupelo': ['poulpe', 
'pupelo'], + 'purana': ['purana', 'uparna'], + 'purdy': ['prudy', 'purdy', 'updry'], + 'pure': ['peru', 'prue', 'pure'], + 'pured': ['drupe', 'duper', 'perdu', 'prude', 'pured'], + 'puree': ['puree', 'rupee'], + 'purer': ['purer', 'purre'], + 'purine': ['purine', 'unripe', 'uprein'], + 'purism': ['primus', 'purism'], + 'purist': ['purist', 'spruit', 'uprist', 'upstir'], + 'puritan': ['pintura', 'puritan', 'uptrain'], + 'puritaness': ['presustain', 'puritaness', 'supersaint'], + 'purler': ['purler', 'purrel'], + 'purple': ['pulper', 'purple'], + 'purpose': ['peropus', 'purpose'], + 'purpuroxanthin': ['purpuroxanthin', 'xanthopurpurin'], + 'purre': ['purer', 'purre'], + 'purree': ['puerer', 'purree'], + 'purrel': ['purler', 'purrel'], + 'purrone': ['proneur', 'purrone'], + 'purse': ['purse', 'resup', 'sprue', 'super'], + 'purser': ['purser', 'spruer'], + 'purslane': ['purslane', 'serpulan', 'supernal'], + 'purslet': ['purslet', 'spurlet', 'spurtle'], + 'pursuer': ['pursuer', 'usurper'], + 'pursy': ['pursy', 'pyrus', 'syrup'], + 'pus': ['pus', 'sup'], + 'pushtu': ['pushtu', 'upshut'], + 'pustule': ['pluteus', 'pustule'], + 'pustulose': ['pustulose', 'stupulose'], + 'put': ['put', 'tup'], + 'putanism': ['putanism', 'sumpitan'], + 'putation': ['outpaint', 'putation'], + 'putredinal': ['prudential', 'putredinal'], + 'putrid': ['putrid', 'turpid'], + 'putridly': ['putridly', 'turpidly'], + 'pya': ['pay', 'pya', 'yap'], + 'pyal': ['paly', 'play', 'pyal', 'pyla'], + 'pycnial': ['pliancy', 'pycnial'], + 'pyelic': ['epicly', 'pyelic'], + 'pyelitis': ['pyelitis', 'sipylite'], + 'pyelocystitis': ['cystopyelitis', 'pyelocystitis'], + 'pyelonephritis': ['nephropyelitis', 'pyelonephritis'], + 'pyeloureterogram': ['pyeloureterogram', 'ureteropyelogram'], + 'pyemesis': ['empyesis', 'pyemesis'], + 'pygmalion': ['maypoling', 'pygmalion'], + 'pyin': ['piny', 'pyin'], + 'pyla': ['paly', 'play', 'pyal', 'pyla'], + 'pylades': ['pylades', 'splayed'], + 'pylagore': ['playgoer', 'pylagore'], + 'pylar': ['parly', 'pylar', 'pyral'], + 'pyonephrosis': ['nephropyosis', 'pyonephrosis'], + 'pyopneumothorax': ['pneumopyothorax', 'pyopneumothorax'], + 'pyosepticemia': ['pyosepticemia', 'septicopyemia'], + 'pyosepticemic': ['pyosepticemic', 'septicopyemic'], + 'pyr': ['pry', 'pyr'], + 'pyracanth': ['pantarchy', 'pyracanth'], + 'pyral': ['parly', 'pylar', 'pyral'], + 'pyrales': ['parsley', 'pyrales', 'sparely', 'splayer'], + 'pyralid': ['pyralid', 'rapidly'], + 'pyramidale': ['lampyridae', 'pyramidale'], + 'pyranometer': ['premonetary', 'pyranometer'], + 'pyre': ['prey', 'pyre', 'rype'], + 'pyrena': ['napery', 'pyrena'], + 'pyrenic': ['cyprine', 'pyrenic'], + 'pyrenoid': ['pyrenoid', 'pyridone', 'pyrodine'], + 'pyretogenic': ['pyretogenic', 'pyrogenetic'], + 'pyrex': ['prexy', 'pyrex'], + 'pyrgocephaly': ['pelycography', 'pyrgocephaly'], + 'pyridone': ['pyrenoid', 'pyridone', 'pyrodine'], + 'pyrites': ['pyrites', 'sperity'], + 'pyritoid': ['pityroid', 'pyritoid'], + 'pyro': ['pory', 'pyro', 'ropy'], + 'pyroarsenite': ['arsenopyrite', 'pyroarsenite'], + 'pyrochemical': ['microcephaly', 'pyrochemical'], + 'pyrocomenic': ['pyrocomenic', 'pyromeconic'], + 'pyrodine': ['pyrenoid', 'pyridone', 'pyrodine'], + 'pyrogen': ['progeny', 'pyrogen'], + 'pyrogenetic': ['pyretogenic', 'pyrogenetic'], + 'pyrolater': ['proletary', 'pyrolater'], + 'pyromantic': ['importancy', 'patronymic', 'pyromantic'], + 'pyromeconic': ['pyrocomenic', 'pyromeconic'], + 'pyrope': ['popery', 'pyrope'], + 'pyropen': ['propyne', 'pyropen'], + 'pyrophorus': 
['porphyrous', 'pyrophorus'], + 'pyrotheria': ['erythropia', 'pyrotheria'], + 'pyruline': ['pyruline', 'unripely'], + 'pyrus': ['pursy', 'pyrus', 'syrup'], + 'pyruvil': ['pyruvil', 'pyvuril'], + 'pythagorism': ['myographist', 'pythagorism'], + 'pythia': ['pythia', 'typhia'], + 'pythic': ['phytic', 'pitchy', 'pythic', 'typhic'], + 'pythogenesis': ['phytogenesis', 'pythogenesis'], + 'pythogenetic': ['phytogenetic', 'pythogenetic'], + 'pythogenic': ['phytogenic', 'pythogenic', 'typhogenic'], + 'pythogenous': ['phytogenous', 'pythogenous'], + 'python': ['phyton', 'python'], + 'pythonic': ['hypnotic', 'phytonic', 'pythonic', 'typhonic'], + 'pythonism': ['hypnotism', 'pythonism'], + 'pythonist': ['hypnotist', 'pythonist'], + 'pythonize': ['hypnotize', 'pythonize'], + 'pythonoid': ['hypnotoid', 'pythonoid'], + 'pyvuril': ['pyruvil', 'pyvuril'], + 'quadrual': ['quadrual', 'quadrula'], + 'quadrula': ['quadrual', 'quadrula'], + 'quail': ['quail', 'quila'], + 'quake': ['quake', 'queak'], + 'quale': ['equal', 'quale', 'queal'], + 'qualitied': ['liquidate', 'qualitied'], + 'quamoclit': ['coquitlam', 'quamoclit'], + 'quannet': ['quannet', 'tanquen'], + 'quartile': ['quartile', 'requital', 'triequal'], + 'quartine': ['antiquer', 'quartine'], + 'quata': ['quata', 'taqua'], + 'quatrin': ['quatrin', 'tarquin'], + 'queak': ['quake', 'queak'], + 'queal': ['equal', 'quale', 'queal'], + 'queeve': ['eveque', 'queeve'], + 'quencher': ['quencher', 'requench'], + 'querier': ['querier', 'require'], + 'queriman': ['queriman', 'ramequin'], + 'querist': ['querist', 'squiret'], + 'quernal': ['quernal', 'ranquel'], + 'quester': ['quester', 'request'], + 'questioner': ['questioner', 'requestion'], + 'questor': ['questor', 'torques'], + 'quiet': ['quiet', 'quite'], + 'quietable': ['equitable', 'quietable'], + 'quieter': ['quieter', 'requite'], + 'quietist': ['equitist', 'quietist'], + 'quietsome': ['quietsome', 'semiquote'], + 'quiina': ['quiina', 'quinia'], + 'quila': ['quail', 'quila'], + 'quiles': ['quiles', 'quisle'], + 'quinate': ['antique', 'quinate'], + 'quince': ['cinque', 'quince'], + 'quinia': ['quiina', 'quinia'], + 'quinite': ['inquiet', 'quinite'], + 'quinnat': ['quinnat', 'quintan'], + 'quinse': ['quinse', 'sequin'], + 'quintan': ['quinnat', 'quintan'], + 'quintato': ['quintato', 'totaquin'], + 'quinze': ['quinze', 'zequin'], + 'quirt': ['quirt', 'qurti'], + 'quisle': ['quiles', 'quisle'], + 'quite': ['quiet', 'quite'], + 'quits': ['quits', 'squit'], + 'quote': ['quote', 'toque'], + 'quoter': ['quoter', 'roquet', 'torque'], + 'qurti': ['quirt', 'qurti'], + 'ra': ['ar', 'ra'], + 'raad': ['adar', 'arad', 'raad', 'rada'], + 'raash': ['asarh', 'raash', 'sarah'], + 'rab': ['bar', 'bra', 'rab'], + 'raband': ['bandar', 'raband'], + 'rabatine': ['atabrine', 'rabatine'], + 'rabatte': ['baretta', 'rabatte', 'tabaret'], + 'rabbanite': ['barnabite', 'rabbanite', 'rabbinate'], + 'rabbet': ['barbet', 'rabbet', 'tabber'], + 'rabbinate': ['barnabite', 'rabbanite', 'rabbinate'], + 'rabble': ['barbel', 'labber', 'rabble'], + 'rabboni': ['barbion', 'rabboni'], + 'rabi': ['abir', 'bari', 'rabi'], + 'rabic': ['baric', 'carib', 'rabic'], + 'rabid': ['barid', 'bidar', 'braid', 'rabid'], + 'rabidly': ['bardily', 'rabidly', 'ridably'], + 'rabidness': ['bardiness', 'rabidness'], + 'rabies': ['braise', 'rabies', 'rebias'], + 'rabin': ['abrin', 'bairn', 'brain', 'brian', 'rabin'], + 'rabinet': ['atebrin', 'rabinet'], + 'raccoon': ['carcoon', 'raccoon'], + 'race': ['acer', 'acre', 'care', 'crea', 'race'], + 'racemate': ['camerate', 
'macerate', 'racemate'], + 'racemation': ['aeromantic', 'cameration', 'maceration', 'racemation'], + 'raceme': ['amerce', 'raceme'], + 'racemed': ['decream', 'racemed'], + 'racemic': ['ceramic', 'racemic'], + 'racer': ['carer', 'crare', 'racer'], + 'rach': ['arch', 'char', 'rach'], + 'rache': ['acher', 'arche', 'chare', 'chera', 'rache', 'reach'], + 'rachel': ['rachel', 'rechal'], + 'rachianectes': ['rachianectes', 'rhacianectes'], + 'rachidial': ['diarchial', 'rachidial'], + 'rachitis': ['architis', 'rachitis'], + 'racial': ['alaric', 'racial'], + 'racialist': ['racialist', 'satirical'], + 'racing': ['arcing', 'racing'], + 'racist': ['crista', 'racist'], + 'rack': ['cark', 'rack'], + 'racker': ['racker', 'rerack'], + 'racket': ['racket', 'retack', 'tacker'], + 'racking': ['arcking', 'carking', 'racking'], + 'rackingly': ['carkingly', 'rackingly'], + 'rackle': ['calker', 'lacker', 'rackle', 'recalk', 'reckla'], + 'racon': ['acorn', 'acron', 'racon'], + 'raconteur': ['cuarteron', 'raconteur'], + 'racoon': ['caroon', 'corona', 'racoon'], + 'racy': ['cary', 'racy'], + 'rad': ['dar', 'rad'], + 'rada': ['adar', 'arad', 'raad', 'rada'], + 'raddle': ['ladder', 'raddle'], + 'raddleman': ['dreamland', 'raddleman'], + 'radectomy': ['myctodera', 'radectomy'], + 'radek': ['daker', 'drake', 'kedar', 'radek'], + 'radiable': ['labridae', 'radiable'], + 'radiale': ['ardelia', 'laridae', 'radiale'], + 'radian': ['adrian', 'andira', 'andria', 'radian', 'randia'], + 'radiance': ['caridean', 'dircaean', 'radiance'], + 'radiant': ['intrada', 'radiant'], + 'radiata': ['dataria', 'radiata'], + 'radical': ['cardial', 'radical'], + 'radicant': ['antacrid', 'cardiant', 'radicant', 'tridacna'], + 'radicel': ['decrial', 'radicel', 'radicle'], + 'radices': ['diceras', 'radices', 'sidecar'], + 'radicle': ['decrial', 'radicel', 'radicle'], + 'radicose': ['idocrase', 'radicose'], + 'radicule': ['auricled', 'radicule'], + 'radiculose': ['coresidual', 'radiculose'], + 'radiectomy': ['acidometry', 'medicatory', 'radiectomy'], + 'radii': ['dairi', 'darii', 'radii'], + 'radio': ['aroid', 'doria', 'radio'], + 'radioautograph': ['autoradiograph', 'radioautograph'], + 'radioautographic': ['autoradiographic', 'radioautographic'], + 'radioautography': ['autoradiography', 'radioautography'], + 'radiohumeral': ['humeroradial', 'radiohumeral'], + 'radiolite': ['editorial', 'radiolite'], + 'radiolucent': ['radiolucent', 'reductional'], + 'radioman': ['adoniram', 'radioman'], + 'radiomicrometer': ['microradiometer', 'radiomicrometer'], + 'radiophone': ['phoronidea', 'radiophone'], + 'radiophony': ['hypodorian', 'radiophony'], + 'radiotelephone': ['radiotelephone', 'teleradiophone'], + 'radiotron': ['ordinator', 'radiotron'], + 'radish': ['ardish', 'radish'], + 'radius': ['darius', 'radius'], + 'radman': ['mandra', 'radman'], + 'radon': ['adorn', 'donar', 'drona', 'radon'], + 'radula': ['adular', 'aludra', 'radula'], + 'rafael': ['aflare', 'rafael'], + 'rafe': ['fare', 'fear', 'frae', 'rafe'], + 'raffee': ['affeer', 'raffee'], + 'raffia': ['affair', 'raffia'], + 'raffle': ['farfel', 'raffle'], + 'rafik': ['fakir', 'fraik', 'kafir', 'rafik'], + 'raft': ['frat', 'raft'], + 'raftage': ['fregata', 'raftage'], + 'rafter': ['frater', 'rafter'], + 'rag': ['gar', 'gra', 'rag'], + 'raga': ['agar', 'agra', 'gara', 'raga'], + 'rage': ['ager', 'agre', 'gare', 'gear', 'rage'], + 'rageless': ['eelgrass', 'gearless', 'rageless'], + 'ragfish': ['garfish', 'ragfish'], + 'ragged': ['dagger', 'gadger', 'ragged'], + 'raggee': ['agrege', 'raggee'], + 
'raggety': ['gargety', 'raggety'], + 'raggle': ['gargle', 'gregal', 'lagger', 'raggle'], + 'raggled': ['draggle', 'raggled'], + 'raggy': ['aggry', 'raggy'], + 'ragingly': ['grayling', 'ragingly'], + 'raglanite': ['antiglare', 'raglanite'], + 'raglet': ['raglet', 'tergal'], + 'raglin': ['nargil', 'raglin'], + 'ragman': ['amgarn', 'mangar', 'marang', 'ragman'], + 'ragnar': ['garran', 'ragnar'], + 'ragshag': ['ragshag', 'shagrag'], + 'ragtag': ['ragtag', 'tagrag'], + 'ragtime': ['migrate', 'ragtime'], + 'ragule': ['ragule', 'regula'], + 'raguly': ['glaury', 'raguly'], + 'raia': ['aira', 'aria', 'raia'], + 'raid': ['arid', 'dari', 'raid'], + 'raider': ['arride', 'raider'], + 'raif': ['fair', 'fiar', 'raif'], + 'raiidae': ['ariidae', 'raiidae'], + 'rail': ['aril', 'lair', 'lari', 'liar', 'lira', 'rail', 'rial'], + 'railage': ['lairage', 'railage', 'regalia'], + 'railer': ['railer', 'rerail'], + 'railhead': ['headrail', 'railhead'], + 'railless': ['lairless', 'railless'], + 'railman': ['lairman', 'laminar', 'malarin', 'railman'], + 'raiment': ['minaret', 'raiment', 'tireman'], + 'rain': ['arni', 'iran', 'nair', 'rain', 'rani'], + 'raincoat': ['craniota', 'croatian', 'narcotia', 'raincoat'], + 'rainful': ['rainful', 'unfrail'], + 'rainspout': ['rainspout', 'supinator'], + 'rainy': ['nairy', 'rainy'], + 'rais': ['rais', 'sair', 'sari'], + 'raise': ['aries', 'arise', 'raise', 'serai'], + 'raiseman': ['erasmian', 'raiseman'], + 'raiser': ['raiser', 'sierra'], + 'raisin': ['raisin', 'sirian'], + 'raj': ['jar', 'raj'], + 'raja': ['ajar', 'jara', 'raja'], + 'rajah': ['ajhar', 'rajah'], + 'rajeev': ['evejar', 'rajeev'], + 'rake': ['rake', 'reak'], + 'rakesteel': ['rakesteel', 'rakestele'], + 'rakestele': ['rakesteel', 'rakestele'], + 'rakh': ['hark', 'khar', 'rakh'], + 'raki': ['ikra', 'kari', 'raki'], + 'rakish': ['rakish', 'riksha', 'shikar', 'shikra', 'sikhra'], + 'rakit': ['kitar', 'krait', 'rakit', 'traik'], + 'raku': ['kuar', 'raku', 'rauk'], + 'ralf': ['farl', 'ralf'], + 'ralliance': ['alliancer', 'ralliance'], + 'ralline': ['ralline', 'renilla'], + 'ram': ['arm', 'mar', 'ram'], + 'rama': ['amar', 'amra', 'mara', 'rama'], + 'ramada': ['armada', 'damara', 'ramada'], + 'ramage': ['gemara', 'ramage'], + 'ramaite': ['ametria', 'artemia', 'meratia', 'ramaite'], + 'ramal': ['alarm', 'malar', 'maral', 'marla', 'ramal'], + 'ramanas': ['ramanas', 'sramana'], + 'ramate': ['ramate', 'retama'], + 'ramble': ['ambler', 'blamer', 'lamber', 'marble', 'ramble'], + 'rambler': ['marbler', 'rambler'], + 'rambling': ['marbling', 'rambling'], + 'rambo': ['broma', 'rambo'], + 'rambutan': ['rambutan', 'tamburan'], + 'rame': ['erma', 'mare', 'rame', 'ream'], + 'ramed': ['armed', 'derma', 'dream', 'ramed'], + 'rament': ['manter', 'marten', 'rament'], + 'ramental': ['maternal', 'ramental'], + 'ramequin': ['queriman', 'ramequin'], + 'ramesh': ['masher', 'ramesh', 'shamer'], + 'ramet': ['armet', + 'mater', + 'merat', + 'metra', + 'ramet', + 'tamer', + 'terma', + 'trame', + 'trema'], + 'rami': ['amir', 'irma', 'mari', 'mira', 'rami', 'rima'], + 'ramie': ['aimer', 'maire', 'marie', 'ramie'], + 'ramiferous': ['armiferous', 'ramiferous'], + 'ramigerous': ['armigerous', 'ramigerous'], + 'ramillie': ['milliare', 'ramillie'], + 'ramisection': ['anisometric', + 'creationism', + 'miscreation', + 'ramisection', + 'reactionism'], + 'ramist': ['marist', 'matris', 'ramist'], + 'ramline': ['marline', 'mineral', 'ramline'], + 'rammel': ['lammer', 'rammel'], + 'rammy': ['mymar', 'rammy'], + 'ramon': ['manor', 'moran', 'norma', 'ramon', 
'roman'], + 'ramona': ['oarman', 'ramona'], + 'ramose': ['amores', 'ramose', 'sorema'], + 'ramosely': ['marysole', 'ramosely'], + 'ramp': ['pram', 'ramp'], + 'rampant': ['mantrap', 'rampant'], + 'ramped': ['damper', 'ramped'], + 'ramper': ['prearm', 'ramper'], + 'rampike': ['perakim', 'permiak', 'rampike'], + 'ramping': ['gripman', 'prigman', 'ramping'], + 'ramsey': ['ramsey', 'smeary'], + 'ramson': ['ramson', 'ransom'], + 'ramtil': ['mitral', 'ramtil'], + 'ramule': ['mauler', 'merula', 'ramule'], + 'ramulus': ['malurus', 'ramulus'], + 'ramus': ['musar', 'ramus', 'rusma', 'surma'], + 'ramusi': ['misura', 'ramusi'], + 'ran': ['arn', 'nar', 'ran'], + 'rana': ['arna', 'rana'], + 'ranales': ['arsenal', 'ranales'], + 'rance': ['caner', 'crane', 'crena', 'nacre', 'rance'], + 'rancel': ['lancer', 'rancel'], + 'rancer': ['craner', 'rancer'], + 'ranche': ['enarch', 'ranche'], + 'ranchero': ['anchorer', 'ranchero', 'reanchor'], + 'rancho': ['anchor', 'archon', 'charon', 'rancho'], + 'rancid': ['andric', 'cardin', 'rancid'], + 'rand': ['darn', 'nard', 'rand'], + 'randan': ['annard', 'randan'], + 'randem': ['damner', 'manred', 'randem', 'remand'], + 'rander': ['darner', 'darren', 'errand', 'rander', 'redarn'], + 'randia': ['adrian', 'andira', 'andria', 'radian', 'randia'], + 'randing': ['darning', 'randing'], + 'randite': ['antired', 'detrain', 'randite', 'trained'], + 'randle': ['aldern', + 'darnel', + 'enlard', + 'lander', + 'lenard', + 'randle', + 'reland'], + 'random': ['random', 'rodman'], + 'rane': ['arne', 'earn', 'rane'], + 'ranere': ['earner', 'ranere'], + 'rang': ['garn', 'gnar', 'rang'], + 'range': ['anger', 'areng', 'grane', 'range'], + 'ranged': ['danger', 'gander', 'garden', 'ranged'], + 'rangeless': ['largeness', 'rangeless', 'regalness'], + 'ranger': ['garner', 'ranger'], + 'rangey': ['anergy', 'rangey'], + 'ranginess': ['angriness', 'ranginess'], + 'rangle': ['angler', 'arleng', 'garnel', 'largen', 'rangle', 'regnal'], + 'rangy': ['angry', 'rangy'], + 'rani': ['arni', 'iran', 'nair', 'rain', 'rani'], + 'ranid': ['darin', 'dinar', 'drain', 'indra', 'nadir', 'ranid'], + 'ranidae': ['araneid', 'ariadne', 'ranidae'], + 'raniform': ['nariform', 'raniform'], + 'raninae': ['aranein', 'raninae'], + 'ranine': ['narine', 'ranine'], + 'rank': ['knar', 'kran', 'nark', 'rank'], + 'ranked': ['darken', 'kanred', 'ranked'], + 'ranker': ['ranker', 'rerank'], + 'rankish': ['krishna', 'rankish'], + 'rannel': ['lanner', 'rannel'], + 'ranquel': ['quernal', 'ranquel'], + 'ransom': ['ramson', 'ransom'], + 'rant': ['natr', 'rant', 'tarn', 'tran'], + 'rantepole': ['petrolean', 'rantepole'], + 'ranter': ['arrent', 'errant', 'ranter', 'ternar'], + 'rantipole': ['prelation', 'rantipole'], + 'ranula': ['alraun', 'alruna', 'ranula'], + 'rap': ['par', 'rap'], + 'rape': ['aper', 'pare', 'pear', 'rape', 'reap'], + 'rapeful': ['rapeful', 'upflare'], + 'raper': ['parer', 'raper'], + 'raphael': ['phalera', 'raphael'], + 'raphaelic': ['eparchial', 'raphaelic'], + 'raphe': ['hepar', 'phare', 'raphe'], + 'raphia': ['pahari', 'pariah', 'raphia'], + 'raphides': ['diphaser', 'parished', 'raphides', 'sephardi'], + 'raphis': ['parish', 'raphis', 'rhapis'], + 'rapic': ['capri', 'picra', 'rapic'], + 'rapid': ['adrip', 'rapid'], + 'rapidly': ['pyralid', 'rapidly'], + 'rapier': ['pairer', 'rapier', 'repair'], + 'rapine': ['parine', 'rapine'], + 'raping': ['paring', 'raping'], + 'rapparee': ['appearer', 'rapparee', 'reappear'], + 'rappe': ['paper', 'rappe'], + 'rappel': ['lapper', 'rappel'], + 'rappite': ['periapt', 'rappite'], + 
'rapt': ['part', 'prat', 'rapt', 'tarp', 'trap'], + 'raptly': ['paltry', 'partly', 'raptly'], + 'raptor': ['parrot', 'raptor'], + 'rapture': ['parture', 'rapture'], + 'rare': ['rare', 'rear'], + 'rarebit': ['arbiter', 'rarebit'], + 'rareripe': ['rareripe', 'repairer'], + 'rarish': ['arrish', 'harris', 'rarish', 'sirrah'], + 'ras': ['ras', 'sar'], + 'rasa': ['rasa', 'sara'], + 'rascacio': ['coracias', 'rascacio'], + 'rascal': ['lascar', 'rascal', 'sacral', 'scalar'], + 'rase': ['arse', 'rase', 'sare', 'sear', 'sera'], + 'rasen': ['anser', 'nares', 'rasen', 'snare'], + 'raser': ['ersar', 'raser', 'serra'], + 'rasher': ['rasher', 'sharer'], + 'rashing': ['garnish', 'rashing'], + 'rashti': ['rashti', 'tarish'], + 'rasion': ['arsino', 'rasion', 'sonrai'], + 'rasp': ['rasp', 'spar'], + 'rasped': ['rasped', 'spader', 'spread'], + 'rasper': ['parser', 'rasper', 'sparer'], + 'rasping': ['aspring', 'rasping', 'sparing'], + 'raspingly': ['raspingly', 'sparingly'], + 'raspingness': ['raspingness', 'sparingness'], + 'raspite': ['piaster', 'piastre', 'raspite', 'spirate', 'traipse'], + 'raspy': ['raspy', 'spary', 'spray'], + 'rasse': ['arses', 'rasse'], + 'raster': ['arrest', 'astrer', 'raster', 'starer'], + 'rastik': ['rastik', 'sarkit', 'straik'], + 'rastle': ['laster', + 'lastre', + 'rastle', + 'relast', + 'resalt', + 'salter', + 'slater', + 'stelar'], + 'rastus': ['rastus', 'tarsus'], + 'rat': ['art', 'rat', 'tar', 'tra'], + 'rata': ['rata', 'taar', 'tara'], + 'ratable': ['alberta', 'latebra', 'ratable'], + 'ratal': ['altar', 'artal', 'ratal', 'talar'], + 'ratanhia': ['ratanhia', 'rhatania'], + 'ratbite': ['biretta', 'brattie', 'ratbite'], + 'ratch': ['chart', 'ratch'], + 'ratchel': ['clethra', 'latcher', 'ratchel', 'relatch', 'talcher', 'trachle'], + 'ratcher': ['charter', 'ratcher'], + 'ratchet': ['chatter', 'ratchet'], + 'ratchety': ['chattery', 'ratchety', 'trachyte'], + 'ratching': ['charting', 'ratching'], + 'rate': ['rate', 'tare', 'tear', 'tera'], + 'rated': ['dater', 'derat', 'detar', 'drate', 'rated', 'trade', 'tread'], + 'ratel': ['alert', 'alter', 'artel', 'later', 'ratel', 'taler', 'telar'], + 'rateless': ['rateless', 'tasseler', 'tearless', 'tesseral'], + 'ratfish': ['ratfish', 'tashrif'], + 'rath': ['hart', 'rath', 'tahr', 'thar', 'trah'], + 'rathe': ['earth', 'hater', 'heart', 'herat', 'rathe'], + 'rathed': ['dearth', 'hatred', 'rathed', 'thread'], + 'rathely': ['earthly', 'heartly', 'lathery', 'rathely'], + 'ratherest': ['ratherest', 'shatterer'], + 'rathest': ['rathest', 'shatter'], + 'rathite': ['hartite', 'rathite'], + 'rathole': ['loather', 'rathole'], + 'raticidal': ['raticidal', 'triadical'], + 'raticide': ['ceratiid', 'raticide'], + 'ratine': ['nerita', 'ratine', 'retain', 'retina', 'tanier'], + 'rating': ['rating', 'tringa'], + 'ratio': ['ariot', 'ratio'], + 'ration': ['aroint', 'ration'], + 'rationable': ['alboranite', 'rationable'], + 'rational': ['notarial', 'rational', 'rotalian'], + 'rationale': ['alienator', 'rationale'], + 'rationalize': ['rationalize', 'realization'], + 'rationally': ['notarially', 'rationally'], + 'rationate': ['notariate', 'rationate'], + 'ratitae': ['arietta', 'ratitae'], + 'ratite': ['attire', 'ratite', 'tertia'], + 'ratitous': ['outstair', 'ratitous'], + 'ratlike': ['artlike', 'ratlike', 'tarlike'], + 'ratline': ['entrail', + 'latiner', + 'latrine', + 'ratline', + 'reliant', + 'retinal', + 'trenail'], + 'rattage': ['garetta', 'rattage', 'regatta'], + 'rattan': ['rattan', 'tantra', 'tartan'], + 'ratteen': ['entreat', 'ratteen', 'tarente', 
'ternate', 'tetrane'], + 'ratten': ['attern', 'natter', 'ratten', 'tarten'], + 'ratti': ['ratti', 'titar', 'trait'], + 'rattish': ['athirst', 'rattish', 'tartish'], + 'rattle': ['artlet', 'latter', 'rattle', 'tartle', 'tatler'], + 'rattles': ['rattles', 'slatter', 'starlet', 'startle'], + 'rattlesome': ['rattlesome', 'saltometer'], + 'rattly': ['rattly', 'tartly'], + 'ratton': ['attorn', 'ratton', 'rottan'], + 'rattus': ['astrut', 'rattus', 'stuart'], + 'ratwood': ['ratwood', 'tarwood'], + 'raught': ['raught', 'tughra'], + 'rauk': ['kuar', 'raku', 'rauk'], + 'raul': ['alur', 'laur', 'lura', 'raul', 'ural'], + 'rauli': ['rauli', 'urali', 'urial'], + 'raun': ['raun', 'uran', 'urna'], + 'raunge': ['nauger', 'raunge', 'ungear'], + 'rave': ['aver', 'rave', 'vare', 'vera'], + 'ravel': ['arvel', 'larve', 'laver', 'ravel', 'velar'], + 'ravelin': ['elinvar', 'ravelin', 'reanvil', 'valerin'], + 'ravelly': ['ravelly', 'valeryl'], + 'ravendom': ['overdamn', 'ravendom'], + 'ravenelia': ['ravenelia', 'veneralia'], + 'ravenish': ['enravish', 'ravenish', 'vanisher'], + 'ravi': ['ravi', 'riva', 'vair', 'vari', 'vira'], + 'ravigote': ['ravigote', 'rogative'], + 'ravin': ['invar', 'ravin', 'vanir'], + 'ravine': ['averin', 'ravine'], + 'ravined': ['invader', 'ravined', 'viander'], + 'raving': ['grivna', 'raving'], + 'ravissant': ['ravissant', 'srivatsan'], + 'raw': ['raw', 'war'], + 'rawboned': ['downbear', 'rawboned'], + 'rawish': ['rawish', 'wairsh', 'warish'], + 'rax': ['arx', 'rax'], + 'ray': ['ary', 'ray', 'yar'], + 'raya': ['arya', 'raya'], + 'rayan': ['aryan', 'nayar', 'rayan'], + 'rayed': ['deary', 'deray', 'rayed', 'ready', 'yeard'], + 'raylet': ['lyrate', 'raylet', 'realty', 'telary'], + 'rayonnance': ['annoyancer', 'rayonnance'], + 'raze': ['ezra', 'raze'], + 're': ['er', 're'], + 'rea': ['aer', 'are', 'ear', 'era', 'rea'], + 'reaal': ['areal', 'reaal'], + 'reabandon': ['abandoner', 'reabandon'], + 'reabolish': ['abolisher', 'reabolish'], + 'reabsent': ['absenter', 'reabsent'], + 'reabsorb': ['absorber', 'reabsorb'], + 'reaccession': ['accessioner', 'reaccession'], + 'reaccomplish': ['accomplisher', 'reaccomplish'], + 'reaccord': ['accorder', 'reaccord'], + 'reaccost': ['ectosarc', 'reaccost'], + 'reach': ['acher', 'arche', 'chare', 'chera', 'rache', 'reach'], + 'reachieve': ['echeveria', 'reachieve'], + 'react': ['caret', + 'carte', + 'cater', + 'crate', + 'creat', + 'creta', + 'react', + 'recta', + 'trace'], + 'reactance': ['cancerate', 'reactance'], + 'reaction': ['actioner', 'anerotic', 'ceration', 'creation', 'reaction'], + 'reactional': ['creational', 'crotalinae', 'laceration', 'reactional'], + 'reactionary': ['creationary', 'reactionary'], + 'reactionism': ['anisometric', + 'creationism', + 'miscreation', + 'ramisection', + 'reactionism'], + 'reactionist': ['creationist', 'reactionist'], + 'reactive': ['creative', 'reactive'], + 'reactively': ['creatively', 'reactively'], + 'reactiveness': ['creativeness', 'reactiveness'], + 'reactivity': ['creativity', 'reactivity'], + 'reactor': ['creator', 'reactor'], + 'read': ['ared', 'daer', 'dare', 'dear', 'read'], + 'readapt': ['adapter', 'predata', 'readapt'], + 'readd': ['adder', 'dread', 'readd'], + 'readdress': ['addresser', 'readdress'], + 'reader': ['reader', 'redare', 'reread'], + 'reading': ['degrain', 'deraign', 'deringa', 'gradine', 'grained', 'reading'], + 'readjust': ['adjuster', 'readjust'], + 'readopt': ['adopter', 'protead', 'readopt'], + 'readorn': ['adorner', 'readorn'], + 'ready': ['deary', 'deray', 'rayed', 'ready', 'yeard'], + 
'reaffect': ['affecter', 'reaffect'], + 'reaffirm': ['affirmer', 'reaffirm'], + 'reafflict': ['afflicter', 'reafflict'], + 'reagent': ['grantee', 'greaten', 'reagent', 'rentage'], + 'reagin': ['arenig', 'earing', 'gainer', 'reagin', 'regain'], + 'reak': ['rake', 'reak'], + 'real': ['earl', 'eral', 'lear', 'real'], + 'reales': ['alerse', 'leaser', 'reales', 'resale', 'reseal', 'sealer'], + 'realest': ['realest', 'reslate', 'resteal', 'stealer', 'teasler'], + 'realign': ['aligner', 'engrail', 'realign', 'reginal'], + 'realignment': ['engrailment', 'realignment'], + 'realism': ['mislear', 'realism'], + 'realist': ['aletris', 'alister', 'listera', 'realist', 'saltier'], + 'realistic': ['eristical', 'realistic'], + 'reality': ['irately', 'reality'], + 'realive': ['realive', 'valerie'], + 'realization': ['rationalize', 'realization'], + 'reallot': ['reallot', 'rotella', 'tallero'], + 'reallow': ['allower', 'reallow'], + 'reallude': ['laureled', 'reallude'], + 'realmlet': ['realmlet', 'tremella'], + 'realter': ['alterer', 'realter', 'relater'], + 'realtor': ['realtor', 'relator'], + 'realty': ['lyrate', 'raylet', 'realty', 'telary'], + 'ream': ['erma', 'mare', 'rame', 'ream'], + 'reamage': ['megaera', 'reamage'], + 'reamass': ['amasser', 'reamass'], + 'reamend': ['amender', 'meander', 'reamend', 'reedman'], + 'reamer': ['marree', 'reamer'], + 'reamuse': ['measure', 'reamuse'], + 'reamy': ['mayer', 'reamy'], + 'reanchor': ['anchorer', 'ranchero', 'reanchor'], + 'reanneal': ['annealer', 'lernaean', 'reanneal'], + 'reannex': ['annexer', 'reannex'], + 'reannoy': ['annoyer', 'reannoy'], + 'reanoint': ['anointer', 'inornate', 'nonirate', 'reanoint'], + 'reanswer': ['answerer', 'reanswer'], + 'reanvil': ['elinvar', 'ravelin', 'reanvil', 'valerin'], + 'reap': ['aper', 'pare', 'pear', 'rape', 'reap'], + 'reapdole': ['leoparde', 'reapdole'], + 'reappeal': ['appealer', 'reappeal'], + 'reappear': ['appearer', 'rapparee', 'reappear'], + 'reapplaud': ['applauder', 'reapplaud'], + 'reappoint': ['appointer', 'reappoint'], + 'reapportion': ['apportioner', 'reapportion'], + 'reapprehend': ['apprehender', 'reapprehend'], + 'reapproach': ['approacher', 'reapproach'], + 'rear': ['rare', 'rear'], + 'reargue': ['augerer', 'reargue'], + 'reargument': ['garmenture', 'reargument'], + 'rearise': ['rearise', 'reraise'], + 'rearm': ['armer', 'rearm'], + 'rearray': ['arrayer', 'rearray'], + 'rearrest': ['arrester', 'rearrest'], + 'reascend': ['ascender', 'reascend'], + 'reascent': ['reascent', 'sarcenet'], + 'reascertain': ['ascertainer', 'reascertain', 'secretarian'], + 'reask': ['asker', 'reask', 'saker', 'sekar'], + 'reason': ['arseno', 'reason'], + 'reassail': ['assailer', 'reassail'], + 'reassault': ['assaulter', 'reassault', 'saleratus'], + 'reassay': ['assayer', 'reassay'], + 'reassent': ['assenter', 'reassent', 'sarsenet'], + 'reassert': ['asserter', 'reassert'], + 'reassign': ['assigner', 'reassign'], + 'reassist': ['assister', 'reassist'], + 'reassort': ['assertor', 'assorter', 'oratress', 'reassort'], + 'reastonish': ['astonisher', 'reastonish', 'treasonish'], + 'reasty': ['atresy', 'estray', 'reasty', 'stayer'], + 'reasy': ['reasy', 'resay', 'sayer', 'seary'], + 'reattach': ['attacher', 'reattach'], + 'reattack': ['attacker', 'reattack'], + 'reattain': ['attainer', 'reattain', 'tertiana'], + 'reattempt': ['attempter', 'reattempt'], + 'reattend': ['attender', 'nattered', 'reattend'], + 'reattest': ['attester', 'reattest'], + 'reattract': ['attracter', 'reattract'], + 'reattraction': ['reattraction', 
'retractation'], + 'reatus': ['auster', 'reatus'], + 'reavail': ['reavail', 'valeria'], + 'reave': ['eaver', 'reave'], + 'reavoid': ['avodire', 'avoider', 'reavoid'], + 'reavouch': ['avoucher', 'reavouch'], + 'reavow': ['avower', 'reavow'], + 'reawait': ['awaiter', 'reawait'], + 'reawaken': ['awakener', 'reawaken'], + 'reaward': ['awarder', 'reaward'], + 'reb': ['ber', 'reb'], + 'rebab': ['barbe', 'bebar', 'breba', 'rebab'], + 'reback': ['backer', 'reback'], + 'rebag': ['bagre', 'barge', 'begar', 'rebag'], + 'rebait': ['baiter', 'barite', 'rebait', 'terbia'], + 'rebake': ['beaker', 'berake', 'rebake'], + 'reballast': ['ballaster', 'reballast'], + 'reballot': ['balloter', 'reballot'], + 'reban': ['abner', 'arneb', 'reban'], + 'rebanish': ['banisher', 'rebanish'], + 'rebar': ['barer', 'rebar'], + 'rebargain': ['bargainer', 'rebargain'], + 'rebasis': ['brassie', 'rebasis'], + 'rebate': ['beater', 'berate', 'betear', 'rebate', 'rebeat'], + 'rebater': ['rebater', 'terebra'], + 'rebathe': ['breathe', 'rebathe'], + 'rebato': ['boater', 'borate', 'rebato'], + 'rebawl': ['bawler', 'brelaw', 'rebawl', 'warble'], + 'rebear': ['bearer', 'rebear'], + 'rebeat': ['beater', 'berate', 'betear', 'rebate', 'rebeat'], + 'rebeck': ['becker', 'rebeck'], + 'rebed': ['brede', 'breed', 'rebed'], + 'rebeg': ['gerbe', 'grebe', 'rebeg'], + 'rebeggar': ['beggarer', 'rebeggar'], + 'rebegin': ['bigener', 'rebegin'], + 'rebehold': ['beholder', 'rebehold'], + 'rebellow': ['bellower', 'rebellow'], + 'rebelly': ['bellyer', 'rebelly'], + 'rebelong': ['belonger', 'rebelong'], + 'rebend': ['bender', 'berend', 'rebend'], + 'rebenefit': ['benefiter', 'rebenefit'], + 'rebeset': ['besteer', 'rebeset'], + 'rebestow': ['bestower', 'rebestow'], + 'rebetray': ['betrayer', 'eatberry', 'rebetray', 'teaberry'], + 'rebewail': ['bewailer', 'rebewail'], + 'rebia': ['barie', 'beira', 'erbia', 'rebia'], + 'rebias': ['braise', 'rabies', 'rebias'], + 'rebid': ['bider', 'bredi', 'bride', 'rebid'], + 'rebill': ['biller', 'rebill'], + 'rebillet': ['billeter', 'rebillet'], + 'rebind': ['binder', 'inbred', 'rebind'], + 'rebirth': ['brither', 'rebirth'], + 'rebite': ['bertie', 'betire', 'rebite'], + 'reblade': ['bleared', 'reblade'], + 'reblast': ['blaster', 'reblast', 'stabler'], + 'rebleach': ['bleacher', 'rebleach'], + 'reblend': ['blender', 'reblend'], + 'rebless': ['blesser', 'rebless'], + 'reblock': ['blocker', 'brockle', 'reblock'], + 'rebloom': ['bloomer', 'rebloom'], + 'reblot': ['bolter', 'orblet', 'reblot', 'rebolt'], + 'reblow': ['blower', 'bowler', 'reblow', 'worble'], + 'reblue': ['brulee', 'burele', 'reblue'], + 'rebluff': ['bluffer', 'rebluff'], + 'reblunder': ['blunderer', 'reblunder'], + 'reboant': ['baronet', 'reboant'], + 'reboantic': ['bicornate', 'carbonite', 'reboantic'], + 'reboard': ['arbored', 'boarder', 'reboard'], + 'reboast': ['barotse', 'boaster', 'reboast', 'sorbate'], + 'reboil': ['boiler', 'reboil'], + 'rebold': ['belord', 'bordel', 'rebold'], + 'rebolt': ['bolter', 'orblet', 'reblot', 'rebolt'], + 'rebone': ['boreen', 'enrobe', 'neebor', 'rebone'], + 'rebook': ['booker', 'brooke', 'rebook'], + 'rebop': ['probe', 'rebop'], + 'rebore': ['rebore', 'rerobe'], + 'reborrow': ['borrower', 'reborrow'], + 'rebound': ['beround', 'bounder', 'rebound', 'unbored', 'unorbed', 'unrobed'], + 'rebounder': ['rebounder', 'underrobe'], + 'rebox': ['boxer', 'rebox'], + 'rebrace': ['cerebra', 'rebrace'], + 'rebraid': ['braider', 'rebraid'], + 'rebranch': ['brancher', 'rebranch'], + 'rebrand': ['bernard', 'brander', 'rebrand'], + 
'rebrandish': ['brandisher', 'rebrandish'], + 'rebreed': ['breeder', 'rebreed'], + 'rebrew': ['brewer', 'rebrew'], + 'rebribe': ['berberi', 'rebribe'], + 'rebring': ['bringer', 'rebring'], + 'rebroach': ['broacher', 'rebroach'], + 'rebroadcast': ['broadcaster', 'rebroadcast'], + 'rebrown': ['browner', 'rebrown'], + 'rebrush': ['brusher', 'rebrush'], + 'rebud': ['bedur', 'rebud', 'redub'], + 'rebudget': ['budgeter', 'rebudget'], + 'rebuff': ['buffer', 'rebuff'], + 'rebuffet': ['buffeter', 'rebuffet'], + 'rebuild': ['builder', 'rebuild'], + 'rebulk': ['bulker', 'rebulk'], + 'rebunch': ['buncher', 'rebunch'], + 'rebundle': ['blendure', 'rebundle'], + 'reburden': ['burdener', 'reburden'], + 'reburn': ['burner', 'reburn'], + 'reburnish': ['burnisher', 'reburnish'], + 'reburst': ['burster', 'reburst'], + 'rebus': ['burse', 'rebus', 'suber'], + 'rebush': ['busher', 'rebush'], + 'rebut': ['brute', 'buret', 'rebut', 'tuber'], + 'rebute': ['rebute', 'retube'], + 'rebuttal': ['burletta', 'rebuttal'], + 'rebutton': ['buttoner', 'rebutton'], + 'rebuy': ['buyer', 'rebuy'], + 'recalk': ['calker', 'lacker', 'rackle', 'recalk', 'reckla'], + 'recall': ['caller', 'cellar', 'recall'], + 'recampaign': ['campaigner', 'recampaign'], + 'recancel': ['canceler', 'clarence', 'recancel'], + 'recant': ['canter', + 'creant', + 'cretan', + 'nectar', + 'recant', + 'tanrec', + 'trance'], + 'recantation': ['recantation', 'triacontane'], + 'recanter': ['canterer', 'recanter', 'recreant', 'terrance'], + 'recap': ['caper', 'crape', 'pacer', 'perca', 'recap'], + 'recaption': ['preaction', 'precation', 'recaption'], + 'recarpet': ['pretrace', 'recarpet'], + 'recart': ['arrect', 'carter', 'crater', 'recart', 'tracer'], + 'recase': ['cesare', 'crease', 'recase', 'searce'], + 'recash': ['arches', 'chaser', 'eschar', 'recash', 'search'], + 'recast': ['carest', 'caster', 'recast'], + 'recatch': ['catcher', 'recatch'], + 'recede': ['decree', 'recede'], + 'recedent': ['centered', 'decenter', 'decentre', 'recedent'], + 'receder': ['decreer', 'receder'], + 'receipt': ['ereptic', 'precite', 'receipt'], + 'receiptor': ['procerite', 'receiptor'], + 'receivables': ['receivables', 'serviceable'], + 'received': ['deceiver', 'received'], + 'recement': ['cementer', 'cerement', 'recement'], + 'recension': ['ninescore', 'recension'], + 'recensionist': ['intercession', 'recensionist'], + 'recent': ['center', 'recent', 'tenrec'], + 'recenter': ['centerer', 'recenter', 'recentre', 'terrence'], + 'recentre': ['centerer', 'recenter', 'recentre', 'terrence'], + 'reception': ['prenotice', 'reception'], + 'receptoral': ['praelector', 'receptoral'], + 'recess': ['cesser', 'recess'], + 'rechain': ['chainer', 'enchair', 'rechain'], + 'rechal': ['rachel', 'rechal'], + 'rechamber': ['chamberer', 'rechamber'], + 'rechange': ['encharge', 'rechange'], + 'rechant': ['chanter', 'rechant'], + 'rechar': ['archer', 'charer', 'rechar'], + 'recharter': ['charterer', 'recharter'], + 'rechase': ['archsee', 'rechase'], + 'rechaser': ['rechaser', 'research', 'searcher'], + 'rechasten': ['chastener', 'rechasten'], + 'rechaw': ['chawer', 'rechaw'], + 'recheat': ['cheater', 'hectare', 'recheat', 'reteach', 'teacher'], + 'recheck': ['checker', 'recheck'], + 'recheer': ['cheerer', 'recheer'], + 'rechew': ['chewer', 'rechew'], + 'rechip': ['cipher', 'rechip'], + 'rechisel': ['chiseler', 'rechisel'], + 'rechristen': ['christener', 'rechristen'], + 'rechuck': ['chucker', 'rechuck'], + 'recidivous': ['recidivous', 'veridicous'], + 'recipe': ['piecer', 'pierce', 'recipe'], + 
'recipiend': ['perdicine', 'recipiend'], + 'recipient': ['princeite', 'recipient'], + 'reciprocate': ['carpocerite', 'reciprocate'], + 'recirculate': ['clericature', 'recirculate'], + 'recision': ['recision', 'soricine'], + 'recital': ['article', 'recital'], + 'recitativo': ['recitativo', 'victoriate'], + 'recite': ['cerite', 'certie', 'recite', 'tierce'], + 'recitement': ['centimeter', 'recitement', 'remittence'], + 'reckla': ['calker', 'lacker', 'rackle', 'recalk', 'reckla'], + 'reckless': ['clerkess', 'reckless'], + 'reckling': ['clerking', 'reckling'], + 'reckon': ['conker', 'reckon'], + 'reclaim': ['claimer', 'miracle', 'reclaim'], + 'reclaimer': ['calmierer', 'reclaimer'], + 'reclama': ['cameral', 'caramel', 'carmela', 'ceramal', 'reclama'], + 'reclang': ['cangler', 'glancer', 'reclang'], + 'reclasp': ['clasper', 'reclasp', 'scalper'], + 'reclass': ['carless', 'classer', 'reclass'], + 'reclean': ['cleaner', 'reclean'], + 'reclear': ['clearer', 'reclear'], + 'reclimb': ['climber', 'reclimb'], + 'reclinate': ['intercale', 'interlace', 'lacertine', 'reclinate'], + 'reclinated': ['credential', 'interlaced', 'reclinated'], + 'recluse': ['luceres', 'recluse'], + 'recluseness': ['censureless', 'recluseness'], + 'reclusion': ['cornelius', 'inclosure', 'reclusion'], + 'reclusive': ['reclusive', 'versicule'], + 'recoach': ['caroche', 'coacher', 'recoach'], + 'recoal': ['carole', 'coaler', 'coelar', 'oracle', 'recoal'], + 'recoast': ['coaster', 'recoast'], + 'recoat': ['coater', 'recoat'], + 'recock': ['cocker', 'recock'], + 'recoil': ['coiler', 'recoil'], + 'recoilment': ['clinometer', 'recoilment'], + 'recoin': ['cerion', 'coiner', 'neroic', 'orcein', 'recoin'], + 'recoinage': ['aerogenic', 'recoinage'], + 'recollate': ['electoral', 'recollate'], + 'recollation': ['collationer', 'recollation'], + 'recollection': ['collectioner', 'recollection'], + 'recollet': ['colleter', 'coteller', 'coterell', 'recollet'], + 'recolor': ['colorer', 'recolor'], + 'recomb': ['comber', 'recomb'], + 'recomfort': ['comforter', 'recomfort'], + 'recommand': ['commander', 'recommand'], + 'recommend': ['commender', 'recommend'], + 'recommission': ['commissioner', 'recommission'], + 'recompact': ['compacter', 'recompact'], + 'recompass': ['compasser', 'recompass'], + 'recompetition': ['competitioner', 'recompetition'], + 'recomplain': ['complainer', 'procnemial', 'recomplain'], + 'recompound': ['compounder', 'recompound'], + 'recomprehend': ['comprehender', 'recomprehend'], + 'recon': ['coner', 'crone', 'recon'], + 'reconceal': ['concealer', 'reconceal'], + 'reconcert': ['concreter', 'reconcert'], + 'reconcession': ['concessioner', 'reconcession'], + 'reconcoct': ['concocter', 'reconcoct'], + 'recondemn': ['condemner', 'recondemn'], + 'recondensation': ['nondesecration', 'recondensation'], + 'recondition': ['conditioner', 'recondition'], + 'reconfer': ['confrere', 'enforcer', 'reconfer'], + 'reconfess': ['confesser', 'reconfess'], + 'reconfirm': ['confirmer', 'reconfirm'], + 'reconform': ['conformer', 'reconform'], + 'reconfound': ['confounder', 'reconfound'], + 'reconfront': ['confronter', 'reconfront'], + 'recongeal': ['congealer', 'recongeal'], + 'reconjoin': ['conjoiner', 'reconjoin'], + 'reconnect': ['concenter', 'reconnect'], + 'reconnoiter': ['reconnoiter', 'reconnoitre'], + 'reconnoitre': ['reconnoiter', 'reconnoitre'], + 'reconsent': ['consenter', 'nonsecret', 'reconsent'], + 'reconsider': ['considerer', 'reconsider'], + 'reconsign': ['consigner', 'reconsign'], + 'reconstitution': ['constitutioner', 
'reconstitution'], + 'reconstruct': ['constructer', 'reconstruct'], + 'reconsult': ['consulter', 'reconsult'], + 'recontend': ['contender', 'recontend'], + 'recontest': ['contester', 'recontest'], + 'recontinue': ['recontinue', 'unctioneer'], + 'recontract': ['contracter', 'correctant', 'recontract'], + 'reconvention': ['conventioner', 'reconvention'], + 'reconvert': ['converter', 'reconvert'], + 'reconvey': ['conveyer', 'reconvey'], + 'recook': ['cooker', 'recook'], + 'recool': ['cooler', 'recool'], + 'recopper': ['copperer', 'recopper'], + 'recopyright': ['copyrighter', 'recopyright'], + 'record': ['corder', 'record'], + 'recordation': ['corrodentia', 'recordation'], + 'recork': ['corker', 'recork', 'rocker'], + 'recorrection': ['correctioner', 'recorrection'], + 'recorrupt': ['corrupter', 'recorrupt'], + 'recounsel': ['enclosure', 'recounsel'], + 'recount': ['cornute', 'counter', 'recount', 'trounce'], + 'recountal': ['nucleator', 'recountal'], + 'recoup': ['couper', 'croupe', 'poucer', 'recoup'], + 'recourse': ['recourse', 'resource'], + 'recover': ['coverer', 'recover'], + 'recramp': ['cramper', 'recramp'], + 'recrank': ['cranker', 'recrank'], + 'recrate': ['caterer', 'recrate', 'retrace', 'terrace'], + 'recreant': ['canterer', 'recanter', 'recreant', 'terrance'], + 'recredit': ['cedriret', 'directer', 'recredit', 'redirect'], + 'recrew': ['crewer', 'recrew'], + 'recrimination': ['intermorainic', 'recrimination'], + 'recroon': ['coroner', 'crooner', 'recroon'], + 'recross': ['crosser', 'recross'], + 'recrowd': ['crowder', 'recrowd'], + 'recrown': ['crowner', 'recrown'], + 'recrudency': ['decurrency', 'recrudency'], + 'recruital': ['curtailer', 'recruital', 'reticular'], + 'recrush': ['crusher', 'recrush'], + 'recta': ['caret', + 'carte', + 'cater', + 'crate', + 'creat', + 'creta', + 'react', + 'recta', + 'trace'], + 'rectal': ['carlet', 'cartel', 'claret', 'rectal', 'talcer'], + 'rectalgia': ['cartilage', 'rectalgia'], + 'recti': ['citer', 'recti', 'ticer', 'trice'], + 'rectifiable': ['certifiable', 'rectifiable'], + 'rectification': ['certification', 'cretification', 'rectification'], + 'rectificative': ['certificative', 'rectificative'], + 'rectificator': ['certificator', 'rectificator'], + 'rectificatory': ['certificatory', 'rectificatory'], + 'rectified': ['certified', 'rectified'], + 'rectifier': ['certifier', 'rectifier'], + 'rectify': ['certify', 'cretify', 'rectify'], + 'rection': ['cerotin', 'cointer', 'cotrine', 'cretion', 'noticer', 'rection'], + 'rectitude': ['certitude', 'rectitude'], + 'rectoress': ['crosstree', 'rectoress'], + 'rectorship': ['cristopher', 'rectorship'], + 'rectotome': ['octometer', 'rectotome', 'tocometer'], + 'rectovesical': ['rectovesical', 'vesicorectal'], + 'recur': ['curer', 'recur'], + 'recurl': ['curler', 'recurl'], + 'recurse': ['recurse', 'rescuer', 'securer'], + 'recurtain': ['recurtain', 'unerratic'], + 'recurvation': ['countervair', 'overcurtain', 'recurvation'], + 'recurvous': ['recurvous', 'verrucous'], + 'recusance': ['recusance', 'securance'], + 'recusant': ['etruscan', 'recusant'], + 'recusation': ['nectarious', 'recusation'], + 'recusator': ['craterous', 'recusator'], + 'recuse': ['cereus', 'ceruse', 'recuse', 'rescue', 'secure'], + 'recut': ['cruet', 'eruct', 'recut', 'truce'], + 'red': ['erd', 'red'], + 'redact': ['cedrat', 'decart', 'redact'], + 'redaction': ['citronade', 'endaortic', 'redaction'], + 'redactional': ['declaration', 'redactional'], + 'redamage': ['dreamage', 'redamage'], + 'redan': ['andre', 'arend', 'daren', 
'redan'], + 'redare': ['reader', 'redare', 'reread'], + 'redarken': ['darkener', 'redarken'], + 'redarn': ['darner', 'darren', 'errand', 'rander', 'redarn'], + 'redart': ['darter', + 'dartre', + 'redart', + 'retard', + 'retrad', + 'tarred', + 'trader'], + 'redate': ['derate', 'redate'], + 'redaub': ['dauber', 'redaub'], + 'redawn': ['andrew', 'redawn', 'wander', 'warden'], + 'redbait': ['redbait', 'tribade'], + 'redbud': ['budder', 'redbud'], + 'redcoat': ['cordate', 'decator', 'redcoat'], + 'redden': ['nedder', 'redden'], + 'reddingite': ['digredient', 'reddingite'], + 'rede': ['deer', 'dere', 'dree', 'rede', 'reed'], + 'redeal': ['dealer', 'leader', 'redeal', 'relade', 'relead'], + 'redeck': ['decker', 'redeck'], + 'redeed': ['redeed', 'reeded'], + 'redeem': ['deemer', 'meered', 'redeem', 'remede'], + 'redefault': ['defaulter', 'redefault'], + 'redefeat': ['defeater', 'federate', 'redefeat'], + 'redefine': ['needfire', 'redefine'], + 'redeflect': ['redeflect', 'reflected'], + 'redelay': ['delayer', 'layered', 'redelay'], + 'redeliver': ['deliverer', 'redeliver'], + 'redemand': ['demander', 'redemand'], + 'redemolish': ['demolisher', 'redemolish'], + 'redeny': ['redeny', 'yender'], + 'redepend': ['depender', 'redepend'], + 'redeprive': ['prederive', 'redeprive'], + 'rederivation': ['rederivation', 'veratroidine'], + 'redescend': ['descender', 'redescend'], + 'redescription': ['prediscretion', 'redescription'], + 'redesign': ['designer', 'redesign', 'resigned'], + 'redesman': ['redesman', 'seamrend'], + 'redetect': ['detecter', 'redetect'], + 'redevelop': ['developer', 'redevelop'], + 'redfin': ['finder', 'friend', 'redfin', 'refind'], + 'redhoop': ['redhoop', 'rhodope'], + 'redia': ['aider', 'deair', 'irade', 'redia'], + 'redient': ['nitered', 'redient', 'teinder'], + 'redig': ['dirge', 'gride', 'redig', 'ridge'], + 'redigest': ['digester', 'redigest'], + 'rediminish': ['diminisher', 'rediminish'], + 'redintegrator': ['redintegrator', 'retrogradient'], + 'redip': ['pride', 'pried', 'redip'], + 'redirect': ['cedriret', 'directer', 'recredit', 'redirect'], + 'redisable': ['desirable', 'redisable'], + 'redisappear': ['disappearer', 'redisappear'], + 'rediscount': ['discounter', 'rediscount'], + 'rediscover': ['discoverer', 'rediscover'], + 'rediscuss': ['discusser', 'rediscuss'], + 'redispatch': ['dispatcher', 'redispatch'], + 'redisplay': ['displayer', 'redisplay'], + 'redispute': ['disrepute', 'redispute'], + 'redistend': ['dendrites', 'distender', 'redistend'], + 'redistill': ['distiller', 'redistill'], + 'redistinguish': ['distinguisher', 'redistinguish'], + 'redistrain': ['distrainer', 'redistrain'], + 'redisturb': ['disturber', 'redisturb'], + 'redive': ['derive', 'redive'], + 'redivert': ['diverter', 'redivert', 'verditer'], + 'redleg': ['gelder', 'ledger', 'redleg'], + 'redlegs': ['redlegs', 'sledger'], + 'redo': ['doer', 'redo', 'rode', 'roed'], + 'redock': ['corked', 'docker', 'redock'], + 'redolent': ['redolent', 'rondelet'], + 'redoom': ['doomer', 'mooder', 'redoom', 'roomed'], + 'redoubling': ['bouldering', 'redoubling'], + 'redoubt': ['doubter', 'obtrude', 'outbred', 'redoubt'], + 'redound': ['redound', 'rounded', 'underdo'], + 'redowa': ['redowa', 'woader'], + 'redraft': ['drafter', 'redraft'], + 'redrag': ['darger', 'gerard', 'grader', 'redrag', 'regard'], + 'redraw': ['drawer', 'redraw', 'reward', 'warder'], + 'redrawer': ['redrawer', 'rewarder', 'warderer'], + 'redream': ['dreamer', 'redream'], + 'redress': ['dresser', 'redress'], + 'redrill': ['driller', 'redrill'], + 
'redrive': ['deriver', 'redrive', 'rivered'], + 'redry': ['derry', 'redry', 'ryder'], + 'redtail': ['dilater', 'lardite', 'redtail'], + 'redtop': ['deport', 'ported', 'redtop'], + 'redub': ['bedur', 'rebud', 'redub'], + 'reductant': ['reductant', 'traducent', 'truncated'], + 'reduction': ['introduce', 'reduction'], + 'reductional': ['radiolucent', 'reductional'], + 'redue': ['redue', 'urdee'], + 'redunca': ['durance', 'redunca', 'unraced'], + 'redwithe': ['redwithe', 'withered'], + 'redye': ['redye', 'reedy'], + 'ree': ['eer', 'ere', 'ree'], + 'reechy': ['cheery', 'reechy'], + 'reed': ['deer', 'dere', 'dree', 'rede', 'reed'], + 'reeded': ['redeed', 'reeded'], + 'reeden': ['endere', 'needer', 'reeden'], + 'reedily': ['reedily', 'reyield', 'yielder'], + 'reeding': ['energid', 'reeding'], + 'reedling': ['engirdle', 'reedling'], + 'reedman': ['amender', 'meander', 'reamend', 'reedman'], + 'reedwork': ['reedwork', 'reworked'], + 'reedy': ['redye', 'reedy'], + 'reef': ['feer', 'free', 'reef'], + 'reefing': ['feering', 'feigner', 'freeing', 'reefing', 'refeign'], + 'reek': ['eker', 'reek'], + 'reel': ['leer', 'reel'], + 'reeler': ['reeler', 'rereel'], + 'reelingly': ['leeringly', 'reelingly'], + 'reem': ['mere', 'reem'], + 'reeming': ['reeming', 'regimen'], + 'reen': ['erne', 'neer', 'reen'], + 'reenge': ['neeger', 'reenge', 'renege'], + 'rees': ['erse', 'rees', 'seer', 'sere'], + 'reese': ['esere', 'reese', 'resee'], + 'reesk': ['esker', 'keres', 'reesk', 'seker', 'skeer', 'skere'], + 'reest': ['ester', + 'estre', + 'reest', + 'reset', + 'steer', + 'stere', + 'stree', + 'terse', + 'tsere'], + 'reester': ['reester', 'steerer'], + 'reestle': ['reestle', 'resteel', 'steeler'], + 'reesty': ['reesty', 'yester'], + 'reet': ['reet', 'teer', 'tree'], + 'reetam': ['reetam', 'retame', 'teamer'], + 'reeveland': ['landreeve', 'reeveland'], + 'refall': ['faller', 'refall'], + 'refashion': ['fashioner', 'refashion'], + 'refasten': ['fastener', 'fenestra', 'refasten'], + 'refavor': ['favorer', 'overfar', 'refavor'], + 'refeed': ['feeder', 'refeed'], + 'refeel': ['feeler', 'refeel', 'reflee'], + 'refeign': ['feering', 'feigner', 'freeing', 'reefing', 'refeign'], + 'refel': ['fleer', 'refel'], + 'refer': ['freer', 'refer'], + 'referment': ['fermenter', 'referment'], + 'refetch': ['fetcher', 'refetch'], + 'refight': ['fighter', 'freight', 'refight'], + 'refill': ['filler', 'refill'], + 'refilter': ['filterer', 'refilter'], + 'refinable': ['inferable', 'refinable'], + 'refind': ['finder', 'friend', 'redfin', 'refind'], + 'refine': ['ferine', 'refine'], + 'refined': ['definer', 'refined'], + 'refiner': ['refiner', 'reinfer'], + 'refinger': ['fingerer', 'refinger'], + 'refining': ['infringe', 'refining'], + 'refinish': ['finisher', 'refinish'], + 'refit': ['freit', 'refit'], + 'refix': ['fixer', 'refix'], + 'reflash': ['flasher', 'reflash'], + 'reflected': ['redeflect', 'reflected'], + 'reflee': ['feeler', 'refeel', 'reflee'], + 'refling': ['ferling', 'flinger', 'refling'], + 'refloat': ['floater', 'florate', 'refloat'], + 'reflog': ['golfer', 'reflog'], + 'reflood': ['flooder', 'reflood'], + 'refloor': ['floorer', 'refloor'], + 'reflourish': ['flourisher', 'reflourish'], + 'reflow': ['flower', 'fowler', 'reflow', 'wolfer'], + 'reflower': ['flowerer', 'reflower'], + 'reflush': ['flusher', 'reflush'], + 'reflux': ['fluxer', 'reflux'], + 'refluxed': ['flexured', 'refluxed'], + 'refly': ['ferly', 'flyer', 'refly'], + 'refocus': ['focuser', 'refocus'], + 'refold': ['folder', 'refold'], + 'refoment': ['fomenter', 
'refoment'], + 'refoot': ['footer', 'refoot'], + 'reforecast': ['forecaster', 'reforecast'], + 'reforest': ['forester', 'fosterer', 'reforest'], + 'reforfeit': ['forfeiter', 'reforfeit'], + 'reform': ['former', 'reform'], + 'reformado': ['doorframe', 'reformado'], + 'reformed': ['deformer', 'reformed'], + 'reformism': ['misreform', 'reformism'], + 'reformist': ['reformist', 'restiform'], + 'reforward': ['forwarder', 'reforward'], + 'refound': ['founder', 'refound'], + 'refoundation': ['foundationer', 'refoundation'], + 'refreshen': ['freshener', 'refreshen'], + 'refrighten': ['frightener', 'refrighten'], + 'refront': ['fronter', 'refront'], + 'reft': ['fret', 'reft', 'tref'], + 'refuel': ['ferule', 'fueler', 'refuel'], + 'refund': ['funder', 'refund'], + 'refurbish': ['furbisher', 'refurbish'], + 'refurl': ['furler', 'refurl'], + 'refurnish': ['furnisher', 'refurnish'], + 'refusingly': ['refusingly', 'syringeful'], + 'refutal': ['faulter', 'refutal', 'tearful'], + 'refute': ['fuerte', 'refute'], + 'reg': ['erg', 'ger', 'reg'], + 'regain': ['arenig', 'earing', 'gainer', 'reagin', 'regain'], + 'regal': ['argel', 'ergal', 'garle', 'glare', 'lager', 'large', 'regal'], + 'regalia': ['lairage', 'railage', 'regalia'], + 'regalian': ['algerian', 'geranial', 'regalian'], + 'regalist': ['glaister', 'regalist'], + 'regallop': ['galloper', 'regallop'], + 'regally': ['allergy', 'gallery', 'largely', 'regally'], + 'regalness': ['largeness', 'rangeless', 'regalness'], + 'regard': ['darger', 'gerard', 'grader', 'redrag', 'regard'], + 'regarnish': ['garnisher', 'regarnish'], + 'regather': ['gatherer', 'regather'], + 'regatta': ['garetta', 'rattage', 'regatta'], + 'regelate': ['eglatere', 'regelate', 'relegate'], + 'regelation': ['regelation', 'relegation'], + 'regenesis': ['energesis', 'regenesis'], + 'regent': ['gerent', 'regent'], + 'reges': ['reges', 'serge'], + 'reget': ['egret', 'greet', 'reget'], + 'regga': ['agger', 'gager', 'regga'], + 'reggie': ['greige', 'reggie'], + 'regia': ['geira', 'regia'], + 'regild': ['gilder', 'girdle', 'glider', 'regild', 'ridgel'], + 'regill': ['giller', 'grille', 'regill'], + 'regimen': ['reeming', 'regimen'], + 'regimenal': ['margeline', 'regimenal'], + 'regin': ['grein', 'inger', 'nigre', 'regin', 'reign', 'ringe'], + 'reginal': ['aligner', 'engrail', 'realign', 'reginal'], + 'reginald': ['dragline', 'reginald', 'ringlead'], + 'region': ['ignore', 'region'], + 'regional': ['geraniol', 'regional'], + 'registered': ['deregister', 'registered'], + 'registerer': ['registerer', 'reregister'], + 'regive': ['grieve', 'regive'], + 'regladden': ['gladdener', 'glandered', 'regladden'], + 'reglair': ['grailer', 'reglair'], + 'regle': ['leger', 'regle'], + 'reglet': ['gretel', 'reglet'], + 'regloss': ['glosser', 'regloss'], + 'reglove': ['overleg', 'reglove'], + 'reglow': ['glower', 'reglow'], + 'regma': ['grame', 'marge', 'regma'], + 'regnal': ['angler', 'arleng', 'garnel', 'largen', 'rangle', 'regnal'], + 'regraft': ['grafter', 'regraft'], + 'regrant': ['granter', 'regrant'], + 'regrasp': ['grasper', 'regrasp', 'sparger'], + 'regrass': ['grasser', 'regrass'], + 'regrate': ['greater', 'regrate', 'terrage'], + 'regrating': ['gartering', 'regrating'], + 'regrator': ['garroter', 'regrator'], + 'regreen': ['greener', 'regreen', 'reneger'], + 'regreet': ['greeter', 'regreet'], + 'regrind': ['grinder', 'regrind'], + 'regrinder': ['derringer', 'regrinder'], + 'regrip': ['griper', 'regrip'], + 'regroup': ['grouper', 'regroup'], + 'regrow': ['grower', 'regrow'], + 'reguard': ['guarder', 
'reguard'], + 'regula': ['ragule', 'regula'], + 'regulation': ['regulation', 'urogenital'], + 'reguli': ['ligure', 'reguli'], + 'regur': ['regur', 'urger'], + 'regush': ['gusher', 'regush'], + 'reh': ['her', 'reh', 'rhe'], + 'rehale': ['healer', 'rehale', 'reheal'], + 'rehallow': ['hallower', 'rehallow'], + 'rehammer': ['hammerer', 'rehammer'], + 'rehang': ['hanger', 'rehang'], + 'reharden': ['hardener', 'reharden'], + 'reharm': ['harmer', 'reharm'], + 'reharness': ['harnesser', 'reharness'], + 'reharrow': ['harrower', 'reharrow'], + 'reharvest': ['harvester', 'reharvest'], + 'rehash': ['hasher', 'rehash'], + 'rehaul': ['hauler', 'rehaul'], + 'rehazard': ['hazarder', 'rehazard'], + 'rehead': ['adhere', 'header', 'hedera', 'rehead'], + 'reheal': ['healer', 'rehale', 'reheal'], + 'reheap': ['heaper', 'reheap'], + 'rehear': ['hearer', 'rehear'], + 'rehearser': ['rehearser', 'reshearer'], + 'rehearten': ['heartener', 'rehearten'], + 'reheat': ['heater', 'hereat', 'reheat'], + 'reheel': ['heeler', 'reheel'], + 'reheighten': ['heightener', 'reheighten'], + 'rehoist': ['hoister', 'rehoist'], + 'rehollow': ['hollower', 'rehollow'], + 'rehonor': ['honorer', 'rehonor'], + 'rehook': ['hooker', 'rehook'], + 'rehoop': ['hooper', 'rehoop'], + 'rehung': ['hunger', 'rehung'], + 'reid': ['dier', 'dire', 'reid', 'ride'], + 'reif': ['fire', 'reif', 'rife'], + 'reify': ['fiery', 'reify'], + 'reign': ['grein', 'inger', 'nigre', 'regin', 'reign', 'ringe'], + 'reignore': ['erigeron', 'reignore'], + 'reillustrate': ['reillustrate', 'ultrasterile'], + 'reim': ['emir', 'imer', 'mire', 'reim', 'remi', 'riem', 'rime'], + 'reimbody': ['embryoid', 'reimbody'], + 'reimpart': ['imparter', 'reimpart'], + 'reimplant': ['implanter', 'reimplant'], + 'reimply': ['primely', 'reimply'], + 'reimport': ['importer', 'promerit', 'reimport'], + 'reimpose': ['perisome', 'promisee', 'reimpose'], + 'reimpress': ['impresser', 'reimpress'], + 'reimpression': ['reimpression', 'repermission'], + 'reimprint': ['imprinter', 'reimprint'], + 'reimprison': ['imprisoner', 'reimprison'], + 'rein': ['neri', 'rein', 'rine'], + 'reina': ['erian', 'irena', 'reina'], + 'reincentive': ['internecive', 'reincentive'], + 'reincite': ['icterine', 'reincite'], + 'reincrudate': ['antireducer', 'reincrudate', 'untraceried'], + 'reindeer': ['denierer', 'reindeer'], + 'reindict': ['indicter', 'indirect', 'reindict'], + 'reindue': ['reindue', 'uredine'], + 'reinfect': ['frenetic', 'infecter', 'reinfect'], + 'reinfer': ['refiner', 'reinfer'], + 'reinfest': ['infester', 'reinfest'], + 'reinflate': ['interleaf', 'reinflate'], + 'reinflict': ['inflicter', 'reinflict'], + 'reinform': ['informer', 'reinform', 'reniform'], + 'reinhabit': ['inhabiter', 'reinhabit'], + 'reins': ['reins', 'resin', 'rinse', 'risen', 'serin', 'siren'], + 'reinsane': ['anserine', 'reinsane'], + 'reinsert': ['inserter', 'reinsert'], + 'reinsist': ['insister', 'reinsist', 'sinister', 'sisterin'], + 'reinspect': ['prescient', 'reinspect'], + 'reinspector': ['prosecretin', 'reinspector'], + 'reinspirit': ['inspiriter', 'reinspirit'], + 'reinstall': ['installer', 'reinstall'], + 'reinstation': ['reinstation', 'santorinite'], + 'reinstill': ['instiller', 'reinstill'], + 'reinstruct': ['instructer', 'intercrust', 'reinstruct'], + 'reinsult': ['insulter', 'lustrine', 'reinsult'], + 'reintend': ['indenter', 'intender', 'reintend'], + 'reinter': ['reinter', 'terrine'], + 'reinterest': ['interester', 'reinterest'], + 'reinterpret': ['interpreter', 'reinterpret'], + 'reinterrupt': ['interrupter', 
'reinterrupt'], + 'reinterview': ['interviewer', 'reinterview'], + 'reintrench': ['intrencher', 'reintrench'], + 'reintrude': ['reintrude', 'unretired'], + 'reinvent': ['inventer', 'reinvent', 'ventrine', 'vintener'], + 'reinvert': ['inverter', 'reinvert', 'trinerve'], + 'reinvest': ['reinvest', 'servient'], + 'reis': ['reis', 'rise', 'seri', 'sier', 'sire'], + 'reit': ['iter', 'reit', 'rite', 'teri', 'tier', 'tire'], + 'reiter': ['errite', 'reiter', 'retier', 'retire', 'tierer'], + 'reiterable': ['reiterable', 'reliberate'], + 'rejail': ['jailer', 'rejail'], + 'rejerk': ['jerker', 'rejerk'], + 'rejoin': ['joiner', 'rejoin'], + 'rejolt': ['jolter', 'rejolt'], + 'rejourney': ['journeyer', 'rejourney'], + 'reki': ['erik', 'kier', 'reki'], + 'rekick': ['kicker', 'rekick'], + 'rekill': ['killer', 'rekill'], + 'rekiss': ['kisser', 'rekiss'], + 'reknit': ['reknit', 'tinker'], + 'reknow': ['knower', 'reknow', 'wroken'], + 'rel': ['ler', 'rel'], + 'relabel': ['labeler', 'relabel'], + 'relace': ['alerce', 'cereal', 'relace'], + 'relacquer': ['lacquerer', 'relacquer'], + 'relade': ['dealer', 'leader', 'redeal', 'relade', 'relead'], + 'reladen': ['leander', 'learned', 'reladen'], + 'relais': ['israel', 'relais', 'resail', 'sailer', 'serail', 'serial'], + 'relament': ['lamenter', 'relament', 'remantle'], + 'relamp': ['lamper', 'palmer', 'relamp'], + 'reland': ['aldern', + 'darnel', + 'enlard', + 'lander', + 'lenard', + 'randle', + 'reland'], + 'relap': ['lepra', 'paler', 'parel', 'parle', 'pearl', 'perla', 'relap'], + 'relapse': ['pleaser', 'preseal', 'relapse'], + 'relapsing': ['espringal', 'presignal', 'relapsing'], + 'relast': ['laster', + 'lastre', + 'rastle', + 'relast', + 'resalt', + 'salter', + 'slater', + 'stelar'], + 'relata': ['latera', 'relata'], + 'relatability': ['alterability', 'bilaterality', 'relatability'], + 'relatable': ['alterable', 'relatable'], + 'relatch': ['clethra', 'latcher', 'ratchel', 'relatch', 'talcher', 'trachle'], + 'relate': ['earlet', 'elater', 'relate'], + 'related': ['delater', 'related', 'treadle'], + 'relater': ['alterer', 'realter', 'relater'], + 'relation': ['oriental', 'relation', 'tirolean'], + 'relationism': ['misrelation', 'orientalism', 'relationism'], + 'relationist': ['orientalist', 'relationist'], + 'relative': ['levirate', 'relative'], + 'relativization': ['relativization', 'revitalization'], + 'relativize': ['relativize', 'revitalize'], + 'relator': ['realtor', 'relator'], + 'relaunch': ['launcher', 'relaunch'], + 'relay': ['early', 'layer', 'relay'], + 'relead': ['dealer', 'leader', 'redeal', 'relade', 'relead'], + 'releap': ['leaper', 'releap', 'repale', 'repeal'], + 'relearn': ['learner', 'relearn'], + 'releather': ['leatherer', 'releather', 'tarheeler'], + 'relection': ['centriole', 'electrion', 'relection'], + 'relegate': ['eglatere', 'regelate', 'relegate'], + 'relegation': ['regelation', 'relegation'], + 'relend': ['lender', 'relend'], + 'reletter': ['letterer', 'reletter'], + 'relevant': ['levanter', 'relevant', 'revelant'], + 'relevation': ['relevation', 'revelation'], + 'relevator': ['relevator', 'revelator', 'veratrole'], + 'relevel': ['leveler', 'relevel'], + 'reliably': ['beryllia', 'reliably'], + 'reliance': ['cerealin', 'cinereal', 'reliance'], + 'reliant': ['entrail', + 'latiner', + 'latrine', + 'ratline', + 'reliant', + 'retinal', + 'trenail'], + 'reliantly': ['interally', 'reliantly'], + 'reliberate': ['reiterable', 'reliberate'], + 'relic': ['crile', 'elric', 'relic'], + 'relick': ['licker', 'relick', 'rickle'], + 'relicted': 
['derelict', 'relicted'], + 'relier': ['lierre', 'relier'], + 'relieving': ['inveigler', 'relieving'], + 'relievo': ['overlie', 'relievo'], + 'relift': ['fertil', 'filter', 'lifter', 'relift', 'trifle'], + 'religation': ['genitorial', 'religation'], + 'relight': ['lighter', 'relight', 'rightle'], + 'relighten': ['lightener', 'relighten', 'threeling'], + 'religion': ['ligroine', 'religion'], + 'relimit': ['limiter', 'relimit'], + 'reline': ['lierne', 'reline'], + 'relink': ['linker', 'relink'], + 'relish': ['hirsel', 'hirsle', 'relish'], + 'relishy': ['relishy', 'shirley'], + 'relist': ['lister', 'relist'], + 'relisten': ['enlister', 'esterlin', 'listener', 'relisten'], + 'relive': ['levier', 'relive', 'reveil', 'revile', 'veiler'], + 'reload': ['loader', 'ordeal', 'reload'], + 'reloan': ['lenora', 'loaner', 'orlean', 'reloan'], + 'relocation': ['iconolater', 'relocation'], + 'relock': ['locker', 'relock'], + 'relook': ['looker', 'relook'], + 'relose': ['relose', 'resole'], + 'relost': ['relost', 'reslot', 'rostel', 'sterol', 'torsel'], + 'relot': ['lerot', 'orlet', 'relot'], + 'relower': ['lowerer', 'relower'], + 'reluct': ['cutler', 'reluct'], + 'reluctation': ['countertail', 'reluctation'], + 'relumine': ['lemurine', 'meruline', 'relumine'], + 'rely': ['lyre', 'rely'], + 'remade': ['meader', 'remade'], + 'remagnification': ['germanification', 'remagnification'], + 'remagnify': ['germanify', 'remagnify'], + 'remail': ['mailer', 'remail'], + 'remain': ['ermani', 'marine', 'remain'], + 'remains': ['remains', 'seminar'], + 'remaintain': ['antimerina', 'maintainer', 'remaintain'], + 'reman': ['enarm', 'namer', 'reman'], + 'remand': ['damner', 'manred', 'randem', 'remand'], + 'remanet': ['remanet', 'remeant', 'treeman'], + 'remantle': ['lamenter', 'relament', 'remantle'], + 'remap': ['amper', 'remap'], + 'remarch': ['charmer', 'marcher', 'remarch'], + 'remark': ['marker', 'remark'], + 'remarket': ['marketer', 'remarket'], + 'remarry': ['marryer', 'remarry'], + 'remarshal': ['marshaler', 'remarshal'], + 'remask': ['masker', 'remask'], + 'remass': ['masser', 'remass'], + 'remast': ['martes', 'master', 'remast', 'stream'], + 'remasticate': ['metrectasia', 'remasticate'], + 'rematch': ['matcher', 'rematch'], + 'remeant': ['remanet', 'remeant', 'treeman'], + 'remede': ['deemer', 'meered', 'redeem', 'remede'], + 'remeet': ['meeter', 'remeet', 'teemer'], + 'remelt': ['melter', 'remelt'], + 'remend': ['mender', 'remend'], + 'remetal': ['lameter', 'metaler', 'remetal'], + 'remi': ['emir', 'imer', 'mire', 'reim', 'remi', 'riem', 'rime'], + 'remication': ['marcionite', 'microtinae', 'remication'], + 'remigate': ['emigrate', 'remigate'], + 'remigation': ['emigration', 'remigation'], + 'remill': ['miller', 'remill'], + 'remind': ['minder', 'remind'], + 'remint': ['minter', 'remint', 'termin'], + 'remiped': ['demirep', 'epiderm', 'impeder', 'remiped'], + 'remisrepresent': ['misrepresenter', 'remisrepresent'], + 'remission': ['missioner', 'remission'], + 'remisunderstand': ['misunderstander', 'remisunderstand'], + 'remit': ['merit', 'miter', 'mitre', 'remit', 'timer'], + 'remittal': ['remittal', 'termital'], + 'remittance': ['carminette', 'remittance'], + 'remittence': ['centimeter', 'recitement', 'remittence'], + 'remitter': ['remitter', 'trimeter'], + 'remix': ['mixer', 'remix'], + 'remnant': ['manrent', 'remnant'], + 'remock': ['mocker', 'remock'], + 'remodel': ['demerol', 'modeler', 'remodel'], + 'remold': ['dermol', 'molder', 'remold'], + 'remontant': ['nonmatter', 'remontant'], + 'remontoir': 
['interroom', 'remontoir'], + 'remop': ['merop', 'moper', 'proem', 'remop'], + 'remora': ['remora', 'roamer'], + 'remord': ['dormer', 'remord'], + 'remote': ['meteor', 'remote'], + 'remotive': ['overtime', 'remotive'], + 'remould': ['remould', 'ruledom'], + 'remount': ['monture', 'mounter', 'remount'], + 'removable': ['overblame', 'removable'], + 'remunerate': ['remunerate', 'renumerate'], + 'remuneration': ['remuneration', 'renumeration'], + 'remurmur': ['murmurer', 'remurmur'], + 'remus': ['muser', 'remus', 'serum'], + 'remuster': ['musterer', 'remuster'], + 'renable': ['enabler', 'renable'], + 'renably': ['blarney', 'renably'], + 'renail': ['arline', 'larine', 'linear', 'nailer', 'renail'], + 'renaissance': ['necessarian', 'renaissance'], + 'renal': ['learn', 'renal'], + 'rename': ['enarme', 'meaner', 'rename'], + 'renavigate': ['renavigate', 'vegetarian'], + 'rend': ['dern', 'rend'], + 'rendition': ['rendition', 'trinodine'], + 'reneg': ['genre', 'green', 'neger', 'reneg'], + 'renegadism': ['grandeeism', 'renegadism'], + 'renegation': ['generation', 'renegation'], + 'renege': ['neeger', 'reenge', 'renege'], + 'reneger': ['greener', 'regreen', 'reneger'], + 'reneglect': ['neglecter', 'reneglect'], + 'renerve': ['renerve', 'venerer'], + 'renes': ['renes', 'sneer'], + 'renet': ['enter', 'neter', 'renet', 'terne', 'treen'], + 'reniform': ['informer', 'reinform', 'reniform'], + 'renilla': ['ralline', 'renilla'], + 'renin': ['inner', 'renin'], + 'reniportal': ['interpolar', 'reniportal'], + 'renish': ['renish', 'shiner', 'shrine'], + 'renitence': ['centenier', 'renitence'], + 'renitency': ['nycterine', 'renitency'], + 'renitent': ['renitent', 'trentine'], + 'renk': ['kern', 'renk'], + 'rennet': ['rennet', 'tenner'], + 'renography': ['granophyre', 'renography'], + 'renominate': ['enantiomer', 'renominate'], + 'renotation': ['renotation', 'retonation'], + 'renotice': ['erection', 'neoteric', 'nocerite', 'renotice'], + 'renourish': ['nourisher', 'renourish'], + 'renovate': ['overneat', 'renovate'], + 'renovater': ['enervator', 'renovater', 'venerator'], + 'renown': ['renown', 'wonner'], + 'rent': ['rent', 'tern'], + 'rentage': ['grantee', 'greaten', 'reagent', 'rentage'], + 'rental': ['altern', 'antler', 'learnt', 'rental', 'ternal'], + 'rentaler': ['rentaler', 'rerental'], + 'rented': ['denter', 'rented', 'tender'], + 'rentee': ['entree', 'rentee', 'retene'], + 'renter': ['renter', 'rerent'], + 'renu': ['renu', 'ruen', 'rune'], + 'renumber': ['numberer', 'renumber'], + 'renumerate': ['remunerate', 'renumerate'], + 'renumeration': ['remuneration', 'renumeration'], + 'reobtain': ['abrotine', 'baritone', 'obtainer', 'reobtain'], + 'reoccasion': ['occasioner', 'reoccasion'], + 'reoccupation': ['cornucopiate', 'reoccupation'], + 'reoffend': ['offender', 'reoffend'], + 'reoffer': ['offerer', 'reoffer'], + 'reoil': ['oiler', 'oriel', 'reoil'], + 'reopen': ['opener', 'reopen', 'repone'], + 'reordain': ['inroader', 'ordainer', 'reordain'], + 'reorder': ['orderer', 'reorder'], + 'reordinate': ['reordinate', 'treronidae'], + 'reornament': ['ornamenter', 'reornament'], + 'reoverflow': ['overflower', 'reoverflow'], + 'reown': ['owner', 'reown', 'rowen'], + 'rep': ['per', 'rep'], + 'repack': ['packer', 'repack'], + 'repaint': ['painter', 'pertain', 'pterian', 'repaint'], + 'repair': ['pairer', 'rapier', 'repair'], + 'repairer': ['rareripe', 'repairer'], + 'repale': ['leaper', 'releap', 'repale', 'repeal'], + 'repand': ['pander', 'repand'], + 'repandly': ['panderly', 'repandly'], + 'repandous': ['panderous', 
'repandous'], + 'repanel': ['paneler', 'repanel', 'replane'], + 'repaper': ['paperer', 'perpera', 'prepare', 'repaper'], + 'reparagraph': ['paragrapher', 'reparagraph'], + 'reparation': ['praetorian', 'reparation'], + 'repark': ['parker', 'repark'], + 'repartee': ['repartee', 'repeater'], + 'repartition': ['partitioner', 'repartition'], + 'repass': ['passer', 'repass', 'sparse'], + 'repasser': ['asperser', 'repasser'], + 'repast': ['paster', 'repast', 'trapes'], + 'repaste': ['perates', 'repaste', 'sperate'], + 'repasture': ['repasture', 'supertare'], + 'repatch': ['chapter', 'patcher', 'repatch'], + 'repatent': ['pattener', 'repatent'], + 'repattern': ['patterner', 'repattern'], + 'repawn': ['enwrap', 'pawner', 'repawn'], + 'repay': ['apery', 'payer', 'repay'], + 'repeal': ['leaper', 'releap', 'repale', 'repeal'], + 'repeat': ['petrea', 'repeat', 'retape'], + 'repeater': ['repartee', 'repeater'], + 'repel': ['leper', 'perle', 'repel'], + 'repen': ['neper', 'preen', 'repen'], + 'repension': ['pensioner', 'repension'], + 'repent': ['perten', 'repent'], + 'repentable': ['penetrable', 'repentable'], + 'repentance': ['penetrance', 'repentance'], + 'repentant': ['penetrant', 'repentant'], + 'reperceive': ['prereceive', 'reperceive'], + 'repercussion': ['percussioner', 'repercussion'], + 'repercussive': ['repercussive', 'superservice'], + 'reperform': ['performer', 'prereform', 'reperform'], + 'repermission': ['reimpression', 'repermission'], + 'repermit': ['premerit', 'preremit', 'repermit'], + 'reperplex': ['perplexer', 'reperplex'], + 'reperusal': ['pleasurer', 'reperusal'], + 'repetition': ['petitioner', 'repetition'], + 'rephael': ['preheal', 'rephael'], + 'rephase': ['hespera', 'rephase', 'reshape'], + 'rephotograph': ['photographer', 'rephotograph'], + 'rephrase': ['preshare', 'rephrase'], + 'repic': ['price', 'repic'], + 'repick': ['picker', 'repick'], + 'repiece': ['creepie', 'repiece'], + 'repin': ['piner', 'prine', 'repin', 'ripen'], + 'repine': ['neiper', 'perine', 'pirene', 'repine'], + 'repiner': ['repiner', 'ripener'], + 'repiningly': ['repiningly', 'ripeningly'], + 'repique': ['perique', 'repique'], + 'repitch': ['pitcher', 'repitch'], + 'replace': ['percale', 'replace'], + 'replait': ['partile', 'plaiter', 'replait'], + 'replan': ['parnel', 'planer', 'replan'], + 'replane': ['paneler', 'repanel', 'replane'], + 'replant': ['pantler', 'planter', 'replant'], + 'replantable': ['planetabler', 'replantable'], + 'replanter': ['prerental', 'replanter'], + 'replaster': ['plasterer', 'replaster'], + 'replate': ['pearlet', 'pleater', 'prelate', 'ptereal', 'replate', 'repleat'], + 'replay': ['parley', 'pearly', 'player', 'replay'], + 'replead': ['pearled', 'pedaler', 'pleader', 'replead'], + 'repleader': ['predealer', 'repleader'], + 'repleat': ['pearlet', 'pleater', 'prelate', 'ptereal', 'replate', 'repleat'], + 'repleteness': ['repleteness', 'terpeneless'], + 'repletion': ['interlope', 'interpole', 'repletion', 'terpineol'], + 'repliant': ['interlap', 'repliant', 'triplane'], + 'replica': ['caliper', 'picarel', 'replica'], + 'replight': ['plighter', 'replight'], + 'replod': ['podler', 'polder', 'replod'], + 'replot': ['petrol', 'replot'], + 'replow': ['plower', 'replow'], + 'replum': ['lumper', 'plumer', 'replum', 'rumple'], + 'replunder': ['plunderer', 'replunder'], + 'reply': ['plyer', 'reply'], + 'repocket': ['pocketer', 'repocket'], + 'repoint': ['pointer', 'protein', 'pterion', 'repoint', 'tropine'], + 'repolish': ['polisher', 'repolish'], + 'repoll': ['poller', 'repoll'], + 
'reponder': ['ponderer', 'reponder'], + 'repone': ['opener', 'reopen', 'repone'], + 'report': ['porret', 'porter', 'report', 'troper'], + 'reportage': ['porterage', 'reportage'], + 'reporterism': ['misreporter', 'reporterism'], + 'reportion': ['portioner', 'reportion'], + 'reposed': ['deposer', 'reposed'], + 'reposit': ['periost', 'porites', 'reposit', 'riposte'], + 'reposition': ['positioner', 'reposition'], + 'repositor': ['posterior', 'repositor'], + 'repossession': ['possessioner', 'repossession'], + 'repost': ['poster', 'presto', 'repost', 'respot', 'stoper'], + 'repot': ['poter', 'prote', 'repot', 'tepor', 'toper', 'trope'], + 'repound': ['pounder', 'repound', 'unroped'], + 'repour': ['pourer', 'repour', 'rouper'], + 'repowder': ['powderer', 'repowder'], + 'repp': ['prep', 'repp'], + 'repray': ['prayer', 'repray'], + 'repreach': ['preacher', 'repreach'], + 'repredict': ['precredit', 'predirect', 'repredict'], + 'reprefer': ['prerefer', 'reprefer'], + 'represent': ['presenter', 'represent'], + 'representationism': ['misrepresentation', 'representationism'], + 'repress': ['presser', 'repress'], + 'repressive': ['repressive', 'respersive'], + 'reprice': ['piercer', 'reprice'], + 'reprieval': ['prevailer', 'reprieval'], + 'reprime': ['premier', 'reprime'], + 'reprint': ['printer', 'reprint'], + 'reprise': ['reprise', 'respire'], + 'repristination': ['interspiration', 'repristination'], + 'reproachable': ['blepharocera', 'reproachable'], + 'reprobate': ['perborate', 'prorebate', 'reprobate'], + 'reprobation': ['probationer', 'reprobation'], + 'reproceed': ['proceeder', 'reproceed'], + 'reproclaim': ['proclaimer', 'reproclaim'], + 'reproduce': ['procedure', 'reproduce'], + 'reproduction': ['proreduction', 'reproduction'], + 'reprohibit': ['prohibiter', 'reprohibit'], + 'reproof': ['proofer', 'reproof'], + 'reproportion': ['proportioner', 'reproportion'], + 'reprotection': ['interoceptor', 'reprotection'], + 'reprotest': ['protester', 'reprotest'], + 'reprovision': ['prorevision', 'provisioner', 'reprovision'], + 'reps': ['reps', 'resp'], + 'reptant': ['pattern', 'reptant'], + 'reptatorial': ['proletariat', 'reptatorial'], + 'reptatory': ['protreaty', 'reptatory'], + 'reptile': ['perlite', 'reptile'], + 'reptilia': ['liparite', 'reptilia'], + 'republish': ['publisher', 'republish'], + 'repudiatory': ['preauditory', 'repudiatory'], + 'repuff': ['puffer', 'repuff'], + 'repugn': ['punger', 'repugn'], + 'repulpit': ['pulpiter', 'repulpit'], + 'repulsion': ['prelusion', 'repulsion'], + 'repulsive': ['prelusive', 'repulsive'], + 'repulsively': ['prelusively', 'repulsively'], + 'repulsory': ['prelusory', 'repulsory'], + 'repump': ['pumper', 'repump'], + 'repunish': ['punisher', 'repunish'], + 'reputative': ['reputative', 'vituperate'], + 'repute': ['repute', 'uptree'], + 'requench': ['quencher', 'requench'], + 'request': ['quester', 'request'], + 'requestion': ['questioner', 'requestion'], + 'require': ['querier', 'require'], + 'requital': ['quartile', 'requital', 'triequal'], + 'requite': ['quieter', 'requite'], + 'rerack': ['racker', 'rerack'], + 'rerail': ['railer', 'rerail'], + 'reraise': ['rearise', 'reraise'], + 'rerake': ['karree', 'rerake'], + 'rerank': ['ranker', 'rerank'], + 'rerate': ['rerate', 'retare', 'tearer'], + 'reread': ['reader', 'redare', 'reread'], + 'rereel': ['reeler', 'rereel'], + 'reregister': ['registerer', 'reregister'], + 'rerent': ['renter', 'rerent'], + 'rerental': ['rentaler', 'rerental'], + 'rering': ['erring', 'rering', 'ringer'], + 'rerise': ['rerise', 'sirree'], 
+ 'rerivet': ['rerivet', 'riveter'], + 'rerob': ['borer', 'rerob', 'rober'], + 'rerobe': ['rebore', 'rerobe'], + 'reroll': ['reroll', 'roller'], + 'reroof': ['reroof', 'roofer'], + 'reroot': ['reroot', 'rooter', 'torero'], + 'rerow': ['rerow', 'rower'], + 'rerun': ['rerun', 'runer'], + 'resaca': ['ascare', 'caesar', 'resaca'], + 'resack': ['resack', 'sacker', 'screak'], + 'resail': ['israel', 'relais', 'resail', 'sailer', 'serail', 'serial'], + 'resale': ['alerse', 'leaser', 'reales', 'resale', 'reseal', 'sealer'], + 'resalt': ['laster', + 'lastre', + 'rastle', + 'relast', + 'resalt', + 'salter', + 'slater', + 'stelar'], + 'resanction': ['resanction', 'sanctioner'], + 'resaw': ['resaw', 'sawer', 'seraw', 'sware', 'swear', 'warse'], + 'resawer': ['resawer', 'reswear', 'swearer'], + 'resay': ['reasy', 'resay', 'sayer', 'seary'], + 'rescan': ['casern', 'rescan'], + 'rescind': ['discern', 'rescind'], + 'rescinder': ['discerner', 'rescinder'], + 'rescindment': ['discernment', 'rescindment'], + 'rescratch': ['rescratch', 'scratcher'], + 'rescuable': ['rescuable', 'securable'], + 'rescue': ['cereus', 'ceruse', 'recuse', 'rescue', 'secure'], + 'rescuer': ['recurse', 'rescuer', 'securer'], + 'reseal': ['alerse', 'leaser', 'reales', 'resale', 'reseal', 'sealer'], + 'reseam': ['reseam', 'seamer'], + 'research': ['rechaser', 'research', 'searcher'], + 'reseat': ['asteer', + 'easter', + 'eastre', + 'reseat', + 'saeter', + 'seater', + 'staree', + 'teaser', + 'teresa'], + 'resect': ['resect', 'screet', 'secret'], + 'resection': ['resection', 'secretion'], + 'resectional': ['resectional', 'secretional'], + 'reseda': ['erased', 'reseda', 'seared'], + 'resee': ['esere', 'reese', 'resee'], + 'reseed': ['reseed', 'seeder'], + 'reseek': ['reseek', 'seeker'], + 'resell': ['resell', 'seller'], + 'resend': ['resend', 'sender'], + 'resene': ['resene', 'serene'], + 'resent': ['ernest', 'nester', 'resent', 'streen'], + 'reservable': ['reservable', 'reversable'], + 'reserval': ['reserval', 'reversal', 'slaverer'], + 'reserve': ['reserve', 'resever', 'reverse', 'severer'], + 'reserved': ['deserver', 'reserved', 'reversed'], + 'reservedly': ['reservedly', 'reversedly'], + 'reserveful': ['reserveful', 'reverseful'], + 'reserveless': ['reserveless', 'reverseless'], + 'reserver': ['reserver', 'reverser'], + 'reservist': ['reservist', 'reversist'], + 'reset': ['ester', + 'estre', + 'reest', + 'reset', + 'steer', + 'stere', + 'stree', + 'terse', + 'tsere'], + 'resever': ['reserve', 'resever', 'reverse', 'severer'], + 'resew': ['resew', 'sewer', 'sweer'], + 'resex': ['resex', 'xeres'], + 'resh': ['hers', 'resh', 'sher'], + 'reshape': ['hespera', 'rephase', 'reshape'], + 'reshare': ['reshare', 'reshear', 'shearer'], + 'resharpen': ['resharpen', 'sharpener'], + 'reshear': ['reshare', 'reshear', 'shearer'], + 'reshearer': ['rehearser', 'reshearer'], + 'reshift': ['reshift', 'shifter'], + 'reshingle': ['englisher', 'reshingle'], + 'reship': ['perish', 'reship'], + 'reshipment': ['perishment', 'reshipment'], + 'reshoot': ['orthose', 'reshoot', 'shooter', 'soother'], + 'reshoulder': ['reshoulder', 'shoulderer'], + 'reshower': ['reshower', 'showerer'], + 'reshun': ['reshun', 'rushen'], + 'reshunt': ['reshunt', 'shunter'], + 'reshut': ['reshut', 'suther', 'thurse', 'tusher'], + 'reside': ['desire', 'reside'], + 'resident': ['indesert', 'inserted', 'resident'], + 'resider': ['derries', 'desirer', 'resider', 'serried'], + 'residua': ['residua', 'ursidae'], + 'resift': ['fister', 'resift', 'sifter', 'strife'], + 'resigh': ['resigh', 
'sigher'], + 'resign': ['resign', 'resing', 'signer', 'singer'], + 'resignal': ['resignal', 'seringal', 'signaler'], + 'resigned': ['designer', 'redesign', 'resigned'], + 'resile': ['lisere', 'resile'], + 'resiliate': ['israelite', 'resiliate'], + 'resilient': ['listerine', 'resilient'], + 'resilition': ['isonitrile', 'resilition'], + 'resilver': ['resilver', 'silverer', 'sliverer'], + 'resin': ['reins', 'resin', 'rinse', 'risen', 'serin', 'siren'], + 'resina': ['arisen', 'arsine', 'resina', 'serian'], + 'resinate': ['arsenite', 'resinate', 'teresian', 'teresina'], + 'resing': ['resign', 'resing', 'signer', 'singer'], + 'resinic': ['irenics', 'resinic', 'sericin', 'sirenic'], + 'resinize': ['resinize', 'sirenize'], + 'resink': ['resink', 'reskin', 'sinker'], + 'resinlike': ['resinlike', 'sirenlike'], + 'resinoid': ['derision', 'ironside', 'resinoid', 'sirenoid'], + 'resinol': ['resinol', 'serolin'], + 'resinous': ['neurosis', 'resinous'], + 'resinously': ['neurolysis', 'resinously'], + 'resiny': ['resiny', 'sireny'], + 'resist': ['resist', 'restis', 'sister'], + 'resistable': ['assertible', 'resistable'], + 'resistance': ['resistance', 'senatrices'], + 'resistful': ['fruitless', 'resistful'], + 'resisting': ['resisting', 'sistering'], + 'resistless': ['resistless', 'sisterless'], + 'resize': ['resize', 'seizer'], + 'resketch': ['resketch', 'sketcher'], + 'reskin': ['resink', 'reskin', 'sinker'], + 'reslash': ['reslash', 'slasher'], + 'reslate': ['realest', 'reslate', 'resteal', 'stealer', 'teasler'], + 'reslay': ['reslay', 'slayer'], + 'reslot': ['relost', 'reslot', 'rostel', 'sterol', 'torsel'], + 'resmell': ['resmell', 'smeller'], + 'resmelt': ['melters', 'resmelt', 'smelter'], + 'resmooth': ['resmooth', 'romeshot', 'smoother'], + 'resnap': ['resnap', 'respan', 'snaper'], + 'resnatch': ['resnatch', 'snatcher', 'stancher'], + 'resoak': ['arkose', 'resoak', 'soaker'], + 'resoap': ['resoap', 'soaper'], + 'resoften': ['resoften', 'softener'], + 'resoil': ['elisor', 'resoil'], + 'resojourn': ['resojourn', 'sojourner'], + 'resolder': ['resolder', 'solderer'], + 'resole': ['relose', 'resole'], + 'resolicit': ['resolicit', 'soliciter'], + 'resolution': ['resolution', 'solutioner'], + 'resonate': ['orestean', 'resonate', 'stearone'], + 'resort': ['resort', 'roster', 'sorter', 'storer'], + 'resorter': ['resorter', 'restorer', 'retrorse'], + 'resound': ['resound', 'sounder', 'unrosed'], + 'resource': ['recourse', 'resource'], + 'resow': ['owser', 'resow', 'serow', 'sower', 'swore', 'worse'], + 'resp': ['reps', 'resp'], + 'respace': ['escaper', 'respace'], + 'respade': ['psedera', 'respade'], + 'respan': ['resnap', 'respan', 'snaper'], + 'respeak': ['respeak', 'speaker'], + 'respect': ['respect', 'scepter', 'specter'], + 'respectless': ['respectless', 'scepterless'], + 'respell': ['presell', 'respell', 'speller'], + 'respersive': ['repressive', 'respersive'], + 'respin': ['pernis', 'respin', 'sniper'], + 'respiration': ['respiration', 'retinispora'], + 'respire': ['reprise', 'respire'], + 'respirit': ['respirit', 'spiriter'], + 'respite': ['respite', 'septier'], + 'resplend': ['resplend', 'splender'], + 'resplice': ['eclipser', 'pericles', 'resplice'], + 'responde': ['personed', 'responde'], + 'respondence': ['precondense', 'respondence'], + 'responsal': ['apronless', 'responsal'], + 'response': ['pessoner', 'response'], + 'respot': ['poster', 'presto', 'repost', 'respot', 'stoper'], + 'respray': ['respray', 'sprayer'], + 'respread': ['respread', 'spreader'], + 'respring': ['respring', 'springer'], 
+ 'resprout': ['posturer', 'resprout', 'sprouter'], + 'respue': ['peruse', 'respue'], + 'resqueak': ['resqueak', 'squeaker'], + 'ressaut': ['erastus', 'ressaut'], + 'rest': ['rest', 'sert', 'stre'], + 'restack': ['restack', 'stacker'], + 'restaff': ['restaff', 'staffer'], + 'restain': ['asterin', 'eranist', 'restain', 'stainer', 'starnie', 'stearin'], + 'restake': ['restake', 'sakeret'], + 'restamp': ['restamp', 'stamper'], + 'restart': ['restart', 'starter'], + 'restate': ['estreat', 'restate', 'retaste'], + 'resteal': ['realest', 'reslate', 'resteal', 'stealer', 'teasler'], + 'resteel': ['reestle', 'resteel', 'steeler'], + 'resteep': ['estrepe', 'resteep', 'steeper'], + 'restem': ['mester', 'restem', 'temser', 'termes'], + 'restep': ['pester', 'preset', 'restep', 'streep'], + 'restful': ['fluster', 'restful'], + 'restiad': ['astride', 'diaster', 'disrate', 'restiad', 'staired'], + 'restiffen': ['restiffen', 'stiffener'], + 'restiform': ['reformist', 'restiform'], + 'resting': ['resting', 'stinger'], + 'restio': ['restio', 'sorite', 'sortie', 'triose'], + 'restis': ['resist', 'restis', 'sister'], + 'restitch': ['restitch', 'stitcher'], + 'restive': ['restive', 'servite'], + 'restock': ['restock', 'stocker'], + 'restorer': ['resorter', 'restorer', 'retrorse'], + 'restow': ['restow', 'stower', 'towser', 'worset'], + 'restowal': ['restowal', 'sealwort'], + 'restraighten': ['restraighten', 'straightener'], + 'restrain': ['restrain', 'strainer', 'transire'], + 'restraint': ['restraint', 'retransit', 'trainster', 'transiter'], + 'restream': ['masterer', 'restream', 'streamer'], + 'restrengthen': ['restrengthen', 'strengthener'], + 'restress': ['restress', 'stresser'], + 'restretch': ['restretch', 'stretcher'], + 'restring': ['restring', 'ringster', 'stringer'], + 'restrip': ['restrip', 'striper'], + 'restrive': ['restrive', 'reverist'], + 'restuff': ['restuff', 'stuffer'], + 'resty': ['resty', 'strey'], + 'restyle': ['restyle', 'tersely'], + 'resucceed': ['resucceed', 'succeeder'], + 'resuck': ['resuck', 'sucker'], + 'resue': ['resue', 'reuse'], + 'resuffer': ['resuffer', 'sufferer'], + 'resuggest': ['resuggest', 'suggester'], + 'resuing': ['insurge', 'resuing'], + 'resuit': ['isuret', 'resuit'], + 'result': ['luster', 'result', 'rustle', 'sutler', 'ulster'], + 'resulting': ['resulting', 'ulstering'], + 'resultless': ['lusterless', 'lustreless', 'resultless'], + 'resummon': ['resummon', 'summoner'], + 'resun': ['nurse', 'resun'], + 'resup': ['purse', 'resup', 'sprue', 'super'], + 'resuperheat': ['resuperheat', 'superheater'], + 'resupinate': ['interpause', 'resupinate'], + 'resupination': ['resupination', 'uranospinite'], + 'resupport': ['resupport', 'supporter'], + 'resuppose': ['resuppose', 'superpose'], + 'resupposition': ['resupposition', 'superposition'], + 'resuppress': ['resuppress', 'suppresser'], + 'resurrender': ['resurrender', 'surrenderer'], + 'resurround': ['resurround', 'surrounder'], + 'resuspect': ['resuspect', 'suspecter'], + 'resuspend': ['resuspend', 'suspender', 'unpressed'], + 'reswallow': ['reswallow', 'swallower'], + 'resward': ['drawers', 'resward'], + 'reswarm': ['reswarm', 'swarmer'], + 'reswear': ['resawer', 'reswear', 'swearer'], + 'resweat': ['resweat', 'sweater'], + 'resweep': ['resweep', 'sweeper'], + 'reswell': ['reswell', 'sweller'], + 'reswill': ['reswill', 'swiller'], + 'retable': ['bearlet', 'bleater', 'elberta', 'retable'], + 'retack': ['racket', 'retack', 'tacker'], + 'retag': ['gater', 'grate', 'great', 'greta', 'retag', 'targe'], + 'retail': ['lirate', 
'retail', 'retial', 'tailer'], + 'retailer': ['irrelate', 'retailer'], + 'retain': ['nerita', 'ratine', 'retain', 'retina', 'tanier'], + 'retainal': ['retainal', 'telarian'], + 'retainder': ['irredenta', 'retainder'], + 'retainer': ['arretine', 'eretrian', 'eritrean', 'retainer'], + 'retaining': ['negritian', 'retaining'], + 'retaliate': ['elettaria', 'retaliate'], + 'retalk': ['kartel', 'retalk', 'talker'], + 'retama': ['ramate', 'retama'], + 'retame': ['reetam', 'retame', 'teamer'], + 'retan': ['antre', 'arent', 'retan', 'terna'], + 'retape': ['petrea', 'repeat', 'retape'], + 'retard': ['darter', + 'dartre', + 'redart', + 'retard', + 'retrad', + 'tarred', + 'trader'], + 'retardent': ['retardent', 'tetrander'], + 'retare': ['rerate', 'retare', 'tearer'], + 'retaste': ['estreat', 'restate', 'retaste'], + 'retax': ['extra', 'retax', 'taxer'], + 'retaxation': ['retaxation', 'tetraxonia'], + 'retch': ['chert', 'retch'], + 'reteach': ['cheater', 'hectare', 'recheat', 'reteach', 'teacher'], + 'retelegraph': ['retelegraph', 'telegrapher'], + 'retell': ['retell', 'teller'], + 'retem': ['meter', 'retem'], + 'retemper': ['retemper', 'temperer'], + 'retempt': ['retempt', 'tempter'], + 'retenant': ['retenant', 'tenanter'], + 'retender': ['retender', 'tenderer'], + 'retene': ['entree', 'rentee', 'retene'], + 'retent': ['netter', 'retent', 'tenter'], + 'retention': ['intertone', 'retention'], + 'retepora': ['perorate', 'retepora'], + 'retest': ['retest', 'setter', 'street', 'tester'], + 'rethank': ['rethank', 'thanker'], + 'rethatch': ['rethatch', 'thatcher'], + 'rethaw': ['rethaw', 'thawer', 'wreath'], + 'rethe': ['ether', 'rethe', 'theer', 'there', 'three'], + 'retheness': ['retheness', 'thereness', 'threeness'], + 'rethicken': ['kitchener', 'rethicken', 'thickener'], + 'rethink': ['rethink', 'thinker'], + 'rethrash': ['rethrash', 'thrasher'], + 'rethread': ['rethread', 'threader'], + 'rethreaten': ['rethreaten', 'threatener'], + 'rethresh': ['rethresh', 'thresher'], + 'rethrill': ['rethrill', 'thriller'], + 'rethrow': ['rethrow', 'thrower'], + 'rethrust': ['rethrust', 'thruster'], + 'rethunder': ['rethunder', 'thunderer'], + 'retia': ['arite', 'artie', 'irate', 'retia', 'tarie'], + 'retial': ['lirate', 'retail', 'retial', 'tailer'], + 'reticent': ['reticent', 'tencteri'], + 'reticket': ['reticket', 'ticketer'], + 'reticula': ['arculite', 'cutleria', 'lucretia', 'reticula', 'treculia'], + 'reticular': ['curtailer', 'recruital', 'reticular'], + 'retier': ['errite', 'reiter', 'retier', 'retire', 'tierer'], + 'retighten': ['retighten', 'tightener'], + 'retill': ['retill', 'rillet', 'tiller'], + 'retimber': ['retimber', 'timberer'], + 'retime': ['metier', 'retime', 'tremie'], + 'retin': ['inert', 'inter', 'niter', 'retin', 'trine'], + 'retina': ['nerita', 'ratine', 'retain', 'retina', 'tanier'], + 'retinal': ['entrail', + 'latiner', + 'latrine', + 'ratline', + 'reliant', + 'retinal', + 'trenail'], + 'retinalite': ['retinalite', 'trilineate'], + 'retinene': ['internee', 'retinene'], + 'retinian': ['neritina', 'retinian'], + 'retinispora': ['respiration', 'retinispora'], + 'retinite': ['intertie', 'retinite'], + 'retinker': ['retinker', 'tinkerer'], + 'retinochorioiditis': ['chorioidoretinitis', 'retinochorioiditis'], + 'retinoid': ['neritoid', 'retinoid'], + 'retinue': ['neurite', 'retinue', 'reunite', 'uterine'], + 'retinula': ['lutrinae', 'retinula', 'rutelian', 'tenurial'], + 'retinular': ['retinular', 'trineural'], + 'retip': ['perit', 'retip', 'tripe'], + 'retiral': ['retiral', 'retrial', 'trailer'], 
+ 'retire': ['errite', 'reiter', 'retier', 'retire', 'tierer'], + 'retirer': ['retirer', 'terrier'], + 'retistene': ['retistene', 'serinette'], + 'retoast': ['retoast', 'rosetta', 'stoater', 'toaster'], + 'retold': ['retold', 'rodlet'], + 'retomb': ['retomb', 'trombe'], + 'retonation': ['renotation', 'retonation'], + 'retool': ['looter', 'retool', 'rootle', 'tooler'], + 'retooth': ['retooth', 'toother'], + 'retort': ['retort', 'retrot', 'rotter'], + 'retoss': ['retoss', 'tosser'], + 'retouch': ['retouch', 'toucher'], + 'retour': ['retour', 'router', 'tourer'], + 'retrace': ['caterer', 'recrate', 'retrace', 'terrace'], + 'retrack': ['retrack', 'tracker'], + 'retractation': ['reattraction', 'retractation'], + 'retracted': ['detracter', 'retracted'], + 'retraction': ['retraction', 'triaconter'], + 'retrad': ['darter', + 'dartre', + 'redart', + 'retard', + 'retrad', + 'tarred', + 'trader'], + 'retrade': ['derater', 'retrade', 'retread', 'treader'], + 'retradition': ['retradition', 'traditioner'], + 'retrain': ['arterin', 'retrain', 'terrain', 'trainer'], + 'retral': ['retral', 'terral'], + 'retramp': ['retramp', 'tramper'], + 'retransform': ['retransform', 'transformer'], + 'retransit': ['restraint', 'retransit', 'trainster', 'transiter'], + 'retransplant': ['retransplant', 'transplanter'], + 'retransport': ['retransport', 'transporter'], + 'retravel': ['retravel', 'revertal', 'traveler'], + 'retread': ['derater', 'retrade', 'retread', 'treader'], + 'retreat': ['ettarre', 'retreat', 'treater'], + 'retree': ['retree', 'teerer'], + 'retrench': ['retrench', 'trencher'], + 'retrial': ['retiral', 'retrial', 'trailer'], + 'retrim': ['mitrer', 'retrim', 'trimer'], + 'retrocaecal': ['accelerator', 'retrocaecal'], + 'retrogradient': ['redintegrator', 'retrogradient'], + 'retrorse': ['resorter', 'restorer', 'retrorse'], + 'retrot': ['retort', 'retrot', 'rotter'], + 'retrue': ['retrue', 'ureter'], + 'retrust': ['retrust', 'truster'], + 'retry': ['retry', 'terry'], + 'retter': ['retter', 'terret'], + 'retting': ['gittern', 'gritten', 'retting'], + 'retube': ['rebute', 'retube'], + 'retuck': ['retuck', 'tucker'], + 'retune': ['neuter', 'retune', 'runtee', 'tenure', 'tureen'], + 'returf': ['returf', 'rufter'], + 'return': ['return', 'turner'], + 'retuse': ['retuse', 'tereus'], + 'retwine': ['enwrite', 'retwine'], + 'retwist': ['retwist', 'twister'], + 'retzian': ['retzian', 'terzina'], + 'reub': ['bure', 'reub', 'rube'], + 'reundergo': ['guerdoner', 'reundergo', 'undergoer', 'undergore'], + 'reune': ['enure', 'reune'], + 'reunfold': ['flounder', 'reunfold', 'unfolder'], + 'reunify': ['reunify', 'unfiery'], + 'reunionist': ['reunionist', 'sturionine'], + 'reunite': ['neurite', 'retinue', 'reunite', 'uterine'], + 'reunpack': ['reunpack', 'unpacker'], + 'reuphold': ['reuphold', 'upholder'], + 'reupholster': ['reupholster', 'upholsterer'], + 'reuplift': ['reuplift', 'uplifter'], + 'reuse': ['resue', 'reuse'], + 'reutter': ['reutter', 'utterer'], + 'revacate': ['acervate', 'revacate'], + 'revalidation': ['derivational', 'revalidation'], + 'revamp': ['revamp', 'vamper'], + 'revarnish': ['revarnish', 'varnisher'], + 'reve': ['ever', 'reve', 'veer'], + 'reveal': ['laveer', 'leaver', 'reveal', 'vealer'], + 'reveil': ['levier', 'relive', 'reveil', 'revile', 'veiler'], + 'revel': ['elver', 'lever', 'revel'], + 'revelant': ['levanter', 'relevant', 'revelant'], + 'revelation': ['relevation', 'revelation'], + 'revelator': ['relevator', 'revelator', 'veratrole'], + 'reveler': ['leverer', 'reveler'], + 'revenant': 
['revenant', 'venerant'], + 'revend': ['revend', 'vender'], + 'revender': ['revender', 'reverend'], + 'reveneer': ['reveneer', 'veneerer'], + 'revent': ['revent', 'venter'], + 'revenue': ['revenue', 'unreeve'], + 'rever': ['rever', 'verre'], + 'reverend': ['revender', 'reverend'], + 'reverential': ['interleaver', 'reverential'], + 'reverist': ['restrive', 'reverist'], + 'revers': ['revers', 'server', 'verser'], + 'reversable': ['reservable', 'reversable'], + 'reversal': ['reserval', 'reversal', 'slaverer'], + 'reverse': ['reserve', 'resever', 'reverse', 'severer'], + 'reversed': ['deserver', 'reserved', 'reversed'], + 'reversedly': ['reservedly', 'reversedly'], + 'reverseful': ['reserveful', 'reverseful'], + 'reverseless': ['reserveless', 'reverseless'], + 'reverser': ['reserver', 'reverser'], + 'reversewise': ['reversewise', 'revieweress'], + 'reversi': ['reversi', 'reviser'], + 'reversion': ['reversion', 'versioner'], + 'reversist': ['reservist', 'reversist'], + 'revertal': ['retravel', 'revertal', 'traveler'], + 'revest': ['revest', 'servet', 'sterve', 'verset', 'vester'], + 'revet': ['evert', 'revet'], + 'revete': ['revete', 'tervee'], + 'revictual': ['lucrative', 'revictual', 'victualer'], + 'review': ['review', 'viewer'], + 'revieweress': ['reversewise', 'revieweress'], + 'revigorate': ['overgaiter', 'revigorate'], + 'revile': ['levier', 'relive', 'reveil', 'revile', 'veiler'], + 'reviling': ['reviling', 'vierling'], + 'revisal': ['revisal', 'virales'], + 'revise': ['revise', 'siever'], + 'revised': ['deviser', 'diverse', 'revised'], + 'reviser': ['reversi', 'reviser'], + 'revision': ['revision', 'visioner'], + 'revisit': ['revisit', 'visiter'], + 'revisitant': ['revisitant', 'transitive'], + 'revitalization': ['relativization', 'revitalization'], + 'revitalize': ['relativize', 'revitalize'], + 'revocation': ['overaction', 'revocation'], + 'revocative': ['overactive', 'revocative'], + 'revoke': ['evoker', 'revoke'], + 'revolting': ['overglint', 'revolting'], + 'revolute': ['revolute', 'truelove'], + 'revolve': ['evolver', 'revolve'], + 'revomit': ['revomit', 'vomiter'], + 'revote': ['revote', 'vetoer'], + 'revuist': ['revuist', 'stuiver'], + 'rewade': ['drawee', 'rewade'], + 'rewager': ['rewager', 'wagerer'], + 'rewake': ['kerewa', 'rewake'], + 'rewaken': ['rewaken', 'wakener'], + 'rewall': ['rewall', 'waller'], + 'rewallow': ['rewallow', 'wallower'], + 'reward': ['drawer', 'redraw', 'reward', 'warder'], + 'rewarder': ['redrawer', 'rewarder', 'warderer'], + 'rewarm': ['rewarm', 'warmer'], + 'rewarn': ['rewarn', 'warner', 'warren'], + 'rewash': ['hawser', 'rewash', 'washer'], + 'rewater': ['rewater', 'waterer'], + 'rewave': ['rewave', 'weaver'], + 'rewax': ['rewax', 'waxer'], + 'reweaken': ['reweaken', 'weakener'], + 'rewear': ['rewear', 'warree', 'wearer'], + 'rewed': ['dewer', 'ewder', 'rewed'], + 'reweigh': ['reweigh', 'weigher'], + 'reweld': ['reweld', 'welder'], + 'rewet': ['rewet', 'tewer', 'twere'], + 'rewhirl': ['rewhirl', 'whirler'], + 'rewhisper': ['rewhisper', 'whisperer'], + 'rewhiten': ['rewhiten', 'whitener'], + 'rewiden': ['rewiden', 'widener'], + 'rewin': ['erwin', 'rewin', 'winer'], + 'rewind': ['rewind', 'winder'], + 'rewish': ['rewish', 'wisher'], + 'rewithdraw': ['rewithdraw', 'withdrawer'], + 'reword': ['reword', 'worder'], + 'rework': ['rework', 'worker'], + 'reworked': ['reedwork', 'reworked'], + 'rewound': ['rewound', 'unrowed', 'wounder'], + 'rewoven': ['overnew', 'rewoven'], + 'rewrap': ['prewar', 'rewrap', 'warper'], + 'reyield': ['reedily', 'reyield', 
'yielder'], + 'rhacianectes': ['rachianectes', 'rhacianectes'], + 'rhaetian': ['earthian', 'rhaetian'], + 'rhaetic': ['certhia', 'rhaetic', 'theriac'], + 'rhamnose': ['horseman', 'rhamnose', 'shoreman'], + 'rhamnoside': ['admonisher', 'rhamnoside'], + 'rhapis': ['parish', 'raphis', 'rhapis'], + 'rhapontic': ['anthropic', 'rhapontic'], + 'rhaponticin': ['panornithic', 'rhaponticin'], + 'rhason': ['rhason', 'sharon', 'shoran'], + 'rhatania': ['ratanhia', 'rhatania'], + 'rhe': ['her', 'reh', 'rhe'], + 'rhea': ['hare', 'hear', 'rhea'], + 'rheen': ['herne', 'rheen'], + 'rheic': ['cheir', 'rheic'], + 'rhein': ['hiren', 'rhein', 'rhine'], + 'rheinic': ['hircine', 'rheinic'], + 'rhema': ['harem', 'herma', 'rhema'], + 'rhematic': ['athermic', 'marchite', 'rhematic'], + 'rheme': ['herem', 'rheme'], + 'rhemist': ['rhemist', 'smither'], + 'rhenium': ['inhumer', 'rhenium'], + 'rheometric': ['chirometer', 'rheometric'], + 'rheophile': ['herophile', 'rheophile'], + 'rheoscope': ['prechoose', 'rheoscope'], + 'rheostatic': ['choristate', 'rheostatic'], + 'rheotactic': ['rheotactic', 'theocratic'], + 'rheotan': ['another', 'athenor', 'rheotan'], + 'rheotropic': ['horopteric', 'rheotropic', 'trichopore'], + 'rhesian': ['arshine', 'nearish', 'rhesian', 'sherani'], + 'rhesus': ['rhesus', 'suresh'], + 'rhetor': ['rhetor', 'rother'], + 'rhetoricals': ['rhetoricals', 'trochlearis'], + 'rhetorize': ['rhetorize', 'theorizer'], + 'rheumatic': ['hematuric', 'rheumatic'], + 'rhine': ['hiren', 'rhein', 'rhine'], + 'rhinestone': ['neornithes', 'rhinestone'], + 'rhineura': ['rhineura', 'unhairer'], + 'rhinocele': ['cholerine', 'rhinocele'], + 'rhinopharyngitis': ['pharyngorhinitis', 'rhinopharyngitis'], + 'rhipidate': ['rhipidate', 'thripidae'], + 'rhizoctonia': ['chorization', 'rhizoctonia', 'zonotrichia'], + 'rhoda': ['hoard', 'rhoda'], + 'rhodaline': ['hodiernal', 'rhodaline'], + 'rhodanthe': ['rhodanthe', 'thornhead'], + 'rhodeose': ['rhodeose', 'seerhood'], + 'rhodes': ['dehors', 'rhodes', 'shoder', 'shored'], + 'rhodic': ['orchid', 'rhodic'], + 'rhodite': ['rhodite', 'theroid'], + 'rhodium': ['humidor', 'rhodium'], + 'rhodope': ['redhoop', 'rhodope'], + 'rhodopsin': ['donorship', 'rhodopsin'], + 'rhoecus': ['choreus', 'chouser', 'rhoecus'], + 'rhopalic': ['orphical', 'rhopalic'], + 'rhus': ['rhus', 'rush'], + 'rhynchotal': ['chloranthy', 'rhynchotal'], + 'rhyton': ['rhyton', 'thorny'], + 'ria': ['air', 'ira', 'ria'], + 'rial': ['aril', 'lair', 'lari', 'liar', 'lira', 'rail', 'rial'], + 'riancy': ['cairny', 'riancy'], + 'riant': ['riant', 'tairn', 'tarin', 'train'], + 'riata': ['arati', 'atria', 'riata', 'tarai', 'tiara'], + 'ribald': ['bildar', 'bridal', 'ribald'], + 'ribaldly': ['bridally', 'ribaldly'], + 'riband': ['brandi', 'riband'], + 'ribat': ['barit', 'ribat'], + 'ribbed': ['dibber', 'ribbed'], + 'ribber': ['briber', 'ribber'], + 'ribble': ['libber', 'ribble'], + 'ribbon': ['ribbon', 'robbin'], + 'ribe': ['beri', 'bier', 'brei', 'ribe'], + 'ribes': ['birse', 'ribes'], + 'riblet': ['beltir', 'riblet'], + 'ribroast': ['arborist', 'ribroast'], + 'ribspare': ['ribspare', 'sparerib'], + 'rice': ['eric', 'rice'], + 'ricer': ['crier', 'ricer'], + 'ricey': ['criey', 'ricey'], + 'richardia': ['charadrii', 'richardia'], + 'richdom': ['chromid', 'richdom'], + 'richen': ['enrich', 'nicher', 'richen'], + 'riches': ['riches', 'shicer'], + 'richt': ['crith', 'richt'], + 'ricine': ['irenic', 'ricine'], + 'ricinoleate': ['arenicolite', 'ricinoleate'], + 'rickets': ['rickets', 'sticker'], + 'rickle': ['licker', 'relick', 
'rickle'], + 'rictal': ['citral', 'rictal'], + 'rictus': ['citrus', 'curtis', 'rictus', 'rustic'], + 'ridable': ['bedrail', 'bridale', 'ridable'], + 'ridably': ['bardily', 'rabidly', 'ridably'], + 'riddam': ['madrid', 'riddam'], + 'riddance': ['adendric', 'riddance'], + 'riddel': ['lidder', 'riddel', 'riddle'], + 'ridden': ['dinder', 'ridden', 'rinded'], + 'riddle': ['lidder', 'riddel', 'riddle'], + 'ride': ['dier', 'dire', 'reid', 'ride'], + 'rideau': ['auride', 'rideau'], + 'riden': ['diner', 'riden', 'rinde'], + 'rident': ['dirten', 'rident', 'tinder'], + 'rider': ['drier', 'rider'], + 'ridered': ['deirdre', 'derider', 'derride', 'ridered'], + 'ridge': ['dirge', 'gride', 'redig', 'ridge'], + 'ridgel': ['gilder', 'girdle', 'glider', 'regild', 'ridgel'], + 'ridgelike': ['dirgelike', 'ridgelike'], + 'ridger': ['girder', 'ridger'], + 'ridging': ['girding', 'ridging'], + 'ridgingly': ['girdingly', 'ridgingly'], + 'ridgling': ['girdling', 'ridgling'], + 'ridgy': ['igdyr', 'ridgy'], + 'rie': ['ire', 'rie'], + 'riem': ['emir', 'imer', 'mire', 'reim', 'remi', 'riem', 'rime'], + 'rife': ['fire', 'reif', 'rife'], + 'rifeness': ['finesser', 'rifeness'], + 'rifle': ['filer', 'flier', 'lifer', 'rifle'], + 'rifleman': ['inflamer', 'rifleman'], + 'rift': ['frit', 'rift'], + 'rigadoon': ['gordonia', 'organoid', 'rigadoon'], + 'rigation': ['rigation', 'trigonia'], + 'rigbane': ['bearing', 'begrain', 'brainge', 'rigbane'], + 'right': ['girth', 'grith', 'right'], + 'rightle': ['lighter', 'relight', 'rightle'], + 'rigling': ['girling', 'rigling'], + 'rigolette': ['gloriette', 'rigolette'], + 'rik': ['irk', 'rik'], + 'rikisha': ['rikisha', 'shikari'], + 'rikk': ['kirk', 'rikk'], + 'riksha': ['rakish', 'riksha', 'shikar', 'shikra', 'sikhra'], + 'rile': ['lier', 'lire', 'rile'], + 'rillet': ['retill', 'rillet', 'tiller'], + 'rillett': ['rillett', 'trillet'], + 'rillock': ['rillock', 'rollick'], + 'rim': ['mir', 'rim'], + 'rima': ['amir', 'irma', 'mari', 'mira', 'rami', 'rima'], + 'rimal': ['armil', 'marli', 'rimal'], + 'rimate': ['imaret', 'metria', 'mirate', 'rimate'], + 'rime': ['emir', 'imer', 'mire', 'reim', 'remi', 'riem', 'rime'], + 'rimmed': ['dimmer', 'immerd', 'rimmed'], + 'rimose': ['isomer', 'rimose'], + 'rimple': ['limper', 'prelim', 'rimple'], + 'rimu': ['muir', 'rimu'], + 'rimula': ['rimula', 'uramil'], + 'rimy': ['miry', 'rimy', 'yirm'], + 'rinaldo': ['nailrod', 'ordinal', 'rinaldo', 'rodinal'], + 'rinceau': ['aneuric', 'rinceau'], + 'rincon': ['cornin', 'rincon'], + 'rinde': ['diner', 'riden', 'rinde'], + 'rinded': ['dinder', 'ridden', 'rinded'], + 'rindle': ['linder', 'rindle'], + 'rine': ['neri', 'rein', 'rine'], + 'ring': ['girn', 'grin', 'ring'], + 'ringable': ['balinger', 'ringable'], + 'ringe': ['grein', 'inger', 'nigre', 'regin', 'reign', 'ringe'], + 'ringed': ['engird', 'ringed'], + 'ringer': ['erring', 'rering', 'ringer'], + 'ringgoer': ['gorgerin', 'ringgoer'], + 'ringhead': ['headring', 'ringhead'], + 'ringite': ['igniter', 'ringite', 'tigrine'], + 'ringle': ['linger', 'ringle'], + 'ringlead': ['dragline', 'reginald', 'ringlead'], + 'ringlet': ['ringlet', 'tingler', 'tringle'], + 'ringster': ['restring', 'ringster', 'stringer'], + 'ringtail': ['ringtail', 'trailing'], + 'ringy': ['girny', 'ringy'], + 'rink': ['kirn', 'rink'], + 'rinka': ['inkra', 'krina', 'nakir', 'rinka'], + 'rinse': ['reins', 'resin', 'rinse', 'risen', 'serin', 'siren'], + 'rio': ['rio', 'roi'], + 'riot': ['riot', 'roit', 'trio'], + 'rioting': ['ignitor', 'rioting'], + 'rip': ['pir', 'rip'], + 'ripa': ['pair', 
'pari', 'pria', 'ripa'], + 'ripal': ['april', 'pilar', 'ripal'], + 'ripe': ['peri', 'pier', 'ripe'], + 'ripelike': ['pierlike', 'ripelike'], + 'ripen': ['piner', 'prine', 'repin', 'ripen'], + 'ripener': ['repiner', 'ripener'], + 'ripeningly': ['repiningly', 'ripeningly'], + 'riper': ['prier', 'riper'], + 'ripgut': ['ripgut', 'upgirt'], + 'ripost': ['ripost', 'triops', 'tripos'], + 'riposte': ['periost', 'porites', 'reposit', 'riposte'], + 'rippet': ['rippet', 'tipper'], + 'ripple': ['lipper', 'ripple'], + 'ripplet': ['ripplet', 'tippler', 'tripple'], + 'ripup': ['ripup', 'uprip'], + 'rise': ['reis', 'rise', 'seri', 'sier', 'sire'], + 'risen': ['reins', 'resin', 'rinse', 'risen', 'serin', 'siren'], + 'rishi': ['irish', 'rishi', 'sirih'], + 'risk': ['kris', 'risk'], + 'risky': ['risky', 'sirky'], + 'risper': ['risper', 'sprier'], + 'risque': ['risque', 'squire'], + 'risquee': ['esquire', 'risquee'], + 'rissel': ['rissel', 'rissle'], + 'rissle': ['rissel', 'rissle'], + 'rissoa': ['aissor', 'rissoa'], + 'rist': ['rist', 'stir'], + 'rit': ['rit', 'tri'], + 'rita': ['airt', 'rita', 'tari', 'tiar'], + 'rite': ['iter', 'reit', 'rite', 'teri', 'tier', 'tire'], + 'riteless': ['riteless', 'tireless'], + 'ritelessness': ['ritelessness', 'tirelessness'], + 'ritling': ['glitnir', 'ritling'], + 'ritualize': ['ritualize', 'uralitize'], + 'riva': ['ravi', 'riva', 'vair', 'vari', 'vira'], + 'rivage': ['argive', 'rivage'], + 'rival': ['rival', 'viral'], + 'rive': ['rive', 'veri', 'vier', 'vire'], + 'rivel': ['levir', 'liver', 'livre', 'rivel'], + 'riven': ['riven', 'viner'], + 'rivered': ['deriver', 'redrive', 'rivered'], + 'rivet': ['rivet', 'tirve', 'tiver'], + 'riveter': ['rerivet', 'riveter'], + 'rivetless': ['rivetless', 'silvester'], + 'riving': ['irving', 'riving', 'virgin'], + 'rivingly': ['rivingly', 'virginly'], + 'rivose': ['rivose', 'virose'], + 'riyal': ['lairy', 'riyal'], + 'ro': ['or', 'ro'], + 'roach': ['achor', 'chora', 'corah', 'orach', 'roach'], + 'road': ['dora', 'orad', 'road'], + 'roadability': ['adorability', 'roadability'], + 'roadable': ['adorable', 'roadable'], + 'roader': ['adorer', 'roader'], + 'roading': ['gordian', 'idorgan', 'roading'], + 'roadite': ['roadite', 'toadier'], + 'roadman': ['anadrom', 'madrona', 'mandora', 'monarda', 'roadman'], + 'roadster': ['dartrose', 'roadster'], + 'roam': ['amor', 'maro', 'mora', 'omar', 'roam'], + 'roamage': ['georama', 'roamage'], + 'roamer': ['remora', 'roamer'], + 'roaming': ['ingomar', 'moringa', 'roaming'], + 'roan': ['nora', 'orna', 'roan'], + 'roast': ['astor', 'roast'], + 'roastable': ['astrolabe', 'roastable'], + 'roasting': ['orangist', 'organist', 'roasting', 'signator'], + 'rob': ['bor', 'orb', 'rob'], + 'robalo': ['barolo', 'robalo'], + 'roband': ['bandor', 'bondar', 'roband'], + 'robbin': ['ribbon', 'robbin'], + 'robe': ['boer', 'bore', 'robe'], + 'rober': ['borer', 'rerob', 'rober'], + 'roberd': ['border', 'roberd'], + 'roberta': ['arboret', 'roberta', 'taborer'], + 'robin': ['biron', 'inorb', 'robin'], + 'robinet': ['bornite', 'robinet'], + 'robing': ['boring', 'robing'], + 'roble': ['blore', 'roble'], + 'robot': ['boort', 'robot'], + 'robotian': ['abortion', 'robotian'], + 'robotism': ['bimotors', 'robotism'], + 'robur': ['burro', 'robur', 'rubor'], + 'roc': ['cor', 'cro', 'orc', 'roc'], + 'rochea': ['chorea', 'ochrea', 'rochea'], + 'rochet': ['hector', 'rochet', 'tocher', 'troche'], + 'rock': ['cork', 'rock'], + 'rocker': ['corker', 'recork', 'rocker'], + 'rocketer': ['rocketer', 'rocktree'], + 'rockiness': ['corkiness', 
'rockiness'], + 'rocking': ['corking', 'rocking'], + 'rockish': ['corkish', 'rockish'], + 'rocktree': ['rocketer', 'rocktree'], + 'rockwood': ['corkwood', 'rockwood', 'woodrock'], + 'rocky': ['corky', 'rocky'], + 'rocta': ['actor', 'corta', 'croat', 'rocta', 'taroc', 'troca'], + 'rod': ['dor', 'rod'], + 'rode': ['doer', 'redo', 'rode', 'roed'], + 'rodentia': ['andorite', 'nadorite', 'ordinate', 'rodentia'], + 'rodential': ['lorandite', 'rodential'], + 'rodinal': ['nailrod', 'ordinal', 'rinaldo', 'rodinal'], + 'rodingite': ['negritoid', 'rodingite'], + 'rodless': ['drossel', 'rodless'], + 'rodlet': ['retold', 'rodlet'], + 'rodman': ['random', 'rodman'], + 'rodney': ['rodney', 'yonder'], + 'roe': ['oer', 'ore', 'roe'], + 'roed': ['doer', 'redo', 'rode', 'roed'], + 'roey': ['oyer', 'roey', 'yore'], + 'rog': ['gor', 'rog'], + 'rogan': ['angor', + 'argon', + 'goran', + 'grano', + 'groan', + 'nagor', + 'orang', + 'organ', + 'rogan', + 'ronga'], + 'rogative': ['ravigote', 'rogative'], + 'roger': ['gorer', 'roger'], + 'roggle': ['logger', 'roggle'], + 'rogue': ['orgue', 'rogue', 'rouge'], + 'rohan': ['nahor', 'norah', 'rohan'], + 'rohob': ['bohor', 'rohob'], + 'rohun': ['huron', 'rohun'], + 'roi': ['rio', 'roi'], + 'roid': ['dori', 'roid'], + 'roil': ['loir', 'lori', 'roil'], + 'roister': ['roister', 'storier'], + 'roit': ['riot', 'roit', 'trio'], + 'rok': ['kor', 'rok'], + 'roka': ['karo', 'kora', 'okra', 'roka'], + 'roke': ['kore', 'roke'], + 'rokey': ['rokey', 'yoker'], + 'roky': ['kory', 'roky', 'york'], + 'roland': ['androl', 'arnold', 'lardon', 'roland', 'ronald'], + 'rolandic': ['ironclad', 'rolandic'], + 'role': ['lore', 'orle', 'role'], + 'rolfe': ['forel', 'rolfe'], + 'roller': ['reroll', 'roller'], + 'rollick': ['rillock', 'rollick'], + 'romaean': ['neorama', 'romaean'], + 'romain': ['marion', 'romain'], + 'romaine': ['moraine', 'romaine'], + 'romal': ['molar', 'moral', 'romal'], + 'roman': ['manor', 'moran', 'norma', 'ramon', 'roman'], + 'romancist': ['narcotism', 'romancist'], + 'romancy': ['acronym', 'romancy'], + 'romandom': ['monodram', 'romandom'], + 'romane': ['enamor', 'monera', 'oreman', 'romane'], + 'romanes': ['masoner', 'romanes'], + 'romanian': ['maronian', 'romanian'], + 'romanic': ['amicron', 'marconi', 'minorca', 'romanic'], + 'romanist': ['maronist', 'romanist'], + 'romanistic': ['marcionist', 'romanistic'], + 'romanite': ['maronite', 'martinoe', 'minorate', 'morenita', 'romanite'], + 'romanity': ['minatory', 'romanity'], + 'romanly': ['almonry', 'romanly'], + 'romantic': ['macrotin', 'romantic'], + 'romanticly': ['matrocliny', 'romanticly'], + 'romantism': ['matronism', 'romantism'], + 'rome': ['mero', 'more', 'omer', 'rome'], + 'romeite': ['moieter', 'romeite'], + 'romeo': ['moore', 'romeo'], + 'romero': ['romero', 'roomer'], + 'romeshot': ['resmooth', 'romeshot', 'smoother'], + 'romeward': ['marrowed', 'romeward'], + 'romic': ['micro', 'moric', 'romic'], + 'romish': ['hirmos', 'romish'], + 'rompish': ['orphism', 'rompish'], + 'ron': ['nor', 'ron'], + 'ronald': ['androl', 'arnold', 'lardon', 'roland', 'ronald'], + 'roncet': ['conter', 'cornet', 'cronet', 'roncet'], + 'ronco': ['conor', 'croon', 'ronco'], + 'rond': ['dorn', 'rond'], + 'rondache': ['anchored', 'rondache'], + 'ronde': ['drone', 'ronde'], + 'rondeau': ['rondeau', 'unoared'], + 'rondel': ['rondel', 'rondle'], + 'rondelet': ['redolent', 'rondelet'], + 'rondeletia': ['delineator', 'rondeletia'], + 'rondelle': ['enrolled', 'rondelle'], + 'rondle': ['rondel', 'rondle'], + 'rondo': ['donor', 'rondo'], + 
'rondure': ['rondure', 'rounder', 'unorder'], + 'rone': ['oner', 'rone'], + 'ronga': ['angor', + 'argon', + 'goran', + 'grano', + 'groan', + 'nagor', + 'orang', + 'organ', + 'rogan', + 'ronga'], + 'rood': ['door', 'odor', 'oord', 'rood'], + 'roodstone': ['doorstone', 'roodstone'], + 'roofer': ['reroof', 'roofer'], + 'rooflet': ['footler', 'rooflet'], + 'rook': ['kroo', 'rook'], + 'rooker': ['korero', 'rooker'], + 'rool': ['loro', 'olor', 'orlo', 'rool'], + 'room': ['moor', 'moro', 'room'], + 'roomage': ['moorage', 'roomage'], + 'roomed': ['doomer', 'mooder', 'redoom', 'roomed'], + 'roomer': ['romero', 'roomer'], + 'roomlet': ['roomlet', 'tremolo'], + 'roomstead': ['astrodome', 'roomstead'], + 'roomward': ['roomward', 'wardroom'], + 'roomy': ['moory', 'roomy'], + 'roost': ['roost', 'torso'], + 'root': ['root', 'roto', 'toro'], + 'rooter': ['reroot', 'rooter', 'torero'], + 'rootle': ['looter', 'retool', 'rootle', 'tooler'], + 'rootlet': ['rootlet', 'tootler'], + 'rootworm': ['moorwort', 'rootworm', 'tomorrow', 'wormroot'], + 'rope': ['pore', 'rope'], + 'ropeable': ['operable', 'ropeable'], + 'ropelike': ['porelike', 'ropelike'], + 'ropeman': ['manrope', 'ropeman'], + 'roper': ['porer', 'prore', 'roper'], + 'ropes': ['poser', 'prose', 'ropes', 'spore'], + 'ropiness': ['poriness', 'pression', 'ropiness'], + 'roping': ['poring', 'roping'], + 'ropp': ['prop', 'ropp'], + 'ropy': ['pory', 'pyro', 'ropy'], + 'roquet': ['quoter', 'roquet', 'torque'], + 'rosa': ['asor', 'rosa', 'soar', 'sora'], + 'rosabel': ['borlase', 'labrose', 'rosabel'], + 'rosal': ['rosal', 'solar', 'soral'], + 'rosales': ['lassoer', 'oarless', 'rosales'], + 'rosalie': ['rosalie', 'seriola'], + 'rosaniline': ['enaliornis', 'rosaniline'], + 'rosated': ['rosated', 'torsade'], + 'rose': ['eros', 'rose', 'sero', 'sore'], + 'roseal': ['roseal', 'solera'], + 'rosed': ['doser', 'rosed'], + 'rosehead': ['rosehead', 'sorehead'], + 'roseine': ['erinose', 'roseine'], + 'rosel': ['loser', 'orsel', 'rosel', 'soler'], + 'roselite': ['literose', 'roselite', 'tirolese'], + 'roselle': ['orselle', 'roselle'], + 'roseola': ['aerosol', 'roseola'], + 'roset': ['roset', 'rotse', 'soter', 'stero', 'store', 'torse'], + 'rosetan': ['noreast', 'rosetan', 'seatron', 'senator', 'treason'], + 'rosetime': ['rosetime', 'timorese', 'tiresome'], + 'rosetta': ['retoast', 'rosetta', 'stoater', 'toaster'], + 'rosette': ['rosette', 'tetrose'], + 'rosetum': ['oestrum', 'rosetum'], + 'rosety': ['oyster', 'rosety'], + 'rosin': ['ornis', 'rosin'], + 'rosinate': ['arsonite', 'asterion', 'oestrian', 'rosinate', 'serotina'], + 'rosine': ['rosine', 'senior', 'soneri'], + 'rosiness': ['insessor', 'rosiness'], + 'rosmarine': ['morrisean', 'rosmarine'], + 'rosolite': ['oestriol', 'rosolite'], + 'rosorial': ['rosorial', 'sororial'], + 'rossite': ['rossite', 'sorites'], + 'rostel': ['relost', 'reslot', 'rostel', 'sterol', 'torsel'], + 'roster': ['resort', 'roster', 'sorter', 'storer'], + 'rostra': ['rostra', 'sartor'], + 'rostrate': ['rostrate', 'trostera'], + 'rosulate': ['oestrual', 'rosulate'], + 'rosy': ['rosy', 'sory'], + 'rot': ['ort', 'rot', 'tor'], + 'rota': ['rota', 'taro', 'tora'], + 'rotacism': ['acrotism', 'rotacism'], + 'rotal': ['latro', 'rotal', 'toral'], + 'rotala': ['aortal', 'rotala'], + 'rotalian': ['notarial', 'rational', 'rotalian'], + 'rotan': ['orant', 'rotan', 'toran', 'trona'], + 'rotanev': ['rotanev', 'venator'], + 'rotarian': ['rotarian', 'tornaria'], + 'rotate': ['rotate', 'tetrao'], + 'rotch': ['chort', 'rotch', 'torch'], + 'rote': ['rote', 'tore'], 
+ 'rotella': ['reallot', 'rotella', 'tallero'], + 'rotge': ['ergot', 'rotge'], + 'rother': ['rhetor', 'rother'], + 'roto': ['root', 'roto', 'toro'], + 'rotse': ['roset', 'rotse', 'soter', 'stero', 'store', 'torse'], + 'rottan': ['attorn', 'ratton', 'rottan'], + 'rotten': ['rotten', 'terton'], + 'rotter': ['retort', 'retrot', 'rotter'], + 'rottle': ['lotter', 'rottle', 'tolter'], + 'rotula': ['rotula', 'torula'], + 'rotulian': ['rotulian', 'uranotil'], + 'rotuliform': ['rotuliform', 'toruliform'], + 'rotulus': ['rotulus', 'torulus'], + 'rotund': ['rotund', 'untrod'], + 'rotunda': ['rotunda', 'tandour'], + 'rotundate': ['rotundate', 'unrotated'], + 'rotundifoliate': ['rotundifoliate', 'titanofluoride'], + 'rotundo': ['orotund', 'rotundo'], + 'roub': ['buro', 'roub'], + 'roud': ['dour', 'duro', 'ordu', 'roud'], + 'rouge': ['orgue', 'rogue', 'rouge'], + 'rougeot': ['outgoer', 'rougeot'], + 'roughen': ['enrough', 'roughen'], + 'roughie': ['higuero', 'roughie'], + 'rouky': ['rouky', 'yurok'], + 'roulade': ['roulade', 'urodela'], + 'rounce': ['conure', 'rounce', 'uncore'], + 'rounded': ['redound', 'rounded', 'underdo'], + 'roundel': ['durenol', 'lounder', 'roundel'], + 'rounder': ['rondure', 'rounder', 'unorder'], + 'roundhead': ['roundhead', 'unhoarded'], + 'roundseam': ['meandrous', 'roundseam'], + 'roundup': ['roundup', 'unproud'], + 'roup': ['pour', 'roup'], + 'rouper': ['pourer', 'repour', 'rouper'], + 'roupet': ['pouter', 'roupet', 'troupe'], + 'rousedness': ['rousedness', 'souredness'], + 'rouser': ['rouser', 'sourer'], + 'rousing': ['nigrous', 'rousing', 'souring'], + 'rousseau': ['eosaurus', 'rousseau'], + 'roust': ['roust', 'rusot', 'stour', 'sutor', 'torus'], + 'rouster': ['rouster', 'trouser'], + 'rousting': ['rousting', 'stouring'], + 'rout': ['rout', 'toru', 'tour'], + 'route': ['outer', 'outre', 'route'], + 'router': ['retour', 'router', 'tourer'], + 'routh': ['routh', 'throu'], + 'routhie': ['outhire', 'routhie'], + 'routine': ['routine', 'tueiron'], + 'routing': ['outgrin', 'outring', 'routing', 'touring'], + 'routinist': ['introitus', 'routinist'], + 'rove': ['over', 'rove'], + 'rovet': ['overt', 'rovet', 'torve', 'trove', 'voter'], + 'row': ['row', 'wro'], + 'rowdily': ['rowdily', 'wordily'], + 'rowdiness': ['rowdiness', 'wordiness'], + 'rowdy': ['dowry', 'rowdy', 'wordy'], + 'rowed': ['dower', 'rowed'], + 'rowel': ['lower', 'owler', 'rowel'], + 'rowelhead': ['rowelhead', 'wheelroad'], + 'rowen': ['owner', 'reown', 'rowen'], + 'rower': ['rerow', 'rower'], + 'rowet': ['rowet', 'tower', 'wrote'], + 'rowing': ['ingrow', 'rowing'], + 'rowlet': ['rowlet', 'trowel', 'wolter'], + 'rowley': ['lowery', 'owlery', 'rowley', 'yowler'], + 'roxy': ['oryx', 'roxy'], + 'roy': ['ory', 'roy', 'yor'], + 'royalist': ['royalist', 'solitary'], + 'royet': ['royet', 'toyer'], + 'royt': ['royt', 'ryot', 'tory', 'troy', 'tyro'], + 'rua': ['aru', 'rua', 'ura'], + 'ruana': ['anura', 'ruana'], + 'rub': ['bur', 'rub'], + 'rubasse': ['rubasse', 'surbase'], + 'rubato': ['outbar', 'rubato', 'tabour'], + 'rubbed': ['dubber', 'rubbed'], + 'rubble': ['burble', 'lubber', 'rubble'], + 'rubbler': ['burbler', 'rubbler'], + 'rubbly': ['burbly', 'rubbly'], + 'rube': ['bure', 'reub', 'rube'], + 'rubella': ['rubella', 'rulable'], + 'rubescent': ['rubescent', 'subcenter'], + 'rubiate': ['abiuret', 'aubrite', 'biurate', 'rubiate'], + 'rubiator': ['rubiator', 'torrubia'], + 'rubican': ['brucina', 'rubican'], + 'rubied': ['burdie', 'buried', 'rubied'], + 'rubification': ['rubification', 'urbification'], + 'rubify': 
['rubify', 'urbify'], + 'rubine': ['burnie', 'rubine'], + 'ruble': ['bluer', 'brule', 'burel', 'ruble'], + 'rubor': ['burro', 'robur', 'rubor'], + 'rubrical': ['bicrural', 'rubrical'], + 'ruby': ['bury', 'ruby'], + 'ructation': ['anticourt', 'curtation', 'ructation'], + 'ruction': ['courtin', 'ruction'], + 'rud': ['rud', 'urd'], + 'rudas': ['rudas', 'sudra'], + 'ruddle': ['dudler', 'ruddle'], + 'rude': ['duer', 'dure', 'rude', 'urde'], + 'rudish': ['hurdis', 'rudish'], + 'rudista': ['dasturi', 'rudista'], + 'rudity': ['durity', 'rudity'], + 'rue': ['rue', 'ure'], + 'ruen': ['renu', 'ruen', 'rune'], + 'ruffed': ['duffer', 'ruffed'], + 'rufter': ['returf', 'rufter'], + 'rug': ['gur', 'rug'], + 'ruga': ['gaur', 'guar', 'ruga'], + 'rugate': ['argute', 'guetar', 'rugate', 'tuareg'], + 'rugged': ['grudge', 'rugged'], + 'ruggle': ['gurgle', 'lugger', 'ruggle'], + 'rugose': ['grouse', 'rugose'], + 'ruinate': ['ruinate', 'taurine', 'uranite', 'urinate'], + 'ruination': ['ruination', 'urination'], + 'ruinator': ['ruinator', 'urinator'], + 'ruined': ['diurne', 'inured', 'ruined', 'unride'], + 'ruing': ['irgun', 'ruing', 'unrig'], + 'ruinous': ['ruinous', 'urinous'], + 'ruinousness': ['ruinousness', 'urinousness'], + 'rulable': ['rubella', 'rulable'], + 'rule': ['lure', 'rule'], + 'ruledom': ['remould', 'ruledom'], + 'ruler': ['lurer', 'ruler'], + 'ruling': ['ruling', 'urling'], + 'rulingly': ['luringly', 'rulingly'], + 'rum': ['mru', 'rum'], + 'rumal': ['mural', 'rumal'], + 'ruman': ['muran', 'ruman', 'unarm', 'unram', 'urman'], + 'rumble': ['lumber', 'rumble', 'umbrel'], + 'rumelian': ['lemurian', 'malurine', 'rumelian'], + 'rumex': ['murex', 'rumex'], + 'ruminant': ['nutramin', 'ruminant'], + 'ruminator': ['antirumor', 'ruminator'], + 'rumly': ['murly', 'rumly'], + 'rumple': ['lumper', 'plumer', 'replum', 'rumple'], + 'run': ['run', 'urn'], + 'runback': ['backrun', 'runback'], + 'runby': ['burny', 'runby'], + 'runch': ['churn', 'runch'], + 'runcinate': ['encurtain', 'runcinate', 'uncertain'], + 'rundale': ['launder', 'rundale'], + 'rundi': ['rundi', 'unrid'], + 'rundlet': ['rundlet', 'trundle'], + 'rune': ['renu', 'ruen', 'rune'], + 'runed': ['runed', 'under', 'unred'], + 'runer': ['rerun', 'runer'], + 'runfish': ['furnish', 'runfish'], + 'rung': ['grun', 'rung'], + 'runic': ['curin', 'incur', 'runic'], + 'runically': ['runically', 'unlyrical'], + 'runite': ['runite', 'triune', 'uniter', 'untire'], + 'runkly': ['knurly', 'runkly'], + 'runlet': ['runlet', 'turnel'], + 'runnet': ['runnet', 'tunner', 'unrent'], + 'runout': ['outrun', 'runout'], + 'runover': ['overrun', 'runover'], + 'runt': ['runt', 'trun', 'turn'], + 'runted': ['runted', 'tunder', 'turned'], + 'runtee': ['neuter', 'retune', 'runtee', 'tenure', 'tureen'], + 'runway': ['runway', 'unwary'], + 'rupa': ['prau', 'rupa'], + 'rupee': ['puree', 'rupee'], + 'rupestrian': ['rupestrian', 'supertrain'], + 'rupiah': ['hairup', 'rupiah'], + 'rural': ['rural', 'urlar'], + 'rus': ['rus', 'sur', 'urs'], + 'rusa': ['rusa', 'saur', 'sura', 'ursa', 'usar'], + 'ruscus': ['cursus', 'ruscus'], + 'ruse': ['ruse', 'suer', 'sure', 'user'], + 'rush': ['rhus', 'rush'], + 'rushen': ['reshun', 'rushen'], + 'rusine': ['insure', 'rusine', 'ursine'], + 'rusma': ['musar', 'ramus', 'rusma', 'surma'], + 'rusot': ['roust', 'rusot', 'stour', 'sutor', 'torus'], + 'russelia': ['russelia', 'siruelas'], + 'russet': ['russet', 'tusser'], + 'russify': ['fissury', 'russify'], + 'russine': ['russine', 'serinus', 'sunrise'], + 'rustable': ['baluster', 'rustable'], + 'rustic': 
['citrus', 'curtis', 'rictus', 'rustic'], + 'rusticial': ['curialist', 'rusticial'], + 'rusticly': ['crustily', 'rusticly'], + 'rusticness': ['crustiness', 'rusticness'], + 'rustle': ['luster', 'result', 'rustle', 'sutler', 'ulster'], + 'rustling': ['lustring', 'rustling'], + 'rustly': ['rustly', 'sultry'], + 'rut': ['rut', 'tur'], + 'ruta': ['ruta', 'taur'], + 'rutch': ['cruth', 'rutch'], + 'rutelian': ['lutrinae', 'retinula', 'rutelian', 'tenurial'], + 'rutelinae': ['lineature', 'rutelinae'], + 'ruth': ['hurt', 'ruth'], + 'ruthenian': ['hunterian', 'ruthenian'], + 'ruther': ['hurter', 'ruther'], + 'ruthful': ['hurtful', 'ruthful'], + 'ruthfully': ['hurtfully', 'ruthfully'], + 'ruthfulness': ['hurtfulness', 'ruthfulness'], + 'ruthless': ['hurtless', 'ruthless'], + 'ruthlessly': ['hurtlessly', 'ruthlessly'], + 'ruthlessness': ['hurtlessness', 'ruthlessness'], + 'rutilant': ['rutilant', 'turntail'], + 'rutinose': ['rutinose', 'tursenoi'], + 'rutter': ['rutter', 'turret'], + 'rutyl': ['rutyl', 'truly'], + 'rutylene': ['neuterly', 'rutylene'], + 'ryal': ['aryl', 'lyra', 'ryal', 'yarl'], + 'ryder': ['derry', 'redry', 'ryder'], + 'rye': ['rye', 'yer'], + 'ryen': ['ryen', 'yern'], + 'ryot': ['royt', 'ryot', 'tory', 'troy', 'tyro'], + 'rype': ['prey', 'pyre', 'rype'], + 'rytina': ['rytina', 'trainy', 'tyrian'], + 'sa': ['as', 'sa'], + 'saa': ['asa', 'saa'], + 'saan': ['anas', 'ansa', 'saan'], + 'sab': ['bas', 'sab'], + 'saba': ['abas', 'saba'], + 'sabal': ['balas', 'balsa', 'basal', 'sabal'], + 'saban': ['nasab', 'saban'], + 'sabanut': ['sabanut', 'sabutan', 'tabanus'], + 'sabe': ['base', 'besa', 'sabe', 'seba'], + 'sabeca': ['casabe', 'sabeca'], + 'sabella': ['basella', 'sabella', 'salable'], + 'sabelli': ['sabelli', 'sebilla'], + 'sabellid': ['sabellid', 'slidable'], + 'saber': ['barse', 'besra', 'saber', 'serab'], + 'sabered': ['debaser', 'sabered'], + 'sabian': ['sabian', 'sabina'], + 'sabina': ['sabian', 'sabina'], + 'sabino': ['basion', 'bonsai', 'sabino'], + 'sabir': ['baris', 'sabir'], + 'sable': ['blase', 'sable'], + 'saboraim': ['ambrosia', 'saboraim'], + 'sabot': ['basto', 'boast', 'sabot'], + 'sabotine': ['obeisant', 'sabotine'], + 'sabromin': ['ambrosin', 'barosmin', 'sabromin'], + 'sabulite': ['sabulite', 'suitable'], + 'sabutan': ['sabanut', 'sabutan', 'tabanus'], + 'sacalait': ['castalia', 'sacalait'], + 'saccade': ['cascade', 'saccade'], + 'saccomyian': ['saccomyian', 'saccomyina'], + 'saccomyina': ['saccomyian', 'saccomyina'], + 'sacculoutricular': ['sacculoutricular', 'utriculosaccular'], + 'sacellum': ['camellus', 'sacellum'], + 'sachem': ['sachem', 'schema'], + 'sachet': ['chaste', 'sachet', 'scathe', 'scheat'], + 'sacian': ['ascian', 'sacian', 'scania', 'sicana'], + 'sack': ['cask', 'sack'], + 'sackbut': ['sackbut', 'subtack'], + 'sacken': ['sacken', 'skance'], + 'sacker': ['resack', 'sacker', 'screak'], + 'sacking': ['casking', 'sacking'], + 'sacklike': ['casklike', 'sacklike'], + 'sacque': ['casque', 'sacque'], + 'sacral': ['lascar', 'rascal', 'sacral', 'scalar'], + 'sacrification': ['sacrification', 'scarification'], + 'sacrificator': ['sacrificator', 'scarificator'], + 'sacripant': ['sacripant', 'spartanic'], + 'sacro': ['arcos', 'crosa', 'oscar', 'sacro'], + 'sacrodorsal': ['dorsosacral', 'sacrodorsal'], + 'sacroischiac': ['isosaccharic', 'sacroischiac'], + 'sacrolumbal': ['lumbosacral', 'sacrolumbal'], + 'sacrovertebral': ['sacrovertebral', 'vertebrosacral'], + 'sad': ['das', 'sad'], + 'sadden': ['desand', 'sadden', 'sanded'], + 'saddling': ['addlings', 'saddling'], + 
'sadh': ['dash', 'sadh', 'shad'], + 'sadhe': ['deash', 'hades', 'sadhe', 'shade'], + 'sadic': ['asdic', 'sadic'], + 'sadie': ['aides', 'aside', 'sadie'], + 'sadiron': ['sadiron', 'sardoin'], + 'sado': ['dosa', 'sado', 'soda'], + 'sadr': ['sadr', 'sard'], + 'saeima': ['asemia', 'saeima'], + 'saernaite': ['arseniate', 'saernaite'], + 'saeter': ['asteer', + 'easter', + 'eastre', + 'reseat', + 'saeter', + 'seater', + 'staree', + 'teaser', + 'teresa'], + 'saeume': ['amusee', 'saeume'], + 'safar': ['safar', 'saraf'], + 'safely': ['fayles', 'safely'], + 'saft': ['fast', 'saft'], + 'sag': ['gas', 'sag'], + 'sagai': ['sagai', 'saiga'], + 'sagene': ['sagene', 'senega'], + 'sagger': ['sagger', 'seggar'], + 'sagless': ['gasless', 'glasses', 'sagless'], + 'sago': ['sago', 'soga'], + 'sagoin': ['gosain', 'isagon', 'sagoin'], + 'sagra': ['argas', 'sagra'], + 'sah': ['ash', 'sah', 'sha'], + 'saharic': ['arachis', 'asiarch', 'saharic'], + 'sahh': ['hash', 'sahh', 'shah'], + 'sahidic': ['hasidic', 'sahidic'], + 'sahme': ['sahme', 'shame'], + 'saho': ['saho', 'shoa'], + 'sai': ['sai', 'sia'], + 'saic': ['acis', 'asci', 'saic'], + 'said': ['dais', 'dasi', 'disa', 'said', 'sida'], + 'saidi': ['saidi', 'saiid'], + 'saiga': ['sagai', 'saiga'], + 'saiid': ['saidi', 'saiid'], + 'sail': ['lasi', 'lias', 'lisa', 'sail', 'sial'], + 'sailable': ['isabella', 'sailable'], + 'sailage': ['algesia', 'sailage'], + 'sailed': ['aisled', 'deasil', 'ladies', 'sailed'], + 'sailer': ['israel', 'relais', 'resail', 'sailer', 'serail', 'serial'], + 'sailing': ['aisling', 'sailing'], + 'sailoring': ['sailoring', 'signorial'], + 'sailsman': ['nasalism', 'sailsman'], + 'saily': ['islay', 'saily'], + 'saim': ['mias', 'saim', 'siam', 'sima'], + 'sain': ['anis', 'nais', 'nasi', 'nias', 'sain', 'sina'], + 'sainfoin': ['sainfoin', 'sinfonia'], + 'saint': ['saint', 'satin', 'stain'], + 'saintdom': ['donatism', 'saintdom'], + 'sainted': ['destain', 'instead', 'sainted', 'satined'], + 'saintless': ['saintless', 'saltiness', 'slatiness', 'stainless'], + 'saintlike': ['kleistian', 'saintlike', 'satinlike'], + 'saintly': ['nastily', 'saintly', 'staynil'], + 'saintship': ['hispanist', 'saintship'], + 'saip': ['apis', 'pais', 'pasi', 'saip'], + 'saiph': ['aphis', 'apish', 'hispa', 'saiph', 'spahi'], + 'sair': ['rais', 'sair', 'sari'], + 'saite': ['saite', 'taise'], + 'saithe': ['saithe', 'tashie', 'teaish'], + 'saitic': ['isatic', 'saitic'], + 'saivism': ['saivism', 'sivaism'], + 'sak': ['ask', 'sak'], + 'saka': ['asak', 'kasa', 'saka'], + 'sake': ['sake', 'seak'], + 'sakeen': ['sakeen', 'sekane'], + 'sakel': ['alkes', 'sakel', 'slake'], + 'saker': ['asker', 'reask', 'saker', 'sekar'], + 'sakeret': ['restake', 'sakeret'], + 'sakha': ['kasha', 'khasa', 'sakha', 'shaka'], + 'saki': ['saki', 'siak', 'sika'], + 'sal': ['las', 'sal', 'sla'], + 'salable': ['basella', 'sabella', 'salable'], + 'salably': ['basally', 'salably'], + 'salaceta': ['catalase', 'salaceta'], + 'salacot': ['coastal', 'salacot'], + 'salading': ['salading', 'salangid'], + 'salago': ['aglaos', 'salago'], + 'salamandarin': ['salamandarin', 'salamandrian', 'salamandrina'], + 'salamandrian': ['salamandarin', 'salamandrian', 'salamandrina'], + 'salamandrina': ['salamandarin', 'salamandrian', 'salamandrina'], + 'salangid': ['salading', 'salangid'], + 'salariat': ['alastair', 'salariat'], + 'salat': ['atlas', 'salat', 'salta'], + 'salay': ['asyla', 'salay', 'sayal'], + 'sale': ['elsa', 'sale', 'seal', 'slae'], + 'salele': ['salele', 'sallee'], + 'salep': ['elaps', + 'lapse', + 'lepas', + 
'pales', + 'salep', + 'saple', + 'sepal', + 'slape', + 'spale', + 'speal'], + 'saleratus': ['assaulter', 'reassault', 'saleratus'], + 'salian': ['anisal', 'nasial', 'salian', 'salina'], + 'salic': ['lacis', 'salic'], + 'salicin': ['incisal', 'salicin'], + 'salicylide': ['salicylide', 'scylliidae'], + 'salience': ['salience', 'secaline'], + 'salient': ['elastin', 'salient', 'saltine', 'slainte'], + 'salimeter': ['misrelate', 'salimeter'], + 'salimetry': ['mysterial', 'salimetry'], + 'salina': ['anisal', 'nasial', 'salian', 'salina'], + 'saline': ['alsine', 'neslia', 'saline', 'selina', 'silane'], + 'salinoterreous': ['salinoterreous', 'soliterraneous'], + 'salite': ['isleta', 'litsea', 'salite', 'stelai'], + 'salited': ['distale', 'salited'], + 'saliva': ['saliva', 'salvia'], + 'salivan': ['salivan', 'slavian'], + 'salivant': ['navalist', 'salivant'], + 'salivate': ['salivate', 'vestalia'], + 'salle': ['salle', 'sella'], + 'sallee': ['salele', 'sallee'], + 'sallet': ['sallet', 'stella', 'talles'], + 'sallow': ['sallow', 'swallo'], + 'salm': ['alms', 'salm', 'slam'], + 'salma': ['salma', 'samal'], + 'salmine': ['malines', 'salmine', 'selamin', 'seminal'], + 'salmis': ['missal', 'salmis'], + 'salmo': ['salmo', 'somal'], + 'salmonsite': ['assoilment', 'salmonsite'], + 'salome': ['melosa', 'salome', 'semola'], + 'salometer': ['elastomer', 'salometer'], + 'salon': ['salon', 'sloan', 'solan'], + 'saloon': ['alonso', 'alsoon', 'saloon'], + 'salp': ['salp', 'slap'], + 'salpa': ['palas', 'salpa'], + 'salpidae': ['palisade', 'salpidae'], + 'salpoid': ['psaloid', 'salpoid'], + 'salt': ['last', 'salt', 'slat'], + 'salta': ['atlas', 'salat', 'salta'], + 'saltary': ['astylar', 'saltary'], + 'saltation': ['saltation', 'stational'], + 'salted': ['desalt', 'salted'], + 'saltee': ['ateles', 'saltee', 'sealet', 'stelae', 'teasel'], + 'salter': ['laster', + 'lastre', + 'rastle', + 'relast', + 'resalt', + 'salter', + 'slater', + 'stelar'], + 'saltern': ['saltern', 'starnel', 'sternal'], + 'saltery': ['saltery', 'stearyl'], + 'saltier': ['aletris', 'alister', 'listera', 'realist', 'saltier'], + 'saltine': ['elastin', 'salient', 'saltine', 'slainte'], + 'saltiness': ['saintless', 'saltiness', 'slatiness', 'stainless'], + 'salting': ['anglist', 'lasting', 'salting', 'slating', 'staling'], + 'saltish': ['saltish', 'slatish'], + 'saltly': ['lastly', 'saltly'], + 'saltness': ['lastness', 'saltness'], + 'saltometer': ['rattlesome', 'saltometer'], + 'saltus': ['saltus', 'tussal'], + 'saltwife': ['flatwise', 'saltwife'], + 'salty': ['lasty', 'salty', 'slaty'], + 'salung': ['lugnas', 'salung'], + 'salute': ['salute', 'setula'], + 'saluter': ['arustle', 'estrual', 'saluter', 'saulter'], + 'salva': ['salva', 'valsa', 'vasal'], + 'salve': ['salve', 'selva', 'slave', 'valse'], + 'salver': ['salver', 'serval', 'slaver', 'versal'], + 'salvia': ['saliva', 'salvia'], + 'salvy': ['salvy', 'sylva'], + 'sam': ['mas', 'sam', 'sma'], + 'samal': ['salma', 'samal'], + 'saman': ['manas', 'saman'], + 'samani': ['samani', 'samian'], + 'samaritan': ['samaritan', 'sarmatian'], + 'samas': ['amass', 'assam', 'massa', 'samas'], + 'sambal': ['balsam', 'sambal'], + 'sambo': ['ambos', 'sambo'], + 'same': ['asem', 'mesa', 'same', 'seam'], + 'samel': ['amsel', 'melas', 'mesal', 'samel'], + 'samely': ['measly', 'samely'], + 'samen': ['manes', 'manse', 'mensa', 'samen', 'senam'], + 'samh': ['mash', 'samh', 'sham'], + 'samian': ['samani', 'samian'], + 'samiel': ['amiles', 'asmile', 'mesail', 'mesial', 'samiel'], + 'samir': ['maris', 'marsi', 'samir', 
'simar'], + 'samisen': ['samisen', 'samsien'], + 'samish': ['samish', 'sisham'], + 'samite': ['samite', 'semita', 'tamise', 'teaism'], + 'sammer': ['mamers', 'sammer'], + 'sammier': ['amerism', 'asimmer', 'sammier'], + 'samnani': ['ananism', 'samnani'], + 'samnite': ['atenism', 'inmeats', 'insteam', 'samnite'], + 'samoan': ['monasa', 'samoan'], + 'samothere': ['heartsome', 'samothere'], + 'samoyed': ['samoyed', 'someday'], + 'samphire': ['samphire', 'seraphim'], + 'sampi': ['apism', 'sampi'], + 'sampler': ['lampers', 'sampler'], + 'samsien': ['samisen', 'samsien'], + 'samskara': ['makassar', 'samskara'], + 'samucan': ['manacus', 'samucan'], + 'samuel': ['amelus', 'samuel'], + 'sanability': ['insatiably', 'sanability'], + 'sanai': ['asian', 'naias', 'sanai'], + 'sanand': ['sanand', 'sandan'], + 'sanche': ['encash', 'sanche'], + 'sanct': ['sanct', 'scant'], + 'sanction': ['canonist', 'sanction', 'sonantic'], + 'sanctioner': ['resanction', 'sanctioner'], + 'sanctity': ['sanctity', 'scantity'], + 'sandak': ['sandak', 'skanda'], + 'sandan': ['sanand', 'sandan'], + 'sandarac': ['carandas', 'sandarac'], + 'sandawe': ['sandawe', 'weasand'], + 'sanded': ['desand', 'sadden', 'sanded'], + 'sanderling': ['sanderling', 'slandering'], + 'sandflower': ['flandowser', 'sandflower'], + 'sandhi': ['danish', 'sandhi'], + 'sandra': ['nasard', 'sandra'], + 'sandworm': ['sandworm', 'swordman', 'wordsman'], + 'sane': ['anes', 'sane', 'sean'], + 'sanetch': ['chasten', 'sanetch'], + 'sang': ['sang', 'snag'], + 'sanga': ['gasan', 'sanga'], + 'sangar': ['argans', 'sangar'], + 'sangei': ['easing', 'sangei'], + 'sanger': ['angers', 'sanger', 'serang'], + 'sangrel': ['sangrel', 'snagrel'], + 'sanhita': ['ashanti', 'sanhita', 'shaitan', 'thasian'], + 'sanicle': ['celsian', 'escalin', 'sanicle', 'secalin'], + 'sanies': ['anesis', 'anseis', 'sanies', 'sansei', 'sasine'], + 'sanious': ['sanious', 'suasion'], + 'sanitate': ['astatine', 'sanitate'], + 'sanitize': ['sanitize', 'satinize'], + 'sanity': ['sanity', 'satiny'], + 'sank': ['kans', 'sank'], + 'sankha': ['kashan', 'sankha'], + 'sannup': ['pannus', 'sannup', 'unsnap', 'unspan'], + 'sanpoil': ['sanpoil', 'spaniol'], + 'sansei': ['anesis', 'anseis', 'sanies', 'sansei', 'sasine'], + 'sansi': ['sansi', 'sasin'], + 'sant': ['nast', 'sant', 'stan'], + 'santa': ['santa', 'satan'], + 'santal': ['aslant', 'lansat', 'natals', 'santal'], + 'santali': ['lanista', 'santali'], + 'santalin': ['annalist', 'santalin'], + 'santee': ['ensate', 'enseat', 'santee', 'sateen', 'senate'], + 'santiago': ['agonista', 'santiago'], + 'santimi': ['animist', 'santimi'], + 'santir': ['instar', 'santir', 'strain'], + 'santon': ['santon', 'sonant', 'stanno'], + 'santorinite': ['reinstation', 'santorinite'], + 'sap': ['asp', 'sap', 'spa'], + 'sapan': ['pasan', 'sapan'], + 'sapek': ['sapek', 'speak'], + 'saperda': ['aspread', 'saperda'], + 'saphena': ['aphanes', 'saphena'], + 'sapid': ['sapid', 'spaid'], + 'sapient': ['panties', 'sapient', 'spinate'], + 'sapiential': ['antilipase', 'sapiential'], + 'sapin': ['pisan', 'sapin', 'spina'], + 'sapinda': ['anapsid', 'sapinda'], + 'saple': ['elaps', + 'lapse', + 'lepas', + 'pales', + 'salep', + 'saple', + 'sepal', + 'slape', + 'spale', + 'speal'], + 'sapling': ['lapsing', 'sapling'], + 'sapo': ['asop', 'sapo', 'soap'], + 'sapor': ['psora', 'sapor', 'sarpo'], + 'saporous': ['asporous', 'saporous'], + 'sapota': ['sapota', 'taposa'], + 'sapotilha': ['hapalotis', 'sapotilha'], + 'sapphire': ['papisher', 'sapphire'], + 'sapples': ['papless', 'sapples'], + 
'sapremia': ['aspermia', 'sapremia'], + 'sapremic': ['aspermic', 'sapremic'], + 'saprine': ['persian', 'prasine', 'saprine'], + 'saprolite': ['posterial', 'saprolite'], + 'saprolitic': ['polaristic', 'poristical', 'saprolitic'], + 'sapropel': ['prolapse', 'sapropel'], + 'sapropelic': ['periscopal', 'sapropelic'], + 'saprophagous': ['prasophagous', 'saprophagous'], + 'sapwort': ['postwar', 'sapwort'], + 'sar': ['ras', 'sar'], + 'sara': ['rasa', 'sara'], + 'saraad': ['saraad', 'sarada'], + 'sarada': ['saraad', 'sarada'], + 'saraf': ['safar', 'saraf'], + 'sarah': ['asarh', 'raash', 'sarah'], + 'saran': ['ansar', 'saran', 'sarna'], + 'sarangi': ['giansar', 'sarangi'], + 'sarcenet': ['reascent', 'sarcenet'], + 'sarcine': ['arsenic', 'cerasin', 'sarcine'], + 'sarcitis': ['sarcitis', 'triassic'], + 'sarcle': ['sarcle', 'scaler', 'sclera'], + 'sarcoadenoma': ['adenosarcoma', 'sarcoadenoma'], + 'sarcocarcinoma': ['carcinosarcoma', 'sarcocarcinoma'], + 'sarcoid': ['sarcoid', 'scaroid'], + 'sarcoline': ['censorial', 'sarcoline'], + 'sarcolite': ['alectoris', 'sarcolite', 'sclerotia', 'sectorial'], + 'sarcoplast': ['postsacral', 'sarcoplast'], + 'sarcotic': ['acrostic', 'sarcotic', 'socratic'], + 'sard': ['sadr', 'sard'], + 'sardian': ['andrias', 'sardian', 'sarinda'], + 'sardine': ['andries', 'isander', 'sardine'], + 'sardoin': ['sadiron', 'sardoin'], + 'sardonic': ['draconis', 'sardonic'], + 'sare': ['arse', 'rase', 'sare', 'sear', 'sera'], + 'sargonide': ['grandiose', 'sargonide'], + 'sari': ['rais', 'sair', 'sari'], + 'sarif': ['farsi', 'sarif'], + 'sarigue': ['ergusia', 'gerusia', 'sarigue'], + 'sarinda': ['andrias', 'sardian', 'sarinda'], + 'sarip': ['paris', 'parsi', 'sarip'], + 'sark': ['askr', 'kras', 'sark'], + 'sarkine': ['kerasin', 'sarkine'], + 'sarkit': ['rastik', 'sarkit', 'straik'], + 'sarmatian': ['samaritan', 'sarmatian'], + 'sarment': ['sarment', 'smarten'], + 'sarmentous': ['sarmentous', 'tarsonemus'], + 'sarna': ['ansar', 'saran', 'sarna'], + 'sarod': ['sarod', 'sorda'], + 'saron': ['arson', 'saron', 'sonar'], + 'saronic': ['arsonic', 'saronic'], + 'sarpo': ['psora', 'sapor', 'sarpo'], + 'sarra': ['arras', 'sarra'], + 'sarsenet': ['assenter', 'reassent', 'sarsenet'], + 'sarsi': ['arsis', 'sarsi'], + 'sart': ['sart', 'star', 'stra', 'tars', 'tsar'], + 'sartain': ['artisan', 'astrain', 'sartain', 'tsarina'], + 'sartish': ['sartish', 'shastri'], + 'sartor': ['rostra', 'sartor'], + 'sasan': ['nassa', 'sasan'], + 'sasin': ['sansi', 'sasin'], + 'sasine': ['anesis', 'anseis', 'sanies', 'sansei', 'sasine'], + 'sat': ['ast', 'sat'], + 'satan': ['santa', 'satan'], + 'satanical': ['castalian', 'satanical'], + 'satanism': ['mantissa', 'satanism'], + 'sate': ['ates', 'east', 'eats', 'sate', 'seat', 'seta'], + 'sateen': ['ensate', 'enseat', 'santee', 'sateen', 'senate'], + 'sateless': ['sateless', 'seatless'], + 'satelles': ['satelles', 'tessella'], + 'satellite': ['satellite', 'telestial'], + 'satiable': ['bisaltae', 'satiable'], + 'satiate': ['isatate', 'satiate', 'taetsia'], + 'satieno': ['aeonist', 'asiento', 'satieno'], + 'satient': ['atenist', 'instate', 'satient', 'steatin'], + 'satin': ['saint', 'satin', 'stain'], + 'satine': ['satine', 'tisane'], + 'satined': ['destain', 'instead', 'sainted', 'satined'], + 'satinette': ['enstatite', 'intestate', 'satinette'], + 'satinite': ['satinite', 'sittinae'], + 'satinize': ['sanitize', 'satinize'], + 'satinlike': ['kleistian', 'saintlike', 'satinlike'], + 'satiny': ['sanity', 'satiny'], + 'satire': ['satire', 'striae'], + 'satirical': 
['racialist', 'satirical'], + 'satirist': ['satirist', 'tarsitis'], + 'satrae': ['astare', 'satrae'], + 'satrapic': ['aspartic', 'satrapic'], + 'satron': ['asnort', 'satron'], + 'sattle': ['latest', 'sattle', 'taslet'], + 'sattva': ['sattva', 'tavast'], + 'saturable': ['balaustre', 'saturable'], + 'saturation': ['saturation', 'titanosaur'], + 'saturator': ['saturator', 'tartarous'], + 'saturn': ['saturn', 'unstar'], + 'saturnale': ['alaternus', 'saturnale'], + 'saturnalia': ['australian', 'saturnalia'], + 'saturnia': ['asturian', 'austrian', 'saturnia'], + 'saturnine': ['neustrian', 'saturnine', 'sturninae'], + 'saturnism': ['saturnism', 'surmisant'], + 'satyr': ['satyr', 'stary', 'stray', 'trasy'], + 'satyrlike': ['satyrlike', 'streakily'], + 'sauce': ['cause', 'sauce'], + 'sauceless': ['causeless', 'sauceless'], + 'sauceline': ['sauceline', 'seleucian'], + 'saucer': ['causer', 'saucer'], + 'sauger': ['sauger', 'usager'], + 'saugh': ['agush', 'saugh'], + 'saul': ['saul', 'sula'], + 'sauld': ['aldus', 'sauld'], + 'sault': ['latus', 'sault', 'talus'], + 'saulter': ['arustle', 'estrual', 'saluter', 'saulter'], + 'saum': ['masu', 'musa', 'saum'], + 'sauna': ['nasua', 'sauna'], + 'saur': ['rusa', 'saur', 'sura', 'ursa', 'usar'], + 'saura': ['arusa', 'saura', 'usara'], + 'saurian': ['saurian', 'suriana'], + 'saury': ['saury', 'surya'], + 'sausage': ['assuage', 'sausage'], + 'saut': ['saut', 'tasu', 'utas'], + 'sauve': ['sauve', 'suave'], + 'save': ['aves', 'save', 'vase'], + 'savin': ['savin', 'sivan'], + 'saviour': ['saviour', 'various'], + 'savor': ['savor', 'sorva'], + 'savored': ['oversad', 'savored'], + 'saw': ['saw', 'swa', 'was'], + 'sawah': ['awash', 'sawah'], + 'sawali': ['aswail', 'sawali'], + 'sawback': ['backsaw', 'sawback'], + 'sawbuck': ['bucksaw', 'sawbuck'], + 'sawer': ['resaw', 'sawer', 'seraw', 'sware', 'swear', 'warse'], + 'sawing': ['aswing', 'sawing'], + 'sawish': ['sawish', 'siwash'], + 'sawn': ['sawn', 'snaw', 'swan'], + 'sawt': ['sawt', 'staw', 'swat', 'taws', 'twas', 'wast'], + 'sawyer': ['sawyer', 'swayer'], + 'saxe': ['axes', 'saxe', 'seax'], + 'saxten': ['saxten', 'sextan'], + 'say': ['say', 'yas'], + 'sayal': ['asyla', 'salay', 'sayal'], + 'sayer': ['reasy', 'resay', 'sayer', 'seary'], + 'sayid': ['daisy', 'sayid'], + 'scabbler': ['scabbler', 'scrabble'], + 'scabies': ['abscise', 'scabies'], + 'scaddle': ['scaddle', 'scalded'], + 'scaean': ['anaces', 'scaean'], + 'scala': ['calas', 'casal', 'scala'], + 'scalar': ['lascar', 'rascal', 'sacral', 'scalar'], + 'scalded': ['scaddle', 'scalded'], + 'scale': ['alces', 'casel', 'scale'], + 'scalena': ['escalan', 'scalena'], + 'scalene': ['cleanse', 'scalene'], + 'scaler': ['sarcle', 'scaler', 'sclera'], + 'scallola': ['callosal', 'scallola'], + 'scalloper': ['procellas', 'scalloper'], + 'scaloni': ['nicolas', 'scaloni'], + 'scalp': ['clasp', 'scalp'], + 'scalper': ['clasper', 'reclasp', 'scalper'], + 'scalping': ['clasping', 'scalping'], + 'scalpture': ['prescutal', 'scalpture'], + 'scaly': ['aclys', 'scaly'], + 'scambler': ['scambler', 'scramble'], + 'scampish': ['scampish', 'scaphism'], + 'scania': ['ascian', 'sacian', 'scania', 'sicana'], + 'scant': ['sanct', 'scant'], + 'scantity': ['sanctity', 'scantity'], + 'scantle': ['asclent', 'scantle'], + 'scape': ['capes', 'scape', 'space'], + 'scapeless': ['scapeless', 'spaceless'], + 'scapha': ['pascha', 'scapha'], + 'scaphander': ['handscrape', 'scaphander'], + 'scaphism': ['scampish', 'scaphism'], + 'scaphite': ['paschite', 'pastiche', 'pistache', 'scaphite'], + 'scaphopod': 
['podoscaph', 'scaphopod'], + 'scapoid': ['psoadic', 'scapoid', 'sciapod'], + 'scapolite': ['alopecist', 'altiscope', 'epicostal', 'scapolite'], + 'scappler': ['scappler', 'scrapple'], + 'scapula': ['capsula', 'pascual', 'scapula'], + 'scapular': ['capsular', 'scapular'], + 'scapulated': ['capsulated', 'scapulated'], + 'scapulectomy': ['capsulectomy', 'scapulectomy'], + 'scarab': ['barsac', 'scarab'], + 'scarcement': ['marcescent', 'scarcement'], + 'scare': ['carse', 'caser', 'ceras', 'scare', 'scrae'], + 'scarification': ['sacrification', 'scarification'], + 'scarificator': ['sacrificator', 'scarificator'], + 'scarily': ['scarily', 'scraily'], + 'scarlet': ['scarlet', 'sclater'], + 'scarn': ['scarn', 'scran'], + 'scaroid': ['sarcoid', 'scaroid'], + 'scarp': ['craps', 'scarp', 'scrap'], + 'scarping': ['scarping', 'scraping'], + 'scart': ['scart', 'scrat'], + 'scarth': ['scarth', 'scrath', 'starch'], + 'scary': ['ascry', 'scary', 'scray'], + 'scase': ['casse', 'scase'], + 'scat': ['acts', 'cast', 'scat'], + 'scathe': ['chaste', 'sachet', 'scathe', 'scheat'], + 'scatterer': ['scatterer', 'streetcar'], + 'scaturient': ['incrustate', 'scaturient', 'scrutinate'], + 'scaum': ['camus', 'musca', 'scaum', 'sumac'], + 'scaur': ['cursa', 'scaur'], + 'scaut': ['scaut', 'scuta'], + 'scavel': ['calves', 'scavel'], + 'scawl': ['scawl', 'sclaw'], + 'sceat': ['caste', 'sceat'], + 'scene': ['cense', 'scene', 'sence'], + 'scenery': ['scenery', 'screeny'], + 'scented': ['descent', 'scented'], + 'scepter': ['respect', 'scepter', 'specter'], + 'sceptered': ['sceptered', 'spectered'], + 'scepterless': ['respectless', 'scepterless'], + 'sceptral': ['sceptral', 'scraplet', 'spectral'], + 'sceptry': ['precyst', 'sceptry', 'spectry'], + 'scerne': ['censer', 'scerne', 'screen', 'secern'], + 'schalmei': ['camelish', 'schalmei'], + 'scheat': ['chaste', 'sachet', 'scathe', 'scheat'], + 'schema': ['sachem', 'schema'], + 'schematic': ['catechism', 'schematic'], + 'scheme': ['scheme', 'smeech'], + 'schemer': ['chermes', 'schemer'], + 'scho': ['cosh', 'scho'], + 'scholae': ['oscheal', 'scholae'], + 'schone': ['cheson', 'chosen', 'schone'], + 'schooltime': ['chilostome', 'schooltime'], + 'schout': ['schout', 'scouth'], + 'schute': ['schute', 'tusche'], + 'sciaenoid': ['oniscidae', 'oscinidae', 'sciaenoid'], + 'scian': ['canis', 'scian'], + 'sciapod': ['psoadic', 'scapoid', 'sciapod'], + 'sciara': ['carisa', 'sciara'], + 'sciarid': ['cidaris', 'sciarid'], + 'sciatic': ['ascitic', 'sciatic'], + 'sciatical': ['ascitical', 'sciatical'], + 'scient': ['encist', 'incest', 'insect', 'scient'], + 'sciential': ['elasticin', 'inelastic', 'sciential'], + 'scillitan': ['scillitan', 'scintilla'], + 'scintilla': ['scillitan', 'scintilla'], + 'scintle': ['lentisc', 'scintle', 'stencil'], + 'scion': ['oscin', 'scion', 'sonic'], + 'sciot': ['ostic', 'sciot', 'stoic'], + 'sciotherical': ['ischiorectal', 'sciotherical'], + 'scious': ['scious', 'socius'], + 'scirenga': ['creasing', 'scirenga'], + 'scirpus': ['prussic', 'scirpus'], + 'scissortail': ['scissortail', 'solaristics'], + 'sciurine': ['incisure', 'sciurine'], + 'sciuroid': ['dioscuri', 'sciuroid'], + 'sclate': ['castle', 'sclate'], + 'sclater': ['scarlet', 'sclater'], + 'sclaw': ['scawl', 'sclaw'], + 'sclera': ['sarcle', 'scaler', 'sclera'], + 'sclere': ['sclere', 'screel'], + 'scleria': ['scleria', 'sercial'], + 'sclerite': ['sclerite', 'silcrete'], + 'sclerodermite': ['dermosclerite', 'sclerodermite'], + 'scleromata': ['clamatores', 'scleromata'], + 'sclerose': ['coreless', 
'sclerose'], + 'sclerospora': ['prolacrosse', 'sclerospora'], + 'sclerote': ['corselet', 'sclerote', 'selector'], + 'sclerotia': ['alectoris', 'sarcolite', 'sclerotia', 'sectorial'], + 'sclerotial': ['cloisteral', 'sclerotial'], + 'sclerotinia': ['intersocial', 'orleanistic', 'sclerotinia'], + 'sclerotium': ['closterium', 'sclerotium'], + 'sclerotomy': ['mycosterol', 'sclerotomy'], + 'scoad': ['cados', 'scoad'], + 'scob': ['bosc', 'scob'], + 'scobicular': ['scobicular', 'scrobicula'], + 'scolia': ['colias', 'scolia', 'social'], + 'scolion': ['scolion', 'solonic'], + 'scombrine': ['becrimson', 'scombrine'], + 'scone': ['cones', 'scone'], + 'scooper': ['coprose', 'scooper'], + 'scoot': ['coost', 'scoot'], + 'scopa': ['posca', 'scopa'], + 'scoparin': ['parsonic', 'scoparin'], + 'scope': ['copse', 'pecos', 'scope'], + 'scopidae': ['diascope', 'psocidae', 'scopidae'], + 'scopine': ['psocine', 'scopine'], + 'scopiped': ['podiceps', 'scopiped'], + 'scopoline': ['niloscope', 'scopoline'], + 'scorbutical': ['scorbutical', 'subcortical'], + 'scorbutically': ['scorbutically', 'subcortically'], + 'score': ['corse', 'score'], + 'scorer': ['scorer', 'sorcer'], + 'scoriae': ['coraise', 'scoriae'], + 'scorpidae': ['carpiodes', 'scorpidae'], + 'scorpiones': ['procession', 'scorpiones'], + 'scorpionic': ['orniscopic', 'scorpionic'], + 'scorse': ['cessor', 'crosse', 'scorse'], + 'scortation': ['cartoonist', 'scortation'], + 'scot': ['cost', 'scot'], + 'scotale': ['alecost', 'lactose', 'scotale', 'talcose'], + 'scote': ['coset', 'estoc', 'scote'], + 'scoter': ['corset', 'cortes', 'coster', 'escort', 'scoter', 'sector'], + 'scotia': ['isotac', 'scotia'], + 'scotism': ['cosmist', 'scotism'], + 'scotomatic': ['osmotactic', 'scotomatic'], + 'scotty': ['cytost', 'scotty'], + 'scoup': ['copus', 'scoup'], + 'scour': ['cours', 'scour'], + 'scoured': ['coursed', 'scoured'], + 'scourer': ['courser', 'scourer'], + 'scourge': ['scourge', 'scrouge'], + 'scourger': ['scourger', 'scrouger'], + 'scouring': ['coursing', 'scouring'], + 'scouth': ['schout', 'scouth'], + 'scrabble': ['scabbler', 'scrabble'], + 'scrabe': ['braces', 'scrabe'], + 'scrae': ['carse', 'caser', 'ceras', 'scare', 'scrae'], + 'scraily': ['scarily', 'scraily'], + 'scramble': ['scambler', 'scramble'], + 'scran': ['scarn', 'scran'], + 'scrap': ['craps', 'scarp', 'scrap'], + 'scrape': ['casper', 'escarp', 'parsec', 'scrape', 'secpar', 'spacer'], + 'scrapie': ['epacris', 'scrapie', 'serapic'], + 'scraping': ['scarping', 'scraping'], + 'scraplet': ['sceptral', 'scraplet', 'spectral'], + 'scrapple': ['scappler', 'scrapple'], + 'scrat': ['scart', 'scrat'], + 'scratcher': ['rescratch', 'scratcher'], + 'scrath': ['scarth', 'scrath', 'starch'], + 'scray': ['ascry', 'scary', 'scray'], + 'screak': ['resack', 'sacker', 'screak'], + 'screaming': ['germanics', 'screaming'], + 'screamy': ['armscye', 'screamy'], + 'scree': ['scree', 'secre'], + 'screel': ['sclere', 'screel'], + 'screen': ['censer', 'scerne', 'screen', 'secern'], + 'screenless': ['censerless', 'screenless'], + 'screeny': ['scenery', 'screeny'], + 'screet': ['resect', 'screet', 'secret'], + 'screwdrive': ['drivescrew', 'screwdrive'], + 'scrieve': ['cerevis', 'scrieve', 'service'], + 'scrike': ['scrike', 'sicker'], + 'scrip': ['crisp', 'scrip'], + 'scripee': ['precise', 'scripee'], + 'scripula': ['scripula', 'spicular'], + 'scrobicula': ['scobicular', 'scrobicula'], + 'scrota': ['arctos', 'castor', 'costar', 'scrota'], + 'scrouge': ['scourge', 'scrouge'], + 'scrouger': ['scourger', 'scrouger'], + 'scrout': 
['scrout', 'scruto'], + 'scroyle': ['cryosel', 'scroyle'], + 'scruf': ['scruf', 'scurf'], + 'scruffle': ['scruffle', 'scuffler'], + 'scruple': ['scruple', 'sculper'], + 'scrutate': ['crustate', 'scrutate'], + 'scrutation': ['crustation', 'scrutation'], + 'scrutinant': ['incrustant', 'scrutinant'], + 'scrutinate': ['incrustate', 'scaturient', 'scrutinate'], + 'scruto': ['scrout', 'scruto'], + 'scudi': ['scudi', 'sudic'], + 'scuffler': ['scruffle', 'scuffler'], + 'sculper': ['scruple', 'sculper'], + 'sculpin': ['insculp', 'sculpin'], + 'scup': ['cusp', 'scup'], + 'scur': ['crus', 'scur'], + 'scurf': ['scruf', 'scurf'], + 'scusation': ['cosustain', 'scusation'], + 'scuta': ['scaut', 'scuta'], + 'scutal': ['scutal', 'suclat'], + 'scute': ['cetus', 'scute'], + 'scutifer': ['fusteric', 'scutifer'], + 'scutigeral': ['gesticular', 'scutigeral'], + 'scutula': ['auscult', 'scutula'], + 'scye': ['scye', 'syce'], + 'scylliidae': ['salicylide', 'scylliidae'], + 'scyllium': ['clumsily', 'scyllium'], + 'scyphi': ['physic', 'scyphi'], + 'scyphomancy': ['psychomancy', 'scyphomancy'], + 'scyt': ['cyst', 'scyt'], + 'scythe': ['chesty', 'scythe'], + 'scytitis': ['cystitis', 'scytitis'], + 'se': ['es', 'se'], + 'sea': ['aes', 'ase', 'sea'], + 'seadog': ['dosage', 'seadog'], + 'seagirt': ['seagirt', 'strigae'], + 'seah': ['seah', 'shea'], + 'seak': ['sake', 'seak'], + 'seal': ['elsa', 'sale', 'seal', 'slae'], + 'sealable': ['leasable', 'sealable'], + 'sealch': ['cashel', 'laches', 'sealch'], + 'sealer': ['alerse', 'leaser', 'reales', 'resale', 'reseal', 'sealer'], + 'sealet': ['ateles', 'saltee', 'sealet', 'stelae', 'teasel'], + 'sealing': ['leasing', 'sealing'], + 'sealwort': ['restowal', 'sealwort'], + 'seam': ['asem', 'mesa', 'same', 'seam'], + 'seamanite': ['anamesite', 'seamanite'], + 'seamark': ['kamares', 'seamark'], + 'seamer': ['reseam', 'seamer'], + 'seamlet': ['maltese', 'seamlet'], + 'seamlike': ['mesalike', 'seamlike'], + 'seamrend': ['redesman', 'seamrend'], + 'seamster': ['masseter', 'seamster'], + 'seamus': ['assume', 'seamus'], + 'sean': ['anes', 'sane', 'sean'], + 'seance': ['encase', 'seance', 'seneca'], + 'seaplane': ['seaplane', 'spelaean'], + 'seaport': ['esparto', 'petrosa', 'seaport'], + 'sear': ['arse', 'rase', 'sare', 'sear', 'sera'], + 'searce': ['cesare', 'crease', 'recase', 'searce'], + 'searcer': ['creaser', 'searcer'], + 'search': ['arches', 'chaser', 'eschar', 'recash', 'search'], + 'searcher': ['rechaser', 'research', 'searcher'], + 'searchment': ['manchester', 'searchment'], + 'searcloth': ['clathrose', 'searcloth'], + 'seared': ['erased', 'reseda', 'seared'], + 'searer': ['eraser', 'searer'], + 'searing': ['searing', 'seringa'], + 'seary': ['reasy', 'resay', 'sayer', 'seary'], + 'seaside': ['disease', 'seaside'], + 'seat': ['ates', 'east', 'eats', 'sate', 'seat', 'seta'], + 'seated': ['seated', 'sedate'], + 'seater': ['asteer', + 'easter', + 'eastre', + 'reseat', + 'saeter', + 'seater', + 'staree', + 'teaser', + 'teresa'], + 'seating': ['easting', + 'gainset', + 'genista', + 'ingesta', + 'seating', + 'signate', + 'teasing'], + 'seatless': ['sateless', 'seatless'], + 'seatrain': ['artesian', 'asterina', 'asternia', 'erastian', 'seatrain'], + 'seatron': ['noreast', 'rosetan', 'seatron', 'senator', 'treason'], + 'seave': ['eaves', 'evase', 'seave'], + 'seax': ['axes', 'saxe', 'seax'], + 'seba': ['base', 'besa', 'sabe', 'seba'], + 'sebastian': ['bassanite', 'sebastian'], + 'sebilla': ['sabelli', 'sebilla'], + 'sebum': ['embus', 'sebum'], + 'secalin': ['celsian', 'escalin', 
'sanicle', 'secalin'], + 'secaline': ['salience', 'secaline'], + 'secant': ['ascent', 'secant', 'stance'], + 'secern': ['censer', 'scerne', 'screen', 'secern'], + 'secernent': ['secernent', 'sentencer'], + 'secondar': ['endosarc', 'secondar'], + 'secos': ['cosse', 'secos'], + 'secpar': ['casper', 'escarp', 'parsec', 'scrape', 'secpar', 'spacer'], + 'secre': ['scree', 'secre'], + 'secret': ['resect', 'screet', 'secret'], + 'secretarian': ['ascertainer', 'reascertain', 'secretarian'], + 'secretion': ['resection', 'secretion'], + 'secretional': ['resectional', 'secretional'], + 'sect': ['cest', 'sect'], + 'sectarian': ['ascertain', 'cartesian', 'cartisane', 'sectarian'], + 'sectarianism': ['cartesianism', 'sectarianism'], + 'section': ['contise', 'noetics', 'section'], + 'sectism': ['sectism', 'smectis'], + 'sector': ['corset', 'cortes', 'coster', 'escort', 'scoter', 'sector'], + 'sectorial': ['alectoris', 'sarcolite', 'sclerotia', 'sectorial'], + 'sectroid': ['decorist', 'sectroid'], + 'securable': ['rescuable', 'securable'], + 'securance': ['recusance', 'securance'], + 'secure': ['cereus', 'ceruse', 'recuse', 'rescue', 'secure'], + 'securer': ['recurse', 'rescuer', 'securer'], + 'sedan': ['sedan', 'snead'], + 'sedanier': ['arsedine', 'arsenide', 'sedanier', 'siderean'], + 'sedat': ['sedat', 'stade', 'stead'], + 'sedate': ['seated', 'sedate'], + 'sedation': ['astonied', 'sedation'], + 'sederunt': ['sederunt', 'underset', 'undesert', 'unrested'], + 'sedile': ['diesel', 'sedile', 'seidel'], + 'sedimetric': ['sedimetric', 'semidirect'], + 'sedimetrical': ['decimestrial', 'sedimetrical'], + 'sedition': ['desition', 'sedition'], + 'sedulity': ['dysluite', 'sedulity'], + 'sedum': ['mused', 'sedum'], + 'seedbird': ['birdseed', 'seedbird'], + 'seeded': ['deseed', 'seeded'], + 'seeder': ['reseed', 'seeder'], + 'seedlip': ['pelides', 'seedlip'], + 'seeing': ['seeing', 'signee'], + 'seek': ['kees', 'seek', 'skee'], + 'seeker': ['reseek', 'seeker'], + 'seel': ['else', 'lees', 'seel', 'sele', 'slee'], + 'seem': ['mese', 'seem', 'seme', 'smee'], + 'seemer': ['emerse', 'seemer'], + 'seen': ['ense', 'esne', 'nese', 'seen', 'snee'], + 'seenu': ['ensue', 'seenu', 'unsee'], + 'seer': ['erse', 'rees', 'seer', 'sere'], + 'seerband': ['seerband', 'serabend'], + 'seerhand': ['denshare', 'seerhand'], + 'seerhood': ['rhodeose', 'seerhood'], + 'seership': ['hesperis', 'seership'], + 'seething': ['seething', 'sheeting'], + 'seg': ['ges', 'seg'], + 'seggar': ['sagger', 'seggar'], + 'seggard': ['daggers', 'seggard'], + 'sego': ['goes', 'sego'], + 'segolate': ['gelatose', 'segolate'], + 'segreant': ['estrange', 'segreant', 'sergeant', 'sternage'], + 'seid': ['desi', 'ides', 'seid', 'side'], + 'seidel': ['diesel', 'sedile', 'seidel'], + 'seignioral': ['seignioral', 'seignorial'], + 'seignoral': ['gasoliner', 'seignoral'], + 'seignorial': ['seignioral', 'seignorial'], + 'seine': ['insee', 'seine'], + 'seiner': ['inseer', 'nereis', 'seiner', 'serine', 'sirene'], + 'seise': ['essie', 'seise'], + 'seism': ['seism', 'semis'], + 'seismal': ['aimless', 'melissa', 'seismal'], + 'seismotic': ['seismotic', 'societism'], + 'seit': ['seit', 'site'], + 'seizable': ['seizable', 'sizeable'], + 'seizer': ['resize', 'seizer'], + 'sekane': ['sakeen', 'sekane'], + 'sekani': ['kinase', 'sekani'], + 'sekar': ['asker', 'reask', 'saker', 'sekar'], + 'seker': ['esker', 'keres', 'reesk', 'seker', 'skeer', 'skere'], + 'selagite': ['elegiast', 'selagite'], + 'selah': ['halse', 'leash', 'selah', 'shale', 'sheal', 'shela'], + 'selamin': ['malines', 
'salmine', 'selamin', 'seminal'], + 'selbergite': ['gilbertese', 'selbergite'], + 'seldor': ['dorsel', 'seldor', 'solder'], + 'seldseen': ['needless', 'seldseen'], + 'sele': ['else', 'lees', 'seel', 'sele', 'slee'], + 'selector': ['corselet', 'sclerote', 'selector'], + 'selenic': ['license', 'selenic', 'silence'], + 'selenion': ['leonines', 'selenion'], + 'selenitic': ['insectile', 'selenitic'], + 'selenium': ['selenium', 'semilune', 'seminule'], + 'selenosis': ['noiseless', 'selenosis'], + 'seleucian': ['sauceline', 'seleucian'], + 'self': ['fels', 'self'], + 'selfsame': ['fameless', 'selfsame'], + 'selfsameness': ['famelessness', 'selfsameness'], + 'selictar': ['altrices', 'selictar'], + 'selina': ['alsine', 'neslia', 'saline', 'selina', 'silane'], + 'selion': ['insole', 'leonis', 'lesion', 'selion'], + 'seljukian': ['januslike', 'seljukian'], + 'sella': ['salle', 'sella'], + 'sellably': ['sellably', 'syllable'], + 'sellate': ['estella', 'sellate'], + 'seller': ['resell', 'seller'], + 'selli': ['lisle', 'selli'], + 'sellie': ['leslie', 'sellie'], + 'sellout': ['outsell', 'sellout'], + 'selt': ['lest', 'selt'], + 'selter': ['lester', 'selter', 'streel'], + 'selung': ['gunsel', 'selung', 'slunge'], + 'selva': ['salve', 'selva', 'slave', 'valse'], + 'semang': ['magnes', 'semang'], + 'semantic': ['amnestic', 'semantic'], + 'semaphore': ['mesohepar', 'semaphore'], + 'sematic': ['cameist', 'etacism', 'sematic'], + 'sematrope': ['perosmate', 'sematrope'], + 'seme': ['mese', 'seem', 'seme', 'smee'], + 'semen': ['mense', 'mesne', 'semen'], + 'semeostoma': ['semeostoma', 'semostomae'], + 'semi': ['mise', 'semi', 'sime'], + 'semiarch': ['semiarch', 'smachrie'], + 'semibald': ['bedismal', 'semibald'], + 'semiball': ['mislabel', 'semiball'], + 'semic': ['mesic', 'semic'], + 'semicircle': ['semicircle', 'semicleric'], + 'semicleric': ['semicircle', 'semicleric'], + 'semicone': ['nicesome', 'semicone'], + 'semicoronet': ['oncosimeter', 'semicoronet'], + 'semicrome': ['mesomeric', 'microseme', 'semicrome'], + 'semidirect': ['sedimetric', 'semidirect'], + 'semidormant': ['memorandist', 'moderantism', 'semidormant'], + 'semihard': ['dreamish', 'semihard'], + 'semihiant': ['histamine', 'semihiant'], + 'semilimber': ['immersible', 'semilimber'], + 'semilunar': ['semilunar', 'unrealism'], + 'semilune': ['selenium', 'semilune', 'seminule'], + 'semiminor': ['immersion', 'semiminor'], + 'semimoron': ['monroeism', 'semimoron'], + 'seminal': ['malines', 'salmine', 'selamin', 'seminal'], + 'seminar': ['remains', 'seminar'], + 'seminasal': ['messalian', 'seminasal'], + 'seminomadic': ['demoniacism', 'seminomadic'], + 'seminomata': ['mastomenia', 'seminomata'], + 'seminule': ['selenium', 'semilune', 'seminule'], + 'semiopal': ['malpoise', 'semiopal'], + 'semiorb': ['boreism', 'semiorb'], + 'semiovaloid': ['semiovaloid', 'semiovoidal'], + 'semiovoidal': ['semiovaloid', 'semiovoidal'], + 'semipolar': ['perisomal', 'semipolar'], + 'semipro': ['imposer', 'promise', 'semipro'], + 'semipronation': ['impersonation', 'prosemination', 'semipronation'], + 'semiquote': ['quietsome', 'semiquote'], + 'semirotund': ['semirotund', 'unmortised'], + 'semis': ['seism', 'semis'], + 'semispan': ['menaspis', 'semispan'], + 'semisteel': ['messelite', 'semisteel', 'teleseism'], + 'semistill': ['limitless', 'semistill'], + 'semistriated': ['disastimeter', 'semistriated'], + 'semita': ['samite', 'semita', 'tamise', 'teaism'], + 'semitae': ['amesite', 'mesitae', 'semitae'], + 'semitour': ['moisture', 'semitour'], + 'semiurban': ['semiurban', 
'submarine'], + 'semiurn': ['neurism', 'semiurn'], + 'semivector': ['semivector', 'viscometer'], + 'semnae': ['enseam', 'semnae'], + 'semola': ['melosa', 'salome', 'semola'], + 'semolella': ['lamellose', 'semolella'], + 'semolina': ['laminose', 'lemonias', 'semolina'], + 'semological': ['mesological', 'semological'], + 'semology': ['mesology', 'semology'], + 'semostomae': ['semeostoma', 'semostomae'], + 'sempiternous': ['sempiternous', 'supermoisten'], + 'semuncia': ['muscinae', 'semuncia'], + 'semuncial': ['masculine', 'semuncial', 'simulance'], + 'sen': ['ens', 'sen'], + 'senaite': ['etesian', 'senaite'], + 'senam': ['manes', 'manse', 'mensa', 'samen', 'senam'], + 'senarius': ['anuresis', 'senarius'], + 'senate': ['ensate', 'enseat', 'santee', 'sateen', 'senate'], + 'senator': ['noreast', 'rosetan', 'seatron', 'senator', 'treason'], + 'senatorian': ['arsenation', 'senatorian', 'sonneratia'], + 'senatrices': ['resistance', 'senatrices'], + 'sence': ['cense', 'scene', 'sence'], + 'senci': ['senci', 'since'], + 'send': ['send', 'sned'], + 'sender': ['resend', 'sender'], + 'seneca': ['encase', 'seance', 'seneca'], + 'senega': ['sagene', 'senega'], + 'senesce': ['essence', 'senesce'], + 'senile': ['enisle', 'ensile', 'senile', 'silene'], + 'senilism': ['liminess', 'senilism'], + 'senior': ['rosine', 'senior', 'soneri'], + 'senlac': ['lances', 'senlac'], + 'senna': ['nanes', 'senna'], + 'sennit': ['innest', 'sennit', 'sinnet', 'tennis'], + 'sennite': ['intense', 'sennite'], + 'senocular': ['larcenous', 'senocular'], + 'senones': ['oneness', 'senones'], + 'sensable': ['ableness', 'blaeness', 'sensable'], + 'sensatorial': ['assertional', 'sensatorial'], + 'sensical': ['laciness', 'sensical'], + 'sensilia': ['sensilia', 'silesian'], + 'sensilla': ['nailless', 'sensilla'], + 'sension': ['neossin', 'sension'], + 'sensuism': ['sensuism', 'senusism'], + 'sent': ['nest', 'sent', 'sten'], + 'sentencer': ['secernent', 'sentencer'], + 'sentition': ['sentition', 'tenonitis'], + 'senusism': ['sensuism', 'senusism'], + 'sepad': ['depas', 'sepad', 'spade'], + 'sepal': ['elaps', + 'lapse', + 'lepas', + 'pales', + 'salep', + 'saple', + 'sepal', + 'slape', + 'spale', + 'speal'], + 'sepaled': ['delapse', 'sepaled'], + 'sepaloid': ['episodal', 'lapidose', 'sepaloid'], + 'separable': ['separable', 'spareable'], + 'separate': ['asperate', 'separate'], + 'separation': ['anisoptera', 'asperation', 'separation'], + 'sephardi': ['diphaser', 'parished', 'raphides', 'sephardi'], + 'sephen': ['sephen', 'sphene'], + 'sepian': ['sepian', 'spinae'], + 'sepic': ['sepic', 'spice'], + 'sepion': ['espino', 'sepion'], + 'sepoy': ['poesy', 'posey', 'sepoy'], + 'seps': ['pess', 'seps'], + 'sepsis': ['sepsis', 'speiss'], + 'sept': ['pest', 'sept', 'spet', 'step'], + 'septa': ['paste', 'septa', 'spate'], + 'septal': ['pastel', 'septal', 'staple'], + 'septane': ['penates', 'septane'], + 'septarium': ['impasture', 'septarium'], + 'septenar': ['entrepas', 'septenar'], + 'septennium': ['pennisetum', 'septennium'], + 'septentrio': ['septentrio', 'tripestone'], + 'septerium': ['misrepute', 'septerium'], + 'septi': ['septi', 'spite', 'stipe'], + 'septicemia': ['episematic', 'septicemia'], + 'septicidal': ['pesticidal', 'septicidal'], + 'septicopyemia': ['pyosepticemia', 'septicopyemia'], + 'septicopyemic': ['pyosepticemic', 'septicopyemic'], + 'septier': ['respite', 'septier'], + 'septiferous': ['pestiferous', 'septiferous'], + 'septile': ['epistle', 'septile'], + 'septimal': ['petalism', 'septimal'], + 'septocosta': ['septocosta', 
'statoscope'], + 'septoic': ['poetics', 'septoic'], + 'septonasal': ['nasoseptal', 'septonasal'], + 'septoria': ['isoptera', 'septoria'], + 'septum': ['septum', 'upstem'], + 'septuor': ['petrous', 'posture', 'proetus', 'proteus', 'septuor', 'spouter'], + 'sequential': ['latinesque', 'sequential'], + 'sequin': ['quinse', 'sequin'], + 'ser': ['ers', 'ser'], + 'sera': ['arse', 'rase', 'sare', 'sear', 'sera'], + 'serab': ['barse', 'besra', 'saber', 'serab'], + 'serabend': ['seerband', 'serabend'], + 'seraglio': ['girasole', 'seraglio'], + 'serai': ['aries', 'arise', 'raise', 'serai'], + 'serail': ['israel', 'relais', 'resail', 'sailer', 'serail', 'serial'], + 'seral': ['arles', 'arsle', 'laser', 'seral', 'slare'], + 'serang': ['angers', 'sanger', 'serang'], + 'serape': ['parsee', 'persae', 'persea', 'serape'], + 'seraph': ['phrase', 'seraph', 'shaper', 'sherpa'], + 'seraphic': ['parchesi', 'seraphic'], + 'seraphim': ['samphire', 'seraphim'], + 'seraphina': ['pharisean', 'seraphina'], + 'seraphine': ['hesperian', 'phrenesia', 'seraphine'], + 'seraphism': ['misphrase', 'seraphism'], + 'serapic': ['epacris', 'scrapie', 'serapic'], + 'serapis': ['paresis', 'serapis'], + 'serapist': ['piratess', 'serapist', 'tarsipes'], + 'serau': ['serau', 'urase'], + 'seraw': ['resaw', 'sawer', 'seraw', 'sware', 'swear', 'warse'], + 'sercial': ['scleria', 'sercial'], + 'sere': ['erse', 'rees', 'seer', 'sere'], + 'serean': ['serean', 'serena'], + 'sereh': ['herse', 'sereh', 'sheer', 'shree'], + 'serena': ['serean', 'serena'], + 'serenata': ['arsenate', 'serenata'], + 'serene': ['resene', 'serene'], + 'serenoa': ['arenose', 'serenoa'], + 'serge': ['reges', 'serge'], + 'sergeant': ['estrange', 'segreant', 'sergeant', 'sternage'], + 'sergei': ['sergei', 'sieger'], + 'serger': ['gerres', 'serger'], + 'serging': ['serging', 'snigger'], + 'sergiu': ['guiser', 'sergiu'], + 'seri': ['reis', 'rise', 'seri', 'sier', 'sire'], + 'serial': ['israel', 'relais', 'resail', 'sailer', 'serail', 'serial'], + 'serialist': ['eristalis', 'serialist'], + 'serian': ['arisen', 'arsine', 'resina', 'serian'], + 'sericate': ['ecrasite', 'sericate'], + 'sericated': ['discreate', 'sericated'], + 'sericin': ['irenics', 'resinic', 'sericin', 'sirenic'], + 'serific': ['friesic', 'serific'], + 'serin': ['reins', 'resin', 'rinse', 'risen', 'serin', 'siren'], + 'serine': ['inseer', 'nereis', 'seiner', 'serine', 'sirene'], + 'serinette': ['retistene', 'serinette'], + 'seringa': ['searing', 'seringa'], + 'seringal': ['resignal', 'seringal', 'signaler'], + 'serinus': ['russine', 'serinus', 'sunrise'], + 'serio': ['osier', 'serio'], + 'seriola': ['rosalie', 'seriola'], + 'serioludicrous': ['ludicroserious', 'serioludicrous'], + 'sermo': ['meros', 'mores', 'morse', 'sermo', 'smore'], + 'sermonist': ['monitress', 'sermonist'], + 'sero': ['eros', 'rose', 'sero', 'sore'], + 'serofibrous': ['fibroserous', 'serofibrous'], + 'serolin': ['resinol', 'serolin'], + 'seromucous': ['mucoserous', 'seromucous'], + 'seron': ['norse', 'noser', 'seron', 'snore'], + 'seroon': ['nooser', 'seroon', 'sooner'], + 'seroot': ['seroot', 'sooter', 'torose'], + 'serotina': ['arsonite', 'asterion', 'oestrian', 'rosinate', 'serotina'], + 'serotinal': ['lairstone', 'orleanist', 'serotinal'], + 'serotine': ['serotine', 'torinese'], + 'serous': ['serous', 'souser'], + 'serow': ['owser', 'resow', 'serow', 'sower', 'swore', 'worse'], + 'serpari': ['aspirer', 'praiser', 'serpari'], + 'serpent': ['penster', 'present', 'serpent', 'strepen'], + 'serpentian': ['serpentian', 'serpentina'], + 
'serpentid': ['president', 'serpentid'], + 'serpentina': ['serpentian', 'serpentina'], + 'serpentinous': ['serpentinous', 'supertension'], + 'serpently': ['presently', 'serpently'], + 'serpiginous': ['serpiginous', 'spinigerous'], + 'serpolet': ['proteles', 'serpolet'], + 'serpula': ['perusal', 'serpula'], + 'serpulae': ['pleasure', 'serpulae'], + 'serpulan': ['purslane', 'serpulan', 'supernal'], + 'serpulidae': ['serpulidae', 'superideal'], + 'serpuline': ['serpuline', 'superline'], + 'serra': ['ersar', 'raser', 'serra'], + 'serrage': ['argeers', 'greaser', 'serrage'], + 'serran': ['serran', 'snarer'], + 'serrano': ['serrano', 'sornare'], + 'serratic': ['crateris', 'serratic'], + 'serratodentate': ['dentatoserrate', 'serratodentate'], + 'serrature': ['serrature', 'treasurer'], + 'serried': ['derries', 'desirer', 'resider', 'serried'], + 'serriped': ['presider', 'serriped'], + 'sert': ['rest', 'sert', 'stre'], + 'serta': ['aster', 'serta', 'stare', 'strae', 'tarse', 'teras'], + 'sertum': ['muster', 'sertum', 'stumer'], + 'serum': ['muser', 'remus', 'serum'], + 'serut': ['serut', 'strue', 'turse', 'uster'], + 'servable': ['beslaver', 'servable', 'versable'], + 'servage': ['gervase', 'greaves', 'servage'], + 'serval': ['salver', 'serval', 'slaver', 'versal'], + 'servant': ['servant', 'versant'], + 'servation': ['overstain', 'servation', 'versation'], + 'serve': ['serve', 'sever', 'verse'], + 'server': ['revers', 'server', 'verser'], + 'servet': ['revest', 'servet', 'sterve', 'verset', 'vester'], + 'servetian': ['invertase', 'servetian'], + 'servian': ['servian', 'vansire'], + 'service': ['cerevis', 'scrieve', 'service'], + 'serviceable': ['receivables', 'serviceable'], + 'servient': ['reinvest', 'servient'], + 'serviential': ['inversatile', 'serviential'], + 'servilize': ['servilize', 'silverize'], + 'servite': ['restive', 'servite'], + 'servitor': ['overstir', 'servitor'], + 'servitude': ['detrusive', 'divesture', 'servitude'], + 'servo': ['servo', 'verso'], + 'sesma': ['masse', 'sesma'], + 'sestertium': ['sestertium', 'trusteeism'], + 'sestet': ['sestet', 'testes', 'tsetse'], + 'sestiad': ['disseat', 'sestiad'], + 'sestian': ['entasis', 'sestian', 'sestina'], + 'sestina': ['entasis', 'sestian', 'sestina'], + 'sestole': ['osselet', 'sestole', 'toeless'], + 'sestuor': ['estrous', 'oestrus', 'sestuor', 'tussore'], + 'sesuto': ['sesuto', 'setous'], + 'seta': ['ates', 'east', 'eats', 'sate', 'seat', 'seta'], + 'setae': ['setae', 'tease'], + 'setal': ['least', 'setal', 'slate', 'stale', 'steal', 'stela', 'tales'], + 'setaria': ['asarite', 'asteria', 'atresia', 'setaria'], + 'setback': ['backset', 'setback'], + 'setdown': ['downset', 'setdown'], + 'seth': ['esth', 'hest', 'seth'], + 'sethead': ['headset', 'sethead'], + 'sethian': ['sethian', 'sthenia'], + 'sethic': ['ethics', 'sethic'], + 'setibo': ['setibo', 'sobeit'], + 'setirostral': ['latirostres', 'setirostral'], + 'setline': ['leisten', 'setline', 'tensile'], + 'setoff': ['offset', 'setoff'], + 'seton': ['onset', 'seton', 'steno', 'stone'], + 'setous': ['sesuto', 'setous'], + 'setout': ['outset', 'setout'], + 'setover': ['overset', 'setover'], + 'sett': ['sett', 'stet', 'test'], + 'settable': ['settable', 'testable'], + 'settaine': ['anisette', 'atestine', 'settaine'], + 'settee': ['settee', 'testee'], + 'setter': ['retest', 'setter', 'street', 'tester'], + 'setting': ['setting', 'testing'], + 'settler': ['settler', 'sterlet', 'trestle'], + 'settlor': ['settlor', 'slotter'], + 'setula': ['salute', 'setula'], + 'setup': ['setup', 'stupe', 
'upset'], + 'setwall': ['setwall', 'swallet'], + 'seven': ['evens', 'seven'], + 'sevener': ['sevener', 'veneres'], + 'sever': ['serve', 'sever', 'verse'], + 'severer': ['reserve', 'resever', 'reverse', 'severer'], + 'sew': ['sew', 'wes'], + 'sewed': ['sewed', 'swede'], + 'sewer': ['resew', 'sewer', 'sweer'], + 'sewered': ['sewered', 'sweered'], + 'sewing': ['sewing', 'swinge'], + 'sewn': ['news', 'sewn', 'snew'], + 'sewround': ['sewround', 'undersow'], + 'sexed': ['desex', 'sexed'], + 'sextan': ['saxten', 'sextan'], + 'sextipartition': ['extirpationist', 'sextipartition'], + 'sextodecimo': ['decimosexto', 'sextodecimo'], + 'sextry': ['sextry', 'xyster'], + 'sexuale': ['esexual', 'sexuale'], + 'sey': ['sey', 'sye', 'yes'], + 'seymour': ['mousery', 'seymour'], + 'sfoot': ['foots', 'sfoot', 'stoof'], + 'sgad': ['dags', 'sgad'], + 'sha': ['ash', 'sah', 'sha'], + 'shab': ['bash', 'shab'], + 'shabbily': ['babishly', 'shabbily'], + 'shabbiness': ['babishness', 'shabbiness'], + 'shabunder': ['husbander', 'shabunder'], + 'shad': ['dash', 'sadh', 'shad'], + 'shade': ['deash', 'hades', 'sadhe', 'shade'], + 'shaded': ['dashed', 'shaded'], + 'shader': ['dasher', 'shader', 'sheard'], + 'shadily': ['ladyish', 'shadily'], + 'shading': ['dashing', 'shading'], + 'shadkan': ['dashnak', 'shadkan'], + 'shady': ['dashy', 'shady'], + 'shafting': ['shafting', 'tangfish'], + 'shag': ['gash', 'shag'], + 'shagrag': ['ragshag', 'shagrag'], + 'shah': ['hash', 'sahh', 'shah'], + 'shahi': ['shahi', 'shiah'], + 'shaitan': ['ashanti', 'sanhita', 'shaitan', 'thasian'], + 'shaivism': ['shaivism', 'shivaism'], + 'shaka': ['kasha', 'khasa', 'sakha', 'shaka'], + 'shakeout': ['outshake', 'shakeout'], + 'shaker': ['kasher', 'shaker'], + 'shakil': ['lakish', 'shakil'], + 'shaku': ['kusha', 'shaku', 'ushak'], + 'shaky': ['hasky', 'shaky'], + 'shale': ['halse', 'leash', 'selah', 'shale', 'sheal', 'shela'], + 'shalt': ['shalt', 'slath'], + 'sham': ['mash', 'samh', 'sham'], + 'shama': ['hamsa', 'masha', 'shama'], + 'shamable': ['baalshem', 'shamable'], + 'shamal': ['mashal', 'shamal'], + 'shaman': ['ashman', 'shaman'], + 'shamba': ['ambash', 'shamba'], + 'shambrier': ['herbarism', 'shambrier'], + 'shambu': ['ambush', 'shambu'], + 'shame': ['sahme', 'shame'], + 'shamer': ['masher', 'ramesh', 'shamer'], + 'shamir': ['marish', 'shamir'], + 'shammish': ['mishmash', 'shammish'], + 'shan': ['hans', 'nash', 'shan'], + 'shane': ['ashen', 'hanse', 'shane', 'shean'], + 'shang': ['gnash', 'shang'], + 'shant': ['shant', 'snath'], + 'shap': ['hasp', 'pash', 'psha', 'shap'], + 'shape': ['heaps', 'pesah', 'phase', 'shape'], + 'shapeless': ['phaseless', 'shapeless'], + 'shaper': ['phrase', 'seraph', 'shaper', 'sherpa'], + 'shapometer': ['atmosphere', 'shapometer'], + 'shapy': ['physa', 'shapy'], + 'shardana': ['darshana', 'shardana'], + 'share': ['asher', 'share', 'shear'], + 'shareman': ['shareman', 'shearman'], + 'sharer': ['rasher', 'sharer'], + 'sharesman': ['sharesman', 'shearsman'], + 'shargar': ['shargar', 'sharrag'], + 'shari': ['ashir', 'shari'], + 'sharon': ['rhason', 'sharon', 'shoran'], + 'sharp': ['sharp', 'shrap'], + 'sharpener': ['resharpen', 'sharpener'], + 'sharper': ['phraser', 'sharper'], + 'sharpy': ['phrasy', 'sharpy'], + 'sharrag': ['shargar', 'sharrag'], + 'shasta': ['shasta', 'tassah'], + 'shaster': ['hatress', 'shaster'], + 'shastraik': ['katharsis', 'shastraik'], + 'shastri': ['sartish', 'shastri'], + 'shat': ['shat', 'tash'], + 'shatter': ['rathest', 'shatter'], + 'shatterer': ['ratherest', 'shatterer'], + 'shattering': 
['shattering', 'straighten'], + 'shauri': ['shauri', 'surahi'], + 'shave': ['shave', 'sheva'], + 'shavee': ['shavee', 'sheave'], + 'shaver': ['havers', 'shaver', 'shrave'], + 'shavery': ['shavery', 'shravey'], + 'shaw': ['shaw', 'wash'], + 'shawano': ['shawano', 'washoan'], + 'shawl': ['shawl', 'walsh'], + 'shawy': ['shawy', 'washy'], + 'shay': ['ashy', 'shay'], + 'shea': ['seah', 'shea'], + 'sheal': ['halse', 'leash', 'selah', 'shale', 'sheal', 'shela'], + 'shean': ['ashen', 'hanse', 'shane', 'shean'], + 'shear': ['asher', 'share', 'shear'], + 'shearbill': ['shearbill', 'shillaber'], + 'sheard': ['dasher', 'shader', 'sheard'], + 'shearer': ['reshare', 'reshear', 'shearer'], + 'shearman': ['shareman', 'shearman'], + 'shearsman': ['sharesman', 'shearsman'], + 'sheat': ['ashet', 'haste', 'sheat'], + 'sheave': ['shavee', 'sheave'], + 'shebeen': ['benshee', 'shebeen'], + 'shechem': ['meshech', 'shechem'], + 'sheder': ['hersed', 'sheder'], + 'sheely': ['sheely', 'sheyle'], + 'sheer': ['herse', 'sereh', 'sheer', 'shree'], + 'sheering': ['greenish', 'sheering'], + 'sheet': ['sheet', 'these'], + 'sheeter': ['sheeter', 'therese'], + 'sheeting': ['seething', 'sheeting'], + 'sheila': ['elisha', 'hailse', 'sheila'], + 'shela': ['halse', 'leash', 'selah', 'shale', 'sheal', 'shela'], + 'shelf': ['flesh', 'shelf'], + 'shelfful': ['fleshful', 'shelfful'], + 'shelflist': ['filthless', 'shelflist'], + 'shelfy': ['fleshy', 'shelfy'], + 'shelta': ['haslet', 'lesath', 'shelta'], + 'shelty': ['shelty', 'thysel'], + 'shelve': ['shelve', 'shevel'], + 'shemitic': ['ethicism', 'shemitic'], + 'shen': ['nesh', 'shen'], + 'sheol': ['hosel', 'sheol', 'shole'], + 'sher': ['hers', 'resh', 'sher'], + 'sherani': ['arshine', 'nearish', 'rhesian', 'sherani'], + 'sheratan': ['hanaster', 'sheratan'], + 'sheriat': ['atheris', 'sheriat'], + 'sherif': ['fisher', 'sherif'], + 'sherifate': ['fisheater', 'sherifate'], + 'sherify': ['fishery', 'sherify'], + 'sheriyat': ['hysteria', 'sheriyat'], + 'sherpa': ['phrase', 'seraph', 'shaper', 'sherpa'], + 'sherri': ['hersir', 'sherri'], + 'sheugh': ['hughes', 'sheugh'], + 'sheva': ['shave', 'sheva'], + 'shevel': ['shelve', 'shevel'], + 'shevri': ['shevri', 'shiver', 'shrive'], + 'shewa': ['hawse', 'shewa', 'whase'], + 'sheyle': ['sheely', 'sheyle'], + 'shi': ['his', 'hsi', 'shi'], + 'shiah': ['shahi', 'shiah'], + 'shibar': ['barish', 'shibar'], + 'shice': ['echis', 'shice'], + 'shicer': ['riches', 'shicer'], + 'shide': ['shide', 'shied', 'sidhe'], + 'shied': ['shide', 'shied', 'sidhe'], + 'shiel': ['liesh', 'shiel'], + 'shieldable': ['deshabille', 'shieldable'], + 'shier': ['hirse', 'shier', 'shire'], + 'shiest': ['shiest', 'thesis'], + 'shifter': ['reshift', 'shifter'], + 'shih': ['hish', 'shih'], + 'shiite': ['histie', 'shiite'], + 'shik': ['kish', 'shik', 'sikh'], + 'shikar': ['rakish', 'riksha', 'shikar', 'shikra', 'sikhra'], + 'shikara': ['shikara', 'sikhara'], + 'shikari': ['rikisha', 'shikari'], + 'shikra': ['rakish', 'riksha', 'shikar', 'shikra', 'sikhra'], + 'shillaber': ['shearbill', 'shillaber'], + 'shimal': ['lamish', 'shimal'], + 'shimmery': ['misrhyme', 'shimmery'], + 'shin': ['hisn', 'shin', 'sinh'], + 'shina': ['naish', 'shina'], + 'shine': ['eshin', 'shine'], + 'shiner': ['renish', 'shiner', 'shrine'], + 'shingle': ['english', 'shingle'], + 'shinto': ['histon', 'shinto', 'tonish'], + 'shinty': ['shinty', 'snithy'], + 'ship': ['pish', 'ship'], + 'shipboy': ['boyship', 'shipboy'], + 'shipkeeper': ['keepership', 'shipkeeper'], + 'shiplap': ['lappish', 'shiplap'], + 'shipman': 
['manship', 'shipman'], + 'shipmaster': ['mastership', 'shipmaster'], + 'shipmate': ['aphetism', 'mateship', 'shipmate', 'spithame'], + 'shipowner': ['ownership', 'shipowner'], + 'shippage': ['pageship', 'shippage'], + 'shipper': ['preship', 'shipper'], + 'shippo': ['popish', 'shippo'], + 'shipward': ['shipward', 'wardship'], + 'shipwork': ['shipwork', 'workship'], + 'shipworm': ['shipworm', 'wormship'], + 'shire': ['hirse', 'shier', 'shire'], + 'shirker': ['shirker', 'skirreh'], + 'shirley': ['relishy', 'shirley'], + 'shirty': ['shirty', 'thyris'], + 'shirvan': ['shirvan', 'varnish'], + 'shita': ['shita', 'thais'], + 'shivaism': ['shaivism', 'shivaism'], + 'shive': ['hives', 'shive'], + 'shiver': ['shevri', 'shiver', 'shrive'], + 'shlu': ['lush', 'shlu', 'shul'], + 'sho': ['sho', 'soh'], + 'shoa': ['saho', 'shoa'], + 'shoal': ['shoal', 'shola'], + 'shoat': ['hoast', 'hosta', 'shoat'], + 'shockable': ['shockable', 'shoeblack'], + 'shode': ['hosed', 'shode'], + 'shoder': ['dehors', 'rhodes', 'shoder', 'shored'], + 'shoe': ['hose', 'shoe'], + 'shoeblack': ['shockable', 'shoeblack'], + 'shoebrush': ['shoebrush', 'shorebush'], + 'shoeless': ['hoseless', 'shoeless'], + 'shoeman': ['hoseman', 'shoeman'], + 'shoer': ['horse', 'shoer', 'shore'], + 'shog': ['gosh', 'shog'], + 'shoji': ['joshi', 'shoji'], + 'shola': ['shoal', 'shola'], + 'shole': ['hosel', 'sheol', 'shole'], + 'shoo': ['shoo', 'soho'], + 'shoot': ['shoot', 'sooth', 'sotho', 'toosh'], + 'shooter': ['orthose', 'reshoot', 'shooter', 'soother'], + 'shooting': ['shooting', 'soothing'], + 'shop': ['phos', 'posh', 'shop', 'soph'], + 'shopbook': ['bookshop', 'shopbook'], + 'shopmaid': ['phasmoid', 'shopmaid'], + 'shopper': ['hoppers', 'shopper'], + 'shopwork': ['shopwork', 'workshop'], + 'shoran': ['rhason', 'sharon', 'shoran'], + 'shore': ['horse', 'shoer', 'shore'], + 'shorea': ['ahorse', 'ashore', 'hoarse', 'shorea'], + 'shorebush': ['shoebrush', 'shorebush'], + 'shored': ['dehors', 'rhodes', 'shoder', 'shored'], + 'shoreless': ['horseless', 'shoreless'], + 'shoreman': ['horseman', 'rhamnose', 'shoreman'], + 'shorer': ['horser', 'shorer'], + 'shoreward': ['drawhorse', 'shoreward'], + 'shoreweed': ['horseweed', 'shoreweed'], + 'shoring': ['horsing', 'shoring'], + 'short': ['horst', 'short'], + 'shortage': ['hostager', 'shortage'], + 'shorten': ['shorten', 'threnos'], + 'shot': ['host', 'shot', 'thos', 'tosh'], + 'shote': ['ethos', 'shote', 'those'], + 'shotgun': ['gunshot', 'shotgun', 'uhtsong'], + 'shotless': ['hostless', 'shotless'], + 'shotstar': ['shotstar', 'starshot'], + 'shou': ['huso', 'shou'], + 'shoulderer': ['reshoulder', 'shoulderer'], + 'shout': ['shout', 'south'], + 'shouter': ['shouter', 'souther'], + 'shouting': ['shouting', 'southing'], + 'shover': ['shover', 'shrove'], + 'showerer': ['reshower', 'showerer'], + 'shrab': ['brash', 'shrab'], + 'shram': ['marsh', 'shram'], + 'shrap': ['sharp', 'shrap'], + 'shrave': ['havers', 'shaver', 'shrave'], + 'shravey': ['shavery', 'shravey'], + 'shree': ['herse', 'sereh', 'sheer', 'shree'], + 'shrewly': ['shrewly', 'welshry'], + 'shriek': ['shriek', 'shrike'], + 'shrieval': ['lavisher', 'shrieval'], + 'shrike': ['shriek', 'shrike'], + 'shrine': ['renish', 'shiner', 'shrine'], + 'shrite': ['shrite', 'theirs'], + 'shrive': ['shevri', 'shiver', 'shrive'], + 'shriven': ['nervish', 'shriven'], + 'shroudy': ['hydrous', 'shroudy'], + 'shrove': ['shover', 'shrove'], + 'shrub': ['brush', 'shrub'], + 'shrubbery': ['berrybush', 'shrubbery'], + 'shrubland': ['brushland', 'shrubland'], + 
'shrubless': ['brushless', 'shrubless'], + 'shrublet': ['brushlet', 'shrublet'], + 'shrublike': ['brushlike', 'shrublike'], + 'shrubwood': ['brushwood', 'shrubwood'], + 'shrug': ['grush', 'shrug'], + 'shu': ['shu', 'ush'], + 'shuba': ['shuba', 'subah'], + 'shug': ['gush', 'shug', 'sugh'], + 'shul': ['lush', 'shlu', 'shul'], + 'shulamite': ['hamulites', 'shulamite'], + 'shuler': ['lusher', 'shuler'], + 'shunless': ['lushness', 'shunless'], + 'shunter': ['reshunt', 'shunter'], + 'shure': ['shure', 'usher'], + 'shurf': ['frush', 'shurf'], + 'shut': ['shut', 'thus', 'tush'], + 'shutness': ['shutness', 'thusness'], + 'shutout': ['outshut', 'shutout'], + 'shuttering': ['hurtingest', 'shuttering'], + 'shyam': ['mashy', 'shyam'], + 'si': ['is', 'si'], + 'sia': ['sai', 'sia'], + 'siak': ['saki', 'siak', 'sika'], + 'sial': ['lasi', 'lias', 'lisa', 'sail', 'sial'], + 'sialagogic': ['isagogical', 'sialagogic'], + 'sialic': ['sialic', 'silica'], + 'sialid': ['asilid', 'sialid'], + 'sialidae': ['asilidae', 'sialidae'], + 'siam': ['mias', 'saim', 'siam', 'sima'], + 'siamese': ['misease', 'siamese'], + 'sib': ['bis', 'sib'], + 'sibyl': ['sibyl', 'sybil'], + 'sibylla': ['sibylla', 'syllabi'], + 'sicana': ['ascian', 'sacian', 'scania', 'sicana'], + 'sicani': ['anisic', 'sicani', 'sinaic'], + 'sicarius': ['acrisius', 'sicarius'], + 'siccate': ['ascetic', 'castice', 'siccate'], + 'siccation': ['cocainist', 'siccation'], + 'sice': ['cise', 'sice'], + 'sicel': ['sicel', 'slice'], + 'sicilian': ['anisilic', 'sicilian'], + 'sickbed': ['bedsick', 'sickbed'], + 'sicker': ['scrike', 'sicker'], + 'sickerly': ['sickerly', 'slickery'], + 'sickle': ['sickle', 'skelic'], + 'sickler': ['sickler', 'slicker'], + 'sicklied': ['disclike', 'sicklied'], + 'sickling': ['sickling', 'slicking'], + 'sicula': ['caulis', 'clusia', 'sicula'], + 'siculian': ['luscinia', 'siculian'], + 'sid': ['dis', 'sid'], + 'sida': ['dais', 'dasi', 'disa', 'said', 'sida'], + 'sidalcea': ['diaclase', 'sidalcea'], + 'side': ['desi', 'ides', 'seid', 'side'], + 'sidearm': ['misread', 'sidearm'], + 'sideboard': ['broadside', 'sideboard'], + 'sideburns': ['burnsides', 'sideburns'], + 'sidecar': ['diceras', 'radices', 'sidecar'], + 'sidehill': ['hillside', 'sidehill'], + 'siderean': ['arsedine', 'arsenide', 'sedanier', 'siderean'], + 'siderin': ['insider', 'siderin'], + 'sideronatrite': ['endoarteritis', 'sideronatrite'], + 'siderous': ['desirous', 'siderous'], + 'sidership': ['sidership', 'spiderish'], + 'sidesway': ['sidesway', 'sideways'], + 'sidetrack': ['sidetrack', 'trackside'], + 'sidewalk': ['sidewalk', 'walkside'], + 'sideway': ['sideway', 'wayside'], + 'sideways': ['sidesway', 'sideways'], + 'sidhe': ['shide', 'shied', 'sidhe'], + 'sidle': ['sidle', 'slide'], + 'sidler': ['sidler', 'slider'], + 'sidling': ['sidling', 'sliding'], + 'sidlingly': ['sidlingly', 'slidingly'], + 'sieger': ['sergei', 'sieger'], + 'siena': ['anise', 'insea', 'siena', 'sinae'], + 'sienna': ['insane', 'sienna'], + 'sier': ['reis', 'rise', 'seri', 'sier', 'sire'], + 'sierra': ['raiser', 'sierra'], + 'siesta': ['siesta', 'tassie'], + 'siever': ['revise', 'siever'], + 'sife': ['feis', 'fise', 'sife'], + 'sift': ['fist', 'sift'], + 'sifted': ['fisted', 'sifted'], + 'sifter': ['fister', 'resift', 'sifter', 'strife'], + 'sifting': ['fisting', 'sifting'], + 'sigh': ['gish', 'sigh'], + 'sigher': ['resigh', 'sigher'], + 'sighted': ['desight', 'sighted'], + 'sightlily': ['sightlily', 'slightily'], + 'sightliness': ['sightliness', 'slightiness'], + 'sightly': ['sightly', 'slighty'], + 
'sigillated': ['distillage', 'sigillated'], + 'sigla': ['gisla', 'ligas', 'sigla'], + 'sigmatism': ['astigmism', 'sigmatism'], + 'sigmoidal': ['dialogism', 'sigmoidal'], + 'sign': ['sign', 'sing', 'snig'], + 'signable': ['signable', 'singable'], + 'signalee': ['ensilage', 'genesial', 'signalee'], + 'signaler': ['resignal', 'seringal', 'signaler'], + 'signalese': ['agileness', 'signalese'], + 'signally': ['signally', 'singally', 'slangily'], + 'signary': ['signary', 'syringa'], + 'signate': ['easting', + 'gainset', + 'genista', + 'ingesta', + 'seating', + 'signate', + 'teasing'], + 'signator': ['orangist', 'organist', 'roasting', 'signator'], + 'signee': ['seeing', 'signee'], + 'signer': ['resign', 'resing', 'signer', 'singer'], + 'signet': ['ingest', 'signet', 'stinge'], + 'signorial': ['sailoring', 'signorial'], + 'signpost': ['postsign', 'signpost'], + 'signum': ['musing', 'signum'], + 'sika': ['saki', 'siak', 'sika'], + 'sikar': ['kisra', 'sikar', 'skair'], + 'siket': ['siket', 'skite'], + 'sikh': ['kish', 'shik', 'sikh'], + 'sikhara': ['shikara', 'sikhara'], + 'sikhra': ['rakish', 'riksha', 'shikar', 'shikra', 'sikhra'], + 'sil': ['lis', 'sil'], + 'silane': ['alsine', 'neslia', 'saline', 'selina', 'silane'], + 'silas': ['silas', 'sisal'], + 'silcrete': ['sclerite', 'silcrete'], + 'sile': ['isle', 'lise', 'sile'], + 'silen': ['elsin', 'lenis', 'niels', 'silen', 'sline'], + 'silence': ['license', 'selenic', 'silence'], + 'silenced': ['licensed', 'silenced'], + 'silencer': ['licenser', 'silencer'], + 'silene': ['enisle', 'ensile', 'senile', 'silene'], + 'silent': ['enlist', 'listen', 'silent', 'tinsel'], + 'silently': ['silently', 'tinselly'], + 'silenus': ['insulse', 'silenus'], + 'silesian': ['sensilia', 'silesian'], + 'silica': ['sialic', 'silica'], + 'silicam': ['islamic', 'laicism', 'silicam'], + 'silicane': ['silicane', 'silicean'], + 'silicean': ['silicane', 'silicean'], + 'siliceocalcareous': ['calcareosiliceous', 'siliceocalcareous'], + 'silicoaluminate': ['aluminosilicate', 'silicoaluminate'], + 'silicone': ['isocline', 'silicone'], + 'silicotitanate': ['silicotitanate', 'titanosilicate'], + 'silicotungstate': ['silicotungstate', 'tungstosilicate'], + 'silicotungstic': ['silicotungstic', 'tungstosilicic'], + 'silk': ['lisk', 'silk', 'skil'], + 'silkaline': ['silkaline', 'snaillike'], + 'silkman': ['klanism', 'silkman'], + 'silkness': ['silkness', 'sinkless', 'skinless'], + 'silly': ['silly', 'silyl'], + 'sillyhow': ['lowishly', 'owlishly', 'sillyhow'], + 'silo': ['lois', 'silo', 'siol', 'soil', 'soli'], + 'silpha': ['palish', 'silpha'], + 'silt': ['list', 'silt', 'slit'], + 'silting': ['listing', 'silting'], + 'siltlike': ['siltlike', 'slitlike'], + 'silva': ['silva', 'slavi'], + 'silver': ['silver', 'sliver'], + 'silvered': ['desilver', 'silvered'], + 'silverer': ['resilver', 'silverer', 'sliverer'], + 'silverize': ['servilize', 'silverize'], + 'silverlike': ['silverlike', 'sliverlike'], + 'silverwood': ['silverwood', 'woodsilver'], + 'silvery': ['silvery', 'slivery'], + 'silvester': ['rivetless', 'silvester'], + 'silyl': ['silly', 'silyl'], + 'sim': ['ism', 'sim'], + 'sima': ['mias', 'saim', 'siam', 'sima'], + 'simal': ['islam', 'ismal', 'simal'], + 'simar': ['maris', 'marsi', 'samir', 'simar'], + 'sime': ['mise', 'semi', 'sime'], + 'simeon': ['eonism', 'mesion', 'oneism', 'simeon'], + 'simeonism': ['misoneism', 'simeonism'], + 'simiad': ['idiasm', 'simiad'], + 'simile': ['milsie', 'simile'], + 'simity': ['myitis', 'simity'], + 'simling': ['simling', 'smiling'], + 'simmer': 
['merism', 'mermis', 'simmer'], + 'simmon': ['monism', 'nomism', 'simmon'], + 'simon': ['minos', 'osmin', 'simon'], + 'simonian': ['insomnia', 'simonian'], + 'simonist': ['simonist', 'sintoism'], + 'simony': ['isonym', 'myosin', 'simony'], + 'simple': ['mespil', 'simple'], + 'simpleton': ['simpleton', 'spoilment'], + 'simplicist': ['simplicist', 'simplistic'], + 'simplistic': ['simplicist', 'simplistic'], + 'simply': ['limpsy', 'simply'], + 'simson': ['nosism', 'simson'], + 'simulance': ['masculine', 'semuncial', 'simulance'], + 'simulcast': ['masculist', 'simulcast'], + 'simuler': ['misrule', 'simuler'], + 'sina': ['anis', 'nais', 'nasi', 'nias', 'sain', 'sina'], + 'sinae': ['anise', 'insea', 'siena', 'sinae'], + 'sinaean': ['nisaean', 'sinaean'], + 'sinaic': ['anisic', 'sicani', 'sinaic'], + 'sinaitic': ['isatinic', 'sinaitic'], + 'sinal': ['sinal', 'slain', 'snail'], + 'sinapic': ['panisic', 'piscian', 'piscina', 'sinapic'], + 'since': ['senci', 'since'], + 'sincere': ['ceresin', 'sincere'], + 'sinecure': ['insecure', 'sinecure'], + 'sinew': ['sinew', 'swine', 'wisen'], + 'sinewed': ['endwise', 'sinewed'], + 'sinewy': ['sinewy', 'swiney'], + 'sinfonia': ['sainfoin', 'sinfonia'], + 'sinfonietta': ['festination', 'infestation', 'sinfonietta'], + 'sing': ['sign', 'sing', 'snig'], + 'singable': ['signable', 'singable'], + 'singally': ['signally', 'singally', 'slangily'], + 'singarip': ['aspiring', 'praising', 'singarip'], + 'singed': ['design', 'singed'], + 'singer': ['resign', 'resing', 'signer', 'singer'], + 'single': ['single', 'slinge'], + 'singler': ['singler', 'slinger'], + 'singles': ['essling', 'singles'], + 'singlet': ['glisten', 'singlet'], + 'sinh': ['hisn', 'shin', 'sinh'], + 'sinico': ['inosic', 'sinico'], + 'sinister': ['insister', 'reinsist', 'sinister', 'sisterin'], + 'sinistrodextral': ['dextrosinistral', 'sinistrodextral'], + 'sink': ['inks', 'sink', 'skin'], + 'sinker': ['resink', 'reskin', 'sinker'], + 'sinkhead': ['headskin', 'nakedish', 'sinkhead'], + 'sinkless': ['silkness', 'sinkless', 'skinless'], + 'sinklike': ['sinklike', 'skinlike'], + 'sinnet': ['innest', 'sennit', 'sinnet', 'tennis'], + 'sinoatrial': ['sinoatrial', 'solitarian'], + 'sinogram': ['orangism', 'organism', 'sinogram'], + 'sinolog': ['loosing', 'sinolog'], + 'sinopia': ['pisonia', 'sinopia'], + 'sinople': ['epsilon', 'sinople'], + 'sinter': ['estrin', 'insert', 'sinter', 'sterin', 'triens'], + 'sinto': ['sinto', 'stion'], + 'sintoc': ['nostic', 'sintoc', 'tocsin'], + 'sintoism': ['simonist', 'sintoism'], + 'sintu': ['sintu', 'suint'], + 'sinuatodentate': ['dentatosinuate', 'sinuatodentate'], + 'sinuose': ['sinuose', 'suiones'], + 'sinus': ['nisus', 'sinus'], + 'sinward': ['inwards', 'sinward'], + 'siol': ['lois', 'silo', 'siol', 'soil', 'soli'], + 'sionite': ['inosite', 'sionite'], + 'sip': ['psi', 'sip'], + 'sipe': ['pise', 'sipe'], + 'siper': ['siper', 'spier', 'spire'], + 'siphonal': ['nailshop', 'siphonal'], + 'siphuncle': ['siphuncle', 'uncleship'], + 'sipling': ['sipling', 'spiling'], + 'sipylite': ['pyelitis', 'sipylite'], + 'sir': ['sir', 'sri'], + 'sire': ['reis', 'rise', 'seri', 'sier', 'sire'], + 'siredon': ['indorse', 'ordines', 'siredon', 'sordine'], + 'siren': ['reins', 'resin', 'rinse', 'risen', 'serin', 'siren'], + 'sirene': ['inseer', 'nereis', 'seiner', 'serine', 'sirene'], + 'sirenic': ['irenics', 'resinic', 'sericin', 'sirenic'], + 'sirenize': ['resinize', 'sirenize'], + 'sirenlike': ['resinlike', 'sirenlike'], + 'sirenoid': ['derision', 'ironside', 'resinoid', 'sirenoid'], + 
'sireny': ['resiny', 'sireny'], + 'sirian': ['raisin', 'sirian'], + 'sirih': ['irish', 'rishi', 'sirih'], + 'sirky': ['risky', 'sirky'], + 'sirmian': ['iranism', 'sirmian'], + 'sirpea': ['aspire', 'paries', 'praise', 'sirpea', 'spirea'], + 'sirple': ['lisper', 'pliers', 'sirple', 'spiler'], + 'sirrah': ['arrish', 'harris', 'rarish', 'sirrah'], + 'sirree': ['rerise', 'sirree'], + 'siruelas': ['russelia', 'siruelas'], + 'sirup': ['prius', 'sirup'], + 'siruper': ['siruper', 'upriser'], + 'siryan': ['siryan', 'syrian'], + 'sis': ['sis', 'ssi'], + 'sisal': ['silas', 'sisal'], + 'sish': ['hiss', 'sish'], + 'sisham': ['samish', 'sisham'], + 'sisi': ['isis', 'sisi'], + 'sisseton': ['sisseton', 'stenosis'], + 'sistani': ['nasitis', 'sistani'], + 'sister': ['resist', 'restis', 'sister'], + 'sisterin': ['insister', 'reinsist', 'sinister', 'sisterin'], + 'sistering': ['resisting', 'sistering'], + 'sisterless': ['resistless', 'sisterless'], + 'sistrum': ['sistrum', 'trismus'], + 'sit': ['ist', 'its', 'sit'], + 'sita': ['atis', 'sita', 'tsia'], + 'sitao': ['sitao', 'staio'], + 'sitar': ['arist', + 'astir', + 'sitar', + 'stair', + 'stria', + 'tarsi', + 'tisar', + 'trias'], + 'sitch': ['sitch', 'stchi', 'stich'], + 'site': ['seit', 'site'], + 'sith': ['hist', 'sith', 'this', 'tshi'], + 'sithens': ['sithens', 'thissen'], + 'sitient': ['sitient', 'sittine'], + 'sitology': ['sitology', 'tsiology'], + 'sittinae': ['satinite', 'sittinae'], + 'sittine': ['sitient', 'sittine'], + 'situal': ['situal', 'situla', 'tulasi'], + 'situate': ['situate', 'usitate'], + 'situla': ['situal', 'situla', 'tulasi'], + 'situs': ['situs', 'suist'], + 'siva': ['avis', 'siva', 'visa'], + 'sivaism': ['saivism', 'sivaism'], + 'sivan': ['savin', 'sivan'], + 'siwan': ['siwan', 'swain'], + 'siwash': ['sawish', 'siwash'], + 'sixte': ['exist', 'sixte'], + 'sixty': ['sixty', 'xysti'], + 'sizeable': ['seizable', 'sizeable'], + 'sizeman': ['sizeman', 'zamenis'], + 'skair': ['kisra', 'sikar', 'skair'], + 'skal': ['lask', 'skal'], + 'skance': ['sacken', 'skance'], + 'skanda': ['sandak', 'skanda'], + 'skart': ['karst', 'skart', 'stark'], + 'skat': ['skat', 'task'], + 'skate': ['skate', 'stake', 'steak'], + 'skater': ['skater', 'staker', 'strake', 'streak', 'tasker'], + 'skating': ['gitksan', 'skating', 'takings'], + 'skean': ['skean', 'snake', 'sneak'], + 'skee': ['kees', 'seek', 'skee'], + 'skeel': ['skeel', 'sleek'], + 'skeeling': ['skeeling', 'sleeking'], + 'skeely': ['skeely', 'sleeky'], + 'skeen': ['skeen', 'skene'], + 'skeer': ['esker', 'keres', 'reesk', 'seker', 'skeer', 'skere'], + 'skeery': ['kersey', 'skeery'], + 'skeet': ['keest', 'skeet', 'skete', 'steek'], + 'skeeter': ['skeeter', 'teskere'], + 'skeletin': ['nestlike', 'skeletin'], + 'skelic': ['sickle', 'skelic'], + 'skelp': ['skelp', 'spelk'], + 'skelter': ['kestrel', 'skelter'], + 'skene': ['skeen', 'skene'], + 'skeo': ['skeo', 'soke'], + 'skeptic': ['skeptic', 'spicket'], + 'skere': ['esker', 'keres', 'reesk', 'seker', 'skeer', 'skere'], + 'sketcher': ['resketch', 'sketcher'], + 'skete': ['keest', 'skeet', 'skete', 'steek'], + 'skey': ['skey', 'skye'], + 'skid': ['disk', 'kids', 'skid'], + 'skier': ['kreis', 'skier'], + 'skil': ['lisk', 'silk', 'skil'], + 'skin': ['inks', 'sink', 'skin'], + 'skinch': ['chinks', 'skinch'], + 'skinless': ['silkness', 'sinkless', 'skinless'], + 'skinlike': ['sinklike', 'skinlike'], + 'skip': ['pisk', 'skip'], + 'skippel': ['skippel', 'skipple'], + 'skipple': ['skippel', 'skipple'], + 'skirmish': ['skirmish', 'smirkish'], + 'skirreh': ['shirker', 
'skirreh'], + 'skirret': ['skirret', 'skirter', 'striker'], + 'skirt': ['skirt', 'stirk'], + 'skirter': ['skirret', 'skirter', 'striker'], + 'skirting': ['skirting', 'striking'], + 'skirtingly': ['skirtingly', 'strikingly'], + 'skirty': ['kirsty', 'skirty'], + 'skit': ['kist', 'skit'], + 'skite': ['siket', 'skite'], + 'skiter': ['skiter', 'strike'], + 'skittle': ['kittles', 'skittle'], + 'sklate': ['lasket', 'sklate'], + 'sklater': ['sklater', 'stalker'], + 'sklinter': ['sklinter', 'strinkle'], + 'skoal': ['skoal', 'sloka'], + 'skoo': ['koso', 'skoo', 'sook'], + 'skua': ['kusa', 'skua'], + 'skun': ['skun', 'sunk'], + 'skye': ['skey', 'skye'], + 'sla': ['las', 'sal', 'sla'], + 'slab': ['blas', 'slab'], + 'slacken': ['slacken', 'snackle'], + 'slade': ['leads', 'slade'], + 'slae': ['elsa', 'sale', 'seal', 'slae'], + 'slain': ['sinal', 'slain', 'snail'], + 'slainte': ['elastin', 'salient', 'saltine', 'slainte'], + 'slait': ['alist', 'litas', 'slait', 'talis'], + 'slake': ['alkes', 'sakel', 'slake'], + 'slam': ['alms', 'salm', 'slam'], + 'slamp': ['plasm', 'psalm', 'slamp'], + 'slandering': ['sanderling', 'slandering'], + 'slane': ['ansel', 'slane'], + 'slang': ['glans', 'slang'], + 'slangily': ['signally', 'singally', 'slangily'], + 'slangish': ['slangish', 'slashing'], + 'slangishly': ['slangishly', 'slashingly'], + 'slangster': ['slangster', 'strangles'], + 'slap': ['salp', 'slap'], + 'slape': ['elaps', + 'lapse', + 'lepas', + 'pales', + 'salep', + 'saple', + 'sepal', + 'slape', + 'spale', + 'speal'], + 'slare': ['arles', 'arsle', 'laser', 'seral', 'slare'], + 'slasher': ['reslash', 'slasher'], + 'slashing': ['slangish', 'slashing'], + 'slashingly': ['slangishly', 'slashingly'], + 'slat': ['last', 'salt', 'slat'], + 'slate': ['least', 'setal', 'slate', 'stale', 'steal', 'stela', 'tales'], + 'slater': ['laster', + 'lastre', + 'rastle', + 'relast', + 'resalt', + 'salter', + 'slater', + 'stelar'], + 'slath': ['shalt', 'slath'], + 'slather': ['hastler', 'slather'], + 'slatiness': ['saintless', 'saltiness', 'slatiness', 'stainless'], + 'slating': ['anglist', 'lasting', 'salting', 'slating', 'staling'], + 'slatish': ['saltish', 'slatish'], + 'slatter': ['rattles', 'slatter', 'starlet', 'startle'], + 'slaty': ['lasty', 'salty', 'slaty'], + 'slaughter': ['lethargus', 'slaughter'], + 'slaughterman': ['manslaughter', 'slaughterman'], + 'slaum': ['lamus', 'malus', 'musal', 'slaum'], + 'slave': ['salve', 'selva', 'slave', 'valse'], + 'slaver': ['salver', 'serval', 'slaver', 'versal'], + 'slaverer': ['reserval', 'reversal', 'slaverer'], + 'slavey': ['slavey', 'sylvae'], + 'slavi': ['silva', 'slavi'], + 'slavian': ['salivan', 'slavian'], + 'slavic': ['clavis', 'slavic'], + 'slavonic': ['slavonic', 'volscian'], + 'slay': ['lyas', 'slay'], + 'slayer': ['reslay', 'slayer'], + 'sleave': ['leaves', 'sleave'], + 'sledger': ['redlegs', 'sledger'], + 'slee': ['else', 'lees', 'seel', 'sele', 'slee'], + 'sleech': ['lesche', 'sleech'], + 'sleek': ['skeel', 'sleek'], + 'sleeking': ['skeeling', 'sleeking'], + 'sleeky': ['skeely', 'sleeky'], + 'sleep': ['sleep', 'speel'], + 'sleepless': ['sleepless', 'speelless'], + 'sleepry': ['presley', 'sleepry'], + 'sleet': ['sleet', 'slete', 'steel', 'stele'], + 'sleetiness': ['sleetiness', 'steeliness'], + 'sleeting': ['sleeting', 'steeling'], + 'sleetproof': ['sleetproof', 'steelproof'], + 'sleety': ['sleety', 'steely'], + 'slept': ['slept', 'spelt', 'splet'], + 'slete': ['sleet', 'slete', 'steel', 'stele'], + 'sleuth': ['hustle', 'sleuth'], + 'slew': ['slew', 'wels'], + 
'slewing': ['slewing', 'swingle'], + 'sley': ['lyse', 'sley'], + 'slice': ['sicel', 'slice'], + 'slicht': ['slicht', 'slitch'], + 'slicken': ['slicken', 'snickle'], + 'slicker': ['sickler', 'slicker'], + 'slickery': ['sickerly', 'slickery'], + 'slicking': ['sickling', 'slicking'], + 'slidable': ['sabellid', 'slidable'], + 'slidden': ['slidden', 'sniddle'], + 'slide': ['sidle', 'slide'], + 'slider': ['sidler', 'slider'], + 'sliding': ['sidling', 'sliding'], + 'slidingly': ['sidlingly', 'slidingly'], + 'slifter': ['slifter', 'stifler'], + 'slightily': ['sightlily', 'slightily'], + 'slightiness': ['sightliness', 'slightiness'], + 'slighty': ['sightly', 'slighty'], + 'slime': ['limes', 'miles', 'slime', 'smile'], + 'slimeman': ['melanism', 'slimeman'], + 'slimer': ['slimer', 'smiler'], + 'slimy': ['limsy', 'slimy', 'smily'], + 'sline': ['elsin', 'lenis', 'niels', 'silen', 'sline'], + 'slinge': ['single', 'slinge'], + 'slinger': ['singler', 'slinger'], + 'slink': ['links', 'slink'], + 'slip': ['lisp', 'slip'], + 'slipcoat': ['postical', 'slipcoat'], + 'slipe': ['piles', 'plies', 'slipe', 'spiel', 'spile'], + 'slipover': ['overslip', 'slipover'], + 'slipway': ['slipway', 'waspily'], + 'slit': ['list', 'silt', 'slit'], + 'slitch': ['slicht', 'slitch'], + 'slite': ['islet', 'istle', 'slite', 'stile'], + 'slithy': ['hylist', 'slithy'], + 'slitless': ['listless', 'slitless'], + 'slitlike': ['siltlike', 'slitlike'], + 'slitted': ['slitted', 'stilted'], + 'slitter': ['litster', 'slitter', 'stilter', 'testril'], + 'slitty': ['slitty', 'stilty'], + 'slive': ['elvis', 'levis', 'slive'], + 'sliver': ['silver', 'sliver'], + 'sliverer': ['resilver', 'silverer', 'sliverer'], + 'sliverlike': ['silverlike', 'sliverlike'], + 'slivery': ['silvery', 'slivery'], + 'sloan': ['salon', 'sloan', 'solan'], + 'slod': ['slod', 'sold'], + 'sloe': ['lose', 'sloe', 'sole'], + 'sloka': ['skoal', 'sloka'], + 'slone': ['slone', 'solen'], + 'sloo': ['sloo', 'solo', 'sool'], + 'sloom': ['mools', 'sloom'], + 'sloop': ['polos', 'sloop', 'spool'], + 'slope': ['elops', 'slope', 'spole'], + 'sloper': ['sloper', 'splore'], + 'slot': ['lost', 'lots', 'slot'], + 'slote': ['slote', 'stole'], + 'sloted': ['sloted', 'stoled'], + 'slotter': ['settlor', 'slotter'], + 'slouch': ['holcus', 'lochus', 'slouch'], + 'slouchiness': ['cushionless', 'slouchiness'], + 'slouchy': ['chylous', 'slouchy'], + 'slovenian': ['slovenian', 'venosinal'], + 'slow': ['slow', 'sowl'], + 'slud': ['slud', 'suld'], + 'slue': ['lues', 'slue'], + 'sluit': ['litus', 'sluit', 'tulsi'], + 'slunge': ['gunsel', 'selung', 'slunge'], + 'slurp': ['slurp', 'spurl'], + 'slut': ['lust', 'slut'], + 'sluther': ['hulster', 'hustler', 'sluther'], + 'slutter': ['slutter', 'trustle'], + 'sly': ['lys', 'sly'], + 'sma': ['mas', 'sam', 'sma'], + 'smachrie': ['semiarch', 'smachrie'], + 'smallen': ['ensmall', 'smallen'], + 'smalltime': ['metallism', 'smalltime'], + 'smaltine': ['mentalis', 'smaltine', 'stileman'], + 'smaltite': ['metalist', 'smaltite'], + 'smart': ['smart', 'stram'], + 'smarten': ['sarment', 'smarten'], + 'smashage': ['gamashes', 'smashage'], + 'smeary': ['ramsey', 'smeary'], + 'smectis': ['sectism', 'smectis'], + 'smee': ['mese', 'seem', 'seme', 'smee'], + 'smeech': ['scheme', 'smeech'], + 'smeek': ['meeks', 'smeek'], + 'smeer': ['merse', 'smeer'], + 'smeeth': ['smeeth', 'smethe'], + 'smeller': ['resmell', 'smeller'], + 'smelly': ['mysell', 'smelly'], + 'smelter': ['melters', 'resmelt', 'smelter'], + 'smethe': ['smeeth', 'smethe'], + 'smilax': ['laxism', 'smilax'], + 
'smile': ['limes', 'miles', 'slime', 'smile'], + 'smiler': ['slimer', 'smiler'], + 'smilet': ['mistle', 'smilet'], + 'smiling': ['simling', 'smiling'], + 'smily': ['limsy', 'slimy', 'smily'], + 'sminthian': ['mitannish', 'sminthian'], + 'smirch': ['chrism', 'smirch'], + 'smirkish': ['skirmish', 'smirkish'], + 'smit': ['mist', 'smit', 'stim'], + 'smite': ['metis', 'smite', 'stime', 'times'], + 'smiter': ['merist', 'mister', 'smiter'], + 'smither': ['rhemist', 'smither'], + 'smithian': ['isthmian', 'smithian'], + 'smoker': ['mosker', 'smoker'], + 'smoot': ['moost', 'smoot'], + 'smoother': ['resmooth', 'romeshot', 'smoother'], + 'smoothingly': ['hymnologist', 'smoothingly'], + 'smore': ['meros', 'mores', 'morse', 'sermo', 'smore'], + 'smote': ['moste', 'smote'], + 'smother': ['smother', 'thermos'], + 'smouse': ['mousse', 'smouse'], + 'smouser': ['osmerus', 'smouser'], + 'smuggle': ['muggles', 'smuggle'], + 'smut': ['must', 'smut', 'stum'], + 'smyrniot': ['smyrniot', 'tyronism'], + 'smyrniote': ['myristone', 'smyrniote'], + 'snab': ['nabs', 'snab'], + 'snackle': ['slacken', 'snackle'], + 'snag': ['sang', 'snag'], + 'snagrel': ['sangrel', 'snagrel'], + 'snail': ['sinal', 'slain', 'snail'], + 'snaillike': ['silkaline', 'snaillike'], + 'snaily': ['anisyl', 'snaily'], + 'snaith': ['snaith', 'tahsin'], + 'snake': ['skean', 'snake', 'sneak'], + 'snap': ['snap', 'span'], + 'snape': ['aspen', 'panse', 'snape', 'sneap', 'spane', 'spean'], + 'snaper': ['resnap', 'respan', 'snaper'], + 'snapless': ['snapless', 'spanless'], + 'snapy': ['pansy', 'snapy'], + 'snare': ['anser', 'nares', 'rasen', 'snare'], + 'snarer': ['serran', 'snarer'], + 'snaste': ['assent', 'snaste'], + 'snatch': ['chanst', 'snatch', 'stanch'], + 'snatchable': ['snatchable', 'stanchable'], + 'snatcher': ['resnatch', 'snatcher', 'stancher'], + 'snath': ['shant', 'snath'], + 'snathe': ['athens', 'hasten', 'snathe', 'sneath'], + 'snaw': ['sawn', 'snaw', 'swan'], + 'snead': ['sedan', 'snead'], + 'sneak': ['skean', 'snake', 'sneak'], + 'sneaker': ['keresan', 'sneaker'], + 'sneaksman': ['masskanne', 'sneaksman'], + 'sneap': ['aspen', 'panse', 'snape', 'sneap', 'spane', 'spean'], + 'sneath': ['athens', 'hasten', 'snathe', 'sneath'], + 'sneathe': ['sneathe', 'thesean'], + 'sned': ['send', 'sned'], + 'snee': ['ense', 'esne', 'nese', 'seen', 'snee'], + 'sneer': ['renes', 'sneer'], + 'snew': ['news', 'sewn', 'snew'], + 'snib': ['nibs', 'snib'], + 'snickle': ['slicken', 'snickle'], + 'sniddle': ['slidden', 'sniddle'], + 'snide': ['denis', 'snide'], + 'snig': ['sign', 'sing', 'snig'], + 'snigger': ['serging', 'snigger'], + 'snip': ['snip', 'spin'], + 'snipe': ['penis', 'snipe', 'spine'], + 'snipebill': ['snipebill', 'spinebill'], + 'snipelike': ['snipelike', 'spinelike'], + 'sniper': ['pernis', 'respin', 'sniper'], + 'snipocracy': ['conspiracy', 'snipocracy'], + 'snipper': ['nippers', 'snipper'], + 'snippet': ['snippet', 'stippen'], + 'snipy': ['snipy', 'spiny'], + 'snitcher': ['christen', 'snitcher'], + 'snite': ['inset', 'neist', 'snite', 'stein', 'stine', 'tsine'], + 'snithy': ['shinty', 'snithy'], + 'sniveler': ['ensilver', 'sniveler'], + 'snively': ['snively', 'sylvine'], + 'snob': ['bosn', 'nobs', 'snob'], + 'snocker': ['conkers', 'snocker'], + 'snod': ['snod', 'sond'], + 'snoek': ['snoek', 'snoke', 'soken'], + 'snog': ['snog', 'song'], + 'snoke': ['snoek', 'snoke', 'soken'], + 'snook': ['onkos', 'snook'], + 'snoop': ['snoop', 'spoon'], + 'snooper': ['snooper', 'spooner'], + 'snoopy': ['snoopy', 'spoony'], + 'snoot': ['snoot', 'stoon'], + 
'snore': ['norse', 'noser', 'seron', 'snore'], + 'snorer': ['snorer', 'sorner'], + 'snoring': ['snoring', 'sorning'], + 'snork': ['norsk', 'snork'], + 'snotter': ['snotter', 'stentor', 'torsten'], + 'snout': ['notus', 'snout', 'stoun', 'tonus'], + 'snouter': ['snouter', 'tonsure', 'unstore'], + 'snow': ['snow', 'sown'], + 'snowie': ['nowise', 'snowie'], + 'snowish': ['snowish', 'whisson'], + 'snowy': ['snowy', 'wyson'], + 'snug': ['snug', 'sung'], + 'snup': ['snup', 'spun'], + 'snurp': ['snurp', 'spurn'], + 'snurt': ['snurt', 'turns'], + 'so': ['os', 'so'], + 'soak': ['asok', 'soak', 'soka'], + 'soaker': ['arkose', 'resoak', 'soaker'], + 'soally': ['soally', 'sollya'], + 'soam': ['amos', 'soam', 'soma'], + 'soap': ['asop', 'sapo', 'soap'], + 'soaper': ['resoap', 'soaper'], + 'soar': ['asor', 'rosa', 'soar', 'sora'], + 'sob': ['bos', 'sob'], + 'sobeit': ['setibo', 'sobeit'], + 'sober': ['boser', 'brose', 'sober'], + 'sobralite': ['sobralite', 'strobilae'], + 'soc': ['cos', 'osc', 'soc'], + 'socager': ['corsage', 'socager'], + 'social': ['colias', 'scolia', 'social'], + 'socialite': ['aeolistic', 'socialite'], + 'societal': ['cosalite', 'societal'], + 'societism': ['seismotic', 'societism'], + 'socinian': ['oscinian', 'socinian'], + 'sociobiological': ['biosociological', 'sociobiological'], + 'sociolegal': ['oligoclase', 'sociolegal'], + 'socius': ['scious', 'socius'], + 'socle': ['close', 'socle'], + 'soco': ['coos', 'soco'], + 'socotran': ['ostracon', 'socotran'], + 'socotrine': ['certosino', 'cortisone', 'socotrine'], + 'socratean': ['ostracean', 'socratean'], + 'socratic': ['acrostic', 'sarcotic', 'socratic'], + 'socratical': ['acrostical', 'socratical'], + 'socratically': ['acrostically', 'socratically'], + 'socraticism': ['acrosticism', 'socraticism'], + 'socratism': ['ostracism', 'socratism'], + 'socratize': ['ostracize', 'socratize'], + 'sod': ['dos', 'ods', 'sod'], + 'soda': ['dosa', 'sado', 'soda'], + 'sodalite': ['diastole', 'isolated', 'sodalite', 'solidate'], + 'sodium': ['modius', 'sodium'], + 'sodom': ['dooms', 'sodom'], + 'sodomitic': ['diosmotic', 'sodomitic'], + 'soe': ['oes', 'ose', 'soe'], + 'soft': ['soft', 'stof'], + 'soften': ['oftens', 'soften'], + 'softener': ['resoften', 'softener'], + 'sog': ['gos', 'sog'], + 'soga': ['sago', 'soga'], + 'soger': ['gorse', 'soger'], + 'soh': ['sho', 'soh'], + 'soho': ['shoo', 'soho'], + 'soil': ['lois', 'silo', 'siol', 'soil', 'soli'], + 'soiled': ['isolde', 'soiled'], + 'sojourner': ['resojourn', 'sojourner'], + 'sok': ['kos', 'sok'], + 'soka': ['asok', 'soak', 'soka'], + 'soke': ['skeo', 'soke'], + 'soken': ['snoek', 'snoke', 'soken'], + 'sola': ['also', 'sola'], + 'solacer': ['escolar', 'solacer'], + 'solan': ['salon', 'sloan', 'solan'], + 'solar': ['rosal', 'solar', 'soral'], + 'solaristics': ['scissortail', 'solaristics'], + 'solate': ['lotase', 'osteal', 'solate', 'stolae', 'talose'], + 'sold': ['slod', 'sold'], + 'solder': ['dorsel', 'seldor', 'solder'], + 'solderer': ['resolder', 'solderer'], + 'soldi': ['soldi', 'solid'], + 'soldo': ['soldo', 'solod'], + 'sole': ['lose', 'sloe', 'sole'], + 'solea': ['alose', 'osela', 'solea'], + 'solecist': ['solecist', 'solstice'], + 'solen': ['slone', 'solen'], + 'soleness': ['noseless', 'soleness'], + 'solenial': ['lesional', 'solenial'], + 'solenite': ['noselite', 'solenite'], + 'solenium': ['emulsion', 'solenium'], + 'solenopsis': ['poisonless', 'solenopsis'], + 'solent': ['solent', 'stolen', 'telson'], + 'solentine': ['nelsonite', 'solentine'], + 'soler': ['loser', 'orsel', 'rosel', 
'soler'], + 'solera': ['roseal', 'solera'], + 'soles': ['loess', 'soles'], + 'soli': ['lois', 'silo', 'siol', 'soil', 'soli'], + 'soliative': ['isolative', 'soliative'], + 'solicit': ['colitis', 'solicit'], + 'solicitation': ['coalitionist', 'solicitation'], + 'soliciter': ['resolicit', 'soliciter'], + 'soliciting': ['ignicolist', 'soliciting'], + 'solicitude': ['isodulcite', 'solicitude'], + 'solid': ['soldi', 'solid'], + 'solidate': ['diastole', 'isolated', 'sodalite', 'solidate'], + 'solidus': ['dissoul', 'dulosis', 'solidus'], + 'solilunar': ['lunisolar', 'solilunar'], + 'soliped': ['despoil', 'soliped', 'spoiled'], + 'solitarian': ['sinoatrial', 'solitarian'], + 'solitary': ['royalist', 'solitary'], + 'soliterraneous': ['salinoterreous', 'soliterraneous'], + 'solitude': ['outslide', 'solitude'], + 'sollya': ['soally', 'sollya'], + 'solmizate': ['solmizate', 'zealotism'], + 'solo': ['sloo', 'solo', 'sool'], + 'solod': ['soldo', 'solod'], + 'solon': ['olson', 'solon'], + 'solonic': ['scolion', 'solonic'], + 'soloth': ['soloth', 'tholos'], + 'solotink': ['solotink', 'solotnik'], + 'solotnik': ['solotink', 'solotnik'], + 'solstice': ['solecist', 'solstice'], + 'solum': ['mosul', 'mouls', 'solum'], + 'solute': ['lutose', 'solute', 'tousle'], + 'solutioner': ['resolution', 'solutioner'], + 'soma': ['amos', 'soam', 'soma'], + 'somacule': ['maculose', 'somacule'], + 'somal': ['salmo', 'somal'], + 'somali': ['limosa', 'somali'], + 'somasthenia': ['anhematosis', 'somasthenia'], + 'somatic': ['atomics', 'catoism', 'cosmati', 'osmatic', 'somatic'], + 'somatics': ['acosmist', 'massicot', 'somatics'], + 'somatism': ['osmatism', 'somatism'], + 'somatophyte': ['hepatostomy', 'somatophyte'], + 'somatophytic': ['hypostomatic', 'somatophytic'], + 'somatopleuric': ['micropetalous', 'somatopleuric'], + 'somatopsychic': ['psychosomatic', 'somatopsychic'], + 'somatosplanchnic': ['somatosplanchnic', 'splanchnosomatic'], + 'somatous': ['astomous', 'somatous'], + 'somber': ['somber', 'sombre'], + 'sombre': ['somber', 'sombre'], + 'some': ['meso', 'mose', 'some'], + 'someday': ['samoyed', 'someday'], + 'somers': ['messor', 'mosser', 'somers'], + 'somnambule': ['somnambule', 'summonable'], + 'somnial': ['malison', 'manolis', 'osmanli', 'somnial'], + 'somnopathy': ['phytomonas', 'somnopathy'], + 'somnorific': ['onisciform', 'somnorific'], + 'son': ['ons', 'son'], + 'sonant': ['santon', 'sonant', 'stanno'], + 'sonantic': ['canonist', 'sanction', 'sonantic'], + 'sonar': ['arson', 'saron', 'sonar'], + 'sonatina': ['ansation', 'sonatina'], + 'sond': ['snod', 'sond'], + 'sondation': ['anisodont', 'sondation'], + 'sondeli': ['indoles', 'sondeli'], + 'soneri': ['rosine', 'senior', 'soneri'], + 'song': ['snog', 'song'], + 'songoi': ['isogon', 'songoi'], + 'songy': ['gonys', 'songy'], + 'sonic': ['oscin', 'scion', 'sonic'], + 'sonja': ['janos', 'jason', 'jonas', 'sonja'], + 'sonneratia': ['arsenation', 'senatorian', 'sonneratia'], + 'sonnet': ['sonnet', 'stonen', 'tenson'], + 'sonnetwise': ['sonnetwise', 'swinestone'], + 'sonrai': ['arsino', 'rasion', 'sonrai'], + 'sontag': ['sontag', 'tongas'], + 'soodle': ['dolose', 'oodles', 'soodle'], + 'sook': ['koso', 'skoo', 'sook'], + 'sool': ['sloo', 'solo', 'sool'], + 'soon': ['oons', 'soon'], + 'sooner': ['nooser', 'seroon', 'sooner'], + 'sooter': ['seroot', 'sooter', 'torose'], + 'sooth': ['shoot', 'sooth', 'sotho', 'toosh'], + 'soother': ['orthose', 'reshoot', 'shooter', 'soother'], + 'soothing': ['shooting', 'soothing'], + 'sootiness': ['enostosis', 'sootiness'], + 'sooty': 
['sooty', 'soyot'], + 'sope': ['epos', 'peso', 'pose', 'sope'], + 'soph': ['phos', 'posh', 'shop', 'soph'], + 'sophister': ['posterish', 'prothesis', 'sophister', 'storeship', 'tephrosis'], + 'sophistical': ['postischial', 'sophistical'], + 'sophomore': ['osmophore', 'sophomore'], + 'sopition': ['position', 'sopition'], + 'sopor': ['poros', 'proso', 'sopor', 'spoor'], + 'soprani': ['parison', 'soprani'], + 'sopranist': ['postnaris', 'sopranist'], + 'soprano': ['pronaos', 'soprano'], + 'sora': ['asor', 'rosa', 'soar', 'sora'], + 'sorabian': ['abrasion', 'sorabian'], + 'soral': ['rosal', 'solar', 'soral'], + 'sorbate': ['barotse', 'boaster', 'reboast', 'sorbate'], + 'sorbin': ['insorb', 'sorbin'], + 'sorcer': ['scorer', 'sorcer'], + 'sorchin': ['cornish', 'cronish', 'sorchin'], + 'sorda': ['sarod', 'sorda'], + 'sordes': ['dosser', 'sordes'], + 'sordine': ['indorse', 'ordines', 'siredon', 'sordine'], + 'sordino': ['indoors', 'sordino'], + 'sore': ['eros', 'rose', 'sero', 'sore'], + 'soredia': ['ardoise', 'aroides', 'soredia'], + 'sorediate': ['oestridae', 'ostreidae', 'sorediate'], + 'soredium': ['dimerous', 'soredium'], + 'soree': ['erose', 'soree'], + 'sorefoot': ['footsore', 'sorefoot'], + 'sorehead': ['rosehead', 'sorehead'], + 'sorehon': ['onshore', 'sorehon'], + 'sorema': ['amores', 'ramose', 'sorema'], + 'soricid': ['cirsoid', 'soricid'], + 'soricident': ['discretion', 'soricident'], + 'soricine': ['recision', 'soricine'], + 'sorite': ['restio', 'sorite', 'sortie', 'triose'], + 'sorites': ['rossite', 'sorites'], + 'sornare': ['serrano', 'sornare'], + 'sorner': ['snorer', 'sorner'], + 'sorning': ['snoring', 'sorning'], + 'sororial': ['rosorial', 'sororial'], + 'sorption': ['notropis', 'positron', 'sorption'], + 'sortable': ['sortable', 'storable'], + 'sorted': ['sorted', 'strode'], + 'sorter': ['resort', 'roster', 'sorter', 'storer'], + 'sortie': ['restio', 'sorite', 'sortie', 'triose'], + 'sortilegus': ['sortilegus', 'strigulose'], + 'sortiment': ['sortiment', 'trimstone'], + 'sortly': ['sortly', 'styrol'], + 'sorty': ['sorty', 'story', 'stroy'], + 'sorva': ['savor', 'sorva'], + 'sory': ['rosy', 'sory'], + 'sosia': ['oasis', 'sosia'], + 'sostenuto': ['ostentous', 'sostenuto'], + 'soter': ['roset', 'rotse', 'soter', 'stero', 'store', 'torse'], + 'soterial': ['soterial', 'striolae'], + 'sotho': ['shoot', 'sooth', 'sotho', 'toosh'], + 'sotie': ['sotie', 'toise'], + 'sotnia': ['sotnia', 'tinosa'], + 'sotol': ['sotol', 'stool'], + 'sots': ['sots', 'toss'], + 'sotter': ['sotter', 'testor'], + 'soucar': ['acorus', 'soucar'], + 'souchet': ['souchet', 'techous', 'tousche'], + 'souly': ['lousy', 'souly'], + 'soum': ['soum', 'sumo'], + 'soumansite': ['soumansite', 'stamineous'], + 'sound': ['nodus', 'ounds', 'sound'], + 'sounder': ['resound', 'sounder', 'unrosed'], + 'soup': ['opus', 'soup'], + 'souper': ['poseur', 'pouser', 'souper', 'uprose'], + 'sour': ['ours', 'sour'], + 'source': ['cerous', 'course', 'crouse', 'source'], + 'soured': ['douser', 'soured'], + 'souredness': ['rousedness', 'souredness'], + 'souren': ['souren', 'unsore', 'ursone'], + 'sourer': ['rouser', 'sourer'], + 'souring': ['nigrous', 'rousing', 'souring'], + 'sourly': ['lusory', 'sourly'], + 'soursop': ['psorous', 'soursop', 'sporous'], + 'soury': ['soury', 'yours'], + 'souser': ['serous', 'souser'], + 'souter': ['ouster', 'souter', 'touser', 'trouse'], + 'souterrain': ['souterrain', 'ternarious', 'trouserian'], + 'south': ['shout', 'south'], + 'souther': ['shouter', 'souther'], + 'southerland': ['southerland', 
'southlander'], + 'southing': ['shouting', 'southing'], + 'southlander': ['southerland', 'southlander'], + 'soviet': ['soviet', 'sovite'], + 'sovite': ['soviet', 'sovite'], + 'sowdones': ['sowdones', 'woodness'], + 'sowel': ['sowel', 'sowle'], + 'sower': ['owser', 'resow', 'serow', 'sower', 'swore', 'worse'], + 'sowl': ['slow', 'sowl'], + 'sowle': ['sowel', 'sowle'], + 'sown': ['snow', 'sown'], + 'sowt': ['sowt', 'stow', 'swot', 'wots'], + 'soyot': ['sooty', 'soyot'], + 'spa': ['asp', 'sap', 'spa'], + 'space': ['capes', 'scape', 'space'], + 'spaceless': ['scapeless', 'spaceless'], + 'spacer': ['casper', 'escarp', 'parsec', 'scrape', 'secpar', 'spacer'], + 'spade': ['depas', 'sepad', 'spade'], + 'spader': ['rasped', 'spader', 'spread'], + 'spadiceous': ['dipsaceous', 'spadiceous'], + 'spadone': ['espadon', 'spadone'], + 'spadonic': ['spadonic', 'spondaic', 'spondiac'], + 'spadrone': ['parsoned', 'spadrone'], + 'spae': ['apse', 'pesa', 'spae'], + 'spaer': ['asper', 'parse', 'prase', 'spaer', 'spare', 'spear'], + 'spahi': ['aphis', 'apish', 'hispa', 'saiph', 'spahi'], + 'spaid': ['sapid', 'spaid'], + 'spaik': ['askip', 'spaik'], + 'spairge': ['prisage', 'spairge'], + 'spalacine': ['asclepian', 'spalacine'], + 'spale': ['elaps', + 'lapse', + 'lepas', + 'pales', + 'salep', + 'saple', + 'sepal', + 'slape', + 'spale', + 'speal'], + 'spalt': ['spalt', 'splat'], + 'span': ['snap', 'span'], + 'spancel': ['enclasp', 'spancel'], + 'spane': ['aspen', 'panse', 'snape', 'sneap', 'spane', 'spean'], + 'spanemia': ['paeanism', 'spanemia'], + 'spangler': ['spangler', 'sprangle'], + 'spangolite': ['postgenial', 'spangolite'], + 'spaniel': ['espinal', 'pinales', 'spaniel'], + 'spaniol': ['sanpoil', 'spaniol'], + 'spanless': ['snapless', 'spanless'], + 'spar': ['rasp', 'spar'], + 'sparable': ['parsable', 'prebasal', 'sparable'], + 'spare': ['asper', 'parse', 'prase', 'spaer', 'spare', 'spear'], + 'spareable': ['separable', 'spareable'], + 'sparely': ['parsley', 'pyrales', 'sparely', 'splayer'], + 'sparer': ['parser', 'rasper', 'sparer'], + 'sparerib': ['ribspare', 'sparerib'], + 'sparge': ['gasper', 'sparge'], + 'sparger': ['grasper', 'regrasp', 'sparger'], + 'sparidae': ['paradise', 'sparidae'], + 'sparing': ['aspring', 'rasping', 'sparing'], + 'sparingly': ['raspingly', 'sparingly'], + 'sparingness': ['raspingness', 'sparingness'], + 'sparling': ['laspring', 'sparling', 'springal'], + 'sparoid': ['prasoid', 'sparoid'], + 'sparse': ['passer', 'repass', 'sparse'], + 'spart': ['spart', 'sprat', 'strap', 'traps'], + 'spartanic': ['sacripant', 'spartanic'], + 'sparteine': ['pistareen', 'sparteine'], + 'sparterie': ['periaster', 'sparterie'], + 'spartina': ['aspirant', 'partisan', 'spartina'], + 'spartle': ['palster', 'persalt', 'plaster', 'psalter', 'spartle', 'stapler'], + 'spary': ['raspy', 'spary', 'spray'], + 'spat': ['past', 'spat', 'stap', 'taps'], + 'spate': ['paste', 'septa', 'spate'], + 'spathal': ['asphalt', 'spathal', 'taplash'], + 'spathe': ['spathe', 'thapes'], + 'spathic': ['haptics', 'spathic'], + 'spatling': ['spatling', 'stapling'], + 'spatter': ['spatter', 'tapster'], + 'spattering': ['spattering', 'tapestring'], + 'spattle': ['peltast', 'spattle'], + 'spatular': ['pastural', 'spatular'], + 'spatule': ['pulsate', 'spatule', 'upsteal'], + 'spave': ['spave', 'vespa'], + 'speak': ['sapek', 'speak'], + 'speaker': ['respeak', 'speaker'], + 'speal': ['elaps', + 'lapse', + 'lepas', + 'pales', + 'salep', + 'saple', + 'sepal', + 'slape', + 'spale', + 'speal'], + 'spean': ['aspen', 'panse', 'snape', 
'sneap', 'spane', 'spean'], + 'spear': ['asper', 'parse', 'prase', 'spaer', 'spare', 'spear'], + 'spearman': ['parmesan', 'spearman'], + 'spearmint': ['spearmint', 'spermatin'], + 'speary': ['presay', 'speary'], + 'spec': ['ceps', 'spec'], + 'spectatorial': ['poetastrical', 'spectatorial'], + 'specter': ['respect', 'scepter', 'specter'], + 'spectered': ['sceptered', 'spectered'], + 'spectra': ['precast', 'spectra'], + 'spectral': ['sceptral', 'scraplet', 'spectral'], + 'spectromicroscope': ['microspectroscope', 'spectromicroscope'], + 'spectrotelescope': ['spectrotelescope', 'telespectroscope'], + 'spectrous': ['spectrous', 'susceptor', 'suspector'], + 'spectry': ['precyst', 'sceptry', 'spectry'], + 'specula': ['capsule', 'specula', 'upscale'], + 'specular': ['capsuler', 'specular'], + 'speed': ['pedes', 'speed'], + 'speel': ['sleep', 'speel'], + 'speelless': ['sleepless', 'speelless'], + 'speer': ['peres', 'perse', 'speer', 'spree'], + 'speerity': ['perseity', 'speerity'], + 'speiss': ['sepsis', 'speiss'], + 'spelaean': ['seaplane', 'spelaean'], + 'spelk': ['skelp', 'spelk'], + 'speller': ['presell', 'respell', 'speller'], + 'spelt': ['slept', 'spelt', 'splet'], + 'spenerism': ['primeness', 'spenerism'], + 'speos': ['posse', 'speos'], + 'sperate': ['perates', 'repaste', 'sperate'], + 'sperity': ['pyrites', 'sperity'], + 'sperling': ['sperling', 'springle'], + 'spermalist': ['psalmister', 'spermalist'], + 'spermathecal': ['chapelmaster', 'spermathecal'], + 'spermatid': ['predatism', 'spermatid'], + 'spermatin': ['spearmint', 'spermatin'], + 'spermatogonium': ['protomagnesium', 'spermatogonium'], + 'spermatozoic': ['spermatozoic', 'zoospermatic'], + 'spermiogenesis': ['geissospermine', 'spermiogenesis'], + 'spermocarp': ['carposperm', 'spermocarp'], + 'spet': ['pest', 'sept', 'spet', 'step'], + 'spew': ['spew', 'swep'], + 'sphacelation': ['lipsanotheca', 'sphacelation'], + 'sphecidae': ['cheapside', 'sphecidae'], + 'sphene': ['sephen', 'sphene'], + 'sphenoethmoid': ['ethmosphenoid', 'sphenoethmoid'], + 'sphenoethmoidal': ['ethmosphenoidal', 'sphenoethmoidal'], + 'sphenotic': ['phonetics', 'sphenotic'], + 'spheral': ['plasher', 'spheral'], + 'spheration': ['opisthenar', 'spheration'], + 'sphere': ['herpes', 'hesper', 'sphere'], + 'sphery': ['sphery', 'sypher'], + 'sphyraenid': ['dysphrenia', 'sphyraenid', 'sphyrnidae'], + 'sphyrnidae': ['dysphrenia', 'sphyraenid', 'sphyrnidae'], + 'spica': ['aspic', 'spica'], + 'spicate': ['aseptic', 'spicate'], + 'spice': ['sepic', 'spice'], + 'spicer': ['crepis', 'cripes', 'persic', 'precis', 'spicer'], + 'spiciferous': ['pisciferous', 'spiciferous'], + 'spiciform': ['pisciform', 'spiciform'], + 'spicket': ['skeptic', 'spicket'], + 'spicular': ['scripula', 'spicular'], + 'spiculate': ['euplastic', 'spiculate'], + 'spiculated': ['disculpate', 'spiculated'], + 'spicule': ['clipeus', 'spicule'], + 'spider': ['spider', 'spired', 'spried'], + 'spiderish': ['sidership', 'spiderish'], + 'spiderlike': ['predislike', 'spiderlike'], + 'spiel': ['piles', 'plies', 'slipe', 'spiel', 'spile'], + 'spier': ['siper', 'spier', 'spire'], + 'spikelet': ['spikelet', 'steplike'], + 'spiking': ['pigskin', 'spiking'], + 'spiky': ['pisky', 'spiky'], + 'spile': ['piles', 'plies', 'slipe', 'spiel', 'spile'], + 'spiler': ['lisper', 'pliers', 'sirple', 'spiler'], + 'spiling': ['sipling', 'spiling'], + 'spiloma': ['imposal', 'spiloma'], + 'spilt': ['spilt', 'split'], + 'spin': ['snip', 'spin'], + 'spina': ['pisan', 'sapin', 'spina'], + 'spinae': ['sepian', 'spinae'], + 'spinales': 
['painless', 'spinales'], + 'spinate': ['panties', 'sapient', 'spinate'], + 'spindled': ['spindled', 'splendid'], + 'spindler': ['spindler', 'splinder'], + 'spine': ['penis', 'snipe', 'spine'], + 'spinebill': ['snipebill', 'spinebill'], + 'spinel': ['spinel', 'spline'], + 'spinelike': ['snipelike', 'spinelike'], + 'spinet': ['instep', 'spinet'], + 'spinigerous': ['serpiginous', 'spinigerous'], + 'spinigrade': ['despairing', 'spinigrade'], + 'spinoid': ['spinoid', 'spionid'], + 'spinoneural': ['spinoneural', 'unipersonal'], + 'spinotectal': ['entoplastic', 'spinotectal', 'tectospinal', 'tenoplastic'], + 'spiny': ['snipy', 'spiny'], + 'spionid': ['spinoid', 'spionid'], + 'spiracle': ['calipers', 'spiracle'], + 'spiracula': ['auriscalp', 'spiracula'], + 'spiral': ['prisal', 'spiral'], + 'spiralism': ['misprisal', 'spiralism'], + 'spiraloid': ['spiraloid', 'sporidial'], + 'spiran': ['spiran', 'sprain'], + 'spirant': ['spirant', 'spraint'], + 'spirate': ['piaster', 'piastre', 'raspite', 'spirate', 'traipse'], + 'spire': ['siper', 'spier', 'spire'], + 'spirea': ['aspire', 'paries', 'praise', 'sirpea', 'spirea'], + 'spired': ['spider', 'spired', 'spried'], + 'spirelet': ['epistler', 'spirelet'], + 'spireme': ['emprise', 'imprese', 'premise', 'spireme'], + 'spiritally': ['pistillary', 'spiritally'], + 'spiriter': ['respirit', 'spiriter'], + 'spirometer': ['prisometer', 'spirometer'], + 'spironema': ['mesropian', 'promnesia', 'spironema'], + 'spirt': ['spirt', 'sprit', 'stirp', 'strip'], + 'spirula': ['parulis', 'spirula', 'uprisal'], + 'spit': ['pist', 'spit'], + 'spital': ['alpist', 'pastil', 'spital'], + 'spite': ['septi', 'spite', 'stipe'], + 'spithame': ['aphetism', 'mateship', 'shipmate', 'spithame'], + 'spitter': ['spitter', 'tipster'], + 'splairge': ['aspergil', 'splairge'], + 'splanchnosomatic': ['somatosplanchnic', 'splanchnosomatic'], + 'splasher': ['harpless', 'splasher'], + 'splat': ['spalt', 'splat'], + 'splay': ['palsy', 'splay'], + 'splayed': ['pylades', 'splayed'], + 'splayer': ['parsley', 'pyrales', 'sparely', 'splayer'], + 'spleet': ['pestle', 'spleet'], + 'splender': ['resplend', 'splender'], + 'splendid': ['spindled', 'splendid'], + 'splenium': ['splenium', 'unsimple'], + 'splenolaparotomy': ['laparosplenotomy', 'splenolaparotomy'], + 'splenoma': ['neoplasm', 'pleonasm', 'polesman', 'splenoma'], + 'splenomegalia': ['megalosplenia', 'splenomegalia'], + 'splenonephric': ['phrenosplenic', 'splenonephric', 'splenophrenic'], + 'splenophrenic': ['phrenosplenic', 'splenonephric', 'splenophrenic'], + 'splet': ['slept', 'spelt', 'splet'], + 'splice': ['clipse', 'splice'], + 'spliceable': ['eclipsable', 'spliceable'], + 'splinder': ['spindler', 'splinder'], + 'spline': ['spinel', 'spline'], + 'split': ['spilt', 'split'], + 'splitter': ['splitter', 'striplet'], + 'splore': ['sloper', 'splore'], + 'spogel': ['gospel', 'spogel'], + 'spoil': ['polis', 'spoil'], + 'spoilage': ['pelasgoi', 'spoilage'], + 'spoilation': ['positional', 'spoilation', 'spoliation'], + 'spoiled': ['despoil', 'soliped', 'spoiled'], + 'spoiler': ['leporis', 'spoiler'], + 'spoilment': ['simpleton', 'spoilment'], + 'spoilt': ['pistol', 'postil', 'spoilt'], + 'spole': ['elops', 'slope', 'spole'], + 'spoliation': ['positional', 'spoilation', 'spoliation'], + 'spondaic': ['spadonic', 'spondaic', 'spondiac'], + 'spondiac': ['spadonic', 'spondaic', 'spondiac'], + 'spongily': ['posingly', 'spongily'], + 'sponsal': ['plasson', 'sponsal'], + 'sponsalia': ['passional', 'sponsalia'], + 'spool': ['polos', 'sloop', 'spool'], + 
'spoon': ['snoop', 'spoon'], + 'spooner': ['snooper', 'spooner'], + 'spoony': ['snoopy', 'spoony'], + 'spoonyism': ['spoonyism', 'symposion'], + 'spoor': ['poros', 'proso', 'sopor', 'spoor'], + 'spoot': ['spoot', 'stoop'], + 'sporangia': ['agapornis', 'sporangia'], + 'spore': ['poser', 'prose', 'ropes', 'spore'], + 'sporidial': ['spiraloid', 'sporidial'], + 'sporification': ['antisoporific', 'prosification', 'sporification'], + 'sporogeny': ['gynospore', 'sporogeny'], + 'sporoid': ['psoroid', 'sporoid'], + 'sporotrichum': ['sporotrichum', 'trichosporum'], + 'sporous': ['psorous', 'soursop', 'sporous'], + 'sporozoic': ['sporozoic', 'zoosporic'], + 'sport': ['sport', 'strop'], + 'sporter': ['sporter', 'strepor'], + 'sportula': ['postural', 'pulsator', 'sportula'], + 'sportulae': ['opulaster', 'sportulae', 'sporulate'], + 'sporulate': ['opulaster', 'sportulae', 'sporulate'], + 'sporule': ['leprous', 'pelorus', 'sporule'], + 'sposhy': ['hyssop', 'phossy', 'sposhy'], + 'spot': ['post', 'spot', 'stop', 'tops'], + 'spotless': ['postless', 'spotless', 'stopless'], + 'spotlessness': ['spotlessness', 'stoplessness'], + 'spotlike': ['postlike', 'spotlike'], + 'spottedly': ['spottedly', 'spotteldy'], + 'spotteldy': ['spottedly', 'spotteldy'], + 'spotter': ['protest', 'spotter'], + 'spouse': ['esopus', 'spouse'], + 'spout': ['spout', 'stoup'], + 'spouter': ['petrous', 'posture', 'proetus', 'proteus', 'septuor', 'spouter'], + 'sprag': ['grasp', 'sprag'], + 'sprain': ['spiran', 'sprain'], + 'spraint': ['spirant', 'spraint'], + 'sprangle': ['spangler', 'sprangle'], + 'sprat': ['spart', 'sprat', 'strap', 'traps'], + 'spray': ['raspy', 'spary', 'spray'], + 'sprayer': ['respray', 'sprayer'], + 'spread': ['rasped', 'spader', 'spread'], + 'spreadboard': ['broadspread', 'spreadboard'], + 'spreader': ['respread', 'spreader'], + 'spreadover': ['overspread', 'spreadover'], + 'spree': ['peres', 'perse', 'speer', 'spree'], + 'spret': ['prest', 'spret'], + 'spried': ['spider', 'spired', 'spried'], + 'sprier': ['risper', 'sprier'], + 'spriest': ['persist', 'spriest'], + 'springal': ['laspring', 'sparling', 'springal'], + 'springe': ['presign', 'springe'], + 'springer': ['respring', 'springer'], + 'springhead': ['headspring', 'springhead'], + 'springhouse': ['springhouse', 'surgeonship'], + 'springle': ['sperling', 'springle'], + 'sprit': ['spirt', 'sprit', 'stirp', 'strip'], + 'sprite': ['priest', 'pteris', 'sprite', 'stripe'], + 'spritehood': ['priesthood', 'spritehood'], + 'sproat': ['asport', 'pastor', 'sproat'], + 'sprocket': ['prestock', 'sprocket'], + 'sprout': ['sprout', 'stroup', 'stupor'], + 'sprouter': ['posturer', 'resprout', 'sprouter'], + 'sprouting': ['outspring', 'sprouting'], + 'sprue': ['purse', 'resup', 'sprue', 'super'], + 'spruer': ['purser', 'spruer'], + 'spruit': ['purist', 'spruit', 'uprist', 'upstir'], + 'spun': ['snup', 'spun'], + 'spunkie': ['spunkie', 'unspike'], + 'spuriae': ['spuriae', 'uparise', 'upraise'], + 'spurl': ['slurp', 'spurl'], + 'spurlet': ['purslet', 'spurlet', 'spurtle'], + 'spurn': ['snurp', 'spurn'], + 'spurt': ['spurt', 'turps'], + 'spurtive': ['spurtive', 'upstrive'], + 'spurtle': ['purslet', 'spurlet', 'spurtle'], + 'sputa': ['sputa', 'staup', 'stupa'], + 'sputumary': ['sputumary', 'sumptuary'], + 'sputumous': ['sputumous', 'sumptuous'], + 'spyer': ['pryse', 'spyer'], + 'spyros': ['prossy', 'spyros'], + 'squail': ['squail', 'squali'], + 'squali': ['squail', 'squali'], + 'squamatine': ['antimasque', 'squamatine'], + 'squame': ['masque', 'squame', 'squeam'], + 'squameous': 
['squameous', 'squeamous'], + 'squamopetrosal': ['petrosquamosal', 'squamopetrosal'], + 'squamosoparietal': ['parietosquamosal', 'squamosoparietal'], + 'squareman': ['marquesan', 'squareman'], + 'squeaker': ['resqueak', 'squeaker'], + 'squeal': ['lasque', 'squeal'], + 'squeam': ['masque', 'squame', 'squeam'], + 'squeamous': ['squameous', 'squeamous'], + 'squillian': ['nisqualli', 'squillian'], + 'squire': ['risque', 'squire'], + 'squiret': ['querist', 'squiret'], + 'squit': ['quits', 'squit'], + 'sramana': ['ramanas', 'sramana'], + 'sri': ['sir', 'sri'], + 'srivatsan': ['ravissant', 'srivatsan'], + 'ssi': ['sis', 'ssi'], + 'ssu': ['ssu', 'sus'], + 'staab': ['basta', 'staab'], + 'stab': ['bast', 'bats', 'stab'], + 'stabile': ['astilbe', 'bestial', 'blastie', 'stabile'], + 'stable': ['ablest', 'stable', 'tables'], + 'stableful': ['bullfeast', 'stableful'], + 'stabler': ['blaster', 'reblast', 'stabler'], + 'stabling': ['blasting', 'stabling'], + 'stably': ['blasty', 'stably'], + 'staccato': ['staccato', 'stoccata'], + 'stacey': ['cytase', 'stacey'], + 'stacher': ['stacher', 'thraces'], + 'stacker': ['restack', 'stacker'], + 'stackman': ['stackman', 'tacksman'], + 'stacy': ['stacy', 'styca'], + 'stade': ['sedat', 'stade', 'stead'], + 'stadic': ['dicast', 'stadic'], + 'stadium': ['dumaist', 'stadium'], + 'staffer': ['restaff', 'staffer'], + 'stag': ['gast', 'stag'], + 'stager': ['gaster', 'stager'], + 'stagily': ['stagily', 'stygial'], + 'stagnation': ['antagonist', 'stagnation'], + 'stagnum': ['mustang', 'stagnum'], + 'stain': ['saint', 'satin', 'stain'], + 'stainable': ['balanites', 'basaltine', 'stainable'], + 'stainer': ['asterin', 'eranist', 'restain', 'stainer', 'starnie', 'stearin'], + 'stainful': ['inflatus', 'stainful'], + 'stainless': ['saintless', 'saltiness', 'slatiness', 'stainless'], + 'staio': ['sitao', 'staio'], + 'stair': ['arist', + 'astir', + 'sitar', + 'stair', + 'stria', + 'tarsi', + 'tisar', + 'trias'], + 'staircase': ['caesarist', 'staircase'], + 'staired': ['astride', 'diaster', 'disrate', 'restiad', 'staired'], + 'staithman': ['staithman', 'thanatism'], + 'staiver': ['staiver', 'taivers'], + 'stake': ['skate', 'stake', 'steak'], + 'staker': ['skater', 'staker', 'strake', 'streak', 'tasker'], + 'stalagmitic': ['stalagmitic', 'stigmatical'], + 'stale': ['least', 'setal', 'slate', 'stale', 'steal', 'stela', 'tales'], + 'staling': ['anglist', 'lasting', 'salting', 'slating', 'staling'], + 'stalker': ['sklater', 'stalker'], + 'staller': ['staller', 'stellar'], + 'stam': ['mast', 'mats', 'stam'], + 'stamen': ['mantes', 'stamen'], + 'stamin': ['manist', 'mantis', 'matins', 'stamin'], + 'stamina': ['amanist', 'stamina'], + 'staminal': ['staminal', 'tailsman', 'talisman'], + 'staminate': ['emanatist', 'staminate', 'tasmanite'], + 'stamineous': ['soumansite', 'stamineous'], + 'staminode': ['ademonist', 'demoniast', 'staminode'], + 'stammer': ['stammer', 'stremma'], + 'stampede': ['stampede', 'stepdame'], + 'stamper': ['restamp', 'stamper'], + 'stampian': ['mainpast', 'mantispa', 'panamist', 'stampian'], + 'stan': ['nast', 'sant', 'stan'], + 'stance': ['ascent', 'secant', 'stance'], + 'stanch': ['chanst', 'snatch', 'stanch'], + 'stanchable': ['snatchable', 'stanchable'], + 'stancher': ['resnatch', 'snatcher', 'stancher'], + 'stand': ['dasnt', 'stand'], + 'standage': ['dagestan', 'standage'], + 'standee': ['edestan', 'standee'], + 'stander': ['stander', 'sternad'], + 'standout': ['outstand', 'standout'], + 'standstill': ['standstill', 'stillstand'], + 'stane': ['antes', 'nates', 
'stane', 'stean'], + 'stang': ['angst', 'stang', 'tangs'], + 'stangeria': ['agrestian', 'gerastian', 'stangeria'], + 'stanine': ['ensaint', 'stanine'], + 'stanly': ['nylast', 'stanly'], + 'stanno': ['santon', 'sonant', 'stanno'], + 'stap': ['past', 'spat', 'stap', 'taps'], + 'staple': ['pastel', 'septal', 'staple'], + 'stapler': ['palster', 'persalt', 'plaster', 'psalter', 'spartle', 'stapler'], + 'stapling': ['spatling', 'stapling'], + 'star': ['sart', 'star', 'stra', 'tars', 'tsar'], + 'starch': ['scarth', 'scrath', 'starch'], + 'stardom': ['stardom', 'tsardom'], + 'stare': ['aster', 'serta', 'stare', 'strae', 'tarse', 'teras'], + 'staree': ['asteer', + 'easter', + 'eastre', + 'reseat', + 'saeter', + 'seater', + 'staree', + 'teaser', + 'teresa'], + 'starer': ['arrest', 'astrer', 'raster', 'starer'], + 'starful': ['flustra', 'starful'], + 'staring': ['gastrin', 'staring'], + 'staringly': ['staringly', 'strayling'], + 'stark': ['karst', 'skart', 'stark'], + 'starky': ['starky', 'straky'], + 'starlet': ['rattles', 'slatter', 'starlet', 'startle'], + 'starlit': ['starlit', 'trisalt'], + 'starlite': ['starlite', 'taistrel'], + 'starnel': ['saltern', 'starnel', 'sternal'], + 'starnie': ['asterin', 'eranist', 'restain', 'stainer', 'starnie', 'stearin'], + 'starnose': ['assentor', 'essorant', 'starnose'], + 'starship': ['starship', 'tsarship'], + 'starshot': ['shotstar', 'starshot'], + 'starter': ['restart', 'starter'], + 'startle': ['rattles', 'slatter', 'starlet', 'startle'], + 'starve': ['starve', 'staver', 'strave', 'tavers', 'versta'], + 'starwise': ['starwise', 'waitress'], + 'stary': ['satyr', 'stary', 'stray', 'trasy'], + 'stases': ['assets', 'stases'], + 'stasis': ['assist', 'stasis'], + 'statable': ['statable', 'tastable'], + 'state': ['state', 'taste', 'tates', 'testa'], + 'stated': ['stated', 'tasted'], + 'stateful': ['stateful', 'tasteful'], + 'statefully': ['statefully', 'tastefully'], + 'statefulness': ['statefulness', 'tastefulness'], + 'stateless': ['stateless', 'tasteless'], + 'statelich': ['athletics', 'statelich'], + 'stately': ['stately', 'stylate'], + 'statement': ['statement', 'testament'], + 'stater': ['stater', 'taster', 'testar'], + 'statesider': ['dissertate', 'statesider'], + 'static': ['static', 'sticta'], + 'statice': ['etacist', 'statice'], + 'stational': ['saltation', 'stational'], + 'stationarily': ['antiroyalist', 'stationarily'], + 'stationer': ['nitrosate', 'stationer'], + 'statoscope': ['septocosta', 'statoscope'], + 'statue': ['astute', 'statue'], + 'stature': ['stature', 'stauter'], + 'staumer': ['staumer', 'strumae'], + 'staun': ['staun', 'suant'], + 'staunch': ['canthus', 'staunch'], + 'staup': ['sputa', 'staup', 'stupa'], + 'staurion': ['staurion', 'sutorian'], + 'stauter': ['stature', 'stauter'], + 'stave': ['stave', 'vesta'], + 'staver': ['starve', 'staver', 'strave', 'tavers', 'versta'], + 'staw': ['sawt', 'staw', 'swat', 'taws', 'twas', 'wast'], + 'stawn': ['stawn', 'wasnt'], + 'stayable': ['stayable', 'teasably'], + 'stayed': ['stayed', 'steady'], + 'stayer': ['atresy', 'estray', 'reasty', 'stayer'], + 'staynil': ['nastily', 'saintly', 'staynil'], + 'stchi': ['sitch', 'stchi', 'stich'], + 'stead': ['sedat', 'stade', 'stead'], + 'steady': ['stayed', 'steady'], + 'steak': ['skate', 'stake', 'steak'], + 'steal': ['least', 'setal', 'slate', 'stale', 'steal', 'stela', 'tales'], + 'stealer': ['realest', 'reslate', 'resteal', 'stealer', 'teasler'], + 'stealing': ['galenist', 'genitals', 'stealing'], + 'stealy': ['alytes', 'astely', 'lysate', 'stealy'], + 
'steam': ['steam', 'stema'], + 'steaming': ['misagent', 'steaming'], + 'stean': ['antes', 'nates', 'stane', 'stean'], + 'stearic': ['atresic', 'stearic'], + 'stearin': ['asterin', 'eranist', 'restain', 'stainer', 'starnie', 'stearin'], + 'stearone': ['orestean', 'resonate', 'stearone'], + 'stearyl': ['saltery', 'stearyl'], + 'steatin': ['atenist', 'instate', 'satient', 'steatin'], + 'steatoma': ['atmostea', 'steatoma'], + 'steatornis': ['steatornis', 'treasonist'], + 'stech': ['chest', 'stech'], + 'steek': ['keest', 'skeet', 'skete', 'steek'], + 'steel': ['sleet', 'slete', 'steel', 'stele'], + 'steeler': ['reestle', 'resteel', 'steeler'], + 'steeliness': ['sleetiness', 'steeliness'], + 'steeling': ['sleeting', 'steeling'], + 'steelproof': ['sleetproof', 'steelproof'], + 'steely': ['sleety', 'steely'], + 'steen': ['steen', 'teens', 'tense'], + 'steep': ['peste', 'steep'], + 'steeper': ['estrepe', 'resteep', 'steeper'], + 'steepy': ['steepy', 'typees'], + 'steer': ['ester', + 'estre', + 'reest', + 'reset', + 'steer', + 'stere', + 'stree', + 'terse', + 'tsere'], + 'steerer': ['reester', 'steerer'], + 'steering': ['energist', 'steering'], + 'steerling': ['esterling', 'steerling'], + 'steeve': ['steeve', 'vestee'], + 'stefan': ['fasten', 'nefast', 'stefan'], + 'steg': ['gest', 'steg'], + 'stegodon': ['dogstone', 'stegodon'], + 'steid': ['deist', 'steid'], + 'steigh': ['gesith', 'steigh'], + 'stein': ['inset', 'neist', 'snite', 'stein', 'stine', 'tsine'], + 'stela': ['least', 'setal', 'slate', 'stale', 'steal', 'stela', 'tales'], + 'stelae': ['ateles', 'saltee', 'sealet', 'stelae', 'teasel'], + 'stelai': ['isleta', 'litsea', 'salite', 'stelai'], + 'stelar': ['laster', + 'lastre', + 'rastle', + 'relast', + 'resalt', + 'salter', + 'slater', + 'stelar'], + 'stele': ['sleet', 'slete', 'steel', 'stele'], + 'stella': ['sallet', 'stella', 'talles'], + 'stellar': ['staller', 'stellar'], + 'stellaria': ['lateralis', 'stellaria'], + 'stema': ['steam', 'stema'], + 'stemlike': ['meletski', 'stemlike'], + 'sten': ['nest', 'sent', 'sten'], + 'stenar': ['astern', 'enstar', 'stenar', 'sterna'], + 'stencil': ['lentisc', 'scintle', 'stencil'], + 'stenciler': ['crestline', 'stenciler'], + 'stenion': ['stenion', 'tension'], + 'steno': ['onset', 'seton', 'steno', 'stone'], + 'stenopaic': ['aspection', 'stenopaic'], + 'stenosis': ['sisseton', 'stenosis'], + 'stenotic': ['stenotic', 'tonetics'], + 'stentor': ['snotter', 'stentor', 'torsten'], + 'step': ['pest', 'sept', 'spet', 'step'], + 'stepaunt': ['nettapus', 'stepaunt'], + 'stepbairn': ['breastpin', 'stepbairn'], + 'stepdame': ['stampede', 'stepdame'], + 'stephana': ['pheasant', 'stephana'], + 'stephanic': ['cathepsin', 'stephanic'], + 'steplike': ['spikelet', 'steplike'], + 'sterculia': ['sterculia', 'urticales'], + 'stere': ['ester', + 'estre', + 'reest', + 'reset', + 'steer', + 'stere', + 'stree', + 'terse', + 'tsere'], + 'stereograph': ['preshortage', 'stereograph'], + 'stereometric': ['crestmoreite', 'stereometric'], + 'stereophotograph': ['photostereograph', 'stereophotograph'], + 'stereotelescope': ['stereotelescope', 'telestereoscope'], + 'stereotomic': ['osteometric', 'stereotomic'], + 'stereotomical': ['osteometrical', 'stereotomical'], + 'stereotomy': ['osteometry', 'stereotomy'], + 'steric': ['certis', 'steric'], + 'sterigma': ['gemarist', 'magister', 'sterigma'], + 'sterigmata': ['magistrate', 'sterigmata'], + 'sterile': ['leister', 'sterile'], + 'sterilize': ['listerize', 'sterilize'], + 'sterin': ['estrin', 'insert', 'sinter', 'sterin', 'triens'], + 
'sterlet': ['settler', 'sterlet', 'trestle'], + 'stern': ['ernst', 'stern'], + 'sterna': ['astern', 'enstar', 'stenar', 'sterna'], + 'sternad': ['stander', 'sternad'], + 'sternage': ['estrange', 'segreant', 'sergeant', 'sternage'], + 'sternal': ['saltern', 'starnel', 'sternal'], + 'sternalis': ['sternalis', 'trainless'], + 'sternite': ['insetter', 'interest', 'interset', 'sternite'], + 'sterno': ['nestor', 'sterno', 'stoner', 'strone', 'tensor'], + 'sternocostal': ['costosternal', 'sternocostal'], + 'sternovertebral': ['sternovertebral', 'vertebrosternal'], + 'stero': ['roset', 'rotse', 'soter', 'stero', 'store', 'torse'], + 'steroid': ['oestrid', 'steroid', 'storied'], + 'sterol': ['relost', 'reslot', 'rostel', 'sterol', 'torsel'], + 'stert': ['stert', 'stret', 'trest'], + 'sterve': ['revest', 'servet', 'sterve', 'verset', 'vester'], + 'stet': ['sett', 'stet', 'test'], + 'stevan': ['stevan', 'svante'], + 'stevel': ['stevel', 'svelte'], + 'stevia': ['itaves', 'stevia'], + 'stew': ['stew', 'west'], + 'stewart': ['stewart', 'swatter'], + 'stewed': ['stewed', 'wedset'], + 'stewy': ['stewy', 'westy'], + 'stey': ['stey', 'yest'], + 'sthenia': ['sethian', 'sthenia'], + 'stich': ['sitch', 'stchi', 'stich'], + 'stichid': ['distich', 'stichid'], + 'sticker': ['rickets', 'sticker'], + 'stickler': ['stickler', 'strickle'], + 'sticta': ['static', 'sticta'], + 'stife': ['feist', 'stife'], + 'stiffener': ['restiffen', 'stiffener'], + 'stifle': ['itself', 'stifle'], + 'stifler': ['slifter', 'stifler'], + 'stigmai': ['imagist', 'stigmai'], + 'stigmatical': ['stalagmitic', 'stigmatical'], + 'stilbene': ['nebelist', 'stilbene', 'tensible'], + 'stile': ['islet', 'istle', 'slite', 'stile'], + 'stileman': ['mentalis', 'smaltine', 'stileman'], + 'stillage': ['legalist', 'stillage'], + 'stiller': ['stiller', 'trellis'], + 'stillstand': ['standstill', 'stillstand'], + 'stilted': ['slitted', 'stilted'], + 'stilter': ['litster', 'slitter', 'stilter', 'testril'], + 'stilty': ['slitty', 'stilty'], + 'stim': ['mist', 'smit', 'stim'], + 'stime': ['metis', 'smite', 'stime', 'times'], + 'stimulancy': ['stimulancy', 'unmystical'], + 'stimy': ['misty', 'stimy'], + 'stine': ['inset', 'neist', 'snite', 'stein', 'stine', 'tsine'], + 'stinge': ['ingest', 'signet', 'stinge'], + 'stinger': ['resting', 'stinger'], + 'stinker': ['kirsten', 'kristen', 'stinker'], + 'stinkstone': ['knottiness', 'stinkstone'], + 'stinted': ['dentist', 'distent', 'stinted'], + 'stion': ['sinto', 'stion'], + 'stipa': ['piast', 'stipa', 'tapis'], + 'stipe': ['septi', 'spite', 'stipe'], + 'stipel': ['pistle', 'stipel'], + 'stippen': ['snippet', 'stippen'], + 'stipula': ['paulist', 'stipula'], + 'stir': ['rist', 'stir'], + 'stirk': ['skirt', 'stirk'], + 'stirp': ['spirt', 'sprit', 'stirp', 'strip'], + 'stitcher': ['restitch', 'stitcher'], + 'stiver': ['stiver', 'strive', 'verist'], + 'stoa': ['oast', 'stoa', 'taos'], + 'stoach': ['stoach', 'stocah'], + 'stoat': ['stoat', 'toast'], + 'stoater': ['retoast', 'rosetta', 'stoater', 'toaster'], + 'stocah': ['stoach', 'stocah'], + 'stoccata': ['staccato', 'stoccata'], + 'stocker': ['restock', 'stocker'], + 'stoep': ['estop', 'stoep', 'stope'], + 'stof': ['soft', 'stof'], + 'stog': ['stog', 'togs'], + 'stogie': ['egoist', 'stogie'], + 'stoic': ['ostic', 'sciot', 'stoic'], + 'stoically': ['callosity', 'stoically'], + 'stoker': ['stoker', 'stroke'], + 'stolae': ['lotase', 'osteal', 'solate', 'stolae', 'talose'], + 'stole': ['slote', 'stole'], + 'stoled': ['sloted', 'stoled'], + 'stolen': ['solent', 'stolen', 
'telson'], + 'stoma': ['atmos', 'stoma', 'tomas'], + 'stomatode': ['mootstead', 'stomatode'], + 'stomatomy': ['mastotomy', 'stomatomy'], + 'stomatopoda': ['podostomata', 'stomatopoda'], + 'stomatopodous': ['podostomatous', 'stomatopodous'], + 'stomper': ['pomster', 'stomper'], + 'stone': ['onset', 'seton', 'steno', 'stone'], + 'stonebird': ['birdstone', 'stonebird'], + 'stonebreak': ['breakstone', 'stonebreak'], + 'stoned': ['doesnt', 'stoned'], + 'stonegall': ['gallstone', 'stonegall'], + 'stonehand': ['handstone', 'stonehand'], + 'stonehead': ['headstone', 'stonehead'], + 'stonen': ['sonnet', 'stonen', 'tenson'], + 'stoner': ['nestor', 'sterno', 'stoner', 'strone', 'tensor'], + 'stonewood': ['stonewood', 'woodstone'], + 'stong': ['stong', 'tongs'], + 'stonker': ['stonker', 'storken'], + 'stoof': ['foots', 'sfoot', 'stoof'], + 'stool': ['sotol', 'stool'], + 'stoon': ['snoot', 'stoon'], + 'stoop': ['spoot', 'stoop'], + 'stop': ['post', 'spot', 'stop', 'tops'], + 'stopback': ['backstop', 'stopback'], + 'stope': ['estop', 'stoep', 'stope'], + 'stoper': ['poster', 'presto', 'repost', 'respot', 'stoper'], + 'stoping': ['posting', 'stoping'], + 'stopless': ['postless', 'spotless', 'stopless'], + 'stoplessness': ['spotlessness', 'stoplessness'], + 'stoppeur': ['pteropus', 'stoppeur'], + 'storable': ['sortable', 'storable'], + 'storage': ['storage', 'tagsore'], + 'store': ['roset', 'rotse', 'soter', 'stero', 'store', 'torse'], + 'storeen': ['enstore', 'estrone', 'storeen', 'tornese'], + 'storeman': ['monaster', 'monstera', 'nearmost', 'storeman'], + 'storer': ['resort', 'roster', 'sorter', 'storer'], + 'storeship': ['posterish', 'prothesis', 'sophister', 'storeship', 'tephrosis'], + 'storesman': ['nosesmart', 'storesman'], + 'storge': ['groset', 'storge'], + 'storiate': ['astroite', 'ostraite', 'storiate'], + 'storied': ['oestrid', 'steroid', 'storied'], + 'storier': ['roister', 'storier'], + 'stork': ['stork', 'torsk'], + 'storken': ['stonker', 'storken'], + 'storm': ['storm', 'strom'], + 'stormwind': ['stormwind', 'windstorm'], + 'story': ['sorty', 'story', 'stroy'], + 'stot': ['stot', 'tost'], + 'stotter': ['stotter', 'stretto'], + 'stoun': ['notus', 'snout', 'stoun', 'tonus'], + 'stoup': ['spout', 'stoup'], + 'stour': ['roust', 'rusot', 'stour', 'sutor', 'torus'], + 'stouring': ['rousting', 'stouring'], + 'stoutly': ['stoutly', 'tylotus'], + 'stove': ['ovest', 'stove'], + 'stover': ['stover', 'strove'], + 'stow': ['sowt', 'stow', 'swot', 'wots'], + 'stowable': ['bestowal', 'stowable'], + 'stower': ['restow', 'stower', 'towser', 'worset'], + 'stra': ['sart', 'star', 'stra', 'tars', 'tsar'], + 'strad': ['darst', 'darts', 'strad'], + 'stradine': ['stradine', 'strained', 'tarnside'], + 'strae': ['aster', 'serta', 'stare', 'strae', 'tarse', 'teras'], + 'strafe': ['farset', 'faster', 'strafe'], + 'stragular': ['gastrular', 'stragular'], + 'straighten': ['shattering', 'straighten'], + 'straightener': ['restraighten', 'straightener'], + 'straightup': ['straightup', 'upstraight'], + 'straik': ['rastik', 'sarkit', 'straik'], + 'strain': ['instar', 'santir', 'strain'], + 'strained': ['stradine', 'strained', 'tarnside'], + 'strainer': ['restrain', 'strainer', 'transire'], + 'strainerman': ['strainerman', 'transmarine'], + 'straint': ['straint', 'transit', 'tristan'], + 'strait': ['artist', 'strait', 'strati'], + 'strake': ['skater', 'staker', 'strake', 'streak', 'tasker'], + 'straky': ['starky', 'straky'], + 'stram': ['smart', 'stram'], + 'strange': ['angster', 'garnets', 'nagster', 'strange'], + 
'strangles': ['slangster', 'strangles'], + 'strap': ['spart', 'sprat', 'strap', 'traps'], + 'strapless': ['psaltress', 'strapless'], + 'strata': ['astart', 'strata'], + 'strategi': ['strategi', 'strigate'], + 'strath': ['strath', 'thrast'], + 'strati': ['artist', 'strait', 'strati'], + 'stratic': ['astrict', 'cartist', 'stratic'], + 'stratonic': ['narcotist', 'stratonic'], + 'stratonical': ['intracostal', 'stratonical'], + 'strave': ['starve', 'staver', 'strave', 'tavers', 'versta'], + 'straw': ['straw', 'swart', 'warst'], + 'strawy': ['strawy', 'swarty'], + 'stray': ['satyr', 'stary', 'stray', 'trasy'], + 'strayling': ['staringly', 'strayling'], + 'stre': ['rest', 'sert', 'stre'], + 'streak': ['skater', 'staker', 'strake', 'streak', 'tasker'], + 'streakily': ['satyrlike', 'streakily'], + 'stream': ['martes', 'master', 'remast', 'stream'], + 'streamer': ['masterer', 'restream', 'streamer'], + 'streamful': ['masterful', 'streamful'], + 'streamhead': ['headmaster', 'headstream', 'streamhead'], + 'streaming': ['germanist', 'streaming'], + 'streamless': ['masterless', 'streamless'], + 'streamlike': ['masterlike', 'streamlike'], + 'streamline': ['eternalism', 'streamline'], + 'streamling': ['masterling', 'streamling'], + 'streamside': ['mediatress', 'streamside'], + 'streamwort': ['masterwort', 'streamwort'], + 'streamy': ['mastery', 'streamy'], + 'stree': ['ester', + 'estre', + 'reest', + 'reset', + 'steer', + 'stere', + 'stree', + 'terse', + 'tsere'], + 'streek': ['streek', 'streke'], + 'streel': ['lester', 'selter', 'streel'], + 'streen': ['ernest', 'nester', 'resent', 'streen'], + 'streep': ['pester', 'preset', 'restep', 'streep'], + 'street': ['retest', 'setter', 'street', 'tester'], + 'streetcar': ['scatterer', 'streetcar'], + 'streke': ['streek', 'streke'], + 'strelitz': ['strelitz', 'streltzi'], + 'streltzi': ['strelitz', 'streltzi'], + 'stremma': ['stammer', 'stremma'], + 'strengthener': ['restrengthen', 'strengthener'], + 'strepen': ['penster', 'present', 'serpent', 'strepen'], + 'strepera': ['pasterer', 'strepera'], + 'strepor': ['sporter', 'strepor'], + 'strepsinema': ['esperantism', 'strepsinema'], + 'streptothricosis': ['streptothricosis', 'streptotrichosis'], + 'streptotrichosis': ['streptothricosis', 'streptotrichosis'], + 'stresser': ['restress', 'stresser'], + 'stret': ['stert', 'stret', 'trest'], + 'stretcher': ['restretch', 'stretcher'], + 'stretcherman': ['stretcherman', 'trenchmaster'], + 'stretto': ['stotter', 'stretto'], + 'strew': ['strew', 'trews', 'wrest'], + 'strewer': ['strewer', 'wrester'], + 'strey': ['resty', 'strey'], + 'streyne': ['streyne', 'styrene', 'yestern'], + 'stria': ['arist', + 'astir', + 'sitar', + 'stair', + 'stria', + 'tarsi', + 'tisar', + 'trias'], + 'striae': ['satire', 'striae'], + 'strial': ['latris', 'strial'], + 'striatal': ['altarist', 'striatal'], + 'striate': ['artiste', 'striate'], + 'striated': ['distater', 'striated'], + 'strich': ['christ', 'strich'], + 'strickle': ['stickler', 'strickle'], + 'stride': ['driest', 'stride'], + 'strife': ['fister', 'resift', 'sifter', 'strife'], + 'strig': ['grist', 'grits', 'strig'], + 'striga': ['gratis', 'striga'], + 'strigae': ['seagirt', 'strigae'], + 'strigate': ['strategi', 'strigate'], + 'striges': ['striges', 'tigress'], + 'strigulose': ['sortilegus', 'strigulose'], + 'strike': ['skiter', 'strike'], + 'striker': ['skirret', 'skirter', 'striker'], + 'striking': ['skirting', 'striking'], + 'strikingly': ['skirtingly', 'strikingly'], + 'stringer': ['restring', 'ringster', 'stringer'], + 'strinkle': 
['sklinter', 'strinkle'], + 'striola': ['aristol', 'oralist', 'ortalis', 'striola'], + 'striolae': ['soterial', 'striolae'], + 'strip': ['spirt', 'sprit', 'stirp', 'strip'], + 'stripe': ['priest', 'pteris', 'sprite', 'stripe'], + 'stripeless': ['priestless', 'stripeless'], + 'striper': ['restrip', 'striper'], + 'striplet': ['splitter', 'striplet'], + 'strippit': ['strippit', 'trippist'], + 'strit': ['strit', 'trist'], + 'strive': ['stiver', 'strive', 'verist'], + 'stroam': ['stroam', 'stroma'], + 'strobila': ['laborist', 'strobila'], + 'strobilae': ['sobralite', 'strobilae'], + 'strobilate': ['brasiletto', 'strobilate'], + 'strode': ['sorted', 'strode'], + 'stroke': ['stoker', 'stroke'], + 'strom': ['storm', 'strom'], + 'stroma': ['stroam', 'stroma'], + 'stromatic': ['microstat', 'stromatic'], + 'strone': ['nestor', 'sterno', 'stoner', 'strone', 'tensor'], + 'stronghead': ['headstrong', 'stronghead'], + 'strontian': ['strontian', 'trisonant'], + 'strontianite': ['interstation', 'strontianite'], + 'strop': ['sport', 'strop'], + 'strophaic': ['actorship', 'strophaic'], + 'strophiolate': ['strophiolate', 'theatropolis'], + 'strophomena': ['nephrostoma', 'strophomena'], + 'strounge': ['strounge', 'sturgeon'], + 'stroup': ['sprout', 'stroup', 'stupor'], + 'strove': ['stover', 'strove'], + 'strow': ['strow', 'worst'], + 'stroy': ['sorty', 'story', 'stroy'], + 'strub': ['burst', 'strub'], + 'struck': ['struck', 'trucks'], + 'strue': ['serut', 'strue', 'turse', 'uster'], + 'strumae': ['staumer', 'strumae'], + 'strut': ['strut', 'sturt', 'trust'], + 'struth': ['struth', 'thrust'], + 'struthian': ['struthian', 'unathirst'], + 'stu': ['stu', 'ust'], + 'stuart': ['astrut', 'rattus', 'stuart'], + 'stub': ['bust', 'stub'], + 'stuber': ['berust', 'buster', 'stuber'], + 'stud': ['dust', 'stud'], + 'studdie': ['studdie', 'studied'], + 'student': ['student', 'stunted'], + 'studia': ['aditus', 'studia'], + 'studied': ['studdie', 'studied'], + 'study': ['dusty', 'study'], + 'stue': ['stue', 'suet'], + 'stuffer': ['restuff', 'stuffer'], + 'stug': ['gust', 'stug'], + 'stuiver': ['revuist', 'stuiver'], + 'stum': ['must', 'smut', 'stum'], + 'stumer': ['muster', 'sertum', 'stumer'], + 'stumper': ['stumper', 'sumpter'], + 'stun': ['stun', 'sunt', 'tsun'], + 'stunner': ['stunner', 'unstern'], + 'stunted': ['student', 'stunted'], + 'stunter': ['entrust', 'stunter', 'trusten'], + 'stupa': ['sputa', 'staup', 'stupa'], + 'stupe': ['setup', 'stupe', 'upset'], + 'stupor': ['sprout', 'stroup', 'stupor'], + 'stuprate': ['stuprate', 'upstater'], + 'stupulose': ['pustulose', 'stupulose'], + 'sturdiness': ['sturdiness', 'undistress'], + 'sturgeon': ['strounge', 'sturgeon'], + 'sturine': ['intruse', 'sturine'], + 'sturionine': ['reunionist', 'sturionine'], + 'sturmian': ['naturism', 'sturmian', 'turanism'], + 'sturnidae': ['disnature', 'sturnidae', 'truandise'], + 'sturninae': ['neustrian', 'saturnine', 'sturninae'], + 'sturnus': ['sturnus', 'untruss'], + 'sturt': ['strut', 'sturt', 'trust'], + 'sturtin': ['intrust', 'sturtin'], + 'stut': ['stut', 'tuts'], + 'stutter': ['stutter', 'tutster'], + 'styan': ['nasty', 'styan', 'tansy'], + 'styca': ['stacy', 'styca'], + 'stycerin': ['nycteris', 'stycerin'], + 'stygial': ['stagily', 'stygial'], + 'stylate': ['stately', 'stylate'], + 'styledom': ['modestly', 'styledom'], + 'stylite': ['stylite', 'testily'], + 'styloid': ['odylist', 'styloid'], + 'stylometer': ['metrostyle', 'stylometer'], + 'stylopidae': ['ideoplasty', 'stylopidae'], + 'styphelia': ['physalite', 'styphelia'], + 
'styrene': ['streyne', 'styrene', 'yestern'], + 'styrol': ['sortly', 'styrol'], + 'stythe': ['stythe', 'tethys'], + 'styx': ['styx', 'xyst'], + 'suability': ['suability', 'usability'], + 'suable': ['suable', 'usable'], + 'sualocin': ['sualocin', 'unsocial'], + 'suant': ['staun', 'suant'], + 'suasible': ['basileus', 'issuable', 'suasible'], + 'suasion': ['sanious', 'suasion'], + 'suasory': ['ossuary', 'suasory'], + 'suave': ['sauve', 'suave'], + 'sub': ['bus', 'sub'], + 'subah': ['shuba', 'subah'], + 'subalpine': ['subalpine', 'unspiable'], + 'subanal': ['balanus', 'nabalus', 'subanal'], + 'subcaecal': ['accusable', 'subcaecal'], + 'subcantor': ['obscurant', 'subcantor'], + 'subcapsular': ['subcapsular', 'subscapular'], + 'subcenter': ['rubescent', 'subcenter'], + 'subchela': ['chasuble', 'subchela'], + 'subcool': ['colobus', 'subcool'], + 'subcortical': ['scorbutical', 'subcortical'], + 'subcortically': ['scorbutically', 'subcortically'], + 'subdealer': ['subdealer', 'subleader'], + 'subdean': ['subdean', 'unbased'], + 'subdeltaic': ['discutable', 'subdeltaic', 'subdialect'], + 'subdialect': ['discutable', 'subdeltaic', 'subdialect'], + 'subdie': ['busied', 'subdie'], + 'suber': ['burse', 'rebus', 'suber'], + 'subessential': ['subessential', 'suitableness'], + 'subherd': ['brushed', 'subherd'], + 'subhero': ['herbous', 'subhero'], + 'subhuman': ['subhuman', 'unambush'], + 'subimago': ['bigamous', 'subimago'], + 'sublanate': ['sublanate', 'unsatable'], + 'sublate': ['balteus', 'sublate'], + 'sublative': ['sublative', 'vestibula'], + 'subleader': ['subdealer', 'subleader'], + 'sublet': ['bustle', 'sublet', 'subtle'], + 'sublinear': ['insurable', 'sublinear'], + 'sublumbar': ['sublumbar', 'subumbral'], + 'submaid': ['misdaub', 'submaid'], + 'subman': ['busman', 'subman'], + 'submarine': ['semiurban', 'submarine'], + 'subnarcotic': ['obscurantic', 'subnarcotic'], + 'subnitrate': ['subnitrate', 'subtertian'], + 'subnote': ['subnote', 'subtone', 'unbesot'], + 'suboval': ['suboval', 'subvola'], + 'subpeltate': ['subpeltate', 'upsettable'], + 'subplat': ['subplat', 'upblast'], + 'subra': ['abrus', 'bursa', 'subra'], + 'subscapular': ['subcapsular', 'subscapular'], + 'subserve': ['subserve', 'subverse'], + 'subsider': ['disburse', 'subsider'], + 'substernal': ['substernal', 'turbanless'], + 'substraction': ['obscurantist', 'substraction'], + 'subtack': ['sackbut', 'subtack'], + 'subterraneal': ['subterraneal', 'unarrestable'], + 'subtertian': ['subnitrate', 'subtertian'], + 'subtle': ['bustle', 'sublet', 'subtle'], + 'subtone': ['subnote', 'subtone', 'unbesot'], + 'subtread': ['daubster', 'subtread'], + 'subtutor': ['outburst', 'subtutor'], + 'subulate': ['baetulus', 'subulate'], + 'subumbral': ['sublumbar', 'subumbral'], + 'subverse': ['subserve', 'subverse'], + 'subvola': ['suboval', 'subvola'], + 'succade': ['accused', 'succade'], + 'succeeder': ['resucceed', 'succeeder'], + 'succin': ['cnicus', 'succin'], + 'succinate': ['encaustic', 'succinate'], + 'succor': ['crocus', 'succor'], + 'such': ['cush', 'such'], + 'suck': ['cusk', 'suck'], + 'sucker': ['resuck', 'sucker'], + 'suckling': ['lungsick', 'suckling'], + 'suclat': ['scutal', 'suclat'], + 'sucramine': ['muscarine', 'sucramine'], + 'sucre': ['cruse', 'curse', 'sucre'], + 'suction': ['cotinus', 'suction', 'unstoic'], + 'suctional': ['suctional', 'sulcation', 'unstoical'], + 'suctoria': ['cotarius', 'octarius', 'suctoria'], + 'suctorial': ['ocularist', 'suctorial'], + 'sud': ['sud', 'uds'], + 'sudamen': ['medusan', 'sudamen'], + 'sudan': 
['sudan', 'unsad'], + 'sudanese': ['danseuse', 'sudanese'], + 'sudani': ['sudani', 'unsaid'], + 'sudation': ['adustion', 'sudation'], + 'sudic': ['scudi', 'sudic'], + 'sudra': ['rudas', 'sudra'], + 'sue': ['sue', 'use'], + 'suer': ['ruse', 'suer', 'sure', 'user'], + 'suet': ['stue', 'suet'], + 'sufferer': ['resuffer', 'sufferer'], + 'sufflate': ['feastful', 'sufflate'], + 'suffocate': ['offuscate', 'suffocate'], + 'suffocation': ['offuscation', 'suffocation'], + 'sugamo': ['amusgo', 'sugamo'], + 'sugan': ['agnus', 'angus', 'sugan'], + 'sugar': ['argus', 'sugar'], + 'sugared': ['desugar', 'sugared'], + 'sugarlike': ['arguslike', 'sugarlike'], + 'suggester': ['resuggest', 'suggester'], + 'suggestionism': ['missuggestion', 'suggestionism'], + 'sugh': ['gush', 'shug', 'sugh'], + 'suidae': ['asideu', 'suidae'], + 'suimate': ['metusia', 'suimate', 'timaeus'], + 'suina': ['ianus', 'suina'], + 'suint': ['sintu', 'suint'], + 'suiones': ['sinuose', 'suiones'], + 'suist': ['situs', 'suist'], + 'suitable': ['sabulite', 'suitable'], + 'suitableness': ['subessential', 'suitableness'], + 'suitor': ['suitor', 'tursio'], + 'sula': ['saul', 'sula'], + 'sulcal': ['callus', 'sulcal'], + 'sulcar': ['cursal', 'sulcar'], + 'sulcation': ['suctional', 'sulcation', 'unstoical'], + 'suld': ['slud', 'suld'], + 'sulfamide': ['feudalism', 'sulfamide'], + 'sulfonium': ['fulminous', 'sulfonium'], + 'sulfuret': ['frustule', 'sulfuret'], + 'sulk': ['lusk', 'sulk'], + 'sulka': ['klaus', 'lukas', 'sulka'], + 'sulky': ['lusky', 'sulky'], + 'sulphinide': ['delphinius', 'sulphinide'], + 'sulphohydrate': ['hydrosulphate', 'sulphohydrate'], + 'sulphoproteid': ['protosulphide', 'sulphoproteid'], + 'sulphurea': ['elaphurus', 'sulphurea'], + 'sultan': ['sultan', 'unsalt'], + 'sultane': ['sultane', 'unslate'], + 'sultanian': ['annualist', 'sultanian'], + 'sultanin': ['insulant', 'sultanin'], + 'sultone': ['lentous', 'sultone'], + 'sultry': ['rustly', 'sultry'], + 'sum': ['mus', 'sum'], + 'sumac': ['camus', 'musca', 'scaum', 'sumac'], + 'sumak': ['kusam', 'sumak'], + 'sumatra': ['artamus', 'sumatra'], + 'sumerian': ['aneurism', 'arsenium', 'sumerian'], + 'sumitro': ['sumitro', 'tourism'], + 'summit': ['mutism', 'summit'], + 'summonable': ['somnambule', 'summonable'], + 'summoner': ['resummon', 'summoner'], + 'sumo': ['soum', 'sumo'], + 'sumpit': ['misput', 'sumpit'], + 'sumpitan': ['putanism', 'sumpitan'], + 'sumpter': ['stumper', 'sumpter'], + 'sumptuary': ['sputumary', 'sumptuary'], + 'sumptuous': ['sputumous', 'sumptuous'], + 'sunbeamed': ['sunbeamed', 'unembased'], + 'sundar': ['nardus', 'sundar', 'sundra'], + 'sundek': ['dusken', 'sundek'], + 'sundra': ['nardus', 'sundar', 'sundra'], + 'sung': ['snug', 'sung'], + 'sunil': ['linus', 'sunil'], + 'sunk': ['skun', 'sunk'], + 'sunlighted': ['sunlighted', 'unslighted'], + 'sunlit': ['insult', 'sunlit', 'unlist', 'unslit'], + 'sunni': ['sunni', 'unsin'], + 'sunray': ['sunray', 'surnay', 'synura'], + 'sunrise': ['russine', 'serinus', 'sunrise'], + 'sunshade': ['sunshade', 'unsashed'], + 'sunt': ['stun', 'sunt', 'tsun'], + 'sunup': ['sunup', 'upsun'], + 'sunweed': ['sunweed', 'unsewed'], + 'suomic': ['musico', 'suomic'], + 'sup': ['pus', 'sup'], + 'supa': ['apus', 'supa', 'upas'], + 'super': ['purse', 'resup', 'sprue', 'super'], + 'superable': ['perusable', 'superable'], + 'supercarpal': ['prescapular', 'supercarpal'], + 'superclaim': ['premusical', 'superclaim'], + 'supercontrol': ['preconsultor', 'supercontrol'], + 'supercool': ['escropulo', 'supercool'], + 'superheater': 
['resuperheat', 'superheater'], + 'superideal': ['serpulidae', 'superideal'], + 'superintender': ['superintender', 'unenterprised'], + 'superline': ['serpuline', 'superline'], + 'supermoisten': ['sempiternous', 'supermoisten'], + 'supernacular': ['supernacular', 'supranuclear'], + 'supernal': ['purslane', 'serpulan', 'supernal'], + 'superoanterior': ['anterosuperior', 'superoanterior'], + 'superoposterior': ['posterosuperior', 'superoposterior'], + 'superpose': ['resuppose', 'superpose'], + 'superposition': ['resupposition', 'superposition'], + 'supersaint': ['presustain', 'puritaness', 'supersaint'], + 'supersalt': ['pertussal', 'supersalt'], + 'superservice': ['repercussive', 'superservice'], + 'supersonic': ['croupiness', 'percussion', 'supersonic'], + 'supertare': ['repasture', 'supertare'], + 'supertension': ['serpentinous', 'supertension'], + 'supertotal': ['supertotal', 'tetraplous'], + 'supertrain': ['rupestrian', 'supertrain'], + 'supinator': ['rainspout', 'supinator'], + 'supine': ['puisne', 'supine'], + 'supper': ['supper', 'uppers'], + 'supple': ['peplus', 'supple'], + 'suppletory': ['polypterus', 'suppletory'], + 'supplier': ['periplus', 'supplier'], + 'supporter': ['resupport', 'supporter'], + 'suppresser': ['resuppress', 'suppresser'], + 'suprahyoid': ['hyporadius', 'suprahyoid'], + 'supranuclear': ['supernacular', 'supranuclear'], + 'supreme': ['presume', 'supreme'], + 'sur': ['rus', 'sur', 'urs'], + 'sura': ['rusa', 'saur', 'sura', 'ursa', 'usar'], + 'surah': ['ashur', 'surah'], + 'surahi': ['shauri', 'surahi'], + 'sural': ['larus', 'sural', 'ursal'], + 'surat': ['astur', 'surat', 'sutra'], + 'surbase': ['rubasse', 'surbase'], + 'surbate': ['bursate', 'surbate'], + 'surcrue': ['crureus', 'surcrue'], + 'sure': ['ruse', 'suer', 'sure', 'user'], + 'suresh': ['rhesus', 'suresh'], + 'surette': ['surette', 'trustee'], + 'surge': ['grues', 'surge'], + 'surgent': ['gunster', 'surgent'], + 'surgeonship': ['springhouse', 'surgeonship'], + 'surgy': ['gyrus', 'surgy'], + 'suriana': ['saurian', 'suriana'], + 'surinam': ['surinam', 'uranism'], + 'surma': ['musar', 'ramus', 'rusma', 'surma'], + 'surmisant': ['saturnism', 'surmisant'], + 'surmise': ['misuser', 'surmise'], + 'surnap': ['surnap', 'unspar'], + 'surnay': ['sunray', 'surnay', 'synura'], + 'surprisement': ['surprisement', 'trumperiness'], + 'surrenderer': ['resurrender', 'surrenderer'], + 'surrogate': ['surrogate', 'urogaster'], + 'surrounder': ['resurround', 'surrounder'], + 'surya': ['saury', 'surya'], + 'sus': ['ssu', 'sus'], + 'susan': ['nasus', 'susan'], + 'suscept': ['suscept', 'suspect'], + 'susceptible': ['susceptible', 'suspectible'], + 'susceptor': ['spectrous', 'susceptor', 'suspector'], + 'susie': ['issue', 'susie'], + 'suspect': ['suscept', 'suspect'], + 'suspecter': ['resuspect', 'suspecter'], + 'suspectible': ['susceptible', 'suspectible'], + 'suspector': ['spectrous', 'susceptor', 'suspector'], + 'suspender': ['resuspend', 'suspender', 'unpressed'], + 'sustain': ['issuant', 'sustain'], + 'suther': ['reshut', 'suther', 'thurse', 'tusher'], + 'sutler': ['luster', 'result', 'rustle', 'sutler', 'ulster'], + 'suto': ['otus', 'oust', 'suto'], + 'sutor': ['roust', 'rusot', 'stour', 'sutor', 'torus'], + 'sutorian': ['staurion', 'sutorian'], + 'sutorious': ['sutorious', 'ustorious'], + 'sutra': ['astur', 'surat', 'sutra'], + 'suttin': ['suttin', 'tunist'], + 'suture': ['suture', 'uterus'], + 'svante': ['stevan', 'svante'], + 'svelte': ['stevel', 'svelte'], + 'swa': ['saw', 'swa', 'was'], + 'swage': ['swage', 'wages'], + 
'swain': ['siwan', 'swain'], + 'swale': ['swale', 'sweal', 'wasel'], + 'swaler': ['swaler', 'warsel', 'warsle'], + 'swallet': ['setwall', 'swallet'], + 'swallo': ['sallow', 'swallo'], + 'swallower': ['reswallow', 'swallower'], + 'swami': ['aswim', 'swami'], + 'swan': ['sawn', 'snaw', 'swan'], + 'swap': ['swap', 'wasp'], + 'swarbie': ['barwise', 'swarbie'], + 'sware': ['resaw', 'sawer', 'seraw', 'sware', 'swear', 'warse'], + 'swarmer': ['reswarm', 'swarmer'], + 'swart': ['straw', 'swart', 'warst'], + 'swarty': ['strawy', 'swarty'], + 'swarve': ['swarve', 'swaver'], + 'swat': ['sawt', 'staw', 'swat', 'taws', 'twas', 'wast'], + 'swath': ['swath', 'whats'], + 'swathe': ['swathe', 'sweath'], + 'swati': ['swati', 'waist'], + 'swatter': ['stewart', 'swatter'], + 'swaver': ['swarve', 'swaver'], + 'sway': ['sway', 'ways', 'yaws'], + 'swayer': ['sawyer', 'swayer'], + 'sweal': ['swale', 'sweal', 'wasel'], + 'swear': ['resaw', 'sawer', 'seraw', 'sware', 'swear', 'warse'], + 'swearer': ['resawer', 'reswear', 'swearer'], + 'sweat': ['awest', 'sweat', 'tawse', 'waste'], + 'sweater': ['resweat', 'sweater'], + 'sweatful': ['sweatful', 'wasteful'], + 'sweath': ['swathe', 'sweath'], + 'sweatily': ['sweatily', 'tileways'], + 'sweatless': ['sweatless', 'wasteless'], + 'sweatproof': ['sweatproof', 'wasteproof'], + 'swede': ['sewed', 'swede'], + 'sweep': ['sweep', 'weeps'], + 'sweeper': ['resweep', 'sweeper'], + 'sweer': ['resew', 'sewer', 'sweer'], + 'sweered': ['sewered', 'sweered'], + 'sweet': ['sweet', 'weste'], + 'sweetbread': ['breastweed', 'sweetbread'], + 'sweller': ['reswell', 'sweller'], + 'swelter': ['swelter', 'wrestle'], + 'swep': ['spew', 'swep'], + 'swertia': ['swertia', 'waister'], + 'swile': ['lewis', 'swile'], + 'swiller': ['reswill', 'swiller'], + 'swindle': ['swindle', 'windles'], + 'swine': ['sinew', 'swine', 'wisen'], + 'swinestone': ['sonnetwise', 'swinestone'], + 'swiney': ['sinewy', 'swiney'], + 'swingback': ['backswing', 'swingback'], + 'swinge': ['sewing', 'swinge'], + 'swingle': ['slewing', 'swingle'], + 'swingtree': ['swingtree', 'westering'], + 'swipy': ['swipy', 'wispy'], + 'swire': ['swire', 'wiser'], + 'swith': ['swith', 'whist', 'whits', 'wisht'], + 'swithe': ['swithe', 'whites'], + 'swither': ['swither', 'whister', 'withers'], + 'swoon': ['swoon', 'woons'], + 'swordman': ['sandworm', 'swordman', 'wordsman'], + 'swordmanship': ['swordmanship', 'wordsmanship'], + 'swore': ['owser', 'resow', 'serow', 'sower', 'swore', 'worse'], + 'swot': ['sowt', 'stow', 'swot', 'wots'], + 'sybarite': ['bestiary', 'sybarite'], + 'sybil': ['sibyl', 'sybil'], + 'syce': ['scye', 'syce'], + 'sycones': ['coyness', 'sycones'], + 'syconoid': ['syconoid', 'syodicon'], + 'sye': ['sey', 'sye', 'yes'], + 'syenitic': ['cytisine', 'syenitic'], + 'syllabi': ['sibylla', 'syllabi'], + 'syllable': ['sellably', 'syllable'], + 'sylva': ['salvy', 'sylva'], + 'sylvae': ['slavey', 'sylvae'], + 'sylvine': ['snively', 'sylvine'], + 'sylvite': ['levyist', 'sylvite'], + 'symbol': ['blosmy', 'symbol'], + 'symmetric': ['mycterism', 'symmetric'], + 'sympathy': ['sympathy', 'symphyta'], + 'symphonic': ['cyphonism', 'symphonic'], + 'symphyta': ['sympathy', 'symphyta'], + 'symposion': ['spoonyism', 'symposion'], + 'synapse': ['synapse', 'yapness'], + 'synaptera': ['peasantry', 'synaptera'], + 'syncopator': ['antroscopy', 'syncopator'], + 'syndicate': ['asyndetic', 'cystidean', 'syndicate'], + 'synedral': ['lysander', 'synedral'], + 'syngamic': ['gymnasic', 'syngamic'], + 'syngenic': ['ensigncy', 'syngenic'], + 'synura': 
['sunray', 'surnay', 'synura'], + 'syodicon': ['syconoid', 'syodicon'], + 'sypher': ['sphery', 'sypher'], + 'syphiloid': ['hypsiloid', 'syphiloid'], + 'syrian': ['siryan', 'syrian'], + 'syringa': ['signary', 'syringa'], + 'syringeful': ['refusingly', 'syringeful'], + 'syrup': ['pursy', 'pyrus', 'syrup'], + 'system': ['mystes', 'system'], + 'systole': ['systole', 'toyless'], + 'ta': ['at', 'ta'], + 'taa': ['ata', 'taa'], + 'taal': ['lata', 'taal', 'tala'], + 'taar': ['rata', 'taar', 'tara'], + 'tab': ['bat', 'tab'], + 'tabanus': ['sabanut', 'sabutan', 'tabanus'], + 'tabaret': ['baretta', 'rabatte', 'tabaret'], + 'tabber': ['barbet', 'rabbet', 'tabber'], + 'tabella': ['ballate', 'tabella'], + 'tabes': ['baste', 'beast', 'tabes'], + 'tabet': ['betta', 'tabet'], + 'tabinet': ['bettina', 'tabinet', 'tibetan'], + 'tabira': ['arabit', 'tabira'], + 'tabitha': ['habitat', 'tabitha'], + 'tabitude': ['dubitate', 'tabitude'], + 'table': ['batel', 'blate', 'bleat', 'table'], + 'tabled': ['dablet', 'tabled'], + 'tablemaker': ['marketable', 'tablemaker'], + 'tabler': ['albert', 'balter', 'labret', 'tabler'], + 'tables': ['ablest', 'stable', 'tables'], + 'tablet': ['battel', 'battle', 'tablet'], + 'tabletary': ['tabletary', 'treatably'], + 'tabling': ['batling', 'tabling'], + 'tabophobia': ['batophobia', 'tabophobia'], + 'tabor': ['abort', 'tabor'], + 'taborer': ['arboret', 'roberta', 'taborer'], + 'taboret': ['abettor', 'taboret'], + 'taborin': ['abortin', 'taborin'], + 'tabour': ['outbar', 'rubato', 'tabour'], + 'tabouret': ['obturate', 'tabouret'], + 'tabret': ['batter', 'bertat', 'tabret', 'tarbet'], + 'tabu': ['abut', 'tabu', 'tuba'], + 'tabula': ['ablaut', 'tabula'], + 'tabulare': ['bataleur', 'tabulare'], + 'tabule': ['batule', 'betula', 'tabule'], + 'taccaceae': ['cactaceae', 'taccaceae'], + 'taccaceous': ['cactaceous', 'taccaceous'], + 'tach': ['chat', 'tach'], + 'tache': ['cheat', 'tache', 'teach', 'theca'], + 'tacheless': ['tacheless', 'teachless'], + 'tachina': ['ithacan', 'tachina'], + 'tachinidae': ['anthicidae', 'tachinidae'], + 'tachograph': ['cathograph', 'tachograph'], + 'tacit': ['attic', 'catti', 'tacit'], + 'tacitly': ['cattily', 'tacitly'], + 'tacitness': ['cattiness', 'tacitness'], + 'taciturn': ['taciturn', 'urticant'], + 'tacker': ['racket', 'retack', 'tacker'], + 'tacksman': ['stackman', 'tacksman'], + 'taconian': ['catonian', 'taconian'], + 'taconic': ['cantico', 'catonic', 'taconic'], + 'tacso': ['ascot', 'coast', 'costa', 'tacso', 'tasco'], + 'tacsonia': ['acontias', 'tacsonia'], + 'tactile': ['lattice', 'tactile'], + 'tade': ['adet', 'date', 'tade', 'tead', 'teda'], + 'tadpole': ['platode', 'tadpole'], + 'tae': ['ate', 'eat', 'eta', 'tae', 'tea'], + 'tael': ['atle', 'laet', 'late', 'leat', 'tael', 'tale', 'teal'], + 'taen': ['ante', 'aten', 'etna', 'nate', 'neat', 'taen', 'tane', 'tean'], + 'taenia': ['aetian', 'antiae', 'taenia'], + 'taeniada': ['anatidae', 'taeniada'], + 'taenial': ['laniate', 'natalie', 'taenial'], + 'taenian': ['ananite', 'anatine', 'taenian'], + 'taenicide': ['deciatine', 'diacetine', 'taenicide', 'teniacide'], + 'taeniform': ['forminate', 'fremontia', 'taeniform'], + 'taenioid': ['ideation', 'iodinate', 'taenioid'], + 'taetsia': ['isatate', 'satiate', 'taetsia'], + 'tag': ['gat', 'tag'], + 'tagetes': ['gestate', 'tagetes'], + 'tagetol': ['lagetto', 'tagetol'], + 'tagged': ['gadget', 'tagged'], + 'tagger': ['garget', 'tagger'], + 'tagilite': ['litigate', 'tagilite'], + 'tagish': ['ghaist', 'tagish'], + 'taglike': ['glaiket', 'taglike'], + 'tagrag': 
['ragtag', 'tagrag'], + 'tagsore': ['storage', 'tagsore'], + 'taheen': ['ethane', 'taheen'], + 'tahil': ['ihlat', 'tahil'], + 'tahin': ['ahint', 'hiant', 'tahin'], + 'tahr': ['hart', 'rath', 'tahr', 'thar', 'trah'], + 'tahsil': ['latish', 'tahsil'], + 'tahsin': ['snaith', 'tahsin'], + 'tai': ['ait', 'ati', 'ita', 'tai'], + 'taich': ['aitch', 'chait', 'chati', 'chita', 'taich', 'tchai'], + 'taigle': ['aiglet', 'ligate', 'taigle', 'tailge'], + 'tail': ['alit', 'tail', 'tali'], + 'tailage': ['agalite', 'tailage', 'taliage'], + 'tailboard': ['broadtail', 'tailboard'], + 'tailed': ['detail', 'dietal', 'dilate', 'edital', 'tailed'], + 'tailer': ['lirate', 'retail', 'retial', 'tailer'], + 'tailet': ['latite', 'tailet', 'tailte', 'talite'], + 'tailge': ['aiglet', 'ligate', 'taigle', 'tailge'], + 'tailing': ['gitalin', 'tailing'], + 'taille': ['taille', 'telial'], + 'tailoring': ['gratiolin', 'largition', 'tailoring'], + 'tailorman': ['antimoral', 'tailorman'], + 'tailory': ['orality', 'tailory'], + 'tailpin': ['pintail', 'tailpin'], + 'tailsman': ['staminal', 'tailsman', 'talisman'], + 'tailte': ['latite', 'tailet', 'tailte', 'talite'], + 'taily': ['laity', 'taily'], + 'taimen': ['etamin', 'inmate', 'taimen', 'tamein'], + 'tain': ['aint', 'anti', 'tain', 'tina'], + 'tainan': ['naiant', 'tainan'], + 'taino': ['niota', 'taino'], + 'taint': ['taint', 'tanti', 'tinta', 'titan'], + 'tainture': ['tainture', 'unattire'], + 'taipan': ['aptian', 'patina', 'taipan'], + 'taipo': ['patio', 'taipo', 'topia'], + 'tairge': ['gaiter', 'tairge', 'triage'], + 'tairn': ['riant', 'tairn', 'tarin', 'train'], + 'taise': ['saite', 'taise'], + 'taistrel': ['starlite', 'taistrel'], + 'taistril': ['taistril', 'trialist'], + 'taivers': ['staiver', 'taivers'], + 'taj': ['jat', 'taj'], + 'tajik': ['jatki', 'tajik'], + 'takar': ['katar', 'takar'], + 'take': ['kate', 'keta', 'take', 'teak'], + 'takedown': ['downtake', 'takedown'], + 'taker': ['kerat', 'taker'], + 'takin': ['kitan', 'takin'], + 'takings': ['gitksan', 'skating', 'takings'], + 'taky': ['katy', 'kyat', 'taky'], + 'tal': ['alt', 'lat', 'tal'], + 'tala': ['lata', 'taal', 'tala'], + 'talapoin': ['palation', 'talapoin'], + 'talar': ['altar', 'artal', 'ratal', 'talar'], + 'talari': ['altair', 'atrail', 'atrial', 'lariat', 'latria', 'talari'], + 'talc': ['clat', 'talc'], + 'talcer': ['carlet', 'cartel', 'claret', 'rectal', 'talcer'], + 'talcher': ['clethra', 'latcher', 'ratchel', 'relatch', 'talcher', 'trachle'], + 'talcoid': ['cotidal', 'lactoid', 'talcoid'], + 'talcose': ['alecost', 'lactose', 'scotale', 'talcose'], + 'talcous': ['costula', 'locusta', 'talcous'], + 'tald': ['dalt', 'tald'], + 'tale': ['atle', 'laet', 'late', 'leat', 'tael', 'tale', 'teal'], + 'taled': ['adlet', 'dealt', 'delta', 'lated', 'taled'], + 'talent': ['latent', 'latten', 'nattle', 'talent', 'tantle'], + 'taler': ['alert', 'alter', 'artel', 'later', 'ratel', 'taler', 'telar'], + 'tales': ['least', 'setal', 'slate', 'stale', 'steal', 'stela', 'tales'], + 'tali': ['alit', 'tail', 'tali'], + 'taliage': ['agalite', 'tailage', 'taliage'], + 'taligrade': ['taligrade', 'tragedial'], + 'talinum': ['multani', 'talinum'], + 'talion': ['italon', 'lation', 'talion'], + 'taliped': ['plaited', 'taliped'], + 'talipedic': ['talipedic', 'talpicide'], + 'talipes': ['aliptes', 'pastile', 'talipes'], + 'talipot': ['ptilota', 'talipot', 'toptail'], + 'talis': ['alist', 'litas', 'slait', 'talis'], + 'talisman': ['staminal', 'tailsman', 'talisman'], + 'talite': ['latite', 'tailet', 'tailte', 'talite'], + 'talker': 
['kartel', 'retalk', 'talker'], + 'tallage': ['gallate', 'tallage'], + 'tallero': ['reallot', 'rotella', 'tallero'], + 'talles': ['sallet', 'stella', 'talles'], + 'talliage': ['allagite', 'alligate', 'talliage'], + 'tallier': ['literal', 'tallier'], + 'tallyho': ['loathly', 'tallyho'], + 'talon': ['notal', 'ontal', 'talon', 'tolan', 'tonal'], + 'taloned': ['taloned', 'toledan'], + 'talonid': ['itoland', 'talonid', 'tindalo'], + 'talose': ['lotase', 'osteal', 'solate', 'stolae', 'talose'], + 'talpa': ['aptal', 'palta', 'talpa'], + 'talpicide': ['talipedic', 'talpicide'], + 'talpidae': ['lapidate', 'talpidae'], + 'talpine': ['pantile', 'pentail', 'platine', 'talpine'], + 'talpoid': ['platoid', 'talpoid'], + 'taluche': ['auchlet', 'cutheal', 'taluche'], + 'taluka': ['latuka', 'taluka'], + 'talus': ['latus', 'sault', 'talus'], + 'tam': ['amt', 'mat', 'tam'], + 'tama': ['atma', 'tama'], + 'tamale': ['malate', 'meatal', 'tamale'], + 'tamanac': ['matacan', 'tamanac'], + 'tamanaca': ['atacaman', 'tamanaca'], + 'tamanoir': ['animator', 'tamanoir'], + 'tamanu': ['anatum', 'mantua', 'tamanu'], + 'tamara': ['armata', 'matara', 'tamara'], + 'tamarao': ['tamarao', 'tamaroa'], + 'tamarin': ['martian', 'tamarin'], + 'tamaroa': ['tamarao', 'tamaroa'], + 'tambor': ['tambor', 'tromba'], + 'tamboura': ['marabout', 'marabuto', 'tamboura'], + 'tambourer': ['arboretum', 'tambourer'], + 'tambreet': ['ambrette', 'tambreet'], + 'tamburan': ['rambutan', 'tamburan'], + 'tame': ['mate', 'meat', 'meta', 'tame', 'team', 'tema'], + 'tamein': ['etamin', 'inmate', 'taimen', 'tamein'], + 'tameless': ['mateless', 'meatless', 'tameless', 'teamless'], + 'tamelessness': ['matelessness', 'tamelessness'], + 'tamely': ['mately', 'tamely'], + 'tamer': ['armet', + 'mater', + 'merat', + 'metra', + 'ramet', + 'tamer', + 'terma', + 'trame', + 'trema'], + 'tamise': ['samite', 'semita', 'tamise', 'teaism'], + 'tampin': ['pitman', 'tampin', 'tipman'], + 'tampion': ['maintop', 'ptomain', 'tampion', 'timpano'], + 'tampioned': ['ademption', 'tampioned'], + 'tampon': ['potman', 'tampon', 'topman'], + 'tamul': ['lamut', 'tamul'], + 'tamus': ['matsu', 'tamus', 'tsuma'], + 'tan': ['ant', 'nat', 'tan'], + 'tana': ['anat', 'anta', 'tana'], + 'tanach': ['acanth', 'anchat', 'tanach'], + 'tanager': ['argante', 'granate', 'tanager'], + 'tanagridae': ['tanagridae', 'tangaridae'], + 'tanagrine': ['argentina', 'tanagrine'], + 'tanagroid': ['gradation', 'indagator', 'tanagroid'], + 'tanak': ['kanat', 'tanak', 'tanka'], + 'tanaka': ['nataka', 'tanaka'], + 'tanala': ['atalan', 'tanala'], + 'tanan': ['annat', 'tanan'], + 'tanbur': ['tanbur', 'turban'], + 'tancel': ['cantle', 'cental', 'lancet', 'tancel'], + 'tanchoir': ['anorthic', 'anthroic', 'tanchoir'], + 'tandemist': ['misattend', 'tandemist'], + 'tandle': ['dental', 'tandle'], + 'tandour': ['rotunda', 'tandour'], + 'tane': ['ante', 'aten', 'etna', 'nate', 'neat', 'taen', 'tane', 'tean'], + 'tang': ['gant', 'gnat', 'tang'], + 'tanga': ['ganta', 'tanga'], + 'tangaridae': ['tanagridae', 'tangaridae'], + 'tangelo': ['angelot', 'tangelo'], + 'tanger': ['argent', 'garnet', 'garten', 'tanger'], + 'tangerine': ['argentine', 'tangerine'], + 'tangfish': ['shafting', 'tangfish'], + 'tangi': ['giant', 'tangi', 'tiang'], + 'tangibile': ['bigential', 'tangibile'], + 'tangible': ['bleating', 'tangible'], + 'tangie': ['eating', 'ingate', 'tangie'], + 'tangier': ['angrite', 'granite', 'ingrate', 'tangier', 'tearing', 'tigrean'], + 'tangle': ['tangle', 'telang'], + 'tangling': ['gnatling', 'tangling'], + 'tango': 
['tango', 'tonga'], + 'tangs': ['angst', 'stang', 'tangs'], + 'tangue': ['gunate', 'tangue'], + 'tangum': ['tangum', 'tugman'], + 'tangun': ['tangun', 'tungan'], + 'tanh': ['hant', 'tanh', 'than'], + 'tanha': ['atnah', 'tanha', 'thana'], + 'tania': ['anita', 'niata', 'tania'], + 'tanica': ['actian', 'natica', 'tanica'], + 'tanier': ['nerita', 'ratine', 'retain', 'retina', 'tanier'], + 'tanist': ['astint', 'tanist'], + 'tanitic': ['tanitic', 'titanic'], + 'tanka': ['kanat', 'tanak', 'tanka'], + 'tankle': ['anklet', 'lanket', 'tankle'], + 'tanling': ['antling', 'tanling'], + 'tannaic': ['cantina', 'tannaic'], + 'tannase': ['annates', 'tannase'], + 'tannogallate': ['gallotannate', 'tannogallate'], + 'tannogallic': ['gallotannic', 'tannogallic'], + 'tannogen': ['nonagent', 'tannogen'], + 'tanproof': ['antproof', 'tanproof'], + 'tanquen': ['quannet', 'tanquen'], + 'tanrec': ['canter', + 'creant', + 'cretan', + 'nectar', + 'recant', + 'tanrec', + 'trance'], + 'tansy': ['nasty', 'styan', 'tansy'], + 'tantalean': ['antenatal', 'atlantean', 'tantalean'], + 'tantalic': ['atlantic', 'tantalic'], + 'tantalite': ['atlantite', 'tantalite'], + 'tantara': ['tantara', 'tartana'], + 'tantarara': ['tantarara', 'tarantara'], + 'tanti': ['taint', 'tanti', 'tinta', 'titan'], + 'tantle': ['latent', 'latten', 'nattle', 'talent', 'tantle'], + 'tantra': ['rattan', 'tantra', 'tartan'], + 'tantrism': ['tantrism', 'transmit'], + 'tantum': ['mutant', 'tantum', 'tutman'], + 'tanzeb': ['batzen', 'bezant', 'tanzeb'], + 'tao': ['oat', 'tao', 'toa'], + 'taoistic': ['iotacist', 'taoistic'], + 'taos': ['oast', 'stoa', 'taos'], + 'tap': ['apt', 'pat', 'tap'], + 'tapa': ['atap', 'pata', 'tapa'], + 'tapalo': ['patola', 'tapalo'], + 'tapas': ['patas', 'tapas'], + 'tape': ['pate', 'peat', 'tape', 'teap'], + 'tapeline': ['petaline', 'tapeline'], + 'tapeman': ['peatman', 'tapeman'], + 'tapen': ['enapt', 'paten', 'penta', 'tapen'], + 'taper': ['apert', 'pater', 'peart', 'prate', 'taper', 'terap'], + 'tapered': ['padtree', 'predate', 'tapered'], + 'tapering': ['partigen', 'tapering'], + 'taperly': ['apertly', 'peartly', 'platery', 'pteryla', 'taperly'], + 'taperness': ['apertness', 'peartness', 'taperness'], + 'tapestring': ['spattering', 'tapestring'], + 'tapestry': ['tapestry', 'tryptase'], + 'tapet': ['patte', 'tapet'], + 'tapete': ['pattee', 'tapete'], + 'taphouse': ['outshape', 'taphouse'], + 'taphria': ['pitarah', 'taphria'], + 'taphrina': ['parthian', 'taphrina'], + 'tapir': ['atrip', 'tapir'], + 'tapiro': ['portia', 'tapiro'], + 'tapirus': ['tapirus', 'upstair'], + 'tapis': ['piast', 'stipa', 'tapis'], + 'taplash': ['asphalt', 'spathal', 'taplash'], + 'tapmost': ['tapmost', 'topmast'], + 'tapnet': ['patent', 'patten', 'tapnet'], + 'tapoa': ['opata', 'patao', 'tapoa'], + 'taposa': ['sapota', 'taposa'], + 'taproom': ['protoma', 'taproom'], + 'taproot': ['potator', 'taproot'], + 'taps': ['past', 'spat', 'stap', 'taps'], + 'tapster': ['spatter', 'tapster'], + 'tapu': ['patu', 'paut', 'tapu'], + 'tapuyo': ['outpay', 'tapuyo'], + 'taqua': ['quata', 'taqua'], + 'tar': ['art', 'rat', 'tar', 'tra'], + 'tara': ['rata', 'taar', 'tara'], + 'taraf': ['taraf', 'tarfa'], + 'tarai': ['arati', 'atria', 'riata', 'tarai', 'tiara'], + 'taranchi': ['taranchi', 'thracian'], + 'tarantara': ['tantarara', 'tarantara'], + 'tarapin': ['patarin', 'tarapin'], + 'tarasc': ['castra', 'tarasc'], + 'tarbet': ['batter', 'bertat', 'tabret', 'tarbet'], + 'tardle': ['dartle', 'tardle'], + 'tardy': ['tardy', 'trady'], + 'tare': ['rate', 'tare', 'tear', 'tera'], + 
'tarente': ['entreat', 'ratteen', 'tarente', 'ternate', 'tetrane'], + 'tarentine': ['entertain', 'tarentine', 'terentian'], + 'tarfa': ['taraf', 'tarfa'], + 'targe': ['gater', 'grate', 'great', 'greta', 'retag', 'targe'], + 'targeman': ['grateman', 'mangrate', 'mentagra', 'targeman'], + 'targer': ['garret', 'garter', 'grater', 'targer'], + 'target': ['gatter', 'target'], + 'targetman': ['targetman', 'termagant'], + 'targum': ['artgum', 'targum'], + 'tarheel': ['leather', 'tarheel'], + 'tarheeler': ['leatherer', 'releather', 'tarheeler'], + 'tari': ['airt', 'rita', 'tari', 'tiar'], + 'tarie': ['arite', 'artie', 'irate', 'retia', 'tarie'], + 'tarin': ['riant', 'tairn', 'tarin', 'train'], + 'tarish': ['rashti', 'tarish'], + 'tarletan': ['alterant', 'tarletan'], + 'tarlike': ['artlike', 'ratlike', 'tarlike'], + 'tarmac': ['mactra', 'tarmac'], + 'tarman': ['mantra', 'tarman'], + 'tarmi': ['mitra', 'tarmi', 'timar', 'tirma'], + 'tarn': ['natr', 'rant', 'tarn', 'tran'], + 'tarnal': ['antral', 'tarnal'], + 'tarnish': ['tarnish', 'trishna'], + 'tarnside': ['stradine', 'strained', 'tarnside'], + 'taro': ['rota', 'taro', 'tora'], + 'taroc': ['actor', 'corta', 'croat', 'rocta', 'taroc', 'troca'], + 'tarocco': ['coactor', 'tarocco'], + 'tarok': ['kotar', 'tarok'], + 'tarot': ['ottar', 'tarot', 'torta', 'troat'], + 'tarp': ['part', 'prat', 'rapt', 'tarp', 'trap'], + 'tarpan': ['partan', 'tarpan'], + 'tarpaulin': ['tarpaulin', 'unpartial'], + 'tarpeian': ['patarine', 'tarpeian'], + 'tarpon': ['patron', 'tarpon'], + 'tarquin': ['quatrin', 'tarquin'], + 'tarragon': ['arrogant', 'tarragon'], + 'tarred': ['darter', + 'dartre', + 'redart', + 'retard', + 'retrad', + 'tarred', + 'trader'], + 'tarrer': ['tarrer', 'terrar'], + 'tarriance': ['antiracer', 'tarriance'], + 'tarrie': ['arriet', 'tarrie'], + 'tars': ['sart', 'star', 'stra', 'tars', 'tsar'], + 'tarsal': ['astral', 'tarsal'], + 'tarsale': ['alaster', 'tarsale'], + 'tarsalgia': ['astragali', 'tarsalgia'], + 'tarse': ['aster', 'serta', 'stare', 'strae', 'tarse', 'teras'], + 'tarsi': ['arist', + 'astir', + 'sitar', + 'stair', + 'stria', + 'tarsi', + 'tisar', + 'trias'], + 'tarsia': ['arista', 'tarsia'], + 'tarsier': ['astrier', 'tarsier'], + 'tarsipes': ['piratess', 'serapist', 'tarsipes'], + 'tarsitis': ['satirist', 'tarsitis'], + 'tarsome': ['maestro', 'tarsome'], + 'tarsonemid': ['mosandrite', 'tarsonemid'], + 'tarsonemus': ['sarmentous', 'tarsonemus'], + 'tarsus': ['rastus', 'tarsus'], + 'tartan': ['rattan', 'tantra', 'tartan'], + 'tartana': ['tantara', 'tartana'], + 'tartaret': ['tartaret', 'tartrate'], + 'tartarin': ['tartarin', 'triratna'], + 'tartarous': ['saturator', 'tartarous'], + 'tarten': ['attern', 'natter', 'ratten', 'tarten'], + 'tartish': ['athirst', 'rattish', 'tartish'], + 'tartle': ['artlet', 'latter', 'rattle', 'tartle', 'tatler'], + 'tartlet': ['tartlet', 'tattler'], + 'tartly': ['rattly', 'tartly'], + 'tartrate': ['tartaret', 'tartrate'], + 'taruma': ['taruma', 'trauma'], + 'tarve': ['avert', 'tarve', 'taver', 'trave'], + 'tarweed': ['dewater', 'tarweed', 'watered'], + 'tarwood': ['ratwood', 'tarwood'], + 'taryba': ['baryta', 'taryba'], + 'tasco': ['ascot', 'coast', 'costa', 'tacso', 'tasco'], + 'tash': ['shat', 'tash'], + 'tasheriff': ['fireshaft', 'tasheriff'], + 'tashie': ['saithe', 'tashie', 'teaish'], + 'tashrif': ['ratfish', 'tashrif'], + 'tasian': ['astian', 'tasian'], + 'tasimetry': ['myristate', 'tasimetry'], + 'task': ['skat', 'task'], + 'tasker': ['skater', 'staker', 'strake', 'streak', 'tasker'], + 'taslet': ['latest', 
'sattle', 'taslet'], + 'tasmanite': ['emanatist', 'staminate', 'tasmanite'], + 'tassah': ['shasta', 'tassah'], + 'tasse': ['asset', 'tasse'], + 'tassel': ['lasset', 'tassel'], + 'tasseler': ['rateless', 'tasseler', 'tearless', 'tesseral'], + 'tasser': ['assert', 'tasser'], + 'tassie': ['siesta', 'tassie'], + 'tastable': ['statable', 'tastable'], + 'taste': ['state', 'taste', 'tates', 'testa'], + 'tasted': ['stated', 'tasted'], + 'tasteful': ['stateful', 'tasteful'], + 'tastefully': ['statefully', 'tastefully'], + 'tastefulness': ['statefulness', 'tastefulness'], + 'tasteless': ['stateless', 'tasteless'], + 'taster': ['stater', 'taster', 'testar'], + 'tasu': ['saut', 'tasu', 'utas'], + 'tatar': ['attar', 'tatar'], + 'tatarize': ['tatarize', 'zaratite'], + 'tatchy': ['chatty', 'tatchy'], + 'tate': ['etta', 'tate', 'teat'], + 'tater': ['atter', 'tater', 'teart', 'tetra', 'treat'], + 'tates': ['state', 'taste', 'tates', 'testa'], + 'tath': ['hatt', 'tath', 'that'], + 'tatian': ['attain', 'tatian'], + 'tatler': ['artlet', 'latter', 'rattle', 'tartle', 'tatler'], + 'tatterly': ['tatterly', 'tattlery'], + 'tattler': ['tartlet', 'tattler'], + 'tattlery': ['tatterly', 'tattlery'], + 'tatu': ['tatu', 'taut'], + 'tau': ['tau', 'tua', 'uta'], + 'taube': ['butea', 'taube', 'tubae'], + 'taula': ['aluta', 'taula'], + 'taum': ['muta', 'taum'], + 'taun': ['antu', 'aunt', 'naut', 'taun', 'tuan', 'tuna'], + 'taungthu': ['taungthu', 'untaught'], + 'taupe': ['taupe', 'upeat'], + 'taupo': ['apout', 'taupo'], + 'taur': ['ruta', 'taur'], + 'taurean': ['taurean', 'uranate'], + 'tauric': ['tauric', 'uratic', 'urtica'], + 'taurine': ['ruinate', 'taurine', 'uranite', 'urinate'], + 'taurocol': ['outcarol', 'taurocol'], + 'taut': ['tatu', 'taut'], + 'tauten': ['attune', 'nutate', 'tauten'], + 'tautomeric': ['autometric', 'tautomeric'], + 'tautomery': ['autometry', 'tautomery'], + 'tav': ['tav', 'vat'], + 'tavast': ['sattva', 'tavast'], + 'tave': ['tave', 'veta'], + 'taver': ['avert', 'tarve', 'taver', 'trave'], + 'tavers': ['starve', 'staver', 'strave', 'tavers', 'versta'], + 'tavert': ['tavert', 'vatter'], + 'tavola': ['tavola', 'volata'], + 'taw': ['taw', 'twa', 'wat'], + 'tawa': ['awat', 'tawa'], + 'tawer': ['tawer', 'water', 'wreat'], + 'tawery': ['tawery', 'watery'], + 'tawite': ['tawite', 'tawtie', 'twaite'], + 'tawn': ['nawt', 'tawn', 'want'], + 'tawny': ['tawny', 'wanty'], + 'taws': ['sawt', 'staw', 'swat', 'taws', 'twas', 'wast'], + 'tawse': ['awest', 'sweat', 'tawse', 'waste'], + 'tawtie': ['tawite', 'tawtie', 'twaite'], + 'taxed': ['detax', 'taxed'], + 'taxer': ['extra', 'retax', 'taxer'], + 'tay': ['tay', 'yat'], + 'tayer': ['tayer', 'teary'], + 'tchai': ['aitch', 'chait', 'chati', 'chita', 'taich', 'tchai'], + 'tche': ['chet', 'etch', 'tche', 'tech'], + 'tchi': ['chit', 'itch', 'tchi'], + 'tchu': ['chut', 'tchu', 'utch'], + 'tchwi': ['tchwi', 'wicht', 'witch'], + 'tea': ['ate', 'eat', 'eta', 'tae', 'tea'], + 'teaberry': ['betrayer', 'eatberry', 'rebetray', 'teaberry'], + 'teaboy': ['betoya', 'teaboy'], + 'teacart': ['caretta', 'teacart', 'tearcat'], + 'teach': ['cheat', 'tache', 'teach', 'theca'], + 'teachable': ['cheatable', 'teachable'], + 'teachableness': ['cheatableness', 'teachableness'], + 'teache': ['achete', 'hecate', 'teache', 'thecae'], + 'teacher': ['cheater', 'hectare', 'recheat', 'reteach', 'teacher'], + 'teachery': ['cheatery', 'cytherea', 'teachery'], + 'teaching': ['cheating', 'teaching'], + 'teachingly': ['cheatingly', 'teachingly'], + 'teachless': ['tacheless', 'teachless'], + 'tead': 
['adet', 'date', 'tade', 'tead', 'teda'], + 'teaer': ['arete', 'eater', 'teaer'], + 'teagle': ['eaglet', 'legate', 'teagle', 'telega'], + 'teaish': ['saithe', 'tashie', 'teaish'], + 'teaism': ['samite', 'semita', 'tamise', 'teaism'], + 'teak': ['kate', 'keta', 'take', 'teak'], + 'teal': ['atle', 'laet', 'late', 'leat', 'tael', 'tale', 'teal'], + 'team': ['mate', 'meat', 'meta', 'tame', 'team', 'tema'], + 'teamer': ['reetam', 'retame', 'teamer'], + 'teaming': ['mintage', 'teaming', 'tegmina'], + 'teamless': ['mateless', 'meatless', 'tameless', 'teamless'], + 'teamman': ['meatman', 'teamman'], + 'teamster': ['teamster', 'trametes'], + 'tean': ['ante', 'aten', 'etna', 'nate', 'neat', 'taen', 'tane', 'tean'], + 'teanal': ['anteal', 'lanate', 'teanal'], + 'teap': ['pate', 'peat', 'tape', 'teap'], + 'teapot': ['aptote', 'optate', 'potate', 'teapot'], + 'tear': ['rate', 'tare', 'tear', 'tera'], + 'tearable': ['elabrate', 'tearable'], + 'tearably': ['betrayal', 'tearably'], + 'tearcat': ['caretta', 'teacart', 'tearcat'], + 'teardown': ['danewort', 'teardown'], + 'teardrop': ['predator', 'protrade', 'teardrop'], + 'tearer': ['rerate', 'retare', 'tearer'], + 'tearful': ['faulter', 'refutal', 'tearful'], + 'tearing': ['angrite', 'granite', 'ingrate', 'tangier', 'tearing', 'tigrean'], + 'tearless': ['rateless', 'tasseler', 'tearless', 'tesseral'], + 'tearlike': ['keralite', 'tearlike'], + 'tearpit': ['partite', 'tearpit'], + 'teart': ['atter', 'tater', 'teart', 'tetra', 'treat'], + 'teary': ['tayer', 'teary'], + 'teasably': ['stayable', 'teasably'], + 'tease': ['setae', 'tease'], + 'teasel': ['ateles', 'saltee', 'sealet', 'stelae', 'teasel'], + 'teaser': ['asteer', + 'easter', + 'eastre', + 'reseat', + 'saeter', + 'seater', + 'staree', + 'teaser', + 'teresa'], + 'teasing': ['easting', + 'gainset', + 'genista', + 'ingesta', + 'seating', + 'signate', + 'teasing'], + 'teasler': ['realest', 'reslate', 'resteal', 'stealer', 'teasler'], + 'teasy': ['teasy', 'yeast'], + 'teat': ['etta', 'tate', 'teat'], + 'teather': ['teather', 'theater', 'thereat'], + 'tebu': ['bute', 'tebu', 'tube'], + 'teca': ['cate', 'teca'], + 'tecali': ['calite', 'laetic', 'tecali'], + 'tech': ['chet', 'etch', 'tche', 'tech'], + 'techily': ['ethylic', 'techily'], + 'technica': ['atechnic', 'catechin', 'technica'], + 'technocracy': ['conycatcher', 'technocracy'], + 'technopsychology': ['psychotechnology', 'technopsychology'], + 'techous': ['souchet', 'techous', 'tousche'], + 'techy': ['techy', 'tyche'], + 'tecla': ['cleat', 'eclat', 'ectal', 'lacet', 'tecla'], + 'teco': ['cote', 'teco'], + 'tecoma': ['comate', 'metoac', 'tecoma'], + 'tecomin': ['centimo', 'entomic', 'tecomin'], + 'tecon': ['cento', 'conte', 'tecon'], + 'tectal': ['cattle', 'tectal'], + 'tectology': ['ctetology', 'tectology'], + 'tectona': ['oncetta', 'tectona'], + 'tectospinal': ['entoplastic', 'spinotectal', 'tectospinal', 'tenoplastic'], + 'tecuma': ['acetum', 'tecuma'], + 'tecuna': ['tecuna', 'uncate'], + 'teda': ['adet', 'date', 'tade', 'tead', 'teda'], + 'tedious': ['outside', 'tedious'], + 'tediousness': ['outsideness', 'tediousness'], + 'teedle': ['delete', 'teedle'], + 'teel': ['leet', 'lete', 'teel', 'tele'], + 'teem': ['meet', 'mete', 'teem'], + 'teemer': ['meeter', 'remeet', 'teemer'], + 'teeming': ['meeting', 'teeming', 'tegmine'], + 'teems': ['teems', 'temse'], + 'teen': ['neet', 'nete', 'teen'], + 'teens': ['steen', 'teens', 'tense'], + 'teer': ['reet', 'teer', 'tree'], + 'teerer': ['retree', 'teerer'], + 'teest': ['teest', 'teste'], + 'teet': ['teet', 
'tete'], + 'teeter': ['teeter', 'terete'], + 'teeth': ['teeth', 'theet'], + 'teething': ['genthite', 'teething'], + 'teg': ['get', 'teg'], + 'tegean': ['geneat', 'negate', 'tegean'], + 'tegmina': ['mintage', 'teaming', 'tegmina'], + 'tegminal': ['ligament', 'metaling', 'tegminal'], + 'tegmine': ['meeting', 'teeming', 'tegmine'], + 'tegular': ['gaulter', 'tegular'], + 'teheran': ['earthen', 'enheart', 'hearten', 'naether', 'teheran', 'traheen'], + 'tehsildar': ['heraldist', 'tehsildar'], + 'teian': ['entia', 'teian', 'tenai', 'tinea'], + 'teicher': ['erethic', 'etheric', 'heretic', 'heteric', 'teicher'], + 'teil': ['lite', 'teil', 'teli', 'tile'], + 'teind': ['detin', 'teind', 'tined'], + 'teinder': ['nitered', 'redient', 'teinder'], + 'teinland': ['dentinal', 'teinland', 'tendinal'], + 'teioid': ['iodite', 'teioid'], + 'teju': ['jute', 'teju'], + 'telamon': ['omental', 'telamon'], + 'telang': ['tangle', 'telang'], + 'telar': ['alert', 'alter', 'artel', 'later', 'ratel', 'taler', 'telar'], + 'telarian': ['retainal', 'telarian'], + 'telary': ['lyrate', 'raylet', 'realty', 'telary'], + 'tele': ['leet', 'lete', 'teel', 'tele'], + 'telecast': ['castelet', 'telecast'], + 'telega': ['eaglet', 'legate', 'teagle', 'telega'], + 'telegn': ['gentle', 'telegn'], + 'telegrapher': ['retelegraph', 'telegrapher'], + 'telei': ['elite', 'telei'], + 'telephone': ['phenetole', 'telephone'], + 'telephony': ['polythene', 'telephony'], + 'telephotograph': ['phototelegraph', 'telephotograph'], + 'telephotographic': ['phototelegraphic', 'telephotographic'], + 'telephotography': ['phototelegraphy', 'telephotography'], + 'teleradiophone': ['radiotelephone', 'teleradiophone'], + 'teleran': ['alterne', 'enteral', 'eternal', 'teleran', 'teneral'], + 'teleseism': ['messelite', 'semisteel', 'teleseism'], + 'telespectroscope': ['spectrotelescope', 'telespectroscope'], + 'telestereoscope': ['stereotelescope', 'telestereoscope'], + 'telestial': ['satellite', 'telestial'], + 'telestic': ['telestic', 'testicle'], + 'telfer': ['felter', 'telfer', 'trefle'], + 'teli': ['lite', 'teil', 'teli', 'tile'], + 'telial': ['taille', 'telial'], + 'telic': ['clite', 'telic'], + 'telinga': ['atingle', 'gelatin', 'genital', 'langite', 'telinga'], + 'tellach': ['hellcat', 'tellach'], + 'teller': ['retell', 'teller'], + 'tellima': ['mitella', 'tellima'], + 'tellina': ['nitella', 'tellina'], + 'tellurian': ['tellurian', 'unliteral'], + 'telome': ['omelet', 'telome'], + 'telonism': ['melonist', 'telonism'], + 'telopsis': ['polistes', 'telopsis'], + 'telpath': ['pathlet', 'telpath'], + 'telson': ['solent', 'stolen', 'telson'], + 'telsonic': ['lentisco', 'telsonic'], + 'telt': ['lett', 'telt'], + 'tema': ['mate', 'meat', 'meta', 'tame', 'team', 'tema'], + 'teman': ['ament', 'meant', 'teman'], + 'temin': ['metin', 'temin', 'timne'], + 'temp': ['empt', 'temp'], + 'tempean': ['peteman', 'tempean'], + 'temper': ['temper', 'tempre'], + 'tempera': ['premate', 'tempera'], + 'temperer': ['retemper', 'temperer'], + 'temperish': ['herpetism', 'metership', 'metreship', 'temperish'], + 'templar': ['templar', 'trample'], + 'template': ['palmette', 'template'], + 'temple': ['pelmet', 'temple'], + 'tempora': ['pteroma', 'tempora'], + 'temporarily': ['polarimetry', 'premorality', 'temporarily'], + 'temporofrontal': ['frontotemporal', 'temporofrontal'], + 'temporooccipital': ['occipitotemporal', 'temporooccipital'], + 'temporoparietal': ['parietotemporal', 'temporoparietal'], + 'tempre': ['temper', 'tempre'], + 'tempter': ['retempt', 'tempter'], + 'temse': 
['teems', 'temse'], + 'temser': ['mester', 'restem', 'temser', 'termes'], + 'temulent': ['temulent', 'unmettle'], + 'ten': ['net', 'ten'], + 'tenable': ['beltane', 'tenable'], + 'tenace': ['cetane', 'tenace'], + 'tenai': ['entia', 'teian', 'tenai', 'tinea'], + 'tenanter': ['retenant', 'tenanter'], + 'tenantless': ['latentness', 'tenantless'], + 'tencteri': ['reticent', 'tencteri'], + 'tend': ['dent', 'tend'], + 'tender': ['denter', 'rented', 'tender'], + 'tenderer': ['retender', 'tenderer'], + 'tenderish': ['disherent', 'hinderest', 'tenderish'], + 'tendinal': ['dentinal', 'teinland', 'tendinal'], + 'tendinitis': ['dentinitis', 'tendinitis'], + 'tendour': ['tendour', 'unroted'], + 'tendril': ['tendril', 'trindle'], + 'tendron': ['donnert', 'tendron'], + 'teneral': ['alterne', 'enteral', 'eternal', 'teleran', 'teneral'], + 'teneriffe': ['fifteener', 'teneriffe'], + 'tenesmus': ['muteness', 'tenesmus'], + 'teng': ['gent', 'teng'], + 'tengu': ['tengu', 'unget'], + 'teniacidal': ['acetanilid', 'laciniated', 'teniacidal'], + 'teniacide': ['deciatine', 'diacetine', 'taenicide', 'teniacide'], + 'tenible': ['beltine', 'tenible'], + 'tenino': ['intone', 'tenino'], + 'tenline': ['lenient', 'tenline'], + 'tenner': ['rennet', 'tenner'], + 'tennis': ['innest', 'sennit', 'sinnet', 'tennis'], + 'tenomyotomy': ['myotenotomy', 'tenomyotomy'], + 'tenon': ['nonet', 'tenon'], + 'tenoner': ['enteron', 'tenoner'], + 'tenonian': ['annotine', 'tenonian'], + 'tenonitis': ['sentition', 'tenonitis'], + 'tenontophyma': ['nematophyton', 'tenontophyma'], + 'tenophyte': ['entophyte', 'tenophyte'], + 'tenoplastic': ['entoplastic', 'spinotectal', 'tectospinal', 'tenoplastic'], + 'tenor': ['noter', 'tenor', 'toner', 'trone'], + 'tenorist': ['ortstein', 'tenorist'], + 'tenovaginitis': ['investigation', 'tenovaginitis'], + 'tenpin': ['pinnet', 'tenpin'], + 'tenrec': ['center', 'recent', 'tenrec'], + 'tense': ['steen', 'teens', 'tense'], + 'tensible': ['nebelist', 'stilbene', 'tensible'], + 'tensile': ['leisten', 'setline', 'tensile'], + 'tension': ['stenion', 'tension'], + 'tensional': ['alstonine', 'tensional'], + 'tensive': ['estevin', 'tensive'], + 'tenson': ['sonnet', 'stonen', 'tenson'], + 'tensor': ['nestor', 'sterno', 'stoner', 'strone', 'tensor'], + 'tentable': ['nettable', 'tentable'], + 'tentacle': ['ectental', 'tentacle'], + 'tentage': ['genetta', 'tentage'], + 'tentation': ['attention', 'tentation'], + 'tentative': ['attentive', 'tentative'], + 'tentatively': ['attentively', 'tentatively'], + 'tentativeness': ['attentiveness', 'tentativeness'], + 'tented': ['detent', 'netted', 'tented'], + 'tenter': ['netter', 'retent', 'tenter'], + 'tention': ['nettion', 'tention', 'tontine'], + 'tentorial': ['natrolite', 'tentorial'], + 'tenty': ['netty', 'tenty'], + 'tenuiroster': ['tenuiroster', 'urosternite'], + 'tenuistriate': ['intersituate', 'tenuistriate'], + 'tenure': ['neuter', 'retune', 'runtee', 'tenure', 'tureen'], + 'tenurial': ['lutrinae', 'retinula', 'rutelian', 'tenurial'], + 'teocalli': ['colletia', 'teocalli'], + 'teosinte': ['noisette', 'teosinte'], + 'tepache': ['heptace', 'tepache'], + 'tepal': ['leapt', + 'palet', + 'patel', + 'pelta', + 'petal', + 'plate', + 'pleat', + 'tepal'], + 'tepanec': ['pentace', 'tepanec'], + 'tepecano': ['conepate', 'tepecano'], + 'tephrite': ['perthite', 'tephrite'], + 'tephritic': ['perthitic', 'tephritic'], + 'tephroite': ['heptorite', 'tephroite'], + 'tephrosis': ['posterish', 'prothesis', 'sophister', 'storeship', 'tephrosis'], + 'tepor': ['poter', 'prote', 'repot', 'tepor', 
'toper', 'trope'], + 'tequila': ['liquate', 'tequila'], + 'tera': ['rate', 'tare', 'tear', 'tera'], + 'teraglin': ['integral', 'teraglin', 'triangle'], + 'terap': ['apert', 'pater', 'peart', 'prate', 'taper', 'terap'], + 'teras': ['aster', 'serta', 'stare', 'strae', 'tarse', 'teras'], + 'teratism': ['mistreat', 'teratism'], + 'terbia': ['baiter', 'barite', 'rebait', 'terbia'], + 'terbium': ['burmite', 'imbrute', 'terbium'], + 'tercelet': ['electret', 'tercelet'], + 'terceron': ['corrente', 'terceron'], + 'tercia': ['acrite', 'arcite', 'tercia', 'triace', 'tricae'], + 'tercine': ['citrene', 'enteric', 'enticer', 'tercine'], + 'tercio': ['erotic', 'tercio'], + 'terebilic': ['celtiberi', 'terebilic'], + 'terebinthian': ['terebinthian', 'terebinthina'], + 'terebinthina': ['terebinthian', 'terebinthina'], + 'terebra': ['rebater', 'terebra'], + 'terebral': ['barrelet', 'terebral'], + 'terentian': ['entertain', 'tarentine', 'terentian'], + 'teresa': ['asteer', + 'easter', + 'eastre', + 'reseat', + 'saeter', + 'seater', + 'staree', + 'teaser', + 'teresa'], + 'teresian': ['arsenite', 'resinate', 'teresian', 'teresina'], + 'teresina': ['arsenite', 'resinate', 'teresian', 'teresina'], + 'terete': ['teeter', 'terete'], + 'teretial': ['laterite', 'literate', 'teretial'], + 'tereus': ['retuse', 'tereus'], + 'tergal': ['raglet', 'tergal'], + 'tergant': ['garnett', 'gnatter', 'gratten', 'tergant'], + 'tergeminous': ['mentigerous', 'tergeminous'], + 'teri': ['iter', 'reit', 'rite', 'teri', 'tier', 'tire'], + 'teriann': ['entrain', 'teriann'], + 'terma': ['armet', + 'mater', + 'merat', + 'metra', + 'ramet', + 'tamer', + 'terma', + 'trame', + 'trema'], + 'termagant': ['targetman', 'termagant'], + 'termes': ['mester', 'restem', 'temser', 'termes'], + 'termin': ['minter', 'remint', 'termin'], + 'terminal': ['terminal', 'tramline'], + 'terminalia': ['laminarite', 'terminalia'], + 'terminate': ['antimeter', 'attermine', 'interteam', 'terminate', 'tetramine'], + 'termini': ['interim', 'termini'], + 'terminine': ['intermine', 'nemertini', 'terminine'], + 'terminus': ['numerist', 'terminus'], + 'termital': ['remittal', 'termital'], + 'termite': ['emitter', 'termite'], + 'termly': ['myrtle', 'termly'], + 'termon': ['mentor', 'merton', 'termon', 'tormen'], + 'termor': ['termor', 'tremor'], + 'tern': ['rent', 'tern'], + 'terna': ['antre', 'arent', 'retan', 'terna'], + 'ternal': ['altern', 'antler', 'learnt', 'rental', 'ternal'], + 'ternar': ['arrent', 'errant', 'ranter', 'ternar'], + 'ternarious': ['souterrain', 'ternarious', 'trouserian'], + 'ternate': ['entreat', 'ratteen', 'tarente', 'ternate', 'tetrane'], + 'terne': ['enter', 'neter', 'renet', 'terne', 'treen'], + 'ternion': ['intoner', 'ternion'], + 'ternlet': ['nettler', 'ternlet'], + 'terp': ['pert', 'petr', 'terp'], + 'terpane': ['patener', 'pearten', 'petrean', 'terpane'], + 'terpeneless': ['repleteness', 'terpeneless'], + 'terpin': ['nipter', 'terpin'], + 'terpine': ['petrine', 'terpine'], + 'terpineol': ['interlope', 'interpole', 'repletion', 'terpineol'], + 'terpinol': ['pointrel', 'terpinol'], + 'terrace': ['caterer', 'recrate', 'retrace', 'terrace'], + 'terraciform': ['crateriform', 'terraciform'], + 'terrage': ['greater', 'regrate', 'terrage'], + 'terrain': ['arterin', 'retrain', 'terrain', 'trainer'], + 'terral': ['retral', 'terral'], + 'terrance': ['canterer', 'recanter', 'recreant', 'terrance'], + 'terrapin': ['pretrain', 'terrapin'], + 'terrar': ['tarrer', 'terrar'], + 'terrence': ['centerer', 'recenter', 'recentre', 'terrence'], + 'terrene': 
['enterer', 'terrene'], + 'terret': ['retter', 'terret'], + 'terri': ['terri', 'tirer', 'trier'], + 'terrier': ['retirer', 'terrier'], + 'terrine': ['reinter', 'terrine'], + 'terron': ['terron', 'treron', 'troner'], + 'terry': ['retry', 'terry'], + 'terse': ['ester', + 'estre', + 'reest', + 'reset', + 'steer', + 'stere', + 'stree', + 'terse', + 'tsere'], + 'tersely': ['restyle', 'tersely'], + 'tersion': ['oestrin', 'tersion'], + 'tertia': ['attire', 'ratite', 'tertia'], + 'tertian': ['intreat', 'iterant', 'nitrate', 'tertian'], + 'tertiana': ['attainer', 'reattain', 'tertiana'], + 'terton': ['rotten', 'terton'], + 'tervee': ['revete', 'tervee'], + 'terzina': ['retzian', 'terzina'], + 'terzo': ['terzo', 'tozer'], + 'tesack': ['casket', 'tesack'], + 'teskere': ['skeeter', 'teskere'], + 'tessella': ['satelles', 'tessella'], + 'tesseral': ['rateless', 'tasseler', 'tearless', 'tesseral'], + 'test': ['sett', 'stet', 'test'], + 'testa': ['state', 'taste', 'tates', 'testa'], + 'testable': ['settable', 'testable'], + 'testament': ['statement', 'testament'], + 'testar': ['stater', 'taster', 'testar'], + 'teste': ['teest', 'teste'], + 'tested': ['detest', 'tested'], + 'testee': ['settee', 'testee'], + 'tester': ['retest', 'setter', 'street', 'tester'], + 'testes': ['sestet', 'testes', 'tsetse'], + 'testicle': ['telestic', 'testicle'], + 'testicular': ['testicular', 'trisulcate'], + 'testily': ['stylite', 'testily'], + 'testing': ['setting', 'testing'], + 'teston': ['ostent', 'teston'], + 'testor': ['sotter', 'testor'], + 'testril': ['litster', 'slitter', 'stilter', 'testril'], + 'testy': ['testy', 'tyste'], + 'tetanic': ['nictate', 'tetanic'], + 'tetanical': ['cantalite', 'lactinate', 'tetanical'], + 'tetanoid': ['antidote', 'tetanoid'], + 'tetanus': ['tetanus', 'unstate', 'untaste'], + 'tetarconid': ['detraction', 'doctrinate', 'tetarconid'], + 'tetard': ['tetard', 'tetrad'], + 'tetchy': ['chetty', 'tetchy'], + 'tete': ['teet', 'tete'], + 'tetel': ['ettle', 'tetel'], + 'tether': ['hetter', 'tether'], + 'tethys': ['stythe', 'tethys'], + 'tetra': ['atter', 'tater', 'teart', 'tetra', 'treat'], + 'tetracid': ['citrated', 'tetracid', 'tetradic'], + 'tetrad': ['tetard', 'tetrad'], + 'tetradic': ['citrated', 'tetracid', 'tetradic'], + 'tetragonia': ['giornatate', 'tetragonia'], + 'tetrahexahedron': ['hexatetrahedron', 'tetrahexahedron'], + 'tetrakishexahedron': ['hexakistetrahedron', 'tetrakishexahedron'], + 'tetralin': ['tetralin', 'triental'], + 'tetramin': ['intermat', 'martinet', 'tetramin'], + 'tetramine': ['antimeter', 'attermine', 'interteam', 'terminate', 'tetramine'], + 'tetrander': ['retardent', 'tetrander'], + 'tetrandrous': ['tetrandrous', 'unrostrated'], + 'tetrane': ['entreat', 'ratteen', 'tarente', 'ternate', 'tetrane'], + 'tetrao': ['rotate', 'tetrao'], + 'tetraodon': ['detonator', 'tetraodon'], + 'tetraonine': ['entoretina', 'tetraonine'], + 'tetraplous': ['supertotal', 'tetraplous'], + 'tetrasporic': ['tetrasporic', 'triceratops'], + 'tetraxonia': ['retaxation', 'tetraxonia'], + 'tetrical': ['tetrical', 'tractile'], + 'tetricous': ['tetricous', 'toreutics'], + 'tetronic': ['contrite', 'tetronic'], + 'tetrose': ['rosette', 'tetrose'], + 'tetterous': ['outstreet', 'tetterous'], + 'teucri': ['curite', 'teucri', 'uretic'], + 'teucrian': ['anuretic', 'centauri', 'centuria', 'teucrian'], + 'teucrin': ['nutrice', 'teucrin'], + 'teuk': ['ketu', 'teuk', 'tuke'], + 'tew': ['tew', 'wet'], + 'tewa': ['tewa', 'twae', 'weta'], + 'tewel': ['tewel', 'tweel'], + 'tewer': ['rewet', 'tewer', 'twere'], + 
'tewit': ['tewit', 'twite'], + 'tewly': ['tewly', 'wetly'], + 'tha': ['aht', 'hat', 'tha'], + 'thai': ['hati', 'thai'], + 'thais': ['shita', 'thais'], + 'thalami': ['hamital', 'thalami'], + 'thaler': ['arthel', 'halter', 'lather', 'thaler'], + 'thalia': ['hiatal', 'thalia'], + 'thaliard': ['hardtail', 'thaliard'], + 'thamesis': ['mathesis', 'thamesis'], + 'than': ['hant', 'tanh', 'than'], + 'thana': ['atnah', 'tanha', 'thana'], + 'thanan': ['nathan', 'thanan'], + 'thanatism': ['staithman', 'thanatism'], + 'thanatotic': ['chattation', 'thanatotic'], + 'thane': ['enhat', 'ethan', 'nathe', 'neath', 'thane'], + 'thanker': ['rethank', 'thanker'], + 'thapes': ['spathe', 'thapes'], + 'thar': ['hart', 'rath', 'tahr', 'thar', 'trah'], + 'tharen': ['anther', 'nather', 'tharen', 'thenar'], + 'tharm': ['tharm', 'thram'], + 'thasian': ['ashanti', 'sanhita', 'shaitan', 'thasian'], + 'that': ['hatt', 'tath', 'that'], + 'thatcher': ['rethatch', 'thatcher'], + 'thaw': ['thaw', 'wath', 'what'], + 'thawer': ['rethaw', 'thawer', 'wreath'], + 'the': ['het', 'the'], + 'thea': ['ahet', 'haet', 'hate', 'heat', 'thea'], + 'theah': ['heath', 'theah'], + 'thearchy': ['hatchery', 'thearchy'], + 'theat': ['theat', 'theta'], + 'theater': ['teather', 'theater', 'thereat'], + 'theatricism': ['chemiatrist', 'chrismatite', 'theatricism'], + 'theatropolis': ['strophiolate', 'theatropolis'], + 'theatry': ['hattery', 'theatry'], + 'theb': ['beth', 'theb'], + 'thebaid': ['habited', 'thebaid'], + 'theca': ['cheat', 'tache', 'teach', 'theca'], + 'thecae': ['achete', 'hecate', 'teache', 'thecae'], + 'thecal': ['achtel', 'chalet', 'thecal', 'thecla'], + 'thecasporal': ['archapostle', 'thecasporal'], + 'thecata': ['attache', 'thecata'], + 'thecitis': ['ethicist', 'thecitis', 'theistic'], + 'thecla': ['achtel', 'chalet', 'thecal', 'thecla'], + 'theer': ['ether', 'rethe', 'theer', 'there', 'three'], + 'theet': ['teeth', 'theet'], + 'thegn': ['ghent', 'thegn'], + 'thegnly': ['lengthy', 'thegnly'], + 'theine': ['ethine', 'theine'], + 'their': ['ither', 'their'], + 'theirn': ['hinter', 'nither', 'theirn'], + 'theirs': ['shrite', 'theirs'], + 'theism': ['theism', 'themis'], + 'theist': ['theist', 'thetis'], + 'theistic': ['ethicist', 'thecitis', 'theistic'], + 'thema': ['ahmet', 'thema'], + 'thematic': ['mathetic', 'thematic'], + 'thematist': ['hattemist', 'thematist'], + 'themer': ['mether', 'themer'], + 'themis': ['theism', 'themis'], + 'themistian': ['antitheism', 'themistian'], + 'then': ['hent', 'neth', 'then'], + 'thenal': ['ethnal', 'hantle', 'lathen', 'thenal'], + 'thenar': ['anther', 'nather', 'tharen', 'thenar'], + 'theobald': ['bolthead', 'theobald'], + 'theocrasia': ['oireachtas', 'theocrasia'], + 'theocrat': ['theocrat', 'trochate'], + 'theocratic': ['rheotactic', 'theocratic'], + 'theodora': ['dorothea', 'theodora'], + 'theodore': ['theodore', 'treehood'], + 'theogonal': ['halogeton', 'theogonal'], + 'theologic': ['ethologic', 'theologic'], + 'theological': ['ethological', 'lethologica', 'theological'], + 'theologism': ['hemologist', 'theologism'], + 'theology': ['ethology', 'theology'], + 'theophanic': ['phaethonic', 'theophanic'], + 'theophilist': ['philotheist', 'theophilist'], + 'theopsychism': ['psychotheism', 'theopsychism'], + 'theorbo': ['boother', 'theorbo'], + 'theorematic': ['heteratomic', 'theorematic'], + 'theoretic': ['heterotic', 'theoretic'], + 'theoretician': ['heretication', 'theoretician'], + 'theorician': ['antiheroic', 'theorician'], + 'theorics': ['chirotes', 'theorics'], + 'theorism': ['homerist', 
'isotherm', 'otherism', 'theorism'], + 'theorist': ['otherist', 'theorist'], + 'theorizer': ['rhetorize', 'theorizer'], + 'theorum': ['mouther', 'theorum'], + 'theotherapy': ['heteropathy', 'theotherapy'], + 'therblig': ['blighter', 'therblig'], + 'there': ['ether', 'rethe', 'theer', 'there', 'three'], + 'thereas': ['thereas', 'theresa'], + 'thereat': ['teather', 'theater', 'thereat'], + 'therein': ['enherit', 'etherin', 'neither', 'therein'], + 'thereness': ['retheness', 'thereness', 'threeness'], + 'thereology': ['heterology', 'thereology'], + 'theres': ['esther', 'hester', 'theres'], + 'theresa': ['thereas', 'theresa'], + 'therese': ['sheeter', 'therese'], + 'therewithal': ['therewithal', 'whitleather'], + 'theriac': ['certhia', 'rhaetic', 'theriac'], + 'therial': ['hairlet', 'therial'], + 'theriodic': ['dichroite', 'erichtoid', 'theriodic'], + 'theriodonta': ['dehortation', 'theriodonta'], + 'thermantic': ['intermatch', 'thermantic'], + 'thermo': ['mother', 'thermo'], + 'thermobarograph': ['barothermograph', 'thermobarograph'], + 'thermoelectric': ['electrothermic', 'thermoelectric'], + 'thermoelectrometer': ['electrothermometer', 'thermoelectrometer'], + 'thermogalvanometer': ['galvanothermometer', 'thermogalvanometer'], + 'thermogeny': ['mythogreen', 'thermogeny'], + 'thermography': ['mythographer', 'thermography'], + 'thermology': ['mythologer', 'thermology'], + 'thermos': ['smother', 'thermos'], + 'thermotype': ['phytometer', 'thermotype'], + 'thermotypic': ['phytometric', 'thermotypic'], + 'thermotypy': ['phytometry', 'thermotypy'], + 'theroid': ['rhodite', 'theroid'], + 'theron': ['hornet', 'nother', 'theron', 'throne'], + 'thersitical': ['thersitical', 'trachelitis'], + 'these': ['sheet', 'these'], + 'thesean': ['sneathe', 'thesean'], + 'thesial': ['heliast', 'thesial'], + 'thesis': ['shiest', 'thesis'], + 'theta': ['theat', 'theta'], + 'thetical': ['athletic', 'thetical'], + 'thetis': ['theist', 'thetis'], + 'thew': ['hewt', 'thew', 'whet'], + 'they': ['they', 'yeth'], + 'theyre': ['theyre', 'yether'], + 'thicken': ['kitchen', 'thicken'], + 'thickener': ['kitchener', 'rethicken', 'thickener'], + 'thicket': ['chettik', 'thicket'], + 'thienyl': ['ethylin', 'thienyl'], + 'thig': ['gith', 'thig'], + 'thigh': ['hight', 'thigh'], + 'thill': ['illth', 'thill'], + 'thin': ['hint', 'thin'], + 'thing': ['night', 'thing'], + 'thingal': ['halting', 'lathing', 'thingal'], + 'thingless': ['lightness', 'nightless', 'thingless'], + 'thinglet': ['thinglet', 'thlinget'], + 'thinglike': ['nightlike', 'thinglike'], + 'thingly': ['nightly', 'thingly'], + 'thingman': ['nightman', 'thingman'], + 'thinker': ['rethink', 'thinker'], + 'thio': ['hoit', 'hoti', 'thio'], + 'thiocresol': ['holosteric', 'thiocresol'], + 'thiol': ['litho', 'thiol', 'tholi'], + 'thiolacetic': ['heliotactic', 'thiolacetic'], + 'thiophenol': ['lithophone', 'thiophenol'], + 'thiopyran': ['phoniatry', 'thiopyran'], + 'thirlage': ['litharge', 'thirlage'], + 'this': ['hist', 'sith', 'this', 'tshi'], + 'thissen': ['sithens', 'thissen'], + 'thistle': ['lettish', 'thistle'], + 'thlinget': ['thinglet', 'thlinget'], + 'tho': ['hot', 'tho'], + 'thob': ['both', 'thob'], + 'thole': ['helot', 'hotel', 'thole'], + 'tholi': ['litho', 'thiol', 'tholi'], + 'tholos': ['soloth', 'tholos'], + 'thomaean': ['amaethon', 'thomaean'], + 'thomasine': ['hematosin', 'thomasine'], + 'thomisid': ['isthmoid', 'thomisid'], + 'thomsonite': ['monotheist', 'thomsonite'], + 'thonder': ['thonder', 'thorned'], + 'thonga': ['gnatho', 'thonga'], + 'thoo': ['hoot', 
'thoo', 'toho'], + 'thoom': ['mooth', 'thoom'], + 'thoracectomy': ['chromatocyte', 'thoracectomy'], + 'thoracic': ['thoracic', 'tocharic', 'trochaic'], + 'thoral': ['harlot', 'orthal', 'thoral'], + 'thore': ['other', 'thore', 'throe', 'toher'], + 'thoric': ['chorti', 'orthic', 'thoric', 'trochi'], + 'thorina': ['orthian', 'thorina'], + 'thorite': ['hortite', 'orthite', 'thorite'], + 'thorn': ['north', 'thorn'], + 'thorned': ['thonder', 'thorned'], + 'thornhead': ['rhodanthe', 'thornhead'], + 'thorny': ['rhyton', 'thorny'], + 'thoro': ['ortho', 'thoro'], + 'thort': ['thort', 'troth'], + 'thos': ['host', 'shot', 'thos', 'tosh'], + 'those': ['ethos', 'shote', 'those'], + 'thowel': ['howlet', 'thowel'], + 'thraces': ['stacher', 'thraces'], + 'thracian': ['taranchi', 'thracian'], + 'thraep': ['thraep', 'threap'], + 'thrain': ['hartin', 'thrain'], + 'thram': ['tharm', 'thram'], + 'thrang': ['granth', 'thrang'], + 'thrasher': ['rethrash', 'thrasher'], + 'thrast': ['strath', 'thrast'], + 'thraw': ['thraw', 'warth', 'whart', 'wrath'], + 'thread': ['dearth', 'hatred', 'rathed', 'thread'], + 'threaden': ['adherent', 'headrent', 'neatherd', 'threaden'], + 'threader': ['rethread', 'threader'], + 'threadworm': ['motherward', 'threadworm'], + 'thready': ['hydrate', 'thready'], + 'threap': ['thraep', 'threap'], + 'threat': ['hatter', 'threat'], + 'threatener': ['rethreaten', 'threatener'], + 'three': ['ether', 'rethe', 'theer', 'there', 'three'], + 'threeling': ['lightener', 'relighten', 'threeling'], + 'threeness': ['retheness', 'thereness', 'threeness'], + 'threne': ['erthen', 'henter', 'nether', 'threne'], + 'threnode': ['dethrone', 'threnode'], + 'threnodic': ['chondrite', 'threnodic'], + 'threnos': ['shorten', 'threnos'], + 'thresher': ['rethresh', 'thresher'], + 'thrice': ['cither', 'thrice'], + 'thriller': ['rethrill', 'thriller'], + 'thripel': ['philter', 'thripel'], + 'thripidae': ['rhipidate', 'thripidae'], + 'throat': ['athort', 'throat'], + 'throb': ['broth', 'throb'], + 'throe': ['other', 'thore', 'throe', 'toher'], + 'thronal': ['althorn', 'anthrol', 'thronal'], + 'throne': ['hornet', 'nother', 'theron', 'throne'], + 'throu': ['routh', 'throu'], + 'throughout': ['outthrough', 'throughout'], + 'throw': ['throw', 'whort', 'worth', 'wroth'], + 'throwdown': ['downthrow', 'throwdown'], + 'thrower': ['rethrow', 'thrower'], + 'throwing': ['ingrowth', 'throwing'], + 'throwout': ['outthrow', 'outworth', 'throwout'], + 'thrum': ['thrum', 'thurm'], + 'thrust': ['struth', 'thrust'], + 'thruster': ['rethrust', 'thruster'], + 'thuan': ['ahunt', 'haunt', 'thuan', 'unhat'], + 'thulr': ['thulr', 'thurl'], + 'thunderbearing': ['thunderbearing', 'underbreathing'], + 'thunderer': ['rethunder', 'thunderer'], + 'thundering': ['thundering', 'underthing'], + 'thuoc': ['couth', 'thuoc', 'touch'], + 'thurl': ['thulr', 'thurl'], + 'thurm': ['thrum', 'thurm'], + 'thurse': ['reshut', 'suther', 'thurse', 'tusher'], + 'thurt': ['thurt', 'truth'], + 'thus': ['shut', 'thus', 'tush'], + 'thusness': ['shutness', 'thusness'], + 'thwacker': ['thwacker', 'whatreck'], + 'thwartover': ['overthwart', 'thwartover'], + 'thymelic': ['methylic', 'thymelic'], + 'thymetic': ['hymettic', 'thymetic'], + 'thymus': ['mythus', 'thymus'], + 'thyreogenous': ['heterogynous', 'thyreogenous'], + 'thyreohyoid': ['hyothyreoid', 'thyreohyoid'], + 'thyreoid': ['hydriote', 'thyreoid'], + 'thyreoidal': ['thyreoidal', 'thyroideal'], + 'thyreosis': ['oysterish', 'thyreosis'], + 'thyris': ['shirty', 'thyris'], + 'thyrocricoid': ['cricothyroid', 
'thyrocricoid'], + 'thyrogenic': ['thyrogenic', 'trichogyne'], + 'thyrohyoid': ['hyothyroid', 'thyrohyoid'], + 'thyroideal': ['thyreoidal', 'thyroideal'], + 'thyroiodin': ['iodothyrin', 'thyroiodin'], + 'thyroprivic': ['thyroprivic', 'vitrophyric'], + 'thysanopteran': ['parasyntheton', 'thysanopteran'], + 'thysel': ['shelty', 'thysel'], + 'ti': ['it', 'ti'], + 'tiang': ['giant', 'tangi', 'tiang'], + 'tiao': ['iota', 'tiao'], + 'tiar': ['airt', 'rita', 'tari', 'tiar'], + 'tiara': ['arati', 'atria', 'riata', 'tarai', 'tiara'], + 'tiarella': ['arillate', 'tiarella'], + 'tib': ['bit', 'tib'], + 'tibetan': ['bettina', 'tabinet', 'tibetan'], + 'tibial': ['bilati', 'tibial'], + 'tibiale': ['biliate', 'tibiale'], + 'tibiofemoral': ['femorotibial', 'tibiofemoral'], + 'tic': ['cit', 'tic'], + 'ticca': ['cacti', 'ticca'], + 'tice': ['ceti', 'cite', 'tice'], + 'ticer': ['citer', 'recti', 'ticer', 'trice'], + 'tichodroma': ['chromatoid', 'tichodroma'], + 'ticketer': ['reticket', 'ticketer'], + 'tickler': ['tickler', 'trickle'], + 'tickproof': ['prickfoot', 'tickproof'], + 'ticuna': ['anicut', 'nautic', 'ticuna', 'tunica'], + 'ticunan': ['ticunan', 'tunican'], + 'tid': ['dit', 'tid'], + 'tidal': ['datil', 'dital', 'tidal', 'tilda'], + 'tiddley': ['lyddite', 'tiddley'], + 'tide': ['diet', 'dite', 'edit', 'tide', 'tied'], + 'tidely': ['idlety', 'lydite', 'tidely', 'tidley'], + 'tiding': ['tiding', 'tingid'], + 'tidley': ['idlety', 'lydite', 'tidely', 'tidley'], + 'tied': ['diet', 'dite', 'edit', 'tide', 'tied'], + 'tien': ['iten', 'neti', 'tien', 'tine'], + 'tiepin': ['pinite', 'tiepin'], + 'tier': ['iter', 'reit', 'rite', 'teri', 'tier', 'tire'], + 'tierce': ['cerite', 'certie', 'recite', 'tierce'], + 'tiered': ['dieter', 'tiered'], + 'tierer': ['errite', 'reiter', 'retier', 'retire', 'tierer'], + 'tiffy': ['fifty', 'tiffy'], + 'tifter': ['fitter', 'tifter'], + 'tig': ['git', 'tig'], + 'tigella': ['tigella', 'tillage'], + 'tiger': ['tiger', 'tigre'], + 'tigereye': ['geyerite', 'tigereye'], + 'tightener': ['retighten', 'tightener'], + 'tiglinic': ['lignitic', 'tiglinic'], + 'tigre': ['tiger', 'tigre'], + 'tigrean': ['angrite', 'granite', 'ingrate', 'tangier', 'tearing', 'tigrean'], + 'tigress': ['striges', 'tigress'], + 'tigrine': ['igniter', 'ringite', 'tigrine'], + 'tigurine': ['intrigue', 'tigurine'], + 'tikka': ['katik', 'tikka'], + 'tikur': ['tikur', 'turki'], + 'til': ['lit', 'til'], + 'tilaite': ['italite', 'letitia', 'tilaite'], + 'tilda': ['datil', 'dital', 'tidal', 'tilda'], + 'tilde': ['tilde', 'tiled'], + 'tile': ['lite', 'teil', 'teli', 'tile'], + 'tiled': ['tilde', 'tiled'], + 'tiler': ['liter', 'tiler'], + 'tilery': ['tilery', 'tilyer'], + 'tileways': ['sweatily', 'tileways'], + 'tileyard': ['dielytra', 'tileyard'], + 'tilia': ['itali', 'tilia'], + 'tilikum': ['kulimit', 'tilikum'], + 'till': ['lilt', 'till'], + 'tillable': ['belltail', 'bletilla', 'tillable'], + 'tillaea': ['alalite', 'tillaea'], + 'tillage': ['tigella', 'tillage'], + 'tiller': ['retill', 'rillet', 'tiller'], + 'tilmus': ['litmus', 'tilmus'], + 'tilpah': ['lapith', 'tilpah'], + 'tilter': ['litter', 'tilter', 'titler'], + 'tilting': ['tilting', 'titling', 'tlingit'], + 'tiltup': ['tiltup', 'uptilt'], + 'tilyer': ['tilery', 'tilyer'], + 'timable': ['limbate', 'timable', 'timbale'], + 'timaeus': ['metusia', 'suimate', 'timaeus'], + 'timaline': ['meliatin', 'timaline'], + 'timani': ['intima', 'timani'], + 'timar': ['mitra', 'tarmi', 'timar', 'tirma'], + 'timbal': ['limbat', 'timbal'], + 'timbale': ['limbate', 'timable', 
'timbale'], + 'timber': ['betrim', 'timber', 'timbre'], + 'timbered': ['bemitred', 'timbered'], + 'timberer': ['retimber', 'timberer'], + 'timberlike': ['kimberlite', 'timberlike'], + 'timbre': ['betrim', 'timber', 'timbre'], + 'time': ['emit', 'item', 'mite', 'time'], + 'timecard': ['dermatic', 'timecard'], + 'timed': ['demit', 'timed'], + 'timeproof': ['miteproof', 'timeproof'], + 'timer': ['merit', 'miter', 'mitre', 'remit', 'timer'], + 'times': ['metis', 'smite', 'stime', 'times'], + 'timesaving': ['negativism', 'timesaving'], + 'timework': ['timework', 'worktime'], + 'timid': ['dimit', 'timid'], + 'timidly': ['mytilid', 'timidly'], + 'timidness': ['destinism', 'timidness'], + 'timish': ['isthmi', 'timish'], + 'timne': ['metin', 'temin', 'timne'], + 'timo': ['itmo', 'moit', 'omit', 'timo'], + 'timon': ['minot', 'timon', 'tomin'], + 'timorese': ['rosetime', 'timorese', 'tiresome'], + 'timpani': ['impaint', 'timpani'], + 'timpano': ['maintop', 'ptomain', 'tampion', 'timpano'], + 'tin': ['nit', 'tin'], + 'tina': ['aint', 'anti', 'tain', 'tina'], + 'tincal': ['catlin', 'tincal'], + 'tinchel': ['linchet', 'tinchel'], + 'tinctorial': ['tinctorial', 'trinoctial'], + 'tind': ['dint', 'tind'], + 'tindal': ['antlid', 'tindal'], + 'tindalo': ['itoland', 'talonid', 'tindalo'], + 'tinder': ['dirten', 'rident', 'tinder'], + 'tindered': ['dendrite', 'tindered'], + 'tinderous': ['detrusion', 'tinderous', 'unstoried'], + 'tine': ['iten', 'neti', 'tien', 'tine'], + 'tinea': ['entia', 'teian', 'tenai', 'tinea'], + 'tineal': ['entail', 'tineal'], + 'tinean': ['annite', 'innate', 'tinean'], + 'tined': ['detin', 'teind', 'tined'], + 'tineid': ['indite', 'tineid'], + 'tineman': ['mannite', 'tineman'], + 'tineoid': ['edition', 'odinite', 'otidine', 'tineoid'], + 'tinetare': ['intereat', 'tinetare'], + 'tinety': ['entity', 'tinety'], + 'tinged': ['nidget', 'tinged'], + 'tinger': ['engirt', 'tinger'], + 'tingid': ['tiding', 'tingid'], + 'tingitidae': ['indigitate', 'tingitidae'], + 'tingler': ['ringlet', 'tingler', 'tringle'], + 'tinhouse': ['outshine', 'tinhouse'], + 'tink': ['knit', 'tink'], + 'tinker': ['reknit', 'tinker'], + 'tinkerer': ['retinker', 'tinkerer'], + 'tinkler': ['tinkler', 'trinkle'], + 'tinlet': ['litten', 'tinlet'], + 'tinne': ['innet', 'tinne'], + 'tinned': ['dentin', 'indent', 'intend', 'tinned'], + 'tinner': ['intern', 'tinner'], + 'tinnet': ['intent', 'tinnet'], + 'tino': ['into', 'nito', 'oint', 'tino'], + 'tinoceras': ['atroscine', 'certosina', 'ostracine', 'tinoceras', 'tricosane'], + 'tinosa': ['sotnia', 'tinosa'], + 'tinsel': ['enlist', 'listen', 'silent', 'tinsel'], + 'tinselly': ['silently', 'tinselly'], + 'tinta': ['taint', 'tanti', 'tinta', 'titan'], + 'tintage': ['attinge', 'tintage'], + 'tinter': ['nitter', 'tinter'], + 'tintie': ['tintie', 'titien'], + 'tintiness': ['insistent', 'tintiness'], + 'tinty': ['nitty', 'tinty'], + 'tinworker': ['interwork', 'tinworker'], + 'tionontates': ['ostentation', 'tionontates'], + 'tip': ['pit', 'tip'], + 'tipe': ['piet', 'tipe'], + 'tipful': ['tipful', 'uplift'], + 'tipless': ['pitless', 'tipless'], + 'tipman': ['pitman', 'tampin', 'tipman'], + 'tipper': ['rippet', 'tipper'], + 'tippler': ['ripplet', 'tippler', 'tripple'], + 'tipster': ['spitter', 'tipster'], + 'tipstock': ['potstick', 'tipstock'], + 'tipula': ['tipula', 'tulipa'], + 'tiralee': ['atelier', 'tiralee'], + 'tire': ['iter', 'reit', 'rite', 'teri', 'tier', 'tire'], + 'tired': ['diter', 'tired', 'tried'], + 'tiredly': ['tiredly', 'triedly'], + 'tiredness': ['dissenter', 
'tiredness'], + 'tireless': ['riteless', 'tireless'], + 'tirelessness': ['ritelessness', 'tirelessness'], + 'tiremaid': ['dimetria', 'mitridae', 'tiremaid', 'triamide'], + 'tireman': ['minaret', 'raiment', 'tireman'], + 'tirer': ['terri', 'tirer', 'trier'], + 'tiresmith': ['tiresmith', 'tritheism'], + 'tiresome': ['rosetime', 'timorese', 'tiresome'], + 'tirma': ['mitra', 'tarmi', 'timar', 'tirma'], + 'tirolean': ['oriental', 'relation', 'tirolean'], + 'tirolese': ['literose', 'roselite', 'tirolese'], + 'tirve': ['rivet', 'tirve', 'tiver'], + 'tisane': ['satine', 'tisane'], + 'tisar': ['arist', + 'astir', + 'sitar', + 'stair', + 'stria', + 'tarsi', + 'tisar', + 'trias'], + 'titan': ['taint', 'tanti', 'tinta', 'titan'], + 'titaness': ['antistes', 'titaness'], + 'titanic': ['tanitic', 'titanic'], + 'titano': ['otiant', 'titano'], + 'titanocolumbate': ['columbotitanate', 'titanocolumbate'], + 'titanofluoride': ['rotundifoliate', 'titanofluoride'], + 'titanosaur': ['saturation', 'titanosaur'], + 'titanosilicate': ['silicotitanate', 'titanosilicate'], + 'titanous': ['outsaint', 'titanous'], + 'titanyl': ['nattily', 'titanyl'], + 'titar': ['ratti', 'titar', 'trait'], + 'titer': ['titer', 'titre', 'trite'], + 'tithable': ['hittable', 'tithable'], + 'tither': ['hitter', 'tither'], + 'tithonic': ['chinotti', 'tithonic'], + 'titien': ['tintie', 'titien'], + 'titleboard': ['titleboard', 'trilobated'], + 'titler': ['litter', 'tilter', 'titler'], + 'titling': ['tilting', 'titling', 'tlingit'], + 'titrate': ['attrite', 'titrate'], + 'titration': ['attrition', 'titration'], + 'titre': ['titer', 'titre', 'trite'], + 'tiver': ['rivet', 'tirve', 'tiver'], + 'tiza': ['itza', 'tiza', 'zati'], + 'tlaco': ['lacto', 'tlaco'], + 'tlingit': ['tilting', 'titling', 'tlingit'], + 'tmesis': ['misset', 'tmesis'], + 'toa': ['oat', 'tao', 'toa'], + 'toad': ['doat', 'toad', 'toda'], + 'toader': ['doater', 'toader'], + 'toadflower': ['floodwater', 'toadflower', 'waterflood'], + 'toadier': ['roadite', 'toadier'], + 'toadish': ['doatish', 'toadish'], + 'toady': ['toady', 'today'], + 'toadyish': ['toadyish', 'todayish'], + 'toag': ['goat', 'toag', 'toga'], + 'toast': ['stoat', 'toast'], + 'toaster': ['retoast', 'rosetta', 'stoater', 'toaster'], + 'toba': ['boat', 'bota', 'toba'], + 'tobe': ['bote', 'tobe'], + 'tobiah': ['bhotia', 'tobiah'], + 'tobine': ['botein', 'tobine'], + 'toccata': ['attacco', 'toccata'], + 'tocharese': ['escheator', 'tocharese'], + 'tocharian': ['archontia', 'tocharian'], + 'tocharic': ['thoracic', 'tocharic', 'trochaic'], + 'tocher': ['hector', 'rochet', 'tocher', 'troche'], + 'toco': ['coot', 'coto', 'toco'], + 'tocogenetic': ['geotectonic', 'tocogenetic'], + 'tocometer': ['octometer', 'rectotome', 'tocometer'], + 'tocsin': ['nostic', 'sintoc', 'tocsin'], + 'tod': ['dot', 'tod'], + 'toda': ['doat', 'toad', 'toda'], + 'today': ['toady', 'today'], + 'todayish': ['toadyish', 'todayish'], + 'toddle': ['dodlet', 'toddle'], + 'tode': ['dote', 'tode', 'toed'], + 'todea': ['deota', 'todea'], + 'tody': ['doty', 'tody'], + 'toecap': ['capote', 'toecap'], + 'toed': ['dote', 'tode', 'toed'], + 'toeless': ['osselet', 'sestole', 'toeless'], + 'toenail': ['alnoite', 'elation', 'toenail'], + 'tog': ['got', 'tog'], + 'toga': ['goat', 'toag', 'toga'], + 'togaed': ['dogate', 'dotage', 'togaed'], + 'togalike': ['goatlike', 'togalike'], + 'toggel': ['goglet', 'toggel', 'toggle'], + 'toggle': ['goglet', 'toggel', 'toggle'], + 'togs': ['stog', 'togs'], + 'toher': ['other', 'thore', 'throe', 'toher'], + 'toho': ['hoot', 
'thoo', 'toho'], + 'tohunga': ['hangout', 'tohunga'], + 'toi': ['ito', 'toi'], + 'toil': ['ilot', 'toil'], + 'toiler': ['loiter', 'toiler', 'triole'], + 'toilet': ['lottie', 'toilet', 'tolite'], + 'toiletry': ['toiletry', 'tyrolite'], + 'toise': ['sotie', 'toise'], + 'tokay': ['otyak', 'tokay'], + 'toke': ['keto', 'oket', 'toke'], + 'toko': ['koto', 'toko', 'took'], + 'tol': ['lot', 'tol'], + 'tolamine': ['lomatine', 'tolamine'], + 'tolan': ['notal', 'ontal', 'talon', 'tolan', 'tonal'], + 'tolane': ['etalon', 'tolane'], + 'told': ['dolt', 'told'], + 'tole': ['leto', 'lote', 'tole'], + 'toledan': ['taloned', 'toledan'], + 'toledo': ['toledo', 'toodle'], + 'tolerance': ['antrocele', 'coeternal', 'tolerance'], + 'tolerancy': ['alectryon', 'tolerancy'], + 'tolidine': ['lindoite', 'tolidine'], + 'tolite': ['lottie', 'toilet', 'tolite'], + 'tollery': ['tollery', 'trolley'], + 'tolly': ['tolly', 'tolyl'], + 'tolpatch': ['potlatch', 'tolpatch'], + 'tolsey': ['tolsey', 'tylose'], + 'tolter': ['lotter', 'rottle', 'tolter'], + 'tolu': ['lout', 'tolu'], + 'toluic': ['coutil', 'toluic'], + 'toluifera': ['foliature', 'toluifera'], + 'tolyl': ['tolly', 'tolyl'], + 'tom': ['mot', 'tom'], + 'toma': ['atmo', 'atom', 'moat', 'toma'], + 'toman': ['manto', 'toman'], + 'tomas': ['atmos', 'stoma', 'tomas'], + 'tombac': ['combat', 'tombac'], + 'tome': ['mote', 'tome'], + 'tomentose': ['metosteon', 'tomentose'], + 'tomial': ['lomita', 'tomial'], + 'tomin': ['minot', 'timon', 'tomin'], + 'tomographic': ['motographic', 'tomographic'], + 'tomorn': ['morton', 'tomorn'], + 'tomorrow': ['moorwort', 'rootworm', 'tomorrow', 'wormroot'], + 'ton': ['not', 'ton'], + 'tonal': ['notal', 'ontal', 'talon', 'tolan', 'tonal'], + 'tonalitive': ['levitation', 'tonalitive', 'velitation'], + 'tonation': ['notation', 'tonation'], + 'tone': ['note', 'tone'], + 'toned': ['donet', 'noted', 'toned'], + 'toneless': ['noteless', 'toneless'], + 'tonelessly': ['notelessly', 'tonelessly'], + 'tonelessness': ['notelessness', 'tonelessness'], + 'toner': ['noter', 'tenor', 'toner', 'trone'], + 'tonetic': ['entotic', 'tonetic'], + 'tonetics': ['stenotic', 'tonetics'], + 'tonga': ['tango', 'tonga'], + 'tongan': ['ganton', 'tongan'], + 'tongas': ['sontag', 'tongas'], + 'tonger': ['geront', 'tonger'], + 'tongrian': ['ignorant', 'tongrian'], + 'tongs': ['stong', 'tongs'], + 'tonicize': ['nicotize', 'tonicize'], + 'tonicoclonic': ['clonicotonic', 'tonicoclonic'], + 'tonify': ['notify', 'tonify'], + 'tonish': ['histon', 'shinto', 'tonish'], + 'tonk': ['knot', 'tonk'], + 'tonkin': ['inknot', 'tonkin'], + 'tonna': ['anton', 'notan', 'tonna'], + 'tonological': ['ontological', 'tonological'], + 'tonology': ['ontology', 'tonology'], + 'tonsorial': ['tonsorial', 'torsional'], + 'tonsure': ['snouter', 'tonsure', 'unstore'], + 'tonsured': ['tonsured', 'unsorted', 'unstored'], + 'tontine': ['nettion', 'tention', 'tontine'], + 'tonus': ['notus', 'snout', 'stoun', 'tonus'], + 'tony': ['tony', 'yont'], + 'too': ['oto', 'too'], + 'toodle': ['toledo', 'toodle'], + 'took': ['koto', 'toko', 'took'], + 'tool': ['loot', 'tool'], + 'tooler': ['looter', 'retool', 'rootle', 'tooler'], + 'tooling': ['ilongot', 'tooling'], + 'toom': ['moot', 'toom'], + 'toon': ['onto', 'oont', 'toon'], + 'toona': ['naoto', 'toona'], + 'toop': ['poot', 'toop', 'topo'], + 'toosh': ['shoot', 'sooth', 'sotho', 'toosh'], + 'toot': ['otto', 'toot', 'toto'], + 'toother': ['retooth', 'toother'], + 'toothpick': ['picktooth', 'toothpick'], + 'tootler': ['rootlet', 'tootler'], + 'top': ['opt', 'pot', 
'top'], + 'toparch': ['caphtor', 'toparch'], + 'topass': ['potass', 'topass'], + 'topchrome': ['ectomorph', 'topchrome'], + 'tope': ['peto', 'poet', 'pote', 'tope'], + 'toper': ['poter', 'prote', 'repot', 'tepor', 'toper', 'trope'], + 'topfull': ['plotful', 'topfull'], + 'toph': ['phot', 'toph'], + 'tophus': ['tophus', 'upshot'], + 'topia': ['patio', 'taipo', 'topia'], + 'topiarist': ['parotitis', 'topiarist'], + 'topic': ['optic', 'picot', 'topic'], + 'topical': ['capitol', 'coalpit', 'optical', 'topical'], + 'topically': ['optically', 'topically'], + 'toplike': ['kitlope', 'potlike', 'toplike'], + 'topline': ['pointel', 'pontile', 'topline'], + 'topmaker': ['potmaker', 'topmaker'], + 'topmaking': ['potmaking', 'topmaking'], + 'topman': ['potman', 'tampon', 'topman'], + 'topmast': ['tapmost', 'topmast'], + 'topo': ['poot', 'toop', 'topo'], + 'topographics': ['coprophagist', 'topographics'], + 'topography': ['optography', 'topography'], + 'topological': ['optological', 'topological'], + 'topologist': ['optologist', 'topologist'], + 'topology': ['optology', 'topology'], + 'toponymal': ['monotypal', 'toponymal'], + 'toponymic': ['monotypic', 'toponymic'], + 'toponymical': ['monotypical', 'toponymical'], + 'topophone': ['optophone', 'topophone'], + 'topotype': ['optotype', 'topotype'], + 'topple': ['loppet', 'topple'], + 'toppler': ['preplot', 'toppler'], + 'toprail': ['portail', 'toprail'], + 'tops': ['post', 'spot', 'stop', 'tops'], + 'topsail': ['apostil', 'topsail'], + 'topside': ['deposit', 'topside'], + 'topsman': ['postman', 'topsman'], + 'topsoil': ['loopist', 'poloist', 'topsoil'], + 'topstone': ['potstone', 'topstone'], + 'toptail': ['ptilota', 'talipot', 'toptail'], + 'toque': ['quote', 'toque'], + 'tor': ['ort', 'rot', 'tor'], + 'tora': ['rota', 'taro', 'tora'], + 'toral': ['latro', 'rotal', 'toral'], + 'toran': ['orant', 'rotan', 'toran', 'trona'], + 'torbanite': ['abortient', 'torbanite'], + 'torcel': ['colter', 'lector', 'torcel'], + 'torch': ['chort', 'rotch', 'torch'], + 'tore': ['rote', 'tore'], + 'tored': ['doter', 'tored', 'trode'], + 'torenia': ['otarine', 'torenia'], + 'torero': ['reroot', 'rooter', 'torero'], + 'toreutics': ['tetricous', 'toreutics'], + 'torfel': ['floret', 'forlet', 'lofter', 'torfel'], + 'torgot': ['grotto', 'torgot'], + 'toric': ['toric', 'troic'], + 'torinese': ['serotine', 'torinese'], + 'torma': ['amort', 'morat', 'torma'], + 'tormen': ['mentor', 'merton', 'termon', 'tormen'], + 'tormina': ['amintor', 'tormina'], + 'torn': ['torn', 'tron'], + 'tornachile': ['chlorinate', 'ectorhinal', 'tornachile'], + 'tornado': ['donator', 'odorant', 'tornado'], + 'tornal': ['latron', 'lontar', 'tornal'], + 'tornaria': ['rotarian', 'tornaria'], + 'tornarian': ['narration', 'tornarian'], + 'tornese': ['enstore', 'estrone', 'storeen', 'tornese'], + 'torney': ['torney', 'tyrone'], + 'tornit': ['intort', 'tornit', 'triton'], + 'tornus': ['tornus', 'unsort'], + 'toro': ['root', 'roto', 'toro'], + 'torose': ['seroot', 'sooter', 'torose'], + 'torpent': ['portent', 'torpent'], + 'torpescent': ['precontest', 'torpescent'], + 'torpid': ['torpid', 'tripod'], + 'torpify': ['portify', 'torpify'], + 'torpor': ['portor', 'torpor'], + 'torque': ['quoter', 'roquet', 'torque'], + 'torques': ['questor', 'torques'], + 'torrubia': ['rubiator', 'torrubia'], + 'torsade': ['rosated', 'torsade'], + 'torse': ['roset', 'rotse', 'soter', 'stero', 'store', 'torse'], + 'torsel': ['relost', 'reslot', 'rostel', 'sterol', 'torsel'], + 'torsile': ['estriol', 'torsile'], + 'torsion': ['isotron', 
'torsion'], + 'torsional': ['tonsorial', 'torsional'], + 'torsk': ['stork', 'torsk'], + 'torso': ['roost', 'torso'], + 'torsten': ['snotter', 'stentor', 'torsten'], + 'tort': ['tort', 'trot'], + 'torta': ['ottar', 'tarot', 'torta', 'troat'], + 'torteau': ['outrate', 'outtear', 'torteau'], + 'torticone': ['torticone', 'tritocone'], + 'tortile': ['lotrite', 'tortile', 'triolet'], + 'tortilla': ['littoral', 'tortilla'], + 'tortonian': ['intonator', 'tortonian'], + 'tortrices': ['tortrices', 'trisector'], + 'torture': ['torture', 'trouter', 'tutorer'], + 'toru': ['rout', 'toru', 'tour'], + 'torula': ['rotula', 'torula'], + 'torulaform': ['formulator', 'torulaform'], + 'toruliform': ['rotuliform', 'toruliform'], + 'torulose': ['outsoler', 'torulose'], + 'torulus': ['rotulus', 'torulus'], + 'torus': ['roust', 'rusot', 'stour', 'sutor', 'torus'], + 'torve': ['overt', 'rovet', 'torve', 'trove', 'voter'], + 'tory': ['royt', 'ryot', 'tory', 'troy', 'tyro'], + 'toryish': ['history', 'toryish'], + 'toryism': ['toryism', 'trisomy'], + 'tosephtas': ['posthaste', 'tosephtas'], + 'tosh': ['host', 'shot', 'thos', 'tosh'], + 'tosher': ['hoster', 'tosher'], + 'toshly': ['hostly', 'toshly'], + 'toshnail': ['histonal', 'toshnail'], + 'toss': ['sots', 'toss'], + 'tosser': ['retoss', 'tosser'], + 'tossily': ['tossily', 'tylosis'], + 'tossup': ['tossup', 'uptoss'], + 'tost': ['stot', 'tost'], + 'total': ['lotta', 'total'], + 'totanine': ['intonate', 'totanine'], + 'totaquin': ['quintato', 'totaquin'], + 'totchka': ['hattock', 'totchka'], + 'totem': ['motet', 'motte', 'totem'], + 'toter': ['ortet', 'otter', 'toter'], + 'tother': ['hotter', 'tother'], + 'toto': ['otto', 'toot', 'toto'], + 'toty': ['toty', 'tyto'], + 'tou': ['out', 'tou'], + 'toucan': ['toucan', 'tucano', 'uncoat'], + 'touch': ['couth', 'thuoc', 'touch'], + 'toucher': ['retouch', 'toucher'], + 'touchily': ['couthily', 'touchily'], + 'touchiness': ['couthiness', 'touchiness'], + 'touching': ['touching', 'ungothic'], + 'touchless': ['couthless', 'touchless'], + 'toug': ['gout', 'toug'], + 'tough': ['ought', 'tough'], + 'toughness': ['oughtness', 'toughness'], + 'toup': ['pout', 'toup'], + 'tour': ['rout', 'toru', 'tour'], + 'tourer': ['retour', 'router', 'tourer'], + 'touring': ['outgrin', 'outring', 'routing', 'touring'], + 'tourism': ['sumitro', 'tourism'], + 'touristy': ['touristy', 'yttrious'], + 'tourmalinic': ['latrocinium', 'tourmalinic'], + 'tournamental': ['tournamental', 'ultramontane'], + 'tourte': ['tourte', 'touter'], + 'tousche': ['souchet', 'techous', 'tousche'], + 'touser': ['ouster', 'souter', 'touser', 'trouse'], + 'tousle': ['lutose', 'solute', 'tousle'], + 'touter': ['tourte', 'touter'], + 'tovaria': ['aviator', 'tovaria'], + 'tow': ['tow', 'two', 'wot'], + 'towel': ['owlet', 'towel'], + 'tower': ['rowet', 'tower', 'wrote'], + 'town': ['nowt', 'town', 'wont'], + 'towned': ['towned', 'wonted'], + 'towser': ['restow', 'stower', 'towser', 'worset'], + 'towy': ['towy', 'yowt'], + 'toxemia': ['oximate', 'toxemia'], + 'toy': ['toy', 'yot'], + 'toyer': ['royet', 'toyer'], + 'toyful': ['outfly', 'toyful'], + 'toyless': ['systole', 'toyless'], + 'toysome': ['myosote', 'toysome'], + 'tozer': ['terzo', 'tozer'], + 'tra': ['art', 'rat', 'tar', 'tra'], + 'trabea': ['abater', 'artabe', 'eartab', 'trabea'], + 'trace': ['caret', + 'carte', + 'cater', + 'crate', + 'creat', + 'creta', + 'react', + 'recta', + 'trace'], + 'traceable': ['creatable', 'traceable'], + 'tracer': ['arrect', 'carter', 'crater', 'recart', 'tracer'], + 'tracheata': 
['cathartae', 'tracheata'], + 'trachelitis': ['thersitical', 'trachelitis'], + 'tracheolaryngotomy': ['laryngotracheotomy', 'tracheolaryngotomy'], + 'trachinoid': ['anhidrotic', 'trachinoid'], + 'trachitis': ['citharist', 'trachitis'], + 'trachle': ['clethra', 'latcher', 'ratchel', 'relatch', 'talcher', 'trachle'], + 'trachoma': ['achromat', 'trachoma'], + 'trachylinae': ['chatelainry', 'trachylinae'], + 'trachyte': ['chattery', 'ratchety', 'trachyte'], + 'tracker': ['retrack', 'tracker'], + 'trackside': ['sidetrack', 'trackside'], + 'tractator': ['attractor', 'tractator'], + 'tractile': ['tetrical', 'tractile'], + 'tracy': ['carty', 'tracy'], + 'trade': ['dater', 'derat', 'detar', 'drate', 'rated', 'trade', 'tread'], + 'trader': ['darter', + 'dartre', + 'redart', + 'retard', + 'retrad', + 'tarred', + 'trader'], + 'trading': ['darting', 'trading'], + 'tradite': ['attired', 'tradite'], + 'traditioner': ['retradition', 'traditioner'], + 'traditionism': ['mistradition', 'traditionism'], + 'traditorship': ['podarthritis', 'traditorship'], + 'traducent': ['reductant', 'traducent', 'truncated'], + 'trady': ['tardy', 'trady'], + 'trag': ['grat', 'trag'], + 'tragedial': ['taligrade', 'tragedial'], + 'tragicomedy': ['comitragedy', 'tragicomedy'], + 'tragulina': ['tragulina', 'triangula'], + 'traguline': ['granulite', 'traguline'], + 'trah': ['hart', 'rath', 'tahr', 'thar', 'trah'], + 'traheen': ['earthen', 'enheart', 'hearten', 'naether', 'teheran', 'traheen'], + 'traik': ['kitar', 'krait', 'rakit', 'traik'], + 'trail': ['litra', 'trail', 'trial'], + 'trailer': ['retiral', 'retrial', 'trailer'], + 'trailery': ['literary', 'trailery'], + 'trailing': ['ringtail', 'trailing'], + 'trailside': ['dialister', 'trailside'], + 'train': ['riant', 'tairn', 'tarin', 'train'], + 'trainable': ['albertina', 'trainable'], + 'trainage': ['antiager', 'trainage'], + 'trainboy': ['bonitary', 'trainboy'], + 'trained': ['antired', 'detrain', 'randite', 'trained'], + 'trainee': ['enteria', 'trainee', 'triaene'], + 'trainer': ['arterin', 'retrain', 'terrain', 'trainer'], + 'trainless': ['sternalis', 'trainless'], + 'trainster': ['restraint', 'retransit', 'trainster', 'transiter'], + 'traintime': ['intimater', 'traintime'], + 'trainy': ['rytina', 'trainy', 'tyrian'], + 'traipse': ['piaster', 'piastre', 'raspite', 'spirate', 'traipse'], + 'trait': ['ratti', 'titar', 'trait'], + 'tram': ['mart', 'tram'], + 'trama': ['matar', 'matra', 'trama'], + 'tramal': ['matral', 'tramal'], + 'trame': ['armet', + 'mater', + 'merat', + 'metra', + 'ramet', + 'tamer', + 'terma', + 'trame', + 'trema'], + 'trametes': ['teamster', 'trametes'], + 'tramline': ['terminal', 'tramline'], + 'tramper': ['retramp', 'tramper'], + 'trample': ['templar', 'trample'], + 'trampoline': ['intemporal', 'trampoline'], + 'tran': ['natr', 'rant', 'tarn', 'tran'], + 'trance': ['canter', + 'creant', + 'cretan', + 'nectar', + 'recant', + 'tanrec', + 'trance'], + 'tranced': ['cantred', 'centrad', 'tranced'], + 'trancelike': ['nectarlike', 'trancelike'], + 'trankum': ['trankum', 'turkman'], + 'transamination': ['transamination', 'transanimation'], + 'transanimation': ['transamination', 'transanimation'], + 'transept': ['prestant', 'transept'], + 'transeptally': ['platysternal', 'transeptally'], + 'transformer': ['retransform', 'transformer'], + 'transfuge': ['afterguns', 'transfuge'], + 'transient': ['instanter', 'transient'], + 'transigent': ['astringent', 'transigent'], + 'transimpression': ['pretransmission', 'transimpression'], + 'transire': ['restrain', 
'strainer', 'transire'], + 'transit': ['straint', 'transit', 'tristan'], + 'transiter': ['restraint', 'retransit', 'trainster', 'transiter'], + 'transitive': ['revisitant', 'transitive'], + 'transmarine': ['strainerman', 'transmarine'], + 'transmit': ['tantrism', 'transmit'], + 'transmold': ['landstorm', 'transmold'], + 'transoceanic': ['narcaciontes', 'transoceanic'], + 'transonic': ['constrain', 'transonic'], + 'transpire': ['prestrain', 'transpire'], + 'transplanter': ['retransplant', 'transplanter'], + 'transportee': ['paternoster', 'prosternate', 'transportee'], + 'transporter': ['retransport', 'transporter'], + 'transpose': ['patroness', 'transpose'], + 'transposer': ['transposer', 'transprose'], + 'transprose': ['transposer', 'transprose'], + 'trap': ['part', 'prat', 'rapt', 'tarp', 'trap'], + 'trapa': ['apart', 'trapa'], + 'trapes': ['paster', 'repast', 'trapes'], + 'trapfall': ['pratfall', 'trapfall'], + 'traphole': ['plethora', 'traphole'], + 'trappean': ['apparent', 'trappean'], + 'traps': ['spart', 'sprat', 'strap', 'traps'], + 'traship': ['harpist', 'traship'], + 'trasy': ['satyr', 'stary', 'stray', 'trasy'], + 'traulism': ['altruism', 'muralist', 'traulism', 'ultraism'], + 'trauma': ['taruma', 'trauma'], + 'travale': ['larvate', 'lavaret', 'travale'], + 'trave': ['avert', 'tarve', 'taver', 'trave'], + 'travel': ['travel', 'varlet'], + 'traveler': ['retravel', 'revertal', 'traveler'], + 'traversion': ['overstrain', 'traversion'], + 'travertine': ['travertine', 'trinervate'], + 'travoy': ['travoy', 'votary'], + 'tray': ['arty', 'atry', 'tray'], + 'treacle': ['electra', 'treacle'], + 'tread': ['dater', 'derat', 'detar', 'drate', 'rated', 'trade', 'tread'], + 'treader': ['derater', 'retrade', 'retread', 'treader'], + 'treading': ['gradient', 'treading'], + 'treadle': ['delater', 'related', 'treadle'], + 'treason': ['noreast', 'rosetan', 'seatron', 'senator', 'treason'], + 'treasonish': ['astonisher', 'reastonish', 'treasonish'], + 'treasonist': ['steatornis', 'treasonist'], + 'treasonous': ['anoestrous', 'treasonous'], + 'treasurer': ['serrature', 'treasurer'], + 'treat': ['atter', 'tater', 'teart', 'tetra', 'treat'], + 'treatably': ['tabletary', 'treatably'], + 'treatee': ['ateeter', 'treatee'], + 'treater': ['ettarre', 'retreat', 'treater'], + 'treatise': ['estriate', 'treatise'], + 'treaty': ['attery', 'treaty', 'yatter'], + 'treble': ['belter', 'elbert', 'treble'], + 'treculia': ['arculite', 'cutleria', 'lucretia', 'reticula', 'treculia'], + 'tree': ['reet', 'teer', 'tree'], + 'treed': ['deter', 'treed'], + 'treeful': ['fleuret', 'treeful'], + 'treehood': ['theodore', 'treehood'], + 'treemaker': ['marketeer', 'treemaker'], + 'treeman': ['remanet', 'remeant', 'treeman'], + 'treen': ['enter', 'neter', 'renet', 'terne', 'treen'], + 'treenail': ['elaterin', 'entailer', 'treenail'], + 'treeship': ['hepteris', 'treeship'], + 'tref': ['fret', 'reft', 'tref'], + 'trefle': ['felter', 'telfer', 'trefle'], + 'trellis': ['stiller', 'trellis'], + 'trema': ['armet', + 'mater', + 'merat', + 'metra', + 'ramet', + 'tamer', + 'terma', + 'trame', + 'trema'], + 'trematoid': ['meditator', 'trematoid'], + 'tremella': ['realmlet', 'tremella'], + 'tremie': ['metier', 'retime', 'tremie'], + 'tremolo': ['roomlet', 'tremolo'], + 'tremor': ['termor', 'tremor'], + 'trenail': ['entrail', + 'latiner', + 'latrine', + 'ratline', + 'reliant', + 'retinal', + 'trenail'], + 'trenchant': ['centranth', 'trenchant'], + 'trencher': ['retrench', 'trencher'], + 'trenchmaster': ['stretcherman', 'trenchmaster'], + 
'trenchwise': ['trenchwise', 'winchester'], + 'trentine': ['renitent', 'trentine'], + 'trepan': ['arpent', + 'enrapt', + 'entrap', + 'panter', + 'parent', + 'pretan', + 'trepan'], + 'trephine': ['nephrite', 'prehnite', 'trephine'], + 'trepid': ['dipter', 'trepid'], + 'trepidation': ['departition', 'partitioned', 'trepidation'], + 'treron': ['terron', 'treron', 'troner'], + 'treronidae': ['reordinate', 'treronidae'], + 'tressed': ['dessert', 'tressed'], + 'tressful': ['tressful', 'turfless'], + 'tressour': ['tressour', 'trousers'], + 'trest': ['stert', 'stret', 'trest'], + 'trestle': ['settler', 'sterlet', 'trestle'], + 'trevor': ['trevor', 'trover'], + 'trews': ['strew', 'trews', 'wrest'], + 'trey': ['trey', 'tyre'], + 'tri': ['rit', 'tri'], + 'triable': ['betrail', 'librate', 'triable', 'trilabe'], + 'triace': ['acrite', 'arcite', 'tercia', 'triace', 'tricae'], + 'triacid': ['arctiid', 'triacid', 'triadic'], + 'triacontane': ['recantation', 'triacontane'], + 'triaconter': ['retraction', 'triaconter'], + 'triactine': ['intricate', 'triactine'], + 'triadic': ['arctiid', 'triacid', 'triadic'], + 'triadical': ['raticidal', 'triadical'], + 'triadist': ['distrait', 'triadist'], + 'triaene': ['enteria', 'trainee', 'triaene'], + 'triage': ['gaiter', 'tairge', 'triage'], + 'trial': ['litra', 'trail', 'trial'], + 'trialism': ['mistrial', 'trialism'], + 'trialist': ['taistril', 'trialist'], + 'triamide': ['dimetria', 'mitridae', 'tiremaid', 'triamide'], + 'triamino': ['miniator', 'triamino'], + 'triandria': ['irradiant', 'triandria'], + 'triangle': ['integral', 'teraglin', 'triangle'], + 'triangula': ['tragulina', 'triangula'], + 'triannual': ['innatural', 'triannual'], + 'triannulate': ['antineutral', 'triannulate'], + 'triantelope': ['interpolate', 'triantelope'], + 'triapsidal': ['lapidarist', 'triapsidal'], + 'triareal': ['arterial', 'triareal'], + 'trias': ['arist', + 'astir', + 'sitar', + 'stair', + 'stria', + 'tarsi', + 'tisar', + 'trias'], + 'triassic': ['sarcitis', 'triassic'], + 'triazane': ['nazarite', 'nazirate', 'triazane'], + 'triazine': ['nazirite', 'triazine'], + 'tribade': ['redbait', 'tribade'], + 'tribase': ['baister', 'tribase'], + 'tribe': ['biter', 'tribe'], + 'tribelet': ['belitter', 'tribelet'], + 'triblet': ['blitter', 'brittle', 'triblet'], + 'tribonema': ['brominate', 'tribonema'], + 'tribuna': ['arbutin', 'tribuna'], + 'tribunal': ['tribunal', 'turbinal', 'untribal'], + 'tribunate': ['tribunate', 'turbinate'], + 'tribune': ['tribune', 'tuberin', 'turbine'], + 'tricae': ['acrite', 'arcite', 'tercia', 'triace', 'tricae'], + 'trice': ['citer', 'recti', 'ticer', 'trice'], + 'tricennial': ['encrinital', 'tricennial'], + 'triceratops': ['tetrasporic', 'triceratops'], + 'triceria': ['criteria', 'triceria'], + 'tricerion': ['criterion', 'tricerion'], + 'tricerium': ['criterium', 'tricerium'], + 'trichinous': ['trichinous', 'unhistoric'], + 'trichogyne': ['thyrogenic', 'trichogyne'], + 'trichoid': ['hidrotic', 'trichoid'], + 'trichomanes': ['anchoretism', 'trichomanes'], + 'trichome': ['chromite', 'trichome'], + 'trichopore': ['horopteric', 'rheotropic', 'trichopore'], + 'trichosis': ['historics', 'trichosis'], + 'trichosporum': ['sporotrichum', 'trichosporum'], + 'trichroic': ['cirrhotic', 'trichroic'], + 'trichroism': ['trichroism', 'triorchism'], + 'trichromic': ['microcrith', 'trichromic'], + 'tricia': ['iatric', 'tricia'], + 'trickle': ['tickler', 'trickle'], + 'triclinate': ['intractile', 'triclinate'], + 'tricolumnar': ['tricolumnar', 'ultramicron'], + 'tricosane': 
['atroscine', 'certosina', 'ostracine', 'tinoceras', 'tricosane'], + 'tridacna': ['antacrid', 'cardiant', 'radicant', 'tridacna'], + 'tridecane': ['nectaried', 'tridecane'], + 'tridecene': ['intercede', 'tridecene'], + 'tridecyl': ['directly', 'tridecyl'], + 'tridiapason': ['disparation', 'tridiapason'], + 'tried': ['diter', 'tired', 'tried'], + 'triedly': ['tiredly', 'triedly'], + 'triene': ['entire', 'triene'], + 'triens': ['estrin', 'insert', 'sinter', 'sterin', 'triens'], + 'triental': ['tetralin', 'triental'], + 'triequal': ['quartile', 'requital', 'triequal'], + 'trier': ['terri', 'tirer', 'trier'], + 'trifle': ['fertil', 'filter', 'lifter', 'relift', 'trifle'], + 'trifler': ['flirter', 'trifler'], + 'triflet': ['flitter', 'triflet'], + 'trifling': ['flirting', 'trifling'], + 'triflingly': ['flirtingly', 'triflingly'], + 'trifolium': ['lituiform', 'trifolium'], + 'trig': ['girt', 'grit', 'trig'], + 'trigona': ['grotian', 'trigona'], + 'trigone': ['ergotin', 'genitor', 'negrito', 'ogtiern', 'trigone'], + 'trigonia': ['rigation', 'trigonia'], + 'trigonid': ['trigonid', 'tringoid'], + 'trigyn': ['trigyn', 'trying'], + 'trilabe': ['betrail', 'librate', 'triable', 'trilabe'], + 'trilineate': ['retinalite', 'trilineate'], + 'trilisa': ['liatris', 'trilisa'], + 'trillet': ['rillett', 'trillet'], + 'trilobate': ['latrobite', 'trilobate'], + 'trilobated': ['titleboard', 'trilobated'], + 'trimacular': ['matricular', 'trimacular'], + 'trimensual': ['neutralism', 'trimensual'], + 'trimer': ['mitrer', 'retrim', 'trimer'], + 'trimesic': ['meristic', 'trimesic', 'trisemic'], + 'trimesitinic': ['interimistic', 'trimesitinic'], + 'trimesyl': ['trimesyl', 'tylerism'], + 'trimeter': ['remitter', 'trimeter'], + 'trimstone': ['sortiment', 'trimstone'], + 'trinalize': ['latinizer', 'trinalize'], + 'trindle': ['tendril', 'trindle'], + 'trine': ['inert', 'inter', 'niter', 'retin', 'trine'], + 'trinely': ['elytrin', 'inertly', 'trinely'], + 'trinervate': ['travertine', 'trinervate'], + 'trinerve': ['inverter', 'reinvert', 'trinerve'], + 'trineural': ['retinular', 'trineural'], + 'tringa': ['rating', 'tringa'], + 'tringle': ['ringlet', 'tingler', 'tringle'], + 'tringoid': ['trigonid', 'tringoid'], + 'trinket': ['knitter', 'trinket'], + 'trinkle': ['tinkler', 'trinkle'], + 'trinoctial': ['tinctorial', 'trinoctial'], + 'trinodine': ['rendition', 'trinodine'], + 'trintle': ['lettrin', 'trintle'], + 'trio': ['riot', 'roit', 'trio'], + 'triode': ['editor', 'triode'], + 'trioecism': ['eroticism', 'isometric', 'meroistic', 'trioecism'], + 'triole': ['loiter', 'toiler', 'triole'], + 'trioleic': ['elicitor', 'trioleic'], + 'triolet': ['lotrite', 'tortile', 'triolet'], + 'trionymal': ['normality', 'trionymal'], + 'triopidae': ['poritidae', 'triopidae'], + 'triops': ['ripost', 'triops', 'tripos'], + 'triorchism': ['trichroism', 'triorchism'], + 'triose': ['restio', 'sorite', 'sortie', 'triose'], + 'tripe': ['perit', 'retip', 'tripe'], + 'tripedal': ['dipteral', 'tripedal'], + 'tripel': ['tripel', 'triple'], + 'tripeman': ['imperant', 'pairment', 'partimen', 'premiant', 'tripeman'], + 'tripersonal': ['intersporal', 'tripersonal'], + 'tripestone': ['septentrio', 'tripestone'], + 'triphane': ['perianth', 'triphane'], + 'triplane': ['interlap', 'repliant', 'triplane'], + 'triplasian': ['airplanist', 'triplasian'], + 'triplasic': ['pilastric', 'triplasic'], + 'triple': ['tripel', 'triple'], + 'triplice': ['perlitic', 'triplice'], + 'triplopia': ['propitial', 'triplopia'], + 'tripod': ['torpid', 'tripod'], + 'tripodal': 
['dioptral', 'tripodal'], + 'tripodic': ['dioptric', 'tripodic'], + 'tripodical': ['dioptrical', 'tripodical'], + 'tripody': ['dioptry', 'tripody'], + 'tripos': ['ripost', 'triops', 'tripos'], + 'trippist': ['strippit', 'trippist'], + 'tripple': ['ripplet', 'tippler', 'tripple'], + 'tripsis': ['pristis', 'tripsis'], + 'tripsome': ['imposter', 'tripsome'], + 'tripudiant': ['antiputrid', 'tripudiant'], + 'tripyrenous': ['neurotripsy', 'tripyrenous'], + 'triratna': ['tartarin', 'triratna'], + 'trireme': ['meriter', 'miterer', 'trireme'], + 'trisalt': ['starlit', 'trisalt'], + 'trisected': ['decretist', 'trisected'], + 'trisector': ['tortrices', 'trisector'], + 'trisemic': ['meristic', 'trimesic', 'trisemic'], + 'trisetose': ['esoterist', 'trisetose'], + 'trishna': ['tarnish', 'trishna'], + 'trisilane': ['listerian', 'trisilane'], + 'triskele': ['kreistle', 'triskele'], + 'trismus': ['sistrum', 'trismus'], + 'trisome': ['erotism', 'mortise', 'trisome'], + 'trisomy': ['toryism', 'trisomy'], + 'trisonant': ['strontian', 'trisonant'], + 'trispinose': ['pirssonite', 'trispinose'], + 'trist': ['strit', 'trist'], + 'tristan': ['straint', 'transit', 'tristan'], + 'trisula': ['latirus', 'trisula'], + 'trisulcate': ['testicular', 'trisulcate'], + 'tritanope': ['antitrope', 'patronite', 'tritanope'], + 'tritanopic': ['antitropic', 'tritanopic'], + 'trite': ['titer', 'titre', 'trite'], + 'tritely': ['littery', 'tritely'], + 'triterpene': ['preterient', 'triterpene'], + 'tritheism': ['tiresmith', 'tritheism'], + 'trithionate': ['anorthitite', 'trithionate'], + 'tritocone': ['torticone', 'tritocone'], + 'tritoma': ['mattoir', 'tritoma'], + 'triton': ['intort', 'tornit', 'triton'], + 'triune': ['runite', 'triune', 'uniter', 'untire'], + 'trivalence': ['cantilever', 'trivalence'], + 'trivial': ['trivial', 'vitrail'], + 'trivialist': ['trivialist', 'vitrailist'], + 'troat': ['ottar', 'tarot', 'torta', 'troat'], + 'troca': ['actor', 'corta', 'croat', 'rocta', 'taroc', 'troca'], + 'trocar': ['carrot', 'trocar'], + 'trochaic': ['thoracic', 'tocharic', 'trochaic'], + 'trochate': ['theocrat', 'trochate'], + 'troche': ['hector', 'rochet', 'tocher', 'troche'], + 'trochi': ['chorti', 'orthic', 'thoric', 'trochi'], + 'trochidae': ['charioted', 'trochidae'], + 'trochila': ['acrolith', 'trochila'], + 'trochilic': ['chloritic', 'trochilic'], + 'trochlea': ['chlorate', 'trochlea'], + 'trochlearis': ['rhetoricals', 'trochlearis'], + 'trode': ['doter', 'tored', 'trode'], + 'trog': ['grot', 'trog'], + 'trogonidae': ['derogation', 'trogonidae'], + 'troiades': ['asteroid', 'troiades'], + 'troic': ['toric', 'troic'], + 'troika': ['korait', 'troika'], + 'trolley': ['tollery', 'trolley'], + 'tromba': ['tambor', 'tromba'], + 'trombe': ['retomb', 'trombe'], + 'trompe': ['emptor', 'trompe'], + 'tron': ['torn', 'tron'], + 'trona': ['orant', 'rotan', 'toran', 'trona'], + 'tronage': ['negator', 'tronage'], + 'trone': ['noter', 'tenor', 'toner', 'trone'], + 'troner': ['terron', 'treron', 'troner'], + 'troop': ['porto', 'proto', 'troop'], + 'trooper': ['protore', 'trooper'], + 'tropaeolum': ['pleurotoma', 'tropaeolum'], + 'tropaion': ['opinator', 'tropaion'], + 'tropal': ['patrol', 'portal', 'tropal'], + 'troparion': ['proration', 'troparion'], + 'tropary': ['parroty', 'portray', 'tropary'], + 'trope': ['poter', 'prote', 'repot', 'tepor', 'toper', 'trope'], + 'tropeic': ['perotic', 'proteic', 'tropeic'], + 'tropeine': ['ereption', 'tropeine'], + 'troper': ['porret', 'porter', 'report', 'troper'], + 'trophema': ['metaphor', 'trophema'], + 
'trophesial': ['hospitaler', 'trophesial'], + 'trophical': ['carpolith', 'politarch', 'trophical'], + 'trophodisc': ['doctorship', 'trophodisc'], + 'trophonema': ['homopteran', 'trophonema'], + 'trophotropic': ['prototrophic', 'trophotropic'], + 'tropical': ['plicator', 'tropical'], + 'tropically': ['polycitral', 'tropically'], + 'tropidine': ['direption', 'perdition', 'tropidine'], + 'tropine': ['pointer', 'protein', 'pterion', 'repoint', 'tropine'], + 'tropism': ['primost', 'tropism'], + 'tropist': ['protist', 'tropist'], + 'tropistic': ['proctitis', 'protistic', 'tropistic'], + 'tropophyte': ['protophyte', 'tropophyte'], + 'tropophytic': ['protophytic', 'tropophytic'], + 'tropyl': ['portly', 'protyl', 'tropyl'], + 'trostera': ['rostrate', 'trostera'], + 'trot': ['tort', 'trot'], + 'troth': ['thort', 'troth'], + 'trotline': ['interlot', 'trotline'], + 'trouble': ['boulter', 'trouble'], + 'troughy': ['troughy', 'yoghurt'], + 'trounce': ['cornute', 'counter', 'recount', 'trounce'], + 'troupe': ['pouter', 'roupet', 'troupe'], + 'trouse': ['ouster', 'souter', 'touser', 'trouse'], + 'trouser': ['rouster', 'trouser'], + 'trouserian': ['souterrain', 'ternarious', 'trouserian'], + 'trousers': ['tressour', 'trousers'], + 'trout': ['trout', 'tutor'], + 'trouter': ['torture', 'trouter', 'tutorer'], + 'troutless': ['troutless', 'tutorless'], + 'trouty': ['trouty', 'tryout', 'tutory'], + 'trouvere': ['overtrue', 'overture', 'trouvere'], + 'trove': ['overt', 'rovet', 'torve', 'trove', 'voter'], + 'trover': ['trevor', 'trover'], + 'trow': ['trow', 'wort'], + 'trowel': ['rowlet', 'trowel', 'wolter'], + 'troy': ['royt', 'ryot', 'tory', 'troy', 'tyro'], + 'truandise': ['disnature', 'sturnidae', 'truandise'], + 'truant': ['truant', 'turtan'], + 'trub': ['brut', 'burt', 'trub', 'turb'], + 'trubu': ['burut', 'trubu'], + 'truce': ['cruet', 'eruct', 'recut', 'truce'], + 'truceless': ['cutleress', 'lecturess', 'truceless'], + 'trucial': ['curtail', 'trucial'], + 'trucks': ['struck', 'trucks'], + 'truculent': ['truculent', 'unclutter'], + 'truelove': ['revolute', 'truelove'], + 'truffle': ['fretful', 'truffle'], + 'trug': ['gurt', 'trug'], + 'truistical': ['altruistic', 'truistical', 'ultraistic'], + 'truly': ['rutyl', 'truly'], + 'trumperiness': ['surprisement', 'trumperiness'], + 'trumpie': ['imputer', 'trumpie'], + 'trun': ['runt', 'trun', 'turn'], + 'truncated': ['reductant', 'traducent', 'truncated'], + 'trundle': ['rundlet', 'trundle'], + 'trush': ['hurst', 'trush'], + 'trusion': ['nitrous', 'trusion'], + 'trust': ['strut', 'sturt', 'trust'], + 'trustee': ['surette', 'trustee'], + 'trusteeism': ['sestertium', 'trusteeism'], + 'trusten': ['entrust', 'stunter', 'trusten'], + 'truster': ['retrust', 'truster'], + 'trustle': ['slutter', 'trustle'], + 'truth': ['thurt', 'truth'], + 'trying': ['trigyn', 'trying'], + 'tryma': ['marty', 'tryma'], + 'tryout': ['trouty', 'tryout', 'tutory'], + 'trypa': ['party', 'trypa'], + 'trypan': ['pantry', 'trypan'], + 'tryptase': ['tapestry', 'tryptase'], + 'tsar': ['sart', 'star', 'stra', 'tars', 'tsar'], + 'tsardom': ['stardom', 'tsardom'], + 'tsarina': ['artisan', 'astrain', 'sartain', 'tsarina'], + 'tsarship': ['starship', 'tsarship'], + 'tsatlee': ['atelets', 'tsatlee'], + 'tsere': ['ester', + 'estre', + 'reest', + 'reset', + 'steer', + 'stere', + 'stree', + 'terse', + 'tsere'], + 'tsetse': ['sestet', 'testes', 'tsetse'], + 'tshi': ['hist', 'sith', 'this', 'tshi'], + 'tsia': ['atis', 'sita', 'tsia'], + 'tsine': ['inset', 'neist', 'snite', 'stein', 'stine', 'tsine'], + 
'tsiology': ['sitology', 'tsiology'], + 'tsoneca': ['costean', 'tsoneca'], + 'tsonecan': ['noncaste', 'tsonecan'], + 'tsuga': ['agust', 'tsuga'], + 'tsuma': ['matsu', 'tamus', 'tsuma'], + 'tsun': ['stun', 'sunt', 'tsun'], + 'tu': ['tu', 'ut'], + 'tua': ['tau', 'tua', 'uta'], + 'tuan': ['antu', 'aunt', 'naut', 'taun', 'tuan', 'tuna'], + 'tuareg': ['argute', 'guetar', 'rugate', 'tuareg'], + 'tuarn': ['arnut', 'tuarn', 'untar'], + 'tub': ['but', 'tub'], + 'tuba': ['abut', 'tabu', 'tuba'], + 'tubae': ['butea', 'taube', 'tubae'], + 'tubal': ['balut', 'tubal'], + 'tubar': ['bruta', 'tubar'], + 'tubate': ['battue', 'tubate'], + 'tube': ['bute', 'tebu', 'tube'], + 'tuber': ['brute', 'buret', 'rebut', 'tuber'], + 'tubercula': ['lucubrate', 'tubercula'], + 'tuberin': ['tribune', 'tuberin', 'turbine'], + 'tuberless': ['butleress', 'tuberless'], + 'tublet': ['buttle', 'tublet'], + 'tuboovarial': ['ovariotubal', 'tuboovarial'], + 'tucana': ['canaut', 'tucana'], + 'tucano': ['toucan', 'tucano', 'uncoat'], + 'tuchun': ['tuchun', 'uncuth'], + 'tucker': ['retuck', 'tucker'], + 'tue': ['tue', 'ute'], + 'tueiron': ['routine', 'tueiron'], + 'tug': ['gut', 'tug'], + 'tughra': ['raught', 'tughra'], + 'tugless': ['gutless', 'tugless'], + 'tuglike': ['gutlike', 'tuglike'], + 'tugman': ['tangum', 'tugman'], + 'tuism': ['muist', 'tuism'], + 'tuke': ['ketu', 'teuk', 'tuke'], + 'tukra': ['kraut', 'tukra'], + 'tulare': ['tulare', 'uretal'], + 'tulasi': ['situal', 'situla', 'tulasi'], + 'tulchan': ['tulchan', 'unlatch'], + 'tule': ['lute', 'tule'], + 'tulipa': ['tipula', 'tulipa'], + 'tulisan': ['latinus', 'tulisan', 'unalist'], + 'tulsi': ['litus', 'sluit', 'tulsi'], + 'tumbler': ['tumbler', 'tumbrel'], + 'tumbrel': ['tumbler', 'tumbrel'], + 'tume': ['mute', 'tume'], + 'tumescence': ['mutescence', 'tumescence'], + 'tumorous': ['mortuous', 'tumorous'], + 'tumulary': ['mutulary', 'tumulary'], + 'tun': ['nut', 'tun'], + 'tuna': ['antu', 'aunt', 'naut', 'taun', 'tuan', 'tuna'], + 'tunable': ['abluent', 'tunable'], + 'tunbellied': ['tunbellied', 'unbilleted'], + 'tunca': ['tunca', 'unact'], + 'tund': ['dunt', 'tund'], + 'tunder': ['runted', 'tunder', 'turned'], + 'tundra': ['durant', 'tundra'], + 'tuner': ['enrut', 'tuner', 'urent'], + 'tunga': ['gaunt', 'tunga'], + 'tungan': ['tangun', 'tungan'], + 'tungate': ['tungate', 'tutenag'], + 'tungo': ['tungo', 'ungot'], + 'tungstosilicate': ['silicotungstate', 'tungstosilicate'], + 'tungstosilicic': ['silicotungstic', 'tungstosilicic'], + 'tunic': ['cutin', 'incut', 'tunic'], + 'tunica': ['anicut', 'nautic', 'ticuna', 'tunica'], + 'tunican': ['ticunan', 'tunican'], + 'tunicary': ['nycturia', 'tunicary'], + 'tunicle': ['linecut', 'tunicle'], + 'tunicless': ['lentiscus', 'tunicless'], + 'tunist': ['suttin', 'tunist'], + 'tunk': ['knut', 'tunk'], + 'tunker': ['tunker', 'turken'], + 'tunlike': ['nutlike', 'tunlike'], + 'tunna': ['naunt', 'tunna'], + 'tunnel': ['nunlet', 'tunnel', 'unlent'], + 'tunnelman': ['annulment', 'tunnelman'], + 'tunner': ['runnet', 'tunner', 'unrent'], + 'tunnor': ['tunnor', 'untorn'], + 'tuno': ['tuno', 'unto'], + 'tup': ['put', 'tup'], + 'tur': ['rut', 'tur'], + 'turacin': ['curtain', 'turacin', 'turcian'], + 'turanian': ['nutarian', 'turanian'], + 'turanism': ['naturism', 'sturmian', 'turanism'], + 'turb': ['brut', 'burt', 'trub', 'turb'], + 'turban': ['tanbur', 'turban'], + 'turbaned': ['breadnut', 'turbaned'], + 'turbanless': ['substernal', 'turbanless'], + 'turbeh': ['hubert', 'turbeh'], + 'turbinal': ['tribunal', 'turbinal', 'untribal'], + 'turbinate': 
['tribunate', 'turbinate'], + 'turbine': ['tribune', 'tuberin', 'turbine'], + 'turbined': ['turbined', 'underbit'], + 'turcian': ['curtain', 'turacin', 'turcian'], + 'turco': ['court', 'crout', 'turco'], + 'turcoman': ['courtman', 'turcoman'], + 'turdinae': ['indurate', 'turdinae'], + 'turdine': ['intrude', 'turdine', 'untired', 'untried'], + 'tureen': ['neuter', 'retune', 'runtee', 'tenure', 'tureen'], + 'turfed': ['dufter', 'turfed'], + 'turfen': ['turfen', 'unfret'], + 'turfless': ['tressful', 'turfless'], + 'turgent': ['grutten', 'turgent'], + 'turk': ['kurt', 'turk'], + 'turken': ['tunker', 'turken'], + 'turki': ['tikur', 'turki'], + 'turkman': ['trankum', 'turkman'], + 'turma': ['martu', 'murat', 'turma'], + 'turn': ['runt', 'trun', 'turn'], + 'turndown': ['downturn', 'turndown'], + 'turned': ['runted', 'tunder', 'turned'], + 'turnel': ['runlet', 'turnel'], + 'turner': ['return', 'turner'], + 'turnhall': ['turnhall', 'unthrall'], + 'turnout': ['outturn', 'turnout'], + 'turnover': ['overturn', 'turnover'], + 'turnpin': ['turnpin', 'unprint'], + 'turns': ['snurt', 'turns'], + 'turntail': ['rutilant', 'turntail'], + 'turnup': ['turnup', 'upturn'], + 'turp': ['prut', 'turp'], + 'turpid': ['putrid', 'turpid'], + 'turpidly': ['putridly', 'turpidly'], + 'turps': ['spurt', 'turps'], + 'turret': ['rutter', 'turret'], + 'turricula': ['turricula', 'utricular'], + 'turse': ['serut', 'strue', 'turse', 'uster'], + 'tursenoi': ['rutinose', 'tursenoi'], + 'tursio': ['suitor', 'tursio'], + 'turtan': ['truant', 'turtan'], + 'tuscan': ['cantus', 'tuscan', 'uncast'], + 'tusche': ['schute', 'tusche'], + 'tush': ['shut', 'thus', 'tush'], + 'tusher': ['reshut', 'suther', 'thurse', 'tusher'], + 'tussal': ['saltus', 'tussal'], + 'tusser': ['russet', 'tusser'], + 'tussore': ['estrous', 'oestrus', 'sestuor', 'tussore'], + 'tutelo': ['outlet', 'tutelo'], + 'tutenag': ['tungate', 'tutenag'], + 'tutman': ['mutant', 'tantum', 'tutman'], + 'tutor': ['trout', 'tutor'], + 'tutorer': ['torture', 'trouter', 'tutorer'], + 'tutorial': ['outtrail', 'tutorial'], + 'tutorism': ['mistutor', 'tutorism'], + 'tutorless': ['troutless', 'tutorless'], + 'tutory': ['trouty', 'tryout', 'tutory'], + 'tuts': ['stut', 'tuts'], + 'tutster': ['stutter', 'tutster'], + 'twa': ['taw', 'twa', 'wat'], + 'twae': ['tewa', 'twae', 'weta'], + 'twain': ['atwin', 'twain', 'witan'], + 'twaite': ['tawite', 'tawtie', 'twaite'], + 'twal': ['twal', 'walt'], + 'twas': ['sawt', 'staw', 'swat', 'taws', 'twas', 'wast'], + 'twat': ['twat', 'watt'], + 'twee': ['twee', 'weet'], + 'tweel': ['tewel', 'tweel'], + 'twere': ['rewet', 'tewer', 'twere'], + 'twi': ['twi', 'wit'], + 'twigsome': ['twigsome', 'wegotism'], + 'twin': ['twin', 'wint'], + 'twiner': ['twiner', 'winter'], + 'twingle': ['twingle', 'welting', 'winglet'], + 'twinkle': ['twinkle', 'winklet'], + 'twinkler': ['twinkler', 'wrinklet'], + 'twinter': ['twinter', 'written'], + 'twire': ['twire', 'write'], + 'twister': ['retwist', 'twister'], + 'twitchety': ['twitchety', 'witchetty'], + 'twite': ['tewit', 'twite'], + 'two': ['tow', 'two', 'wot'], + 'twoling': ['lingtow', 'twoling'], + 'tyche': ['techy', 'tyche'], + 'tydie': ['deity', 'tydie'], + 'tye': ['tye', 'yet'], + 'tyke': ['kyte', 'tyke'], + 'tylerism': ['trimesyl', 'tylerism'], + 'tyloma': ['latomy', 'tyloma'], + 'tylose': ['tolsey', 'tylose'], + 'tylosis': ['tossily', 'tylosis'], + 'tylotus': ['stoutly', 'tylotus'], + 'tylus': ['lusty', 'tylus'], + 'typal': ['aptly', 'patly', 'platy', 'typal'], + 'typees': ['steepy', 'typees'], + 'typer': 
['perty', 'typer'], + 'typha': ['pathy', 'typha'], + 'typhia': ['pythia', 'typhia'], + 'typhic': ['phytic', 'pitchy', 'pythic', 'typhic'], + 'typhlopidae': ['heptaploidy', 'typhlopidae'], + 'typhoean': ['anophyte', 'typhoean'], + 'typhogenic': ['phytogenic', 'pythogenic', 'typhogenic'], + 'typhoid': ['phytoid', 'typhoid'], + 'typhonian': ['antiphony', 'typhonian'], + 'typhonic': ['hypnotic', 'phytonic', 'pythonic', 'typhonic'], + 'typhosis': ['phytosis', 'typhosis'], + 'typica': ['atypic', 'typica'], + 'typographer': ['petrography', 'pterography', 'typographer'], + 'typographic': ['graphotypic', 'pictography', 'typographic'], + 'typology': ['logotypy', 'typology'], + 'typophile': ['hippolyte', 'typophile'], + 'tyre': ['trey', 'tyre'], + 'tyrian': ['rytina', 'trainy', 'tyrian'], + 'tyro': ['royt', 'ryot', 'tory', 'troy', 'tyro'], + 'tyrocidin': ['nordicity', 'tyrocidin'], + 'tyrolean': ['neolatry', 'ornately', 'tyrolean'], + 'tyrolite': ['toiletry', 'tyrolite'], + 'tyrone': ['torney', 'tyrone'], + 'tyronism': ['smyrniot', 'tyronism'], + 'tyrosine': ['tyrosine', 'tyrsenoi'], + 'tyrrheni': ['erythrin', 'tyrrheni'], + 'tyrsenoi': ['tyrosine', 'tyrsenoi'], + 'tyste': ['testy', 'tyste'], + 'tyto': ['toty', 'tyto'], + 'uang': ['gaun', 'guan', 'guna', 'uang'], + 'ucal': ['caul', 'ucal'], + 'udal': ['auld', 'dual', 'laud', 'udal'], + 'udaler': ['lauder', 'udaler'], + 'udalman': ['ladanum', 'udalman'], + 'udo': ['duo', 'udo'], + 'uds': ['sud', 'uds'], + 'ugh': ['hug', 'ugh'], + 'uglisome': ['eulogism', 'uglisome'], + 'ugrian': ['gurian', 'ugrian'], + 'ugric': ['guric', 'ugric'], + 'uhtsong': ['gunshot', 'shotgun', 'uhtsong'], + 'uinal': ['inula', 'luian', 'uinal'], + 'uinta': ['uinta', 'uniat'], + 'ulcer': ['cruel', 'lucre', 'ulcer'], + 'ulcerate': ['celature', 'ulcerate'], + 'ulcerous': ['ulcerous', 'urceolus'], + 'ule': ['leu', 'lue', 'ule'], + 'ulema': ['amelu', 'leuma', 'ulema'], + 'uletic': ['lucite', 'luetic', 'uletic'], + 'ulex': ['luxe', 'ulex'], + 'ulla': ['lula', 'ulla'], + 'ulling': ['ulling', 'ungill'], + 'ulmin': ['linum', 'ulmin'], + 'ulminic': ['clinium', 'ulminic'], + 'ulmo': ['moul', 'ulmo'], + 'ulna': ['laun', 'luna', 'ulna', 'unal'], + 'ulnad': ['dunal', 'laund', 'lunda', 'ulnad'], + 'ulnar': ['lunar', 'ulnar', 'urnal'], + 'ulnare': ['lunare', 'neural', 'ulnare', 'unreal'], + 'ulnaria': ['lunaria', 'ulnaria', 'uralian'], + 'ulotrichi': ['ulotrichi', 'urolithic'], + 'ulster': ['luster', 'result', 'rustle', 'sutler', 'ulster'], + 'ulstered': ['deluster', 'ulstered'], + 'ulsterian': ['neuralist', 'ulsterian', 'unrealist'], + 'ulstering': ['resulting', 'ulstering'], + 'ulsterman': ['menstrual', 'ulsterman'], + 'ultima': ['mulita', 'ultima'], + 'ultimate': ['mutilate', 'ultimate'], + 'ultimation': ['mutilation', 'ultimation'], + 'ultonian': ['lunation', 'ultonian'], + 'ultra': ['lutra', 'ultra'], + 'ultrabasic': ['arcubalist', 'ultrabasic'], + 'ultraism': ['altruism', 'muralist', 'traulism', 'ultraism'], + 'ultraist': ['altruist', 'ultraist'], + 'ultraistic': ['altruistic', 'truistical', 'ultraistic'], + 'ultramicron': ['tricolumnar', 'ultramicron'], + 'ultraminute': ['intermutual', 'ultraminute'], + 'ultramontane': ['tournamental', 'ultramontane'], + 'ultranice': ['centurial', 'lucretian', 'ultranice'], + 'ultrasterile': ['reillustrate', 'ultrasterile'], + 'ulua': ['aulu', 'ulua'], + 'ulva': ['ulva', 'uval'], + 'um': ['mu', 'um'], + 'umbel': ['umbel', 'umble'], + 'umbellar': ['umbellar', 'umbrella'], + 'umber': ['brume', 'umber'], + 'umbilic': ['bulimic', 'umbilic'], + 'umbiliform': 
['bulimiform', 'umbiliform'], + 'umble': ['umbel', 'umble'], + 'umbonial': ['olibanum', 'umbonial'], + 'umbral': ['brumal', 'labrum', 'lumbar', 'umbral'], + 'umbrel': ['lumber', 'rumble', 'umbrel'], + 'umbrella': ['umbellar', 'umbrella'], + 'umbrous': ['brumous', 'umbrous'], + 'ume': ['emu', 'ume'], + 'umlaut': ['mutual', 'umlaut'], + 'umph': ['hump', 'umph'], + 'umpire': ['impure', 'umpire'], + 'un': ['nu', 'un'], + 'unabetted': ['debutante', 'unabetted'], + 'unabhorred': ['unabhorred', 'unharbored'], + 'unable': ['nebula', 'unable', 'unbale'], + 'unaccumulate': ['acutenaculum', 'unaccumulate'], + 'unact': ['tunca', 'unact'], + 'unadherent': ['unadherent', 'underneath', 'underthane'], + 'unadmire': ['unadmire', 'underaim'], + 'unadmired': ['unadmired', 'undermaid'], + 'unadored': ['unadored', 'unroaded'], + 'unadvertised': ['disadventure', 'unadvertised'], + 'unafire': ['fuirena', 'unafire'], + 'unaged': ['augend', 'engaud', 'unaged'], + 'unagreed': ['dungaree', 'guardeen', 'unagreed', 'underage', 'ungeared'], + 'unailing': ['inguinal', 'unailing'], + 'unaimed': ['numidae', 'unaimed'], + 'unaisled': ['unaisled', 'unsailed'], + 'unakite': ['kutenai', 'unakite'], + 'unal': ['laun', 'luna', 'ulna', 'unal'], + 'unalarming': ['unalarming', 'unmarginal'], + 'unalert': ['laurent', 'neutral', 'unalert'], + 'unalertly': ['neutrally', 'unalertly'], + 'unalertness': ['neutralness', 'unalertness'], + 'unalimentary': ['anteluminary', 'unalimentary'], + 'unalist': ['latinus', 'tulisan', 'unalist'], + 'unallotted': ['unallotted', 'untotalled'], + 'unalmsed': ['dulseman', 'unalmsed'], + 'unaltered': ['unaltered', 'unrelated'], + 'unaltering': ['unaltering', 'unrelating'], + 'unamassed': ['mussaenda', 'unamassed'], + 'unambush': ['subhuman', 'unambush'], + 'unamenability': ['unamenability', 'unnameability'], + 'unamenable': ['unamenable', 'unnameable'], + 'unamenableness': ['unamenableness', 'unnameableness'], + 'unamenably': ['unamenably', 'unnameably'], + 'unamend': ['mundane', 'unamend', 'unmaned', 'unnamed'], + 'unami': ['maniu', 'munia', 'unami'], + 'unapt': ['punta', 'unapt', 'untap'], + 'unarising': ['grusinian', 'unarising'], + 'unarm': ['muran', 'ruman', 'unarm', 'unram', 'urman'], + 'unarmed': ['duramen', 'maunder', 'unarmed'], + 'unarray': ['unarray', 'yaruran'], + 'unarrestable': ['subterraneal', 'unarrestable'], + 'unarrested': ['unarrested', 'unserrated'], + 'unarted': ['daunter', 'unarted', 'unrated', 'untread'], + 'unarticled': ['denticular', 'unarticled'], + 'unartistic': ['naturistic', 'unartistic'], + 'unartistical': ['naturalistic', 'unartistical'], + 'unartistically': ['naturistically', 'unartistically'], + 'unary': ['anury', 'unary', 'unray'], + 'unastray': ['auntsary', 'unastray'], + 'unathirst': ['struthian', 'unathirst'], + 'unattire': ['tainture', 'unattire'], + 'unattuned': ['unattuned', 'untaunted'], + 'unaverted': ['adventure', 'unaverted'], + 'unavertible': ['unavertible', 'unveritable'], + 'unbag': ['bugan', 'bunga', 'unbag'], + 'unbain': ['nubian', 'unbain'], + 'unbale': ['nebula', 'unable', 'unbale'], + 'unbar': ['buran', 'unbar', 'urban'], + 'unbare': ['eburna', 'unbare', 'unbear', 'urbane'], + 'unbarred': ['errabund', 'unbarred'], + 'unbased': ['subdean', 'unbased'], + 'unbaste': ['unbaste', 'unbeast'], + 'unbatted': ['debutant', 'unbatted'], + 'unbay': ['bunya', 'unbay'], + 'unbe': ['benu', 'unbe'], + 'unbear': ['eburna', 'unbare', 'unbear', 'urbane'], + 'unbearded': ['unbearded', 'unbreaded'], + 'unbeast': ['unbaste', 'unbeast'], + 'unbeavered': ['unbeavered', 
'unbereaved'], + 'unbelied': ['unbelied', 'unedible'], + 'unbereaved': ['unbeavered', 'unbereaved'], + 'unbesot': ['subnote', 'subtone', 'unbesot'], + 'unbias': ['anubis', 'unbias'], + 'unbillet': ['bulletin', 'unbillet'], + 'unbilleted': ['tunbellied', 'unbilleted'], + 'unblasted': ['dunstable', 'unblasted', 'unstabled'], + 'unbled': ['bundle', 'unbled'], + 'unboasted': ['eastbound', 'unboasted'], + 'unboat': ['outban', 'unboat'], + 'unboding': ['bounding', 'unboding'], + 'unbog': ['bungo', 'unbog'], + 'unboiled': ['unboiled', 'unilobed'], + 'unboned': ['bounden', 'unboned'], + 'unborder': ['unborder', 'underorb'], + 'unbored': ['beround', 'bounder', 'rebound', 'unbored', 'unorbed', 'unrobed'], + 'unboweled': ['unboweled', 'unelbowed'], + 'unbrace': ['bucrane', 'unbrace'], + 'unbraceleted': ['unbraceleted', 'uncelebrated'], + 'unbraid': ['barundi', 'unbraid'], + 'unbrailed': ['indurable', 'unbrailed', 'unridable'], + 'unbreaded': ['unbearded', 'unbreaded'], + 'unbred': ['bunder', 'burden', 'burned', 'unbred'], + 'unbribed': ['unbribed', 'unribbed'], + 'unbrief': ['unbrief', 'unfiber'], + 'unbriefed': ['unbriefed', 'unfibered'], + 'unbroiled': ['unbroiled', 'underboil'], + 'unbrushed': ['unbrushed', 'underbush'], + 'unbud': ['bundu', 'unbud', 'undub'], + 'unburden': ['unburden', 'unburned'], + 'unburned': ['unburden', 'unburned'], + 'unbuttered': ['unbuttered', 'unrebutted'], + 'unca': ['cuna', 'unca'], + 'uncage': ['cangue', 'uncage'], + 'uncambered': ['uncambered', 'unembraced'], + 'uncamerated': ['uncamerated', 'unmacerated'], + 'uncapable': ['uncapable', 'unpacable'], + 'uncaptious': ['uncaptious', 'usucaption'], + 'uncarted': ['uncarted', 'uncrated', 'underact', 'untraced'], + 'uncartooned': ['uncartooned', 'uncoronated'], + 'uncase': ['uncase', 'usance'], + 'uncask': ['uncask', 'unsack'], + 'uncasked': ['uncasked', 'unsacked'], + 'uncast': ['cantus', 'tuscan', 'uncast'], + 'uncatalogued': ['uncatalogued', 'uncoagulated'], + 'uncate': ['tecuna', 'uncate'], + 'uncaused': ['uncaused', 'unsauced'], + 'uncavalier': ['naviculare', 'uncavalier'], + 'uncelebrated': ['unbraceleted', 'uncelebrated'], + 'uncellar': ['lucernal', 'nucellar', 'uncellar'], + 'uncenter': ['uncenter', 'unrecent'], + 'uncertain': ['encurtain', 'runcinate', 'uncertain'], + 'uncertifiable': ['uncertifiable', 'unrectifiable'], + 'uncertified': ['uncertified', 'unrectified'], + 'unchain': ['chunnia', 'unchain'], + 'unchair': ['chunari', 'unchair'], + 'unchalked': ['unchalked', 'unhackled'], + 'uncharge': ['gunreach', 'uncharge'], + 'uncharm': ['uncharm', 'unmarch'], + 'uncharming': ['uncharming', 'unmarching'], + 'uncharred': ['uncharred', 'underarch'], + 'uncheat': ['uncheat', 'unteach'], + 'uncheating': ['uncheating', 'unteaching'], + 'unchoked': ['unchoked', 'unhocked'], + 'unchoosable': ['chaenolobus', 'unchoosable'], + 'unchosen': ['nonesuch', 'unchosen'], + 'uncial': ['cunila', 'lucian', 'lucina', 'uncial'], + 'unciferous': ['nuciferous', 'unciferous'], + 'unciform': ['nuciform', 'unciform'], + 'uncinate': ['nunciate', 'uncinate'], + 'unclaimed': ['unclaimed', 'undecimal', 'unmedical'], + 'unclay': ['lunacy', 'unclay'], + 'unclead': ['unclead', 'unlaced'], + 'unclear': ['crenula', 'lucarne', 'nuclear', 'unclear'], + 'uncleared': ['uncleared', 'undeclare'], + 'uncledom': ['columned', 'uncledom'], + 'uncleship': ['siphuncle', 'uncleship'], + 'uncloister': ['cornulites', 'uncloister'], + 'unclose': ['counsel', 'unclose'], + 'unclutter': ['truculent', 'unclutter'], + 'unco': ['cuon', 'unco'], + 'uncoagulated': 
['uncatalogued', 'uncoagulated'], + 'uncoat': ['toucan', 'tucano', 'uncoat'], + 'uncoated': ['outdance', 'uncoated'], + 'uncoiled': ['nucleoid', 'uncoiled'], + 'uncoin': ['nuncio', 'uncoin'], + 'uncollapsed': ['uncollapsed', 'unscalloped'], + 'uncolored': ['uncolored', 'undercool'], + 'uncomic': ['muconic', 'uncomic'], + 'uncompatible': ['incomputable', 'uncompatible'], + 'uncomplaint': ['uncomplaint', 'uncompliant'], + 'uncomplete': ['couplement', 'uncomplete'], + 'uncompliant': ['uncomplaint', 'uncompliant'], + 'unconcerted': ['unconcerted', 'unconcreted'], + 'unconcreted': ['unconcerted', 'unconcreted'], + 'unconservable': ['unconservable', 'unconversable'], + 'unconstraint': ['noncurantist', 'unconstraint'], + 'uncontrasted': ['counterstand', 'uncontrasted'], + 'unconversable': ['unconservable', 'unconversable'], + 'uncoop': ['coupon', 'uncoop'], + 'uncooped': ['couponed', 'uncooped'], + 'uncope': ['pounce', 'uncope'], + 'uncopied': ['cupidone', 'uncopied'], + 'uncore': ['conure', 'rounce', 'uncore'], + 'uncored': ['crunode', 'uncored'], + 'uncorked': ['uncorked', 'unrocked'], + 'uncoronated': ['uncartooned', 'uncoronated'], + 'uncorrect': ['cocurrent', 'occurrent', 'uncorrect'], + 'uncorrugated': ['counterguard', 'uncorrugated'], + 'uncorseted': ['uncorseted', 'unescorted'], + 'uncostumed': ['uncostumed', 'uncustomed'], + 'uncoursed': ['uncoursed', 'unscoured'], + 'uncouth': ['uncouth', 'untouch'], + 'uncoverable': ['uncoverable', 'unrevocable'], + 'uncradled': ['uncradled', 'underclad'], + 'uncrated': ['uncarted', 'uncrated', 'underact', 'untraced'], + 'uncreased': ['uncreased', 'undercase'], + 'uncreatable': ['uncreatable', 'untraceable'], + 'uncreatableness': ['uncreatableness', 'untraceableness'], + 'uncreation': ['enunciator', 'uncreation'], + 'uncreative': ['uncreative', 'unreactive'], + 'uncredited': ['uncredited', 'undirected'], + 'uncrest': ['encrust', 'uncrest'], + 'uncrested': ['uncrested', 'undersect'], + 'uncried': ['inducer', 'uncried'], + 'uncrooked': ['uncrooked', 'undercook'], + 'uncrude': ['uncrude', 'uncured'], + 'unctional': ['continual', 'inoculant', 'unctional'], + 'unctioneer': ['recontinue', 'unctioneer'], + 'uncured': ['uncrude', 'uncured'], + 'uncustomed': ['uncostumed', 'uncustomed'], + 'uncuth': ['tuchun', 'uncuth'], + 'undam': ['maund', 'munda', 'numda', 'undam', 'unmad'], + 'undangered': ['undangered', 'underanged', 'ungardened'], + 'undarken': ['undarken', 'unranked'], + 'undashed': ['undashed', 'unshaded'], + 'undate': ['nudate', 'undate'], + 'unde': ['dune', 'nude', 'unde'], + 'undean': ['duenna', 'undean'], + 'undear': ['endura', 'neurad', 'undear', 'unread'], + 'undeceiver': ['undeceiver', 'unreceived'], + 'undecimal': ['unclaimed', 'undecimal', 'unmedical'], + 'undeclare': ['uncleared', 'undeclare'], + 'undecolic': ['coinclude', 'undecolic'], + 'undecorated': ['undecorated', 'undercoated'], + 'undefiled': ['undefiled', 'unfielded'], + 'undeified': ['undeified', 'unedified'], + 'undelible': ['undelible', 'unlibeled'], + 'undelight': ['undelight', 'unlighted'], + 'undelude': ['undelude', 'uneluded'], + 'undeluding': ['undeluding', 'unindulged'], + 'undemanded': ['undemanded', 'unmaddened'], + 'unden': ['dunne', 'unden'], + 'undeparted': ['dunderpate', 'undeparted'], + 'undepraved': ['undepraved', 'unpervaded'], + 'under': ['runed', 'under', 'unred'], + 'underact': ['uncarted', 'uncrated', 'underact', 'untraced'], + 'underacted': ['underacted', 'unredacted'], + 'underaction': ['denunciator', 'underaction'], + 'underage': ['dungaree', 'guardeen', 
'unagreed', 'underage', 'ungeared'], + 'underaid': ['underaid', 'unraided'], + 'underaim': ['unadmire', 'underaim'], + 'underanged': ['undangered', 'underanged', 'ungardened'], + 'underarch': ['uncharred', 'underarch'], + 'underarm': ['underarm', 'unmarred'], + 'underbake': ['underbake', 'underbeak'], + 'underbeak': ['underbake', 'underbeak'], + 'underbeat': ['eburnated', 'underbeat', 'unrebated'], + 'underbit': ['turbined', 'underbit'], + 'underboil': ['unbroiled', 'underboil'], + 'underbreathing': ['thunderbearing', 'underbreathing'], + 'underbrush': ['underbrush', 'undershrub'], + 'underbush': ['unbrushed', 'underbush'], + 'undercase': ['uncreased', 'undercase'], + 'underchap': ['underchap', 'unparched'], + 'underclad': ['uncradled', 'underclad'], + 'undercoat': ['cornuated', 'undercoat'], + 'undercoated': ['undecorated', 'undercoated'], + 'undercook': ['uncrooked', 'undercook'], + 'undercool': ['uncolored', 'undercool'], + 'undercut': ['undercut', 'unreduct'], + 'underdead': ['underdead', 'undreaded'], + 'underdig': ['underdig', 'ungirded', 'unridged'], + 'underdive': ['underdive', 'underived'], + 'underdo': ['redound', 'rounded', 'underdo'], + 'underdoer': ['underdoer', 'unordered'], + 'underdog': ['grounded', 'underdog', 'undergod'], + 'underdown': ['underdown', 'undrowned'], + 'underdrag': ['underdrag', 'undergrad'], + 'underdraw': ['underdraw', 'underward'], + 'undereat': ['denature', 'undereat'], + 'underer': ['endurer', 'underer'], + 'underfiend': ['underfiend', 'unfriended'], + 'underfill': ['underfill', 'unfrilled'], + 'underfire': ['underfire', 'unferried'], + 'underflow': ['underflow', 'wonderful'], + 'underfur': ['underfur', 'unfurred'], + 'undergo': ['guerdon', 'undergo', 'ungored'], + 'undergod': ['grounded', 'underdog', 'undergod'], + 'undergoer': ['guerdoner', 'reundergo', 'undergoer', 'undergore'], + 'undergore': ['guerdoner', 'reundergo', 'undergoer', 'undergore'], + 'undergown': ['undergown', 'unwronged'], + 'undergrad': ['underdrag', 'undergrad'], + 'undergrade': ['undergrade', 'unregarded'], + 'underheat': ['underheat', 'unearthed'], + 'underhonest': ['underhonest', 'unshortened'], + 'underhorse': ['underhorse', 'undershore'], + 'underived': ['underdive', 'underived'], + 'underkind': ['underkind', 'unkindred'], + 'underlap': ['pendular', 'underlap', 'uplander'], + 'underleaf': ['underleaf', 'unfederal'], + 'underlease': ['underlease', 'unreleased'], + 'underlegate': ['underlegate', 'unrelegated'], + 'underlid': ['underlid', 'unriddle'], + 'underlive': ['underlive', 'unreviled'], + 'underlying': ['enduringly', 'underlying'], + 'undermade': ['undermade', 'undreamed'], + 'undermaid': ['unadmired', 'undermaid'], + 'undermaker': ['undermaker', 'unremarked'], + 'undermaster': ['undermaster', 'understream'], + 'undermeal': ['denumeral', 'undermeal', 'unrealmed'], + 'undermine': ['undermine', 'unermined'], + 'undermost': ['undermost', 'unstormed'], + 'undermotion': ['undermotion', 'unmonitored'], + 'undern': ['dunner', 'undern'], + 'underneath': ['unadherent', 'underneath', 'underthane'], + 'undernote': ['undernote', 'undertone'], + 'undernoted': ['undernoted', 'undertoned'], + 'underntide': ['indentured', 'underntide'], + 'underorb': ['unborder', 'underorb'], + 'underpay': ['underpay', 'unprayed'], + 'underpeer': ['perendure', 'underpeer'], + 'underpick': ['underpick', 'unpricked'], + 'underpier': ['underpier', 'underripe'], + 'underpile': ['underpile', 'unreplied'], + 'underpose': ['underpose', 'unreposed'], + 'underpuke': ['underpuke', 'unperuked'], + 'underream': 
['maunderer', 'underream'], + 'underripe': ['underpier', 'underripe'], + 'underrobe': ['rebounder', 'underrobe'], + 'undersap': ['undersap', 'unparsed', 'unrasped', 'unspared', 'unspread'], + 'undersea': ['undersea', 'unerased', 'unseared'], + 'underseam': ['underseam', 'unsmeared'], + 'undersect': ['uncrested', 'undersect'], + 'underserve': ['underserve', + 'underverse', + 'undeserver', + 'unreserved', + 'unreversed'], + 'underset': ['sederunt', 'underset', 'undesert', 'unrested'], + 'undershapen': ['undershapen', 'unsharpened'], + 'undershore': ['underhorse', 'undershore'], + 'undershrub': ['underbrush', 'undershrub'], + 'underside': ['underside', 'undesired'], + 'undersoil': ['undersoil', 'unsoldier'], + 'undersow': ['sewround', 'undersow'], + 'underspar': ['underspar', 'unsparred'], + 'understain': ['understain', 'unstrained'], + 'understand': ['understand', 'unstranded'], + 'understream': ['undermaster', 'understream'], + 'underthane': ['unadherent', 'underneath', 'underthane'], + 'underthing': ['thundering', 'underthing'], + 'undertide': ['durdenite', 'undertide'], + 'undertime': ['undertime', 'unmerited'], + 'undertimed': ['demiturned', 'undertimed'], + 'undertitle': ['undertitle', 'unlittered'], + 'undertone': ['undernote', 'undertone'], + 'undertoned': ['undernoted', 'undertoned'], + 'undertow': ['undertow', 'untrowed'], + 'undertread': ['undertread', 'unretarded'], + 'undertutor': ['undertutor', 'untortured'], + 'underverse': ['underserve', + 'underverse', + 'undeserver', + 'unreserved', + 'unreversed'], + 'underwage': ['underwage', 'unwagered'], + 'underward': ['underdraw', 'underward'], + 'underwarp': ['underwarp', 'underwrap'], + 'underwave': ['underwave', 'unwavered'], + 'underwrap': ['underwarp', 'underwrap'], + 'undesert': ['sederunt', 'underset', 'undesert', 'unrested'], + 'undeserve': ['undeserve', 'unsevered'], + 'undeserver': ['underserve', + 'underverse', + 'undeserver', + 'unreserved', + 'unreversed'], + 'undesign': ['undesign', 'unsigned', 'unsinged'], + 'undesired': ['underside', 'undesired'], + 'undeviated': ['denudative', 'undeviated'], + 'undieted': ['undieted', 'unedited'], + 'undig': ['gundi', 'undig'], + 'undirect': ['undirect', 'untriced'], + 'undirected': ['uncredited', 'undirected'], + 'undiscerned': ['undiscerned', 'unrescinded'], + 'undiscretion': ['discontinuer', 'undiscretion'], + 'undistress': ['sturdiness', 'undistress'], + 'undiverse': ['undiverse', 'unrevised'], + 'undog': ['undog', 'ungod'], + 'undrab': ['durban', 'undrab'], + 'undrag': ['durgan', 'undrag'], + 'undrape': ['undrape', 'unpared', 'unraped'], + 'undreaded': ['underdead', 'undreaded'], + 'undreamed': ['undermade', 'undreamed'], + 'undrowned': ['underdown', 'undrowned'], + 'undrugged': ['undrugged', 'ungrudged'], + 'undryable': ['endurably', 'undryable'], + 'undub': ['bundu', 'unbud', 'undub'], + 'undumped': ['pudendum', 'undumped'], + 'undy': ['duny', 'undy'], + 'uneager': ['geneura', 'uneager'], + 'unearned': ['unearned', 'unneared'], + 'unearnest': ['unearnest', 'uneastern'], + 'unearth': ['haunter', 'nauther', 'unearth', 'unheart', 'urethan'], + 'unearthed': ['underheat', 'unearthed'], + 'unearthly': ['unearthly', 'urethylan'], + 'uneastern': ['unearnest', 'uneastern'], + 'uneath': ['uneath', 'unhate'], + 'unebriate': ['beraunite', 'unebriate'], + 'unedge': ['dengue', 'unedge'], + 'unedible': ['unbelied', 'unedible'], + 'unedified': ['undeified', 'unedified'], + 'unedited': ['undieted', 'unedited'], + 'unelapsed': ['unelapsed', 'unpleased'], + 'unelated': ['antelude', 'unelated'], + 
'unelbowed': ['unboweled', 'unelbowed'], + 'unelidible': ['ineludible', 'unelidible'], + 'uneluded': ['undelude', 'uneluded'], + 'unembased': ['sunbeamed', 'unembased'], + 'unembraced': ['uncambered', 'unembraced'], + 'unenabled': ['unenabled', 'unendable'], + 'unencored': ['denouncer', 'unencored'], + 'unendable': ['unenabled', 'unendable'], + 'unending': ['unending', 'unginned'], + 'unenervated': ['unenervated', 'unvenerated'], + 'unenlisted': ['unenlisted', 'unlistened', 'untinseled'], + 'unenterprised': ['superintender', 'unenterprised'], + 'unenviable': ['unenviable', 'unveniable'], + 'unenvied': ['unenvied', 'unveined'], + 'unequitable': ['unequitable', 'unquietable'], + 'unerased': ['undersea', 'unerased', 'unseared'], + 'unermined': ['undermine', 'unermined'], + 'unerratic': ['recurtain', 'unerratic'], + 'unerupted': ['unerupted', 'unreputed'], + 'unescorted': ['uncorseted', 'unescorted'], + 'unevil': ['unevil', 'unlive', 'unveil'], + 'unexactly': ['exultancy', 'unexactly'], + 'unexceptable': ['unexceptable', 'unexpectable'], + 'unexcepted': ['unexcepted', 'unexpected'], + 'unexcepting': ['unexcepting', 'unexpecting'], + 'unexpectable': ['unexceptable', 'unexpectable'], + 'unexpected': ['unexcepted', 'unexpected'], + 'unexpecting': ['unexcepting', 'unexpecting'], + 'unfabled': ['fundable', 'unfabled'], + 'unfaceted': ['fecundate', 'unfaceted'], + 'unfactional': ['afunctional', 'unfactional'], + 'unfactored': ['fecundator', 'unfactored'], + 'unfainting': ['antifungin', 'unfainting'], + 'unfallible': ['unfallible', 'unfillable'], + 'unfar': ['furan', 'unfar'], + 'unfarmed': ['unfarmed', 'unframed'], + 'unfederal': ['underleaf', 'unfederal'], + 'unfeeding': ['unfeeding', 'unfeigned'], + 'unfeeling': ['unfeeling', 'unfleeing'], + 'unfeigned': ['unfeeding', 'unfeigned'], + 'unfelt': ['fluent', 'netful', 'unfelt', 'unleft'], + 'unfelted': ['defluent', 'unfelted'], + 'unferried': ['underfire', 'unferried'], + 'unfiber': ['unbrief', 'unfiber'], + 'unfibered': ['unbriefed', 'unfibered'], + 'unfielded': ['undefiled', 'unfielded'], + 'unfiend': ['unfiend', 'unfined'], + 'unfiery': ['reunify', 'unfiery'], + 'unfillable': ['unfallible', 'unfillable'], + 'unfined': ['unfiend', 'unfined'], + 'unfired': ['unfired', 'unfried'], + 'unflag': ['fungal', 'unflag'], + 'unflat': ['flaunt', 'unflat'], + 'unfleeing': ['unfeeling', 'unfleeing'], + 'unfloured': ['unfloured', 'unfoldure'], + 'unfolder': ['flounder', 'reunfold', 'unfolder'], + 'unfolding': ['foundling', 'unfolding'], + 'unfoldure': ['unfloured', 'unfoldure'], + 'unforest': ['furstone', 'unforest'], + 'unforested': ['unforested', 'unfostered'], + 'unformality': ['fulminatory', 'unformality'], + 'unforward': ['unforward', 'unfroward'], + 'unfostered': ['unforested', 'unfostered'], + 'unfrail': ['rainful', 'unfrail'], + 'unframed': ['unfarmed', 'unframed'], + 'unfret': ['turfen', 'unfret'], + 'unfriable': ['funebrial', 'unfriable'], + 'unfried': ['unfired', 'unfried'], + 'unfriended': ['underfiend', 'unfriended'], + 'unfriending': ['unfriending', 'uninfringed'], + 'unfrilled': ['underfill', 'unfrilled'], + 'unfroward': ['unforward', 'unfroward'], + 'unfurl': ['unfurl', 'urnful'], + 'unfurred': ['underfur', 'unfurred'], + 'ungaite': ['ungaite', 'unitage'], + 'unganged': ['unganged', 'unnagged'], + 'ungardened': ['undangered', 'underanged', 'ungardened'], + 'ungarnish': ['ungarnish', 'unsharing'], + 'ungear': ['nauger', 'raunge', 'ungear'], + 'ungeared': ['dungaree', 'guardeen', 'unagreed', 'underage', 'ungeared'], + 'ungelt': ['englut', 'gluten', 
'ungelt'], + 'ungenerable': ['ungenerable', 'ungreenable'], + 'unget': ['tengu', 'unget'], + 'ungilded': ['deluding', 'ungilded'], + 'ungill': ['ulling', 'ungill'], + 'ungilt': ['glutin', 'luting', 'ungilt'], + 'unginned': ['unending', 'unginned'], + 'ungird': ['during', 'ungird'], + 'ungirded': ['underdig', 'ungirded', 'unridged'], + 'ungirdle': ['indulger', 'ungirdle'], + 'ungirt': ['ungirt', 'untrig'], + 'ungirth': ['hurting', 'ungirth', 'unright'], + 'ungirthed': ['ungirthed', 'unrighted'], + 'unglad': ['gandul', 'unglad'], + 'unglued': ['unglued', 'unguled'], + 'ungod': ['undog', 'ungod'], + 'ungold': ['dungol', 'ungold'], + 'ungone': ['guenon', 'ungone'], + 'ungored': ['guerdon', 'undergo', 'ungored'], + 'ungorge': ['gurgeon', 'ungorge'], + 'ungot': ['tungo', 'ungot'], + 'ungothic': ['touching', 'ungothic'], + 'ungraphic': ['ungraphic', 'uparching'], + 'ungreenable': ['ungenerable', 'ungreenable'], + 'ungrieved': ['gerundive', 'ungrieved'], + 'ungroined': ['ungroined', 'unignored'], + 'ungrudged': ['undrugged', 'ungrudged'], + 'ungual': ['ungual', 'ungula'], + 'ungueal': ['ungueal', 'ungulae'], + 'ungula': ['ungual', 'ungula'], + 'ungulae': ['ungueal', 'ungulae'], + 'unguled': ['unglued', 'unguled'], + 'ungulp': ['ungulp', 'unplug'], + 'unhabit': ['bhutani', 'unhabit'], + 'unhackled': ['unchalked', 'unhackled'], + 'unhairer': ['rhineura', 'unhairer'], + 'unhalsed': ['unhalsed', 'unlashed', 'unshaled'], + 'unhalted': ['unhalted', 'unlathed'], + 'unhalter': ['lutheran', 'unhalter'], + 'unhaltered': ['unhaltered', 'unlathered'], + 'unhamper': ['prehuman', 'unhamper'], + 'unharbored': ['unabhorred', 'unharbored'], + 'unhasped': ['unhasped', 'unphased', 'unshaped'], + 'unhat': ['ahunt', 'haunt', 'thuan', 'unhat'], + 'unhate': ['uneath', 'unhate'], + 'unhatingly': ['hauntingly', 'unhatingly'], + 'unhayed': ['unhayed', 'unheady'], + 'unheady': ['unhayed', 'unheady'], + 'unhearsed': ['unhearsed', 'unsheared'], + 'unheart': ['haunter', 'nauther', 'unearth', 'unheart', 'urethan'], + 'unhid': ['hindu', 'hundi', 'unhid'], + 'unhistoric': ['trichinous', 'unhistoric'], + 'unhittable': ['unhittable', 'untithable'], + 'unhoarded': ['roundhead', 'unhoarded'], + 'unhocked': ['unchoked', 'unhocked'], + 'unhoisted': ['hudsonite', 'unhoisted'], + 'unhorse': ['unhorse', 'unshore'], + 'unhose': ['unhose', 'unshoe'], + 'unhosed': ['unhosed', 'unshoed'], + 'unhurt': ['unhurt', 'unruth'], + 'uniat': ['uinta', 'uniat'], + 'uniate': ['auntie', 'uniate'], + 'unible': ['nubile', 'unible'], + 'uniced': ['induce', 'uniced'], + 'unicentral': ['incruental', 'unicentral'], + 'unideal': ['aliunde', 'unideal'], + 'unidentified': ['indefinitude', 'unidentified'], + 'unidly': ['unidly', 'yildun'], + 'unie': ['niue', 'unie'], + 'unifilar': ['friulian', 'unifilar'], + 'uniflorate': ['antifouler', 'fluorinate', 'uniflorate'], + 'unigenous': ['ingenuous', 'unigenous'], + 'unignored': ['ungroined', 'unignored'], + 'unilobar': ['orbulina', 'unilobar'], + 'unilobed': ['unboiled', 'unilobed'], + 'unimedial': ['aluminide', 'unimedial'], + 'unimpair': ['manipuri', 'unimpair'], + 'unimparted': ['diparentum', 'unimparted'], + 'unimportance': ['importunance', 'unimportance'], + 'unimpressible': ['unimpressible', 'unpermissible'], + 'unimpressive': ['unimpressive', 'unpermissive'], + 'unindented': ['unindented', 'unintended'], + 'unindulged': ['undeluding', 'unindulged'], + 'uninervate': ['aventurine', 'uninervate'], + 'uninfringed': ['unfriending', 'uninfringed'], + 'uningested': ['uningested', 'unsigneted'], + 'uninn': ['nunni', 
'uninn'], + 'uninnate': ['eutannin', 'uninnate'], + 'uninodal': ['annuloid', 'uninodal'], + 'unintended': ['unindented', 'unintended'], + 'unintoned': ['nonunited', 'unintoned'], + 'uninured': ['uninured', 'unruined'], + 'unionist': ['inustion', 'unionist'], + 'unipersonal': ['spinoneural', 'unipersonal'], + 'unipod': ['dupion', 'unipod'], + 'uniradial': ['nidularia', 'uniradial'], + 'unireme': ['erineum', 'unireme'], + 'uniserrate': ['arseniuret', 'uniserrate'], + 'unison': ['nonius', 'unison'], + 'unitage': ['ungaite', 'unitage'], + 'unital': ['inlaut', 'unital'], + 'unite': ['intue', 'unite', 'untie'], + 'united': ['dunite', 'united', 'untied'], + 'uniter': ['runite', 'triune', 'uniter', 'untire'], + 'unitiveness': ['unitiveness', 'unsensitive'], + 'unitrope': ['eruption', 'unitrope'], + 'univied': ['univied', 'viduine'], + 'unket': ['knute', 'unket'], + 'unkilned': ['unkilned', 'unlinked'], + 'unkin': ['nunki', 'unkin'], + 'unkindred': ['underkind', 'unkindred'], + 'unlabiate': ['laubanite', 'unlabiate'], + 'unlabored': ['burdalone', 'unlabored'], + 'unlace': ['auncel', 'cuneal', 'lacune', 'launce', 'unlace'], + 'unlaced': ['unclead', 'unlaced'], + 'unlade': ['unlade', 'unlead'], + 'unlaid': ['dualin', 'ludian', 'unlaid'], + 'unlame': ['manuel', 'unlame'], + 'unlapped': ['unlapped', 'unpalped'], + 'unlarge': ['granule', 'unlarge', 'unregal'], + 'unlashed': ['unhalsed', 'unlashed', 'unshaled'], + 'unlasting': ['unlasting', 'unslating'], + 'unlatch': ['tulchan', 'unlatch'], + 'unlathed': ['unhalted', 'unlathed'], + 'unlathered': ['unhaltered', 'unlathered'], + 'unlay': ['unlay', 'yulan'], + 'unlead': ['unlade', 'unlead'], + 'unleasable': ['unleasable', 'unsealable'], + 'unleased': ['unleased', 'unsealed'], + 'unleash': ['hulsean', 'unleash'], + 'unled': ['lendu', 'unled'], + 'unleft': ['fluent', 'netful', 'unfelt', 'unleft'], + 'unlent': ['nunlet', 'tunnel', 'unlent'], + 'unlevied': ['unlevied', 'unveiled'], + 'unlibeled': ['undelible', 'unlibeled'], + 'unliberal': ['brunellia', 'unliberal'], + 'unlicensed': ['unlicensed', 'unsilenced'], + 'unlighted': ['undelight', 'unlighted'], + 'unliken': ['nunlike', 'unliken'], + 'unlime': ['lumine', 'unlime'], + 'unlinked': ['unkilned', 'unlinked'], + 'unlist': ['insult', 'sunlit', 'unlist', 'unslit'], + 'unlistened': ['unenlisted', 'unlistened', 'untinseled'], + 'unlit': ['unlit', 'until'], + 'unliteral': ['tellurian', 'unliteral'], + 'unlittered': ['undertitle', 'unlittered'], + 'unlive': ['unevil', 'unlive', 'unveil'], + 'unloaded': ['duodenal', 'unloaded'], + 'unloaden': ['unloaden', 'unloaned'], + 'unloader': ['unloader', 'urodelan'], + 'unloaned': ['unloaden', 'unloaned'], + 'unlodge': ['unlodge', 'unogled'], + 'unlogic': ['gulonic', 'unlogic'], + 'unlooped': ['unlooped', 'unpooled'], + 'unlooted': ['unlooted', 'untooled'], + 'unlost': ['unlost', 'unslot'], + 'unlowered': ['unlowered', 'unroweled'], + 'unlucid': ['nuculid', 'unlucid'], + 'unlumped': ['pendulum', 'unlumped', 'unplumed'], + 'unlured': ['unlured', 'unruled'], + 'unlyrical': ['runically', 'unlyrical'], + 'unmacerated': ['uncamerated', 'unmacerated'], + 'unmad': ['maund', 'munda', 'numda', 'undam', 'unmad'], + 'unmadded': ['addendum', 'unmadded'], + 'unmaddened': ['undemanded', 'unmaddened'], + 'unmaid': ['numida', 'unmaid'], + 'unmail': ['alumni', 'unmail'], + 'unmailed': ['adlumine', 'unmailed'], + 'unmaned': ['mundane', 'unamend', 'unmaned', 'unnamed'], + 'unmantle': ['unmantle', 'unmental'], + 'unmarch': ['uncharm', 'unmarch'], + 'unmarching': ['uncharming', 'unmarching'], + 
'unmarginal': ['unalarming', 'unmarginal'], + 'unmarred': ['underarm', 'unmarred'], + 'unmashed': ['unmashed', 'unshamed'], + 'unmate': ['unmate', 'untame', 'unteam'], + 'unmated': ['unmated', 'untamed'], + 'unmaterial': ['manualiter', 'unmaterial'], + 'unmeated': ['unmeated', 'unteamed'], + 'unmedical': ['unclaimed', 'undecimal', 'unmedical'], + 'unmeet': ['unmeet', 'unteem'], + 'unmemoired': ['unmemoired', 'unmemoried'], + 'unmemoried': ['unmemoired', 'unmemoried'], + 'unmental': ['unmantle', 'unmental'], + 'unmerged': ['gerendum', 'unmerged'], + 'unmerited': ['undertime', 'unmerited'], + 'unmettle': ['temulent', 'unmettle'], + 'unminable': ['nelumbian', 'unminable'], + 'unmined': ['minuend', 'unmined'], + 'unminted': ['indument', 'unminted'], + 'unmisled': ['muslined', 'unmisled', 'unsmiled'], + 'unmiter': ['minuter', 'unmiter'], + 'unmodest': ['mudstone', 'unmodest'], + 'unmodish': ['muishond', 'unmodish'], + 'unmomentary': ['monumentary', 'unmomentary'], + 'unmonitored': ['undermotion', 'unmonitored'], + 'unmorbid': ['moribund', 'unmorbid'], + 'unmorose': ['enormous', 'unmorose'], + 'unmortised': ['semirotund', 'unmortised'], + 'unmotived': ['unmotived', 'unvomited'], + 'unmystical': ['stimulancy', 'unmystical'], + 'unnagged': ['unganged', 'unnagged'], + 'unnail': ['alnuin', 'unnail'], + 'unnameability': ['unamenability', 'unnameability'], + 'unnameable': ['unamenable', 'unnameable'], + 'unnameableness': ['unamenableness', 'unnameableness'], + 'unnameably': ['unamenably', 'unnameably'], + 'unnamed': ['mundane', 'unamend', 'unmaned', 'unnamed'], + 'unnational': ['annulation', 'unnational'], + 'unnative': ['unnative', 'venutian'], + 'unneared': ['unearned', 'unneared'], + 'unnest': ['unnest', 'unsent'], + 'unnetted': ['unnetted', 'untented'], + 'unnose': ['nonuse', 'unnose'], + 'unnoted': ['unnoted', 'untoned'], + 'unnoticed': ['continued', 'unnoticed'], + 'unoared': ['rondeau', 'unoared'], + 'unogled': ['unlodge', 'unogled'], + 'unomitted': ['dumontite', 'unomitted'], + 'unoperatic': ['precaution', 'unoperatic'], + 'unorbed': ['beround', 'bounder', 'rebound', 'unbored', 'unorbed', 'unrobed'], + 'unorder': ['rondure', 'rounder', 'unorder'], + 'unordered': ['underdoer', 'unordered'], + 'unoriented': ['nonerudite', 'unoriented'], + 'unown': ['unown', 'unwon'], + 'unowned': ['enwound', 'unowned'], + 'unpacable': ['uncapable', 'unpacable'], + 'unpacker': ['reunpack', 'unpacker'], + 'unpaired': ['unpaired', 'unrepaid'], + 'unpale': ['unpale', 'uplane'], + 'unpalped': ['unlapped', 'unpalped'], + 'unpanel': ['unpanel', 'unpenal'], + 'unparceled': ['unparceled', 'unreplaced'], + 'unparched': ['underchap', 'unparched'], + 'unpared': ['undrape', 'unpared', 'unraped'], + 'unparsed': ['undersap', 'unparsed', 'unrasped', 'unspared', 'unspread'], + 'unparted': ['depurant', 'unparted'], + 'unpartial': ['tarpaulin', 'unpartial'], + 'unpenal': ['unpanel', 'unpenal'], + 'unpenetrable': ['unpenetrable', 'unrepentable'], + 'unpent': ['punnet', 'unpent'], + 'unperch': ['puncher', 'unperch'], + 'unpercolated': ['counterpaled', 'counterplead', 'unpercolated'], + 'unpermissible': ['unimpressible', 'unpermissible'], + 'unpermissive': ['unimpressive', 'unpermissive'], + 'unperuked': ['underpuke', 'unperuked'], + 'unpervaded': ['undepraved', 'unpervaded'], + 'unpetal': ['plutean', 'unpetal', 'unpleat'], + 'unpharasaic': ['parasuchian', 'unpharasaic'], + 'unphased': ['unhasped', 'unphased', 'unshaped'], + 'unphrased': ['unphrased', 'unsharped'], + 'unpickled': ['dunpickle', 'unpickled'], + 'unpierced': 
['preinduce', 'unpierced'], + 'unpile': ['lupine', 'unpile', 'upline'], + 'unpiled': ['unpiled', 'unplied'], + 'unplace': ['cleanup', 'unplace'], + 'unplain': ['pinnula', 'unplain'], + 'unplait': ['nuptial', 'unplait'], + 'unplanted': ['pendulant', 'unplanted'], + 'unplat': ['puntal', 'unplat'], + 'unpleased': ['unelapsed', 'unpleased'], + 'unpleat': ['plutean', 'unpetal', 'unpleat'], + 'unpleated': ['pendulate', 'unpleated'], + 'unplied': ['unpiled', 'unplied'], + 'unplug': ['ungulp', 'unplug'], + 'unplumed': ['pendulum', 'unlumped', 'unplumed'], + 'unpoled': ['duplone', 'unpoled'], + 'unpolished': ['disulphone', 'unpolished'], + 'unpolitic': ['punctilio', 'unpolitic'], + 'unpooled': ['unlooped', 'unpooled'], + 'unposted': ['outspend', 'unposted'], + 'unpot': ['punto', 'unpot', 'untop'], + 'unprayed': ['underpay', 'unprayed'], + 'unprelatic': ['periculant', 'unprelatic'], + 'unpressed': ['resuspend', 'suspender', 'unpressed'], + 'unpricked': ['underpick', 'unpricked'], + 'unprint': ['turnpin', 'unprint'], + 'unprosaic': ['inocarpus', 'unprosaic'], + 'unproselyted': ['pseudelytron', 'unproselyted'], + 'unproud': ['roundup', 'unproud'], + 'unpursued': ['unpursued', 'unusurped'], + 'unpursuing': ['unpursuing', 'unusurping'], + 'unquietable': ['unequitable', 'unquietable'], + 'unrabbeted': ['beturbaned', 'unrabbeted'], + 'unraced': ['durance', 'redunca', 'unraced'], + 'unraided': ['underaid', 'unraided'], + 'unraised': ['denarius', 'desaurin', 'unraised'], + 'unram': ['muran', 'ruman', 'unarm', 'unram', 'urman'], + 'unranked': ['undarken', 'unranked'], + 'unraped': ['undrape', 'unpared', 'unraped'], + 'unrasped': ['undersap', 'unparsed', 'unrasped', 'unspared', 'unspread'], + 'unrated': ['daunter', 'unarted', 'unrated', 'untread'], + 'unravel': ['unravel', 'venular'], + 'unray': ['anury', 'unary', 'unray'], + 'unrayed': ['unrayed', 'unready'], + 'unreactive': ['uncreative', 'unreactive'], + 'unread': ['endura', 'neurad', 'undear', 'unread'], + 'unready': ['unrayed', 'unready'], + 'unreal': ['lunare', 'neural', 'ulnare', 'unreal'], + 'unrealism': ['semilunar', 'unrealism'], + 'unrealist': ['neuralist', 'ulsterian', 'unrealist'], + 'unrealmed': ['denumeral', 'undermeal', 'unrealmed'], + 'unrebated': ['eburnated', 'underbeat', 'unrebated'], + 'unrebutted': ['unbuttered', 'unrebutted'], + 'unreceived': ['undeceiver', 'unreceived'], + 'unrecent': ['uncenter', 'unrecent'], + 'unrecited': ['centuried', 'unrecited'], + 'unrectifiable': ['uncertifiable', 'unrectifiable'], + 'unrectified': ['uncertified', 'unrectified'], + 'unred': ['runed', 'under', 'unred'], + 'unredacted': ['underacted', 'unredacted'], + 'unreduct': ['undercut', 'unreduct'], + 'unreeve': ['revenue', 'unreeve'], + 'unreeving': ['unreeving', 'unveering'], + 'unregal': ['granule', 'unlarge', 'unregal'], + 'unregard': ['grandeur', 'unregard'], + 'unregarded': ['undergrade', 'unregarded'], + 'unrein': ['enruin', 'neurin', 'unrein'], + 'unreinstated': ['unreinstated', 'unstraitened'], + 'unrelated': ['unaltered', 'unrelated'], + 'unrelating': ['unaltering', 'unrelating'], + 'unreleased': ['underlease', 'unreleased'], + 'unrelegated': ['underlegate', 'unrelegated'], + 'unremarked': ['undermaker', 'unremarked'], + 'unrent': ['runnet', 'tunner', 'unrent'], + 'unrented': ['unrented', 'untender'], + 'unrepaid': ['unpaired', 'unrepaid'], + 'unrepentable': ['unpenetrable', 'unrepentable'], + 'unrepined': ['unrepined', 'unripened'], + 'unrepining': ['unrepining', 'unripening'], + 'unreplaced': ['unparceled', 'unreplaced'], + 'unreplied': 
['underpile', 'unreplied'], + 'unreposed': ['underpose', 'unreposed'], + 'unreputed': ['unerupted', 'unreputed'], + 'unrescinded': ['undiscerned', 'unrescinded'], + 'unrescued': ['unrescued', 'unsecured'], + 'unreserved': ['underserve', + 'underverse', + 'undeserver', + 'unreserved', + 'unreversed'], + 'unresisted': ['unresisted', 'unsistered'], + 'unresolve': ['nervulose', 'unresolve', 'vulnerose'], + 'unrespect': ['unrespect', 'unscepter', 'unsceptre'], + 'unrespected': ['unrespected', 'unsceptered'], + 'unrested': ['sederunt', 'underset', 'undesert', 'unrested'], + 'unresting': ['insurgent', 'unresting'], + 'unretarded': ['undertread', 'unretarded'], + 'unreticent': ['entincture', 'unreticent'], + 'unretired': ['reintrude', 'unretired'], + 'unrevered': ['enverdure', 'unrevered'], + 'unreversed': ['underserve', + 'underverse', + 'undeserver', + 'unreserved', + 'unreversed'], + 'unreviled': ['underlive', 'unreviled'], + 'unrevised': ['undiverse', 'unrevised'], + 'unrevocable': ['uncoverable', 'unrevocable'], + 'unribbed': ['unbribed', 'unribbed'], + 'unrich': ['unrich', 'urchin'], + 'unrid': ['rundi', 'unrid'], + 'unridable': ['indurable', 'unbrailed', 'unridable'], + 'unriddle': ['underlid', 'unriddle'], + 'unride': ['diurne', 'inured', 'ruined', 'unride'], + 'unridged': ['underdig', 'ungirded', 'unridged'], + 'unrig': ['irgun', 'ruing', 'unrig'], + 'unright': ['hurting', 'ungirth', 'unright'], + 'unrighted': ['ungirthed', 'unrighted'], + 'unring': ['unring', 'urning'], + 'unringed': ['enduring', 'unringed'], + 'unripe': ['purine', 'unripe', 'uprein'], + 'unripely': ['pyruline', 'unripely'], + 'unripened': ['unrepined', 'unripened'], + 'unripening': ['unrepining', 'unripening'], + 'unroaded': ['unadored', 'unroaded'], + 'unrobed': ['beround', 'bounder', 'rebound', 'unbored', 'unorbed', 'unrobed'], + 'unrocked': ['uncorked', 'unrocked'], + 'unroot': ['notour', 'unroot'], + 'unroped': ['pounder', 'repound', 'unroped'], + 'unrosed': ['resound', 'sounder', 'unrosed'], + 'unrostrated': ['tetrandrous', 'unrostrated'], + 'unrotated': ['rotundate', 'unrotated'], + 'unroted': ['tendour', 'unroted'], + 'unroused': ['unroused', 'unsoured'], + 'unrouted': ['unrouted', 'untoured'], + 'unrowed': ['rewound', 'unrowed', 'wounder'], + 'unroweled': ['unlowered', 'unroweled'], + 'unroyalist': ['unroyalist', 'unsolitary'], + 'unruined': ['uninured', 'unruined'], + 'unruled': ['unlured', 'unruled'], + 'unrun': ['unrun', 'unurn'], + 'unruth': ['unhurt', 'unruth'], + 'unsack': ['uncask', 'unsack'], + 'unsacked': ['uncasked', 'unsacked'], + 'unsacred': ['unsacred', 'unscared'], + 'unsad': ['sudan', 'unsad'], + 'unsadden': ['unsadden', 'unsanded'], + 'unsage': ['gnaeus', 'unsage'], + 'unsaid': ['sudani', 'unsaid'], + 'unsailed': ['unaisled', 'unsailed'], + 'unsaint': ['antisun', 'unsaint', 'unsatin', 'unstain'], + 'unsainted': ['unsainted', 'unstained'], + 'unsalt': ['sultan', 'unsalt'], + 'unsalted': ['unsalted', 'unslated', 'unstaled'], + 'unsanded': ['unsadden', 'unsanded'], + 'unsardonic': ['andronicus', 'unsardonic'], + 'unsashed': ['sunshade', 'unsashed'], + 'unsatable': ['sublanate', 'unsatable'], + 'unsatiable': ['balaustine', 'unsatiable'], + 'unsatin': ['antisun', 'unsaint', 'unsatin', 'unstain'], + 'unsauced': ['uncaused', 'unsauced'], + 'unscale': ['censual', 'unscale'], + 'unscalloped': ['uncollapsed', 'unscalloped'], + 'unscaly': ['ancylus', 'unscaly'], + 'unscared': ['unsacred', 'unscared'], + 'unscepter': ['unrespect', 'unscepter', 'unsceptre'], + 'unsceptered': ['unrespected', 'unsceptered'], + 
'unsceptre': ['unrespect', 'unscepter', 'unsceptre'], + 'unscoured': ['uncoursed', 'unscoured'], + 'unseal': ['elanus', 'unseal'], + 'unsealable': ['unleasable', 'unsealable'], + 'unsealed': ['unleased', 'unsealed'], + 'unseared': ['undersea', 'unerased', 'unseared'], + 'unseat': ['nasute', 'nauset', 'unseat'], + 'unseated': ['unseated', 'unsedate', 'unteased'], + 'unsecured': ['unrescued', 'unsecured'], + 'unsedate': ['unseated', 'unsedate', 'unteased'], + 'unsee': ['ensue', 'seenu', 'unsee'], + 'unseethed': ['unseethed', 'unsheeted'], + 'unseizable': ['unseizable', 'unsizeable'], + 'unselect': ['esculent', 'unselect'], + 'unsensed': ['nudeness', 'unsensed'], + 'unsensitive': ['unitiveness', 'unsensitive'], + 'unsent': ['unnest', 'unsent'], + 'unsepulcher': ['unsepulcher', 'unsepulchre'], + 'unsepulchre': ['unsepulcher', 'unsepulchre'], + 'unserrated': ['unarrested', 'unserrated'], + 'unserved': ['unserved', 'unversed'], + 'unset': ['unset', 'usent'], + 'unsevered': ['undeserve', 'unsevered'], + 'unsewed': ['sunweed', 'unsewed'], + 'unsex': ['nexus', 'unsex'], + 'unshaded': ['undashed', 'unshaded'], + 'unshaled': ['unhalsed', 'unlashed', 'unshaled'], + 'unshamed': ['unmashed', 'unshamed'], + 'unshaped': ['unhasped', 'unphased', 'unshaped'], + 'unsharing': ['ungarnish', 'unsharing'], + 'unsharped': ['unphrased', 'unsharped'], + 'unsharpened': ['undershapen', 'unsharpened'], + 'unsheared': ['unhearsed', 'unsheared'], + 'unsheet': ['enthuse', 'unsheet'], + 'unsheeted': ['unseethed', 'unsheeted'], + 'unship': ['inpush', 'punish', 'unship'], + 'unshipment': ['punishment', 'unshipment'], + 'unshoe': ['unhose', 'unshoe'], + 'unshoed': ['unhosed', 'unshoed'], + 'unshore': ['unhorse', 'unshore'], + 'unshored': ['enshroud', 'unshored'], + 'unshortened': ['underhonest', 'unshortened'], + 'unsicker': ['cruisken', 'unsicker'], + 'unsickled': ['klendusic', 'unsickled'], + 'unsight': ['gutnish', 'husting', 'unsight'], + 'unsignable': ['unsignable', 'unsingable'], + 'unsigned': ['undesign', 'unsigned', 'unsinged'], + 'unsigneted': ['uningested', 'unsigneted'], + 'unsilenced': ['unlicensed', 'unsilenced'], + 'unsimple': ['splenium', 'unsimple'], + 'unsin': ['sunni', 'unsin'], + 'unsingable': ['unsignable', 'unsingable'], + 'unsinged': ['undesign', 'unsigned', 'unsinged'], + 'unsistered': ['unresisted', 'unsistered'], + 'unsizeable': ['unseizable', 'unsizeable'], + 'unskin': ['insunk', 'unskin'], + 'unslate': ['sultane', 'unslate'], + 'unslated': ['unsalted', 'unslated', 'unstaled'], + 'unslating': ['unlasting', 'unslating'], + 'unslept': ['unslept', 'unspelt'], + 'unslighted': ['sunlighted', 'unslighted'], + 'unslit': ['insult', 'sunlit', 'unlist', 'unslit'], + 'unslot': ['unlost', 'unslot'], + 'unsmeared': ['underseam', 'unsmeared'], + 'unsmiled': ['muslined', 'unmisled', 'unsmiled'], + 'unsnap': ['pannus', 'sannup', 'unsnap', 'unspan'], + 'unsnatch': ['unsnatch', 'unstanch'], + 'unsnow': ['unsnow', 'unsown'], + 'unsocial': ['sualocin', 'unsocial'], + 'unsoil': ['insoul', 'linous', 'nilous', 'unsoil'], + 'unsoiled': ['delusion', 'unsoiled'], + 'unsoldier': ['undersoil', 'unsoldier'], + 'unsole': ['ensoul', 'olenus', 'unsole'], + 'unsolitary': ['unroyalist', 'unsolitary'], + 'unsomber': ['unsomber', 'unsombre'], + 'unsombre': ['unsomber', 'unsombre'], + 'unsome': ['nomeus', 'unsome'], + 'unsore': ['souren', 'unsore', 'ursone'], + 'unsort': ['tornus', 'unsort'], + 'unsortable': ['neuroblast', 'unsortable'], + 'unsorted': ['tonsured', 'unsorted', 'unstored'], + 'unsoured': ['unroused', 'unsoured'], + 
'unsown': ['unsnow', 'unsown'], + 'unspan': ['pannus', 'sannup', 'unsnap', 'unspan'], + 'unspar': ['surnap', 'unspar'], + 'unspared': ['undersap', 'unparsed', 'unrasped', 'unspared', 'unspread'], + 'unsparred': ['underspar', 'unsparred'], + 'unspecterlike': ['unspecterlike', 'unspectrelike'], + 'unspectrelike': ['unspecterlike', 'unspectrelike'], + 'unsped': ['unsped', 'upsend'], + 'unspelt': ['unslept', 'unspelt'], + 'unsphering': ['gunnership', 'unsphering'], + 'unspiable': ['subalpine', 'unspiable'], + 'unspike': ['spunkie', 'unspike'], + 'unspit': ['ptinus', 'unspit'], + 'unspoil': ['pulsion', 'unspoil', 'upsilon'], + 'unspot': ['pontus', 'unspot', 'unstop'], + 'unspread': ['undersap', 'unparsed', 'unrasped', 'unspared', 'unspread'], + 'unstabled': ['dunstable', 'unblasted', 'unstabled'], + 'unstain': ['antisun', 'unsaint', 'unsatin', 'unstain'], + 'unstained': ['unsainted', 'unstained'], + 'unstaled': ['unsalted', 'unslated', 'unstaled'], + 'unstanch': ['unsnatch', 'unstanch'], + 'unstar': ['saturn', 'unstar'], + 'unstatable': ['unstatable', 'untastable'], + 'unstate': ['tetanus', 'unstate', 'untaste'], + 'unstateable': ['unstateable', 'untasteable'], + 'unstated': ['unstated', 'untasted'], + 'unstating': ['unstating', 'untasting'], + 'unstayed': ['unstayed', 'unsteady'], + 'unsteady': ['unstayed', 'unsteady'], + 'unstercorated': ['countertrades', 'unstercorated'], + 'unstern': ['stunner', 'unstern'], + 'unstocked': ['duckstone', 'unstocked'], + 'unstoic': ['cotinus', 'suction', 'unstoic'], + 'unstoical': ['suctional', 'sulcation', 'unstoical'], + 'unstop': ['pontus', 'unspot', 'unstop'], + 'unstopple': ['pulpstone', 'unstopple'], + 'unstore': ['snouter', 'tonsure', 'unstore'], + 'unstored': ['tonsured', 'unsorted', 'unstored'], + 'unstoried': ['detrusion', 'tinderous', 'unstoried'], + 'unstormed': ['undermost', 'unstormed'], + 'unstrain': ['insurant', 'unstrain'], + 'unstrained': ['understain', 'unstrained'], + 'unstraitened': ['unreinstated', 'unstraitened'], + 'unstranded': ['understand', 'unstranded'], + 'unstrewed': ['unstrewed', 'unwrested'], + 'unsucculent': ['centunculus', 'unsucculent'], + 'unsued': ['unsued', 'unused'], + 'unsusceptible': ['unsusceptible', 'unsuspectible'], + 'unsusceptive': ['unsusceptive', 'unsuspective'], + 'unsuspectible': ['unsusceptible', 'unsuspectible'], + 'unsuspective': ['unsusceptive', 'unsuspective'], + 'untactful': ['fluctuant', 'untactful'], + 'untailed': ['nidulate', 'untailed'], + 'untame': ['unmate', 'untame', 'unteam'], + 'untamed': ['unmated', 'untamed'], + 'untanned': ['nunnated', 'untanned'], + 'untap': ['punta', 'unapt', 'untap'], + 'untar': ['arnut', 'tuarn', 'untar'], + 'untastable': ['unstatable', 'untastable'], + 'untaste': ['tetanus', 'unstate', 'untaste'], + 'untasteable': ['unstateable', 'untasteable'], + 'untasted': ['unstated', 'untasted'], + 'untasting': ['unstating', 'untasting'], + 'untaught': ['taungthu', 'untaught'], + 'untaunted': ['unattuned', 'untaunted'], + 'unteach': ['uncheat', 'unteach'], + 'unteaching': ['uncheating', 'unteaching'], + 'unteam': ['unmate', 'untame', 'unteam'], + 'unteamed': ['unmeated', 'unteamed'], + 'unteased': ['unseated', 'unsedate', 'unteased'], + 'unteem': ['unmeet', 'unteem'], + 'untemper': ['erumpent', 'untemper'], + 'untender': ['unrented', 'untender'], + 'untented': ['unnetted', 'untented'], + 'unthatch': ['nuthatch', 'unthatch'], + 'unthick': ['kutchin', 'unthick'], + 'unthrall': ['turnhall', 'unthrall'], + 'untiaraed': ['diuranate', 'untiaraed'], + 'untidy': ['nudity', 'untidy'], + 
'untie': ['intue', 'unite', 'untie'], + 'untied': ['dunite', 'united', 'untied'], + 'until': ['unlit', 'until'], + 'untile': ['lutein', 'untile'], + 'untiled': ['diluent', 'untiled'], + 'untilted': ['dilutent', 'untilted', 'untitled'], + 'untimely': ['minutely', 'untimely'], + 'untin': ['nintu', 'ninut', 'untin'], + 'untine': ['ineunt', 'untine'], + 'untinseled': ['unenlisted', 'unlistened', 'untinseled'], + 'untirable': ['untirable', 'untriable'], + 'untire': ['runite', 'triune', 'uniter', 'untire'], + 'untired': ['intrude', 'turdine', 'untired', 'untried'], + 'untithable': ['unhittable', 'untithable'], + 'untitled': ['dilutent', 'untilted', 'untitled'], + 'unto': ['tuno', 'unto'], + 'untoiled': ['outlined', 'untoiled'], + 'untoned': ['unnoted', 'untoned'], + 'untooled': ['unlooted', 'untooled'], + 'untop': ['punto', 'unpot', 'untop'], + 'untorn': ['tunnor', 'untorn'], + 'untortured': ['undertutor', 'untortured'], + 'untotalled': ['unallotted', 'untotalled'], + 'untouch': ['uncouth', 'untouch'], + 'untoured': ['unrouted', 'untoured'], + 'untrace': ['centaur', 'untrace'], + 'untraceable': ['uncreatable', 'untraceable'], + 'untraceableness': ['uncreatableness', 'untraceableness'], + 'untraced': ['uncarted', 'uncrated', 'underact', 'untraced'], + 'untraceried': ['antireducer', 'reincrudate', 'untraceried'], + 'untradeable': ['untradeable', 'untreadable'], + 'untrain': ['antirun', 'untrain', 'urinant'], + 'untread': ['daunter', 'unarted', 'unrated', 'untread'], + 'untreadable': ['untradeable', 'untreadable'], + 'untreatable': ['entablature', 'untreatable'], + 'untreed': ['denture', 'untreed'], + 'untriable': ['untirable', 'untriable'], + 'untribal': ['tribunal', 'turbinal', 'untribal'], + 'untriced': ['undirect', 'untriced'], + 'untried': ['intrude', 'turdine', 'untired', 'untried'], + 'untrig': ['ungirt', 'untrig'], + 'untrod': ['rotund', 'untrod'], + 'untropical': ['ponticular', 'untropical'], + 'untroubled': ['outblunder', 'untroubled'], + 'untrowed': ['undertow', 'untrowed'], + 'untruss': ['sturnus', 'untruss'], + 'untutored': ['outturned', 'untutored'], + 'unurn': ['unrun', 'unurn'], + 'unused': ['unsued', 'unused'], + 'unusurped': ['unpursued', 'unusurped'], + 'unusurping': ['unpursuing', 'unusurping'], + 'unvailable': ['invaluable', 'unvailable'], + 'unveering': ['unreeving', 'unveering'], + 'unveil': ['unevil', 'unlive', 'unveil'], + 'unveiled': ['unlevied', 'unveiled'], + 'unveined': ['unenvied', 'unveined'], + 'unvenerated': ['unenervated', 'unvenerated'], + 'unveniable': ['unenviable', 'unveniable'], + 'unveritable': ['unavertible', 'unveritable'], + 'unversed': ['unserved', 'unversed'], + 'unvessel': ['unvessel', 'usselven'], + 'unvest': ['unvest', 'venust'], + 'unvomited': ['unmotived', 'unvomited'], + 'unwagered': ['underwage', 'unwagered'], + 'unwan': ['unwan', 'wunna'], + 'unware': ['unware', 'wauner'], + 'unwarp': ['unwarp', 'unwrap'], + 'unwary': ['runway', 'unwary'], + 'unwavered': ['underwave', 'unwavered'], + 'unwept': ['unwept', 'upwent'], + 'unwon': ['unown', 'unwon'], + 'unwrap': ['unwarp', 'unwrap'], + 'unwrested': ['unstrewed', 'unwrested'], + 'unwronged': ['undergown', 'unwronged'], + 'unyoung': ['unyoung', 'youngun'], + 'unze': ['unze', 'zenu'], + 'up': ['pu', 'up'], + 'uparching': ['ungraphic', 'uparching'], + 'uparise': ['spuriae', 'uparise', 'upraise'], + 'uparna': ['purana', 'uparna'], + 'upas': ['apus', 'supa', 'upas'], + 'upblast': ['subplat', 'upblast'], + 'upblow': ['blowup', 'upblow'], + 'upbreak': ['breakup', 'upbreak'], + 'upbuild': ['buildup', 
'upbuild'], + 'upcast': ['catsup', 'upcast'], + 'upcatch': ['catchup', 'upcatch'], + 'upclimb': ['plumbic', 'upclimb'], + 'upclose': ['culpose', 'ploceus', 'upclose'], + 'upcock': ['cockup', 'upcock'], + 'upcoil': ['oilcup', 'upcoil'], + 'upcourse': ['cupreous', 'upcourse'], + 'upcover': ['overcup', 'upcover'], + 'upcreep': ['prepuce', 'upcreep'], + 'upcurrent': ['puncturer', 'upcurrent'], + 'upcut': ['cutup', 'upcut'], + 'updo': ['doup', 'updo'], + 'updraw': ['updraw', 'upward'], + 'updry': ['prudy', 'purdy', 'updry'], + 'upeat': ['taupe', 'upeat'], + 'upflare': ['rapeful', 'upflare'], + 'upflower': ['powerful', 'upflower'], + 'upgale': ['plague', 'upgale'], + 'upget': ['getup', 'upget'], + 'upgirt': ['ripgut', 'upgirt'], + 'upgo': ['goup', 'ogpu', 'upgo'], + 'upgrade': ['guepard', 'upgrade'], + 'uphelm': ['phleum', 'uphelm'], + 'uphold': ['holdup', 'uphold'], + 'upholder': ['reuphold', 'upholder'], + 'upholsterer': ['reupholster', 'upholsterer'], + 'upla': ['paul', 'upla'], + 'upland': ['dunlap', 'upland'], + 'uplander': ['pendular', 'underlap', 'uplander'], + 'uplane': ['unpale', 'uplane'], + 'upleap': ['papule', 'upleap'], + 'uplift': ['tipful', 'uplift'], + 'uplifter': ['reuplift', 'uplifter'], + 'upline': ['lupine', 'unpile', 'upline'], + 'uplock': ['lockup', 'uplock'], + 'upon': ['noup', 'puno', 'upon'], + 'uppers': ['supper', 'uppers'], + 'uppish': ['hippus', 'uppish'], + 'upraise': ['spuriae', 'uparise', 'upraise'], + 'uprear': ['parure', 'uprear'], + 'uprein': ['purine', 'unripe', 'uprein'], + 'uprip': ['ripup', 'uprip'], + 'uprisal': ['parulis', 'spirula', 'uprisal'], + 'uprisement': ['episternum', 'uprisement'], + 'upriser': ['siruper', 'upriser'], + 'uprist': ['purist', 'spruit', 'uprist', 'upstir'], + 'uproad': ['podura', 'uproad'], + 'uproom': ['moorup', 'uproom'], + 'uprose': ['poseur', 'pouser', 'souper', 'uprose'], + 'upscale': ['capsule', 'specula', 'upscale'], + 'upseal': ['apulse', 'upseal'], + 'upsend': ['unsped', 'upsend'], + 'upset': ['setup', 'stupe', 'upset'], + 'upsettable': ['subpeltate', 'upsettable'], + 'upsetter': ['upsetter', 'upstreet'], + 'upshore': ['ephorus', 'orpheus', 'upshore'], + 'upshot': ['tophus', 'upshot'], + 'upshut': ['pushtu', 'upshut'], + 'upsilon': ['pulsion', 'unspoil', 'upsilon'], + 'upsit': ['puist', 'upsit'], + 'upslant': ['pulsant', 'upslant'], + 'upsmite': ['impetus', 'upsmite'], + 'upsoar': ['parous', 'upsoar'], + 'upstair': ['tapirus', 'upstair'], + 'upstand': ['dustpan', 'upstand'], + 'upstare': ['pasteur', 'pasture', 'upstare'], + 'upstater': ['stuprate', 'upstater'], + 'upsteal': ['pulsate', 'spatule', 'upsteal'], + 'upstem': ['septum', 'upstem'], + 'upstir': ['purist', 'spruit', 'uprist', 'upstir'], + 'upstraight': ['straightup', 'upstraight'], + 'upstreet': ['upsetter', 'upstreet'], + 'upstrive': ['spurtive', 'upstrive'], + 'upsun': ['sunup', 'upsun'], + 'upsway': ['upsway', 'upways'], + 'uptake': ['ketupa', 'uptake'], + 'uptend': ['pudent', 'uptend'], + 'uptilt': ['tiltup', 'uptilt'], + 'uptoss': ['tossup', 'uptoss'], + 'uptrace': ['capture', 'uptrace'], + 'uptrain': ['pintura', 'puritan', 'uptrain'], + 'uptree': ['repute', 'uptree'], + 'uptrend': ['prudent', 'prunted', 'uptrend'], + 'upturn': ['turnup', 'upturn'], + 'upward': ['updraw', 'upward'], + 'upwarp': ['upwarp', 'upwrap'], + 'upways': ['upsway', 'upways'], + 'upwent': ['unwept', 'upwent'], + 'upwind': ['upwind', 'windup'], + 'upwrap': ['upwarp', 'upwrap'], + 'ura': ['aru', 'rua', 'ura'], + 'uracil': ['curial', 'lauric', 'uracil', 'uralic'], + 'uraemic': ['maurice', 
'uraemic'], + 'uraeus': ['aureus', 'uraeus'], + 'ural': ['alur', 'laur', 'lura', 'raul', 'ural'], + 'urali': ['rauli', 'urali', 'urial'], + 'uralian': ['lunaria', 'ulnaria', 'uralian'], + 'uralic': ['curial', 'lauric', 'uracil', 'uralic'], + 'uralite': ['laurite', 'uralite'], + 'uralitize': ['ritualize', 'uralitize'], + 'uramido': ['doarium', 'uramido'], + 'uramil': ['rimula', 'uramil'], + 'uramino': ['mainour', 'uramino'], + 'uran': ['raun', 'uran', 'urna'], + 'uranate': ['taurean', 'uranate'], + 'urania': ['anuria', 'urania'], + 'uranic': ['anuric', 'cinura', 'uranic'], + 'uranine': ['aneurin', 'uranine'], + 'uranism': ['surinam', 'uranism'], + 'uranite': ['ruinate', 'taurine', 'uranite', 'urinate'], + 'uranographist': ['guarantorship', 'uranographist'], + 'uranolite': ['outlinear', 'uranolite'], + 'uranoscope': ['oenocarpus', 'uranoscope'], + 'uranospinite': ['resupination', 'uranospinite'], + 'uranotil': ['rotulian', 'uranotil'], + 'uranous': ['anurous', 'uranous'], + 'uranyl': ['lunary', 'uranyl'], + 'uranylic': ['culinary', 'uranylic'], + 'urari': ['aurir', 'urari'], + 'urase': ['serau', 'urase'], + 'uratic': ['tauric', 'uratic', 'urtica'], + 'urazine': ['azurine', 'urazine'], + 'urban': ['buran', 'unbar', 'urban'], + 'urbane': ['eburna', 'unbare', 'unbear', 'urbane'], + 'urbanite': ['braunite', 'urbanite', 'urbinate'], + 'urbian': ['burian', 'urbian'], + 'urbification': ['rubification', 'urbification'], + 'urbify': ['rubify', 'urbify'], + 'urbinate': ['braunite', 'urbanite', 'urbinate'], + 'urceiform': ['eruciform', 'urceiform'], + 'urceole': ['urceole', 'urocele'], + 'urceolina': ['aleuronic', 'urceolina'], + 'urceolus': ['ulcerous', 'urceolus'], + 'urchin': ['unrich', 'urchin'], + 'urd': ['rud', 'urd'], + 'urde': ['duer', 'dure', 'rude', 'urde'], + 'urdee': ['redue', 'urdee'], + 'ure': ['rue', 'ure'], + 'ureal': ['alure', 'ureal'], + 'uredine': ['reindue', 'uredine'], + 'ureic': ['curie', 'ureic'], + 'uremia': ['aumrie', 'uremia'], + 'uremic': ['cerium', 'uremic'], + 'urena': ['urena', 'urnae'], + 'urent': ['enrut', 'tuner', 'urent'], + 'uresis': ['issuer', 'uresis'], + 'uretal': ['tulare', 'uretal'], + 'ureter': ['retrue', 'ureter'], + 'ureteropyelogram': ['pyeloureterogram', 'ureteropyelogram'], + 'urethan': ['haunter', 'nauther', 'unearth', 'unheart', 'urethan'], + 'urethrascope': ['heterocarpus', 'urethrascope'], + 'urethrocystitis': ['cystourethritis', 'urethrocystitis'], + 'urethylan': ['unearthly', 'urethylan'], + 'uretic': ['curite', 'teucri', 'uretic'], + 'urf': ['fur', 'urf'], + 'urge': ['grue', 'urge'], + 'urgent': ['gunter', 'gurnet', 'urgent'], + 'urger': ['regur', 'urger'], + 'uria': ['arui', 'uria'], + 'uriah': ['huari', 'uriah'], + 'urial': ['rauli', 'urali', 'urial'], + 'urian': ['aurin', 'urian'], + 'uric': ['cuir', 'uric'], + 'urinal': ['laurin', 'urinal'], + 'urinant': ['antirun', 'untrain', 'urinant'], + 'urinate': ['ruinate', 'taurine', 'uranite', 'urinate'], + 'urination': ['ruination', 'urination'], + 'urinator': ['ruinator', 'urinator'], + 'urine': ['inure', 'urine'], + 'urinogenitary': ['genitourinary', 'urinogenitary'], + 'urinous': ['ruinous', 'urinous'], + 'urinousness': ['ruinousness', 'urinousness'], + 'urite': ['urite', 'uteri'], + 'urlar': ['rural', 'urlar'], + 'urled': ['duler', 'urled'], + 'urling': ['ruling', 'urling'], + 'urman': ['muran', 'ruman', 'unarm', 'unram', 'urman'], + 'urn': ['run', 'urn'], + 'urna': ['raun', 'uran', 'urna'], + 'urnae': ['urena', 'urnae'], + 'urnal': ['lunar', 'ulnar', 'urnal'], + 'urnful': ['unfurl', 'urnful'], + 
'urning': ['unring', 'urning'], + 'uro': ['our', 'uro'], + 'urocele': ['urceole', 'urocele'], + 'urodela': ['roulade', 'urodela'], + 'urodelan': ['unloader', 'urodelan'], + 'urogaster': ['surrogate', 'urogaster'], + 'urogenital': ['regulation', 'urogenital'], + 'uroglena': ['lagunero', 'organule', 'uroglena'], + 'urolithic': ['ulotrichi', 'urolithic'], + 'urometer': ['outremer', 'urometer'], + 'uronic': ['cuorin', 'uronic'], + 'uropsile': ['perilous', 'uropsile'], + 'uroseptic': ['crepitous', 'euproctis', 'uroseptic'], + 'urosomatic': ['mortacious', 'urosomatic'], + 'urosteon': ['outsnore', 'urosteon'], + 'urosternite': ['tenuiroster', 'urosternite'], + 'urosthenic': ['cetorhinus', 'urosthenic'], + 'urostyle': ['elytrous', 'urostyle'], + 'urs': ['rus', 'sur', 'urs'], + 'ursa': ['rusa', 'saur', 'sura', 'ursa', 'usar'], + 'ursal': ['larus', 'sural', 'ursal'], + 'ursidae': ['residua', 'ursidae'], + 'ursine': ['insure', 'rusine', 'ursine'], + 'ursone': ['souren', 'unsore', 'ursone'], + 'ursuk': ['kurus', 'ursuk'], + 'ursula': ['laurus', 'ursula'], + 'urtica': ['tauric', 'uratic', 'urtica'], + 'urticales': ['sterculia', 'urticales'], + 'urticant': ['taciturn', 'urticant'], + 'urticose': ['citreous', 'urticose'], + 'usability': ['suability', 'usability'], + 'usable': ['suable', 'usable'], + 'usager': ['sauger', 'usager'], + 'usance': ['uncase', 'usance'], + 'usar': ['rusa', 'saur', 'sura', 'ursa', 'usar'], + 'usara': ['arusa', 'saura', 'usara'], + 'use': ['sue', 'use'], + 'usent': ['unset', 'usent'], + 'user': ['ruse', 'suer', 'sure', 'user'], + 'ush': ['shu', 'ush'], + 'ushabti': ['habitus', 'ushabti'], + 'ushak': ['kusha', 'shaku', 'ushak'], + 'usher': ['shure', 'usher'], + 'usitate': ['situate', 'usitate'], + 'usnic': ['incus', 'usnic'], + 'usque': ['equus', 'usque'], + 'usselven': ['unvessel', 'usselven'], + 'ust': ['stu', 'ust'], + 'uster': ['serut', 'strue', 'turse', 'uster'], + 'ustion': ['outsin', 'ustion'], + 'ustorious': ['sutorious', 'ustorious'], + 'ustulina': ['lutianus', 'nautilus', 'ustulina'], + 'usucaption': ['uncaptious', 'usucaption'], + 'usure': ['eurus', 'usure'], + 'usurper': ['pursuer', 'usurper'], + 'ut': ['tu', 'ut'], + 'uta': ['tau', 'tua', 'uta'], + 'utas': ['saut', 'tasu', 'utas'], + 'utch': ['chut', 'tchu', 'utch'], + 'ute': ['tue', 'ute'], + 'uteri': ['urite', 'uteri'], + 'uterine': ['neurite', 'retinue', 'reunite', 'uterine'], + 'uterocervical': ['overcirculate', 'uterocervical'], + 'uterus': ['suture', 'uterus'], + 'utile': ['luite', 'utile'], + 'utilizable': ['latibulize', 'utilizable'], + 'utopian': ['opuntia', 'utopian'], + 'utopism': ['positum', 'utopism'], + 'utopist': ['outspit', 'utopist'], + 'utricular': ['turricula', 'utricular'], + 'utriculosaccular': ['sacculoutricular', 'utriculosaccular'], + 'utrum': ['murut', 'utrum'], + 'utterer': ['reutter', 'utterer'], + 'uva': ['uva', 'vau'], + 'uval': ['ulva', 'uval'], + 'uveal': ['uveal', 'value'], + 'uviol': ['uviol', 'vouli'], + 'vacate': ['cavate', 'caveat', 'vacate'], + 'vacation': ['octavian', 'octavina', 'vacation'], + 'vacationer': ['acervation', 'vacationer'], + 'vaccinal': ['clavacin', 'vaccinal'], + 'vacillate': ['laticlave', 'vacillate'], + 'vacillation': ['cavillation', 'vacillation'], + 'vacuolate': ['autoclave', 'vacuolate'], + 'vade': ['dave', 'deva', 'vade', 'veda'], + 'vady': ['davy', 'vady'], + 'vage': ['gave', 'vage', 'vega'], + 'vagile': ['glaive', 'vagile'], + 'vaginant': ['navigant', 'vaginant'], + 'vaginate': ['navigate', 'vaginate'], + 'vaginoabdominal': ['abdominovaginal', 
'vaginoabdominal'], + 'vaginoperineal': ['perineovaginal', 'vaginoperineal'], + 'vaginovesical': ['vaginovesical', 'vesicovaginal'], + 'vaguity': ['gavyuti', 'vaguity'], + 'vai': ['iva', 'vai', 'via'], + 'vail': ['vail', 'vali', 'vial', 'vila'], + 'vain': ['ivan', 'vain', 'vina'], + 'vair': ['ravi', 'riva', 'vair', 'vari', 'vira'], + 'vakass': ['kavass', 'vakass'], + 'vale': ['lave', 'vale', 'veal', 'vela'], + 'valence': ['enclave', 'levance', 'valence'], + 'valencia': ['valencia', 'valiance'], + 'valent': ['levant', 'valent'], + 'valentine': ['levantine', 'valentine'], + 'valeria': ['reavail', 'valeria'], + 'valeriana': ['laverania', 'valeriana'], + 'valeric': ['caliver', 'caviler', 'claiver', 'clavier', 'valeric', 'velaric'], + 'valerie': ['realive', 'valerie'], + 'valerin': ['elinvar', 'ravelin', 'reanvil', 'valerin'], + 'valerone': ['overlean', 'valerone'], + 'valeryl': ['ravelly', 'valeryl'], + 'valeur': ['valeur', 'valuer'], + 'vali': ['vail', 'vali', 'vial', 'vila'], + 'valiance': ['valencia', 'valiance'], + 'valiant': ['latvian', 'valiant'], + 'valine': ['alevin', 'alvine', 'valine', 'veinal', 'venial', 'vineal'], + 'vallar': ['larval', 'vallar'], + 'vallidom': ['vallidom', 'villadom'], + 'vallota': ['lavolta', 'vallota'], + 'valonia': ['novalia', 'valonia'], + 'valor': ['valor', 'volar'], + 'valsa': ['salva', 'valsa', 'vasal'], + 'valse': ['salve', 'selva', 'slave', 'valse'], + 'value': ['uveal', 'value'], + 'valuer': ['valeur', 'valuer'], + 'vamper': ['revamp', 'vamper'], + 'vane': ['evan', 'nave', 'vane'], + 'vaned': ['daven', 'vaned'], + 'vangee': ['avenge', 'geneva', 'vangee'], + 'vangeli': ['leaving', 'vangeli'], + 'vanir': ['invar', 'ravin', 'vanir'], + 'vanisher': ['enravish', 'ravenish', 'vanisher'], + 'vansire': ['servian', 'vansire'], + 'vapid': ['pavid', 'vapid'], + 'vapidity': ['pavidity', 'vapidity'], + 'vaporarium': ['parovarium', 'vaporarium'], + 'vara': ['avar', 'vara'], + 'varan': ['navar', 'varan', 'varna'], + 'vare': ['aver', 'rave', 'vare', 'vera'], + 'varec': ['carve', 'crave', 'varec'], + 'vari': ['ravi', 'riva', 'vair', 'vari', 'vira'], + 'variate': ['variate', 'vateria'], + 'varices': ['varices', 'viscera'], + 'varicula': ['avicular', 'varicula'], + 'variegator': ['arrogative', 'variegator'], + 'varier': ['arrive', 'varier'], + 'varietal': ['lievaart', 'varietal'], + 'variola': ['ovarial', 'variola'], + 'various': ['saviour', 'various'], + 'varlet': ['travel', 'varlet'], + 'varletry': ['varletry', 'veratryl'], + 'varna': ['navar', 'varan', 'varna'], + 'varnish': ['shirvan', 'varnish'], + 'varnisher': ['revarnish', 'varnisher'], + 'varsha': ['avshar', 'varsha'], + 'vasal': ['salva', 'valsa', 'vasal'], + 'vase': ['aves', 'save', 'vase'], + 'vasoepididymostomy': ['epididymovasostomy', 'vasoepididymostomy'], + 'vat': ['tav', 'vat'], + 'vateria': ['variate', 'vateria'], + 'vaticide': ['cavitied', 'vaticide'], + 'vaticinate': ['inactivate', 'vaticinate'], + 'vaticination': ['inactivation', 'vaticination'], + 'vatter': ['tavert', 'vatter'], + 'vau': ['uva', 'vau'], + 'vaudois': ['avidous', 'vaudois'], + 'veal': ['lave', 'vale', 'veal', 'vela'], + 'vealer': ['laveer', 'leaver', 'reveal', 'vealer'], + 'vealiness': ['aliveness', 'vealiness'], + 'vealy': ['leavy', 'vealy'], + 'vector': ['covert', 'vector'], + 'veda': ['dave', 'deva', 'vade', 'veda'], + 'vedaic': ['advice', 'vedaic'], + 'vedaism': ['adevism', 'vedaism'], + 'vedana': ['nevada', 'vedana', 'venada'], + 'vedanta': ['vedanta', 'vetanda'], + 'vedantism': ['adventism', 'vedantism'], + 'vedantist': 
['adventist', 'vedantist'], + 'vedist': ['divest', 'vedist'], + 'vedro': ['dover', 'drove', 'vedro'], + 'vee': ['eve', 'vee'], + 'veen': ['even', 'neve', 'veen'], + 'veer': ['ever', 'reve', 'veer'], + 'veery': ['every', 'veery'], + 'vega': ['gave', 'vage', 'vega'], + 'vegasite': ['estivage', 'vegasite'], + 'vegetarian': ['renavigate', 'vegetarian'], + 'vei': ['vei', 'vie'], + 'veil': ['evil', 'levi', 'live', 'veil', 'vile', 'vlei'], + 'veiler': ['levier', 'relive', 'reveil', 'revile', 'veiler'], + 'veiltail': ['illative', 'veiltail'], + 'vein': ['vein', 'vine'], + 'veinal': ['alevin', 'alvine', 'valine', 'veinal', 'venial', 'vineal'], + 'veined': ['endive', 'envied', 'veined'], + 'veiner': ['enrive', 'envier', 'veiner', 'verine'], + 'veinless': ['evilness', 'liveness', 'veinless', 'vileness', 'vineless'], + 'veinlet': ['veinlet', 'vinelet'], + 'veinous': ['envious', 'niveous', 'veinous'], + 'veinstone': ['veinstone', 'vonsenite'], + 'veinwise': ['veinwise', 'vinewise'], + 'vela': ['lave', 'vale', 'veal', 'vela'], + 'velar': ['arvel', 'larve', 'laver', 'ravel', 'velar'], + 'velaric': ['caliver', 'caviler', 'claiver', 'clavier', 'valeric', 'velaric'], + 'velation': ['olivetan', 'velation'], + 'velic': ['clive', 'velic'], + 'veliform': ['overfilm', 'veliform'], + 'velitation': ['levitation', 'tonalitive', 'velitation'], + 'velo': ['levo', 'love', 'velo', 'vole'], + 'velte': ['elvet', 'velte'], + 'venada': ['nevada', 'vedana', 'venada'], + 'venal': ['elvan', 'navel', 'venal'], + 'venality': ['natively', 'venality'], + 'venatic': ['catvine', 'venatic'], + 'venation': ['innovate', 'venation'], + 'venator': ['rotanev', 'venator'], + 'venatorial': ['venatorial', 'venoatrial'], + 'vendace': ['devance', 'vendace'], + 'vender': ['revend', 'vender'], + 'veneer': ['evener', 'veneer'], + 'veneerer': ['reveneer', 'veneerer'], + 'veneralia': ['ravenelia', 'veneralia'], + 'venerant': ['revenant', 'venerant'], + 'venerate': ['enervate', 'venerate'], + 'veneration': ['enervation', 'veneration'], + 'venerative': ['enervative', 'venerative'], + 'venerator': ['enervator', 'renovater', 'venerator'], + 'venerer': ['renerve', 'venerer'], + 'veneres': ['sevener', 'veneres'], + 'veneti': ['veneti', 'venite'], + 'venetian': ['aventine', 'venetian'], + 'venial': ['alevin', 'alvine', 'valine', 'veinal', 'venial', 'vineal'], + 'venice': ['cevine', 'evince', 'venice'], + 'venie': ['nieve', 'venie'], + 'venite': ['veneti', 'venite'], + 'venoatrial': ['venatorial', 'venoatrial'], + 'venom': ['novem', 'venom'], + 'venosinal': ['slovenian', 'venosinal'], + 'venter': ['revent', 'venter'], + 'ventrad': ['ventrad', 'verdant'], + 'ventricose': ['convertise', 'ventricose'], + 'ventrine': ['inventer', 'reinvent', 'ventrine', 'vintener'], + 'ventrodorsad': ['dorsoventrad', 'ventrodorsad'], + 'ventrodorsal': ['dorsoventral', 'ventrodorsal'], + 'ventrodorsally': ['dorsoventrally', 'ventrodorsally'], + 'ventrolateral': ['lateroventral', 'ventrolateral'], + 'ventromedial': ['medioventral', 'ventromedial'], + 'ventromesal': ['mesoventral', 'ventromesal'], + 'venular': ['unravel', 'venular'], + 'venus': ['nevus', 'venus'], + 'venust': ['unvest', 'venust'], + 'venutian': ['unnative', 'venutian'], + 'vera': ['aver', 'rave', 'vare', 'vera'], + 'veraciousness': ['oversauciness', 'veraciousness'], + 'veratroidine': ['rederivation', 'veratroidine'], + 'veratrole': ['relevator', 'revelator', 'veratrole'], + 'veratryl': ['varletry', 'veratryl'], + 'verbal': ['barvel', 'blaver', 'verbal'], + 'verbality': ['verbality', 'veritably'], + 'verbatim': 
['ambivert', 'verbatim'], + 'verbena': ['enbrave', 'verbena'], + 'verberate': ['verberate', 'vertebrae'], + 'verbose': ['observe', 'obverse', 'verbose'], + 'verbosely': ['obversely', 'verbosely'], + 'verdant': ['ventrad', 'verdant'], + 'verdea': ['evader', 'verdea'], + 'verdelho': ['overheld', 'verdelho'], + 'verdin': ['driven', 'nervid', 'verdin'], + 'verditer': ['diverter', 'redivert', 'verditer'], + 'vergi': ['giver', 'vergi'], + 'veri': ['rive', 'veri', 'vier', 'vire'], + 'veridical': ['larvicide', 'veridical'], + 'veridicous': ['recidivous', 'veridicous'], + 'verily': ['livery', 'verily'], + 'verine': ['enrive', 'envier', 'veiner', 'verine'], + 'verism': ['verism', 'vermis'], + 'verist': ['stiver', 'strive', 'verist'], + 'veritable': ['avertible', 'veritable'], + 'veritably': ['verbality', 'veritably'], + 'vermian': ['minerva', 'vermian'], + 'verminal': ['minerval', 'verminal'], + 'vermis': ['verism', 'vermis'], + 'vernacularist': ['intervascular', 'vernacularist'], + 'vernal': ['nerval', 'vernal'], + 'vernation': ['nervation', 'vernation'], + 'vernicose': ['coversine', 'vernicose'], + 'vernine': ['innerve', 'nervine', 'vernine'], + 'veronese': ['overseen', 'veronese'], + 'veronica': ['corvinae', 'veronica'], + 'verpa': ['paver', 'verpa'], + 'verre': ['rever', 'verre'], + 'verrucous': ['recurvous', 'verrucous'], + 'verruga': ['gravure', 'verruga'], + 'versable': ['beslaver', 'servable', 'versable'], + 'versal': ['salver', 'serval', 'slaver', 'versal'], + 'versant': ['servant', 'versant'], + 'versate': ['evestar', 'versate'], + 'versation': ['overstain', 'servation', 'versation'], + 'verse': ['serve', 'sever', 'verse'], + 'verser': ['revers', 'server', 'verser'], + 'verset': ['revest', 'servet', 'sterve', 'verset', 'vester'], + 'versicule': ['reclusive', 'versicule'], + 'versine': ['inverse', 'versine'], + 'versioner': ['reversion', 'versioner'], + 'versionist': ['overinsist', 'versionist'], + 'verso': ['servo', 'verso'], + 'versta': ['starve', 'staver', 'strave', 'tavers', 'versta'], + 'vertebrae': ['verberate', 'vertebrae'], + 'vertebrocostal': ['costovertebral', 'vertebrocostal'], + 'vertebrosacral': ['sacrovertebral', 'vertebrosacral'], + 'vertebrosternal': ['sternovertebral', 'vertebrosternal'], + 'vertiginate': ['integrative', 'vertiginate', 'vinaigrette'], + 'vesicant': ['cistvaen', 'vesicant'], + 'vesicoabdominal': ['abdominovesical', 'vesicoabdominal'], + 'vesicocervical': ['cervicovesical', 'vesicocervical'], + 'vesicointestinal': ['intestinovesical', 'vesicointestinal'], + 'vesicorectal': ['rectovesical', 'vesicorectal'], + 'vesicovaginal': ['vaginovesical', 'vesicovaginal'], + 'vespa': ['spave', 'vespa'], + 'vespertine': ['presentive', 'pretensive', 'vespertine'], + 'vespine': ['pensive', 'vespine'], + 'vesta': ['stave', 'vesta'], + 'vestalia': ['salivate', 'vestalia'], + 'vestee': ['steeve', 'vestee'], + 'vester': ['revest', 'servet', 'sterve', 'verset', 'vester'], + 'vestibula': ['sublative', 'vestibula'], + 'veta': ['tave', 'veta'], + 'vetanda': ['vedanta', 'vetanda'], + 'veteran': ['nervate', 'veteran'], + 'veto': ['veto', 'voet', 'vote'], + 'vetoer': ['revote', 'vetoer'], + 'via': ['iva', 'vai', 'via'], + 'vial': ['vail', 'vali', 'vial', 'vila'], + 'vialful': ['fluavil', 'fluvial', 'vialful'], + 'viand': ['divan', 'viand'], + 'viander': ['invader', 'ravined', 'viander'], + 'viatic': ['avitic', 'viatic'], + 'viatica': ['aviatic', 'viatica'], + 'vibrate': ['vibrate', 'vrbaite'], + 'vicar': ['vicar', 'vraic'], + 'vice': ['cive', 'vice'], + 'vicegeral': ['vicegeral', 
'viceregal'], + 'viceregal': ['vicegeral', 'viceregal'], + 'victoriate': ['recitativo', 'victoriate'], + 'victrola': ['victrola', 'vortical'], + 'victualer': ['lucrative', 'revictual', 'victualer'], + 'vidonia': ['ovidian', 'vidonia'], + 'viduinae': ['induviae', 'viduinae'], + 'viduine': ['univied', 'viduine'], + 'vie': ['vei', 'vie'], + 'vienna': ['avenin', 'vienna'], + 'vier': ['rive', 'veri', 'vier', 'vire'], + 'vierling': ['reviling', 'vierling'], + 'view': ['view', 'wive'], + 'viewer': ['review', 'viewer'], + 'vigilante': ['genitival', 'vigilante'], + 'vigor': ['vigor', 'virgo'], + 'vila': ['vail', 'vali', 'vial', 'vila'], + 'vile': ['evil', 'levi', 'live', 'veil', 'vile', 'vlei'], + 'vilehearted': ['evilhearted', 'vilehearted'], + 'vilely': ['evilly', 'lively', 'vilely'], + 'vileness': ['evilness', 'liveness', 'veinless', 'vileness', 'vineless'], + 'villadom': ['vallidom', 'villadom'], + 'villeiness': ['liveliness', 'villeiness'], + 'villous': ['ovillus', 'villous'], + 'vimana': ['maniva', 'vimana'], + 'vina': ['ivan', 'vain', 'vina'], + 'vinaigrette': ['integrative', 'vertiginate', 'vinaigrette'], + 'vinaigrous': ['vinaigrous', 'viraginous'], + 'vinal': ['alvin', 'anvil', 'nival', 'vinal'], + 'vinalia': ['lavinia', 'vinalia'], + 'vinata': ['avanti', 'vinata'], + 'vinculate': ['vinculate', 'vulcanite'], + 'vine': ['vein', 'vine'], + 'vinea': ['avine', 'naive', 'vinea'], + 'vineal': ['alevin', 'alvine', 'valine', 'veinal', 'venial', 'vineal'], + 'vineatic': ['antivice', 'inactive', 'vineatic'], + 'vinegarist': ['gainstrive', 'vinegarist'], + 'vineless': ['evilness', 'liveness', 'veinless', 'vileness', 'vineless'], + 'vinelet': ['veinlet', 'vinelet'], + 'viner': ['riven', 'viner'], + 'vinewise': ['veinwise', 'vinewise'], + 'vinosity': ['nivosity', 'vinosity'], + 'vintener': ['inventer', 'reinvent', 'ventrine', 'vintener'], + 'vintneress': ['inventress', 'vintneress'], + 'viola': ['oliva', 'viola'], + 'violability': ['obliviality', 'violability'], + 'violaceous': ['olivaceous', 'violaceous'], + 'violanin': ['livonian', 'violanin'], + 'violational': ['avolitional', 'violational'], + 'violer': ['oliver', 'violer', 'virole'], + 'violescent': ['olivescent', 'violescent'], + 'violet': ['olivet', 'violet'], + 'violette': ['olivette', 'violette'], + 'violine': ['olivine', 'violine'], + 'vipera': ['pavier', 'vipera'], + 'viperess': ['pressive', 'viperess'], + 'viperian': ['viperian', 'viperina'], + 'viperina': ['viperian', 'viperina'], + 'viperous': ['pervious', 'previous', 'viperous'], + 'viperously': ['perviously', 'previously', 'viperously'], + 'viperousness': ['perviousness', 'previousness', 'viperousness'], + 'vira': ['ravi', 'riva', 'vair', 'vari', 'vira'], + 'viraginian': ['irvingiana', 'viraginian'], + 'viraginous': ['vinaigrous', 'viraginous'], + 'viral': ['rival', 'viral'], + 'virales': ['revisal', 'virales'], + 'vire': ['rive', 'veri', 'vier', 'vire'], + 'virent': ['invert', 'virent'], + 'virgate': ['virgate', 'vitrage'], + 'virgin': ['irving', 'riving', 'virgin'], + 'virginly': ['rivingly', 'virginly'], + 'virgo': ['vigor', 'virgo'], + 'virile': ['livier', 'virile'], + 'virole': ['oliver', 'violer', 'virole'], + 'virose': ['rivose', 'virose'], + 'virtual': ['virtual', 'vitular'], + 'virtuose': ['virtuose', 'vitreous'], + 'virulence': ['cervuline', 'virulence'], + 'visa': ['avis', 'siva', 'visa'], + 'viscera': ['varices', 'viscera'], + 'visceration': ['insectivora', 'visceration'], + 'visceroparietal': ['parietovisceral', 'visceroparietal'], + 'visceropleural': ['pleurovisceral', 
'visceropleural'], + 'viscometer': ['semivector', 'viscometer'], + 'viscontal': ['viscontal', 'volcanist'], + 'vishal': ['lavish', 'vishal'], + 'visioner': ['revision', 'visioner'], + 'visit': ['visit', 'vitis'], + 'visitant': ['nativist', 'visitant'], + 'visitee': ['evisite', 'visitee'], + 'visiter': ['revisit', 'visiter'], + 'visitor': ['ivorist', 'visitor'], + 'vistal': ['vistal', 'vitals'], + 'visto': ['ovist', 'visto'], + 'vitals': ['vistal', 'vitals'], + 'vitis': ['visit', 'vitis'], + 'vitochemical': ['chemicovital', 'vitochemical'], + 'vitrage': ['virgate', 'vitrage'], + 'vitrail': ['trivial', 'vitrail'], + 'vitrailist': ['trivialist', 'vitrailist'], + 'vitrain': ['vitrain', 'vitrina'], + 'vitrean': ['avertin', 'vitrean'], + 'vitreous': ['virtuose', 'vitreous'], + 'vitrina': ['vitrain', 'vitrina'], + 'vitrine': ['inviter', 'vitrine'], + 'vitrophyric': ['thyroprivic', 'vitrophyric'], + 'vitular': ['virtual', 'vitular'], + 'vituperate': ['reputative', 'vituperate'], + 'vlei': ['evil', 'levi', 'live', 'veil', 'vile', 'vlei'], + 'vocaller': ['overcall', 'vocaller'], + 'vocate': ['avocet', 'octave', 'vocate'], + 'voet': ['veto', 'voet', 'vote'], + 'voeten': ['voeten', 'voteen'], + 'vogue': ['vogue', 'vouge'], + 'voided': ['devoid', 'voided'], + 'voider': ['devoir', 'voider'], + 'voidless': ['dissolve', 'voidless'], + 'voile': ['olive', 'ovile', 'voile'], + 'volable': ['lovable', 'volable'], + 'volage': ['lovage', 'volage'], + 'volar': ['valor', 'volar'], + 'volata': ['tavola', 'volata'], + 'volatic': ['volatic', 'voltaic'], + 'volcae': ['alcove', 'coeval', 'volcae'], + 'volcanist': ['viscontal', 'volcanist'], + 'vole': ['levo', 'love', 'velo', 'vole'], + 'volery': ['overly', 'volery'], + 'volitate': ['volitate', 'voltaite'], + 'volley': ['lovely', 'volley'], + 'volscian': ['slavonic', 'volscian'], + 'volta': ['volta', 'votal'], + 'voltaic': ['volatic', 'voltaic'], + 'voltaite': ['volitate', 'voltaite'], + 'volucrine': ['involucre', 'volucrine'], + 'volunteerism': ['multinervose', 'volunteerism'], + 'vomer': ['mover', 'vomer'], + 'vomiter': ['revomit', 'vomiter'], + 'vonsenite': ['veinstone', 'vonsenite'], + 'vortical': ['victrola', 'vortical'], + 'votal': ['volta', 'votal'], + 'votary': ['travoy', 'votary'], + 'vote': ['veto', 'voet', 'vote'], + 'voteen': ['voeten', 'voteen'], + 'voter': ['overt', 'rovet', 'torve', 'trove', 'voter'], + 'vouge': ['vogue', 'vouge'], + 'vouli': ['uviol', 'vouli'], + 'vowed': ['devow', 'vowed'], + 'vowel': ['vowel', 'wolve'], + 'vraic': ['vicar', 'vraic'], + 'vrbaite': ['vibrate', 'vrbaite'], + 'vulcanite': ['vinculate', 'vulcanite'], + 'vulnerose': ['nervulose', 'unresolve', 'vulnerose'], + 'vulpic': ['pulvic', 'vulpic'], + 'vulpine': ['pluvine', 'vulpine'], + 'wa': ['aw', 'wa'], + 'waag': ['awag', 'waag'], + 'waasi': ['isawa', 'waasi'], + 'wab': ['baw', 'wab'], + 'wabi': ['biwa', 'wabi'], + 'wabster': ['bestraw', 'wabster'], + 'wac': ['caw', 'wac'], + 'wachna': ['chawan', 'chwana', 'wachna'], + 'wack': ['cawk', 'wack'], + 'wacker': ['awreck', 'wacker'], + 'wacky': ['cawky', 'wacky'], + 'wad': ['awd', 'daw', 'wad'], + 'wadder': ['edward', 'wadder', 'warded'], + 'waddler': ['dawdler', 'waddler'], + 'waddling': ['dawdling', 'waddling'], + 'waddlingly': ['dawdlingly', 'waddlingly'], + 'waddy': ['dawdy', 'waddy'], + 'wadna': ['adawn', 'wadna'], + 'wadset': ['wadset', 'wasted'], + 'wae': ['awe', 'wae', 'wea'], + 'waeg': ['waeg', 'wage', 'wega'], + 'waer': ['waer', 'ware', 'wear'], + 'waesome': ['awesome', 'waesome'], + 'wag': ['gaw', 'wag'], + 'wage': ['waeg', 
'wage', 'wega'], + 'wagerer': ['rewager', 'wagerer'], + 'wages': ['swage', 'wages'], + 'waggel': ['waggel', 'waggle'], + 'waggle': ['waggel', 'waggle'], + 'wagnerite': ['wagnerite', 'winterage'], + 'wagon': ['gowan', 'wagon', 'wonga'], + 'wah': ['haw', 'hwa', 'wah', 'wha'], + 'wahehe': ['heehaw', 'wahehe'], + 'wail': ['wail', 'wali'], + 'wailer': ['lawrie', 'wailer'], + 'wain': ['awin', 'wain'], + 'wainer': ['newari', 'wainer'], + 'wairsh': ['rawish', 'wairsh', 'warish'], + 'waist': ['swati', 'waist'], + 'waister': ['swertia', 'waister'], + 'waiterage': ['garewaite', 'waiterage'], + 'waitress': ['starwise', 'waitress'], + 'waiwai': ['iwaiwa', 'waiwai'], + 'wake': ['wake', 'weak', 'weka'], + 'wakener': ['rewaken', 'wakener'], + 'waker': ['waker', 'wreak'], + 'wakes': ['askew', 'wakes'], + 'wale': ['wale', 'weal'], + 'waled': ['dwale', 'waled', 'weald'], + 'waler': ['lerwa', 'waler'], + 'wali': ['wail', 'wali'], + 'waling': ['lawing', 'waling'], + 'walk': ['lawk', 'walk'], + 'walkout': ['outwalk', 'walkout'], + 'walkover': ['overwalk', 'walkover'], + 'walkside': ['sidewalk', 'walkside'], + 'waller': ['rewall', 'waller'], + 'wallet': ['wallet', 'wellat'], + 'wallhick': ['hickwall', 'wallhick'], + 'walloper': ['preallow', 'walloper'], + 'wallower': ['rewallow', 'wallower'], + 'walsh': ['shawl', 'walsh'], + 'walt': ['twal', 'walt'], + 'walter': ['lawter', 'walter'], + 'wame': ['wame', 'weam'], + 'wamp': ['mawp', 'wamp'], + 'wan': ['awn', 'naw', 'wan'], + 'wand': ['dawn', 'wand'], + 'wander': ['andrew', 'redawn', 'wander', 'warden'], + 'wandle': ['delawn', 'lawned', 'wandle'], + 'wandlike': ['dawnlike', 'wandlike'], + 'wandy': ['dawny', 'wandy'], + 'wane': ['anew', 'wane', 'wean'], + 'waned': ['awned', 'dewan', 'waned'], + 'wang': ['gawn', 'gnaw', 'wang'], + 'wanghee': ['wanghee', 'whangee'], + 'wangler': ['wangler', 'wrangle'], + 'waning': ['awning', 'waning'], + 'wankle': ['knawel', 'wankle'], + 'wanly': ['lawny', 'wanly'], + 'want': ['nawt', 'tawn', 'want'], + 'wanty': ['tawny', 'wanty'], + 'wany': ['awny', 'wany', 'yawn'], + 'wap': ['paw', 'wap'], + 'war': ['raw', 'war'], + 'warble': ['bawler', 'brelaw', 'rebawl', 'warble'], + 'warbler': ['brawler', 'warbler'], + 'warbling': ['brawling', 'warbling'], + 'warblingly': ['brawlingly', 'warblingly'], + 'warbly': ['brawly', 'byrlaw', 'warbly'], + 'ward': ['draw', 'ward'], + 'wardable': ['drawable', 'wardable'], + 'warded': ['edward', 'wadder', 'warded'], + 'warden': ['andrew', 'redawn', 'wander', 'warden'], + 'warder': ['drawer', 'redraw', 'reward', 'warder'], + 'warderer': ['redrawer', 'rewarder', 'warderer'], + 'warding': ['drawing', 'ginward', 'warding'], + 'wardman': ['manward', 'wardman'], + 'wardmote': ['damewort', 'wardmote'], + 'wardrobe': ['drawbore', 'wardrobe'], + 'wardroom': ['roomward', 'wardroom'], + 'wardship': ['shipward', 'wardship'], + 'wardsman': ['manwards', 'wardsman'], + 'ware': ['waer', 'ware', 'wear'], + 'warehouse': ['housewear', 'warehouse'], + 'warf': ['warf', 'wraf'], + 'warish': ['rawish', 'wairsh', 'warish'], + 'warlock': ['lacwork', 'warlock'], + 'warmed': ['meward', 'warmed'], + 'warmer': ['rewarm', 'warmer'], + 'warmhouse': ['housewarm', 'warmhouse'], + 'warmish': ['warmish', 'wishram'], + 'warn': ['warn', 'wran'], + 'warnel': ['lawner', 'warnel'], + 'warner': ['rewarn', 'warner', 'warren'], + 'warp': ['warp', 'wrap'], + 'warper': ['prewar', 'rewrap', 'warper'], + 'warree': ['rewear', 'warree', 'wearer'], + 'warren': ['rewarn', 'warner', 'warren'], + 'warri': ['warri', 'wirra'], + 'warse': ['resaw', 'sawer', 
'seraw', 'sware', 'swear', 'warse'], + 'warsel': ['swaler', 'warsel', 'warsle'], + 'warsle': ['swaler', 'warsel', 'warsle'], + 'warst': ['straw', 'swart', 'warst'], + 'warth': ['thraw', 'warth', 'whart', 'wrath'], + 'warua': ['warua', 'waura'], + 'warve': ['warve', 'waver'], + 'wary': ['awry', 'wary'], + 'was': ['saw', 'swa', 'was'], + 'wasel': ['swale', 'sweal', 'wasel'], + 'wash': ['shaw', 'wash'], + 'washen': ['washen', 'whenas'], + 'washer': ['hawser', 'rewash', 'washer'], + 'washington': ['nowanights', 'washington'], + 'washland': ['landwash', 'washland'], + 'washoan': ['shawano', 'washoan'], + 'washout': ['outwash', 'washout'], + 'washy': ['shawy', 'washy'], + 'wasnt': ['stawn', 'wasnt'], + 'wasp': ['swap', 'wasp'], + 'waspily': ['slipway', 'waspily'], + 'wast': ['sawt', 'staw', 'swat', 'taws', 'twas', 'wast'], + 'waste': ['awest', 'sweat', 'tawse', 'waste'], + 'wasted': ['wadset', 'wasted'], + 'wasteful': ['sweatful', 'wasteful'], + 'wasteless': ['sweatless', 'wasteless'], + 'wasteproof': ['sweatproof', 'wasteproof'], + 'wastrel': ['wastrel', 'wrastle'], + 'wat': ['taw', 'twa', 'wat'], + 'watchdog': ['dogwatch', 'watchdog'], + 'watchout': ['outwatch', 'watchout'], + 'water': ['tawer', 'water', 'wreat'], + 'waterbrain': ['brainwater', 'waterbrain'], + 'watered': ['dewater', 'tarweed', 'watered'], + 'waterer': ['rewater', 'waterer'], + 'waterflood': ['floodwater', 'toadflower', 'waterflood'], + 'waterhead': ['headwater', 'waterhead'], + 'wateriness': ['earwitness', 'wateriness'], + 'waterlog': ['galewort', 'waterlog'], + 'watershed': ['drawsheet', 'watershed'], + 'watery': ['tawery', 'watery'], + 'wath': ['thaw', 'wath', 'what'], + 'watt': ['twat', 'watt'], + 'wauf': ['awfu', 'wauf'], + 'wauner': ['unware', 'wauner'], + 'waura': ['warua', 'waura'], + 'waver': ['warve', 'waver'], + 'waxer': ['rewax', 'waxer'], + 'way': ['way', 'yaw'], + 'wayback': ['backway', 'wayback'], + 'waygang': ['gangway', 'waygang'], + 'waygate': ['gateway', 'getaway', 'waygate'], + 'wayman': ['manway', 'wayman'], + 'ways': ['sway', 'ways', 'yaws'], + 'wayside': ['sideway', 'wayside'], + 'wea': ['awe', 'wae', 'wea'], + 'weak': ['wake', 'weak', 'weka'], + 'weakener': ['reweaken', 'weakener'], + 'weakliness': ['weakliness', 'weaselskin'], + 'weal': ['wale', 'weal'], + 'weald': ['dwale', 'waled', 'weald'], + 'weam': ['wame', 'weam'], + 'wean': ['anew', 'wane', 'wean'], + 'weanel': ['leewan', 'weanel'], + 'wear': ['waer', 'ware', 'wear'], + 'wearer': ['rewear', 'warree', 'wearer'], + 'weasand': ['sandawe', 'weasand'], + 'weaselskin': ['weakliness', 'weaselskin'], + 'weather': ['weather', 'whereat', 'wreathe'], + 'weathered': ['heartweed', 'weathered'], + 'weaver': ['rewave', 'weaver'], + 'webster': ['bestrew', 'webster'], + 'wed': ['dew', 'wed'], + 'wede': ['wede', 'weed'], + 'wedge': ['gweed', 'wedge'], + 'wedger': ['edgrew', 'wedger'], + 'wedset': ['stewed', 'wedset'], + 'wee': ['ewe', 'wee'], + 'weed': ['wede', 'weed'], + 'weedhook': ['hookweed', 'weedhook'], + 'weedy': ['dewey', 'weedy'], + 'ween': ['ween', 'wene'], + 'weeps': ['sweep', 'weeps'], + 'weet': ['twee', 'weet'], + 'wega': ['waeg', 'wage', 'wega'], + 'wegotism': ['twigsome', 'wegotism'], + 'weigher': ['reweigh', 'weigher'], + 'weir': ['weir', 'weri', 'wire'], + 'weirangle': ['weirangle', 'wierangle'], + 'weird': ['weird', 'wired', 'wride', 'wried'], + 'weka': ['wake', 'weak', 'weka'], + 'weld': ['lewd', 'weld'], + 'weldable': ['ballweed', 'weldable'], + 'welder': ['reweld', 'welder'], + 'weldor': ['lowder', 'weldor', 'wordle'], + 'welf': ['flew', 
'welf'], + 'welkin': ['welkin', 'winkel', 'winkle'], + 'well': ['llew', 'well'], + 'wellat': ['wallet', 'wellat'], + 'wels': ['slew', 'wels'], + 'welshry': ['shrewly', 'welshry'], + 'welting': ['twingle', 'welting', 'winglet'], + 'wem': ['mew', 'wem'], + 'wen': ['new', 'wen'], + 'wende': ['endew', 'wende'], + 'wendi': ['dwine', 'edwin', 'wendi', 'widen', 'wined'], + 'wene': ['ween', 'wene'], + 'went': ['newt', 'went'], + 'were': ['ewer', 'were'], + 'weri': ['weir', 'weri', 'wire'], + 'werther': ['werther', 'wherret'], + 'wes': ['sew', 'wes'], + 'weskit': ['weskit', 'wisket'], + 'west': ['stew', 'west'], + 'weste': ['sweet', 'weste'], + 'westering': ['swingtree', 'westering'], + 'westy': ['stewy', 'westy'], + 'wet': ['tew', 'wet'], + 'weta': ['tewa', 'twae', 'weta'], + 'wetly': ['tewly', 'wetly'], + 'wey': ['wey', 'wye', 'yew'], + 'wha': ['haw', 'hwa', 'wah', 'wha'], + 'whack': ['chawk', 'whack'], + 'whale': ['whale', 'wheal'], + 'wham': ['hawm', 'wham'], + 'whame': ['whame', 'wheam'], + 'whangee': ['wanghee', 'whangee'], + 'whare': ['hawer', 'whare'], + 'whart': ['thraw', 'warth', 'whart', 'wrath'], + 'whase': ['hawse', 'shewa', 'whase'], + 'what': ['thaw', 'wath', 'what'], + 'whatreck': ['thwacker', 'whatreck'], + 'whats': ['swath', 'whats'], + 'wheal': ['whale', 'wheal'], + 'wheam': ['whame', 'wheam'], + 'wheat': ['awhet', 'wheat'], + 'wheatear': ['aweather', 'wheatear'], + 'wheedle': ['wheedle', 'wheeled'], + 'wheel': ['hewel', 'wheel'], + 'wheeled': ['wheedle', 'wheeled'], + 'wheelroad': ['rowelhead', 'wheelroad'], + 'wheer': ['hewer', 'wheer', 'where'], + 'whein': ['whein', 'whine'], + 'when': ['hewn', 'when'], + 'whenas': ['washen', 'whenas'], + 'whenso': ['whenso', 'whosen'], + 'where': ['hewer', 'wheer', 'where'], + 'whereat': ['weather', 'whereat', 'wreathe'], + 'whereon': ['nowhere', 'whereon'], + 'wherret': ['werther', 'wherret'], + 'wherrit': ['wherrit', 'whirret', 'writher'], + 'whet': ['hewt', 'thew', 'whet'], + 'whichever': ['everwhich', 'whichever'], + 'whicken': ['chewink', 'whicken'], + 'whilter': ['whilter', 'whirtle'], + 'whine': ['whein', 'whine'], + 'whipper': ['prewhip', 'whipper'], + 'whirler': ['rewhirl', 'whirler'], + 'whirret': ['wherrit', 'whirret', 'writher'], + 'whirtle': ['whilter', 'whirtle'], + 'whisperer': ['rewhisper', 'whisperer'], + 'whisson': ['snowish', 'whisson'], + 'whist': ['swith', 'whist', 'whits', 'wisht'], + 'whister': ['swither', 'whister', 'withers'], + 'whit': ['whit', 'with'], + 'white': ['white', 'withe'], + 'whiten': ['whiten', 'withen'], + 'whitener': ['rewhiten', 'whitener'], + 'whitepot': ['whitepot', 'whitetop'], + 'whites': ['swithe', 'whites'], + 'whitetop': ['whitepot', 'whitetop'], + 'whitewood': ['whitewood', 'withewood'], + 'whitleather': ['therewithal', 'whitleather'], + 'whitmanese': ['anthemwise', 'whitmanese'], + 'whits': ['swith', 'whist', 'whits', 'wisht'], + 'whity': ['whity', 'withy'], + 'who': ['how', 'who'], + 'whoever': ['everwho', 'however', 'whoever'], + 'whole': ['howel', 'whole'], + 'whomsoever': ['howsomever', 'whomsoever', 'whosomever'], + 'whoreship': ['horsewhip', 'whoreship'], + 'whort': ['throw', 'whort', 'worth', 'wroth'], + 'whosen': ['whenso', 'whosen'], + 'whosomever': ['howsomever', 'whomsoever', 'whosomever'], + 'wicht': ['tchwi', 'wicht', 'witch'], + 'widdle': ['widdle', 'wilded'], + 'widely': ['dewily', 'widely', 'wieldy'], + 'widen': ['dwine', 'edwin', 'wendi', 'widen', 'wined'], + 'widener': ['rewiden', 'widener'], + 'wideness': ['dewiness', 'wideness'], + 'widgeon': ['gowdnie', 'widgeon'], + 
'wieldy': ['dewily', 'widely', 'wieldy'], + 'wierangle': ['weirangle', 'wierangle'], + 'wigan': ['awing', 'wigan'], + 'wiggler': ['wiggler', 'wriggle'], + 'wilded': ['widdle', 'wilded'], + 'wildness': ['wildness', 'windless'], + 'winchester': ['trenchwise', 'winchester'], + 'windbreak': ['breakwind', 'windbreak'], + 'winder': ['rewind', 'winder'], + 'windgall': ['dingwall', 'windgall'], + 'windles': ['swindle', 'windles'], + 'windless': ['wildness', 'windless'], + 'windstorm': ['stormwind', 'windstorm'], + 'windup': ['upwind', 'windup'], + 'wined': ['dwine', 'edwin', 'wendi', 'widen', 'wined'], + 'winer': ['erwin', 'rewin', 'winer'], + 'winglet': ['twingle', 'welting', 'winglet'], + 'winkel': ['welkin', 'winkel', 'winkle'], + 'winkle': ['welkin', 'winkel', 'winkle'], + 'winklet': ['twinkle', 'winklet'], + 'winnard': ['indrawn', 'winnard'], + 'winnel': ['winnel', 'winnle'], + 'winnle': ['winnel', 'winnle'], + 'winsome': ['owenism', 'winsome'], + 'wint': ['twin', 'wint'], + 'winter': ['twiner', 'winter'], + 'winterage': ['wagnerite', 'winterage'], + 'wintered': ['interwed', 'wintered'], + 'winterish': ['interwish', 'winterish'], + 'winze': ['winze', 'wizen'], + 'wips': ['wips', 'wisp'], + 'wire': ['weir', 'weri', 'wire'], + 'wired': ['weird', 'wired', 'wride', 'wried'], + 'wirer': ['wirer', 'wrier'], + 'wirra': ['warri', 'wirra'], + 'wiselike': ['likewise', 'wiselike'], + 'wiseman': ['manwise', 'wiseman'], + 'wisen': ['sinew', 'swine', 'wisen'], + 'wiser': ['swire', 'wiser'], + 'wisewoman': ['wisewoman', 'womanwise'], + 'wisher': ['rewish', 'wisher'], + 'wishmay': ['wishmay', 'yahwism'], + 'wishram': ['warmish', 'wishram'], + 'wisht': ['swith', 'whist', 'whits', 'wisht'], + 'wisket': ['weskit', 'wisket'], + 'wisp': ['wips', 'wisp'], + 'wispy': ['swipy', 'wispy'], + 'wit': ['twi', 'wit'], + 'witan': ['atwin', 'twain', 'witan'], + 'witch': ['tchwi', 'wicht', 'witch'], + 'witchetty': ['twitchety', 'witchetty'], + 'with': ['whit', 'with'], + 'withdrawer': ['rewithdraw', 'withdrawer'], + 'withe': ['white', 'withe'], + 'withen': ['whiten', 'withen'], + 'wither': ['wither', 'writhe'], + 'withered': ['redwithe', 'withered'], + 'withering': ['withering', 'wrightine'], + 'withers': ['swither', 'whister', 'withers'], + 'withewood': ['whitewood', 'withewood'], + 'within': ['inwith', 'within'], + 'without': ['outwith', 'without'], + 'withy': ['whity', 'withy'], + 'wive': ['view', 'wive'], + 'wiver': ['wiver', 'wrive'], + 'wizen': ['winze', 'wizen'], + 'wo': ['ow', 'wo'], + 'woader': ['redowa', 'woader'], + 'wob': ['bow', 'wob'], + 'wod': ['dow', 'owd', 'wod'], + 'woe': ['owe', 'woe'], + 'woibe': ['bowie', 'woibe'], + 'wold': ['dowl', 'wold'], + 'wolf': ['flow', 'fowl', 'wolf'], + 'wolfer': ['flower', 'fowler', 'reflow', 'wolfer'], + 'wolter': ['rowlet', 'trowel', 'wolter'], + 'wolve': ['vowel', 'wolve'], + 'womanpost': ['postwoman', 'womanpost'], + 'womanwise': ['wisewoman', 'womanwise'], + 'won': ['now', 'own', 'won'], + 'wonder': ['downer', 'wonder', 'worden'], + 'wonderful': ['underflow', 'wonderful'], + 'wone': ['enow', 'owen', 'wone'], + 'wong': ['gown', 'wong'], + 'wonga': ['gowan', 'wagon', 'wonga'], + 'wonner': ['renown', 'wonner'], + 'wont': ['nowt', 'town', 'wont'], + 'wonted': ['towned', 'wonted'], + 'woodbark': ['bookward', 'woodbark'], + 'woodbind': ['bindwood', 'woodbind'], + 'woodbush': ['bushwood', 'woodbush'], + 'woodchat': ['chatwood', 'woodchat'], + 'wooden': ['enwood', 'wooden'], + 'woodfish': ['fishwood', 'woodfish'], + 'woodhack': ['hackwood', 'woodhack'], + 'woodhorse': 
['horsewood', 'woodhorse'], + 'woodness': ['sowdones', 'woodness'], + 'woodpecker': ['peckerwood', 'woodpecker'], + 'woodrock': ['corkwood', 'rockwood', 'woodrock'], + 'woodsilver': ['silverwood', 'woodsilver'], + 'woodstone': ['stonewood', 'woodstone'], + 'woodworm': ['woodworm', 'wormwood'], + 'wooled': ['dewool', 'elwood', 'wooled'], + 'woons': ['swoon', 'woons'], + 'woosh': ['howso', 'woosh'], + 'wop': ['pow', 'wop'], + 'worble': ['blower', 'bowler', 'reblow', 'worble'], + 'word': ['drow', 'word'], + 'wordage': ['dowager', 'wordage'], + 'worden': ['downer', 'wonder', 'worden'], + 'worder': ['reword', 'worder'], + 'wordily': ['rowdily', 'wordily'], + 'wordiness': ['rowdiness', 'wordiness'], + 'wordle': ['lowder', 'weldor', 'wordle'], + 'wordsman': ['sandworm', 'swordman', 'wordsman'], + 'wordsmanship': ['swordmanship', 'wordsmanship'], + 'wordy': ['dowry', 'rowdy', 'wordy'], + 'wore': ['ower', 'wore'], + 'workbasket': ['basketwork', 'workbasket'], + 'workbench': ['benchwork', 'workbench'], + 'workbook': ['bookwork', 'workbook'], + 'workbox': ['boxwork', 'workbox'], + 'workday': ['daywork', 'workday'], + 'worker': ['rework', 'worker'], + 'workhand': ['handwork', 'workhand'], + 'workhouse': ['housework', 'workhouse'], + 'working': ['kingrow', 'working'], + 'workmaster': ['masterwork', 'workmaster'], + 'workout': ['outwork', 'workout'], + 'workpiece': ['piecework', 'workpiece'], + 'workship': ['shipwork', 'workship'], + 'workshop': ['shopwork', 'workshop'], + 'worktime': ['timework', 'worktime'], + 'wormed': ['deworm', 'wormed'], + 'wormer': ['merrow', 'wormer'], + 'wormroot': ['moorwort', 'rootworm', 'tomorrow', 'wormroot'], + 'wormship': ['shipworm', 'wormship'], + 'wormwood': ['woodworm', 'wormwood'], + 'worse': ['owser', 'resow', 'serow', 'sower', 'swore', 'worse'], + 'worset': ['restow', 'stower', 'towser', 'worset'], + 'worst': ['strow', 'worst'], + 'wort': ['trow', 'wort'], + 'worth': ['throw', 'whort', 'worth', 'wroth'], + 'worthful': ['worthful', 'wrothful'], + 'worthily': ['worthily', 'wrothily'], + 'worthiness': ['worthiness', 'wrothiness'], + 'worthy': ['worthy', 'wrothy'], + 'wot': ['tow', 'two', 'wot'], + 'wots': ['sowt', 'stow', 'swot', 'wots'], + 'wounder': ['rewound', 'unrowed', 'wounder'], + 'woy': ['woy', 'yow'], + 'wraf': ['warf', 'wraf'], + 'wrainbolt': ['browntail', 'wrainbolt'], + 'wraitly': ['wraitly', 'wrytail'], + 'wran': ['warn', 'wran'], + 'wrangle': ['wangler', 'wrangle'], + 'wrap': ['warp', 'wrap'], + 'wrapper': ['prewrap', 'wrapper'], + 'wrastle': ['wastrel', 'wrastle'], + 'wrath': ['thraw', 'warth', 'whart', 'wrath'], + 'wreak': ['waker', 'wreak'], + 'wreat': ['tawer', 'water', 'wreat'], + 'wreath': ['rethaw', 'thawer', 'wreath'], + 'wreathe': ['weather', 'whereat', 'wreathe'], + 'wrest': ['strew', 'trews', 'wrest'], + 'wrester': ['strewer', 'wrester'], + 'wrestle': ['swelter', 'wrestle'], + 'wride': ['weird', 'wired', 'wride', 'wried'], + 'wried': ['weird', 'wired', 'wride', 'wried'], + 'wrier': ['wirer', 'wrier'], + 'wriggle': ['wiggler', 'wriggle'], + 'wrightine': ['withering', 'wrightine'], + 'wrinklet': ['twinkler', 'wrinklet'], + 'write': ['twire', 'write'], + 'writhe': ['wither', 'writhe'], + 'writher': ['wherrit', 'whirret', 'writher'], + 'written': ['twinter', 'written'], + 'wrive': ['wiver', 'wrive'], + 'wro': ['row', 'wro'], + 'wroken': ['knower', 'reknow', 'wroken'], + 'wrong': ['grown', 'wrong'], + 'wrote': ['rowet', 'tower', 'wrote'], + 'wroth': ['throw', 'whort', 'worth', 'wroth'], + 'wrothful': ['worthful', 'wrothful'], + 'wrothily': 
['worthily', 'wrothily'], + 'wrothiness': ['worthiness', 'wrothiness'], + 'wrothy': ['worthy', 'wrothy'], + 'wrytail': ['wraitly', 'wrytail'], + 'wunna': ['unwan', 'wunna'], + 'wyde': ['dewy', 'wyde'], + 'wye': ['wey', 'wye', 'yew'], + 'wype': ['pewy', 'wype'], + 'wyson': ['snowy', 'wyson'], + 'xanthein': ['xanthein', 'xanthine'], + 'xanthine': ['xanthein', 'xanthine'], + 'xanthopurpurin': ['purpuroxanthin', 'xanthopurpurin'], + 'xema': ['amex', 'exam', 'xema'], + 'xenia': ['axine', 'xenia'], + 'xenial': ['alexin', 'xenial'], + 'xenoparasite': ['exasperation', 'xenoparasite'], + 'xeres': ['resex', 'xeres'], + 'xerophytic': ['hypertoxic', 'xerophytic'], + 'xerotic': ['excitor', 'xerotic'], + 'xylic': ['cylix', 'xylic'], + 'xylitone': ['xylitone', 'xylonite'], + 'xylonite': ['xylitone', 'xylonite'], + 'xylophone': ['oxyphenol', 'xylophone'], + 'xylose': ['lyxose', 'xylose'], + 'xyst': ['styx', 'xyst'], + 'xyster': ['sextry', 'xyster'], + 'xysti': ['sixty', 'xysti'], + 'ya': ['ay', 'ya'], + 'yaba': ['baya', 'yaba'], + 'yabber': ['babery', 'yabber'], + 'yacht': ['cathy', 'cyath', 'yacht'], + 'yachtist': ['chastity', 'yachtist'], + 'yad': ['ady', 'day', 'yad'], + 'yaff': ['affy', 'yaff'], + 'yagnob': ['boyang', 'yagnob'], + 'yah': ['hay', 'yah'], + 'yahwism': ['wishmay', 'yahwism'], + 'yair': ['airy', 'yair'], + 'yaird': ['dairy', 'diary', 'yaird'], + 'yak': ['kay', 'yak'], + 'yakan': ['kayan', 'yakan'], + 'yakima': ['kamiya', 'yakima'], + 'yakka': ['kayak', 'yakka'], + 'yalb': ['ably', 'blay', 'yalb'], + 'yali': ['ilya', 'yali'], + 'yalla': ['allay', 'yalla'], + 'yallaer': ['allayer', 'yallaer'], + 'yam': ['amy', 'may', 'mya', 'yam'], + 'yamel': ['mealy', 'yamel'], + 'yamen': ['maney', 'yamen'], + 'yamilke': ['maylike', 'yamilke'], + 'yamph': ['phyma', 'yamph'], + 'yan': ['any', 'nay', 'yan'], + 'yana': ['anay', 'yana'], + 'yander': ['denary', 'yander'], + 'yap': ['pay', 'pya', 'yap'], + 'yapness': ['synapse', 'yapness'], + 'yapper': ['papery', 'prepay', 'yapper'], + 'yapster': ['atrepsy', 'yapster'], + 'yar': ['ary', 'ray', 'yar'], + 'yarb': ['bray', 'yarb'], + 'yard': ['adry', 'dray', 'yard'], + 'yardage': ['drayage', 'yardage'], + 'yarder': ['dreary', 'yarder'], + 'yardman': ['drayman', 'yardman'], + 'yare': ['aery', 'eyra', 'yare', 'year'], + 'yark': ['kyar', 'yark'], + 'yarl': ['aryl', 'lyra', 'ryal', 'yarl'], + 'yarm': ['army', 'mary', 'myra', 'yarm'], + 'yarn': ['nary', 'yarn'], + 'yarr': ['arry', 'yarr'], + 'yarrow': ['arrowy', 'yarrow'], + 'yaruran': ['unarray', 'yaruran'], + 'yas': ['say', 'yas'], + 'yasht': ['hasty', 'yasht'], + 'yat': ['tay', 'yat'], + 'yate': ['yate', 'yeat', 'yeta'], + 'yatter': ['attery', 'treaty', 'yatter'], + 'yaw': ['way', 'yaw'], + 'yawler': ['lawyer', 'yawler'], + 'yawn': ['awny', 'wany', 'yawn'], + 'yaws': ['sway', 'ways', 'yaws'], + 'ye': ['ey', 'ye'], + 'yea': ['aye', 'yea'], + 'yeah': ['ahey', 'eyah', 'yeah'], + 'year': ['aery', 'eyra', 'yare', 'year'], + 'yeard': ['deary', 'deray', 'rayed', 'ready', 'yeard'], + 'yearly': ['layery', 'yearly'], + 'yearn': ['enray', 'yearn'], + 'yearth': ['earthy', 'hearty', 'yearth'], + 'yeast': ['teasy', 'yeast'], + 'yeat': ['yate', 'yeat', 'yeta'], + 'yeather': ['erythea', 'hetaery', 'yeather'], + 'yed': ['dey', 'dye', 'yed'], + 'yede': ['eyed', 'yede'], + 'yee': ['eye', 'yee'], + 'yeel': ['eely', 'yeel'], + 'yees': ['yees', 'yese'], + 'yegg': ['eggy', 'yegg'], + 'yelk': ['kyle', 'yelk'], + 'yelm': ['elmy', 'yelm'], + 'yelmer': ['merely', 'yelmer'], + 'yelper': ['peerly', 'yelper'], + 'yemen': ['enemy', 'yemen'], + 
'yemeni': ['menyie', 'yemeni'], + 'yen': ['eyn', 'nye', 'yen'], + 'yender': ['redeny', 'yender'], + 'yeo': ['yeo', 'yoe'], + 'yeorling': ['legionry', 'yeorling'], + 'yer': ['rye', 'yer'], + 'yerb': ['brey', 'byre', 'yerb'], + 'yerba': ['barye', 'beray', 'yerba'], + 'yerd': ['dyer', 'yerd'], + 'yere': ['eyer', 'eyre', 'yere'], + 'yern': ['ryen', 'yern'], + 'yes': ['sey', 'sye', 'yes'], + 'yese': ['yees', 'yese'], + 'yest': ['stey', 'yest'], + 'yester': ['reesty', 'yester'], + 'yestern': ['streyne', 'styrene', 'yestern'], + 'yet': ['tye', 'yet'], + 'yeta': ['yate', 'yeat', 'yeta'], + 'yeth': ['they', 'yeth'], + 'yether': ['theyre', 'yether'], + 'yetlin': ['lenity', 'yetlin'], + 'yew': ['wey', 'wye', 'yew'], + 'yielden': ['needily', 'yielden'], + 'yielder': ['reedily', 'reyield', 'yielder'], + 'yildun': ['unidly', 'yildun'], + 'yill': ['illy', 'lily', 'yill'], + 'yirm': ['miry', 'rimy', 'yirm'], + 'ym': ['my', 'ym'], + 'yock': ['coky', 'yock'], + 'yodel': ['doyle', 'yodel'], + 'yoe': ['yeo', 'yoe'], + 'yoghurt': ['troughy', 'yoghurt'], + 'yogin': ['goyin', 'yogin'], + 'yoi': ['iyo', 'yoi'], + 'yoker': ['rokey', 'yoker'], + 'yolk': ['kylo', 'yolk'], + 'yom': ['moy', 'yom'], + 'yomud': ['moudy', 'yomud'], + 'yon': ['noy', 'yon'], + 'yond': ['ondy', 'yond'], + 'yonder': ['rodney', 'yonder'], + 'yont': ['tony', 'yont'], + 'yor': ['ory', 'roy', 'yor'], + 'yore': ['oyer', 'roey', 'yore'], + 'york': ['kory', 'roky', 'york'], + 'yot': ['toy', 'yot'], + 'yote': ['eyot', 'yote'], + 'youngun': ['unyoung', 'youngun'], + 'yours': ['soury', 'yours'], + 'yoursel': ['elusory', 'yoursel'], + 'yoven': ['envoy', 'nevoy', 'yoven'], + 'yow': ['woy', 'yow'], + 'yowl': ['lowy', 'owly', 'yowl'], + 'yowler': ['lowery', 'owlery', 'rowley', 'yowler'], + 'yowt': ['towy', 'yowt'], + 'yox': ['oxy', 'yox'], + 'yttrious': ['touristy', 'yttrious'], + 'yuca': ['cuya', 'yuca'], + 'yuckel': ['yuckel', 'yuckle'], + 'yuckle': ['yuckel', 'yuckle'], + 'yulan': ['unlay', 'yulan'], + 'yurok': ['rouky', 'yurok'], + 'zabian': ['banzai', 'zabian'], + 'zabra': ['braza', 'zabra'], + 'zacate': ['azteca', 'zacate'], + 'zad': ['adz', 'zad'], + 'zag': ['gaz', 'zag'], + 'zain': ['nazi', 'zain'], + 'zaman': ['namaz', 'zaman'], + 'zamenis': ['sizeman', 'zamenis'], + 'zaparoan': ['parazoan', 'zaparoan'], + 'zaratite': ['tatarize', 'zaratite'], + 'zati': ['itza', 'tiza', 'zati'], + 'zeal': ['laze', 'zeal'], + 'zealotism': ['solmizate', 'zealotism'], + 'zebra': ['braze', 'zebra'], + 'zein': ['inez', 'zein'], + 'zelanian': ['annalize', 'zelanian'], + 'zelatrice': ['cartelize', 'zelatrice'], + 'zemmi': ['zemmi', 'zimme'], + 'zendic': ['dezinc', 'zendic'], + 'zenick': ['zenick', 'zincke'], + 'zenu': ['unze', 'zenu'], + 'zequin': ['quinze', 'zequin'], + 'zerda': ['adzer', 'zerda'], + 'zerma': ['mazer', 'zerma'], + 'ziarat': ['atazir', 'ziarat'], + 'zibet': ['bizet', 'zibet'], + 'ziega': ['gaize', 'ziega'], + 'zimme': ['zemmi', 'zimme'], + 'zincite': ['citizen', 'zincite'], + 'zincke': ['zenick', 'zincke'], + 'zinco': ['zinco', 'zonic'], + 'zion': ['nozi', 'zion'], + 'zira': ['izar', 'zira'], + 'zirconate': ['narcotize', 'zirconate'], + 'zoa': ['azo', 'zoa'], + 'zoanthidae': ['zoanthidae', 'zoanthidea'], + 'zoanthidea': ['zoanthidae', 'zoanthidea'], + 'zoarite': ['azorite', 'zoarite'], + 'zobo': ['bozo', 'zobo'], + 'zoeal': ['azole', 'zoeal'], + 'zogan': ['gazon', 'zogan'], + 'zolotink': ['zolotink', 'zolotnik'], + 'zolotnik': ['zolotink', 'zolotnik'], + 'zonaria': ['arizona', 'azorian', 'zonaria'], + 'zoned': ['dozen', 'zoned'], + 'zonic': ['zinco', 
'zonic'], + 'zonotrichia': ['chorization', 'rhizoctonia', 'zonotrichia'], + 'zoonal': ['alonzo', 'zoonal'], + 'zoonic': ['ozonic', 'zoonic'], + 'zoonomic': ['monozoic', 'zoonomic'], + 'zoopathy': ['phytozoa', 'zoopathy', 'zoophyta'], + 'zoophilic': ['philozoic', 'zoophilic'], + 'zoophilist': ['philozoist', 'zoophilist'], + 'zoophyta': ['phytozoa', 'zoopathy', 'zoophyta'], + 'zoospermatic': ['spermatozoic', 'zoospermatic'], + 'zoosporic': ['sporozoic', 'zoosporic'], + 'zootype': ['ozotype', 'zootype'], + 'zyga': ['gazy', 'zyga'], + 'zygal': ['glazy', 'zygal']} \ No newline at end of file From ed4c92d98af4b96605a0463bc94143b9c771a7cd Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Mon, 8 Nov 2021 23:48:30 +0530 Subject: [PATCH 0402/1543] [mypy] Type annotations for graphs directory (#5798) * Type annotations for `breadth_first_search.py` * Type annotations for `breadth_first_search_2.py` * Remove from excluded in mypy.ini * Add doctest.testmod() * Type annotations for `graphs/check_cycle.py` * Type annotations for `graphs/greedy_min_vertex_cover.py` * Remove from excluded in mypy.ini --- graphs/breadth_first_search.py | 2 +- graphs/breadth_first_search_2.py | 5 ++++- graphs/check_cycle.py | 6 ++---- graphs/greedy_min_vertex_cover.py | 9 ++++----- mypy.ini | 2 +- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/graphs/breadth_first_search.py b/graphs/breadth_first_search.py index 9264f57b41b2..171d3875f3c5 100644 --- a/graphs/breadth_first_search.py +++ b/graphs/breadth_first_search.py @@ -53,7 +53,7 @@ def bfs(self, start_vertex: int) -> set[int]: visited = set() # create a first in first out queue to store all the vertices for BFS - queue = Queue() + queue: Queue = Queue() # mark the source node as visited and enqueue it visited.add(start_vertex) diff --git a/graphs/breadth_first_search_2.py b/graphs/breadth_first_search_2.py index 4c8b69faf656..2f060a90d40d 100644 --- a/graphs/breadth_first_search_2.py +++ b/graphs/breadth_first_search_2.py @@ -32,7 +32,7 @@ def breadth_first_search(graph: dict, start: str) -> set[str]: 'ABCDEF' """ explored = {start} - queue = Queue() + queue: Queue = Queue() queue.put(start) while not queue.empty(): v = queue.get() @@ -44,4 +44,7 @@ def breadth_first_search(graph: dict, start: str) -> set[str]: if __name__ == "__main__": + import doctest + + doctest.testmod() print(breadth_first_search(G, "A")) diff --git a/graphs/check_cycle.py b/graphs/check_cycle.py index 71d42b4689b7..dcc864988ca5 100644 --- a/graphs/check_cycle.py +++ b/graphs/check_cycle.py @@ -6,16 +6,15 @@ def check_cycle(graph: dict) -> bool: """ Returns True if graph is cyclic else False - >>> check_cycle(graph={0:[], 1:[0, 3], 2:[0, 4], 3:[5], 4:[5], 5:[]}) False >>> check_cycle(graph={0:[1, 2], 1:[2], 2:[0, 3], 3:[3]}) True """ # Keep track of visited nodes - visited = set() + visited: set[int] = set() # To detect a back edge, keep track of vertices currently in the recursion stack - rec_stk = set() + rec_stk: set[int] = set() for node in graph: if node not in visited: if depth_first_search(graph, node, visited, rec_stk): @@ -27,7 +26,6 @@ def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> """ Recur for all neighbours. If any neighbour is visited and in rec_stk then graph is cyclic. 
- >>> graph = {0:[], 1:[0, 3], 2:[0, 4], 3:[5], 4:[5], 5:[]} >>> vertex, visited, rec_stk = 0, set(), set() >>> depth_first_search(graph, vertex, visited, rec_stk) diff --git a/graphs/greedy_min_vertex_cover.py b/graphs/greedy_min_vertex_cover.py index 056c5b89bedf..cdef69141bd6 100644 --- a/graphs/greedy_min_vertex_cover.py +++ b/graphs/greedy_min_vertex_cover.py @@ -2,7 +2,6 @@ * Author: Manuel Di Lullo (https://github.com/manueldilullo) * Description: Approximization algorithm for minimum vertex cover problem. Greedy Approach. Uses graphs represented with an adjacency list - URL: https://mathworld.wolfram.com/MinimumVertexCover.html URL: https://cs.stackexchange.com/questions/129017/greedy-algorithm-for-vertex-cover """ @@ -10,7 +9,7 @@ import heapq -def greedy_min_vertex_cover(graph: dict) -> set: +def greedy_min_vertex_cover(graph: dict) -> set[int]: """ Greedy APX Algorithm for min Vertex Cover @input: graph (graph stored in an adjacency list where each vertex @@ -21,7 +20,7 @@ def greedy_min_vertex_cover(graph: dict) -> set: {0, 1, 2, 4} """ # queue used to store nodes and their rank - queue = [] + queue: list[list] = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue @@ -61,5 +60,5 @@ def greedy_min_vertex_cover(graph: dict) -> set: doctest.testmod() - # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} - # print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}") + graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} + print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}") diff --git a/mypy.ini b/mypy.ini index 429c6804daf5..ce7c262ab059 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,4 +2,4 @@ ignore_missing_imports = True install_types = True non_interactive = True -exclude = (graphs/breadth_first_search.py|graphs/breadth_first_search_2.py|graphs/check_cycle.py|graphs/greedy_min_vertex_cover.py|matrix_operation.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py|searches/simulated_annealing.py|searches/ternary_search.py) +exclude = (matrix_operation.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py|searches/simulated_annealing.py|searches/ternary_search.py) From 0b8d6d70cea06eabc19b40a4e583efe62c2c0c2e Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 9 Nov 2021 17:25:29 +0300 Subject: [PATCH 0403/1543] Add Project Euler problem 205 solution 1 (#5781) * updating DIRECTORY.md * Add solution * updating DIRECTORY.md * Fix * Fix Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_205/__init__.py | 0 project_euler/problem_205/sol1.py | 75 +++++++++++++++++++++++++++ 3 files changed, 77 insertions(+) create mode 100644 project_euler/problem_205/__init__.py create mode 100644 project_euler/problem_205/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 228d95472a60..a2f229a9ed37 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -865,6 +865,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_191/sol1.py) * Problem 203 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_203/sol1.py) + * Problem 205 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_205/sol1.py) * Problem 206 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_206/sol1.py) * Problem 207 diff --git 
a/project_euler/problem_205/__init__.py b/project_euler/problem_205/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_205/sol1.py b/project_euler/problem_205/sol1.py new file mode 100644 index 000000000000..7249df48829b --- /dev/null +++ b/project_euler/problem_205/sol1.py @@ -0,0 +1,75 @@ +""" +Project Euler Problem 205: https://projecteuler.net/problem=205 + +Peter has nine four-sided (pyramidal) dice, each with faces numbered 1, 2, 3, 4. +Colin has six six-sided (cubic) dice, each with faces numbered 1, 2, 3, 4, 5, 6. + +Peter and Colin roll their dice and compare totals: the highest total wins. +The result is a draw if the totals are equal. + +What is the probability that Pyramidal Peter beats Cubic Colin? +Give your answer rounded to seven decimal places in the form 0.abcdefg +""" + +from itertools import product + + +def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]: + """ + Returns frequency distribution of total + + >>> total_frequency_distribution(sides_number=6, dice_number=1) + [0, 1, 1, 1, 1, 1, 1] + + >>> total_frequency_distribution(sides_number=4, dice_number=2) + [0, 0, 1, 2, 3, 4, 3, 2, 1] + """ + + max_face_number = sides_number + max_total = max_face_number * dice_number + totals_frequencies = [0] * (max_total + 1) + + min_face_number = 1 + faces_numbers = range(min_face_number, max_face_number + 1) + for dice_numbers in product(faces_numbers, repeat=dice_number): + total = sum(dice_numbers) + totals_frequencies[total] += 1 + + return totals_frequencies + + +def solution() -> float: + """ + Returns probability that Pyramidal Peter beats Cubic Colin + rounded to seven decimal places in the form 0.abcdefg + + >>> solution() + 0.5731441 + """ + + peter_totals_frequencies = total_frequency_distribution( + sides_number=4, dice_number=9 + ) + colin_totals_frequencies = total_frequency_distribution( + sides_number=6, dice_number=6 + ) + + peter_wins_count = 0 + min_peter_total = 9 + max_peter_total = 4 * 9 + min_colin_total = 6 + for peter_total in range(min_peter_total, max_peter_total + 1): + peter_wins_count += peter_totals_frequencies[peter_total] * sum( + colin_totals_frequencies[min_colin_total:peter_total] + ) + + total_games_number = (4 ** 9) * (6 ** 6) + peter_win_probability = peter_wins_count / total_games_number + + rounded_peter_win_probability = round(peter_win_probability, ndigits=7) + + return rounded_peter_win_probability + + +if __name__ == "__main__": + print(f"{solution() = }") From c3d1ff0ebd034eeb6105ef8bad6a3c962efa56f2 Mon Sep 17 00:00:00 2001 From: Nivas Manduva <53264470+eviltypha@users.noreply.github.com> Date: Tue, 9 Nov 2021 20:10:57 +0530 Subject: [PATCH 0404/1543] Add Jacobi Iteration Method (#5113) * Added Jacobi Iteration Method Added this method in arithmetic_analysis folder. This method is used to solve system of linear equations. 
* Added comments * Added reference link * Update jacobi_iteration_method.py * Changes for codespell test * Update jacobi_iteration_method.py * Update jacobi_iteration_method.py * Update arithmetic_analysis/jacobi_iteration_method.py Co-authored-by: Christian Clauss * updating DIRECTORY.md * Update arithmetic_analysis/jacobi_iteration_method.py Co-authored-by: Christian Clauss * Update arithmetic_analysis/jacobi_iteration_method.py Co-authored-by: Christian Clauss * Update arithmetic_analysis/jacobi_iteration_method.py Co-authored-by: Christian Clauss * Update arithmetic_analysis/jacobi_iteration_method.py Co-authored-by: Christian Clauss * Update arithmetic_analysis/jacobi_iteration_method.py Co-authored-by: Christian Clauss * Update arithmetic_analysis/jacobi_iteration_method.py Co-authored-by: Christian Clauss * Update jacobi_iteration_method.py * Update jacobi_iteration_method.py * Update jacobi_iteration_method.py * fix styles Co-authored-by: Christian Clauss Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: John Law --- DIRECTORY.md | 1 + .../jacobi_iteration_method.py | 163 ++++++++++++++++++ 2 files changed, 164 insertions(+) create mode 100644 arithmetic_analysis/jacobi_iteration_method.py diff --git a/DIRECTORY.md b/DIRECTORY.md index a2f229a9ed37..883b81b2444d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -4,6 +4,7 @@ * [Gaussian Elimination](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/gaussian_elimination.py) * [In Static Equilibrium](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/in_static_equilibrium.py) * [Intersection](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/intersection.py) + * [Jacobi Iteration Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/jacobi_iteration_method.py) * [Lu Decomposition](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/lu_decomposition.py) * [Newton Forward Interpolation](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_forward_interpolation.py) * [Newton Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_method.py) diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py new file mode 100644 index 000000000000..6674824255a1 --- /dev/null +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -0,0 +1,163 @@ +""" +Jacobi Iteration Method - https://en.wikipedia.org/wiki/Jacobi_method +""" +from __future__ import annotations + +import numpy as np + + +# Method to find solution of system of linear equations +def jacobi_iteration_method( + coefficient_matrix: np.ndarray, + constant_matrix: np.ndarray, + init_val: list, + iterations: int, +) -> list[float]: + """ + Jacobi Iteration Method: + An iterative algorithm to determine the solutions of strictly diagonally dominant + system of linear equations + + 4x1 + x2 + x3 = 2 + x1 + 5x2 + 2x3 = -6 + x1 + 2x2 + 4x3 = -4 + + x_init = [0.5, -0.5 , -0.5] + + Examples: + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) + >>> constant = np.array([[2], [-6], [-4]]) + >>> init_val = [0.5, -0.5, -0.5] + >>> iterations = 3 + >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + [0.909375, -1.14375, -0.7484375] + + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2]]) + >>> constant = np.array([[2], [-6], [-4]]) + >>> init_val = [0.5, -0.5, -0.5] + >>> iterations = 3 + >>> 
jacobi_iteration_method(coefficient, constant, init_val, iterations) + Traceback (most recent call last): + ... + ValueError: Coefficient matrix dimensions must be nxn but received 2x3 + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) + >>> constant = np.array([[2], [-6]]) + >>> init_val = [0.5, -0.5, -0.5] + >>> iterations = 3 + >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + Traceback (most recent call last): + ... + ValueError: Coefficient and constant matrices dimensions must be nxn and nx1 but + received 3x3 and 2x1 + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) + >>> constant = np.array([[2], [-6], [-4]]) + >>> init_val = [0.5, -0.5] + >>> iterations = 3 + >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + Traceback (most recent call last): + ... + ValueError: Number of initial values must be equal to number of rows in coefficient + matrix but received 2 and 3 + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) + >>> constant = np.array([[2], [-6], [-4]]) + >>> init_val = [0.5, -0.5, -0.5] + >>> iterations = 0 + >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + Traceback (most recent call last): + ... + ValueError: Iterations must be at least 1 + """ + + rows1, cols1 = coefficient_matrix.shape + rows2, cols2 = constant_matrix.shape + + if rows1 != cols1: + raise ValueError( + f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}" + ) + + if cols2 != 1: + raise ValueError(f"Constant matrix must be nx1 but received {rows2}x{cols2}") + + if rows1 != rows2: + raise ValueError( + f"""Coefficient and constant matrices dimensions must be nxn and nx1 but + received {rows1}x{cols1} and {rows2}x{cols2}""" + ) + + if len(init_val) != rows1: + raise ValueError( + f"""Number of initial values must be equal to number of rows in coefficient + matrix but received {len(init_val)} and {rows1}""" + ) + + if iterations <= 0: + raise ValueError("Iterations must be at least 1") + + table = np.concatenate((coefficient_matrix, constant_matrix), axis=1) + + rows, cols = table.shape + + strictly_diagonally_dominant(table) + + # Iterates the whole matrix for given number of times + for i in range(iterations): + new_val = [] + for row in range(rows): + temp = 0 + for col in range(cols): + if col == row: + denom = table[row][col] + elif col == cols - 1: + val = table[row][col] + else: + temp += (-1) * table[row][col] * init_val[col] + temp = (temp + val) / denom + new_val.append(temp) + init_val = new_val + + return [float(i) for i in new_val] + + +# Checks if the given matrix is strictly diagonally dominant +def strictly_diagonally_dominant(table: np.ndarray) -> bool: + """ + >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 4, -4]]) + >>> strictly_diagonally_dominant(table) + True + + >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 3, -4]]) + >>> strictly_diagonally_dominant(table) + Traceback (most recent call last): + ... 
+ ValueError: Coefficient matrix is not strictly diagonally dominant + """ + + rows, cols = table.shape + + is_diagonally_dominant = True + + for i in range(0, rows): + sum = 0 + for j in range(0, cols - 1): + if i == j: + continue + else: + sum += table[i][j] + + if table[i][i] <= sum: + raise ValueError("Coefficient matrix is not strictly diagonally dominant") + + return is_diagonally_dominant + + +# Test Cases +if __name__ == "__main__": + import doctest + + doctest.testmod() From 745f9e2bc37280368ae007d1a30ffc217e4a5b81 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Tue, 9 Nov 2021 21:18:30 +0530 Subject: [PATCH 0405/1543] [mypy] Type annotations for searches directory (#5799) * Update ternary_search.py * Update mypy.ini * Update simulated_annealing.py * Update ternary_search.py * formatting * formatting * Update matrix_operation.py * Update matrix_operation.py * Update matrix_operation.py --- mypy.ini | 2 +- searches/simulated_annealing.py | 3 ++- searches/ternary_search.py | 12 ++++++++---- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/mypy.ini b/mypy.ini index ce7c262ab059..94fb125fb124 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,4 +2,4 @@ ignore_missing_imports = True install_types = True non_interactive = True -exclude = (matrix_operation.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py|searches/simulated_annealing.py|searches/ternary_search.py) +exclude = (matrix_operation.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py) diff --git a/searches/simulated_annealing.py b/searches/simulated_annealing.py index 2aa980be7748..ad29559f1b8d 100644 --- a/searches/simulated_annealing.py +++ b/searches/simulated_annealing.py @@ -1,6 +1,7 @@ # https://en.wikipedia.org/wiki/Simulated_annealing import math import random +from typing import Any from .hill_climbing import SearchProblem @@ -16,7 +17,7 @@ def simulated_annealing( start_temperate: float = 100, rate_of_decrease: float = 0.01, threshold_temp: float = 1, -) -> SearchProblem: +) -> Any: """ Implementation of the simulated annealing algorithm. We start with a given state, find all its neighbors. Pick a random neighbor, if that neighbor improves the diff --git a/searches/ternary_search.py b/searches/ternary_search.py index 01e437723473..9830cce36000 100644 --- a/searches/ternary_search.py +++ b/searches/ternary_search.py @@ -89,8 +89,8 @@ def ite_ternary_search(array: list[int], target: int) -> int: if right - left < precision: return lin_search(left, right, array, target) - one_third = (left + right) / 3 + 1 - two_third = 2 * (left + right) / 3 + 1 + one_third = (left + right) // 3 + 1 + two_third = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third @@ -138,8 +138,8 @@ def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> if left < right: if right - left < precision: return lin_search(left, right, array, target) - one_third = (left + right) / 3 + 1 - two_third = 2 * (left + right) / 3 + 1 + one_third = (left + right) // 3 + 1 + two_third = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third @@ -157,6 +157,10 @@ def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> if __name__ == "__main__": + import doctest + + doctest.testmod() + user_input = input("Enter numbers separated by comma:\n").strip() collection = [int(item.strip()) for item in user_input.split(",")] assert collection == sorted(collection), f"List must be ordered.\n{collection}." 
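The `/` to `//` changes in searches/ternary_search.py above are behavioural fixes, not just style: Python 3's true division returns a float, and a float cannot be used as a list index. A minimal standalone illustration of the difference (the array below is illustrative and not taken from the patch):

array = [1, 3, 5, 7, 9, 11]
left, right = 0, len(array) - 1

float_index = (left + right) / 3 + 1   # 2.666..., a float
int_index = (left + right) // 3 + 1    # 2, a usable index

print(array[int_index])  # 5
# array[float_index] raises TypeError: list indices must be integers or slices, not float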
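A minimal usage sketch of the solver added in the Jacobi Iteration Method patch above (#5113), assuming the module path shown in its diff (arithmetic_analysis/jacobi_iteration_method.py); the system and the expected output are taken from the function's own doctests.

import numpy as np

from arithmetic_analysis.jacobi_iteration_method import jacobi_iteration_method

# Strictly diagonally dominant system from the docstring:
#   4*x1 +   x2 +   x3 =  2
#     x1 + 5*x2 + 2*x3 = -6
#     x1 + 2*x2 + 4*x3 = -4
coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
constant = np.array([[2], [-6], [-4]])
init_val = [0.5, -0.5, -0.5]

print(jacobi_iteration_method(coefficient, constant, init_val, iterations=3))
# Doctest above reports: [0.909375, -1.14375, -0.7484375]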
From d6a1623448f2494fbb21116b6bc699dccb0401b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carl-Henrik=20=C3=85keson?= <80765479+chakeson@users.noreply.github.com> Date: Wed, 10 Nov 2021 11:22:27 +0100 Subject: [PATCH 0406/1543] Add solution for Project Euler problem 145 (#5173) * Added solution for Project Euler problem 145 * Updated spelling of comments Updated spelling inline with codespell * Removed trailing whitespaces in comments * Added default values. * nr -> number Co-authored-by: John Law * nr -> number * Update sol1.py * Update sol1.py Co-authored-by: John Law --- project_euler/problem_145/__init__.py | 0 project_euler/problem_145/sol1.py | 87 +++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) create mode 100644 project_euler/problem_145/__init__.py create mode 100644 project_euler/problem_145/sol1.py diff --git a/project_euler/problem_145/__init__.py b/project_euler/problem_145/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_145/sol1.py b/project_euler/problem_145/sol1.py new file mode 100644 index 000000000000..5ba3af86a6a1 --- /dev/null +++ b/project_euler/problem_145/sol1.py @@ -0,0 +1,87 @@ +""" +Problem 145: https://projecteuler.net/problem=145 + +Name: How many reversible numbers are there below one-billion? + +Some positive integers n have the property that the +sum [ n + reverse(n) ] consists entirely of odd (decimal) digits. +For instance, 36 + 63 = 99 and 409 + 904 = 1313. +We will call such numbers reversible; so 36, 63, 409, and 904 are reversible. +Leading zeroes are not allowed in either n or reverse(n). + +There are 120 reversible numbers below one-thousand. + +How many reversible numbers are there below one-billion (10^9)? + + +Solution: + +Here a brute force solution is used to find and count the reversible numbers. + +""" +from __future__ import annotations + + +def check_if_odd(sum: int = 36) -> int: + """ + Check if the last digit in the sum is even or odd. If even return 0. + If odd then floor division by 10 is used to remove the last number. + Process continues until sum becomes 0 because no more numbers. + >>> check_if_odd(36) + 0 + >>> check_if_odd(33) + 1 + """ + while sum > 0: + if (sum % 10) % 2 == 0: + return 0 + sum = sum // 10 + return 1 + + +def find_reverse_number(number: int = 36) -> int: + """ + Reverses the given number. Does not work with number that end in zero. + >>> find_reverse_number(36) + 63 + >>> find_reverse_number(409) + 904 + """ + reverse = 0 + + while number > 0: + temp = number % 10 + reverse = reverse * 10 + temp + number = number // 10 + + return reverse + + +def solution(number: int = 1000000000) -> int: + """ + Loops over the range of numbers. + Checks if they have ending zeros which disqualifies them from being reversible. + If that condition is passed it generates the reversed number. + Then sum up n and reverse(n). + Then check if all the numbers in the sum are odd. If true add to the answer. 
+ >>> solution(1000000000) + 608720 + >>> solution(1000000) + 18720 + >>> solution(1000000) + 18720 + >>> solution(1000) + 120 + """ + answer = 0 + for x in range(1, number): + if x % 10 != 0: + reversed_number = find_reverse_number(x) + sum = x + reversed_number + answer += check_if_odd(sum) + + return answer + + +if __name__ == "__main__": + print(f"{solution() = }") From e9882e41ba7a1d1fbdc12362a522a1b646497192 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Wed, 10 Nov 2021 20:52:52 +0530 Subject: [PATCH 0407/1543] [mypy] Fix `matrix_operation.py` (#5808) * Update matrix_operation.py * Update mypy.ini * Update DIRECTORY.md * formatting * Update matrix_operation.py * doctest for exception * A bit more... --- DIRECTORY.md | 2 ++ matrix/matrix_operation.py | 33 ++++++++++++++++++++++++--------- mypy.ini | 2 +- 3 files changed, 27 insertions(+), 10 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 883b81b2444d..c46d81ab75bc 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -854,6 +854,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_135/sol1.py) * Problem 144 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_144/sol1.py) + * Problem 145 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_145/sol1.py) * Problem 173 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_173/sol1.py) * Problem 174 diff --git a/matrix/matrix_operation.py b/matrix/matrix_operation.py index dca01f9c3183..6d0cd4e655eb 100644 --- a/matrix/matrix_operation.py +++ b/matrix/matrix_operation.py @@ -13,11 +13,16 @@ def add(*matrix_s: list[list]) -> list[list]: [[3.2, 5.4], [7, 9]] >>> add([[1, 2], [4, 5]], [[3, 7], [3, 4]], [[3, 5], [5, 7]]) [[7, 14], [12, 16]] + >>> add([3], [4, 5]) + Traceback (most recent call last): + ... + TypeError: Expected a matrix, got int/list instead """ if all(_check_not_integer(m) for m in matrix_s): for i in matrix_s[1:]: _verify_matrix_sizes(matrix_s[0], i) return [[sum(t) for t in zip(*m)] for m in zip(*matrix_s)] + raise TypeError("Expected a matrix, got int/list instead") def subtract(matrix_a: list[list], matrix_b: list[list]) -> list[list]: @@ -26,6 +31,10 @@ def subtract(matrix_a: list[list], matrix_b: list[list]) -> list[list]: [[-1, -1], [-1, -1]] >>> subtract([[1,2.5],[3,4]],[[2,3],[4,5.5]]) [[-1, -0.5], [-1, -1.5]] + >>> subtract([3], [4, 5]) + Traceback (most recent call last): + ... + TypeError: Expected a matrix, got int/list instead """ if ( _check_not_integer(matrix_a) @@ -33,9 +42,10 @@ def subtract(matrix_a: list[list], matrix_b: list[list]) -> list[list]: and _verify_matrix_sizes(matrix_a, matrix_b) ): return [[i - j for i, j in zip(*m)] for m in zip(matrix_a, matrix_b)] + raise TypeError("Expected a matrix, got int/list instead") -def scalar_multiply(matrix: list[list], n: int) -> list[list]: +def scalar_multiply(matrix: list[list], n: int | float) -> list[list]: """ >>> scalar_multiply([[1,2],[3,4]],5) [[5, 10], [15, 20]] @@ -79,18 +89,23 @@ def identity(n: int) -> list[list]: return [[int(row == column) for column in range(n)] for row in range(n)] -def transpose(matrix: list[list], return_map: bool = True) -> list[list]: +def transpose(matrix: list[list], return_map: bool = True) -> list[list] | map[list]: """ >>> transpose([[1,2],[3,4]]) # doctest: +ELLIPSIS >> transpose([[1,2],[3,4]], return_map=False) [[1, 3], [2, 4]] + >>> transpose([1, [2, 3]]) + Traceback (most recent call last): + ... 
+ TypeError: Expected a matrix, got int/list instead """ if _check_not_integer(matrix): if return_map: return map(list, zip(*matrix)) else: return list(map(list, zip(*matrix))) + raise TypeError("Expected a matrix, got int/list instead") def minor(matrix: list[list], row: int, column: int) -> list[list]: @@ -118,7 +133,7 @@ def determinant(matrix: list[list]) -> int: ) -def inverse(matrix: list[list]) -> list[list]: +def inverse(matrix: list[list]) -> list[list] | None: """ >>> inverse([[1, 2], [3, 4]]) [[-2.0, 1.0], [1.5, -0.5]] @@ -138,21 +153,21 @@ def inverse(matrix: list[list]) -> list[list]: [x * (-1) ** (row + col) for col, x in enumerate(matrix_minor[row])] for row in range(len(matrix)) ] - adjugate = transpose(cofactors) + adjugate = list(transpose(cofactors)) return scalar_multiply(adjugate, 1 / det) def _check_not_integer(matrix: list[list]) -> bool: - if not isinstance(matrix, int) and not isinstance(matrix[0], int): - return True - raise TypeError("Expected a matrix, got int/list instead") + return not isinstance(matrix, int) and not isinstance(matrix[0], int) -def _shape(matrix: list[list]) -> list: +def _shape(matrix: list[list]) -> tuple[int, int]: return len(matrix), len(matrix[0]) -def _verify_matrix_sizes(matrix_a: list[list], matrix_b: list[list]) -> tuple[list]: +def _verify_matrix_sizes( + matrix_a: list[list], matrix_b: list[list] +) -> tuple[tuple, tuple]: shape = _shape(matrix_a) + _shape(matrix_b) if shape[0] != shape[3] or shape[1] != shape[2]: raise ValueError( diff --git a/mypy.ini b/mypy.ini index 94fb125fb124..f00b3eeb6bac 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,4 +2,4 @@ ignore_missing_imports = True install_types = True non_interactive = True -exclude = (matrix_operation.py|other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py) +exclude = (other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py) From 7e81551d7b54f121458bd8e6a67b7ca86156815c Mon Sep 17 00:00:00 2001 From: Joyce Date: Thu, 11 Nov 2021 03:55:23 +0800 Subject: [PATCH 0408/1543] [mypy] fix type annotations for other/least-recently-used.py (#5811) --- other/least_recently_used.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/other/least_recently_used.py b/other/least_recently_used.py index d0e27efc6dc8..9d6b6d7cb6a6 100644 --- a/other/least_recently_used.py +++ b/other/least_recently_used.py @@ -1,5 +1,4 @@ import sys -from abc import abstractmethod from collections import deque @@ -10,7 +9,6 @@ class LRUCache: key_reference_map = object() # References of the keys in cache _MAX_CAPACITY: int = 10 # Maximum capacity of cache - @abstractmethod def __init__(self, n: int): """Creates an empty store and map for the keys. The LRUCache is set the size n. 
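The `@abstractmethod` removal in other/least_recently_used.py above also drops a silent no-op: the decorator only takes effect on classes whose metaclass is `ABCMeta`, so on the plain `LRUCache` class it never prevented instantiation. A small standalone demonstration of that behaviour (the class names here are illustrative, not from the repository):

from abc import ABC, abstractmethod


class WithAbcMeta(ABC):
    @abstractmethod
    def __init__(self) -> None:
        ...


class PlainClass:  # no ABCMeta, so the decorator is silently ignored
    @abstractmethod
    def __init__(self) -> None:
        ...


PlainClass()  # instantiates without complaint
try:
    WithAbcMeta()  # abstract classes cannot be instantiated
except TypeError as error:
    print(error)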
From f36ee034f1f5c65cc89ed1fadea29a28e744a297 Mon Sep 17 00:00:00 2001 From: Andrew Grangaard Date: Wed, 10 Nov 2021 14:21:16 -0800 Subject: [PATCH 0409/1543] [mypy] Annotate other/lru_cache and other/lfu_cache (#5755) * Adds repr and doctest of current behavior linkedlist in other/lru_cache * Blocks removal of head or tail of double linked list * clarifies add() logic for double linked list in other/lru_cache * expands doctests to compare cache and lru cache * [mypy] annotates vars for other/lru_cache * [mypy] Annotates lru_cache decorator for other/lru_cache * Higher order functions require a verbose Callable annotation * [mypy] Makes LRU_Cache generic over key and value types for other/lru_cache + no reason to force int -> int * [mypy] makes decorator a classmethod for access to class generic types * breaks two long lines in doctest for other/lru_cache * simplifies boundary test remove() for other/lru_cache * [mypy] Annotates, adds doctests, and makes Generic other/lfu_cache See also commits to other/lru_cache which guided these * [mypy] annotates cls var in other/lfu_cache * cleans up items from code review for lfu_cache and lru_cache * [mypy] runs mypy on lfu_cache and lru_cache --- mypy.ini | 2 +- other/lfu_cache.py | 232 ++++++++++++++++++++++++++++++++++---------- other/lru_cache.py | 235 +++++++++++++++++++++++++++++++++++---------- 3 files changed, 366 insertions(+), 103 deletions(-) diff --git a/mypy.ini b/mypy.ini index f00b3eeb6bac..7dbc7c4ffc80 100644 --- a/mypy.ini +++ b/mypy.ini @@ -2,4 +2,4 @@ ignore_missing_imports = True install_types = True non_interactive = True -exclude = (other/least_recently_used.py|other/lfu_cache.py|other/lru_cache.py) +exclude = (other/least_recently_used.py) diff --git a/other/lfu_cache.py b/other/lfu_cache.py index 88167ac1f2cb..e955973c95b0 100644 --- a/other/lfu_cache.py +++ b/other/lfu_cache.py @@ -1,61 +1,165 @@ from __future__ import annotations -from typing import Callable +from typing import Callable, Generic, TypeVar +T = TypeVar("T") +U = TypeVar("U") -class DoubleLinkedListNode: + +class DoubleLinkedListNode(Generic[T, U]): """ Double Linked List Node built specifically for LFU Cache + + >>> node = DoubleLinkedListNode(1,1) + >>> node + Node: key: 1, val: 1, freq: 0, has next: False, has prev: False """ - def __init__(self, key: int, val: int): + def __init__(self, key: T | None, val: U | None): self.key = key self.val = val - self.freq = 0 - self.next = None - self.prev = None + self.freq: int = 0 + self.next: DoubleLinkedListNode[T, U] | None = None + self.prev: DoubleLinkedListNode[T, U] | None = None + def __repr__(self) -> str: + return "Node: key: {}, val: {}, freq: {}, has next: {}, has prev: {}".format( + self.key, self.val, self.freq, self.next is not None, self.prev is not None + ) -class DoubleLinkedList: + +class DoubleLinkedList(Generic[T, U]): """ Double Linked List built specifically for LFU Cache + + >>> dll: DoubleLinkedList = DoubleLinkedList() + >>> dll + DoubleLinkedList, + Node: key: None, val: None, freq: 0, has next: True, has prev: False, + Node: key: None, val: None, freq: 0, has next: False, has prev: True + + >>> first_node = DoubleLinkedListNode(1,10) + >>> first_node + Node: key: 1, val: 10, freq: 0, has next: False, has prev: False + + + >>> dll.add(first_node) + >>> dll + DoubleLinkedList, + Node: key: None, val: None, freq: 0, has next: True, has prev: False, + Node: key: 1, val: 10, freq: 1, has next: True, has prev: True, + Node: key: None, val: None, freq: 0, has next: False, has prev: True + + >>> # 
node is mutated + >>> first_node + Node: key: 1, val: 10, freq: 1, has next: True, has prev: True + + >>> second_node = DoubleLinkedListNode(2,20) + >>> second_node + Node: key: 2, val: 20, freq: 0, has next: False, has prev: False + + >>> dll.add(second_node) + >>> dll + DoubleLinkedList, + Node: key: None, val: None, freq: 0, has next: True, has prev: False, + Node: key: 1, val: 10, freq: 1, has next: True, has prev: True, + Node: key: 2, val: 20, freq: 1, has next: True, has prev: True, + Node: key: None, val: None, freq: 0, has next: False, has prev: True + + >>> removed_node = dll.remove(first_node) + >>> assert removed_node == first_node + >>> dll + DoubleLinkedList, + Node: key: None, val: None, freq: 0, has next: True, has prev: False, + Node: key: 2, val: 20, freq: 1, has next: True, has prev: True, + Node: key: None, val: None, freq: 0, has next: False, has prev: True + + + >>> # Attempt to remove node not on list + >>> removed_node = dll.remove(first_node) + >>> removed_node is None + True + + >>> # Attempt to remove head or rear + >>> dll.head + Node: key: None, val: None, freq: 0, has next: True, has prev: False + >>> dll.remove(dll.head) is None + True + + >>> # Attempt to remove head or rear + >>> dll.rear + Node: key: None, val: None, freq: 0, has next: False, has prev: True + >>> dll.remove(dll.rear) is None + True + + """ - def __init__(self): - self.head = DoubleLinkedListNode(None, None) - self.rear = DoubleLinkedListNode(None, None) + def __init__(self) -> None: + self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None) + self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None) self.head.next, self.rear.prev = self.rear, self.head - def add(self, node: DoubleLinkedListNode) -> None: + def __repr__(self) -> str: + rep = ["DoubleLinkedList"] + node = self.head + while node.next is not None: + rep.append(str(node)) + node = node.next + rep.append(str(self.rear)) + return ",\n ".join(rep) + + def add(self, node: DoubleLinkedListNode[T, U]) -> None: """ - Adds the given node at the head of the list and shifting it to proper position + Adds the given node at the tail of the list and shifting it to proper position """ - temp = self.rear.prev + previous = self.rear.prev + + # All nodes other than self.head are guaranteed to have non-None previous + assert previous is not None - self.rear.prev, node.next = node, self.rear - temp.next, node.prev = node, temp + previous.next = node + node.prev = previous + self.rear.prev = node + node.next = self.rear node.freq += 1 self._position_node(node) - def _position_node(self, node: DoubleLinkedListNode) -> None: - while node.prev.key and node.prev.freq > node.freq: - node1, node2 = node, node.prev - node1.prev, node2.next = node2.prev, node1.prev - node1.next, node2.prev = node2, node1 + def _position_node(self, node: DoubleLinkedListNode[T, U]) -> None: + """ + Moves node forward to maintain invariant of sort by freq value + """ + + while node.prev is not None and node.prev.freq > node.freq: + # swap node with previous node + previous_node = node.prev - def remove(self, node: DoubleLinkedListNode) -> DoubleLinkedListNode: + node.prev = previous_node.prev + previous_node.next = node.prev + node.next = previous_node + previous_node.prev = node + + def remove( + self, node: DoubleLinkedListNode[T, U] + ) -> DoubleLinkedListNode[T, U] | None: """ Removes and returns the given node from the list + + Returns None if node.prev or node.next is None """ - temp_last, temp_next = node.prev, node.next - node.prev, 
node.next = None, None - temp_last.next, temp_next.prev = temp_next, temp_last + if node.prev is None or node.next is None: + return None + + node.prev.next = node.next + node.next.prev = node.prev + node.prev = None + node.next = None return node -class LFUCache: +class LFUCache(Generic[T, U]): """ LFU Cache to store a given capacity of data. Can be used as a stand-alone object or as a function decorator. @@ -66,9 +170,11 @@ class LFUCache: >>> cache.get(1) 1 >>> cache.set(3, 3) - >>> cache.get(2) # None is returned + >>> cache.get(2) is None + True >>> cache.set(4, 4) - >>> cache.get(1) # None is returned + >>> cache.get(1) is None + True >>> cache.get(3) 3 >>> cache.get(4) @@ -89,15 +195,15 @@ class LFUCache: """ # class variable to map the decorator functions to their respective instance - decorator_function_to_instance_map = {} + decorator_function_to_instance_map: dict[Callable[[T], U], LFUCache[T, U]] = {} def __init__(self, capacity: int): - self.list = DoubleLinkedList() + self.list: DoubleLinkedList[T, U] = DoubleLinkedList() self.capacity = capacity self.num_keys = 0 self.hits = 0 self.miss = 0 - self.cache = {} + self.cache: dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__(self) -> str: """ @@ -110,40 +216,57 @@ def __repr__(self) -> str: f"capacity={self.capacity}, current_size={self.num_keys})" ) - def __contains__(self, key: int) -> bool: + def __contains__(self, key: T) -> bool: """ >>> cache = LFUCache(1) + >>> 1 in cache False + >>> cache.set(1, 1) >>> 1 in cache True """ + return key in self.cache - def get(self, key: int) -> int | None: + def get(self, key: T) -> U | None: """ Returns the value for the input key and updates the Double Linked List. Returns - None if key is not present in cache + Returns None if key is not present in cache """ if key in self.cache: self.hits += 1 - self.list.add(self.list.remove(self.cache[key])) - return self.cache[key].val + value_node: DoubleLinkedListNode[T, U] = self.cache[key] + node = self.list.remove(self.cache[key]) + assert node == value_node + + # node is guaranteed not None because it is in self.cache + assert node is not None + self.list.add(node) + return node.val self.miss += 1 return None - def set(self, key: int, value: int) -> None: + def set(self, key: T, value: U) -> None: """ Sets the value for the input key and updates the Double Linked List """ if key not in self.cache: if self.num_keys >= self.capacity: - key_to_delete = self.list.head.next.key - self.list.remove(self.cache[key_to_delete]) - del self.cache[key_to_delete] + # delete first node when over capacity + first_node = self.list.head.next + + # guaranteed to have a non-None first node when num_keys > 0 + # explain to type checker via assertions + assert first_node is not None + assert first_node.key is not None + assert self.list.remove(first_node) is not None + # first_node guaranteed to be in list + + del self.cache[first_node.key] self.num_keys -= 1 self.cache[key] = DoubleLinkedListNode(key, value) self.list.add(self.cache[key]) @@ -151,32 +274,35 @@ def set(self, key: int, value: int) -> None: else: node = self.list.remove(self.cache[key]) + assert node is not None # node guaranteed to be in list node.val = value self.list.add(node) - @staticmethod - def decorator(size: int = 128): + @classmethod + def decorator( + cls: type[LFUCache[T, U]], size: int = 128 + ) -> Callable[[Callable[[T], U]], Callable[..., U]]: """ Decorator version of LFU Cache + + Decorated function must be function of T -> U """ - def cache_decorator_inner(func: Callable): - def 
cache_decorator_wrapper(*args, **kwargs): - if func not in LFUCache.decorator_function_to_instance_map: - LFUCache.decorator_function_to_instance_map[func] = LFUCache(size) + def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]: + def cache_decorator_wrapper(*args: T) -> U: + if func not in cls.decorator_function_to_instance_map: + cls.decorator_function_to_instance_map[func] = LFUCache(size) - result = LFUCache.decorator_function_to_instance_map[func].get(args[0]) + result = cls.decorator_function_to_instance_map[func].get(args[0]) if result is None: - result = func(*args, **kwargs) - LFUCache.decorator_function_to_instance_map[func].set( - args[0], result - ) + result = func(*args) + cls.decorator_function_to_instance_map[func].set(args[0], result) return result - def cache_info(): - return LFUCache.decorator_function_to_instance_map[func] + def cache_info() -> LFUCache[T, U]: + return cls.decorator_function_to_instance_map[func] - cache_decorator_wrapper.cache_info = cache_info + setattr(cache_decorator_wrapper, "cache_info", cache_info) return cache_decorator_wrapper diff --git a/other/lru_cache.py b/other/lru_cache.py index b74c0a45caf9..98051f89db4f 100644 --- a/other/lru_cache.py +++ b/other/lru_cache.py @@ -1,52 +1,147 @@ from __future__ import annotations -from typing import Callable +from typing import Callable, Generic, TypeVar +T = TypeVar("T") +U = TypeVar("U") -class DoubleLinkedListNode: + +class DoubleLinkedListNode(Generic[T, U]): """ Double Linked List Node built specifically for LRU Cache + + >>> DoubleLinkedListNode(1,1) + Node: key: 1, val: 1, has next: False, has prev: False """ - def __init__(self, key: int, val: int): + def __init__(self, key: T | None, val: U | None): self.key = key self.val = val - self.next = None - self.prev = None + self.next: DoubleLinkedListNode[T, U] | None = None + self.prev: DoubleLinkedListNode[T, U] | None = None + + def __repr__(self) -> str: + return "Node: key: {}, val: {}, has next: {}, has prev: {}".format( + self.key, self.val, self.next is not None, self.prev is not None + ) -class DoubleLinkedList: +class DoubleLinkedList(Generic[T, U]): """ Double Linked List built specifically for LRU Cache + + >>> dll: DoubleLinkedList = DoubleLinkedList() + >>> dll + DoubleLinkedList, + Node: key: None, val: None, has next: True, has prev: False, + Node: key: None, val: None, has next: False, has prev: True + + >>> first_node = DoubleLinkedListNode(1,10) + >>> first_node + Node: key: 1, val: 10, has next: False, has prev: False + + + >>> dll.add(first_node) + >>> dll + DoubleLinkedList, + Node: key: None, val: None, has next: True, has prev: False, + Node: key: 1, val: 10, has next: True, has prev: True, + Node: key: None, val: None, has next: False, has prev: True + + >>> # node is mutated + >>> first_node + Node: key: 1, val: 10, has next: True, has prev: True + + >>> second_node = DoubleLinkedListNode(2,20) + >>> second_node + Node: key: 2, val: 20, has next: False, has prev: False + + >>> dll.add(second_node) + >>> dll + DoubleLinkedList, + Node: key: None, val: None, has next: True, has prev: False, + Node: key: 1, val: 10, has next: True, has prev: True, + Node: key: 2, val: 20, has next: True, has prev: True, + Node: key: None, val: None, has next: False, has prev: True + + >>> removed_node = dll.remove(first_node) + >>> assert removed_node == first_node + >>> dll + DoubleLinkedList, + Node: key: None, val: None, has next: True, has prev: False, + Node: key: 2, val: 20, has next: True, has prev: True, + Node: key: 
None, val: None, has next: False, has prev: True + + + >>> # Attempt to remove node not on list + >>> removed_node = dll.remove(first_node) + >>> removed_node is None + True + + >>> # Attempt to remove head or rear + >>> dll.head + Node: key: None, val: None, has next: True, has prev: False + >>> dll.remove(dll.head) is None + True + + >>> # Attempt to remove head or rear + >>> dll.rear + Node: key: None, val: None, has next: False, has prev: True + >>> dll.remove(dll.rear) is None + True + + """ - def __init__(self): - self.head = DoubleLinkedListNode(None, None) - self.rear = DoubleLinkedListNode(None, None) + def __init__(self) -> None: + self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None) + self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None) self.head.next, self.rear.prev = self.rear, self.head - def add(self, node: DoubleLinkedListNode) -> None: + def __repr__(self) -> str: + rep = ["DoubleLinkedList"] + node = self.head + while node.next is not None: + rep.append(str(node)) + node = node.next + rep.append(str(self.rear)) + return ",\n ".join(rep) + + def add(self, node: DoubleLinkedListNode[T, U]) -> None: """ Adds the given node to the end of the list (before rear) """ - temp = self.rear.prev - temp.next, node.prev = node, temp - self.rear.prev, node.next = node, self.rear + previous = self.rear.prev + + # All nodes other than self.head are guaranteed to have non-None previous + assert previous is not None + + previous.next = node + node.prev = previous + self.rear.prev = node + node.next = self.rear - def remove(self, node: DoubleLinkedListNode) -> DoubleLinkedListNode: + def remove( + self, node: DoubleLinkedListNode[T, U] + ) -> DoubleLinkedListNode[T, U] | None: """ Removes and returns the given node from the list + + Returns None if node.prev or node.next is None """ - temp_last, temp_next = node.prev, node.next - node.prev, node.next = None, None - temp_last.next, temp_next.prev = temp_next, temp_last + if node.prev is None or node.next is None: + return None + node.prev.next = node.next + node.next.prev = node.prev + node.prev = None + node.next = None return node -class LRUCache: +class LRUCache(Generic[T, U]): """ LRU Cache to store a given capacity of data. Can be used as a stand-alone object or as a function decorator. 
@@ -54,19 +149,41 @@ class LRUCache: >>> cache = LRUCache(2) >>> cache.set(1, 1) - >>> cache.set(2, 2) - >>> cache.get(1) 1 + >>> cache.list + DoubleLinkedList, + Node: key: None, val: None, has next: True, has prev: False, + Node: key: 2, val: 2, has next: True, has prev: True, + Node: key: 1, val: 1, has next: True, has prev: True, + Node: key: None, val: None, has next: False, has prev: True + + >>> cache.cache # doctest: +NORMALIZE_WHITESPACE + {1: Node: key: 1, val: 1, has next: True, has prev: True, \ + 2: Node: key: 2, val: 2, has next: True, has prev: True} + >>> cache.set(3, 3) - >>> cache.get(2) # None returned + >>> cache.list + DoubleLinkedList, + Node: key: None, val: None, has next: True, has prev: False, + Node: key: 1, val: 1, has next: True, has prev: True, + Node: key: 3, val: 3, has next: True, has prev: True, + Node: key: None, val: None, has next: False, has prev: True + + >>> cache.cache # doctest: +NORMALIZE_WHITESPACE + {1: Node: key: 1, val: 1, has next: True, has prev: True, \ + 3: Node: key: 3, val: 3, has next: True, has prev: True} + + >>> cache.get(2) is None + True >>> cache.set(4, 4) - >>> cache.get(1) # None returned + >>> cache.get(1) is None + True >>> cache.get(3) 3 @@ -91,15 +208,15 @@ class LRUCache: """ # class variable to map the decorator functions to their respective instance - decorator_function_to_instance_map = {} + decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__(self, capacity: int): - self.list = DoubleLinkedList() + self.list: DoubleLinkedList[T, U] = DoubleLinkedList() self.capacity = capacity self.num_keys = 0 self.hits = 0 self.miss = 0 - self.cache = {} + self.cache: dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__(self) -> str: """ @@ -112,7 +229,7 @@ def __repr__(self) -> str: f"capacity={self.capacity}, current size={self.num_keys})" ) - def __contains__(self, key: int) -> bool: + def __contains__(self, key: T) -> bool: """ >>> cache = LRUCache(1) @@ -127,62 +244,82 @@ def __contains__(self, key: int) -> bool: return key in self.cache - def get(self, key: int) -> int | None: + def get(self, key: T) -> U | None: """ - Returns the value for the input key and updates the Double Linked List. Returns - None if key is not present in cache + Returns the value for the input key and updates the Double Linked List. 
+ Returns None if key is not present in cache """ + # Note: pythonic interface would throw KeyError rather than return None if key in self.cache: self.hits += 1 - self.list.add(self.list.remove(self.cache[key])) - return self.cache[key].val + value_node: DoubleLinkedListNode[T, U] = self.cache[key] + node = self.list.remove(self.cache[key]) + assert node == value_node + + # node is guaranteed not None because it is in self.cache + assert node is not None + self.list.add(node) + return node.val self.miss += 1 return None - def set(self, key: int, value: int) -> None: + def set(self, key: T, value: U) -> None: """ Sets the value for the input key and updates the Double Linked List """ if key not in self.cache: if self.num_keys >= self.capacity: - key_to_delete = self.list.head.next.key - self.list.remove(self.cache[key_to_delete]) - del self.cache[key_to_delete] + # delete first node (oldest) when over capacity + first_node = self.list.head.next + + # guaranteed to have a non-None first node when num_keys > 0 + # explain to type checker via assertions + assert first_node is not None + assert first_node.key is not None + assert ( + self.list.remove(first_node) is not None + ) # node guaranteed to be in list assert node.key is not None + + del self.cache[first_node.key] self.num_keys -= 1 self.cache[key] = DoubleLinkedListNode(key, value) self.list.add(self.cache[key]) self.num_keys += 1 else: + # bump node to the end of the list, update value node = self.list.remove(self.cache[key]) + assert node is not None # node guaranteed to be in list node.val = value self.list.add(node) - @staticmethod - def decorator(size: int = 128): + @classmethod + def decorator( + cls, size: int = 128 + ) -> Callable[[Callable[[T], U]], Callable[..., U]]: """ Decorator version of LRU Cache + + Decorated function must be function of T -> U """ - def cache_decorator_inner(func: Callable): - def cache_decorator_wrapper(*args, **kwargs): - if func not in LRUCache.decorator_function_to_instance_map: - LRUCache.decorator_function_to_instance_map[func] = LRUCache(size) + def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]: + def cache_decorator_wrapper(*args: T) -> U: + if func not in cls.decorator_function_to_instance_map: + cls.decorator_function_to_instance_map[func] = LRUCache(size) - result = LRUCache.decorator_function_to_instance_map[func].get(args[0]) + result = cls.decorator_function_to_instance_map[func].get(args[0]) if result is None: - result = func(*args, **kwargs) - LRUCache.decorator_function_to_instance_map[func].set( - args[0], result - ) + result = func(*args) + cls.decorator_function_to_instance_map[func].set(args[0], result) return result - def cache_info(): - return LRUCache.decorator_function_to_instance_map[func] + def cache_info() -> LRUCache[T, U]: + return cls.decorator_function_to_instance_map[func] - cache_decorator_wrapper.cache_info = cache_info + setattr(cache_decorator_wrapper, "cache_info", cache_info) return cache_decorator_wrapper From 6314195bb1e6001ddd0a11a59c41f1d0b9eeb722 Mon Sep 17 00:00:00 2001 From: Leoriem-code <73761711+Leoriem-code@users.noreply.github.com> Date: Thu, 11 Nov 2021 14:39:54 +0100 Subject: [PATCH 0410/1543] Add README files 2/8 (#5766) * add 5 README files * empty commit to (hopefully) get rid of the `test-are-failling` label * Update ciphers/README.md Co-authored-by: John Law * Update conversions/README.md Co-authored-by: John Law * Update cellular_automata/README.md Co-authored-by: John Law * Update computer_vision/README.md Co-authored-by: 
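A hedged usage sketch of the decorator interface annotated above; the same pattern applies to both `LFUCache` and `LRUCache`. The decorated function and the cache size are illustrative and not part of the repository, and the exact `cache_info()` output is only an example; at runtime `cache_info` is attached to the wrapper via `setattr`, as shown in the diff.

from other.lru_cache import LRUCache  # module path as in the patch above


@LRUCache.decorator(size=32)
def square(num: int) -> int:
    return num * num


print(square(4))            # computed on the first call -> 16
print(square(4))            # served from the cache -> 16
print(square.cache_info())  # e.g. CacheInfo(hits=1, misses=1, capacity=32, current size=1)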
John Law * Update conversions/README.md Co-authored-by: John Law * Update compression/README.md Co-authored-by: John Law --- cellular_automata/README.md | 8 ++++++-- ciphers/README.md | 7 +++++++ compression/README.md | 10 ++++++++++ computer_vision/README.md | 10 +++++++--- conversions/README.md | 6 ++++++ 5 files changed, 36 insertions(+), 5 deletions(-) create mode 100644 ciphers/README.md create mode 100644 compression/README.md create mode 100644 conversions/README.md diff --git a/cellular_automata/README.md b/cellular_automata/README.md index c3fa0516f5dd..c5681b33906c 100644 --- a/cellular_automata/README.md +++ b/cellular_automata/README.md @@ -1,4 +1,8 @@ # Cellular Automata -* https://en.wikipedia.org/wiki/Cellular_automaton -* https://mathworld.wolfram.com/ElementaryCellularAutomaton.html +Cellular automata are a way to simulate the behavior of "life", no matter if it is a robot or cell. +They usually follow simple rules but can lead to the creation of complex forms. +The most popular cellular automaton is Conway's [Game of Life](https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life). + +* +* diff --git a/ciphers/README.md b/ciphers/README.md new file mode 100644 index 000000000000..fa09874f38e5 --- /dev/null +++ b/ciphers/README.md @@ -0,0 +1,7 @@ +# Ciphers + +Ciphers are used to protect data from people that are not allowed to have it. They are everywhere on the internet to protect your connections. + +* +* +* diff --git a/compression/README.md b/compression/README.md new file mode 100644 index 000000000000..cf54ea986175 --- /dev/null +++ b/compression/README.md @@ -0,0 +1,10 @@ +# Compression + +Data compression is everywhere, you need it to store data without taking too much space. +Either the compression lose some data (then we talk about lossy compression, such as .jpg) or it does not (and then it is lossless compression, such as .png) + +Lossless compression is mainly used for archive purpose as it allow storing data without losing information about the file archived. On the other hand, lossy compression is used for transfer of file where quality isn't necessarily what is required (i.e: images on Twitter). + +* +* +* diff --git a/computer_vision/README.md b/computer_vision/README.md index 94ee493086cc..8d2f4a130d05 100644 --- a/computer_vision/README.md +++ b/computer_vision/README.md @@ -1,7 +1,11 @@ -### Computer Vision +# Computer Vision + +Computer vision is a field of computer science that works on enabling computers to see, identify and process images in the same way that human does, and provide appropriate output. -Computer vision is a field of computer science that works on enabling computers to see, -identify and process images in the same way that human vision does, and then provide appropriate output. It is like imparting human intelligence and instincts to a computer. Image processing and computer vision are a little different from each other. Image processing means applying some algorithms for transforming image from one form to the other like smoothing, contrasting, stretching, etc. + While computer vision comes from modelling image processing using the techniques of machine learning, computer vision applies machine learning to recognize patterns for interpretation of images (much like the process of visual reasoning of human vision). 
+ +* +* diff --git a/conversions/README.md b/conversions/README.md new file mode 100644 index 000000000000..ec3d931fd828 --- /dev/null +++ b/conversions/README.md @@ -0,0 +1,6 @@ +# Conversion + +Conversion programs convert a type of data, a number from a numerical base or unit into one of another type, base or unit, e.g. binary to decimal, integer to string or foot to meters. + +* +* From 9b9405fdcdc5ba9fc16a53f8ae6be081d4f5582a Mon Sep 17 00:00:00 2001 From: Bartechnika <48760796+Bartechnika@users.noreply.github.com> Date: Sat, 13 Nov 2021 09:32:44 +0000 Subject: [PATCH 0411/1543] Add new persistence algorithm (#4751) * Created new persistence algorithm * Update persistence.py * Added another persistence function --- maths/persistence.py | 82 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 maths/persistence.py diff --git a/maths/persistence.py b/maths/persistence.py new file mode 100644 index 000000000000..607641e67200 --- /dev/null +++ b/maths/persistence.py @@ -0,0 +1,82 @@ +def multiplicative_persistence(num: int) -> int: + """ + Return the persistence of a given number. + + https://en.wikipedia.org/wiki/Persistence_of_a_number + + >>> multiplicative_persistence(217) + 2 + >>> multiplicative_persistence(-1) + Traceback (most recent call last): + ... + ValueError: multiplicative_persistence() does not accept negative values + >>> multiplicative_persistence("long number") + Traceback (most recent call last): + ... + ValueError: multiplicative_persistence() only accepts integral values + """ + + if not isinstance(num, int): + raise ValueError("multiplicative_persistence() only accepts integral values") + if num < 0: + raise ValueError("multiplicative_persistence() does not accept negative values") + + steps = 0 + num_string = str(num) + + while len(num_string) != 1: + numbers = [int(i) for i in num_string] + + total = 1 + for i in range(0, len(numbers)): + total *= numbers[i] + + num_string = str(total) + + steps += 1 + return steps + + +def additive_persistence(num: int) -> int: + """ + Return the persistence of a given number. + + https://en.wikipedia.org/wiki/Persistence_of_a_number + + >>> additive_persistence(199) + 3 + >>> additive_persistence(-1) + Traceback (most recent call last): + ... + ValueError: additive_persistence() does not accept negative values + >>> additive_persistence("long number") + Traceback (most recent call last): + ... + ValueError: additive_persistence() only accepts integral values + """ + + if not isinstance(num, int): + raise ValueError("additive_persistence() only accepts integral values") + if num < 0: + raise ValueError("additive_persistence() does not accept negative values") + + steps = 0 + num_string = str(num) + + while len(num_string) != 1: + numbers = [int(i) for i in num_string] + + total = 0 + for i in range(0, len(numbers)): + total += numbers[i] + + num_string = str(total) + + steps += 1 + return steps + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 551c65766d1594ecd7e127ac288ed23b9e89b8c3 Mon Sep 17 00:00:00 2001 From: Andrew Grangaard Date: Tue, 16 Nov 2021 06:01:17 -0800 Subject: [PATCH 0412/1543] [Mypy] fix other/least_recently_used (#5814) * makes LRUCache constructor concrete * fixes bug in dq_removal in other/least_recently_used + deque.remove() operates by value not index * [mypy] Annotates other/least_recently_used over generic type + clean-up: rename key_reference to match type. 
* [mypy] updates example to demonstrate LRUCache with complex type * Adds doctest to other/least_recently_used * mypy.ini: Remove exclude = (other/least_recently_used.py) * Various mypy configs * Delete mypy.ini * Add mypy to .pre-commit-config.yaml * mypy --ignore-missing-imports --install-types --non-interactive . * mypy v0.910 * Pillow=8.3.7 * Pillow==8.3.7 * Pillow==8.3.2 * Update .pre-commit-config.yaml * Update requirements.txt * Update pre-commit.yml * --install-types # See mirrors-mypy README.md Co-authored-by: Christian Clauss --- .github/workflows/build.yml | 2 +- .github/workflows/pre-commit.yml | 2 + .pre-commit-config.yaml | 23 +++++++---- mypy.ini | 5 --- other/least_recently_used.py | 70 +++++++++++++++++++++++--------- requirements.txt | 1 - 6 files changed, 68 insertions(+), 35 deletions(-) delete mode 100644 mypy.ini diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e5f8d6b39a7b..df000cda5997 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,7 +21,7 @@ jobs: run: | python -m pip install --upgrade pip setuptools six wheel python -m pip install mypy pytest-cov -r requirements.txt - - run: mypy . # See `mypy.ini` for configuration settings. + - run: mypy --ignore-missing-imports --install-types --non-interactive . - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 27a5a97c0b6c..19196098b1c1 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -14,6 +14,8 @@ jobs: ~/.cache/pip key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} - uses: actions/setup-python@v2 + with: + python-version: 3.9 - uses: psf/black@21.4b0 - name: Install pre-commit run: | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e60003051365..0ebd6dfa0d7e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -12,22 +12,26 @@ repos: data_structures/heap/binomial_heap.py )$ - id: requirements-txt-fixer + - repo: https://github.com/psf/black rev: 21.4b0 hooks: - id: black + - repo: https://github.com/PyCQA/isort rev: 5.8.0 hooks: - id: isort args: - --profile=black + - repo: https://github.com/asottile/pyupgrade rev: v2.29.0 hooks: - id: pyupgrade args: - --py39-plus + - repo: https://gitlab.com/pycqa/flake8 rev: 3.9.1 hooks: @@ -36,13 +40,16 @@ repos: - --ignore=E203,W503 - --max-complexity=25 - --max-line-length=88 -# FIXME: fix mypy errors and then uncomment this -# - repo: https://github.com/pre-commit/mirrors-mypy -# rev: v0.782 -# hooks: -# - id: mypy -# args: -# - --ignore-missing-imports + + - repo: https://github.com/pre-commit/mirrors-mypy + rev: v0.910 + hooks: + - id: mypy + args: + - --ignore-missing-imports + - --install-types # See mirrors-mypy README.md + - --non-interactive + - repo: https://github.com/codespell-project/codespell rev: v2.0.0 hooks: @@ -50,13 +57,13 @@ repos: args: - --ignore-words-list=ans,crate,fo,followings,hist,iff,mater,secant,som,tim - --skip="./.*,./strings/dictionary.txt,./strings/words.txt,./project_euler/problem_022/p022_names.txt" - - --quiet-level=2 exclude: | (?x)^( strings/dictionary.txt | strings/words.txt | project_euler/problem_022/p022_names.txt )$ + - repo: local hooks: - id: validate-filenames diff --git a/mypy.ini b/mypy.ini deleted file mode 100644 index 7dbc7c4ffc80..000000000000 --- 
a/mypy.ini +++ /dev/null @@ -1,5 +0,0 @@ -[mypy] -ignore_missing_imports = True -install_types = True -non_interactive = True -exclude = (other/least_recently_used.py) diff --git a/other/least_recently_used.py b/other/least_recently_used.py index 9d6b6d7cb6a6..cb692bb1b1c0 100644 --- a/other/least_recently_used.py +++ b/other/least_recently_used.py @@ -1,20 +1,45 @@ +from __future__ import annotations + import sys from collections import deque +from typing import Generic, TypeVar + +T = TypeVar("T") + + +class LRUCache(Generic[T]): + """ + Page Replacement Algorithm, Least Recently Used (LRU) Caching. + + >>> lru_cache: LRUCache[str | int] = LRUCache(4) + >>> lru_cache.refer("A") + >>> lru_cache.refer(2) + >>> lru_cache.refer(3) + + >>> lru_cache + LRUCache(4) => [3, 2, 'A'] + >>> lru_cache.refer("A") + >>> lru_cache + LRUCache(4) => ['A', 3, 2] -class LRUCache: - """Page Replacement Algorithm, Least Recently Used (LRU) Caching.""" + >>> lru_cache.refer(4) + >>> lru_cache.refer(5) + >>> lru_cache + LRUCache(4) => [5, 4, 'A', 3] - dq_store = object() # Cache store of keys - key_reference_map = object() # References of the keys in cache + """ + + dq_store: deque[T] # Cache store of keys + key_reference: set[T] # References of the keys in cache _MAX_CAPACITY: int = 10 # Maximum capacity of cache - def __init__(self, n: int): + def __init__(self, n: int) -> None: """Creates an empty store and map for the keys. The LRUCache is set the size n. """ self.dq_store = deque() - self.key_reference_map = set() + self.key_reference = set() if not n: LRUCache._MAX_CAPACITY = sys.maxsize elif n < 0: @@ -22,41 +47,46 @@ def __init__(self, n: int): else: LRUCache._MAX_CAPACITY = n - def refer(self, x): + def refer(self, x: T) -> None: """ Looks for a page in the cache store and adds reference to the set. Remove the least recently used key if the store is full. Update store to reflect recent access. """ - if x not in self.key_reference_map: + if x not in self.key_reference: if len(self.dq_store) == LRUCache._MAX_CAPACITY: last_element = self.dq_store.pop() - self.key_reference_map.remove(last_element) + self.key_reference.remove(last_element) else: - index_remove = 0 - for idx, key in enumerate(self.dq_store): - if key == x: - index_remove = idx - break - self.dq_store.remove(index_remove) + self.dq_store.remove(x) self.dq_store.appendleft(x) - self.key_reference_map.add(x) + self.key_reference.add(x) - def display(self): + def display(self) -> None: """ Prints all the elements in the store. 
""" for k in self.dq_store: print(k) + def __repr__(self) -> str: + return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}" + if __name__ == "__main__": - lru_cache = LRUCache(4) - lru_cache.refer(1) + import doctest + + doctest.testmod() + + lru_cache: LRUCache[str | int] = LRUCache(4) + lru_cache.refer("A") lru_cache.refer(2) lru_cache.refer(3) - lru_cache.refer(1) + lru_cache.refer("A") lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() + + print(lru_cache) + assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]" diff --git a/requirements.txt b/requirements.txt index e01d87cffabe..9a26dcc21f36 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,5 +16,4 @@ sympy tensorflow texttable tweepy -types-requests xgboost From d848bfbf3229f2a3240a298a583f6b80a9efc1fd Mon Sep 17 00:00:00 2001 From: Navaneeth Sharma <63489382+Navaneeth-Sharma@users.noreply.github.com> Date: Wed, 17 Nov 2021 04:28:47 +0530 Subject: [PATCH 0413/1543] Adding Pooling Algorithms (#5826) * adding pooling algorithms * pooling.py: Adding pooling algorithms to computer vision pull_number= * pooling.py: Adding pooling algorithms to computer vision * pooling_functions.py: Adding pooling algorithms to computer vision * pooling.py: Adding Pooling Algorithms * pooling_functions.py Add and Update * Update pooling_functions.py * Update computer_vision/pooling_functions.py Co-authored-by: Christian Clauss * Update computer_vision/pooling_functions.py Co-authored-by: Christian Clauss * Update computer_vision/pooling_functions.py Co-authored-by: Christian Clauss * Update computer_vision/pooling_functions.py Co-authored-by: Christian Clauss * Update pooling_functions.py * Formatting pooling.py Co-authored-by: Christian Clauss --- computer_vision/pooling_functions.py | 135 +++++++++++++++++++++++++++ 1 file changed, 135 insertions(+) create mode 100644 computer_vision/pooling_functions.py diff --git a/computer_vision/pooling_functions.py b/computer_vision/pooling_functions.py new file mode 100644 index 000000000000..09beabcba82d --- /dev/null +++ b/computer_vision/pooling_functions.py @@ -0,0 +1,135 @@ +# Source : https://computersciencewiki.org/index.php/Max-pooling_/_Pooling +# Importing the libraries +import numpy as np +from PIL import Image + + +# Maxpooling Function +def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray: + """ + This function is used to perform maxpooling on the input array of 2D matrix(image) + Args: + arr: numpy array + size: size of pooling matrix + stride: the number of pixels shifts over the input matrix + Returns: + numpy array of maxpooled matrix + Sample Input Output: + >>> maxpooling([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]], 2, 2) + array([[ 6., 8.], + [14., 16.]]) + >>> maxpooling([[147, 180, 122],[241, 76, 32],[126, 13, 157]], 2, 1) + array([[241., 180.], + [241., 157.]]) + """ + arr = np.array(arr) + if arr.shape[0] != arr.shape[1]: + raise ValueError("The input array is not a square matrix") + i = 0 + j = 0 + mat_i = 0 + mat_j = 0 + + # compute the shape of the output matrix + maxpool_shape = (arr.shape[0] - size) // stride + 1 + # initialize the output matrix with zeros of shape maxpool_shape + updated_arr = np.zeros((maxpool_shape, maxpool_shape)) + + while i < arr.shape[0]: + if i + size > arr.shape[0]: + # if the end of the matrix is reached, break + break + while j < arr.shape[1]: + # if the end of the matrix is reached, break + if j + size > arr.shape[1]: + break + # compute the maximum of the pooling matrix + updated_arr[mat_i][mat_j] = 
np.max(arr[i : i + size, j : j + size]) + # shift the pooling matrix by stride of column pixels + j += stride + mat_j += 1 + + # shift the pooling matrix by stride of row pixels + i += stride + mat_i += 1 + + # reset the column index to 0 + j = 0 + mat_j = 0 + + return updated_arr + + +# Averagepooling Function +def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray: + """ + This function is used to perform avgpooling on the input array of 2D matrix(image) + Args: + arr: numpy array + size: size of pooling matrix + stride: the number of pixels shifts over the input matrix + Returns: + numpy array of avgpooled matrix + Sample Input Output: + >>> avgpooling([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]], 2, 2) + array([[ 3., 5.], + [11., 13.]]) + >>> avgpooling([[147, 180, 122],[241, 76, 32],[126, 13, 157]], 2, 1) + array([[161., 102.], + [114., 69.]]) + """ + arr = np.array(arr) + if arr.shape[0] != arr.shape[1]: + raise ValueError("The input array is not a square matrix") + i = 0 + j = 0 + mat_i = 0 + mat_j = 0 + + # compute the shape of the output matrix + avgpool_shape = (arr.shape[0] - size) // stride + 1 + # initialize the output matrix with zeros of shape avgpool_shape + updated_arr = np.zeros((avgpool_shape, avgpool_shape)) + + while i < arr.shape[0]: + # if the end of the matrix is reached, break + if i + size > arr.shape[0]: + break + while j < arr.shape[1]: + # if the end of the matrix is reached, break + if j + size > arr.shape[1]: + break + # compute the average of the pooling matrix + updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size])) + # shift the pooling matrix by stride of column pixels + j += stride + mat_j += 1 + + # shift the pooling matrix by stride of row pixels + i += stride + mat_i += 1 + # reset the column index to 0 + j = 0 + mat_j = 0 + + return updated_arr + + +# Main Function +if __name__ == "__main__": + from doctest import testmod + + testmod(name="avgpooling", verbose=True) + + # Loading the image + image = Image.open("path_to_image") + + # Converting the image to numpy array and maxpooling, displaying the result + # Ensure that the image is a square matrix + + Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() + + # Converting the image to numpy array and averagepooling, displaying the result + # Ensure that the image is a square matrix + + Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show() From 1ae5abfc3ca5dcf89b7e378735ceb9ef41769cbf Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 17 Nov 2021 04:43:02 +0100 Subject: [PATCH 0414/1543] Replace typing.optional with new annotations syntax (#5829) * Replace typing.optional with new annotations syntax * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + data_structures/linked_list/__init__.py | 5 +++-- data_structures/linked_list/circular_linked_list.py | 6 ++++-- data_structures/linked_list/has_loop.py | 6 ++++-- .../linked_list/middle_element_of_linked_list.py | 4 ++-- maths/pollard_rho.py | 5 +++-- 6 files changed, 17 insertions(+), 10 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index c46d81ab75bc..16244e6ad08e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -525,6 +525,7 @@ * [Perfect Cube](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_cube.py) * [Perfect Number](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_number.py) * [Perfect 
Square](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_square.py) + * [Persistence](https://github.com/TheAlgorithms/Python/blob/master/maths/persistence.py) * [Pi Monte Carlo Estimation](https://github.com/TheAlgorithms/Python/blob/master/maths/pi_monte_carlo_estimation.py) * [Pollard Rho](https://github.com/TheAlgorithms/Python/blob/master/maths/pollard_rho.py) * [Polynomial Evaluation](https://github.com/TheAlgorithms/Python/blob/master/maths/polynomial_evaluation.py) diff --git a/data_structures/linked_list/__init__.py b/data_structures/linked_list/__init__.py index 8ae171d71035..6ba660231ae1 100644 --- a/data_structures/linked_list/__init__.py +++ b/data_structures/linked_list/__init__.py @@ -5,8 +5,9 @@ head node gives us access of the complete list - Last node: points to null """ +from __future__ import annotations -from typing import Any, Optional +from typing import Any class Node: @@ -17,7 +18,7 @@ def __init__(self, item: Any, next: Any) -> None: class LinkedList: def __init__(self) -> None: - self.head: Optional[Node] = None + self.head: Node | None = None self.size = 0 def add(self, item: Any) -> None: diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 42794ba793a7..121d934c6957 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -1,10 +1,12 @@ -from typing import Any, Iterator, Optional +from __future__ import annotations + +from typing import Any, Iterator class Node: def __init__(self, data: Any): self.data: Any = data - self.next: Optional[Node] = None + self.next: Node | None = None class CircularLinkedList: diff --git a/data_structures/linked_list/has_loop.py b/data_structures/linked_list/has_loop.py index a155ab4c7c89..bc06ffe150e8 100644 --- a/data_structures/linked_list/has_loop.py +++ b/data_structures/linked_list/has_loop.py @@ -1,4 +1,6 @@ -from typing import Any, Optional +from __future__ import annotations + +from typing import Any class ContainsLoopError(Exception): @@ -8,7 +10,7 @@ class ContainsLoopError(Exception): class Node: def __init__(self, data: Any) -> None: self.data: Any = data - self.next_node: Optional[Node] = None + self.next_node: Node | None = None def __iter__(self): node = self diff --git a/data_structures/linked_list/middle_element_of_linked_list.py b/data_structures/linked_list/middle_element_of_linked_list.py index 296696897715..0c6250f3b731 100644 --- a/data_structures/linked_list/middle_element_of_linked_list.py +++ b/data_structures/linked_list/middle_element_of_linked_list.py @@ -1,4 +1,4 @@ -from typing import Optional +from __future__ import annotations class Node: @@ -17,7 +17,7 @@ def push(self, new_data: int) -> int: self.head = new_node return self.head.data - def middle_element(self) -> Optional[int]: + def middle_element(self) -> int | None: """ >>> link = LinkedList() >>> link.middle_element() diff --git a/maths/pollard_rho.py b/maths/pollard_rho.py index df020c63f2f9..0fc80cd4280b 100644 --- a/maths/pollard_rho.py +++ b/maths/pollard_rho.py @@ -1,5 +1,6 @@ +from __future__ import annotations + from math import gcd -from typing import Union def pollard_rho( @@ -7,7 +8,7 @@ def pollard_rho( seed: int = 2, step: int = 1, attempts: int = 3, -) -> Union[int, None]: +) -> int | None: """ Use Pollard's Rho algorithm to return a nontrivial factor of ``num``. The returned factor may be composite and require further factorization. 
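The patch above replaces typing.Optional and typing.Union annotations with the X | None union syntax. A minimal standalone sketch of the same idiom, with illustrative class and function names rather than code taken verbatim from the repository:

from __future__ import annotations  # PEP 563: annotations stay unevaluated, so | works before 3.10

from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        # previously written as: Optional[Node]
        self.next_node: Node | None = None


def find(head: Node | None, target: Any) -> Node | None:
    """Return the first node holding ``target``, or None if it is absent."""
    while head is not None:
        if head.data == target:
            return head
        head = head.next_node
    return None

The future import matters here because PEP 604 unions (X | Y) only become runtime-evaluable in Python 3.10, while the CI and pre-commit configuration in this history still target Python 3.9.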
From f4a16f607b996b030347fadf011b57320b5624b2 Mon Sep 17 00:00:00 2001 From: Goodness Ezeh <88127727+GoodnessEzeh@users.noreply.github.com> Date: Wed, 24 Nov 2021 23:23:44 +0900 Subject: [PATCH 0415/1543] Lowercase g --> Capital G (#5845) * Updated the comments in the code * Update .gitignore * Update .gitignore * Update .gitignore Co-authored-by: Christian Clauss --- other/password_generator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/other/password_generator.py b/other/password_generator.py index cf7250c814ff..c09afd7e6125 100644 --- a/other/password_generator.py +++ b/other/password_generator.py @@ -1,4 +1,4 @@ -"""Password generator allows you to generate a random password of length N.""" +"""Password Generator allows you to generate a random password of length N.""" from random import choice, shuffle from string import ascii_letters, digits, punctuation @@ -24,7 +24,7 @@ def password_generator(length=8): # ctbi= characters that must be in password # i= how many letters or characters the password length will be def alternative_password_generator(ctbi, i): - # Password generator = full boot with random_number, random_letters, and + # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... i = i - len(ctbi) From 65d3cfff2fb6f75999fff7918bccb463593f939d Mon Sep 17 00:00:00 2001 From: Jaydeep Das Date: Sun, 28 Nov 2021 23:50:18 +0530 Subject: [PATCH 0416/1543] Added memoization function in fibonacci (#5856) * Added memoization function in fibonacci * Minor changes --- maths/fibonacci.py | 48 ++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index 9b193b74a827..ca4f4a2360a3 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -1,13 +1,19 @@ # fibonacci.py """ -Calculates the Fibonacci sequence using iteration, recursion, and a simplified -form of Binet's formula +Calculates the Fibonacci sequence using iteration, recursion, memoization, +and a simplified form of Binet's formula -NOTE 1: the iterative and recursive functions are more accurate than the Binet's -formula function because the iterative function doesn't use floats +NOTE 1: the iterative, recursive, memoization functions are more accurate than +the Binet's formula function because the Binet formula function uses floats NOTE 2: the Binet's formula function is much more limited in the size of inputs that it can handle due to the size limitations of Python floats + +RESULTS: (n = 20) +fib_iterative runtime: 0.0055 ms +fib_recursive runtime: 6.5627 ms +fib_memoization runtime: 0.0107 ms +fib_binet runtime: 0.0174 ms """ from math import sqrt @@ -86,6 +92,39 @@ def fib_recursive_term(i: int) -> int: return [fib_recursive_term(i) for i in range(n + 1)] +def fib_memoization(n: int) -> list[int]: + """ + Calculates the first n (0-indexed) Fibonacci numbers using memoization + >>> fib_memoization(0) + [0] + >>> fib_memoization(1) + [0, 1] + >>> fib_memoization(5) + [0, 1, 1, 2, 3, 5] + >>> fib_memoization(10) + [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] + >>> fib_iterative(-1) + Traceback (most recent call last): + ... + Exception: n is negative + """ + if n < 0: + raise Exception("n is negative") + # Cache must be outside recursuive function + # other it will reset every time it calls itself. 
+ cache: dict[int, int] = {0: 0, 1: 1, 2: 1} # Prefilled cache + + def rec_fn_memoized(num: int) -> int: + if num in cache: + return cache[num] + + value = rec_fn_memoized(num - 1) + rec_fn_memoized(num - 2) + cache[num] = value + return value + + return [rec_fn_memoized(i) for i in range(n + 1)] + + def fib_binet(n: int) -> list[int]: """ Calculates the first n (0-indexed) Fibonacci numbers using a simplified form @@ -127,4 +166,5 @@ def fib_binet(n: int) -> list[int]: num = 20 time_func(fib_iterative, num) time_func(fib_recursive, num) + time_func(fib_memoization, num) time_func(fib_binet, num) From 6680e435a7983c3691f2bb9399e675cc5dc632db Mon Sep 17 00:00:00 2001 From: yellowsto <79023119+yellowsto@users.noreply.github.com> Date: Thu, 16 Dec 2021 10:27:15 +0100 Subject: [PATCH 0417/1543] Update merge_insertion_sort.py (#5833) * Update merge_insertion_sort.py Fixes #5774 merge_insertion_sort Co-Authored-By: AilisOsswald <44617437+AilisOsswald@users.noreply.github.com> * Update merge_insertion_sort.py Fixes #5774 merge_insertion_sort Co-Authored-By: AilisOsswald <44617437+AilisOsswald@users.noreply.github.com> * Update merge_insertion_sort.py Fixes #5774 added permutation range from 0 to 4 Co-Authored-By: AilisOsswald <44617437+AilisOsswald@users.noreply.github.com> * Use `all()` Co-authored-by: AilisOsswald <44617437+AilisOsswald@users.noreply.github.com> Co-authored-by: John Law --- sorts/merge_insertion_sort.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/sorts/merge_insertion_sort.py b/sorts/merge_insertion_sort.py index fb71d84a3c14..ecaa535457f4 100644 --- a/sorts/merge_insertion_sort.py +++ b/sorts/merge_insertion_sort.py @@ -30,6 +30,12 @@ def merge_insertion_sort(collection: list[int]) -> list[int]: >>> merge_insertion_sort([-2, -5, -45]) [-45, -5, -2] + + Testing with all permutations on range(0,5): + >>> import itertools + >>> permutations = list(itertools.permutations([0, 1, 2, 3, 4])) + >>> all(merge_insertion_sort(p) == [0, 1, 2, 3, 4] for p in permutations) + True """ def binary_search_insertion(sorted_list, item): @@ -160,7 +166,7 @@ def merge(left, right): """ is_last_odd_item_inserted_before_this_index = False for i in range(len(sorted_list_2d) - 1): - if result[i] == collection[-i]: + if result[i] == collection[-1] and has_last_odd_item: is_last_odd_item_inserted_before_this_index = True pivot = sorted_list_2d[i][1] # If last_odd_item is inserted before the item's index, From 9af2eef9b3761bf51580dedfb6fa7136ca0c5c2c Mon Sep 17 00:00:00 2001 From: RenatoLopes771 <52989307+RenatoLopes771@users.noreply.github.com> Date: Thu, 16 Dec 2021 06:28:31 -0300 Subject: [PATCH 0418/1543] =?UTF-8?q?Improve=20Quine=E2=80=93McCluskey=20a?= =?UTF-8?q?lgorithm=20(#4935)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Syntax improvements (I hope) to boolean algebra * Reverted certain index variables to i * remove extra line on decimal_to_binary * Update quine_mc_cluskey.py Co-authored-by: John Law --- boolean_algebra/quine_mc_cluskey.py | 44 ++++++++++++++--------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/boolean_algebra/quine_mc_cluskey.py b/boolean_algebra/quine_mc_cluskey.py index 0342e5c67753..fb23c8c2e79c 100644 --- a/boolean_algebra/quine_mc_cluskey.py +++ b/boolean_algebra/quine_mc_cluskey.py @@ -1,5 +1,7 @@ from __future__ import annotations +from typing import Sequence + def compare_string(string1: str, string2: str) -> str: """ @@ -9,17 +11,17 @@ def compare_string(string1: str, 
string2: str) -> str: >>> compare_string('0110','1101') 'X' """ - l1 = list(string1) - l2 = list(string2) + list1 = list(string1) + list2 = list(string2) count = 0 - for i in range(len(l1)): - if l1[i] != l2[i]: + for i in range(len(list1)): + if list1[i] != list2[i]: count += 1 - l1[i] = "_" + list1[i] = "_" if count > 1: return "X" else: - return "".join(l1) + return "".join(list1) def check(binary: list[str]) -> list[str]: @@ -28,7 +30,7 @@ def check(binary: list[str]) -> list[str]: ['0.00.01.5'] """ pi = [] - while 1: + while True: check1 = ["$"] * len(binary) temp = [] for i in range(len(binary)): @@ -46,19 +48,18 @@ def check(binary: list[str]) -> list[str]: binary = list(set(temp)) -def decimal_to_binary(no_of_variable: int, minterms: list[float]) -> list[str]: +def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]: """ >>> decimal_to_binary(3,[1.5]) ['0.00.01.5'] """ temp = [] - s = "" - for m in minterms: + for minterm in minterms: + string = "" for i in range(no_of_variable): - s = str(m % 2) + s - m //= 2 - temp.append(s) - s = "" + string = str(minterm % 2) + string + minterm //= 2 + temp.append(string) return temp @@ -70,16 +71,13 @@ def is_for_table(string1: str, string2: str, count: int) -> bool: >>> is_for_table('01_','001',1) False """ - l1 = list(string1) - l2 = list(string2) + list1 = list(string1) + list2 = list(string2) count_n = 0 - for i in range(len(l1)): - if l1[i] != l2[i]: + for i in range(len(list1)): + if list1[i] != list2[i]: count_n += 1 - if count_n == count: - return True - else: - return False + return count_n == count def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]: @@ -108,7 +106,7 @@ def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]: for k in range(len(chart)): chart[k][j] = 0 temp.append(prime_implicants[i]) - while 1: + while True: max_n = 0 rem = -1 count_n = 0 From 7423875cef469c57abdb2be2c2b1b39490141d89 Mon Sep 17 00:00:00 2001 From: Michael Currin <18750745+MichaelCurrin@users.noreply.github.com> Date: Wed, 26 Jan 2022 17:35:51 +0200 Subject: [PATCH 0419/1543] ci: add mkdir step for mypy (#5927) * ci: add mkdir step for mypy * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 4 +++- DIRECTORY.md | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index df000cda5997..4f270ea55d17 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,7 +21,9 @@ jobs: run: | python -m pip install --upgrade pip setuptools six wheel python -m pip install mypy pytest-cov -r requirements.txt - - run: mypy --ignore-missing-imports --install-types --non-interactive . + - run: | + mkdir -p .mypy_cache + mypy --ignore-missing-imports --install-types --non-interactive . - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . 
- if: ${{ success() }} diff --git a/DIRECTORY.md b/DIRECTORY.md index 16244e6ad08e..550920c0fc39 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -114,6 +114,7 @@ * [Harris Corner](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/harris_corner.py) * [Mean Threshold](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/mean_threshold.py) * [Mosaic Augmentation](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/mosaic_augmentation.py) + * [Pooling Functions](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/pooling_functions.py) ## Conversions * [Binary To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_decimal.py) From c15a4d5af6ade25c967a226877ba567166707aac Mon Sep 17 00:00:00 2001 From: Michael Currin <18750745+MichaelCurrin@users.noreply.github.com> Date: Fri, 28 Jan 2022 08:52:42 +0200 Subject: [PATCH 0420/1543] Refactor currency_converter.py (#5917) * Update currency_converter.py * refactor: add types and remove reserved keyword "from" usage * feat: update text * Update web_programming/currency_converter.py Co-authored-by: xcodz-dot <71920621+xcodz-dot@users.noreply.github.com> * Update web_programming/currency_converter.py Co-authored-by: xcodz-dot <71920621+xcodz-dot@users.noreply.github.com> * fix: update currency_converter.py * updating DIRECTORY.md * Update currency_converter.py Co-authored-by: xcodz-dot <71920621+xcodz-dot@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- web_programming/currency_converter.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py index 447595b0b646..6fcc60e8feeb 100644 --- a/web_programming/currency_converter.py +++ b/web_programming/currency_converter.py @@ -10,9 +10,11 @@ URL_BASE = "https://www.amdoren.com/api/currency.php" TESTING = os.getenv("CI", False) API_KEY = os.getenv("AMDOREN_API_KEY", "") -if not API_KEY and not TESTING: - raise KeyError("Please put your API key in an environment variable.") +if not API_KEY and not TESTING: + raise KeyError( + "API key must be provided in the 'AMDOREN_API_KEY' environment variable." 
+ ) # Currency and their description list_of_currencies = """ From 24d3cf82445e3f4a2e5287829395a3bf1353a8a3 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 30 Jan 2022 20:29:54 +0100 Subject: [PATCH 0421/1543] The black formatter is no longer beta (#5960) * The black formatter is no longer beta * pre-commit autoupdate * pre-commit autoupdate * Remove project_euler/problem_145 which is killing our CI tests * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 16 ++-- DIRECTORY.md | 2 - arithmetic_analysis/bisection.py | 4 +- arithmetic_analysis/in_static_equilibrium.py | 2 +- arithmetic_analysis/intersection.py | 2 +- arithmetic_analysis/newton_method.py | 6 +- arithmetic_analysis/newton_raphson.py | 2 +- ciphers/deterministic_miller_rabin.py | 2 +- ciphers/rabin_miller.py | 2 +- ciphers/rsa_cipher.py | 4 +- ciphers/rsa_factorization.py | 2 +- computer_vision/harris_corner.py | 8 +- .../filters/gabor_filter.py | 2 +- digital_image_processing/index_calculation.py | 6 +- electronics/carrier_concentration.py | 4 +- electronics/coulombs_law.py | 6 +- graphics/bezier_curve.py | 2 +- graphs/bidirectional_a_star.py | 2 +- hashes/chaos_machine.py | 4 +- hashes/hamming_code.py | 2 +- hashes/md5.py | 6 +- linear_algebra/src/lib.py | 2 +- machine_learning/k_means_clust.py | 2 +- .../local_weighted_learning.py | 2 +- .../sequential_minimum_optimization.py | 8 +- maths/area.py | 12 +-- maths/area_under_curve.py | 2 +- maths/armstrong_numbers.py | 4 +- maths/basic_maths.py | 4 +- maths/binomial_distribution.py | 2 +- maths/fibonacci.py | 2 +- maths/gaussian.py | 2 +- maths/karatsuba.py | 4 +- maths/monte_carlo.py | 2 +- maths/numerical_integration.py | 2 +- maths/perfect_square.py | 4 +- maths/pi_monte_carlo_estimation.py | 2 +- maths/polynomial_evaluation.py | 2 +- maths/radix2_fft.py | 2 +- maths/segmented_sieve.py | 2 +- maths/sum_of_geometric_progression.py | 2 +- physics/n_body_simulation.py | 6 +- project_euler/problem_006/sol1.py | 4 +- project_euler/problem_007/sol2.py | 2 +- project_euler/problem_009/sol1.py | 4 +- project_euler/problem_010/sol3.py | 2 +- project_euler/problem_016/sol1.py | 4 +- project_euler/problem_016/sol2.py | 2 +- project_euler/problem_023/sol1.py | 2 +- project_euler/problem_027/sol1.py | 2 +- project_euler/problem_028/sol1.py | 2 +- project_euler/problem_029/sol1.py | 2 +- project_euler/problem_048/sol1.py | 2 +- project_euler/problem_050/sol1.py | 2 +- project_euler/problem_051/sol1.py | 2 +- project_euler/problem_056/sol1.py | 2 +- project_euler/problem_062/sol1.py | 2 +- project_euler/problem_063/sol1.py | 2 +- project_euler/problem_064/sol1.py | 2 +- project_euler/problem_069/sol1.py | 2 +- project_euler/problem_077/sol1.py | 2 +- project_euler/problem_086/sol1.py | 2 +- project_euler/problem_092/sol1.py | 2 +- project_euler/problem_097/sol1.py | 2 +- project_euler/problem_101/sol1.py | 18 ++-- project_euler/problem_125/sol1.py | 6 +- project_euler/problem_144/sol1.py | 6 +- project_euler/problem_145/__init__.py | 0 project_euler/problem_145/sol1.py | 87 ------------------- project_euler/problem_173/sol1.py | 4 +- project_euler/problem_180/sol1.py | 2 +- project_euler/problem_188/sol1.py | 2 +- project_euler/problem_203/sol1.py | 4 +- project_euler/problem_205/sol1.py | 2 +- project_euler/problem_207/sol1.py | 2 +- project_euler/problem_234/sol1.py | 8 +- project_euler/problem_301/sol1.py | 4 +- project_euler/problem_551/sol1.py | 6 +- quantum/deutsch_jozsa.py | 2 +- 
searches/hill_climbing.py | 4 +- searches/simulated_annealing.py | 4 +- 81 files changed, 139 insertions(+), 228 deletions(-) delete mode 100644 project_euler/problem_145/__init__.py delete mode 100644 project_euler/problem_145/sol1.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0ebd6dfa0d7e..33069a807cee 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 + rev: v4.1.0 hooks: - id: check-executables-have-shebangs - id: check-yaml @@ -14,26 +14,26 @@ repos: - id: requirements-txt-fixer - repo: https://github.com/psf/black - rev: 21.4b0 + rev: 22.1.0 hooks: - id: black - repo: https://github.com/PyCQA/isort - rev: 5.8.0 + rev: 5.10.1 hooks: - id: isort args: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v2.29.0 + rev: v2.31.0 hooks: - id: pyupgrade args: - --py39-plus - repo: https://gitlab.com/pycqa/flake8 - rev: 3.9.1 + rev: 3.9.2 hooks: - id: flake8 args: @@ -42,7 +42,7 @@ repos: - --max-line-length=88 - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.910 + rev: v0.931 hooks: - id: mypy args: @@ -51,11 +51,11 @@ repos: - --non-interactive - repo: https://github.com/codespell-project/codespell - rev: v2.0.0 + rev: v2.1.0 hooks: - id: codespell args: - - --ignore-words-list=ans,crate,fo,followings,hist,iff,mater,secant,som,tim + - --ignore-words-list=ans,crate,fo,followings,hist,iff,mater,secant,som,sur,tim - --skip="./.*,./strings/dictionary.txt,./strings/words.txt,./project_euler/problem_022/p022_names.txt" exclude: | (?x)^( diff --git a/DIRECTORY.md b/DIRECTORY.md index 550920c0fc39..b5ddb9fcb156 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -856,8 +856,6 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_135/sol1.py) * Problem 144 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_144/sol1.py) - * Problem 145 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_145/sol1.py) * Problem 173 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_173/sol1.py) * Problem 174 diff --git a/arithmetic_analysis/bisection.py b/arithmetic_analysis/bisection.py index 0ef691678702..1feb4a8cf626 100644 --- a/arithmetic_analysis/bisection.py +++ b/arithmetic_analysis/bisection.py @@ -32,7 +32,7 @@ def bisection(function: Callable[[float], float], a: float, b: float) -> float: raise ValueError("could not find root in given interval.") else: mid: float = start + (end - start) / 2.0 - while abs(start - mid) > 10 ** -7: # until precisely equals to 10^-7 + while abs(start - mid) > 10**-7: # until precisely equals to 10^-7 if function(mid) == 0: return mid elif function(mid) * function(start) < 0: @@ -44,7 +44,7 @@ def bisection(function: Callable[[float], float], a: float, b: float) -> float: def f(x: float) -> float: - return x ** 3 - 2 * x - 5 + return x**3 - 2 * x - 5 if __name__ == "__main__": diff --git a/arithmetic_analysis/in_static_equilibrium.py b/arithmetic_analysis/in_static_equilibrium.py index 6e8d1d043036..6fe84b45475c 100644 --- a/arithmetic_analysis/in_static_equilibrium.py +++ b/arithmetic_analysis/in_static_equilibrium.py @@ -23,7 +23,7 @@ def polar_force( def in_static_equilibrium( - forces: ndarray, location: ndarray, eps: float = 10 ** -1 + forces: ndarray, location: ndarray, eps: float = 10**-1 ) -> bool: """ Check if a system is in equilibrium. 
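The long run of x ** 2 to x**2 hunks in this patch follows a single Black 22 rule: whitespace around the power operator is removed when both operands are simple (names, numeric literals, or attribute access, optionally with a unary minus) and kept otherwise, which is why hunks later in the patch such as (height**2 + radius**2) ** 0.5 keep the spaces around the outer operator. A short illustrative before/after with made-up variable names, not lines from this patch:

from math import pi

radius, dx, dy = 2.0, 3.0, 4.0

# Both operands simple (a name and a literal): Black 22 hugs the operator.
area = pi * radius**2   # previously formatted as: pi * radius ** 2
eps = 10**-7            # previously: 10 ** -7

# The parenthesised sum is not a simple operand, so the spaces stay.
distance = (dx**2 + dy**2) ** 0.5

print(area, eps, distance)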
diff --git a/arithmetic_analysis/intersection.py b/arithmetic_analysis/intersection.py index 204dd5d8a935..9d4651144668 100644 --- a/arithmetic_analysis/intersection.py +++ b/arithmetic_analysis/intersection.py @@ -35,7 +35,7 @@ def intersection(function: Callable[[float], float], x0: float, x1: float) -> fl x_n2: float = x_n1 - ( function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n)) ) - if abs(x_n2 - x_n1) < 10 ** -5: + if abs(x_n2 - x_n1) < 10**-5: return x_n2 x_n = x_n1 x_n1 = x_n2 diff --git a/arithmetic_analysis/newton_method.py b/arithmetic_analysis/newton_method.py index a9a94372671e..f0cf4eaa6e83 100644 --- a/arithmetic_analysis/newton_method.py +++ b/arithmetic_analysis/newton_method.py @@ -37,17 +37,17 @@ def newton( next_guess = prev_guess - function(prev_guess) / derivative(prev_guess) except ZeroDivisionError: raise ZeroDivisionError("Could not find root") from None - if abs(prev_guess - next_guess) < 10 ** -5: + if abs(prev_guess - next_guess) < 10**-5: return next_guess prev_guess = next_guess def f(x: float) -> float: - return (x ** 3) - (2 * x) - 5 + return (x**3) - (2 * x) - 5 def f1(x: float) -> float: - return 3 * (x ** 2) - 2 + return 3 * (x**2) - 2 if __name__ == "__main__": diff --git a/arithmetic_analysis/newton_raphson.py b/arithmetic_analysis/newton_raphson.py index 1a820538630f..86ff9d350dde 100644 --- a/arithmetic_analysis/newton_raphson.py +++ b/arithmetic_analysis/newton_raphson.py @@ -11,7 +11,7 @@ def newton_raphson( - func: str, a: float | Decimal, precision: float = 10 ** -10 + func: str, a: float | Decimal, precision: float = 10**-10 ) -> float: """Finds root from the point 'a' onwards by Newton-Raphson method >>> newton_raphson("sin(x)", 2) diff --git a/ciphers/deterministic_miller_rabin.py b/ciphers/deterministic_miller_rabin.py index d7fcb67e936c..2191caf630a7 100644 --- a/ciphers/deterministic_miller_rabin.py +++ b/ciphers/deterministic_miller_rabin.py @@ -73,7 +73,7 @@ def miller_rabin(n: int, allow_probable: bool = False) -> bool: for prime in plist: pr = False for r in range(s): - m = pow(prime, d * 2 ** r, n) + m = pow(prime, d * 2**r, n) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): pr = True diff --git a/ciphers/rabin_miller.py b/ciphers/rabin_miller.py index 65c162984ece..c42ad2f5928d 100644 --- a/ciphers/rabin_miller.py +++ b/ciphers/rabin_miller.py @@ -21,7 +21,7 @@ def rabinMiller(num: int) -> bool: return False else: i = i + 1 - v = (v ** 2) % num + v = (v**2) % num return True diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py index b1e8a73f33c6..5bb9f9916de5 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ -29,8 +29,8 @@ def get_text_from_blocks( block_message: list[str] = [] for i in range(block_size - 1, -1, -1): if len(message) + i < message_length: - ascii_number = block_int // (BYTE_SIZE ** i) - block_int = block_int % (BYTE_SIZE ** i) + ascii_number = block_int // (BYTE_SIZE**i) + block_int = block_int % (BYTE_SIZE**i) block_message.insert(0, chr(ascii_number)) message.extend(block_message) return "".join(message) diff --git a/ciphers/rsa_factorization.py b/ciphers/rsa_factorization.py index 6df32b6cc887..de4df27770c7 100644 --- a/ciphers/rsa_factorization.py +++ b/ciphers/rsa_factorization.py @@ -40,7 +40,7 @@ def rsafactor(d: int, e: int, N: int) -> list[int]: while True: if t % 2 == 0: t = t // 2 - x = (g ** t) % N + x = (g**t) % N y = math.gcd(x - 1, N) if x > 1 and y > 1: p = y diff --git a/computer_vision/harris_corner.py 
b/computer_vision/harris_corner.py index 02deb54084ef..886ff52ea70b 100644 --- a/computer_vision/harris_corner.py +++ b/computer_vision/harris_corner.py @@ -39,8 +39,8 @@ def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]: color_img = img.copy() color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB) dy, dx = np.gradient(img) - ixx = dx ** 2 - iyy = dy ** 2 + ixx = dx**2 + iyy = dy**2 ixy = dx * dy k = 0.04 offset = self.window_size // 2 @@ -56,9 +56,9 @@ def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]: y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() - det = (wxx * wyy) - (wxy ** 2) + det = (wxx * wyy) - (wxy**2) trace = wxx + wyy - r = det - k * (trace ** 2) + r = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r]) diff --git a/digital_image_processing/filters/gabor_filter.py b/digital_image_processing/filters/gabor_filter.py index 90aa049c24a0..8f9212a35a79 100644 --- a/digital_image_processing/filters/gabor_filter.py +++ b/digital_image_processing/filters/gabor_filter.py @@ -49,7 +49,7 @@ def gabor_filter_kernel( # fill kernel gabor[y, x] = np.exp( - -(_x ** 2 + gamma ** 2 * _y ** 2) / (2 * sigma ** 2) + -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi) return gabor diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index 4350b8603390..033334af8a2a 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -203,7 +203,7 @@ def CVI(self): https://www.indexdatabase.de/db/i-single.php?id=391 :return: index """ - return self.nir * (self.red / (self.green ** 2)) + return self.nir * (self.red / (self.green**2)) def GLI(self): """ @@ -295,7 +295,7 @@ def ATSAVI(self, X=0.08, a=1.22, b=0.03): """ return a * ( (self.nir - a * self.red - b) - / (a * self.nir + self.red - a * b + X * (1 + a ** 2)) + / (a * self.nir + self.red - a * b + X * (1 + a**2)) ) def BWDRVI(self): @@ -363,7 +363,7 @@ def GEMI(self): https://www.indexdatabase.de/db/i-single.php?id=25 :return: index """ - n = (2 * (self.nir ** 2 - self.red ** 2) + 1.5 * self.nir + 0.5 * self.red) / ( + n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red) diff --git a/electronics/carrier_concentration.py b/electronics/carrier_concentration.py index 87bcad8df398..03482f1e336e 100644 --- a/electronics/carrier_concentration.py +++ b/electronics/carrier_concentration.py @@ -53,12 +53,12 @@ def carrier_concentration( elif electron_conc == 0: return ( "electron_conc", - intrinsic_conc ** 2 / hole_conc, + intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", - intrinsic_conc ** 2 / electron_conc, + intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( diff --git a/electronics/coulombs_law.py b/electronics/coulombs_law.py index e4c8391c9f9a..e41c0410cc9e 100644 --- a/electronics/coulombs_law.py +++ b/electronics/coulombs_law.py @@ -66,13 +66,13 @@ def couloumbs_law( if distance < 0: raise ValueError("Distance cannot be negative") if force == 0: - force = COULOMBS_CONSTANT * charge_product / (distance ** 2) + force = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif charge1 == 0: - charge1 = abs(force) * (distance ** 2) / (COULOMBS_CONSTANT * charge2) + charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2) return {"charge1": 
charge1} elif charge2 == 0: - charge2 = abs(force) * (distance ** 2) / (COULOMBS_CONSTANT * charge1) + charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1) return {"charge2": charge2} elif distance == 0: distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5 diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 2bb764fdc916..7c22329ad8b4 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -40,7 +40,7 @@ def basis_function(self, t: float) -> list[float]: for i in range(len(self.list_of_points)): # basis function for each i output_values.append( - comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t ** i) + comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(output_values), 5) == 1 diff --git a/graphs/bidirectional_a_star.py b/graphs/bidirectional_a_star.py index 071f1cd685b1..373d67142aa9 100644 --- a/graphs/bidirectional_a_star.py +++ b/graphs/bidirectional_a_star.py @@ -68,7 +68,7 @@ def calculate_heuristic(self) -> float: if HEURISTIC == 1: return abs(dx) + abs(dy) else: - return sqrt(dy ** 2 + dx ** 2) + return sqrt(dy**2 + dx**2) def __lt__(self, other: Node) -> bool: return self.f_cost < other.f_cost diff --git a/hashes/chaos_machine.py b/hashes/chaos_machine.py index 7ef4fdb3ca51..7ad3e5540479 100644 --- a/hashes/chaos_machine.py +++ b/hashes/chaos_machine.py @@ -63,8 +63,8 @@ def xorshift(X, Y): params_space[key] = (machine_time * 0.01 + r * 1.01) % 1 + 3 # Choosing Chaotic Data - X = int(buffer_space[(key + 2) % m] * (10 ** 10)) - Y = int(buffer_space[(key - 2) % m] * (10 ** 10)) + X = int(buffer_space[(key + 2) % m] * (10**10)) + Y = int(buffer_space[(key - 2) % m] * (10**10)) # Machine Time machine_time += 1 diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index 4a32bae1a51c..ac20fe03b3fb 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -78,7 +78,7 @@ def emitterConverter(sizePar, data): >>> emitterConverter(4, "101010111111") ['1', '1', '1', '1', '0', '1', '0', '0', '1', '0', '1', '1', '1', '1', '1', '1'] """ - if sizePar + len(data) <= 2 ** sizePar - (len(data) - 1): + if sizePar + len(data) <= 2**sizePar - (len(data) - 1): print("ERROR - size of parity don't match with size of data") exit(0) diff --git a/hashes/md5.py b/hashes/md5.py index b08ab957340a..c56c073cc0c7 100644 --- a/hashes/md5.py +++ b/hashes/md5.py @@ -94,7 +94,7 @@ def not32(i): def sum32(a, b): - return (a + b) % 2 ** 32 + return (a + b) % 2**32 def leftrot32(i, s): @@ -114,7 +114,7 @@ def md5me(testString): bs += format(ord(i), "08b") bs = pad(bs) - tvals = [int(2 ** 32 * abs(math.sin(i + 1))) for i in range(64)] + tvals = [int(2**32 * abs(math.sin(i + 1))) for i in range(64)] a0 = 0x67452301 b0 = 0xEFCDAB89 @@ -211,7 +211,7 @@ def md5me(testString): dtemp = D D = C C = B - B = sum32(B, leftrot32((A + f + tvals[i] + m[g]) % 2 ** 32, s[i])) + B = sum32(B, leftrot32((A + f + tvals[i] + m[g]) % 2**32, s[i])) A = dtemp a0 = sum32(a0, A) b0 = sum32(b0, B) diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 85dc4b71c4a4..2bfcea7f8c84 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -172,7 +172,7 @@ def euclidean_length(self) -> float: """ if len(self.__components) == 0: raise Exception("Vector is empty") - squares = [c ** 2 for c in self.__components] + squares = [c**2 for c in self.__components] return math.sqrt(sum(squares)) def angle(self, other: Vector, deg: bool = 
False) -> float: diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index c45be8a4c064..10c9374d8492 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -112,7 +112,7 @@ def compute_heterogeneity(data, k, centroids, cluster_assignment): distances = pairwise_distances( member_data_points, [centroids[i]], metric="euclidean" ) - squared_distances = distances ** 2 + squared_distances = distances**2 heterogeneity += np.sum(squared_distances) return heterogeneity diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py index af8694bf8f82..db6868687661 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.py +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -25,7 +25,7 @@ def weighted_matrix(point: np.mat, training_data_x: np.mat, bandwidth: float) -> # calculating weights for all training examples [x(i)'s] for j in range(m): diff = point - training_data_x[j] - weights[j, j] = np.exp(diff * diff.T / (-2.0 * bandwidth ** 2)) + weights[j, j] = np.exp(diff * diff.T / (-2.0 * bandwidth**2)) return weights diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 98ce05c46cff..c217a370a975 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -345,15 +345,15 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): ol = ( l1 * f1 + L * f2 - + 1 / 2 * l1 ** 2 * K(i1, i1) - + 1 / 2 * L ** 2 * K(i2, i2) + + 1 / 2 * l1**2 * K(i1, i1) + + 1 / 2 * L**2 * K(i2, i2) + s * L * l1 * K(i1, i2) ) oh = ( h1 * f1 + H * f2 - + 1 / 2 * h1 ** 2 * K(i1, i1) - + 1 / 2 * H ** 2 * K(i2, i2) + + 1 / 2 * h1**2 * K(i1, i1) + + 1 / 2 * H**2 * K(i2, i2) + s * H * h1 * K(i1, i2) ) """ diff --git a/maths/area.py b/maths/area.py index 7b39312cfaf0..b1b139cf4e22 100644 --- a/maths/area.py +++ b/maths/area.py @@ -19,7 +19,7 @@ def surface_area_cube(side_length: float) -> float: """ if side_length < 0: raise ValueError("surface_area_cube() only accepts non-negative values") - return 6 * side_length ** 2 + return 6 * side_length**2 def surface_area_sphere(radius: float) -> float: @@ -39,7 +39,7 @@ def surface_area_sphere(radius: float) -> float: """ if radius < 0: raise ValueError("surface_area_sphere() only accepts non-negative values") - return 4 * pi * radius ** 2 + return 4 * pi * radius**2 def surface_area_hemisphere(radius: float) -> float: @@ -62,7 +62,7 @@ def surface_area_hemisphere(radius: float) -> float: """ if radius < 0: raise ValueError("surface_area_hemisphere() only accepts non-negative values") - return 3 * pi * radius ** 2 + return 3 * pi * radius**2 def surface_area_cone(radius: float, height: float) -> float: @@ -90,7 +90,7 @@ def surface_area_cone(radius: float, height: float) -> float: """ if radius < 0 or height < 0: raise ValueError("surface_area_cone() only accepts non-negative values") - return pi * radius * (radius + (height ** 2 + radius ** 2) ** 0.5) + return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def surface_area_cylinder(radius: float, height: float) -> float: @@ -158,7 +158,7 @@ def area_square(side_length: float) -> float: """ if side_length < 0: raise ValueError("area_square() only accepts non-negative values") - return side_length ** 2 + return side_length**2 def area_triangle(base: float, height: float) -> float: @@ -307,7 +307,7 
@@ def area_circle(radius: float) -> float: """ if radius < 0: raise ValueError("area_circle() only accepts non-negative values") - return pi * radius ** 2 + return pi * radius**2 def area_ellipse(radius_x: float, radius_y: float) -> float: diff --git a/maths/area_under_curve.py b/maths/area_under_curve.py index ce0932426ef6..6fb3a7c98396 100644 --- a/maths/area_under_curve.py +++ b/maths/area_under_curve.py @@ -50,7 +50,7 @@ def trapezoidal_area( if __name__ == "__main__": def f(x): - return x ** 3 + x ** 2 + return x**3 + x**2 print("f(x) = x^3 + x^2") print("The area between the curve, x = -5, x = 5 and the x axis is:") diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py index 4e62737e1333..65aebe93722e 100644 --- a/maths/armstrong_numbers.py +++ b/maths/armstrong_numbers.py @@ -36,7 +36,7 @@ def armstrong_number(n: int) -> bool: temp = n while temp > 0: rem = temp % 10 - sum += rem ** number_of_digits + sum += rem**number_of_digits temp //= 10 return n == sum @@ -63,7 +63,7 @@ def pluperfect_number(n: int) -> bool: digit_total += 1 for (cnt, i) in zip(digit_histogram, range(len(digit_histogram))): - sum += cnt * i ** digit_total + sum += cnt * i**digit_total return n == sum diff --git a/maths/basic_maths.py b/maths/basic_maths.py index 47d3d91b397d..58e797772a28 100644 --- a/maths/basic_maths.py +++ b/maths/basic_maths.py @@ -81,14 +81,14 @@ def sum_of_divisors(n: int) -> int: temp += 1 n = int(n / 2) if temp > 1: - s *= (2 ** temp - 1) / (2 - 1) + s *= (2**temp - 1) / (2 - 1) for i in range(3, int(math.sqrt(n)) + 1, 2): temp = 1 while n % i == 0: temp += 1 n = int(n / i) if temp > 1: - s *= (i ** temp - 1) / (i - 1) + s *= (i**temp - 1) / (i - 1) return int(s) diff --git a/maths/binomial_distribution.py b/maths/binomial_distribution.py index a74a5a7ed994..5b56f2d59244 100644 --- a/maths/binomial_distribution.py +++ b/maths/binomial_distribution.py @@ -24,7 +24,7 @@ def binomial_distribution(successes: int, trials: int, prob: float) -> float: raise ValueError("the function is defined for non-negative integers") if not 0 < prob < 1: raise ValueError("prob has to be in range of 1 - 0") - probability = (prob ** successes) * ((1 - prob) ** (trials - successes)) + probability = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! 
coefficient = float(factorial(trials)) coefficient /= factorial(successes) * factorial(trials - successes) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index ca4f4a2360a3..07bd6d2ece51 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -159,7 +159,7 @@ def fib_binet(n: int) -> list[int]: raise Exception("n is too large") sqrt_5 = sqrt(5) phi = (1 + sqrt_5) / 2 - return [round(phi ** i / sqrt_5) for i in range(n + 1)] + return [round(phi**i / sqrt_5) for i in range(n + 1)] if __name__ == "__main__": diff --git a/maths/gaussian.py b/maths/gaussian.py index a5dba50a927d..51ebc2e25849 100644 --- a/maths/gaussian.py +++ b/maths/gaussian.py @@ -52,7 +52,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: >>> gaussian(2523, mu=234234, sigma=3425) 0.0 """ - return 1 / sqrt(2 * pi * sigma ** 2) * exp(-((x - mu) ** 2) / (2 * sigma ** 2)) + return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2)) if __name__ == "__main__": diff --git a/maths/karatsuba.py b/maths/karatsuba.py index df29c77a5cf2..b772c0d77039 100644 --- a/maths/karatsuba.py +++ b/maths/karatsuba.py @@ -14,8 +14,8 @@ def karatsuba(a, b): m1 = max(len(str(a)), len(str(b))) m2 = m1 // 2 - a1, a2 = divmod(a, 10 ** m2) - b1, b2 = divmod(b, 10 ** m2) + a1, a2 = divmod(a, 10**m2) + b1, b2 = divmod(b, 10**m2) x = karatsuba(a2, b2) y = karatsuba((a1 + a2), (b1 + b2)) diff --git a/maths/monte_carlo.py b/maths/monte_carlo.py index 28027cbe4178..efb6a01d57fd 100644 --- a/maths/monte_carlo.py +++ b/maths/monte_carlo.py @@ -20,7 +20,7 @@ def pi_estimator(iterations: int): """ # A local function to see if a dot lands in the circle. def is_in_circle(x: float, y: float) -> bool: - distance_from_centre = sqrt((x ** 2) + (y ** 2)) + distance_from_centre = sqrt((x**2) + (y**2)) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. 
return distance_from_centre <= 1 diff --git a/maths/numerical_integration.py b/maths/numerical_integration.py index 577c41a4440e..cf2efce12baf 100644 --- a/maths/numerical_integration.py +++ b/maths/numerical_integration.py @@ -56,7 +56,7 @@ def trapezoidal_area( if __name__ == "__main__": def f(x): - return x ** 3 + return x**3 print("f(x) = x^3") print("The area between the curve, x = -10, x = 10 and the x axis is:") diff --git a/maths/perfect_square.py b/maths/perfect_square.py index 4393dcfbc774..107e68528068 100644 --- a/maths/perfect_square.py +++ b/maths/perfect_square.py @@ -58,9 +58,9 @@ def perfect_square_binary_search(n: int) -> bool: right = n while left <= right: mid = (left + right) // 2 - if mid ** 2 == n: + if mid**2 == n: return True - elif mid ** 2 > n: + elif mid**2 > n: right = mid - 1 else: left = mid + 1 diff --git a/maths/pi_monte_carlo_estimation.py b/maths/pi_monte_carlo_estimation.py index 20b46dddc6e5..81be083787bd 100644 --- a/maths/pi_monte_carlo_estimation.py +++ b/maths/pi_monte_carlo_estimation.py @@ -11,7 +11,7 @@ def is_in_unit_circle(self) -> bool: True, if the point lies in the unit circle False, otherwise """ - return (self.x ** 2 + self.y ** 2) <= 1 + return (self.x**2 + self.y**2) <= 1 @classmethod def random_unit_square(cls): diff --git a/maths/polynomial_evaluation.py b/maths/polynomial_evaluation.py index 68ff97ddd25d..4e4016e5133d 100644 --- a/maths/polynomial_evaluation.py +++ b/maths/polynomial_evaluation.py @@ -12,7 +12,7 @@ def evaluate_poly(poly: Sequence[float], x: float) -> float: >>> evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) 79800.0 """ - return sum(c * (x ** i) for i, c in enumerate(poly)) + return sum(c * (x**i) for i, c in enumerate(poly)) def horner(poly: Sequence[float], x: float) -> float: diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index 9fc9f843e685..0a431a115fb8 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -91,7 +91,7 @@ def __DFT(self, which): next_ncol = self.C_max_length // 2 while next_ncol > 0: new_dft = [[] for i in range(next_ncol)] - root = self.root ** next_ncol + root = self.root**next_ncol # First half of next step current_root = 1 diff --git a/maths/segmented_sieve.py b/maths/segmented_sieve.py index c1cc497ad33e..b15ec2480678 100644 --- a/maths/segmented_sieve.py +++ b/maths/segmented_sieve.py @@ -48,4 +48,4 @@ def sieve(n): return prime -print(sieve(10 ** 6)) +print(sieve(10**6)) diff --git a/maths/sum_of_geometric_progression.py b/maths/sum_of_geometric_progression.py index f29dd8005cff..9079f35af6d9 100644 --- a/maths/sum_of_geometric_progression.py +++ b/maths/sum_of_geometric_progression.py @@ -25,4 +25,4 @@ def sum_of_geometric_progression( return num_of_terms * first_term # Formula for finding sum of n terms of a GeometricProgression - return (first_term / (1 - common_ratio)) * (1 - common_ratio ** num_of_terms) + return (first_term / (1 - common_ratio)) * (1 - common_ratio**num_of_terms) diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index 045a49f7ff00..01083b9a272e 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -159,16 +159,16 @@ def update_system(self, delta_time: float) -> None: # Calculation of the distance using Pythagoras's theorem # Extra factor due to the softening technique - distance = (dif_x ** 2 + dif_y ** 2 + self.softening_factor) ** ( + distance = (dif_x**2 + dif_y**2 + self.softening_factor) ** ( 1 / 2 ) # Newton's law of universal gravitation. 
force_x += ( - self.gravitation_constant * body2.mass * dif_x / distance ** 3 + self.gravitation_constant * body2.mass * dif_x / distance**3 ) force_y += ( - self.gravitation_constant * body2.mass * dif_y / distance ** 3 + self.gravitation_constant * body2.mass * dif_y / distance**3 ) # Update the body's velocity once all the force components have been added diff --git a/project_euler/problem_006/sol1.py b/project_euler/problem_006/sol1.py index 61dd7a321011..615991bb172c 100644 --- a/project_euler/problem_006/sol1.py +++ b/project_euler/problem_006/sol1.py @@ -35,9 +35,9 @@ def solution(n: int = 100) -> int: sum_of_squares = 0 sum_of_ints = 0 for i in range(1, n + 1): - sum_of_squares += i ** 2 + sum_of_squares += i**2 sum_of_ints += i - return sum_of_ints ** 2 - sum_of_squares + return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": diff --git a/project_euler/problem_007/sol2.py b/project_euler/problem_007/sol2.py index 20c2ddf21ab8..dfcad8c148af 100644 --- a/project_euler/problem_007/sol2.py +++ b/project_euler/problem_007/sol2.py @@ -25,7 +25,7 @@ def isprime(number: int) -> bool: True """ - for i in range(2, int(number ** 0.5) + 1): + for i in range(2, int(number**0.5) + 1): if number % i == 0: return False return True diff --git a/project_euler/problem_009/sol1.py b/project_euler/problem_009/sol1.py index 83c88acf1f8b..1d908402b6b1 100644 --- a/project_euler/problem_009/sol1.py +++ b/project_euler/problem_009/sol1.py @@ -33,7 +33,7 @@ def solution() -> int: for b in range(a + 1, 400): for c in range(b + 1, 500): if (a + b + c) == 1000: - if (a ** 2) + (b ** 2) == (c ** 2): + if (a**2) + (b**2) == (c**2): return a * b * c return -1 @@ -54,7 +54,7 @@ def solution_fast() -> int: for a in range(300): for b in range(400): c = 1000 - a - b - if a < b < c and (a ** 2) + (b ** 2) == (c ** 2): + if a < b < c and (a**2) + (b**2) == (c**2): return a * b * c return -1 diff --git a/project_euler/problem_010/sol3.py b/project_euler/problem_010/sol3.py index f49d9393c7af..72e2894df293 100644 --- a/project_euler/problem_010/sol3.py +++ b/project_euler/problem_010/sol3.py @@ -46,7 +46,7 @@ def solution(n: int = 2000000) -> int: primality_list[0] = 1 primality_list[1] = 1 - for i in range(2, int(n ** 0.5) + 1): + for i in range(2, int(n**0.5) + 1): if primality_list[i] == 0: for j in range(i * i, n + 1, i): primality_list[j] = 1 diff --git a/project_euler/problem_016/sol1.py b/project_euler/problem_016/sol1.py index f6620aa9482f..93584d1d436f 100644 --- a/project_euler/problem_016/sol1.py +++ b/project_euler/problem_016/sol1.py @@ -18,7 +18,7 @@ def solution(power: int = 1000) -> int: >>> solution(15) 26 """ - num = 2 ** power + num = 2**power string_num = str(num) list_num = list(string_num) sum_of_num = 0 @@ -31,6 +31,6 @@ def solution(power: int = 1000) -> int: if __name__ == "__main__": power = int(input("Enter the power of 2: ").strip()) - print("2 ^ ", power, " = ", 2 ** power) + print("2 ^ ", power, " = ", 2**power) result = solution(power) print("Sum of the digits is: ", result) diff --git a/project_euler/problem_016/sol2.py b/project_euler/problem_016/sol2.py index 304d27d1e5d0..1408212e74c5 100644 --- a/project_euler/problem_016/sol2.py +++ b/project_euler/problem_016/sol2.py @@ -19,7 +19,7 @@ def solution(power: int = 1000) -> int: >>> solution(15) 26 """ - n = 2 ** power + n = 2**power r = 0 while n: r, n = r + n % 10, n // 10 diff --git a/project_euler/problem_023/sol1.py b/project_euler/problem_023/sol1.py index a72b6123e3ee..83b85f3f721c 100644 --- 
a/project_euler/problem_023/sol1.py +++ b/project_euler/problem_023/sol1.py @@ -30,7 +30,7 @@ def solution(limit=28123): """ sumDivs = [1] * (limit + 1) - for i in range(2, int(limit ** 0.5) + 1): + for i in range(2, int(limit**0.5) + 1): sumDivs[i * i] += i for k in range(i + 1, limit // i + 1): sumDivs[k * i] += k + i diff --git a/project_euler/problem_027/sol1.py b/project_euler/problem_027/sol1.py index 6f28b925be08..928c0ec4feb7 100644 --- a/project_euler/problem_027/sol1.py +++ b/project_euler/problem_027/sol1.py @@ -61,7 +61,7 @@ def solution(a_limit: int = 1000, b_limit: int = 1000) -> int: if is_prime(b): count = 0 n = 0 - while is_prime((n ** 2) + (a * n) + b): + while is_prime((n**2) + (a * n) + b): count += 1 n += 1 if count > longest[0]: diff --git a/project_euler/problem_028/sol1.py b/project_euler/problem_028/sol1.py index cbc7de6bea9a..1ea5d4fcafd4 100644 --- a/project_euler/problem_028/sol1.py +++ b/project_euler/problem_028/sol1.py @@ -40,7 +40,7 @@ def solution(n: int = 1001) -> int: for i in range(1, int(ceil(n / 2.0))): odd = 2 * i + 1 even = 2 * i - total = total + 4 * odd ** 2 - 6 * even + total = total + 4 * odd**2 - 6 * even return total diff --git a/project_euler/problem_029/sol1.py b/project_euler/problem_029/sol1.py index 726bcaf6ebd8..d3ab90ac7d25 100644 --- a/project_euler/problem_029/sol1.py +++ b/project_euler/problem_029/sol1.py @@ -41,7 +41,7 @@ def solution(n: int = 100) -> int: for a in range(2, N): for b in range(2, N): - currentPow = a ** b # calculates the current power + currentPow = a**b # calculates the current power collectPowers.add(currentPow) # adds the result to the set return len(collectPowers) diff --git a/project_euler/problem_048/sol1.py b/project_euler/problem_048/sol1.py index 01ff702d9cd5..5a4538cf5d4e 100644 --- a/project_euler/problem_048/sol1.py +++ b/project_euler/problem_048/sol1.py @@ -17,7 +17,7 @@ def solution(): """ total = 0 for i in range(1, 1001): - total += i ** i + total += i**i return str(total)[-10:] diff --git a/project_euler/problem_050/sol1.py b/project_euler/problem_050/sol1.py index cfb1911df5de..fc6e6f2b9a5d 100644 --- a/project_euler/problem_050/sol1.py +++ b/project_euler/problem_050/sol1.py @@ -35,7 +35,7 @@ def prime_sieve(limit: int) -> list[int]: is_prime[1] = False is_prime[2] = True - for i in range(3, int(limit ** 0.5 + 1), 2): + for i in range(3, int(limit**0.5 + 1), 2): index = i * 2 while index < limit: is_prime[index] = False diff --git a/project_euler/problem_051/sol1.py b/project_euler/problem_051/sol1.py index eedb02379e62..921704bc4455 100644 --- a/project_euler/problem_051/sol1.py +++ b/project_euler/problem_051/sol1.py @@ -37,7 +37,7 @@ def prime_sieve(n: int) -> list[int]: is_prime[1] = False is_prime[2] = True - for i in range(3, int(n ** 0.5 + 1), 2): + for i in range(3, int(n**0.5 + 1), 2): index = i * 2 while index < n: is_prime[index] = False diff --git a/project_euler/problem_056/sol1.py b/project_euler/problem_056/sol1.py index f1ec03c497be..c772bec58692 100644 --- a/project_euler/problem_056/sol1.py +++ b/project_euler/problem_056/sol1.py @@ -30,7 +30,7 @@ def solution(a: int = 100, b: int = 100) -> int: # RETURN the MAXIMUM from the list of SUMs of the list of INT converted from STR of # BASE raised to the POWER return max( - sum(int(x) for x in str(base ** power)) + sum(int(x) for x in str(base**power)) for base in range(a) for power in range(b) ) diff --git a/project_euler/problem_062/sol1.py b/project_euler/problem_062/sol1.py index 83286c801301..0c9baf880497 100644 --- 
a/project_euler/problem_062/sol1.py +++ b/project_euler/problem_062/sol1.py @@ -55,7 +55,7 @@ def get_digits(num: int) -> str: >>> get_digits(123) '0166788' """ - return "".join(sorted(list(str(num ** 3)))) + return "".join(sorted(list(str(num**3)))) if __name__ == "__main__": diff --git a/project_euler/problem_063/sol1.py b/project_euler/problem_063/sol1.py index 29efddba4216..bea30a2e5670 100644 --- a/project_euler/problem_063/sol1.py +++ b/project_euler/problem_063/sol1.py @@ -26,7 +26,7 @@ def solution(max_base: int = 10, max_power: int = 22) -> int: bases = range(1, max_base) powers = range(1, max_power) return sum( - 1 for power in powers for base in bases if len(str(base ** power)) == power + 1 for power in powers for base in bases if len(str(base**power)) == power ) diff --git a/project_euler/problem_064/sol1.py b/project_euler/problem_064/sol1.py index 69e3f6d97580..5df64a90ae55 100644 --- a/project_euler/problem_064/sol1.py +++ b/project_euler/problem_064/sol1.py @@ -38,7 +38,7 @@ def continuous_fraction_period(n: int) -> int: period = 0 while integer_part != 2 * ROOT: numerator = denominator * integer_part - numerator - denominator = (n - numerator ** 2) / denominator + denominator = (n - numerator**2) / denominator integer_part = int((ROOT + numerator) / denominator) period += 1 return period diff --git a/project_euler/problem_069/sol1.py b/project_euler/problem_069/sol1.py index d148dd79a777..5dfd61a89e94 100644 --- a/project_euler/problem_069/sol1.py +++ b/project_euler/problem_069/sol1.py @@ -24,7 +24,7 @@ """ -def solution(n: int = 10 ** 6) -> int: +def solution(n: int = 10**6) -> int: """ Returns solution to problem. Algorithm: diff --git a/project_euler/problem_077/sol1.py b/project_euler/problem_077/sol1.py index 214e258793f6..6098ea9e50a6 100644 --- a/project_euler/problem_077/sol1.py +++ b/project_euler/problem_077/sol1.py @@ -23,7 +23,7 @@ primes.add(2) prime: int -for prime in range(3, ceil(NUM_PRIMES ** 0.5), 2): +for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) diff --git a/project_euler/problem_086/sol1.py b/project_euler/problem_086/sol1.py index 0bf66e6b5a31..064af215c049 100644 --- a/project_euler/problem_086/sol1.py +++ b/project_euler/problem_086/sol1.py @@ -91,7 +91,7 @@ def solution(limit: int = 1000000) -> int: while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1): - if sqrt(sum_shortest_sides ** 2 + max_cuboid_size ** 2).is_integer(): + if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer(): num_cuboids += ( min(max_cuboid_size, sum_shortest_sides // 2) - max(1, sum_shortest_sides - max_cuboid_size) diff --git a/project_euler/problem_092/sol1.py b/project_euler/problem_092/sol1.py index 437a85badc57..d326fc33fcca 100644 --- a/project_euler/problem_092/sol1.py +++ b/project_euler/problem_092/sol1.py @@ -12,7 +12,7 @@ """ -DIGITS_SQUARED = [digit ** 2 for digit in range(10)] +DIGITS_SQUARED = [digit**2 for digit in range(10)] def next_number(number: int) -> int: diff --git a/project_euler/problem_097/sol1.py b/project_euler/problem_097/sol1.py index 2e848c09a940..da5e8120b7c5 100644 --- a/project_euler/problem_097/sol1.py +++ b/project_euler/problem_097/sol1.py @@ -34,7 +34,7 @@ def solution(n: int = 10) -> str: """ if not isinstance(n, int) or n < 0: raise ValueError("Invalid input") - MODULUS = 10 ** n + MODULUS = 10**n NUMBER = 28433 * (pow(2, 7830457, MODULUS)) + 1 return str(NUMBER % 
MODULUS) diff --git a/project_euler/problem_101/sol1.py b/project_euler/problem_101/sol1.py index 14013c435241..04678847508c 100644 --- a/project_euler/problem_101/sol1.py +++ b/project_euler/problem_101/sol1.py @@ -175,15 +175,15 @@ def question_function(variable: int) -> int: return ( 1 - variable - + variable ** 2 - - variable ** 3 - + variable ** 4 - - variable ** 5 - + variable ** 6 - - variable ** 7 - + variable ** 8 - - variable ** 9 - + variable ** 10 + + variable**2 + - variable**3 + + variable**4 + - variable**5 + + variable**6 + - variable**7 + + variable**8 + - variable**9 + + variable**10 ) diff --git a/project_euler/problem_125/sol1.py b/project_euler/problem_125/sol1.py index afc1f2890cef..7a8f908ed6a9 100644 --- a/project_euler/problem_125/sol1.py +++ b/project_euler/problem_125/sol1.py @@ -35,7 +35,7 @@ def solution() -> int: Returns the sum of all numbers less than 1e8 that are both palindromic and can be written as the sum of consecutive squares. """ - LIMIT = 10 ** 8 + LIMIT = 10**8 answer = set() first_square = 1 sum_squares = 5 @@ -45,9 +45,9 @@ def solution() -> int: if is_palindrome(sum_squares): answer.add(sum_squares) last_square += 1 - sum_squares += last_square ** 2 + sum_squares += last_square**2 first_square += 1 - sum_squares = first_square ** 2 + (first_square + 1) ** 2 + sum_squares = first_square**2 + (first_square + 1) ** 2 return sum(answer) diff --git a/project_euler/problem_144/sol1.py b/project_euler/problem_144/sol1.py index 3f7a766be20f..b5f103b64ff5 100644 --- a/project_euler/problem_144/sol1.py +++ b/project_euler/problem_144/sol1.py @@ -58,15 +58,15 @@ def next_point( # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 - quadratic_term = outgoing_gradient ** 2 + 4 + quadratic_term = outgoing_gradient**2 + 4 linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100 x_minus = ( - -linear_term - sqrt(linear_term ** 2 - 4 * quadratic_term * constant_term) + -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term) ) / (2 * quadratic_term) x_plus = ( - -linear_term + sqrt(linear_term ** 2 - 4 * quadratic_term * constant_term) + -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term) ) / (2 * quadratic_term) # two solutions, one of which is our input point diff --git a/project_euler/problem_145/__init__.py b/project_euler/problem_145/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/project_euler/problem_145/sol1.py b/project_euler/problem_145/sol1.py deleted file mode 100644 index 5ba3af86a6a1..000000000000 --- a/project_euler/problem_145/sol1.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Problem 145: https://projecteuler.net/problem=145 - -Name: How many reversible numbers are there below one-billion? - -Some positive integers n have the property that the -sum [ n + reverse(n) ] consists entirely of odd (decimal) digits. -For instance, 36 + 63 = 99 and 409 + 904 = 1313. -We will call such numbers reversible; so 36, 63, 409, and 904 are reversible. -Leading zeroes are not allowed in either n or reverse(n). - -There are 120 reversible numbers below one-thousand. - -How many reversible numbers are there below one-billion (10^9)? - - -Solution: - -Here a brute force solution is used to find and count the reversible numbers. - -""" -from __future__ import annotations - - -def check_if_odd(sum: int = 36) -> int: - """ - Check if the last digit in the sum is even or odd. If even return 0. 
- If odd then floor division by 10 is used to remove the last number. - Process continues until sum becomes 0 because no more numbers. - >>> check_if_odd(36) - 0 - >>> check_if_odd(33) - 1 - """ - while sum > 0: - if (sum % 10) % 2 == 0: - return 0 - sum = sum // 10 - return 1 - - -def find_reverse_number(number: int = 36) -> int: - """ - Reverses the given number. Does not work with number that end in zero. - >>> find_reverse_number(36) - 63 - >>> find_reverse_number(409) - 904 - """ - reverse = 0 - - while number > 0: - temp = number % 10 - reverse = reverse * 10 + temp - number = number // 10 - - return reverse - - -def solution(number: int = 1000000000) -> int: - """ - Loops over the range of numbers. - Checks if they have ending zeros which disqualifies them from being reversible. - If that condition is passed it generates the reversed number. - Then sum up n and reverse(n). - Then check if all the numbers in the sum are odd. If true add to the answer. - >>> solution(1000000000) - 608720 - >>> solution(1000000) - 18720 - >>> solution(1000000) - 18720 - >>> solution(1000) - 120 - """ - answer = 0 - for x in range(1, number): - if x % 10 != 0: - reversed_number = find_reverse_number(x) - sum = x + reversed_number - answer += check_if_odd(sum) - - return answer - - -if __name__ == "__main__": - print(f"{solution() = }") diff --git a/project_euler/problem_173/sol1.py b/project_euler/problem_173/sol1.py index d539b1437ef1..5416e25462cc 100644 --- a/project_euler/problem_173/sol1.py +++ b/project_euler/problem_173/sol1.py @@ -25,8 +25,8 @@ def solution(limit: int = 1000000) -> int: answer = 0 for outer_width in range(3, (limit // 4) + 2): - if outer_width ** 2 > limit: - hole_width_lower_bound = max(ceil(sqrt(outer_width ** 2 - limit)), 1) + if outer_width**2 > limit: + hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1) else: hole_width_lower_bound = 1 if (outer_width - hole_width_lower_bound) % 2: diff --git a/project_euler/problem_180/sol1.py b/project_euler/problem_180/sol1.py index f7c097323c62..12e34dcaa76b 100644 --- a/project_euler/problem_180/sol1.py +++ b/project_euler/problem_180/sol1.py @@ -61,7 +61,7 @@ def is_sq(number: int) -> bool: >>> is_sq(1000000) True """ - sq: int = int(number ** 0.5) + sq: int = int(number**0.5) return number == sq * sq diff --git a/project_euler/problem_188/sol1.py b/project_euler/problem_188/sol1.py index c8cd9eb10aeb..dd4360adb32b 100644 --- a/project_euler/problem_188/sol1.py +++ b/project_euler/problem_188/sol1.py @@ -59,7 +59,7 @@ def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int: # exponentiation result = base for i in range(1, height): - result = _modexpt(base, result, 10 ** digits) + result = _modexpt(base, result, 10**digits) return result diff --git a/project_euler/problem_203/sol1.py b/project_euler/problem_203/sol1.py index fe4d14b20c92..2ba3c96c9e00 100644 --- a/project_euler/problem_203/sol1.py +++ b/project_euler/problem_203/sol1.py @@ -82,10 +82,10 @@ def get_primes_squared(max_number: int) -> list[int]: if non_primes[num]: continue - for num_counter in range(num ** 2, max_prime + 1, num): + for num_counter in range(num**2, max_prime + 1, num): non_primes[num_counter] = True - primes.append(num ** 2) + primes.append(num**2) return primes diff --git a/project_euler/problem_205/sol1.py b/project_euler/problem_205/sol1.py index 7249df48829b..63b997b9f5a9 100644 --- a/project_euler/problem_205/sol1.py +++ b/project_euler/problem_205/sol1.py @@ -63,7 +63,7 @@ def solution() -> float: 
colin_totals_frequencies[min_colin_total:peter_total] ) - total_games_number = (4 ** 9) * (6 ** 6) + total_games_number = (4**9) * (6**6) peter_win_probability = peter_wins_count / total_games_number rounded_peter_win_probability = round(peter_win_probability, ndigits=7) diff --git a/project_euler/problem_207/sol1.py b/project_euler/problem_207/sol1.py index 99d1a91746d2..2b3591f51cfa 100644 --- a/project_euler/problem_207/sol1.py +++ b/project_euler/problem_207/sol1.py @@ -81,7 +81,7 @@ def solution(max_proportion: float = 1 / 12345) -> int: integer = 3 while True: - partition_candidate = (integer ** 2 - 1) / 4 + partition_candidate = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(partition_candidate): partition_candidate = int(partition_candidate) diff --git a/project_euler/problem_234/sol1.py b/project_euler/problem_234/sol1.py index 7516b164db2d..7b20a2206ed2 100644 --- a/project_euler/problem_234/sol1.py +++ b/project_euler/problem_234/sol1.py @@ -35,7 +35,7 @@ def prime_sieve(n: int) -> list: is_prime[1] = False is_prime[2] = True - for i in range(3, int(n ** 0.5 + 1), 2): + for i in range(3, int(n**0.5 + 1), 2): index = i * 2 while index < n: is_prime[index] = False @@ -69,11 +69,11 @@ def solution(limit: int = 999_966_663_333) -> int: prime_index = 0 last_prime = primes[prime_index] - while (last_prime ** 2) <= limit: + while (last_prime**2) <= limit: next_prime = primes[prime_index + 1] - lower_bound = last_prime ** 2 - upper_bound = next_prime ** 2 + lower_bound = last_prime**2 + upper_bound = next_prime**2 # Get numbers divisible by lps(current) current = lower_bound + last_prime diff --git a/project_euler/problem_301/sol1.py b/project_euler/problem_301/sol1.py index b1d434c189b7..4b494033c92d 100644 --- a/project_euler/problem_301/sol1.py +++ b/project_euler/problem_301/sol1.py @@ -48,8 +48,8 @@ def solution(exponent: int = 30) -> int: # To find how many total games were lost for a given exponent x, # we need to find the Fibonacci number F(x+2). 
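# A small aside: the closed-form (Binet-style) estimate of Fibonacci numbers,
# as used in maths/fibonacci.py earlier in this diff, can be sanity-checked
# against a plain loop. The range bound of 40 and the helper names below are
# just for this sketch; float precision limits the rounded closed form anyway.
def _fib_iterative(n: int) -> int:
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a

_phi = (1 + 5**0.5) / 2
assert all(round(_phi**n / 5**0.5) == _fib_iterative(n) for n in range(40))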
fibonacci_index = exponent + 2 - phi = (1 + 5 ** 0.5) / 2 - fibonacci = (phi ** fibonacci_index - (phi - 1) ** fibonacci_index) / 5 ** 0.5 + phi = (1 + 5**0.5) / 2 + fibonacci = (phi**fibonacci_index - (phi - 1) ** fibonacci_index) / 5**0.5 return int(fibonacci) diff --git a/project_euler/problem_551/sol1.py b/project_euler/problem_551/sol1.py index 005d2e98514b..c15445e4d7b0 100644 --- a/project_euler/problem_551/sol1.py +++ b/project_euler/problem_551/sol1.py @@ -14,7 +14,7 @@ ks = [k for k in range(2, 20 + 1)] -base = [10 ** k for k in range(ks[-1] + 1)] +base = [10**k for k in range(ks[-1] + 1)] memo: dict[int, dict[int, list[list[int]]]] = {} @@ -168,7 +168,7 @@ def add(digits, k, addend): digits.append(digit) -def solution(n: int = 10 ** 15) -> int: +def solution(n: int = 10**15) -> int: """ returns n-th term of sequence @@ -193,7 +193,7 @@ def solution(n: int = 10 ** 15) -> int: a_n = 0 for j in range(len(digits)): - a_n += digits[j] * 10 ** j + a_n += digits[j] * 10**j return a_n diff --git a/quantum/deutsch_jozsa.py b/quantum/deutsch_jozsa.py index 304eea196e03..d7e2d8335fb9 100755 --- a/quantum/deutsch_jozsa.py +++ b/quantum/deutsch_jozsa.py @@ -39,7 +39,7 @@ def dj_oracle(case: str, num_qubits: int) -> q.QuantumCircuit: if case == "balanced": # First generate a random number that tells us which CNOTs to # wrap in X-gates: - b = np.random.randint(1, 2 ** num_qubits) + b = np.random.randint(1, 2**num_qubits) # Next, format 'b' as a binary string of length 'n', padded with zeros: b_str = format(b, f"0{num_qubits}b") # Next, we place the first X-gates. Each digit in our binary string diff --git a/searches/hill_climbing.py b/searches/hill_climbing.py index bb24e781a6c1..83a3b8b74e27 100644 --- a/searches/hill_climbing.py +++ b/searches/hill_climbing.py @@ -166,7 +166,7 @@ def hill_climbing( doctest.testmod() def test_f1(x, y): - return (x ** 2) + (y ** 2) + return (x**2) + (y**2) # starting the problem with initial coordinates (3, 4) prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f1) @@ -187,7 +187,7 @@ def test_f1(x, y): ) def test_f2(x, y): - return (3 * x ** 2) - (6 * y) + return (3 * x**2) - (6 * y) prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f1) local_min = hill_climbing(prob, find_max=True) diff --git a/searches/simulated_annealing.py b/searches/simulated_annealing.py index ad29559f1b8d..063d225d0b22 100644 --- a/searches/simulated_annealing.py +++ b/searches/simulated_annealing.py @@ -97,7 +97,7 @@ def simulated_annealing( if __name__ == "__main__": def test_f1(x, y): - return (x ** 2) + (y ** 2) + return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1) @@ -120,7 +120,7 @@ def test_f1(x, y): ) def test_f2(x, y): - return (3 * x ** 2) - (6 * y) + return (3 * x**2) - (6 * y) prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f1) local_min = simulated_annealing(prob, find_max=False, visualization=True) From b2a77cc4fb922ab66f0978e45c108bdb4c30396d Mon Sep 17 00:00:00 2001 From: Saptarshi Sengupta <94242536+saptarshi1996@users.noreply.github.com> Date: Mon, 31 Jan 2022 06:11:46 +0530 Subject: [PATCH 0422/1543] Scraping prescription drug prices from Rx site using the prescription drug name and zipcode (#5967) * add wellrx scraping * write test fix docs * fix resolve issues * black format. 
fix returns * type check fix for union * black formatted * Change requests after code review * add precommit changes * flake errors --- web_programming/fetch_well_rx_price.py | 102 +++++++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 web_programming/fetch_well_rx_price.py diff --git a/web_programming/fetch_well_rx_price.py b/web_programming/fetch_well_rx_price.py new file mode 100644 index 000000000000..58dbe5993adb --- /dev/null +++ b/web_programming/fetch_well_rx_price.py @@ -0,0 +1,102 @@ +""" + +Scrape the price and pharmacy name for a prescription drug from rx site +after providing the drug name and zipcode. + +""" + +from typing import Union +from urllib.error import HTTPError + +from bs4 import BeautifulSoup +from requests import exceptions, get + +BASE_URL = "https://www.wellrx.com/prescriptions/{0}/{1}/?freshSearch=true" + + +def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> Union[list, None]: + """[summary] + + This function will take input of drug name and zipcode, + then request to the BASE_URL site. + Get the page data and scrape it to the generate the + list of lowest prices for the prescription drug. + + Args: + drug_name (str): [Drug name] + zip_code(str): [Zip code] + + Returns: + list: [List of pharmacy name and price] + + >>> fetch_pharmacy_and_price_list(None, None) + + >>> fetch_pharmacy_and_price_list(None, 30303) + + >>> fetch_pharmacy_and_price_list("eliquis", None) + + """ + + try: + + # Has user provided both inputs? + if not drug_name or not zip_code: + return None + + request_url = BASE_URL.format(drug_name, zip_code) + response = get(request_url) + + # Is the response ok? + response.raise_for_status() + + # Scrape the data using bs4 + soup = BeautifulSoup(response.text, "html.parser") + + # This list will store the name and price. + pharmacy_price_list = [] + + # Fetch all the grids that contains the items. + grid_list = soup.find_all("div", {"class": "grid-x pharmCard"}) + if grid_list and len(grid_list) > 0: + for grid in grid_list: + + # Get the pharmacy price. + pharmacy_name = grid.find("p", {"class": "list-title"}).text + + # Get price of the drug. 
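# An offline sketch of the same find-by-class lookup on a static fragment; the
# markup and the "Acme Pharmacy" name are invented purely for illustration.
from bs4 import BeautifulSoup

sample_markup = '<div class="grid-x pharmCard"><p class="list-title">Acme Pharmacy</p></div>'
sample_grid = BeautifulSoup(sample_markup, "html.parser").find(
    "div", {"class": "grid-x pharmCard"}
)
print(sample_grid.find("p", {"class": "list-title"}).text)  # Acme Pharmacy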
+ price = grid.find("span", {"p", "price price-large"}).text + + pharmacy_price_list.append( + { + "pharmacy_name": pharmacy_name, + "price": price, + } + ) + + return pharmacy_price_list + + except (HTTPError, exceptions.RequestException, ValueError): + return None + + +if __name__ == "__main__": + + # Enter a drug name and a zip code + drug_name = input("Enter drug name: ").strip() + zip_code = input("Enter zip code: ").strip() + + pharmacy_price_list: Union[list, None] = fetch_pharmacy_and_price_list( + drug_name, zip_code + ) + + if pharmacy_price_list: + + print(f"\nSearch results for {drug_name} at location {zip_code}:") + for pharmacy_price in pharmacy_price_list: + + name = pharmacy_price["pharmacy_name"] + price = pharmacy_price["price"] + + print(f"Pharmacy: {name} Price: {price}") + else: + print(f"No results found for {drug_name}") From d28ac6483a97deb5ac09a5261d851e97a25c2ee5 Mon Sep 17 00:00:00 2001 From: Saptarshi Sengupta <94242536+saptarshi1996@users.noreply.github.com> Date: Wed, 2 Feb 2022 03:49:17 +0530 Subject: [PATCH 0423/1543] Scrape anime and play episodes on browser without ads from terminal (#5975) * fetch anime * formatted code * fix format errors * fix bot reviews * pr review fixes * remove unussed exception * change var name * fix comments --- web_programming/fetch_anime_and_play.py | 188 ++++++++++++++++++++++++ 1 file changed, 188 insertions(+) create mode 100644 web_programming/fetch_anime_and_play.py diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py new file mode 100644 index 000000000000..e11948d0ae78 --- /dev/null +++ b/web_programming/fetch_anime_and_play.py @@ -0,0 +1,188 @@ +from xml.dom import NotFoundErr + +import requests +from bs4 import BeautifulSoup, NavigableString +from fake_useragent import UserAgent + +BASE_URL = "https://ww1.gogoanime2.org" + + +def search_scraper(anime_name: str) -> list: + + """[summary] + + Take an url and + return list of anime after scraping the site. + + >>> type(search_scraper("demon_slayer")) + + + Args: + anime_name (str): [Name of anime] + + Raises: + e: [Raises exception on failure] + + Returns: + [list]: [List of animes] + """ + + # concat the name to form the search url. + search_url = f"{BASE_URL}/search/{anime_name}" + + response = requests.get( + search_url, headers={"UserAgent": UserAgent().chrome} + ) # request the url. + + # Is the response ok? + response.raise_for_status() + + # parse with soup. + soup = BeautifulSoup(response.text, "html.parser") + + # get list of anime + anime_ul = soup.find("ul", {"class": "items"}) + anime_li = anime_ul.children + + # for each anime, insert to list. the name and url. + anime_list = [] + for anime in anime_li: + if not isinstance(anime, NavigableString): + try: + anime_url, anime_title = ( + anime.find("a")["href"], + anime.find("a")["title"], + ) + anime_list.append( + { + "title": anime_title, + "url": anime_url, + } + ) + except (NotFoundErr, KeyError): + pass + + return anime_list + + +def search_anime_episode_list(episode_endpoint: str) -> list: + + """[summary] + + Take an url and + return list of episodes after scraping the site + for an url. 
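# A minimal offline sketch of the NavigableString-filtering pattern this module
# relies on, run against a static fragment (the markup, href and title are
# invented for the example):
from bs4 import BeautifulSoup, NavigableString

sample_ul = BeautifulSoup(
    '<ul><li><a href="/ep-1" title="Episode 1">1</a></li></ul>', "html.parser"
).find("ul")
for child in sample_ul.children:
    if not isinstance(child, NavigableString):  # skip bare text/whitespace nodes
        print(child.find("a")["href"], child.find("a")["title"])  # /ep-1 Episode 1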
+ + >>> type(search_anime_episode_list("/anime/kimetsu-no-yaiba")) + + + Args: + episode_endpoint (str): [Endpoint of episode] + + Raises: + e: [description] + + Returns: + [list]: [List of episodes] + """ + + request_url = f"{BASE_URL}{episode_endpoint}" + + response = requests.get(url=request_url, headers={"UserAgent": UserAgent().chrome}) + response.raise_for_status() + + soup = BeautifulSoup(response.text, "html.parser") + + # With this id. get the episode list. + episode_page_ul = soup.find("ul", {"id": "episode_related"}) + episode_page_li = episode_page_ul.children + + episode_list = [] + for episode in episode_page_li: + try: + if not isinstance(episode, NavigableString): + episode_list.append( + { + "title": episode.find("div", {"class": "name"}).text.replace( + " ", "" + ), + "url": episode.find("a")["href"], + } + ) + except (KeyError, NotFoundErr): + pass + + return episode_list + + +def get_anime_episode(episode_endpoint: str) -> list: + + """[summary] + + Get click url and download url from episode url + + >>> type(get_anime_episode("/watch/kimetsu-no-yaiba/1")) + + + Args: + episode_endpoint (str): [Endpoint of episode] + + Raises: + e: [description] + + Returns: + [list]: [List of download and watch url] + """ + + episode_page_url = f"{BASE_URL}{episode_endpoint}" + + response = requests.get( + url=episode_page_url, headers={"User-Agent": UserAgent().chrome} + ) + response.raise_for_status() + + soup = BeautifulSoup(response.text, "html.parser") + + try: + episode_url = soup.find("iframe", {"id": "playerframe"})["src"] + download_url = episode_url.replace("/embed/", "/playlist/") + ".m3u8" + except (KeyError, NotFoundErr) as e: + raise e + + return [f"{BASE_URL}{episode_url}", f"{BASE_URL}{download_url}"] + + +if __name__ == "__main__": + + anime_name = input("Enter anime name: ").strip() + anime_list = search_scraper(anime_name) + print("\n") + + if len(anime_list) == 0: + print("No anime found with this name") + else: + + print(f"Found {len(anime_list)} results: ") + for (i, anime) in enumerate(anime_list): + anime_title = anime["title"] + print(f"{i+1}. {anime_title}") + + anime_choice = int(input("\nPlease choose from the following list: ").strip()) + chosen_anime = anime_list[anime_choice - 1] + print(f"You chose {chosen_anime['title']}. Searching for episodes...") + + episode_list = search_anime_episode_list(chosen_anime["url"]) + if len(episode_list) == 0: + print("No episode found for this anime") + else: + print(f"Found {len(episode_list)} results: ") + for (i, episode) in enumerate(episode_list): + print(f"{i+1}. {episode['title']}") + + episode_choice = int(input("\nChoose an episode by serial no: ").strip()) + chosen_episode = episode_list[episode_choice - 1] + print(f"You chose {chosen_episode['title']}. Searching...") + + episode_url, download_url = get_anime_episode(chosen_episode["url"]) + print(f"\nTo watch, ctrl+click on {episode_url}.") + print(f"To download, ctrl+click on {download_url}.") From 54f765bdd0331f4b9381de8c879218ace1313be9 Mon Sep 17 00:00:00 2001 From: Calvin McCarter Date: Wed, 2 Feb 2022 15:05:05 -0500 Subject: [PATCH 0424/1543] Extend power iteration to handle complex Hermitian input matrices (#5925) * works python3 -m unittest discover --start-directory src --pattern "power*.py" --t . 
-v * cleanup * revert switch to unittest * fix flake8 --- linear_algebra/src/power_iteration.py | 70 ++++++++++++++++++--------- 1 file changed, 47 insertions(+), 23 deletions(-) diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py index 2cf22838e4a1..4c6525b6e4af 100644 --- a/linear_algebra/src/power_iteration.py +++ b/linear_algebra/src/power_iteration.py @@ -9,10 +9,10 @@ def power_iteration( ) -> tuple[float, np.ndarray]: """ Power Iteration. - Find the largest eignevalue and corresponding eigenvector + Find the largest eigenvalue and corresponding eigenvector of matrix input_matrix given a random vector in the same space. Will work so long as vector has component of largest eigenvector. - input_matrix must be symmetric. + input_matrix must be either real or Hermitian. Input input_matrix: input matrix whose largest eigenvalue we will find. @@ -41,6 +41,12 @@ def power_iteration( assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1] # Ensure proper dimensionality. assert np.shape(input_matrix)[0] == np.shape(vector)[0] + # Ensure inputs are either both complex or both real + assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector) + is_complex = np.iscomplexobj(input_matrix) + if is_complex: + # Ensure complex input_matrix is Hermitian + assert np.array_equal(input_matrix, input_matrix.conj().T) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. @@ -57,7 +63,8 @@ def power_iteration( vector = w / np.linalg.norm(w) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) - lamda = np.dot(vector.T, np.dot(input_matrix, vector)) + vectorH = vector.conj().T if is_complex else vector.T + lamda = np.dot(vectorH, np.dot(input_matrix, vector)) # Check convergence. error = np.abs(lamda - lamda_previous) / lamda @@ -68,6 +75,9 @@ def power_iteration( lamda_previous = lamda + if is_complex: + lamda = np.real(lamda) + return lamda, vector @@ -75,26 +85,40 @@ def test_power_iteration() -> None: """ >>> test_power_iteration() # self running tests """ - # Our implementation. - input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]]) - vector = np.array([41, 4, 20]) - eigen_value, eigen_vector = power_iteration(input_matrix, vector) - - # Numpy implementation. - - # Get eigen values and eigen vectors using built in numpy - # eigh (eigh used for symmetric or hermetian matrices). - eigen_values, eigen_vectors = np.linalg.eigh(input_matrix) - # Last eigen value is the maximum one. - eigen_value_max = eigen_values[-1] - # Last column in this matrix is eigen vector corresponding to largest eigen value. - eigen_vector_max = eigen_vectors[:, -1] - - # Check our implementation and numpy gives close answers. - assert np.abs(eigen_value - eigen_value_max) <= 1e-6 - # Take absolute values element wise of each eigenvector. - # as they are only unique to a minus sign. 
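# A tiny standalone sanity check of plain power iteration on a 2x2 real
# symmetric matrix; the matrix, starting vector and iteration count are
# illustrative choices for this sketch only.
import numpy as np

_A = np.array([[2.0, 1.0], [1.0, 3.0]])
_v = np.array([1.0, 0.0])
for _ in range(200):  # repeatedly apply the matrix and renormalize
    _w = _A @ _v
    _v = _w / np.linalg.norm(_w)
_rayleigh = _v @ _A @ _v  # estimate of the largest eigenvalue
print(np.isclose(_rayleigh, np.linalg.eigh(_A)[0][-1]))  # True, both about 3.618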
- assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6 + real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]]) + real_vector = np.array([41, 4, 20]) + complex_input_matrix = real_input_matrix.astype(np.complex128) + imag_matrix = np.triu(1j * complex_input_matrix, 1) + complex_input_matrix += imag_matrix + complex_input_matrix += -1 * imag_matrix.T + complex_vector = np.array([41, 4, 20]).astype(np.complex128) + + for problem_type in ["real", "complex"]: + if problem_type == "real": + input_matrix = real_input_matrix + vector = real_vector + elif problem_type == "complex": + input_matrix = complex_input_matrix + vector = complex_vector + + # Our implementation. + eigen_value, eigen_vector = power_iteration(input_matrix, vector) + + # Numpy implementation. + + # Get eigenvalues and eigenvectors using built-in numpy + # eigh (eigh used for symmetric or hermetian matrices). + eigen_values, eigen_vectors = np.linalg.eigh(input_matrix) + # Last eigenvalue is the maximum one. + eigen_value_max = eigen_values[-1] + # Last column in this matrix is eigenvector corresponding to largest eigenvalue. + eigen_vector_max = eigen_vectors[:, -1] + + # Check our implementation and numpy gives close answers. + assert np.abs(eigen_value - eigen_value_max) <= 1e-6 + # Take absolute values element wise of each eigenvector. + # as they are only unique to a minus sign. + assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6 if __name__ == "__main__": From f707f6d689ed40f51e5ceb8f0554e26e1e9fd507 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 13 Feb 2022 06:57:44 +0100 Subject: [PATCH 0425/1543] Upgrade to Python 3.10 (#5992) * Upgrade to Python 3.10 * Upgrade to Python 3.10 * mypy || true * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 4 ++-- .github/workflows/pre-commit.yml | 2 +- DIRECTORY.md | 2 ++ requirements.txt | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4f270ea55d17..aabfacbfc327 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: "3.9" + python-version: "3.10" - uses: actions/cache@v2 with: path: ~/.cache/pip @@ -23,7 +23,7 @@ jobs: python -m pip install mypy pytest-cov -r requirements.txt - run: | mkdir -p .mypy_cache - mypy --ignore-missing-imports --install-types --non-interactive . + mypy --ignore-missing-imports --install-types --non-interactive . || true - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . 
- if: ${{ success() }} diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 19196098b1c1..de73c96adfb1 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -15,7 +15,7 @@ jobs: key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} - uses: actions/setup-python@v2 with: - python-version: 3.9 + python-version: "3.10" - uses: psf/black@21.4b0 - name: Install pre-commit run: | diff --git a/DIRECTORY.md b/DIRECTORY.md index b5ddb9fcb156..e95785b25d66 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1014,9 +1014,11 @@ * [Daily Horoscope](https://github.com/TheAlgorithms/Python/blob/master/web_programming/daily_horoscope.py) * [Download Images From Google Query](https://github.com/TheAlgorithms/Python/blob/master/web_programming/download_images_from_google_query.py) * [Emails From Url](https://github.com/TheAlgorithms/Python/blob/master/web_programming/emails_from_url.py) + * [Fetch Anime And Play](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_anime_and_play.py) * [Fetch Bbc News](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_bbc_news.py) * [Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_github_info.py) * [Fetch Jobs](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_jobs.py) + * [Fetch Well Rx Price](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_well_rx_price.py) * [Get Imdb Top 250 Movies Csv](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdbtop.py) * [Get Top Hn Posts](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_top_hn_posts.py) diff --git a/requirements.txt b/requirements.txt index 9a26dcc21f36..294494acf41a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ beautifulsoup4 fake_useragent -keras<2.7.0 +keras lxml matplotlib numpy From 885580b3a152d02ad72ff433c2aefb6d604ef3c8 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 13 Feb 2022 11:01:58 +0100 Subject: [PATCH 0426/1543] pyupgrade --py310-plus and run mypy in precommit, not build (#5996) * pyupgrade --py310-plus and run mypy in precommit, not build * pyupgrade --py310-plus web_programming/fetch_well_rx_price.py * pyupgrade --py310-plus web_programming/fetch_well_rx_price.py * Fix arithmetic_analysis/in_static_equilibrium.py * Fix arithmetic_analysis/in_static_equilibrium.py --- .github/workflows/build.yml | 5 +---- .pre-commit-config.yaml | 2 +- arithmetic_analysis/in_static_equilibrium.py | 4 ++-- web_programming/fetch_well_rx_price.py | 5 ++--- 4 files changed, 6 insertions(+), 10 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index aabfacbfc327..403ec44c888d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -20,10 +20,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip setuptools six wheel - python -m pip install mypy pytest-cov -r requirements.txt - - run: | - mkdir -p .mypy_cache - mypy --ignore-missing-imports --install-types --non-interactive . || true + python -m pip install pytest-cov -r requirements.txt - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . 
- if: ${{ success() }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 33069a807cee..ab74d28e167a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,7 +30,7 @@ repos: hooks: - id: pyupgrade args: - - --py39-plus + - --py310-plus - repo: https://gitlab.com/pycqa/flake8 rev: 3.9.2 diff --git a/arithmetic_analysis/in_static_equilibrium.py b/arithmetic_analysis/in_static_equilibrium.py index 6fe84b45475c..2ac3e7213fda 100644 --- a/arithmetic_analysis/in_static_equilibrium.py +++ b/arithmetic_analysis/in_static_equilibrium.py @@ -13,9 +13,9 @@ def polar_force( Resolves force along rectangular components. (force, angle) => (force_x, force_y) >>> polar_force(10, 45) - [7.0710678118654755, 7.071067811865475] + [7.071067811865477, 7.0710678118654755] >>> polar_force(10, 3.14, radian_mode=True) - [-9.999987317275394, 0.01592652916486828] + [-9.999987317275396, 0.01592652916486828] """ if radian_mode: return [magnitude * cos(angle), magnitude * sin(angle)] diff --git a/web_programming/fetch_well_rx_price.py b/web_programming/fetch_well_rx_price.py index 58dbe5993adb..5174f39f9532 100644 --- a/web_programming/fetch_well_rx_price.py +++ b/web_programming/fetch_well_rx_price.py @@ -5,7 +5,6 @@ """ -from typing import Union from urllib.error import HTTPError from bs4 import BeautifulSoup @@ -14,7 +13,7 @@ BASE_URL = "https://www.wellrx.com/prescriptions/{0}/{1}/?freshSearch=true" -def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> Union[list, None]: +def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None: """[summary] This function will take input of drug name and zipcode, @@ -85,7 +84,7 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> Union[list, drug_name = input("Enter drug name: ").strip() zip_code = input("Enter zip code: ").strip() - pharmacy_price_list: Union[list, None] = fetch_pharmacy_and_price_list( + pharmacy_price_list: list | None = fetch_pharmacy_and_price_list( drug_name, zip_code ) From 637cf10555adf7bffdc6aeeef587c4145c8a27a7 Mon Sep 17 00:00:00 2001 From: zer0-x <65136727+zer0-x@users.noreply.github.com> Date: Sun, 13 Feb 2022 20:09:09 +0300 Subject: [PATCH 0427/1543] Add points are collinear in 3d algorithm to /maths (#5983) * Add points are collinear in 3d algorithm to /maths * Apply suggestions from code review in points_are_collinear_3d.py Thanks to cclauss. Co-authored-by: Christian Clauss * Rename some variables to be more self-documenting. * Update points_are_collinear_3d.py Co-authored-by: Christian Clauss --- maths/points_are_collinear_3d.py | 126 +++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 maths/points_are_collinear_3d.py diff --git a/maths/points_are_collinear_3d.py b/maths/points_are_collinear_3d.py new file mode 100644 index 000000000000..3bc0b3b9ebe5 --- /dev/null +++ b/maths/points_are_collinear_3d.py @@ -0,0 +1,126 @@ +""" +Check if three points are collinear in 3D. + +In short, the idea is that we are able to create a triangle using three points, +and the area of that triangle can determine if the three points are collinear or not. + + +First, we create two vectors with the same initial point from the three points, +then we will calculate the cross-product of them. + +The length of the cross vector is numerically equal to the area of a parallelogram. + +Finally, the area of the triangle is equal to half of the area of the parallelogram. 
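As a concrete sketch of the idea (the points here are chosen only for illustration): for A = (0, 0, 0), B = (1, 1, 1) and C = (2, 2, 2), the vectors AB = (1, 1, 1) and AC = (2, 2, 2) give the cross product (1*2 - 1*2, -(1*2 - 1*2), 1*2 - 1*2) = (0, 0, 0), so the triangle has zero area and the points are collinear; nudging C to (2, 2, 3) changes the cross product to (1, -1, 0), which is nonzero, so collinearity breaks.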
+ +Since we are only differentiating between zero and anything else, +we can get rid of the square root when calculating the length of the vector, +and also the division by two at the end. + +From a second perspective, if the two vectors are parallel and overlapping, +we can't get a nonzero perpendicular vector, +since there will be an infinite number of orthogonal vectors. + +To simplify the solution we will not calculate the length, +but we will decide directly from the vector whether it is equal to (0, 0, 0) or not. + + +Read More: + https://math.stackexchange.com/a/1951650 +""" + +Vector3d = tuple[float, float, float] +Point3d = tuple[float, float, float] + + +def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d: + """ + Pass two points to get the vector from them in the form (x, y, z). + + >>> create_vector((0, 0, 0), (1, 1, 1)) + (1, 1, 1) + >>> create_vector((45, 70, 24), (47, 32, 1)) + (2, -38, -23) + >>> create_vector((-14, -1, -8), (-7, 6, 4)) + (7, 7, 12) + """ + x = end_point2[0] - end_point1[0] + y = end_point2[1] - end_point1[1] + z = end_point2[2] - end_point1[2] + return (x, y, z) + + +def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d: + """ + Get the cross of the two vectors AB and AC. + + I used determinant of 2x2 to get the determinant of the 3x3 matrix in the process. + + Read More: + https://en.wikipedia.org/wiki/Cross_product + https://en.wikipedia.org/wiki/Determinant + + >>> get_3d_vectors_cross((3, 4, 7), (4, 9, 2)) + (-55, 22, 11) + >>> get_3d_vectors_cross((1, 1, 1), (1, 1, 1)) + (0, 0, 0) + >>> get_3d_vectors_cross((-4, 3, 0), (3, -9, -12)) + (-36, -48, 27) + >>> get_3d_vectors_cross((17.67, 4.7, 6.78), (-9.5, 4.78, -19.33)) + (-123.2594, 277.15110000000004, 129.11260000000001) + """ + x = ab[1] * ac[2] - ab[2] * ac[1] # *i + y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j + z = ab[0] * ac[1] - ab[1] * ac[0] # *k + return (x, y, z) + + +def is_zero_vector(vector: Vector3d, accuracy: int) -> bool: + """ + Check if vector is equal to (0, 0, 0) of not. + + Sine the algorithm is very accurate, we will never get a zero vector, + so we need to round the vector axis, + because we want a result that is either True or False. + In other applications, we can return a float that represents the collinearity ratio. + + >>> is_zero_vector((0, 0, 0), accuracy=10) + True + >>> is_zero_vector((15, 74, 32), accuracy=10) + False + >>> is_zero_vector((-15, -74, -32), accuracy=10) + False + """ + return tuple(round(x, accuracy) for x in vector) == (0, 0, 0) + + +def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool: + """ + Check if three points are collinear or not. + + 1- Create tow vectors AB and AC. + 2- Get the cross vector of the tow vectors. + 3- Calcolate the length of the cross vector. + 4- If the length is zero then the points are collinear, else they are not. + + The use of the accuracy parameter is explained in is_zero_vector docstring. + + >>> are_collinear((4.802293498137402, 3.536233125455244, 0), + ... (-2.186788107953106, -9.24561398001649, 7.141509524846482), + ... (1.530169574640268, -2.447927606600034, 3.343487096469054)) + True + >>> are_collinear((-6, -2, 6), + ... (6.200213806439997, -4.930157614926678, -4.482371908289856), + ... (-4.085171149525941, -2.459889509029438, 4.354787180795383)) + True + >>> are_collinear((2.399001826862445, -2.452009976680793, 4.464656666157666), + ... (-3.682816335934376, 5.753788986533145, 9.490993909044244), + ... 
(1.962903518985307, 3.741415730125627, 7)) + False + >>> are_collinear((1.875375340689544, -7.268426006071538, 7.358196269835993), + ... (-3.546599383667157, -4.630005261513976, 3.208784032924246), + ... (-2.564606140206386, 3.937845170672183, 7)) + False + """ + ab = create_vector(a, b) + ac = create_vector(a, c) + return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy) From 7a9b3c7292cbd71fdc7723f449b9bbcbefbf9747 Mon Sep 17 00:00:00 2001 From: Lukas Date: Sun, 13 Feb 2022 12:20:19 -0500 Subject: [PATCH 0428/1543] Added average absolute deviation (#5951) * Added average absolute deviation * Formats program with black * reruns updated pre commit * Update average_absolute_deviation.py Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + maths/average_absolute_deviation.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 maths/average_absolute_deviation.py diff --git a/DIRECTORY.md b/DIRECTORY.md index e95785b25d66..eeea22e4768f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -454,6 +454,7 @@ * [Area](https://github.com/TheAlgorithms/Python/blob/master/maths/area.py) * [Area Under Curve](https://github.com/TheAlgorithms/Python/blob/master/maths/area_under_curve.py) * [Armstrong Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/armstrong_numbers.py) + * [Average Absolute Deviation](https://github.com/TheAlgorithms/Python/blob/master/maths/average_absolute_deviation.py) * [Average Mean](https://github.com/TheAlgorithms/Python/blob/master/maths/average_mean.py) * [Average Median](https://github.com/TheAlgorithms/Python/blob/master/maths/average_median.py) * [Average Mode](https://github.com/TheAlgorithms/Python/blob/master/maths/average_mode.py) diff --git a/maths/average_absolute_deviation.py b/maths/average_absolute_deviation.py new file mode 100644 index 000000000000..193d94a2f265 --- /dev/null +++ b/maths/average_absolute_deviation.py @@ -0,0 +1,29 @@ +def average_absolute_deviation(nums: list[int]) -> float: + """ + Return the average absolute deviation of a list of numbers. + Wiki: https://en.wikipedia.org/wiki/Average_absolute_deviation + + >>> average_absolute_deviation([0]) + 0.0 + >>> average_absolute_deviation([4, 1, 3, 2]) + 1.0 + >>> average_absolute_deviation([2, 70, 6, 50, 20, 8, 4, 0]) + 20.0 + >>> average_absolute_deviation([-20, 0, 30, 15]) + 16.25 + >>> average_absolute_deviation([]) + Traceback (most recent call last): + ... 
+ ValueError: List is empty + """ + if not nums: # Makes sure that the list is not empty + raise ValueError("List is empty") + + average = sum(nums) / len(nums) # Calculate the average + return sum(abs(x - average) for x in nums) / len(nums) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 37200a4b3b262dd9aa690c4b6514ead0944b3778 Mon Sep 17 00:00:00 2001 From: Anirudh Lakhotia Date: Wed, 16 Mar 2022 21:10:48 +0530 Subject: [PATCH 0429/1543] LICENSE: Year change (#5920) * :memo: Updated year Fixes: #5916 * Update LICENSE.md Co-authored-by: John Law --- LICENSE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE.md b/LICENSE.md index c3c2857cd312..2897d02e2a01 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2016-2021 The Algorithms +Copyright (c) 2016-2022 TheAlgorithms and contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal From 4064bf45f54c1359b686cd7f231b25ee5ae2d3cc Mon Sep 17 00:00:00 2001 From: Harshit Agarwal <43147421+9harshit@users.noreply.github.com> Date: Wed, 30 Mar 2022 00:10:56 -0300 Subject: [PATCH 0430/1543] fix(pre-commit): update `black` version (#6075) black==22.1.0 is breaking the hook. Updating it to 22.3.0 fixes all the issue Refer: https://github.com/python-poetry/poetry/issues/5375 --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ab74d28e167a..33da02fb72ad 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,7 @@ repos: - id: requirements-txt-fixer - repo: https://github.com/psf/black - rev: 22.1.0 + rev: 22.3.0 hooks: - id: black From 1f1daaf1c75adbe43126e53ef1eba718ecb67029 Mon Sep 17 00:00:00 2001 From: Harshit Agarwal <43147421+9harshit@users.noreply.github.com> Date: Mon, 4 Apr 2022 00:36:32 -0300 Subject: [PATCH 0431/1543] feat: add strings/ngram algorithm (#6074) * feat: added ngram algorithm * fix(test): use `math.isclose` to match floating point numbers approximately Co-authored-by: Christian Clauss Co-authored-by: Dhruv Manilawala --- arithmetic_analysis/in_static_equilibrium.py | 14 +++++++++--- strings/ngram.py | 23 ++++++++++++++++++++ 2 files changed, 34 insertions(+), 3 deletions(-) create mode 100644 strings/ngram.py diff --git a/arithmetic_analysis/in_static_equilibrium.py b/arithmetic_analysis/in_static_equilibrium.py index 2ac3e7213fda..ed0d1eb98cf3 100644 --- a/arithmetic_analysis/in_static_equilibrium.py +++ b/arithmetic_analysis/in_static_equilibrium.py @@ -12,8 +12,12 @@ def polar_force( """ Resolves force along rectangular components. 
(force, angle) => (force_x, force_y) - >>> polar_force(10, 45) - [7.071067811865477, 7.0710678118654755] + >>> import math + >>> force = polar_force(10, 45) + >>> math.isclose(force[0], 7.071067811865477) + True + >>> math.isclose(force[1], 7.0710678118654755) + True >>> polar_force(10, 3.14, radian_mode=True) [-9.999987317275396, 0.01592652916486828] """ @@ -50,7 +54,11 @@ def in_static_equilibrium( if __name__ == "__main__": # Test to check if it works forces = array( - [polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90)] + [ + polar_force(718.4, 180 - 30), + polar_force(879.54, 45), + polar_force(100, -90), + ] ) location = array([[0, 0], [0, 0], [0, 0]]) diff --git a/strings/ngram.py b/strings/ngram.py new file mode 100644 index 000000000000..0b13e34a4732 --- /dev/null +++ b/strings/ngram.py @@ -0,0 +1,23 @@ +""" +https://en.wikipedia.org/wiki/N-gram +""" + + +def create_ngram(sentence: str, ngram_size: int) -> list[str]: + """ + Create ngrams from a sentence + + >>> create_ngram("I am a sentence", 2) + ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce'] + >>> create_ngram("I am an NLPer", 2) + ['I ', ' a', 'am', 'm ', ' a', 'an', 'n ', ' N', 'NL', 'LP', 'Pe', 'er'] + >>> create_ngram("This is short", 50) + [] + """ + return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)] + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 1d3d18bcd28cd4eb0ffb7b1db213215f2f92c78a Mon Sep 17 00:00:00 2001 From: Aviv Faraj <73610201+avivfaraj@users.noreply.github.com> Date: Mon, 4 Apr 2022 16:44:29 +0300 Subject: [PATCH 0432/1543] horizontal motion code physics (#4710) * Add files via upload * Changed print to f-string Also printed out results in a math notation * Add files via upload * Fixes: #4710 provided return type * File exists in another pull request * imported radians from math * Updated file according to pre-commit test * Updated file * Updated gamma * Deleted duplicate file * removed pi * reversed tests * Fixed angle condition * Modified prints to f-string * Update horizontal_projectile_motion.py * Update horizontal_projectile_motion.py * Fixes #4710 added exceptions and tests * Added float tests * Fixed type annotations * Fixed last annotation * Fixed annotations * fixed format * Revert "fixed format" This reverts commit 5b0249ac0a0f9c36c3cfbab8423eb72925a73ffb. Undo changes @wq * Revert "Fixed annotations" This reverts commit c37bb9540834cb77e37822eb376a5896cda34778. * Revert "Fixed last annotation" This reverts commit e3678fdeadd23f1bfca27015ab524efa184f6c79. * Revert "Fixed type annotations" This reverts commit 3f2b238c34cd926b335d1f6f750e009f08e8f270. * Revert to 4e2fcaf6fb * Fixing errors found during pre-commit --- physics/horizontal_projectile_motion.py | 152 ++++++++++++++++++++++++ 1 file changed, 152 insertions(+) create mode 100644 physics/horizontal_projectile_motion.py diff --git a/physics/horizontal_projectile_motion.py b/physics/horizontal_projectile_motion.py new file mode 100644 index 000000000000..0f27b0617105 --- /dev/null +++ b/physics/horizontal_projectile_motion.py @@ -0,0 +1,152 @@ +""" +Horizontal Projectile Motion problem in physics. 
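A quick worked example of the quantities computed below, using the same inputs as the doctests further down: launching at v0 = 30 m/s and 45 degrees gives a range of 30**2 * sin(90 degrees) / 9.80665, about 91.77 m, a maximum height of 30**2 * sin(45 degrees)**2 / (2 * 9.80665), about 22.94 m, and a flight time of 2 * 30 * sin(45 degrees) / 9.80665, about 4.33 s.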
+This algorithm solves a specific problem in which +the motion starts from the ground as can be seen below: + (v = 0) + ** + * * + * * + * * + * * + * * +GROUND GROUND +For more info: https://en.wikipedia.org/wiki/Projectile_motion +""" + +# Importing packages +from math import radians as angle_to_radians +from math import sin + +# Acceleration Constant on hearth (unit m/s^2) +g = 9.80665 + + +def check_args(init_velocity: float, angle: float) -> None: + """ + Check that the arguments are valid + """ + + # Ensure valid instance + if not isinstance(init_velocity, (int, float)): + raise TypeError("Invalid velocity. Should be a positive number.") + + if not isinstance(angle, (int, float)): + raise TypeError("Invalid angle. Range is 1-90 degrees.") + + # Ensure valid angle + if angle > 90 or angle < 1: + raise ValueError("Invalid angle. Range is 1-90 degrees.") + + # Ensure valid velocity + if init_velocity < 0: + raise ValueError("Invalid velocity. Should be a positive number.") + + +def horizontal_distance(init_velocity: float, angle: float) -> float: + """ + Returns the horizontal distance that the object cover + Formula: + v_0^2 * sin(2 * alpha) + --------------------- + g + v_0 - initial velocity + alpha - angle + >>> horizontal_distance(30, 45) + 91.77 + >>> horizontal_distance(100, 78) + 414.76 + >>> horizontal_distance(-1, 20) + Traceback (most recent call last): + ... + ValueError: Invalid velocity. Should be a positive number. + >>> horizontal_distance(30, -20) + Traceback (most recent call last): + ... + ValueError: Invalid angle. Range is 1-90 degrees. + """ + check_args(init_velocity, angle) + radians = angle_to_radians(2 * angle) + return round(init_velocity**2 * sin(radians) / g, 2) + + +def max_height(init_velocity: float, angle: float) -> float: + """ + Returns the maximum height that the object reach + Formula: + v_0^2 * sin^2(alpha) + -------------------- + 2g + v_0 - initial velocity + alpha - angle + >>> max_height(30, 45) + 22.94 + >>> max_height(100, 78) + 487.82 + >>> max_height("a", 20) + Traceback (most recent call last): + ... + TypeError: Invalid velocity. Should be a positive number. + >>> horizontal_distance(30, "b") + Traceback (most recent call last): + ... + TypeError: Invalid angle. Range is 1-90 degrees. + """ + check_args(init_velocity, angle) + radians = angle_to_radians(angle) + return round(init_velocity**2 * sin(radians) ** 2 / (2 * g), 2) + + +def total_time(init_velocity: float, angle: float) -> float: + """ + Returns total time of the motion + Formula: + 2 * v_0 * sin(alpha) + -------------------- + g + v_0 - initial velocity + alpha - angle + >>> total_time(30, 45) + 4.33 + >>> total_time(100, 78) + 19.95 + >>> total_time(-10, 40) + Traceback (most recent call last): + ... + ValueError: Invalid velocity. Should be a positive number. + >>> total_time(30, "b") + Traceback (most recent call last): + ... + TypeError: Invalid angle. Range is 1-90 degrees. 
+ """ + check_args(init_velocity, angle) + radians = angle_to_radians(angle) + return round(2 * init_velocity * sin(radians) / g, 2) + + +def test_motion() -> None: + """ + >>> test_motion() + """ + v0, angle = 25, 20 + assert horizontal_distance(v0, angle) == 40.97 + assert max_height(v0, angle) == 3.73 + assert total_time(v0, angle) == 1.74 + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + + # Get input from user + init_vel = float(input("Initial Velocity: ").strip()) + + # Get input from user + angle = float(input("angle: ").strip()) + + # Print results + print() + print("Results: ") + print(f"Horizontal Distance: {str(horizontal_distance(init_vel, angle))} [m]") + print(f"Maximum Height: {str(max_height(init_vel, angle))} [m]") + print(f"Total Time: {str(total_time(init_vel, angle))} [s]") From 1400cb86ff7c656087963db41844e0ca503ae6d5 Mon Sep 17 00:00:00 2001 From: "Paulo S. G. Ferraz" Date: Fri, 8 Apr 2022 14:40:45 -0300 Subject: [PATCH 0433/1543] Remove duplicate is_prime related functions (#5892) * Fixes (#5434) * Update ciphers.rabin_miller.py maths.miller_rabin.py * Fixing ERROR maths/miller_rabin.py - ModuleNotFoundError and changing project_euler's isPrime to is_prime function names * Update sol1.py * fix: try to change to list * fix pre-commit * fix capital letters * Update miller_rabin.py * Update rabin_miller.py Co-authored-by: John Law --- ciphers/rabin_miller.py | 6 +++--- maths/miller_rabin.py | 10 +++++----- project_euler/problem_003/sol1.py | 22 +++++++++++----------- project_euler/problem_007/sol2.py | 10 +++++----- project_euler/problem_007/sol3.py | 10 +++++----- project_euler/problem_058/sol1.py | 17 ++++++++--------- 6 files changed, 37 insertions(+), 38 deletions(-) diff --git a/ciphers/rabin_miller.py b/ciphers/rabin_miller.py index c42ad2f5928d..a9b834bfb4be 100644 --- a/ciphers/rabin_miller.py +++ b/ciphers/rabin_miller.py @@ -25,7 +25,7 @@ def rabinMiller(num: int) -> bool: return True -def isPrime(num: int) -> bool: +def is_prime_low_num(num: int) -> bool: if num < 2: return False @@ -213,11 +213,11 @@ def isPrime(num: int) -> bool: def generateLargePrime(keysize: int = 1024) -> int: while True: num = random.randrange(2 ** (keysize - 1), 2 ** (keysize)) - if isPrime(num): + if is_prime_low_num(num): return num if __name__ == "__main__": num = generateLargePrime() print(("Prime number:", num)) - print(("isPrime:", isPrime(num))) + print(("is_prime_low_num:", is_prime_low_num(num))) diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py index 2b0944508b4b..d35e5485888f 100644 --- a/maths/miller_rabin.py +++ b/maths/miller_rabin.py @@ -6,11 +6,11 @@ # This is a probabilistic check to test primality, useful for big numbers! 
# if it's a prime, it will return true # if it's not a prime, the chance of it returning true is at most 1/4**prec -def is_prime(n, prec=1000): +def is_prime_big(n, prec=1000): """ - >>> from .prime_check import prime_check - >>> # all(is_prime(i) == prime_check(i) for i in range(1000)) # 3.45s - >>> all(is_prime(i) == prime_check(i) for i in range(256)) + >>> from maths.prime_check import prime_check + >>> # all(is_prime_big(i) == prime_check(i) for i in range(1000)) # 3.45s + >>> all(is_prime_big(i) == prime_check(i) for i in range(256)) True """ if n < 2: @@ -48,4 +48,4 @@ def is_prime(n, prec=1000): if __name__ == "__main__": n = abs(int(input("Enter bound : ").strip())) print("Here's the list of primes:") - print(", ".join(str(i) for i in range(n + 1) if is_prime(i))) + print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i))) diff --git a/project_euler/problem_003/sol1.py b/project_euler/problem_003/sol1.py index 1f329984203a..606a6945e4ad 100644 --- a/project_euler/problem_003/sol1.py +++ b/project_euler/problem_003/sol1.py @@ -13,23 +13,23 @@ import math -def isprime(num: int) -> bool: +def is_prime(num: int) -> bool: """ Returns boolean representing primality of given number num. - >>> isprime(2) + >>> is_prime(2) True - >>> isprime(3) + >>> is_prime(3) True - >>> isprime(27) + >>> is_prime(27) False - >>> isprime(2999) + >>> is_prime(2999) True - >>> isprime(0) + >>> is_prime(0) Traceback (most recent call last): ... ValueError: Parameter num must be greater than or equal to two. - >>> isprime(1) + >>> is_prime(1) Traceback (most recent call last): ... ValueError: Parameter num must be greater than or equal to two. @@ -84,18 +84,18 @@ def solution(n: int = 600851475143) -> int: if n <= 0: raise ValueError("Parameter n must be greater than or equal to one.") max_number = 0 - if isprime(n): + if is_prime(n): return n while n % 2 == 0: n //= 2 - if isprime(n): + if is_prime(n): return n for i in range(3, int(math.sqrt(n)) + 1, 2): if n % i == 0: - if isprime(n // i): + if is_prime(n // i): max_number = n // i break - elif isprime(i): + elif is_prime(i): max_number = i return max_number diff --git a/project_euler/problem_007/sol2.py b/project_euler/problem_007/sol2.py index dfcad8c148af..44d72e9493e8 100644 --- a/project_euler/problem_007/sol2.py +++ b/project_euler/problem_007/sol2.py @@ -13,15 +13,15 @@ """ -def isprime(number: int) -> bool: +def is_prime(number: int) -> bool: """ Determines whether the given number is prime or not - >>> isprime(2) + >>> is_prime(2) True - >>> isprime(15) + >>> is_prime(15) False - >>> isprime(29) + >>> is_prime(29) True """ @@ -76,7 +76,7 @@ def solution(nth: int = 10001) -> int: primes: list[int] = [] num = 2 while len(primes) < nth: - if isprime(num): + if is_prime(num): primes.append(num) num += 1 else: diff --git a/project_euler/problem_007/sol3.py b/project_euler/problem_007/sol3.py index 7911fa3e9d6f..daa719cefbda 100644 --- a/project_euler/problem_007/sol3.py +++ b/project_euler/problem_007/sol3.py @@ -15,15 +15,15 @@ import math -def prime_check(number: int) -> bool: +def is_prime(number: int) -> bool: """ Determines whether a given number is prime or not - >>> prime_check(2) + >>> is_prime(2) True - >>> prime_check(15) + >>> is_prime(15) False - >>> prime_check(29) + >>> is_prime(29) True """ @@ -39,7 +39,7 @@ def prime_generator(): num = 2 while True: - if prime_check(num): + if is_prime(num): yield num num += 1 diff --git a/project_euler/problem_058/sol1.py b/project_euler/problem_058/sol1.py index ed407edf7158..c59b0dd71af1 
100644 --- a/project_euler/problem_058/sol1.py +++ b/project_euler/problem_058/sol1.py @@ -36,14 +36,14 @@ from math import isqrt -def isprime(number: int) -> int: +def is_prime(number: int) -> int: """ - returns whether the given number is prime or not - >>> isprime(1) + Returns whether the given number is prime or not + >>> is_prime(1) 0 - >>> isprime(17) + >>> is_prime(17) 1 - >>> isprime(10000) + >>> is_prime(10000) 0 """ if number == 1: @@ -60,7 +60,7 @@ def isprime(number: int) -> int: def solution(ratio: float = 0.1) -> int: """ - returns the side length of the square spiral of odd length greater + Returns the side length of the square spiral of odd length greater than 1 for which the ratio of primes along both diagonals first falls below the given ratio. >>> solution(.5) @@ -76,9 +76,8 @@ def solution(ratio: float = 0.1) -> int: while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1): - primes = primes + isprime(i) - - j = j + 2 + primes += is_prime(i) + j += 2 return j From 10d0e4ecbf741f9acded245b5f47f77b8d672596 Mon Sep 17 00:00:00 2001 From: varopxndx <42877919+varopxndx@users.noreply.github.com> Date: Thu, 28 Apr 2022 12:05:21 -0500 Subject: [PATCH 0434/1543] docs: Fix quicksort & binary tree traversal doc (#4878) * Fix quicksort doc * add binary tree traversals doc * Add link to the reference * Fix job * Change url * Update binary_tree_traversals.md * Update normal_distribution_quick_sort.md * Update normal_distribution_quick_sort.md Co-authored-by: John Law --- .../binary_tree/binary_tree_traversals.md | 111 ++++++++++++++++++ sorts/normal_distribution_quick_sort.md | 45 ++----- 2 files changed, 124 insertions(+), 32 deletions(-) create mode 100644 data_structures/binary_tree/binary_tree_traversals.md diff --git a/data_structures/binary_tree/binary_tree_traversals.md b/data_structures/binary_tree/binary_tree_traversals.md new file mode 100644 index 000000000000..ebe727b6589d --- /dev/null +++ b/data_structures/binary_tree/binary_tree_traversals.md @@ -0,0 +1,111 @@ +# Binary Tree Traversal + +## Overview + +The combination of binary trees being data structures and traversal being an algorithm relates to classic problems, either directly or indirectly. + +> If you can grasp the traversal of binary trees, the traversal of other complicated trees will be easy for you. + +The following are some common ways to traverse trees. + +- Depth First Traversals (DFS): In-order, Pre-order, Post-order + +- Level Order Traversal or Breadth First or Traversal (BFS) + +There are applications for both DFS and BFS. + +Stack can be used to simplify the process of DFS traversal. Besides, since tree is a recursive data structure, recursion and stack are two key points for DFS. + +Graph for DFS: + +![binary-tree-traversal-dfs](https://tva1.sinaimg.cn/large/007S8ZIlly1ghluhzhynsg30dw0dw3yl.gif) + +The key point of BFS is how to determine whether the traversal of each level has been completed. The answer is to use a variable as a flag to represent the end of the traversal of current level. + +## Pre-order Traversal + +The traversal order of pre-order traversal is `root-left-right`. + +Algorithm Pre-order + +1. Visit the root node and push it into a stack. + +2. Pop a node from the stack, and push its right and left child node into the stack respectively. + +3. Repeat step 2. + +Conclusion: This problem involves the classic recursive data structure (i.e. a binary tree), and the algorithm above demonstrates how a simplified solution can be reached by using a stack. 
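+
+As a concrete illustration (a minimal sketch only, assuming a plain `Node` class with `data`, `left` and `right` attributes rather than any particular class from this repository), the stack-based pre-order algorithm above could be written as follows:
+
+```python
+class Node:
+    def __init__(self, data):
+        self.data = data
+        self.left = None
+        self.right = None
+
+
+def preorder_iterative(root):
+    """Return node values in root-left-right order using an explicit stack."""
+    if root is None:
+        return []
+    result, stack = [], [root]
+    while stack:
+        node = stack.pop()
+        result.append(node.data)
+        # Push the right child first so that the left child is visited first.
+        if node.right:
+            stack.append(node.right)
+        if node.left:
+            stack.append(node.left)
+    return result
+```
+
+For a root `Node(1)` whose left child is `Node(2)` and right child is `Node(3)`, this returns `[1, 2, 3]`.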
+ +If you look at the bigger picture, you'll find that the process of traversal is as followed. `Visit the left subtrees respectively from top to bottom, and visit the right subtrees respectively from bottom to top`. If we are to implement it from this perspective, things will be somewhat different. For the `top to bottom` part we can simply use recursion, and for the `bottom to top` part we can turn to stack. + +## In-order Traversal + +The traversal order of in-order traversal is `left-root-right`. + +So the root node is not printed first. Things are getting a bit complicated here. + +Algorithm In-order + +1. Visit the root and push it into a stack. + +2. If there is a left child node, push it into the stack. Repeat this process until a leaf node reached. + + > At this point the root node and all the left nodes are in the stack. + +3. Start popping nodes from the stack. If a node has a right child node, push the child node into the stack. Repeat step 2. + +It's worth pointing out that the in-order traversal of a binary search tree (BST) is a sorted array, which is helpful for coming up simplified solutions for some problems. + +## Post-order Traversal + +The traversal order of post-order traversal is `left-right-root`. + +This one is a bit of a challenge. It deserves the `hard` tag of LeetCode. + +In this case, the root node is printed not as the first but the last one. A cunning way to do it is to: + +Record whether the current node has been visited. If 1) it's a leaf node or 2) both its left and right subtrees have been traversed, then it can be popped from the stack. + +As for `1) it's a leaf node`, you can easily tell whether a node is a leaf if both its left and right are `null`. + +As for `2) both its left and right subtrees have been traversed`, we only need a variable to record whether a node has been visited or not. In the worst case, we need to record the status for every single node and the space complexity is `O(n)`. But if you come to think about it, as we are using a stack and start printing the result from the leaf nodes, it makes sense that we only record the status for the current node popping from the stack, reducing the space complexity to `O(1)`. + +## Level Order Traversal + +The key point of level order traversal is how do we know whether the traversal of each level is done. The answer is that we use a variable as a flag representing the end of the traversal of the current level. + +![binary-tree-traversal-bfs](https://tva1.sinaimg.cn/large/007S8ZIlly1ghlui1tpoug30dw0dw3yl.gif) + +Algorithm Level-order + +1. Visit the root node, put it in a FIFO queue, put in the queue a special flag (we are using `null` here). + +2. Dequeue a node. + +3. If the node equals `null`, it means that all nodes of the current level have been visited. If the queue is empty, we do nothing. Or else we put in another `null`. + +4. If the node is not `null`, meaning the traversal of current level has not finished yet, we enqueue its left subtree and right subtree respectively. + +## Bi-color marking + +We know that there is a tri-color marking in garbage collection algorithm, which works as described below. + +- The white color represents "not visited". + +- The gray color represents "not all child nodes visited". + +- The black color represents "all child nodes visited". + +Enlightened by tri-color marking, a bi-color marking method can be invented to solve all three traversal problems with one solution. + +The core idea is as follow. 
+ +- Use a color to mark whether a node has been visited or not. Nodes yet to be visited are marked as white and visited nodes are marked as gray. + +- If we are visiting a white node, turn it into gray, and push its right child node, itself, and it's left child node into the stack respectively. + +- If we are visiting a gray node, print it. + +Implementation of pre-order and post-order traversal algorithms can be easily done by changing the order of pushing the child nodes into the stack. + +Reference: [LeetCode](https://github.com/azl397985856/leetcode/blob/master/thinkings/binary-tree-traversal.en.md) diff --git a/sorts/normal_distribution_quick_sort.md b/sorts/normal_distribution_quick_sort.md index 2a9f77b3ee95..c073f2cbc81c 100644 --- a/sorts/normal_distribution_quick_sort.md +++ b/sorts/normal_distribution_quick_sort.md @@ -1,15 +1,12 @@ # Normal Distribution QuickSort +QuickSort Algorithm where the pivot element is chosen randomly between first and last elements of the array, and the array elements are taken from Standard Normal Distribution. -Algorithm implementing QuickSort Algorithm where the pivot element is chosen randomly between first and last elements of the array and the array elements are taken from a Standard Normal Distribution. -This is different from the ordinary quicksort in the sense, that it applies more to real life problems , where elements usually follow a normal distribution. Also the pivot is randomized to make it a more generic one. +## Array elements +The array elements are taken from a Standard Normal Distribution, having mean = 0 and standard deviation = 1. -## Array Elements - -The array elements are taken from a Standard Normal Distribution , having mean = 0 and standard deviation 1. - -#### The code +### The code ```python @@ -27,7 +24,7 @@ The array elements are taken from a Standard Normal Distribution , having mean = ------ -#### The Distribution of the Array elements. 
+#### The distribution of the array elements ```python >>> mu, sigma = 0, 1 # mean and standard deviation @@ -35,41 +32,25 @@ The array elements are taken from a Standard Normal Distribution , having mean = >>> count, bins, ignored = plt.hist(s, 30, normed=True) >>> plt.plot(bins , 1/(sigma * np.sqrt(2 * np.pi)) *np.exp( - (bins - mu)**2 / (2 * sigma**2) ),linewidth=2, color='r') >>> plt.show() - ``` +------ +![normal distribution large](https://upload.wikimedia.org/wikipedia/commons/thumb/2/25/The_Normal_Distribution.svg/1280px-The_Normal_Distribution.svg.png) ------ - - - - -![](https://www.mathsisfun.com/data/images/normal-distrubution-large.gif) - ---- - ---------------------- +------ --- +## Comparing the numbers of comparisons -## Plotting the function for Checking 'The Number of Comparisons' taking place between Normal Distribution QuickSort and Ordinary QuickSort +We can plot the function for Checking 'The Number of Comparisons' taking place between Normal Distribution QuickSort and Ordinary QuickSort: ```python ->>>import matplotlib.pyplot as plt +>>> import matplotlib.pyplot as plt - - # Normal Disrtibution QuickSort is red + # Normal Distribution QuickSort is red >>> plt.plot([1,2,4,16,32,64,128,256,512,1024,2048],[1,1,6,15,43,136,340,800,2156,6821,16325],linewidth=2, color='r') - #Ordinary QuickSort is green + # Ordinary QuickSort is green >>> plt.plot([1,2,4,16,32,64,128,256,512,1024,2048],[1,1,4,16,67,122,362,949,2131,5086,12866],linewidth=2, color='g') >>> plt.show() - ``` - - ----- - - ------------------- From a7e4b2326a74067404339b1147c1ff40568ee4c0 Mon Sep 17 00:00:00 2001 From: Manuel Di Lullo <39048927+manueldilullo@users.noreply.github.com> Date: Sun, 1 May 2022 11:45:08 +0200 Subject: [PATCH 0435/1543] Add prefix conversions for strings (#5453) * First commit for add_prefix_conversion * Class names in CamelCase, str.format() to f-string * Fixed following pre-commit guidelines * solved issues with mypy and enum.Enum * Rename add_prefix_conversion.py to prefix_conversions_string.py Co-authored-by: John Law --- conversions/prefix_conversions_string.py | 121 +++++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 conversions/prefix_conversions_string.py diff --git a/conversions/prefix_conversions_string.py b/conversions/prefix_conversions_string.py new file mode 100644 index 000000000000..3255eae6ff45 --- /dev/null +++ b/conversions/prefix_conversions_string.py @@ -0,0 +1,121 @@ +""" +* Author: Manuel Di Lullo (https://github.com/manueldilullo) +* Description: Convert a number to use the correct SI or Binary unit prefix. + +Inspired by prefix_conversion.py file in this repository by lance-pyles + +URL: https://en.wikipedia.org/wiki/Metric_prefix#List_of_SI_prefixes +URL: https://en.wikipedia.org/wiki/Binary_prefix +""" + +from __future__ import annotations + +from enum import Enum, unique +from typing import Type, TypeVar + +# Create a generic variable that can be 'Enum', or any subclass. 
+T = TypeVar("T", bound="Enum") + + +@unique +class BinaryUnit(Enum): + yotta = 80 + zetta = 70 + exa = 60 + peta = 50 + tera = 40 + giga = 30 + mega = 20 + kilo = 10 + + +@unique +class SIUnit(Enum): + yotta = 24 + zetta = 21 + exa = 18 + peta = 15 + tera = 12 + giga = 9 + mega = 6 + kilo = 3 + hecto = 2 + deca = 1 + deci = -1 + centi = -2 + milli = -3 + micro = -6 + nano = -9 + pico = -12 + femto = -15 + atto = -18 + zepto = -21 + yocto = -24 + + @classmethod + def get_positive(cls: Type[T]) -> dict: + """ + Returns a dictionary with only the elements of this enum + that has a positive value + >>> from itertools import islice + >>> positive = SIUnit.get_positive() + >>> inc = iter(positive.items()) + >>> dict(islice(inc, len(positive) // 2)) + {'yotta': 24, 'zetta': 21, 'exa': 18, 'peta': 15, 'tera': 12} + >>> dict(inc) + {'giga': 9, 'mega': 6, 'kilo': 3, 'hecto': 2, 'deca': 1} + """ + return {unit.name: unit.value for unit in cls if unit.value > 0} + + @classmethod + def get_negative(cls: Type[T]) -> dict: + """ + Returns a dictionary with only the elements of this enum + that has a negative value + @example + >>> from itertools import islice + >>> negative = SIUnit.get_negative() + >>> inc = iter(negative.items()) + >>> dict(islice(inc, len(negative) // 2)) + {'deci': -1, 'centi': -2, 'milli': -3, 'micro': -6, 'nano': -9} + >>> dict(inc) + {'pico': -12, 'femto': -15, 'atto': -18, 'zepto': -21, 'yocto': -24} + """ + return {unit.name: unit.value for unit in cls if unit.value < 0} + + +def add_si_prefix(value: float) -> str: + """ + Function that converts a number to his version with SI prefix + @input value (an integer) + @example: + >>> add_si_prefix(10000) + '10.0 kilo' + """ + prefixes = SIUnit.get_positive() if value > 0 else SIUnit.get_negative() + for name_prefix, value_prefix in prefixes.items(): + numerical_part = value / (10 ** value_prefix) + if numerical_part > 1: + return f"{str(numerical_part)} {name_prefix}" + return str(value) + + +def add_binary_prefix(value: float) -> str: + """ + Function that converts a number to his version with Binary prefix + @input value (an integer) + @example: + >>> add_binary_prefix(65536) + '64.0 kilo' + """ + for prefix in BinaryUnit: + numerical_part = value / (2 ** prefix.value) + if numerical_part > 1: + return f"{str(numerical_part)} {prefix.name}" + return str(value) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From e1ec661d4e368ceabd50e7ef3714c85dbe139c02 Mon Sep 17 00:00:00 2001 From: Shuangchi He <34329208+Yulv-git@users.noreply.github.com> Date: Sun, 1 May 2022 18:44:23 +0800 Subject: [PATCH 0436/1543] Fix some typos (#6113) * Fix some typos. * Update volume.py Co-authored-by: John Law --- ciphers/shuffled_shift_cipher.py | 2 +- data_structures/stacks/dijkstras_two_stack_algorithm.py | 2 +- divide_and_conquer/inversions.py | 2 +- maths/volume.py | 4 ++-- strings/manacher.py | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ciphers/shuffled_shift_cipher.py b/ciphers/shuffled_shift_cipher.py index 3b84f97f6769..714acd4b1afc 100644 --- a/ciphers/shuffled_shift_cipher.py +++ b/ciphers/shuffled_shift_cipher.py @@ -9,7 +9,7 @@ class ShuffledShiftCipher: This algorithm uses the Caesar Cipher algorithm but removes the option to use brute force to decrypt the message. - The passcode is a a random password from the selection buffer of + The passcode is a random password from the selection buffer of 1. uppercase letters of the English alphabet 2. lowercase letters of the English alphabet 3. 
digits from 0 to 9 diff --git a/data_structures/stacks/dijkstras_two_stack_algorithm.py b/data_structures/stacks/dijkstras_two_stack_algorithm.py index ba2ca92c7b5c..976c9a53c931 100644 --- a/data_structures/stacks/dijkstras_two_stack_algorithm.py +++ b/data_structures/stacks/dijkstras_two_stack_algorithm.py @@ -10,7 +10,7 @@ THESE ARE THE ALGORITHM'S RULES: RULE 1: Scan the expression from left to right. When an operand is encountered, - push it onto the the operand stack. + push it onto the operand stack. RULE 2: When an operator is encountered in the expression, push it onto the operator stack. diff --git a/divide_and_conquer/inversions.py b/divide_and_conquer/inversions.py index b471456025be..e20d35daccbe 100644 --- a/divide_and_conquer/inversions.py +++ b/divide_and_conquer/inversions.py @@ -11,7 +11,7 @@ def count_inversions_bf(arr): """ - Counts the number of inversions using a a naive brute-force algorithm + Counts the number of inversions using a naive brute-force algorithm Parameters ---------- arr: arr: array-like, the list containing the items for which the number diff --git a/maths/volume.py b/maths/volume.py index b11995bab917..acaed65f4858 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -38,8 +38,8 @@ def vol_spheres_intersect( Calculate the volume of the intersection of two spheres. The intersection is composed by two spherical caps and therefore its volume is the - sum of the volumes of the spherical caps. First it calculates the heights (h1, h2) - of the the spherical caps, then the two volumes and it returns the sum. + sum of the volumes of the spherical caps. First, it calculates the heights (h1, h2) + of the spherical caps, then the two volumes and it returns the sum. The height formulas are h1 = (radius_1 - radius_2 + centers_distance) * (radius_1 + radius_2 - centers_distance) diff --git a/strings/manacher.py b/strings/manacher.py index e6ea71cde12f..c58c7c19ec44 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -88,7 +88,7 @@ def palindromic_string(input_string: str) -> str: now for a5 we will calculate the length of palindromic substring with center as a5 but can we use previously calculated information in some way? Yes, look the above string we know that a5 is inside the palindrome with center a3 and -previously we have have calculated that +previously we have calculated that a0==a2 (palindrome of center a1) a2==a4 (palindrome of center a3) a0==a6 (palindrome of center a3) From 7a394411b70bf7ca654e97f2d2663674ce1757c7 Mon Sep 17 00:00:00 2001 From: John Law Date: Sun, 1 May 2022 21:52:40 +0800 Subject: [PATCH 0437/1543] fix black at prefix string (#6122) * fix black at prefix string * Type -> type * Type unused --- conversions/prefix_conversions_string.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/conversions/prefix_conversions_string.py b/conversions/prefix_conversions_string.py index 3255eae6ff45..3851d7c8b993 100644 --- a/conversions/prefix_conversions_string.py +++ b/conversions/prefix_conversions_string.py @@ -11,7 +11,7 @@ from __future__ import annotations from enum import Enum, unique -from typing import Type, TypeVar +from typing import TypeVar # Create a generic variable that can be 'Enum', or any subclass. 
T = TypeVar("T", bound="Enum") @@ -53,7 +53,7 @@ class SIUnit(Enum): yocto = -24 @classmethod - def get_positive(cls: Type[T]) -> dict: + def get_positive(cls: type[T]) -> dict: """ Returns a dictionary with only the elements of this enum that has a positive value @@ -68,7 +68,7 @@ def get_positive(cls: Type[T]) -> dict: return {unit.name: unit.value for unit in cls if unit.value > 0} @classmethod - def get_negative(cls: Type[T]) -> dict: + def get_negative(cls: type[T]) -> dict: """ Returns a dictionary with only the elements of this enum that has a negative value @@ -94,7 +94,7 @@ def add_si_prefix(value: float) -> str: """ prefixes = SIUnit.get_positive() if value > 0 else SIUnit.get_negative() for name_prefix, value_prefix in prefixes.items(): - numerical_part = value / (10 ** value_prefix) + numerical_part = value / (10**value_prefix) if numerical_part > 1: return f"{str(numerical_part)} {name_prefix}" return str(value) @@ -109,7 +109,7 @@ def add_binary_prefix(value: float) -> str: '64.0 kilo' """ for prefix in BinaryUnit: - numerical_part = value / (2 ** prefix.value) + numerical_part = value / (2**prefix.value) if numerical_part > 1: return f"{str(numerical_part)} {prefix.name}" return str(value) From 4bd5494992a03a63aa0a1d55169a0171dee38468 Mon Sep 17 00:00:00 2001 From: Vineet Rao <28603906+VinWare@users.noreply.github.com> Date: Mon, 2 May 2022 19:28:12 +0530 Subject: [PATCH 0438/1543] Add solution to Problem 145 of Project Euler (#5464) * Solution to Problem 145 of Project Euler * Provided more descriptive filename * Update sol1.py Co-authored-by: John Law --- project_euler/problem_145/__init__.py | 0 project_euler/problem_145/sol1.py | 57 +++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 project_euler/problem_145/__init__.py create mode 100644 project_euler/problem_145/sol1.py diff --git a/project_euler/problem_145/__init__.py b/project_euler/problem_145/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_145/sol1.py b/project_euler/problem_145/sol1.py new file mode 100644 index 000000000000..82e2ea79bc25 --- /dev/null +++ b/project_euler/problem_145/sol1.py @@ -0,0 +1,57 @@ +""" +Project Euler problem 145: https://projecteuler.net/problem=145 +Author: Vineet Rao +Problem statement: + +Some positive integers n have the property that the sum [ n + reverse(n) ] +consists entirely of odd (decimal) digits. +For instance, 36 + 63 = 99 and 409 + 904 = 1313. +We will call such numbers reversible; so 36, 63, 409, and 904 are reversible. +Leading zeroes are not allowed in either n or reverse(n). + +There are 120 reversible numbers below one-thousand. + +How many reversible numbers are there below one-billion (10^9)? +""" + + +def odd_digits(num: int) -> bool: + """ + Check if the number passed as argument has only odd digits. 
+ >>> odd_digits(123) + False + >>> odd_digits(135797531) + True + """ + num_str = str(num) + for i in ["0", "2", "4", "6", "8"]: + if i in num_str: + return False + return True + + +def solution(max_num: int = 1_000_000_000) -> int: + """ + To evaluate the solution, use solution() + >>> solution(1000) + 120 + >>> solution(1_000_000) + 18720 + >>> solution(10_000_000) + 68720 + """ + result = 0 + # All single digit numbers reverse to themselves, so their sums are even + # Therefore at least one digit in their sum is even + # Last digit cannot be 0, else it causes leading zeros in reverse + for num in range(11, max_num): + if num % 10 == 0: + continue + num_sum = num + int(str(num)[::-1]) + num_is_reversible = odd_digits(num_sum) + result += 1 if num_is_reversible else 0 + return result + + +if __name__ == "__main__": + print(f"{solution() = }") From 26f2df762248e947638ffdb61a9d7c9f5d5f0592 Mon Sep 17 00:00:00 2001 From: Kunwar Preet Singh <75082218+Enkryp@users.noreply.github.com> Date: Mon, 2 May 2022 19:42:18 +0530 Subject: [PATCH 0439/1543] Add sol for P104 Project Euler (#5257) * Hacktoberfest: added sol for P104 Project Euler * bot requests resolved * pre-commit * Update sol.py * Update sol.py * remove trailing zeroes * Update sol.py * Update sol.py * Update sol.py Co-authored-by: John Law --- project_euler/problem_104/__init__.py | 0 project_euler/problem_104/sol.py | 137 ++++++++++++++++++++++++++ 2 files changed, 137 insertions(+) create mode 100644 project_euler/problem_104/__init__.py create mode 100644 project_euler/problem_104/sol.py diff --git a/project_euler/problem_104/__init__.py b/project_euler/problem_104/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_104/sol.py b/project_euler/problem_104/sol.py new file mode 100644 index 000000000000..0818ac401c3a --- /dev/null +++ b/project_euler/problem_104/sol.py @@ -0,0 +1,137 @@ +""" +Project Euler Problem 104 : https://projecteuler.net/problem=104 + +The Fibonacci sequence is defined by the recurrence relation: + +Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. +It turns out that F541, which contains 113 digits, is the first Fibonacci number +for which the last nine digits are 1-9 pandigital (contain all the digits 1 to 9, +but not necessarily in order). And F2749, which contains 575 digits, is the first +Fibonacci number for which the first nine digits are 1-9 pandigital. + +Given that Fk is the first Fibonacci number for which the first nine digits AND +the last nine digits are 1-9 pandigital, find k. 
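+
+Solution approach:
+The last nine digits of each Fibonacci number can be tracked cheaply by running the
+recurrence modulo 10^9, so only the indices whose tail is already 1-9 pandigital
+need the slower full-precision check of the leading nine digits (see the
+implementation below).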
+""" + + +def check(number: int) -> bool: + """ + Takes a number and checks if it is pandigital both from start and end + + + >>> check(123456789987654321) + True + + >>> check(120000987654321) + False + + >>> check(1234567895765677987654321) + True + + """ + + check_last = [0] * 11 + check_front = [0] * 11 + + # mark last 9 numbers + for x in range(9): + check_last[int(number % 10)] = 1 + number = number // 10 + # flag + f = True + + # check last 9 numbers for pandigitality + + for x in range(9): + if not check_last[x + 1]: + f = False + if not f: + return f + + # mark first 9 numbers + number = int(str(number)[:9]) + + for x in range(9): + check_front[int(number % 10)] = 1 + number = number // 10 + + # check first 9 numbers for pandigitality + + for x in range(9): + if not check_front[x + 1]: + f = False + return f + + +def check1(number: int) -> bool: + """ + Takes a number and checks if it is pandigital from END + + >>> check1(123456789987654321) + True + + >>> check1(120000987654321) + True + + >>> check1(12345678957656779870004321) + False + + """ + + check_last = [0] * 11 + + # mark last 9 numbers + for x in range(9): + check_last[int(number % 10)] = 1 + number = number // 10 + # flag + f = True + + # check last 9 numbers for pandigitality + + for x in range(9): + if not check_last[x + 1]: + f = False + return f + + +def solution() -> int: + """ + Outputs the answer is the least Fibonacci number pandigital from both sides. + >>> solution() + 329468 + """ + + a = 1 + b = 1 + c = 2 + # temporary Fibonacci numbers + + a1 = 1 + b1 = 1 + c1 = 2 + # temporary Fibonacci numbers mod 1e9 + + # mod m=1e9, done for fast optimisation + tocheck = [0] * 1000000 + m = 1000000000 + + for x in range(1000000): + c1 = (a1 + b1) % m + a1 = b1 % m + b1 = c1 % m + if check1(b1): + tocheck[x + 3] = 1 + + for x in range(1000000): + c = a + b + a = b + b = c + # perform check only if in tocheck + if tocheck[x + 3] and check(b): + return x + 3 # first 2 already done + return -1 + + +if __name__ == "__main__": + print(f"{solution() = }") From 8226636ea321d8fddae55460cd3c8a25b537160e Mon Sep 17 00:00:00 2001 From: Simon Date: Mon, 2 May 2022 18:07:29 +0200 Subject: [PATCH 0440/1543] Add the Horn-Schunck algorithm (#5333) * Added implementation of the Horn-Schunck algorithm * Cleaner variable names * added doctests * Fix doctest * Update horn_schunck.py * Update horn_schunck.py * Update horn_schunck.py * Update horn_schunck.py Co-authored-by: John Law --- computer_vision/horn_schunck.py | 130 ++++++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+) create mode 100644 computer_vision/horn_schunck.py diff --git a/computer_vision/horn_schunck.py b/computer_vision/horn_schunck.py new file mode 100644 index 000000000000..1428487d051b --- /dev/null +++ b/computer_vision/horn_schunck.py @@ -0,0 +1,130 @@ +""" + The Horn-Schunck method estimates the optical flow for every single pixel of + a sequence of images. + It works by assuming brightness constancy between two consecutive frames + and smoothness in the optical flow. + + Useful resources: + Wikipedia: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method + Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf +""" + +import numpy as np +from scipy.ndimage.filters import convolve +from typing_extensions import SupportsIndex + + +def warp( + image: np.ndarray, horizontal_flow: np.ndarray, vertical_flow: np.ndarray +) -> np.ndarray: + """ + Warps the pixels of an image into a new image using the horizontal and vertical + flows. 
+ Pixels that are warped from an invalid location are set to 0. + + Parameters: + image: Grayscale image + horizontal_flow: Horizontal flow + vertical_flow: Vertical flow + + Returns: Warped image + + >>> warp(np.array([[0, 1, 2], [0, 3, 0], [2, 2, 2]]), \ + np.array([[0, 1, -1], [-1, 0, 0], [1, 1, 1]]), \ + np.array([[0, 0, 0], [0, 1, 0], [0, 0, 1]])) + array([[0, 0, 0], + [3, 1, 0], + [0, 2, 3]]) + """ + flow = np.stack((horizontal_flow, vertical_flow), 2) + + # Create a grid of all pixel coordinates and subtract the flow to get the + # target pixels coordinates + grid = np.stack( + np.meshgrid(np.arange(0, image.shape[1]), np.arange(0, image.shape[0])), 2 + ) + grid = np.round(grid - flow).astype(np.int32) + + # Find the locations outside of the original image + invalid = (grid < 0) | (grid >= np.array([image.shape[1], image.shape[0]])) + grid[invalid] = 0 + + warped = image[grid[:, :, 1], grid[:, :, 0]] + + # Set pixels at invalid locations to 0 + warped[invalid[:, :, 0] | invalid[:, :, 1]] = 0 + + return warped + + +def horn_schunck( + image0: np.ndarray, + image1: np.ndarray, + num_iter: SupportsIndex, + alpha: float | None = None, +) -> tuple[np.ndarray, np.ndarray]: + """ + This function performs the Horn-Schunck algorithm and returns the estimated + optical flow. It is assumed that the input images are grayscale and + normalized to be in [0, 1]. + + Parameters: + image0: First image of the sequence + image1: Second image of the sequence + alpha: Regularization constant + num_iter: Number of iterations performed + + Returns: estimated horizontal & vertical flow + + >>> np.round(horn_schunck(np.array([[0, 0, 2], [0, 0, 2]]), \ + np.array([[0, 2, 0], [0, 2, 0]]), alpha=0.1, num_iter=110)).\ + astype(np.int32) + array([[[ 0, -1, -1], + [ 0, -1, -1]], + + [[ 0, 0, 0], + [ 0, 0, 0]]], dtype=int32) + """ + if alpha is None: + alpha = 0.1 + + # Initialize flow + horizontal_flow = np.zeros_like(image0) + vertical_flow = np.zeros_like(image0) + + # Prepare kernels for the calculation of the derivatives and the average velocity + kernel_x = np.array([[-1, 1], [-1, 1]]) * 0.25 + kernel_y = np.array([[-1, -1], [1, 1]]) * 0.25 + kernel_t = np.array([[1, 1], [1, 1]]) * 0.25 + kernel_laplacian = np.array( + [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]] + ) + + # Iteratively refine the flow + for _ in range(num_iter): + warped_image = warp(image0, horizontal_flow, vertical_flow) + derivative_x = convolve(warped_image, kernel_x) + convolve(image1, kernel_x) + derivative_y = convolve(warped_image, kernel_y) + convolve(image1, kernel_y) + derivative_t = convolve(warped_image, kernel_t) + convolve(image1, -kernel_t) + + avg_horizontal_velocity = convolve(horizontal_flow, kernel_laplacian) + avg_vertical_velocity = convolve(vertical_flow, kernel_laplacian) + + # This updates the flow as proposed in the paper (Step 12) + update = ( + derivative_x * avg_horizontal_velocity + + derivative_y * avg_vertical_velocity + + derivative_t + ) + update = update / (alpha**2 + derivative_x**2 + derivative_y**2) + + horizontal_flow = avg_horizontal_velocity - derivative_x * update + vertical_flow = avg_vertical_velocity - derivative_y * update + + return horizontal_flow, vertical_flow + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 3bff196981312e41ba9dac91e1bd971b7120726c Mon Sep 17 00:00:00 2001 From: KerimovEmil Date: Wed, 11 May 2022 23:28:45 -0400 Subject: [PATCH 0441/1543] Fix some typos in solution 1 of euler 686 (#6112) While reading this code I noticed some 
typos in the doc strings and wanted to fix them. --- project_euler/problem_686/sol1.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/project_euler/problem_686/sol1.py b/project_euler/problem_686/sol1.py index 3b6bdb655170..99a317cd1451 100644 --- a/project_euler/problem_686/sol1.py +++ b/project_euler/problem_686/sol1.py @@ -27,7 +27,7 @@ def log_difference(number: int) -> float: Computing 2^90 is time consuming. Hence we find log(2^90) = 90*log(2) = 27.092699609758302 But we require only the decimal part to determine whether the power starts with 123. - SO we just return the decimal part of the log product. + So we just return the decimal part of the log product. Therefore we return 0.092699609758302 >>> log_difference(90) @@ -57,14 +57,14 @@ def solution(number: int = 678910) -> int: So if number = 10, then solution returns 2515 as we observe from above series. - Wwe will define a lowerbound and upperbound. + We will define a lowerbound and upperbound. lowerbound = log(1.23), upperbound = log(1.24) because we need to find the powers that yield 123 as starting digits. log(1.23) = 0.08990511143939792, log(1,24) = 0.09342168516223506. We use 1.23 and not 12.3 or 123, because log(1.23) yields only decimal value which is less than 1. - log(12.3) will be same decimal vale but 1 added to it + log(12.3) will be same decimal value but 1 added to it which is log(12.3) = 1.093421685162235. We observe that decimal value remains same no matter 1.23 or 12.3 Since we use the function log_difference(), @@ -87,7 +87,7 @@ def solution(number: int = 678910) -> int: Hence to optimize the algorithm we will increment by 196 or 93 depending upon the log_difference() value. - Lets take for example 90. + Let's take for example 90. Since 90 is the first power leading to staring digits as 123, we will increment iterator by 196. Because the difference between any two powers leading to 123 @@ -99,7 +99,7 @@ def solution(number: int = 678910) -> int: The iterator will now become 379, which is the next power leading to 123 as starting digits. - Lets take 1060. We increment by 196, we get 1256. + Let's take 1060. We increment by 196, we get 1256. log_difference(1256) = 0.09367455396034, Which is greater than upperbound hence we increment by 93. Now iterator is 1349. log_difference(1349) = 0.08946415071057 which is less than lowerbound. @@ -107,7 +107,7 @@ def solution(number: int = 678910) -> int: Conditions are as follows: - 1) If we find a power, whose log_difference() is in the range of + 1) If we find a power whose log_difference() is in the range of lower and upperbound, we will increment by 196. which implies that the power is a number which will lead to 123 as starting digits. 
2) If we find a power, whose log_difference() is greater than or equal upperbound, From e23c18fb5cb34d51b69e2840c304ade597163085 Mon Sep 17 00:00:00 2001 From: Omkaar <79257339+Pysics@users.noreply.github.com> Date: Thu, 12 May 2022 09:00:00 +0530 Subject: [PATCH 0442/1543] Fix typos (#6127) --- maths/integration_by_simpson_approx.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maths/integration_by_simpson_approx.py b/maths/integration_by_simpson_approx.py index da0e1cffde02..feb77440dd2f 100644 --- a/maths/integration_by_simpson_approx.py +++ b/maths/integration_by_simpson_approx.py @@ -40,7 +40,7 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo Args: function : the function which's integration is desired a : the lower limit of integration - b : upper limit of integraion + b : upper limit of integration precision : precision of the result,error required default is 4 Returns: @@ -106,7 +106,7 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo isinstance(precision, int) and precision > 0 ), f"precision should be positive integer your input : {precision}" - # just applying the formula of simpson for approximate integraion written in + # just applying the formula of simpson for approximate integration written in # mentioned article in first comment of this file and above this function h = (b - a) / N_STEPS From 533eea5afa916fbe1d0db6db8da76c68b2928ca0 Mon Sep 17 00:00:00 2001 From: Leoriem-code <73761711+Leoriem-code@users.noreply.github.com> Date: Thu, 12 May 2022 05:35:56 +0200 Subject: [PATCH 0443/1543] fix mypy annotations for arithmetic_analysis (#6040) * fixed mypy annotations for arithmetic_analysis * shortened numpy references --- arithmetic_analysis/gaussian_elimination.py | 14 ++++++++++---- arithmetic_analysis/in_static_equilibrium.py | 9 +++++---- arithmetic_analysis/jacobi_iteration_method.py | 14 +++++++++----- arithmetic_analysis/lu_decomposition.py | 6 +++++- 4 files changed, 29 insertions(+), 14 deletions(-) diff --git a/arithmetic_analysis/gaussian_elimination.py b/arithmetic_analysis/gaussian_elimination.py index 2dada4fbf9b1..89ed3b323d03 100644 --- a/arithmetic_analysis/gaussian_elimination.py +++ b/arithmetic_analysis/gaussian_elimination.py @@ -5,9 +5,13 @@ import numpy as np +from numpy import float64 +from numpy.typing import NDArray -def retroactive_resolution(coefficients: np.matrix, vector: np.ndarray) -> np.ndarray: +def retroactive_resolution( + coefficients: NDArray[float64], vector: NDArray[float64] +) -> NDArray[float64]: """ This function performs a retroactive linear system resolution for triangular matrix @@ -27,7 +31,7 @@ def retroactive_resolution(coefficients: np.matrix, vector: np.ndarray) -> np.nd rows, columns = np.shape(coefficients) - x = np.zeros((rows, 1), dtype=float) + x: NDArray[float64] = np.zeros((rows, 1), dtype=float) for row in reversed(range(rows)): sum = 0 for col in range(row + 1, columns): @@ -38,7 +42,9 @@ def retroactive_resolution(coefficients: np.matrix, vector: np.ndarray) -> np.nd return x -def gaussian_elimination(coefficients: np.matrix, vector: np.ndarray) -> np.ndarray: +def gaussian_elimination( + coefficients: NDArray[float64], vector: NDArray[float64] +) -> NDArray[float64]: """ This function performs Gaussian elimination method @@ -60,7 +66,7 @@ def gaussian_elimination(coefficients: np.matrix, vector: np.ndarray) -> np.ndar return np.array((), dtype=float) # augmented matrix - augmented_mat = np.concatenate((coefficients, 
vector), axis=1) + augmented_mat: NDArray[float64] = np.concatenate((coefficients, vector), axis=1) augmented_mat = augmented_mat.astype("float64") # scale the matrix leaving it triangular diff --git a/arithmetic_analysis/in_static_equilibrium.py b/arithmetic_analysis/in_static_equilibrium.py index ed0d1eb98cf3..d762a376f577 100644 --- a/arithmetic_analysis/in_static_equilibrium.py +++ b/arithmetic_analysis/in_static_equilibrium.py @@ -3,7 +3,8 @@ """ from __future__ import annotations -from numpy import array, cos, cross, ndarray, radians, sin +from numpy import array, cos, cross, float64, radians, sin +from numpy.typing import NDArray def polar_force( @@ -27,7 +28,7 @@ def polar_force( def in_static_equilibrium( - forces: ndarray, location: ndarray, eps: float = 10**-1 + forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1 ) -> bool: """ Check if a system is in equilibrium. @@ -46,7 +47,7 @@ def in_static_equilibrium( False """ # summation of moments is zero - moments: ndarray = cross(location, forces) + moments: NDArray[float64] = cross(location, forces) sum_moments: float = sum(moments) return abs(sum_moments) < eps @@ -61,7 +62,7 @@ def in_static_equilibrium( ] ) - location = array([[0, 0], [0, 0], [0, 0]]) + location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py index 6674824255a1..4336aaa91623 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -4,13 +4,15 @@ from __future__ import annotations import numpy as np +from numpy import float64 +from numpy.typing import NDArray # Method to find solution of system of linear equations def jacobi_iteration_method( - coefficient_matrix: np.ndarray, - constant_matrix: np.ndarray, - init_val: list, + coefficient_matrix: NDArray[float64], + constant_matrix: NDArray[float64], + init_val: list[int], iterations: int, ) -> list[float]: """ @@ -99,7 +101,9 @@ def jacobi_iteration_method( if iterations <= 0: raise ValueError("Iterations must be at least 1") - table = np.concatenate((coefficient_matrix, constant_matrix), axis=1) + table: NDArray[float64] = np.concatenate( + (coefficient_matrix, constant_matrix), axis=1 + ) rows, cols = table.shape @@ -125,7 +129,7 @@ def jacobi_iteration_method( # Checks if the given matrix is strictly diagonally dominant -def strictly_diagonally_dominant(table: np.ndarray) -> bool: +def strictly_diagonally_dominant(table: NDArray[float64]) -> bool: """ >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 4, -4]]) >>> strictly_diagonally_dominant(table) diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index b488b1bb3211..371f7b166b2e 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -6,9 +6,13 @@ from __future__ import annotations import numpy as np +import numpy.typing as NDArray +from numpy import float64 -def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]: +def lower_upper_decomposition( + table: NDArray[float64], +) -> tuple[NDArray[float64], NDArray[float64]]: """Lower-Upper (LU) Decomposition Example: From 562cf31a9a9d448b761cdc30df03fb7b526966d9 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 12 May 2022 06:48:04 +0300 Subject: [PATCH 0444/1543] Improve Project Euler problem 074 solution 2 (#5803) * Fix statement * Improve 
solution * Fix * Add tests --- project_euler/problem_074/sol2.py | 197 +++++++++++++++++------------- 1 file changed, 109 insertions(+), 88 deletions(-) diff --git a/project_euler/problem_074/sol2.py b/project_euler/problem_074/sol2.py index 55e67c6b98dd..d76bb014d629 100644 --- a/project_euler/problem_074/sol2.py +++ b/project_euler/problem_074/sol2.py @@ -1,122 +1,143 @@ """ - Project Euler Problem 074: https://projecteuler.net/problem=74 +Project Euler Problem 074: https://projecteuler.net/problem=74 - Starting from any positive integer number - it is possible to attain another one summing the factorial of its digits. +The number 145 is well known for the property that the sum of the factorial of its +digits is equal to 145: - Repeating this step, we can build chains of numbers. - It is not difficult to prove that EVERY starting number - will eventually get stuck in a loop. +1! + 4! + 5! = 1 + 24 + 120 = 145 - The request is to find how many numbers less than one million - produce a chain with exactly 60 non repeating items. +Perhaps less well known is 169, in that it produces the longest chain of numbers that +link back to 169; it turns out that there are only three such loops that exist: - Solution approach: - This solution simply consists in a loop that generates - the chains of non repeating items. - The generation of the chain stops before a repeating item - or if the size of the chain is greater then the desired one. - After generating each chain, the length is checked and the - counter increases. -""" +169 → 363601 → 1454 → 169 +871 → 45361 → 871 +872 → 45362 → 872 -factorial_cache: dict[int, int] = {} -factorial_sum_cache: dict[int, int] = {} +It is not difficult to prove that EVERY starting number will eventually get stuck in a +loop. For example, +69 → 363600 → 1454 → 169 → 363601 (→ 1454) +78 → 45360 → 871 → 45361 (→ 871) +540 → 145 (→ 145) -def factorial(a: int) -> int: - """Returns the factorial of the input a - >>> factorial(5) - 120 +Starting with 69 produces a chain of five non-repeating terms, but the longest +non-repeating chain with a starting number below one million is sixty terms. - >>> factorial(6) - 720 +How many chains, with a starting number below one million, contain exactly sixty +non-repeating terms? - >>> factorial(0) - 1 - """ - - # The factorial function is not defined for negative numbers - if a < 0: - raise ValueError("Invalid negative input!", a) +Solution approach: +This solution simply consists in a loop that generates the chains of non repeating +items using the cached sizes of the previous chains. +The generation of the chain stops before a repeating item or if the size of the chain +is greater then the desired one. +After generating each chain, the length is checked and the counter increases. +""" +from math import factorial - if a in factorial_cache: - return factorial_cache[a] +DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} - # The case of 0! is handled separately - if a == 0: - factorial_cache[a] = 1 - else: - # use a temporary support variable to store the computation - temporary_number = a - temporary_computation = 1 - while temporary_number > 0: - temporary_computation *= temporary_number - temporary_number -= 1 +def digit_factorial_sum(number: int) -> int: + """ + Function to perform the sum of the factorial of all the digits in number - factorial_cache[a] = temporary_computation - return factorial_cache[a] + >>> digit_factorial_sum(69.0) + Traceback (most recent call last): + ... 
+ TypeError: Parameter number must be int + >>> digit_factorial_sum(-1) + Traceback (most recent call last): + ... + ValueError: Parameter number must be greater than or equal to 0 -def factorial_sum(a: int) -> int: - """Function to perform the sum of the factorial - of all the digits in a + >>> digit_factorial_sum(0) + 1 - >>> factorial_sum(69) + >>> digit_factorial_sum(69) 363600 """ - if a in factorial_sum_cache: - return factorial_sum_cache[a] - # Prepare a variable to hold the computation - fact_sum = 0 - - """ Convert a in string to iterate on its digits - convert the digit back into an int - and add its factorial to fact_sum. - """ - for i in str(a): - fact_sum += factorial(int(i)) - factorial_sum_cache[a] = fact_sum - return fact_sum + if not isinstance(number, int): + raise TypeError("Parameter number must be int") + + if number < 0: + raise ValueError("Parameter number must be greater than or equal to 0") + + # Converts number in string to iterate on its digits and adds its factorial. + return sum(DIGIT_FACTORIAL[digit] for digit in str(number)) def solution(chain_length: int = 60, number_limit: int = 1000000) -> int: - """Returns the number of numbers that produce - chains with exactly 60 non repeating elements. - >>> solution(10, 1000) - 26 """ + Returns the number of numbers below number_limit that produce chains with exactly + chain_length non repeating elements. - # the counter for the chains with the exact desired length - chain_counter = 0 - - for i in range(1, number_limit + 1): + >>> solution(10.0, 1000) + Traceback (most recent call last): + ... + TypeError: Parameters chain_length and number_limit must be int - # The temporary list will contain the elements of the chain - chain_set = {i} - len_chain_set = 1 - last_chain_element = i + >>> solution(10, 1000.0) + Traceback (most recent call last): + ... + TypeError: Parameters chain_length and number_limit must be int - # The new element of the chain - new_chain_element = factorial_sum(last_chain_element) + >>> solution(0, 1000) + Traceback (most recent call last): + ... + ValueError: Parameters chain_length and number_limit must be greater than 0 - # Stop computing the chain when you find a repeating item - # or the length it greater then the desired one. + >>> solution(10, 0) + Traceback (most recent call last): + ... 
+ ValueError: Parameters chain_length and number_limit must be greater than 0 - while new_chain_element not in chain_set and len_chain_set <= chain_length: - chain_set.add(new_chain_element) + >>> solution(10, 1000) + 26 + """ - len_chain_set += 1 - last_chain_element = new_chain_element - new_chain_element = factorial_sum(last_chain_element) + if not isinstance(chain_length, int) or not isinstance(number_limit, int): + raise TypeError("Parameters chain_length and number_limit must be int") - # If the while exited because the chain list contains the exact amount - # of elements increase the counter - if len_chain_set == chain_length: - chain_counter += 1 + if chain_length <= 0 or number_limit <= 0: + raise ValueError( + "Parameters chain_length and number_limit must be greater than 0" + ) - return chain_counter + # the counter for the chains with the exact desired length + chains_counter = 0 + # the cached sizes of the previous chains + chain_sets_lengths: dict[int, int] = {} + + for start_chain_element in range(1, number_limit): + + # The temporary set will contain the elements of the chain + chain_set = set() + chain_set_length = 0 + + # Stop computing the chain when you find a cached size, a repeating item or the + # length is greater then the desired one. + chain_element = start_chain_element + while ( + chain_element not in chain_sets_lengths + and chain_element not in chain_set + and chain_set_length <= chain_length + ): + chain_set.add(chain_element) + chain_set_length += 1 + chain_element = digit_factorial_sum(chain_element) + + if chain_element in chain_sets_lengths: + chain_set_length += chain_sets_lengths[chain_element] + + chain_sets_lengths[start_chain_element] = chain_set_length + + # If chain contains the exact amount of elements increase the counter + if chain_set_length == chain_length: + chains_counter += 1 + + return chains_counter if __name__ == "__main__": From bbb88bb5c261085ff23bce2b3c17266ebfa7b087 Mon Sep 17 00:00:00 2001 From: eee555 <50390200+eee555@users.noreply.github.com> Date: Fri, 13 May 2022 04:28:51 +0800 Subject: [PATCH 0445/1543] Fix bug in bucket_sort.py (#6005) --- sorts/bucket_sort.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sorts/bucket_sort.py b/sorts/bucket_sort.py index 58242a1cb1f8..7bcbe61a4526 100644 --- a/sorts/bucket_sort.py +++ b/sorts/bucket_sort.py @@ -54,8 +54,8 @@ def bucket_sort(my_list: list) -> list: bucket_count = int(max_value - min_value) + 1 buckets: list[list] = [[] for _ in range(bucket_count)] - for i in range(len(my_list)): - buckets[(int(my_list[i] - min_value) // bucket_count)].append(my_list[i]) + for i in my_list: + buckets[int(i - min_value)].append(i) return [v for bucket in buckets for v in sorted(bucket)] From e95ecfaf27c545391bdb7a2d1d8948943a40f828 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj Date: Fri, 13 May 2022 11:25:53 +0530 Subject: [PATCH 0446/1543] Add missing type annotations for `strings` directory (#5817) * Type annotations for `strings/autocomplete_using_trie.py` * Update autocomplete_using_trie.py * Update detecting_english_programmatically.py * Update detecting_english_programmatically.py * Update frequency_finder.py * Update frequency_finder.py * Update frequency_finder.py * Update word_occurrence.py * Update frequency_finder.py * Update z_function.py * Update z_function.py * Update frequency_finder.py --- strings/autocomplete_using_trie.py | 28 +++--- strings/detecting_english_programmatically.py | 53 ++++++----- strings/frequency_finder.py | 94 +++++++------------ 
strings/word_occurrence.py | 3 +- strings/z_function.py | 6 +- 5 files changed, 82 insertions(+), 102 deletions(-) diff --git a/strings/autocomplete_using_trie.py b/strings/autocomplete_using_trie.py index 8aa0dc223680..758260292a30 100644 --- a/strings/autocomplete_using_trie.py +++ b/strings/autocomplete_using_trie.py @@ -1,11 +1,13 @@ +from __future__ import annotations + END = "#" class Trie: - def __init__(self): - self._trie = {} + def __init__(self) -> None: + self._trie: dict = {} - def insert_word(self, text): + def insert_word(self, text: str) -> None: trie = self._trie for char in text: if char not in trie: @@ -13,7 +15,7 @@ def insert_word(self, text): trie = trie[char] trie[END] = True - def find_word(self, prefix): + def find_word(self, prefix: str) -> tuple | list: trie = self._trie for char in prefix: if char in trie: @@ -22,7 +24,7 @@ def find_word(self, prefix): return [] return self._elements(trie) - def _elements(self, d): + def _elements(self, d: dict) -> tuple: result = [] for c, v in d.items(): if c == END: @@ -39,26 +41,28 @@ def _elements(self, d): trie.insert_word(word) -def autocomplete_using_trie(s): +def autocomplete_using_trie(string: str) -> tuple: """ >>> trie = Trie() >>> for word in words: ... trie.insert_word(word) ... >>> matches = autocomplete_using_trie("de") - - "detergent " in matches + >>> "detergent " in matches True - "dog " in matches + >>> "dog " in matches False """ - suffixes = trie.find_word(s) - return tuple(s + w for w in suffixes) + suffixes = trie.find_word(string) + return tuple(string + word for word in suffixes) -def main(): +def main() -> None: print(autocomplete_using_trie("de")) if __name__ == "__main__": + import doctest + + doctest.testmod() main() diff --git a/strings/detecting_english_programmatically.py b/strings/detecting_english_programmatically.py index 44fb7191866b..aa18db21027a 100644 --- a/strings/detecting_english_programmatically.py +++ b/strings/detecting_english_programmatically.py @@ -4,55 +4,56 @@ LETTERS_AND_SPACE = UPPERLETTERS + UPPERLETTERS.lower() + " \t\n" -def loadDictionary(): +def load_dictionary() -> dict[str, None]: path = os.path.split(os.path.realpath(__file__)) - englishWords = {} - with open(path[0] + "/dictionary.txt") as dictionaryFile: - for word in dictionaryFile.read().split("\n"): - englishWords[word] = None - return englishWords + english_words: dict[str, None] = {} + with open(path[0] + "/dictionary.txt") as dictionary_file: + for word in dictionary_file.read().split("\n"): + english_words[word] = None + return english_words -ENGLISH_WORDS = loadDictionary() +ENGLISH_WORDS = load_dictionary() -def getEnglishCount(message): +def get_english_count(message: str) -> float: message = message.upper() - message = removeNonLetters(message) - possibleWords = message.split() + message = remove_non_letters(message) + possible_words = message.split() - if possibleWords == []: + if possible_words == []: return 0.0 matches = 0 - for word in possibleWords: + for word in possible_words: if word in ENGLISH_WORDS: matches += 1 - return float(matches) / len(possibleWords) + return float(matches) / len(possible_words) -def removeNonLetters(message): - lettersOnly = [] +def remove_non_letters(message: str) -> str: + letters_only = [] for symbol in message: if symbol in LETTERS_AND_SPACE: - lettersOnly.append(symbol) - return "".join(lettersOnly) + letters_only.append(symbol) + return "".join(letters_only) -def isEnglish(message, wordPercentage=20, letterPercentage=85): +def is_english( + message: str, 
word_percentage: int = 20, letter_percentage: int = 85 +) -> bool: """ - >>> isEnglish('Hello World') + >>> is_english('Hello World') True - - >>> isEnglish('llold HorWd') + >>> is_english('llold HorWd') False """ - wordsMatch = getEnglishCount(message) * 100 >= wordPercentage - numLetters = len(removeNonLetters(message)) - messageLettersPercentage = (float(numLetters) / len(message)) * 100 - lettersMatch = messageLettersPercentage >= letterPercentage - return wordsMatch and lettersMatch + words_match = get_english_count(message) * 100 >= word_percentage + num_letters = len(remove_non_letters(message)) + message_letters_percentage = (float(num_letters) / len(message)) * 100 + letters_match = message_letters_percentage >= letter_percentage + return words_match and letters_match if __name__ == "__main__": diff --git a/strings/frequency_finder.py b/strings/frequency_finder.py index 48760a9deb09..7024be17b8ab 100644 --- a/strings/frequency_finder.py +++ b/strings/frequency_finder.py @@ -1,7 +1,9 @@ # Frequency Finder +import string + # frequency taken from http://en.wikipedia.org/wiki/Letter_frequency -englishLetterFreq = { +english_letter_freq = { "E": 12.70, "T": 9.06, "A": 8.17, @@ -33,85 +35,57 @@ LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" -def getLetterCount(message): - letterCount = { - "A": 0, - "B": 0, - "C": 0, - "D": 0, - "E": 0, - "F": 0, - "G": 0, - "H": 0, - "I": 0, - "J": 0, - "K": 0, - "L": 0, - "M": 0, - "N": 0, - "O": 0, - "P": 0, - "Q": 0, - "R": 0, - "S": 0, - "T": 0, - "U": 0, - "V": 0, - "W": 0, - "X": 0, - "Y": 0, - "Z": 0, - } +def get_letter_count(message: str) -> dict[str, int]: + letter_count = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: - letterCount[letter] += 1 + letter_count[letter] += 1 - return letterCount + return letter_count -def getItemAtIndexZero(x): +def get_item_at_index_zero(x: tuple) -> str: return x[0] -def getFrequencyOrder(message): - letterToFreq = getLetterCount(message) - freqToLetter = {} +def get_frequency_order(message: str) -> str: + letter_to_freq = get_letter_count(message) + freq_to_letter: dict[int, list[str]] = { + freq: [] for letter, freq in letter_to_freq.items() + } for letter in LETTERS: - if letterToFreq[letter] not in freqToLetter: - freqToLetter[letterToFreq[letter]] = [letter] - else: - freqToLetter[letterToFreq[letter]].append(letter) + freq_to_letter[letter_to_freq[letter]].append(letter) + + freq_to_letter_str: dict[int, str] = {} - for freq in freqToLetter: - freqToLetter[freq].sort(key=ETAOIN.find, reverse=True) - freqToLetter[freq] = "".join(freqToLetter[freq]) + for freq in freq_to_letter: + freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True) + freq_to_letter_str[freq] = "".join(freq_to_letter[freq]) - freqPairs = list(freqToLetter.items()) - freqPairs.sort(key=getItemAtIndexZero, reverse=True) + freq_pairs = list(freq_to_letter_str.items()) + freq_pairs.sort(key=get_item_at_index_zero, reverse=True) - freqOrder = [] - for freqPair in freqPairs: - freqOrder.append(freqPair[1]) + freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs] - return "".join(freqOrder) + return "".join(freq_order) -def englishFreqMatchScore(message): +def english_freq_match_score(message: str) -> int: """ - >>> englishFreqMatchScore('Hello World') + >>> english_freq_match_score('Hello World') 1 """ - freqOrder = getFrequencyOrder(message) - matchScore = 0 - for commonLetter in ETAOIN[:6]: - if commonLetter in freqOrder[:6]: - matchScore += 1 + freq_order = 
get_frequency_order(message) + match_score = 0 + for common_letter in ETAOIN[:6]: + if common_letter in freq_order[:6]: + match_score += 1 - for uncommonLetter in ETAOIN[-6:]: - if uncommonLetter in freqOrder[-6:]: - matchScore += 1 + for uncommon_letter in ETAOIN[-6:]: + if uncommon_letter in freq_order[-6:]: + match_score += 1 - return matchScore + return match_score if __name__ == "__main__": diff --git a/strings/word_occurrence.py b/strings/word_occurrence.py index 4acfa41adf11..4e0b3ff34ccf 100644 --- a/strings/word_occurrence.py +++ b/strings/word_occurrence.py @@ -1,6 +1,7 @@ # Created by sarathkaul on 17/11/19 # Modified by Arkadip Bhattacharya(@darkmatter18) on 20/04/2020 from collections import defaultdict +from typing import DefaultDict def word_occurence(sentence: str) -> dict: @@ -14,7 +15,7 @@ def word_occurence(sentence: str) -> dict: >>> dict(word_occurence("Two spaces")) {'Two': 1, 'spaces': 1} """ - occurrence: dict = defaultdict(int) + occurrence: DefaultDict[str, int] = defaultdict(int) # Creating a dictionary containing count of each word for word in sentence.split(): occurrence[word] += 1 diff --git a/strings/z_function.py b/strings/z_function.py index d8d823a37efb..e77ba8dab5df 100644 --- a/strings/z_function.py +++ b/strings/z_function.py @@ -10,7 +10,7 @@ """ -def z_function(input_str: str) -> list: +def z_function(input_str: str) -> list[int]: """ For the given string this function computes value for each index, which represents the maximal length substring starting from the index @@ -27,7 +27,7 @@ def z_function(input_str: str) -> list: >>> z_function("zxxzxxz") [0, 0, 0, 4, 0, 0, 1] """ - z_result = [0] * len(input_str) + z_result = [0 for i in range(len(input_str))] # initialize interval's left pointer and right pointer left_pointer, right_pointer = 0, 0 @@ -49,7 +49,7 @@ def z_function(input_str: str) -> list: return z_result -def go_next(i, z_result, s): +def go_next(i: int, z_result: list[int], s: str) -> bool: """ Check if we have to move forward to the next characters or not """ From dbee5f072f68c57bce3443e5ed07fe496ba9d76d Mon Sep 17 00:00:00 2001 From: Omkaar <79257339+Pysics@users.noreply.github.com> Date: Fri, 13 May 2022 18:21:44 +0530 Subject: [PATCH 0447/1543] Improve code on f-strings and brevity (#6126) * Update strassen_matrix_multiplication.py * Update matrix_operation.py * Update enigma_machine2.py * Update enigma_machine.py * Update enigma_machine2.py * Update rod_cutting.py * Update external_sort.py * Update sol1.py * Update hill_cipher.py * Update prime_numbers.py * Update integration_by_simpson_approx.py --- ciphers/enigma_machine2.py | 6 +++--- ciphers/hill_cipher.py | 2 +- data_structures/hashing/number_theory/prime_numbers.py | 2 +- divide_and_conquer/strassen_matrix_multiplication.py | 2 +- dynamic_programming/rod_cutting.py | 2 +- hashes/enigma_machine.py | 2 +- maths/integration_by_simpson_approx.py | 10 +++------- matrix/matrix_operation.py | 2 +- project_euler/problem_067/sol1.py | 2 +- sorts/external_sort.py | 2 +- 10 files changed, 14 insertions(+), 18 deletions(-) diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index 9252dd0edbf7..70f84752d55b 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -94,15 +94,15 @@ def _validator( rotorpos1, rotorpos2, rotorpos3 = rotpos if not 0 < rotorpos1 <= len(abc): raise ValueError( - f"First rotor position is not within range of 1..26 (" f"{rotorpos1}" + "First rotor position is not within range of 1..26 (" f"{rotorpos1}" ) if not 0 < rotorpos2 <= 
len(abc): raise ValueError( - f"Second rotor position is not within range of 1..26 (" f"{rotorpos2})" + "Second rotor position is not within range of 1..26 (" f"{rotorpos2})" ) if not 0 < rotorpos3 <= len(abc): raise ValueError( - f"Third rotor position is not within range of 1..26 (" f"{rotorpos3})" + "Third rotor position is not within range of 1..26 (" f"{rotorpos3})" ) # Validates string and returns dict diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index bc8f5b41b624..d8e436e92c56 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -62,7 +62,7 @@ class HillCipher: # take x and return x % len(key_string) modulus = numpy.vectorize(lambda x: x % 36) - to_int = numpy.vectorize(lambda x: round(x)) + to_int = numpy.vectorize(round) def __init__(self, encrypt_key: numpy.ndarray) -> None: """ diff --git a/data_structures/hashing/number_theory/prime_numbers.py b/data_structures/hashing/number_theory/prime_numbers.py index db4d40f475b2..bf614e7d48df 100644 --- a/data_structures/hashing/number_theory/prime_numbers.py +++ b/data_structures/hashing/number_theory/prime_numbers.py @@ -14,7 +14,7 @@ def check_prime(number): elif number == special_non_primes[-1]: return 3 - return all([number % i for i in range(2, number)]) + return all(number % i for i in range(2, number)) def next_prime(value, factor=1, **kwargs): diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py index ca10e04abcbc..17efcfc7c8ee 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -114,7 +114,7 @@ def strassen(matrix1: list, matrix2: list) -> list: """ if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]: raise Exception( - f"Unable to multiply these matrices, please check the dimensions. \n" + "Unable to multiply these matrices, please check the dimensions. \n" f"Matrix A:{matrix1} \nMatrix B:{matrix2}" ) dimension1 = matrix_dimensions(matrix1) diff --git a/dynamic_programming/rod_cutting.py b/dynamic_programming/rod_cutting.py index 442a39cb1616..79104d8f4044 100644 --- a/dynamic_programming/rod_cutting.py +++ b/dynamic_programming/rod_cutting.py @@ -181,7 +181,7 @@ def _enforce_args(n: int, prices: list): if n > len(prices): raise ValueError( - f"Each integral piece of rod must have a corresponding " + "Each integral piece of rod must have a corresponding " f"price. Got n = {n} but length of prices = {len(prices)}" ) diff --git a/hashes/enigma_machine.py b/hashes/enigma_machine.py index d1cb6efc2e8d..b0d45718e286 100644 --- a/hashes/enigma_machine.py +++ b/hashes/enigma_machine.py @@ -55,5 +55,5 @@ def engine(input_character): print("\n" + "".join(code)) print( f"\nYour Token is {token} please write it down.\nIf you want to decode " - f"this message again you should input same digits as token!" + "this message again you should input same digits as token!" 
) diff --git a/maths/integration_by_simpson_approx.py b/maths/integration_by_simpson_approx.py index feb77440dd2f..408041de93f1 100644 --- a/maths/integration_by_simpson_approx.py +++ b/maths/integration_by_simpson_approx.py @@ -92,16 +92,12 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo assert callable( function ), f"the function(object) passed should be callable your input : {function}" - assert isinstance(a, float) or isinstance( - a, int - ), f"a should be float or integer your input : {a}" - assert isinstance(function(a), float) or isinstance(function(a), int), ( + assert isinstance(a, (float, int)), f"a should be float or integer your input : {a}" + assert isinstance(function(a), (float, int)), ( "the function should return integer or float return type of your function, " f"{type(a)}" ) - assert isinstance(b, float) or isinstance( - b, int - ), f"b should be float or integer your input : {b}" + assert isinstance(b, (float, int)), f"b should be float or integer your input : {b}" assert ( isinstance(precision, int) and precision > 0 ), f"precision should be positive integer your input : {precision}" diff --git a/matrix/matrix_operation.py b/matrix/matrix_operation.py index 6d0cd4e655eb..8e5d0f583486 100644 --- a/matrix/matrix_operation.py +++ b/matrix/matrix_operation.py @@ -171,7 +171,7 @@ def _verify_matrix_sizes( shape = _shape(matrix_a) + _shape(matrix_b) if shape[0] != shape[3] or shape[1] != shape[2]: raise ValueError( - f"operands could not be broadcast together with shape " + "operands could not be broadcast together with shape " f"({shape[0], shape[1]}), ({shape[2], shape[3]})" ) return (shape[0], shape[2]), (shape[1], shape[3]) diff --git a/project_euler/problem_067/sol1.py b/project_euler/problem_067/sol1.py index ebfa865a9479..527d4dc592ac 100644 --- a/project_euler/problem_067/sol1.py +++ b/project_euler/problem_067/sol1.py @@ -29,7 +29,7 @@ def solution(): triangle = f.readlines() a = map(lambda x: x.rstrip("\r\n").split(" "), triangle) - a = list(map(lambda x: list(map(lambda y: int(y), x)), a)) + a = list(map(lambda x: list(map(int, x)), a)) for i in range(1, len(a)): for j in range(len(a[i])): diff --git a/sorts/external_sort.py b/sorts/external_sort.py index 060e67adf827..7af7dc0a609d 100644 --- a/sorts/external_sort.py +++ b/sorts/external_sort.py @@ -41,7 +41,7 @@ def split(self, block_size, sort_key=None): i += 1 def cleanup(self): - map(lambda f: os.remove(f), self.block_filenames) + map(os.remove, self.block_filenames) class NWayMerge: From 80f1da235b0a467dc9b31aa8a56dd3a792a59d7c Mon Sep 17 00:00:00 2001 From: zer0-x <65136727+zer0-x@users.noreply.github.com> Date: Mon, 16 May 2022 14:28:30 +0300 Subject: [PATCH 0448/1543] Add sin function to maths (#5949) * Add sin function to /maths. * Fix typo in /maths/sin.py * Format sin.py to meet the new black rules. * Some improvements. * Fix a formating error. --- maths/sin.py | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 maths/sin.py diff --git a/maths/sin.py b/maths/sin.py new file mode 100644 index 000000000000..b06e6c9f1e5d --- /dev/null +++ b/maths/sin.py @@ -0,0 +1,64 @@ +""" +Calculate sin function. + +It's not a perfect function so I am rounding the result to 10 decimal places by default. + +Formula: sin(x) = x - x^3/3! + x^5/5! - x^7/7! + ... +Where: x = angle in randians. 
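A minimal numeric check of the truncated series above (illustrative only; the names x and approx below are ad-hoc, not taken from the patch): three terms already reproduce sin(30°) = 0.5 to several decimal places.

# Three-term partial sum of the Maclaurin series for sin, evaluated at 30 degrees.
from math import factorial, radians

x = radians(30)
approx = x - x**3 / factorial(3) + x**5 / factorial(5)
print(round(approx, 4))  # 0.5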
+ +Source: + https://www.homeschoolmath.net/teaching/sine_calculator.php + +""" + +from math import factorial, radians + + +def sin( + angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10 +) -> float: + """ + Implement sin function. + + >>> sin(0.0) + 0.0 + >>> sin(90.0) + 1.0 + >>> sin(180.0) + 0.0 + >>> sin(270.0) + -1.0 + >>> sin(0.68) + 0.0118679603 + >>> sin(1.97) + 0.0343762121 + >>> sin(64.0) + 0.8987940463 + >>> sin(9999.0) + -0.9876883406 + >>> sin(-689.0) + 0.5150380749 + >>> sin(89.7) + 0.9999862922 + """ + # Simplify the angle to be between 360 and -360 degrees. + angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) + + # Converting from degrees to radians + angle_in_radians = radians(angle_in_degrees) + + result = angle_in_radians + a = 3 + b = -1 + + for _ in range(accuracy): + result += (b * (angle_in_radians**a)) / factorial(a) + + b = -b # One positive term and the next will be negative and so on... + a += 2 # Increased by 2 for every term. + + return round(result, rounded_values_count) + + +if __name__ == "__main__": + __import__("doctest").testmod() From ec54da34b96de0b09b0685881646df1f9f7df989 Mon Sep 17 00:00:00 2001 From: Aviv Faraj <73610201+avivfaraj@users.noreply.github.com> Date: Mon, 16 May 2022 10:26:19 -0400 Subject: [PATCH 0449/1543] Lorenz transformation - physics (#6097) * Add files via upload * Changed print to f-string Also printed out results in a math notation * Add files via upload * Fixes: #4710 provided return type * File exists in another pull request * imported radians from math * Updated file according to pre-commit test * Updated file * Updated gamma * Deleted duplicate file * removed pi * reversed tests * Fixed angle condition * Modified prints to f-string * Update horizontal_projectile_motion.py * Update horizontal_projectile_motion.py * Fixes #4710 added exceptions and tests * Added float tests * Fixed type annotations * Fixed last annotation * Fixed annotations * fixed format * Revert "fixed format" This reverts commit 5b0249ac0a0f9c36c3cfbab8423eb72925a73ffb. Undo changes @wq * Revert "Fixed annotations" This reverts commit c37bb9540834cb77e37822eb376a5896cda34778. * Revert "Fixed last annotation" This reverts commit e3678fdeadd23f1bfca27015ab524efa184f6c79. * Revert "Fixed type annotations" This reverts commit 3f2b238c34cd926b335d1f6f750e009f08e8f270. 
* Revert to 4e2fcaf6fb * Fixing errors found during pre-commit * Added gauss law * Implemented Lorenz tranformation with four vector * pre-commit fixes * flake8 fixes * More flake8 fixes * Added blank space for flake8 * Added reference * Trailing whitespace fix * Replaced argument u with velocity (descriptive name fix) * Added tests for functions + moved velocity check to beta function * Modified condition to 'not symbolic' in the transform function * trainling whitespace fix * Added type hint for 'smybolic' argument in transform function * Changed reference to avoid pre-commit fails because of spelling issue related to the URL * Added tests for gamma and transformation_matrix functions * Fixed transformation_matrix tests * Fixed tests on beta and gamma functions --- physics/__init__.py | 0 physics/lorenz_transformation_four_vector.py | 205 +++++++++++++++++++ 2 files changed, 205 insertions(+) create mode 100644 physics/__init__.py create mode 100644 physics/lorenz_transformation_four_vector.py diff --git a/physics/__init__.py b/physics/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/physics/lorenz_transformation_four_vector.py b/physics/lorenz_transformation_four_vector.py new file mode 100644 index 000000000000..6c0d5f9d1997 --- /dev/null +++ b/physics/lorenz_transformation_four_vector.py @@ -0,0 +1,205 @@ +""" +Lorenz transformation describes the transition from a reference frame P +to another reference frame P', each of which is moving in a direction with +respect to the other. The Lorenz transformation implemented in this code +is the relativistic version using a four vector described by Minkowsky Space: +x0 = ct, x1 = x, x2 = y, and x3 = z + +NOTE: Please note that x0 is c (speed of light) times t (time). + +So, the Lorenz transformation using a four vector is defined as: + +|ct'| | γ -γβ 0 0| |ct| +|x' | = |-γβ γ 0 0| *|x | +|y' | | 0 0 1 0| |y | +|z' | | 0 0 0 1| |z | + +Where: + 1 +γ = --------------- + ----------- + / v^2 | + /(1 - --- + -/ c^2 + + v +β = ----- + c + +Reference: https://en.wikipedia.org/wiki/Lorentz_transformation +""" +from __future__ import annotations + +from math import sqrt + +import numpy as np # type: ignore +from sympy import symbols # type: ignore + +# Coefficient +# Speed of light (m/s) +c = 299792458 + +# Symbols +ct, x, y, z = symbols("ct x y z") +ct_p, x_p, y_p, z_p = symbols("ct' x' y' z'") + + +# Vehicle's speed divided by speed of light (no units) +def beta(velocity: float) -> float: + """ + >>> beta(c) + 1.0 + + >>> beta(199792458) + 0.666435904801848 + + >>> beta(1e5) + 0.00033356409519815205 + + >>> beta(0.2) + Traceback (most recent call last): + ... + ValueError: Speed must be greater than 1! + """ + if velocity > c: + raise ValueError("Speed must not exceed Light Speed 299,792,458 [m/s]!") + + # Usually the speed u should be much higher than 1 (c order of magnitude) + elif velocity < 1: + raise ValueError("Speed must be greater than 1!") + return velocity / c + + +def gamma(velocity: float) -> float: + """ + >>> gamma(4) + 1.0000000000000002 + + >>> gamma(1e5) + 1.0000000556325075 + + >>> gamma(3e7) + 1.005044845777813 + + >>> gamma(2.8e8) + 2.7985595722318277 + + >>> gamma(299792451) + 4627.49902669495 + + >>> gamma(0.3) + Traceback (most recent call last): + ... + ValueError: Speed must be greater than 1! + + >>> gamma(2*c) + Traceback (most recent call last): + ... + ValueError: Speed must not exceed Light Speed 299,792,458 [m/s]! 
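For a sense of scale to go with the doctests above, a hedged stand-alone check (the variable names are illustrative, not from the patch): at v = 0.6c the Lorentz factor is exactly 1.25.

# beta = v/c = 0.6  =>  gamma = 1 / sqrt(1 - 0.36) = 1.25
from math import sqrt

c = 299792458
v = 0.6 * c
beta_value = v / c
gamma_value = 1 / sqrt(1 - beta_value**2)
print(round(gamma_value, 6))  # 1.25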
+ """ + return 1 / (sqrt(1 - beta(velocity) ** 2)) + + +def transformation_matrix(velocity: float) -> np.array: + """ + >>> transformation_matrix(29979245) + array([[ 1.00503781, -0.10050378, 0. , 0. ], + [-0.10050378, 1.00503781, 0. , 0. ], + [ 0. , 0. , 1. , 0. ], + [ 0. , 0. , 0. , 1. ]]) + + >>> transformation_matrix(19979245.2) + array([[ 1.00222811, -0.06679208, 0. , 0. ], + [-0.06679208, 1.00222811, 0. , 0. ], + [ 0. , 0. , 1. , 0. ], + [ 0. , 0. , 0. , 1. ]]) + + >>> transformation_matrix(1) + array([[ 1.00000000e+00, -3.33564095e-09, 0.00000000e+00, + 0.00000000e+00], + [-3.33564095e-09, 1.00000000e+00, 0.00000000e+00, + 0.00000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 1.00000000e+00, + 0.00000000e+00], + [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, + 1.00000000e+00]]) + + >>> transformation_matrix(0) + Traceback (most recent call last): + ... + ValueError: Speed must be greater than 1! + + >>> transformation_matrix(c * 1.5) + Traceback (most recent call last): + ... + ValueError: Speed must not exceed Light Speed 299,792,458 [m/s]! + """ + return np.array( + [ + [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0], + [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] + ) + + +def transform( + velocity: float, event: np.array = np.zeros(4), symbolic: bool = True +) -> np.array: + """ + >>> transform(29979245,np.array([1,2,3,4]), False) + array([ 3.01302757e+08, -3.01302729e+07, 3.00000000e+00, 4.00000000e+00]) + + >>> transform(29979245) + array([1.00503781498831*ct - 0.100503778816875*x, + -0.100503778816875*ct + 1.00503781498831*x, 1.0*y, 1.0*z], + dtype=object) + + >>> transform(19879210.2) + array([1.0022057787097*ct - 0.066456172618675*x, + -0.066456172618675*ct + 1.0022057787097*x, 1.0*y, 1.0*z], + dtype=object) + + >>> transform(299792459, np.array([1,1,1,1])) + Traceback (most recent call last): + ... + ValueError: Speed must not exceed Light Speed 299,792,458 [m/s]! + + >>> transform(-1, np.array([1,1,1,1])) + Traceback (most recent call last): + ... + ValueError: Speed must be greater than 1! 
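To make the boost matrix concrete, a small sketch with hand-picked values (beta = 0.6, gamma = 1.25; independent of the doctests above): the transformation mixes the ct and x components of a purely time-like four-vector.

# Boost along x applied to (ct, x, y, z) = (1, 0, 0, 0).
import numpy as np

gamma_v, beta_v = 1.25, 0.6
boost = np.array(
    [
        [gamma_v, -gamma_v * beta_v, 0.0, 0.0],
        [-gamma_v * beta_v, gamma_v, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ]
)
print(boost @ np.array([1.0, 0.0, 0.0, 0.0]))  # first two components: 1.25 and -0.75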
+ """ + # Ensure event is not a vector of zeros + if not symbolic: + + # x0 is ct (speed of ligt * time) + event[0] = event[0] * c + else: + + # Symbolic four vector + event = np.array([ct, x, y, z]) + + return transformation_matrix(velocity).dot(event) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Example of symbolic vector: + four_vector = transform(29979245) + print("Example of four vector: ") + print(f"ct' = {four_vector[0]}") + print(f"x' = {four_vector[1]}") + print(f"y' = {four_vector[2]}") + print(f"z' = {four_vector[3]}") + + # Substitute symbols with numerical values: + values = np.array([1, 1, 1, 1]) + sub_dict = {ct: c * values[0], x: values[1], y: values[2], z: values[3]} + numerical_vector = [four_vector[i].subs(sub_dict) for i in range(0, 4)] + + print(f"\n{numerical_vector}") From dceb30aad623c8c9dffd739f41e1f5f46eb44530 Mon Sep 17 00:00:00 2001 From: Ikko Ashimine Date: Fri, 20 May 2022 13:03:54 +0900 Subject: [PATCH 0450/1543] Fix typo in word_occurrence.py (#6154) word_occurence -> word_occurrence --- strings/word_occurrence.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/strings/word_occurrence.py b/strings/word_occurrence.py index 4e0b3ff34ccf..8260620c38a4 100644 --- a/strings/word_occurrence.py +++ b/strings/word_occurrence.py @@ -4,15 +4,15 @@ from typing import DefaultDict -def word_occurence(sentence: str) -> dict: +def word_occurrence(sentence: str) -> dict: """ >>> from collections import Counter >>> SENTENCE = "a b A b c b d b d e f e g e h e i e j e 0" - >>> occurence_dict = word_occurence(SENTENCE) + >>> occurence_dict = word_occurrence(SENTENCE) >>> all(occurence_dict[word] == count for word, count ... in Counter(SENTENCE.split()).items()) True - >>> dict(word_occurence("Two spaces")) + >>> dict(word_occurrence("Two spaces")) {'Two': 1, 'spaces': 1} """ occurrence: DefaultDict[str, int] = defaultdict(int) @@ -23,5 +23,5 @@ def word_occurence(sentence: str) -> dict: if __name__ == "__main__": - for word, count in word_occurence("INPUT STRING").items(): + for word, count in word_occurrence("INPUT STRING").items(): print(f"{word}: {count}") From 5bac76d7a505c43aad6d0a32cd39982f4b927eac Mon Sep 17 00:00:00 2001 From: dangbb <51513363+dangbb@users.noreply.github.com> Date: Sat, 21 May 2022 21:02:53 +0700 Subject: [PATCH 0451/1543] Fix `iter_merge_sort` bug (#6153) * Fixed bug where array length 2 can't be sorted * Add MCC and DU path test Add test to conversions/octal_to_decimal.py and sorts\iterative_merge_sort.py * "" * Update octal_to_decimal.py Co-authored-by: John Law --- conversions/octal_to_decimal.py | 36 +++++++++++++++++++++++++++++++++ sorts/iterative_merge_sort.py | 24 ++++++++++++++++++++-- 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/conversions/octal_to_decimal.py b/conversions/octal_to_decimal.py index 5a7373fef7e3..551311e2651e 100644 --- a/conversions/octal_to_decimal.py +++ b/conversions/octal_to_decimal.py @@ -2,12 +2,48 @@ def oct_to_decimal(oct_string: str) -> int: """ Convert a octal value to its decimal equivalent + >>> oct_to_decimal("") + Traceback (most recent call last): + ... + ValueError: Empty string was passed to the function + >>> oct_to_decimal("-") + Traceback (most recent call last): + ... + ValueError: Non-octal value was passed to the function + >>> oct_to_decimal("e") + Traceback (most recent call last): + ... + ValueError: Non-octal value was passed to the function + >>> oct_to_decimal("8") + Traceback (most recent call last): + ... 
+ ValueError: Non-octal value was passed to the function + >>> oct_to_decimal("-e") + Traceback (most recent call last): + ... + ValueError: Non-octal value was passed to the function + >>> oct_to_decimal("-8") + Traceback (most recent call last): + ... + ValueError: Non-octal value was passed to the function + >>> oct_to_decimal("1") + 1 + >>> oct_to_decimal("-1") + -1 >>> oct_to_decimal("12") 10 >>> oct_to_decimal(" 12 ") 10 >>> oct_to_decimal("-45") -37 + >>> oct_to_decimal("-") + Traceback (most recent call last): + ... + ValueError: Non-octal value was passed to the function + >>> oct_to_decimal("0") + 0 + >>> oct_to_decimal("-4055") + -2093 >>> oct_to_decimal("2-0Fm") Traceback (most recent call last): ... diff --git a/sorts/iterative_merge_sort.py b/sorts/iterative_merge_sort.py index 5ee0badab9e6..327974fa61ae 100644 --- a/sorts/iterative_merge_sort.py +++ b/sorts/iterative_merge_sort.py @@ -32,6 +32,22 @@ def iter_merge_sort(input_list: list) -> list: >>> iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) [1, 2, 5, 7, 7, 8, 9] + >>> iter_merge_sort([1]) + [1] + >>> iter_merge_sort([2, 1]) + [1, 2] + >>> iter_merge_sort([2, 1, 3]) + [1, 2, 3] + >>> iter_merge_sort([4, 3, 2, 1]) + [1, 2, 3, 4] + >>> iter_merge_sort([5, 4, 3, 2, 1]) + [1, 2, 3, 4, 5] + >>> iter_merge_sort(['c', 'b', 'a']) + ['a', 'b', 'c'] + >>> iter_merge_sort([0.3, 0.2, 0.1]) + [0.1, 0.2, 0.3] + >>> iter_merge_sort(['dep', 'dang', 'trai']) + ['dang', 'dep', 'trai'] >>> iter_merge_sort([6]) [6] >>> iter_merge_sort([]) @@ -51,7 +67,7 @@ def iter_merge_sort(input_list: list) -> list: # iteration for two-way merging p = 2 - while p < len(input_list): + while p <= len(input_list): # getting low, high and middle value for merge-sort of single list for i in range(0, len(input_list), p): low = i @@ -62,6 +78,7 @@ def iter_merge_sort(input_list: list) -> list: if p * 2 >= len(input_list): mid = i input_list = merge(input_list, 0, mid, len(input_list) - 1) + break p *= 2 return input_list @@ -69,5 +86,8 @@ def iter_merge_sort(input_list: list) -> list: if __name__ == "__main__": user_input = input("Enter numbers separated by a comma:\n").strip() - unsorted = [int(item.strip()) for item in user_input.split(",")] + if user_input == "": + unsorted = [] + else: + unsorted = [int(item.strip()) for item in user_input.split(",")] print(iter_merge_sort(unsorted)) From a28ad3f759ba4b69e58e65db60eb26f0fd9756cc Mon Sep 17 00:00:00 2001 From: Luke Banicevic <60857954+banaboi@users.noreply.github.com> Date: Tue, 24 May 2022 11:18:50 +1000 Subject: [PATCH 0452/1543] Add Microsoft Excel Column Title to Column Number Conversion (#4849) * Added excel column title to number algorithm as part of conversions * Renamed file to better reflect algorithm function * Removed duplicate file * Update excel_title_to_column.py * Update excel_title_to_column.py Co-authored-by: John Law --- conversions/excel_title_to_column.py | 33 ++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 conversions/excel_title_to_column.py diff --git a/conversions/excel_title_to_column.py b/conversions/excel_title_to_column.py new file mode 100644 index 000000000000..d77031ec26f2 --- /dev/null +++ b/conversions/excel_title_to_column.py @@ -0,0 +1,33 @@ +def excel_title_to_column(column_title: str) -> int: + """ + Given a string column_title that represents + the column title in an Excel sheet, return + its corresponding column number. 
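The conversion is plain base-26 positional arithmetic with digits A=1 through Z=26; a minimal sketch of the same idea (variable names here are illustrative, not from the patch):

# "AZ" -> 1 * 26 + 26 = 52: fold each letter in as one base-26 digit.
title = "AZ"
value = 0
for letter in title:
    value = value * 26 + (ord(letter) - ord("A") + 1)
print(value)  # 52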
+ + >>> excel_title_to_column("A") + 1 + >>> excel_title_to_column("B") + 2 + >>> excel_title_to_column("AB") + 28 + >>> excel_title_to_column("Z") + 26 + """ + assert column_title.isupper() + answer = 0 + index = len(column_title) - 1 + power = 0 + + while index >= 0: + value = (ord(column_title[index]) - 64) * pow(26, power) + answer += value + power += 1 + index -= 1 + + return answer + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From de4d98081b9f9ba4bc6447ef20bb8fed329b343e Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 24 May 2022 04:20:47 +0300 Subject: [PATCH 0453/1543] Improve Project Euler problem 145 solution 1 (#6141) * updating DIRECTORY.md * Improve solution * updating DIRECTORY.md * Fix Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 9 +++++++++ project_euler/problem_145/sol1.py | 7 ++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index eeea22e4768f..64a87dc660da 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -112,6 +112,7 @@ * [Cnn Classification](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/cnn_classification.py) * [Flip Augmentation](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/flip_augmentation.py) * [Harris Corner](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/harris_corner.py) + * [Horn Schunck](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/horn_schunck.py) * [Mean Threshold](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/mean_threshold.py) * [Mosaic Augmentation](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/mosaic_augmentation.py) * [Pooling Functions](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/pooling_functions.py) @@ -131,6 +132,7 @@ * [Molecular Chemistry](https://github.com/TheAlgorithms/Python/blob/master/conversions/molecular_chemistry.py) * [Octal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/octal_to_decimal.py) * [Prefix Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions.py) + * [Prefix Conversions String](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions_string.py) * [Pressure Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/pressure_conversions.py) * [Rgb Hsv Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/rgb_hsv_conversion.py) * [Roman Numerals](https://github.com/TheAlgorithms/Python/blob/master/conversions/roman_numerals.py) @@ -529,6 +531,7 @@ * [Perfect Square](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_square.py) * [Persistence](https://github.com/TheAlgorithms/Python/blob/master/maths/persistence.py) * [Pi Monte Carlo Estimation](https://github.com/TheAlgorithms/Python/blob/master/maths/pi_monte_carlo_estimation.py) + * [Points Are Collinear 3D](https://github.com/TheAlgorithms/Python/blob/master/maths/points_are_collinear_3d.py) * [Pollard Rho](https://github.com/TheAlgorithms/Python/blob/master/maths/pollard_rho.py) * [Polynomial Evaluation](https://github.com/TheAlgorithms/Python/blob/master/maths/polynomial_evaluation.py) * [Power Using Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/power_using_recursion.py) @@ -619,6 +622,7 @@ * [Tower Of Hanoi](https://github.com/TheAlgorithms/Python/blob/master/other/tower_of_hanoi.py) ## Physics + * [Horizontal 
Projectile Motion](https://github.com/TheAlgorithms/Python/blob/master/physics/horizontal_projectile_motion.py) * [N Body Simulation](https://github.com/TheAlgorithms/Python/blob/master/physics/n_body_simulation.py) * [Newtons Second Law Of Motion](https://github.com/TheAlgorithms/Python/blob/master/physics/newtons_second_law_of_motion.py) @@ -833,6 +837,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_101/sol1.py) * Problem 102 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_102/sol1.py) + * Problem 104 + * [Sol](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_104/sol.py) * Problem 107 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_107/sol1.py) * Problem 109 @@ -857,6 +863,8 @@ * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_135/sol1.py) * Problem 144 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_144/sol1.py) + * Problem 145 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_145/sol1.py) * Problem 173 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_173/sol1.py) * Problem 174 @@ -990,6 +998,7 @@ * [Manacher](https://github.com/TheAlgorithms/Python/blob/master/strings/manacher.py) * [Min Cost String Conversion](https://github.com/TheAlgorithms/Python/blob/master/strings/min_cost_string_conversion.py) * [Naive String Search](https://github.com/TheAlgorithms/Python/blob/master/strings/naive_string_search.py) + * [Ngram](https://github.com/TheAlgorithms/Python/blob/master/strings/ngram.py) * [Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/palindrome.py) * [Prefix Function](https://github.com/TheAlgorithms/Python/blob/master/strings/prefix_function.py) * [Rabin Karp](https://github.com/TheAlgorithms/Python/blob/master/strings/rabin_karp.py) diff --git a/project_euler/problem_145/sol1.py b/project_euler/problem_145/sol1.py index 82e2ea79bc25..09d8daff57be 100644 --- a/project_euler/problem_145/sol1.py +++ b/project_euler/problem_145/sol1.py @@ -23,10 +23,11 @@ def odd_digits(num: int) -> bool: >>> odd_digits(135797531) True """ - num_str = str(num) - for i in ["0", "2", "4", "6", "8"]: - if i in num_str: + while num > 0: + digit = num % 10 + if digit % 2 == 0: return False + num //= 10 return True From b8fdd81f286e2435d058750d35420cb5a89f470d Mon Sep 17 00:00:00 2001 From: Raine Legary <64663183+Rainethhh@users.noreply.github.com> Date: Tue, 24 May 2022 23:49:54 -0600 Subject: [PATCH 0454/1543] Add minmum path sum (#5882) * commit on 'shortest_path_sum' * minimum_path_sum updated * commit to 'minimum_path_sum' * added description to minimum_path_sum * bot requirements fixed for * Update minimum_path_sum.py * Update minimum_path_sum.py Co-authored-by: John Law --- graphs/minimum_path_sum.py | 63 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 graphs/minimum_path_sum.py diff --git a/graphs/minimum_path_sum.py b/graphs/minimum_path_sum.py new file mode 100644 index 000000000000..df1e545df3d0 --- /dev/null +++ b/graphs/minimum_path_sum.py @@ -0,0 +1,63 @@ +def min_path_sum(grid: list) -> int: + """ + Find the path from top left to bottom right of array of numbers + with the lowest possible sum and return the sum along this path. + >>> min_path_sum([ + ... [1, 3, 1], + ... [1, 5, 1], + ... [4, 2, 1], + ... ]) + 7 + + >>> min_path_sum([ + ... 
[1, 0, 5, 6, 7], + ... [8, 9, 0, 4, 2], + ... [4, 4, 4, 5, 1], + ... [9, 6, 3, 1, 0], + ... [8, 4, 3, 2, 7], + ... ]) + 20 + + >>> min_path_sum(None) + Traceback (most recent call last): + ... + TypeError: The grid does not contain the appropriate information + + >>> min_path_sum([[]]) + Traceback (most recent call last): + ... + TypeError: The grid does not contain the appropriate information + """ + + if not grid or not grid[0]: + raise TypeError("The grid does not contain the appropriate information") + + for cell_n in range(1, len(grid[0])): + grid[0][cell_n] += grid[0][cell_n - 1] + row_above = grid[0] + + for row_n in range(1, len(grid)): + current_row = grid[row_n] + grid[row_n] = fill_row(current_row, row_above) + row_above = grid[row_n] + + return grid[-1][-1] + + +def fill_row(current_row: list, row_above: list) -> list: + """ + >>> fill_row([2, 2, 2], [1, 2, 3]) + [3, 4, 5] + """ + + current_row[0] += row_above[0] + for cell_n in range(1, len(current_row)): + current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n]) + + return current_row + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 8004671b984a58a86eb010e3add16f7268ed56b8 Mon Sep 17 00:00:00 2001 From: kugiyasan <44143656+kugiyasan@users.noreply.github.com> Date: Thu, 26 May 2022 15:24:23 -0400 Subject: [PATCH 0455/1543] Add Project Euler 68 Solution (#5552) * updating DIRECTORY.md * Project Euler 68 Solution * updating DIRECTORY.md * Project Euler 68 Fixed doctests, now at 93% coverage * Update sol1.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: kugiyasan Co-authored-by: John Law --- DIRECTORY.md | 2 + project_euler/problem_068/__init__.py | 0 project_euler/problem_068/sol1.py | 133 ++++++++++++++++++++++++++ 3 files changed, 135 insertions(+) create mode 100644 project_euler/problem_068/__init__.py create mode 100644 project_euler/problem_068/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 64a87dc660da..f4a470c12148 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -793,6 +793,8 @@ * Problem 067 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_067/sol1.py) * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_067/sol2.py) + * Problem 068 + * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_068/sol1.py) * Problem 069 * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_069/sol1.py) * Problem 070 diff --git a/project_euler/problem_068/__init__.py b/project_euler/problem_068/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_068/sol1.py b/project_euler/problem_068/sol1.py new file mode 100644 index 000000000000..772be359f630 --- /dev/null +++ b/project_euler/problem_068/sol1.py @@ -0,0 +1,133 @@ +""" +Project Euler Problem 68: https://projecteuler.net/problem=68 + +Magic 5-gon ring + +Problem Statement: +Consider the following "magic" 3-gon ring, +filled with the numbers 1 to 6, and each line adding to nine. + + 4 + \ + 3 + / \ + 1 - 2 - 6 + / + 5 + +Working clockwise, and starting from the group of three +with the numerically lowest external node (4,3,2 in this example), +each solution can be described uniquely. +For example, the above solution can be described by the set: 4,3,2; 6,2,1; 5,1,3. + +It is possible to complete the ring with four different totals: 9, 10, 11, and 12. +There are eight solutions in total. 
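A quick stand-alone check of the example solution quoted above (illustrative only):

# Each clockwise group of the 3-gon solution 4,3,2; 6,2,1; 5,1,3 sums to 9.
ring = [(4, 3, 2), (6, 2, 1), (5, 1, 3)]
print([sum(group) for group in ring])  # [9, 9, 9]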
+Total Solution Set +9 4,2,3; 5,3,1; 6,1,2 +9 4,3,2; 6,2,1; 5,1,3 +10 2,3,5; 4,5,1; 6,1,3 +10 2,5,3; 6,3,1; 4,1,5 +11 1,4,6; 3,6,2; 5,2,4 +11 1,6,4; 5,4,2; 3,2,6 +12 1,5,6; 2,6,4; 3,4,5 +12 1,6,5; 3,5,4; 2,4,6 + +By concatenating each group it is possible to form 9-digit strings; +the maximum string for a 3-gon ring is 432621513. + +Using the numbers 1 to 10, and depending on arrangements, +it is possible to form 16- and 17-digit strings. +What is the maximum 16-digit string for a "magic" 5-gon ring? +""" + +from itertools import permutations + + +def solution(gon_side: int = 5) -> int: + """ + Find the maximum number for a "magic" gon_side-gon ring + + The gon_side parameter should be in the range [3, 5], + other side numbers aren't tested + + >>> solution(3) + 432621513 + >>> solution(4) + 426561813732 + >>> solution() + 6531031914842725 + >>> solution(6) + Traceback (most recent call last): + ValueError: gon_side must be in the range [3, 5] + """ + if gon_side < 3 or gon_side > 5: + raise ValueError("gon_side must be in the range [3, 5]") + + # Since it's 16, we know 10 is on the outer ring + # Put the big numbers at the end so that they are never the first number + small_numbers = list(range(gon_side + 1, 0, -1)) + big_numbers = list(range(gon_side + 2, gon_side * 2 + 1)) + + for perm in permutations(small_numbers + big_numbers): + numbers = generate_gon_ring(gon_side, list(perm)) + if is_magic_gon(numbers): + return int("".join(str(n) for n in numbers)) + + raise ValueError(f"Magic {gon_side}-gon ring is impossible") + + +def generate_gon_ring(gon_side: int, perm: list[int]) -> list[int]: + """ + Generate a gon_side-gon ring from a permutation state + The permutation state is the ring, but every duplicate is removed + + >>> generate_gon_ring(3, [4, 2, 3, 5, 1, 6]) + [4, 2, 3, 5, 3, 1, 6, 1, 2] + >>> generate_gon_ring(5, [6, 5, 4, 3, 2, 1, 7, 8, 9, 10]) + [6, 5, 4, 3, 4, 2, 1, 2, 7, 8, 7, 9, 10, 9, 5] + """ + result = [0] * (gon_side * 3) + result[0:3] = perm[0:3] + perm.append(perm[1]) + + magic_number = 1 if gon_side < 5 else 2 + + for i in range(1, len(perm) // 3 + magic_number): + result[3 * i] = perm[2 * i + 1] + result[3 * i + 1] = result[3 * i - 1] + result[3 * i + 2] = perm[2 * i + 2] + + return result + + +def is_magic_gon(numbers: list[int]) -> bool: + """ + Check if the solution set is a magic n-gon ring + Check that the first number is the smallest number on the outer ring + Take a list, and check if the sum of each 3 numbers chunk is equal to the same total + + >>> is_magic_gon([4, 2, 3, 5, 3, 1, 6, 1, 2]) + True + >>> is_magic_gon([4, 3, 2, 6, 2, 1, 5, 1, 3]) + True + >>> is_magic_gon([2, 3, 5, 4, 5, 1, 6, 1, 3]) + True + >>> is_magic_gon([1, 2, 3, 4, 5, 6, 7, 8, 9]) + False + >>> is_magic_gon([1]) + Traceback (most recent call last): + ValueError: a gon ring should have a length that is a multiple of 3 + """ + if len(numbers) % 3 != 0: + raise ValueError("a gon ring should have a length that is a multiple of 3") + + if min(numbers[::3]) != numbers[0]: + return False + + total = sum(numbers[:3]) + + return all(sum(numbers[i : i + 3]) == total for i in range(3, len(numbers), 3)) + + +if __name__ == "__main__": + print(solution()) From a44afc9b7dd74f85f0c54ebdd8f0135b6bc38dc9 Mon Sep 17 00:00:00 2001 From: DongJoon Cha <81581204+dongjji@users.noreply.github.com> Date: Sun, 5 Jun 2022 01:41:52 +0900 Subject: [PATCH 0456/1543] Add Multi-Level-Feedback-Queue scheduling algorithm (#6165) * Add Multi-Level-Feedback-Queue scheduling algorithm * fix type hint annotation for 
pre-commit * Update scheduling/multi_level_feedback_queue.py Co-authored-by: John Law * Update scheduling/multi_level_feedback_queue.py Co-authored-by: John Law * Update scheduling/multi_level_feedback_queue.py Co-authored-by: John Law * Update scheduling/multi_level_feedback_queue.py * Update scheduling/multi_level_feedback_queue.py Co-authored-by: John Law Co-authored-by: John Law --- scheduling/multi_level_feedback_queue.py | 312 +++++++++++++++++++++++ 1 file changed, 312 insertions(+) create mode 100644 scheduling/multi_level_feedback_queue.py diff --git a/scheduling/multi_level_feedback_queue.py b/scheduling/multi_level_feedback_queue.py new file mode 100644 index 000000000000..95ca827e062d --- /dev/null +++ b/scheduling/multi_level_feedback_queue.py @@ -0,0 +1,312 @@ +from collections import deque + + +class Process: + def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None: + self.process_name = process_name # process name + self.arrival_time = arrival_time # arrival time of the process + # completion time of finished process or last interrupted time + self.stop_time = arrival_time + self.burst_time = burst_time # remaining burst time + self.waiting_time = 0 # total time of the process wait in ready queue + self.turnaround_time = 0 # time from arrival time to completion time + + +class MLFQ: + """ + MLFQ(Multi Level Feedback Queue) + https://en.wikipedia.org/wiki/Multilevel_feedback_queue + MLFQ has a lot of queues that have different priority + In this MLFQ, + The first Queue(0) to last second Queue(N-2) of MLFQ have Round Robin Algorithm + The last Queue(N-1) has First Come, First Served Algorithm + """ + + def __init__( + self, + number_of_queues: int, + time_slices: list[int], + queue: deque[Process], + current_time: int, + ) -> None: + # total number of mlfq's queues + self.number_of_queues = number_of_queues + # time slice of queues that round robin algorithm applied + self.time_slices = time_slices + # unfinished process is in this ready_queue + self.ready_queue = queue + # current time + self.current_time = current_time + # finished process is in this sequence queue + self.finish_queue: deque[Process] = deque() + + def calculate_sequence_of_finish_queue(self) -> list[str]: + """ + This method returns the sequence of finished processes + >>> P1 = Process("P1", 0, 53) + >>> P2 = Process("P2", 0, 17) + >>> P3 = Process("P3", 0, 68) + >>> P4 = Process("P4", 0, 24) + >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0) + >>> _ = mlfq.multi_level_feedback_queue() + >>> mlfq.calculate_sequence_of_finish_queue() + ['P2', 'P4', 'P1', 'P3'] + """ + sequence = [] + for i in range(len(self.finish_queue)): + sequence.append(self.finish_queue[i].process_name) + return sequence + + def calculate_waiting_time(self, queue: list[Process]) -> list[int]: + """ + This method calculates waiting time of processes + >>> P1 = Process("P1", 0, 53) + >>> P2 = Process("P2", 0, 17) + >>> P3 = Process("P3", 0, 68) + >>> P4 = Process("P4", 0, 24) + >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0) + >>> _ = mlfq.multi_level_feedback_queue() + >>> mlfq.calculate_waiting_time([P1, P2, P3, P4]) + [83, 17, 94, 101] + """ + waiting_times = [] + for i in range(len(queue)): + waiting_times.append(queue[i].waiting_time) + return waiting_times + + def calculate_turnaround_time(self, queue: list[Process]) -> list[int]: + """ + This method calculates turnaround time of processes + >>> P1 = Process("P1", 0, 53) + >>> P2 = Process("P2", 0, 17) + >>> P3 = Process("P3", 0, 68) + >>> 
P4 = Process("P4", 0, 24) + >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0) + >>> _ = mlfq.multi_level_feedback_queue() + >>> mlfq.calculate_turnaround_time([P1, P2, P3, P4]) + [136, 34, 162, 125] + """ + turnaround_times = [] + for i in range(len(queue)): + turnaround_times.append(queue[i].turnaround_time) + return turnaround_times + + def calculate_completion_time(self, queue: list[Process]) -> list[int]: + """ + This method calculates completion time of processes + >>> P1 = Process("P1", 0, 53) + >>> P2 = Process("P2", 0, 17) + >>> P3 = Process("P3", 0, 68) + >>> P4 = Process("P4", 0, 24) + >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0) + >>> _ = mlfq.multi_level_feedback_queue() + >>> mlfq.calculate_turnaround_time([P1, P2, P3, P4]) + [136, 34, 162, 125] + """ + completion_times = [] + for i in range(len(queue)): + completion_times.append(queue[i].stop_time) + return completion_times + + def calculate_remaining_burst_time_of_processes( + self, queue: deque[Process] + ) -> list[int]: + """ + This method calculate remaining burst time of processes + >>> P1 = Process("P1", 0, 53) + >>> P2 = Process("P2", 0, 17) + >>> P3 = Process("P3", 0, 68) + >>> P4 = Process("P4", 0, 24) + >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0) + >>> finish_queue, ready_queue = mlfq.round_robin(deque([P1, P2, P3, P4]), 17) + >>> mlfq.calculate_remaining_burst_time_of_processes(mlfq.finish_queue) + [0] + >>> mlfq.calculate_remaining_burst_time_of_processes(ready_queue) + [36, 51, 7] + >>> finish_queue, ready_queue = mlfq.round_robin(ready_queue, 25) + >>> mlfq.calculate_remaining_burst_time_of_processes(mlfq.finish_queue) + [0, 0] + >>> mlfq.calculate_remaining_burst_time_of_processes(ready_queue) + [11, 26] + """ + return [q.burst_time for q in queue] + + def update_waiting_time(self, process: Process) -> int: + """ + This method updates waiting times of unfinished processes + >>> P1 = Process("P1", 0, 53) + >>> P2 = Process("P2", 0, 17) + >>> P3 = Process("P3", 0, 68) + >>> P4 = Process("P4", 0, 24) + >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0) + >>> mlfq.current_time = 10 + >>> P1.stop_time = 5 + >>> mlfq.update_waiting_time(P1) + 5 + """ + process.waiting_time += self.current_time - process.stop_time + return process.waiting_time + + def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]: + """ + FCFS(First Come, First Served) + FCFS will be applied to MLFQ's last queue + A first came process will be finished at first + >>> P1 = Process("P1", 0, 53) + >>> P2 = Process("P2", 0, 17) + >>> P3 = Process("P3", 0, 68) + >>> P4 = Process("P4", 0, 24) + >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0) + >>> _ = mlfq.first_come_first_served(mlfq.ready_queue) + >>> mlfq.calculate_sequence_of_finish_queue() + ['P1', 'P2', 'P3', 'P4'] + """ + finished: deque[Process] = deque() # sequence deque of finished process + while len(ready_queue) != 0: + cp = ready_queue.popleft() # current process + + # if process's arrival time is later than current time, update current time + if self.current_time < cp.arrival_time: + self.current_time += cp.arrival_time + + # update waiting time of current process + self.update_waiting_time(cp) + # update current time + self.current_time += cp.burst_time + # finish the process and set the process's burst-time 0 + cp.burst_time = 0 + # set the process's turnaround time because it is finished + cp.turnaround_time = self.current_time - cp.arrival_time + # set the completion time + cp.stop_time = self.current_time + # add 
the process to queue that has finished queue + finished.append(cp) + + self.finish_queue.extend(finished) # add finished process to finish queue + # FCFS will finish all remaining processes + return finished + + def round_robin( + self, ready_queue: deque[Process], time_slice: int + ) -> tuple[deque[Process], deque[Process]]: + """ + RR(Round Robin) + RR will be applied to MLFQ's all queues except last queue + All processes can't use CPU for time more than time_slice + If the process consume CPU up to time_slice, it will go back to ready queue + >>> P1 = Process("P1", 0, 53) + >>> P2 = Process("P2", 0, 17) + >>> P3 = Process("P3", 0, 68) + >>> P4 = Process("P4", 0, 24) + >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0) + >>> finish_queue, ready_queue = mlfq.round_robin(mlfq.ready_queue, 17) + >>> mlfq.calculate_sequence_of_finish_queue() + ['P2'] + """ + finished: deque[Process] = deque() # sequence deque of terminated process + # just for 1 cycle and unfinished processes will go back to queue + for i in range(len(ready_queue)): + cp = ready_queue.popleft() # current process + + # if process's arrival time is later than current time, update current time + if self.current_time < cp.arrival_time: + self.current_time += cp.arrival_time + + # update waiting time of unfinished processes + self.update_waiting_time(cp) + # if the burst time of process is bigger than time-slice + if cp.burst_time > time_slice: + # use CPU for only time-slice + self.current_time += time_slice + # update remaining burst time + cp.burst_time -= time_slice + # update end point time + cp.stop_time = self.current_time + # locate the process behind the queue because it is not finished + ready_queue.append(cp) + else: + # use CPU for remaining burst time + self.current_time += cp.burst_time + # set burst time 0 because the process is finished + cp.burst_time = 0 + # set the finish time + cp.stop_time = self.current_time + # update the process' turnaround time because it is finished + cp.turnaround_time = self.current_time - cp.arrival_time + # add the process to queue that has finished queue + finished.append(cp) + + self.finish_queue.extend(finished) # add finished process to finish queue + # return finished processes queue and remaining processes queue + return finished, ready_queue + + def multi_level_feedback_queue(self) -> deque[Process]: + """ + MLFQ(Multi Level Feedback Queue) + >>> P1 = Process("P1", 0, 53) + >>> P2 = Process("P2", 0, 17) + >>> P3 = Process("P3", 0, 68) + >>> P4 = Process("P4", 0, 24) + >>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0) + >>> finish_queue = mlfq.multi_level_feedback_queue() + >>> mlfq.calculate_sequence_of_finish_queue() + ['P2', 'P4', 'P1', 'P3'] + """ + + # all queues except last one have round_robin algorithm + for i in range(self.number_of_queues - 1): + finished, self.ready_queue = self.round_robin( + self.ready_queue, self.time_slices[i] + ) + # the last queue has first_come_first_served algorithm + self.first_come_first_served(self.ready_queue) + + return self.finish_queue + + +if __name__ == "__main__": + import doctest + + P1 = Process("P1", 0, 53) + P2 = Process("P2", 0, 17) + P3 = Process("P3", 0, 68) + P4 = Process("P4", 0, 24) + number_of_queues = 3 + time_slices = [17, 25] + queue = deque([P1, P2, P3, P4]) + + if len(time_slices) != number_of_queues - 1: + exit() + + doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])}) + + P1 = Process("P1", 0, 53) + P2 = Process("P2", 0, 17) + P3 = Process("P3", 0, 68) + P4 = Process("P4", 0, 24) + 
number_of_queues = 3 + time_slices = [17, 25] + queue = deque([P1, P2, P3, P4]) + mlfq = MLFQ(number_of_queues, time_slices, queue, 0) + finish_queue = mlfq.multi_level_feedback_queue() + + # print total waiting times of processes(P1, P2, P3, P4) + print( + f"waiting time:\ + \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}" + ) + # print completion times of processes(P1, P2, P3, P4) + print( + f"completion time:\ + \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}" + ) + # print total turnaround times of processes(P1, P2, P3, P4) + print( + f"turnaround time:\ + \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}" + ) + # print sequence of finished processes + print( + f"sequnece of finished processes:\ + {mlfq.calculate_sequence_of_finish_queue()}" + ) From c86aa72cfa0467bd9a5711d7b5a77ed8243e49f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EC=9D=B4=EB=B9=88?= <76545238+Bynnn@users.noreply.github.com> Date: Tue, 7 Jun 2022 01:44:49 +0900 Subject: [PATCH 0457/1543] Create non_preemptive_shortest_job_first.py (#6169) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Create non_preemptive_shortest_job_first.py * 파일 위치 변경 * Delete non_preemptive_shortest_job_first.py * delete Korean comments * change comments, & to and, type annotation * type annotation * delete unnecessary comment --- .../non_preemptive_shortest_job_first.py | 111 ++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 scheduling/non_preemptive_shortest_job_first.py diff --git a/scheduling/non_preemptive_shortest_job_first.py b/scheduling/non_preemptive_shortest_job_first.py new file mode 100644 index 000000000000..96e571230ec0 --- /dev/null +++ b/scheduling/non_preemptive_shortest_job_first.py @@ -0,0 +1,111 @@ +""" +Non-preemptive Shortest Job First +Shortest execution time process is chosen for the next execution. +https://www.guru99.com/shortest-job-first-sjf-scheduling.html +https://en.wikipedia.org/wiki/Shortest_job_next +""" + + +from __future__ import annotations + +from statistics import mean + + +def calculate_waitingtime( + arrival_time: list[int], burst_time: list[int], no_of_processes: int +) -> list[int]: + """ + Calculate the waiting time of each processes + + Return: The waiting time for each process. + >>> calculate_waitingtime([0,1,2], [10, 5, 8], 3) + [0, 9, 13] + >>> calculate_waitingtime([1,2,2,4], [4, 6, 3, 1], 4) + [0, 7, 4, 1] + >>> calculate_waitingtime([0,0,0], [12, 2, 10],3) + [12, 0, 2] + """ + + waiting_time = [0] * no_of_processes + remaining_time = [0] * no_of_processes + + # Initialize remaining_time to waiting_time. + + for i in range(no_of_processes): + remaining_time[i] = burst_time[i] + ready_process: list[int] = [] + + completed = 0 + total_time = 0 + + # When processes are not completed, + # A process whose arrival time has passed \ + # and has remaining execution time is put into the ready_process. + # The shortest process in the ready_process, target_process is executed. 
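As a concrete illustration of that selection rule (reusing the burst times from the demo run further below; the helper names here are ad-hoc): with all jobs arriving at t = 0, the shortest remaining burst is always served next.

# Burst times [2, 5, 3, 7] with equal arrival times give the service order P1, P3, P2, P4,
# which is what produces the waiting times [0, 5, 2, 10] in the demo output.
bursts = [2, 5, 3, 7]
order = sorted(range(len(bursts)), key=lambda i: bursts[i])
print([f"P{i + 1}" for i in order])  # ['P1', 'P3', 'P2', 'P4']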
+ + while completed != no_of_processes: + ready_process = [] + target_process = -1 + + for i in range(no_of_processes): + if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): + ready_process.append(i) + + if len(ready_process) > 0: + target_process = ready_process[0] + for i in ready_process: + if remaining_time[i] < remaining_time[target_process]: + target_process = i + total_time += burst_time[target_process] + completed += 1 + remaining_time[target_process] = 0 + waiting_time[target_process] = ( + total_time - arrival_time[target_process] - burst_time[target_process] + ) + else: + total_time += 1 + + return waiting_time + + +def calculate_turnaroundtime( + burst_time: list[int], no_of_processes: int, waiting_time: list[int] +) -> list[int]: + """ + Calculate the turnaround time of each process. + + Return: The turnaround time for each process. + >>> calculate_turnaroundtime([0,1,2], 3, [0, 10, 15]) + [0, 11, 17] + >>> calculate_turnaroundtime([1,2,2,4], 4, [1, 8, 5, 4]) + [2, 10, 7, 8] + >>> calculate_turnaroundtime([0,0,0], 3, [12, 0, 2]) + [12, 0, 2] + """ + + turn_around_time = [0] * no_of_processes + for i in range(no_of_processes): + turn_around_time[i] = burst_time[i] + waiting_time[i] + return turn_around_time + + +if __name__ == "__main__": + print("[TEST CASE 01]") + + no_of_processes = 4 + burst_time = [2, 5, 3, 7] + arrival_time = [0, 0, 0, 0] + waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes) + turn_around_time = calculate_turnaroundtime( + burst_time, no_of_processes, waiting_time + ) + + # Printing the Result + print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time") + for i, process_ID in enumerate(list(range(1, 5))): + print( + f"{process_ID}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t" + f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}" + ) + print(f"\nAverage waiting time = {mean(waiting_time):.5f}") + print(f"Average turnaround time = {mean(turn_around_time):.5f}") From 69cde43ca1e78980922adaf6b852008840d52e14 Mon Sep 17 00:00:00 2001 From: Vcrostin <52068696+Vcrostin@users.noreply.github.com> Date: Wed, 22 Jun 2022 07:01:05 +0300 Subject: [PATCH 0458/1543] make DIRECTORY.md paths relative Fixes (#6179) (#6190) --- DIRECTORY.md | 1658 +++++++++++++++++---------------- scripts/build_directory_md.py | 4 +- 2 files changed, 833 insertions(+), 829 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index f4a470c12148..d30e275d067f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1,1049 +1,1055 @@ ## Arithmetic Analysis - * [Bisection](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/bisection.py) - * [Gaussian Elimination](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/gaussian_elimination.py) - * [In Static Equilibrium](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/in_static_equilibrium.py) - * [Intersection](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/intersection.py) - * [Jacobi Iteration Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/jacobi_iteration_method.py) - * [Lu Decomposition](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/lu_decomposition.py) - * [Newton Forward Interpolation](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_forward_interpolation.py) - * [Newton Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_method.py) - * [Newton 
Raphson](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/newton_raphson.py) - * [Secant Method](https://github.com/TheAlgorithms/Python/blob/master/arithmetic_analysis/secant_method.py) + * [Bisection](arithmetic_analysis/bisection.py) + * [Gaussian Elimination](arithmetic_analysis/gaussian_elimination.py) + * [In Static Equilibrium](arithmetic_analysis/in_static_equilibrium.py) + * [Intersection](arithmetic_analysis/intersection.py) + * [Jacobi Iteration Method](arithmetic_analysis/jacobi_iteration_method.py) + * [Lu Decomposition](arithmetic_analysis/lu_decomposition.py) + * [Newton Forward Interpolation](arithmetic_analysis/newton_forward_interpolation.py) + * [Newton Method](arithmetic_analysis/newton_method.py) + * [Newton Raphson](arithmetic_analysis/newton_raphson.py) + * [Secant Method](arithmetic_analysis/secant_method.py) ## Audio Filters - * [Butterworth Filter](https://github.com/TheAlgorithms/Python/blob/master/audio_filters/butterworth_filter.py) - * [Iir Filter](https://github.com/TheAlgorithms/Python/blob/master/audio_filters/iir_filter.py) - * [Show Response](https://github.com/TheAlgorithms/Python/blob/master/audio_filters/show_response.py) + * [Butterworth Filter](audio_filters/butterworth_filter.py) + * [Iir Filter](audio_filters/iir_filter.py) + * [Show Response](audio_filters/show_response.py) ## Backtracking - * [All Combinations](https://github.com/TheAlgorithms/Python/blob/master/backtracking/all_combinations.py) - * [All Permutations](https://github.com/TheAlgorithms/Python/blob/master/backtracking/all_permutations.py) - * [All Subsequences](https://github.com/TheAlgorithms/Python/blob/master/backtracking/all_subsequences.py) - * [Coloring](https://github.com/TheAlgorithms/Python/blob/master/backtracking/coloring.py) - * [Hamiltonian Cycle](https://github.com/TheAlgorithms/Python/blob/master/backtracking/hamiltonian_cycle.py) - * [Knight Tour](https://github.com/TheAlgorithms/Python/blob/master/backtracking/knight_tour.py) - * [Minimax](https://github.com/TheAlgorithms/Python/blob/master/backtracking/minimax.py) - * [N Queens](https://github.com/TheAlgorithms/Python/blob/master/backtracking/n_queens.py) - * [N Queens Math](https://github.com/TheAlgorithms/Python/blob/master/backtracking/n_queens_math.py) - * [Rat In Maze](https://github.com/TheAlgorithms/Python/blob/master/backtracking/rat_in_maze.py) - * [Sudoku](https://github.com/TheAlgorithms/Python/blob/master/backtracking/sudoku.py) - * [Sum Of Subsets](https://github.com/TheAlgorithms/Python/blob/master/backtracking/sum_of_subsets.py) + * [All Combinations](backtracking/all_combinations.py) + * [All Permutations](backtracking/all_permutations.py) + * [All Subsequences](backtracking/all_subsequences.py) + * [Coloring](backtracking/coloring.py) + * [Hamiltonian Cycle](backtracking/hamiltonian_cycle.py) + * [Knight Tour](backtracking/knight_tour.py) + * [Minimax](backtracking/minimax.py) + * [N Queens](backtracking/n_queens.py) + * [N Queens Math](backtracking/n_queens_math.py) + * [Rat In Maze](backtracking/rat_in_maze.py) + * [Sudoku](backtracking/sudoku.py) + * [Sum Of Subsets](backtracking/sum_of_subsets.py) ## Bit Manipulation - * [Binary And Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_and_operator.py) - * [Binary Count Setbits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_count_setbits.py) - * [Binary Count Trailing 
Zeros](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_count_trailing_zeros.py) - * [Binary Or Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_or_operator.py) - * [Binary Shifts](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_shifts.py) - * [Binary Twos Complement](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_twos_complement.py) - * [Binary Xor Operator](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/binary_xor_operator.py) - * [Count 1S Brian Kernighan Method](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/count_1s_brian_kernighan_method.py) - * [Count Number Of One Bits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/count_number_of_one_bits.py) - * [Gray Code Sequence](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/gray_code_sequence.py) - * [Reverse Bits](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/reverse_bits.py) - * [Single Bit Manipulation Operations](https://github.com/TheAlgorithms/Python/blob/master/bit_manipulation/single_bit_manipulation_operations.py) + * [Binary And Operator](bit_manipulation/binary_and_operator.py) + * [Binary Count Setbits](bit_manipulation/binary_count_setbits.py) + * [Binary Count Trailing Zeros](bit_manipulation/binary_count_trailing_zeros.py) + * [Binary Or Operator](bit_manipulation/binary_or_operator.py) + * [Binary Shifts](bit_manipulation/binary_shifts.py) + * [Binary Twos Complement](bit_manipulation/binary_twos_complement.py) + * [Binary Xor Operator](bit_manipulation/binary_xor_operator.py) + * [Count 1S Brian Kernighan Method](bit_manipulation/count_1s_brian_kernighan_method.py) + * [Count Number Of One Bits](bit_manipulation/count_number_of_one_bits.py) + * [Gray Code Sequence](bit_manipulation/gray_code_sequence.py) + * [Reverse Bits](bit_manipulation/reverse_bits.py) + * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) ## Blockchain - * [Chinese Remainder Theorem](https://github.com/TheAlgorithms/Python/blob/master/blockchain/chinese_remainder_theorem.py) - * [Diophantine Equation](https://github.com/TheAlgorithms/Python/blob/master/blockchain/diophantine_equation.py) - * [Modular Division](https://github.com/TheAlgorithms/Python/blob/master/blockchain/modular_division.py) + * [Chinese Remainder Theorem](blockchain/chinese_remainder_theorem.py) + * [Diophantine Equation](blockchain/diophantine_equation.py) + * [Modular Division](blockchain/modular_division.py) ## Boolean Algebra - * [Quine Mc Cluskey](https://github.com/TheAlgorithms/Python/blob/master/boolean_algebra/quine_mc_cluskey.py) + * [Quine Mc Cluskey](boolean_algebra/quine_mc_cluskey.py) ## Cellular Automata - * [Conways Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/conways_game_of_life.py) - * [Game Of Life](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/game_of_life.py) - * [Nagel Schrekenberg](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/nagel_schrekenberg.py) - * [One Dimensional](https://github.com/TheAlgorithms/Python/blob/master/cellular_automata/one_dimensional.py) + * [Conways Game Of Life](cellular_automata/conways_game_of_life.py) + * [Game Of Life](cellular_automata/game_of_life.py) + * [Nagel Schrekenberg](cellular_automata/nagel_schrekenberg.py) + * [One Dimensional](cellular_automata/one_dimensional.py) 
## Ciphers - * [A1Z26](https://github.com/TheAlgorithms/Python/blob/master/ciphers/a1z26.py) - * [Affine Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/affine_cipher.py) - * [Atbash](https://github.com/TheAlgorithms/Python/blob/master/ciphers/atbash.py) - * [Baconian Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/baconian_cipher.py) - * [Base16](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base16.py) - * [Base32](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base32.py) - * [Base64](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base64.py) - * [Base85](https://github.com/TheAlgorithms/Python/blob/master/ciphers/base85.py) - * [Beaufort Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/beaufort_cipher.py) - * [Bifid](https://github.com/TheAlgorithms/Python/blob/master/ciphers/bifid.py) - * [Brute Force Caesar Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/brute_force_caesar_cipher.py) - * [Caesar Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/caesar_cipher.py) - * [Cryptomath Module](https://github.com/TheAlgorithms/Python/blob/master/ciphers/cryptomath_module.py) - * [Decrypt Caesar With Chi Squared](https://github.com/TheAlgorithms/Python/blob/master/ciphers/decrypt_caesar_with_chi_squared.py) - * [Deterministic Miller Rabin](https://github.com/TheAlgorithms/Python/blob/master/ciphers/deterministic_miller_rabin.py) - * [Diffie](https://github.com/TheAlgorithms/Python/blob/master/ciphers/diffie.py) - * [Diffie Hellman](https://github.com/TheAlgorithms/Python/blob/master/ciphers/diffie_hellman.py) - * [Elgamal Key Generator](https://github.com/TheAlgorithms/Python/blob/master/ciphers/elgamal_key_generator.py) - * [Enigma Machine2](https://github.com/TheAlgorithms/Python/blob/master/ciphers/enigma_machine2.py) - * [Hill Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/hill_cipher.py) - * [Mixed Keyword Cypher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/mixed_keyword_cypher.py) - * [Mono Alphabetic Ciphers](https://github.com/TheAlgorithms/Python/blob/master/ciphers/mono_alphabetic_ciphers.py) - * [Morse Code](https://github.com/TheAlgorithms/Python/blob/master/ciphers/morse_code.py) - * [Onepad Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/onepad_cipher.py) - * [Playfair Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/playfair_cipher.py) - * [Polybius](https://github.com/TheAlgorithms/Python/blob/master/ciphers/polybius.py) - * [Porta Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/porta_cipher.py) - * [Rabin Miller](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rabin_miller.py) - * [Rail Fence Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rail_fence_cipher.py) - * [Rot13](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rot13.py) - * [Rsa Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rsa_cipher.py) - * [Rsa Factorization](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rsa_factorization.py) - * [Rsa Key Generator](https://github.com/TheAlgorithms/Python/blob/master/ciphers/rsa_key_generator.py) - * [Shuffled Shift Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/shuffled_shift_cipher.py) - * [Simple Keyword Cypher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/simple_keyword_cypher.py) - * [Simple Substitution 
Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/simple_substitution_cipher.py) - * [Trafid Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/trafid_cipher.py) - * [Transposition Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/transposition_cipher.py) - * [Transposition Cipher Encrypt Decrypt File](https://github.com/TheAlgorithms/Python/blob/master/ciphers/transposition_cipher_encrypt_decrypt_file.py) - * [Vigenere Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/vigenere_cipher.py) - * [Xor Cipher](https://github.com/TheAlgorithms/Python/blob/master/ciphers/xor_cipher.py) + * [A1Z26](ciphers/a1z26.py) + * [Affine Cipher](ciphers/affine_cipher.py) + * [Atbash](ciphers/atbash.py) + * [Baconian Cipher](ciphers/baconian_cipher.py) + * [Base16](ciphers/base16.py) + * [Base32](ciphers/base32.py) + * [Base64](ciphers/base64.py) + * [Base85](ciphers/base85.py) + * [Beaufort Cipher](ciphers/beaufort_cipher.py) + * [Bifid](ciphers/bifid.py) + * [Brute Force Caesar Cipher](ciphers/brute_force_caesar_cipher.py) + * [Caesar Cipher](ciphers/caesar_cipher.py) + * [Cryptomath Module](ciphers/cryptomath_module.py) + * [Decrypt Caesar With Chi Squared](ciphers/decrypt_caesar_with_chi_squared.py) + * [Deterministic Miller Rabin](ciphers/deterministic_miller_rabin.py) + * [Diffie](ciphers/diffie.py) + * [Diffie Hellman](ciphers/diffie_hellman.py) + * [Elgamal Key Generator](ciphers/elgamal_key_generator.py) + * [Enigma Machine2](ciphers/enigma_machine2.py) + * [Hill Cipher](ciphers/hill_cipher.py) + * [Mixed Keyword Cypher](ciphers/mixed_keyword_cypher.py) + * [Mono Alphabetic Ciphers](ciphers/mono_alphabetic_ciphers.py) + * [Morse Code](ciphers/morse_code.py) + * [Onepad Cipher](ciphers/onepad_cipher.py) + * [Playfair Cipher](ciphers/playfair_cipher.py) + * [Polybius](ciphers/polybius.py) + * [Porta Cipher](ciphers/porta_cipher.py) + * [Rabin Miller](ciphers/rabin_miller.py) + * [Rail Fence Cipher](ciphers/rail_fence_cipher.py) + * [Rot13](ciphers/rot13.py) + * [Rsa Cipher](ciphers/rsa_cipher.py) + * [Rsa Factorization](ciphers/rsa_factorization.py) + * [Rsa Key Generator](ciphers/rsa_key_generator.py) + * [Shuffled Shift Cipher](ciphers/shuffled_shift_cipher.py) + * [Simple Keyword Cypher](ciphers/simple_keyword_cypher.py) + * [Simple Substitution Cipher](ciphers/simple_substitution_cipher.py) + * [Trafid Cipher](ciphers/trafid_cipher.py) + * [Transposition Cipher](ciphers/transposition_cipher.py) + * [Transposition Cipher Encrypt Decrypt File](ciphers/transposition_cipher_encrypt_decrypt_file.py) + * [Vigenere Cipher](ciphers/vigenere_cipher.py) + * [Xor Cipher](ciphers/xor_cipher.py) ## Compression - * [Burrows Wheeler](https://github.com/TheAlgorithms/Python/blob/master/compression/burrows_wheeler.py) - * [Huffman](https://github.com/TheAlgorithms/Python/blob/master/compression/huffman.py) - * [Lempel Ziv](https://github.com/TheAlgorithms/Python/blob/master/compression/lempel_ziv.py) - * [Lempel Ziv Decompress](https://github.com/TheAlgorithms/Python/blob/master/compression/lempel_ziv_decompress.py) - * [Peak Signal To Noise Ratio](https://github.com/TheAlgorithms/Python/blob/master/compression/peak_signal_to_noise_ratio.py) + * [Burrows Wheeler](compression/burrows_wheeler.py) + * [Huffman](compression/huffman.py) + * [Lempel Ziv](compression/lempel_ziv.py) + * [Lempel Ziv Decompress](compression/lempel_ziv_decompress.py) + * [Peak Signal To Noise Ratio](compression/peak_signal_to_noise_ratio.py) ## Computer Vision - * [Cnn 
Classification](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/cnn_classification.py) - * [Flip Augmentation](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/flip_augmentation.py) - * [Harris Corner](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/harris_corner.py) - * [Horn Schunck](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/horn_schunck.py) - * [Mean Threshold](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/mean_threshold.py) - * [Mosaic Augmentation](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/mosaic_augmentation.py) - * [Pooling Functions](https://github.com/TheAlgorithms/Python/blob/master/computer_vision/pooling_functions.py) + * [Cnn Classification](computer_vision/cnn_classification.py) + * [Flip Augmentation](computer_vision/flip_augmentation.py) + * [Harris Corner](computer_vision/harris_corner.py) + * [Horn Schunck](computer_vision/horn_schunck.py) + * [Mean Threshold](computer_vision/mean_threshold.py) + * [Mosaic Augmentation](computer_vision/mosaic_augmentation.py) + * [Pooling Functions](computer_vision/pooling_functions.py) ## Conversions - * [Binary To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_decimal.py) - * [Binary To Hexadecimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_hexadecimal.py) - * [Binary To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/binary_to_octal.py) - * [Decimal To Any](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_any.py) - * [Decimal To Binary](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_binary.py) - * [Decimal To Binary Recursion](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_binary_recursion.py) - * [Decimal To Hexadecimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_hexadecimal.py) - * [Decimal To Octal](https://github.com/TheAlgorithms/Python/blob/master/conversions/decimal_to_octal.py) - * [Hex To Bin](https://github.com/TheAlgorithms/Python/blob/master/conversions/hex_to_bin.py) - * [Hexadecimal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/hexadecimal_to_decimal.py) - * [Length Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/length_conversion.py) - * [Molecular Chemistry](https://github.com/TheAlgorithms/Python/blob/master/conversions/molecular_chemistry.py) - * [Octal To Decimal](https://github.com/TheAlgorithms/Python/blob/master/conversions/octal_to_decimal.py) - * [Prefix Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions.py) - * [Prefix Conversions String](https://github.com/TheAlgorithms/Python/blob/master/conversions/prefix_conversions_string.py) - * [Pressure Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/pressure_conversions.py) - * [Rgb Hsv Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/rgb_hsv_conversion.py) - * [Roman Numerals](https://github.com/TheAlgorithms/Python/blob/master/conversions/roman_numerals.py) - * [Temperature Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/temperature_conversions.py) - * [Volume Conversions](https://github.com/TheAlgorithms/Python/blob/master/conversions/volume_conversions.py) - * [Weight 
Conversion](https://github.com/TheAlgorithms/Python/blob/master/conversions/weight_conversion.py) + * [Binary To Decimal](conversions/binary_to_decimal.py) + * [Binary To Hexadecimal](conversions/binary_to_hexadecimal.py) + * [Binary To Octal](conversions/binary_to_octal.py) + * [Decimal To Any](conversions/decimal_to_any.py) + * [Decimal To Binary](conversions/decimal_to_binary.py) + * [Decimal To Binary Recursion](conversions/decimal_to_binary_recursion.py) + * [Decimal To Hexadecimal](conversions/decimal_to_hexadecimal.py) + * [Decimal To Octal](conversions/decimal_to_octal.py) + * [Excel Title To Column](conversions/excel_title_to_column.py) + * [Hex To Bin](conversions/hex_to_bin.py) + * [Hexadecimal To Decimal](conversions/hexadecimal_to_decimal.py) + * [Length Conversion](conversions/length_conversion.py) + * [Molecular Chemistry](conversions/molecular_chemistry.py) + * [Octal To Decimal](conversions/octal_to_decimal.py) + * [Prefix Conversions](conversions/prefix_conversions.py) + * [Prefix Conversions String](conversions/prefix_conversions_string.py) + * [Pressure Conversions](conversions/pressure_conversions.py) + * [Rgb Hsv Conversion](conversions/rgb_hsv_conversion.py) + * [Roman Numerals](conversions/roman_numerals.py) + * [Temperature Conversions](conversions/temperature_conversions.py) + * [Volume Conversions](conversions/volume_conversions.py) + * [Weight Conversion](conversions/weight_conversion.py) ## Data Structures * Binary Tree - * [Avl Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/avl_tree.py) - * [Basic Binary Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/basic_binary_tree.py) - * [Binary Search Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_search_tree.py) - * [Binary Search Tree Recursive](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_search_tree_recursive.py) - * [Binary Tree Mirror](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_tree_mirror.py) - * [Binary Tree Traversals](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/binary_tree_traversals.py) - * [Fenwick Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/fenwick_tree.py) - * [Lazy Segment Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/lazy_segment_tree.py) - * [Lowest Common Ancestor](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/lowest_common_ancestor.py) - * [Merge Two Binary Trees](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/merge_two_binary_trees.py) - * [Non Recursive Segment Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/non_recursive_segment_tree.py) - * [Number Of Possible Binary Trees](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/number_of_possible_binary_trees.py) - * [Red Black Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/red_black_tree.py) - * [Segment Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/segment_tree.py) - * [Segment Tree Other](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/segment_tree_other.py) - * [Treap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/treap.py) - * [Wavelet 
Tree](https://github.com/TheAlgorithms/Python/blob/master/data_structures/binary_tree/wavelet_tree.py) + * [Avl Tree](data_structures/binary_tree/avl_tree.py) + * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) + * [Binary Search Tree](data_structures/binary_tree/binary_search_tree.py) + * [Binary Search Tree Recursive](data_structures/binary_tree/binary_search_tree_recursive.py) + * [Binary Tree Mirror](data_structures/binary_tree/binary_tree_mirror.py) + * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) + * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) + * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) + * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) + * [Merge Two Binary Trees](data_structures/binary_tree/merge_two_binary_trees.py) + * [Non Recursive Segment Tree](data_structures/binary_tree/non_recursive_segment_tree.py) + * [Number Of Possible Binary Trees](data_structures/binary_tree/number_of_possible_binary_trees.py) + * [Red Black Tree](data_structures/binary_tree/red_black_tree.py) + * [Segment Tree](data_structures/binary_tree/segment_tree.py) + * [Segment Tree Other](data_structures/binary_tree/segment_tree_other.py) + * [Treap](data_structures/binary_tree/treap.py) + * [Wavelet Tree](data_structures/binary_tree/wavelet_tree.py) * Disjoint Set - * [Alternate Disjoint Set](https://github.com/TheAlgorithms/Python/blob/master/data_structures/disjoint_set/alternate_disjoint_set.py) - * [Disjoint Set](https://github.com/TheAlgorithms/Python/blob/master/data_structures/disjoint_set/disjoint_set.py) + * [Alternate Disjoint Set](data_structures/disjoint_set/alternate_disjoint_set.py) + * [Disjoint Set](data_structures/disjoint_set/disjoint_set.py) * Hashing - * [Double Hash](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/double_hash.py) - * [Hash Table](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/hash_table.py) - * [Hash Table With Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/hash_table_with_linked_list.py) + * [Double Hash](data_structures/hashing/double_hash.py) + * [Hash Table](data_structures/hashing/hash_table.py) + * [Hash Table With Linked List](data_structures/hashing/hash_table_with_linked_list.py) * Number Theory - * [Prime Numbers](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/number_theory/prime_numbers.py) - * [Quadratic Probing](https://github.com/TheAlgorithms/Python/blob/master/data_structures/hashing/quadratic_probing.py) + * [Prime Numbers](data_structures/hashing/number_theory/prime_numbers.py) + * [Quadratic Probing](data_structures/hashing/quadratic_probing.py) * Heap - * [Binomial Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/binomial_heap.py) - * [Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/heap.py) - * [Heap Generic](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/heap_generic.py) - * [Max Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/max_heap.py) - * [Min Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/min_heap.py) - * [Randomized Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/randomized_heap.py) - * [Skew Heap](https://github.com/TheAlgorithms/Python/blob/master/data_structures/heap/skew_heap.py) + * [Binomial 
Heap](data_structures/heap/binomial_heap.py) + * [Heap](data_structures/heap/heap.py) + * [Heap Generic](data_structures/heap/heap_generic.py) + * [Max Heap](data_structures/heap/max_heap.py) + * [Min Heap](data_structures/heap/min_heap.py) + * [Randomized Heap](data_structures/heap/randomized_heap.py) + * [Skew Heap](data_structures/heap/skew_heap.py) * Linked List - * [Circular Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/circular_linked_list.py) - * [Deque Doubly](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/deque_doubly.py) - * [Doubly Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/doubly_linked_list.py) - * [Doubly Linked List Two](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/doubly_linked_list_two.py) - * [From Sequence](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/from_sequence.py) - * [Has Loop](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/has_loop.py) - * [Is Palindrome](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/is_palindrome.py) - * [Merge Two Lists](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/merge_two_lists.py) - * [Middle Element Of Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/middle_element_of_linked_list.py) - * [Print Reverse](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/print_reverse.py) - * [Singly Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/singly_linked_list.py) - * [Skip List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/skip_list.py) - * [Swap Nodes](https://github.com/TheAlgorithms/Python/blob/master/data_structures/linked_list/swap_nodes.py) + * [Circular Linked List](data_structures/linked_list/circular_linked_list.py) + * [Deque Doubly](data_structures/linked_list/deque_doubly.py) + * [Doubly Linked List](data_structures/linked_list/doubly_linked_list.py) + * [Doubly Linked List Two](data_structures/linked_list/doubly_linked_list_two.py) + * [From Sequence](data_structures/linked_list/from_sequence.py) + * [Has Loop](data_structures/linked_list/has_loop.py) + * [Is Palindrome](data_structures/linked_list/is_palindrome.py) + * [Merge Two Lists](data_structures/linked_list/merge_two_lists.py) + * [Middle Element Of Linked List](data_structures/linked_list/middle_element_of_linked_list.py) + * [Print Reverse](data_structures/linked_list/print_reverse.py) + * [Singly Linked List](data_structures/linked_list/singly_linked_list.py) + * [Skip List](data_structures/linked_list/skip_list.py) + * [Swap Nodes](data_structures/linked_list/swap_nodes.py) * Queue - * [Circular Queue](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/circular_queue.py) - * [Circular Queue Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/circular_queue_linked_list.py) - * [Double Ended Queue](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/double_ended_queue.py) - * [Linked Queue](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/linked_queue.py) - * [Priority Queue Using List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/priority_queue_using_list.py) - * [Queue On 
List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/queue_on_list.py) - * [Queue On Pseudo Stack](https://github.com/TheAlgorithms/Python/blob/master/data_structures/queue/queue_on_pseudo_stack.py) + * [Circular Queue](data_structures/queue/circular_queue.py) + * [Circular Queue Linked List](data_structures/queue/circular_queue_linked_list.py) + * [Double Ended Queue](data_structures/queue/double_ended_queue.py) + * [Linked Queue](data_structures/queue/linked_queue.py) + * [Priority Queue Using List](data_structures/queue/priority_queue_using_list.py) + * [Queue On List](data_structures/queue/queue_on_list.py) + * [Queue On Pseudo Stack](data_structures/queue/queue_on_pseudo_stack.py) * Stacks - * [Balanced Parentheses](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/balanced_parentheses.py) - * [Dijkstras Two Stack Algorithm](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/dijkstras_two_stack_algorithm.py) - * [Evaluate Postfix Notations](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/evaluate_postfix_notations.py) - * [Infix To Postfix Conversion](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/infix_to_postfix_conversion.py) - * [Infix To Prefix Conversion](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/infix_to_prefix_conversion.py) - * [Next Greater Element](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/next_greater_element.py) - * [Postfix Evaluation](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/postfix_evaluation.py) - * [Prefix Evaluation](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/prefix_evaluation.py) - * [Stack](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stack.py) - * [Stack With Doubly Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stack_with_doubly_linked_list.py) - * [Stack With Singly Linked List](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stack_with_singly_linked_list.py) - * [Stock Span Problem](https://github.com/TheAlgorithms/Python/blob/master/data_structures/stacks/stock_span_problem.py) + * [Balanced Parentheses](data_structures/stacks/balanced_parentheses.py) + * [Dijkstras Two Stack Algorithm](data_structures/stacks/dijkstras_two_stack_algorithm.py) + * [Evaluate Postfix Notations](data_structures/stacks/evaluate_postfix_notations.py) + * [Infix To Postfix Conversion](data_structures/stacks/infix_to_postfix_conversion.py) + * [Infix To Prefix Conversion](data_structures/stacks/infix_to_prefix_conversion.py) + * [Next Greater Element](data_structures/stacks/next_greater_element.py) + * [Postfix Evaluation](data_structures/stacks/postfix_evaluation.py) + * [Prefix Evaluation](data_structures/stacks/prefix_evaluation.py) + * [Stack](data_structures/stacks/stack.py) + * [Stack With Doubly Linked List](data_structures/stacks/stack_with_doubly_linked_list.py) + * [Stack With Singly Linked List](data_structures/stacks/stack_with_singly_linked_list.py) + * [Stock Span Problem](data_structures/stacks/stock_span_problem.py) * Trie - * [Trie](https://github.com/TheAlgorithms/Python/blob/master/data_structures/trie/trie.py) + * [Trie](data_structures/trie/trie.py) ## Digital Image Processing - * [Change Brightness](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/change_brightness.py) - * [Change 
Contrast](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/change_contrast.py) - * [Convert To Negative](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/convert_to_negative.py) + * [Change Brightness](digital_image_processing/change_brightness.py) + * [Change Contrast](digital_image_processing/change_contrast.py) + * [Convert To Negative](digital_image_processing/convert_to_negative.py) * Dithering - * [Burkes](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/dithering/burkes.py) + * [Burkes](digital_image_processing/dithering/burkes.py) * Edge Detection - * [Canny](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/edge_detection/canny.py) + * [Canny](digital_image_processing/edge_detection/canny.py) * Filters - * [Bilateral Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/bilateral_filter.py) - * [Convolve](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/convolve.py) - * [Gabor Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/gabor_filter.py) - * [Gaussian Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/gaussian_filter.py) - * [Median Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/median_filter.py) - * [Sobel Filter](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/filters/sobel_filter.py) + * [Bilateral Filter](digital_image_processing/filters/bilateral_filter.py) + * [Convolve](digital_image_processing/filters/convolve.py) + * [Gabor Filter](digital_image_processing/filters/gabor_filter.py) + * [Gaussian Filter](digital_image_processing/filters/gaussian_filter.py) + * [Median Filter](digital_image_processing/filters/median_filter.py) + * [Sobel Filter](digital_image_processing/filters/sobel_filter.py) * Histogram Equalization - * [Histogram Stretch](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/histogram_equalization/histogram_stretch.py) - * [Index Calculation](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/index_calculation.py) + * [Histogram Stretch](digital_image_processing/histogram_equalization/histogram_stretch.py) + * [Index Calculation](digital_image_processing/index_calculation.py) * Morphological Operations - * [Dilation Operation](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/morphological_operations/dilation_operation.py) - * [Erosion Operation](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/morphological_operations/erosion_operation.py) + * [Dilation Operation](digital_image_processing/morphological_operations/dilation_operation.py) + * [Erosion Operation](digital_image_processing/morphological_operations/erosion_operation.py) * Resize - * [Resize](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/resize/resize.py) + * [Resize](digital_image_processing/resize/resize.py) * Rotation - * [Rotation](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/rotation/rotation.py) - * [Sepia](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/sepia.py) - * [Test Digital Image Processing](https://github.com/TheAlgorithms/Python/blob/master/digital_image_processing/test_digital_image_processing.py) + * 
[Rotation](digital_image_processing/rotation/rotation.py) + * [Sepia](digital_image_processing/sepia.py) + * [Test Digital Image Processing](digital_image_processing/test_digital_image_processing.py) ## Divide And Conquer - * [Closest Pair Of Points](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/closest_pair_of_points.py) - * [Convex Hull](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/convex_hull.py) - * [Heaps Algorithm](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/heaps_algorithm.py) - * [Heaps Algorithm Iterative](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/heaps_algorithm_iterative.py) - * [Inversions](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/inversions.py) - * [Kth Order Statistic](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/kth_order_statistic.py) - * [Max Difference Pair](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/max_difference_pair.py) - * [Max Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/max_subarray_sum.py) - * [Mergesort](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/mergesort.py) - * [Peak](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/peak.py) - * [Power](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/power.py) - * [Strassen Matrix Multiplication](https://github.com/TheAlgorithms/Python/blob/master/divide_and_conquer/strassen_matrix_multiplication.py) + * [Closest Pair Of Points](divide_and_conquer/closest_pair_of_points.py) + * [Convex Hull](divide_and_conquer/convex_hull.py) + * [Heaps Algorithm](divide_and_conquer/heaps_algorithm.py) + * [Heaps Algorithm Iterative](divide_and_conquer/heaps_algorithm_iterative.py) + * [Inversions](divide_and_conquer/inversions.py) + * [Kth Order Statistic](divide_and_conquer/kth_order_statistic.py) + * [Max Difference Pair](divide_and_conquer/max_difference_pair.py) + * [Max Subarray Sum](divide_and_conquer/max_subarray_sum.py) + * [Mergesort](divide_and_conquer/mergesort.py) + * [Peak](divide_and_conquer/peak.py) + * [Power](divide_and_conquer/power.py) + * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) ## Dynamic Programming - * [Abbreviation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/abbreviation.py) - * [All Construct](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/all_construct.py) - * [Bitmask](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/bitmask.py) - * [Catalan Numbers](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/catalan_numbers.py) - * [Climbing Stairs](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/climbing_stairs.py) - * [Edit Distance](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/edit_distance.py) - * [Factorial](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/factorial.py) - * [Fast Fibonacci](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fast_fibonacci.py) - * [Fibonacci](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fibonacci.py) - * [Floyd Warshall](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/floyd_warshall.py) - * [Fractional 
Knapsack](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fractional_knapsack.py) - * [Fractional Knapsack 2](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/fractional_knapsack_2.py) - * [Integer Partition](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/integer_partition.py) - * [Iterating Through Submasks](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/iterating_through_submasks.py) - * [Knapsack](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/knapsack.py) - * [Longest Common Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_common_subsequence.py) - * [Longest Increasing Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_increasing_subsequence.py) - * [Longest Increasing Subsequence O(Nlogn)](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_increasing_subsequence_o(nlogn).py) - * [Longest Sub Array](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/longest_sub_array.py) - * [Matrix Chain Order](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/matrix_chain_order.py) - * [Max Non Adjacent Sum](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_non_adjacent_sum.py) - * [Max Sub Array](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sub_array.py) - * [Max Sum Contiguous Subsequence](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/max_sum_contiguous_subsequence.py) - * [Minimum Coin Change](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_coin_change.py) - * [Minimum Cost Path](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_cost_path.py) - * [Minimum Partition](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_partition.py) - * [Minimum Steps To One](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/minimum_steps_to_one.py) - * [Optimal Binary Search Tree](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/optimal_binary_search_tree.py) - * [Rod Cutting](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/rod_cutting.py) - * [Subset Generation](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/subset_generation.py) - * [Sum Of Subset](https://github.com/TheAlgorithms/Python/blob/master/dynamic_programming/sum_of_subset.py) + * [Abbreviation](dynamic_programming/abbreviation.py) + * [All Construct](dynamic_programming/all_construct.py) + * [Bitmask](dynamic_programming/bitmask.py) + * [Catalan Numbers](dynamic_programming/catalan_numbers.py) + * [Climbing Stairs](dynamic_programming/climbing_stairs.py) + * [Edit Distance](dynamic_programming/edit_distance.py) + * [Factorial](dynamic_programming/factorial.py) + * [Fast Fibonacci](dynamic_programming/fast_fibonacci.py) + * [Fibonacci](dynamic_programming/fibonacci.py) + * [Floyd Warshall](dynamic_programming/floyd_warshall.py) + * [Fractional Knapsack](dynamic_programming/fractional_knapsack.py) + * [Fractional Knapsack 2](dynamic_programming/fractional_knapsack_2.py) + * [Integer Partition](dynamic_programming/integer_partition.py) + * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) + * [Knapsack](dynamic_programming/knapsack.py) + * [Longest Common 
Subsequence](dynamic_programming/longest_common_subsequence.py) + * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) + * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py) + * [Longest Sub Array](dynamic_programming/longest_sub_array.py) + * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) + * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) + * [Max Sub Array](dynamic_programming/max_sub_array.py) + * [Max Sum Contiguous Subsequence](dynamic_programming/max_sum_contiguous_subsequence.py) + * [Minimum Coin Change](dynamic_programming/minimum_coin_change.py) + * [Minimum Cost Path](dynamic_programming/minimum_cost_path.py) + * [Minimum Partition](dynamic_programming/minimum_partition.py) + * [Minimum Steps To One](dynamic_programming/minimum_steps_to_one.py) + * [Optimal Binary Search Tree](dynamic_programming/optimal_binary_search_tree.py) + * [Rod Cutting](dynamic_programming/rod_cutting.py) + * [Subset Generation](dynamic_programming/subset_generation.py) + * [Sum Of Subset](dynamic_programming/sum_of_subset.py) ## Electronics - * [Carrier Concentration](https://github.com/TheAlgorithms/Python/blob/master/electronics/carrier_concentration.py) - * [Coulombs Law](https://github.com/TheAlgorithms/Python/blob/master/electronics/coulombs_law.py) - * [Electric Power](https://github.com/TheAlgorithms/Python/blob/master/electronics/electric_power.py) - * [Ohms Law](https://github.com/TheAlgorithms/Python/blob/master/electronics/ohms_law.py) + * [Carrier Concentration](electronics/carrier_concentration.py) + * [Coulombs Law](electronics/coulombs_law.py) + * [Electric Power](electronics/electric_power.py) + * [Ohms Law](electronics/ohms_law.py) ## File Transfer - * [Receive File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/receive_file.py) - * [Send File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/send_file.py) + * [Receive File](file_transfer/receive_file.py) + * [Send File](file_transfer/send_file.py) * Tests - * [Test Send File](https://github.com/TheAlgorithms/Python/blob/master/file_transfer/tests/test_send_file.py) + * [Test Send File](file_transfer/tests/test_send_file.py) ## Financial - * [Equated Monthly Installments](https://github.com/TheAlgorithms/Python/blob/master/financial/equated_monthly_installments.py) - * [Interest](https://github.com/TheAlgorithms/Python/blob/master/financial/interest.py) + * [Equated Monthly Installments](financial/equated_monthly_installments.py) + * [Interest](financial/interest.py) ## Fractals - * [Julia Sets](https://github.com/TheAlgorithms/Python/blob/master/fractals/julia_sets.py) - * [Koch Snowflake](https://github.com/TheAlgorithms/Python/blob/master/fractals/koch_snowflake.py) - * [Mandelbrot](https://github.com/TheAlgorithms/Python/blob/master/fractals/mandelbrot.py) - * [Sierpinski Triangle](https://github.com/TheAlgorithms/Python/blob/master/fractals/sierpinski_triangle.py) + * [Julia Sets](fractals/julia_sets.py) + * [Koch Snowflake](fractals/koch_snowflake.py) + * [Mandelbrot](fractals/mandelbrot.py) + * [Sierpinski Triangle](fractals/sierpinski_triangle.py) ## Fuzzy Logic - * [Fuzzy Operations](https://github.com/TheAlgorithms/Python/blob/master/fuzzy_logic/fuzzy_operations.py) + * [Fuzzy Operations](fuzzy_logic/fuzzy_operations.py) ## Genetic Algorithm - * [Basic String](https://github.com/TheAlgorithms/Python/blob/master/genetic_algorithm/basic_string.py) + * [Basic 
String](genetic_algorithm/basic_string.py) ## Geodesy - * [Haversine Distance](https://github.com/TheAlgorithms/Python/blob/master/geodesy/haversine_distance.py) - * [Lamberts Ellipsoidal Distance](https://github.com/TheAlgorithms/Python/blob/master/geodesy/lamberts_ellipsoidal_distance.py) + * [Haversine Distance](geodesy/haversine_distance.py) + * [Lamberts Ellipsoidal Distance](geodesy/lamberts_ellipsoidal_distance.py) ## Graphics - * [Bezier Curve](https://github.com/TheAlgorithms/Python/blob/master/graphics/bezier_curve.py) - * [Vector3 For 2D Rendering](https://github.com/TheAlgorithms/Python/blob/master/graphics/vector3_for_2d_rendering.py) + * [Bezier Curve](graphics/bezier_curve.py) + * [Vector3 For 2D Rendering](graphics/vector3_for_2d_rendering.py) ## Graphs - * [A Star](https://github.com/TheAlgorithms/Python/blob/master/graphs/a_star.py) - * [Articulation Points](https://github.com/TheAlgorithms/Python/blob/master/graphs/articulation_points.py) - * [Basic Graphs](https://github.com/TheAlgorithms/Python/blob/master/graphs/basic_graphs.py) - * [Bellman Ford](https://github.com/TheAlgorithms/Python/blob/master/graphs/bellman_ford.py) - * [Bfs Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs_shortest_path.py) - * [Bfs Zero One Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/bfs_zero_one_shortest_path.py) - * [Bidirectional A Star](https://github.com/TheAlgorithms/Python/blob/master/graphs/bidirectional_a_star.py) - * [Bidirectional Breadth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/bidirectional_breadth_first_search.py) - * [Boruvka](https://github.com/TheAlgorithms/Python/blob/master/graphs/boruvka.py) - * [Breadth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search.py) - * [Breadth First Search 2](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search_2.py) - * [Breadth First Search Shortest Path](https://github.com/TheAlgorithms/Python/blob/master/graphs/breadth_first_search_shortest_path.py) - * [Check Bipartite Graph Bfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_bipartite_graph_bfs.py) - * [Check Bipartite Graph Dfs](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_bipartite_graph_dfs.py) - * [Check Cycle](https://github.com/TheAlgorithms/Python/blob/master/graphs/check_cycle.py) - * [Connected Components](https://github.com/TheAlgorithms/Python/blob/master/graphs/connected_components.py) - * [Depth First Search](https://github.com/TheAlgorithms/Python/blob/master/graphs/depth_first_search.py) - * [Depth First Search 2](https://github.com/TheAlgorithms/Python/blob/master/graphs/depth_first_search_2.py) - * [Dijkstra](https://github.com/TheAlgorithms/Python/blob/master/graphs/dijkstra.py) - * [Dijkstra 2](https://github.com/TheAlgorithms/Python/blob/master/graphs/dijkstra_2.py) - * [Dijkstra Algorithm](https://github.com/TheAlgorithms/Python/blob/master/graphs/dijkstra_algorithm.py) - * [Dinic](https://github.com/TheAlgorithms/Python/blob/master/graphs/dinic.py) - * [Directed And Undirected (Weighted) Graph](https://github.com/TheAlgorithms/Python/blob/master/graphs/directed_and_undirected_(weighted)_graph.py) - * [Edmonds Karp Multiple Source And Sink](https://github.com/TheAlgorithms/Python/blob/master/graphs/edmonds_karp_multiple_source_and_sink.py) - * [Eulerian Path And Circuit For Undirected 
Graph](https://github.com/TheAlgorithms/Python/blob/master/graphs/eulerian_path_and_circuit_for_undirected_graph.py) - * [Even Tree](https://github.com/TheAlgorithms/Python/blob/master/graphs/even_tree.py) - * [Finding Bridges](https://github.com/TheAlgorithms/Python/blob/master/graphs/finding_bridges.py) - * [Frequent Pattern Graph Miner](https://github.com/TheAlgorithms/Python/blob/master/graphs/frequent_pattern_graph_miner.py) - * [G Topological Sort](https://github.com/TheAlgorithms/Python/blob/master/graphs/g_topological_sort.py) - * [Gale Shapley Bigraph](https://github.com/TheAlgorithms/Python/blob/master/graphs/gale_shapley_bigraph.py) - * [Graph List](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_list.py) - * [Graph Matrix](https://github.com/TheAlgorithms/Python/blob/master/graphs/graph_matrix.py) - * [Graphs Floyd Warshall](https://github.com/TheAlgorithms/Python/blob/master/graphs/graphs_floyd_warshall.py) - * [Greedy Best First](https://github.com/TheAlgorithms/Python/blob/master/graphs/greedy_best_first.py) - * [Greedy Min Vertex Cover](https://github.com/TheAlgorithms/Python/blob/master/graphs/greedy_min_vertex_cover.py) - * [Kahns Algorithm Long](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_long.py) - * [Kahns Algorithm Topo](https://github.com/TheAlgorithms/Python/blob/master/graphs/kahns_algorithm_topo.py) - * [Karger](https://github.com/TheAlgorithms/Python/blob/master/graphs/karger.py) - * [Markov Chain](https://github.com/TheAlgorithms/Python/blob/master/graphs/markov_chain.py) - * [Matching Min Vertex Cover](https://github.com/TheAlgorithms/Python/blob/master/graphs/matching_min_vertex_cover.py) - * [Minimum Spanning Tree Boruvka](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_boruvka.py) - * [Minimum Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal.py) - * [Minimum Spanning Tree Kruskal2](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_kruskal2.py) - * [Minimum Spanning Tree Prims](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_prims.py) - * [Minimum Spanning Tree Prims2](https://github.com/TheAlgorithms/Python/blob/master/graphs/minimum_spanning_tree_prims2.py) - * [Multi Heuristic Astar](https://github.com/TheAlgorithms/Python/blob/master/graphs/multi_heuristic_astar.py) - * [Page Rank](https://github.com/TheAlgorithms/Python/blob/master/graphs/page_rank.py) - * [Prim](https://github.com/TheAlgorithms/Python/blob/master/graphs/prim.py) - * [Random Graph Generator](https://github.com/TheAlgorithms/Python/blob/master/graphs/random_graph_generator.py) - * [Scc Kosaraju](https://github.com/TheAlgorithms/Python/blob/master/graphs/scc_kosaraju.py) - * [Strongly Connected Components](https://github.com/TheAlgorithms/Python/blob/master/graphs/strongly_connected_components.py) - * [Tarjans Scc](https://github.com/TheAlgorithms/Python/blob/master/graphs/tarjans_scc.py) + * [A Star](graphs/a_star.py) + * [Articulation Points](graphs/articulation_points.py) + * [Basic Graphs](graphs/basic_graphs.py) + * [Bellman Ford](graphs/bellman_ford.py) + * [Bfs Shortest Path](graphs/bfs_shortest_path.py) + * [Bfs Zero One Shortest Path](graphs/bfs_zero_one_shortest_path.py) + * [Bidirectional A Star](graphs/bidirectional_a_star.py) + * [Bidirectional Breadth First Search](graphs/bidirectional_breadth_first_search.py) + * [Boruvka](graphs/boruvka.py) + * [Breadth First 
Search](graphs/breadth_first_search.py) + * [Breadth First Search 2](graphs/breadth_first_search_2.py) + * [Breadth First Search Shortest Path](graphs/breadth_first_search_shortest_path.py) + * [Check Bipartite Graph Bfs](graphs/check_bipartite_graph_bfs.py) + * [Check Bipartite Graph Dfs](graphs/check_bipartite_graph_dfs.py) + * [Check Cycle](graphs/check_cycle.py) + * [Connected Components](graphs/connected_components.py) + * [Depth First Search](graphs/depth_first_search.py) + * [Depth First Search 2](graphs/depth_first_search_2.py) + * [Dijkstra](graphs/dijkstra.py) + * [Dijkstra 2](graphs/dijkstra_2.py) + * [Dijkstra Algorithm](graphs/dijkstra_algorithm.py) + * [Dinic](graphs/dinic.py) + * [Directed And Undirected (Weighted) Graph](graphs/directed_and_undirected_(weighted)_graph.py) + * [Edmonds Karp Multiple Source And Sink](graphs/edmonds_karp_multiple_source_and_sink.py) + * [Eulerian Path And Circuit For Undirected Graph](graphs/eulerian_path_and_circuit_for_undirected_graph.py) + * [Even Tree](graphs/even_tree.py) + * [Finding Bridges](graphs/finding_bridges.py) + * [Frequent Pattern Graph Miner](graphs/frequent_pattern_graph_miner.py) + * [G Topological Sort](graphs/g_topological_sort.py) + * [Gale Shapley Bigraph](graphs/gale_shapley_bigraph.py) + * [Graph List](graphs/graph_list.py) + * [Graph Matrix](graphs/graph_matrix.py) + * [Graphs Floyd Warshall](graphs/graphs_floyd_warshall.py) + * [Greedy Best First](graphs/greedy_best_first.py) + * [Greedy Min Vertex Cover](graphs/greedy_min_vertex_cover.py) + * [Kahns Algorithm Long](graphs/kahns_algorithm_long.py) + * [Kahns Algorithm Topo](graphs/kahns_algorithm_topo.py) + * [Karger](graphs/karger.py) + * [Markov Chain](graphs/markov_chain.py) + * [Matching Min Vertex Cover](graphs/matching_min_vertex_cover.py) + * [Minimum Path Sum](graphs/minimum_path_sum.py) + * [Minimum Spanning Tree Boruvka](graphs/minimum_spanning_tree_boruvka.py) + * [Minimum Spanning Tree Kruskal](graphs/minimum_spanning_tree_kruskal.py) + * [Minimum Spanning Tree Kruskal2](graphs/minimum_spanning_tree_kruskal2.py) + * [Minimum Spanning Tree Prims](graphs/minimum_spanning_tree_prims.py) + * [Minimum Spanning Tree Prims2](graphs/minimum_spanning_tree_prims2.py) + * [Multi Heuristic Astar](graphs/multi_heuristic_astar.py) + * [Page Rank](graphs/page_rank.py) + * [Prim](graphs/prim.py) + * [Random Graph Generator](graphs/random_graph_generator.py) + * [Scc Kosaraju](graphs/scc_kosaraju.py) + * [Strongly Connected Components](graphs/strongly_connected_components.py) + * [Tarjans Scc](graphs/tarjans_scc.py) * Tests - * [Test Min Spanning Tree Kruskal](https://github.com/TheAlgorithms/Python/blob/master/graphs/tests/test_min_spanning_tree_kruskal.py) - * [Test Min Spanning Tree Prim](https://github.com/TheAlgorithms/Python/blob/master/graphs/tests/test_min_spanning_tree_prim.py) + * [Test Min Spanning Tree Kruskal](graphs/tests/test_min_spanning_tree_kruskal.py) + * [Test Min Spanning Tree Prim](graphs/tests/test_min_spanning_tree_prim.py) ## Greedy Methods - * [Optimal Merge Pattern](https://github.com/TheAlgorithms/Python/blob/master/greedy_methods/optimal_merge_pattern.py) + * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) ## Hashes - * [Adler32](https://github.com/TheAlgorithms/Python/blob/master/hashes/adler32.py) - * [Chaos Machine](https://github.com/TheAlgorithms/Python/blob/master/hashes/chaos_machine.py) - * [Djb2](https://github.com/TheAlgorithms/Python/blob/master/hashes/djb2.py) - * [Enigma 
Machine](https://github.com/TheAlgorithms/Python/blob/master/hashes/enigma_machine.py) - * [Hamming Code](https://github.com/TheAlgorithms/Python/blob/master/hashes/hamming_code.py) - * [Luhn](https://github.com/TheAlgorithms/Python/blob/master/hashes/luhn.py) - * [Md5](https://github.com/TheAlgorithms/Python/blob/master/hashes/md5.py) - * [Sdbm](https://github.com/TheAlgorithms/Python/blob/master/hashes/sdbm.py) - * [Sha1](https://github.com/TheAlgorithms/Python/blob/master/hashes/sha1.py) - * [Sha256](https://github.com/TheAlgorithms/Python/blob/master/hashes/sha256.py) + * [Adler32](hashes/adler32.py) + * [Chaos Machine](hashes/chaos_machine.py) + * [Djb2](hashes/djb2.py) + * [Enigma Machine](hashes/enigma_machine.py) + * [Hamming Code](hashes/hamming_code.py) + * [Luhn](hashes/luhn.py) + * [Md5](hashes/md5.py) + * [Sdbm](hashes/sdbm.py) + * [Sha1](hashes/sha1.py) + * [Sha256](hashes/sha256.py) ## Knapsack - * [Greedy Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/greedy_knapsack.py) - * [Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/knapsack.py) + * [Greedy Knapsack](knapsack/greedy_knapsack.py) + * [Knapsack](knapsack/knapsack.py) * Tests - * [Test Greedy Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/tests/test_greedy_knapsack.py) - * [Test Knapsack](https://github.com/TheAlgorithms/Python/blob/master/knapsack/tests/test_knapsack.py) + * [Test Greedy Knapsack](knapsack/tests/test_greedy_knapsack.py) + * [Test Knapsack](knapsack/tests/test_knapsack.py) ## Linear Algebra * Src - * [Conjugate Gradient](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/conjugate_gradient.py) - * [Lib](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/lib.py) - * [Polynom For Points](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/polynom_for_points.py) - * [Power Iteration](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/power_iteration.py) - * [Rayleigh Quotient](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/rayleigh_quotient.py) - * [Schur Complement](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/schur_complement.py) - * [Test Linear Algebra](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/test_linear_algebra.py) - * [Transformations 2D](https://github.com/TheAlgorithms/Python/blob/master/linear_algebra/src/transformations_2d.py) + * [Conjugate Gradient](linear_algebra/src/conjugate_gradient.py) + * [Lib](linear_algebra/src/lib.py) + * [Polynom For Points](linear_algebra/src/polynom_for_points.py) + * [Power Iteration](linear_algebra/src/power_iteration.py) + * [Rayleigh Quotient](linear_algebra/src/rayleigh_quotient.py) + * [Schur Complement](linear_algebra/src/schur_complement.py) + * [Test Linear Algebra](linear_algebra/src/test_linear_algebra.py) + * [Transformations 2D](linear_algebra/src/transformations_2d.py) ## Machine Learning - * [Astar](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/astar.py) - * [Data Transformations](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/data_transformations.py) - * [Decision Tree](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/decision_tree.py) + * [Astar](machine_learning/astar.py) + * [Data Transformations](machine_learning/data_transformations.py) + * [Decision Tree](machine_learning/decision_tree.py) * Forecasting - * 
[Run](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/forecasting/run.py) - * [Gaussian Naive Bayes](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/gaussian_naive_bayes.py) - * [Gradient Boosting Regressor](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/gradient_boosting_regressor.py) - * [Gradient Descent](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/gradient_descent.py) - * [K Means Clust](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/k_means_clust.py) - * [K Nearest Neighbours](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/k_nearest_neighbours.py) - * [Knn Sklearn](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/knn_sklearn.py) - * [Linear Discriminant Analysis](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/linear_discriminant_analysis.py) - * [Linear Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/linear_regression.py) + * [Run](machine_learning/forecasting/run.py) + * [Gaussian Naive Bayes](machine_learning/gaussian_naive_bayes.py) + * [Gradient Boosting Regressor](machine_learning/gradient_boosting_regressor.py) + * [Gradient Descent](machine_learning/gradient_descent.py) + * [K Means Clust](machine_learning/k_means_clust.py) + * [K Nearest Neighbours](machine_learning/k_nearest_neighbours.py) + * [Knn Sklearn](machine_learning/knn_sklearn.py) + * [Linear Discriminant Analysis](machine_learning/linear_discriminant_analysis.py) + * [Linear Regression](machine_learning/linear_regression.py) * Local Weighted Learning - * [Local Weighted Learning](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/local_weighted_learning/local_weighted_learning.py) - * [Logistic Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/logistic_regression.py) + * [Local Weighted Learning](machine_learning/local_weighted_learning/local_weighted_learning.py) + * [Logistic Regression](machine_learning/logistic_regression.py) * Lstm - * [Lstm Prediction](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/lstm/lstm_prediction.py) - * [Multilayer Perceptron Classifier](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/multilayer_perceptron_classifier.py) - * [Polymonial Regression](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/polymonial_regression.py) - * [Random Forest Classifier](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/random_forest_classifier.py) - * [Random Forest Regressor](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/random_forest_regressor.py) - * [Scoring Functions](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/scoring_functions.py) - * [Sequential Minimum Optimization](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/sequential_minimum_optimization.py) - * [Similarity Search](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/similarity_search.py) - * [Support Vector Machines](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/support_vector_machines.py) - * [Word Frequency Functions](https://github.com/TheAlgorithms/Python/blob/master/machine_learning/word_frequency_functions.py) + * [Lstm Prediction](machine_learning/lstm/lstm_prediction.py) + * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) + * [Polymonial 
Regression](machine_learning/polymonial_regression.py) + * [Random Forest Classifier](machine_learning/random_forest_classifier.py) + * [Random Forest Regressor](machine_learning/random_forest_regressor.py) + * [Scoring Functions](machine_learning/scoring_functions.py) + * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) + * [Similarity Search](machine_learning/similarity_search.py) + * [Support Vector Machines](machine_learning/support_vector_machines.py) + * [Word Frequency Functions](machine_learning/word_frequency_functions.py) ## Maths - * [3N Plus 1](https://github.com/TheAlgorithms/Python/blob/master/maths/3n_plus_1.py) - * [Abs](https://github.com/TheAlgorithms/Python/blob/master/maths/abs.py) - * [Abs Max](https://github.com/TheAlgorithms/Python/blob/master/maths/abs_max.py) - * [Abs Min](https://github.com/TheAlgorithms/Python/blob/master/maths/abs_min.py) - * [Add](https://github.com/TheAlgorithms/Python/blob/master/maths/add.py) - * [Aliquot Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/aliquot_sum.py) - * [Allocation Number](https://github.com/TheAlgorithms/Python/blob/master/maths/allocation_number.py) - * [Area](https://github.com/TheAlgorithms/Python/blob/master/maths/area.py) - * [Area Under Curve](https://github.com/TheAlgorithms/Python/blob/master/maths/area_under_curve.py) - * [Armstrong Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/armstrong_numbers.py) - * [Average Absolute Deviation](https://github.com/TheAlgorithms/Python/blob/master/maths/average_absolute_deviation.py) - * [Average Mean](https://github.com/TheAlgorithms/Python/blob/master/maths/average_mean.py) - * [Average Median](https://github.com/TheAlgorithms/Python/blob/master/maths/average_median.py) - * [Average Mode](https://github.com/TheAlgorithms/Python/blob/master/maths/average_mode.py) - * [Bailey Borwein Plouffe](https://github.com/TheAlgorithms/Python/blob/master/maths/bailey_borwein_plouffe.py) - * [Basic Maths](https://github.com/TheAlgorithms/Python/blob/master/maths/basic_maths.py) - * [Binary Exp Mod](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exp_mod.py) - * [Binary Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exponentiation.py) - * [Binary Exponentiation 2](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exponentiation_2.py) - * [Binary Exponentiation 3](https://github.com/TheAlgorithms/Python/blob/master/maths/binary_exponentiation_3.py) - * [Binomial Coefficient](https://github.com/TheAlgorithms/Python/blob/master/maths/binomial_coefficient.py) - * [Binomial Distribution](https://github.com/TheAlgorithms/Python/blob/master/maths/binomial_distribution.py) - * [Bisection](https://github.com/TheAlgorithms/Python/blob/master/maths/bisection.py) - * [Ceil](https://github.com/TheAlgorithms/Python/blob/master/maths/ceil.py) - * [Check Polygon](https://github.com/TheAlgorithms/Python/blob/master/maths/check_polygon.py) - * [Chudnovsky Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/chudnovsky_algorithm.py) - * [Collatz Sequence](https://github.com/TheAlgorithms/Python/blob/master/maths/collatz_sequence.py) - * [Combinations](https://github.com/TheAlgorithms/Python/blob/master/maths/combinations.py) - * [Decimal Isolate](https://github.com/TheAlgorithms/Python/blob/master/maths/decimal_isolate.py) - * [Double Factorial Iterative](https://github.com/TheAlgorithms/Python/blob/master/maths/double_factorial_iterative.py) - * [Double 
Factorial Recursive](https://github.com/TheAlgorithms/Python/blob/master/maths/double_factorial_recursive.py) - * [Entropy](https://github.com/TheAlgorithms/Python/blob/master/maths/entropy.py) - * [Euclidean Distance](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_distance.py) - * [Euclidean Gcd](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_gcd.py) - * [Euler Method](https://github.com/TheAlgorithms/Python/blob/master/maths/euler_method.py) - * [Euler Modified](https://github.com/TheAlgorithms/Python/blob/master/maths/euler_modified.py) - * [Eulers Totient](https://github.com/TheAlgorithms/Python/blob/master/maths/eulers_totient.py) - * [Extended Euclidean Algorithm](https://github.com/TheAlgorithms/Python/blob/master/maths/extended_euclidean_algorithm.py) - * [Factorial Iterative](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_iterative.py) - * [Factorial Recursive](https://github.com/TheAlgorithms/Python/blob/master/maths/factorial_recursive.py) - * [Factors](https://github.com/TheAlgorithms/Python/blob/master/maths/factors.py) - * [Fermat Little Theorem](https://github.com/TheAlgorithms/Python/blob/master/maths/fermat_little_theorem.py) - * [Fibonacci](https://github.com/TheAlgorithms/Python/blob/master/maths/fibonacci.py) - * [Find Max](https://github.com/TheAlgorithms/Python/blob/master/maths/find_max.py) - * [Find Max Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/find_max_recursion.py) - * [Find Min](https://github.com/TheAlgorithms/Python/blob/master/maths/find_min.py) - * [Find Min Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/find_min_recursion.py) - * [Floor](https://github.com/TheAlgorithms/Python/blob/master/maths/floor.py) - * [Gamma](https://github.com/TheAlgorithms/Python/blob/master/maths/gamma.py) - * [Gamma Recursive](https://github.com/TheAlgorithms/Python/blob/master/maths/gamma_recursive.py) - * [Gaussian](https://github.com/TheAlgorithms/Python/blob/master/maths/gaussian.py) - * [Greatest Common Divisor](https://github.com/TheAlgorithms/Python/blob/master/maths/greatest_common_divisor.py) - * [Greedy Coin Change](https://github.com/TheAlgorithms/Python/blob/master/maths/greedy_coin_change.py) - * [Hardy Ramanujanalgo](https://github.com/TheAlgorithms/Python/blob/master/maths/hardy_ramanujanalgo.py) - * [Integration By Simpson Approx](https://github.com/TheAlgorithms/Python/blob/master/maths/integration_by_simpson_approx.py) - * [Is Ip V4 Address Valid](https://github.com/TheAlgorithms/Python/blob/master/maths/is_ip_v4_address_valid.py) - * [Is Square Free](https://github.com/TheAlgorithms/Python/blob/master/maths/is_square_free.py) - * [Jaccard Similarity](https://github.com/TheAlgorithms/Python/blob/master/maths/jaccard_similarity.py) - * [Kadanes](https://github.com/TheAlgorithms/Python/blob/master/maths/kadanes.py) - * [Karatsuba](https://github.com/TheAlgorithms/Python/blob/master/maths/karatsuba.py) - * [Krishnamurthy Number](https://github.com/TheAlgorithms/Python/blob/master/maths/krishnamurthy_number.py) - * [Kth Lexicographic Permutation](https://github.com/TheAlgorithms/Python/blob/master/maths/kth_lexicographic_permutation.py) - * [Largest Of Very Large Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/largest_of_very_large_numbers.py) - * [Largest Subarray Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/largest_subarray_sum.py) - * [Least Common 
Multiple](https://github.com/TheAlgorithms/Python/blob/master/maths/least_common_multiple.py) - * [Line Length](https://github.com/TheAlgorithms/Python/blob/master/maths/line_length.py) - * [Lucas Lehmer Primality Test](https://github.com/TheAlgorithms/Python/blob/master/maths/lucas_lehmer_primality_test.py) - * [Lucas Series](https://github.com/TheAlgorithms/Python/blob/master/maths/lucas_series.py) - * [Matrix Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/maths/matrix_exponentiation.py) - * [Max Sum Sliding Window](https://github.com/TheAlgorithms/Python/blob/master/maths/max_sum_sliding_window.py) - * [Median Of Two Arrays](https://github.com/TheAlgorithms/Python/blob/master/maths/median_of_two_arrays.py) - * [Miller Rabin](https://github.com/TheAlgorithms/Python/blob/master/maths/miller_rabin.py) - * [Mobius Function](https://github.com/TheAlgorithms/Python/blob/master/maths/mobius_function.py) - * [Modular Exponential](https://github.com/TheAlgorithms/Python/blob/master/maths/modular_exponential.py) - * [Monte Carlo](https://github.com/TheAlgorithms/Python/blob/master/maths/monte_carlo.py) - * [Monte Carlo Dice](https://github.com/TheAlgorithms/Python/blob/master/maths/monte_carlo_dice.py) - * [Nevilles Method](https://github.com/TheAlgorithms/Python/blob/master/maths/nevilles_method.py) - * [Newton Raphson](https://github.com/TheAlgorithms/Python/blob/master/maths/newton_raphson.py) - * [Number Of Digits](https://github.com/TheAlgorithms/Python/blob/master/maths/number_of_digits.py) - * [Numerical Integration](https://github.com/TheAlgorithms/Python/blob/master/maths/numerical_integration.py) - * [Perfect Cube](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_cube.py) - * [Perfect Number](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_number.py) - * [Perfect Square](https://github.com/TheAlgorithms/Python/blob/master/maths/perfect_square.py) - * [Persistence](https://github.com/TheAlgorithms/Python/blob/master/maths/persistence.py) - * [Pi Monte Carlo Estimation](https://github.com/TheAlgorithms/Python/blob/master/maths/pi_monte_carlo_estimation.py) - * [Points Are Collinear 3D](https://github.com/TheAlgorithms/Python/blob/master/maths/points_are_collinear_3d.py) - * [Pollard Rho](https://github.com/TheAlgorithms/Python/blob/master/maths/pollard_rho.py) - * [Polynomial Evaluation](https://github.com/TheAlgorithms/Python/blob/master/maths/polynomial_evaluation.py) - * [Power Using Recursion](https://github.com/TheAlgorithms/Python/blob/master/maths/power_using_recursion.py) - * [Prime Check](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_check.py) - * [Prime Factors](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_factors.py) - * [Prime Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_numbers.py) - * [Prime Sieve Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/prime_sieve_eratosthenes.py) - * [Primelib](https://github.com/TheAlgorithms/Python/blob/master/maths/primelib.py) - * [Proth Number](https://github.com/TheAlgorithms/Python/blob/master/maths/proth_number.py) - * [Pythagoras](https://github.com/TheAlgorithms/Python/blob/master/maths/pythagoras.py) - * [Qr Decomposition](https://github.com/TheAlgorithms/Python/blob/master/maths/qr_decomposition.py) - * [Quadratic Equations Complex Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/quadratic_equations_complex_numbers.py) - * 
[Radians](https://github.com/TheAlgorithms/Python/blob/master/maths/radians.py) - * [Radix2 Fft](https://github.com/TheAlgorithms/Python/blob/master/maths/radix2_fft.py) - * [Relu](https://github.com/TheAlgorithms/Python/blob/master/maths/relu.py) - * [Runge Kutta](https://github.com/TheAlgorithms/Python/blob/master/maths/runge_kutta.py) - * [Segmented Sieve](https://github.com/TheAlgorithms/Python/blob/master/maths/segmented_sieve.py) + * [3N Plus 1](maths/3n_plus_1.py) + * [Abs](maths/abs.py) + * [Abs Max](maths/abs_max.py) + * [Abs Min](maths/abs_min.py) + * [Add](maths/add.py) + * [Aliquot Sum](maths/aliquot_sum.py) + * [Allocation Number](maths/allocation_number.py) + * [Area](maths/area.py) + * [Area Under Curve](maths/area_under_curve.py) + * [Armstrong Numbers](maths/armstrong_numbers.py) + * [Average Absolute Deviation](maths/average_absolute_deviation.py) + * [Average Mean](maths/average_mean.py) + * [Average Median](maths/average_median.py) + * [Average Mode](maths/average_mode.py) + * [Bailey Borwein Plouffe](maths/bailey_borwein_plouffe.py) + * [Basic Maths](maths/basic_maths.py) + * [Binary Exp Mod](maths/binary_exp_mod.py) + * [Binary Exponentiation](maths/binary_exponentiation.py) + * [Binary Exponentiation 2](maths/binary_exponentiation_2.py) + * [Binary Exponentiation 3](maths/binary_exponentiation_3.py) + * [Binomial Coefficient](maths/binomial_coefficient.py) + * [Binomial Distribution](maths/binomial_distribution.py) + * [Bisection](maths/bisection.py) + * [Ceil](maths/ceil.py) + * [Check Polygon](maths/check_polygon.py) + * [Chudnovsky Algorithm](maths/chudnovsky_algorithm.py) + * [Collatz Sequence](maths/collatz_sequence.py) + * [Combinations](maths/combinations.py) + * [Decimal Isolate](maths/decimal_isolate.py) + * [Double Factorial Iterative](maths/double_factorial_iterative.py) + * [Double Factorial Recursive](maths/double_factorial_recursive.py) + * [Entropy](maths/entropy.py) + * [Euclidean Distance](maths/euclidean_distance.py) + * [Euclidean Gcd](maths/euclidean_gcd.py) + * [Euler Method](maths/euler_method.py) + * [Euler Modified](maths/euler_modified.py) + * [Eulers Totient](maths/eulers_totient.py) + * [Extended Euclidean Algorithm](maths/extended_euclidean_algorithm.py) + * [Factorial Iterative](maths/factorial_iterative.py) + * [Factorial Recursive](maths/factorial_recursive.py) + * [Factors](maths/factors.py) + * [Fermat Little Theorem](maths/fermat_little_theorem.py) + * [Fibonacci](maths/fibonacci.py) + * [Find Max](maths/find_max.py) + * [Find Max Recursion](maths/find_max_recursion.py) + * [Find Min](maths/find_min.py) + * [Find Min Recursion](maths/find_min_recursion.py) + * [Floor](maths/floor.py) + * [Gamma](maths/gamma.py) + * [Gamma Recursive](maths/gamma_recursive.py) + * [Gaussian](maths/gaussian.py) + * [Greatest Common Divisor](maths/greatest_common_divisor.py) + * [Greedy Coin Change](maths/greedy_coin_change.py) + * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) + * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) + * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) + * [Is Square Free](maths/is_square_free.py) + * [Jaccard Similarity](maths/jaccard_similarity.py) + * [Kadanes](maths/kadanes.py) + * [Karatsuba](maths/karatsuba.py) + * [Krishnamurthy Number](maths/krishnamurthy_number.py) + * [Kth Lexicographic Permutation](maths/kth_lexicographic_permutation.py) + * [Largest Of Very Large Numbers](maths/largest_of_very_large_numbers.py) + * [Largest Subarray Sum](maths/largest_subarray_sum.py) + * 
[Least Common Multiple](maths/least_common_multiple.py) + * [Line Length](maths/line_length.py) + * [Lucas Lehmer Primality Test](maths/lucas_lehmer_primality_test.py) + * [Lucas Series](maths/lucas_series.py) + * [Matrix Exponentiation](maths/matrix_exponentiation.py) + * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) + * [Median Of Two Arrays](maths/median_of_two_arrays.py) + * [Miller Rabin](maths/miller_rabin.py) + * [Mobius Function](maths/mobius_function.py) + * [Modular Exponential](maths/modular_exponential.py) + * [Monte Carlo](maths/monte_carlo.py) + * [Monte Carlo Dice](maths/monte_carlo_dice.py) + * [Nevilles Method](maths/nevilles_method.py) + * [Newton Raphson](maths/newton_raphson.py) + * [Number Of Digits](maths/number_of_digits.py) + * [Numerical Integration](maths/numerical_integration.py) + * [Perfect Cube](maths/perfect_cube.py) + * [Perfect Number](maths/perfect_number.py) + * [Perfect Square](maths/perfect_square.py) + * [Persistence](maths/persistence.py) + * [Pi Monte Carlo Estimation](maths/pi_monte_carlo_estimation.py) + * [Points Are Collinear 3D](maths/points_are_collinear_3d.py) + * [Pollard Rho](maths/pollard_rho.py) + * [Polynomial Evaluation](maths/polynomial_evaluation.py) + * [Power Using Recursion](maths/power_using_recursion.py) + * [Prime Check](maths/prime_check.py) + * [Prime Factors](maths/prime_factors.py) + * [Prime Numbers](maths/prime_numbers.py) + * [Prime Sieve Eratosthenes](maths/prime_sieve_eratosthenes.py) + * [Primelib](maths/primelib.py) + * [Proth Number](maths/proth_number.py) + * [Pythagoras](maths/pythagoras.py) + * [Qr Decomposition](maths/qr_decomposition.py) + * [Quadratic Equations Complex Numbers](maths/quadratic_equations_complex_numbers.py) + * [Radians](maths/radians.py) + * [Radix2 Fft](maths/radix2_fft.py) + * [Relu](maths/relu.py) + * [Runge Kutta](maths/runge_kutta.py) + * [Segmented Sieve](maths/segmented_sieve.py) * Series - * [Arithmetic](https://github.com/TheAlgorithms/Python/blob/master/maths/series/arithmetic.py) - * [Geometric](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric.py) - * [Geometric Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/geometric_series.py) - * [Harmonic](https://github.com/TheAlgorithms/Python/blob/master/maths/series/harmonic.py) - * [Harmonic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/harmonic_series.py) - * [Hexagonal Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/series/hexagonal_numbers.py) - * [P Series](https://github.com/TheAlgorithms/Python/blob/master/maths/series/p_series.py) - * [Sieve Of Eratosthenes](https://github.com/TheAlgorithms/Python/blob/master/maths/sieve_of_eratosthenes.py) - * [Sigmoid](https://github.com/TheAlgorithms/Python/blob/master/maths/sigmoid.py) - * [Simpson Rule](https://github.com/TheAlgorithms/Python/blob/master/maths/simpson_rule.py) - * [Sock Merchant](https://github.com/TheAlgorithms/Python/blob/master/maths/sock_merchant.py) - * [Softmax](https://github.com/TheAlgorithms/Python/blob/master/maths/softmax.py) - * [Square Root](https://github.com/TheAlgorithms/Python/blob/master/maths/square_root.py) - * [Sum Of Arithmetic Series](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_arithmetic_series.py) - * [Sum Of Digits](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_digits.py) - * [Sum Of Geometric Progression](https://github.com/TheAlgorithms/Python/blob/master/maths/sum_of_geometric_progression.py) - * 
[Sylvester Sequence](https://github.com/TheAlgorithms/Python/blob/master/maths/sylvester_sequence.py) - * [Test Prime Check](https://github.com/TheAlgorithms/Python/blob/master/maths/test_prime_check.py) - * [Trapezoidal Rule](https://github.com/TheAlgorithms/Python/blob/master/maths/trapezoidal_rule.py) - * [Triplet Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/triplet_sum.py) - * [Two Pointer](https://github.com/TheAlgorithms/Python/blob/master/maths/two_pointer.py) - * [Two Sum](https://github.com/TheAlgorithms/Python/blob/master/maths/two_sum.py) - * [Ugly Numbers](https://github.com/TheAlgorithms/Python/blob/master/maths/ugly_numbers.py) - * [Volume](https://github.com/TheAlgorithms/Python/blob/master/maths/volume.py) - * [Zellers Congruence](https://github.com/TheAlgorithms/Python/blob/master/maths/zellers_congruence.py) + * [Arithmetic](maths/series/arithmetic.py) + * [Geometric](maths/series/geometric.py) + * [Geometric Series](maths/series/geometric_series.py) + * [Harmonic](maths/series/harmonic.py) + * [Harmonic Series](maths/series/harmonic_series.py) + * [Hexagonal Numbers](maths/series/hexagonal_numbers.py) + * [P Series](maths/series/p_series.py) + * [Sieve Of Eratosthenes](maths/sieve_of_eratosthenes.py) + * [Sigmoid](maths/sigmoid.py) + * [Simpson Rule](maths/simpson_rule.py) + * [Sin](maths/sin.py) + * [Sock Merchant](maths/sock_merchant.py) + * [Softmax](maths/softmax.py) + * [Square Root](maths/square_root.py) + * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) + * [Sum Of Digits](maths/sum_of_digits.py) + * [Sum Of Geometric Progression](maths/sum_of_geometric_progression.py) + * [Sylvester Sequence](maths/sylvester_sequence.py) + * [Test Prime Check](maths/test_prime_check.py) + * [Trapezoidal Rule](maths/trapezoidal_rule.py) + * [Triplet Sum](maths/triplet_sum.py) + * [Two Pointer](maths/two_pointer.py) + * [Two Sum](maths/two_sum.py) + * [Ugly Numbers](maths/ugly_numbers.py) + * [Volume](maths/volume.py) + * [Zellers Congruence](maths/zellers_congruence.py) ## Matrix - * [Count Islands In Matrix](https://github.com/TheAlgorithms/Python/blob/master/matrix/count_islands_in_matrix.py) - * [Inverse Of Matrix](https://github.com/TheAlgorithms/Python/blob/master/matrix/inverse_of_matrix.py) - * [Matrix Class](https://github.com/TheAlgorithms/Python/blob/master/matrix/matrix_class.py) - * [Matrix Operation](https://github.com/TheAlgorithms/Python/blob/master/matrix/matrix_operation.py) - * [Nth Fibonacci Using Matrix Exponentiation](https://github.com/TheAlgorithms/Python/blob/master/matrix/nth_fibonacci_using_matrix_exponentiation.py) - * [Rotate Matrix](https://github.com/TheAlgorithms/Python/blob/master/matrix/rotate_matrix.py) - * [Searching In Sorted Matrix](https://github.com/TheAlgorithms/Python/blob/master/matrix/searching_in_sorted_matrix.py) - * [Sherman Morrison](https://github.com/TheAlgorithms/Python/blob/master/matrix/sherman_morrison.py) - * [Spiral Print](https://github.com/TheAlgorithms/Python/blob/master/matrix/spiral_print.py) + * [Count Islands In Matrix](matrix/count_islands_in_matrix.py) + * [Inverse Of Matrix](matrix/inverse_of_matrix.py) + * [Matrix Class](matrix/matrix_class.py) + * [Matrix Operation](matrix/matrix_operation.py) + * [Nth Fibonacci Using Matrix Exponentiation](matrix/nth_fibonacci_using_matrix_exponentiation.py) + * [Rotate Matrix](matrix/rotate_matrix.py) + * [Searching In Sorted Matrix](matrix/searching_in_sorted_matrix.py) + * [Sherman Morrison](matrix/sherman_morrison.py) + * [Spiral 
Print](matrix/spiral_print.py) * Tests - * [Test Matrix Operation](https://github.com/TheAlgorithms/Python/blob/master/matrix/tests/test_matrix_operation.py) + * [Test Matrix Operation](matrix/tests/test_matrix_operation.py) ## Networking Flow - * [Ford Fulkerson](https://github.com/TheAlgorithms/Python/blob/master/networking_flow/ford_fulkerson.py) - * [Minimum Cut](https://github.com/TheAlgorithms/Python/blob/master/networking_flow/minimum_cut.py) + * [Ford Fulkerson](networking_flow/ford_fulkerson.py) + * [Minimum Cut](networking_flow/minimum_cut.py) ## Neural Network - * [2 Hidden Layers Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/2_hidden_layers_neural_network.py) - * [Back Propagation Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/back_propagation_neural_network.py) - * [Convolution Neural Network](https://github.com/TheAlgorithms/Python/blob/master/neural_network/convolution_neural_network.py) - * [Perceptron](https://github.com/TheAlgorithms/Python/blob/master/neural_network/perceptron.py) + * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) + * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) + * [Convolution Neural Network](neural_network/convolution_neural_network.py) + * [Perceptron](neural_network/perceptron.py) ## Other - * [Activity Selection](https://github.com/TheAlgorithms/Python/blob/master/other/activity_selection.py) - * [Alternative List Arrange](https://github.com/TheAlgorithms/Python/blob/master/other/alternative_list_arrange.py) - * [Check Strong Password](https://github.com/TheAlgorithms/Python/blob/master/other/check_strong_password.py) - * [Davisb Putnamb Logemannb Loveland](https://github.com/TheAlgorithms/Python/blob/master/other/davisb_putnamb_logemannb_loveland.py) - * [Dijkstra Bankers Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/dijkstra_bankers_algorithm.py) - * [Doomsday](https://github.com/TheAlgorithms/Python/blob/master/other/doomsday.py) - * [Fischer Yates Shuffle](https://github.com/TheAlgorithms/Python/blob/master/other/fischer_yates_shuffle.py) - * [Gauss Easter](https://github.com/TheAlgorithms/Python/blob/master/other/gauss_easter.py) - * [Graham Scan](https://github.com/TheAlgorithms/Python/blob/master/other/graham_scan.py) - * [Greedy](https://github.com/TheAlgorithms/Python/blob/master/other/greedy.py) - * [Least Recently Used](https://github.com/TheAlgorithms/Python/blob/master/other/least_recently_used.py) - * [Lfu Cache](https://github.com/TheAlgorithms/Python/blob/master/other/lfu_cache.py) - * [Linear Congruential Generator](https://github.com/TheAlgorithms/Python/blob/master/other/linear_congruential_generator.py) - * [Lru Cache](https://github.com/TheAlgorithms/Python/blob/master/other/lru_cache.py) - * [Magicdiamondpattern](https://github.com/TheAlgorithms/Python/blob/master/other/magicdiamondpattern.py) - * [Nested Brackets](https://github.com/TheAlgorithms/Python/blob/master/other/nested_brackets.py) - * [Password Generator](https://github.com/TheAlgorithms/Python/blob/master/other/password_generator.py) - * [Scoring Algorithm](https://github.com/TheAlgorithms/Python/blob/master/other/scoring_algorithm.py) - * [Sdes](https://github.com/TheAlgorithms/Python/blob/master/other/sdes.py) - * [Tower Of Hanoi](https://github.com/TheAlgorithms/Python/blob/master/other/tower_of_hanoi.py) + * [Activity Selection](other/activity_selection.py) + * [Alternative List 
Arrange](other/alternative_list_arrange.py) + * [Check Strong Password](other/check_strong_password.py) + * [Davisb Putnamb Logemannb Loveland](other/davisb_putnamb_logemannb_loveland.py) + * [Dijkstra Bankers Algorithm](other/dijkstra_bankers_algorithm.py) + * [Doomsday](other/doomsday.py) + * [Fischer Yates Shuffle](other/fischer_yates_shuffle.py) + * [Gauss Easter](other/gauss_easter.py) + * [Graham Scan](other/graham_scan.py) + * [Greedy](other/greedy.py) + * [Least Recently Used](other/least_recently_used.py) + * [Lfu Cache](other/lfu_cache.py) + * [Linear Congruential Generator](other/linear_congruential_generator.py) + * [Lru Cache](other/lru_cache.py) + * [Magicdiamondpattern](other/magicdiamondpattern.py) + * [Nested Brackets](other/nested_brackets.py) + * [Password Generator](other/password_generator.py) + * [Scoring Algorithm](other/scoring_algorithm.py) + * [Sdes](other/sdes.py) + * [Tower Of Hanoi](other/tower_of_hanoi.py) ## Physics - * [Horizontal Projectile Motion](https://github.com/TheAlgorithms/Python/blob/master/physics/horizontal_projectile_motion.py) - * [N Body Simulation](https://github.com/TheAlgorithms/Python/blob/master/physics/n_body_simulation.py) - * [Newtons Second Law Of Motion](https://github.com/TheAlgorithms/Python/blob/master/physics/newtons_second_law_of_motion.py) + * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) + * [Lorenz Transformation Four Vector](physics/lorenz_transformation_four_vector.py) + * [N Body Simulation](physics/n_body_simulation.py) + * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) ## Project Euler * Problem 001 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol2.py) - * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol3.py) - * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol4.py) - * [Sol5](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol5.py) - * [Sol6](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol6.py) - * [Sol7](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_001/sol7.py) + * [Sol1](project_euler/problem_001/sol1.py) + * [Sol2](project_euler/problem_001/sol2.py) + * [Sol3](project_euler/problem_001/sol3.py) + * [Sol4](project_euler/problem_001/sol4.py) + * [Sol5](project_euler/problem_001/sol5.py) + * [Sol6](project_euler/problem_001/sol6.py) + * [Sol7](project_euler/problem_001/sol7.py) * Problem 002 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol2.py) - * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol3.py) - * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol4.py) - * [Sol5](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_002/sol5.py) + * [Sol1](project_euler/problem_002/sol1.py) + * [Sol2](project_euler/problem_002/sol2.py) + * [Sol3](project_euler/problem_002/sol3.py) + * [Sol4](project_euler/problem_002/sol4.py) + * [Sol5](project_euler/problem_002/sol5.py) * Problem 003 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_003/sol1.py) - * 
[Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_003/sol2.py) - * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_003/sol3.py) + * [Sol1](project_euler/problem_003/sol1.py) + * [Sol2](project_euler/problem_003/sol2.py) + * [Sol3](project_euler/problem_003/sol3.py) * Problem 004 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_004/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_004/sol2.py) + * [Sol1](project_euler/problem_004/sol1.py) + * [Sol2](project_euler/problem_004/sol2.py) * Problem 005 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_005/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_005/sol2.py) + * [Sol1](project_euler/problem_005/sol1.py) + * [Sol2](project_euler/problem_005/sol2.py) * Problem 006 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_006/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_006/sol2.py) - * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_006/sol3.py) - * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_006/sol4.py) + * [Sol1](project_euler/problem_006/sol1.py) + * [Sol2](project_euler/problem_006/sol2.py) + * [Sol3](project_euler/problem_006/sol3.py) + * [Sol4](project_euler/problem_006/sol4.py) * Problem 007 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_007/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_007/sol2.py) - * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_007/sol3.py) + * [Sol1](project_euler/problem_007/sol1.py) + * [Sol2](project_euler/problem_007/sol2.py) + * [Sol3](project_euler/problem_007/sol3.py) * Problem 008 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_008/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_008/sol2.py) - * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_008/sol3.py) + * [Sol1](project_euler/problem_008/sol1.py) + * [Sol2](project_euler/problem_008/sol2.py) + * [Sol3](project_euler/problem_008/sol3.py) * Problem 009 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_009/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_009/sol2.py) - * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_009/sol3.py) + * [Sol1](project_euler/problem_009/sol1.py) + * [Sol2](project_euler/problem_009/sol2.py) + * [Sol3](project_euler/problem_009/sol3.py) * Problem 010 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_010/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_010/sol2.py) - * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_010/sol3.py) + * [Sol1](project_euler/problem_010/sol1.py) + * [Sol2](project_euler/problem_010/sol2.py) + * [Sol3](project_euler/problem_010/sol3.py) * Problem 011 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_011/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_011/sol2.py) + * 
[Sol1](project_euler/problem_011/sol1.py) + * [Sol2](project_euler/problem_011/sol2.py) * Problem 012 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_012/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_012/sol2.py) + * [Sol1](project_euler/problem_012/sol1.py) + * [Sol2](project_euler/problem_012/sol2.py) * Problem 013 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_013/sol1.py) + * [Sol1](project_euler/problem_013/sol1.py) * Problem 014 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_014/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_014/sol2.py) + * [Sol1](project_euler/problem_014/sol1.py) + * [Sol2](project_euler/problem_014/sol2.py) * Problem 015 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_015/sol1.py) + * [Sol1](project_euler/problem_015/sol1.py) * Problem 016 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_016/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_016/sol2.py) + * [Sol1](project_euler/problem_016/sol1.py) + * [Sol2](project_euler/problem_016/sol2.py) * Problem 017 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_017/sol1.py) + * [Sol1](project_euler/problem_017/sol1.py) * Problem 018 - * [Solution](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_018/solution.py) + * [Solution](project_euler/problem_018/solution.py) * Problem 019 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_019/sol1.py) + * [Sol1](project_euler/problem_019/sol1.py) * Problem 020 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_020/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_020/sol2.py) - * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_020/sol3.py) - * [Sol4](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_020/sol4.py) + * [Sol1](project_euler/problem_020/sol1.py) + * [Sol2](project_euler/problem_020/sol2.py) + * [Sol3](project_euler/problem_020/sol3.py) + * [Sol4](project_euler/problem_020/sol4.py) * Problem 021 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_021/sol1.py) + * [Sol1](project_euler/problem_021/sol1.py) * Problem 022 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_022/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_022/sol2.py) + * [Sol1](project_euler/problem_022/sol1.py) + * [Sol2](project_euler/problem_022/sol2.py) * Problem 023 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_023/sol1.py) + * [Sol1](project_euler/problem_023/sol1.py) * Problem 024 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_024/sol1.py) + * [Sol1](project_euler/problem_024/sol1.py) * Problem 025 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_025/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_025/sol2.py) - * [Sol3](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_025/sol3.py) + * [Sol1](project_euler/problem_025/sol1.py) + * 
[Sol2](project_euler/problem_025/sol2.py) + * [Sol3](project_euler/problem_025/sol3.py) * Problem 026 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_026/sol1.py) + * [Sol1](project_euler/problem_026/sol1.py) * Problem 027 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_027/sol1.py) + * [Sol1](project_euler/problem_027/sol1.py) * Problem 028 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_028/sol1.py) + * [Sol1](project_euler/problem_028/sol1.py) * Problem 029 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_029/sol1.py) + * [Sol1](project_euler/problem_029/sol1.py) * Problem 030 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_030/sol1.py) + * [Sol1](project_euler/problem_030/sol1.py) * Problem 031 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_031/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_031/sol2.py) + * [Sol1](project_euler/problem_031/sol1.py) + * [Sol2](project_euler/problem_031/sol2.py) * Problem 032 - * [Sol32](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_032/sol32.py) + * [Sol32](project_euler/problem_032/sol32.py) * Problem 033 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_033/sol1.py) + * [Sol1](project_euler/problem_033/sol1.py) * Problem 034 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_034/sol1.py) + * [Sol1](project_euler/problem_034/sol1.py) * Problem 035 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_035/sol1.py) + * [Sol1](project_euler/problem_035/sol1.py) * Problem 036 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_036/sol1.py) + * [Sol1](project_euler/problem_036/sol1.py) * Problem 037 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_037/sol1.py) + * [Sol1](project_euler/problem_037/sol1.py) * Problem 038 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_038/sol1.py) + * [Sol1](project_euler/problem_038/sol1.py) * Problem 039 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_039/sol1.py) + * [Sol1](project_euler/problem_039/sol1.py) * Problem 040 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_040/sol1.py) + * [Sol1](project_euler/problem_040/sol1.py) * Problem 041 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_041/sol1.py) + * [Sol1](project_euler/problem_041/sol1.py) * Problem 042 - * [Solution42](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_042/solution42.py) + * [Solution42](project_euler/problem_042/solution42.py) * Problem 043 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_043/sol1.py) + * [Sol1](project_euler/problem_043/sol1.py) * Problem 044 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_044/sol1.py) + * [Sol1](project_euler/problem_044/sol1.py) * Problem 045 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_045/sol1.py) + * [Sol1](project_euler/problem_045/sol1.py) * Problem 046 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_046/sol1.py) + * 
[Sol1](project_euler/problem_046/sol1.py) * Problem 047 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_047/sol1.py) + * [Sol1](project_euler/problem_047/sol1.py) * Problem 048 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_048/sol1.py) + * [Sol1](project_euler/problem_048/sol1.py) * Problem 049 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_049/sol1.py) + * [Sol1](project_euler/problem_049/sol1.py) * Problem 050 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_050/sol1.py) + * [Sol1](project_euler/problem_050/sol1.py) * Problem 051 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_051/sol1.py) + * [Sol1](project_euler/problem_051/sol1.py) * Problem 052 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_052/sol1.py) + * [Sol1](project_euler/problem_052/sol1.py) * Problem 053 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_053/sol1.py) + * [Sol1](project_euler/problem_053/sol1.py) * Problem 054 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_054/sol1.py) - * [Test Poker Hand](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_054/test_poker_hand.py) + * [Sol1](project_euler/problem_054/sol1.py) + * [Test Poker Hand](project_euler/problem_054/test_poker_hand.py) * Problem 055 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_055/sol1.py) + * [Sol1](project_euler/problem_055/sol1.py) * Problem 056 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_056/sol1.py) + * [Sol1](project_euler/problem_056/sol1.py) * Problem 057 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_057/sol1.py) + * [Sol1](project_euler/problem_057/sol1.py) * Problem 058 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_058/sol1.py) + * [Sol1](project_euler/problem_058/sol1.py) * Problem 059 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_059/sol1.py) + * [Sol1](project_euler/problem_059/sol1.py) * Problem 062 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_062/sol1.py) + * [Sol1](project_euler/problem_062/sol1.py) * Problem 063 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_063/sol1.py) + * [Sol1](project_euler/problem_063/sol1.py) * Problem 064 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_064/sol1.py) + * [Sol1](project_euler/problem_064/sol1.py) * Problem 065 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_065/sol1.py) + * [Sol1](project_euler/problem_065/sol1.py) * Problem 067 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_067/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_067/sol2.py) + * [Sol1](project_euler/problem_067/sol1.py) + * [Sol2](project_euler/problem_067/sol2.py) * Problem 068 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_068/sol1.py) + * [Sol1](project_euler/problem_068/sol1.py) * Problem 069 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_069/sol1.py) + * [Sol1](project_euler/problem_069/sol1.py) * Problem 070 - * 
[Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_070/sol1.py) + * [Sol1](project_euler/problem_070/sol1.py) * Problem 071 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_071/sol1.py) + * [Sol1](project_euler/problem_071/sol1.py) * Problem 072 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_072/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_072/sol2.py) + * [Sol1](project_euler/problem_072/sol1.py) + * [Sol2](project_euler/problem_072/sol2.py) * Problem 074 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_074/sol1.py) - * [Sol2](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_074/sol2.py) + * [Sol1](project_euler/problem_074/sol1.py) + * [Sol2](project_euler/problem_074/sol2.py) * Problem 075 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_075/sol1.py) + * [Sol1](project_euler/problem_075/sol1.py) * Problem 076 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_076/sol1.py) + * [Sol1](project_euler/problem_076/sol1.py) * Problem 077 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_077/sol1.py) + * [Sol1](project_euler/problem_077/sol1.py) * Problem 078 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_078/sol1.py) + * [Sol1](project_euler/problem_078/sol1.py) * Problem 080 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_080/sol1.py) + * [Sol1](project_euler/problem_080/sol1.py) * Problem 081 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_081/sol1.py) + * [Sol1](project_euler/problem_081/sol1.py) * Problem 085 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_085/sol1.py) + * [Sol1](project_euler/problem_085/sol1.py) * Problem 086 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_086/sol1.py) + * [Sol1](project_euler/problem_086/sol1.py) * Problem 087 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_087/sol1.py) + * [Sol1](project_euler/problem_087/sol1.py) * Problem 089 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_089/sol1.py) + * [Sol1](project_euler/problem_089/sol1.py) * Problem 091 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_091/sol1.py) + * [Sol1](project_euler/problem_091/sol1.py) * Problem 092 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_092/sol1.py) + * [Sol1](project_euler/problem_092/sol1.py) * Problem 097 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_097/sol1.py) + * [Sol1](project_euler/problem_097/sol1.py) * Problem 099 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_099/sol1.py) + * [Sol1](project_euler/problem_099/sol1.py) * Problem 101 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_101/sol1.py) + * [Sol1](project_euler/problem_101/sol1.py) * Problem 102 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_102/sol1.py) + * [Sol1](project_euler/problem_102/sol1.py) * Problem 104 - * [Sol](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_104/sol.py) + * 
[Sol](project_euler/problem_104/sol.py) * Problem 107 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_107/sol1.py) + * [Sol1](project_euler/problem_107/sol1.py) * Problem 109 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_109/sol1.py) + * [Sol1](project_euler/problem_109/sol1.py) * Problem 112 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_112/sol1.py) + * [Sol1](project_euler/problem_112/sol1.py) * Problem 113 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_113/sol1.py) + * [Sol1](project_euler/problem_113/sol1.py) * Problem 119 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_119/sol1.py) + * [Sol1](project_euler/problem_119/sol1.py) * Problem 120 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_120/sol1.py) + * [Sol1](project_euler/problem_120/sol1.py) * Problem 121 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_121/sol1.py) + * [Sol1](project_euler/problem_121/sol1.py) * Problem 123 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_123/sol1.py) + * [Sol1](project_euler/problem_123/sol1.py) * Problem 125 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_125/sol1.py) + * [Sol1](project_euler/problem_125/sol1.py) * Problem 129 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_129/sol1.py) + * [Sol1](project_euler/problem_129/sol1.py) * Problem 135 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_135/sol1.py) + * [Sol1](project_euler/problem_135/sol1.py) * Problem 144 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_144/sol1.py) + * [Sol1](project_euler/problem_144/sol1.py) * Problem 145 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_145/sol1.py) + * [Sol1](project_euler/problem_145/sol1.py) * Problem 173 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_173/sol1.py) + * [Sol1](project_euler/problem_173/sol1.py) * Problem 174 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_174/sol1.py) + * [Sol1](project_euler/problem_174/sol1.py) * Problem 180 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_180/sol1.py) + * [Sol1](project_euler/problem_180/sol1.py) * Problem 188 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_188/sol1.py) + * [Sol1](project_euler/problem_188/sol1.py) * Problem 191 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_191/sol1.py) + * [Sol1](project_euler/problem_191/sol1.py) * Problem 203 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_203/sol1.py) + * [Sol1](project_euler/problem_203/sol1.py) * Problem 205 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_205/sol1.py) + * [Sol1](project_euler/problem_205/sol1.py) * Problem 206 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_206/sol1.py) + * [Sol1](project_euler/problem_206/sol1.py) * Problem 207 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_207/sol1.py) + * [Sol1](project_euler/problem_207/sol1.py) * Problem 234 - * 
[Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_234/sol1.py) + * [Sol1](project_euler/problem_234/sol1.py) * Problem 301 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_301/sol1.py) + * [Sol1](project_euler/problem_301/sol1.py) * Problem 493 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_493/sol1.py) + * [Sol1](project_euler/problem_493/sol1.py) * Problem 551 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_551/sol1.py) + * [Sol1](project_euler/problem_551/sol1.py) * Problem 686 - * [Sol1](https://github.com/TheAlgorithms/Python/blob/master/project_euler/problem_686/sol1.py) + * [Sol1](project_euler/problem_686/sol1.py) ## Quantum - * [Deutsch Jozsa](https://github.com/TheAlgorithms/Python/blob/master/quantum/deutsch_jozsa.py) - * [Half Adder](https://github.com/TheAlgorithms/Python/blob/master/quantum/half_adder.py) - * [Not Gate](https://github.com/TheAlgorithms/Python/blob/master/quantum/not_gate.py) - * [Quantum Entanglement](https://github.com/TheAlgorithms/Python/blob/master/quantum/quantum_entanglement.py) - * [Ripple Adder Classic](https://github.com/TheAlgorithms/Python/blob/master/quantum/ripple_adder_classic.py) - * [Single Qubit Measure](https://github.com/TheAlgorithms/Python/blob/master/quantum/single_qubit_measure.py) + * [Deutsch Jozsa](quantum/deutsch_jozsa.py) + * [Half Adder](quantum/half_adder.py) + * [Not Gate](quantum/not_gate.py) + * [Quantum Entanglement](quantum/quantum_entanglement.py) + * [Ripple Adder Classic](quantum/ripple_adder_classic.py) + * [Single Qubit Measure](quantum/single_qubit_measure.py) ## Scheduling - * [First Come First Served](https://github.com/TheAlgorithms/Python/blob/master/scheduling/first_come_first_served.py) - * [Round Robin](https://github.com/TheAlgorithms/Python/blob/master/scheduling/round_robin.py) - * [Shortest Job First](https://github.com/TheAlgorithms/Python/blob/master/scheduling/shortest_job_first.py) + * [First Come First Served](scheduling/first_come_first_served.py) + * [Multi Level Feedback Queue](scheduling/multi_level_feedback_queue.py) + * [Non Preemptive Shortest Job First](scheduling/non_preemptive_shortest_job_first.py) + * [Round Robin](scheduling/round_robin.py) + * [Shortest Job First](scheduling/shortest_job_first.py) ## Searches - * [Binary Search](https://github.com/TheAlgorithms/Python/blob/master/searches/binary_search.py) - * [Binary Tree Traversal](https://github.com/TheAlgorithms/Python/blob/master/searches/binary_tree_traversal.py) - * [Double Linear Search](https://github.com/TheAlgorithms/Python/blob/master/searches/double_linear_search.py) - * [Double Linear Search Recursion](https://github.com/TheAlgorithms/Python/blob/master/searches/double_linear_search_recursion.py) - * [Fibonacci Search](https://github.com/TheAlgorithms/Python/blob/master/searches/fibonacci_search.py) - * [Hill Climbing](https://github.com/TheAlgorithms/Python/blob/master/searches/hill_climbing.py) - * [Interpolation Search](https://github.com/TheAlgorithms/Python/blob/master/searches/interpolation_search.py) - * [Jump Search](https://github.com/TheAlgorithms/Python/blob/master/searches/jump_search.py) - * [Linear Search](https://github.com/TheAlgorithms/Python/blob/master/searches/linear_search.py) - * [Quick Select](https://github.com/TheAlgorithms/Python/blob/master/searches/quick_select.py) - * [Sentinel Linear 
Search](https://github.com/TheAlgorithms/Python/blob/master/searches/sentinel_linear_search.py) - * [Simple Binary Search](https://github.com/TheAlgorithms/Python/blob/master/searches/simple_binary_search.py) - * [Simulated Annealing](https://github.com/TheAlgorithms/Python/blob/master/searches/simulated_annealing.py) - * [Tabu Search](https://github.com/TheAlgorithms/Python/blob/master/searches/tabu_search.py) - * [Ternary Search](https://github.com/TheAlgorithms/Python/blob/master/searches/ternary_search.py) + * [Binary Search](searches/binary_search.py) + * [Binary Tree Traversal](searches/binary_tree_traversal.py) + * [Double Linear Search](searches/double_linear_search.py) + * [Double Linear Search Recursion](searches/double_linear_search_recursion.py) + * [Fibonacci Search](searches/fibonacci_search.py) + * [Hill Climbing](searches/hill_climbing.py) + * [Interpolation Search](searches/interpolation_search.py) + * [Jump Search](searches/jump_search.py) + * [Linear Search](searches/linear_search.py) + * [Quick Select](searches/quick_select.py) + * [Sentinel Linear Search](searches/sentinel_linear_search.py) + * [Simple Binary Search](searches/simple_binary_search.py) + * [Simulated Annealing](searches/simulated_annealing.py) + * [Tabu Search](searches/tabu_search.py) + * [Ternary Search](searches/ternary_search.py) ## Sorts - * [Bead Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bead_sort.py) - * [Bitonic Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bitonic_sort.py) - * [Bogo Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bogo_sort.py) - * [Bubble Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bubble_sort.py) - * [Bucket Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/bucket_sort.py) - * [Cocktail Shaker Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/cocktail_shaker_sort.py) - * [Comb Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/comb_sort.py) - * [Counting Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/counting_sort.py) - * [Cycle Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/cycle_sort.py) - * [Double Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/double_sort.py) - * [Dutch National Flag Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/dutch_national_flag_sort.py) - * [Exchange Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/exchange_sort.py) - * [External Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/external_sort.py) - * [Gnome Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/gnome_sort.py) - * [Heap Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/heap_sort.py) - * [Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/insertion_sort.py) - * [Intro Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/intro_sort.py) - * [Iterative Merge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/iterative_merge_sort.py) - * [Merge Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/merge_insertion_sort.py) - * [Merge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/merge_sort.py) - * [Msd Radix Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/msd_radix_sort.py) - * [Natural Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/natural_sort.py) - * [Odd Even Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_sort.py) 
- * [Odd Even Transposition Parallel](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_transposition_parallel.py) - * [Odd Even Transposition Single Threaded](https://github.com/TheAlgorithms/Python/blob/master/sorts/odd_even_transposition_single_threaded.py) - * [Pancake Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/pancake_sort.py) - * [Patience Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/patience_sort.py) - * [Pigeon Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/pigeon_sort.py) - * [Pigeonhole Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/pigeonhole_sort.py) - * [Quick Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/quick_sort.py) - * [Quick Sort 3 Partition](https://github.com/TheAlgorithms/Python/blob/master/sorts/quick_sort_3_partition.py) - * [Radix Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/radix_sort.py) - * [Random Normal Distribution Quicksort](https://github.com/TheAlgorithms/Python/blob/master/sorts/random_normal_distribution_quicksort.py) - * [Random Pivot Quick Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/random_pivot_quick_sort.py) - * [Recursive Bubble Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_bubble_sort.py) - * [Recursive Insertion Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_insertion_sort.py) - * [Recursive Mergesort Array](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_mergesort_array.py) - * [Recursive Quick Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/recursive_quick_sort.py) - * [Selection Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/selection_sort.py) - * [Shell Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/shell_sort.py) - * [Slowsort](https://github.com/TheAlgorithms/Python/blob/master/sorts/slowsort.py) - * [Stooge Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/stooge_sort.py) - * [Strand Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/strand_sort.py) - * [Tim Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/tim_sort.py) - * [Topological Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/topological_sort.py) - * [Tree Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/tree_sort.py) - * [Unknown Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/unknown_sort.py) - * [Wiggle Sort](https://github.com/TheAlgorithms/Python/blob/master/sorts/wiggle_sort.py) + * [Bead Sort](sorts/bead_sort.py) + * [Bitonic Sort](sorts/bitonic_sort.py) + * [Bogo Sort](sorts/bogo_sort.py) + * [Bubble Sort](sorts/bubble_sort.py) + * [Bucket Sort](sorts/bucket_sort.py) + * [Cocktail Shaker Sort](sorts/cocktail_shaker_sort.py) + * [Comb Sort](sorts/comb_sort.py) + * [Counting Sort](sorts/counting_sort.py) + * [Cycle Sort](sorts/cycle_sort.py) + * [Double Sort](sorts/double_sort.py) + * [Dutch National Flag Sort](sorts/dutch_national_flag_sort.py) + * [Exchange Sort](sorts/exchange_sort.py) + * [External Sort](sorts/external_sort.py) + * [Gnome Sort](sorts/gnome_sort.py) + * [Heap Sort](sorts/heap_sort.py) + * [Insertion Sort](sorts/insertion_sort.py) + * [Intro Sort](sorts/intro_sort.py) + * [Iterative Merge Sort](sorts/iterative_merge_sort.py) + * [Merge Insertion Sort](sorts/merge_insertion_sort.py) + * [Merge Sort](sorts/merge_sort.py) + * [Msd Radix Sort](sorts/msd_radix_sort.py) + * [Natural 
Sort](sorts/natural_sort.py) + * [Odd Even Sort](sorts/odd_even_sort.py) + * [Odd Even Transposition Parallel](sorts/odd_even_transposition_parallel.py) + * [Odd Even Transposition Single Threaded](sorts/odd_even_transposition_single_threaded.py) + * [Pancake Sort](sorts/pancake_sort.py) + * [Patience Sort](sorts/patience_sort.py) + * [Pigeon Sort](sorts/pigeon_sort.py) + * [Pigeonhole Sort](sorts/pigeonhole_sort.py) + * [Quick Sort](sorts/quick_sort.py) + * [Quick Sort 3 Partition](sorts/quick_sort_3_partition.py) + * [Radix Sort](sorts/radix_sort.py) + * [Random Normal Distribution Quicksort](sorts/random_normal_distribution_quicksort.py) + * [Random Pivot Quick Sort](sorts/random_pivot_quick_sort.py) + * [Recursive Bubble Sort](sorts/recursive_bubble_sort.py) + * [Recursive Insertion Sort](sorts/recursive_insertion_sort.py) + * [Recursive Mergesort Array](sorts/recursive_mergesort_array.py) + * [Recursive Quick Sort](sorts/recursive_quick_sort.py) + * [Selection Sort](sorts/selection_sort.py) + * [Shell Sort](sorts/shell_sort.py) + * [Slowsort](sorts/slowsort.py) + * [Stooge Sort](sorts/stooge_sort.py) + * [Strand Sort](sorts/strand_sort.py) + * [Tim Sort](sorts/tim_sort.py) + * [Topological Sort](sorts/topological_sort.py) + * [Tree Sort](sorts/tree_sort.py) + * [Unknown Sort](sorts/unknown_sort.py) + * [Wiggle Sort](sorts/wiggle_sort.py) ## Strings - * [Aho Corasick](https://github.com/TheAlgorithms/Python/blob/master/strings/aho_corasick.py) - * [Alternative String Arrange](https://github.com/TheAlgorithms/Python/blob/master/strings/alternative_string_arrange.py) - * [Anagrams](https://github.com/TheAlgorithms/Python/blob/master/strings/anagrams.py) - * [Autocomplete Using Trie](https://github.com/TheAlgorithms/Python/blob/master/strings/autocomplete_using_trie.py) - * [Boyer Moore Search](https://github.com/TheAlgorithms/Python/blob/master/strings/boyer_moore_search.py) - * [Can String Be Rearranged As Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/can_string_be_rearranged_as_palindrome.py) - * [Capitalize](https://github.com/TheAlgorithms/Python/blob/master/strings/capitalize.py) - * [Check Anagrams](https://github.com/TheAlgorithms/Python/blob/master/strings/check_anagrams.py) - * [Check Pangram](https://github.com/TheAlgorithms/Python/blob/master/strings/check_pangram.py) - * [Credit Card Validator](https://github.com/TheAlgorithms/Python/blob/master/strings/credit_card_validator.py) - * [Detecting English Programmatically](https://github.com/TheAlgorithms/Python/blob/master/strings/detecting_english_programmatically.py) - * [Frequency Finder](https://github.com/TheAlgorithms/Python/blob/master/strings/frequency_finder.py) - * [Indian Phone Validator](https://github.com/TheAlgorithms/Python/blob/master/strings/indian_phone_validator.py) - * [Is Contains Unique Chars](https://github.com/TheAlgorithms/Python/blob/master/strings/is_contains_unique_chars.py) - * [Is Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/is_palindrome.py) - * [Jaro Winkler](https://github.com/TheAlgorithms/Python/blob/master/strings/jaro_winkler.py) - * [Join](https://github.com/TheAlgorithms/Python/blob/master/strings/join.py) - * [Knuth Morris Pratt](https://github.com/TheAlgorithms/Python/blob/master/strings/knuth_morris_pratt.py) - * [Levenshtein Distance](https://github.com/TheAlgorithms/Python/blob/master/strings/levenshtein_distance.py) - * [Lower](https://github.com/TheAlgorithms/Python/blob/master/strings/lower.py) - * 
[Manacher](https://github.com/TheAlgorithms/Python/blob/master/strings/manacher.py) - * [Min Cost String Conversion](https://github.com/TheAlgorithms/Python/blob/master/strings/min_cost_string_conversion.py) - * [Naive String Search](https://github.com/TheAlgorithms/Python/blob/master/strings/naive_string_search.py) - * [Ngram](https://github.com/TheAlgorithms/Python/blob/master/strings/ngram.py) - * [Palindrome](https://github.com/TheAlgorithms/Python/blob/master/strings/palindrome.py) - * [Prefix Function](https://github.com/TheAlgorithms/Python/blob/master/strings/prefix_function.py) - * [Rabin Karp](https://github.com/TheAlgorithms/Python/blob/master/strings/rabin_karp.py) - * [Remove Duplicate](https://github.com/TheAlgorithms/Python/blob/master/strings/remove_duplicate.py) - * [Reverse Letters](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_letters.py) - * [Reverse Long Words](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_long_words.py) - * [Reverse Words](https://github.com/TheAlgorithms/Python/blob/master/strings/reverse_words.py) - * [Split](https://github.com/TheAlgorithms/Python/blob/master/strings/split.py) - * [Upper](https://github.com/TheAlgorithms/Python/blob/master/strings/upper.py) - * [Wildcard Pattern Matching](https://github.com/TheAlgorithms/Python/blob/master/strings/wildcard_pattern_matching.py) - * [Word Occurrence](https://github.com/TheAlgorithms/Python/blob/master/strings/word_occurrence.py) - * [Word Patterns](https://github.com/TheAlgorithms/Python/blob/master/strings/word_patterns.py) - * [Z Function](https://github.com/TheAlgorithms/Python/blob/master/strings/z_function.py) + * [Aho Corasick](strings/aho_corasick.py) + * [Alternative String Arrange](strings/alternative_string_arrange.py) + * [Anagrams](strings/anagrams.py) + * [Autocomplete Using Trie](strings/autocomplete_using_trie.py) + * [Boyer Moore Search](strings/boyer_moore_search.py) + * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py) + * [Capitalize](strings/capitalize.py) + * [Check Anagrams](strings/check_anagrams.py) + * [Check Pangram](strings/check_pangram.py) + * [Credit Card Validator](strings/credit_card_validator.py) + * [Detecting English Programmatically](strings/detecting_english_programmatically.py) + * [Frequency Finder](strings/frequency_finder.py) + * [Indian Phone Validator](strings/indian_phone_validator.py) + * [Is Contains Unique Chars](strings/is_contains_unique_chars.py) + * [Is Palindrome](strings/is_palindrome.py) + * [Jaro Winkler](strings/jaro_winkler.py) + * [Join](strings/join.py) + * [Knuth Morris Pratt](strings/knuth_morris_pratt.py) + * [Levenshtein Distance](strings/levenshtein_distance.py) + * [Lower](strings/lower.py) + * [Manacher](strings/manacher.py) + * [Min Cost String Conversion](strings/min_cost_string_conversion.py) + * [Naive String Search](strings/naive_string_search.py) + * [Ngram](strings/ngram.py) + * [Palindrome](strings/palindrome.py) + * [Prefix Function](strings/prefix_function.py) + * [Rabin Karp](strings/rabin_karp.py) + * [Remove Duplicate](strings/remove_duplicate.py) + * [Reverse Letters](strings/reverse_letters.py) + * [Reverse Long Words](strings/reverse_long_words.py) + * [Reverse Words](strings/reverse_words.py) + * [Split](strings/split.py) + * [Upper](strings/upper.py) + * [Wildcard Pattern Matching](strings/wildcard_pattern_matching.py) + * [Word Occurrence](strings/word_occurrence.py) + * [Word Patterns](strings/word_patterns.py) + * [Z 
Function](strings/z_function.py) ## Web Programming - * [Co2 Emission](https://github.com/TheAlgorithms/Python/blob/master/web_programming/co2_emission.py) - * [Covid Stats Via Xpath](https://github.com/TheAlgorithms/Python/blob/master/web_programming/covid_stats_via_xpath.py) - * [Crawl Google Results](https://github.com/TheAlgorithms/Python/blob/master/web_programming/crawl_google_results.py) - * [Crawl Google Scholar Citation](https://github.com/TheAlgorithms/Python/blob/master/web_programming/crawl_google_scholar_citation.py) - * [Currency Converter](https://github.com/TheAlgorithms/Python/blob/master/web_programming/currency_converter.py) - * [Current Stock Price](https://github.com/TheAlgorithms/Python/blob/master/web_programming/current_stock_price.py) - * [Current Weather](https://github.com/TheAlgorithms/Python/blob/master/web_programming/current_weather.py) - * [Daily Horoscope](https://github.com/TheAlgorithms/Python/blob/master/web_programming/daily_horoscope.py) - * [Download Images From Google Query](https://github.com/TheAlgorithms/Python/blob/master/web_programming/download_images_from_google_query.py) - * [Emails From Url](https://github.com/TheAlgorithms/Python/blob/master/web_programming/emails_from_url.py) - * [Fetch Anime And Play](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_anime_and_play.py) - * [Fetch Bbc News](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_bbc_news.py) - * [Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_github_info.py) - * [Fetch Jobs](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_jobs.py) - * [Fetch Well Rx Price](https://github.com/TheAlgorithms/Python/blob/master/web_programming/fetch_well_rx_price.py) - * [Get Imdb Top 250 Movies Csv](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdb_top_250_movies_csv.py) - * [Get Imdbtop](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_imdbtop.py) - * [Get Top Hn Posts](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_top_hn_posts.py) - * [Get User Tweets](https://github.com/TheAlgorithms/Python/blob/master/web_programming/get_user_tweets.py) - * [Giphy](https://github.com/TheAlgorithms/Python/blob/master/web_programming/giphy.py) - * [Instagram Crawler](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_crawler.py) - * [Instagram Pic](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_pic.py) - * [Instagram Video](https://github.com/TheAlgorithms/Python/blob/master/web_programming/instagram_video.py) - * [Nasa Data](https://github.com/TheAlgorithms/Python/blob/master/web_programming/nasa_data.py) - * [Random Anime Character](https://github.com/TheAlgorithms/Python/blob/master/web_programming/random_anime_character.py) - * [Recaptcha Verification](https://github.com/TheAlgorithms/Python/blob/master/web_programming/recaptcha_verification.py) - * [Reddit](https://github.com/TheAlgorithms/Python/blob/master/web_programming/reddit.py) - * [Search Books By Isbn](https://github.com/TheAlgorithms/Python/blob/master/web_programming/search_books_by_isbn.py) - * [Slack Message](https://github.com/TheAlgorithms/Python/blob/master/web_programming/slack_message.py) - * [Test Fetch Github Info](https://github.com/TheAlgorithms/Python/blob/master/web_programming/test_fetch_github_info.py) - * [World Covid19 
Stats](https://github.com/TheAlgorithms/Python/blob/master/web_programming/world_covid19_stats.py) + * [Co2 Emission](web_programming/co2_emission.py) + * [Covid Stats Via Xpath](web_programming/covid_stats_via_xpath.py) + * [Crawl Google Results](web_programming/crawl_google_results.py) + * [Crawl Google Scholar Citation](web_programming/crawl_google_scholar_citation.py) + * [Currency Converter](web_programming/currency_converter.py) + * [Current Stock Price](web_programming/current_stock_price.py) + * [Current Weather](web_programming/current_weather.py) + * [Daily Horoscope](web_programming/daily_horoscope.py) + * [Download Images From Google Query](web_programming/download_images_from_google_query.py) + * [Emails From Url](web_programming/emails_from_url.py) + * [Fetch Anime And Play](web_programming/fetch_anime_and_play.py) + * [Fetch Bbc News](web_programming/fetch_bbc_news.py) + * [Fetch Github Info](web_programming/fetch_github_info.py) + * [Fetch Jobs](web_programming/fetch_jobs.py) + * [Fetch Well Rx Price](web_programming/fetch_well_rx_price.py) + * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) + * [Get Imdbtop](web_programming/get_imdbtop.py) + * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) + * [Get User Tweets](web_programming/get_user_tweets.py) + * [Giphy](web_programming/giphy.py) + * [Instagram Crawler](web_programming/instagram_crawler.py) + * [Instagram Pic](web_programming/instagram_pic.py) + * [Instagram Video](web_programming/instagram_video.py) + * [Nasa Data](web_programming/nasa_data.py) + * [Random Anime Character](web_programming/random_anime_character.py) + * [Recaptcha Verification](web_programming/recaptcha_verification.py) + * [Reddit](web_programming/reddit.py) + * [Search Books By Isbn](web_programming/search_books_by_isbn.py) + * [Slack Message](web_programming/slack_message.py) + * [Test Fetch Github Info](web_programming/test_fetch_github_info.py) + * [World Covid19 Stats](web_programming/world_covid19_stats.py) diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py index 7a4bc3a4b258..71577fe6d4ac 100755 --- a/scripts/build_directory_md.py +++ b/scripts/build_directory_md.py @@ -3,8 +3,6 @@ import os from typing import Iterator -URL_BASE = "https://github.com/TheAlgorithms/Python/blob/master" - def good_file_paths(top_dir: str = ".") -> Iterator[str]: for dir_path, dir_names, filenames in os.walk(top_dir): @@ -36,7 +34,7 @@ def print_directory_md(top_dir: str = ".") -> None: if filepath != old_path: old_path = print_path(old_path, filepath) indent = (filepath.count(os.sep) + 1) if filepath else 0 - url = "/".join((URL_BASE, filepath, filename)).replace(" ", "%20") + url = "/".join((filepath, filename)).replace(" ", "%20") filename = os.path.splitext(filename.replace("_", " ").title())[0] print(f"{md_prefix(indent)} [{filename}]({url})") From 42a80cdaf689b03b326164862318039bd43bbff1 Mon Sep 17 00:00:00 2001 From: Nivid Patel <66813410+nivid26@users.noreply.github.com> Date: Wed, 22 Jun 2022 00:04:18 -0400 Subject: [PATCH 0459/1543] Update basic_maths.py (#6017) --- maths/basic_maths.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/maths/basic_maths.py b/maths/basic_maths.py index 58e797772a28..26c52c54983e 100644 --- a/maths/basic_maths.py +++ b/maths/basic_maths.py @@ -57,6 +57,8 @@ def number_of_divisors(n: int) -> int: temp += 1 n = int(n / i) div *= temp + if n > 1: + div *= 2 return div From a80e5aadf30817251989378e8d908ca18f733a2f Mon Sep 17 00:00:00 2001 From: yulmam 
<70622601+yulmam@users.noreply.github.com>
Date: Wed, 22 Jun 2022 13:23:52 +0900
Subject: [PATCH 0460/1543] add highest_response_ratio_next.py (#6183)

* add highest_response_ratio_next.py

* Update highest_response_ratio_next.py

* Update highest_response_ratio_next.py
---
 scheduling/highest_response_ratio_next.py | 118 ++++++++++++++++++++++
 1 file changed, 118 insertions(+)
 create mode 100644 scheduling/highest_response_ratio_next.py

diff --git a/scheduling/highest_response_ratio_next.py b/scheduling/highest_response_ratio_next.py
new file mode 100644
index 000000000000..a5c62ddbe952
--- /dev/null
+++ b/scheduling/highest_response_ratio_next.py
@@ -0,0 +1,118 @@
+"""
+Highest response ratio next (HRRN) scheduling is a non-preemptive discipline.
+It was developed as a modification of shortest job next or shortest job first (SJN or SJF)
+to mitigate the problem of process starvation.
+https://en.wikipedia.org/wiki/Highest_response_ratio_next
+"""
+from statistics import mean
+
+import numpy as np
+
+
+def calculate_turn_around_time(
+    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
+) -> list:
+    """
+    Calculate the turn around time of each process
+
+    Return: The turn around time for each process.
+    >>> calculate_turn_around_time(["A", "B", "C"], [3, 5, 8], [2, 4, 6], 3)
+    [2, 4, 7]
+    >>> calculate_turn_around_time(["A", "B", "C"], [0, 2, 4], [3, 5, 7], 3)
+    [3, 6, 11]
+    """
+
+    current_time = 0
+    # Number of processes finished
+    finished_process_count = 0
+    # Marks whether each process has finished.
+    # 0 means the process has not finished yet, 1 means it has finished.
+    finished_process = [0] * no_of_process
+    # List to hold the calculated turn around times
+    turn_around_time = [0] * no_of_process
+
+    # Sort by arrival time.
+    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
+    process_name = [process_name[i] for i in np.argsort(arrival_time)]
+    arrival_time.sort()
+
+    while no_of_process > finished_process_count:
+
+        """
+        If the current time is less than the arrival time of
+        the first process that has not yet finished,
+        advance the current time to that arrival time.
+        """
+        i = 0
+        while finished_process[i] == 1:
+            i += 1
+        if current_time < arrival_time[i]:
+            current_time = arrival_time[i]
+
+        response_ratio = 0
+        # Index of the process selected to run next
+        loc = 0
+        # Saves the response ratio currently being considered.
+        temp = 0
+        for i in range(0, no_of_process):
+            if finished_process[i] == 0 and arrival_time[i] <= current_time:
+                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
+                    i
+                ]
+            if response_ratio < temp:
+                response_ratio = temp
+                loc = i
+
+        # Calculate the turn around time
+        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
+        current_time += burst_time[loc]
+        # Mark this process as finished.
+        finished_process[loc] = 1
+        # Increase finished_process_count by 1
+        finished_process_count += 1
+
+    return turn_around_time
+
+
+def calculate_waiting_time(
+    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
+) -> list:
+    """
+    Calculate the waiting time of each process.
+
+    Return: The waiting time for each process.
+ >>> calculate_waiting_time(["A", "B", "C"], [2, 4, 7], [2, 4, 6], 3) + [0, 0, 1] + >>> calculate_waiting_time(["A", "B", "C"], [3, 6, 11], [3, 5, 7], 3) + [0, 1, 4] + """ + + waiting_time = [0] * no_of_process + for i in range(0, no_of_process): + waiting_time[i] = turn_around_time[i] - burst_time[i] + return waiting_time + + +if __name__ == "__main__": + + no_of_process = 5 + process_name = ["A", "B", "C", "D", "E"] + arrival_time = [1, 2, 3, 4, 5] + burst_time = [1, 2, 3, 4, 5] + + turn_around_time = calculate_turn_around_time( + process_name, arrival_time, burst_time, no_of_process + ) + waiting_time = calculate_waiting_time( + process_name, turn_around_time, burst_time, no_of_process + ) + + print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time") + for i in range(0, no_of_process): + print( + f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t" + f"{turn_around_time[i]}\t\t\t{waiting_time[i]}" + ) + + print(f"average waiting time : {mean(waiting_time):.5f}") + print(f"average turn around time : {mean(turn_around_time):.5f}") From 04bc8f01dd81b8f4ca68e470d046fcb571b4d3d0 Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Thu, 23 Jun 2022 19:47:29 +0300 Subject: [PATCH 0461/1543] Wave (#6061) * Added censor function * Added censor code * Added comments to the code * modified censor function * added decrypt function * added cypher and decypher functions, deleted censor and decrypt functions * Deleted decrypt.py * Deleted censor.py * edited the crypt and decrypt files * Update cypher_txt.py * Remove the endline in cypher.py * Removed the print at the end of decypher.py * added 4 new algorithms * added tests to the four files * added type hints for the function variables * Deleted decode message * Deleted code message * Welford average algorithm * added average welford algorithm * is_narcissistic added * added a descriptive name * added max_sectors algorithm * added find_unique * added wave algorithm * deleting average_welford [ in the wrong pr ] * deleting is_narcissistic [ is in the wrong pr ] * deleting max_sectors [ is in the wrong pr ] * deleting find_unique [ is in the wrong pr ] * deleting censor [ is in the wrong pr ] * deleting decrypt [ is in the wrong pr ] * fixed wave.py fixed indentation and followed the bots reccomendations * fixed wave.py again * fixing wave.py for the third time. 
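A minimal, self-contained sketch of the selection rule behind the HRRN patch above (PATCH 0460): the process picked next is the ready one with the highest (waiting time + burst time) / burst time. The helper name and the arrival/burst values below are invented for illustration and are not taken from the patch.

def pick_next_hrrn(arrival_time: list, burst_time: list, current_time: int) -> int:
    # Return the index of the ready process with the highest response ratio.
    best_index, best_ratio = -1, -1.0
    for i, (arrival, burst) in enumerate(zip(arrival_time, burst_time)):
        if arrival <= current_time:  # only processes that have already arrived
            ratio = ((current_time - arrival) + burst) / burst
            if ratio > best_ratio:
                best_index, best_ratio = i, ratio
    return best_index

print(pick_next_hrrn([0, 2, 4], [3, 5, 7], current_time=3))  # 0: ratio 2.0 beats 1.2
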
* fixing wave.py * merging strings/wave.py merging the suggestion Co-authored-by: John Law Co-authored-by: John Law --- strings/wave.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 strings/wave.py diff --git a/strings/wave.py b/strings/wave.py new file mode 100644 index 000000000000..69d534432420 --- /dev/null +++ b/strings/wave.py @@ -0,0 +1,20 @@ +def wave(txt: str) -> list: + """ + Returns a so called 'wave' of a given string + >>> wave('cat') + ['Cat', 'cAt', 'caT'] + >>> wave('one') + ['One', 'oNe', 'onE'] + >>> wave('book') + ['Book', 'bOok', 'boOk', 'booK'] + """ + + return [ + txt[:a] + txt[a].upper() + txt[a + 1 :] + for a in range(len(txt)) + if txt[a].isalpha() + ] + + +if __name__ == "__main__": + __import__("doctest").testmod() From 4a51244e0feee90c8a80d6516628c9acb69c40b3 Mon Sep 17 00:00:00 2001 From: Erik Parmann Date: Thu, 23 Jun 2022 19:00:55 +0200 Subject: [PATCH 0462/1543] Remove how-to example for support vector machine (#6201) --- machine_learning/support_vector_machines.py | 58 --------------------- 1 file changed, 58 deletions(-) delete mode 100644 machine_learning/support_vector_machines.py diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py deleted file mode 100644 index c5e5085d8748..000000000000 --- a/machine_learning/support_vector_machines.py +++ /dev/null @@ -1,58 +0,0 @@ -from sklearn import svm -from sklearn.datasets import load_iris -from sklearn.model_selection import train_test_split - - -# different functions implementing different types of SVM's -def NuSVC(train_x, train_y): - svc_NuSVC = svm.NuSVC() - svc_NuSVC.fit(train_x, train_y) - return svc_NuSVC - - -def Linearsvc(train_x, train_y): - svc_linear = svm.LinearSVC(tol=10e-2) - svc_linear.fit(train_x, train_y) - return svc_linear - - -def SVC(train_x, train_y): - # svm.SVC(C=1.0, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, shrinking=True, - # probability=False,tol=0.001, cache_size=200, class_weight=None, verbose=False, - # max_iter=1000, random_state=None) - # various parameters like "kernel","gamma","C" can effectively tuned for a given - # machine learning model. 
- SVC = svm.SVC(gamma="auto") - SVC.fit(train_x, train_y) - return SVC - - -def test(X_new): - """ - 3 test cases to be passed - an array containing the sepal length (cm), sepal width (cm), petal length (cm), - petal width (cm) based on which the target name will be predicted - >>> test([1,2,1,4]) - 'virginica' - >>> test([5, 2, 4, 1]) - 'versicolor' - >>> test([6,3,4,1]) - 'versicolor' - """ - iris = load_iris() - # splitting the dataset to test and train - train_x, test_x, train_y, test_y = train_test_split( - iris["data"], iris["target"], random_state=4 - ) - # any of the 3 types of SVM can be used - # current_model=SVC(train_x, train_y) - # current_model=NuSVC(train_x, train_y) - current_model = Linearsvc(train_x, train_y) - prediction = current_model.predict([X_new]) - return iris["target_names"][prediction][0] - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From e5d1ff2ea8c3820343c46fd08b089207f75ca03d Mon Sep 17 00:00:00 2001 From: lovetodevelop <103916828+lovetodevelop@users.noreply.github.com> Date: Sun, 3 Jul 2022 09:28:53 -0700 Subject: [PATCH 0463/1543] Fix tiny spelling error (#6219) --- ciphers/enigma_machine2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index 70f84752d55b..9f9dbe6f7cd0 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -8,7 +8,7 @@ Module includes: - enigma function - showcase of function usage -- 9 randnomly generated rotors +- 9 randomly generated rotors - reflector (aka static rotor) - original alphabet From 89fc7bf0b024e4c9508db80f575efd5b5616f932 Mon Sep 17 00:00:00 2001 From: Sedat Aybars Nazlica Date: Wed, 6 Jul 2022 16:19:13 +0900 Subject: [PATCH 0464/1543] Add hamming distance (#6194) * Add hamming distance * Fix doctest * Refactor * Raise ValueError when string lengths are different --- strings/hamming_distance.py | 39 +++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 strings/hamming_distance.py diff --git a/strings/hamming_distance.py b/strings/hamming_distance.py new file mode 100644 index 000000000000..b8feaef06190 --- /dev/null +++ b/strings/hamming_distance.py @@ -0,0 +1,39 @@ +def hamming_distance(string1: str, string2: str) -> int: + """Calculate the Hamming distance between two equal length strings + In information theory, the Hamming distance between two strings of equal + length is the number of positions at which the corresponding symbols are + different. https://en.wikipedia.org/wiki/Hamming_distance + + Args: + string1 (str): Sequence 1 + string2 (str): Sequence 2 + + Returns: + int: Hamming distance + + >>> hamming_distance("python", "python") + 0 + >>> hamming_distance("karolin", "kathrin") + 3 + >>> hamming_distance("00000", "11111") + 5 + >>> hamming_distance("karolin", "kath") + ValueError: String lengths must match! 
+ """ + if len(string1) != len(string2): + raise ValueError("String lengths must match!") + + count = 0 + + for char1, char2 in zip(string1, string2): + if char1 != char2: + count += 1 + + return count + + +if __name__ == "__main__": + + import doctest + + doctest.testmod() From 9135a1f41192ebe1d835282a1465dc284359d95c Mon Sep 17 00:00:00 2001 From: John Law Date: Wed, 6 Jul 2022 16:00:05 +0800 Subject: [PATCH 0465/1543] Fix doctests and builds in various files (#6233) * Fix doctest in hamming distance * add line break * try to fix quantum_riper_adder * fix floating point build --- arithmetic_analysis/in_static_equilibrium.py | 7 +++++-- quantum/ripple_adder_classic.py | 4 ++-- strings/hamming_distance.py | 2 ++ 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/arithmetic_analysis/in_static_equilibrium.py b/arithmetic_analysis/in_static_equilibrium.py index d762a376f577..7aaecf174a5e 100644 --- a/arithmetic_analysis/in_static_equilibrium.py +++ b/arithmetic_analysis/in_static_equilibrium.py @@ -19,8 +19,11 @@ def polar_force( True >>> math.isclose(force[1], 7.0710678118654755) True - >>> polar_force(10, 3.14, radian_mode=True) - [-9.999987317275396, 0.01592652916486828] + >>> force = polar_force(10, 3.14, radian_mode=True) + >>> math.isclose(force[0], -9.999987317275396) + True + >>> math.isclose(force[1], 0.01592652916486828) + True """ if radian_mode: return [magnitude * cos(angle), magnitude * sin(angle)] diff --git a/quantum/ripple_adder_classic.py b/quantum/ripple_adder_classic.py index 8539a62afd52..1d3724476068 100644 --- a/quantum/ripple_adder_classic.py +++ b/quantum/ripple_adder_classic.py @@ -3,7 +3,7 @@ # https://en.wikipedia.org/wiki/Controlled_NOT_gate from qiskit import Aer, QuantumCircuit, execute -from qiskit.providers import BaseBackend +from qiskit.providers import Backend def store_two_classics(val1: int, val2: int) -> tuple[QuantumCircuit, str, str]: @@ -62,7 +62,7 @@ def full_adder( def ripple_adder( val1: int, val2: int, - backend: BaseBackend = Aer.get_backend("qasm_simulator"), # noqa: B008 + backend: Backend = Aer.get_backend("qasm_simulator"), # noqa: B008 ) -> int: """ Quantum Equivalent of a Ripple Adder Circuit diff --git a/strings/hamming_distance.py b/strings/hamming_distance.py index b8feaef06190..5de27dc77f44 100644 --- a/strings/hamming_distance.py +++ b/strings/hamming_distance.py @@ -18,6 +18,8 @@ def hamming_distance(string1: str, string2: str) -> int: >>> hamming_distance("00000", "11111") 5 >>> hamming_distance("karolin", "kath") + Traceback (most recent call last): + ... ValueError: String lengths must match! 
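Stepping back from the doctest fix for a moment: the core computation in hamming_distance() can be cross-checked with a tiny standalone sketch. The short helper name below is invented for illustration and, unlike the patched function, it assumes equal-length inputs instead of raising.

def hamming(string1: str, string2: str) -> int:
    # Count the positions at which the two strings differ.
    return sum(char1 != char2 for char1, char2 in zip(string1, string2))

assert hamming("karolin", "kathrin") == 3
assert hamming("00000", "11111") == 5
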
""" if len(string1) != len(string2): From 0a0f4986e4fde05ebc2a24c9cc2cd6b8200b8df1 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 7 Jul 2022 05:25:25 +0200 Subject: [PATCH 0466/1543] Upgrade GitHub Actions (#6236) * Upgrade GitHub Actions * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 8 ++++---- .github/workflows/directory_writer.yml | 6 ++++-- .github/workflows/pre-commit.yml | 10 +++++----- .github/workflows/project_euler.yml | 12 ++++++++---- .pre-commit-config.yaml | 8 ++++---- DIRECTORY.md | 4 +++- 6 files changed, 28 insertions(+), 20 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 403ec44c888d..8481b962a256 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -9,11 +9,11 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 with: - python-version: "3.10" - - uses: actions/cache@v2 + python-version: 3.x + - uses: actions/cache@v3 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} diff --git a/.github/workflows/directory_writer.yml b/.github/workflows/directory_writer.yml index be8154a32696..331962cef11e 100644 --- a/.github/workflows/directory_writer.yml +++ b/.github/workflows/directory_writer.yml @@ -6,8 +6,10 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 # v1, NOT v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v1 # v1, NOT v2 or v3 + - uses: actions/setup-python@v4 + with: + python-version: 3.x - name: Write DIRECTORY.md run: | scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index de73c96adfb1..3b128bc540bf 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -6,17 +6,17 @@ jobs: pre-commit: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/cache@v2 + - uses: actions/checkout@v3 + - uses: actions/cache@v3 with: path: | ~/.cache/pre-commit ~/.cache/pip key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} - - uses: actions/setup-python@v2 + - uses: actions/setup-python@v4 with: - python-version: "3.10" - - uses: psf/black@21.4b0 + python-version: 3.x + # - uses: psf/black@22.6.0 - name: Install pre-commit run: | python -m pip install --upgrade pip diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index 995295fcaa9a..460938219c14 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -14,8 +14,10 @@ jobs: project-euler: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: 3.x - name: Install pytest and pytest-cov run: | python -m pip install --upgrade pip @@ -24,8 +26,10 @@ jobs: validate-solutions: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: 3.x - name: Install pytest and requests run: | python -m pip install --upgrade pip diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 33da02fb72ad..90feb50ff2af 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - 
repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.1.0 + rev: v4.3.0 hooks: - id: check-executables-have-shebangs - id: check-yaml @@ -14,7 +14,7 @@ repos: - id: requirements-txt-fixer - repo: https://github.com/psf/black - rev: 22.3.0 + rev: 22.6.0 hooks: - id: black @@ -26,7 +26,7 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v2.31.0 + rev: v2.34.0 hooks: - id: pyupgrade args: @@ -42,7 +42,7 @@ repos: - --max-line-length=88 - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.931 + rev: v0.961 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index d30e275d067f..2e9c03cbcd9b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -444,7 +444,6 @@ * [Scoring Functions](machine_learning/scoring_functions.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) * [Similarity Search](machine_learning/similarity_search.py) - * [Support Vector Machines](machine_learning/support_vector_machines.py) * [Word Frequency Functions](machine_learning/word_frequency_functions.py) ## Maths @@ -910,6 +909,7 @@ ## Scheduling * [First Come First Served](scheduling/first_come_first_served.py) + * [Highest Response Ratio Next](scheduling/highest_response_ratio_next.py) * [Multi Level Feedback Queue](scheduling/multi_level_feedback_queue.py) * [Non Preemptive Shortest Job First](scheduling/non_preemptive_shortest_job_first.py) * [Round Robin](scheduling/round_robin.py) @@ -995,6 +995,7 @@ * [Credit Card Validator](strings/credit_card_validator.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) * [Frequency Finder](strings/frequency_finder.py) + * [Hamming Distance](strings/hamming_distance.py) * [Indian Phone Validator](strings/indian_phone_validator.py) * [Is Contains Unique Chars](strings/is_contains_unique_chars.py) * [Is Palindrome](strings/is_palindrome.py) @@ -1016,6 +1017,7 @@ * [Reverse Words](strings/reverse_words.py) * [Split](strings/split.py) * [Upper](strings/upper.py) + * [Wave](strings/wave.py) * [Wildcard Pattern Matching](strings/wildcard_pattern_matching.py) * [Word Occurrence](strings/word_occurrence.py) * [Word Patterns](strings/word_patterns.py) From 2d5dd6f132a25165473471bf83765ec50c9f14d6 Mon Sep 17 00:00:00 2001 From: Vardhaman <83634399+cyai@users.noreply.github.com> Date: Thu, 7 Jul 2022 20:04:07 +0530 Subject: [PATCH 0467/1543] MAINT: Updated f-string method (#6230) * MAINT: Used f-string method Updated the code with f-string methods wherever required for a better and cleaner understanding of the code. * Updated files with f-string method * Update rsa_key_generator.py * Update rsa_key_generator.py * Update elgamal_key_generator.py * Update lru_cache.py I don't think this change is efficient but it might tackle the error as the error was due to using long character lines. 
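The pattern this patch applies throughout is the replacement of old-style %-interpolation with an f-string; a minimal before/after sketch of the idea, using an invented value for name rather than anything taken from the diff:

name = "rsa"
# before: %-formatting, as in the removed lines
print("Writing public key to file %s_pubkey.txt..." % name)
# after: an f-string that evaluates the expression inline, as in the added lines
print(f"Writing public key to file {name}_pubkey.txt...")
# both statements print: Writing public key to file rsa_pubkey.txt...
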
* Update lru_cache.py * Update lru_cache.py Co-authored-by: cyai Co-authored-by: Christian Clauss --- ciphers/elgamal_key_generator.py | 12 +++++------- ciphers/rsa_cipher.py | 4 ++-- ciphers/rsa_key_generator.py | 12 +++++------- ciphers/transposition_cipher.py | 4 ++-- ciphers/transposition_cipher_encrypt_decrypt_file.py | 4 ++-- ciphers/vigenere_cipher.py | 2 +- data_structures/binary_tree/binary_search_tree.py | 2 +- hashes/chaos_machine.py | 2 +- machine_learning/gradient_boosting_regressor.py | 4 ++-- machine_learning/k_means_clust.py | 4 +--- machine_learning/linear_regression.py | 2 +- matrix/sherman_morrison.py | 2 +- neural_network/convolution_neural_network.py | 4 ++-- other/lru_cache.py | 5 +++-- other/scoring_algorithm.py | 2 +- scheduling/shortest_job_first.py | 2 +- searches/binary_tree_traversal.py | 4 ++-- strings/min_cost_string_conversion.py | 12 ++++++------ 18 files changed, 39 insertions(+), 44 deletions(-) diff --git a/ciphers/elgamal_key_generator.py b/ciphers/elgamal_key_generator.py index f557b0e0dc91..485b77595c7c 100644 --- a/ciphers/elgamal_key_generator.py +++ b/ciphers/elgamal_key_generator.py @@ -38,9 +38,7 @@ def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, i def make_key_files(name: str, keySize: int) -> None: - if os.path.exists("%s_pubkey.txt" % name) or os.path.exists( - "%s_privkey.txt" % name - ): + if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"): print("\nWARNING:") print( '"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n' @@ -50,14 +48,14 @@ def make_key_files(name: str, keySize: int) -> None: sys.exit() publicKey, privateKey = generate_key(keySize) - print("\nWriting public key to file %s_pubkey.txt..." % name) - with open("%s_pubkey.txt" % name, "w") as fo: + print(f"\nWriting public key to file {name}_pubkey.txt...") + with open(f"{name}_pubkey.txt", "w") as fo: fo.write( "%d,%d,%d,%d" % (publicKey[0], publicKey[1], publicKey[2], publicKey[3]) ) - print("Writing private key to file %s_privkey.txt..." % name) - with open("%s_privkey.txt" % name, "w") as fo: + print(f"Writing private key to file {name}_privkey.txt...") + with open(f"{name}_privkey.txt", "w") as fo: fo.write("%d,%d" % (privateKey[0], privateKey[1])) diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py index 5bb9f9916de5..c6bfaa0fb00c 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ -129,7 +129,7 @@ def main() -> None: message = input("\nEnter message: ") pubkey_filename = "rsa_pubkey.txt" - print("Encrypting and writing to %s..." % (filename)) + print(f"Encrypting and writing to {filename}...") encryptedText = encrypt_and_write_to_file(filename, pubkey_filename, message) print("\nEncrypted text:") @@ -137,7 +137,7 @@ def main() -> None: elif mode == "decrypt": privkey_filename = "rsa_privkey.txt" - print("Reading from %s and decrypting..." 
% (filename)) + print(f"Reading from {filename} and decrypting...") decrypted_text = read_from_file_and_decrypt(filename, privkey_filename) print("writing decryption to rsa_decryption.txt...") with open("rsa_decryption.txt", "w") as dec: diff --git a/ciphers/rsa_key_generator.py b/ciphers/rsa_key_generator.py index 584066d8970f..d983c14f1d7e 100644 --- a/ciphers/rsa_key_generator.py +++ b/ciphers/rsa_key_generator.py @@ -34,9 +34,7 @@ def generateKey(keySize: int) -> tuple[tuple[int, int], tuple[int, int]]: def makeKeyFiles(name: str, keySize: int) -> None: - if os.path.exists("%s_pubkey.txt" % (name)) or os.path.exists( - "%s_privkey.txt" % (name) - ): + if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"): print("\nWARNING:") print( '"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n' @@ -46,12 +44,12 @@ def makeKeyFiles(name: str, keySize: int) -> None: sys.exit() publicKey, privateKey = generateKey(keySize) - print("\nWriting public key to file %s_pubkey.txt..." % name) - with open("%s_pubkey.txt" % name, "w") as out_file: + print(f"\nWriting public key to file {name}_pubkey.txt...") + with open(f"{name}_pubkey.txt", "w") as out_file: out_file.write(f"{keySize},{publicKey[0]},{publicKey[1]}") - print("Writing private key to file %s_privkey.txt..." % name) - with open("%s_privkey.txt" % name, "w") as out_file: + print(f"Writing private key to file {name}_privkey.txt...") + with open(f"{name}_privkey.txt", "w") as out_file: out_file.write(f"{keySize},{privateKey[0]},{privateKey[1]}") diff --git a/ciphers/transposition_cipher.py b/ciphers/transposition_cipher.py index 589bb8cb5cd5..ed9923a6ba46 100644 --- a/ciphers/transposition_cipher.py +++ b/ciphers/transposition_cipher.py @@ -10,7 +10,7 @@ def main() -> None: message = input("Enter message: ") - key = int(input("Enter key [2-%s]: " % (len(message) - 1))) + key = int(input(f"Enter key [2-{len(message) - 1}]: ")) mode = input("Encryption/Decryption [e/d]: ") if mode.lower().startswith("e"): @@ -19,7 +19,7 @@ def main() -> None: text = decryptMessage(key, message) # Append pipe symbol (vertical bar) to identify spaces at the end. - print("Output:\n%s" % (text + "|")) + print(f"Output:\n{text + '|'}") def encryptMessage(key: int, message: str) -> str: diff --git a/ciphers/transposition_cipher_encrypt_decrypt_file.py b/ciphers/transposition_cipher_encrypt_decrypt_file.py index b91c73c9f2ad..926a1b36ac44 100644 --- a/ciphers/transposition_cipher_encrypt_decrypt_file.py +++ b/ciphers/transposition_cipher_encrypt_decrypt_file.py @@ -12,10 +12,10 @@ def main() -> None: mode = input("Encrypt/Decrypt [e/d]: ") if not os.path.exists(inputFile): - print("File %s does not exist. Quitting..." % inputFile) + print(f"File {inputFile} does not exist. Quitting...") sys.exit() if os.path.exists(outputFile): - print("Overwrite %s? [y/n]" % outputFile) + print(f"Overwrite {outputFile}? 
[y/n]") response = input("> ") if not response.lower().startswith("y"): sys.exit() diff --git a/ciphers/vigenere_cipher.py b/ciphers/vigenere_cipher.py index d97a96949fb8..2e3987708d01 100644 --- a/ciphers/vigenere_cipher.py +++ b/ciphers/vigenere_cipher.py @@ -13,7 +13,7 @@ def main() -> None: mode = "decrypt" translated = decryptMessage(key, message) - print("\n%sed message:" % mode.title()) + print(f"\n{mode.title()}ed message:") print(translated) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index ce490fd98524..b9af23dc8b00 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -15,7 +15,7 @@ def __repr__(self): if self.left is None and self.right is None: return str(self.value) - return pformat({"%s" % (self.value): (self.left, self.right)}, indent=1) + return pformat({f"{self.value}": (self.left, self.right)}, indent=1) class BinarySearchTree: diff --git a/hashes/chaos_machine.py b/hashes/chaos_machine.py index 7ad3e5540479..a6d476eb7320 100644 --- a/hashes/chaos_machine.py +++ b/hashes/chaos_machine.py @@ -96,7 +96,7 @@ def reset(): # Pulling Data (Output) while inp in ("e", "E"): - print("%s" % format(pull(), "#04x")) + print(f"{format(pull(), '#04x')}") print(buffer_space) print(params_space) inp = input("(e)exit? ").strip() diff --git a/machine_learning/gradient_boosting_regressor.py b/machine_learning/gradient_boosting_regressor.py index 0aa0e7a10ac5..c73e30680a67 100644 --- a/machine_learning/gradient_boosting_regressor.py +++ b/machine_learning/gradient_boosting_regressor.py @@ -47,9 +47,9 @@ def main(): y_pred = model.predict(X_test) # The mean squared error - print("Mean squared error: %.2f" % mean_squared_error(y_test, y_pred)) + print(f"Mean squared error: {mean_squared_error(y_test, y_pred):.2f}") # Explained variance score: 1 is perfect prediction - print("Test Variance score: %.2f" % r2_score(y_test, y_pred)) + print(f"Test Variance score: {r2_score(y_test, y_pred):.2f}") # So let's run the model against the test data fig, ax = plt.subplots() diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 10c9374d8492..60450b7f8493 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -164,9 +164,7 @@ def kmeans( num_changed = np.sum(prev_cluster_assignment != cluster_assignment) if verbose: print( - " {:5d} elements changed their cluster assignment.".format( - num_changed - ) + f" {num_changed:5d} elements changed their cluster assignment." 
) # Record heterogeneity convergence metric diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index b0bbc7b904c3..85fdfb0005ac 100644 --- a/machine_learning/linear_regression.py +++ b/machine_learning/linear_regression.py @@ -99,7 +99,7 @@ def main(): len_result = theta.shape[1] print("Resultant Feature vector : ") for i in range(0, len_result): - print("%.5f" % (theta[0, i])) + print(f"{theta[0, i]:.5f}") if __name__ == "__main__": diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 3466b3d4a01f..63783c8b40fc 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -256,7 +256,7 @@ def test1(): v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5 print(f"u is {u}") print(f"v is {v}") - print("uv^T is %s" % (u * v.transpose())) + print(f"uv^T is {u * v.transpose()}") # Sherman Morrison print(f"(a + uv^T)^(-1) is {ainv.ShermanMorrison(u, v)}") diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index d821488025ef..e3993efb4249 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -71,7 +71,7 @@ def save_model(self, save_path): with open(save_path, "wb") as f: pickle.dump(model_dic, f) - print("Model saved: %s" % save_path) + print(f"Model saved: {save_path}") @classmethod def ReadModel(cls, model_path): @@ -303,7 +303,7 @@ def draw_error(): plt.show() print("------------------Training Complished---------------------") - print((" - - Training epoch: ", rp, " - - Mse: %.6f" % mse)) + print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}")) if draw_e: draw_error() return mse diff --git a/other/lru_cache.py b/other/lru_cache.py index 98051f89db4f..834ea52a95e1 100644 --- a/other/lru_cache.py +++ b/other/lru_cache.py @@ -21,8 +21,9 @@ def __init__(self, key: T | None, val: U | None): self.prev: DoubleLinkedListNode[T, U] | None = None def __repr__(self) -> str: - return "Node: key: {}, val: {}, has next: {}, has prev: {}".format( - self.key, self.val, self.next is not None, self.prev is not None + return ( + f"Node: key: {self.key}, val: {self.val}, " + f"has next: {bool(self.next)}, has prev: {bool(self.prev)}" ) diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index cc1744012671..aecd19c55927 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -69,7 +69,7 @@ def procentual_proximity( # weight not 0 or 1 else: - raise ValueError("Invalid weight of %f provided" % (weight)) + raise ValueError(f"Invalid weight of {weight:f} provided") score_lists.append(score) diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index 9372e9dbc3f4..b3f81bfd10e7 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -111,7 +111,7 @@ def calculate_average_times( for i in range(no_of_processes): total_waiting_time = total_waiting_time + waiting_time[i] total_turn_around_time = total_turn_around_time + turn_around_time[i] - print("Average waiting time = %.5f" % (total_waiting_time / no_of_processes)) + print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}") print("Average turn around time =", total_turn_around_time / no_of_processes) diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index f919a2962354..033db83d789e 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -25,14 +25,14 @@ def build_tree(): q.put(tree_node) while not q.empty(): 
node_found = q.get() - msg = "Enter the left node of %s: " % node_found.data + msg = f"Enter the left node of {node_found.data}: " check = input(msg).strip().lower() or "n" if check == "n": return tree_node left_node = TreeNode(int(check)) node_found.left = left_node q.put(left_node) - msg = "Enter the right node of %s: " % node_found.data + msg = f"Enter the right node of {node_found.data}: " check = input(msg).strip().lower() or "n" if check == "n": return tree_node diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index 147bc6fc740a..089c2532f900 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -31,28 +31,28 @@ def compute_transform_tables( for i in range(1, len_source_seq + 1): costs[i][0] = i * delete_cost - ops[i][0] = "D%c" % source_seq[i - 1] + ops[i][0] = f"D{source_seq[i - 1]:c}" for i in range(1, len_destination_seq + 1): costs[0][i] = i * insert_cost - ops[0][i] = "I%c" % destination_seq[i - 1] + ops[0][i] = f"I{destination_seq[i - 1]:c}" for i in range(1, len_source_seq + 1): for j in range(1, len_destination_seq + 1): if source_seq[i - 1] == destination_seq[j - 1]: costs[i][j] = costs[i - 1][j - 1] + copy_cost - ops[i][j] = "C%c" % source_seq[i - 1] + ops[i][j] = f"C{source_seq[i - 1]:c}" else: costs[i][j] = costs[i - 1][j - 1] + replace_cost - ops[i][j] = "R%c" % source_seq[i - 1] + str(destination_seq[j - 1]) + ops[i][j] = f"R{source_seq[i - 1]:c}" + str(destination_seq[j - 1]) if costs[i - 1][j] + delete_cost < costs[i][j]: costs[i][j] = costs[i - 1][j] + delete_cost - ops[i][j] = "D%c" % source_seq[i - 1] + ops[i][j] = f"D{source_seq[i - 1]:c}" if costs[i][j - 1] + insert_cost < costs[i][j]: costs[i][j] = costs[i][j - 1] + insert_cost - ops[i][j] = "I%c" % destination_seq[j - 1] + ops[i][j] = f"I{destination_seq[j - 1]:c}" return costs, ops From b75a7c77f89e55e1f2510b2eca9b4fd1a5d21ed8 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 11 Jul 2022 10:19:52 +0200 Subject: [PATCH 0468/1543] pre-commit autoupdate: pyupgrade v2.34.0 -> v2.37.0 (#6245) * pre-commit autoupdate: pyupgrade v2.34.0 -> v2.37.0 * pre-commit run --all-files --- .pre-commit-config.yaml | 2 +- arithmetic_analysis/bisection.py | 2 +- arithmetic_analysis/intersection.py | 2 +- arithmetic_analysis/newton_method.py | 2 +- boolean_algebra/quine_mc_cluskey.py | 2 +- ciphers/playfair_cipher.py | 2 +- computer_vision/horn_schunck.py | 3 ++- data_structures/binary_tree/binary_search_tree_recursive.py | 2 +- data_structures/binary_tree/binary_tree_traversals.py | 3 ++- data_structures/binary_tree/non_recursive_segment_tree.py | 3 ++- data_structures/binary_tree/red_black_tree.py | 2 +- data_structures/heap/heap.py | 2 +- data_structures/heap/randomized_heap.py | 3 ++- data_structures/heap/skew_heap.py | 3 ++- data_structures/linked_list/circular_linked_list.py | 3 ++- data_structures/queue/double_ended_queue.py | 3 ++- data_structures/queue/linked_queue.py | 3 ++- divide_and_conquer/convex_hull.py | 2 +- fractals/julia_sets.py | 3 ++- graphs/prim.py | 2 +- linear_algebra/src/lib.py | 3 ++- machine_learning/linear_discriminant_analysis.py | 3 ++- maths/area_under_curve.py | 2 +- maths/euclidean_distance.py | 3 ++- maths/euler_method.py | 2 +- maths/euler_modified.py | 2 +- maths/line_length.py | 2 +- maths/monte_carlo.py | 2 +- maths/numerical_integration.py | 2 +- maths/polynomial_evaluation.py | 2 +- maths/prime_numbers.py | 2 +- other/davisb_putnamb_logemannb_loveland.py | 2 +- other/lfu_cache.py | 3 ++- 
other/lru_cache.py | 3 ++- project_euler/problem_010/sol2.py | 2 +- project_euler/problem_025/sol2.py | 2 +- project_euler/problem_101/sol1.py | 3 ++- project_euler/problem_107/sol1.py | 2 +- project_euler/problem_123/sol1.py | 2 +- scripts/build_directory_md.py | 2 +- web_programming/fetch_jobs.py | 2 +- 41 files changed, 56 insertions(+), 41 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 90feb50ff2af..7ff7459978e6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,7 +26,7 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v2.34.0 + rev: v2.37.0 hooks: - id: pyupgrade args: diff --git a/arithmetic_analysis/bisection.py b/arithmetic_analysis/bisection.py index 1feb4a8cf626..640913a7acc0 100644 --- a/arithmetic_analysis/bisection.py +++ b/arithmetic_analysis/bisection.py @@ -1,4 +1,4 @@ -from typing import Callable +from collections.abc import Callable def bisection(function: Callable[[float], float], a: float, b: float) -> float: diff --git a/arithmetic_analysis/intersection.py b/arithmetic_analysis/intersection.py index 9d4651144668..49213dd05988 100644 --- a/arithmetic_analysis/intersection.py +++ b/arithmetic_analysis/intersection.py @@ -1,5 +1,5 @@ import math -from typing import Callable +from collections.abc import Callable def intersection(function: Callable[[float], float], x0: float, x1: float) -> float: diff --git a/arithmetic_analysis/newton_method.py b/arithmetic_analysis/newton_method.py index f0cf4eaa6e83..c4018a0f260c 100644 --- a/arithmetic_analysis/newton_method.py +++ b/arithmetic_analysis/newton_method.py @@ -1,7 +1,7 @@ """Newton's Method.""" # Newton's Method - https://en.wikipedia.org/wiki/Newton%27s_method -from typing import Callable +from collections.abc import Callable RealFunc = Callable[[float], float] # type alias for a real -> real function diff --git a/boolean_algebra/quine_mc_cluskey.py b/boolean_algebra/quine_mc_cluskey.py index fb23c8c2e79c..9aa9b10c8429 100644 --- a/boolean_algebra/quine_mc_cluskey.py +++ b/boolean_algebra/quine_mc_cluskey.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Sequence +from collections.abc import Sequence def compare_string(string1: str, string2: str) -> str: diff --git a/ciphers/playfair_cipher.py b/ciphers/playfair_cipher.py index 7c0ee5bd5ae1..89aedb7afdb8 100644 --- a/ciphers/playfair_cipher.py +++ b/ciphers/playfair_cipher.py @@ -1,6 +1,6 @@ import itertools import string -from typing import Generator, Iterable +from collections.abc import Generator, Iterable def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]: diff --git a/computer_vision/horn_schunck.py b/computer_vision/horn_schunck.py index 1428487d051b..2a153d06ddae 100644 --- a/computer_vision/horn_schunck.py +++ b/computer_vision/horn_schunck.py @@ -9,9 +9,10 @@ Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf """ +from typing import SupportsIndex + import numpy as np from scipy.ndimage.filters import convolve -from typing_extensions import SupportsIndex def warp( diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index 4bdf4e33dcc3..0d0ac8fd1e22 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -10,7 +10,7 @@ from __future__ import annotations import unittest -from typing import Iterator +from collections.abc import 
Iterator class Node: diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 9a62393914da..378598bb096d 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -2,8 +2,9 @@ from __future__ import annotations from collections import deque +from collections.abc import Sequence from dataclasses import dataclass -from typing import Any, Sequence +from typing import Any @dataclass diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index b04a6e5cacb7..c29adefffd20 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -37,7 +37,8 @@ """ from __future__ import annotations -from typing import Any, Callable, Generic, TypeVar +from collections.abc import Callable +from typing import Any, Generic, TypeVar T = TypeVar("T") diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index 35517f307fe1..a9dbd699c3c1 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -4,7 +4,7 @@ """ from __future__ import annotations -from typing import Iterator +from collections.abc import Iterator class RedBlackTree: diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index 550439edd239..4c19747ec823 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Iterable +from collections.abc import Iterable class Heap: diff --git a/data_structures/heap/randomized_heap.py b/data_structures/heap/randomized_heap.py index bab4ec1b34c6..c0f9888f80c7 100644 --- a/data_structures/heap/randomized_heap.py +++ b/data_structures/heap/randomized_heap.py @@ -3,7 +3,8 @@ from __future__ import annotations import random -from typing import Any, Generic, Iterable, TypeVar +from collections.abc import Iterable +from typing import Any, Generic, TypeVar T = TypeVar("T", bound=bool) diff --git a/data_structures/heap/skew_heap.py b/data_structures/heap/skew_heap.py index 16ddc5545e36..490db061deac 100644 --- a/data_structures/heap/skew_heap.py +++ b/data_structures/heap/skew_heap.py @@ -2,7 +2,8 @@ from __future__ import annotations -from typing import Any, Generic, Iterable, Iterator, TypeVar +from collections.abc import Iterable, Iterator +from typing import Any, Generic, TypeVar T = TypeVar("T", bound=bool) diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 121d934c6957..6fec0a12542f 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import Any, Iterator +from collections.abc import Iterator +from typing import Any class Node: diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index a4658d99759c..1603e50bc7f2 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -3,8 +3,9 @@ """ from __future__ import annotations +from collections.abc import Iterable from dataclasses import dataclass -from typing import Any, Iterable +from typing import Any class Deque: diff --git a/data_structures/queue/linked_queue.py 
b/data_structures/queue/linked_queue.py index 21970e7df965..c6e9f53908dd 100644 --- a/data_structures/queue/linked_queue.py +++ b/data_structures/queue/linked_queue.py @@ -1,7 +1,8 @@ """ A Queue using a linked list like structure """ from __future__ import annotations -from typing import Any, Iterator +from collections.abc import Iterator +from typing import Any class Node: diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index 63f8dbb20cc0..72da116398a9 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -14,7 +14,7 @@ """ from __future__ import annotations -from typing import Iterable +from collections.abc import Iterable class Point: diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 0168a0153de1..f273943851fc 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -22,7 +22,8 @@ """ import warnings -from typing import Any, Callable +from collections.abc import Callable +from typing import Any import numpy from matplotlib import pyplot diff --git a/graphs/prim.py b/graphs/prim.py index 70329da7e8e2..55d0fbfa8e96 100644 --- a/graphs/prim.py +++ b/graphs/prim.py @@ -7,7 +7,7 @@ import heapq as hq import math -from typing import Iterator +from collections.abc import Iterator class Vertex: diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 2bfcea7f8c84..b9791c860a74 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -22,7 +22,8 @@ import math import random -from typing import Collection, overload +from collections.abc import Collection +from typing import overload class Vector: diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index 18553a77ad1c..9ef42ed19bab 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -42,10 +42,11 @@ Author: @EverLookNeverSee """ +from collections.abc import Callable from math import log from os import name, system from random import gauss, seed -from typing import Callable, TypeVar +from typing import TypeVar # Make a training dataset drawn from a gaussian distribution diff --git a/maths/area_under_curve.py b/maths/area_under_curve.py index 6fb3a7c98396..d345398b4c2c 100644 --- a/maths/area_under_curve.py +++ b/maths/area_under_curve.py @@ -3,7 +3,7 @@ """ from __future__ import annotations -from typing import Callable +from collections.abc import Callable def trapezoidal_area( diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py index a2078161374b..22012e92c9cf 100644 --- a/maths/euclidean_distance.py +++ b/maths/euclidean_distance.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import Iterable, Union +from collections.abc import Iterable +from typing import Union import numpy as np diff --git a/maths/euler_method.py b/maths/euler_method.py index 155ef28d1f49..af7eecb2ff29 100644 --- a/maths/euler_method.py +++ b/maths/euler_method.py @@ -1,4 +1,4 @@ -from typing import Callable +from collections.abc import Callable import numpy as np diff --git a/maths/euler_modified.py b/maths/euler_modified.py index 7c76a0ee0b86..5659fa063fc4 100644 --- a/maths/euler_modified.py +++ b/maths/euler_modified.py @@ -1,4 +1,4 @@ -from typing import Callable +from collections.abc import Callable import numpy as np diff --git a/maths/line_length.py b/maths/line_length.py index c4d986279cda..ad12a816b93e 100644 --- a/maths/line_length.py +++ b/maths/line_length.py @@ -1,7 
+1,7 @@ from __future__ import annotations import math -from typing import Callable +from collections.abc import Callable def line_length( diff --git a/maths/monte_carlo.py b/maths/monte_carlo.py index efb6a01d57fd..c13b8d0a4f6b 100644 --- a/maths/monte_carlo.py +++ b/maths/monte_carlo.py @@ -1,10 +1,10 @@ """ @author: MatteoRaso """ +from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean -from typing import Callable def pi_estimator(iterations: int): diff --git a/maths/numerical_integration.py b/maths/numerical_integration.py index cf2efce12baf..a2bfce5b911d 100644 --- a/maths/numerical_integration.py +++ b/maths/numerical_integration.py @@ -3,7 +3,7 @@ """ from __future__ import annotations -from typing import Callable +from collections.abc import Callable def trapezoidal_area( diff --git a/maths/polynomial_evaluation.py b/maths/polynomial_evaluation.py index 4e4016e5133d..8ee82467efa1 100644 --- a/maths/polynomial_evaluation.py +++ b/maths/polynomial_evaluation.py @@ -1,4 +1,4 @@ -from typing import Sequence +from collections.abc import Sequence def evaluate_poly(poly: Sequence[float], x: float) -> float: diff --git a/maths/prime_numbers.py b/maths/prime_numbers.py index 183fbd39349e..7be4d3d95b0e 100644 --- a/maths/prime_numbers.py +++ b/maths/prime_numbers.py @@ -1,5 +1,5 @@ import math -from typing import Generator +from collections.abc import Generator def slow_primes(max: int) -> Generator[int, None, None]: diff --git a/other/davisb_putnamb_logemannb_loveland.py b/other/davisb_putnamb_logemannb_loveland.py index 031f0dbed404..88aefabc8087 100644 --- a/other/davisb_putnamb_logemannb_loveland.py +++ b/other/davisb_putnamb_logemannb_loveland.py @@ -11,7 +11,7 @@ from __future__ import annotations import random -from typing import Iterable +from collections.abc import Iterable class Clause: diff --git a/other/lfu_cache.py b/other/lfu_cache.py index e955973c95b0..072d00ab58c8 100644 --- a/other/lfu_cache.py +++ b/other/lfu_cache.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import Callable, Generic, TypeVar +from collections.abc import Callable +from typing import Generic, TypeVar T = TypeVar("T") U = TypeVar("U") diff --git a/other/lru_cache.py b/other/lru_cache.py index 834ea52a95e1..b68ae0a8e296 100644 --- a/other/lru_cache.py +++ b/other/lru_cache.py @@ -1,6 +1,7 @@ from __future__ import annotations -from typing import Callable, Generic, TypeVar +from collections.abc import Callable +from typing import Generic, TypeVar T = TypeVar("T") U = TypeVar("U") diff --git a/project_euler/problem_010/sol2.py b/project_euler/problem_010/sol2.py index 3a2f485dde50..a288bb85fd52 100644 --- a/project_euler/problem_010/sol2.py +++ b/project_euler/problem_010/sol2.py @@ -11,8 +11,8 @@ - https://en.wikipedia.org/wiki/Prime_number """ import math +from collections.abc import Iterator from itertools import takewhile -from typing import Iterator def is_prime(number: int) -> bool: diff --git a/project_euler/problem_025/sol2.py b/project_euler/problem_025/sol2.py index b041afd98c86..6f49e89fb465 100644 --- a/project_euler/problem_025/sol2.py +++ b/project_euler/problem_025/sol2.py @@ -23,7 +23,7 @@ What is the index of the first term in the Fibonacci sequence to contain 1000 digits? 
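Most of the patch above applies one mechanical rule; as a minimal sketch of the pattern (not lifted from any single file in the diff), the generic ABCs move from typing to collections.abc while concrete helpers such as Any, TypeVar, Generic and overload stay in typing:

from collections.abc import Callable, Iterator  # ABCs now imported from collections.abc
from typing import TypeVar                      # concrete typing helpers stay put

T = TypeVar("T")

def apply_to_all(func: Callable[[T], T], items: Iterator[T]) -> list[T]:
    # the subscripted annotations themselves are unchanged by the migration
    return [func(item) for item in items]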
""" -from typing import Generator +from collections.abc import Generator def fibonacci_generator() -> Generator[int, None, None]: diff --git a/project_euler/problem_101/sol1.py b/project_euler/problem_101/sol1.py index 04678847508c..27438e086c4f 100644 --- a/project_euler/problem_101/sol1.py +++ b/project_euler/problem_101/sol1.py @@ -43,7 +43,8 @@ """ from __future__ import annotations -from typing import Callable, Union +from collections.abc import Callable +from typing import Union Matrix = list[list[Union[float, int]]] diff --git a/project_euler/problem_107/sol1.py b/project_euler/problem_107/sol1.py index 6a411a11473d..048cf033dc2e 100644 --- a/project_euler/problem_107/sol1.py +++ b/project_euler/problem_107/sol1.py @@ -30,7 +30,7 @@ from __future__ import annotations import os -from typing import Mapping +from collections.abc import Mapping EdgeT = tuple[int, int] diff --git a/project_euler/problem_123/sol1.py b/project_euler/problem_123/sol1.py index 91913222759b..f74cdd999401 100644 --- a/project_euler/problem_123/sol1.py +++ b/project_euler/problem_123/sol1.py @@ -39,7 +39,7 @@ """ from __future__ import annotations -from typing import Generator +from collections.abc import Generator def sieve() -> Generator[int, None, None]: diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py index 71577fe6d4ac..7572ce342720 100755 --- a/scripts/build_directory_md.py +++ b/scripts/build_directory_md.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 import os -from typing import Iterator +from collections.abc import Iterator def good_file_paths(top_dir: str = ".") -> Iterator[str]: diff --git a/web_programming/fetch_jobs.py b/web_programming/fetch_jobs.py index bb2171e1f0ee..5af90a0bb239 100644 --- a/web_programming/fetch_jobs.py +++ b/web_programming/fetch_jobs.py @@ -3,7 +3,7 @@ """ from __future__ import annotations -from typing import Generator +from collections.abc import Generator import requests from bs4 import BeautifulSoup From ba129de7f32b6acd1efd8e942aca109bacd86646 Mon Sep 17 00:00:00 2001 From: Todor Peev <46652070+Bjiornulf@users.noreply.github.com> Date: Mon, 11 Jul 2022 12:42:07 +0200 Subject: [PATCH 0469/1543] Fixes: 6216 | Support vector machines (#6240) * initial commit * first implementation of hard margin * remove debugging print * many commits squashed because pre-commit was buggy * more kernels and improved kernel management * remove unnecessary code + fix names + formatting + doctests * rename to fit initial naming * better naming and documentation * better naming and documentation --- machine_learning/support_vector_machines.py | 205 ++++++++++++++++++++ 1 file changed, 205 insertions(+) create mode 100644 machine_learning/support_vector_machines.py diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py new file mode 100644 index 000000000000..caec10175c50 --- /dev/null +++ b/machine_learning/support_vector_machines.py @@ -0,0 +1,205 @@ +import numpy as np +from numpy import ndarray +from scipy.optimize import Bounds, LinearConstraint, minimize + + +def norm_squared(vector: ndarray) -> float: + """ + Return the squared second norm of vector + norm_squared(v) = sum(x * x for x in v) + + Args: + vector (ndarray): input vector + + Returns: + float: squared second norm of vector + + >>> norm_squared([1, 2]) + 5 + >>> norm_squared(np.asarray([1, 2])) + 5 + >>> norm_squared([0, 0]) + 0 + """ + return np.dot(vector, vector) + + +class SVC: + """ + Support Vector Classifier + + Args: + kernel (str): kernel to use. 
Default: linear + Possible choices: + - linear + regularization: constraint for soft margin (data not linearly separable) + Default: unbound + + >>> SVC(kernel="asdf") + Traceback (most recent call last): + ... + ValueError: Unknown kernel: asdf + + >>> SVC(kernel="rbf") + Traceback (most recent call last): + ... + ValueError: rbf kernel requires gamma + + >>> SVC(kernel="rbf", gamma=-1) + Traceback (most recent call last): + ... + ValueError: gamma must be > 0 + """ + + def __init__( + self, + *, + regularization: float = np.inf, + kernel: str = "linear", + gamma: float = 0, + ) -> None: + self.regularization = regularization + self.gamma = gamma + if kernel == "linear": + self.kernel = self.__linear + elif kernel == "rbf": + if self.gamma == 0: + raise ValueError("rbf kernel requires gamma") + if not (isinstance(self.gamma, float) or isinstance(self.gamma, int)): + raise ValueError("gamma must be float or int") + if not self.gamma > 0: + raise ValueError("gamma must be > 0") + self.kernel = self.__rbf + # in the future, there could be a default value like in sklearn + # sklear: def_gamma = 1/(n_features * X.var()) (wiki) + # previously it was 1/(n_features) + else: + raise ValueError(f"Unknown kernel: {kernel}") + + # kernels + def __linear(self, vector1: ndarray, vector2: ndarray) -> float: + """Linear kernel (as if no kernel used at all)""" + return np.dot(vector1, vector2) + + def __rbf(self, vector1: ndarray, vector2: ndarray) -> float: + """ + RBF: Radial Basis Function Kernel + + Note: for more information see: + https://en.wikipedia.org/wiki/Radial_basis_function_kernel + + Args: + vector1 (ndarray): first vector + vector2 (ndarray): second vector) + + Returns: + float: exp(-(gamma * norm_squared(vector1 - vector2))) + """ + return np.exp(-(self.gamma * norm_squared(vector1 - vector2))) + + def fit(self, observations: list[ndarray], classes: ndarray) -> None: + """ + Fits the SVC with a set of observations. + + Args: + observations (list[ndarray]): list of observations + classes (ndarray): classification of each observation (in {1, -1}) + """ + + self.observations = observations + self.classes = classes + + # using Wolfe's Dual to calculate w. + # Primal problem: minimize 1/2*norm_squared(w) + # constraint: yn(w . xn + b) >= 1 + # + # With l a vector + # Dual problem: maximize sum_n(ln) - + # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) + # constraint: self.C >= ln >= 0 + # and sum_n(ln*yn) = 0 + # Then we get w using w = sum_n(ln*yn*xn) + # At the end we can get b ~= mean(yn - w . 
xn) + # + # Since we use kernels, we only need l_star to calculate b + # and to classify observations + + (n,) = np.shape(classes) + + def to_minimize(candidate: ndarray) -> float: + """ + Opposite of the function to maximize + + Args: + candidate (ndarray): candidate array to test + + Return: + float: Wolfe's Dual result to minimize + """ + s = 0 + (n,) = np.shape(candidate) + for i in range(n): + for j in range(n): + s += ( + candidate[i] + * candidate[j] + * classes[i] + * classes[j] + * self.kernel(observations[i], observations[j]) + ) + return 1 / 2 * s - sum(candidate) + + ly_contraint = LinearConstraint(classes, 0, 0) + l_bounds = Bounds(0, self.regularization) + + l_star = minimize( + to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint] + ).x + self.optimum = l_star + + # calculating mean offset of separation plane to points + s = 0 + for i in range(n): + for j in range(n): + s += classes[i] - classes[i] * self.optimum[i] * self.kernel( + observations[i], observations[j] + ) + self.offset = s / n + + def predict(self, observation: ndarray) -> int: + """ + Get the expected class of an observation + + Args: + observation (Vector): observation + + Returns: + int {1, -1}: expected class + + >>> xs = [ + ... np.asarray([0, 1]), np.asarray([0, 2]), + ... np.asarray([1, 1]), np.asarray([1, 2]) + ... ] + >>> y = np.asarray([1, 1, -1, -1]) + >>> s = SVC() + >>> s.fit(xs, y) + >>> s.predict(np.asarray([0, 1])) + 1 + >>> s.predict(np.asarray([1, 1])) + -1 + >>> s.predict(np.asarray([2, 2])) + -1 + """ + s = sum( + self.optimum[n] + * self.classes[n] + * self.kernel(self.observations[n], observation) + for n in range(len(self.classes)) + ) + return 1 if s + self.offset >= 0 else -1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From dad789d9034ea6fb183bddb1a34b6b89d379e422 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 11 Jul 2022 13:11:17 +0200 Subject: [PATCH 0470/1543] Get rid of the Union (#6246) * Get rid of the Union * updating DIRECTORY.md * Get rid of the Union * Remove the redundant pre-commit runs. 
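The substance of the change is the PEP 604 union syntax; the one annotation rewritten below, the Matrix alias in project_euler/problem_101, goes from

Matrix = list[list[Union[float, int]]]  # old spelling, needs "from typing import Union"

to

Matrix = list[list[float | int]]  # new spelling; on Python 3.10+ no typing import is needed

(The two lines are quoted here only as a before/after sketch for orientation.)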
Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/pre-commit.yml | 6 +++++- DIRECTORY.md | 1 + project_euler/problem_101/sol1.py | 3 +-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 3b128bc540bf..eb5e3d4ce1cd 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -1,6 +1,10 @@ name: pre-commit -on: [push, pull_request] +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] jobs: pre-commit: diff --git a/DIRECTORY.md b/DIRECTORY.md index 2e9c03cbcd9b..c8f03658c537 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -444,6 +444,7 @@ * [Scoring Functions](machine_learning/scoring_functions.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) * [Similarity Search](machine_learning/similarity_search.py) + * [Support Vector Machines](machine_learning/support_vector_machines.py) * [Word Frequency Functions](machine_learning/word_frequency_functions.py) ## Maths diff --git a/project_euler/problem_101/sol1.py b/project_euler/problem_101/sol1.py index 27438e086c4f..d5c503af796a 100644 --- a/project_euler/problem_101/sol1.py +++ b/project_euler/problem_101/sol1.py @@ -44,9 +44,8 @@ from __future__ import annotations from collections.abc import Callable -from typing import Union -Matrix = list[list[Union[float, int]]] +Matrix = list[list[float | int]] def solve(matrix: Matrix, vector: Matrix) -> Matrix: From f7c58e4c4b66750cbb3afd9ad29e9c246b2480ab Mon Sep 17 00:00:00 2001 From: Nikos Giachoudis Date: Mon, 11 Jul 2022 10:36:57 -0400 Subject: [PATCH 0471/1543] Unify primality checking (#6228) * renames prime functions and occurances in comments * changes implementation of primality testing to be uniform * adds static typing as per conventions * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - data_structures/hashing/double_hash.py | 4 +- .../hashing/number_theory/prime_numbers.py | 50 ++++++++++++--- maths/prime_check.py | 61 ++++++++++--------- maths/primelib.py | 22 +++---- 5 files changed, 86 insertions(+), 52 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index c8f03658c537..2e9c03cbcd9b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -444,7 +444,6 @@ * [Scoring Functions](machine_learning/scoring_functions.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) * [Similarity Search](machine_learning/similarity_search.py) - * [Support Vector Machines](machine_learning/support_vector_machines.py) * [Word Frequency Functions](machine_learning/word_frequency_functions.py) ## Maths diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py index 57b1ffff4770..bd1355fca65d 100644 --- a/data_structures/hashing/double_hash.py +++ b/data_structures/hashing/double_hash.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 from .hash_table import HashTable -from .number_theory.prime_numbers import check_prime, next_prime +from .number_theory.prime_numbers import is_prime, next_prime class DoubleHash(HashTable): @@ -15,7 +15,7 @@ def __hash_function_2(self, value, data): next_prime_gt = ( next_prime(value % self.size_table) - if not check_prime(value % self.size_table) + if not is_prime(value % self.size_table) else value % self.size_table ) # gt = bigger than return next_prime_gt - (data % next_prime_gt) diff --git 
a/data_structures/hashing/number_theory/prime_numbers.py b/data_structures/hashing/number_theory/prime_numbers.py index bf614e7d48df..b88ab76ecc23 100644 --- a/data_structures/hashing/number_theory/prime_numbers.py +++ b/data_structures/hashing/number_theory/prime_numbers.py @@ -3,25 +3,55 @@ module to operations with prime numbers """ +import math -def check_prime(number): - """ - it's not the best solution + +def is_prime(number: int) -> bool: + """Checks to see if a number is a prime in O(sqrt(n)). + + A number is prime if it has exactly two factors: 1 and itself. + + >>> is_prime(0) + False + >>> is_prime(1) + False + >>> is_prime(2) + True + >>> is_prime(3) + True + >>> is_prime(27) + False + >>> is_prime(87) + False + >>> is_prime(563) + True + >>> is_prime(2999) + True + >>> is_prime(67483) + False """ - special_non_primes = [0, 1, 2] - if number in special_non_primes[:2]: - return 2 - elif number == special_non_primes[-1]: - return 3 - return all(number % i for i in range(2, number)) + # precondition + assert isinstance(number, int) and ( + number >= 0 + ), "'number' must been an int and positive" + + if 1 < number < 4: + # 2 and 3 are primes + return True + elif number < 2 or not number % 2: + # Negatives, 0, 1 and all even numbers are not primes + return False + + odd_numbers = range(3, int(math.sqrt(number) + 1), 2) + return not any(not number % i for i in odd_numbers) def next_prime(value, factor=1, **kwargs): value = factor * value first_value_val = value - while not check_prime(value): + while not is_prime(value): value += 1 if not ("desc" in kwargs.keys() and kwargs["desc"] is True) else -1 if value == first_value_val: diff --git a/maths/prime_check.py b/maths/prime_check.py index 92d31cfeee80..315492054659 100644 --- a/maths/prime_check.py +++ b/maths/prime_check.py @@ -4,31 +4,36 @@ import unittest -def prime_check(number: int) -> bool: +def is_prime(number: int) -> bool: """Checks to see if a number is a prime in O(sqrt(n)). A number is prime if it has exactly two factors: 1 and itself. 
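As a worked illustration of the square-root bound (added for clarity, not text from the patch): any composite n has a divisor greater than 1 and no larger than sqrt(n), so after ruling out even numbers it suffices to try the odd candidates from range(3, int(math.sqrt(number) + 1), 2). For number = 97 those candidates are 3, 5, 7 and 9; none of them divides 97, so 97 is reported prime.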
- >>> prime_check(0) + >>> is_prime(0) False - >>> prime_check(1) + >>> is_prime(1) False - >>> prime_check(2) + >>> is_prime(2) True - >>> prime_check(3) + >>> is_prime(3) True - >>> prime_check(27) + >>> is_prime(27) False - >>> prime_check(87) + >>> is_prime(87) False - >>> prime_check(563) + >>> is_prime(563) True - >>> prime_check(2999) + >>> is_prime(2999) True - >>> prime_check(67483) + >>> is_prime(67483) False """ + # precondition + assert isinstance(number, int) and ( + number >= 0 + ), "'number' must been an int and positive" + if 1 < number < 4: # 2 and 3 are primes return True @@ -42,35 +47,35 @@ def prime_check(number: int) -> bool: class Test(unittest.TestCase): def test_primes(self): - self.assertTrue(prime_check(2)) - self.assertTrue(prime_check(3)) - self.assertTrue(prime_check(5)) - self.assertTrue(prime_check(7)) - self.assertTrue(prime_check(11)) - self.assertTrue(prime_check(13)) - self.assertTrue(prime_check(17)) - self.assertTrue(prime_check(19)) - self.assertTrue(prime_check(23)) - self.assertTrue(prime_check(29)) + self.assertTrue(is_prime(2)) + self.assertTrue(is_prime(3)) + self.assertTrue(is_prime(5)) + self.assertTrue(is_prime(7)) + self.assertTrue(is_prime(11)) + self.assertTrue(is_prime(13)) + self.assertTrue(is_prime(17)) + self.assertTrue(is_prime(19)) + self.assertTrue(is_prime(23)) + self.assertTrue(is_prime(29)) def test_not_primes(self): self.assertFalse( - prime_check(-19), + is_prime(-19), "Negative numbers are excluded by definition of prime numbers.", ) self.assertFalse( - prime_check(0), + is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.", ) self.assertFalse( - prime_check(1), + is_prime(1), "One only has 1 positive factor, primes must have exactly two.", ) - self.assertFalse(prime_check(2 * 2)) - self.assertFalse(prime_check(2 * 3)) - self.assertFalse(prime_check(3 * 3)) - self.assertFalse(prime_check(3 * 5)) - self.assertFalse(prime_check(3 * 5 * 7)) + self.assertFalse(is_prime(2 * 2)) + self.assertFalse(is_prime(2 * 3)) + self.assertFalse(is_prime(3 * 3)) + self.assertFalse(is_prime(3 * 5)) + self.assertFalse(is_prime(3 * 5 * 7)) if __name__ == "__main__": diff --git a/maths/primelib.py b/maths/primelib.py index 37883d9cf591..3da9c56f66d6 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -41,7 +41,7 @@ from math import sqrt -def isPrime(number): +def is_prime(number: int) -> bool: """ input: positive integer 'number' returns true if 'number' is prime otherwise false. @@ -129,7 +129,7 @@ def getPrimeNumbers(N): # if a number is prime then appends to list 'ans' for number in range(2, N + 1): - if isPrime(number): + if is_prime(number): ans.append(number) @@ -164,11 +164,11 @@ def primeFactorization(number): ans.append(number) # if 'number' not prime then builds the prime factorization of 'number' - elif not isPrime(number): + elif not is_prime(number): while quotient != 1: - if isPrime(factor) and (quotient % factor == 0): + if is_prime(factor) and (quotient % factor == 0): ans.append(factor) quotient /= factor else: @@ -317,8 +317,8 @@ def goldbach(number): isinstance(ans, list) and (len(ans) == 2) and (ans[0] + ans[1] == number) - and isPrime(ans[0]) - and isPrime(ans[1]) + and is_prime(ans[0]) + and is_prime(ans[1]) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans @@ -462,11 +462,11 @@ def getPrime(n): # if ans not prime then # runs to the next prime number. 
- while not isPrime(ans): + while not is_prime(ans): ans += 1 # precondition - assert isinstance(ans, int) and isPrime( + assert isinstance(ans, int) and is_prime( ans ), "'ans' must been a prime number and from type int" @@ -486,7 +486,7 @@ def getPrimesBetween(pNumber1, pNumber2): # precondition assert ( - isPrime(pNumber1) and isPrime(pNumber2) and (pNumber1 < pNumber2) + is_prime(pNumber1) and is_prime(pNumber2) and (pNumber1 < pNumber2) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" number = pNumber1 + 1 # jump to the next number @@ -495,7 +495,7 @@ def getPrimesBetween(pNumber1, pNumber2): # if number is not prime then # fetch the next prime number. - while not isPrime(number): + while not is_prime(number): number += 1 while number < pNumber2: @@ -505,7 +505,7 @@ def getPrimesBetween(pNumber1, pNumber2): number += 1 # fetch the next prime number. - while not isPrime(number): + while not is_prime(number): number += 1 # precondition From dcc387631d201de42dbf34216088e7faba302a41 Mon Sep 17 00:00:00 2001 From: lakshmikanth ayyadevara <52835045+Lakshmikanth2001@users.noreply.github.com> Date: Mon, 11 Jul 2022 20:59:27 +0530 Subject: [PATCH 0472/1543] Improve `prime_check` in math modules (#6044) * improved prime_check * updating DIRECTORY.md * included suggested changes * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- maths/prime_check.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/maths/prime_check.py b/maths/prime_check.py index 315492054659..6af5a75c2dd8 100644 --- a/maths/prime_check.py +++ b/maths/prime_check.py @@ -37,12 +37,15 @@ def is_prime(number: int) -> bool: if 1 < number < 4: # 2 and 3 are primes return True - elif number < 2 or not number % 2: - # Negatives, 0, 1 and all even numbers are not primes + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False - odd_numbers = range(3, int(math.sqrt(number) + 1), 2) - return not any(not number % i for i in odd_numbers) + # All primes number are in format of 6k +/- 1 + for i in range(5, int(math.sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: + return False + return True class Test(unittest.TestCase): From 38dfcd28b5f2fb19bae130a942466d73933e072f Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Thu, 14 Jul 2022 12:54:24 +0530 Subject: [PATCH 0473/1543] fix: test failures (#6250) 1. Incorrect function was being imported from the module 2. 
Testing for exception was not done correctly --- maths/miller_rabin.py | 6 +++--- maths/prime_check.py | 6 ++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py index d35e5485888f..b4dfed1290de 100644 --- a/maths/miller_rabin.py +++ b/maths/miller_rabin.py @@ -8,9 +8,9 @@ # if it's not a prime, the chance of it returning true is at most 1/4**prec def is_prime_big(n, prec=1000): """ - >>> from maths.prime_check import prime_check - >>> # all(is_prime_big(i) == prime_check(i) for i in range(1000)) # 3.45s - >>> all(is_prime_big(i) == prime_check(i) for i in range(256)) + >>> from maths.prime_check import is_prime + >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s + >>> all(is_prime_big(i) == is_prime(i) for i in range(256)) True """ if n < 2: diff --git a/maths/prime_check.py b/maths/prime_check.py index 6af5a75c2dd8..80ab8bc5d2cd 100644 --- a/maths/prime_check.py +++ b/maths/prime_check.py @@ -62,10 +62,8 @@ def test_primes(self): self.assertTrue(is_prime(29)) def test_not_primes(self): - self.assertFalse( - is_prime(-19), - "Negative numbers are excluded by definition of prime numbers.", - ) + with self.assertRaises(AssertionError): + is_prime(-19) self.assertFalse( is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.", From e1e7922efac2b7fdfab7555baaf784edb345c222 Mon Sep 17 00:00:00 2001 From: KanakalathaVemuru <46847239+KanakalathaVemuru@users.noreply.github.com> Date: Sun, 17 Jul 2022 03:12:58 +0530 Subject: [PATCH 0474/1543] Add circle sort implementation (#5548) * Added circle sort implementation * Added modifications * Added modifications * Update circle_sort.py * Update circle_sort.py Co-authored-by: Christian Clauss Co-authored-by: John Law --- sorts/circle_sort.py | 87 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 sorts/circle_sort.py diff --git a/sorts/circle_sort.py b/sorts/circle_sort.py new file mode 100644 index 000000000000..da3c59059516 --- /dev/null +++ b/sorts/circle_sort.py @@ -0,0 +1,87 @@ +""" +This is a Python implementation of the circle sort algorithm + +For doctests run following command: +python3 -m doctest -v circle_sort.py + +For manual testing run: +python3 circle_sort.py +""" + + +def circle_sort(collection: list) -> list: + """A pure Python implementation of circle sort algorithm + + :param collection: a mutable collection of comparable items in any order + :return: the same collection in ascending order + + Examples: + >>> circle_sort([0, 5, 3, 2, 2]) + [0, 2, 2, 3, 5] + >>> circle_sort([]) + [] + >>> circle_sort([-2, 5, 0, -45]) + [-45, -2, 0, 5] + >>> collections = ([], [0, 5, 3, 2, 2], [-2, 5, 0, -45]) + >>> all(sorted(collection) == circle_sort(collection) for collection in collections) + True + """ + + if len(collection) < 2: + return collection + + def circle_sort_util(collection: list, low: int, high: int) -> bool: + """ + >>> arr = [5,4,3,2,1] + >>> circle_sort_util(lst, 0, 2) + True + >>> arr + [3, 4, 5, 2, 1] + """ + + swapped = False + + if low == high: + return swapped + + left = low + right = high + + while left < right: + if collection[left] > collection[right]: + collection[left], collection[right] = ( + collection[right], + collection[left], + ) + swapped = True + + left += 1 + right -= 1 + + if left == right: + if collection[left] > collection[right + 1]: + collection[left], collection[right + 1] = ( + collection[right + 1], + collection[left], + ) + + swapped = True + + mid = 
low + int((high - low) / 2) + left_swap = circle_sort_util(collection, low, mid) + right_swap = circle_sort_util(collection, mid + 1, high) + + return swapped or left_swap or right_swap + + is_not_sorted = True + + while is_not_sorted is True: + is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1) + + return collection + + +if __name__ == "__main__": + user_input = input("Enter numbers separated by a comma:\n").strip() + unsorted = [int(item) for item in user_input.split(",")] + print(circle_sort(unsorted)) From b3d9281591df03768fe062cc0517ee0d4cc387f0 Mon Sep 17 00:00:00 2001 From: U <80122730+und1n3@users.noreply.github.com> Date: Sat, 16 Jul 2022 23:55:29 +0200 Subject: [PATCH 0475/1543] Add algorithm for creating Hamming numbers (#4992) * Added algorithm for creating Hamming numbers series in Python * Changed to f-string format. * Added modifications * Update and rename hamming.py to hamming_numbers.py * Update hamming_numbers.py * Update hamming_numbers.py * Rename maths/series/hamming_numbers.py to maths/hamming_numbers.py Co-authored-by: John Law --- maths/hamming_numbers.py | 51 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 maths/hamming_numbers.py diff --git a/maths/hamming_numbers.py b/maths/hamming_numbers.py new file mode 100644 index 000000000000..4575119c8a95 --- /dev/null +++ b/maths/hamming_numbers.py @@ -0,0 +1,51 @@ +""" +A Hamming number is a positive integer of the form 2^i*3^j*5^k, for some +non-negative integers i, j, and k. They are often referred to as regular numbers. +More info at: https://en.wikipedia.org/wiki/Regular_number. +""" + + +def hamming(n_element: int) -> list: + """ + This function creates an ordered list of n length as requested, and afterwards + returns the last value of the list. It must be given a positive integer. 
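(Sketch of how the list is built, added for orientation: the loop below keeps three indices i, j and k into the values generated so far, advances each index past every entry whose 2-, 3- or 5-multiple is already <= the current last element, and then appends min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5) as the next Hamming number.)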
+ + :param n_element: The number of elements on the list + :return: The nth element of the list + + >>> hamming(5) + [1, 2, 3, 4, 5] + >>> hamming(10) + [1, 2, 3, 4, 5, 6, 8, 9, 10, 12] + >>> hamming(15) + [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] + """ + n_element = int(n_element) + if n_element < 1: + my_error = ValueError("a should be a positive number") + raise my_error + + hamming_list = [1] + i, j, k = (0, 0, 0) + index = 1 + while index < n_element: + while hamming_list[i] * 2 <= hamming_list[-1]: + i += 1 + while hamming_list[j] * 3 <= hamming_list[-1]: + j += 1 + while hamming_list[k] * 5 <= hamming_list[-1]: + k += 1 + hamming_list.append( + min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5) + ) + index += 1 + return hamming_list + + +if __name__ == "__main__": + n = input("Enter the last number (nth term) of the Hamming Number Series: ") + print("Formula of Hamming Number Series => 2^i * 3^j * 5^k") + hamming_numbers = hamming(int(n)) + print("-----------------------------------------------------") + print(f"The list with nth numbers is: {hamming_numbers}") + print("-----------------------------------------------------") From c45fb3c2948449760667fdf085cfc0467376ade8 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 23 Jul 2022 04:53:46 +0300 Subject: [PATCH 0476/1543] perf: Project Euler problem 145 solution 1 (#6259) Improve solution (~30 times - from 900+ seconds to ~30 seconds) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 ++ project_euler/problem_145/sol1.py | 90 ++++++++++++++++++++++--------- 2 files changed, 67 insertions(+), 26 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 2e9c03cbcd9b..1ee106252ce2 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -444,6 +444,7 @@ * [Scoring Functions](machine_learning/scoring_functions.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) * [Similarity Search](machine_learning/similarity_search.py) + * [Support Vector Machines](machine_learning/support_vector_machines.py) * [Word Frequency Functions](machine_learning/word_frequency_functions.py) ## Maths @@ -500,6 +501,7 @@ * [Gaussian](maths/gaussian.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) * [Greedy Coin Change](maths/greedy_coin_change.py) + * [Hamming Numbers](maths/hamming_numbers.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) @@ -938,6 +940,7 @@ * [Bogo Sort](sorts/bogo_sort.py) * [Bubble Sort](sorts/bubble_sort.py) * [Bucket Sort](sorts/bucket_sort.py) + * [Circle Sort](sorts/circle_sort.py) * [Cocktail Shaker Sort](sorts/cocktail_shaker_sort.py) * [Comb Sort](sorts/comb_sort.py) * [Counting Sort](sorts/counting_sort.py) diff --git a/project_euler/problem_145/sol1.py b/project_euler/problem_145/sol1.py index 09d8daff57be..e9fc1a199161 100644 --- a/project_euler/problem_145/sol1.py +++ b/project_euler/problem_145/sol1.py @@ -1,6 +1,6 @@ """ Project Euler problem 145: https://projecteuler.net/problem=145 -Author: Vineet Rao +Author: Vineet Rao, Maxim Smolskiy Problem statement: Some positive integers n have the property that the sum [ n + reverse(n) ] @@ -13,44 +13,82 @@ How many reversible numbers are there below one-billion (10^9)? 
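A brief orientation note on the rewrite below (added commentary, and only a rough description): rather than testing every candidate under 10^9 as the old brute-force loop did, reversible_numbers counts, length by length, the digit pairs that keep every digit of n + reverse(n) odd, pairing digits of opposite parity once carries are accounted for; the per-length doctest values 0, 20 and 100 for lengths 1, 2 and 3 add up to the 120 reversible numbers below one thousand checked in solution(3).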
""" +EVEN_DIGITS = [0, 2, 4, 6, 8] +ODD_DIGITS = [1, 3, 5, 7, 9] -def odd_digits(num: int) -> bool: +def reversible_numbers( + remaining_length: int, remainder: int, digits: list[int], length: int +) -> int: """ - Check if the number passed as argument has only odd digits. - >>> odd_digits(123) - False - >>> odd_digits(135797531) - True + Count the number of reversible numbers of given length. + Iterate over possible digits considering parity of current sum remainder. + >>> reversible_numbers(1, 0, [0], 1) + 0 + >>> reversible_numbers(2, 0, [0] * 2, 2) + 20 + >>> reversible_numbers(3, 0, [0] * 3, 3) + 100 """ - while num > 0: - digit = num % 10 - if digit % 2 == 0: - return False - num //= 10 - return True + if remaining_length == 0: + if digits[0] == 0 or digits[-1] == 0: + return 0 + for i in range(length // 2 - 1, -1, -1): + remainder += digits[i] + digits[length - i - 1] -def solution(max_num: int = 1_000_000_000) -> int: + if remainder % 2 == 0: + return 0 + + remainder //= 10 + + return 1 + + if remaining_length == 1: + if remainder % 2 == 0: + return 0 + + result = 0 + for digit in range(10): + digits[length // 2] = digit + result += reversible_numbers( + 0, (remainder + 2 * digit) // 10, digits, length + ) + return result + + result = 0 + for digit1 in range(10): + digits[(length + remaining_length) // 2 - 1] = digit1 + + if (remainder + digit1) % 2 == 0: + other_parity_digits = ODD_DIGITS + else: + other_parity_digits = EVEN_DIGITS + + for digit2 in other_parity_digits: + digits[(length - remaining_length) // 2] = digit2 + result += reversible_numbers( + remaining_length - 2, + (remainder + digit1 + digit2) // 10, + digits, + length, + ) + return result + + +def solution(max_power: int = 9) -> int: """ To evaluate the solution, use solution() - >>> solution(1000) + >>> solution(3) 120 - >>> solution(1_000_000) + >>> solution(6) 18720 - >>> solution(10_000_000) + >>> solution(7) 68720 """ result = 0 - # All single digit numbers reverse to themselves, so their sums are even - # Therefore at least one digit in their sum is even - # Last digit cannot be 0, else it causes leading zeros in reverse - for num in range(11, max_num): - if num % 10 == 0: - continue - num_sum = num + int(str(num)[::-1]) - num_is_reversible = odd_digits(num_sum) - result += 1 if num_is_reversible else 0 + for length in range(1, max_power + 1): + result += reversible_numbers(length, 0, [0] * length, length) return result From d53fdc29e2b47213999f566d16acd60409de6dc2 Mon Sep 17 00:00:00 2001 From: lance-pyles <36748284+lance-pyles@users.noreply.github.com> Date: Fri, 22 Jul 2022 19:26:59 -0700 Subject: [PATCH 0477/1543] chore: update .gitignore (#6263) --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 574cdf312836..baea84b8d1f1 100644 --- a/.gitignore +++ b/.gitignore @@ -107,3 +107,4 @@ venv.bak/ .idea .try .vscode/ +.vs/ From 7d9ebee75fd9036579c2ecb282cbf4910de12b58 Mon Sep 17 00:00:00 2001 From: keshav Sharma <72795959+ksharma20@users.noreply.github.com> Date: Sun, 24 Jul 2022 21:33:10 +0530 Subject: [PATCH 0478/1543] chore: rename gcd to greatest_common_divisor (#6265) As described in CONTRIBUTING.md > Expand acronyms because gcd() is hard to understand but greatest_common_divisor() is not. 
Co-authored-by: Dhruv Manilawala --- project_euler/problem_005/sol2.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/project_euler/problem_005/sol2.py b/project_euler/problem_005/sol2.py index c88044487d20..1b3e5e130f03 100644 --- a/project_euler/problem_005/sol2.py +++ b/project_euler/problem_005/sol2.py @@ -16,28 +16,28 @@ """ -def gcd(x: int, y: int) -> int: +def greatest_common_divisor(x: int, y: int) -> int: """ - Euclidean GCD algorithm (Greatest Common Divisor) + Euclidean Greatest Common Divisor algorithm - >>> gcd(0, 0) + >>> greatest_common_divisor(0, 0) 0 - >>> gcd(23, 42) + >>> greatest_common_divisor(23, 42) 1 - >>> gcd(15, 33) + >>> greatest_common_divisor(15, 33) 3 - >>> gcd(12345, 67890) + >>> greatest_common_divisor(12345, 67890) 15 """ - return x if y == 0 else gcd(y, x % y) + return x if y == 0 else greatest_common_divisor(y, x % y) def lcm(x: int, y: int) -> int: """ Least Common Multiple. - Using the property that lcm(a, b) * gcd(a, b) = a*b + Using the property that lcm(a, b) * greatest_common_divisor(a, b) = a*b >>> lcm(3, 15) 15 @@ -49,7 +49,7 @@ def lcm(x: int, y: int) -> int: 192 """ - return (x * y) // gcd(x, y) + return (x * y) // greatest_common_divisor(x, y) def solution(n: int = 20) -> int: From 90959212e5b0f3cfbae95ea38100e6fee4d2475f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 25 Jul 2022 19:41:12 +0300 Subject: [PATCH 0479/1543] perf: improve Project Euler problem 030 solution 1 (#6267) Improve solution (locally 3+ times - from 3+ seconds to ~1 second) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- project_euler/problem_030/sol1.py | 32 ++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/project_euler/problem_030/sol1.py b/project_euler/problem_030/sol1.py index c9f2d71965e3..2c6b4e4e85d5 100644 --- a/project_euler/problem_030/sol1.py +++ b/project_euler/problem_030/sol1.py @@ -1,4 +1,4 @@ -""" Problem Statement (Digit Fifth Power ): https://projecteuler.net/problem=30 +""" Problem Statement (Digit Fifth Powers): https://projecteuler.net/problem=30 Surprisingly there are only three numbers that can be written as the sum of fourth powers of their digits: @@ -13,26 +13,32 @@ Find the sum of all the numbers that can be written as the sum of fifth powers of their digits. 
-(9^5)=59,049‬ -59049*7=4,13,343 (which is only 6 digit number ) -So, number greater than 9,99,999 are rejected -and also 59049*3=1,77,147 (which exceeds the criteria of number being 3 digit) -So, n>999 -and hence a bound between (1000,1000000) +9^5 = 59049 +59049 * 7 = 413343 (which is only 6 digit number) +So, numbers greater than 999999 are rejected +and also 59049 * 3 = 177147 (which exceeds the criteria of number being 3 digit) +So, number > 999 +and hence a number between 1000 and 1000000 """ -def digitsum(s: str) -> int: +DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)} + + +def digits_fifth_powers_sum(number: int) -> int: """ - >>> all(digitsum(str(i)) == (1 if i == 1 else 0) for i in range(100)) - True + >>> digits_fifth_powers_sum(1234) + 1300 """ - i = sum(pow(int(c), 5) for c in s) - return i if i == int(s) else 0 + return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number)) def solution() -> int: - return sum(digitsum(str(i)) for i in range(1000, 1000000)) + return sum( + number + for number in range(1000, 1000000) + if number == digits_fifth_powers_sum(number) + ) if __name__ == "__main__": From 97f25d4b431ffe432a30853ed0bcc75ea5e8166f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 26 Jul 2022 19:15:14 +0300 Subject: [PATCH 0480/1543] feat: add Project Euler problem 587 solution 1 (#6269) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_587/__init__.py | 0 project_euler/problem_587/sol1.py | 94 +++++++++++++++++++++++++++ 3 files changed, 96 insertions(+) create mode 100644 project_euler/problem_587/__init__.py create mode 100644 project_euler/problem_587/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 1ee106252ce2..843ff77bb67b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -898,6 +898,8 @@ * [Sol1](project_euler/problem_493/sol1.py) * Problem 551 * [Sol1](project_euler/problem_551/sol1.py) + * Problem 587 + * [Sol1](project_euler/problem_587/sol1.py) * Problem 686 * [Sol1](project_euler/problem_686/sol1.py) diff --git a/project_euler/problem_587/__init__.py b/project_euler/problem_587/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_587/sol1.py b/project_euler/problem_587/sol1.py new file mode 100644 index 000000000000..dde5c16103ac --- /dev/null +++ b/project_euler/problem_587/sol1.py @@ -0,0 +1,94 @@ +""" +Project Euler Problem 587: https://projecteuler.net/problem=587 + +A square is drawn around a circle as shown in the diagram below on the left. +We shall call the blue shaded region the L-section. +A line is drawn from the bottom left of the square to the top right +as shown in the diagram on the right. +We shall call the orange shaded region a concave triangle. + +It should be clear that the concave triangle occupies exactly half of the L-section. + +Two circles are placed next to each other horizontally, +a rectangle is drawn around both circles, and +a line is drawn from the bottom left to the top right as shown in the diagram below. + +This time the concave triangle occupies approximately 36.46% of the L-section. + +If n circles are placed next to each other horizontally, +a rectangle is drawn around the n circles, and +a line is drawn from the bottom left to the top right, +then it can be shown that the least value of n +for which the concave triangle occupies less than 10% of the L-section is n = 15. + +What is the least value of n +for which the concave triangle occupies less than 0.1% of the L-section? 
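A note on the approach of the solution that follows (added for orientation, not part of the problem statement): the code intersects the rectangle's diagonal y = x / n with the first circle, takes the triangle under the diagonal up to that intersection, and adds the exact integral under the circle's lower arc from the intersection out to x = 1/2; dividing by the L-section area (1 - pi / 4) / 4 gives the occupied fraction. As a quick consistency check with the single-circle case described above, concave_triangle_area(1) / ((1 - pi / 4) / 4) evaluates to 0.5 up to floating-point rounding.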
+""" + +from itertools import count +from math import asin, pi, sqrt + + +def circle_bottom_arc_integral(point: float) -> float: + """ + Returns integral of circle bottom arc y = 1 / 2 - sqrt(1 / 4 - (x - 1 / 2) ^ 2) + + >>> circle_bottom_arc_integral(0) + 0.39269908169872414 + + >>> circle_bottom_arc_integral(1 / 2) + 0.44634954084936207 + + >>> circle_bottom_arc_integral(1) + 0.5 + """ + + return ( + (1 - 2 * point) * sqrt(point - point**2) + 2 * point + asin(sqrt(1 - point)) + ) / 4 + + +def concave_triangle_area(circles_number: int) -> float: + """ + Returns area of concave triangle + + >>> concave_triangle_area(1) + 0.026825229575318944 + + >>> concave_triangle_area(2) + 0.01956236140083944 + """ + + intersection_y = (circles_number + 1 - sqrt(2 * circles_number)) / ( + 2 * (circles_number**2 + 1) + ) + intersection_x = circles_number * intersection_y + + triangle_area = intersection_x * intersection_y / 2 + concave_region_area = circle_bottom_arc_integral( + 1 / 2 + ) - circle_bottom_arc_integral(intersection_x) + + return triangle_area + concave_region_area + + +def solution(fraction: float = 1 / 1000) -> int: + """ + Returns least value of n + for which the concave triangle occupies less than fraction of the L-section + + >>> solution(1 / 10) + 15 + """ + + l_section_area = (1 - pi / 4) / 4 + + for n in count(1): + if concave_triangle_area(n) / l_section_area < fraction: + return n + + return -1 + + +if __name__ == "__main__": + print(f"{solution() = }") From defc205ef4459264b753429c6cbc23481347e8b7 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 6 Aug 2022 17:04:24 +0300 Subject: [PATCH 0481/1543] perf: improve Project Euler problem 203 solution 1 (#6279) Improve solution (locally 1500+ times - from 3+ seconds to ~2 milliseconds) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- project_euler/problem_203/sol1.py | 112 ++++++------------------------ 1 file changed, 21 insertions(+), 91 deletions(-) diff --git a/project_euler/problem_203/sol1.py b/project_euler/problem_203/sol1.py index 2ba3c96c9e00..dc93683da535 100644 --- a/project_euler/problem_203/sol1.py +++ b/project_euler/problem_203/sol1.py @@ -29,8 +29,6 @@ """ from __future__ import annotations -import math - def get_pascal_triangle_unique_coefficients(depth: int) -> set[int]: """ @@ -61,76 +59,9 @@ def get_pascal_triangle_unique_coefficients(depth: int) -> set[int]: return coefficients -def get_primes_squared(max_number: int) -> list[int]: - """ - Calculates all primes between 2 and round(sqrt(max_number)) and returns - them squared up. - - >>> get_primes_squared(2) - [] - >>> get_primes_squared(4) - [4] - >>> get_primes_squared(10) - [4, 9] - >>> get_primes_squared(100) - [4, 9, 25, 49] - """ - max_prime = math.isqrt(max_number) - non_primes = [False] * (max_prime + 1) - primes = [] - for num in range(2, max_prime + 1): - if non_primes[num]: - continue - - for num_counter in range(num**2, max_prime + 1, num): - non_primes[num_counter] = True - - primes.append(num**2) - return primes - - -def get_squared_primes_to_use( - num_to_look: int, squared_primes: list[int], previous_index: int -) -> int: - """ - Returns an int indicating the last index on which squares of primes - in primes are lower than num_to_look. - - This method supposes that squared_primes is sorted in ascending order and that - each num_to_look is provided in ascending order as well. 
Under these - assumptions, it needs a previous_index parameter that tells what was - the index returned by the method for the previous num_to_look. - - If all the elements in squared_primes are greater than num_to_look, then the - method returns -1. - - >>> get_squared_primes_to_use(1, [4, 9, 16, 25], 0) - -1 - >>> get_squared_primes_to_use(4, [4, 9, 16, 25], 0) - 1 - >>> get_squared_primes_to_use(16, [4, 9, 16, 25], 1) - 3 +def get_squarefrees(unique_coefficients: set[int]) -> set[int]: """ - idx = max(previous_index, 0) - - while idx < len(squared_primes) and squared_primes[idx] <= num_to_look: - idx += 1 - - if idx == 0 and squared_primes[idx] > num_to_look: - return -1 - - if idx == len(squared_primes) and squared_primes[-1] > num_to_look: - return -1 - - return idx - - -def get_squarefree( - unique_coefficients: set[int], squared_primes: list[int] -) -> set[int]: - """ - Calculates the squarefree numbers inside unique_coefficients given a - list of square of primes. + Calculates the squarefree numbers inside unique_coefficients. Based on the definition of a non-squarefree number, then any non-squarefree n can be decomposed as n = p*p*r, where p is positive prime number and r @@ -140,27 +71,27 @@ def get_squarefree( squarefree as r cannot be negative. On the contrary, if any r exists such that n = p*p*r, then the number is non-squarefree. - >>> get_squarefree({1}, []) - set() - >>> get_squarefree({1, 2}, []) - set() - >>> get_squarefree({1, 2, 3, 4, 5, 6, 7, 35, 10, 15, 20, 21}, [4, 9, 25]) + >>> get_squarefrees({1}) + {1} + >>> get_squarefrees({1, 2}) + {1, 2} + >>> get_squarefrees({1, 2, 3, 4, 5, 6, 7, 35, 10, 15, 20, 21}) {1, 2, 3, 5, 6, 7, 35, 10, 15, 21} """ - if len(squared_primes) == 0: - return set() - non_squarefrees = set() - prime_squared_idx = 0 - for num in sorted(unique_coefficients): - prime_squared_idx = get_squared_primes_to_use( - num, squared_primes, prime_squared_idx - ) - if prime_squared_idx == -1: - continue - if any(num % prime == 0 for prime in squared_primes[:prime_squared_idx]): - non_squarefrees.add(num) + for number in unique_coefficients: + divisor = 2 + copy_number = number + while divisor**2 <= copy_number: + multiplicity = 0 + while copy_number % divisor == 0: + copy_number //= divisor + multiplicity += 1 + if multiplicity >= 2: + non_squarefrees.add(number) + break + divisor += 1 return unique_coefficients.difference(non_squarefrees) @@ -170,15 +101,14 @@ def solution(n: int = 51) -> int: Returns the sum of squarefrees for a given Pascal's Triangle of depth n. 
>>> solution(1) - 0 + 1 >>> solution(8) 105 >>> solution(9) 175 """ unique_coefficients = get_pascal_triangle_unique_coefficients(n) - primes = get_primes_squared(max(unique_coefficients)) - squarefrees = get_squarefree(unique_coefficients, primes) + squarefrees = get_squarefrees(unique_coefficients) return sum(squarefrees) From 9eac958725c8e5c62b225c77eaf83ec0ecb7e6f6 Mon Sep 17 00:00:00 2001 From: Horst JENS Date: Sat, 6 Aug 2022 17:47:56 +0200 Subject: [PATCH 0482/1543] typo corrected: heart -> Earth (#6275) --- physics/horizontal_projectile_motion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/physics/horizontal_projectile_motion.py b/physics/horizontal_projectile_motion.py index 0f27b0617105..a747acd72072 100644 --- a/physics/horizontal_projectile_motion.py +++ b/physics/horizontal_projectile_motion.py @@ -17,7 +17,7 @@ from math import radians as angle_to_radians from math import sin -# Acceleration Constant on hearth (unit m/s^2) +# Acceleration Constant on Earth (unit m/s^2) g = 9.80665 From a69d880bb5e9113ccf09aeaa31a570330f856417 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 7 Aug 2022 05:07:35 +0300 Subject: [PATCH 0483/1543] feat: add Project Euler problem 114 solution 1 (#6300) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_114/__init__.py | 0 project_euler/problem_114/sol1.py | 58 +++++++++++++++++++++++++++ 3 files changed, 60 insertions(+) create mode 100644 project_euler/problem_114/__init__.py create mode 100644 project_euler/problem_114/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 843ff77bb67b..98b87f2fe279 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -854,6 +854,8 @@ * [Sol1](project_euler/problem_112/sol1.py) * Problem 113 * [Sol1](project_euler/problem_113/sol1.py) + * Problem 114 + * [Sol1](project_euler/problem_114/sol1.py) * Problem 119 * [Sol1](project_euler/problem_119/sol1.py) * Problem 120 diff --git a/project_euler/problem_114/__init__.py b/project_euler/problem_114/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_114/sol1.py b/project_euler/problem_114/sol1.py new file mode 100644 index 000000000000..14d8092b25dd --- /dev/null +++ b/project_euler/problem_114/sol1.py @@ -0,0 +1,58 @@ +""" +Project Euler Problem 114: https://projecteuler.net/problem=114 + +A row measuring seven units in length has red blocks with a minimum length +of three units placed on it, such that any two red blocks +(which are allowed to be different lengths) are separated by at least one grey square. +There are exactly seventeen ways of doing this. + + |g|g|g|g|g|g|g| |r,r,r|g|g|g|g| + + |g|r,r,r|g|g|g| |g|g|r,r,r|g|g| + + |g|g|g|r,r,r|g| |g|g|g|g|r,r,r| + + |r,r,r|g|r,r,r| |r,r,r,r|g|g|g| + + |g|r,r,r,r|g|g| |g|g|r,r,r,r|g| + + |g|g|g|r,r,r,r| |r,r,r,r,r|g|g| + + |g|r,r,r,r,r|g| |g|g|r,r,r,r,r| + + |r,r,r,r,r,r|g| |g|r,r,r,r,r,r| + + |r,r,r,r,r,r,r| + +How many ways can a row measuring fifty units in length be filled? + +NOTE: Although the example above does not lend itself to the possibility, +in general it is permitted to mix block sizes. For example, +on a row measuring eight units in length you could use red (3), grey (1), and red (4). 
+""" + + +def solution(length: int = 50) -> int: + """ + Returns the number of ways a row of the given length can be filled + + >>> solution(7) + 17 + """ + + ways_number = [1] * (length + 1) + + for row_length in range(3, length + 1): + for block_length in range(3, row_length + 1): + for block_start in range(row_length - block_length): + ways_number[row_length] += ways_number[ + row_length - block_start - block_length - 1 + ] + + ways_number[row_length] += 1 + + return ways_number[length] + + +if __name__ == "__main__": + print(f"{solution() = }") From 063a0eced918ffa58af99c9c8d2fab72ea519c59 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 7 Aug 2022 14:20:45 +0300 Subject: [PATCH 0484/1543] feat: add Project Euler problem 115 solution 1 (#6303) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_115/__init__.py | 0 project_euler/problem_115/sol1.py | 62 +++++++++++++++++++++++++++ 3 files changed, 64 insertions(+) create mode 100644 project_euler/problem_115/__init__.py create mode 100644 project_euler/problem_115/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 98b87f2fe279..b37bb35ec619 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -856,6 +856,8 @@ * [Sol1](project_euler/problem_113/sol1.py) * Problem 114 * [Sol1](project_euler/problem_114/sol1.py) + * Problem 115 + * [Sol1](project_euler/problem_115/sol1.py) * Problem 119 * [Sol1](project_euler/problem_119/sol1.py) * Problem 120 diff --git a/project_euler/problem_115/__init__.py b/project_euler/problem_115/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_115/sol1.py b/project_euler/problem_115/sol1.py new file mode 100644 index 000000000000..15a13516d54d --- /dev/null +++ b/project_euler/problem_115/sol1.py @@ -0,0 +1,62 @@ +""" +Project Euler Problem 115: https://projecteuler.net/problem=115 + +NOTE: This is a more difficult version of Problem 114 +(https://projecteuler.net/problem=114). + +A row measuring n units in length has red blocks +with a minimum length of m units placed on it, such that any two red blocks +(which are allowed to be different lengths) are separated by at least one black square. + +Let the fill-count function, F(m, n), +represent the number of ways that a row can be filled. + +For example, F(3, 29) = 673135 and F(3, 30) = 1089155. + +That is, for m = 3, it can be seen that n = 30 is the smallest value +for which the fill-count function first exceeds one million. + +In the same way, for m = 10, it can be verified that +F(10, 56) = 880711 and F(10, 57) = 1148904, so n = 57 is the least value +for which the fill-count function first exceeds one million. + +For m = 50, find the least value of n +for which the fill-count function first exceeds one million. 
+""" + +from itertools import count + + +def solution(min_block_length: int = 50) -> int: + """ + Returns for given minimum block length the least value of n + for which the fill-count function first exceeds one million + + >>> solution(3) + 30 + + >>> solution(10) + 57 + """ + + fill_count_functions = [1] * min_block_length + + for n in count(min_block_length): + fill_count_functions.append(1) + + for block_length in range(min_block_length, n + 1): + for block_start in range(n - block_length): + fill_count_functions[n] += fill_count_functions[ + n - block_start - block_length - 1 + ] + + fill_count_functions[n] += 1 + + if fill_count_functions[n] > 1_000_000: + break + + return n + + +if __name__ == "__main__": + print(f"{solution() = }") From f46ce47274e89ab52581b56fec0aefa0e844dfa7 Mon Sep 17 00:00:00 2001 From: AmirMohammad Hosseini Nasab <19665344+itsamirhn@users.noreply.github.com> Date: Fri, 12 Aug 2022 13:42:58 +0430 Subject: [PATCH 0485/1543] Add Max Fenwick Tree (#6298) * Add `MaxFenwickTree` * Reformat code style * Fix type hints * Fix type hints again * Complete docstring * Complete docstring * Fix typo in file name * Change MaxFenwickTree into 0-based indexing * Fix Bugs * Minor fix --- .../binary_tree/maximum_fenwick_tree.py | 102 ++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 data_structures/binary_tree/maximum_fenwick_tree.py diff --git a/data_structures/binary_tree/maximum_fenwick_tree.py b/data_structures/binary_tree/maximum_fenwick_tree.py new file mode 100644 index 000000000000..e90bd634d51c --- /dev/null +++ b/data_structures/binary_tree/maximum_fenwick_tree.py @@ -0,0 +1,102 @@ +class MaxFenwickTree: + """ + Maximum Fenwick Tree + + More info: https://cp-algorithms.com/data_structures/fenwick.html + --------- + >>> ft = MaxFenwickTree(5) + >>> ft.query(0, 5) + 0 + >>> ft.update(4, 100) + >>> ft.query(0, 5) + 100 + >>> ft.update(4, 0) + >>> ft.update(2, 20) + >>> ft.query(0, 5) + 20 + >>> ft.update(4, 10) + >>> ft.query(2, 5) + 10 + >>> ft.query(1, 5) + 20 + >>> ft.update(2, 0) + >>> ft.query(0, 5) + 10 + >>> ft = MaxFenwickTree(10000) + >>> ft.update(255, 30) + >>> ft.query(0, 10000) + 30 + """ + + def __init__(self, size: int) -> None: + """ + Create empty Maximum Fenwick Tree with specified size + + Parameters: + size: size of Array + + Returns: + None + """ + self.size = size + self.arr = [0] * size + self.tree = [0] * size + + @staticmethod + def get_next(index: int) -> int: + """ + Get next index in O(1) + """ + return index + (index & -index) + + @staticmethod + def get_prev(index: int) -> int: + """ + Get previous index in O(1) + """ + return index - (index & -index) + + def update(self, index: int, value: int) -> None: + """ + Set index to value in O(lg^2 N) + + Parameters: + index: index to update + value: value to set + + Returns: + None + """ + self.arr[index] = value + while index < self.size: + self.tree[index] = max(value, self.query(self.get_prev(index), index)) + index = self.get_next(index) + + def query(self, left: int, right: int) -> int: + """ + Answer the query of maximum range [l, r) in O(lg^2 N) + + Parameters: + left: left index of query range (inclusive) + right: right index of query range (exclusive) + + Returns: + Maximum value of range [left, right) + """ + right -= 1 # Because of right is exclusive + result = 0 + while left < right: + current_left = self.get_prev(right) + if left < current_left: + result = max(result, self.tree[right]) + right = current_left + else: + result = max(result, self.arr[right]) + right 
-= 1 + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f31fa4ea7e26d8721be7e966ecf92fcbd87af65f Mon Sep 17 00:00:00 2001 From: AmirMohammad Hosseini Nasab <19665344+itsamirhn@users.noreply.github.com> Date: Tue, 16 Aug 2022 22:08:33 +0430 Subject: [PATCH 0486/1543] Fenwick Tree (#6319) * Enhance fenwick_tree.py * Change update to add in fenwick_tree.py * Some changes * Fix bug * Add O(N) initializer to FenwickTree * Add get method to Fenwick Tree * Change tree in Fenwick Tree * Add rank query to FenwickTree * Add get_array method to FenwickTree * Add some tests * Update data_structures/binary_tree/fenwick_tree.py Co-authored-by: Christian Clauss * Update data_structures/binary_tree/fenwick_tree.py Co-authored-by: Christian Clauss * Update data_structures/binary_tree/fenwick_tree.py Co-authored-by: Christian Clauss * change `List` to `list` Co-authored-by: Christian Clauss --- data_structures/binary_tree/fenwick_tree.py | 263 ++++++++++++++++++-- 1 file changed, 241 insertions(+), 22 deletions(-) diff --git a/data_structures/binary_tree/fenwick_tree.py b/data_structures/binary_tree/fenwick_tree.py index 54f0f07ac68d..96020d1427af 100644 --- a/data_structures/binary_tree/fenwick_tree.py +++ b/data_structures/binary_tree/fenwick_tree.py @@ -1,28 +1,247 @@ +from copy import deepcopy + + class FenwickTree: - def __init__(self, SIZE): # create fenwick tree with size SIZE - self.Size = SIZE - self.ft = [0 for i in range(0, SIZE)] + """ + Fenwick Tree + + More info: https://en.wikipedia.org/wiki/Fenwick_tree + """ + + def __init__(self, arr: list[int] = None, size: int = None) -> None: + """ + Constructor for the Fenwick tree + + Parameters: + arr (list): list of elements to initialize the tree with (optional) + size (int): size of the Fenwick tree (if arr is None) + """ + + if arr is None and size is not None: + self.size = size + self.tree = [0] * size + elif arr is not None: + self.init(arr) + else: + raise ValueError("Either arr or size must be specified") + + def init(self, arr: list[int]) -> None: + """ + Initialize the Fenwick tree with arr in O(N) + + Parameters: + arr (list): list of elements to initialize the tree with + + Returns: + None + + >>> a = [1, 2, 3, 4, 5] + >>> f1 = FenwickTree(a) + >>> f2 = FenwickTree(size=len(a)) + >>> for index, value in enumerate(a): + ... 
f2.add(index, value) + >>> f1.tree == f2.tree + True + """ + self.size = len(arr) + self.tree = deepcopy(arr) + for i in range(1, self.size): + j = self.next(i) + if j < self.size: + self.tree[j] += self.tree[i] + + def get_array(self) -> list[int]: + """ + Get the Normal Array of the Fenwick tree in O(N) + + Returns: + list: Normal Array of the Fenwick tree + + >>> a = [i for i in range(128)] + >>> f = FenwickTree(a) + >>> f.get_array() == a + True + """ + arr = self.tree[:] + for i in range(self.size - 1, 0, -1): + j = self.next(i) + if j < self.size: + arr[j] -= arr[i] + return arr + + @staticmethod + def next(index: int) -> int: + return index + (index & (-index)) + + @staticmethod + def prev(index: int) -> int: + return index - (index & (-index)) + + def add(self, index: int, value: int) -> None: + """ + Add a value to index in O(lg N) + + Parameters: + index (int): index to add value to + value (int): value to add to index + + Returns: + None + + >>> f = FenwickTree([1, 2, 3, 4, 5]) + >>> f.add(0, 1) + >>> f.add(1, 2) + >>> f.add(2, 3) + >>> f.add(3, 4) + >>> f.add(4, 5) + >>> f.get_array() + [2, 4, 6, 8, 10] + """ + if index == 0: + self.tree[0] += value + return + while index < self.size: + self.tree[index] += value + index = self.next(index) + + def update(self, index: int, value: int) -> None: + """ + Set the value of index in O(lg N) + + Parameters: + index (int): index to set value to + value (int): value to set in index - def update(self, i, val): # update data (adding) in index i in O(lg N) - while i < self.Size: - self.ft[i] += val - i += i & (-i) + Returns: + None - def query(self, i): # query cumulative data from index 0 to i in O(lg N) - ret = 0 - while i > 0: - ret += self.ft[i] - i -= i & (-i) - return ret + >>> f = FenwickTree([5, 4, 3, 2, 1]) + >>> f.update(0, 1) + >>> f.update(1, 2) + >>> f.update(2, 3) + >>> f.update(3, 4) + >>> f.update(4, 5) + >>> f.get_array() + [1, 2, 3, 4, 5] + """ + self.add(index, value - self.get(index)) + + def prefix(self, right: int) -> int: + """ + Prefix sum of all elements in [0, right) in O(lg N) + + Parameters: + right (int): right bound of the query (exclusive) + + Returns: + int: sum of all elements in [0, right) + + >>> a = [i for i in range(128)] + >>> f = FenwickTree(a) + >>> res = True + >>> for i in range(len(a)): + ... res = res and f.prefix(i) == sum(a[:i]) + >>> res + True + """ + if right == 0: + return 0 + result = self.tree[0] + right -= 1 # make right inclusive + while right > 0: + result += self.tree[right] + right = self.prev(right) + return result + + def query(self, left: int, right: int) -> int: + """ + Query the sum of all elements in [left, right) in O(lg N) + + Parameters: + left (int): left bound of the query (inclusive) + right (int): right bound of the query (exclusive) + + Returns: + int: sum of all elements in [left, right) + + >>> a = [i for i in range(128)] + >>> f = FenwickTree(a) + >>> res = True + >>> for i in range(len(a)): + ... for j in range(i + 1, len(a)): + ... res = res and f.query(i, j) == sum(a[i:j]) + >>> res + True + """ + return self.prefix(right) - self.prefix(left) + + def get(self, index: int) -> int: + """ + Get value at index in O(lg N) + + Parameters: + index (int): index to get the value + + Returns: + int: Value of element at index + + >>> a = [i for i in range(128)] + >>> f = FenwickTree(a) + >>> res = True + >>> for i in range(len(a)): + ... 
res = res and f.get(i) == a[i] + >>> res + True + """ + return self.query(index, index + 1) + + def rank_query(self, value: int) -> int: + """ + Find the largest index with prefix(i) <= value in O(lg N) + NOTE: Requires that all values are non-negative! + + Parameters: + value (int): value to find the largest index of + + Returns: + -1: if value is smaller than all elements in prefix sum + int: largest index with prefix(i) <= value + + >>> f = FenwickTree([1, 2, 0, 3, 0, 5]) + >>> f.rank_query(0) + -1 + >>> f.rank_query(2) + 0 + >>> f.rank_query(1) + 0 + >>> f.rank_query(3) + 2 + >>> f.rank_query(5) + 2 + >>> f.rank_query(6) + 4 + >>> f.rank_query(11) + 5 + """ + value -= self.tree[0] + if value < 0: + return -1 + + j = 1 # Largest power of 2 <= size + while j * 2 < self.size: + j *= 2 + + i = 0 + + while j > 0: + if i + j < self.size and self.tree[i + j] <= value: + value -= self.tree[i + j] + i += j + j //= 2 + return i if __name__ == "__main__": - f = FenwickTree(100) - f.update(1, 20) - f.update(4, 4) - print(f.query(1)) - print(f.query(3)) - print(f.query(4)) - f.update(2, -5) - print(f.query(1)) - print(f.query(3)) + import doctest + + doctest.testmod() From b1818af5171ecf149b5a602572a7361c5e624f0d Mon Sep 17 00:00:00 2001 From: zhexuanl <63616187+zhexuanl@users.noreply.github.com> Date: Wed, 24 Aug 2022 12:48:54 +0800 Subject: [PATCH 0487/1543] Add Digital Image Processing Algorithm: Local Binary Pattern (#6294) * add algorithm local binary pattern * fix failed test for local binary pattern * updating DIRECTORY.md * fix detected precommit-error * fix precommit error * final check * Add descriptive name for parameters x and y * Update digital_image_processing/filters/local_binary_pattern.py Co-authored-by: Christian Clauss * Update digital_image_processing/filters/local_binary_pattern.py Co-authored-by: Christian Clauss * Update digital_image_processing/filters/local_binary_pattern.py Co-authored-by: Christian Clauss * Update local_binary_pattern.py * undo changes made on get_neighbors_pixel() * files formatted by black * Update digital_image_processing/filters/local_binary_pattern.py ok thanks Co-authored-by: Christian Clauss * add test for get_neighbors_pixel() function * reviewed * fix get_neighbors_pixel * Update test_digital_image_processing.py * updating DIRECTORY.md * Create code_quality.yml * Create code_quality.yml * Delete code_quality.yml * Update code_quality.yml * Delete code_quality.yml Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 2 + .../filters/local_binary_pattern.py | 81 +++++++++++++++++++ .../test_digital_image_processing.py | 32 ++++++++ 3 files changed, 115 insertions(+) create mode 100644 digital_image_processing/filters/local_binary_pattern.py diff --git a/DIRECTORY.md b/DIRECTORY.md index b37bb35ec619..a7305395a67b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -152,6 +152,7 @@ * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) + * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) * [Merge Two Binary Trees](data_structures/binary_tree/merge_two_binary_trees.py) * [Non Recursive Segment Tree](data_structures/binary_tree/non_recursive_segment_tree.py) * [Number Of Possible Binary Trees](data_structures/binary_tree/number_of_possible_binary_trees.py) @@ -229,6 +230,7 @@ * 
[Convolve](digital_image_processing/filters/convolve.py) * [Gabor Filter](digital_image_processing/filters/gabor_filter.py) * [Gaussian Filter](digital_image_processing/filters/gaussian_filter.py) + * [Local Binary Pattern](digital_image_processing/filters/local_binary_pattern.py) * [Median Filter](digital_image_processing/filters/median_filter.py) * [Sobel Filter](digital_image_processing/filters/sobel_filter.py) * Histogram Equalization diff --git a/digital_image_processing/filters/local_binary_pattern.py b/digital_image_processing/filters/local_binary_pattern.py new file mode 100644 index 000000000000..e73aa59bfa53 --- /dev/null +++ b/digital_image_processing/filters/local_binary_pattern.py @@ -0,0 +1,81 @@ +import cv2 +import numpy as np + + +def get_neighbors_pixel( + image: np.ndarray, x_coordinate: int, y_coordinate: int, center: int +) -> int: + """ + Comparing local neighborhood pixel value with threshold value of centre pixel. + Exception is required when neighborhood value of a center pixel value is null. + i.e. values present at boundaries. + + :param image: The image we're working with + :param x_coordinate: x-coordinate of the pixel + :param y_coordinate: The y coordinate of the pixel + :param center: center pixel value + :return: The value of the pixel is being returned. + """ + + try: + return int(image[x_coordinate][y_coordinate] >= center) + except (IndexError, TypeError): + return 0 + + +def local_binary_value(image: np.ndarray, x_coordinate: int, y_coordinate: int) -> int: + """ + It takes an image, an x and y coordinate, and returns the + decimal value of the local binary patternof the pixel + at that coordinate + + :param image: the image to be processed + :param x_coordinate: x coordinate of the pixel + :param y_coordinate: the y coordinate of the pixel + :return: The decimal value of the binary value of the pixels + around the center pixel. + """ + center = image[x_coordinate][y_coordinate] + powers = [1, 2, 4, 8, 16, 32, 64, 128] + + # skip get_neighbors_pixel if center is null + if center is None: + return 0 + + # Starting from the top right, assigning value to pixels clockwise + binary_values = [ + get_neighbors_pixel(image, x_coordinate - 1, y_coordinate + 1, center), + get_neighbors_pixel(image, x_coordinate, y_coordinate + 1, center), + get_neighbors_pixel(image, x_coordinate - 1, y_coordinate, center), + get_neighbors_pixel(image, x_coordinate + 1, y_coordinate + 1, center), + get_neighbors_pixel(image, x_coordinate + 1, y_coordinate, center), + get_neighbors_pixel(image, x_coordinate + 1, y_coordinate - 1, center), + get_neighbors_pixel(image, x_coordinate, y_coordinate - 1, center), + get_neighbors_pixel(image, x_coordinate - 1, y_coordinate - 1, center), + ] + + # Converting the binary value to decimal. + return sum( + binary_value * power for binary_value, power in zip(binary_values, powers) + ) + + +if __name__ == "main": + + # Reading the image and converting it to grayscale. + image = cv2.imread( + "digital_image_processing/image_data/lena.jpg", cv2.IMREAD_GRAYSCALE + ) + + # Create a numpy array as the same height and width of read image + lbp_image = np.zeros((image.shape[0], image.shape[1])) + + # Iterating through the image and calculating the + # local binary pattern value for each pixel. 
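# [Editor's note, not part of this commit: the guard a few lines above reads
# `if __name__ == "main":` rather than `if __name__ == "__main__":`, so this demo
# block never runs when the module is executed directly. The helper functions are
# still importable, and the test added below in test_digital_image_processing.py
# exercises them without relying on this block.]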
+ for i in range(0, image.shape[0]): + for j in range(0, image.shape[1]): + lbp_image[i][j] = local_binary_value(image, i, j) + + cv2.imshow("local binary pattern", lbp_image) + cv2.waitKey(0) + cv2.destroyAllWindows() diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index 40f2f7b83b6d..1f42fddf297a 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -1,6 +1,7 @@ """ PyTest's for Digital Image Processing """ +import numpy as np from cv2 import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uint8 from PIL import Image @@ -12,6 +13,7 @@ from digital_image_processing.edge_detection import canny as canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg +from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs @@ -91,3 +93,33 @@ def test_nearest_neighbour( nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200) nn.process() assert nn.output.any() + + +def test_local_binary_pattern(): + file_path: str = "digital_image_processing/image_data/lena.jpg" + + # Reading the image and converting it to grayscale. + image = imread(file_path, 0) + + # Test for get_neighbors_pixel function() return not None + x_coordinate = 0 + y_coordinate = 0 + center = image[x_coordinate][y_coordinate] + + neighbors_pixels = lbp.get_neighbors_pixel( + image, x_coordinate, y_coordinate, center + ) + + assert neighbors_pixels is not None + + # Test for local_binary_pattern function() + # Create a numpy array as the same height and width of read image + lbp_image = np.zeros((image.shape[0], image.shape[1])) + + # Iterating through the image and calculating the local binary pattern value + # for each pixel. + for i in range(0, image.shape[0]): + for j in range(0, image.shape[1]): + lbp_image[i][j] = lbp.local_binary_value(image, i, j) + + assert lbp_image.any() From cbf3c6140aafefbaef7186e0cb97d0758b1d38b2 Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Mon, 5 Sep 2022 04:51:11 +0300 Subject: [PATCH 0488/1543] add the dna algorithm (#6323) * adding the dna algorithm * following bot recommendations following bot recommendations for the indentation * following bot recommendations following bot recommendations regarding indentation [ again ] * following bot recommendations following bot recommendations regarding indentation [ again. ] * following bot recommendations following bot recommendations. 
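[Editor's note: illustration only, not part of the patch below.] The complement in the new strings/dna.py is a single character-for-character translation; stripped of the regex validation it reduces to one line of standard-library Python:

    >>> "GCTA".translate(str.maketrans("ATCG", "TAGC"))
    'CGAT'

which matches the first doctest of the added dna() function.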
--- strings/dna.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 strings/dna.py diff --git a/strings/dna.py b/strings/dna.py new file mode 100644 index 000000000000..46e271d689db --- /dev/null +++ b/strings/dna.py @@ -0,0 +1,26 @@ +import re + + +def dna(dna: str) -> str: + + """ + https://en.wikipedia.org/wiki/DNA + Returns the second side of a DNA strand + + >>> dna("GCTA") + 'CGAT' + >>> dna("ATGC") + 'TACG' + >>> dna("CTGA") + 'GACT' + >>> dna("GFGG") + 'Invalid Strand' + """ + + r = len(re.findall("[ATCG]", dna)) != len(dna) + val = dna.translate(dna.maketrans("ATCG", "TAGC")) + return "Invalid Strand" if r else val + + +if __name__ == "__main__": + __import__("doctest").testmod() From 4e4fe95369c15e62364f7d6a6bfc9464c1143dc6 Mon Sep 17 00:00:00 2001 From: Nikhil Kala Date: Fri, 9 Sep 2022 11:09:31 -0600 Subject: [PATCH 0489/1543] chore: remove the PayPal badge (#6348) --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index 0298d46020ac..c787979607ee 100644 --- a/README.md +++ b/README.md @@ -12,9 +12,6 @@ Contributions Welcome - - Donate - Discord chat From 81e30fd33c91bc37bc3baf54c42d1b192ecf41a6 Mon Sep 17 00:00:00 2001 From: C21 <31063253+C21-github@users.noreply.github.com> Date: Wed, 14 Sep 2022 13:54:55 +0530 Subject: [PATCH 0490/1543] Fix Max Fenwick Tree (#6328) --- .../binary_tree/maximum_fenwick_tree.py | 24 ++++++++++++++----- 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/data_structures/binary_tree/maximum_fenwick_tree.py b/data_structures/binary_tree/maximum_fenwick_tree.py index e90bd634d51c..84967a70cc73 100644 --- a/data_structures/binary_tree/maximum_fenwick_tree.py +++ b/data_structures/binary_tree/maximum_fenwick_tree.py @@ -16,7 +16,7 @@ class MaxFenwickTree: 20 >>> ft.update(4, 10) >>> ft.query(2, 5) - 10 + 20 >>> ft.query(1, 5) 20 >>> ft.update(2, 0) @@ -26,6 +26,14 @@ class MaxFenwickTree: >>> ft.update(255, 30) >>> ft.query(0, 10000) 30 + >>> ft = MaxFenwickTree(6) + >>> ft.update(5, 1) + >>> ft.query(5, 6) + 1 + >>> ft = MaxFenwickTree(6) + >>> ft.update(0, 1000) + >>> ft.query(0, 1) + 1000 """ def __init__(self, size: int) -> None: @@ -47,14 +55,14 @@ def get_next(index: int) -> int: """ Get next index in O(1) """ - return index + (index & -index) + return index | (index + 1) @staticmethod def get_prev(index: int) -> int: """ Get previous index in O(1) """ - return index - (index & -index) + return (index & (index + 1)) - 1 def update(self, index: int, value: int) -> None: """ @@ -69,7 +77,11 @@ def update(self, index: int, value: int) -> None: """ self.arr[index] = value while index < self.size: - self.tree[index] = max(value, self.query(self.get_prev(index), index)) + current_left_border = self.get_prev(index) + 1 + if current_left_border == index: + self.tree[index] = value + else: + self.tree[index] = max(value, current_left_border, index) index = self.get_next(index) def query(self, left: int, right: int) -> int: @@ -85,9 +97,9 @@ def query(self, left: int, right: int) -> int: """ right -= 1 # Because of right is exclusive result = 0 - while left < right: + while left <= right: current_left = self.get_prev(right) - if left < current_left: + if left <= current_left: result = max(result, self.tree[right]) right = current_left else: From 2104fa7aebe8d76b2b2b2c47fe7e2ee615a05df6 Mon Sep 17 00:00:00 2001 From: Nikos Giachoudis Date: Wed, 14 Sep 2022 11:40:04 +0300 Subject: [PATCH 0491/1543] Unify `O(sqrt(N))` `is_prime` functions under `project_euler` (#6258) * fixes 
#5434 * fixes broken solution * removes assert * removes assert * Apply suggestions from code review Co-authored-by: John Law * Update project_euler/problem_003/sol1.py Co-authored-by: John Law --- project_euler/problem_003/sol1.py | 30 ++++++++--------- project_euler/problem_007/sol1.py | 32 ++++++++++++------- project_euler/problem_007/sol2.py | 29 +++++++++++++---- project_euler/problem_007/sol3.py | 29 +++++++++++++---- project_euler/problem_010/sol1.py | 27 ++++++++++++---- project_euler/problem_010/sol2.py | 23 +++++++++++--- project_euler/problem_027/sol1.py | 41 +++++++++++++++++------- project_euler/problem_037/sol1.py | 51 ++++++++++++++++++++--------- project_euler/problem_041/sol1.py | 40 +++++++++++++++++------ project_euler/problem_046/sol1.py | 49 +++++++++++++++++++--------- project_euler/problem_049/sol1.py | 38 +++++++++++++++------- project_euler/problem_058/sol1.py | 53 ++++++++++++++++++++----------- 12 files changed, 310 insertions(+), 132 deletions(-) diff --git a/project_euler/problem_003/sol1.py b/project_euler/problem_003/sol1.py index 606a6945e4ad..a7d01bb041ba 100644 --- a/project_euler/problem_003/sol1.py +++ b/project_euler/problem_003/sol1.py @@ -13,9 +13,11 @@ import math -def is_prime(num: int) -> bool: - """ - Returns boolean representing primality of given number num. +def is_prime(number: int) -> bool: + """Checks to see if a number is a prime in O(sqrt(n)). + A number is prime if it has exactly two factors: 1 and itself. + Returns boolean representing primality of given number (i.e., if the + result is true, then the number is indeed prime else it is not). >>> is_prime(2) True @@ -26,23 +28,21 @@ def is_prime(num: int) -> bool: >>> is_prime(2999) True >>> is_prime(0) - Traceback (most recent call last): - ... - ValueError: Parameter num must be greater than or equal to two. + False >>> is_prime(1) - Traceback (most recent call last): - ... - ValueError: Parameter num must be greater than or equal to two. + False """ - if num <= 1: - raise ValueError("Parameter num must be greater than or equal to two.") - if num == 2: + if 1 < number < 4: + # 2 and 3 are primes return True - elif num % 2 == 0: + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False - for i in range(3, int(math.sqrt(num)) + 1, 2): - if num % i == 0: + + # All primes number are in format of 6k +/- 1 + for i in range(5, int(math.sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: return False return True diff --git a/project_euler/problem_007/sol1.py b/project_euler/problem_007/sol1.py index 78fbcb511611..f52ff931f9a8 100644 --- a/project_euler/problem_007/sol1.py +++ b/project_euler/problem_007/sol1.py @@ -15,29 +15,37 @@ from math import sqrt -def is_prime(num: int) -> bool: - """ - Determines whether the given number is prime or not +def is_prime(number: int) -> bool: + """Checks to see if a number is a prime in O(sqrt(n)). + A number is prime if it has exactly two factors: 1 and itself. + Returns boolean representing primality of given number (i.e., if the + result is true, then the number is indeed prime else it is not). 
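[Editor's note: annotation, not part of the committed docstring.] The "6k +/- 1" loop used throughout this patch rests on a small observation: every integer falls into one of the residue classes 0..5 modulo 6, and the classes 0, 2, 3 and 4 are divisible by 2 or 3, so any prime larger than 3 has the form 6k - 1 or 6k + 1. Trial division therefore only needs to test 2, 3 and the pairs i, i + 2 for i = 5, 11, 17, ... up to sqrt(n). A quick cross-check of that shortcut against plain trial division, with helper names of my own choosing:

    import math

    def is_prime_6k(n: int) -> bool:
        if 1 < n < 4:
            return True
        if n < 2 or n % 2 == 0 or n % 3 == 0:
            return False
        return all(n % i and n % (i + 2) for i in range(5, int(math.sqrt(n) + 1), 6))

    def is_prime_naive(n: int) -> bool:
        return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

    assert all(is_prime_6k(n) == is_prime_naive(n) for n in range(10_000))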
>>> is_prime(2) True - >>> is_prime(15) + >>> is_prime(3) + True + >>> is_prime(27) False - >>> is_prime(29) + >>> is_prime(2999) True >>> is_prime(0) False + >>> is_prime(1) + False """ - if num == 2: + if 1 < number < 4: + # 2 and 3 are primes return True - elif num % 2 == 0: + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False - else: - sq = int(sqrt(num)) + 1 - for i in range(3, sq, 2): - if num % i == 0: - return False + + # All primes number are in format of 6k +/- 1 + for i in range(5, int(sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: + return False return True diff --git a/project_euler/problem_007/sol2.py b/project_euler/problem_007/sol2.py index 44d72e9493e8..75d351889ea8 100644 --- a/project_euler/problem_007/sol2.py +++ b/project_euler/problem_007/sol2.py @@ -11,22 +11,39 @@ References: - https://en.wikipedia.org/wiki/Prime_number """ +import math def is_prime(number: int) -> bool: - """ - Determines whether the given number is prime or not + """Checks to see if a number is a prime in O(sqrt(n)). + A number is prime if it has exactly two factors: 1 and itself. + Returns boolean representing primality of given number (i.e., if the + result is true, then the number is indeed prime else it is not). >>> is_prime(2) True - >>> is_prime(15) + >>> is_prime(3) + True + >>> is_prime(27) False - >>> is_prime(29) + >>> is_prime(2999) True + >>> is_prime(0) + False + >>> is_prime(1) + False """ - for i in range(2, int(number**0.5) + 1): - if number % i == 0: + if 1 < number < 4: + # 2 and 3 are primes + return True + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes + return False + + # All primes number are in format of 6k +/- 1 + for i in range(5, int(math.sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: return False return True diff --git a/project_euler/problem_007/sol3.py b/project_euler/problem_007/sol3.py index daa719cefbda..774260db99a0 100644 --- a/project_euler/problem_007/sol3.py +++ b/project_euler/problem_007/sol3.py @@ -16,20 +16,37 @@ def is_prime(number: int) -> bool: - """ - Determines whether a given number is prime or not + """Checks to see if a number is a prime in O(sqrt(n)). + A number is prime if it has exactly two factors: 1 and itself. + Returns boolean representing primality of given number (i.e., if the + result is true, then the number is indeed prime else it is not). 
>>> is_prime(2) True - >>> is_prime(15) + >>> is_prime(3) + True + >>> is_prime(27) False - >>> is_prime(29) + >>> is_prime(2999) True + >>> is_prime(0) + False + >>> is_prime(1) + False """ - if number % 2 == 0 and number > 2: + if 1 < number < 4: + # 2 and 3 are primes + return True + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False - return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2)) + + # All primes number are in format of 6k +/- 1 + for i in range(5, int(math.sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: + return False + return True def prime_generator(): diff --git a/project_euler/problem_010/sol1.py b/project_euler/problem_010/sol1.py index e060761eecab..31f2feda3728 100644 --- a/project_euler/problem_010/sol1.py +++ b/project_euler/problem_010/sol1.py @@ -11,12 +11,14 @@ - https://en.wikipedia.org/wiki/Prime_number """ -from math import sqrt +import math -def is_prime(n: int) -> bool: - """ - Returns boolean representing primality of given number num. +def is_prime(number: int) -> bool: + """Checks to see if a number is a prime in O(sqrt(n)). + A number is prime if it has exactly two factors: 1 and itself. + Returns boolean representing primality of given number num (i.e., if the + result is true, then the number is indeed prime else it is not). >>> is_prime(2) True @@ -26,13 +28,24 @@ def is_prime(n: int) -> bool: False >>> is_prime(2999) True + >>> is_prime(0) + False + >>> is_prime(1) + False """ - if 1 < n < 4: + if 1 < number < 4: + # 2 and 3 are primes return True - elif n < 2 or not n % 2: + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False - return not any(not n % i for i in range(3, int(sqrt(n) + 1), 2)) + + # All primes number are in format of 6k +/- 1 + for i in range(5, int(math.sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: + return False + return True def solution(n: int = 2000000) -> int: diff --git a/project_euler/problem_010/sol2.py b/project_euler/problem_010/sol2.py index a288bb85fd52..245cca1d1720 100644 --- a/project_euler/problem_010/sol2.py +++ b/project_euler/problem_010/sol2.py @@ -16,8 +16,10 @@ def is_prime(number: int) -> bool: - """ - Returns boolean representing primality of given number num. + """Checks to see if a number is a prime in O(sqrt(n)). + A number is prime if it has exactly two factors: 1 and itself. + Returns boolean representing primality of given number num (i.e., if the + result is true, then the number is indeed prime else it is not). 
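[Editor's note: aside, not part of the committed change.] For Problem 10 in particular, repeated trial division, even with the 6k +/- 1 shortcut, is much slower than sieving once up to the two-million limit. A minimal Sieve of Eratosthenes sketch for comparison (the function name is mine, for illustration only):

    def sum_primes_below(limit: int) -> int:
        # mark composites once; every index >= 2 left unmarked is prime
        is_composite = [False] * limit
        total = 0
        for p in range(2, limit):
            if not is_composite[p]:
                total += p
                for multiple in range(p * p, limit, p):
                    is_composite[multiple] = True
        return total

    assert sum_primes_below(10) == 17  # 2 + 3 + 5 + 7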
>>> is_prime(2) True @@ -27,11 +29,24 @@ def is_prime(number: int) -> bool: False >>> is_prime(2999) True + >>> is_prime(0) + False + >>> is_prime(1) + False """ - if number % 2 == 0 and number > 2: + if 1 < number < 4: + # 2 and 3 are primes + return True + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False - return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2)) + + # All primes number are in format of 6k +/- 1 + for i in range(5, int(math.sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: + return False + return True def prime_generator() -> Iterator[int]: diff --git a/project_euler/problem_027/sol1.py b/project_euler/problem_027/sol1.py index 928c0ec4feb7..c93e2b4fa251 100644 --- a/project_euler/problem_027/sol1.py +++ b/project_euler/problem_027/sol1.py @@ -23,22 +23,39 @@ import math -def is_prime(k: int) -> bool: - """ - Determine if a number is prime - >>> is_prime(10) +def is_prime(number: int) -> bool: + """Checks to see if a number is a prime in O(sqrt(n)). + A number is prime if it has exactly two factors: 1 and itself. + Returns boolean representing primality of given number num (i.e., if the + result is true, then the number is indeed prime else it is not). + + >>> is_prime(2) + True + >>> is_prime(3) + True + >>> is_prime(27) False - >>> is_prime(11) + >>> is_prime(2999) True + >>> is_prime(0) + False + >>> is_prime(1) + False + >>> is_prime(-10) + False """ - if k < 2 or k % 2 == 0: - return False - elif k == 2: + + if 1 < number < 4: + # 2 and 3 are primes return True - else: - for x in range(3, int(math.sqrt(k) + 1), 2): - if k % x == 0: - return False + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes + return False + + # All primes number are in format of 6k +/- 1 + for i in range(5, int(math.sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: + return False return True diff --git a/project_euler/problem_037/sol1.py b/project_euler/problem_037/sol1.py index 0411ad41ba2f..ef7686cbcb96 100644 --- a/project_euler/problem_037/sol1.py +++ b/project_euler/problem_037/sol1.py @@ -1,4 +1,7 @@ """ +Truncatable primes +Problem 37: https://projecteuler.net/problem=37 + The number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits from left to right, and remain prime at each stage: 3797, 797, 97, and 7. Similarly we can work from right to left: 3797, 379, 37, and 3. @@ -11,28 +14,46 @@ from __future__ import annotations -seive = [True] * 1000001 -seive[1] = False -i = 2 -while i * i <= 1000000: - if seive[i]: - for j in range(i * i, 1000001, i): - seive[j] = False - i += 1 +import math -def is_prime(n: int) -> bool: - """ - Returns True if n is prime, - False otherwise, for 1 <= n <= 1000000 - >>> is_prime(87) +def is_prime(number: int) -> bool: + """Checks to see if a number is a prime in O(sqrt(n)). + + A number is prime if it has exactly two factors: 1 and itself. 
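[Editor's note: aside, not part of the committed docstring; the helper name below is my own.] The truncation step this file relies on (list_truncated_nums, further down in this diff) can be pictured directly from the 3797 example quoted above:

    def truncations(number: int) -> tuple[list[int], list[int]]:
        digits = str(number)
        left_to_right = [int(digits[i:]) for i in range(len(digits))]
        right_to_left = [int(digits[:i]) for i in range(len(digits), 0, -1)]
        return left_to_right, right_to_left

    assert truncations(3797) == ([3797, 797, 97, 7], [3797, 379, 37, 3])

A candidate is a truncatable prime exactly when every number in both lists is prime (single-digit primes being excluded by the problem's note).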
+ + >>> is_prime(0) False >>> is_prime(1) False - >>> is_prime(25363) + >>> is_prime(2) + True + >>> is_prime(3) + True + >>> is_prime(27) + False + >>> is_prime(87) + False + >>> is_prime(563) + True + >>> is_prime(2999) + True + >>> is_prime(67483) False """ - return seive[n] + + if 1 < number < 4: + # 2 and 3 are primes + return True + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes + return False + + # All primes number are in format of 6k +/- 1 + for i in range(5, int(math.sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: + return False + return True def list_truncated_nums(n: int) -> list[int]: diff --git a/project_euler/problem_041/sol1.py b/project_euler/problem_041/sol1.py index 80ef2125b82a..2ef0120684c3 100644 --- a/project_euler/problem_041/sol1.py +++ b/project_euler/problem_041/sol1.py @@ -12,25 +12,45 @@ """ from __future__ import annotations +import math from itertools import permutations -from math import sqrt -def is_prime(n: int) -> bool: - """ - Returns True if n is prime, - False otherwise. - >>> is_prime(67483) +def is_prime(number: int) -> bool: + """Checks to see if a number is a prime in O(sqrt(n)). + + A number is prime if it has exactly two factors: 1 and itself. + + >>> is_prime(0) False - >>> is_prime(563) + >>> is_prime(1) + False + >>> is_prime(2) True + >>> is_prime(3) + True + >>> is_prime(27) + False >>> is_prime(87) False + >>> is_prime(563) + True + >>> is_prime(2999) + True + >>> is_prime(67483) + False """ - if n % 2 == 0: + + if 1 < number < 4: + # 2 and 3 are primes + return True + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False - for i in range(3, int(sqrt(n) + 1), 2): - if n % i == 0: + + # All primes number are in format of 6k +/- 1 + for i in range(5, int(math.sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: return False return True diff --git a/project_euler/problem_046/sol1.py b/project_euler/problem_046/sol1.py index 550c4c7c4268..07dd9bbf84c8 100644 --- a/project_euler/problem_046/sol1.py +++ b/project_euler/problem_046/sol1.py @@ -19,30 +19,49 @@ from __future__ import annotations -seive = [True] * 100001 -i = 2 -while i * i <= 100000: - if seive[i]: - for j in range(i * i, 100001, i): - seive[j] = False - i += 1 +import math -def is_prime(n: int) -> bool: - """ - Returns True if n is prime, - False otherwise, for 2 <= n <= 100000 +def is_prime(number: int) -> bool: + """Checks to see if a number is a prime in O(sqrt(n)). + + A number is prime if it has exactly two factors: 1 and itself. 
+ + >>> is_prime(0) + False + >>> is_prime(1) + False + >>> is_prime(2) + True + >>> is_prime(3) + True + >>> is_prime(27) + False >>> is_prime(87) False - >>> is_prime(23) + >>> is_prime(563) + True + >>> is_prime(2999) True - >>> is_prime(25363) + >>> is_prime(67483) False """ - return seive[n] + + if 1 < number < 4: + # 2 and 3 are primes + return True + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes + return False + + # All primes number are in format of 6k +/- 1 + for i in range(5, int(math.sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: + return False + return True -odd_composites = [num for num in range(3, len(seive), 2) if not is_prime(num)] +odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)] def compute_nums(n: int) -> list[int]: diff --git a/project_euler/problem_049/sol1.py b/project_euler/problem_049/sol1.py index dd2ef71a38a8..5c7560cbddae 100644 --- a/project_euler/problem_049/sol1.py +++ b/project_euler/problem_049/sol1.py @@ -25,32 +25,46 @@ The bruteforce of this solution will be about 1 sec. """ +import math from itertools import permutations -from math import floor, sqrt def is_prime(number: int) -> bool: - """ - function to check whether the number is prime or not. - >>> is_prime(2) - True - >>> is_prime(6) + """Checks to see if a number is a prime in O(sqrt(n)). + + A number is prime if it has exactly two factors: 1 and itself. + + >>> is_prime(0) False >>> is_prime(1) False - >>> is_prime(-800) + >>> is_prime(2) + True + >>> is_prime(3) + True + >>> is_prime(27) + False + >>> is_prime(87) False - >>> is_prime(104729) + >>> is_prime(563) True + >>> is_prime(2999) + True + >>> is_prime(67483) + False """ - if number < 2: + if 1 < number < 4: + # 2 and 3 are primes + return True + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False - for i in range(2, floor(sqrt(number)) + 1): - if number % i == 0: + # All primes number are in format of 6k +/- 1 + for i in range(5, int(math.sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: return False - return True diff --git a/project_euler/problem_058/sol1.py b/project_euler/problem_058/sol1.py index c59b0dd71af1..6a991c58b6b8 100644 --- a/project_euler/problem_058/sol1.py +++ b/project_euler/problem_058/sol1.py @@ -33,29 +33,46 @@ count of current primes. """ -from math import isqrt +import math -def is_prime(number: int) -> int: - """ - Returns whether the given number is prime or not +def is_prime(number: int) -> bool: + """Checks to see if a number is a prime in O(sqrt(n)). + + A number is prime if it has exactly two factors: 1 and itself. 
+ + >>> is_prime(0) + False >>> is_prime(1) - 0 - >>> is_prime(17) - 1 - >>> is_prime(10000) - 0 + False + >>> is_prime(2) + True + >>> is_prime(3) + True + >>> is_prime(27) + False + >>> is_prime(87) + False + >>> is_prime(563) + True + >>> is_prime(2999) + True + >>> is_prime(67483) + False """ - if number == 1: - return 0 - - if number % 2 == 0 and number > 2: - return 0 - for i in range(3, isqrt(number) + 1, 2): - if number % i == 0: - return 0 - return 1 + if 1 < number < 4: + # 2 and 3 are primes + return True + elif number < 2 or number % 2 == 0 or number % 3 == 0: + # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes + return False + + # All primes number are in format of 6k +/- 1 + for i in range(5, int(math.sqrt(number) + 1), 6): + if number % i == 0 or number % (i + 2) == 0: + return False + return True def solution(ratio: float = 0.1) -> int: From 45d3eabeb5f22624245095abdc044422bfe5eeea Mon Sep 17 00:00:00 2001 From: Satish Mishra <36122092+ZicsX@users.noreply.github.com> Date: Wed, 14 Sep 2022 14:13:08 +0530 Subject: [PATCH 0492/1543] Add Optimized Shell Sort (#6225) * Add Optimized Shell Sort * Added return type * reformatted * added shrink_shell.py * ran directory generator * Rename shrink_shell.py to shrink_shell_sort.py Co-authored-by: John Law --- DIRECTORY.md | 2 +- sorts/shrink_shell_sort.py | 66 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 1 deletion(-) create mode 100644 sorts/shrink_shell_sort.py diff --git a/DIRECTORY.md b/DIRECTORY.md index a7305395a67b..25eb0ef0e9ca 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -446,7 +446,6 @@ * [Scoring Functions](machine_learning/scoring_functions.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) * [Similarity Search](machine_learning/similarity_search.py) - * [Support Vector Machines](machine_learning/support_vector_machines.py) * [Word Frequency Functions](machine_learning/word_frequency_functions.py) ## Maths @@ -984,6 +983,7 @@ * [Recursive Quick Sort](sorts/recursive_quick_sort.py) * [Selection Sort](sorts/selection_sort.py) * [Shell Sort](sorts/shell_sort.py) + * [Shrink Shell](sorts/shrink_shell.py) * [Slowsort](sorts/slowsort.py) * [Stooge Sort](sorts/stooge_sort.py) * [Strand Sort](sorts/strand_sort.py) diff --git a/sorts/shrink_shell_sort.py b/sorts/shrink_shell_sort.py new file mode 100644 index 000000000000..69992bfb75bc --- /dev/null +++ b/sorts/shrink_shell_sort.py @@ -0,0 +1,66 @@ +""" +This function implements the shell sort algorithm +which is slightly faster than its pure implementation. + +This shell sort is implemented using a gap, which +shrinks by a certain factor each iteration. In this +implementation, the gap is initially set to the +length of the collection. The gap is then reduced by +a certain factor (1.3) each iteration. + +For each iteration, the algorithm compares elements +that are a certain number of positions apart +(determined by the gap). If the element at the higher +position is greater than the element at the lower +position, the two elements are swapped. The process +is repeated until the gap is equal to 1. + +The reason this is more efficient is that it reduces +the number of comparisons that need to be made. By +using a smaller gap, the list is sorted more quickly. 
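[Editor's note: annotation, not part of the committed docstring.] The gap schedule described here can be made concrete by pulling the gap update out of shell_sort below (the helper name is mine):

    def gap_sequence(length: int, shrink: float = 1.3) -> list[int]:
        gaps, gap = [], length
        while gap > 1:
            gap = int(gap / shrink)  # same update used inside shell_sort
            gaps.append(gap)
        return gaps

    print(gap_sequence(100))  # [76, 58, 44, 33, 25, 19, 14, 10, 7, 5, 3, 2, 1]

Each pass insertion-sorts the elements that lie `gap` positions apart; the final pass with gap 1 is an ordinary insertion sort over a list that is already nearly ordered, which is where the saving in comparisons comes from.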
+""" + + +def shell_sort(collection: list) -> list: + """Implementation of shell sort algorithm in Python + :param collection: Some mutable ordered collection with heterogeneous + comparable items inside + :return: the same collection ordered by ascending + + >>> shell_sort([3, 2, 1]) + [1, 2, 3] + >>> shell_sort([]) + [] + >>> shell_sort([1]) + [1] + """ + + # Choose an initial gap value + gap = len(collection) + + # Set the gap value to be decreased by a factor of 1.3 + # after each iteration + shrink = 1.3 + + # Continue sorting until the gap is 1 + while gap > 1: + + # Decrease the gap value + gap = int(gap / shrink) + + # Sort the elements using insertion sort + for i in range(gap, len(collection)): + temp = collection[i] + j = i + while j >= gap and collection[j - gap] > temp: + collection[j] = collection[j - gap] + j -= gap + collection[j] = temp + + return collection + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 5754bd09ffd9a13d9e9bc3083419a22ab8ff7df6 Mon Sep 17 00:00:00 2001 From: Beksultan <96925396+Vazno@users.noreply.github.com> Date: Wed, 21 Sep 2022 21:37:38 +0600 Subject: [PATCH 0493/1543] fix typo (#6375) --- strings/is_contains_unique_chars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strings/is_contains_unique_chars.py b/strings/is_contains_unique_chars.py index fdf7a02ff43f..d6b698e769f8 100644 --- a/strings/is_contains_unique_chars.py +++ b/strings/is_contains_unique_chars.py @@ -7,7 +7,7 @@ def is_contains_unique_chars(input_str: str) -> bool: False Time complexity: O(n) - Space compexity: O(1) 19320 bytes as we are having 144697 characters in unicode + Space complexity: O(1) 19320 bytes as we are having 144697 characters in unicode """ # Each bit will represent each unicode character From 91c671ebabb3449fbbaefc2c4d959566eaed48dc Mon Sep 17 00:00:00 2001 From: Yannick Brenning <90418998+ybrenning@users.noreply.github.com> Date: Sat, 24 Sep 2022 18:46:03 +0200 Subject: [PATCH 0494/1543] Fix minor typo and add matrix dimension check (#6367) * Fix minor typo in comment * Add matrix dimension check * Add descriptive comment --- matrix/inverse_of_matrix.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/matrix/inverse_of_matrix.py b/matrix/inverse_of_matrix.py index 9deca6c3c08e..e414ee254c10 100644 --- a/matrix/inverse_of_matrix.py +++ b/matrix/inverse_of_matrix.py @@ -27,14 +27,21 @@ def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]: [[0.25, -0.5], [-0.3, 1.0]] """ - D = Decimal # An abbreviation to be conciseness + D = Decimal # An abbreviation for conciseness + + # Check if the provided matrix has 2 rows and 2 columns, since this implementation only works for 2x2 matrices + if len(matrix) != 2 or len(matrix[0]) != 2 or len(matrix[1]) != 2: + raise ValueError("Please provide a matrix of size 2x2.") + # Calculate the determinant of the matrix determinant = D(matrix[0][0]) * D(matrix[1][1]) - D(matrix[1][0]) * D(matrix[0][1]) if determinant == 0: raise ValueError("This matrix has no inverse.") + # Creates a copy of the matrix with swapped positions of the elements swapped_matrix = [[0.0, 0.0], [0.0, 0.0]] swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0] swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1] + # Calculate the inverse of the matrix return [[float(D(n) / determinant) or 0.0 for n in row] for row in swapped_matrix] From a0b0f414ae134aa1772d33bb930e5a960f9979e8 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 24 
Sep 2022 20:04:00 +0300 Subject: [PATCH 0495/1543] Add Project Euler problem 116 solution 1 (#6305) * Add solution * updating DIRECTORY.md * Fix pre-commit * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: John Law --- DIRECTORY.md | 6 ++- matrix/inverse_of_matrix.py | 3 +- project_euler/problem_116/__init__.py | 0 project_euler/problem_116/sol1.py | 64 +++++++++++++++++++++++++++ 4 files changed, 71 insertions(+), 2 deletions(-) create mode 100644 project_euler/problem_116/__init__.py create mode 100644 project_euler/problem_116/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 25eb0ef0e9ca..1d9e6eff75c6 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -446,6 +446,7 @@ * [Scoring Functions](machine_learning/scoring_functions.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) * [Similarity Search](machine_learning/similarity_search.py) + * [Support Vector Machines](machine_learning/support_vector_machines.py) * [Word Frequency Functions](machine_learning/word_frequency_functions.py) ## Maths @@ -859,6 +860,8 @@ * [Sol1](project_euler/problem_114/sol1.py) * Problem 115 * [Sol1](project_euler/problem_115/sol1.py) + * Problem 116 + * [Sol1](project_euler/problem_116/sol1.py) * Problem 119 * [Sol1](project_euler/problem_119/sol1.py) * Problem 120 @@ -983,7 +986,7 @@ * [Recursive Quick Sort](sorts/recursive_quick_sort.py) * [Selection Sort](sorts/selection_sort.py) * [Shell Sort](sorts/shell_sort.py) - * [Shrink Shell](sorts/shrink_shell.py) + * [Shrink Shell Sort](sorts/shrink_shell_sort.py) * [Slowsort](sorts/slowsort.py) * [Stooge Sort](sorts/stooge_sort.py) * [Strand Sort](sorts/strand_sort.py) @@ -1005,6 +1008,7 @@ * [Check Pangram](strings/check_pangram.py) * [Credit Card Validator](strings/credit_card_validator.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) + * [Dna](strings/dna.py) * [Frequency Finder](strings/frequency_finder.py) * [Hamming Distance](strings/hamming_distance.py) * [Indian Phone Validator](strings/indian_phone_validator.py) diff --git a/matrix/inverse_of_matrix.py b/matrix/inverse_of_matrix.py index e414ee254c10..92780e656ea1 100644 --- a/matrix/inverse_of_matrix.py +++ b/matrix/inverse_of_matrix.py @@ -29,7 +29,8 @@ def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]: D = Decimal # An abbreviation for conciseness - # Check if the provided matrix has 2 rows and 2 columns, since this implementation only works for 2x2 matrices + # Check if the provided matrix has 2 rows and 2 columns + # since this implementation only works for 2x2 matrices if len(matrix) != 2 or len(matrix[0]) != 2 or len(matrix[1]) != 2: raise ValueError("Please provide a matrix of size 2x2.") diff --git a/project_euler/problem_116/__init__.py b/project_euler/problem_116/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_116/sol1.py b/project_euler/problem_116/sol1.py new file mode 100644 index 000000000000..efa13ee3f25a --- /dev/null +++ b/project_euler/problem_116/sol1.py @@ -0,0 +1,64 @@ +""" +Project Euler Problem 116: https://projecteuler.net/problem=116 + +A row of five grey square tiles is to have a number of its tiles +replaced with coloured oblong tiles chosen +from red (length two), green (length three), or blue (length four). + +If red tiles are chosen there are exactly seven ways this can be done. 
+ + |red,red|grey|grey|grey| |grey|red,red|grey|grey| + + |grey|grey|red,red|grey| |grey|grey|grey|red,red| + + |red,red|red,red|grey| |red,red|grey|red,red| + + |grey|red,red|red,red| + +If green tiles are chosen there are three ways. + + |green,green,green|grey|grey| |grey|green,green,green|grey| + + |grey|grey|green,green,green| + +And if blue tiles are chosen there are two ways. + + |blue,blue,blue,blue|grey| |grey|blue,blue,blue,blue| + +Assuming that colours cannot be mixed there are 7 + 3 + 2 = 12 ways +of replacing the grey tiles in a row measuring five units in length. + +How many different ways can the grey tiles in a row measuring fifty units in length +be replaced if colours cannot be mixed and at least one coloured tile must be used? + +NOTE: This is related to Problem 117 (https://projecteuler.net/problem=117). +""" + + +def solution(length: int = 50) -> int: + """ + Returns the number of different ways can the grey tiles in a row + of the given length be replaced if colours cannot be mixed + and at least one coloured tile must be used + + >>> solution(5) + 12 + """ + + different_colour_ways_number = [[0] * 3 for _ in range(length + 1)] + + for row_length in range(length + 1): + for tile_length in range(2, 5): + for tile_start in range(row_length - tile_length + 1): + different_colour_ways_number[row_length][tile_length - 2] += ( + different_colour_ways_number[row_length - tile_start - tile_length][ + tile_length - 2 + ] + + 1 + ) + + return sum(different_colour_ways_number[length]) + + +if __name__ == "__main__": + print(f"{solution() = }") From a12e6941a6c90d80d4aaee5c2c013e7da0288492 Mon Sep 17 00:00:00 2001 From: Debjit Bhowal <68442560+debjit-bw@users.noreply.github.com> Date: Sat, 1 Oct 2022 17:47:15 +0530 Subject: [PATCH 0496/1543] Fix docstring (#6461) * fixed wrong algo name to radix sort * added wiki url * Added "source" in docstring * Update radix_sort.py Co-authored-by: Christian Clauss --- sorts/radix_sort.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py index e433bc507a1e..c3ff04f3d5e5 100644 --- a/sorts/radix_sort.py +++ b/sorts/radix_sort.py @@ -1,11 +1,7 @@ """ -This is a pure Python implementation of the quick sort algorithm -For doctests run following command: -python -m doctest -v radix_sort.py -or -python3 -m doctest -v radix_sort.py -For manual testing run: -python radix_sort.py +This is a pure Python implementation of the radix sort algorithm + +Source: https://en.wikipedia.org/wiki/Radix_sort """ from __future__ import annotations From 346b0a8466732c9594eb62c4c60203d87106bf69 Mon Sep 17 00:00:00 2001 From: RAHUL S H Date: Sun, 2 Oct 2022 03:50:47 -0700 Subject: [PATCH 0497/1543] Added fetch_quotes.py (#6529) * Added fetch_quotes.py fetches quotes from zenquotes.io api * Update web_programming/fetch_quotes.py Co-authored-by: rohanr18 <114707091+rohanr18@users.noreply.github.com> Co-authored-by: rohanr18 <114707091+rohanr18@users.noreply.github.com> --- web_programming/fetch_quotes.py | 34 +++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 web_programming/fetch_quotes.py diff --git a/web_programming/fetch_quotes.py b/web_programming/fetch_quotes.py new file mode 100644 index 000000000000..4a3b002e515f --- /dev/null +++ b/web_programming/fetch_quotes.py @@ -0,0 +1,34 @@ +""" +This file fetches quotes from the " ZenQuotes API ". +It does not require any API key as it uses free tier. 
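[Editor's note: sketch only, not part of the commit; the function name, the timeout value and the parametrised endpoint are my own illustration.] The two helpers defined below differ only in the URL path, so they could share one request routine, and passing a timeout keeps a slow connection from hanging the caller:

    import requests

    def zen_quotes(endpoint: str = "today") -> list:
        # endpoint is "today" or "random", mirroring quote_of_the_day() and random_quotes()
        return requests.get(f"https://zenquotes.io/api/{endpoint}/", timeout=10).json()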
+ +For more details and premium features visit: + https://zenquotes.io/ +""" + +import pprint + +import requests + + +def quote_of_the_day() -> list: + API_ENDPOINT_URL = "https://zenquotes.io/api/today/" + return requests.get(API_ENDPOINT_URL).json() + + +def random_quotes() -> list: + API_ENDPOINT_URL = "https://zenquotes.io/api/random/" + return requests.get(API_ENDPOINT_URL).json() + + +if __name__ == "__main__": + """ + response object has all the info with the quote + To retrieve the actual quote access the response.json() object as below + response.json() is a list of json object + response.json()[0]['q'] = actual quote. + response.json()[0]['a'] = author name. + response.json()[0]['h'] = in html format. + """ + response = random_quotes() + pprint.pprint(response) From cabd8c63825fcd1b35fdd621ba443f31d0fb880d Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Sun, 2 Oct 2022 17:49:30 +0530 Subject: [PATCH 0498/1543] feat: basic issue forms (#6533) --- .github/ISSUE_TEMPLATE/bug_report.yaml | 54 +++++++++++++++++++++ .github/ISSUE_TEMPLATE/config.yaml | 5 ++ .github/ISSUE_TEMPLATE/feature_request.yaml | 26 ++++++++++ 3 files changed, 85 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yaml create mode 100644 .github/ISSUE_TEMPLATE/config.yaml create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yaml diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml new file mode 100644 index 000000000000..6b3a5222f0eb --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yaml @@ -0,0 +1,54 @@ +name: Bug report +description: Create a bug report to help us address errors in the repository +labels: [bug] +body: + - type: markdown + attributes: + value: | + Before requesting please search [existing issues](https://github.com/TheAlgorithms/Python/labels/bug). + Usage questions such as "How do I...?" belong on the + [Discord](https://discord.gg/c7MnfGFGa6) and will be closed. + + - type: input + attributes: + label: "Repository commit" + description: | + The commit hash for `TheAlgorithms/Python` repository. You can get this + by running the command `git rev-parse HEAD` locally. + placeholder: "a0b0f414ae134aa1772d33bb930e5a960f9979e8" + validations: + required: true + + - type: input + attributes: + label: "Python version (python --version)" + placeholder: "Python 3.10.7" + validations: + required: true + + - type: input + attributes: + label: "Dependencies version (pip freeze)" + description: | + This is the output of the command `pip freeze --all`. Note that the + actual output might be different as compared to the placeholder text. + placeholder: | + appnope==0.1.3 + asttokens==2.0.8 + backcall==0.2.0 + ... + validations: + required: true + + - type: textarea + attributes: + label: "Expected behavior" + description: "Describe the behavior you expect. May include images or videos." + validations: + required: true + + - type: textarea + attributes: + label: "Actual behavior" + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/config.yaml b/.github/ISSUE_TEMPLATE/config.yaml new file mode 100644 index 000000000000..62019bb08938 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yaml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - name: Discord community + url: https://discord.gg/c7MnfGFGa6 + about: Have any questions or need any help? 
Please contact us via Discord diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml new file mode 100644 index 000000000000..7d6e221e32bd --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yaml @@ -0,0 +1,26 @@ +name: Feature request +description: Suggest features, propose improvements, discuss new ideas. +labels: [enhancement] +body: + - type: markdown + attributes: + value: | + Before requesting please search [existing issues](https://github.com/TheAlgorithms/Python/labels/enhancement). + Usage questions such as "How do I...?" belong on the + [Discord](https://discord.gg/c7MnfGFGa6) and will be closed. + + - type: textarea + attributes: + label: "Feature description" + description: | + This could be new algorithms, data structures or improving any existing + implementations. + validations: + required: true + + - type: checkboxes + attributes: + label: Would you like to work on this feature? + options: + - label: Yes, I want to work on this feature! + required: false From c9f1d09e1a82ec25ebd259e108b6b85046212a6e Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Sun, 2 Oct 2022 18:51:53 +0530 Subject: [PATCH 0499/1543] fix: GitHub requires `.yml` extension (#6542) * fix: GitHub requires `.yml` extension Ref: https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository#configuring-the-template-chooser * fix: remove newlines from multiline string * fix: use textarea for dependencies list input --- .../ISSUE_TEMPLATE/{bug_report.yaml => bug_report.yml} | 8 ++++---- .github/ISSUE_TEMPLATE/{config.yaml => config.yml} | 0 .../{feature_request.yaml => feature_request.yml} | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) rename .github/ISSUE_TEMPLATE/{bug_report.yaml => bug_report.yml} (95%) rename .github/ISSUE_TEMPLATE/{config.yaml => config.yml} (100%) rename .github/ISSUE_TEMPLATE/{feature_request.yaml => feature_request.yml} (95%) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yml similarity index 95% rename from .github/ISSUE_TEMPLATE/bug_report.yaml rename to .github/ISSUE_TEMPLATE/bug_report.yml index 6b3a5222f0eb..4ccdb52cad24 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -4,7 +4,7 @@ labels: [bug] body: - type: markdown attributes: - value: | + value: > Before requesting please search [existing issues](https://github.com/TheAlgorithms/Python/labels/bug). Usage questions such as "How do I...?" belong on the [Discord](https://discord.gg/c7MnfGFGa6) and will be closed. @@ -12,7 +12,7 @@ body: - type: input attributes: label: "Repository commit" - description: | + description: > The commit hash for `TheAlgorithms/Python` repository. You can get this by running the command `git rev-parse HEAD` locally. placeholder: "a0b0f414ae134aa1772d33bb930e5a960f9979e8" @@ -26,10 +26,10 @@ body: validations: required: true - - type: input + - type: textarea attributes: label: "Dependencies version (pip freeze)" - description: | + description: > This is the output of the command `pip freeze --all`. Note that the actual output might be different as compared to the placeholder text. 
placeholder: | diff --git a/.github/ISSUE_TEMPLATE/config.yaml b/.github/ISSUE_TEMPLATE/config.yml similarity index 100% rename from .github/ISSUE_TEMPLATE/config.yaml rename to .github/ISSUE_TEMPLATE/config.yml diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yml similarity index 95% rename from .github/ISSUE_TEMPLATE/feature_request.yaml rename to .github/ISSUE_TEMPLATE/feature_request.yml index 7d6e221e32bd..bed3e8ab54ae 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yaml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -4,7 +4,7 @@ labels: [enhancement] body: - type: markdown attributes: - value: | + value: > Before requesting please search [existing issues](https://github.com/TheAlgorithms/Python/labels/enhancement). Usage questions such as "How do I...?" belong on the [Discord](https://discord.gg/c7MnfGFGa6) and will be closed. @@ -12,7 +12,7 @@ body: - type: textarea attributes: label: "Feature description" - description: | + description: > This could be new algorithms, data structures or improving any existing implementations. validations: From 072312bd0a4a2b48cc44e44a55c41ae408e8d9c1 Mon Sep 17 00:00:00 2001 From: Jay Gala <57001778+jaygala223@users.noreply.github.com> Date: Sun, 2 Oct 2022 20:19:49 +0530 Subject: [PATCH 0500/1543] Added code for Maximum Subarray Sum (#6536) * Added maximum subarray sum #6519 * fixes: #6519 function names changed as per naming conventions --- other/maximum_subarray.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 other/maximum_subarray.py diff --git a/other/maximum_subarray.py b/other/maximum_subarray.py new file mode 100644 index 000000000000..756e009444fe --- /dev/null +++ b/other/maximum_subarray.py @@ -0,0 +1,26 @@ +def max_subarray(nums: list[int]) -> int: + """ + Returns the subarray with maximum sum + >>> max_subarray([1,2,3,4,-2]) + 10 + >>> max_subarray([-2,1,-3,4,-1,2,1,-5,4]) + 6 + """ + + curr_max = ans = nums[0] + + for i in range(1, len(nums)): + if curr_max >= 0: + curr_max = curr_max + nums[i] + else: + curr_max = nums[i] + + ans = max(curr_max, ans) + + return ans + + +if __name__ == "__main__": + n = int(input("Enter number of elements : ").strip()) + array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n] + print(max_subarray(array)) From 50545d10c55859e3a3d792132ca6769f219bb130 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jerrit=20Gl=C3=A4sker?= Date: Sun, 2 Oct 2022 16:57:11 +0200 Subject: [PATCH 0501/1543] Run length encoding (#6492) * Removed unused commit * Added wikipedia url * Renamed parameter, changed decoding to use list comprehension --- compression/run_length_encoding.py | 48 ++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 compression/run_length_encoding.py diff --git a/compression/run_length_encoding.py b/compression/run_length_encoding.py new file mode 100644 index 000000000000..691e19095dc6 --- /dev/null +++ b/compression/run_length_encoding.py @@ -0,0 +1,48 @@ +# https://en.wikipedia.org/wiki/Run-length_encoding + + +def run_length_encode(text: str) -> list: + """ + Performs Run Length Encoding + >>> run_length_encode("AAAABBBCCDAA") + [('A', 4), ('B', 3), ('C', 2), ('D', 1), ('A', 2)] + >>> run_length_encode("A") + [('A', 1)] + >>> run_length_encode("AA") + [('A', 2)] + >>> run_length_encode("AAADDDDDDFFFCCCAAVVVV") + [('A', 3), ('D', 6), ('F', 3), ('C', 3), ('A', 2), ('V', 4)] + """ + encoded = [] + count = 1 + + for i in range(len(text)): + if i + 1 < len(text) and 
text[i] == text[i + 1]: + count += 1 + else: + encoded.append((text[i], count)) + count = 1 + + return encoded + + +def run_length_decode(encoded: list) -> str: + """ + Performs Run Length Decoding + >>> run_length_decode([('A', 4), ('B', 3), ('C', 2), ('D', 1), ('A', 2)]) + 'AAAABBBCCDAA' + >>> run_length_decode([('A', 1)]) + 'A' + >>> run_length_decode([('A', 2)]) + 'AA' + >>> run_length_decode([('A', 3), ('D', 6), ('F', 3), ('C', 3), ('A', 2), ('V', 4)]) + 'AAADDDDDDFFFCCCAAVVVV' + """ + return "".join(char * length for char, length in encoded) + + +if __name__ == "__main__": + from doctest import testmod + + testmod(name="run_length_encode", verbose=True) + testmod(name="run_length_decode", verbose=True) From 8b8fba34594764cbd8d834337f03d6e03b108964 Mon Sep 17 00:00:00 2001 From: Daniel Pustotin Date: Sun, 2 Oct 2022 19:35:02 +0300 Subject: [PATCH 0502/1543] Improve code complexity for segmented sieve (#6372) --- maths/segmented_sieve.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/maths/segmented_sieve.py b/maths/segmented_sieve.py index b15ec2480678..0054b0595be5 100644 --- a/maths/segmented_sieve.py +++ b/maths/segmented_sieve.py @@ -15,15 +15,12 @@ def sieve(n): if temp[start] is True: in_prime.append(start) for i in range(start * start, end + 1, start): - if temp[i] is True: - temp[i] = False + temp[i] = False start += 1 prime += in_prime low = end + 1 - high = low + end - 1 - if high > n: - high = n + high = min(2 * end, n) while low <= n: temp = [True] * (high - low + 1) @@ -41,9 +38,7 @@ def sieve(n): prime.append(j + low) low = high + 1 - high = low + end - 1 - if high > n: - high = n + high = min(high + end, n) return prime From f42b2b8dff3fc9463d73072a2968594f4eda383b Mon Sep 17 00:00:00 2001 From: Saksham1970 <45041294+Saksham1970@users.noreply.github.com> Date: Sun, 2 Oct 2022 23:21:04 +0530 Subject: [PATCH 0503/1543] Newton raphson complex (#6545) * Newton raphson better implementation * flake8 test passed * Update arithmetic_analysis/newton_raphson_new.py Co-authored-by: Christian Clauss * added multiline suggestions Co-authored-by: Christian Clauss --- arithmetic_analysis/newton_raphson_new.py | 84 +++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 arithmetic_analysis/newton_raphson_new.py diff --git a/arithmetic_analysis/newton_raphson_new.py b/arithmetic_analysis/newton_raphson_new.py new file mode 100644 index 000000000000..19ea4ce21806 --- /dev/null +++ b/arithmetic_analysis/newton_raphson_new.py @@ -0,0 +1,84 @@ +# Implementing Newton Raphson method in Python +# Author: Saksham Gupta +# +# The Newton-Raphson method (also known as Newton's method) is a way to +# quickly find a good approximation for the root of a functreal-valued ion +# The method can also be extended to complex functions +# +# Newton's Method - https://en.wikipedia.org/wiki/Newton's_method + +from sympy import diff, lambdify, symbols +from sympy.functions import * # noqa: F401, F403 + + +def newton_raphson( + function: str, + starting_point: complex, + variable: str = "x", + precision: float = 10**-10, + multiplicity: int = 1, +) -> complex: + """Finds root from the 'starting_point' onwards by Newton-Raphson method + Refer to https://docs.sympy.org/latest/modules/functions/index.html + for usable mathematical functions + + >>> newton_raphson("sin(x)", 2) + 3.141592653589793 + >>> newton_raphson("x**4 -5", 0.4 + 5j) + (-7.52316384526264e-37+1.4953487812212207j) + >>> newton_raphson('log(y) - 1', 2, variable='y') + 2.7182818284590455 + >>> 
newton_raphson('exp(x) - 1', 10, precision=0.005) + 1.2186556186174883e-10 + >>> newton_raphson('cos(x)', 0) + Traceback (most recent call last): + ... + ZeroDivisionError: Could not find root + """ + + x = symbols(variable) + func = lambdify(x, function) + diff_function = lambdify(x, diff(function, x)) + + prev_guess = starting_point + + while True: + if diff_function(prev_guess) != 0: + next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function( + prev_guess + ) + else: + raise ZeroDivisionError("Could not find root") from None + + # Precision is checked by comparing the difference of consecutive guesses + if abs(next_guess - prev_guess) < precision: + return next_guess + + prev_guess = next_guess + + +# Let's Execute +if __name__ == "__main__": + + # Find root of trigonometric function + # Find value of pi + print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") + + # Find root of polynomial + # Find fourth Root of 5 + print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}") + + # Find value of e + print( + "The root of log(y) - 1 = 0 is ", + f"{newton_raphson('log(y) - 1', 2, variable='y')}", + ) + + # Exponential Roots + print( + "The root of exp(x) - 1 = 0 is", + f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}", + ) + + # Find root of cos(x) + print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}") From 3d33b36e92c2edd1afd542b59d157ad4bccd4bf6 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 2 Oct 2022 21:59:17 +0200 Subject: [PATCH 0504/1543] Fix pre-commit.ci: additional_dependencies: [types-requests] (#6559) * Fix pre-commit.ci: additional_dependencies: [types-requests==2.28.11] * updating DIRECTORY.md * Update .pre-commit-config.yaml * additional_dependencies: [types-requests] Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 1 + DIRECTORY.md | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7ff7459978e6..325063c3b8a5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -49,6 +49,7 @@ repos: - --ignore-missing-imports - --install-types # See mirrors-mypy README.md - --non-interactive + additional_dependencies: [types-requests] - repo: https://github.com/codespell-project/codespell rev: v2.1.0 diff --git a/DIRECTORY.md b/DIRECTORY.md index 1d9e6eff75c6..64e9d5333a2f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -9,6 +9,7 @@ * [Newton Forward Interpolation](arithmetic_analysis/newton_forward_interpolation.py) * [Newton Method](arithmetic_analysis/newton_method.py) * [Newton Raphson](arithmetic_analysis/newton_raphson.py) + * [Newton Raphson New](arithmetic_analysis/newton_raphson_new.py) * [Secant Method](arithmetic_analysis/secant_method.py) ## Audio Filters @@ -107,6 +108,7 @@ * [Lempel Ziv](compression/lempel_ziv.py) * [Lempel Ziv Decompress](compression/lempel_ziv_decompress.py) * [Peak Signal To Noise Ratio](compression/peak_signal_to_noise_ratio.py) + * [Run Length Encoding](compression/run_length_encoding.py) ## Computer Vision * [Cnn Classification](computer_vision/cnn_classification.py) @@ -621,6 +623,7 @@ * [Linear Congruential Generator](other/linear_congruential_generator.py) * [Lru Cache](other/lru_cache.py) * [Magicdiamondpattern](other/magicdiamondpattern.py) + * [Maximum Subarray](other/maximum_subarray.py) * [Nested Brackets](other/nested_brackets.py) * [Password Generator](other/password_generator.py) * [Scoring Algorithm](other/scoring_algorithm.py) @@ -1053,6 
+1056,7 @@ * [Fetch Bbc News](web_programming/fetch_bbc_news.py) * [Fetch Github Info](web_programming/fetch_github_info.py) * [Fetch Jobs](web_programming/fetch_jobs.py) + * [Fetch Quotes](web_programming/fetch_quotes.py) * [Fetch Well Rx Price](web_programming/fetch_well_rx_price.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) From 707809b0006a76210bc3d0e4312ff5c73ff68300 Mon Sep 17 00:00:00 2001 From: AHTESHAM ZAIDI Date: Mon, 3 Oct 2022 03:25:24 +0530 Subject: [PATCH 0505/1543] Update astar.py (#6456) * Update astar.py Improved comments added punctuations. * Update astar.py * Update machine_learning/astar.py Co-authored-by: Caeden * Update astar.py Co-authored-by: Christian Clauss Co-authored-by: Caeden --- machine_learning/astar.py | 55 ++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 29 deletions(-) diff --git a/machine_learning/astar.py b/machine_learning/astar.py index ee3fcff0b7bf..7a60ed225a2d 100644 --- a/machine_learning/astar.py +++ b/machine_learning/astar.py @@ -1,41 +1,38 @@ """ -The A* algorithm combines features of uniform-cost search and pure -heuristic search to efficiently compute optimal solutions. -A* algorithm is a best-first search algorithm in which the cost -associated with a node is f(n) = g(n) + h(n), -where g(n) is the cost of the path from the initial state to node n and -h(n) is the heuristic estimate or the cost or a path -from node n to a goal.A* algorithm introduces a heuristic into a -regular graph-searching algorithm, -essentially planning ahead at each step so a more optimal decision -is made.A* also known as the algorithm with brains +The A* algorithm combines features of uniform-cost search and pure heuristic search to +efficiently compute optimal solutions. + +The A* algorithm is a best-first search algorithm in which the cost associated with a +node is f(n) = g(n) + h(n), where g(n) is the cost of the path from the initial state to +node n and h(n) is the heuristic estimate or the cost or a path from node n to a goal. + +The A* algorithm introduces a heuristic into a regular graph-searching algorithm, +essentially planning ahead at each step so a more optimal decision is made. For this +reason, A* is known as an algorithm with brains. + +https://en.wikipedia.org/wiki/A*_search_algorithm """ import numpy as np class Cell: """ - Class cell represents a cell in the world which have the property - position : The position of the represented by tupleof x and y - coordinates initially set to (0,0) - parent : This contains the parent cell object which we visited - before arrinving this cell - g,h,f : The parameters for constructing the heuristic function - which can be any function. for simplicity used line - distance + Class cell represents a cell in the world which have the properties: + position: represented by tuple of x and y coordinates initially set to (0,0). + parent: Contains the parent cell object visited before we arrived at this cell. + g, h, f: Parameters used when calling our heuristic function. """ def __init__(self): self.position = (0, 0) self.parent = None - self.g = 0 self.h = 0 self.f = 0 """ - overrides equals method because otherwise cell assign will give - wrong results + Overrides equals method because otherwise cell assign will give + wrong results. 
""" def __eq__(self, cell): @@ -48,8 +45,8 @@ def showcell(self): class Gridworld: """ Gridworld class represents the external world here a grid M*M - matrix - world_size: create a numpy array with the given world_size default is 5 + matrix. + world_size: create a numpy array with the given world_size default is 5. """ def __init__(self, world_size=(5, 5)): @@ -90,10 +87,10 @@ def get_neigbours(self, cell): def astar(world, start, goal): """ - Implementation of a start algorithm - world : Object of the world object - start : Object of the cell as start position - stop : Object of the cell as goal position + Implementation of a start algorithm. + world : Object of the world object. + start : Object of the cell as start position. + stop : Object of the cell as goal position. >>> p = Gridworld() >>> start = Cell() @@ -137,14 +134,14 @@ def astar(world, start, goal): if __name__ == "__main__": world = Gridworld() - # stat position and Goal + # Start position and goal start = Cell() start.position = (0, 0) goal = Cell() goal.position = (4, 4) print(f"path from {start.position} to {goal.position}") s = astar(world, start, goal) - # Just for visual reasons + # Just for visual reasons. for i in s: world.w[i] = 1 print(world.w) From e9862adafce9eb682cabcf8ac502893e0272ae65 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 3 Oct 2022 03:27:14 +0200 Subject: [PATCH 0506/1543] chore: remove pre-commit GHA (#6565) [`pre-commit.ci` is working](https://results.pre-commit.ci/repo/github/63476337) so let's remove our redundant and less powerful GitHub Action. --- .github/workflows/pre-commit.yml | 28 ---------------------------- 1 file changed, 28 deletions(-) delete mode 100644 .github/workflows/pre-commit.yml diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml deleted file mode 100644 index eb5e3d4ce1cd..000000000000 --- a/.github/workflows/pre-commit.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: pre-commit - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -jobs: - pre-commit: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/cache@v3 - with: - path: | - ~/.cache/pre-commit - ~/.cache/pip - key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }} - - uses: actions/setup-python@v4 - with: - python-version: 3.x - # - uses: psf/black@22.6.0 - - name: Install pre-commit - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade pre-commit - - run: pre-commit run --verbose --all-files --show-diff-on-failure From 756bb268eb22199534fc8d6478cf0e006f02b56b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 3 Oct 2022 22:00:45 +0200 Subject: [PATCH 0507/1543] [pre-commit.ci] pre-commit autoupdate (#6629) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/psf/black: 22.6.0 → 22.8.0](https://github.com/psf/black/compare/22.6.0...22.8.0) - [github.com/asottile/pyupgrade: v2.37.0 → v2.38.2](https://github.com/asottile/pyupgrade/compare/v2.37.0...v2.38.2) - https://gitlab.com/pycqa/flake8 → https://github.com/PyCQA/flake8 - [github.com/PyCQA/flake8: 3.9.2 → 5.0.4](https://github.com/PyCQA/flake8/compare/3.9.2...5.0.4) - [github.com/pre-commit/mirrors-mypy: v0.961 → v0.981](https://github.com/pre-commit/mirrors-mypy/compare/v0.961...v0.981) - [github.com/codespell-project/codespell: v2.1.0 → 
v2.2.1](https://github.com/codespell-project/codespell/compare/v2.1.0...v2.2.1) * Fix a long line * Update sol1.py * Update sol1.py * lambda_ * Update multi_level_feedback_queue.py * Update double_ended_queue.py * Update sequential_minimum_optimization.py * Update .pre-commit-config.yaml Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 14 +++++++------- data_structures/queue/double_ended_queue.py | 2 +- linear_algebra/src/power_iteration.py | 12 ++++++------ .../sequential_minimum_optimization.py | 2 +- project_euler/problem_045/sol1.py | 2 +- project_euler/problem_113/sol1.py | 2 +- scheduling/multi_level_feedback_queue.py | 2 +- .../download_images_from_google_query.py | 3 ++- 8 files changed, 20 insertions(+), 19 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 325063c3b8a5..a2fcf12c9bbd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,7 @@ repos: - id: requirements-txt-fixer - repo: https://github.com/psf/black - rev: 22.6.0 + rev: 22.8.0 hooks: - id: black @@ -26,14 +26,14 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v2.37.0 + rev: v2.38.2 hooks: - id: pyupgrade args: - --py310-plus - - repo: https://gitlab.com/pycqa/flake8 - rev: 3.9.2 + - repo: https://github.com/PyCQA/flake8 + rev: 5.0.4 hooks: - id: flake8 args: @@ -42,7 +42,7 @@ repos: - --max-line-length=88 - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.961 + rev: v0.981 hooks: - id: mypy args: @@ -52,11 +52,11 @@ repos: additional_dependencies: [types-requests] - repo: https://github.com/codespell-project/codespell - rev: v2.1.0 + rev: v2.2.1 hooks: - id: codespell args: - - --ignore-words-list=ans,crate,fo,followings,hist,iff,mater,secant,som,sur,tim + - --ignore-words-list=ans,crate,damon,fo,followings,hist,iff,mater,secant,som,sur,tim,zar - --skip="./.*,./strings/dictionary.txt,./strings/words.txt,./project_euler/problem_022/p022_names.txt" exclude: | (?x)^( diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 1603e50bc7f2..f38874788df1 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -377,7 +377,7 @@ def __eq__(self, other: object) -> bool: me = self._front oth = other._front - # if the length of the deques are not the same, they are not equal + # if the length of the dequeues are not the same, they are not equal if len(self) != len(other): return False diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py index 4c6525b6e4af..4b866331b8e3 100644 --- a/linear_algebra/src/power_iteration.py +++ b/linear_algebra/src/power_iteration.py @@ -52,7 +52,7 @@ def power_iteration( # or when we have small changes from one iteration to next. convergence = False - lamda_previous = 0 + lambda_previous = 0 iterations = 0 error = 1e12 @@ -64,21 +64,21 @@ def power_iteration( # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) vectorH = vector.conj().T if is_complex else vector.T - lamda = np.dot(vectorH, np.dot(input_matrix, vector)) + lambda_ = np.dot(vectorH, np.dot(input_matrix, vector)) # Check convergence. 
- error = np.abs(lamda - lamda_previous) / lamda + error = np.abs(lambda_ - lambda_previous) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: convergence = True - lamda_previous = lamda + lambda_previous = lambda_ if is_complex: - lamda = np.real(lamda) + lambda_ = np.real(lambda_) - return lamda, vector + return lambda_, vector def test_power_iteration() -> None: diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index c217a370a975..cc7868d0fd8e 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -145,7 +145,7 @@ def fit(self): if self._is_unbound(i2): self._error[i2] = 0 - # Predict test samles + # Predict test samples def predict(self, test_samples, classify=True): if test_samples.shape[1] > self.samples.shape[1]: diff --git a/project_euler/problem_045/sol1.py b/project_euler/problem_045/sol1.py index cdf5c14cf362..d921b2802c2d 100644 --- a/project_euler/problem_045/sol1.py +++ b/project_euler/problem_045/sol1.py @@ -8,7 +8,7 @@ It can be verified that T(285) = P(165) = H(143) = 40755. Find the next triangle number that is also pentagonal and hexagonal. -All trinagle numbers are hexagonal numbers. +All triangle numbers are hexagonal numbers. T(2n-1) = n * (2 * n - 1) = H(n) So we shall check only for hexagonal numbers which are also pentagonal. """ diff --git a/project_euler/problem_113/sol1.py b/project_euler/problem_113/sol1.py index 951d9b49c104..2077c0fa62f3 100644 --- a/project_euler/problem_113/sol1.py +++ b/project_euler/problem_113/sol1.py @@ -62,7 +62,7 @@ def non_bouncy_upto(n: int) -> int: def solution(num_digits: int = 100) -> int: """ - Caclulate the number of non-bouncy numbers less than a googol. + Calculate the number of non-bouncy numbers less than a googol. >>> solution(6) 12951 >>> solution(10) diff --git a/scheduling/multi_level_feedback_queue.py b/scheduling/multi_level_feedback_queue.py index 95ca827e062d..b54cc8719039 100644 --- a/scheduling/multi_level_feedback_queue.py +++ b/scheduling/multi_level_feedback_queue.py @@ -307,6 +307,6 @@ def multi_level_feedback_queue(self) -> deque[Process]: ) # print sequence of finished processes print( - f"sequnece of finished processes:\ + f"sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}" ) diff --git a/web_programming/download_images_from_google_query.py b/web_programming/download_images_from_google_query.py index b11a7f883085..9c0c21dc804e 100644 --- a/web_programming/download_images_from_google_query.py +++ b/web_programming/download_images_from_google_query.py @@ -14,7 +14,8 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int: - """Searches google using the provided query term and downloads the images in a folder. + """ + Searches google using the provided query term and downloads the images in a folder. Args: query : The image search term to be provided by the user. 
Defaults to From fa49e27d22d57db01994e94d2d5391b8d52c79ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9rome=20Eertmans?= Date: Tue, 4 Oct 2022 12:25:23 +0200 Subject: [PATCH 0508/1543] fix: remove non-existing user from CODEOWNERS (#6648) Removes user @mateuszz0000 that does not exist, or seems to have been renamed to @L3str4nge --- .github/CODEOWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 260b9704eda7..fdce879f80c4 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -31,7 +31,7 @@ # /data_structures/ @cclauss # TODO: Uncomment this line after Hacktoberfest -/digital_image_processing/ @mateuszz0000 +# /digital_image_processing/ # /divide_and_conquer/ @@ -79,7 +79,7 @@ # /searches/ -/sorts/ @mateuszz0000 +# /sorts/ # /strings/ @cclauss # TODO: Uncomment this line after Hacktoberfest From a84fb58271b1d42da300ccad54ee8391a518a5bb Mon Sep 17 00:00:00 2001 From: Tarun Jain <66197713+lucifertrj@users.noreply.github.com> Date: Tue, 4 Oct 2022 22:10:53 +0530 Subject: [PATCH 0509/1543] Discord Server invite (#6663) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c9525aa4080e..b5a07af100ee 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -176,7 +176,7 @@ We want your work to be readable by others; therefore, we encourage you to note - Most importantly, - __Be consistent in the use of these guidelines when submitting.__ - - __Join__ [Gitter](https://gitter.im/TheAlgorithms) __now!__ + - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms) __now!__ - Happy coding! Writer [@poyea](https://github.com/poyea), Jun 2019. From 46842e8c5b5fc78ced0f38206560deb2b8160a54 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj <114707091+rohanr18@users.noreply.github.com> Date: Tue, 4 Oct 2022 23:35:56 +0530 Subject: [PATCH 0510/1543] Add missing type hints in `matrix` directory (#6612) * Update count_islands_in_matrix.py * Update matrix_class.py * Update matrix_operation.py * Update nth_fibonacci_using_matrix_exponentiation.py * Update searching_in_sorted_matrix.py * Update count_islands_in_matrix.py * Update matrix_class.py * Update matrix_operation.py * Update rotate_matrix.py * Update sherman_morrison.py * Update spiral_print.py * Update count_islands_in_matrix.py * formatting * formatting * Update matrix_class.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- matrix/count_islands_in_matrix.py | 7 +- matrix/matrix_class.py | 80 ++++++++++--------- matrix/matrix_operation.py | 34 ++++---- ...h_fibonacci_using_matrix_exponentiation.py | 10 +-- matrix/rotate_matrix.py | 16 ++-- matrix/searching_in_sorted_matrix.py | 4 +- matrix/sherman_morrison.py | 46 +++++------ matrix/spiral_print.py | 64 +++++++++------ 8 files changed, 142 insertions(+), 119 deletions(-) diff --git a/matrix/count_islands_in_matrix.py b/matrix/count_islands_in_matrix.py index ad9c67fb8c1b..00f9e14362b2 100644 --- a/matrix/count_islands_in_matrix.py +++ b/matrix/count_islands_in_matrix.py @@ -4,12 +4,12 @@ class matrix: # Public class to implement a graph - def __init__(self, row: int, col: int, graph: list): + def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None: self.ROW = row self.COL = col self.graph = graph - def is_safe(self, i, j, visited) -> bool: + 
def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool: return ( 0 <= i < self.ROW and 0 <= j < self.COL @@ -17,7 +17,8 @@ def is_safe(self, i, j, visited) -> bool: and self.graph[i][j] ) - def diffs(self, i, j, visited): # Checking all 8 elements surrounding nth element + def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None: + # Checking all 8 elements surrounding nth element rowNbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order colNbr = [-1, 0, 1, -1, 1, -1, 0, 1] visited[i][j] = True # Make those cells visited diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py index 57a2fc45ffd1..305cad0a5a9c 100644 --- a/matrix/matrix_class.py +++ b/matrix/matrix_class.py @@ -1,5 +1,7 @@ # An OOP approach to representing and manipulating matrices +from __future__ import annotations + class Matrix: """ @@ -54,7 +56,9 @@ class Matrix: [6. -12. 6.] [-3. 6. -3.]] >>> print(matrix.inverse()) - None + Traceback (most recent call last): + ... + TypeError: Only matrices with a non-zero determinant have an inverse Determinant is an int, float, or Nonetype >>> matrix.determinant() @@ -101,10 +105,9 @@ class Matrix: [198. 243. 288. 304.] [306. 378. 450. 472.] [414. 513. 612. 640.]] - """ - def __init__(self, rows): + def __init__(self, rows: list[list[int]]): error = TypeError( "Matrices must be formed from a list of zero or more lists containing at " "least one and the same number of values, each of which must be of type " @@ -125,42 +128,43 @@ def __init__(self, rows): self.rows = [] # MATRIX INFORMATION - def columns(self): + def columns(self) -> list[list[int]]: return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))] @property - def num_rows(self): + def num_rows(self) -> int: return len(self.rows) @property - def num_columns(self): + def num_columns(self) -> int: return len(self.rows[0]) @property - def order(self): + def order(self) -> tuple[int, int]: return (self.num_rows, self.num_columns) @property - def is_square(self): + def is_square(self) -> bool: return self.order[0] == self.order[1] - def identity(self): + def identity(self) -> Matrix: values = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows)] for row_num in range(self.num_rows) ] return Matrix(values) - def determinant(self): + def determinant(self) -> int: if not self.is_square: - return None + return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): - return self.rows[0][0] + return int(self.rows[0][0]) if self.order == (2, 2): - return (self.rows[0][0] * self.rows[1][1]) - ( - self.rows[0][1] * self.rows[1][0] + return int( + (self.rows[0][0] * self.rows[1][1]) + - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( @@ -168,10 +172,10 @@ def determinant(self): for column in range(self.num_columns) ) - def is_invertable(self): + def is_invertable(self) -> bool: return bool(self.determinant()) - def get_minor(self, row, column): + def get_minor(self, row: int, column: int) -> int: values = [ [ self.rows[other_row][other_column] @@ -183,12 +187,12 @@ def get_minor(self, row, column): ] return Matrix(values).determinant() - def get_cofactor(self, row, column): + def get_cofactor(self, row: int, column: int) -> int: if (row + column) % 2 == 0: return self.get_minor(row, column) return -1 * self.get_minor(row, column) - def minors(self): + def minors(self) -> Matrix: return Matrix( [ [self.get_minor(row, column) for column in range(self.num_columns)] @@ -196,7 +200,7 @@ def minors(self): ] ) - def cofactors(self): + def cofactors(self) -> Matrix: 
return Matrix( [ [ @@ -209,25 +213,27 @@ def cofactors(self): ] ) - def adjugate(self): + def adjugate(self) -> Matrix: values = [ [self.cofactors().rows[column][row] for column in range(self.num_columns)] for row in range(self.num_rows) ] return Matrix(values) - def inverse(self): + def inverse(self) -> Matrix: determinant = self.determinant() - return None if not determinant else self.adjugate() * (1 / determinant) + if not determinant: + raise TypeError("Only matrices with a non-zero determinant have an inverse") + return self.adjugate() * (1 / determinant) - def __repr__(self): + def __repr__(self) -> str: return str(self.rows) - def __str__(self): + def __str__(self) -> str: if self.num_rows == 0: return "[]" if self.num_rows == 1: - return "[[" + ". ".join(self.rows[0]) + "]]" + return "[[" + ". ".join(str(self.rows[0])) + "]]" return ( "[" + "\n ".join( @@ -240,7 +246,7 @@ def __str__(self): ) # MATRIX MANIPULATION - def add_row(self, row, position=None): + def add_row(self, row: list[int], position: int | None = None) -> None: type_error = TypeError("Row must be a list containing all ints and/or floats") if not isinstance(row, list): raise type_error @@ -256,7 +262,7 @@ def add_row(self, row, position=None): else: self.rows = self.rows[0:position] + [row] + self.rows[position:] - def add_column(self, column, position=None): + def add_column(self, column: list[int], position: int | None = None) -> None: type_error = TypeError( "Column must be a list containing all ints and/or floats" ) @@ -278,18 +284,18 @@ def add_column(self, column, position=None): ] # MATRIX OPERATIONS - def __eq__(self, other): + def __eq__(self, other: object) -> bool: if not isinstance(other, Matrix): raise TypeError("A Matrix can only be compared with another Matrix") return self.rows == other.rows - def __ne__(self, other): + def __ne__(self, other: object) -> bool: return not self == other - def __neg__(self): + def __neg__(self) -> Matrix: return self * -1 - def __add__(self, other): + def __add__(self, other: Matrix) -> Matrix: if self.order != other.order: raise ValueError("Addition requires matrices of the same order") return Matrix( @@ -299,7 +305,7 @@ def __add__(self, other): ] ) - def __sub__(self, other): + def __sub__(self, other: Matrix) -> Matrix: if self.order != other.order: raise ValueError("Subtraction requires matrices of the same order") return Matrix( @@ -309,9 +315,11 @@ def __sub__(self, other): ] ) - def __mul__(self, other): + def __mul__(self, other: Matrix | int | float) -> Matrix: if isinstance(other, (int, float)): - return Matrix([[element * other for element in row] for row in self.rows]) + return Matrix( + [[int(element * other) for element in row] for row in self.rows] + ) elif isinstance(other, Matrix): if self.num_columns != other.num_rows: raise ValueError( @@ -329,7 +337,7 @@ def __mul__(self, other): "A Matrix can only be multiplied by an int, float, or another matrix" ) - def __pow__(self, other): + def __pow__(self, other: int) -> Matrix: if not isinstance(other, int): raise TypeError("A Matrix can only be raised to the power of an int") if not self.is_square: @@ -348,7 +356,7 @@ def __pow__(self, other): return result @classmethod - def dot_product(cls, row, column): + def dot_product(cls, row: list[int], column: list[int]) -> int: return sum(row[i] * column[i] for i in range(len(row))) diff --git a/matrix/matrix_operation.py b/matrix/matrix_operation.py index 8e5d0f583486..576094902af4 100644 --- a/matrix/matrix_operation.py +++ b/matrix/matrix_operation.py @@ -4,8 
+4,10 @@ from __future__ import annotations +from typing import Any -def add(*matrix_s: list[list]) -> list[list]: + +def add(*matrix_s: list[list[int]]) -> list[list[int]]: """ >>> add([[1,2],[3,4]],[[2,3],[4,5]]) [[3, 5], [7, 9]] @@ -25,7 +27,7 @@ def add(*matrix_s: list[list]) -> list[list]: raise TypeError("Expected a matrix, got int/list instead") -def subtract(matrix_a: list[list], matrix_b: list[list]) -> list[list]: +def subtract(matrix_a: list[list[int]], matrix_b: list[list[int]]) -> list[list[int]]: """ >>> subtract([[1,2],[3,4]],[[2,3],[4,5]]) [[-1, -1], [-1, -1]] @@ -45,7 +47,7 @@ def subtract(matrix_a: list[list], matrix_b: list[list]) -> list[list]: raise TypeError("Expected a matrix, got int/list instead") -def scalar_multiply(matrix: list[list], n: int | float) -> list[list]: +def scalar_multiply(matrix: list[list[int]], n: int | float) -> list[list[float]]: """ >>> scalar_multiply([[1,2],[3,4]],5) [[5, 10], [15, 20]] @@ -55,7 +57,7 @@ def scalar_multiply(matrix: list[list], n: int | float) -> list[list]: return [[x * n for x in row] for row in matrix] -def multiply(matrix_a: list[list], matrix_b: list[list]) -> list[list]: +def multiply(matrix_a: list[list[int]], matrix_b: list[list[int]]) -> list[list[int]]: """ >>> multiply([[1,2],[3,4]],[[5,5],[7,5]]) [[19, 15], [43, 35]] @@ -77,7 +79,7 @@ def multiply(matrix_a: list[list], matrix_b: list[list]) -> list[list]: ] -def identity(n: int) -> list[list]: +def identity(n: int) -> list[list[int]]: """ :param n: dimension for nxn matrix :type n: int @@ -89,7 +91,9 @@ def identity(n: int) -> list[list]: return [[int(row == column) for column in range(n)] for row in range(n)] -def transpose(matrix: list[list], return_map: bool = True) -> list[list] | map[list]: +def transpose( + matrix: list[list[int]], return_map: bool = True +) -> list[list[int]] | map[list[int]]: """ >>> transpose([[1,2],[3,4]]) # doctest: +ELLIPSIS list[list] | map[l raise TypeError("Expected a matrix, got int/list instead") -def minor(matrix: list[list], row: int, column: int) -> list[list]: +def minor(matrix: list[list[int]], row: int, column: int) -> list[list[int]]: """ >>> minor([[1, 2], [3, 4]], 1, 1) [[1]] @@ -117,7 +121,7 @@ def minor(matrix: list[list], row: int, column: int) -> list[list]: return [row[:column] + row[column + 1 :] for row in minor] -def determinant(matrix: list[list]) -> int: +def determinant(matrix: list[list[int]]) -> Any: """ >>> determinant([[1, 2], [3, 4]]) -2 @@ -133,7 +137,7 @@ def determinant(matrix: list[list]) -> int: ) -def inverse(matrix: list[list]) -> list[list] | None: +def inverse(matrix: list[list[int]]) -> list[list[float]] | None: """ >>> inverse([[1, 2], [3, 4]]) [[-2.0, 1.0], [1.5, -0.5]] @@ -157,27 +161,27 @@ def inverse(matrix: list[list]) -> list[list] | None: return scalar_multiply(adjugate, 1 / det) -def _check_not_integer(matrix: list[list]) -> bool: +def _check_not_integer(matrix: list[list[int]]) -> bool: return not isinstance(matrix, int) and not isinstance(matrix[0], int) -def _shape(matrix: list[list]) -> tuple[int, int]: +def _shape(matrix: list[list[int]]) -> tuple[int, int]: return len(matrix), len(matrix[0]) def _verify_matrix_sizes( - matrix_a: list[list], matrix_b: list[list] -) -> tuple[tuple, tuple]: + matrix_a: list[list[int]], matrix_b: list[list[int]] +) -> tuple[tuple[int, int], tuple[int, int]]: shape = _shape(matrix_a) + _shape(matrix_b) if shape[0] != shape[3] or shape[1] != shape[2]: raise ValueError( - "operands could not be broadcast together with shape " + f"operands could not be 
broadcast together with shape " f"({shape[0], shape[1]}), ({shape[2], shape[3]})" ) return (shape[0], shape[2]), (shape[1], shape[3]) -def main(): +def main() -> None: matrix_a = [[12, 10], [3, 9]] matrix_b = [[3, 4], [7, 4]] matrix_c = [[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34], [41, 42, 43, 44]] diff --git a/matrix/nth_fibonacci_using_matrix_exponentiation.py b/matrix/nth_fibonacci_using_matrix_exponentiation.py index 341a02e1a95d..7c964d884617 100644 --- a/matrix/nth_fibonacci_using_matrix_exponentiation.py +++ b/matrix/nth_fibonacci_using_matrix_exponentiation.py @@ -16,7 +16,7 @@ """ -def multiply(matrix_a, matrix_b): +def multiply(matrix_a: list[list[int]], matrix_b: list[list[int]]) -> list[list[int]]: matrix_c = [] n = len(matrix_a) for i in range(n): @@ -30,11 +30,11 @@ def multiply(matrix_a, matrix_b): return matrix_c -def identity(n): +def identity(n: int) -> list[list[int]]: return [[int(row == column) for column in range(n)] for row in range(n)] -def nth_fibonacci_matrix(n): +def nth_fibonacci_matrix(n: int) -> int: """ >>> nth_fibonacci_matrix(100) 354224848179261915075 @@ -54,7 +54,7 @@ def nth_fibonacci_matrix(n): return res_matrix[0][0] -def nth_fibonacci_bruteforce(n): +def nth_fibonacci_bruteforce(n: int) -> int: """ >>> nth_fibonacci_bruteforce(100) 354224848179261915075 @@ -70,7 +70,7 @@ def nth_fibonacci_bruteforce(n): return fib1 -def main(): +def main() -> None: for ordinal in "0th 1st 2nd 3rd 10th 100th 1000th".split(): n = int("".join(c for c in ordinal if c in "0123456789")) # 1000th --> 1000 print( diff --git a/matrix/rotate_matrix.py b/matrix/rotate_matrix.py index f638597ae35d..c16cdb9a81bb 100644 --- a/matrix/rotate_matrix.py +++ b/matrix/rotate_matrix.py @@ -8,7 +8,7 @@ from __future__ import annotations -def make_matrix(row_size: int = 4) -> list[list]: +def make_matrix(row_size: int = 4) -> list[list[int]]: """ >>> make_matrix() [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]] @@ -25,7 +25,7 @@ def make_matrix(row_size: int = 4) -> list[list]: return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)] -def rotate_90(matrix: list[list]) -> list[list]: +def rotate_90(matrix: list[list[int]]) -> list[list[int]]: """ >>> rotate_90(make_matrix()) [[4, 8, 12, 16], [3, 7, 11, 15], [2, 6, 10, 14], [1, 5, 9, 13]] @@ -37,7 +37,7 @@ def rotate_90(matrix: list[list]) -> list[list]: # OR.. transpose(reverse_column(matrix)) -def rotate_180(matrix: list[list]) -> list[list]: +def rotate_180(matrix: list[list[int]]) -> list[list[int]]: """ >>> rotate_180(make_matrix()) [[16, 15, 14, 13], [12, 11, 10, 9], [8, 7, 6, 5], [4, 3, 2, 1]] @@ -49,7 +49,7 @@ def rotate_180(matrix: list[list]) -> list[list]: # OR.. reverse_column(reverse_row(matrix)) -def rotate_270(matrix: list[list]) -> list[list]: +def rotate_270(matrix: list[list[int]]) -> list[list[int]]: """ >>> rotate_270(make_matrix()) [[13, 9, 5, 1], [14, 10, 6, 2], [15, 11, 7, 3], [16, 12, 8, 4]] @@ -61,22 +61,22 @@ def rotate_270(matrix: list[list]) -> list[list]: # OR.. 
transpose(reverse_row(matrix)) -def transpose(matrix: list[list]) -> list[list]: +def transpose(matrix: list[list[int]]) -> list[list[int]]: matrix[:] = [list(x) for x in zip(*matrix)] return matrix -def reverse_row(matrix: list[list]) -> list[list]: +def reverse_row(matrix: list[list[int]]) -> list[list[int]]: matrix[:] = matrix[::-1] return matrix -def reverse_column(matrix: list[list]) -> list[list]: +def reverse_column(matrix: list[list[int]]) -> list[list[int]]: matrix[:] = [x[::-1] for x in matrix] return matrix -def print_matrix(matrix: list[list]) -> None: +def print_matrix(matrix: list[list[int]]) -> None: for i in matrix: print(*i) diff --git a/matrix/searching_in_sorted_matrix.py b/matrix/searching_in_sorted_matrix.py index ae81361499e5..ddca3b1ce781 100644 --- a/matrix/searching_in_sorted_matrix.py +++ b/matrix/searching_in_sorted_matrix.py @@ -2,7 +2,7 @@ def search_in_a_sorted_matrix( - mat: list[list], m: int, n: int, key: int | float + mat: list[list[int]], m: int, n: int, key: int | float ) -> None: """ >>> search_in_a_sorted_matrix( @@ -30,7 +30,7 @@ def search_in_a_sorted_matrix( print(f"Key {key} not found") -def main(): +def main() -> None: mat = [[2, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]] x = int(input("Enter the element to be searched:")) print(mat) diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 63783c8b40fc..a0c93f11574e 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -1,14 +1,18 @@ +from __future__ import annotations + +from typing import Any + + class Matrix: """ Matrix structure. """ - def __init__(self, row: int, column: int, default_value: float = 0): + def __init__(self, row: int, column: int, default_value: float = 0) -> None: """ Initialize matrix with given size and default value. - Example: >>> a = Matrix(2, 3, 1) >>> a @@ -20,7 +24,7 @@ def __init__(self, row: int, column: int, default_value: float = 0): self.row, self.column = row, column self.array = [[default_value for c in range(column)] for r in range(row)] - def __str__(self): + def __str__(self) -> str: """ Return string representation of this matrix. @@ -37,7 +41,7 @@ def __str__(self): string_format_identifier = "%%%ds" % (max_element_length,) # Make string and return - def single_line(row_vector): + def single_line(row_vector: list[float]) -> str: nonlocal string_format_identifier line = "[" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector) @@ -47,14 +51,13 @@ def single_line(row_vector): s += "\n".join(single_line(row_vector) for row_vector in self.array) return s - def __repr__(self): + def __repr__(self) -> str: return str(self) - def validateIndices(self, loc: tuple): + def validateIndices(self, loc: tuple[int, int]) -> bool: """ Check if given indices are valid to pick element from matrix. - Example: >>> a = Matrix(2, 6, 0) >>> a.validateIndices((2, 7)) @@ -69,11 +72,10 @@ def validateIndices(self, loc: tuple): else: return True - def __getitem__(self, loc: tuple): + def __getitem__(self, loc: tuple[int, int]) -> Any: """ Return array[row][column] where loc = (row, column). - Example: >>> a = Matrix(3, 2, 7) >>> a[1, 0] @@ -82,11 +84,10 @@ def __getitem__(self, loc: tuple): assert self.validateIndices(loc) return self.array[loc[0]][loc[1]] - def __setitem__(self, loc: tuple, value: float): + def __setitem__(self, loc: tuple[int, int], value: float) -> None: """ Set array[row][column] = value where loc = (row, column). 
- Example: >>> a = Matrix(2, 3, 1) >>> a[1, 2] = 51 @@ -98,11 +99,10 @@ def __setitem__(self, loc: tuple, value: float): assert self.validateIndices(loc) self.array[loc[0]][loc[1]] = value - def __add__(self, another): + def __add__(self, another: Matrix) -> Matrix: """ Return self + another. - Example: >>> a = Matrix(2, 1, -4) >>> b = Matrix(2, 1, 3) @@ -123,11 +123,10 @@ def __add__(self, another): result[r, c] = self[r, c] + another[r, c] return result - def __neg__(self): + def __neg__(self) -> Matrix: """ Return -self. - Example: >>> a = Matrix(2, 2, 3) >>> a[0, 1] = a[1, 0] = -2 @@ -143,14 +142,13 @@ def __neg__(self): result[r, c] = -self[r, c] return result - def __sub__(self, another): + def __sub__(self, another: Matrix) -> Matrix: return self + (-another) - def __mul__(self, another): + def __mul__(self, another: int | float | Matrix) -> Matrix: """ Return self * another. - Example: >>> a = Matrix(2, 3, 1) >>> a[0,2] = a[1,2] = 3 @@ -177,11 +175,10 @@ def __mul__(self, another): else: raise TypeError(f"Unsupported type given for another ({type(another)})") - def transpose(self): + def transpose(self) -> Matrix: """ Return self^T. - Example: >>> a = Matrix(2, 3) >>> for r in range(2): @@ -201,7 +198,7 @@ def transpose(self): result[c, r] = self[r, c] return result - def ShermanMorrison(self, u, v): + def ShermanMorrison(self, u: Matrix, v: Matrix) -> Any: """ Apply Sherman-Morrison formula in O(n^2). @@ -211,7 +208,6 @@ def ShermanMorrison(self, u, v): impossible to calculate. Warning: This method doesn't check if self is invertible. Make sure self is invertible before execute this method. - Example: >>> ainv = Matrix(3, 3, 0) >>> for i in range(3): ainv[i,i] = 1 @@ -243,7 +239,7 @@ def ShermanMorrison(self, u, v): # Testing if __name__ == "__main__": - def test1(): + def test1() -> None: # a^(-1) ainv = Matrix(3, 3, 0) for i in range(3): @@ -256,11 +252,11 @@ def test1(): v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5 print(f"u is {u}") print(f"v is {v}") - print(f"uv^T is {u * v.transpose()}") + print("uv^T is %s" % (u * v.transpose())) # Sherman Morrison print(f"(a + uv^T)^(-1) is {ainv.ShermanMorrison(u, v)}") - def test2(): + def test2() -> None: import doctest doctest.testmod() diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index 6f699c1ab662..2441f05d15ef 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -1,19 +1,17 @@ """ This program print the matrix in spiral form. This problem has been solved through recursive way. 
- Matrix must satisfy below conditions i) matrix should be only one or two dimensional ii) number of column of all rows should be equal """ -from collections.abc import Iterable - -def check_matrix(matrix): +def check_matrix(matrix: list[list[int]]) -> bool: # must be - if matrix and isinstance(matrix, Iterable): - if isinstance(matrix[0], Iterable): + matrix = list(list(row) for row in matrix) + if matrix and isinstance(matrix, list): + if isinstance(matrix[0], list): prev_len = 0 for row in matrix: if prev_len == 0: @@ -29,32 +27,48 @@ def check_matrix(matrix): return result -def spiralPrint(a): +def spiral_print_clockwise(a: list[list[int]]) -> None: + """ + >>> spiral_print_clockwise([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + 1 + 2 + 3 + 4 + 8 + 12 + 11 + 10 + 9 + 5 + 6 + 7 + """ if check_matrix(a) and len(a) > 0: - matRow = len(a) - if isinstance(a[0], Iterable): - matCol = len(a[0]) + a = list(list(row) for row in a) + mat_row = len(a) + if isinstance(a[0], list): + mat_col = len(a[0]) else: for dat in a: - print(dat), + print(dat) return # horizotal printing increasing - for i in range(0, matCol): - print(a[0][i]), + for i in range(0, mat_col): + print(a[0][i]) # vertical printing down - for i in range(1, matRow): - print(a[i][matCol - 1]), + for i in range(1, mat_row): + print(a[i][mat_col - 1]) # horizotal printing decreasing - if matRow > 1: - for i in range(matCol - 2, -1, -1): - print(a[matRow - 1][i]), + if mat_row > 1: + for i in range(mat_col - 2, -1, -1): + print(a[mat_row - 1][i]) # vertical printing up - for i in range(matRow - 2, 0, -1): - print(a[i][0]), - remainMat = [row[1 : matCol - 1] for row in a[1 : matRow - 1]] - if len(remainMat) > 0: - spiralPrint(remainMat) + for i in range(mat_row - 2, 0, -1): + print(a[i][0]) + remain_mat = [row[1 : mat_col - 1] for row in a[1 : mat_row - 1]] + if len(remain_mat) > 0: + spiral_print_clockwise(remain_mat) else: return else: @@ -64,5 +78,5 @@ def spiralPrint(a): # driver code if __name__ == "__main__": - a = ([1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]) - spiralPrint(a) + a = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + spiral_print_clockwise(a) From 087a3a8d537ceb179d0a47eda66f47d103c4b1b9 Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj <114707091+rohanr18@users.noreply.github.com> Date: Wed, 5 Oct 2022 01:22:49 +0530 Subject: [PATCH 0511/1543] lorenz -> lorentz (#6670) --- ...four_vector.py => lorentz_transformation_four_vector.py} | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename physics/{lorenz_transformation_four_vector.py => lorentz_transformation_four_vector.py} (96%) diff --git a/physics/lorenz_transformation_four_vector.py b/physics/lorentz_transformation_four_vector.py similarity index 96% rename from physics/lorenz_transformation_four_vector.py rename to physics/lorentz_transformation_four_vector.py index 6c0d5f9d1997..bda852c25520 100644 --- a/physics/lorenz_transformation_four_vector.py +++ b/physics/lorentz_transformation_four_vector.py @@ -1,13 +1,13 @@ """ -Lorenz transformation describes the transition from a reference frame P +Lorentz transformation describes the transition from a reference frame P to another reference frame P', each of which is moving in a direction with -respect to the other. The Lorenz transformation implemented in this code +respect to the other. 
The Lorentz transformation implemented in this code is the relativistic version using a four vector described by Minkowsky Space: x0 = ct, x1 = x, x2 = y, and x3 = z NOTE: Please note that x0 is c (speed of light) times t (time). -So, the Lorenz transformation using a four vector is defined as: +So, the Lorentz transformation using a four vector is defined as: |ct'| | γ -γβ 0 0| |ct| |x' | = |-γβ γ 0 0| *|x | From 8cce0d463a0f65c22769fe4f0750acbeed2e0d60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9rome=20Eertmans?= Date: Wed, 5 Oct 2022 12:32:07 +0200 Subject: [PATCH 0512/1543] refactor: pivot is randomly chosen (#6643) As described in #6095, this reduces the chances to observe a O(n^2) complexity. Here, `collection.pop(pivot_index)` is avoided for performance reasons. Fixes: #6095 --- sorts/quick_sort.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/sorts/quick_sort.py b/sorts/quick_sort.py index b099c78861ba..70cd19d7afe0 100644 --- a/sorts/quick_sort.py +++ b/sorts/quick_sort.py @@ -9,6 +9,8 @@ """ from __future__ import annotations +from random import randrange + def quick_sort(collection: list) -> list: """A pure Python implementation of quick sort algorithm @@ -26,11 +28,17 @@ def quick_sort(collection: list) -> list: """ if len(collection) < 2: return collection - pivot = collection.pop() # Use the last element as the first pivot + pivot_index = randrange(len(collection)) # Use random element as pivot + pivot = collection[pivot_index] greater: list[int] = [] # All elements greater than pivot lesser: list[int] = [] # All elements less than or equal to pivot - for element in collection: + + for element in collection[:pivot_index]: (greater if element > pivot else lesser).append(element) + + for element in collection[pivot_index + 1 :]: + (greater if element > pivot else lesser).append(element) + return quick_sort(lesser) + [pivot] + quick_sort(greater) From 660d2bb66c8ca03e2225090b5c638ffb0fd14a60 Mon Sep 17 00:00:00 2001 From: Paul <56065602+ZeroDayOwl@users.noreply.github.com> Date: Thu, 6 Oct 2022 23:19:34 +0600 Subject: [PATCH 0513/1543] Add algorithm for Newton's Law of Gravitation (#6626) * Add algorithm for Newton's Law of Gravitation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update physics/newtons_law_of_gravitation.py Co-authored-by: Christian Clauss * One and only one argument must be 0 * Update newtons_law_of_gravitation.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- physics/newtons_law_of_gravitation.py | 100 ++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 physics/newtons_law_of_gravitation.py diff --git a/physics/newtons_law_of_gravitation.py b/physics/newtons_law_of_gravitation.py new file mode 100644 index 000000000000..0bb27bb2415d --- /dev/null +++ b/physics/newtons_law_of_gravitation.py @@ -0,0 +1,100 @@ +""" +Title : Finding the value of either Gravitational Force, one of the masses or distance +provided that the other three parameters are given. + +Description : Newton's Law of Universal Gravitation explains the presence of force of +attraction between bodies having a definite mass situated at a distance. It is usually +stated as that, every particle attracts every other particle in the universe with a +force that is directly proportional to the product of their masses and inversely +proportional to the square of the distance between their centers. 
The publication of the +theory has become known as the "first great unification", as it marked the unification +of the previously described phenomena of gravity on Earth with known astronomical +behaviors. + +The equation for the universal gravitation is as follows: +F = (G * mass_1 * mass_2) / (distance)^2 + +Source : +- https://en.wikipedia.org/wiki/Newton%27s_law_of_universal_gravitation +- Newton (1687) "Philosophiæ Naturalis Principia Mathematica" +""" + +from __future__ import annotations + +# Define the Gravitational Constant G and the function +GRAVITATIONAL_CONSTANT = 6.6743e-11 # unit of G : m^3 * kg^-1 * s^-2 + + +def gravitational_law( + force: float, mass_1: float, mass_2: float, distance: float +) -> dict[str, float]: + + """ + Input Parameters + ---------------- + force : magnitude in Newtons + + mass_1 : mass in Kilograms + + mass_2 : mass in Kilograms + + distance : distance in Meters + + Returns + ------- + result : dict name, value pair of the parameter having Zero as it's value + + Returns the value of one of the parameters specified as 0, provided the values of + other parameters are given. + >>> gravitational_law(force=0, mass_1=5, mass_2=10, distance=20) + {'force': 8.342875e-12} + + >>> gravitational_law(force=7367.382, mass_1=0, mass_2=74, distance=3048) + {'mass_1': 1.385816317292268e+19} + + >>> gravitational_law(force=36337.283, mass_1=0, mass_2=0, distance=35584) + Traceback (most recent call last): + ... + ValueError: One and only one argument must be 0 + + >>> gravitational_law(force=36337.283, mass_1=-674, mass_2=0, distance=35584) + Traceback (most recent call last): + ... + ValueError: Mass can not be negative + + >>> gravitational_law(force=-847938e12, mass_1=674, mass_2=0, distance=9374) + Traceback (most recent call last): + ... 
+ ValueError: Gravitational force can not be negative + """ + + product_of_mass = mass_1 * mass_2 + + if (force, mass_1, mass_2, distance).count(0) != 1: + raise ValueError("One and only one argument must be 0") + if force < 0: + raise ValueError("Gravitational force can not be negative") + if distance < 0: + raise ValueError("Distance can not be negative") + if mass_1 < 0 or mass_2 < 0: + raise ValueError("Mass can not be negative") + if force == 0: + force = GRAVITATIONAL_CONSTANT * product_of_mass / (distance**2) + return {"force": force} + elif mass_1 == 0: + mass_1 = (force) * (distance**2) / (GRAVITATIONAL_CONSTANT * mass_2) + return {"mass_1": mass_1} + elif mass_2 == 0: + mass_2 = (force) * (distance**2) / (GRAVITATIONAL_CONSTANT * mass_1) + return {"mass_2": mass_2} + elif distance == 0: + distance = (GRAVITATIONAL_CONSTANT * product_of_mass / (force)) ** 0.5 + return {"distance": distance} + raise ValueError("One and only one argument must be 0") + + +# Run doctest +if __name__ == "__main__": + import doctest + + doctest.testmod() From 5894554d41116af83152b1ea59fbf78303d87966 Mon Sep 17 00:00:00 2001 From: Jordan Rinder Date: Sat, 8 Oct 2022 18:28:17 -0400 Subject: [PATCH 0514/1543] Add Catalan number to maths (#6845) * Add Catalan number to maths * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 4 +++- maths/catalan_number.py | 51 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 maths/catalan_number.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 64e9d5333a2f..668da4761f74 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -475,6 +475,7 @@ * [Binomial Coefficient](maths/binomial_coefficient.py) * [Binomial Distribution](maths/binomial_distribution.py) * [Bisection](maths/bisection.py) + * [Catalan Number](maths/catalan_number.py) * [Ceil](maths/ceil.py) * [Check Polygon](maths/check_polygon.py) * [Chudnovsky Algorithm](maths/chudnovsky_algorithm.py) @@ -632,8 +633,9 @@ ## Physics * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) - * [Lorenz Transformation Four Vector](physics/lorenz_transformation_four_vector.py) + * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [N Body Simulation](physics/n_body_simulation.py) + * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) ## Project Euler diff --git a/maths/catalan_number.py b/maths/catalan_number.py new file mode 100644 index 000000000000..4a1280a45bf2 --- /dev/null +++ b/maths/catalan_number.py @@ -0,0 +1,51 @@ +""" + +Calculate the nth Catalan number + +Source: + https://en.wikipedia.org/wiki/Catalan_number + +""" + + +def catalan(number: int) -> int: + """ + :param number: nth catalan number to calculate + :return: the nth catalan number + Note: A catalan number is only defined for positive integers + + >>> catalan(5) + 14 + >>> catalan(0) + Traceback (most recent call last): + ... + ValueError: Input value of [number=0] must be > 0 + >>> catalan(-1) + Traceback (most recent call last): + ... + ValueError: Input value of [number=-1] must be > 0 + >>> catalan(5.0) + Traceback (most recent call last): + ... 
+ TypeError: Input value of [number=5.0] must be an integer + """ + + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + + if number < 1: + raise ValueError(f"Input value of [number={number}] must be > 0") + + current_number = 1 + + for i in range(1, number): + current_number *= 4 * i - 2 + current_number //= i + 1 + + return current_number + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 51dba4d743cd2c8d407eea3e9cd4e7b2f69ee34d Mon Sep 17 00:00:00 2001 From: Lakshay Roopchandani <75477853+lakshayroop5@users.noreply.github.com> Date: Sun, 9 Oct 2022 18:23:44 +0530 Subject: [PATCH 0515/1543] Job sequencing with deadlines (#6854) * completed optimised code for job sequencing with deadline problem * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * completed optimised code for job sequencing with deadline problem * completed optimized code for job sequencing with deadline problem * completed optimised code for job sequencing with deadline problem * completed optimised code for job sequencing with deadline problem * completed optimised code for job sequencing with deadline problem * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * completed optimized code for the issue "Job Scheduling with deadlines" * completed optimized code for the issue "Job Scheduling with deadlines" * completed optimized code for the issue "Job Scheduling with deadlines" * Update greedy_methods/job_sequencing_with_deadline.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated reviews * Updated reviews * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename greedy_methods/job_sequencing_with_deadline.py to scheduling/job_sequencing_with_deadline.py Co-authored-by: lakshayroop5 <87693528+lavenroop5@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- scheduling/job_sequencing_with_deadline.py | 48 ++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 scheduling/job_sequencing_with_deadline.py diff --git a/scheduling/job_sequencing_with_deadline.py b/scheduling/job_sequencing_with_deadline.py new file mode 100644 index 000000000000..7b23c0b3575f --- /dev/null +++ b/scheduling/job_sequencing_with_deadline.py @@ -0,0 +1,48 @@ +def job_sequencing_with_deadlines(num_jobs: int, jobs: list) -> list: + """ + Function to find the maximum profit by doing jobs in a given time frame + + Args: + num_jobs [int]: Number of jobs + jobs [list]: A list of tuples of (job_id, deadline, profit) + + Returns: + max_profit [int]: Maximum profit that can be earned by doing jobs + in a given time frame + + Examples: + >>> job_sequencing_with_deadlines(4, + ... [(1, 4, 20), (2, 1, 10), (3, 1, 40), (4, 1, 30)]) + [2, 60] + >>> job_sequencing_with_deadlines(5, + ... 
[(1, 2, 100), (2, 1, 19), (3, 2, 27), (4, 1, 25), (5, 1, 15)]) + [2, 127] + """ + + # Sort the jobs in descending order of profit + jobs = sorted(jobs, key=lambda value: value[2], reverse=True) + + # Create a list of size equal to the maximum deadline + # and initialize it with -1 + max_deadline = max(jobs, key=lambda value: value[1])[1] + time_slots = [-1] * max_deadline + + # Finding the maximum profit and the count of jobs + count = 0 + max_profit = 0 + for job in jobs: + # Find a free time slot for this job + # (Note that we start from the last possible slot) + for i in range(job[1] - 1, -1, -1): + if time_slots[i] == -1: + time_slots[i] = job[0] + count += 1 + max_profit += job[2] + break + return [count, max_profit] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0a3433eaed6c8369f6c45b3abf70ee33a3a74910 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 10 Oct 2022 22:04:33 +0200 Subject: [PATCH 0516/1543] [pre-commit.ci] pre-commit autoupdate (#6940) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/psf/black: 22.8.0 → 22.10.0](https://github.com/psf/black/compare/22.8.0...22.10.0) - [github.com/asottile/pyupgrade: v2.38.2 → v3.0.0](https://github.com/asottile/pyupgrade/compare/v2.38.2...v3.0.0) - [github.com/pre-commit/mirrors-mypy: v0.981 → v0.982](https://github.com/pre-commit/mirrors-mypy/compare/v0.981...v0.982) * updating DIRECTORY.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a2fcf12c9bbd..0abe647b017a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,7 @@ repos: - id: requirements-txt-fixer - repo: https://github.com/psf/black - rev: 22.8.0 + rev: 22.10.0 hooks: - id: black @@ -26,7 +26,7 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v2.38.2 + rev: v3.0.0 hooks: - id: pyupgrade args: @@ -42,7 +42,7 @@ repos: - --max-line-length=88 - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.981 + rev: v0.982 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 668da4761f74..9ef72c403f32 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -927,6 +927,7 @@ ## Scheduling * [First Come First Served](scheduling/first_come_first_served.py) * [Highest Response Ratio Next](scheduling/highest_response_ratio_next.py) + * [Job Sequencing With Deadline](scheduling/job_sequencing_with_deadline.py) * [Multi Level Feedback Queue](scheduling/multi_level_feedback_queue.py) * [Non Preemptive Shortest Job First](scheduling/non_preemptive_shortest_job_first.py) * [Round Robin](scheduling/round_robin.py) From f0d1a42deb146bebcdf7b1b2ec788c815ede452a Mon Sep 17 00:00:00 2001 From: Shubhajit Roy <81477286+shubhajitroy123@users.noreply.github.com> Date: Wed, 12 Oct 2022 12:52:23 +0530 Subject: [PATCH 0517/1543] Python program for Carmicheal Number (#6864) * Add files via upload Python program to determine whether a number is Carmichael Number or not. 
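For context, a Carmichael number is a composite n that satisfies pow(b, n - 1, n) == 1 for every base b coprime to n, so the defining property can be sanity-checked directly with Python built-ins. A minimal sketch (illustrative only, separate from the carmichael_number.py added below; the helper name is made up):

    from math import gcd

    def looks_carmichael(n: int) -> bool:
        # n must be composite (primes satisfy Fermat's congruence trivially) ...
        if n < 3 or all(n % i for i in range(2, int(n**0.5) + 1)):
            return False
        # ... and every base coprime to n must satisfy b**(n - 1) % n == 1
        return all(pow(b, n - 1, n) == 1 for b in range(2, n) if gcd(b, n) == 1)

    assert looks_carmichael(561) and not looks_carmichael(562)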
* Rename Carmichael Number.py to carmichael number.py * Rename carmichael number.py to carmichael_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update carmichael_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create carmichael_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/carmichael_number.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/carmichael_number.py | 47 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 maths/carmichael_number.py diff --git a/maths/carmichael_number.py b/maths/carmichael_number.py new file mode 100644 index 000000000000..09a4fedfb763 --- /dev/null +++ b/maths/carmichael_number.py @@ -0,0 +1,47 @@ +""" +== Carmichael Numbers == +A number n is said to be a Carmichael number if it +satisfies the following modular arithmetic condition: + + power(b, n-1) MOD n = 1, + for all b ranging from 1 to n such that b and + n are relatively prime, i.e, gcd(b, n) = 1 + +Examples of Carmichael Numbers: 561, 1105, ... +https://en.wikipedia.org/wiki/Carmichael_number +""" + + +def gcd(a: int, b: int) -> int: + if a < b: + return gcd(b, a) + if a % b == 0: + return b + return gcd(b, a % b) + + +def power(x: int, y: int, mod: int) -> int: + if y == 0: + return 1 + temp = power(x, y // 2, mod) % mod + temp = (temp * temp) % mod + if y % 2 == 1: + temp = (temp * x) % mod + return temp + + +def isCarmichaelNumber(n: int) -> bool: + b = 2 + while b < n: + if gcd(b, n) == 1 and power(b, n - 1, n) != 1: + return False + b += 1 + return True + + +if __name__ == "__main__": + number = int(input("Enter number: ").strip()) + if isCarmichaelNumber(number): + print(f"{number} is a Carmichael Number.") + else: + print(f"{number} is not a Carmichael Number.") From a04a6365dee01bebf382809a5638b6fd0d0a51e6 Mon Sep 17 00:00:00 2001 From: Martmists Date: Wed, 12 Oct 2022 15:19:00 +0200 Subject: [PATCH 0518/1543] Add Equal Loudness Filter (#7019) * Add Equal Loudness Filter Signed-off-by: Martmists * NoneType return on __init__ Signed-off-by: Martmists * Add data to JSON as requested by @CenTdemeern1 in a not very polite manner Signed-off-by: Martmists * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * 'modernize' Signed-off-by: Martmists * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update audio_filters/equal_loudness_filter.py Co-authored-by: Christian Clauss * Update equal_loudness_filter.py * Update equal_loudness_filter.py * Finally!! 
* Arrgghh Signed-off-by: Martmists Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- audio_filters/equal_loudness_filter.py | 61 +++++++++++++++++++++ audio_filters/loudness_curve.json | 76 ++++++++++++++++++++++++++ requirements.txt | 1 + 3 files changed, 138 insertions(+) create mode 100644 audio_filters/equal_loudness_filter.py create mode 100644 audio_filters/loudness_curve.json diff --git a/audio_filters/equal_loudness_filter.py b/audio_filters/equal_loudness_filter.py new file mode 100644 index 000000000000..b9a3c50e1c33 --- /dev/null +++ b/audio_filters/equal_loudness_filter.py @@ -0,0 +1,61 @@ +from json import loads +from pathlib import Path + +import numpy as np +from yulewalker import yulewalk + +from audio_filters.butterworth_filter import make_highpass +from audio_filters.iir_filter import IIRFilter + +data = loads((Path(__file__).resolve().parent / "loudness_curve.json").read_text()) + + +class EqualLoudnessFilter: + r""" + An equal-loudness filter which compensates for the human ear's non-linear response + to sound. + This filter corrects this by cascading a yulewalk filter and a butterworth filter. + + Designed for use with samplerate of 44.1kHz and above. If you're using a lower + samplerate, use with caution. + + Code based on matlab implementation at https://bit.ly/3eqh2HU + (url shortened for flake8) + + Target curve: https://i.imgur.com/3g2VfaM.png + Yulewalk response: https://i.imgur.com/J9LnJ4C.png + Butterworth and overall response: https://i.imgur.com/3g2VfaM.png + + Images and original matlab implementation by David Robinson, 2001 + """ + + def __init__(self, samplerate: int = 44100) -> None: + self.yulewalk_filter = IIRFilter(10) + self.butterworth_filter = make_highpass(150, samplerate) + + # pad the data to nyquist + curve_freqs = np.array(data["frequencies"] + [max(20000.0, samplerate / 2)]) + curve_gains = np.array(data["gains"] + [140]) + + # Convert to angular frequency + freqs_normalized = curve_freqs / samplerate * 2 + # Invert the curve and normalize to 0dB + gains_normalized = np.power(10, (np.min(curve_gains) - curve_gains) / 20) + + # Scipy's `yulewalk` function is a stub, so we're using the + # `yulewalker` library instead. + # This function computes the coefficients using a least-squares + # fit to the specified curve. 
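+        # (In `freqs_normalized` a value of 1.0 corresponds to the Nyquist
+        # frequency, i.e. samplerate / 2; the padding above guarantees the
+        # curve reaches at least that point.)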
+ ya, yb = yulewalk(10, freqs_normalized, gains_normalized) + self.yulewalk_filter.set_coefficients(ya, yb) + + def process(self, sample: float) -> float: + """ + Process a single sample through both filters + + >>> filt = EqualLoudnessFilter() + >>> filt.process(0.0) + 0.0 + """ + tmp = self.yulewalk_filter.process(sample) + return self.butterworth_filter.process(tmp) diff --git a/audio_filters/loudness_curve.json b/audio_filters/loudness_curve.json new file mode 100644 index 000000000000..fc066a0810fc --- /dev/null +++ b/audio_filters/loudness_curve.json @@ -0,0 +1,76 @@ +{ + "_comment": "The following is a representative average of the Equal Loudness Contours as measured by Robinson and Dadson, 1956", + "_doi": "10.1088/0508-3443/7/5/302", + "frequencies": [ + 0, + 20, + 30, + 40, + 50, + 60, + 70, + 80, + 90, + 100, + 200, + 300, + 400, + 500, + 600, + 700, + 800, + 900, + 1000, + 1500, + 2000, + 2500, + 3000, + 3700, + 4000, + 5000, + 6000, + 7000, + 8000, + 9000, + 10000, + 12000, + 15000, + 20000 + ], + "gains": [ + 120, + 113, + 103, + 97, + 93, + 91, + 89, + 87, + 86, + 85, + 78, + 76, + 76, + 76, + 76, + 77, + 78, + 79.5, + 80, + 79, + 77, + 74, + 71.5, + 70, + 70.5, + 74, + 79, + 84, + 86, + 86, + 85, + 95, + 110, + 125 + ] +} diff --git a/requirements.txt b/requirements.txt index 294494acf41a..0fbc1cc4b45c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,3 +17,4 @@ tensorflow texttable tweepy xgboost +yulewalker From d15bf7d492bc778682f80392bfd559074c4adbec Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Wed, 12 Oct 2022 22:05:31 +0530 Subject: [PATCH 0519/1543] Add typing to data_structures/heap/heap_generic.py (#7044) * Update heap_generic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update heap_generic.py * Update heap_generic.py * Update heap_generic.py * Update heap_generic.py * Update heap_generic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/heap/heap_generic.py | 35 +++++++++++++++------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/data_structures/heap/heap_generic.py b/data_structures/heap/heap_generic.py index 553cb94518c4..e7831cd45b43 100644 --- a/data_structures/heap/heap_generic.py +++ b/data_structures/heap/heap_generic.py @@ -1,35 +1,38 @@ +from collections.abc import Callable + + class Heap: """ A generic Heap class, can be used as min or max by passing the key function accordingly. """ - def __init__(self, key=None): + def __init__(self, key: Callable | None = None) -> None: # Stores actual heap items. - self.arr = list() + self.arr: list = list() # Stores indexes of each item for supporting updates and deletion. - self.pos_map = {} + self.pos_map: dict = {} # Stores current size of heap. self.size = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. 
self.key = key or (lambda x: x) - def _parent(self, i): + def _parent(self, i: int) -> int | None: """Returns parent index of given index if exists else None""" return int((i - 1) / 2) if i > 0 else None - def _left(self, i): + def _left(self, i: int) -> int | None: """Returns left-child-index of given index if exists else None""" left = int(2 * i + 1) return left if 0 < left < self.size else None - def _right(self, i): + def _right(self, i: int) -> int | None: """Returns right-child-index of given index if exists else None""" right = int(2 * i + 2) return right if 0 < right < self.size else None - def _swap(self, i, j): + def _swap(self, i: int, j: int) -> None: """Performs changes required for swapping two elements in the heap""" # First update the indexes of the items in index map. self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = ( @@ -39,11 +42,11 @@ def _swap(self, i, j): # Then swap the items in the list. self.arr[i], self.arr[j] = self.arr[j], self.arr[i] - def _cmp(self, i, j): + def _cmp(self, i: int, j: int) -> bool: """Compares the two items using default comparison""" return self.arr[i][1] < self.arr[j][1] - def _get_valid_parent(self, i): + def _get_valid_parent(self, i: int) -> int: """ Returns index of valid parent as per desired ordering among given index and both it's children @@ -59,21 +62,21 @@ def _get_valid_parent(self, i): return valid_parent - def _heapify_up(self, index): + def _heapify_up(self, index: int) -> None: """Fixes the heap in upward direction of given index""" parent = self._parent(index) while parent is not None and not self._cmp(index, parent): self._swap(index, parent) index, parent = parent, self._parent(parent) - def _heapify_down(self, index): + def _heapify_down(self, index: int) -> None: """Fixes the heap in downward direction of given index""" valid_parent = self._get_valid_parent(index) while valid_parent != index: self._swap(index, valid_parent) index, valid_parent = valid_parent, self._get_valid_parent(valid_parent) - def update_item(self, item, item_value): + def update_item(self, item: int, item_value: int) -> None: """Updates given item value in heap if present""" if item not in self.pos_map: return @@ -84,7 +87,7 @@ def update_item(self, item, item_value): self._heapify_up(index) self._heapify_down(index) - def delete_item(self, item): + def delete_item(self, item: int) -> None: """Deletes given item from heap if present""" if item not in self.pos_map: return @@ -99,7 +102,7 @@ def delete_item(self, item): self._heapify_up(index) self._heapify_down(index) - def insert_item(self, item, item_value): + def insert_item(self, item: int, item_value: int) -> None: """Inserts given item with given value in heap""" arr_len = len(self.arr) if arr_len == self.size: @@ -110,11 +113,11 @@ def insert_item(self, item, item_value): self.size += 1 self._heapify_up(self.size - 1) - def get_top(self): + def get_top(self) -> tuple | None: """Returns top item tuple (Calculated value, item) from heap if present""" return self.arr[0] if self.size else None - def extract_top(self): + def extract_top(self) -> tuple | None: """ Return top item tuple (Calculated value, item) from heap and removes it as well if present From aeb933bff55734f33268848fb1fcb6a0395297cb Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Wed, 12 Oct 2022 22:07:00 +0530 Subject: [PATCH 0520/1543] Add typing to data_structures/hashing/hash_table.py (#7040) * Update hash_table.py * [pre-commit.ci] auto fixes from pre-commit.com 
hooks for more information, see https://pre-commit.ci * Update hash_table.py * Update hash_table.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/hashing/hash_table.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index f4422de53821..1cd71cc4baf3 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -7,13 +7,18 @@ class HashTable: Basic Hash Table example with open addressing and linear probing """ - def __init__(self, size_table, charge_factor=None, lim_charge=None): + def __init__( + self, + size_table: int, + charge_factor: int | None = None, + lim_charge: float | None = None, + ) -> None: self.size_table = size_table self.values = [None] * self.size_table self.lim_charge = 0.75 if lim_charge is None else lim_charge self.charge_factor = 1 if charge_factor is None else charge_factor - self.__aux_list = [] - self._keys = {} + self.__aux_list: list = [] + self._keys: dict = {} def keys(self): return self._keys From e272b9d6a494036aaa7f71c53d01017a34117bc9 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Wed, 12 Oct 2022 22:14:08 +0530 Subject: [PATCH 0521/1543] Add typing to data_structures/queue/queue_on_pseudo_stack.py (#7037) * Add typing hacktoberfest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/queue/queue_on_pseudo_stack.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/data_structures/queue/queue_on_pseudo_stack.py b/data_structures/queue/queue_on_pseudo_stack.py index 7fa2fb2566af..9a0c16f61eb4 100644 --- a/data_structures/queue/queue_on_pseudo_stack.py +++ b/data_structures/queue/queue_on_pseudo_stack.py @@ -1,4 +1,5 @@ """Queue represented by a pseudo stack (represented by a list with pop and append)""" +from typing import Any class Queue: @@ -14,7 +15,7 @@ def __str__(self): @param item item to enqueue""" - def put(self, item): + def put(self, item: Any) -> None: self.stack.append(item) self.length = self.length + 1 @@ -23,7 +24,7 @@ def put(self, item): @return dequeued item that was dequeued""" - def get(self): + def get(self) -> Any: self.rotate(1) dequeued = self.stack[self.length - 1] self.stack = self.stack[:-1] @@ -35,7 +36,7 @@ def get(self): @param rotation number of times to rotate queue""" - def rotate(self, rotation): + def rotate(self, rotation: int) -> None: for i in range(rotation): temp = self.stack[0] self.stack = self.stack[1:] @@ -45,7 +46,7 @@ def rotate(self, rotation): """Reports item at the front of self @return item at front of self.stack""" - def front(self): + def front(self) -> Any: front = self.get() self.put(front) self.rotate(self.length - 1) @@ -53,5 +54,5 @@ def front(self): """Returns the length of this.stack""" - def size(self): + def size(self) -> int: return self.length From f676055bc6e4f3540c97745ffc19bf62955c9077 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Wed, 12 Oct 2022 22:19:49 +0530 Subject: [PATCH 0522/1543] Add typing to maths/segmented_sieve.py (#7054) --- maths/segmented_sieve.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/segmented_sieve.py b/maths/segmented_sieve.py index 
0054b0595be5..35ed9702b3be 100644 --- a/maths/segmented_sieve.py +++ b/maths/segmented_sieve.py @@ -3,7 +3,7 @@ import math -def sieve(n): +def sieve(n: int) -> list[int]: """Segmented Sieve.""" in_prime = [] start = 2 From 922887c38609650dc8eb8eaa9153605eabc45ecd Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj <114707091+rohanr18@users.noreply.github.com> Date: Thu, 13 Oct 2022 00:04:01 +0530 Subject: [PATCH 0523/1543] Add volume of hollow circular cylinder, Exceptions (#6441) * Add volume of hollow circular cylinder, Exceptions * Update volume.py * floats, zeroes tests added * Update volume.py * f-strings --- maths/volume.py | 255 ++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 223 insertions(+), 32 deletions(-) diff --git a/maths/volume.py b/maths/volume.py index acaed65f4858..97c06d7e1c3a 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -1,6 +1,5 @@ """ Find Volumes of Various Shapes. - Wikipedia reference: https://en.wikipedia.org/wiki/Volume """ from __future__ import annotations @@ -11,12 +10,21 @@ def vol_cube(side_length: int | float) -> float: """ Calculate the Volume of a Cube. - >>> vol_cube(1) 1.0 >>> vol_cube(3) 27.0 + >>> vol_cube(0) + 0.0 + >>> vol_cube(1.6) + 4.096000000000001 + >>> vol_cube(-1) + Traceback (most recent call last): + ... + ValueError: vol_cube() only accepts non-negative values """ + if side_length < 0: + raise ValueError("vol_cube() only accepts non-negative values") return pow(side_length, 3) @@ -24,10 +32,23 @@ def vol_spherical_cap(height: float, radius: float) -> float: """ Calculate the Volume of the spherical cap. :return 1/3 pi * height ^ 2 * (3 * radius - height) - >>> vol_spherical_cap(1, 2) 5.235987755982988 + >>> vol_spherical_cap(1.6, 2.6) + 16.621119532592402 + >>> vol_spherical_cap(0, 0) + 0.0 + >>> vol_spherical_cap(-1, 2) + Traceback (most recent call last): + ... + ValueError: vol_spherical_cap() only accepts non-negative values + >>> vol_spherical_cap(1, -2) + Traceback (most recent call last): + ... + ValueError: vol_spherical_cap() only accepts non-negative values """ + if height < 0 or radius < 0: + raise ValueError("vol_spherical_cap() only accepts non-negative values") return 1 / 3 * pi * pow(height, 2) * (3 * radius - height) @@ -36,7 +57,6 @@ def vol_spheres_intersect( ) -> float: """ Calculate the volume of the intersection of two spheres. - The intersection is composed by two spherical caps and therefore its volume is the sum of the volumes of the spherical caps. First, it calculates the heights (h1, h2) of the spherical caps, then the two volumes and it returns the sum. @@ -49,10 +69,27 @@ def vol_spheres_intersect( / (2 * centers_distance) if centers_distance is 0 then it returns the volume of the smallers sphere :return vol_spherical_cap(h1, radius_2) + vol_spherical_cap(h2, radius_1) - >>> vol_spheres_intersect(2, 2, 1) 21.205750411731103 + >>> vol_spheres_intersect(2.6, 2.6, 1.6) + 40.71504079052372 + >>> vol_spheres_intersect(0, 0, 0) + 0.0 + >>> vol_spheres_intersect(-2, 2, 1) + Traceback (most recent call last): + ... + ValueError: vol_spheres_intersect() only accepts non-negative values + >>> vol_spheres_intersect(2, -2, 1) + Traceback (most recent call last): + ... + ValueError: vol_spheres_intersect() only accepts non-negative values + >>> vol_spheres_intersect(2, 2, -1) + Traceback (most recent call last): + ... 
+ ValueError: vol_spheres_intersect() only accepts non-negative values """ + if radius_1 < 0 or radius_2 < 0 or centers_distance < 0: + raise ValueError("vol_spheres_intersect() only accepts non-negative values") if centers_distance == 0: return vol_sphere(min(radius_1, radius_2)) @@ -74,40 +111,81 @@ def vol_cuboid(width: float, height: float, length: float) -> float: """ Calculate the Volume of a Cuboid. :return multiple of width, length and height - >>> vol_cuboid(1, 1, 1) 1.0 >>> vol_cuboid(1, 2, 3) 6.0 + >>> vol_cuboid(1.6, 2.6, 3.6) + 14.976 + >>> vol_cuboid(0, 0, 0) + 0.0 + >>> vol_cuboid(-1, 2, 3) + Traceback (most recent call last): + ... + ValueError: vol_cuboid() only accepts non-negative values + >>> vol_cuboid(1, -2, 3) + Traceback (most recent call last): + ... + ValueError: vol_cuboid() only accepts non-negative values + >>> vol_cuboid(1, 2, -3) + Traceback (most recent call last): + ... + ValueError: vol_cuboid() only accepts non-negative values """ + if width < 0 or height < 0 or length < 0: + raise ValueError("vol_cuboid() only accepts non-negative values") return float(width * height * length) def vol_cone(area_of_base: float, height: float) -> float: """ Calculate the Volume of a Cone. - Wikipedia reference: https://en.wikipedia.org/wiki/Cone :return (1/3) * area_of_base * height - >>> vol_cone(10, 3) 10.0 >>> vol_cone(1, 1) 0.3333333333333333 + >>> vol_cone(1.6, 1.6) + 0.8533333333333335 + >>> vol_cone(0, 0) + 0.0 + >>> vol_cone(-1, 1) + Traceback (most recent call last): + ... + ValueError: vol_cone() only accepts non-negative values + >>> vol_cone(1, -1) + Traceback (most recent call last): + ... + ValueError: vol_cone() only accepts non-negative values """ + if height < 0 or area_of_base < 0: + raise ValueError("vol_cone() only accepts non-negative values") return area_of_base * height / 3.0 def vol_right_circ_cone(radius: float, height: float) -> float: """ Calculate the Volume of a Right Circular Cone. - Wikipedia reference: https://en.wikipedia.org/wiki/Cone :return (1/3) * pi * radius^2 * height - >>> vol_right_circ_cone(2, 3) 12.566370614359172 + >>> vol_right_circ_cone(0, 0) + 0.0 + >>> vol_right_circ_cone(1.6, 1.6) + 4.289321169701265 + >>> vol_right_circ_cone(-1, 1) + Traceback (most recent call last): + ... + ValueError: vol_right_circ_cone() only accepts non-negative values + >>> vol_right_circ_cone(1, -1) + Traceback (most recent call last): + ... + ValueError: vol_right_circ_cone() only accepts non-negative values """ + if height < 0 or radius < 0: + raise ValueError("vol_right_circ_cone() only accepts non-negative values") return pi * pow(radius, 2) * height / 3.0 @@ -116,12 +194,25 @@ def vol_prism(area_of_base: float, height: float) -> float: Calculate the Volume of a Prism. Wikipedia reference: https://en.wikipedia.org/wiki/Prism_(geometry) :return V = Bh - >>> vol_prism(10, 2) 20.0 >>> vol_prism(11, 1) 11.0 + >>> vol_prism(1.6, 1.6) + 2.5600000000000005 + >>> vol_prism(0, 0) + 0.0 + >>> vol_prism(-1, 1) + Traceback (most recent call last): + ... + ValueError: vol_prism() only accepts non-negative values + >>> vol_prism(1, -1) + Traceback (most recent call last): + ... + ValueError: vol_prism() only accepts non-negative values """ + if height < 0 or area_of_base < 0: + raise ValueError("vol_prism() only accepts non-negative values") return float(area_of_base * height) @@ -130,12 +221,25 @@ def vol_pyramid(area_of_base: float, height: float) -> float: Calculate the Volume of a Pyramid. 
Wikipedia reference: https://en.wikipedia.org/wiki/Pyramid_(geometry) :return (1/3) * Bh - >>> vol_pyramid(10, 3) 10.0 >>> vol_pyramid(1.5, 3) 1.5 + >>> vol_pyramid(1.6, 1.6) + 0.8533333333333335 + >>> vol_pyramid(0, 0) + 0.0 + >>> vol_pyramid(-1, 1) + Traceback (most recent call last): + ... + ValueError: vol_pyramid() only accepts non-negative values + >>> vol_pyramid(1, -1) + Traceback (most recent call last): + ... + ValueError: vol_pyramid() only accepts non-negative values """ + if height < 0 or area_of_base < 0: + raise ValueError("vol_pyramid() only accepts non-negative values") return area_of_base * height / 3.0 @@ -144,27 +248,44 @@ def vol_sphere(radius: float) -> float: Calculate the Volume of a Sphere. Wikipedia reference: https://en.wikipedia.org/wiki/Sphere :return (4/3) * pi * r^3 - >>> vol_sphere(5) 523.5987755982989 >>> vol_sphere(1) 4.1887902047863905 + >>> vol_sphere(1.6) + 17.15728467880506 + >>> vol_sphere(0) + 0.0 + >>> vol_sphere(-1) + Traceback (most recent call last): + ... + ValueError: vol_sphere() only accepts non-negative values """ + if radius < 0: + raise ValueError("vol_sphere() only accepts non-negative values") return 4 / 3 * pi * pow(radius, 3) -def vol_hemisphere(radius: float): +def vol_hemisphere(radius: float) -> float: """Calculate the volume of a hemisphere Wikipedia reference: https://en.wikipedia.org/wiki/Hemisphere Other references: https://www.cuemath.com/geometry/hemisphere :return 2/3 * pi * radius^3 - >>> vol_hemisphere(1) 2.0943951023931953 - >>> vol_hemisphere(7) 718.3775201208659 + >>> vol_hemisphere(1.6) + 8.57864233940253 + >>> vol_hemisphere(0) + 0.0 + >>> vol_hemisphere(-1) + Traceback (most recent call last): + ... + ValueError: vol_hemisphere() only accepts non-negative values """ + if radius < 0: + raise ValueError("vol_hemisphere() only accepts non-negative values") return 2 / 3 * pi * pow(radius, 3) @@ -172,26 +293,93 @@ def vol_circular_cylinder(radius: float, height: float) -> float: """Calculate the Volume of a Circular Cylinder. Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder :return pi * radius^2 * height - >>> vol_circular_cylinder(1, 1) 3.141592653589793 >>> vol_circular_cylinder(4, 3) 150.79644737231007 + >>> vol_circular_cylinder(1.6, 1.6) + 12.867963509103795 + >>> vol_circular_cylinder(0, 0) + 0.0 + >>> vol_circular_cylinder(-1, 1) + Traceback (most recent call last): + ... + ValueError: vol_circular_cylinder() only accepts non-negative values + >>> vol_circular_cylinder(1, -1) + Traceback (most recent call last): + ... + ValueError: vol_circular_cylinder() only accepts non-negative values """ + if height < 0 or radius < 0: + raise ValueError("vol_circular_cylinder() only accepts non-negative values") return pi * pow(radius, 2) * height -def vol_conical_frustum(height: float, radius_1: float, radius_2: float): +def vol_hollow_circular_cylinder( + inner_radius: float, outer_radius: float, height: float +) -> float: + """Calculate the Volume of a Hollow Circular Cylinder. + >>> vol_hollow_circular_cylinder(1, 2, 3) + 28.274333882308138 + >>> vol_hollow_circular_cylinder(1.6, 2.6, 3.6) + 47.50088092227767 + >>> vol_hollow_circular_cylinder(-1, 2, 3) + Traceback (most recent call last): + ... + ValueError: vol_hollow_circular_cylinder() only accepts non-negative values + >>> vol_hollow_circular_cylinder(1, -2, 3) + Traceback (most recent call last): + ... 
+ ValueError: vol_hollow_circular_cylinder() only accepts non-negative values + >>> vol_hollow_circular_cylinder(1, 2, -3) + Traceback (most recent call last): + ... + ValueError: vol_hollow_circular_cylinder() only accepts non-negative values + >>> vol_hollow_circular_cylinder(2, 1, 3) + Traceback (most recent call last): + ... + ValueError: outer_radius must be greater than inner_radius + >>> vol_hollow_circular_cylinder(0, 0, 0) + Traceback (most recent call last): + ... + ValueError: outer_radius must be greater than inner_radius + """ + if inner_radius < 0 or outer_radius < 0 or height < 0: + raise ValueError( + "vol_hollow_circular_cylinder() only accepts non-negative values" + ) + if outer_radius <= inner_radius: + raise ValueError("outer_radius must be greater than inner_radius") + return pi * (pow(outer_radius, 2) - pow(inner_radius, 2)) * height + + +def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> float: """Calculate the Volume of a Conical Frustum. Wikipedia reference: https://en.wikipedia.org/wiki/Frustum :return 1/3 * pi * height * (radius_1^2 + radius_top^2 + radius_1 * radius_2) - >>> vol_conical_frustum(45, 7, 28) 48490.482608158454 - >>> vol_conical_frustum(1, 1, 2) 7.330382858376184 + >>> vol_conical_frustum(1.6, 2.6, 3.6) + 48.7240076620753 + >>> vol_conical_frustum(0, 0, 0) + 0.0 + >>> vol_conical_frustum(-2, 2, 1) + Traceback (most recent call last): + ... + ValueError: vol_conical_frustum() only accepts non-negative values + >>> vol_conical_frustum(2, -2, 1) + Traceback (most recent call last): + ... + ValueError: vol_conical_frustum() only accepts non-negative values + >>> vol_conical_frustum(2, 2, -1) + Traceback (most recent call last): + ... + ValueError: vol_conical_frustum() only accepts non-negative values """ + if radius_1 < 0 or radius_2 < 0 or height < 0: + raise ValueError("vol_conical_frustum() only accepts non-negative values") return ( 1 / 3 @@ -204,18 +392,21 @@ def vol_conical_frustum(height: float, radius_1: float, radius_2: float): def main(): """Print the Results of Various Volume Calculations.""" print("Volumes:") - print("Cube: " + str(vol_cube(2))) # = 8 - print("Cuboid: " + str(vol_cuboid(2, 2, 2))) # = 8 - print("Cone: " + str(vol_cone(2, 2))) # ~= 1.33 - print("Right Circular Cone: " + str(vol_right_circ_cone(2, 2))) # ~= 8.38 - print("Prism: " + str(vol_prism(2, 2))) # = 4 - print("Pyramid: " + str(vol_pyramid(2, 2))) # ~= 1.33 - print("Sphere: " + str(vol_sphere(2))) # ~= 33.5 - print("Hemisphere: " + str(vol_hemisphere(2))) # ~= 16.75 - print("Circular Cylinder: " + str(vol_circular_cylinder(2, 2))) # ~= 25.1 - print("Conical Frustum: " + str(vol_conical_frustum(2, 2, 4))) # ~= 58.6 - print("Spherical cap: " + str(vol_spherical_cap(1, 2))) # ~= 5.24 - print("Spheres intersetion: " + str(vol_spheres_intersect(2, 2, 1))) # ~= 21.21 + print(f"Cube: {vol_cube(2) = }") # = 8 + print(f"Cuboid: {vol_cuboid(2, 2, 2) = }") # = 8 + print(f"Cone: {vol_cone(2, 2) = }") # ~= 1.33 + print(f"Right Circular Cone: {vol_right_circ_cone(2, 2) = }") # ~= 8.38 + print(f"Prism: {vol_prism(2, 2) = }") # = 4 + print(f"Pyramid: {vol_pyramid(2, 2) = }") # ~= 1.33 + print(f"Sphere: {vol_sphere(2) = }") # ~= 33.5 + print(f"Hemisphere: {vol_hemisphere(2) = }") # ~= 16.75 + print(f"Circular Cylinder: {vol_circular_cylinder(2, 2) = }") # ~= 25.1 + print( + f"Hollow Circular Cylinder: {vol_hollow_circular_cylinder(1, 2, 3) = }" + ) # ~= 28.3 + print(f"Conical Frustum: {vol_conical_frustum(2, 2, 4) = }") # ~= 58.6 + print(f"Spherical cap: 
{vol_spherical_cap(1, 2) = }") # ~= 5.24 + print(f"Spheres intersetion: {vol_spheres_intersect(2, 2, 1) = }") # ~= 21.21 if __name__ == "__main__": From 2423760e1d28b4c6860ef63f83b1e6b4b83c1522 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Thu, 13 Oct 2022 00:11:01 +0530 Subject: [PATCH 0524/1543] Add typing to maths/abs.py (#7060) --- maths/abs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/abs.py b/maths/abs.py index 68c99a1d51d8..dfea52dfbb97 100644 --- a/maths/abs.py +++ b/maths/abs.py @@ -1,7 +1,7 @@ """Absolute Value.""" -def abs_val(num): +def abs_val(num: float) -> float: """ Find the absolute value of a number. From 74494d433f8d050d37642f912f616451f40d65e6 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Thu, 13 Oct 2022 00:11:52 +0530 Subject: [PATCH 0525/1543] Add typing to maths/ceil.py (#7057) --- maths/ceil.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/ceil.py b/maths/ceil.py index 97578265c1a9..909e02b3f780 100644 --- a/maths/ceil.py +++ b/maths/ceil.py @@ -3,7 +3,7 @@ """ -def ceil(x) -> int: +def ceil(x: float) -> int: """ Return the ceiling of x as an Integral. From 32ff33648e0d1f93398db34fd271aa6606abc3a4 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Thu, 13 Oct 2022 00:12:30 +0530 Subject: [PATCH 0526/1543] Add typing to maths/floor.py (#7056) --- maths/floor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/floor.py b/maths/floor.py index 482250f5e59e..8bbcb21aa6e4 100644 --- a/maths/floor.py +++ b/maths/floor.py @@ -3,7 +3,7 @@ """ -def floor(x) -> int: +def floor(x: float) -> int: """ Return the floor of x as an Integral. :param x: the number From 467ade28a04ed3e77b6c89542fd99f390139b5bd Mon Sep 17 00:00:00 2001 From: Rohan R Bharadwaj <114707091+rohanr18@users.noreply.github.com> Date: Thu, 13 Oct 2022 00:18:49 +0530 Subject: [PATCH 0527/1543] Add surface area of cuboid, conical frustum (#6442) * Add surface area of cuboid, conical frustum * add tests for floats, zeroes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/area.py | 131 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 116 insertions(+), 15 deletions(-) diff --git a/maths/area.py b/maths/area.py index b1b139cf4e22..abbf7aa85da5 100644 --- a/maths/area.py +++ b/maths/area.py @@ -7,9 +7,12 @@ def surface_area_cube(side_length: float) -> float: """ Calculate the Surface Area of a Cube. - >>> surface_area_cube(1) 6 + >>> surface_area_cube(1.6) + 15.360000000000003 + >>> surface_area_cube(0) + 0 >>> surface_area_cube(3) 54 >>> surface_area_cube(-1) @@ -22,16 +25,46 @@ def surface_area_cube(side_length: float) -> float: return 6 * side_length**2 +def surface_area_cuboid(length: float, breadth: float, height: float) -> float: + """ + Calculate the Surface Area of a Cuboid. + >>> surface_area_cuboid(1, 2, 3) + 22 + >>> surface_area_cuboid(0, 0, 0) + 0 + >>> surface_area_cuboid(1.6, 2.6, 3.6) + 38.56 + >>> surface_area_cuboid(-1, 2, 3) + Traceback (most recent call last): + ... + ValueError: surface_area_cuboid() only accepts non-negative values + >>> surface_area_cuboid(1, -2, 3) + Traceback (most recent call last): + ... 
+ ValueError: surface_area_cuboid() only accepts non-negative values + >>> surface_area_cuboid(1, 2, -3) + Traceback (most recent call last): + ... + ValueError: surface_area_cuboid() only accepts non-negative values + """ + if length < 0 or breadth < 0 or height < 0: + raise ValueError("surface_area_cuboid() only accepts non-negative values") + return 2 * ((length * breadth) + (breadth * height) + (length * height)) + + def surface_area_sphere(radius: float) -> float: """ Calculate the Surface Area of a Sphere. Wikipedia reference: https://en.wikipedia.org/wiki/Sphere Formula: 4 * pi * r^2 - >>> surface_area_sphere(5) 314.1592653589793 >>> surface_area_sphere(1) 12.566370614359172 + >>> surface_area_sphere(1.6) + 32.169908772759484 + >>> surface_area_sphere(0) + 0.0 >>> surface_area_sphere(-1) Traceback (most recent call last): ... @@ -46,7 +79,6 @@ def surface_area_hemisphere(radius: float) -> float: """ Calculate the Surface Area of a Hemisphere. Formula: 3 * pi * r^2 - >>> surface_area_hemisphere(5) 235.61944901923448 >>> surface_area_hemisphere(1) @@ -70,11 +102,14 @@ def surface_area_cone(radius: float, height: float) -> float: Calculate the Surface Area of a Cone. Wikipedia reference: https://en.wikipedia.org/wiki/Cone Formula: pi * r * (r + (h ** 2 + r ** 2) ** 0.5) - >>> surface_area_cone(10, 24) 1130.9733552923256 >>> surface_area_cone(6, 8) 301.59289474462014 + >>> surface_area_cone(1.6, 2.6) + 23.387862992395807 + >>> surface_area_cone(0, 0) + 0.0 >>> surface_area_cone(-1, -2) Traceback (most recent call last): ... @@ -93,14 +128,51 @@ def surface_area_cone(radius: float, height: float) -> float: return pi * radius * (radius + (height**2 + radius**2) ** 0.5) +def surface_area_conical_frustum( + radius_1: float, radius_2: float, height: float +) -> float: + """ + Calculate the Surface Area of a Conical Frustum. + >>> surface_area_conical_frustum(1, 2, 3) + 45.511728065337266 + >>> surface_area_conical_frustum(4, 5, 6) + 300.7913575056268 + >>> surface_area_conical_frustum(0, 0, 0) + 0.0 + >>> surface_area_conical_frustum(1.6, 2.6, 3.6) + 78.57907060751548 + >>> surface_area_conical_frustum(-1, 2, 3) + Traceback (most recent call last): + ... + ValueError: surface_area_conical_frustum() only accepts non-negative values + >>> surface_area_conical_frustum(1, -2, 3) + Traceback (most recent call last): + ... + ValueError: surface_area_conical_frustum() only accepts non-negative values + >>> surface_area_conical_frustum(1, 2, -3) + Traceback (most recent call last): + ... + ValueError: surface_area_conical_frustum() only accepts non-negative values + """ + if radius_1 < 0 or radius_2 < 0 or height < 0: + raise ValueError( + "surface_area_conical_frustum() only accepts non-negative values" + ) + slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5 + return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2) + + def surface_area_cylinder(radius: float, height: float) -> float: """ Calculate the Surface Area of a Cylinder. Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder Formula: 2 * pi * r * (h + r) - >>> surface_area_cylinder(7, 10) 747.6990515543707 + >>> surface_area_cylinder(1.6, 2.6) + 42.22300526424682 + >>> surface_area_cylinder(0, 0) + 0.0 >>> surface_area_cylinder(6, 8) 527.7875658030853 >>> surface_area_cylinder(-1, -2) @@ -124,9 +196,12 @@ def surface_area_cylinder(radius: float, height: float) -> float: def area_rectangle(length: float, width: float) -> float: """ Calculate the area of a rectangle. 
- >>> area_rectangle(10, 20) 200 + >>> area_rectangle(1.6, 2.6) + 4.16 + >>> area_rectangle(0, 0) + 0 >>> area_rectangle(-1, -2) Traceback (most recent call last): ... @@ -148,9 +223,12 @@ def area_rectangle(length: float, width: float) -> float: def area_square(side_length: float) -> float: """ Calculate the area of a square. - >>> area_square(10) 100 + >>> area_square(0) + 0 + >>> area_square(1.6) + 2.5600000000000005 >>> area_square(-1) Traceback (most recent call last): ... @@ -164,9 +242,12 @@ def area_square(side_length: float) -> float: def area_triangle(base: float, height: float) -> float: """ Calculate the area of a triangle given the base and height. - >>> area_triangle(10, 10) 50.0 + >>> area_triangle(1.6, 2.6) + 2.08 + >>> area_triangle(0, 0) + 0.0 >>> area_triangle(-1, -2) Traceback (most recent call last): ... @@ -188,13 +269,15 @@ def area_triangle(base: float, height: float) -> float: def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float: """ Calculate area of triangle when the length of 3 sides are known. - This function uses Heron's formula: https://en.wikipedia.org/wiki/Heron%27s_formula - >>> area_triangle_three_sides(5, 12, 13) 30.0 >>> area_triangle_three_sides(10, 11, 12) 51.521233486786784 + >>> area_triangle_three_sides(0, 0, 0) + 0.0 + >>> area_triangle_three_sides(1.6, 2.6, 3.6) + 1.8703742940919619 >>> area_triangle_three_sides(-1, -2, -1) Traceback (most recent call last): ... @@ -233,9 +316,12 @@ def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float def area_parallelogram(base: float, height: float) -> float: """ Calculate the area of a parallelogram. - >>> area_parallelogram(10, 20) 200 + >>> area_parallelogram(1.6, 2.6) + 4.16 + >>> area_parallelogram(0, 0) + 0 >>> area_parallelogram(-1, -2) Traceback (most recent call last): ... @@ -257,9 +343,12 @@ def area_parallelogram(base: float, height: float) -> float: def area_trapezium(base1: float, base2: float, height: float) -> float: """ Calculate the area of a trapezium. - >>> area_trapezium(10, 20, 30) 450.0 + >>> area_trapezium(1.6, 2.6, 3.6) + 7.5600000000000005 + >>> area_trapezium(0, 0, 0) + 0.0 >>> area_trapezium(-1, -2, -3) Traceback (most recent call last): ... @@ -297,9 +386,12 @@ def area_trapezium(base1: float, base2: float, height: float) -> float: def area_circle(radius: float) -> float: """ Calculate the area of a circle. - >>> area_circle(20) 1256.6370614359173 + >>> area_circle(1.6) + 8.042477193189871 + >>> area_circle(0) + 0.0 >>> area_circle(-1) Traceback (most recent call last): ... @@ -313,11 +405,14 @@ def area_circle(radius: float) -> float: def area_ellipse(radius_x: float, radius_y: float) -> float: """ Calculate the area of a ellipse. - >>> area_ellipse(10, 10) 314.1592653589793 >>> area_ellipse(10, 20) 628.3185307179587 + >>> area_ellipse(0, 0) + 0.0 + >>> area_ellipse(1.6, 2.6) + 13.06902543893354 >>> area_ellipse(-10, 20) Traceback (most recent call last): ... @@ -339,9 +434,12 @@ def area_ellipse(radius_x: float, radius_y: float) -> float: def area_rhombus(diagonal_1: float, diagonal_2: float) -> float: """ Calculate the area of a rhombus. - >>> area_rhombus(10, 20) 100.0 + >>> area_rhombus(1.6, 2.6) + 2.08 + >>> area_rhombus(0, 0) + 0.0 >>> area_rhombus(-1, -2) Traceback (most recent call last): ... 
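As a quick check of the Heron's formula doctest above: for sides 5, 12, 13 the
semi-perimeter is s = (5 + 12 + 13) / 2 = 15, so the area is
sqrt(15 * (15 - 5) * (15 - 12) * (15 - 13)) = sqrt(900) = 30.0, matching
area_triangle_three_sides(5, 12, 13).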
@@ -374,9 +472,12 @@ def area_rhombus(diagonal_1: float, diagonal_2: float) -> float: print(f"Rhombus: {area_rhombus(10, 20) = }") print(f"Trapezium: {area_trapezium(10, 20, 30) = }") print(f"Circle: {area_circle(20) = }") + print(f"Ellipse: {area_ellipse(10, 20) = }") print("\nSurface Areas of various geometric shapes: \n") print(f"Cube: {surface_area_cube(20) = }") + print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }") print(f"Sphere: {surface_area_sphere(20) = }") print(f"Hemisphere: {surface_area_hemisphere(20) = }") print(f"Cone: {surface_area_cone(10, 20) = }") + print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }") print(f"Cylinder: {surface_area_cylinder(10, 20) = }") From c0c230255ffe79946bd959ecb559696353ac33f2 Mon Sep 17 00:00:00 2001 From: Eeman Majumder <54275491+Eeman1113@users.noreply.github.com> Date: Thu, 13 Oct 2022 01:13:52 +0530 Subject: [PATCH 0528/1543] added self organising maps algorithm in the machine learning section. (#6877) * added self organising maps algo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update machine_learning/Self_Organising_Maps.py * Update and rename Self_Organising_Maps.py to self_organizing_map.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update self_organizing_map.py * Update self_organizing_map.py * Update self_organizing_map.py * Update self_organizing_map.py Co-authored-by: Eeman Majumder Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- machine_learning/self_organizing_map.py | 73 +++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 machine_learning/self_organizing_map.py diff --git a/machine_learning/self_organizing_map.py b/machine_learning/self_organizing_map.py new file mode 100644 index 000000000000..bd3d388f910f --- /dev/null +++ b/machine_learning/self_organizing_map.py @@ -0,0 +1,73 @@ +""" +https://en.wikipedia.org/wiki/Self-organizing_map +""" +import math + + +class SelfOrganizingMap: + def get_winner(self, weights: list[list[float]], sample: list[int]) -> int: + """ + Compute the winning vector by Euclidean distance + + >>> SelfOrganizingMap().get_winner([[1, 2, 3], [4, 5, 6]], [1, 2, 3]) + 1 + """ + d0 = 0.0 + d1 = 0.0 + for i in range(len(sample)): + d0 += math.pow((sample[i] - weights[0][i]), 2) + d1 += math.pow((sample[i] - weights[1][i]), 2) + return 0 if d0 > d1 else 1 + return 0 + + def update( + self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float + ) -> list[list[int | float]]: + """ + Update the winning vector. 
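+        The update rule moves a weight toward the sample by a fraction alpha:
+        w[j][i] += alpha * (sample[i] - w[j][i]); for instance, with
+        alpha = 0.1 the weight 4 below becomes 4 + 0.1 * (1 - 4) = 3.7.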
+ + >>> SelfOrganizingMap().update([[1, 2, 3], [4, 5, 6]], [1, 2, 3], 1, 0.1) + [[1, 2, 3], [3.7, 4.7, 6]] + """ + for i in range(len(weights)): + weights[j][i] += alpha * (sample[i] - weights[j][i]) + return weights + + +# Driver code +def main() -> None: + # Training Examples ( m, n ) + training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] + + # weight initialization ( n, C ) + weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] + + # training + self_organizing_map = SelfOrganizingMap() + epochs = 3 + alpha = 0.5 + + for i in range(epochs): + for j in range(len(training_samples)): + + # training sample + sample = training_samples[j] + + # Compute the winning vector + winner = self_organizing_map.get_winner(weights, sample) + + # Update the winning vector + weights = self_organizing_map.update(weights, sample, winner, alpha) + + # classify test sample + sample = [0, 0, 0, 1] + winner = self_organizing_map.get_winner(weights, sample) + + # results + print(f"Clusters that the test sample belongs to : {winner}") + print(f"Weights that have been trained : {weights}") + + +# running the main() function +if __name__ == "__main__": + main() From bae08adc86c44268faaa0fe05ea0f2f91567ac9a Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 12 Oct 2022 21:56:07 +0200 Subject: [PATCH 0529/1543] README.md: Lose LGTM badge because we don't use it (#7063) * README.md: Lose LGTM badge because we don't use it * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 ++ README.md | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 9ef72c403f32..25272af4a708 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -14,6 +14,7 @@ ## Audio Filters * [Butterworth Filter](audio_filters/butterworth_filter.py) + * [Equal Loudness Filter](audio_filters/equal_loudness_filter.py) * [Iir Filter](audio_filters/iir_filter.py) * [Show Response](audio_filters/show_response.py) @@ -475,6 +476,7 @@ * [Binomial Coefficient](maths/binomial_coefficient.py) * [Binomial Distribution](maths/binomial_distribution.py) * [Bisection](maths/bisection.py) + * [Carmichael Number](maths/carmichael_number.py) * [Catalan Number](maths/catalan_number.py) * [Ceil](maths/ceil.py) * [Check Polygon](maths/check_polygon.py) diff --git a/README.md b/README.md index c787979607ee..c499c14e12b9 100644 --- a/README.md +++ b/README.md @@ -24,9 +24,6 @@ GitHub Workflow Status - - LGTM - pre-commit From e2cd982b1154814debe2960498ccbb29d4829bf7 Mon Sep 17 00:00:00 2001 From: VARISH GAUTAM <48176176+Variiiest@users.noreply.github.com> Date: Thu, 13 Oct 2022 02:12:02 +0530 Subject: [PATCH 0530/1543] Weird numbers (#6871) * Create weird_number.py In number theory, a weird number is a natural number that is abundant but not semiperfect * check * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed * Update weird_number.py * Update weird_number.py * Update weird_number.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/weird_number.py | 100 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 maths/weird_number.py diff --git a/maths/weird_number.py b/maths/weird_number.py new file mode 100644 index 000000000000..2834a9fee31e --- 
/dev/null +++ b/maths/weird_number.py @@ -0,0 +1,100 @@ +""" +https://en.wikipedia.org/wiki/Weird_number + +Fun fact: The set of weird numbers has positive asymptotic density. +""" +from math import sqrt + + +def factors(number: int) -> list[int]: + """ + >>> factors(12) + [1, 2, 3, 4, 6] + >>> factors(1) + [1] + >>> factors(100) + [1, 2, 4, 5, 10, 20, 25, 50] + + # >>> factors(-12) + # [1, 2, 3, 4, 6] + """ + + values = [1] + for i in range(2, int(sqrt(number)) + 1, 1): + if number % i == 0: + values.append(i) + if int(number // i) != i: + values.append(int(number // i)) + return sorted(values) + + +def abundant(n: int) -> bool: + """ + >>> abundant(0) + True + >>> abundant(1) + False + >>> abundant(12) + True + >>> abundant(13) + False + >>> abundant(20) + True + + # >>> abundant(-12) + # True + """ + return sum(factors(n)) > n + + +def semi_perfect(number: int) -> bool: + """ + >>> semi_perfect(0) + True + >>> semi_perfect(1) + True + >>> semi_perfect(12) + True + >>> semi_perfect(13) + False + + # >>> semi_perfect(-12) + # True + """ + values = factors(number) + r = len(values) + subset = [[0 for i in range(number + 1)] for j in range(r + 1)] + for i in range(r + 1): + subset[i][0] = True + + for i in range(1, number + 1): + subset[0][i] = False + + for i in range(1, r + 1): + for j in range(1, number + 1): + if j < values[i - 1]: + subset[i][j] = subset[i - 1][j] + else: + subset[i][j] = subset[i - 1][j] or subset[i - 1][j - values[i - 1]] + + return subset[r][number] != 0 + + +def weird(number: int) -> bool: + """ + >>> weird(0) + False + >>> weird(70) + True + >>> weird(77) + False + """ + return abundant(number) and not semi_perfect(number) + + +if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) + for number in (69, 70, 71): + print(f"{number} is {'' if weird(number) else 'not '}weird.") From 07e991d55330bf1363ba53858a98cf6fd8d45026 Mon Sep 17 00:00:00 2001 From: Caeden Date: Wed, 12 Oct 2022 23:54:20 +0100 Subject: [PATCH 0531/1543] Add pep8-naming to pre-commit hooks and fixes incorrect naming conventions (#7062) * ci(pre-commit): Add pep8-naming to `pre-commit` hooks (#7038) * refactor: Fix naming conventions (#7038) * Update arithmetic_analysis/lu_decomposition.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor(lu_decomposition): Replace `NDArray` with `ArrayLike` (#7038) * chore: Fix naming conventions in doctests (#7038) * fix: Temporarily disable project euler problem 104 (#7069) * chore: Fix naming conventions in doctests (#7038) Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 1 + arithmetic_analysis/lu_decomposition.py | 6 +- backtracking/n_queens.py | 4 +- ciphers/affine_cipher.py | 52 ++--- ciphers/bifid.py | 2 +- ciphers/brute_force_caesar_cipher.py | 2 +- ciphers/elgamal_key_generator.py | 10 +- ciphers/hill_cipher.py | 4 +- ciphers/polybius.py | 2 +- ciphers/rabin_miller.py | 14 +- ciphers/rsa_cipher.py | 14 +- ciphers/rsa_factorization.py | 10 +- ciphers/rsa_key_generator.py | 28 +-- ciphers/simple_substitution_cipher.py | 48 ++--- ciphers/trafid_cipher.py | 56 +++--- ciphers/transposition_cipher.py | 36 ++-- ...ansposition_cipher_encrypt_decrypt_file.py | 32 +-- ciphers/vigenere_cipher.py | 30 +-- compression/lempel_ziv_decompress.py | 6 +- compression/peak_signal_to_noise_ratio.py | 4 +- computer_vision/harris_corner.py | 4 +- 
conversions/binary_to_hexadecimal.py | 2 +- conversions/decimal_to_any.py | 2 +- conversions/prefix_conversions.py | 32 +-- conversions/roman_numerals.py | 2 +- data_structures/binary_tree/avl_tree.py | 42 ++-- .../binary_tree/lazy_segment_tree.py | 8 +- data_structures/binary_tree/segment_tree.py | 14 +- data_structures/binary_tree/treap.py | 16 +- data_structures/heap/min_heap.py | 22 +- .../stacks/infix_to_prefix_conversion.py | 60 +++--- data_structures/stacks/postfix_evaluation.py | 38 ++-- data_structures/stacks/stock_span_problem.py | 12 +- .../edge_detection/canny.py | 24 +-- .../filters/bilateral_filter.py | 20 +- .../histogram_stretch.py | 12 +- digital_image_processing/index_calculation.py | 190 +++++++++--------- .../test_digital_image_processing.py | 4 +- divide_and_conquer/inversions.py | 34 ++-- dynamic_programming/bitmask.py | 12 +- dynamic_programming/edit_distance.py | 34 ++-- dynamic_programming/floyd_warshall.py | 46 ++--- dynamic_programming/fractional_knapsack.py | 8 +- dynamic_programming/knapsack.py | 34 ++-- .../longest_common_subsequence.py | 10 +- .../longest_increasing_subsequence.py | 6 +- ...longest_increasing_subsequence_o(nlogn).py | 16 +- dynamic_programming/matrix_chain_order.py | 38 ++-- dynamic_programming/max_sub_array.py | 16 +- dynamic_programming/minimum_coin_change.py | 4 +- dynamic_programming/minimum_partition.py | 2 +- dynamic_programming/sum_of_subset.py | 18 +- fractals/sierpinski_triangle.py | 28 +-- geodesy/haversine_distance.py | 6 +- geodesy/lamberts_ellipsoidal_distance.py | 24 +-- graphs/articulation_points.py | 30 +-- graphs/basic_graphs.py | 78 +++---- graphs/check_bipartite_graph_bfs.py | 4 +- graphs/dijkstra.py | 12 +- graphs/dijkstra_2.py | 36 ++-- graphs/dijkstra_algorithm.py | 14 +- .../edmonds_karp_multiple_source_and_sink.py | 163 +++++++-------- ...n_path_and_circuit_for_undirected_graph.py | 20 +- graphs/frequent_pattern_graph_miner.py | 28 +-- graphs/kahns_algorithm_long.py | 12 +- graphs/kahns_algorithm_topo.py | 4 +- graphs/minimum_spanning_tree_prims.py | 48 ++--- graphs/multi_heuristic_astar.py | 12 +- graphs/scc_kosaraju.py | 12 +- graphs/tests/test_min_spanning_tree_prim.py | 2 +- hashes/adler32.py | 2 +- hashes/chaos_machine.py | 16 +- hashes/hamming_code.py | 170 ++++++++-------- hashes/md5.py | 92 ++++----- hashes/sha1.py | 2 +- hashes/sha256.py | 8 +- linear_algebra/src/power_iteration.py | 4 +- linear_algebra/src/rayleigh_quotient.py | 16 +- linear_algebra/src/test_linear_algebra.py | 60 +++--- machine_learning/decision_tree.py | 54 ++--- machine_learning/gaussian_naive_bayes.py | 12 +- .../gradient_boosting_regressor.py | 14 +- machine_learning/k_means_clust.py | 16 +- .../local_weighted_learning.py | 4 +- machine_learning/logistic_regression.py | 36 ++-- .../multilayer_perceptron_classifier.py | 4 +- machine_learning/random_forest_classifier.py | 6 +- machine_learning/random_forest_regressor.py | 6 +- .../sequential_minimum_optimization.py | 64 +++--- machine_learning/word_frequency_functions.py | 10 +- maths/binomial_coefficient.py | 8 +- maths/carmichael_number.py | 4 +- maths/decimal_isolate.py | 6 +- maths/euler_method.py | 6 +- maths/euler_modified.py | 6 +- maths/hardy_ramanujanalgo.py | 6 +- maths/jaccard_similarity.py | 48 ++--- maths/krishnamurthy_number.py | 6 +- maths/kth_lexicographic_permutation.py | 6 +- maths/lucas_lehmer_primality_test.py | 4 +- maths/primelib.py | 140 ++++++------- maths/qr_decomposition.py | 20 +- maths/radix2_fft.py | 72 +++---- maths/runge_kutta.py | 6 +- maths/softmax.py | 6 
+- matrix/count_islands_in_matrix.py | 10 +- matrix/inverse_of_matrix.py | 6 +- matrix/sherman_morrison.py | 26 +-- networking_flow/ford_fulkerson.py | 8 +- networking_flow/minimum_cut.py | 4 +- neural_network/convolution_neural_network.py | 6 +- other/davisb_putnamb_logemannb_loveland.py | 36 ++-- other/greedy.py | 38 ++-- other/nested_brackets.py | 12 +- other/sdes.py | 8 +- other/tower_of_hanoi.py | 14 +- physics/n_body_simulation.py | 6 +- project_euler/problem_011/sol1.py | 30 +-- project_euler/problem_012/sol1.py | 16 +- project_euler/problem_023/sol1.py | 8 +- project_euler/problem_029/sol1.py | 16 +- project_euler/problem_032/sol32.py | 8 +- project_euler/problem_042/solution42.py | 4 +- project_euler/problem_054/test_poker_hand.py | 2 +- project_euler/problem_064/sol1.py | 2 +- project_euler/problem_097/sol1.py | 4 +- .../problem_104/{sol.py => sol.py.FIXME} | 0 project_euler/problem_125/sol1.py | 2 +- .../non_preemptive_shortest_job_first.py | 4 +- searches/tabu_search.py | 4 +- sorts/odd_even_transposition_parallel.py | 92 ++++----- sorts/radix_sort.py | 2 +- sorts/random_normal_distribution_quicksort.py | 44 ++-- sorts/random_pivot_quick_sort.py | 24 +-- sorts/tree_sort.py | 8 +- strings/boyer_moore_search.py | 8 +- .../can_string_be_rearranged_as_palindrome.py | 6 +- strings/check_anagrams.py | 8 +- strings/word_patterns.py | 8 +- web_programming/fetch_quotes.py | 4 +- 140 files changed, 1555 insertions(+), 1539 deletions(-) rename project_euler/problem_104/{sol.py => sol.py.FIXME} (100%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0abe647b017a..2f6a92814c66 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,6 +40,7 @@ repos: - --ignore=E203,W503 - --max-complexity=25 - --max-line-length=88 + additional_dependencies: [pep8-naming] - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index 371f7b166b2e..1e98b9066c3f 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -6,13 +6,13 @@ from __future__ import annotations import numpy as np -import numpy.typing as NDArray from numpy import float64 +from numpy.typing import ArrayLike def lower_upper_decomposition( - table: NDArray[float64], -) -> tuple[NDArray[float64], NDArray[float64]]: + table: ArrayLike[float64], +) -> tuple[ArrayLike[float64], ArrayLike[float64]]: """Lower-Upper (LU) Decomposition Example: diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index b8ace59781f5..bbf0ce44f91c 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -12,7 +12,7 @@ solution = [] -def isSafe(board: list[list[int]], row: int, column: int) -> bool: +def is_safe(board: list[list[int]], row: int, column: int) -> bool: """ This function returns a boolean value True if it is safe to place a queen there considering the current state of the board. @@ -63,7 +63,7 @@ def solve(board: list[list[int]], row: int) -> bool: If all the combinations for that particular branch are successful the board is reinitialized for the next possible combination. 
""" - if isSafe(board, row, i): + if is_safe(board, row, i): board[row][i] = 1 solve(board, row + 1) board[row][i] = 0 diff --git a/ciphers/affine_cipher.py b/ciphers/affine_cipher.py index d3b806ba1eeb..cd1e33b88425 100644 --- a/ciphers/affine_cipher.py +++ b/ciphers/affine_cipher.py @@ -9,26 +9,26 @@ ) -def check_keys(keyA: int, keyB: int, mode: str) -> None: +def check_keys(key_a: int, key_b: int, mode: str) -> None: if mode == "encrypt": - if keyA == 1: + if key_a == 1: sys.exit( "The affine cipher becomes weak when key " "A is set to 1. Choose different key" ) - if keyB == 0: + if key_b == 0: sys.exit( "The affine cipher becomes weak when key " "B is set to 0. Choose different key" ) - if keyA < 0 or keyB < 0 or keyB > len(SYMBOLS) - 1: + if key_a < 0 or key_b < 0 or key_b > len(SYMBOLS) - 1: sys.exit( "Key A must be greater than 0 and key B must " f"be between 0 and {len(SYMBOLS) - 1}." ) - if cryptomath.gcd(keyA, len(SYMBOLS)) != 1: + if cryptomath.gcd(key_a, len(SYMBOLS)) != 1: sys.exit( - f"Key A {keyA} and the symbol set size {len(SYMBOLS)} " + f"Key A {key_a} and the symbol set size {len(SYMBOLS)} " "are not relatively prime. Choose a different key." ) @@ -39,16 +39,16 @@ def encrypt_message(key: int, message: str) -> str: ... 'substitution cipher.') 'VL}p MM{I}p~{HL}Gp{vp pFsH}pxMpyxIx JHL O}F{~pvuOvF{FuF{xIp~{HL}Gi' """ - keyA, keyB = divmod(key, len(SYMBOLS)) - check_keys(keyA, keyB, "encrypt") - cipherText = "" + key_a, key_b = divmod(key, len(SYMBOLS)) + check_keys(key_a, key_b, "encrypt") + cipher_text = "" for symbol in message: if symbol in SYMBOLS: - symIndex = SYMBOLS.find(symbol) - cipherText += SYMBOLS[(symIndex * keyA + keyB) % len(SYMBOLS)] + sym_index = SYMBOLS.find(symbol) + cipher_text += SYMBOLS[(sym_index * key_a + key_b) % len(SYMBOLS)] else: - cipherText += symbol - return cipherText + cipher_text += symbol + return cipher_text def decrypt_message(key: int, message: str) -> str: @@ -57,25 +57,27 @@ def decrypt_message(key: int, message: str) -> str: ... '{xIp~{HL}Gi') 'The affine cipher is a type of monoalphabetic substitution cipher.' 
""" - keyA, keyB = divmod(key, len(SYMBOLS)) - check_keys(keyA, keyB, "decrypt") - plainText = "" - modInverseOfkeyA = cryptomath.find_mod_inverse(keyA, len(SYMBOLS)) + key_a, key_b = divmod(key, len(SYMBOLS)) + check_keys(key_a, key_b, "decrypt") + plain_text = "" + mod_inverse_of_key_a = cryptomath.find_mod_inverse(key_a, len(SYMBOLS)) for symbol in message: if symbol in SYMBOLS: - symIndex = SYMBOLS.find(symbol) - plainText += SYMBOLS[(symIndex - keyB) * modInverseOfkeyA % len(SYMBOLS)] + sym_index = SYMBOLS.find(symbol) + plain_text += SYMBOLS[ + (sym_index - key_b) * mod_inverse_of_key_a % len(SYMBOLS) + ] else: - plainText += symbol - return plainText + plain_text += symbol + return plain_text def get_random_key() -> int: while True: - keyA = random.randint(2, len(SYMBOLS)) - keyB = random.randint(2, len(SYMBOLS)) - if cryptomath.gcd(keyA, len(SYMBOLS)) == 1 and keyB % len(SYMBOLS) != 0: - return keyA * len(SYMBOLS) + keyB + key_b = random.randint(2, len(SYMBOLS)) + key_b = random.randint(2, len(SYMBOLS)) + if cryptomath.gcd(key_b, len(SYMBOLS)) == 1 and key_b % len(SYMBOLS) != 0: + return key_b * len(SYMBOLS) + key_b def main() -> None: diff --git a/ciphers/bifid.py b/ciphers/bifid.py index c1b071155917..54d55574cdca 100644 --- a/ciphers/bifid.py +++ b/ciphers/bifid.py @@ -12,7 +12,7 @@ class BifidCipher: def __init__(self) -> None: - SQUARE = [ + SQUARE = [ # noqa: N806 ["a", "b", "c", "d", "e"], ["f", "g", "h", "i", "k"], ["l", "m", "n", "o", "p"], diff --git a/ciphers/brute_force_caesar_cipher.py b/ciphers/brute_force_caesar_cipher.py index 8ab6e77307b4..cc97111e05a7 100644 --- a/ciphers/brute_force_caesar_cipher.py +++ b/ciphers/brute_force_caesar_cipher.py @@ -28,7 +28,7 @@ def decrypt(message: str) -> None: Decryption using Key #24: VOFGVWZ ROFXW Decryption using Key #25: UNEFUVY QNEWV """ - LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # noqa: N806 for key in range(len(LETTERS)): translated = "" for symbol in message: diff --git a/ciphers/elgamal_key_generator.py b/ciphers/elgamal_key_generator.py index 485b77595c7c..4d72128aed52 100644 --- a/ciphers/elgamal_key_generator.py +++ b/ciphers/elgamal_key_generator.py @@ -26,7 +26,7 @@ def primitive_root(p_val: int) -> int: def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]: print("Generating prime p...") - p = rabin_miller.generateLargePrime(key_size) # select large prime number. + p = rabin_miller.generate_large_prime(key_size) # select large prime number. e_1 = primitive_root(p) # one primitive root on modulo p. d = random.randrange(3, p) # private_key -> have to be greater than 2 for safety. 
e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p) @@ -37,7 +37,7 @@ def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, i return public_key, private_key -def make_key_files(name: str, keySize: int) -> None: +def make_key_files(name: str, key_size: int) -> None: if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"): print("\nWARNING:") print( @@ -47,16 +47,16 @@ def make_key_files(name: str, keySize: int) -> None: ) sys.exit() - publicKey, privateKey = generate_key(keySize) + public_key, private_key = generate_key(key_size) print(f"\nWriting public key to file {name}_pubkey.txt...") with open(f"{name}_pubkey.txt", "w") as fo: fo.write( - "%d,%d,%d,%d" % (publicKey[0], publicKey[1], publicKey[2], publicKey[3]) + "%d,%d,%d,%d" % (public_key[0], public_key[1], public_key[2], public_key[3]) ) print(f"Writing private key to file {name}_privkey.txt...") with open(f"{name}_privkey.txt", "w") as fo: - fo.write("%d,%d" % (privateKey[0], privateKey[1])) + fo.write("%d,%d" % (private_key[0], private_key[1])) def main() -> None: diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index d8e436e92c56..f646d567b4c8 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -201,11 +201,11 @@ def decrypt(self, text: str) -> str: def main() -> None: - N = int(input("Enter the order of the encryption key: ")) + n = int(input("Enter the order of the encryption key: ")) hill_matrix = [] print("Enter each row of the encryption key with space separated integers") - for _ in range(N): + for _ in range(n): row = [int(x) for x in input().split()] hill_matrix.append(row) diff --git a/ciphers/polybius.py b/ciphers/polybius.py index 2a45f02a3773..bf5d62f8d33e 100644 --- a/ciphers/polybius.py +++ b/ciphers/polybius.py @@ -11,7 +11,7 @@ class PolybiusCipher: def __init__(self) -> None: - SQUARE = [ + SQUARE = [ # noqa: N806 ["a", "b", "c", "d", "e"], ["f", "g", "h", "i", "k"], ["l", "m", "n", "o", "p"], diff --git a/ciphers/rabin_miller.py b/ciphers/rabin_miller.py index a9b834bfb4be..0aab80eb9175 100644 --- a/ciphers/rabin_miller.py +++ b/ciphers/rabin_miller.py @@ -3,7 +3,7 @@ import random -def rabinMiller(num: int) -> bool: +def rabin_miller(num: int) -> bool: s = num - 1 t = 0 @@ -29,7 +29,7 @@ def is_prime_low_num(num: int) -> bool: if num < 2: return False - lowPrimes = [ + low_primes = [ 2, 3, 5, @@ -200,17 +200,17 @@ def is_prime_low_num(num: int) -> bool: 997, ] - if num in lowPrimes: + if num in low_primes: return True - for prime in lowPrimes: + for prime in low_primes: if (num % prime) == 0: return False - return rabinMiller(num) + return rabin_miller(num) -def generateLargePrime(keysize: int = 1024) -> int: +def generate_large_prime(keysize: int = 1024) -> int: while True: num = random.randrange(2 ** (keysize - 1), 2 ** (keysize)) if is_prime_low_num(num): @@ -218,6 +218,6 @@ def generateLargePrime(keysize: int = 1024) -> int: if __name__ == "__main__": - num = generateLargePrime() + num = generate_large_prime() print(("Prime number:", num)) print(("is_prime_low_num:", is_prime_low_num(num))) diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py index c6bfaa0fb00c..de26992f5eeb 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ -37,12 +37,12 @@ def get_text_from_blocks( def encrypt_message( - message: str, key: tuple[int, int], blockSize: int = DEFAULT_BLOCK_SIZE + message: str, key: tuple[int, int], block_size: int = DEFAULT_BLOCK_SIZE ) -> list[int]: encrypted_blocks = [] n, e = key - for block in 
get_blocks_from_text(message, blockSize): + for block in get_blocks_from_text(message, block_size): encrypted_blocks.append(pow(block, e, n)) return encrypted_blocks @@ -63,8 +63,8 @@ def decrypt_message( def read_key_file(key_filename: str) -> tuple[int, int, int]: with open(key_filename) as fo: content = fo.read() - key_size, n, EorD = content.split(",") - return (int(key_size), int(n), int(EorD)) + key_size, n, eor_d = content.split(",") + return (int(key_size), int(n), int(eor_d)) def encrypt_and_write_to_file( @@ -125,15 +125,15 @@ def main() -> None: if mode == "encrypt": if not os.path.exists("rsa_pubkey.txt"): - rkg.makeKeyFiles("rsa", 1024) + rkg.make_key_files("rsa", 1024) message = input("\nEnter message: ") pubkey_filename = "rsa_pubkey.txt" print(f"Encrypting and writing to {filename}...") - encryptedText = encrypt_and_write_to_file(filename, pubkey_filename, message) + encrypted_text = encrypt_and_write_to_file(filename, pubkey_filename, message) print("\nEncrypted text:") - print(encryptedText) + print(encrypted_text) elif mode == "decrypt": privkey_filename = "rsa_privkey.txt" diff --git a/ciphers/rsa_factorization.py b/ciphers/rsa_factorization.py index de4df27770c7..9ee52777ed83 100644 --- a/ciphers/rsa_factorization.py +++ b/ciphers/rsa_factorization.py @@ -13,7 +13,7 @@ import random -def rsafactor(d: int, e: int, N: int) -> list[int]: +def rsafactor(d: int, e: int, n: int) -> list[int]: """ This function returns the factors of N, where p*q=N Return: [p, q] @@ -35,16 +35,16 @@ def rsafactor(d: int, e: int, N: int) -> list[int]: p = 0 q = 0 while p == 0: - g = random.randint(2, N - 1) + g = random.randint(2, n - 1) t = k while True: if t % 2 == 0: t = t // 2 - x = (g**t) % N - y = math.gcd(x - 1, N) + x = (g**t) % n + y = math.gcd(x - 1, n) if x > 1 and y > 1: p = y - q = N // y + q = n // y break # find the correct factors else: break # t is not divisible by 2, break and choose another g diff --git a/ciphers/rsa_key_generator.py b/ciphers/rsa_key_generator.py index d983c14f1d7e..f64bc7dd0557 100644 --- a/ciphers/rsa_key_generator.py +++ b/ciphers/rsa_key_generator.py @@ -2,38 +2,38 @@ import random import sys -from . import cryptomath_module as cryptoMath -from . import rabin_miller as rabinMiller +from . import cryptomath_module as cryptoMath # noqa: N812 +from . 
import rabin_miller as rabinMiller # noqa: N812 def main() -> None: print("Making key files...") - makeKeyFiles("rsa", 1024) + make_key_files("rsa", 1024) print("Key files generation successful.") -def generateKey(keySize: int) -> tuple[tuple[int, int], tuple[int, int]]: +def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]: print("Generating prime p...") - p = rabinMiller.generateLargePrime(keySize) + p = rabinMiller.generate_large_prime(key_size) print("Generating prime q...") - q = rabinMiller.generateLargePrime(keySize) + q = rabinMiller.generate_large_prime(key_size) n = p * q print("Generating e that is relatively prime to (p - 1) * (q - 1)...") while True: - e = random.randrange(2 ** (keySize - 1), 2 ** (keySize)) + e = random.randrange(2 ** (key_size - 1), 2 ** (key_size)) if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1: break print("Calculating d that is mod inverse of e...") d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1)) - publicKey = (n, e) - privateKey = (n, d) - return (publicKey, privateKey) + public_key = (n, e) + private_key = (n, d) + return (public_key, private_key) -def makeKeyFiles(name: str, keySize: int) -> None: +def make_key_files(name: str, key_size: int) -> None: if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"): print("\nWARNING:") print( @@ -43,14 +43,14 @@ def makeKeyFiles(name: str, keySize: int) -> None: ) sys.exit() - publicKey, privateKey = generateKey(keySize) + public_key, private_key = generate_key(key_size) print(f"\nWriting public key to file {name}_pubkey.txt...") with open(f"{name}_pubkey.txt", "w") as out_file: - out_file.write(f"{keySize},{publicKey[0]},{publicKey[1]}") + out_file.write(f"{key_size},{public_key[0]},{public_key[1]}") print(f"Writing private key to file {name}_privkey.txt...") with open(f"{name}_privkey.txt", "w") as out_file: - out_file.write(f"{keySize},{privateKey[0]},{privateKey[1]}") + out_file.write(f"{key_size},{private_key[0]},{private_key[1]}") if __name__ == "__main__": diff --git a/ciphers/simple_substitution_cipher.py b/ciphers/simple_substitution_cipher.py index a763bd6b6b48..291a9bccd771 100644 --- a/ciphers/simple_substitution_cipher.py +++ b/ciphers/simple_substitution_cipher.py @@ -9,66 +9,66 @@ def main() -> None: key = "LFWOAYUISVKMNXPBDCRJTQEGHZ" resp = input("Encrypt/Decrypt [e/d]: ") - checkValidKey(key) + check_valid_key(key) if resp.lower().startswith("e"): mode = "encrypt" - translated = encryptMessage(key, message) + translated = encrypt_message(key, message) elif resp.lower().startswith("d"): mode = "decrypt" - translated = decryptMessage(key, message) + translated = decrypt_message(key, message) print(f"\n{mode.title()}ion: \n{translated}") -def checkValidKey(key: str) -> None: - keyList = list(key) - lettersList = list(LETTERS) - keyList.sort() - lettersList.sort() +def check_valid_key(key: str) -> None: + key_list = list(key) + letters_list = list(LETTERS) + key_list.sort() + letters_list.sort() - if keyList != lettersList: + if key_list != letters_list: sys.exit("Error in the key or symbol set.") -def encryptMessage(key: str, message: str) -> str: +def encrypt_message(key: str, message: str) -> str: """ - >>> encryptMessage('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Harshil Darji') + >>> encrypt_message('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Harshil Darji') 'Ilcrism Olcvs' """ - return translateMessage(key, message, "encrypt") + return translate_message(key, message, "encrypt") -def decryptMessage(key: str, message: str) -> str: +def decrypt_message(key: str, 
message: str) -> str: """ - >>> decryptMessage('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Ilcrism Olcvs') + >>> decrypt_message('LFWOAYUISVKMNXPBDCRJTQEGHZ', 'Ilcrism Olcvs') 'Harshil Darji' """ - return translateMessage(key, message, "decrypt") + return translate_message(key, message, "decrypt") -def translateMessage(key: str, message: str, mode: str) -> str: +def translate_message(key: str, message: str, mode: str) -> str: translated = "" - charsA = LETTERS - charsB = key + chars_a = LETTERS + chars_b = key if mode == "decrypt": - charsA, charsB = charsB, charsA + chars_a, chars_b = chars_b, chars_a for symbol in message: - if symbol.upper() in charsA: - symIndex = charsA.find(symbol.upper()) + if symbol.upper() in chars_a: + sym_index = chars_a.find(symbol.upper()) if symbol.isupper(): - translated += charsB[symIndex].upper() + translated += chars_b[sym_index].upper() else: - translated += charsB[symIndex].lower() + translated += chars_b[sym_index].lower() else: translated += symbol return translated -def getRandomKey() -> str: +def get_random_key() -> str: key = list(LETTERS) random.shuffle(key) return "".join(key) diff --git a/ciphers/trafid_cipher.py b/ciphers/trafid_cipher.py index b12ceff72907..108ac652f0e4 100644 --- a/ciphers/trafid_cipher.py +++ b/ciphers/trafid_cipher.py @@ -2,12 +2,12 @@ from __future__ import annotations -def __encryptPart(messagePart: str, character2Number: dict[str, str]) -> str: +def __encrypt_part(message_part: str, character_to_number: dict[str, str]) -> str: one, two, three = "", "", "" tmp = [] - for character in messagePart: - tmp.append(character2Number[character]) + for character in message_part: + tmp.append(character_to_number[character]) for each in tmp: one += each[0] @@ -17,18 +17,18 @@ def __encryptPart(messagePart: str, character2Number: dict[str, str]) -> str: return one + two + three -def __decryptPart( - messagePart: str, character2Number: dict[str, str] +def __decrypt_part( + message_part: str, character_to_number: dict[str, str] ) -> tuple[str, str, str]: - tmp, thisPart = "", "" + tmp, this_part = "", "" result = [] - for character in messagePart: - thisPart += character2Number[character] + for character in message_part: + this_part += character_to_number[character] - for digit in thisPart: + for digit in this_part: tmp += digit - if len(tmp) == len(messagePart): + if len(tmp) == len(message_part): result.append(tmp) tmp = "" @@ -79,51 +79,57 @@ def __prepare( "332", "333", ) - character2Number = {} - number2Character = {} + character_to_number = {} + number_to_character = {} for letter, number in zip(alphabet, numbers): - character2Number[letter] = number - number2Character[number] = letter + character_to_number[letter] = number + number_to_character[number] = letter - return message, alphabet, character2Number, number2Character + return message, alphabet, character_to_number, number_to_character -def encryptMessage( +def encrypt_message( message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5 ) -> str: - message, alphabet, character2Number, number2Character = __prepare(message, alphabet) + message, alphabet, character_to_number, number_to_character = __prepare( + message, alphabet + ) encrypted, encrypted_numeric = "", "" for i in range(0, len(message) + 1, period): - encrypted_numeric += __encryptPart(message[i : i + period], character2Number) + encrypted_numeric += __encrypt_part( + message[i : i + period], character_to_number + ) for i in range(0, len(encrypted_numeric), 3): - encrypted += 
number2Character[encrypted_numeric[i : i + 3]] + encrypted += number_to_character[encrypted_numeric[i : i + 3]] return encrypted -def decryptMessage( +def decrypt_message( message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5 ) -> str: - message, alphabet, character2Number, number2Character = __prepare(message, alphabet) + message, alphabet, character_to_number, number_to_character = __prepare( + message, alphabet + ) decrypted_numeric = [] decrypted = "" for i in range(0, len(message) + 1, period): - a, b, c = __decryptPart(message[i : i + period], character2Number) + a, b, c = __decrypt_part(message[i : i + period], character_to_number) for j in range(0, len(a)): decrypted_numeric.append(a[j] + b[j] + c[j]) for each in decrypted_numeric: - decrypted += number2Character[each] + decrypted += number_to_character[each] return decrypted if __name__ == "__main__": msg = "DEFEND THE EAST WALL OF THE CASTLE." - encrypted = encryptMessage(msg, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") - decrypted = decryptMessage(encrypted, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") + encrypted = encrypt_message(msg, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") + decrypted = decrypt_message(encrypted, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") print(f"Encrypted: {encrypted}\nDecrypted: {decrypted}") diff --git a/ciphers/transposition_cipher.py b/ciphers/transposition_cipher.py index ed9923a6ba46..f1f07ddc3f35 100644 --- a/ciphers/transposition_cipher.py +++ b/ciphers/transposition_cipher.py @@ -14,53 +14,53 @@ def main() -> None: mode = input("Encryption/Decryption [e/d]: ") if mode.lower().startswith("e"): - text = encryptMessage(key, message) + text = encrypt_message(key, message) elif mode.lower().startswith("d"): - text = decryptMessage(key, message) + text = decrypt_message(key, message) # Append pipe symbol (vertical bar) to identify spaces at the end. 
print(f"Output:\n{text + '|'}") -def encryptMessage(key: int, message: str) -> str: +def encrypt_message(key: int, message: str) -> str: """ - >>> encryptMessage(6, 'Harshil Darji') + >>> encrypt_message(6, 'Harshil Darji') 'Hlia rDsahrij' """ - cipherText = [""] * key + cipher_text = [""] * key for col in range(key): pointer = col while pointer < len(message): - cipherText[col] += message[pointer] + cipher_text[col] += message[pointer] pointer += key - return "".join(cipherText) + return "".join(cipher_text) -def decryptMessage(key: int, message: str) -> str: +def decrypt_message(key: int, message: str) -> str: """ - >>> decryptMessage(6, 'Hlia rDsahrij') + >>> decrypt_message(6, 'Hlia rDsahrij') 'Harshil Darji' """ - numCols = math.ceil(len(message) / key) - numRows = key - numShadedBoxes = (numCols * numRows) - len(message) - plainText = [""] * numCols + num_cols = math.ceil(len(message) / key) + num_rows = key + num_shaded_boxes = (num_cols * num_rows) - len(message) + plain_text = [""] * num_cols col = 0 row = 0 for symbol in message: - plainText[col] += symbol + plain_text[col] += symbol col += 1 if ( - (col == numCols) - or (col == numCols - 1) - and (row >= numRows - numShadedBoxes) + (col == num_cols) + or (col == num_cols - 1) + and (row >= num_rows - num_shaded_boxes) ): col = 0 row += 1 - return "".join(plainText) + return "".join(plain_text) if __name__ == "__main__": diff --git a/ciphers/transposition_cipher_encrypt_decrypt_file.py b/ciphers/transposition_cipher_encrypt_decrypt_file.py index 926a1b36ac44..6296b1e6d709 100644 --- a/ciphers/transposition_cipher_encrypt_decrypt_file.py +++ b/ciphers/transposition_cipher_encrypt_decrypt_file.py @@ -2,39 +2,39 @@ import sys import time -from . import transposition_cipher as transCipher +from . import transposition_cipher as trans_cipher def main() -> None: - inputFile = "Prehistoric Men.txt" - outputFile = "Output.txt" + input_file = "Prehistoric Men.txt" + output_file = "Output.txt" key = int(input("Enter key: ")) mode = input("Encrypt/Decrypt [e/d]: ") - if not os.path.exists(inputFile): - print(f"File {inputFile} does not exist. Quitting...") + if not os.path.exists(input_file): + print(f"File {input_file} does not exist. Quitting...") sys.exit() - if os.path.exists(outputFile): - print(f"Overwrite {outputFile}? [y/n]") + if os.path.exists(output_file): + print(f"Overwrite {output_file}? 
[y/n]") response = input("> ") if not response.lower().startswith("y"): sys.exit() - startTime = time.time() + start_time = time.time() if mode.lower().startswith("e"): - with open(inputFile) as f: + with open(input_file) as f: content = f.read() - translated = transCipher.encryptMessage(key, content) + translated = trans_cipher.encrypt_message(key, content) elif mode.lower().startswith("d"): - with open(outputFile) as f: + with open(output_file) as f: content = f.read() - translated = transCipher.decryptMessage(key, content) + translated = trans_cipher.decrypt_message(key, content) - with open(outputFile, "w") as outputObj: - outputObj.write(translated) + with open(output_file, "w") as output_obj: + output_obj.write(translated) - totalTime = round(time.time() - startTime, 2) - print(("Done (", totalTime, "seconds )")) + total_time = round(time.time() - start_time, 2) + print(("Done (", total_time, "seconds )")) if __name__ == "__main__": diff --git a/ciphers/vigenere_cipher.py b/ciphers/vigenere_cipher.py index 2e3987708d01..e76161351fb1 100644 --- a/ciphers/vigenere_cipher.py +++ b/ciphers/vigenere_cipher.py @@ -8,43 +8,43 @@ def main() -> None: if mode.lower().startswith("e"): mode = "encrypt" - translated = encryptMessage(key, message) + translated = encrypt_message(key, message) elif mode.lower().startswith("d"): mode = "decrypt" - translated = decryptMessage(key, message) + translated = decrypt_message(key, message) print(f"\n{mode.title()}ed message:") print(translated) -def encryptMessage(key: str, message: str) -> str: +def encrypt_message(key: str, message: str) -> str: """ - >>> encryptMessage('HDarji', 'This is Harshil Darji from Dharmaj.') + >>> encrypt_message('HDarji', 'This is Harshil Darji from Dharmaj.') 'Akij ra Odrjqqs Gaisq muod Mphumrs.' """ - return translateMessage(key, message, "encrypt") + return translate_message(key, message, "encrypt") -def decryptMessage(key: str, message: str) -> str: +def decrypt_message(key: str, message: str) -> str: """ - >>> decryptMessage('HDarji', 'Akij ra Odrjqqs Gaisq muod Mphumrs.') + >>> decrypt_message('HDarji', 'Akij ra Odrjqqs Gaisq muod Mphumrs.') 'This is Harshil Darji from Dharmaj.' 
""" - return translateMessage(key, message, "decrypt") + return translate_message(key, message, "decrypt") -def translateMessage(key: str, message: str, mode: str) -> str: +def translate_message(key: str, message: str, mode: str) -> str: translated = [] - keyIndex = 0 + key_index = 0 key = key.upper() for symbol in message: num = LETTERS.find(symbol.upper()) if num != -1: if mode == "encrypt": - num += LETTERS.find(key[keyIndex]) + num += LETTERS.find(key[key_index]) elif mode == "decrypt": - num -= LETTERS.find(key[keyIndex]) + num -= LETTERS.find(key[key_index]) num %= len(LETTERS) @@ -53,9 +53,9 @@ def translateMessage(key: str, message: str, mode: str) -> str: elif symbol.islower(): translated.append(LETTERS[num].lower()) - keyIndex += 1 - if keyIndex == len(key): - keyIndex = 0 + key_index += 1 + if key_index == len(key): + key_index = 0 else: translated.append(symbol) return "".join(translated) diff --git a/compression/lempel_ziv_decompress.py b/compression/lempel_ziv_decompress.py index 4d3c2c0d2cf3..ddedc3d6d32a 100644 --- a/compression/lempel_ziv_decompress.py +++ b/compression/lempel_ziv_decompress.py @@ -43,10 +43,10 @@ def decompress_data(data_bits: str) -> str: lexicon[curr_string] = last_match_id + "0" if math.log2(index).is_integer(): - newLex = {} + new_lex = {} for curr_key in list(lexicon): - newLex["0" + curr_key] = lexicon.pop(curr_key) - lexicon = newLex + new_lex["0" + curr_key] = lexicon.pop(curr_key) + lexicon = new_lex lexicon[bin(index)[2:]] = last_match_id + "1" index += 1 diff --git a/compression/peak_signal_to_noise_ratio.py b/compression/peak_signal_to_noise_ratio.py index dded2a712c7e..66b18b50b028 100644 --- a/compression/peak_signal_to_noise_ratio.py +++ b/compression/peak_signal_to_noise_ratio.py @@ -16,8 +16,8 @@ def psnr(original: float, contrast: float) -> float: mse = np.mean((original - contrast) ** 2) if mse == 0: return 100 - PIXEL_MAX = 255.0 - PSNR = 20 * math.log10(PIXEL_MAX / math.sqrt(mse)) + PIXEL_MAX = 255.0 # noqa: N806 + PSNR = 20 * math.log10(PIXEL_MAX / math.sqrt(mse)) # noqa: N806 return PSNR diff --git a/computer_vision/harris_corner.py b/computer_vision/harris_corner.py index 886ff52ea70b..7850085f8935 100644 --- a/computer_vision/harris_corner.py +++ b/computer_vision/harris_corner.py @@ -7,7 +7,7 @@ """ -class Harris_Corner: +class HarrisCorner: def __init__(self, k: float, window_size: int): """ @@ -70,6 +70,6 @@ def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]: if __name__ == "__main__": - edge_detect = Harris_Corner(0.04, 3) + edge_detect = HarrisCorner(0.04, 3) color_img, _ = edge_detect.detect("path_to_image") cv2.imwrite("detect.png", color_img) diff --git a/conversions/binary_to_hexadecimal.py b/conversions/binary_to_hexadecimal.py index f94a12390607..61f335a4c465 100644 --- a/conversions/binary_to_hexadecimal.py +++ b/conversions/binary_to_hexadecimal.py @@ -17,7 +17,7 @@ def bin_to_hexadecimal(binary_str: str) -> str: ... 
ValueError: Empty string was passed to the function """ - BITS_TO_HEX = { + BITS_TO_HEX = { # noqa: N806 "0000": "0", "0001": "1", "0010": "2", diff --git a/conversions/decimal_to_any.py b/conversions/decimal_to_any.py index 3c72a7732ac6..e54fa154a0f7 100644 --- a/conversions/decimal_to_any.py +++ b/conversions/decimal_to_any.py @@ -66,7 +66,7 @@ def decimal_to_any(num: int, base: int) -> str: if base > 36: raise ValueError("base must be <= 36") # fmt: off - ALPHABET_VALUES = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F', + ALPHABET_VALUES = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F', # noqa: N806, E501 '16': 'G', '17': 'H', '18': 'I', '19': 'J', '20': 'K', '21': 'L', '22': 'M', '23': 'N', '24': 'O', '25': 'P', '26': 'Q', '27': 'R', '28': 'S', '29': 'T', '30': 'U', '31': 'V', '32': 'W', '33': 'X', diff --git a/conversions/prefix_conversions.py b/conversions/prefix_conversions.py index a77556433c66..06b759e355a7 100644 --- a/conversions/prefix_conversions.py +++ b/conversions/prefix_conversions.py @@ -6,7 +6,7 @@ from enum import Enum -class SI_Unit(Enum): +class SIUnit(Enum): yotta = 24 zetta = 21 exa = 18 @@ -29,7 +29,7 @@ class SI_Unit(Enum): yocto = -24 -class Binary_Unit(Enum): +class BinaryUnit(Enum): yotta = 8 zetta = 7 exa = 6 @@ -42,17 +42,17 @@ class Binary_Unit(Enum): def convert_si_prefix( known_amount: float, - known_prefix: str | SI_Unit, - unknown_prefix: str | SI_Unit, + known_prefix: str | SIUnit, + unknown_prefix: str | SIUnit, ) -> float: """ Wikipedia reference: https://en.wikipedia.org/wiki/Binary_prefix Wikipedia reference: https://en.wikipedia.org/wiki/International_System_of_Units - >>> convert_si_prefix(1, SI_Unit.giga, SI_Unit.mega) + >>> convert_si_prefix(1, SIUnit.giga, SIUnit.mega) 1000 - >>> convert_si_prefix(1, SI_Unit.mega, SI_Unit.giga) + >>> convert_si_prefix(1, SIUnit.mega, SIUnit.giga) 0.001 - >>> convert_si_prefix(1, SI_Unit.kilo, SI_Unit.kilo) + >>> convert_si_prefix(1, SIUnit.kilo, SIUnit.kilo) 1 >>> convert_si_prefix(1, 'giga', 'mega') 1000 @@ -60,9 +60,9 @@ def convert_si_prefix( 1000 """ if isinstance(known_prefix, str): - known_prefix = SI_Unit[known_prefix.lower()] + known_prefix = SIUnit[known_prefix.lower()] if isinstance(unknown_prefix, str): - unknown_prefix = SI_Unit[unknown_prefix.lower()] + unknown_prefix = SIUnit[unknown_prefix.lower()] unknown_amount: float = known_amount * ( 10 ** (known_prefix.value - unknown_prefix.value) ) @@ -71,16 +71,16 @@ def convert_si_prefix( def convert_binary_prefix( known_amount: float, - known_prefix: str | Binary_Unit, - unknown_prefix: str | Binary_Unit, + known_prefix: str | BinaryUnit, + unknown_prefix: str | BinaryUnit, ) -> float: """ Wikipedia reference: https://en.wikipedia.org/wiki/Metric_prefix - >>> convert_binary_prefix(1, Binary_Unit.giga, Binary_Unit.mega) + >>> convert_binary_prefix(1, BinaryUnit.giga, BinaryUnit.mega) 1024 - >>> convert_binary_prefix(1, Binary_Unit.mega, Binary_Unit.giga) + >>> convert_binary_prefix(1, BinaryUnit.mega, BinaryUnit.giga) 0.0009765625 - >>> convert_binary_prefix(1, Binary_Unit.kilo, Binary_Unit.kilo) + >>> convert_binary_prefix(1, BinaryUnit.kilo, BinaryUnit.kilo) 1 >>> convert_binary_prefix(1, 'giga', 'mega') 1024 @@ -88,9 +88,9 @@ def convert_binary_prefix( 1024 """ if isinstance(known_prefix, str): - known_prefix = Binary_Unit[known_prefix.lower()] + known_prefix = BinaryUnit[known_prefix.lower()] if isinstance(unknown_prefix, str): - unknown_prefix = Binary_Unit[unknown_prefix.lower()] + unknown_prefix = 
BinaryUnit[unknown_prefix.lower()] unknown_amount: float = known_amount * ( 2 ** ((known_prefix.value - unknown_prefix.value) * 10) ) diff --git a/conversions/roman_numerals.py b/conversions/roman_numerals.py index 9933e6a78a4d..960d41342276 100644 --- a/conversions/roman_numerals.py +++ b/conversions/roman_numerals.py @@ -29,7 +29,7 @@ def int_to_roman(number: int) -> str: >>> all(int_to_roman(value) == key for key, value in tests.items()) True """ - ROMAN = [ + ROMAN = [ # noqa: N806 (1000, "M"), (900, "CM"), (500, "D"), diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 1ab13777b7a6..2f4bd60d9749 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -12,7 +12,7 @@ from typing import Any -class my_queue: +class MyQueue: def __init__(self) -> None: self.data: list[Any] = [] self.head: int = 0 @@ -39,20 +39,20 @@ def print(self) -> None: print(self.data[self.head : self.tail]) -class my_node: +class MyNode: def __init__(self, data: Any) -> None: self.data = data - self.left: my_node | None = None - self.right: my_node | None = None + self.left: MyNode | None = None + self.right: MyNode | None = None self.height: int = 1 def get_data(self) -> Any: return self.data - def get_left(self) -> my_node | None: + def get_left(self) -> MyNode | None: return self.left - def get_right(self) -> my_node | None: + def get_right(self) -> MyNode | None: return self.right def get_height(self) -> int: @@ -62,11 +62,11 @@ def set_data(self, data: Any) -> None: self.data = data return - def set_left(self, node: my_node | None) -> None: + def set_left(self, node: MyNode | None) -> None: self.left = node return - def set_right(self, node: my_node | None) -> None: + def set_right(self, node: MyNode | None) -> None: self.right = node return @@ -75,7 +75,7 @@ def set_height(self, height: int) -> None: return -def get_height(node: my_node | None) -> int: +def get_height(node: MyNode | None) -> int: if node is None: return 0 return node.get_height() @@ -87,7 +87,7 @@ def my_max(a: int, b: int) -> int: return b -def right_rotation(node: my_node) -> my_node: +def right_rotation(node: MyNode) -> MyNode: r""" A B / \ / \ @@ -110,7 +110,7 @@ def right_rotation(node: my_node) -> my_node: return ret -def left_rotation(node: my_node) -> my_node: +def left_rotation(node: MyNode) -> MyNode: """ a mirror symmetry rotation of the left_rotation """ @@ -126,7 +126,7 @@ def left_rotation(node: my_node) -> my_node: return ret -def lr_rotation(node: my_node) -> my_node: +def lr_rotation(node: MyNode) -> MyNode: r""" A A Br / \ / \ / \ @@ -143,16 +143,16 @@ def lr_rotation(node: my_node) -> my_node: return right_rotation(node) -def rl_rotation(node: my_node) -> my_node: +def rl_rotation(node: MyNode) -> MyNode: right_child = node.get_right() assert right_child is not None node.set_right(right_rotation(right_child)) return left_rotation(node) -def insert_node(node: my_node | None, data: Any) -> my_node | None: +def insert_node(node: MyNode | None, data: Any) -> MyNode | None: if node is None: - return my_node(data) + return MyNode(data) if data < node.get_data(): node.set_left(insert_node(node.get_left(), data)) if ( @@ -180,7 +180,7 @@ def insert_node(node: my_node | None, data: Any) -> my_node | None: return node -def get_rightMost(root: my_node) -> Any: +def get_right_most(root: MyNode) -> Any: while True: right_child = root.get_right() if right_child is None: @@ -189,7 +189,7 @@ def get_rightMost(root: my_node) -> Any: return 
root.get_data() -def get_leftMost(root: my_node) -> Any: +def get_left_most(root: MyNode) -> Any: while True: left_child = root.get_left() if left_child is None: @@ -198,12 +198,12 @@ def get_leftMost(root: my_node) -> Any: return root.get_data() -def del_node(root: my_node, data: Any) -> my_node | None: +def del_node(root: MyNode, data: Any) -> MyNode | None: left_child = root.get_left() right_child = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: - temp_data = get_leftMost(right_child) + temp_data = get_left_most(right_child) root.set_data(temp_data) root.set_right(del_node(right_child, temp_data)) elif left_child is not None: @@ -276,7 +276,7 @@ class AVLtree: """ def __init__(self) -> None: - self.root: my_node | None = None + self.root: MyNode | None = None def get_height(self) -> int: return get_height(self.root) @@ -296,7 +296,7 @@ def __str__( self, ) -> str: # a level traversale, gives a more intuitive look on the tree output = "" - q = my_queue() + q = MyQueue() q.push(self.root) layer = self.get_height() if layer == 0: diff --git a/data_structures/binary_tree/lazy_segment_tree.py b/data_structures/binary_tree/lazy_segment_tree.py index 94329cb43a76..050dfe0a6f2f 100644 --- a/data_structures/binary_tree/lazy_segment_tree.py +++ b/data_structures/binary_tree/lazy_segment_tree.py @@ -37,14 +37,14 @@ def right(self, idx: int) -> int: return idx * 2 + 1 def build( - self, idx: int, left_element: int, right_element: int, A: list[int] + self, idx: int, left_element: int, right_element: int, a: list[int] ) -> None: if left_element == right_element: - self.segment_tree[idx] = A[left_element - 1] + self.segment_tree[idx] = a[left_element - 1] else: mid = (left_element + right_element) // 2 - self.build(self.left(idx), left_element, mid, A) - self.build(self.right(idx), mid + 1, right_element, A) + self.build(self.left(idx), left_element, mid, a) + self.build(self.right(idx), mid + 1, right_element, a) self.segment_tree[idx] = max( self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)] ) diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index 10451ae68bb2..949a3ecdd32c 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -2,8 +2,8 @@ class SegmentTree: - def __init__(self, A): - self.N = len(A) + def __init__(self, a): + self.N = len(a) self.st = [0] * ( 4 * self.N ) # approximate the overall size of segment tree with array N @@ -58,11 +58,11 @@ def query_recursive(self, idx, l, r, a, b): # noqa: E741 q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b) return max(q1, q2) - def showData(self): - showList = [] + def show_data(self): + show_list = [] for i in range(1, N + 1): - showList += [self.query(i, i)] - print(showList) + show_list += [self.query(i, i)] + print(show_list) if __name__ == "__main__": @@ -75,4 +75,4 @@ def showData(self): segt.update(1, 3, 111) print(segt.query(1, 15)) segt.update(7, 8, 235) - segt.showData() + segt.show_data() diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py index 0526b139b3c7..a53ac566ed54 100644 --- a/data_structures/binary_tree/treap.py +++ b/data_structures/binary_tree/treap.py @@ -121,28 +121,28 @@ def inorder(root: Node | None) -> None: inorder(root.right) -def interactTreap(root: Node | None, args: str) -> Node | None: +def interact_treap(root: Node | None, args: str) -> Node | None: """ Commands: + value to add value into 
treap - value to erase all nodes with value - >>> root = interactTreap(None, "+1") + >>> root = interact_treap(None, "+1") >>> inorder(root) 1, - >>> root = interactTreap(root, "+3 +5 +17 +19 +2 +16 +4 +0") + >>> root = interact_treap(root, "+3 +5 +17 +19 +2 +16 +4 +0") >>> inorder(root) 0,1,2,3,4,5,16,17,19, - >>> root = interactTreap(root, "+4 +4 +4") + >>> root = interact_treap(root, "+4 +4 +4") >>> inorder(root) 0,1,2,3,4,4,4,4,5,16,17,19, - >>> root = interactTreap(root, "-0") + >>> root = interact_treap(root, "-0") >>> inorder(root) 1,2,3,4,4,4,4,5,16,17,19, - >>> root = interactTreap(root, "-4") + >>> root = interact_treap(root, "-4") >>> inorder(root) 1,2,3,5,16,17,19, - >>> root = interactTreap(root, "=0") + >>> root = interact_treap(root, "=0") Unknown command """ for arg in args.split(): @@ -168,7 +168,7 @@ def main() -> None: args = input() while args != "q": - root = interactTreap(root, args) + root = interact_treap(root, args) print(root) args = input() diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index 9265c4839536..d8975eb2dcc7 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -52,14 +52,14 @@ def get_value(self, key): return self.heap_dict[key] def build_heap(self, array): - lastIdx = len(array) - 1 - startFrom = self.get_parent_idx(lastIdx) + last_idx = len(array) - 1 + start_from = self.get_parent_idx(last_idx) for idx, i in enumerate(array): self.idx_of_element[i] = idx self.heap_dict[i.name] = i.val - for i in range(startFrom, -1, -1): + for i in range(start_from, -1, -1): self.sift_down(i, array) return array @@ -123,12 +123,12 @@ def insert(self, node): def is_empty(self): return True if len(self.heap) == 0 else False - def decrease_key(self, node, newValue): + def decrease_key(self, node, new_value): assert ( - self.heap[self.idx_of_element[node]].val > newValue + self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" - node.val = newValue - self.heap_dict[node.name] = newValue + node.val = new_value + self.heap_dict[node.name] = new_value self.sift_up(self.idx_of_element[node]) @@ -143,7 +143,7 @@ def decrease_key(self, node, newValue): # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array -myMinHeap = MinHeap([r, b, a, x, e]) +my_min_heap = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) @@ -154,14 +154,14 @@ def decrease_key(self, node, newValue): # Before print("Min Heap - before decrease key") -for i in myMinHeap.heap: +for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") -myMinHeap.decrease_key(b, -17) +my_min_heap.decrease_key(b, -17) # After -for i in myMinHeap.heap: +for i in my_min_heap.heap: print(i) if __name__ == "__main__": diff --git a/data_structures/stacks/infix_to_prefix_conversion.py b/data_structures/stacks/infix_to_prefix_conversion.py index d3dc9e3e9c73..6f6d5d57e2cb 100644 --- a/data_structures/stacks/infix_to_prefix_conversion.py +++ b/data_structures/stacks/infix_to_prefix_conversion.py @@ -15,9 +15,9 @@ """ -def infix_2_postfix(Infix): - Stack = [] - Postfix = [] +def infix_2_postfix(infix): + stack = [] + post_fix = [] priority = { "^": 3, "*": 2, @@ -26,7 +26,7 @@ def infix_2_postfix(Infix): "+": 1, "-": 1, } # Priority of each operator - print_width = len(Infix) if (len(Infix) > 7) else 7 + print_width = len(infix) if (len(infix) > 7) else 7 # Print table header for output print( @@ -37,52 +37,52 @@ def 
infix_2_postfix(Infix): ) print("-" * (print_width * 3 + 7)) - for x in Infix: + for x in infix: if x.isalpha() or x.isdigit(): - Postfix.append(x) # if x is Alphabet / Digit, add it to Postfix + post_fix.append(x) # if x is Alphabet / Digit, add it to Postfix elif x == "(": - Stack.append(x) # if x is "(" push to Stack + stack.append(x) # if x is "(" push to Stack elif x == ")": # if x is ")" pop stack until "(" is encountered - while Stack[-1] != "(": - Postfix.append(Stack.pop()) # Pop stack & add the content to Postfix - Stack.pop() + while stack[-1] != "(": + post_fix.append(stack.pop()) # Pop stack & add the content to Postfix + stack.pop() else: - if len(Stack) == 0: - Stack.append(x) # If stack is empty, push x to stack + if len(stack) == 0: + stack.append(x) # If stack is empty, push x to stack else: # while priority of x is not > priority of element in the stack - while len(Stack) > 0 and priority[x] <= priority[Stack[-1]]: - Postfix.append(Stack.pop()) # pop stack & add to Postfix - Stack.append(x) # push x to stack + while len(stack) > 0 and priority[x] <= priority[stack[-1]]: + post_fix.append(stack.pop()) # pop stack & add to Postfix + stack.append(x) # push x to stack print( x.center(8), - ("".join(Stack)).ljust(print_width), - ("".join(Postfix)).ljust(print_width), + ("".join(stack)).ljust(print_width), + ("".join(post_fix)).ljust(print_width), sep=" | ", ) # Output in tabular format - while len(Stack) > 0: # while stack is not empty - Postfix.append(Stack.pop()) # pop stack & add to Postfix + while len(stack) > 0: # while stack is not empty + post_fix.append(stack.pop()) # pop stack & add to Postfix print( " ".center(8), - ("".join(Stack)).ljust(print_width), - ("".join(Postfix)).ljust(print_width), + ("".join(stack)).ljust(print_width), + ("".join(post_fix)).ljust(print_width), sep=" | ", ) # Output in tabular format - return "".join(Postfix) # return Postfix as str + return "".join(post_fix) # return Postfix as str -def infix_2_prefix(Infix): - Infix = list(Infix[::-1]) # reverse the infix equation +def infix_2_prefix(infix): + infix = list(infix[::-1]) # reverse the infix equation - for i in range(len(Infix)): - if Infix[i] == "(": - Infix[i] = ")" # change "(" to ")" - elif Infix[i] == ")": - Infix[i] = "(" # change ")" to "(" + for i in range(len(infix)): + if infix[i] == "(": + infix[i] = ")" # change "(" to ")" + elif infix[i] == ")": + infix[i] = "(" # change ")" to "(" - return (infix_2_postfix("".join(Infix)))[ + return (infix_2_postfix("".join(infix)))[ ::-1 ] # call infix_2_postfix on Infix, return reverse of Postfix diff --git a/data_structures/stacks/postfix_evaluation.py b/data_structures/stacks/postfix_evaluation.py index 574acac71c43..28128f82ec19 100644 --- a/data_structures/stacks/postfix_evaluation.py +++ b/data_structures/stacks/postfix_evaluation.py @@ -20,49 +20,49 @@ import operator as op -def Solve(Postfix): - Stack = [] - Div = lambda x, y: int(x / y) # noqa: E731 integer division operation - Opr = { +def solve(post_fix): + stack = [] + div = lambda x, y: int(x / y) # noqa: E731 integer division operation + opr = { "^": op.pow, "*": op.mul, - "/": Div, + "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ") - print("-" * (30 + len(Postfix))) + print("-" * (30 + len(post_fix))) - for x in Postfix: + for x in post_fix: if x.isdigit(): # if x in digit - Stack.append(x) # append x to stack + stack.append(x) # append x to stack # output in 
tabular format - print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(Stack), sep=" | ") + print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ") else: - B = Stack.pop() # pop stack + b = stack.pop() # pop stack # output in tabular format - print("".rjust(8), ("pop(" + B + ")").ljust(12), ",".join(Stack), sep=" | ") + print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ") - A = Stack.pop() # pop stack + a = stack.pop() # pop stack # output in tabular format - print("".rjust(8), ("pop(" + A + ")").ljust(12), ",".join(Stack), sep=" | ") + print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ") - Stack.append( - str(Opr[x](int(A), int(B))) + stack.append( + str(opr[x](int(a), int(b))) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8), - ("push(" + A + x + B + ")").ljust(12), - ",".join(Stack), + ("push(" + a + x + b + ")").ljust(12), + ",".join(stack), sep=" | ", ) - return int(Stack[0]) + return int(stack[0]) if __name__ == "__main__": Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ") - print("\n\tResult = ", Solve(Postfix)) + print("\n\tResult = ", solve(Postfix)) diff --git a/data_structures/stacks/stock_span_problem.py b/data_structures/stacks/stock_span_problem.py index cc2adfdd6c21..19a81bd368de 100644 --- a/data_structures/stacks/stock_span_problem.py +++ b/data_structures/stacks/stock_span_problem.py @@ -8,7 +8,7 @@ """ -def calculateSpan(price, S): +def calculation_span(price, s): n = len(price) # Create a stack and push index of fist element to it @@ -16,7 +16,7 @@ def calculateSpan(price, S): st.append(0) # Span value of first element is always 1 - S[0] = 1 + s[0] = 1 # Calculate span values for rest of the elements for i in range(1, n): @@ -30,14 +30,14 @@ def calculateSpan(price, S): # than all elements on left of it, i.e. price[0], # price[1], ..price[i-1]. 
Else the price[i] is # greater than elements after top of stack - S[i] = i + 1 if len(st) <= 0 else (i - st[0]) + s[i] = i + 1 if len(st) <= 0 else (i - st[0]) # Push this element to stack st.append(i) # A utility function to print elements of array -def printArray(arr, n): +def print_array(arr, n): for i in range(0, n): print(arr[i], end=" ") @@ -47,7 +47,7 @@ def printArray(arr, n): S = [0 for i in range(len(price) + 1)] # Fill the span values in array S[] -calculateSpan(price, S) +calculation_span(price, S) # Print the calculated span values -printArray(S, len(price)) +print_array(S, len(price)) diff --git a/digital_image_processing/edge_detection/canny.py b/digital_image_processing/edge_detection/canny.py index 295b4d825c12..a830355267c4 100644 --- a/digital_image_processing/edge_detection/canny.py +++ b/digital_image_processing/edge_detection/canny.py @@ -43,33 +43,33 @@ def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255): or 15 * PI / 8 <= direction <= 2 * PI or 7 * PI / 8 <= direction <= 9 * PI / 8 ): - W = sobel_grad[row, col - 1] - E = sobel_grad[row, col + 1] - if sobel_grad[row, col] >= W and sobel_grad[row, col] >= E: + w = sobel_grad[row, col - 1] + e = sobel_grad[row, col + 1] + if sobel_grad[row, col] >= w and sobel_grad[row, col] >= e: dst[row, col] = sobel_grad[row, col] elif (PI / 8 <= direction < 3 * PI / 8) or ( 9 * PI / 8 <= direction < 11 * PI / 8 ): - SW = sobel_grad[row + 1, col - 1] - NE = sobel_grad[row - 1, col + 1] - if sobel_grad[row, col] >= SW and sobel_grad[row, col] >= NE: + sw = sobel_grad[row + 1, col - 1] + ne = sobel_grad[row - 1, col + 1] + if sobel_grad[row, col] >= sw and sobel_grad[row, col] >= ne: dst[row, col] = sobel_grad[row, col] elif (3 * PI / 8 <= direction < 5 * PI / 8) or ( 11 * PI / 8 <= direction < 13 * PI / 8 ): - N = sobel_grad[row - 1, col] - S = sobel_grad[row + 1, col] - if sobel_grad[row, col] >= N and sobel_grad[row, col] >= S: + n = sobel_grad[row - 1, col] + s = sobel_grad[row + 1, col] + if sobel_grad[row, col] >= n and sobel_grad[row, col] >= s: dst[row, col] = sobel_grad[row, col] elif (5 * PI / 8 <= direction < 7 * PI / 8) or ( 13 * PI / 8 <= direction < 15 * PI / 8 ): - NW = sobel_grad[row - 1, col - 1] - SE = sobel_grad[row + 1, col + 1] - if sobel_grad[row, col] >= NW and sobel_grad[row, col] >= SE: + nw = sobel_grad[row - 1, col - 1] + se = sobel_grad[row + 1, col + 1] + if sobel_grad[row, col] >= nw and sobel_grad[row, col] >= se: dst[row, col] = sobel_grad[row, col] """ diff --git a/digital_image_processing/filters/bilateral_filter.py b/digital_image_processing/filters/bilateral_filter.py index 76ae4dd20345..1afa01d3fc1a 100644 --- a/digital_image_processing/filters/bilateral_filter.py +++ b/digital_image_processing/filters/bilateral_filter.py @@ -46,16 +46,16 @@ def bilateral_filter( kernel_size: int, ) -> np.ndarray: img2 = np.zeros(img.shape) - gaussKer = get_gauss_kernel(kernel_size, spatial_variance) - sizeX, sizeY = img.shape - for i in range(kernel_size // 2, sizeX - kernel_size // 2): - for j in range(kernel_size // 2, sizeY - kernel_size // 2): - - imgS = get_slice(img, i, j, kernel_size) - imgI = imgS - imgS[kernel_size // 2, kernel_size // 2] - imgIG = vec_gaussian(imgI, intensity_variance) - weights = np.multiply(gaussKer, imgIG) - vals = np.multiply(imgS, weights) + gauss_ker = get_gauss_kernel(kernel_size, spatial_variance) + size_x, size_y = img.shape + for i in range(kernel_size // 2, size_x - kernel_size // 2): + for j in range(kernel_size // 2, size_y - kernel_size // 2): + + 
img_s = get_slice(img, i, j, kernel_size) + img_i = img_s - img_s[kernel_size // 2, kernel_size // 2] + img_ig = vec_gaussian(img_i, intensity_variance) + weights = np.multiply(gauss_ker, img_ig) + vals = np.multiply(img_s, weights) val = np.sum(vals) / np.sum(weights) img2[i, j] = val return img2 diff --git a/digital_image_processing/histogram_equalization/histogram_stretch.py b/digital_image_processing/histogram_equalization/histogram_stretch.py index 0288a2c1fcf5..5ea7773e32d9 100644 --- a/digital_image_processing/histogram_equalization/histogram_stretch.py +++ b/digital_image_processing/histogram_equalization/histogram_stretch.py @@ -11,7 +11,7 @@ from matplotlib import pyplot as plt -class contrastStretch: +class ConstantStretch: def __init__(self): self.img = "" self.original_image = "" @@ -45,10 +45,10 @@ def stretch(self, input_image): self.img[j][i] = self.last_list[num] cv2.imwrite("output_data/output.jpg", self.img) - def plotHistogram(self): + def plot_histogram(self): plt.hist(self.img.ravel(), 256, [0, 256]) - def showImage(self): + def show_image(self): cv2.imshow("Output-Image", self.img) cv2.imshow("Input-Image", self.original_image) cv2.waitKey(5000) @@ -57,7 +57,7 @@ def showImage(self): if __name__ == "__main__": file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg") - stretcher = contrastStretch() + stretcher = ConstantStretch() stretcher.stretch(file_path) - stretcher.plotHistogram() - stretcher.showImage() + stretcher.plot_histogram() + stretcher.show_image() diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index 033334af8a2a..2f8fdc066919 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -104,72 +104,72 @@ class IndexCalculation: #RGBIndex = ["GLI", "CI", "Hue", "I", "NGRDI", "RI", "S", "IF"] """ - def __init__(self, red=None, green=None, blue=None, redEdge=None, nir=None): + def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None): # print("Numpy version: " + np.__version__) - self.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir) + self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir) - def setMatrices(self, red=None, green=None, blue=None, redEdge=None, nir=None): + def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None): if red is not None: self.red = red if green is not None: self.green = green if blue is not None: self.blue = blue - if redEdge is not None: - self.redEdge = redEdge + if red_edge is not None: + self.redEdge = red_edge if nir is not None: self.nir = nir return True def calculation( - self, index="", red=None, green=None, blue=None, redEdge=None, nir=None + self, index="", red=None, green=None, blue=None, red_edge=None, nir=None ): """ performs the calculation of the index with the values instantiated in the class :str index: abbreviation of index name to perform """ - self.setMatrices(red=red, green=green, blue=blue, redEdge=redEdge, nir=nir) + self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir) funcs = { - "ARVI2": self.ARVI2, - "CCCI": self.CCCI, - "CVI": self.CVI, - "GLI": self.GLI, - "NDVI": self.NDVI, - "BNDVI": self.BNDVI, - "redEdgeNDVI": self.redEdgeNDVI, - "GNDVI": self.GNDVI, - "GBNDVI": self.GBNDVI, - "GRNDVI": self.GRNDVI, - "RBNDVI": self.RBNDVI, - "PNDVI": self.PNDVI, - "ATSAVI": self.ATSAVI, - "BWDRVI": self.BWDRVI, - "CIgreen": self.CIgreen, - "CIrededge": 
self.CIrededge, - "CI": self.CI, - "CTVI": self.CTVI, - "GDVI": self.GDVI, - "EVI": self.EVI, - "GEMI": self.GEMI, - "GOSAVI": self.GOSAVI, - "GSAVI": self.GSAVI, - "Hue": self.Hue, - "IVI": self.IVI, - "IPVI": self.IPVI, - "I": self.I, - "RVI": self.RVI, - "MRVI": self.MRVI, - "MSAVI": self.MSAVI, - "NormG": self.NormG, - "NormNIR": self.NormNIR, - "NormR": self.NormR, - "NGRDI": self.NGRDI, - "RI": self.RI, - "S": self.S, - "IF": self.IF, - "DVI": self.DVI, - "TVI": self.TVI, - "NDRE": self.NDRE, + "ARVI2": self.arv12, + "CCCI": self.ccci, + "CVI": self.cvi, + "GLI": self.gli, + "NDVI": self.ndvi, + "BNDVI": self.bndvi, + "redEdgeNDVI": self.red_edge_ndvi, + "GNDVI": self.gndvi, + "GBNDVI": self.gbndvi, + "GRNDVI": self.grndvi, + "RBNDVI": self.rbndvi, + "PNDVI": self.pndvi, + "ATSAVI": self.atsavi, + "BWDRVI": self.bwdrvi, + "CIgreen": self.ci_green, + "CIrededge": self.ci_rededge, + "CI": self.ci, + "CTVI": self.ctvi, + "GDVI": self.gdvi, + "EVI": self.evi, + "GEMI": self.gemi, + "GOSAVI": self.gosavi, + "GSAVI": self.gsavi, + "Hue": self.hue, + "IVI": self.ivi, + "IPVI": self.ipvi, + "I": self.i, + "RVI": self.rvi, + "MRVI": self.mrvi, + "MSAVI": self.m_savi, + "NormG": self.norm_g, + "NormNIR": self.norm_nir, + "NormR": self.norm_r, + "NGRDI": self.ngrdi, + "RI": self.ri, + "S": self.s, + "IF": self._if, + "DVI": self.dvi, + "TVI": self.tvi, + "NDRE": self.ndre, } try: @@ -178,7 +178,7 @@ def calculation( print("Index not in the list!") return False - def ARVI2(self): + def arv12(self): """ Atmospherically Resistant Vegetation Index 2 https://www.indexdatabase.de/db/i-single.php?id=396 @@ -187,7 +187,7 @@ def ARVI2(self): """ return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) - def CCCI(self): + def ccci(self): """ Canopy Chlorophyll Content Index https://www.indexdatabase.de/db/i-single.php?id=224 @@ -197,7 +197,7 @@ def CCCI(self): (self.nir - self.red) / (self.nir + self.red) ) - def CVI(self): + def cvi(self): """ Chlorophyll vegetation index https://www.indexdatabase.de/db/i-single.php?id=391 @@ -205,7 +205,7 @@ def CVI(self): """ return self.nir * (self.red / (self.green**2)) - def GLI(self): + def gli(self): """ self.green leaf index https://www.indexdatabase.de/db/i-single.php?id=375 @@ -215,7 +215,7 @@ def GLI(self): 2 * self.green + self.red + self.blue ) - def NDVI(self): + def ndvi(self): """ Normalized Difference self.nir/self.red Normalized Difference Vegetation Index, Calibrated NDVI - CDVI @@ -224,7 +224,7 @@ def NDVI(self): """ return (self.nir - self.red) / (self.nir + self.red) - def BNDVI(self): + def bndvi(self): """ Normalized Difference self.nir/self.blue self.blue-normalized difference vegetation index @@ -233,7 +233,7 @@ def BNDVI(self): """ return (self.nir - self.blue) / (self.nir + self.blue) - def redEdgeNDVI(self): + def red_edge_ndvi(self): """ Normalized Difference self.rededge/self.red https://www.indexdatabase.de/db/i-single.php?id=235 @@ -241,7 +241,7 @@ def redEdgeNDVI(self): """ return (self.redEdge - self.red) / (self.redEdge + self.red) - def GNDVI(self): + def gndvi(self): """ Normalized Difference self.nir/self.green self.green NDVI https://www.indexdatabase.de/db/i-single.php?id=401 @@ -249,7 +249,7 @@ def GNDVI(self): """ return (self.nir - self.green) / (self.nir + self.green) - def GBNDVI(self): + def gbndvi(self): """ self.green-self.blue NDVI https://www.indexdatabase.de/db/i-single.php?id=186 @@ -259,7 +259,7 @@ def GBNDVI(self): self.nir + (self.green + self.blue) ) - def GRNDVI(self): + def grndvi(self): """ 
self.green-self.red NDVI https://www.indexdatabase.de/db/i-single.php?id=185 @@ -269,7 +269,7 @@ def GRNDVI(self): self.nir + (self.green + self.red) ) - def RBNDVI(self): + def rbndvi(self): """ self.red-self.blue NDVI https://www.indexdatabase.de/db/i-single.php?id=187 @@ -277,7 +277,7 @@ def RBNDVI(self): """ return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) - def PNDVI(self): + def pndvi(self): """ Pan NDVI https://www.indexdatabase.de/db/i-single.php?id=188 @@ -287,7 +287,7 @@ def PNDVI(self): self.nir + (self.green + self.red + self.blue) ) - def ATSAVI(self, X=0.08, a=1.22, b=0.03): + def atsavi(self, x=0.08, a=1.22, b=0.03): """ Adjusted transformed soil-adjusted VI https://www.indexdatabase.de/db/i-single.php?id=209 @@ -295,10 +295,10 @@ def ATSAVI(self, X=0.08, a=1.22, b=0.03): """ return a * ( (self.nir - a * self.red - b) - / (a * self.nir + self.red - a * b + X * (1 + a**2)) + / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) - def BWDRVI(self): + def bwdrvi(self): """ self.blue-wide dynamic range vegetation index https://www.indexdatabase.de/db/i-single.php?id=136 @@ -306,7 +306,7 @@ def BWDRVI(self): """ return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) - def CIgreen(self): + def ci_green(self): """ Chlorophyll Index self.green https://www.indexdatabase.de/db/i-single.php?id=128 @@ -314,7 +314,7 @@ def CIgreen(self): """ return (self.nir / self.green) - 1 - def CIrededge(self): + def ci_rededge(self): """ Chlorophyll Index self.redEdge https://www.indexdatabase.de/db/i-single.php?id=131 @@ -322,7 +322,7 @@ def CIrededge(self): """ return (self.nir / self.redEdge) - 1 - def CI(self): + def ci(self): """ Coloration Index https://www.indexdatabase.de/db/i-single.php?id=11 @@ -330,16 +330,16 @@ def CI(self): """ return (self.red - self.blue) / self.red - def CTVI(self): + def ctvi(self): """ Corrected Transformed Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=244 :return: index """ - ndvi = self.NDVI() + ndvi = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2)) - def GDVI(self): + def gdvi(self): """ Difference self.nir/self.green self.green Difference Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=27 @@ -347,7 +347,7 @@ def GDVI(self): """ return self.nir - self.green - def EVI(self): + def evi(self): """ Enhanced Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=16 @@ -357,7 +357,7 @@ def EVI(self): (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) - def GEMI(self): + def gemi(self): """ Global Environment Monitoring Index https://www.indexdatabase.de/db/i-single.php?id=25 @@ -368,25 +368,25 @@ def GEMI(self): ) return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red) - def GOSAVI(self, Y=0.16): + def gosavi(self, y=0.16): """ self.green Optimized Soil Adjusted Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=29 mit Y = 0,16 :return: index """ - return (self.nir - self.green) / (self.nir + self.green + Y) + return (self.nir - self.green) / (self.nir + self.green + y) - def GSAVI(self, L=0.5): + def gsavi(self, n=0.5): """ self.green Soil Adjusted Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=31 - mit L = 0,5 + mit N = 0,5 :return: index """ - return ((self.nir - self.green) / (self.nir + self.green + L)) * (1 + L) + return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) - def Hue(self): + def hue(self): """ Hue 
https://www.indexdatabase.de/db/i-single.php?id=34 @@ -396,7 +396,7 @@ def Hue(self): ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) ) - def IVI(self, a=None, b=None): + def ivi(self, a=None, b=None): """ Ideal vegetation index https://www.indexdatabase.de/db/i-single.php?id=276 @@ -406,15 +406,15 @@ def IVI(self, a=None, b=None): """ return (self.nir - b) / (a * self.red) - def IPVI(self): + def ipvi(self): """ Infraself.red percentage vegetation index https://www.indexdatabase.de/db/i-single.php?id=35 :return: index """ - return (self.nir / ((self.nir + self.red) / 2)) * (self.NDVI() + 1) + return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) - def I(self): # noqa: E741,E743 + def i(self): # noqa: E741,E743 """ Intensity https://www.indexdatabase.de/db/i-single.php?id=36 @@ -422,7 +422,7 @@ def I(self): # noqa: E741,E743 """ return (self.red + self.green + self.blue) / 30.5 - def RVI(self): + def rvi(self): """ Ratio-Vegetation-Index http://www.seos-project.eu/modules/remotesensing/remotesensing-c03-s01-p01.html @@ -430,15 +430,15 @@ def RVI(self): """ return self.nir / self.red - def MRVI(self): + def mrvi(self): """ Modified Normalized Difference Vegetation Index RVI https://www.indexdatabase.de/db/i-single.php?id=275 :return: index """ - return (self.RVI() - 1) / (self.RVI() + 1) + return (self.rvi() - 1) / (self.rvi() + 1) - def MSAVI(self): + def m_savi(self): """ Modified Soil Adjusted Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=44 @@ -449,7 +449,7 @@ def MSAVI(self): - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 - def NormG(self): + def norm_g(self): """ Norm G https://www.indexdatabase.de/db/i-single.php?id=50 @@ -457,7 +457,7 @@ def NormG(self): """ return self.green / (self.nir + self.red + self.green) - def NormNIR(self): + def norm_nir(self): """ Norm self.nir https://www.indexdatabase.de/db/i-single.php?id=51 @@ -465,7 +465,7 @@ def NormNIR(self): """ return self.nir / (self.nir + self.red + self.green) - def NormR(self): + def norm_r(self): """ Norm R https://www.indexdatabase.de/db/i-single.php?id=52 @@ -473,7 +473,7 @@ def NormR(self): """ return self.red / (self.nir + self.red + self.green) - def NGRDI(self): + def ngrdi(self): """ Normalized Difference self.green/self.red Normalized self.green self.red difference index, Visible Atmospherically Resistant Indices self.green @@ -483,7 +483,7 @@ def NGRDI(self): """ return (self.green - self.red) / (self.green + self.red) - def RI(self): + def ri(self): """ Normalized Difference self.red/self.green self.redness Index https://www.indexdatabase.de/db/i-single.php?id=74 @@ -491,7 +491,7 @@ def RI(self): """ return (self.red - self.green) / (self.red + self.green) - def S(self): + def s(self): """ Saturation https://www.indexdatabase.de/db/i-single.php?id=77 @@ -501,7 +501,7 @@ def S(self): min = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)]) return (max - min) / max - def IF(self): + def _if(self): """ Shape Index https://www.indexdatabase.de/db/i-single.php?id=79 @@ -509,7 +509,7 @@ def IF(self): """ return (2 * self.red - self.green - self.blue) / (self.green - self.blue) - def DVI(self): + def dvi(self): """ Simple Ratio self.nir/self.red Difference Vegetation Index, Vegetation Index Number (VIN) @@ -518,15 +518,15 @@ def DVI(self): """ return self.nir / self.red - def TVI(self): + def tvi(self): """ Transformed Vegetation Index https://www.indexdatabase.de/db/i-single.php?id=98 :return: index """ - return 
(self.NDVI() + 0.5) ** (1 / 2) + return (self.ndvi() + 0.5) ** (1 / 2) - def NDRE(self): + def ndre(self): return (self.nir - self.redEdge) / (self.nir + self.redEdge) diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index 1f42fddf297a..fdcebfdad161 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -62,8 +62,8 @@ def test_gen_gaussian_kernel_filter(): def test_convolve_filter(): # laplace diagonals - Laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]]) - res = conv.img_convolve(gray, Laplace).astype(uint8) + laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]]) + res = conv.img_convolve(gray, laplace).astype(uint8) assert res.any() diff --git a/divide_and_conquer/inversions.py b/divide_and_conquer/inversions.py index e20d35daccbe..35f78fe5cf1e 100644 --- a/divide_and_conquer/inversions.py +++ b/divide_and_conquer/inversions.py @@ -63,18 +63,18 @@ def count_inversions_recursive(arr): if len(arr) <= 1: return arr, 0 mid = len(arr) // 2 - P = arr[0:mid] - Q = arr[mid:] + p = arr[0:mid] + q = arr[mid:] - A, inversion_p = count_inversions_recursive(P) - B, inversions_q = count_inversions_recursive(Q) - C, cross_inversions = _count_cross_inversions(A, B) + a, inversion_p = count_inversions_recursive(p) + b, inversions_q = count_inversions_recursive(q) + c, cross_inversions = _count_cross_inversions(a, b) num_inversions = inversion_p + inversions_q + cross_inversions - return C, num_inversions + return c, num_inversions -def _count_cross_inversions(P, Q): +def _count_cross_inversions(p, q): """ Counts the inversions across two sorted arrays. And combine the two arrays into one sorted array @@ -96,26 +96,26 @@ def _count_cross_inversions(P, Q): ([1, 2, 3, 3, 4, 5], 0) """ - R = [] + r = [] i = j = num_inversion = 0 - while i < len(P) and j < len(Q): - if P[i] > Q[j]: + while i < len(p) and j < len(q): + if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. - num_inversion += len(P) - i - R.append(Q[j]) + num_inversion += len(p) - i + r.append(q[j]) j += 1 else: - R.append(P[i]) + r.append(p[i]) i += 1 - if i < len(P): - R.extend(P[i:]) + if i < len(p): + r.extend(p[i:]) else: - R.extend(Q[j:]) + r.extend(q[j:]) - return R, num_inversion + return r, num_inversion def main(): diff --git a/dynamic_programming/bitmask.py b/dynamic_programming/bitmask.py index 2994db5b5e1e..f45250c9cb84 100644 --- a/dynamic_programming/bitmask.py +++ b/dynamic_programming/bitmask.py @@ -28,7 +28,7 @@ def __init__(self, task_performed, total): # to 1 self.final_mask = (1 << len(task_performed)) - 1 - def CountWaysUtil(self, mask, task_no): + def count_ways_until(self, mask, task_no): # if mask == self.finalmask all persons are distributed tasks, return 1 if mask == self.final_mask: @@ -43,7 +43,7 @@ def CountWaysUtil(self, mask, task_no): return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement - total_ways_util = self.CountWaysUtil(mask, task_no + 1) + total_ways_util = self.count_ways_until(mask, task_no + 1) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. @@ -56,14 +56,14 @@ def CountWaysUtil(self, mask, task_no): # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. 
- total_ways_util += self.CountWaysUtil(mask | (1 << p), task_no + 1) + total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1) # save the value. self.dp[mask][task_no] = total_ways_util return self.dp[mask][task_no] - def countNoOfWays(self, task_performed): + def count_no_of_ways(self, task_performed): # Store the list of persons for each task for i in range(len(task_performed)): @@ -71,7 +71,7 @@ def countNoOfWays(self, task_performed): self.task[j].append(i) # call the function to fill the DP table, final answer is stored in dp[0][1] - return self.CountWaysUtil(0, 1) + return self.count_ways_until(0, 1) if __name__ == "__main__": @@ -81,7 +81,7 @@ def countNoOfWays(self, task_performed): # the list of tasks that can be done by M persons. task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]] print( - AssignmentUsingBitmask(task_performed, total_tasks).countNoOfWays( + AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) ) diff --git a/dynamic_programming/edit_distance.py b/dynamic_programming/edit_distance.py index 56877e0c50a2..d63e559e30da 100644 --- a/dynamic_programming/edit_distance.py +++ b/dynamic_programming/edit_distance.py @@ -21,10 +21,10 @@ class EditDistance: def __init__(self): self.__prepare__() - def __prepare__(self, N=0, M=0): - self.dp = [[-1 for y in range(0, M)] for x in range(0, N)] + def __prepare__(self, n=0, m=0): + self.dp = [[-1 for y in range(0, m)] for x in range(0, n)] - def __solveDP(self, x, y): + def __solve_dp(self, x, y): if x == -1: return y + 1 elif y == -1: @@ -32,30 +32,30 @@ def __solveDP(self, x, y): elif self.dp[x][y] > -1: return self.dp[x][y] else: - if self.A[x] == self.B[y]: - self.dp[x][y] = self.__solveDP(x - 1, y - 1) + if self.a[x] == self.b[y]: + self.dp[x][y] = self.__solve_dp(x - 1, y - 1) else: self.dp[x][y] = 1 + min( - self.__solveDP(x, y - 1), - self.__solveDP(x - 1, y), - self.__solveDP(x - 1, y - 1), + self.__solve_dp(x, y - 1), + self.__solve_dp(x - 1, y), + self.__solve_dp(x - 1, y - 1), ) return self.dp[x][y] - def solve(self, A, B): - if isinstance(A, bytes): - A = A.decode("ascii") + def solve(self, a, b): + if isinstance(a, bytes): + a = a.decode("ascii") - if isinstance(B, bytes): - B = B.decode("ascii") + if isinstance(b, bytes): + b = b.decode("ascii") - self.A = str(A) - self.B = str(B) + self.a = str(a) + self.b = str(b) - self.__prepare__(len(A), len(B)) + self.__prepare__(len(a), len(b)) - return self.__solveDP(len(A) - 1, len(B) - 1) + return self.__solve_dp(len(a) - 1, len(b) - 1) def min_distance_bottom_up(word1: str, word2: str) -> int: diff --git a/dynamic_programming/floyd_warshall.py b/dynamic_programming/floyd_warshall.py index a4b6c6a82568..614a3c72a992 100644 --- a/dynamic_programming/floyd_warshall.py +++ b/dynamic_programming/floyd_warshall.py @@ -2,41 +2,41 @@ class Graph: - def __init__(self, N=0): # a graph with Node 0,1,...,N-1 - self.N = N - self.W = [ - [math.inf for j in range(0, N)] for i in range(0, N) + def __init__(self, n=0): # a graph with Node 0,1,...,N-1 + self.n = n + self.w = [ + [math.inf for j in range(0, n)] for i in range(0, n) ] # adjacency matrix for weight self.dp = [ - [math.inf for j in range(0, N)] for i in range(0, N) + [math.inf for j in range(0, n)] for i in range(0, n) ] # dp[i][j] stores minimum distance from i to j - def addEdge(self, u, v, w): + def add_edge(self, u, v, w): self.dp[u][v] = w def floyd_warshall(self): - for k in range(0, self.N): - for i in range(0, self.N): - for j in range(0, self.N): + for k in range(0, 
self.n): + for i in range(0, self.n): + for j in range(0, self.n): self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j]) - def showMin(self, u, v): + def show_min(self, u, v): return self.dp[u][v] if __name__ == "__main__": graph = Graph(5) - graph.addEdge(0, 2, 9) - graph.addEdge(0, 4, 10) - graph.addEdge(1, 3, 5) - graph.addEdge(2, 3, 7) - graph.addEdge(3, 0, 10) - graph.addEdge(3, 1, 2) - graph.addEdge(3, 2, 1) - graph.addEdge(3, 4, 6) - graph.addEdge(4, 1, 3) - graph.addEdge(4, 2, 4) - graph.addEdge(4, 3, 9) + graph.add_edge(0, 2, 9) + graph.add_edge(0, 4, 10) + graph.add_edge(1, 3, 5) + graph.add_edge(2, 3, 7) + graph.add_edge(3, 0, 10) + graph.add_edge(3, 1, 2) + graph.add_edge(3, 2, 1) + graph.add_edge(3, 4, 6) + graph.add_edge(4, 1, 3) + graph.add_edge(4, 2, 4) + graph.add_edge(4, 3, 9) graph.floyd_warshall() - graph.showMin(1, 4) - graph.showMin(0, 3) + graph.show_min(1, 4) + graph.show_min(0, 3) diff --git a/dynamic_programming/fractional_knapsack.py b/dynamic_programming/fractional_knapsack.py index c74af7ef8fc5..6f7a2a08cf9b 100644 --- a/dynamic_programming/fractional_knapsack.py +++ b/dynamic_programming/fractional_knapsack.py @@ -2,20 +2,20 @@ from itertools import accumulate -def fracKnapsack(vl, wt, W, n): +def frac_knapsack(vl, wt, w, n): """ - >>> fracKnapsack([60, 100, 120], [10, 20, 30], 50, 3) + >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) 240.0 """ r = list(sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)) vl, wt = [i[0] for i in r], [i[1] for i in r] acc = list(accumulate(wt)) - k = bisect(acc, W) + k = bisect(acc, w) return ( 0 if k == 0 - else sum(vl[:k]) + (W - acc[k - 1]) * (vl[k]) / (wt[k]) + else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k]) ) diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py index 804d7d4f12f5..9efb60bab98b 100644 --- a/dynamic_programming/knapsack.py +++ b/dynamic_programming/knapsack.py @@ -7,39 +7,39 @@ """ -def MF_knapsack(i, wt, val, j): +def mf_knapsack(i, wt, val, j): """ This code involves the concept of memory functions. Here we solve the subproblems which are needed unlike the below example F is a 2D array with -1s filled up """ - global F # a global dp table for knapsack - if F[i][j] < 0: + global f # a global dp table for knapsack + if f[i][j] < 0: if j < wt[i - 1]: - val = MF_knapsack(i - 1, wt, val, j) + val = mf_knapsack(i - 1, wt, val, j) else: val = max( - MF_knapsack(i - 1, wt, val, j), - MF_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], + mf_knapsack(i - 1, wt, val, j), + mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], ) - F[i][j] = val - return F[i][j] + f[i][j] = val + return f[i][j] -def knapsack(W, wt, val, n): - dp = [[0 for i in range(W + 1)] for j in range(n + 1)] +def knapsack(w, wt, val, n): + dp = [[0 for i in range(w + 1)] for j in range(n + 1)] for i in range(1, n + 1): - for w in range(1, W + 1): + for w in range(1, w + 1): if wt[i - 1] <= w: dp[i][w] = max(val[i - 1] + dp[i - 1][w - wt[i - 1]], dp[i - 1][w]) else: dp[i][w] = dp[i - 1][w] - return dp[n][W], dp + return dp[n][w], dp -def knapsack_with_example_solution(W: int, wt: list, val: list): +def knapsack_with_example_solution(w: int, wt: list, val: list): """ Solves the integer weights knapsack problem returns one of the several possible optimal subsets. 
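
In the knapsack hunk above, renaming the capacity parameter `W` to lowercase `w` leaves it shadowed by the inner loop variable of the same name (`for w in range(1, w + 1)`); the final `return dp[n][w], dp` still returns the right cell only because the loop happens to end with `w` equal to the capacity. A minimal standalone sketch of the same 0/1 knapsack recurrence, with the capacity kept under a distinct, illustrative name (`capacity`; this sketch is not part of the patch):

def knapsack_sketch(capacity: int, wt: list, val: list, n: int) -> int:
    # dp[i][c] = best value using the first i items within capacity c
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for c in range(1, capacity + 1):
            if wt[i - 1] <= c:
                dp[i][c] = max(val[i - 1] + dp[i - 1][c - wt[i - 1]], dp[i - 1][c])
            else:
                dp[i][c] = dp[i - 1][c]
    return dp[n][capacity]

assert knapsack_sketch(50, [10, 20, 30], [60, 100, 120], 3) == 220
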
@@ -90,9 +90,9 @@ def knapsack_with_example_solution(W: int, wt: list, val: list): f"got weight of type {type(wt[i])} at index {i}" ) - optimal_val, dp_table = knapsack(W, wt, val, num_items) + optimal_val, dp_table = knapsack(w, wt, val, num_items) example_optional_set: set = set() - _construct_solution(dp_table, wt, num_items, W, example_optional_set) + _construct_solution(dp_table, wt, num_items, w, example_optional_set) return optimal_val, example_optional_set @@ -136,10 +136,10 @@ def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set): wt = [4, 3, 2, 3] n = 4 w = 6 - F = [[0] * (w + 1)] + [[0] + [-1 for i in range(w + 1)] for j in range(n + 1)] + f = [[0] * (w + 1)] + [[0] + [-1 for i in range(w + 1)] for j in range(n + 1)] optimal_solution, _ = knapsack(w, wt, val, n) print(optimal_solution) - print(MF_knapsack(n, wt, val, w)) # switched the n and w + print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index fdcf3311a017..3468fd87da8d 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -38,7 +38,7 @@ def longest_common_subsequence(x: str, y: str): n = len(y) # declaring the array for storing the dp values - L = [[0] * (n + 1) for _ in range(m + 1)] + l = [[0] * (n + 1) for _ in range(m + 1)] # noqa: E741 for i in range(1, m + 1): for j in range(1, n + 1): @@ -47,7 +47,7 @@ def longest_common_subsequence(x: str, y: str): else: match = 0 - L[i][j] = max(L[i - 1][j], L[i][j - 1], L[i - 1][j - 1] + match) + l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match) seq = "" i, j = m, n @@ -57,17 +57,17 @@ def longest_common_subsequence(x: str, y: str): else: match = 0 - if L[i][j] == L[i - 1][j - 1] + match: + if l[i][j] == l[i - 1][j - 1] + match: if match == 1: seq = x[i - 1] + seq i -= 1 j -= 1 - elif L[i][j] == L[i - 1][j]: + elif l[i][j] == l[i - 1][j]: i -= 1 else: j -= 1 - return L[m][n], seq + return l[m][n], seq if __name__ == "__main__": diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py index a029f9be7d98..6feed23529f1 100644 --- a/dynamic_programming/longest_increasing_subsequence.py +++ b/dynamic_programming/longest_increasing_subsequence.py @@ -34,12 +34,12 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recu return array # Else pivot = array[0] - isFound = False + is_found = False i = 1 longest_subseq: list[int] = [] - while not isFound and i < array_length: + while not is_found and i < array_length: if array[i] < pivot: - isFound = True + is_found = True temp_array = [element for element in array[i:] if element >= array[i]] temp_array = longest_subsequence(temp_array) if len(temp_array) > len(longest_subseq): diff --git a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py b/dynamic_programming/longest_increasing_subsequence_o(nlogn).py index af536f8bbd01..5e11d729f395 100644 --- a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py +++ b/dynamic_programming/longest_increasing_subsequence_o(nlogn).py @@ -7,7 +7,7 @@ from __future__ import annotations -def CeilIndex(v, l, r, key): # noqa: E741 +def ceil_index(v, l, r, key): # noqa: E741 while r - l > 1: m = (l + r) // 2 if v[m] >= key: @@ -17,16 +17,16 @@ def CeilIndex(v, 
l, r, key): # noqa: E741 return r -def LongestIncreasingSubsequenceLength(v: list[int]) -> int: +def longest_increasing_subsequence_length(v: list[int]) -> int: """ - >>> LongestIncreasingSubsequenceLength([2, 5, 3, 7, 11, 8, 10, 13, 6]) + >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) 6 - >>> LongestIncreasingSubsequenceLength([]) + >>> longest_increasing_subsequence_length([]) 0 - >>> LongestIncreasingSubsequenceLength([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, - ... 11, 7, 15]) + >>> longest_increasing_subsequence_length([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, + ... 3, 11, 7, 15]) 6 - >>> LongestIncreasingSubsequenceLength([5, 4, 3, 2, 1]) + >>> longest_increasing_subsequence_length([5, 4, 3, 2, 1]) 1 """ if len(v) == 0: @@ -44,7 +44,7 @@ def LongestIncreasingSubsequenceLength(v: list[int]) -> int: tail[length] = v[i] length += 1 else: - tail[CeilIndex(tail, -1, length - 1, v[i])] = v[i] + tail[ceil_index(tail, -1, length - 1, v[i])] = v[i] return length diff --git a/dynamic_programming/matrix_chain_order.py b/dynamic_programming/matrix_chain_order.py index 9411bc704f1c..d612aea7b99d 100644 --- a/dynamic_programming/matrix_chain_order.py +++ b/dynamic_programming/matrix_chain_order.py @@ -8,34 +8,34 @@ """ -def MatrixChainOrder(array): - N = len(array) - Matrix = [[0 for x in range(N)] for x in range(N)] - Sol = [[0 for x in range(N)] for x in range(N)] +def matrix_chain_order(array): + n = len(array) + matrix = [[0 for x in range(n)] for x in range(n)] + sol = [[0 for x in range(n)] for x in range(n)] - for ChainLength in range(2, N): - for a in range(1, N - ChainLength + 1): - b = a + ChainLength - 1 + for chain_length in range(2, n): + for a in range(1, n - chain_length + 1): + b = a + chain_length - 1 - Matrix[a][b] = sys.maxsize + matrix[a][b] = sys.maxsize for c in range(a, b): cost = ( - Matrix[a][c] + Matrix[c + 1][b] + array[a - 1] * array[c] * array[b] + matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) - if cost < Matrix[a][b]: - Matrix[a][b] = cost - Sol[a][b] = c - return Matrix, Sol + if cost < matrix[a][b]: + matrix[a][b] = cost + sol[a][b] = c + return matrix, sol # Print order of matrix with Ai as Matrix -def PrintOptimalSolution(OptimalSolution, i, j): +def print_optiomal_solution(optimal_solution, i, j): if i == j: print("A" + str(i), end=" ") else: print("(", end=" ") - PrintOptimalSolution(OptimalSolution, i, OptimalSolution[i][j]) - PrintOptimalSolution(OptimalSolution, OptimalSolution[i][j] + 1, j) + print_optiomal_solution(optimal_solution, i, optimal_solution[i][j]) + print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j) print(")", end=" ") @@ -44,10 +44,10 @@ def main(): n = len(array) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 - Matrix, OptimalSolution = MatrixChainOrder(array) + matrix, optimal_solution = matrix_chain_order(array) - print("No. of Operation required: " + str(Matrix[1][n - 1])) - PrintOptimalSolution(OptimalSolution, 1, n - 1) + print("No. 
of Operation required: " + str(matrix[1][n - 1])) + print_optiomal_solution(optimal_solution, 1, n - 1) if __name__ == "__main__": diff --git a/dynamic_programming/max_sub_array.py b/dynamic_programming/max_sub_array.py index 3060010ef7c6..42eca79a931e 100644 --- a/dynamic_programming/max_sub_array.py +++ b/dynamic_programming/max_sub_array.py @@ -4,14 +4,14 @@ from __future__ import annotations -def find_max_sub_array(A, low, high): +def find_max_sub_array(a, low, high): if low == high: - return low, high, A[low] + return low, high, a[low] else: mid = (low + high) // 2 - left_low, left_high, left_sum = find_max_sub_array(A, low, mid) - right_low, right_high, right_sum = find_max_sub_array(A, mid + 1, high) - cross_left, cross_right, cross_sum = find_max_cross_sum(A, low, mid, high) + left_low, left_high, left_sum = find_max_sub_array(a, low, mid) + right_low, right_high, right_sum = find_max_sub_array(a, mid + 1, high) + cross_left, cross_right, cross_sum = find_max_cross_sum(a, low, mid, high) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: @@ -20,18 +20,18 @@ def find_max_sub_array(A, low, high): return cross_left, cross_right, cross_sum -def find_max_cross_sum(A, low, mid, high): +def find_max_cross_sum(a, low, mid, high): left_sum, max_left = -999999999, -1 right_sum, max_right = -999999999, -1 summ = 0 for i in range(mid, low - 1, -1): - summ += A[i] + summ += a[i] if summ > left_sum: left_sum = summ max_left = i summ = 0 for i in range(mid + 1, high + 1): - summ += A[i] + summ += a[i] if summ > right_sum: right_sum = summ max_right = i diff --git a/dynamic_programming/minimum_coin_change.py b/dynamic_programming/minimum_coin_change.py index 2869b5857be1..848bd654d3b9 100644 --- a/dynamic_programming/minimum_coin_change.py +++ b/dynamic_programming/minimum_coin_change.py @@ -7,7 +7,7 @@ """ -def dp_count(S, n): +def dp_count(s, n): """ >>> dp_count([1, 2, 3], 4) 4 @@ -33,7 +33,7 @@ def dp_count(S, n): # Pick all coins one by one and update table[] values # after the index greater than or equal to the value of the # picked coin - for coin_val in S: + for coin_val in s: for j in range(coin_val, n + 1): table[j] += table[j - coin_val] diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py index 8fad4ef3072f..3daa9767fde4 100644 --- a/dynamic_programming/minimum_partition.py +++ b/dynamic_programming/minimum_partition.py @@ -3,7 +3,7 @@ """ -def findMin(arr): +def find_min(arr): n = len(arr) s = sum(arr) diff --git a/dynamic_programming/sum_of_subset.py b/dynamic_programming/sum_of_subset.py index a12177b57c74..77672b0b83e5 100644 --- a/dynamic_programming/sum_of_subset.py +++ b/dynamic_programming/sum_of_subset.py @@ -1,25 +1,25 @@ -def isSumSubset(arr, arrLen, requiredSum): +def is_sum_subset(arr, arr_len, required_sum): """ - >>> isSumSubset([2, 4, 6, 8], 4, 5) + >>> is_sum_subset([2, 4, 6, 8], 4, 5) False - >>> isSumSubset([2, 4, 6, 8], 4, 14) + >>> is_sum_subset([2, 4, 6, 8], 4, 14) True """ # a subset value says 1 if that subset sum can be formed else 0 # initially no subsets can be formed hence False/0 - subset = [[False for i in range(requiredSum + 1)] for i in range(arrLen + 1)] + subset = [[False for i in range(required_sum + 1)] for i in range(arr_len + 1)] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 - for i in range(arrLen + 1): + for i in range(arr_len + 1): subset[i][0] = True # sum is not 
zero and set is empty then false - for i in range(1, requiredSum + 1): + for i in range(1, required_sum + 1): subset[0][i] = False - for i in range(1, arrLen + 1): - for j in range(1, requiredSum + 1): + for i in range(1, arr_len + 1): + for j in range(1, required_sum + 1): if arr[i - 1] > j: subset[i][j] = subset[i - 1][j] if arr[i - 1] <= j: @@ -28,7 +28,7 @@ def isSumSubset(arr, arrLen, requiredSum): # uncomment to print the subset # for i in range(arrLen+1): # print(subset[i]) - print(subset[arrLen][requiredSum]) + print(subset[arr_len][required_sum]) if __name__ == "__main__": diff --git a/fractals/sierpinski_triangle.py b/fractals/sierpinski_triangle.py index cf41ffa5f190..8be2897c152a 100644 --- a/fractals/sierpinski_triangle.py +++ b/fractals/sierpinski_triangle.py @@ -35,30 +35,30 @@ points = [[-175, -125], [0, 175], [175, -125]] # size of triangle -def getMid(p1, p2): +def get_mid(p1, p2): return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2) # find midpoint def triangle(points, depth): - myPen.up() - myPen.goto(points[0][0], points[0][1]) - myPen.down() - myPen.goto(points[1][0], points[1][1]) - myPen.goto(points[2][0], points[2][1]) - myPen.goto(points[0][0], points[0][1]) + my_pen.up() + my_pen.goto(points[0][0], points[0][1]) + my_pen.down() + my_pen.goto(points[1][0], points[1][1]) + my_pen.goto(points[2][0], points[2][1]) + my_pen.goto(points[0][0], points[0][1]) if depth > 0: triangle( - [points[0], getMid(points[0], points[1]), getMid(points[0], points[2])], + [points[0], get_mid(points[0], points[1]), get_mid(points[0], points[2])], depth - 1, ) triangle( - [points[1], getMid(points[0], points[1]), getMid(points[1], points[2])], + [points[1], get_mid(points[0], points[1]), get_mid(points[1], points[2])], depth - 1, ) triangle( - [points[2], getMid(points[2], points[1]), getMid(points[0], points[2])], + [points[2], get_mid(points[2], points[1]), get_mid(points[0], points[2])], depth - 1, ) @@ -69,8 +69,8 @@ def triangle(points, depth): "right format for using this script: " "$python fractals.py " ) - myPen = turtle.Turtle() - myPen.ht() - myPen.speed(5) - myPen.pencolor("red") + my_pen = turtle.Turtle() + my_pen.ht() + my_pen.speed(5) + my_pen.pencolor("red") triangle(points, int(sys.argv[1])) diff --git a/geodesy/haversine_distance.py b/geodesy/haversine_distance.py index de8ac7f88302..b601d2fd1983 100644 --- a/geodesy/haversine_distance.py +++ b/geodesy/haversine_distance.py @@ -30,9 +30,9 @@ def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> fl """ # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System # Distance in metres(m) - AXIS_A = 6378137.0 - AXIS_B = 6356752.314245 - RADIUS = 6378137 + AXIS_A = 6378137.0 # noqa: N806 + AXIS_B = 6356752.314245 # noqa: N806 + RADIUS = 6378137 # noqa: N806 # Equation parameters # Equation https://en.wikipedia.org/wiki/Haversine_formula#Formulation flattening = (AXIS_A - AXIS_B) / AXIS_A diff --git a/geodesy/lamberts_ellipsoidal_distance.py b/geodesy/lamberts_ellipsoidal_distance.py index bf8f1b9a5080..d36d399538de 100644 --- a/geodesy/lamberts_ellipsoidal_distance.py +++ b/geodesy/lamberts_ellipsoidal_distance.py @@ -45,9 +45,9 @@ def lamberts_ellipsoidal_distance( # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System # Distance in metres(m) - AXIS_A = 6378137.0 - AXIS_B = 6356752.314245 - EQUATORIAL_RADIUS = 6378137 + AXIS_A = 6378137.0 # noqa: N806 + AXIS_B = 6356752.314245 # noqa: N806 + EQUATORIAL_RADIUS = 6378137 # noqa: N806 # Equation Parameters # 
https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines @@ -62,22 +62,22 @@ def lamberts_ellipsoidal_distance( sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS # Intermediate P and Q values - P_value = (b_lat1 + b_lat2) / 2 - Q_value = (b_lat2 - b_lat1) / 2 + p_value = (b_lat1 + b_lat2) / 2 + q_value = (b_lat2 - b_lat1) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) - X_numerator = (sin(P_value) ** 2) * (cos(Q_value) ** 2) - X_demonimator = cos(sigma / 2) ** 2 - X_value = (sigma - sin(sigma)) * (X_numerator / X_demonimator) + x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2) + x_demonimator = cos(sigma / 2) ** 2 + x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) - Y_numerator = (cos(P_value) ** 2) * (sin(Q_value) ** 2) - Y_denominator = sin(sigma / 2) ** 2 - Y_value = (sigma + sin(sigma)) * (Y_numerator / Y_denominator) + y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2) + y_denominator = sin(sigma / 2) ** 2 + y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator) - return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (X_value + Y_value))) + return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": diff --git a/graphs/articulation_points.py b/graphs/articulation_points.py index 7197369de090..d28045282425 100644 --- a/graphs/articulation_points.py +++ b/graphs/articulation_points.py @@ -1,14 +1,14 @@ # Finding Articulation Points in Undirected Graph -def computeAP(l): # noqa: E741 +def compute_ap(l): # noqa: E741 n = len(l) - outEdgeCount = 0 + out_edge_count = 0 low = [0] * n visited = [False] * n - isArt = [False] * n + is_art = [False] * n - def dfs(root, at, parent, outEdgeCount): + def dfs(root, at, parent, out_edge_count): if parent == root: - outEdgeCount += 1 + out_edge_count += 1 visited[at] = True low[at] = at @@ -16,27 +16,27 @@ def dfs(root, at, parent, outEdgeCount): if to == parent: pass elif not visited[to]: - outEdgeCount = dfs(root, to, at, outEdgeCount) + out_edge_count = dfs(root, to, at, out_edge_count) low[at] = min(low[at], low[to]) # AP found via bridge if at < low[to]: - isArt[at] = True + is_art[at] = True # AP found via cycle if at == low[to]: - isArt[at] = True + is_art[at] = True else: low[at] = min(low[at], to) - return outEdgeCount + return out_edge_count for i in range(n): if not visited[i]: - outEdgeCount = 0 - outEdgeCount = dfs(i, i, -1, outEdgeCount) - isArt[i] = outEdgeCount > 1 + out_edge_count = 0 + out_edge_count = dfs(i, i, -1, out_edge_count) + is_art[i] = out_edge_count > 1 - for x in range(len(isArt)): - if isArt[x] is True: + for x in range(len(is_art)): + if is_art[x] is True: print(x) @@ -52,4 +52,4 @@ def dfs(root, at, parent, outEdgeCount): 7: [6, 8], 8: [5, 7], } -computeAP(data) +compute_ap(data) diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index db0ef8e7b3ac..b02e9af65846 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -76,20 +76,20 @@ def initialize_weighted_undirected_graph( """ -def dfs(G, s): - vis, S = {s}, [s] +def dfs(g, s): + vis, _s = {s}, [s] print(s) - while S: + while _s: flag = 0 - for i in G[S[-1]]: + for i in g[_s[-1]]: if i not in vis: - S.append(i) + _s.append(i) vis.add(i) flag = 1 print(i) break if not flag: - S.pop() + _s.pop() """ @@ -103,15 +103,15 @@ def dfs(G, s): """ -def bfs(G, s): - vis, Q = {s}, deque([s]) +def bfs(g, s): + 
vis, q = {s}, deque([s]) print(s) - while Q: - u = Q.popleft() - for v in G[u]: + while q: + u = q.popleft() + for v in g[u]: if v not in vis: vis.add(v) - Q.append(v) + q.append(v) print(v) @@ -127,10 +127,10 @@ def bfs(G, s): """ -def dijk(G, s): +def dijk(g, s): dist, known, path = {s: 0}, set(), {s: 0} while True: - if len(known) == len(G) - 1: + if len(known) == len(g) - 1: break mini = 100000 for i in dist: @@ -138,7 +138,7 @@ def dijk(G, s): mini = dist[i] u = i known.add(u) - for v in G[u]: + for v in g[u]: if v[0] not in known: if dist[u] + v[1] < dist.get(v[0], 100000): dist[v[0]] = dist[u] + v[1] @@ -155,27 +155,27 @@ def dijk(G, s): """ -def topo(G, ind=None, Q=None): - if Q is None: - Q = [1] +def topo(g, ind=None, q=None): + if q is None: + q = [1] if ind is None: - ind = [0] * (len(G) + 1) # SInce oth Index is ignored - for u in G: - for v in G[u]: + ind = [0] * (len(g) + 1) # SInce oth Index is ignored + for u in g: + for v in g[u]: ind[v] += 1 - Q = deque() - for i in G: + q = deque() + for i in g: if ind[i] == 0: - Q.append(i) - if len(Q) == 0: + q.append(i) + if len(q) == 0: return - v = Q.popleft() + v = q.popleft() print(v) - for w in G[v]: + for w in g[v]: ind[w] -= 1 if ind[w] == 0: - Q.append(w) - topo(G, ind, Q) + q.append(w) + topo(g, ind, q) """ @@ -206,9 +206,9 @@ def adjm(): """ -def floy(A_and_n): - (A, n) = A_and_n - dist = list(A) +def floy(a_and_n): + (a, n) = a_and_n + dist = list(a) path = [[0] * n for i in range(n)] for k in range(n): for i in range(n): @@ -231,10 +231,10 @@ def floy(A_and_n): """ -def prim(G, s): +def prim(g, s): dist, known, path = {s: 0}, set(), {s: 0} while True: - if len(known) == len(G) - 1: + if len(known) == len(g) - 1: break mini = 100000 for i in dist: @@ -242,7 +242,7 @@ def prim(G, s): mini = dist[i] u = i known.add(u) - for v in G[u]: + for v in g[u]: if v[0] not in known: if v[1] < dist.get(v[0], 100000): dist[v[0]] = v[1] @@ -279,16 +279,16 @@ def edglist(): """ -def krusk(E_and_n): +def krusk(e_and_n): # Sort edges on the basis of distance - (E, n) = E_and_n - E.sort(reverse=True, key=lambda x: x[2]) + (e, n) = e_and_n + e.sort(reverse=True, key=lambda x: x[2]) s = [{i} for i in range(1, n + 1)] while True: if len(s) == 1: break print(s) - x = E.pop() + x = e.pop() for i in range(len(s)): if x[0] in s[i]: break diff --git a/graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py index b5203b4c5c7d..552b7eee283d 100644 --- a/graphs/check_bipartite_graph_bfs.py +++ b/graphs/check_bipartite_graph_bfs.py @@ -9,7 +9,7 @@ from queue import Queue -def checkBipartite(graph): +def check_bipartite(graph): queue = Queue() visited = [False] * len(graph) color = [-1] * len(graph) @@ -45,4 +45,4 @@ def bfs(): if __name__ == "__main__": # Adjacency List of graph - print(checkBipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})) + print(check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})) diff --git a/graphs/dijkstra.py b/graphs/dijkstra.py index d15fcbbfeef0..62c60f2c6be6 100644 --- a/graphs/dijkstra.py +++ b/graphs/dijkstra.py @@ -103,14 +103,14 @@ def dijkstra(graph, start, end): "G": [["F", 1]], } -shortDistance = dijkstra(G, "E", "C") -print(shortDistance) # E -- 3 --> F -- 3 --> C == 6 +short_distance = dijkstra(G, "E", "C") +print(short_distance) # E -- 3 --> F -- 3 --> C == 6 -shortDistance = dijkstra(G2, "E", "F") -print(shortDistance) # E -- 3 --> F == 3 +short_distance = dijkstra(G2, "E", "F") +print(short_distance) # E -- 3 --> F == 3 -shortDistance = dijkstra(G3, "E", "F") -print(shortDistance) 
# E -- 2 --> G -- 1 --> F == 3 +short_distance = dijkstra(G3, "E", "F") +print(short_distance) # E -- 2 --> G -- 1 --> F == 3 if __name__ == "__main__": import doctest diff --git a/graphs/dijkstra_2.py b/graphs/dijkstra_2.py index 762884136e4a..3170765bc8a8 100644 --- a/graphs/dijkstra_2.py +++ b/graphs/dijkstra_2.py @@ -1,6 +1,6 @@ -def printDist(dist, V): +def print_dist(dist, v): print("\nVertex Distance") - for i in range(V): + for i in range(v): if dist[i] != float("inf"): print(i, "\t", int(dist[i]), end="\t") else: @@ -8,26 +8,26 @@ def printDist(dist, V): print() -def minDist(mdist, vset, V): - minVal = float("inf") - minInd = -1 - for i in range(V): - if (not vset[i]) and mdist[i] < minVal: - minInd = i - minVal = mdist[i] - return minInd +def min_dist(mdist, vset, v): + min_val = float("inf") + min_ind = -1 + for i in range(v): + if (not vset[i]) and mdist[i] < min_val: + min_ind = i + min_val = mdist[i] + return min_ind -def Dijkstra(graph, V, src): - mdist = [float("inf") for i in range(V)] - vset = [False for i in range(V)] +def dijkstra(graph, v, src): + mdist = [float("inf") for i in range(v)] + vset = [False for i in range(v)] mdist[src] = 0.0 - for i in range(V - 1): - u = minDist(mdist, vset, V) + for i in range(v - 1): + u = min_dist(mdist, vset, v) vset[u] = True - for v in range(V): + for v in range(v): if ( (not vset[v]) and graph[u][v] != float("inf") @@ -35,7 +35,7 @@ def Dijkstra(graph, V, src): ): mdist[v] = mdist[u] + graph[u][v] - printDist(mdist, V) + print_dist(mdist, v) if __name__ == "__main__": @@ -55,4 +55,4 @@ def Dijkstra(graph, V, src): graph[src][dst] = weight gsrc = int(input("\nEnter shortest path source:").strip()) - Dijkstra(graph, V, gsrc) + dijkstra(graph, V, gsrc) diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py index 6b64834acd81..122821a376ed 100644 --- a/graphs/dijkstra_algorithm.py +++ b/graphs/dijkstra_algorithm.py @@ -15,7 +15,7 @@ def __init__(self): self.array = [] self.pos = {} # To store the pos of node in array - def isEmpty(self): + def is_empty(self): return self.cur_size == 0 def min_heapify(self, idx): @@ -110,24 +110,24 @@ def dijkstra(self, src): self.par = [-1] * self.num_nodes # src is the source node self.dist[src] = 0 - Q = PriorityQueue() - Q.insert((0, src)) # (dist from src, node) + q = PriorityQueue() + q.insert((0, src)) # (dist from src, node) for u in self.adjList.keys(): if u != src: self.dist[u] = sys.maxsize # Infinity self.par[u] = -1 - while not Q.isEmpty(): - u = Q.extract_min() # Returns node with the min dist from source + while not q.is_empty(): + u = q.extract_min() # Returns node with the min dist from source # Update the distance of all the neighbours of u and # if their prev dist was INFINITY then push them in Q for v, w in self.adjList[u]: new_dist = self.dist[u] + w if self.dist[v] > new_dist: if self.dist[v] == sys.maxsize: - Q.insert((new_dist, v)) + q.insert((new_dist, v)) else: - Q.decrease_key((self.dist[v], v), new_dist) + q.decrease_key((self.dist[v], v), new_dist) self.dist[v] = new_dist self.par[v] = u diff --git a/graphs/edmonds_karp_multiple_source_and_sink.py b/graphs/edmonds_karp_multiple_source_and_sink.py index 0f359ff1aea3..070d758e63b6 100644 --- a/graphs/edmonds_karp_multiple_source_and_sink.py +++ b/graphs/edmonds_karp_multiple_source_and_sink.py @@ -1,15 +1,15 @@ class FlowNetwork: def __init__(self, graph, sources, sinks): - self.sourceIndex = None - self.sinkIndex = None + self.source_index = None + self.sink_index = None self.graph = graph - 
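
In the dijkstra_2.py hunk above, the vertex count `V` and the inner loop variable `v` were renamed to the same name, so `for v in range(v)` overwrites the count after the first pass of the outer loop; later calls to `min_dist(mdist, vset, v)` and the final `print_dist(mdist, v)` then see a shrinking value. A minimal standalone sketch of the same adjacency-matrix Dijkstra, with the count kept under a distinct, illustrative name (`num_vertices`; this sketch is not part of the patch):

def dijkstra_sketch(graph, num_vertices, src):
    # graph is an adjacency matrix; missing edges may be float("inf")
    mdist = [float("inf")] * num_vertices
    visited = [False] * num_vertices
    mdist[src] = 0.0
    for _ in range(num_vertices - 1):
        # pick the closest vertex that has not been finalised yet
        u = min(
            (i for i in range(num_vertices) if not visited[i]),
            key=lambda i: mdist[i],
        )
        visited[u] = True
        for v in range(num_vertices):
            if not visited[v] and mdist[u] + graph[u][v] < mdist[v]:
                mdist[v] = mdist[u] + graph[u][v]
    return mdist

assert dijkstra_sketch([[0, 1, 4], [1, 0, 2], [4, 2, 0]], 3, 0) == [0, 1, 3]
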
self._normalizeGraph(sources, sinks) - self.verticesCount = len(graph) - self.maximumFlowAlgorithm = None + self._normalize_graph(sources, sinks) + self.vertices_count = len(graph) + self.maximum_flow_algorithm = None # make only one source and one sink - def _normalizeGraph(self, sources, sinks): + def _normalize_graph(self, sources, sinks): if sources is int: sources = [sources] if sinks is int: @@ -18,54 +18,54 @@ def _normalizeGraph(self, sources, sinks): if len(sources) == 0 or len(sinks) == 0: return - self.sourceIndex = sources[0] - self.sinkIndex = sinks[0] + self.source_index = sources[0] + self.sink_index = sinks[0] # make fake vertex if there are more # than one source or sink if len(sources) > 1 or len(sinks) > 1: - maxInputFlow = 0 + max_input_flow = 0 for i in sources: - maxInputFlow += sum(self.graph[i]) + max_input_flow += sum(self.graph[i]) size = len(self.graph) + 1 for room in self.graph: room.insert(0, 0) self.graph.insert(0, [0] * size) for i in sources: - self.graph[0][i + 1] = maxInputFlow - self.sourceIndex = 0 + self.graph[0][i + 1] = max_input_flow + self.source_index = 0 size = len(self.graph) + 1 for room in self.graph: room.append(0) self.graph.append([0] * size) for i in sinks: - self.graph[i + 1][size - 1] = maxInputFlow - self.sinkIndex = size - 1 + self.graph[i + 1][size - 1] = max_input_flow + self.sink_index = size - 1 - def findMaximumFlow(self): - if self.maximumFlowAlgorithm is None: + def find_maximum_flow(self): + if self.maximum_flow_algorithm is None: raise Exception("You need to set maximum flow algorithm before.") - if self.sourceIndex is None or self.sinkIndex is None: + if self.source_index is None or self.sink_index is None: return 0 - self.maximumFlowAlgorithm.execute() - return self.maximumFlowAlgorithm.getMaximumFlow() + self.maximum_flow_algorithm.execute() + return self.maximum_flow_algorithm.getMaximumFlow() - def setMaximumFlowAlgorithm(self, Algorithm): - self.maximumFlowAlgorithm = Algorithm(self) + def set_maximum_flow_algorithm(self, algorithm): + self.maximum_flow_algorithm = algorithm(self) class FlowNetworkAlgorithmExecutor: - def __init__(self, flowNetwork): - self.flowNetwork = flowNetwork - self.verticesCount = flowNetwork.verticesCount - self.sourceIndex = flowNetwork.sourceIndex - self.sinkIndex = flowNetwork.sinkIndex + def __init__(self, flow_network): + self.flow_network = flow_network + self.verticies_count = flow_network.verticesCount + self.source_index = flow_network.sourceIndex + self.sink_index = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that - self.graph = flowNetwork.graph + self.graph = flow_network.graph self.executed = False def execute(self): @@ -79,95 +79,96 @@ def _algorithm(self): class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor): - def __init__(self, flowNetwork): - super().__init__(flowNetwork) + def __init__(self, flow_network): + super().__init__(flow_network) # use this to save your result - self.maximumFlow = -1 + self.maximum_flow = -1 - def getMaximumFlow(self): + def get_maximum_flow(self): if not self.executed: raise Exception("You should execute algorithm before using its result!") - return self.maximumFlow + return self.maximum_flow class PushRelabelExecutor(MaximumFlowAlgorithmExecutor): - def __init__(self, flowNetwork): - super().__init__(flowNetwork) + def __init__(self, flow_network): + super().__init__(flow_network) - self.preflow = [[0] * self.verticesCount for i in range(self.verticesCount)] 
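
In the FlowNetworkAlgorithmExecutor hunk above, the renamed constructor still reads the old camelCase attributes (`flow_network.verticesCount`, `flow_network.sourceIndex`, `flow_network.sinkIndex`), and `find_maximum_flow` still calls `getMaximumFlow()`, even though `FlowNetwork` now stores `vertices_count` / `source_index` / `sink_index` and the getter was renamed to `get_maximum_flow`; as renamed, those lookups would raise `AttributeError`. A minimal self-contained sketch of the intended attribute handshake under the new names (the `*Sketch` classes are illustrative stand-ins, not part of the patch; the `verticies_count` spelling is kept as it appears elsewhere in the renamed file):

class FlowNetworkSketch:
    # stand-in for FlowNetwork, holding only the renamed fields
    def __init__(self, graph, source_index, sink_index):
        self.graph = graph
        self.vertices_count = len(graph)
        self.source_index = source_index
        self.sink_index = sink_index

class ExecutorSketch:
    # reads every field under its renamed snake_case name
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        self.graph = flow_network.graph  # shared reference, not a copy
        self.executed = False

network = FlowNetworkSketch([[0, 7], [0, 0]], source_index=0, sink_index=1)
executor = ExecutorSketch(network)
assert (executor.verticies_count, executor.source_index, executor.sink_index) == (2, 0, 1)
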
+ self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)] - self.heights = [0] * self.verticesCount - self.excesses = [0] * self.verticesCount + self.heights = [0] * self.verticies_count + self.excesses = [0] * self.verticies_count def _algorithm(self): - self.heights[self.sourceIndex] = self.verticesCount + self.heights[self.source_index] = self.verticies_count # push some substance to graph - for nextVertexIndex, bandwidth in enumerate(self.graph[self.sourceIndex]): - self.preflow[self.sourceIndex][nextVertexIndex] += bandwidth - self.preflow[nextVertexIndex][self.sourceIndex] -= bandwidth - self.excesses[nextVertexIndex] += bandwidth + for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]): + self.preflow[self.source_index][nextvertex_index] += bandwidth + self.preflow[nextvertex_index][self.source_index] -= bandwidth + self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule - verticesList = [ + vertices_list = [ i - for i in range(self.verticesCount) - if i != self.sourceIndex and i != self.sinkIndex + for i in range(self.verticies_count) + if i != self.source_index and i != self.sink_index ] # move through list i = 0 - while i < len(verticesList): - vertexIndex = verticesList[i] - previousHeight = self.heights[vertexIndex] - self.processVertex(vertexIndex) - if self.heights[vertexIndex] > previousHeight: + while i < len(vertices_list): + vertex_index = vertices_list[i] + previous_height = self.heights[vertex_index] + self.process_vertex(vertex_index) + if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index - verticesList.insert(0, verticesList.pop(i)) + vertices_list.insert(0, vertices_list.pop(i)) i = 0 else: i += 1 - self.maximumFlow = sum(self.preflow[self.sourceIndex]) + self.maximum_flow = sum(self.preflow[self.source_index]) - def processVertex(self, vertexIndex): - while self.excesses[vertexIndex] > 0: - for neighbourIndex in range(self.verticesCount): + def process_vertex(self, vertex_index): + while self.excesses[vertex_index] > 0: + for neighbour_index in range(self.verticies_count): # if it's neighbour and current vertex is higher if ( - self.graph[vertexIndex][neighbourIndex] - - self.preflow[vertexIndex][neighbourIndex] + self.graph[vertex_index][neighbour_index] + - self.preflow[vertex_index][neighbour_index] > 0 - and self.heights[vertexIndex] > self.heights[neighbourIndex] + and self.heights[vertex_index] > self.heights[neighbour_index] ): - self.push(vertexIndex, neighbourIndex) + self.push(vertex_index, neighbour_index) - self.relabel(vertexIndex) + self.relabel(vertex_index) - def push(self, fromIndex, toIndex): - preflowDelta = min( - self.excesses[fromIndex], - self.graph[fromIndex][toIndex] - self.preflow[fromIndex][toIndex], + def push(self, from_index, to_index): + preflow_delta = min( + self.excesses[from_index], + self.graph[from_index][to_index] - self.preflow[from_index][to_index], ) - self.preflow[fromIndex][toIndex] += preflowDelta - self.preflow[toIndex][fromIndex] -= preflowDelta - self.excesses[fromIndex] -= preflowDelta - self.excesses[toIndex] += preflowDelta - - def relabel(self, vertexIndex): - minHeight = None - for toIndex in range(self.verticesCount): + self.preflow[from_index][to_index] += preflow_delta + self.preflow[to_index][from_index] -= preflow_delta + self.excesses[from_index] -= preflow_delta + self.excesses[to_index] += preflow_delta + + def relabel(self, vertex_index): + min_height = None + for to_index in 
range(self.verticies_count): if ( - self.graph[vertexIndex][toIndex] - self.preflow[vertexIndex][toIndex] + self.graph[vertex_index][to_index] + - self.preflow[vertex_index][to_index] > 0 ): - if minHeight is None or self.heights[toIndex] < minHeight: - minHeight = self.heights[toIndex] + if min_height is None or self.heights[to_index] < min_height: + min_height = self.heights[to_index] - if minHeight is not None: - self.heights[vertexIndex] = minHeight + 1 + if min_height is not None: + self.heights[vertex_index] = min_height + 1 if __name__ == "__main__": @@ -184,10 +185,10 @@ def relabel(self, vertexIndex): graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network - flowNetwork = FlowNetwork(graph, entrances, exits) + flow_network = FlowNetwork(graph, entrances, exits) # set algorithm - flowNetwork.setMaximumFlowAlgorithm(PushRelabelExecutor) + flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate - maximumFlow = flowNetwork.findMaximumFlow() + maximum_flow = flow_network.find_maximum_flow() - print(f"maximum flow is {maximumFlow}") + print(f"maximum flow is {maximum_flow}") diff --git a/graphs/eulerian_path_and_circuit_for_undirected_graph.py b/graphs/eulerian_path_and_circuit_for_undirected_graph.py index fa4f73abd86f..6c43c5d3e6e3 100644 --- a/graphs/eulerian_path_and_circuit_for_undirected_graph.py +++ b/graphs/eulerian_path_and_circuit_for_undirected_graph.py @@ -50,21 +50,21 @@ def check_euler(graph, max_node): def main(): - G1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} - G2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} - G3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} - G4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]} - G5 = { + g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} + g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} + g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} + g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]} + g5 = { 1: [], 2: [] # all degree is zero } max_node = 10 - check_euler(G1, max_node) - check_euler(G2, max_node) - check_euler(G3, max_node) - check_euler(G4, max_node) - check_euler(G5, max_node) + check_euler(g1, max_node) + check_euler(g2, max_node) + check_euler(g3, max_node) + check_euler(g4, max_node) + check_euler(g5, max_node) if __name__ == "__main__": diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index 548ce3c54ffe..50081afa6728 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -151,16 +151,16 @@ def create_edge(nodes, graph, cluster, c1): def construct_graph(cluster, nodes): - X = cluster[max(cluster.keys())] + x = cluster[max(cluster.keys())] cluster[max(cluster.keys()) + 1] = "Header" graph = {} - for i in X: + for i in x: if tuple(["Header"]) in graph: - graph[tuple(["Header"])].append(X[i]) + graph[tuple(["Header"])].append(x[i]) else: - graph[tuple(["Header"])] = [X[i]] - for i in X: - graph[tuple(X[i])] = [["Header"]] + graph[tuple(["Header"])] = [x[i]] + for i in x: + graph[tuple(x[i])] = [["Header"]] i = 1 while i < max(cluster) - 1: create_edge(nodes, graph, cluster, i) @@ -168,7 +168,7 @@ def construct_graph(cluster, nodes): return graph -def myDFS(graph, start, end, path=None): +def my_dfs(graph, start, end, path=None): """ find different DFS walk from given node to Header node """ @@ -177,7 +177,7 @@ def myDFS(graph, start, end, path=None): paths.append(path) for node in graph[start]: if tuple(node) not in 
path: - myDFS(graph, tuple(node), end, path) + my_dfs(graph, tuple(node), end, path) def find_freq_subgraph_given_support(s, cluster, graph): @@ -186,23 +186,23 @@ def find_freq_subgraph_given_support(s, cluster, graph): """ k = int(s / 100 * (len(cluster) - 1)) for i in cluster[k].keys(): - myDFS(graph, tuple(cluster[k][i]), tuple(["Header"])) + my_dfs(graph, tuple(cluster[k][i]), tuple(["Header"])) def freq_subgraphs_edge_list(paths): """ returns Edge list for frequent subgraphs """ - freq_sub_EL = [] + freq_sub_el = [] for edges in paths: - EL = [] + el = [] for j in range(len(edges) - 1): temp = list(edges[j]) for e in temp: edge = (e[0], e[1]) - EL.append(edge) - freq_sub_EL.append(EL) - return freq_sub_EL + el.append(edge) + freq_sub_el.append(el) + return freq_sub_el def preprocess(edge_array): diff --git a/graphs/kahns_algorithm_long.py b/graphs/kahns_algorithm_long.py index fed7517a21e2..776ae3a2f903 100644 --- a/graphs/kahns_algorithm_long.py +++ b/graphs/kahns_algorithm_long.py @@ -1,8 +1,8 @@ # Finding longest distance in Directed Acyclic Graph using KahnsAlgorithm -def longestDistance(graph): +def longest_distance(graph): indegree = [0] * len(graph) queue = [] - longDist = [1] * len(graph) + long_dist = [1] * len(graph) for key, values in graph.items(): for i in values: @@ -17,15 +17,15 @@ def longestDistance(graph): for x in graph[vertex]: indegree[x] -= 1 - if longDist[vertex] + 1 > longDist[x]: - longDist[x] = longDist[vertex] + 1 + if long_dist[vertex] + 1 > long_dist[x]: + long_dist[x] = long_dist[vertex] + 1 if indegree[x] == 0: queue.append(x) - print(max(longDist)) + print(max(long_dist)) # Adjacency list of Graph graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} -longestDistance(graph) +longest_distance(graph) diff --git a/graphs/kahns_algorithm_topo.py b/graphs/kahns_algorithm_topo.py index bf9f90299361..6879b047fe35 100644 --- a/graphs/kahns_algorithm_topo.py +++ b/graphs/kahns_algorithm_topo.py @@ -1,4 +1,4 @@ -def topologicalSort(graph): +def topological_sort(graph): """ Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph using BFS @@ -33,4 +33,4 @@ def topologicalSort(graph): # Adjacency List of Graph graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} -topologicalSort(graph) +topological_sort(graph) diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 16b4286140ec..9b2c645f16df 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -2,15 +2,15 @@ from collections import defaultdict -def PrimsAlgorithm(l): # noqa: E741 +def prisms_algorithm(l): # noqa: E741 - nodePosition = [] + node_position = [] def get_position(vertex): - return nodePosition[vertex] + return node_position[vertex] def set_position(vertex, pos): - nodePosition[vertex] = pos + node_position[vertex] = pos def top_to_bottom(heap, start, size, positions): if start > size // 2 - 1: @@ -64,44 +64,44 @@ def heapify(heap, positions): for i in range(start, -1, -1): top_to_bottom(heap, i, len(heap), positions) - def deleteMinimum(heap, positions): + def delete_minimum(heap, positions): temp = positions[0] heap[0] = sys.maxsize top_to_bottom(heap, 0, len(heap), positions) return temp visited = [0 for i in range(len(l))] - Nbr_TV = [-1 for i in range(len(l))] # Neighboring Tree Vertex of selected vertex + nbr_tv = [-1 for i in range(len(l))] # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of 
partial tree # formed in graph - Distance_TV = [] # Heap of Distance of vertices from their neighboring vertex - Positions = [] + distance_tv = [] # Heap of Distance of vertices from their neighboring vertex + positions = [] for x in range(len(l)): p = sys.maxsize - Distance_TV.append(p) - Positions.append(x) - nodePosition.append(x) + distance_tv.append(p) + positions.append(x) + node_position.append(x) - TreeEdges = [] + tree_edges = [] visited[0] = 1 - Distance_TV[0] = sys.maxsize + distance_tv[0] = sys.maxsize for x in l[0]: - Nbr_TV[x[0]] = 0 - Distance_TV[x[0]] = x[1] - heapify(Distance_TV, Positions) + nbr_tv[x[0]] = 0 + distance_tv[x[0]] = x[1] + heapify(distance_tv, positions) for i in range(1, len(l)): - vertex = deleteMinimum(Distance_TV, Positions) + vertex = delete_minimum(distance_tv, positions) if visited[vertex] == 0: - TreeEdges.append((Nbr_TV[vertex], vertex)) + tree_edges.append((nbr_tv[vertex], vertex)) visited[vertex] = 1 for v in l[vertex]: - if visited[v[0]] == 0 and v[1] < Distance_TV[get_position(v[0])]: - Distance_TV[get_position(v[0])] = v[1] - bottom_to_top(v[1], get_position(v[0]), Distance_TV, Positions) - Nbr_TV[v[0]] = vertex - return TreeEdges + if visited[v[0]] == 0 and v[1] < distance_tv[get_position(v[0])]: + distance_tv[get_position(v[0])] = v[1] + bottom_to_top(v[1], get_position(v[0]), distance_tv, positions) + nbr_tv[v[0]] = vertex + return tree_edges if __name__ == "__main__": # pragma: no cover @@ -113,4 +113,4 @@ def deleteMinimum(heap, positions): l = [int(x) for x in input().strip().split()] # noqa: E741 adjlist[l[0]].append([l[1], l[2]]) adjlist[l[1]].append([l[0], l[2]]) - print(PrimsAlgorithm(adjlist)) + print(prisms_algorithm(adjlist)) diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index 8607f51d8f52..e16a983932d0 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -55,21 +55,21 @@ def get(self): return (priority, item) -def consistent_heuristic(P: TPos, goal: TPos): +def consistent_heuristic(p: TPos, goal: TPos): # euclidean distance - a = np.array(P) + a = np.array(p) b = np.array(goal) return np.linalg.norm(a - b) -def heuristic_2(P: TPos, goal: TPos): +def heuristic_2(p: TPos, goal: TPos): # integer division by time variable - return consistent_heuristic(P, goal) // t + return consistent_heuristic(p, goal) // t -def heuristic_1(P: TPos, goal: TPos): +def heuristic_1(p: TPos, goal: TPos): # manhattan distance - return abs(P[0] - goal[0]) + abs(P[1] - goal[1]) + return abs(p[0] - goal[0]) + abs(p[1] - goal[1]) def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]): diff --git a/graphs/scc_kosaraju.py b/graphs/scc_kosaraju.py index fa182aa2faf1..ea9d35282858 100644 --- a/graphs/scc_kosaraju.py +++ b/graphs/scc_kosaraju.py @@ -2,7 +2,7 @@ def dfs(u): - global graph, reversedGraph, scc, component, visit, stack + global graph, reversed_graph, scc, component, visit, stack if visit[u]: return visit[u] = True @@ -12,17 +12,17 @@ def dfs(u): def dfs2(u): - global graph, reversedGraph, scc, component, visit, stack + global graph, reversed_graph, scc, component, visit, stack if visit[u]: return visit[u] = True component.append(u) - for v in reversedGraph[u]: + for v in reversed_graph[u]: dfs2(v) def kosaraju(): - global graph, reversedGraph, scc, component, visit, stack + global graph, reversed_graph, scc, component, visit, stack for i in range(n): dfs(i) visit = [False] * n @@ -40,12 +40,12 @@ def kosaraju(): n, m = list(map(int, input().strip().split())) graph: 
list[list[int]] = [[] for i in range(n)] # graph - reversedGraph: list[list[int]] = [[] for i in range(n)] # reversed graph + reversed_graph: list[list[int]] = [[] for i in range(n)] # reversed graph # input graph data (edges) for i in range(m): u, v = list(map(int, input().strip().split())) graph[u].append(v) - reversedGraph[v].append(u) + reversed_graph[v].append(u) stack: list[int] = [] visit: list[bool] = [False] * n diff --git a/graphs/tests/test_min_spanning_tree_prim.py b/graphs/tests/test_min_spanning_tree_prim.py index 048fbf595fa6..91feab28fc81 100644 --- a/graphs/tests/test_min_spanning_tree_prim.py +++ b/graphs/tests/test_min_spanning_tree_prim.py @@ -1,6 +1,6 @@ from collections import defaultdict -from graphs.minimum_spanning_tree_prims import PrimsAlgorithm as mst +from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def test_prim_successful_result(): diff --git a/hashes/adler32.py b/hashes/adler32.py index 4a61b97e3590..80229f04620a 100644 --- a/hashes/adler32.py +++ b/hashes/adler32.py @@ -20,7 +20,7 @@ def adler32(plain_text: str) -> int: >>> adler32('go adler em all') 708642122 """ - MOD_ADLER = 65521 + MOD_ADLER = 65521 # noqa: N806 a = 1 b = 0 for plain_chr in plain_text: diff --git a/hashes/chaos_machine.py b/hashes/chaos_machine.py index a6d476eb7320..69313fbb2065 100644 --- a/hashes/chaos_machine.py +++ b/hashes/chaos_machine.py @@ -43,11 +43,11 @@ def pull(): global buffer_space, params_space, machine_time, K, m, t # PRNG (Xorshift by George Marsaglia) - def xorshift(X, Y): - X ^= Y >> 13 - Y ^= X << 17 - X ^= Y >> 5 - return X + def xorshift(x, y): + x ^= y >> 13 + y ^= x << 17 + x ^= y >> 5 + return x # Choosing Dynamical Systems (Increment) key = machine_time % m @@ -63,13 +63,13 @@ def xorshift(X, Y): params_space[key] = (machine_time * 0.01 + r * 1.01) % 1 + 3 # Choosing Chaotic Data - X = int(buffer_space[(key + 2) % m] * (10**10)) - Y = int(buffer_space[(key - 2) % m] * (10**10)) + x = int(buffer_space[(key + 2) % m] * (10**10)) + y = int(buffer_space[(key - 2) % m] * (10**10)) # Machine Time machine_time += 1 - return xorshift(X, Y) % 0xFFFFFFFF + return xorshift(x, y) % 0xFFFFFFFF def reset(): diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index ac20fe03b3fb..a62d092a172f 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -68,177 +68,177 @@ def text_from_bits(bits, encoding="utf-8", errors="surrogatepass"): # Functions of hamming code------------------------------------------- -def emitterConverter(sizePar, data): +def emitter_converter(size_par, data): """ - :param sizePar: how many parity bits the message must have + :param size_par: how many parity bits the message must have :param data: information bits :return: message to be transmitted by unreliable medium - bits of information merged with parity bits - >>> emitterConverter(4, "101010111111") + >>> emitter_converter(4, "101010111111") ['1', '1', '1', '1', '0', '1', '0', '0', '1', '0', '1', '1', '1', '1', '1', '1'] """ - if sizePar + len(data) <= 2**sizePar - (len(data) - 1): + if size_par + len(data) <= 2**size_par - (len(data) - 1): print("ERROR - size of parity don't match with size of data") exit(0) - dataOut = [] + data_out = [] parity = [] - binPos = [bin(x)[2:] for x in range(1, sizePar + len(data) + 1)] + bin_pos = [bin(x)[2:] for x in range(1, size_par + len(data) + 1)] # sorted information data for the size of the output data - dataOrd = [] + data_ord = [] # data position template + parity - dataOutGab = [] + data_out_gab = [] # parity bit 
counter - qtdBP = 0 + qtd_bp = 0 # counter position of data bits - contData = 0 + cont_data = 0 - for x in range(1, sizePar + len(data) + 1): + for x in range(1, size_par + len(data) + 1): # Performs a template of bit positions - who should be given, # and who should be parity - if qtdBP < sizePar: + if qtd_bp < size_par: if (np.log(x) / np.log(2)).is_integer(): - dataOutGab.append("P") - qtdBP = qtdBP + 1 + data_out_gab.append("P") + qtd_bp = qtd_bp + 1 else: - dataOutGab.append("D") + data_out_gab.append("D") else: - dataOutGab.append("D") + data_out_gab.append("D") # Sorts the data to the new output size - if dataOutGab[-1] == "D": - dataOrd.append(data[contData]) - contData += 1 + if data_out_gab[-1] == "D": + data_ord.append(data[cont_data]) + cont_data += 1 else: - dataOrd.append(None) + data_ord.append(None) # Calculates parity - qtdBP = 0 # parity bit counter - for bp in range(1, sizePar + 1): + qtd_bp = 0 # parity bit counter + for bp in range(1, size_par + 1): # Bit counter one for a given parity - contBO = 0 + cont_bo = 0 # counter to control the loop reading - contLoop = 0 - for x in dataOrd: + cont_loop = 0 + for x in data_ord: if x is not None: try: - aux = (binPos[contLoop])[-1 * (bp)] + aux = (bin_pos[cont_loop])[-1 * (bp)] except IndexError: aux = "0" if aux == "1": if x == "1": - contBO += 1 - contLoop += 1 - parity.append(contBO % 2) + cont_bo += 1 + cont_loop += 1 + parity.append(cont_bo % 2) - qtdBP += 1 + qtd_bp += 1 # Mount the message - ContBP = 0 # parity bit counter - for x in range(0, sizePar + len(data)): - if dataOrd[x] is None: - dataOut.append(str(parity[ContBP])) - ContBP += 1 + cont_bp = 0 # parity bit counter + for x in range(0, size_par + len(data)): + if data_ord[x] is None: + data_out.append(str(parity[cont_bp])) + cont_bp += 1 else: - dataOut.append(dataOrd[x]) + data_out.append(data_ord[x]) - return dataOut + return data_out -def receptorConverter(sizePar, data): +def receptor_converter(size_par, data): """ - >>> receptorConverter(4, "1111010010111111") + >>> receptor_converter(4, "1111010010111111") (['1', '0', '1', '0', '1', '0', '1', '1', '1', '1', '1', '1'], True) """ # data position template + parity - dataOutGab = [] + data_out_gab = [] # Parity bit counter - qtdBP = 0 + qtd_bp = 0 # Counter p data bit reading - contData = 0 + cont_data = 0 # list of parity received - parityReceived = [] - dataOutput = [] + parity_received = [] + data_output = [] for x in range(1, len(data) + 1): # Performs a template of bit positions - who should be given, # and who should be parity - if qtdBP < sizePar and (np.log(x) / np.log(2)).is_integer(): - dataOutGab.append("P") - qtdBP = qtdBP + 1 + if qtd_bp < size_par and (np.log(x) / np.log(2)).is_integer(): + data_out_gab.append("P") + qtd_bp = qtd_bp + 1 else: - dataOutGab.append("D") + data_out_gab.append("D") # Sorts the data to the new output size - if dataOutGab[-1] == "D": - dataOutput.append(data[contData]) + if data_out_gab[-1] == "D": + data_output.append(data[cont_data]) else: - parityReceived.append(data[contData]) - contData += 1 + parity_received.append(data[cont_data]) + cont_data += 1 # -----------calculates the parity with the data - dataOut = [] + data_out = [] parity = [] - binPos = [bin(x)[2:] for x in range(1, sizePar + len(dataOutput) + 1)] + bin_pos = [bin(x)[2:] for x in range(1, size_par + len(data_output) + 1)] # sorted information data for the size of the output data - dataOrd = [] + data_ord = [] # Data position feedback + parity - dataOutGab = [] + data_out_gab = [] # Parity bit counter - 
qtdBP = 0 + qtd_bp = 0 # Counter p data bit reading - contData = 0 + cont_data = 0 - for x in range(1, sizePar + len(dataOutput) + 1): + for x in range(1, size_par + len(data_output) + 1): # Performs a template position of bits - who should be given, # and who should be parity - if qtdBP < sizePar and (np.log(x) / np.log(2)).is_integer(): - dataOutGab.append("P") - qtdBP = qtdBP + 1 + if qtd_bp < size_par and (np.log(x) / np.log(2)).is_integer(): + data_out_gab.append("P") + qtd_bp = qtd_bp + 1 else: - dataOutGab.append("D") + data_out_gab.append("D") # Sorts the data to the new output size - if dataOutGab[-1] == "D": - dataOrd.append(dataOutput[contData]) - contData += 1 + if data_out_gab[-1] == "D": + data_ord.append(data_output[cont_data]) + cont_data += 1 else: - dataOrd.append(None) + data_ord.append(None) # Calculates parity - qtdBP = 0 # parity bit counter - for bp in range(1, sizePar + 1): + qtd_bp = 0 # parity bit counter + for bp in range(1, size_par + 1): # Bit counter one for a certain parity - contBO = 0 + cont_bo = 0 # Counter to control loop reading - contLoop = 0 - for x in dataOrd: + cont_loop = 0 + for x in data_ord: if x is not None: try: - aux = (binPos[contLoop])[-1 * (bp)] + aux = (bin_pos[cont_loop])[-1 * (bp)] except IndexError: aux = "0" if aux == "1" and x == "1": - contBO += 1 - contLoop += 1 - parity.append(str(contBO % 2)) + cont_bo += 1 + cont_loop += 1 + parity.append(str(cont_bo % 2)) - qtdBP += 1 + qtd_bp += 1 # Mount the message - ContBP = 0 # Parity bit counter - for x in range(0, sizePar + len(dataOutput)): - if dataOrd[x] is None: - dataOut.append(str(parity[ContBP])) - ContBP += 1 + cont_bp = 0 # Parity bit counter + for x in range(0, size_par + len(data_output)): + if data_ord[x] is None: + data_out.append(str(parity[cont_bp])) + cont_bp += 1 else: - dataOut.append(dataOrd[x]) + data_out.append(data_ord[x]) - ack = parityReceived == parity - return dataOutput, ack + ack = parity_received == parity + return data_output, ack # --------------------------------------------------------------------- diff --git a/hashes/md5.py b/hashes/md5.py index c56c073cc0c7..2020bf2e53bf 100644 --- a/hashes/md5.py +++ b/hashes/md5.py @@ -1,7 +1,7 @@ import math -def rearrange(bitString32): +def rearrange(bit_string_32): """[summary] Regroups the given binary string. @@ -17,21 +17,21 @@ def rearrange(bitString32): 'pqrstuvwhijklmno90abcdfg12345678' """ - if len(bitString32) != 32: + if len(bit_string_32) != 32: raise ValueError("Need length 32") - newString = "" + new_string = "" for i in [3, 2, 1, 0]: - newString += bitString32[8 * i : 8 * i + 8] - return newString + new_string += bit_string_32[8 * i : 8 * i + 8] + return new_string -def reformatHex(i): +def reformat_hex(i): """[summary] Converts the given integer into 8-digit hex number. 
Arguments: i {[int]} -- [integer] - >>> reformatHex(666) + >>> reformat_hex(666) '9a020000' """ @@ -42,7 +42,7 @@ def reformatHex(i): return thing -def pad(bitString): +def pad(bit_string): """[summary] Fills up the binary string to a 512 bit binary string @@ -52,33 +52,33 @@ def pad(bitString): Returns: [string] -- [binary string] """ - startLength = len(bitString) - bitString += "1" - while len(bitString) % 512 != 448: - bitString += "0" - lastPart = format(startLength, "064b") - bitString += rearrange(lastPart[32:]) + rearrange(lastPart[:32]) - return bitString + start_length = len(bit_string) + bit_string += "1" + while len(bit_string) % 512 != 448: + bit_string += "0" + last_part = format(start_length, "064b") + bit_string += rearrange(last_part[32:]) + rearrange(last_part[:32]) + return bit_string -def getBlock(bitString): +def get_block(bit_string): """[summary] Iterator: Returns by each call a list of length 16 with the 32 bit integer blocks. Arguments: - bitString {[string]} -- [binary string >= 512] + bit_string {[string]} -- [binary string >= 512] """ - currPos = 0 - while currPos < len(bitString): - currPart = bitString[currPos : currPos + 512] - mySplits = [] + curr_pos = 0 + while curr_pos < len(bit_string): + curr_part = bit_string[curr_pos : curr_pos + 512] + my_splits = [] for i in range(16): - mySplits.append(int(rearrange(currPart[32 * i : 32 * i + 32]), 2)) - yield mySplits - currPos += 512 + my_splits.append(int(rearrange(curr_part[32 * i : 32 * i + 32]), 2)) + yield my_splits + curr_pos += 512 def not32(i): @@ -101,7 +101,7 @@ def leftrot32(i, s): return (i << s) ^ (i >> (32 - s)) -def md5me(testString): +def md5me(test_string): """[summary] Returns a 32-bit hash code of the string 'testString' @@ -110,7 +110,7 @@ def md5me(testString): """ bs = "" - for i in testString: + for i in test_string: bs += format(ord(i), "08b") bs = pad(bs) @@ -188,37 +188,37 @@ def md5me(testString): 21, ] - for m in getBlock(bs): - A = a0 - B = b0 - C = c0 - D = d0 + for m in get_block(bs): + a = a0 + b = b0 + c = c0 + d = d0 for i in range(64): if i <= 15: # f = (B & C) | (not32(B) & D) - f = D ^ (B & (C ^ D)) + f = d ^ (b & (c ^ d)) g = i elif i <= 31: # f = (D & B) | (not32(D) & C) - f = C ^ (D & (B ^ C)) + f = c ^ (d & (b ^ c)) g = (5 * i + 1) % 16 elif i <= 47: - f = B ^ C ^ D + f = b ^ c ^ d g = (3 * i + 5) % 16 else: - f = C ^ (B | not32(D)) + f = c ^ (b | not32(d)) g = (7 * i) % 16 - dtemp = D - D = C - C = B - B = sum32(B, leftrot32((A + f + tvals[i] + m[g]) % 2**32, s[i])) - A = dtemp - a0 = sum32(a0, A) - b0 = sum32(b0, B) - c0 = sum32(c0, C) - d0 = sum32(d0, D) - - digest = reformatHex(a0) + reformatHex(b0) + reformatHex(c0) + reformatHex(d0) + dtemp = d + d = c + c = b + b = sum32(b, leftrot32((a + f + tvals[i] + m[g]) % 2**32, s[i])) + a = dtemp + a0 = sum32(a0, a) + b0 = sum32(b0, b) + c0 = sum32(c0, c) + d0 = sum32(d0, d) + + digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0) return digest diff --git a/hashes/sha1.py b/hashes/sha1.py index dde1efc557bb..b19e0cfafea3 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -133,7 +133,7 @@ class SHA1HashTest(unittest.TestCase): Test class for the SHA1Hash class. 
Inherits the TestCase class from unittest """ - def testMatchHashes(self): + def testMatchHashes(self): # noqa: N802 msg = bytes("Test String", "utf-8") self.assertEqual(SHA1Hash(msg).final_hash(), hashlib.sha1(msg).hexdigest()) diff --git a/hashes/sha256.py b/hashes/sha256.py index 9d4f250fe353..98f7c096e3b6 100644 --- a/hashes/sha256.py +++ b/hashes/sha256.py @@ -157,14 +157,14 @@ def final_hash(self) -> None: ) % 0x100000000 # Compression - S1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25) + s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25) ch = (e & f) ^ ((~e & (0xFFFFFFFF)) & g) temp1 = ( - h + S1 + ch + self.round_constants[index] + words[index] + h + s1 + ch + self.round_constants[index] + words[index] ) % 0x100000000 - S0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22) + s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22) maj = (a & b) ^ (a & c) ^ (b & c) - temp2 = (S0 + maj) % 0x100000000 + temp2 = (s0 + maj) % 0x100000000 h, g, f, e, d, c, b, a = ( g, diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py index 4b866331b8e3..24fbd9a5e002 100644 --- a/linear_algebra/src/power_iteration.py +++ b/linear_algebra/src/power_iteration.py @@ -63,8 +63,8 @@ def power_iteration( vector = w / np.linalg.norm(w) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) - vectorH = vector.conj().T if is_complex else vector.T - lambda_ = np.dot(vectorH, np.dot(input_matrix, vector)) + vector_h = vector.conj().T if is_complex else vector.T + lambda_ = np.dot(vector_h, np.dot(input_matrix, vector)) # Check convergence. error = np.abs(lambda_ - lambda_previous) / lambda_ diff --git a/linear_algebra/src/rayleigh_quotient.py b/linear_algebra/src/rayleigh_quotient.py index 78083aa755f1..4773429cbf1b 100644 --- a/linear_algebra/src/rayleigh_quotient.py +++ b/linear_algebra/src/rayleigh_quotient.py @@ -26,7 +26,7 @@ def is_hermitian(matrix: np.ndarray) -> bool: return np.array_equal(matrix, matrix.conjugate().T) -def rayleigh_quotient(A: np.ndarray, v: np.ndarray) -> Any: +def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any: """ Returns the Rayleigh quotient of a Hermitian matrix A and vector v. @@ -45,20 +45,20 @@ def rayleigh_quotient(A: np.ndarray, v: np.ndarray) -> Any: array([[3.]]) """ v_star = v.conjugate().T - v_star_dot = v_star.dot(A) + v_star_dot = v_star.dot(a) assert isinstance(v_star_dot, np.ndarray) return (v_star_dot.dot(v)) / (v_star.dot(v)) def tests() -> None: - A = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]]) + a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]]) v = np.array([[1], [2], [3]]) - assert is_hermitian(A), f"{A} is not hermitian." - print(rayleigh_quotient(A, v)) + assert is_hermitian(a), f"{a} is not hermitian." + print(rayleigh_quotient(a, v)) - A = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]]) - assert is_hermitian(A), f"{A} is not hermitian." - assert rayleigh_quotient(A, v) == float(3) + a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]]) + assert is_hermitian(a), f"{a} is not hermitian." 
+ assert rayleigh_quotient(a, v) == float(3) if __name__ == "__main__": diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index 724ceef2599a..97c06cb44e15 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -85,13 +85,13 @@ def test_mul(self) -> None: self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)") self.assertEqual((a * b), 0) - def test_zeroVector(self) -> None: + def test_zero_vector(self) -> None: """ test for global function zero_vector() """ self.assertTrue(str(zero_vector(10)).count("0") == 10) - def test_unitBasisVector(self) -> None: + def test_unit_basis_vector(self) -> None: """ test for global function unit_basis_vector() """ @@ -113,7 +113,7 @@ def test_copy(self) -> None: y = x.copy() self.assertEqual(str(x), str(y)) - def test_changeComponent(self) -> None: + def test_change_component(self) -> None: """ test for method change_component() """ @@ -126,77 +126,77 @@ def test_str_matrix(self) -> None: """ test for Matrix method str() """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(A)) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a)) def test_minor(self) -> None: """ test for Matrix method minor() """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] - for x in range(A.height()): - for y in range(A.width()): - self.assertEqual(minors[x][y], A.minor(x, y)) + for x in range(a.height()): + for y in range(a.width()): + self.assertEqual(minors[x][y], a.minor(x, y)) def test_cofactor(self) -> None: """ test for Matrix method cofactor() """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] - for x in range(A.height()): - for y in range(A.width()): - self.assertEqual(cofactors[x][y], A.cofactor(x, y)) + for x in range(a.height()): + for y in range(a.width()): + self.assertEqual(cofactors[x][y], a.cofactor(x, y)) def test_determinant(self) -> None: """ test for Matrix method determinant() """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - self.assertEqual(-5, A.determinant()) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + self.assertEqual(-5, a.determinant()) def test__mul__matrix(self) -> None: """ test for Matrix * operator """ - A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3) + a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3) x = Vector([1, 2, 3]) - self.assertEqual("(14,32,50)", str(A * x)) - self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(A * 2)) + self.assertEqual("(14,32,50)", str(a * x)) + self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2)) def test_change_component_matrix(self) -> None: """ test for Matrix method change_component() """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - A.change_component(0, 2, 5) - self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(A)) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + a.change_component(0, 2, 5) + self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a)) def test_component_matrix(self) -> None: """ test for Matrix method component() """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - self.assertEqual(7, A.component(2, 1), 0.01) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + self.assertEqual(7, a.component(2, 1), 
0.01) def test__add__matrix(self) -> None: """ test for Matrix + operator """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) - self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(A + B)) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) + self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b)) def test__sub__matrix(self) -> None: """ test for Matrix - operator """ - A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - B = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) - self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(A - B)) + a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) + b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) + self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b)) - def test_squareZeroMatrix(self) -> None: + def test_square_zero_matrix(self) -> None: """ test for global function square_zero_matrix() """ diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index ace6fb0fa883..4a86e5322a27 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -6,7 +6,7 @@ import numpy as np -class Decision_Tree: +class DecisionTree: def __init__(self, depth=5, min_leaf_size=5): self.depth = depth self.decision_boundary = 0 @@ -22,17 +22,17 @@ def mean_squared_error(self, labels, prediction): @param prediction: a floating point value return value: mean_squared_error calculates the error if prediction is used to estimate the labels - >>> tester = Decision_Tree() + >>> tester = DecisionTree() >>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10]) >>> test_prediction = np.float(6) >>> tester.mean_squared_error(test_labels, test_prediction) == ( - ... Test_Decision_Tree.helper_mean_squared_error_test(test_labels, + ... TestDecisionTree.helper_mean_squared_error_test(test_labels, ... test_prediction)) True >>> test_labels = np.array([1,2,3]) >>> test_prediction = np.float(2) >>> tester.mean_squared_error(test_labels, test_prediction) == ( - ... Test_Decision_Tree.helper_mean_squared_error_test(test_labels, + ... TestDecisionTree.helper_mean_squared_error_test(test_labels, ... test_prediction)) True """ @@ -41,10 +41,10 @@ def mean_squared_error(self, labels, prediction): return np.mean((labels - prediction) ** 2) - def train(self, X, y): + def train(self, x, y): """ train: - @param X: a one dimensional numpy array + @param x: a one dimensional numpy array @param y: a one dimensional numpy array. The contents of y are the labels for the corresponding X values @@ -55,17 +55,17 @@ def train(self, X, y): this section is to check that the inputs conform to our dimensionality constraints """ - if X.ndim != 1: + if x.ndim != 1: print("Error: Input data set must be one dimensional") return - if len(X) != len(y): + if len(x) != len(y): print("Error: X and y have different lengths") return if y.ndim != 1: print("Error: Data set labels must be one dimensional") return - if len(X) < 2 * self.min_leaf_size: + if len(x) < 2 * self.min_leaf_size: self.prediction = np.mean(y) return @@ -74,7 +74,7 @@ def train(self, X, y): return best_split = 0 - min_error = self.mean_squared_error(X, np.mean(y)) * 2 + min_error = self.mean_squared_error(x, np.mean(y)) * 2 """ loop over all possible splits for the decision tree. find the best split. 
@@ -82,34 +82,34 @@ def train(self, X, y): then the data set is not split and the average for the entire array is used as the predictor """ - for i in range(len(X)): - if len(X[:i]) < self.min_leaf_size: + for i in range(len(x)): + if len(x[:i]) < self.min_leaf_size: continue - elif len(X[i:]) < self.min_leaf_size: + elif len(x[i:]) < self.min_leaf_size: continue else: - error_left = self.mean_squared_error(X[:i], np.mean(y[:i])) - error_right = self.mean_squared_error(X[i:], np.mean(y[i:])) + error_left = self.mean_squared_error(x[:i], np.mean(y[:i])) + error_right = self.mean_squared_error(x[i:], np.mean(y[i:])) error = error_left + error_right if error < min_error: best_split = i min_error = error if best_split != 0: - left_X = X[:best_split] + left_x = x[:best_split] left_y = y[:best_split] - right_X = X[best_split:] + right_x = x[best_split:] right_y = y[best_split:] - self.decision_boundary = X[best_split] - self.left = Decision_Tree( + self.decision_boundary = x[best_split] + self.left = DecisionTree( depth=self.depth - 1, min_leaf_size=self.min_leaf_size ) - self.right = Decision_Tree( + self.right = DecisionTree( depth=self.depth - 1, min_leaf_size=self.min_leaf_size ) - self.left.train(left_X, left_y) - self.right.train(right_X, right_y) + self.left.train(left_x, left_y) + self.right.train(right_x, right_y) else: self.prediction = np.mean(y) @@ -134,7 +134,7 @@ def predict(self, x): return None -class Test_Decision_Tree: +class TestDecisionTree: """Decision Tres test class""" @staticmethod @@ -159,11 +159,11 @@ def main(): predict the label of 10 different test values. Then the mean squared error over this test is displayed. """ - X = np.arange(-1.0, 1.0, 0.005) - y = np.sin(X) + x = np.arange(-1.0, 1.0, 0.005) + y = np.sin(x) - tree = Decision_Tree(depth=10, min_leaf_size=10) - tree.train(X, y) + tree = DecisionTree(depth=10, min_leaf_size=10) + tree.train(x, y) test_cases = (np.random.rand(10) * 2) - 1 predictions = np.array([tree.predict(x) for x in test_cases]) diff --git a/machine_learning/gaussian_naive_bayes.py b/machine_learning/gaussian_naive_bayes.py index c200aa5a4d2d..77e7326626c4 100644 --- a/machine_learning/gaussian_naive_bayes.py +++ b/machine_learning/gaussian_naive_bayes.py @@ -17,19 +17,19 @@ def main(): iris = load_iris() # Split dataset into train and test data - X = iris["data"] # features - Y = iris["target"] + x = iris["data"] # features + y = iris["target"] x_train, x_test, y_train, y_test = train_test_split( - X, Y, test_size=0.3, random_state=1 + x, y, test_size=0.3, random_state=1 ) # Gaussian Naive Bayes - NB_model = GaussianNB() - NB_model.fit(x_train, y_train) + nb_model = GaussianNB() + nb_model.fit(x_train, y_train) # Display Confusion Matrix plot_confusion_matrix( - NB_model, + nb_model, x_test, y_test, display_labels=iris["target_names"], diff --git a/machine_learning/gradient_boosting_regressor.py b/machine_learning/gradient_boosting_regressor.py index c73e30680a67..c082f3cafe10 100644 --- a/machine_learning/gradient_boosting_regressor.py +++ b/machine_learning/gradient_boosting_regressor.py @@ -26,25 +26,25 @@ def main(): print(df_boston.describe().T) # Feature selection - X = df_boston.iloc[:, :-1] + x = df_boston.iloc[:, :-1] y = df_boston.iloc[:, -1] # target variable # split the data with 75% train and 25% test sets. 
- X_train, X_test, y_train, y_test = train_test_split( - X, y, random_state=0, test_size=0.25 + x_train, x_test, y_train, y_test = train_test_split( + x, y, random_state=0, test_size=0.25 ) model = GradientBoostingRegressor( n_estimators=500, max_depth=5, min_samples_split=4, learning_rate=0.01 ) # training the model - model.fit(X_train, y_train) + model.fit(x_train, y_train) # to see how good the model fit the data - training_score = model.score(X_train, y_train).round(3) - test_score = model.score(X_test, y_test).round(3) + training_score = model.score(x_train, y_train).round(3) + test_score = model.score(x_test, y_test).round(3) print("Training score of GradientBoosting is :", training_score) print("The test score of GradientBoosting is :", test_score) # Let us evaluation the model by finding the errors - y_pred = model.predict(X_test) + y_pred = model.predict(x_test) # The mean squared error print(f"Mean squared error: {mean_squared_error(y_test, y_pred):.2f}") diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 60450b7f8493..5dc2b7118b56 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -69,8 +69,8 @@ def get_initial_centroids(data, k, seed=None): return centroids -def centroid_pairwise_dist(X, centroids): - return pairwise_distances(X, centroids, metric="euclidean") +def centroid_pairwise_dist(x, centroids): + return pairwise_distances(x, centroids, metric="euclidean") def assign_clusters(data, centroids): @@ -197,8 +197,8 @@ def kmeans( plot_heterogeneity(heterogeneity, k) -def ReportGenerator( - df: pd.DataFrame, ClusteringVariables: np.ndarray, FillMissingReport=None +def report_generator( + df: pd.DataFrame, clustering_variables: np.ndarray, fill_missing_report=None ) -> pd.DataFrame: """ Function generates easy-erading clustering report. 
It takes 2 arguments as an input: @@ -214,7 +214,7 @@ def ReportGenerator( >>> data['col2'] = [100, 200, 300] >>> data['col3'] = [10, 20, 30] >>> data['Cluster'] = [1, 1, 2] - >>> ReportGenerator(data, ['col1', 'col2'], 0) + >>> report_generator(data, ['col1', 'col2'], 0) Features Type Mark 1 2 0 # of Customers ClusterSize False 2.000000 1.000000 1 % of Customers ClusterProportion False 0.666667 0.333333 @@ -231,8 +231,8 @@ def ReportGenerator( [104 rows x 5 columns] """ # Fill missing values with given rules - if FillMissingReport: - df.fillna(value=FillMissingReport, inplace=True) + if fill_missing_report: + df.fillna(value=fill_missing_report, inplace=True) df["dummy"] = 1 numeric_cols = df.select_dtypes(np.number).columns report = ( @@ -313,7 +313,7 @@ def ReportGenerator( report = pd.concat( [report, a, clustersize, clusterproportion], axis=0 ) # concat report with clustert size and nan values - report["Mark"] = report["Features"].isin(ClusteringVariables) + report["Mark"] = report["Features"].isin(clustering_variables) cols = report.columns.tolist() cols = cols[0:2] + cols[-1:] + cols[2:-1] report = report[cols] diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py index db6868687661..6c542ab825aa 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.py +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -41,11 +41,11 @@ def local_weight( [0.08272556]]) """ weight = weighted_matrix(point, training_data_x, bandwidth) - W = (training_data_x.T * (weight * training_data_x)).I * ( + w = (training_data_x.T * (weight * training_data_x)).I * ( training_data_x.T * weight * training_data_y.T ) - return W + return w def local_weight_regression( diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py index 48d88ef61185..87bc8f6681cc 100644 --- a/machine_learning/logistic_regression.py +++ b/machine_learning/logistic_regression.py @@ -35,25 +35,25 @@ def cost_function(h, y): return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean() -def log_likelihood(X, Y, weights): - scores = np.dot(X, weights) - return np.sum(Y * scores - np.log(1 + np.exp(scores))) +def log_likelihood(x, y, weights): + scores = np.dot(x, weights) + return np.sum(y * scores - np.log(1 + np.exp(scores))) # here alpha is the learning rate, X is the feature matrix,y is the target matrix -def logistic_reg(alpha, X, y, max_iterations=70000): - theta = np.zeros(X.shape[1]) +def logistic_reg(alpha, x, y, max_iterations=70000): + theta = np.zeros(x.shape[1]) for iterations in range(max_iterations): - z = np.dot(X, theta) + z = np.dot(x, theta) h = sigmoid_function(z) - gradient = np.dot(X.T, h - y) / y.size + gradient = np.dot(x.T, h - y) / y.size theta = theta - alpha * gradient # updating the weights - z = np.dot(X, theta) + z = np.dot(x, theta) h = sigmoid_function(z) - J = cost_function(h, y) + j = cost_function(h, y) if iterations % 100 == 0: - print(f"loss: {J} \t") # printing the loss after every 100 iterations + print(f"loss: {j} \t") # printing the loss after every 100 iterations return theta @@ -61,23 +61,23 @@ def logistic_reg(alpha, X, y, max_iterations=70000): if __name__ == "__main__": iris = datasets.load_iris() - X = iris.data[:, :2] + x = iris.data[:, :2] y = (iris.target != 0) * 1 alpha = 0.1 - theta = logistic_reg(alpha, X, y, max_iterations=70000) + theta = logistic_reg(alpha, x, y, max_iterations=70000) print("theta: ", theta) # 
printing the theta i.e our weights vector - def predict_prob(X): + def predict_prob(x): return sigmoid_function( - np.dot(X, theta) + np.dot(x, theta) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) - plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color="b", label="0") - plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color="r", label="1") - (x1_min, x1_max) = (X[:, 0].min(), X[:, 0].max()) - (x2_min, x2_max) = (X[:, 1].min(), X[:, 1].max()) + plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0") + plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1") + (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max()) + (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max()) (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max)) grid = np.c_[xx1.ravel(), xx2.ravel()] probs = predict_prob(grid).reshape(xx1.shape) diff --git a/machine_learning/multilayer_perceptron_classifier.py b/machine_learning/multilayer_perceptron_classifier.py index 604185cef677..e99a4131e972 100644 --- a/machine_learning/multilayer_perceptron_classifier.py +++ b/machine_learning/multilayer_perceptron_classifier.py @@ -15,12 +15,12 @@ Y = clf.predict(test) -def wrapper(Y): +def wrapper(y): """ >>> wrapper(Y) [0, 0, 1] """ - return list(Y) + return list(y) if __name__ == "__main__": diff --git a/machine_learning/random_forest_classifier.py b/machine_learning/random_forest_classifier.py index 6370254090f7..3267fa209660 100644 --- a/machine_learning/random_forest_classifier.py +++ b/machine_learning/random_forest_classifier.py @@ -17,10 +17,10 @@ def main(): iris = load_iris() # Split dataset into train and test data - X = iris["data"] # features - Y = iris["target"] + x = iris["data"] # features + y = iris["target"] x_train, x_test, y_train, y_test = train_test_split( - X, Y, test_size=0.3, random_state=1 + x, y, test_size=0.3, random_state=1 ) # Random Forest Classifier diff --git a/machine_learning/random_forest_regressor.py b/machine_learning/random_forest_regressor.py index 0aade626b038..1001931a109d 100644 --- a/machine_learning/random_forest_regressor.py +++ b/machine_learning/random_forest_regressor.py @@ -17,10 +17,10 @@ def main(): print(boston.keys()) # Split dataset into train and test data - X = boston["data"] # features - Y = boston["target"] + x = boston["data"] # features + y = boston["target"] x_train, x_test, y_train, y_test = train_test_split( - X, Y, test_size=0.3, random_state=1 + x, y, test_size=0.3, random_state=1 ) # Random Forest Regressor diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index cc7868d0fd8e..fb4b35f31289 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -80,7 +80,7 @@ def __init__( # Calculate alphas using SMO algorithm def fit(self): - K = self._k + k = self._k state = None while True: @@ -106,14 +106,14 @@ def fit(self): # 3: update threshold(b) b1_new = np.float64( -e1 - - y1 * K(i1, i1) * (a1_new - a1) - - y2 * K(i2, i1) * (a2_new - a2) + - y1 * k(i1, i1) * (a1_new - a1) + - y2 * k(i2, i1) * (a2_new - a2) + self._b ) b2_new = np.float64( -e2 - - y2 * K(i2, i2) * (a2_new - a2) - - y1 * K(i1, i2) * (a1_new - a1) + - y2 * k(i2, i2) * (a2_new - a2) + - y1 * k(i1, i2) * (a1_new - a1) + self._b ) if 0.0 < a1_new < self._c: @@ -134,8 +134,8 @@ def fit(self): if s == i1 or s == i2: continue self._error[s] += ( - y1 * (a1_new - a1) * K(i1, s) - + y2 * 
(a2_new - a2) * K(i2, s) + y1 * (a1_new - a1) * k(i1, s) + + y2 * (a2_new - a2) * k(i2, s) + (self._b - b_old) ) @@ -305,56 +305,56 @@ def _choose_a2(self, i1): # Get the new alpha2 and new alpha1 def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): - K = self._k + k = self._k if i1 == i2: return None, None # calculate L and H which bound the new alpha2 s = y1 * y2 if s == -1: - L, H = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) + l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) else: - L, H = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) - if L == H: + l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) + if l == h: # noqa: E741 return None, None # calculate eta - k11 = K(i1, i1) - k22 = K(i2, i2) - k12 = K(i1, i2) + k11 = k(i1, i1) + k22 = k(i2, i2) + k12 = k(i1, i2) eta = k11 + k22 - 2.0 * k12 # select the new alpha2 which could get the minimal objectives if eta > 0.0: a2_new_unc = a2 + (y2 * (e1 - e2)) / eta # a2_new has a boundary - if a2_new_unc >= H: - a2_new = H - elif a2_new_unc <= L: - a2_new = L + if a2_new_unc >= h: + a2_new = h + elif a2_new_unc <= l: + a2_new = l else: a2_new = a2_new_unc else: b = self._b - l1 = a1 + s * (a2 - L) - h1 = a1 + s * (a2 - H) + l1 = a1 + s * (a2 - l) + h1 = a1 + s * (a2 - h) # way 1 - f1 = y1 * (e1 + b) - a1 * K(i1, i1) - s * a2 * K(i1, i2) - f2 = y2 * (e2 + b) - a2 * K(i2, i2) - s * a1 * K(i1, i2) + f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2) + f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2) ol = ( l1 * f1 - + L * f2 - + 1 / 2 * l1**2 * K(i1, i1) - + 1 / 2 * L**2 * K(i2, i2) - + s * L * l1 * K(i1, i2) + + l * f2 + + 1 / 2 * l1**2 * k(i1, i1) + + 1 / 2 * l**2 * k(i2, i2) + + s * l * l1 * k(i1, i2) ) oh = ( h1 * f1 - + H * f2 - + 1 / 2 * h1**2 * K(i1, i1) - + 1 / 2 * H**2 * K(i2, i2) - + s * H * h1 * K(i1, i2) + + h * f2 + + 1 / 2 * h1**2 * k(i1, i1) + + 1 / 2 * h**2 * k(i2, i2) + + s * h * h1 * k(i1, i2) ) """ # way 2 @@ -362,9 +362,9 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): objectives """ if ol < (oh - self._eps): - a2_new = L + a2_new = l elif ol > oh + self._eps: - a2_new = H + a2_new = h else: a2_new = a2 diff --git a/machine_learning/word_frequency_functions.py b/machine_learning/word_frequency_functions.py index 3e8faf39cf07..8fd2741f611c 100644 --- a/machine_learning/word_frequency_functions.py +++ b/machine_learning/word_frequency_functions.py @@ -83,7 +83,7 @@ def document_frequency(term: str, corpus: str) -> tuple[int, int]: return (len([doc for doc in docs if term in doc]), len(docs)) -def inverse_document_frequency(df: int, N: int, smoothing=False) -> float: +def inverse_document_frequency(df: int, n: int, smoothing=False) -> float: """ Return an integer denoting the importance of a word. 
This measure of importance is @@ -109,15 +109,15 @@ def inverse_document_frequency(df: int, N: int, smoothing=False) -> float: 1.477 """ if smoothing: - if N == 0: + if n == 0: raise ValueError("log10(0) is undefined.") - return round(1 + log10(N / (1 + df)), 3) + return round(1 + log10(n / (1 + df)), 3) if df == 0: raise ZeroDivisionError("df must be > 0") - elif N == 0: + elif n == 0: raise ValueError("log10(0) is undefined.") - return round(log10(N / df), 3) + return round(log10(n / df), 3) def tf_idf(tf: int, idf: int) -> float: diff --git a/maths/binomial_coefficient.py b/maths/binomial_coefficient.py index 4def041492f3..0d4b3d1a8d9a 100644 --- a/maths/binomial_coefficient.py +++ b/maths/binomial_coefficient.py @@ -5,16 +5,16 @@ def binomial_coefficient(n, r): >>> binomial_coefficient(10, 5) 252 """ - C = [0 for i in range(r + 1)] + c = [0 for i in range(r + 1)] # nc0 = 1 - C[0] = 1 + c[0] = 1 for i in range(1, n + 1): # to compute current row from previous row. j = min(i, r) while j > 0: - C[j] += C[j - 1] + c[j] += c[j - 1] j -= 1 - return C[r] + return c[r] print(binomial_coefficient(n=10, r=5)) diff --git a/maths/carmichael_number.py b/maths/carmichael_number.py index 09a4fedfb763..c9c144759246 100644 --- a/maths/carmichael_number.py +++ b/maths/carmichael_number.py @@ -30,7 +30,7 @@ def power(x: int, y: int, mod: int) -> int: return temp -def isCarmichaelNumber(n: int) -> bool: +def is_carmichael_number(n: int) -> bool: b = 2 while b < n: if gcd(b, n) == 1 and power(b, n - 1, n) != 1: @@ -41,7 +41,7 @@ def isCarmichaelNumber(n: int) -> bool: if __name__ == "__main__": number = int(input("Enter number: ").strip()) - if isCarmichaelNumber(number): + if is_carmichael_number(number): print(f"{number} is a Carmichael Number.") else: print(f"{number} is not a Carmichael Number.") diff --git a/maths/decimal_isolate.py b/maths/decimal_isolate.py index 0e3967a4671d..1b8f6cbcad89 100644 --- a/maths/decimal_isolate.py +++ b/maths/decimal_isolate.py @@ -4,7 +4,7 @@ """ -def decimal_isolate(number, digitAmount): +def decimal_isolate(number, digit_amount): """ Isolates the decimal part of a number. 
@@ -28,8 +28,8 @@ def decimal_isolate(number, digitAmount): >>> decimal_isolate(-14.123, 3) -0.123 """ - if digitAmount > 0: - return round(number - int(number), digitAmount) + if digit_amount > 0: + return round(number - int(number), digit_amount) return number - int(number) diff --git a/maths/euler_method.py b/maths/euler_method.py index af7eecb2ff29..30f193e6daa5 100644 --- a/maths/euler_method.py +++ b/maths/euler_method.py @@ -29,12 +29,12 @@ def explicit_euler( >>> y[-1] 144.77277243257308 """ - N = int(np.ceil((x_end - x0) / step_size)) - y = np.zeros((N + 1,)) + n = int(np.ceil((x_end - x0) / step_size)) + y = np.zeros((n + 1,)) y[0] = y0 x = x0 - for k in range(N): + for k in range(n): y[k + 1] = y[k] + step_size * ode_func(x, y[k]) x += step_size diff --git a/maths/euler_modified.py b/maths/euler_modified.py index 5659fa063fc4..14bddadf4c53 100644 --- a/maths/euler_modified.py +++ b/maths/euler_modified.py @@ -33,12 +33,12 @@ def euler_modified( >>> y[-1] 0.5525976431951775 """ - N = int(np.ceil((x_end - x0) / step_size)) - y = np.zeros((N + 1,)) + n = int(np.ceil((x_end - x0) / step_size)) + y = np.zeros((n + 1,)) y[0] = y0 x = x0 - for k in range(N): + for k in range(n): y_get = y[k] + step_size * ode_func(x, y[k]) y[k + 1] = y[k] + ( (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get)) diff --git a/maths/hardy_ramanujanalgo.py b/maths/hardy_ramanujanalgo.py index e36f763da19e..6929533fc389 100644 --- a/maths/hardy_ramanujanalgo.py +++ b/maths/hardy_ramanujanalgo.py @@ -4,9 +4,9 @@ import math -def exactPrimeFactorCount(n): +def exact_prime_factor_count(n): """ - >>> exactPrimeFactorCount(51242183) + >>> exact_prime_factor_count(51242183) 3 """ count = 0 @@ -36,7 +36,7 @@ def exactPrimeFactorCount(n): if __name__ == "__main__": n = 51242183 - print(f"The number of distinct prime factors is/are {exactPrimeFactorCount(n)}") + print(f"The number of distinct prime factors is/are {exact_prime_factor_count(n)}") print(f"The value of log(log(n)) is {math.log(math.log(n)):.4f}") """ diff --git a/maths/jaccard_similarity.py b/maths/jaccard_similarity.py index 4f24d308f340..77f4b90ea79f 100644 --- a/maths/jaccard_similarity.py +++ b/maths/jaccard_similarity.py @@ -14,7 +14,7 @@ """ -def jaccard_similariy(setA, setB, alternativeUnion=False): +def jaccard_similariy(set_a, set_b, alternative_union=False): """ Finds the jaccard similarity between two sets. Essentially, its intersection over union. @@ -24,8 +24,8 @@ def jaccard_similariy(setA, setB, alternativeUnion=False): of a set with itself be 1/2 instead of 1. [MMDS 2nd Edition, Page 77] Parameters: - :setA (set,list,tuple): A non-empty set/list - :setB (set,list,tuple): A non-empty set/list + :set_a (set,list,tuple): A non-empty set/list + :set_b (set,list,tuple): A non-empty set/list :alternativeUnion (boolean): If True, use sum of number of items as union @@ -33,48 +33,48 @@ def jaccard_similariy(setA, setB, alternativeUnion=False): (float) The jaccard similarity between the two sets. 
Examples: - >>> setA = {'a', 'b', 'c', 'd', 'e'} - >>> setB = {'c', 'd', 'e', 'f', 'h', 'i'} - >>> jaccard_similariy(setA,setB) + >>> set_a = {'a', 'b', 'c', 'd', 'e'} + >>> set_b = {'c', 'd', 'e', 'f', 'h', 'i'} + >>> jaccard_similariy(set_a, set_b) 0.375 - >>> jaccard_similariy(setA,setA) + >>> jaccard_similariy(set_a, set_a) 1.0 - >>> jaccard_similariy(setA,setA,True) + >>> jaccard_similariy(set_a, set_a, True) 0.5 - >>> setA = ['a', 'b', 'c', 'd', 'e'] - >>> setB = ('c', 'd', 'e', 'f', 'h', 'i') - >>> jaccard_similariy(setA,setB) + >>> set_a = ['a', 'b', 'c', 'd', 'e'] + >>> set_b = ('c', 'd', 'e', 'f', 'h', 'i') + >>> jaccard_similariy(set_a, set_b) 0.375 """ - if isinstance(setA, set) and isinstance(setB, set): + if isinstance(set_a, set) and isinstance(set_b, set): - intersection = len(setA.intersection(setB)) + intersection = len(set_a.intersection(set_b)) - if alternativeUnion: - union = len(setA) + len(setB) + if alternative_union: + union = len(set_a) + len(set_b) else: - union = len(setA.union(setB)) + union = len(set_a.union(set_b)) return intersection / union - if isinstance(setA, (list, tuple)) and isinstance(setB, (list, tuple)): + if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)): - intersection = [element for element in setA if element in setB] + intersection = [element for element in set_a if element in set_b] - if alternativeUnion: - union = len(setA) + len(setB) + if alternative_union: + union = len(set_a) + len(set_b) else: - union = setA + [element for element in setB if element not in setA] + union = set_a + [element for element in set_b if element not in set_a] return len(intersection) / len(union) if __name__ == "__main__": - setA = {"a", "b", "c", "d", "e"} - setB = {"c", "d", "e", "f", "h", "i"} - print(jaccard_similariy(setA, setB)) + set_a = {"a", "b", "c", "d", "e"} + set_b = {"c", "d", "e", "f", "h", "i"} + print(jaccard_similariy(set_a, set_b)) diff --git a/maths/krishnamurthy_number.py b/maths/krishnamurthy_number.py index c88f68a07f27..c1d8a8fc5f56 100644 --- a/maths/krishnamurthy_number.py +++ b/maths/krishnamurthy_number.py @@ -33,12 +33,12 @@ def krishnamurthy(number: int) -> bool: True """ - factSum = 0 + fact_sum = 0 duplicate = number while duplicate > 0: duplicate, digit = divmod(duplicate, 10) - factSum += factorial(digit) - return factSum == number + fact_sum += factorial(digit) + return fact_sum == number if __name__ == "__main__": diff --git a/maths/kth_lexicographic_permutation.py b/maths/kth_lexicographic_permutation.py index 23eab626fbf8..b85558aca6d4 100644 --- a/maths/kth_lexicographic_permutation.py +++ b/maths/kth_lexicographic_permutation.py @@ -1,17 +1,17 @@ -def kthPermutation(k, n): +def kth_permutation(k, n): """ Finds k'th lexicographic permutation (in increasing order) of 0,1,2,...n-1 in O(n^2) time. Examples: First permutation is always 0,1,2,...n - >>> kthPermutation(0,5) + >>> kth_permutation(0,5) [0, 1, 2, 3, 4] The order of permutation of 0,1,2,3 is [0,1,2,3], [0,1,3,2], [0,2,1,3], [0,2,3,1], [0,3,1,2], [0,3,2,1], [1,0,2,3], [1,0,3,2], [1,2,0,3], [1,2,3,0], [1,3,0,2] - >>> kthPermutation(10,4) + >>> kth_permutation(10,4) [1, 3, 0, 2] """ # Factorails from 1! to (n-1)! 
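The kth_permutation hunk above shows only the renamed signature and its doctests; the body is based on the factorial number system, where the k-th permutation is read off digit by digit using factorials of decreasing size. A minimal, self-contained sketch of that idea (the name kth_permutation_sketch and the exact loop shape are illustrative assumptions, not the repository's implementation):

from math import factorial


def kth_permutation_sketch(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of 0, 1, ..., n-1.

    >>> kth_permutation_sketch(10, 4)
    [1, 3, 0, 2]
    """
    elements = list(range(n))
    permutation = []
    for i in range(n - 1, 0, -1):
        block = factorial(i)  # number of permutations sharing one leading element
        permutation.append(elements.pop(k // block))
        k %= block
    return permutation + elements  # the single remaining element closes the list

For k = 10 and n = 4 this reproduces the doctest shown in the hunk, [1, 3, 0, 2].
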
diff --git a/maths/lucas_lehmer_primality_test.py b/maths/lucas_lehmer_primality_test.py index 15e25cbfe996..916abfcc175e 100644 --- a/maths/lucas_lehmer_primality_test.py +++ b/maths/lucas_lehmer_primality_test.py @@ -30,9 +30,9 @@ def lucas_lehmer_test(p: int) -> bool: return True s = 4 - M = (1 << p) - 1 + m = (1 << p) - 1 for i in range(p - 2): - s = ((s * s) - 2) % M + s = ((s * s) - 2) % m return s == 0 diff --git a/maths/primelib.py b/maths/primelib.py index 3da9c56f66d6..7d2a22f39c59 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -8,27 +8,27 @@ Overview: -isPrime(number) -sieveEr(N) -getPrimeNumbers(N) -primeFactorization(number) -greatestPrimeFactor(number) -smallestPrimeFactor(number) -getPrime(n) -getPrimesBetween(pNumber1, pNumber2) +is_prime(number) +sieve_er(N) +get_prime_numbers(N) +prime_factorization(number) +greatest_prime_factor(number) +smallest_prime_factor(number) +get_prime(n) +get_primes_between(pNumber1, pNumber2) ---- -isEven(number) -isOdd(number) +is_even(number) +is_odd(number) gcd(number1, number2) // greatest common divisor -kgV(number1, number2) // least common multiple -getDivisors(number) // all divisors of 'number' inclusive 1, number -isPerfectNumber(number) +kg_v(number1, number2) // least common multiple +get_divisors(number) // all divisors of 'number' inclusive 1, number +is_perfect_number(number) NEW-FUNCTIONS -simplifyFraction(numerator, denominator) +simplify_fraction(numerator, denominator) factorial (n) // n! fib (n) // calculate the n-th fibonacci term. @@ -75,7 +75,7 @@ def is_prime(number: int) -> bool: # ------------------------------------------ -def sieveEr(N): +def sieve_er(n): """ input: positive integer 'N' > 2 returns a list of prime numbers from 2 up to N. @@ -86,23 +86,23 @@ def sieveEr(N): """ # precondition - assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2" + assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N - beginList = [x for x in range(2, N + 1)] + begin_list = [x for x in range(2, n + 1)] ans = [] # this list will be returns. # actual sieve of erathostenes - for i in range(len(beginList)): + for i in range(len(begin_list)): - for j in range(i + 1, len(beginList)): + for j in range(i + 1, len(begin_list)): - if (beginList[i] != 0) and (beginList[j] % beginList[i] == 0): - beginList[j] = 0 + if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): + begin_list[j] = 0 # filters actual prime numbers. 
- ans = [x for x in beginList if x != 0] + ans = [x for x in begin_list if x != 0] # precondition assert isinstance(ans, list), "'ans' must been from type list" @@ -113,7 +113,7 @@ def sieveEr(N): # -------------------------------- -def getPrimeNumbers(N): +def get_prime_numbers(n): """ input: positive integer 'N' > 2 returns a list of prime numbers from 2 up to N (inclusive) @@ -121,13 +121,13 @@ def getPrimeNumbers(N): """ # precondition - assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2" + assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2" ans = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' - for number in range(2, N + 1): + for number in range(2, n + 1): if is_prime(number): @@ -142,7 +142,7 @@ def getPrimeNumbers(N): # ----------------------------------------- -def primeFactorization(number): +def prime_factorization(number): """ input: positive integer 'number' returns a list of the prime number factors of 'number' @@ -186,7 +186,7 @@ def primeFactorization(number): # ----------------------------------------- -def greatestPrimeFactor(number): +def greatest_prime_factor(number): """ input: positive integer 'number' >= 0 returns the greatest prime number factor of 'number' @@ -200,9 +200,9 @@ def greatestPrimeFactor(number): ans = 0 # prime factorization of 'number' - primeFactors = primeFactorization(number) + prime_factors = prime_factorization(number) - ans = max(primeFactors) + ans = max(prime_factors) # precondition assert isinstance(ans, int), "'ans' must been from type int" @@ -213,7 +213,7 @@ def greatestPrimeFactor(number): # ---------------------------------------------- -def smallestPrimeFactor(number): +def smallest_prime_factor(number): """ input: integer 'number' >= 0 returns the smallest prime number factor of 'number' @@ -227,9 +227,9 @@ def smallestPrimeFactor(number): ans = 0 # prime factorization of 'number' - primeFactors = primeFactorization(number) + prime_factors = prime_factorization(number) - ans = min(primeFactors) + ans = min(prime_factors) # precondition assert isinstance(ans, int), "'ans' must been from type int" @@ -240,7 +240,7 @@ def smallestPrimeFactor(number): # ---------------------- -def isEven(number): +def is_even(number): """ input: integer 'number' returns true if 'number' is even, otherwise false. @@ -256,7 +256,7 @@ def isEven(number): # ------------------------ -def isOdd(number): +def is_odd(number): """ input: integer 'number' returns true if 'number' is odd, otherwise false. @@ -281,14 +281,14 @@ def goldbach(number): # precondition assert ( - isinstance(number, int) and (number > 2) and isEven(number) + isinstance(number, int) and (number > 2) and is_even(number) ), "'number' must been an int, even and > 2" ans = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' - primeNumbers = getPrimeNumbers(number) - lenPN = len(primeNumbers) + prime_numbers = get_prime_numbers(number) + len_pn = len(prime_numbers) # run variable for while-loops. i = 0 @@ -297,16 +297,16 @@ def goldbach(number): # exit variable. 
for break up the loops loop = True - while i < lenPN and loop: + while i < len_pn and loop: j = i + 1 - while j < lenPN and loop: + while j < len_pn and loop: - if primeNumbers[i] + primeNumbers[j] == number: + if prime_numbers[i] + prime_numbers[j] == number: loop = False - ans.append(primeNumbers[i]) - ans.append(primeNumbers[j]) + ans.append(prime_numbers[i]) + ans.append(prime_numbers[j]) j += 1 @@ -361,7 +361,7 @@ def gcd(number1, number2): # ---------------------------------------------------- -def kgV(number1, number2): +def kg_v(number1, number2): """ Least common multiple input: two positive integer 'number1' and 'number2' @@ -382,13 +382,13 @@ def kgV(number1, number2): if number1 > 1 and number2 > 1: # builds the prime factorization of 'number1' and 'number2' - primeFac1 = primeFactorization(number1) - primeFac2 = primeFactorization(number2) + prime_fac_1 = prime_factorization(number1) + prime_fac_2 = prime_factorization(number2) elif number1 == 1 or number2 == 1: - primeFac1 = [] - primeFac2 = [] + prime_fac_1 = [] + prime_fac_2 = [] ans = max(number1, number2) count1 = 0 @@ -397,21 +397,21 @@ def kgV(number1, number2): done = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 - for n in primeFac1: + for n in prime_fac_1: if n not in done: - if n in primeFac2: + if n in prime_fac_2: - count1 = primeFac1.count(n) - count2 = primeFac2.count(n) + count1 = prime_fac_1.count(n) + count2 = prime_fac_2.count(n) for i in range(max(count1, count2)): ans *= n else: - count1 = primeFac1.count(n) + count1 = prime_fac_1.count(n) for i in range(count1): ans *= n @@ -419,11 +419,11 @@ def kgV(number1, number2): done.append(n) # iterates through primeFac2 - for n in primeFac2: + for n in prime_fac_2: if n not in done: - count2 = primeFac2.count(n) + count2 = prime_fac_2.count(n) for i in range(count2): ans *= n @@ -441,7 +441,7 @@ def kgV(number1, number2): # ---------------------------------- -def getPrime(n): +def get_prime(n): """ Gets the n-th prime number. input: positive integer 'n' >= 0 @@ -476,7 +476,7 @@ def getPrime(n): # --------------------------------------------------- -def getPrimesBetween(pNumber1, pNumber2): +def get_primes_between(p_number_1, p_number_2): """ input: prime numbers 'pNumber1' and 'pNumber2' pNumber1 < pNumber2 @@ -486,10 +486,10 @@ def getPrimesBetween(pNumber1, pNumber2): # precondition assert ( - is_prime(pNumber1) and is_prime(pNumber2) and (pNumber1 < pNumber2) + is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" - number = pNumber1 + 1 # jump to the next number + number = p_number_1 + 1 # jump to the next number ans = [] # this list will be returns. @@ -498,7 +498,7 @@ def getPrimesBetween(pNumber1, pNumber2): while not is_prime(number): number += 1 - while number < pNumber2: + while number < p_number_2: ans.append(number) @@ -510,7 +510,9 @@ def getPrimesBetween(pNumber1, pNumber2): # precondition assert ( - isinstance(ans, list) and ans[0] != pNumber1 and ans[len(ans) - 1] != pNumber2 + isinstance(ans, list) + and ans[0] != p_number_1 + and ans[len(ans) - 1] != p_number_2 ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! 
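The kg_v hunk above rebuilds the least common multiple by merging the two prime factorizations. The same value can be cross-checked with the identity lcm(a, b) = a * b / gcd(a, b); a minimal sketch of that check (lcm_via_gcd is an illustrative helper, not part of primelib, and it uses math.gcd rather than the module's own gcd only to stay self-contained):

from math import gcd


def lcm_via_gcd(number1: int, number2: int) -> int:
    """Least common multiple via the gcd identity, as a cross-check for kg_v.

    >>> lcm_via_gcd(96, 14)
    672
    """
    if number1 < 1 or number2 < 1:
        raise ValueError("both arguments must be positive integers")
    # lcm(a, b) * gcd(a, b) == a * b, so integer division here is exact
    return number1 * number2 // gcd(number1, number2)

Both routes agree, e.g. lcm(96, 14) = 2**5 * 3 * 7 = 672 either way.
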
@@ -520,7 +522,7 @@ def getPrimesBetween(pNumber1, pNumber2): # ---------------------------------------------------- -def getDivisors(n): +def get_divisors(n): """ input: positive integer 'n' >= 1 returns all divisors of n (inclusive 1 and 'n') @@ -545,7 +547,7 @@ def getDivisors(n): # ---------------------------------------------------- -def isPerfectNumber(number): +def is_perfect_number(number): """ input: positive integer 'number' > 1 returns true if 'number' is a perfect number otherwise false. @@ -556,7 +558,7 @@ def isPerfectNumber(number): number > 1 ), "'number' must been an int and >= 1" - divisors = getDivisors(number) + divisors = get_divisors(number) # precondition assert ( @@ -572,7 +574,7 @@ def isPerfectNumber(number): # ------------------------------------------------------------ -def simplifyFraction(numerator, denominator): +def simplify_fraction(numerator, denominator): """ input: two integer 'numerator' and 'denominator' assumes: 'denominator' != 0 @@ -587,16 +589,16 @@ def simplifyFraction(numerator, denominator): ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. - gcdOfFraction = gcd(abs(numerator), abs(denominator)) + gcd_of_fraction = gcd(abs(numerator), abs(denominator)) # precondition assert ( - isinstance(gcdOfFraction, int) - and (numerator % gcdOfFraction == 0) - and (denominator % gcdOfFraction == 0) + isinstance(gcd_of_fraction, int) + and (numerator % gcd_of_fraction == 0) + and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" - return (numerator // gcdOfFraction, denominator // gcdOfFraction) + return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) # ----------------------------------------------------------------- diff --git a/maths/qr_decomposition.py b/maths/qr_decomposition.py index 5e15fede4f2a..a8414fbece87 100644 --- a/maths/qr_decomposition.py +++ b/maths/qr_decomposition.py @@ -1,7 +1,7 @@ import numpy as np -def qr_householder(A): +def qr_householder(a): """Return a QR-decomposition of the matrix A using Householder reflection. 
The QR-decomposition decomposes the matrix A of shape (m, n) into an @@ -37,14 +37,14 @@ def qr_householder(A): >>> np.allclose(np.triu(R), R) True """ - m, n = A.shape + m, n = a.shape t = min(m, n) - Q = np.eye(m) - R = A.copy() + q = np.eye(m) + r = a.copy() for k in range(t - 1): # select a column of modified matrix A': - x = R[k:, [k]] + x = r[k:, [k]] # construct first basis vector e1 = np.zeros_like(x) e1[0] = 1.0 @@ -55,14 +55,14 @@ def qr_householder(A): v /= np.linalg.norm(v) # construct the Householder matrix - Q_k = np.eye(m - k) - 2.0 * v @ v.T + q_k = np.eye(m - k) - 2.0 * v @ v.T # pad with ones and zeros as necessary - Q_k = np.block([[np.eye(k), np.zeros((k, m - k))], [np.zeros((m - k, k)), Q_k]]) + q_k = np.block([[np.eye(k), np.zeros((k, m - k))], [np.zeros((m - k, k)), q_k]]) - Q = Q @ Q_k.T - R = Q_k @ R + q = q @ q_k.T + r = q_k @ r - return Q, R + return q, r if __name__ == "__main__": diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index 0a431a115fb8..52442134de59 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -49,10 +49,10 @@ class FFT: A*B = 0*x^(-0+0j) + 1*x^(2+0j) + 2*x^(3+0j) + 3*x^(8+0j) + 4*x^(6+0j) + 5*x^(8+0j) """ - def __init__(self, polyA=None, polyB=None): + def __init__(self, poly_a=None, poly_b=None): # Input as list - self.polyA = list(polyA or [0])[:] - self.polyB = list(polyB or [0])[:] + self.polyA = list(poly_a or [0])[:] + self.polyB = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: @@ -64,22 +64,22 @@ def __init__(self, polyA=None, polyB=None): self.len_B = len(self.polyB) # Add 0 to make lengths equal a power of 2 - self.C_max_length = int( + self.c_max_length = int( 2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)) ) - while len(self.polyA) < self.C_max_length: + while len(self.polyA) < self.c_max_length: self.polyA.append(0) - while len(self.polyB) < self.C_max_length: + while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform - self.root = complex(mpmath.root(x=1, n=self.C_max_length, k=1)) + self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1)) # The product self.product = self.__multiply() # Discrete fourier transform of A and B - def __DFT(self, which): + def __dft(self, which): if which == "A": dft = [[x] for x in self.polyA] else: @@ -88,20 +88,20 @@ def __DFT(self, which): if len(dft) <= 1: return dft[0] # - next_ncol = self.C_max_length // 2 + next_ncol = self.c_max_length // 2 while next_ncol > 0: new_dft = [[] for i in range(next_ncol)] root = self.root**next_ncol # First half of next step current_root = 1 - for j in range(self.C_max_length // (next_ncol * 2)): + for j in range(self.c_max_length // (next_ncol * 2)): for i in range(next_ncol): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step current_root = 1 - for j in range(self.C_max_length // (next_ncol * 2)): + for j in range(self.c_max_length // (next_ncol * 2)): for i in range(next_ncol): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root @@ -112,65 +112,65 @@ def __DFT(self, which): # multiply the DFTs of A and B and find A*B def __multiply(self): - dftA = self.__DFT("A") - dftB = self.__DFT("B") - inverseC = [[dftA[i] * dftB[i] for i in range(self.C_max_length)]] - del dftA - del dftB + dft_a = self.__dft("A") + dft_b = self.__dft("B") + inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] + del dft_a + del dft_b # Corner 
Case - if len(inverseC[0]) <= 1: - return inverseC[0] + if len(inverce_c[0]) <= 1: + return inverce_c[0] # Inverse DFT next_ncol = 2 - while next_ncol <= self.C_max_length: - new_inverseC = [[] for i in range(next_ncol)] + while next_ncol <= self.c_max_length: + new_inverse_c = [[] for i in range(next_ncol)] root = self.root ** (next_ncol // 2) current_root = 1 # First half of next step - for j in range(self.C_max_length // next_ncol): + for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions - new_inverseC[i].append( + new_inverse_c[i].append( ( - inverseC[i][j] - + inverseC[i][j + self.C_max_length // next_ncol] + inverce_c[i][j] + + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions - new_inverseC[i + next_ncol // 2].append( + new_inverse_c[i + next_ncol // 2].append( ( - inverseC[i][j] - - inverseC[i][j + self.C_max_length // next_ncol] + inverce_c[i][j] + - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update - inverseC = new_inverseC + inverce_c = new_inverse_c next_ncol *= 2 # Unpack - inverseC = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverseC] + inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c] # Remove leading 0's - while inverseC[-1] == 0: - inverseC.pop() - return inverseC + while inverce_c[-1] == 0: + inverce_c.pop() + return inverce_c # Overwrite __str__ for print(); Shows A, B and A*B def __str__(self): - A = "A = " + " + ".join( + a = "A = " + " + ".join( f"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A]) ) - B = "B = " + " + ".join( + b = "B = " + " + ".join( f"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B]) ) - C = "A*B = " + " + ".join( + c = "A*B = " + " + ".join( f"{coef}*x^{i}" for coef, i in enumerate(self.product) ) - return "\n".join((A, B, C)) + return "\n".join((a, b, c)) # Unit tests diff --git a/maths/runge_kutta.py b/maths/runge_kutta.py index 383797daa5ac..4cac017ee89e 100644 --- a/maths/runge_kutta.py +++ b/maths/runge_kutta.py @@ -22,12 +22,12 @@ def runge_kutta(f, y0, x0, h, x_end): >>> y[-1] 148.41315904125113 """ - N = int(np.ceil((x_end - x0) / h)) - y = np.zeros((N + 1,)) + n = int(np.ceil((x_end - x0) / h)) + y = np.zeros((n + 1,)) y[0] = y0 x = x0 - for k in range(N): + for k in range(n): k1 = f(x, y[k]) k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1) k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2) diff --git a/maths/softmax.py b/maths/softmax.py index e021a7f8a6fe..04cf77525420 100644 --- a/maths/softmax.py +++ b/maths/softmax.py @@ -41,13 +41,13 @@ def softmax(vector): # Calculate e^x for each x in your vector where e is Euler's # number (approximately 2.718) - exponentVector = np.exp(vector) + exponent_vector = np.exp(vector) # Add up the all the exponentials - sumOfExponents = np.sum(exponentVector) + sum_of_exponents = np.sum(exponent_vector) # Divide every exponent by the sum of all exponents - softmax_vector = exponentVector / sumOfExponents + softmax_vector = exponent_vector / sum_of_exponents return softmax_vector diff --git a/matrix/count_islands_in_matrix.py b/matrix/count_islands_in_matrix.py index 00f9e14362b2..64c595e8499d 100644 --- a/matrix/count_islands_in_matrix.py +++ b/matrix/count_islands_in_matrix.py @@ -3,7 +3,7 @@ # connections. 
-class matrix: # Public class to implement a graph +class Matrix: # Public class to implement a graph def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None: self.ROW = row self.COL = col @@ -19,12 +19,12 @@ def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool: def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None: # Checking all 8 elements surrounding nth element - rowNbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order - colNbr = [-1, 0, 1, -1, 1, -1, 0, 1] + row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order + col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1] visited[i][j] = True # Make those cells visited for k in range(8): - if self.is_safe(i + rowNbr[k], j + colNbr[k], visited): - self.diffs(i + rowNbr[k], j + colNbr[k], visited) + if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited): + self.diffs(i + row_nbr[k], j + col_nbr[k], visited) def count_islands(self) -> int: # And finally, count all islands. visited = [[False for j in range(self.COL)] for i in range(self.ROW)] diff --git a/matrix/inverse_of_matrix.py b/matrix/inverse_of_matrix.py index 92780e656ea1..770ce39b584f 100644 --- a/matrix/inverse_of_matrix.py +++ b/matrix/inverse_of_matrix.py @@ -27,7 +27,7 @@ def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]: [[0.25, -0.5], [-0.3, 1.0]] """ - D = Decimal # An abbreviation for conciseness + d = Decimal # An abbreviation for conciseness # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices @@ -35,7 +35,7 @@ def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]: raise ValueError("Please provide a matrix of size 2x2.") # Calculate the determinant of the matrix - determinant = D(matrix[0][0]) * D(matrix[1][1]) - D(matrix[1][0]) * D(matrix[0][1]) + determinant = d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]) if determinant == 0: raise ValueError("This matrix has no inverse.") @@ -45,4 +45,4 @@ def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]: swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix - return [[float(D(n) / determinant) or 0.0 for n in row] for row in swapped_matrix] + return [[float(d(n) / determinant) or 0.0 for n in row] for row in swapped_matrix] diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index a0c93f11574e..29c9b3381b55 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -54,15 +54,15 @@ def single_line(row_vector: list[float]) -> str: def __repr__(self) -> str: return str(self) - def validateIndices(self, loc: tuple[int, int]) -> bool: + def validate_indicies(self, loc: tuple[int, int]) -> bool: """ - + Check if given indices are valid to pick element from matrix. 
Example: >>> a = Matrix(2, 6, 0) - >>> a.validateIndices((2, 7)) + >>> a.validate_indicies((2, 7)) False - >>> a.validateIndices((0, 0)) + >>> a.validate_indicies((0, 0)) True """ if not (isinstance(loc, (list, tuple)) and len(loc) == 2): @@ -81,7 +81,7 @@ def __getitem__(self, loc: tuple[int, int]) -> Any: >>> a[1, 0] 7 """ - assert self.validateIndices(loc) + assert self.validate_indicies(loc) return self.array[loc[0]][loc[1]] def __setitem__(self, loc: tuple[int, int], value: float) -> None: @@ -96,7 +96,7 @@ def __setitem__(self, loc: tuple[int, int], value: float) -> None: [ 1, 1, 1] [ 1, 1, 51] """ - assert self.validateIndices(loc) + assert self.validate_indicies(loc) self.array[loc[0]][loc[1]] = value def __add__(self, another: Matrix) -> Matrix: @@ -198,9 +198,9 @@ def transpose(self) -> Matrix: result[c, r] = self[r, c] return result - def ShermanMorrison(self, u: Matrix, v: Matrix) -> Any: + def sherman_morrison(self, u: Matrix, v: Matrix) -> Any: """ - + Apply Sherman-Morrison formula in O(n^2). To learn this formula, please look this: https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula @@ -216,7 +216,7 @@ def ShermanMorrison(self, u: Matrix, v: Matrix) -> Any: >>> u[0,0], u[1,0], u[2,0] = 1, 2, -3 >>> v = Matrix(3, 1, 0) >>> v[0,0], v[1,0], v[2,0] = 4, -2, 5 - >>> ainv.ShermanMorrison(u, v) + >>> ainv.sherman_morrison(u, v) Matrix consist of 3 rows and 3 columns [ 1.2857142857142856, -0.14285714285714285, 0.3571428571428571] [ 0.5714285714285714, 0.7142857142857143, 0.7142857142857142] @@ -229,11 +229,11 @@ def ShermanMorrison(self, u: Matrix, v: Matrix) -> Any: assert u.column == v.column == 1 # u, v should be column vector # Calculate - vT = v.transpose() - numerator_factor = (vT * self * u)[0, 0] + 1 + v_t = v.transpose() + numerator_factor = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable - return self - ((self * u) * (vT * self) * (1.0 / numerator_factor)) + return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing @@ -254,7 +254,7 @@ def test1() -> None: print(f"v is {v}") print("uv^T is %s" % (u * v.transpose())) # Sherman Morrison - print(f"(a + uv^T)^(-1) is {ainv.ShermanMorrison(u, v)}") + print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}") def test2() -> None: import doctest diff --git a/networking_flow/ford_fulkerson.py b/networking_flow/ford_fulkerson.py index 96b782649774..370e3848222a 100644 --- a/networking_flow/ford_fulkerson.py +++ b/networking_flow/ford_fulkerson.py @@ -6,7 +6,7 @@ """ -def BFS(graph, s, t, parent): +def bfs(graph, s, t, parent): # Return True if there is node that has not iterated. 
visited = [False] * len(graph) queue = [] @@ -24,11 +24,11 @@ def BFS(graph, s, t, parent): return True if visited[t] else False -def FordFulkerson(graph, source, sink): +def ford_fulkerson(graph, source, sink): # This array is filled by BFS and to store path parent = [-1] * (len(graph)) max_flow = 0 - while BFS(graph, source, sink, parent): + while bfs(graph, source, sink, parent): path_flow = float("Inf") s = sink @@ -58,4 +58,4 @@ def FordFulkerson(graph, source, sink): ] source, sink = 0, 5 -print(FordFulkerson(graph, source, sink)) +print(ford_fulkerson(graph, source, sink)) diff --git a/networking_flow/minimum_cut.py b/networking_flow/minimum_cut.py index d79f3619caf1..33131315f4e1 100644 --- a/networking_flow/minimum_cut.py +++ b/networking_flow/minimum_cut.py @@ -10,7 +10,7 @@ ] -def BFS(graph, s, t, parent): +def bfs(graph, s, t, parent): # Return True if there is node that has not iterated. visited = [False] * len(graph) queue = [s] @@ -36,7 +36,7 @@ def mincut(graph, source, sink): max_flow = 0 res = [] temp = [i[:] for i in graph] # Record original cut, copy. - while BFS(graph, source, sink, parent): + while bfs(graph, source, sink, parent): path_flow = float("Inf") s = sink diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index e3993efb4249..bbade1c417d0 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -74,7 +74,7 @@ def save_model(self, save_path): print(f"Model saved: {save_path}") @classmethod - def ReadModel(cls, model_path): + def read_model(cls, model_path): # read saved model with open(model_path, "rb") as f: model_dic = pickle.load(f) @@ -119,7 +119,7 @@ def convolute(self, data, convs, w_convs, thre_convs, conv_step): data_focus.append(focus) # calculate the feature map of every single kernel, and saved as list of matrix data_featuremap = [] - Size_FeatureMap = int((size_data - size_conv) / conv_step + 1) + size_feature_map = int((size_data - size_conv) / conv_step + 1) for i_map in range(num_conv): featuremap = [] for i_focus in range(len(data_focus)): @@ -129,7 +129,7 @@ def convolute(self, data, convs, w_convs, thre_convs, conv_step): ) featuremap.append(self.sig(net_focus)) featuremap = np.asmatrix(featuremap).reshape( - Size_FeatureMap, Size_FeatureMap + size_feature_map, size_feature_map ) data_featuremap.append(featuremap) diff --git a/other/davisb_putnamb_logemannb_loveland.py b/other/davisb_putnamb_logemannb_loveland.py index 88aefabc8087..03d60a9a1aaf 100644 --- a/other/davisb_putnamb_logemannb_loveland.py +++ b/other/davisb_putnamb_logemannb_loveland.py @@ -255,14 +255,14 @@ def find_unit_clauses( if len(clause) == 1: unit_symbols.append(list(clause.literals.keys())[0]) else: - Fcount, Ncount = 0, 0 + f_count, n_count = 0, 0 for literal, value in clause.literals.items(): if value is False: - Fcount += 1 + f_count += 1 elif value is None: sym = literal - Ncount += 1 - if Fcount == len(clause) - 1 and Ncount == 1: + n_count += 1 + if f_count == len(clause) - 1 and n_count == 1: unit_symbols.append(sym) assignment: dict[str, bool | None] = dict() for i in unit_symbols: @@ -310,33 +310,33 @@ def dpll_algorithm( except RecursionError: print("raises a RecursionError and is") return None, {} - P = None + p = None if len(pure_symbols) > 0: - P, value = pure_symbols[0], assignment[pure_symbols[0]] + p, value = pure_symbols[0], assignment[pure_symbols[0]] - if P: + if p: tmp_model = model - tmp_model[P] = value + tmp_model[p] = value tmp_symbols = [i 
for i in symbols] - if P in tmp_symbols: - tmp_symbols.remove(P) + if p in tmp_symbols: + tmp_symbols.remove(p) return dpll_algorithm(clauses, tmp_symbols, tmp_model) unit_symbols, assignment = find_unit_clauses(clauses, model) - P = None + p = None if len(unit_symbols) > 0: - P, value = unit_symbols[0], assignment[unit_symbols[0]] - if P: + p, value = unit_symbols[0], assignment[unit_symbols[0]] + if p: tmp_model = model - tmp_model[P] = value + tmp_model[p] = value tmp_symbols = [i for i in symbols] - if P in tmp_symbols: - tmp_symbols.remove(P) + if p in tmp_symbols: + tmp_symbols.remove(p) return dpll_algorithm(clauses, tmp_symbols, tmp_model) - P = symbols[0] + p = symbols[0] rest = symbols[1:] tmp1, tmp2 = model, model - tmp1[P], tmp2[P] = True, False + tmp1[p], tmp2[p] = True, False return dpll_algorithm(clauses, rest, tmp1) or dpll_algorithm(clauses, rest, tmp2) diff --git a/other/greedy.py b/other/greedy.py index 4b78bf1c0415..72e05f451fbb 100644 --- a/other/greedy.py +++ b/other/greedy.py @@ -1,4 +1,4 @@ -class things: +class Things: def __init__(self, name, value, weight): self.name = name self.value = value @@ -16,27 +16,27 @@ def get_name(self): def get_weight(self): return self.weight - def value_Weight(self): + def value_weight(self): return self.value / self.weight def build_menu(name, value, weight): menu = [] for i in range(len(value)): - menu.append(things(name[i], value[i], weight[i])) + menu.append(Things(name[i], value[i], weight[i])) return menu -def greedy(item, maxCost, keyFunc): - itemsCopy = sorted(item, key=keyFunc, reverse=True) +def greedy(item, max_cost, key_func): + items_copy = sorted(item, key=key_func, reverse=True) result = [] - totalValue, total_cost = 0.0, 0.0 - for i in range(len(itemsCopy)): - if (total_cost + itemsCopy[i].get_weight()) <= maxCost: - result.append(itemsCopy[i]) - total_cost += itemsCopy[i].get_weight() - totalValue += itemsCopy[i].get_value() - return (result, totalValue) + total_value, total_cost = 0.0, 0.0 + for i in range(len(items_copy)): + if (total_cost + items_copy[i].get_weight()) <= max_cost: + result.append(items_copy[i]) + total_cost += items_copy[i].get_weight() + total_value += items_copy[i].get_value() + return (result, total_value) def test_greedy(): @@ -47,13 +47,13 @@ def test_greedy(): >>> weight = [40, 60, 40, 70, 100, 85, 55, 70] >>> foods = build_menu(food, value, weight) >>> foods # doctest: +NORMALIZE_WHITESPACE - [things(Burger, 80, 40), things(Pizza, 100, 60), things(Coca Cola, 60, 40), - things(Rice, 70, 70), things(Sambhar, 50, 100), things(Chicken, 110, 85), - things(Fries, 90, 55), things(Milk, 60, 70)] - >>> greedy(foods, 500, things.get_value) # doctest: +NORMALIZE_WHITESPACE - ([things(Chicken, 110, 85), things(Pizza, 100, 60), things(Fries, 90, 55), - things(Burger, 80, 40), things(Rice, 70, 70), things(Coca Cola, 60, 40), - things(Milk, 60, 70)], 570.0) + [Things(Burger, 80, 40), Things(Pizza, 100, 60), Things(Coca Cola, 60, 40), + Things(Rice, 70, 70), Things(Sambhar, 50, 100), Things(Chicken, 110, 85), + Things(Fries, 90, 55), Things(Milk, 60, 70)] + >>> greedy(foods, 500, Things.get_value) # doctest: +NORMALIZE_WHITESPACE + ([Things(Chicken, 110, 85), Things(Pizza, 100, 60), Things(Fries, 90, 55), + Things(Burger, 80, 40), Things(Rice, 70, 70), Things(Coca Cola, 60, 40), + Things(Milk, 60, 70)], 570.0) """ diff --git a/other/nested_brackets.py b/other/nested_brackets.py index 99e2f3a38797..9dd9a0f042ed 100644 --- a/other/nested_brackets.py +++ b/other/nested_brackets.py @@ -14,21 +14,21 @@ """ 
-def is_balanced(S): +def is_balanced(s): stack = [] open_brackets = set({"(", "[", "{"}) closed_brackets = set({")", "]", "}"}) open_to_closed = dict({"{": "}", "[": "]", "(": ")"}) - for i in range(len(S)): + for i in range(len(s)): - if S[i] in open_brackets: - stack.append(S[i]) + if s[i] in open_brackets: + stack.append(s[i]) - elif S[i] in closed_brackets: + elif s[i] in closed_brackets: if len(stack) == 0 or ( - len(stack) > 0 and open_to_closed[stack.pop()] != S[i] + len(stack) > 0 and open_to_closed[stack.pop()] != s[i] ): return False diff --git a/other/sdes.py b/other/sdes.py index cfc5a53df2b2..695675000632 100644 --- a/other/sdes.py +++ b/other/sdes.py @@ -19,9 +19,9 @@ def left_shift(data): return data[1:] + data[0] -def XOR(a, b): +def xor(a, b): """ - >>> XOR("01010101", "00001111") + >>> xor("01010101", "00001111") '01011010' """ res = "" @@ -43,13 +43,13 @@ def function(expansion, s0, s1, key, message): left = message[:4] right = message[4:] temp = apply_table(right, expansion) - temp = XOR(temp, key) + temp = xor(temp, key) l = apply_sbox(s0, temp[:4]) # noqa: E741 r = apply_sbox(s1, temp[4:]) l = "0" * (2 - len(l)) + l # noqa: E741 r = "0" * (2 - len(r)) + r temp = apply_table(l + r, p4_table) - temp = XOR(left, temp) + temp = xor(left, temp) return temp + right diff --git a/other/tower_of_hanoi.py b/other/tower_of_hanoi.py index 3cc0e40b369f..1fff45039891 100644 --- a/other/tower_of_hanoi.py +++ b/other/tower_of_hanoi.py @@ -1,6 +1,6 @@ -def moveTower(height, fromPole, toPole, withPole): +def move_tower(height, from_pole, to_pole, with_pole): """ - >>> moveTower(3, 'A', 'B', 'C') + >>> move_tower(3, 'A', 'B', 'C') moving disk from A to B moving disk from A to C moving disk from B to C @@ -10,18 +10,18 @@ def moveTower(height, fromPole, toPole, withPole): moving disk from A to B """ if height >= 1: - moveTower(height - 1, fromPole, withPole, toPole) - moveDisk(fromPole, toPole) - moveTower(height - 1, withPole, toPole, fromPole) + move_tower(height - 1, from_pole, with_pole, to_pole) + move_disk(from_pole, to_pole) + move_tower(height - 1, with_pole, to_pole, from_pole) -def moveDisk(fp, tp): +def move_disk(fp, tp): print("moving disk from", fp, "to", tp) def main(): height = int(input("Height of hanoi: ").strip()) - moveTower(height, "A", "B", "C") + move_tower(height, "A", "B", "C") if __name__ == "__main__": diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index 01083b9a272e..7e9fc1642c84 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -219,9 +219,11 @@ def plot( Utility function to plot how the given body-system evolves over time. No doctest provided since this function does not have a return value. 
""" + # Frame rate of the animation + INTERVAL = 20 # noqa: N806 - INTERVAL = 20 # Frame rate of the animation - DELTA_TIME = INTERVAL / 1000 # Time between time steps in seconds + # Time between time steps in seconds + DELTA_TIME = INTERVAL / 1000 # noqa: N806 fig = plt.figure() fig.canvas.set_window_title(title) diff --git a/project_euler/problem_011/sol1.py b/project_euler/problem_011/sol1.py index 9dea73e8cef2..ad45f0983a7c 100644 --- a/project_euler/problem_011/sol1.py +++ b/project_euler/problem_011/sol1.py @@ -28,23 +28,23 @@ def largest_product(grid): - nColumns = len(grid[0]) - nRows = len(grid) + n_columns = len(grid[0]) + n_rows = len(grid) largest = 0 - lrDiagProduct = 0 - rlDiagProduct = 0 + lr_diag_product = 0 + rl_diag_product = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) - for i in range(nColumns): - for j in range(nRows - 3): - vertProduct = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] - horzProduct = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] + for i in range(n_columns): + for j in range(n_rows - 3): + vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] + horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product - if i < nColumns - 3: - lrDiagProduct = ( + if i < n_columns - 3: + lr_diag_product = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] @@ -53,16 +53,18 @@ def largest_product(grid): # Right-to-left diagonal(/) product if i > 2: - rlDiagProduct = ( + rl_diag_product = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) - maxProduct = max(vertProduct, horzProduct, lrDiagProduct, rlDiagProduct) - if maxProduct > largest: - largest = maxProduct + max_product = max( + vert_product, horz_product, lr_diag_product, rl_diag_product + ) + if max_product > largest: + largest = max_product return largest diff --git a/project_euler/problem_012/sol1.py b/project_euler/problem_012/sol1.py index 861d026ece5b..e42b03419c69 100644 --- a/project_euler/problem_012/sol1.py +++ b/project_euler/problem_012/sol1.py @@ -24,18 +24,18 @@ def count_divisors(n): - nDivisors = 1 + n_divisors = 1 i = 2 while i * i <= n: multiplicity = 0 while n % i == 0: n //= i multiplicity += 1 - nDivisors *= multiplicity + 1 + n_divisors *= multiplicity + 1 i += 1 if n > 1: - nDivisors *= 2 - return nDivisors + n_divisors *= 2 + return n_divisors def solution(): @@ -45,17 +45,17 @@ def solution(): >>> solution() 76576500 """ - tNum = 1 + t_num = 1 i = 1 while True: i += 1 - tNum += i + t_num += i - if count_divisors(tNum) > 500: + if count_divisors(t_num) > 500: break - return tNum + return t_num if __name__ == "__main__": diff --git a/project_euler/problem_023/sol1.py b/project_euler/problem_023/sol1.py index 83b85f3f721c..9fdf7284a3fd 100644 --- a/project_euler/problem_023/sol1.py +++ b/project_euler/problem_023/sol1.py @@ -28,18 +28,18 @@ def solution(limit=28123): >>> solution() 4179871 """ - sumDivs = [1] * (limit + 1) + sum_divs = [1] * (limit + 1) for i in range(2, int(limit**0.5) + 1): - sumDivs[i * i] += i + sum_divs[i * i] += i for k in range(i + 1, limit // i + 1): - sumDivs[k * i] += k + i + sum_divs[k * i] += k + i abundants = set() res = 0 for n in range(1, limit + 1): - if sumDivs[n] > n: + if sum_divs[n] > n: abundants.add(n) if not any((n - a in abundants) for a in abundants): diff --git a/project_euler/problem_029/sol1.py b/project_euler/problem_029/sol1.py index d3ab90ac7d25..d9a81e55ca35 100644 
--- a/project_euler/problem_029/sol1.py +++ b/project_euler/problem_029/sol1.py @@ -33,17 +33,17 @@ def solution(n: int = 100) -> int: >>> solution(1) 0 """ - collectPowers = set() + collect_powers = set() - currentPow = 0 + current_pow = 0 - N = n + 1 # maximum limit + n = n + 1 # maximum limit - for a in range(2, N): - for b in range(2, N): - currentPow = a**b # calculates the current power - collectPowers.add(currentPow) # adds the result to the set - return len(collectPowers) + for a in range(2, n): + for b in range(2, n): + current_pow = a**b # calculates the current power + collect_powers.add(current_pow) # adds the result to the set + return len(collect_powers) if __name__ == "__main__": diff --git a/project_euler/problem_032/sol32.py b/project_euler/problem_032/sol32.py index 393218339e9f..c4d11e86c877 100644 --- a/project_euler/problem_032/sol32.py +++ b/project_euler/problem_032/sol32.py @@ -15,15 +15,15 @@ import itertools -def isCombinationValid(combination): +def is_combination_valid(combination): """ Checks if a combination (a tuple of 9 digits) is a valid product equation. - >>> isCombinationValid(('3', '9', '1', '8', '6', '7', '2', '5', '4')) + >>> is_combination_valid(('3', '9', '1', '8', '6', '7', '2', '5', '4')) True - >>> isCombinationValid(('1', '2', '3', '4', '5', '6', '7', '8', '9')) + >>> is_combination_valid(('1', '2', '3', '4', '5', '6', '7', '8', '9')) False """ @@ -49,7 +49,7 @@ def solution(): { int("".join(pandigital[5:9])) for pandigital in itertools.permutations("123456789") - if isCombinationValid(pandigital) + if is_combination_valid(pandigital) } ) diff --git a/project_euler/problem_042/solution42.py b/project_euler/problem_042/solution42.py index b3aecf4cf144..6d22a8dfb655 100644 --- a/project_euler/problem_042/solution42.py +++ b/project_euler/problem_042/solution42.py @@ -27,10 +27,10 @@ def solution(): 162 """ script_dir = os.path.dirname(os.path.realpath(__file__)) - wordsFilePath = os.path.join(script_dir, "words.txt") + words_file_path = os.path.join(script_dir, "words.txt") words = "" - with open(wordsFilePath) as f: + with open(words_file_path) as f: words = f.readline() words = list(map(lambda word: word.strip('"'), words.strip("\r\n").split(","))) diff --git a/project_euler/problem_054/test_poker_hand.py b/project_euler/problem_054/test_poker_hand.py index 96317fc7df33..bf5a20a8e862 100644 --- a/project_euler/problem_054/test_poker_hand.py +++ b/project_euler/problem_054/test_poker_hand.py @@ -185,7 +185,7 @@ def test_compare_random(hand, other, expected): def test_hand_sorted(): - POKER_HANDS = [PokerHand(hand) for hand in SORTED_HANDS] + POKER_HANDS = [PokerHand(hand) for hand in SORTED_HANDS] # noqa: N806 list_copy = POKER_HANDS.copy() shuffle(list_copy) user_sorted = chain(sorted(list_copy)) diff --git a/project_euler/problem_064/sol1.py b/project_euler/problem_064/sol1.py index 5df64a90ae55..9edd9a1e7a64 100644 --- a/project_euler/problem_064/sol1.py +++ b/project_euler/problem_064/sol1.py @@ -33,7 +33,7 @@ def continuous_fraction_period(n: int) -> int: """ numerator = 0.0 denominator = 1.0 - ROOT = int(sqrt(n)) + ROOT = int(sqrt(n)) # noqa: N806 integer_part = ROOT period = 0 while integer_part != 2 * ROOT: diff --git a/project_euler/problem_097/sol1.py b/project_euler/problem_097/sol1.py index da5e8120b7c5..94a43894ee07 100644 --- a/project_euler/problem_097/sol1.py +++ b/project_euler/problem_097/sol1.py @@ -34,8 +34,8 @@ def solution(n: int = 10) -> str: """ if not isinstance(n, int) or n < 0: raise ValueError("Invalid input") - 
MODULUS = 10**n - NUMBER = 28433 * (pow(2, 7830457, MODULUS)) + 1 + MODULUS = 10**n # noqa: N806 + NUMBER = 28433 * (pow(2, 7830457, MODULUS)) + 1 # noqa: N806 return str(NUMBER % MODULUS) diff --git a/project_euler/problem_104/sol.py b/project_euler/problem_104/sol.py.FIXME similarity index 100% rename from project_euler/problem_104/sol.py rename to project_euler/problem_104/sol.py.FIXME diff --git a/project_euler/problem_125/sol1.py b/project_euler/problem_125/sol1.py index 7a8f908ed6a9..1812df36132e 100644 --- a/project_euler/problem_125/sol1.py +++ b/project_euler/problem_125/sol1.py @@ -35,7 +35,7 @@ def solution() -> int: Returns the sum of all numbers less than 1e8 that are both palindromic and can be written as the sum of consecutive squares. """ - LIMIT = 10**8 + LIMIT = 10**8 # noqa: N806 answer = set() first_square = 1 sum_squares = 5 diff --git a/scheduling/non_preemptive_shortest_job_first.py b/scheduling/non_preemptive_shortest_job_first.py index 96e571230ec0..69c974b0044d 100644 --- a/scheduling/non_preemptive_shortest_job_first.py +++ b/scheduling/non_preemptive_shortest_job_first.py @@ -102,9 +102,9 @@ def calculate_turnaroundtime( # Printing the Result print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time") - for i, process_ID in enumerate(list(range(1, 5))): + for i, process_id in enumerate(list(range(1, 5))): print( - f"{process_ID}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t" + f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t" f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}" ) print(f"\nAverage waiting time = {mean(waiting_time):.5f}") diff --git a/searches/tabu_search.py b/searches/tabu_search.py index 24d0dbf6f1c2..45ce19d46b23 100644 --- a/searches/tabu_search.py +++ b/searches/tabu_search.py @@ -178,9 +178,9 @@ def find_neighborhood(solution, dict_of_neighbours): if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp) - indexOfLastItemInTheList = len(neighborhood_of_solution[0]) - 1 + index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1 - neighborhood_of_solution.sort(key=lambda x: x[indexOfLastItemInTheList]) + neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list]) return neighborhood_of_solution diff --git a/sorts/odd_even_transposition_parallel.py b/sorts/odd_even_transposition_parallel.py index 5de7a016c628..b656df3a3a90 100644 --- a/sorts/odd_even_transposition_parallel.py +++ b/sorts/odd_even_transposition_parallel.py @@ -13,7 +13,7 @@ from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time -processLock = Lock() +process_lock = Lock() """ The function run by the processes that sorts the list @@ -27,42 +27,42 @@ """ -def oeProcess(position, value, LSend, RSend, LRcv, RRcv, resultPipe): - global processLock +def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): + global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0, 10): - if (i + position) % 2 == 0 and RSend is not None: + if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor - processLock.acquire() - RSend[1].send(value) - processLock.release() + process_lock.acquire() + r_send[1].send(value) + process_lock.release() # receive your right neighbor's value - processLock.acquire() - temp = 
RRcv[0].recv() - processLock.release() + process_lock.acquire() + temp = rr_cv[0].recv() + process_lock.release() # take the lower value since you are on the left value = min(value, temp) - elif (i + position) % 2 != 0 and LSend is not None: + elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor - processLock.acquire() - LSend[1].send(value) - processLock.release() + process_lock.acquire() + l_send[1].send(value) + process_lock.release() # receive your left neighbor's value - processLock.acquire() - temp = LRcv[0].recv() - processLock.release() + process_lock.acquire() + temp = lr_cv[0].recv() + process_lock.release() # take the higher value since you are on the right value = max(value, temp) # after all swaps are performed, send the values back to main - resultPipe[1].send(value) + result_pipe[1].send(value) """ @@ -72,61 +72,61 @@ def oeProcess(position, value, LSend, RSend, LRcv, RRcv, resultPipe): """ -def OddEvenTransposition(arr): - processArray = [] - resultPipe = [] +def odd_even_transposition(arr): + process_array_ = [] + result_pipe = [] # initialize the list of pipes where the values will be retrieved for _ in arr: - resultPipe.append(Pipe()) + result_pipe.append(Pipe()) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop - tempRs = Pipe() - tempRr = Pipe() - processArray.append( + temp_rs = Pipe() + temp_rr = Pipe() + process_array_.append( Process( - target=oeProcess, - args=(0, arr[0], None, tempRs, None, tempRr, resultPipe[0]), + target=oe_process, + args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) ) - tempLr = tempRs - tempLs = tempRr + temp_lr = temp_rs + temp_ls = temp_rr for i in range(1, len(arr) - 1): - tempRs = Pipe() - tempRr = Pipe() - processArray.append( + temp_rs = Pipe() + temp_rr = Pipe() + process_array_.append( Process( - target=oeProcess, - args=(i, arr[i], tempLs, tempRs, tempLr, tempRr, resultPipe[i]), + target=oe_process, + args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) ) - tempLr = tempRs - tempLs = tempRr + temp_lr = temp_rs + temp_ls = temp_rr - processArray.append( + process_array_.append( Process( - target=oeProcess, + target=oe_process, args=( len(arr) - 1, arr[len(arr) - 1], - tempLs, + temp_ls, None, - tempLr, + temp_lr, None, - resultPipe[len(arr) - 1], + result_pipe[len(arr) - 1], ), ) ) # start the processes - for p in processArray: + for p in process_array_: p.start() # wait for the processes to end and write their values to the list - for p in range(0, len(resultPipe)): - arr[p] = resultPipe[p][0].recv() - processArray[p].join() + for p in range(0, len(result_pipe)): + arr[p] = result_pipe[p][0].recv() + process_array_[p].join() return arr @@ -135,7 +135,7 @@ def main(): arr = list(range(10, 0, -1)) print("Initial List") print(*arr) - arr = OddEvenTransposition(arr) + arr = odd_even_transposition(arr) print("Sorted List\n") print(*arr) diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py index c3ff04f3d5e5..afe62bc7ec30 100644 --- a/sorts/radix_sort.py +++ b/sorts/radix_sort.py @@ -19,7 +19,7 @@ def radix_sort(list_of_ints: list[int]) -> list[int]: >>> radix_sort([1,100,10,1000]) == sorted([1,100,10,1000]) True """ - RADIX = 10 + RADIX = 10 # noqa: N806 placement = 1 max_digit = max(list_of_ints) while placement <= max_digit: diff --git a/sorts/random_normal_distribution_quicksort.py b/sorts/random_normal_distribution_quicksort.py index 73eb70bea07f..5777d5cb2e7a 100644 --- 
a/sorts/random_normal_distribution_quicksort.py +++ b/sorts/random_normal_distribution_quicksort.py @@ -4,41 +4,41 @@ import numpy as np -def _inPlaceQuickSort(A, start, end): +def _in_place_quick_sort(a, start, end): count = 0 if start < end: pivot = randint(start, end) - temp = A[end] - A[end] = A[pivot] - A[pivot] = temp + temp = a[end] + a[end] = a[pivot] + a[pivot] = temp - p, count = _inPlacePartition(A, start, end) - count += _inPlaceQuickSort(A, start, p - 1) - count += _inPlaceQuickSort(A, p + 1, end) + p, count = _in_place_partition(a, start, end) + count += _in_place_quick_sort(a, start, p - 1) + count += _in_place_quick_sort(a, p + 1, end) return count -def _inPlacePartition(A, start, end): +def _in_place_partition(a, start, end): count = 0 pivot = randint(start, end) - temp = A[end] - A[end] = A[pivot] - A[pivot] = temp - newPivotIndex = start - 1 + temp = a[end] + a[end] = a[pivot] + a[pivot] = temp + new_pivot_index = start - 1 for index in range(start, end): count += 1 - if A[index] < A[end]: # check if current val is less than pivot value - newPivotIndex = newPivotIndex + 1 - temp = A[newPivotIndex] - A[newPivotIndex] = A[index] - A[index] = temp + if a[index] < a[end]: # check if current val is less than pivot value + new_pivot_index = new_pivot_index + 1 + temp = a[new_pivot_index] + a[new_pivot_index] = a[index] + a[index] = temp - temp = A[newPivotIndex + 1] - A[newPivotIndex + 1] = A[end] - A[end] = temp - return newPivotIndex + 1, count + temp = a[new_pivot_index + 1] + a[new_pivot_index + 1] = a[end] + a[end] = temp + return new_pivot_index + 1, count outfile = TemporaryFile() @@ -55,7 +55,7 @@ def _inPlacePartition(A, start, end): outfile.seek(0) # using the same array M = np.load(outfile) r = len(M) - 1 -z = _inPlaceQuickSort(M, 0, r) +z = _in_place_quick_sort(M, 0, r) print( "No of Comparisons for 100 elements selected from a standard normal distribution" diff --git a/sorts/random_pivot_quick_sort.py b/sorts/random_pivot_quick_sort.py index d9cf4e981c2f..748b6741047e 100644 --- a/sorts/random_pivot_quick_sort.py +++ b/sorts/random_pivot_quick_sort.py @@ -4,30 +4,30 @@ import random -def partition(A, left_index, right_index): - pivot = A[left_index] +def partition(a, left_index, right_index): + pivot = a[left_index] i = left_index + 1 for j in range(left_index + 1, right_index): - if A[j] < pivot: - A[j], A[i] = A[i], A[j] + if a[j] < pivot: + a[j], a[i] = a[i], a[j] i += 1 - A[left_index], A[i - 1] = A[i - 1], A[left_index] + a[left_index], a[i - 1] = a[i - 1], a[left_index] return i - 1 -def quick_sort_random(A, left, right): +def quick_sort_random(a, left, right): if left < right: pivot = random.randint(left, right - 1) - A[pivot], A[left] = ( - A[left], - A[pivot], + a[pivot], a[left] = ( + a[left], + a[pivot], ) # switches the pivot with the left most bound - pivot_index = partition(A, left, right) + pivot_index = partition(a, left, right) quick_sort_random( - A, left, pivot_index + a, left, pivot_index ) # recursive quicksort to the left of the pivot point quick_sort_random( - A, pivot_index + 1, right + a, pivot_index + 1, right ) # recursive quicksort to the right of the pivot point diff --git a/sorts/tree_sort.py b/sorts/tree_sort.py index e445fb4520aa..78c3e893e0ce 100644 --- a/sorts/tree_sort.py +++ b/sorts/tree_sort.py @@ -5,7 +5,7 @@ """ -class node: +class Node: # BST data structure def __init__(self, val): self.val = val @@ -16,12 +16,12 @@ def insert(self, val): if self.val: if val < self.val: if self.left is None: - self.left = node(val) + 
self.left = Node(val) else: self.left.insert(val) elif val > self.val: if self.right is None: - self.right = node(val) + self.right = Node(val) else: self.right.insert(val) else: @@ -40,7 +40,7 @@ def tree_sort(arr): # Build BST if len(arr) == 0: return arr - root = node(arr[0]) + root = Node(arr[0]) for i in range(1, len(arr)): root.insert(arr[i]) # Traverse BST in order. diff --git a/strings/boyer_moore_search.py b/strings/boyer_moore_search.py index 8d8ff22f67bd..117305d32fd3 100644 --- a/strings/boyer_moore_search.py +++ b/strings/boyer_moore_search.py @@ -41,13 +41,13 @@ def match_in_pattern(self, char: str) -> int: return i return -1 - def mismatch_in_text(self, currentPos: int) -> int: + def mismatch_in_text(self, current_pos: int) -> int: """ find the index of mis-matched character in text when compared with pattern from last Parameters : - currentPos (int): current index position of text + current_pos (int): current index position of text Returns : i (int): index of mismatched char from last in text @@ -55,8 +55,8 @@ def mismatch_in_text(self, currentPos: int) -> int: """ for i in range(self.patLen - 1, -1, -1): - if self.pattern[i] != self.text[currentPos + i]: - return currentPos + i + if self.pattern[i] != self.text[current_pos + i]: + return current_pos + i return -1 def bad_character_heuristic(self) -> list[int]: diff --git a/strings/can_string_be_rearranged_as_palindrome.py b/strings/can_string_be_rearranged_as_palindrome.py index ddc4828c773b..21d653db1405 100644 --- a/strings/can_string_be_rearranged_as_palindrome.py +++ b/strings/can_string_be_rearranged_as_palindrome.py @@ -67,12 +67,12 @@ def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool: Step 2:If we find more than 1 character that appears odd number of times, It is not possible to rearrange as a palindrome """ - oddChar = 0 + odd_char = 0 for character_count in character_freq_dict.values(): if character_count % 2: - oddChar += 1 - if oddChar > 1: + odd_char += 1 + if odd_char > 1: return False return True diff --git a/strings/check_anagrams.py b/strings/check_anagrams.py index f652e2294db2..0d2f8091a3f0 100644 --- a/strings/check_anagrams.py +++ b/strings/check_anagrams.py @@ -48,8 +48,8 @@ def check_anagrams(first_str: str, second_str: str) -> bool: from doctest import testmod testmod() - input_A = input("Enter the first string ").strip() - input_B = input("Enter the second string ").strip() + input_a = input("Enter the first string ").strip() + input_b = input("Enter the second string ").strip() - status = check_anagrams(input_A, input_B) - print(f"{input_A} and {input_B} are {'' if status else 'not '}anagrams.") + status = check_anagrams(input_a, input_b) + print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.") diff --git a/strings/word_patterns.py b/strings/word_patterns.py index 90b092a20dc8..d12d267e7b35 100644 --- a/strings/word_patterns.py +++ b/strings/word_patterns.py @@ -26,10 +26,10 @@ def get_word_pattern(word: str) -> str: start_time = time.time() with open("dictionary.txt") as in_file: - wordList = in_file.read().splitlines() + word_list = in_file.read().splitlines() all_patterns: dict = {} - for word in wordList: + for word in word_list: pattern = get_word_pattern(word) if pattern in all_patterns: all_patterns[pattern].append(word) @@ -39,6 +39,6 @@ def get_word_pattern(word: str) -> str: with open("word_patterns.txt", "w") as out_file: out_file.write(pprint.pformat(all_patterns)) - totalTime = round(time.time() - start_time, 2) - print(f"Done! 
{len(all_patterns):,} word patterns found in {totalTime} seconds.") + total_time = round(time.time() - start_time, 2) + print(f"Done! {len(all_patterns):,} word patterns found in {total_time} seconds.") # Done! 9,581 word patterns found in 0.58 seconds. diff --git a/web_programming/fetch_quotes.py b/web_programming/fetch_quotes.py index 4a3b002e515f..a45f6ea0eaf1 100644 --- a/web_programming/fetch_quotes.py +++ b/web_programming/fetch_quotes.py @@ -12,12 +12,12 @@ def quote_of_the_day() -> list: - API_ENDPOINT_URL = "https://zenquotes.io/api/today/" + API_ENDPOINT_URL = "https://zenquotes.io/api/today/" # noqa: N806 return requests.get(API_ENDPOINT_URL).json() def random_quotes() -> list: - API_ENDPOINT_URL = "https://zenquotes.io/api/random/" + API_ENDPOINT_URL = "https://zenquotes.io/api/random/" # noqa: N806 return requests.get(API_ENDPOINT_URL).json() From 1aa7bd96164bf9f17acd770f4c6992d35c468541 Mon Sep 17 00:00:00 2001 From: Abinash Satapathy Date: Thu, 13 Oct 2022 00:56:10 +0200 Subject: [PATCH 0532/1543] Added barcode_validator.py (#6771) * Update README.md Added Google Cirq references * Create barcode_validator.py Barcode/EAN validator * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Included docstring and updated variables to snake_case * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Included docset and updated bugs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Implemented the changes asked in review. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Updated with f-string format * Update barcode_validator.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- quantum/README.md | 8 ++++ strings/barcode_validator.py | 88 ++++++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 strings/barcode_validator.py diff --git a/quantum/README.md b/quantum/README.md index 423d34fa3364..3ce364574486 100644 --- a/quantum/README.md +++ b/quantum/README.md @@ -6,6 +6,7 @@ Started at https://github.com/TheAlgorithms/Python/issues/1831 * Google: https://research.google/teams/applied-science/quantum * IBM: https://qiskit.org and https://github.com/Qiskit * Rigetti: https://rigetti.com and https://github.com/rigetti +* Zapata: https://www.zapatacomputing.com and https://github.com/zapatacomputing ## IBM Qiskit - Start using by installing `pip install qiskit`, refer the [docs](https://qiskit.org/documentation/install.html) for more info. @@ -13,3 +14,10 @@ Started at https://github.com/TheAlgorithms/Python/issues/1831 - https://github.com/Qiskit/qiskit-tutorials - https://quantum-computing.ibm.com/docs/iql/first-circuit - https://medium.com/qiskit/how-to-program-a-quantum-computer-982a9329ed02 + +## Google Cirq +- Start using by installing `python -m pip install cirq`, refer the [docs](https://quantumai.google/cirq/start/install) for more info. 
+- Tutorials & references + - https://github.com/quantumlib/cirq + - https://quantumai.google/cirq/experiments + - https://tanishabassan.medium.com/quantum-programming-with-google-cirq-3209805279bc diff --git a/strings/barcode_validator.py b/strings/barcode_validator.py new file mode 100644 index 000000000000..05670007665c --- /dev/null +++ b/strings/barcode_validator.py @@ -0,0 +1,88 @@ +""" +https://en.wikipedia.org/wiki/Check_digit#Algorithms +""" + + +def get_check_digit(barcode: int) -> int: + """ + Returns the last digit of barcode by excluding the last digit first + and then computing to reach the actual last digit from the remaining + 12 digits. + + >>> get_check_digit(8718452538119) + 9 + >>> get_check_digit(87184523) + 5 + >>> get_check_digit(87193425381086) + 9 + >>> [get_check_digit(x) for x in range(0, 100, 10)] + [0, 7, 4, 1, 8, 5, 2, 9, 6, 3] + """ + barcode //= 10 # exclude the last digit + checker = False + s = 0 + + # extract and check each digit + while barcode != 0: + mult = 1 if checker else 3 + s += mult * (barcode % 10) + barcode //= 10 + checker = not checker + + return (10 - (s % 10)) % 10 + + +def is_valid(barcode: int) -> bool: + """ + Checks for length of barcode and last-digit + Returns boolean value of validity of barcode + + >>> is_valid(8718452538119) + True + >>> is_valid(87184525) + False + >>> is_valid(87193425381089) + False + >>> is_valid(0) + False + >>> is_valid(dwefgiweuf) + Traceback (most recent call last): + ... + NameError: name 'dwefgiweuf' is not defined + """ + return len(str(barcode)) == 13 and get_check_digit(barcode) == barcode % 10 + + +def get_barcode(barcode: str) -> int: + """ + Returns the barcode as an integer + + >>> get_barcode("8718452538119") + 8718452538119 + >>> get_barcode("dwefgiweuf") + Traceback (most recent call last): + ... + ValueError: Barcode 'dwefgiweuf' has alphabetic characters. + """ + if str(barcode).isalpha(): + raise ValueError(f"Barcode '{barcode}' has alphabetic characters.") + elif int(barcode) < 0: + raise ValueError("The entered barcode has a negative value. Try again.") + else: + return int(barcode) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + """ + Enter a barcode. 
+ + """ + barcode = get_barcode(input("Barcode: ").strip()) + + if is_valid(barcode): + print(f"'{barcode}' is a valid Barcode") + else: + print(f"'{barcode}' is NOT is valid Barcode.") From 6118b05f0efd1c2839eb8bc4de36723af1fcc364 Mon Sep 17 00:00:00 2001 From: Carlos Villar Date: Thu, 13 Oct 2022 08:24:53 +0200 Subject: [PATCH 0533/1543] Convert snake_case to camelCase or PascalCase (#7028) (#7034) * Added snake_case to Camel or Pascal case Fixes: #7028 * Added suggested changes * Add ending empty line from suggestion Co-authored-by: Caeden * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update strings/snake_case_to_camel_pascal_case.py Co-authored-by: Christian Clauss Co-authored-by: Caeden Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- strings/snake_case_to_camel_pascal_case.py | 52 ++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 strings/snake_case_to_camel_pascal_case.py diff --git a/strings/snake_case_to_camel_pascal_case.py b/strings/snake_case_to_camel_pascal_case.py new file mode 100644 index 000000000000..7b2b61d1d1cf --- /dev/null +++ b/strings/snake_case_to_camel_pascal_case.py @@ -0,0 +1,52 @@ +def snake_to_camel_case(input: str, use_pascal: bool = False) -> str: + """ + Transforms a snake_case given string to camelCase (or PascalCase if indicated) + (defaults to not use Pascal) + + >>> snake_to_camel_case("some_random_string") + 'someRandomString' + + >>> snake_to_camel_case("some_random_string", use_pascal=True) + 'SomeRandomString' + + >>> snake_to_camel_case("some_random_string_with_numbers_123") + 'someRandomStringWithNumbers123' + + >>> snake_to_camel_case("some_random_string_with_numbers_123", use_pascal=True) + 'SomeRandomStringWithNumbers123' + + >>> snake_to_camel_case(123) + Traceback (most recent call last): + ... + ValueError: Expected string as input, found + + >>> snake_to_camel_case("some_string", use_pascal="True") + Traceback (most recent call last): + ... 
+ ValueError: Expected boolean as use_pascal parameter, found + """ + + if not isinstance(input, str): + raise ValueError(f"Expected string as input, found {type(input)}") + if not isinstance(use_pascal, bool): + raise ValueError( + f"Expected boolean as use_pascal parameter, found {type(use_pascal)}" + ) + + words = input.split("_") + + start_index = 0 if use_pascal else 1 + + words_to_capitalize = words[start_index:] + + capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize] + + initial_word = "" if use_pascal else words[0] + + return "".join([initial_word] + capitalized_words) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 7ad6c6402945349fbca42cce5acad631b0930a1d Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Thu, 13 Oct 2022 15:15:20 +0530 Subject: [PATCH 0534/1543] Add typing to maths/add.py (#7064) * Add typing to maths/add.py https://stackoverflow.com/questions/50928592/mypy-type-hint-unionfloat-int-is-there-a-number-type * Update add.py * Update add.py --- maths/add.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/add.py b/maths/add.py index 0bc7da9697d3..c89252c645ea 100644 --- a/maths/add.py +++ b/maths/add.py @@ -3,7 +3,7 @@ """ -def add(a, b): +def add(a: float, b: float) -> float: """ >>> add(2, 2) 4 From 9b0909d6545df269dc2c943df2e470671f0d1bcf Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Thu, 13 Oct 2022 16:17:52 +0530 Subject: [PATCH 0535/1543] Add typing and snake casing to maths/decimal_isolate.py (#7066) --- maths/decimal_isolate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/decimal_isolate.py b/maths/decimal_isolate.py index 1b8f6cbcad89..cdf43ea5d0ef 100644 --- a/maths/decimal_isolate.py +++ b/maths/decimal_isolate.py @@ -4,7 +4,7 @@ """ -def decimal_isolate(number, digit_amount): +def decimal_isolate(number: float, digit_amount: int) -> float: """ Isolates the decimal part of a number. From c73cb7e3f7324ab2715ffc74ab18c32e3a90a065 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Thu, 13 Oct 2022 16:18:28 +0530 Subject: [PATCH 0536/1543] Add typing to maths/sum_of_arithmetic_series.py (#7065) --- maths/sum_of_arithmetic_series.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/sum_of_arithmetic_series.py b/maths/sum_of_arithmetic_series.py index 74eef0f18a12..e0e22760bfbe 100644 --- a/maths/sum_of_arithmetic_series.py +++ b/maths/sum_of_arithmetic_series.py @@ -1,5 +1,5 @@ # DarkCoder -def sum_of_series(first_term, common_diff, num_of_terms): +def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float: """ Find the sum of n terms in an arithmetic progression. 
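The hunk above only retypes the signature of sum_of_series, but for readers unfamiliar with the underlying identity: the sum of an arithmetic progression with first term a, common difference d and n terms is n/2 * (2a + (n - 1)d). A minimal, self-contained sketch of that closed form follows; it is illustrative only and assumes the documented contract of sum_of_series, since the function body is not shown in the hunk.

    def arithmetic_series_sum(first_term: float, common_diff: float, num_of_terms: int) -> float:
        # Closed form: S_n = n/2 * (2a + (n - 1)d)
        return num_of_terms / 2 * (2 * first_term + (num_of_terms - 1) * common_diff)

    # 1 + 3 + 5 + 7 + 9 = 25
    assert arithmetic_series_sum(1, 2, 5) == 25.0
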
From e661b9882995718ed967e33c2c814866b26fa76d Mon Sep 17 00:00:00 2001 From: GURNEET SINGH <79376134+SinghGurneet21@users.noreply.github.com> Date: Thu, 13 Oct 2022 17:39:01 +0530 Subject: [PATCH 0537/1543] Binary Search Tree Inorder Traversal Algorithm (#6840) * Binary Search Tree Inorder Traversal * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Binary Search Tree Inorder Traversal v2 * Binary Search Tree Inorder Traversal * Binary Search Tree Inorder Traversal * Update inorder_tree_traversal_2022.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update inorder_tree_traversal_2022.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update inorder_tree_traversal_2022.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/binary_tree/inorder_tree_traversal_2022.py * Update data_structures/binary_tree/inorder_tree_traversal_2022.py * Updated * Updated * Update inorder_tree_traversal_2022.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update inorder_tree_traversal_2022.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/binary_tree/inorder_tree_traversal_2022.py Co-authored-by: Christian Clauss * Updated and removed print statement removed the print from inorder function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + .../inorder_tree_traversal_2022.py | 83 +++++++++++++++++++ 2 files changed, 84 insertions(+) create mode 100644 data_structures/binary_tree/inorder_tree_traversal_2022.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 25272af4a708..2786e1f82de8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -153,6 +153,7 @@ * [Binary Tree Mirror](data_structures/binary_tree/binary_tree_mirror.py) * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) + * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) diff --git a/data_structures/binary_tree/inorder_tree_traversal_2022.py b/data_structures/binary_tree/inorder_tree_traversal_2022.py new file mode 100644 index 000000000000..08001738f53d --- /dev/null +++ b/data_structures/binary_tree/inorder_tree_traversal_2022.py @@ -0,0 +1,83 @@ +""" +Illustrate how to implement inorder traversal in binary search tree. 
+Author: Gurneet Singh +https://www.geeksforgeeks.org/tree-traversals-inorder-preorder-and-postorder/ +""" + + +class BinaryTreeNode: + """Defining the structure of BinaryTreeNode""" + + def __init__(self, data: int) -> None: + self.data = data + self.left_child: BinaryTreeNode | None = None + self.right_child: BinaryTreeNode | None = None + + +def insert(node: BinaryTreeNode | None, new_value: int) -> BinaryTreeNode | None: + """ + If the binary search tree is empty, make a new node and declare it as root. + >>> node_a = BinaryTreeNode(12345) + >>> node_b = insert(node_a, 67890) + >>> node_a.left_child == node_b.left_child + True + >>> node_a.right_child == node_b.right_child + True + >>> node_a.data == node_b.data + True + """ + if node is None: + node = BinaryTreeNode(new_value) + return node + + # binary search tree is not empty, + # so we will insert it into the tree + # if new_value is less than value of data in node, + # add it to left subtree and proceed recursively + if new_value < node.data: + node.left_child = insert(node.left_child, new_value) + else: + # if new_value is greater than value of data in node, + # add it to right subtree and proceed recursively + node.right_child = insert(node.right_child, new_value) + return node + + +def inorder(node: None | BinaryTreeNode) -> list[int]: # if node is None,return + """ + >>> inorder(make_tree()) + [6, 10, 14, 15, 20, 25, 60] + """ + if node: + inorder_array = inorder(node.left_child) + inorder_array = inorder_array + [node.data] + inorder_array = inorder_array + inorder(node.right_child) + else: + inorder_array = [] + return inorder_array + + +def make_tree() -> BinaryTreeNode | None: + + root = insert(None, 15) + insert(root, 10) + insert(root, 25) + insert(root, 6) + insert(root, 14) + insert(root, 20) + insert(root, 60) + return root + + +def main() -> None: + # main function + root = make_tree() + print("Printing values of binary search tree in Inorder Traversal.") + inorder(root) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() From d5a9f649b8279858add6fe6dd5a84af2f40a4cc9 Mon Sep 17 00:00:00 2001 From: Caeden Date: Thu, 13 Oct 2022 15:23:59 +0100 Subject: [PATCH 0538/1543] Add flake8-builtins to pre-commit and fix errors (#7105) Ignore `A003` Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Dhruv Manilawala --- .flake8 | 3 +++ .pre-commit-config.yaml | 2 +- arithmetic_analysis/gaussian_elimination.py | 6 +++--- arithmetic_analysis/jacobi_iteration_method.py | 6 +++--- audio_filters/show_response.py | 8 ++++---- backtracking/hamiltonian_cycle.py | 6 +++--- data_structures/binary_tree/avl_tree.py | 2 +- data_structures/linked_list/__init__.py | 2 +- .../linked_list/singly_linked_list.py | 4 ++-- data_structures/queue/double_ended_queue.py | 16 ++++++++-------- data_structures/stacks/next_greater_element.py | 12 ++++++------ digital_image_processing/index_calculation.py | 6 +++--- .../optimal_binary_search_tree.py | 6 +++--- graphs/a_star.py | 8 ++++---- graphs/dijkstra.py | 4 ++-- graphs/finding_bridges.py | 14 +++++++------- graphs/prim.py | 4 ++-- hashes/djb2.py | 6 +++--- hashes/sdbm.py | 8 +++++--- maths/armstrong_numbers.py | 12 ++++++------ maths/bailey_borwein_plouffe.py | 6 +++--- maths/kadanes.py | 8 ++++---- maths/prime_numbers.py | 14 +++++++------- maths/sum_of_arithmetic_series.py | 4 ++-- neural_network/2_hidden_layers_neural_network.py | 10 ++++++---- 
neural_network/convolution_neural_network.py | 10 +++++----- neural_network/perceptron.py | 4 ++-- project_euler/problem_065/sol1.py | 4 ++-- project_euler/problem_070/sol1.py | 6 +++--- sorts/odd_even_sort.py | 10 +++++----- strings/snake_case_to_camel_pascal_case.py | 8 ++++---- 31 files changed, 113 insertions(+), 106 deletions(-) create mode 100644 .flake8 diff --git a/.flake8 b/.flake8 new file mode 100644 index 000000000000..9a5863c9cd0b --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +extend-ignore = + A003 # Class attribute is shadowing a python builtin diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2f6a92814c66..e0de70b01883 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,7 +40,7 @@ repos: - --ignore=E203,W503 - --max-complexity=25 - --max-line-length=88 - additional_dependencies: [pep8-naming] + additional_dependencies: [flake8-builtins, pep8-naming] - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 diff --git a/arithmetic_analysis/gaussian_elimination.py b/arithmetic_analysis/gaussian_elimination.py index 89ed3b323d03..f0f20af8e417 100644 --- a/arithmetic_analysis/gaussian_elimination.py +++ b/arithmetic_analysis/gaussian_elimination.py @@ -33,11 +33,11 @@ def retroactive_resolution( x: NDArray[float64] = np.zeros((rows, 1), dtype=float) for row in reversed(range(rows)): - sum = 0 + total = 0 for col in range(row + 1, columns): - sum += coefficients[row, col] * x[col] + total += coefficients[row, col] * x[col] - x[row, 0] = (vector[row] - sum) / coefficients[row, row] + x[row, 0] = (vector[row] - total) / coefficients[row, row] return x diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py index 4336aaa91623..0aab4db20595 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -147,14 +147,14 @@ def strictly_diagonally_dominant(table: NDArray[float64]) -> bool: is_diagonally_dominant = True for i in range(0, rows): - sum = 0 + total = 0 for j in range(0, cols - 1): if i == j: continue else: - sum += table[i][j] + total += table[i][j] - if table[i][i] <= sum: + if table[i][i] <= total: raise ValueError("Coefficient matrix is not strictly diagonally dominant") return is_diagonally_dominant diff --git a/audio_filters/show_response.py b/audio_filters/show_response.py index 6e2731a58419..097b8152b4e6 100644 --- a/audio_filters/show_response.py +++ b/audio_filters/show_response.py @@ -34,7 +34,7 @@ def get_bounds( return lowest, highest -def show_frequency_response(filter: FilterType, samplerate: int) -> None: +def show_frequency_response(filter_type: FilterType, samplerate: int) -> None: """ Show frequency response of a filter @@ -45,7 +45,7 @@ def show_frequency_response(filter: FilterType, samplerate: int) -> None: size = 512 inputs = [1] + [0] * (size - 1) - outputs = [filter.process(item) for item in inputs] + outputs = [filter_type.process(item) for item in inputs] filler = [0] * (samplerate - size) # zero-padding outputs += filler @@ -66,7 +66,7 @@ def show_frequency_response(filter: FilterType, samplerate: int) -> None: plt.show() -def show_phase_response(filter: FilterType, samplerate: int) -> None: +def show_phase_response(filter_type: FilterType, samplerate: int) -> None: """ Show phase response of a filter @@ -77,7 +77,7 @@ def show_phase_response(filter: FilterType, samplerate: int) -> None: size = 512 inputs = [1] + [0] * (size - 1) - outputs = [filter.process(item) for item in inputs] + 
outputs = [filter_type.process(item) for item in inputs] filler = [0] * (samplerate - size) # zero-padding outputs += filler diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py index 500e993e5c8b..4c6ae46799f4 100644 --- a/backtracking/hamiltonian_cycle.py +++ b/backtracking/hamiltonian_cycle.py @@ -95,10 +95,10 @@ def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step - for next in range(0, len(graph)): - if valid_connection(graph, next, curr_ind, path): + for next_ver in range(0, len(graph)): + if valid_connection(graph, next_ver, curr_ind, path): # Insert current vertex into path as next transition - path[curr_ind] = next + path[curr_ind] = next_ver # Validate created path if util_hamilton_cycle(graph, path, curr_ind + 1): return True diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 2f4bd60d9749..320e7ed0d792 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -33,7 +33,7 @@ def pop(self) -> Any: def count(self) -> int: return self.tail - self.head - def print(self) -> None: + def print_queue(self) -> None: print(self.data) print("**************") print(self.data[self.head : self.tail]) diff --git a/data_structures/linked_list/__init__.py b/data_structures/linked_list/__init__.py index 6ba660231ae1..85660a6d2c27 100644 --- a/data_structures/linked_list/__init__.py +++ b/data_structures/linked_list/__init__.py @@ -11,7 +11,7 @@ class Node: - def __init__(self, item: Any, next: Any) -> None: + def __init__(self, item: Any, next: Any) -> None: # noqa: A002 self.item = item self.next = next diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index a4156b650776..59d7c512bad7 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -392,7 +392,7 @@ def test_singly_linked_list_2() -> None: This section of the test used varying data types for input. >>> test_singly_linked_list_2() """ - input = [ + test_input = [ -9, 100, Node(77345112), @@ -410,7 +410,7 @@ def test_singly_linked_list_2() -> None: ] linked_list = LinkedList() - for i in input: + for i in test_input: linked_list.insert_tail(i) # Check if it's empty or not diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index f38874788df1..7053879d4512 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -15,8 +15,8 @@ class Deque: ---------- append(val: Any) -> None appendleft(val: Any) -> None - extend(iter: Iterable) -> None - extendleft(iter: Iterable) -> None + extend(iterable: Iterable) -> None + extendleft(iterable: Iterable) -> None pop() -> Any popleft() -> Any Observers @@ -179,9 +179,9 @@ def appendleft(self, val: Any) -> None: # make sure there were no errors assert not self.is_empty(), "Error on appending value." - def extend(self, iter: Iterable[Any]) -> None: + def extend(self, iterable: Iterable[Any]) -> None: """ - Appends every value of iter to the end of the deque. + Appends every value of iterable to the end of the deque. 
Time complexity: O(n) >>> our_deque_1 = Deque([1, 2, 3]) >>> our_deque_1.extend([4, 5]) @@ -205,12 +205,12 @@ def extend(self, iter: Iterable[Any]) -> None: >>> list(our_deque_2) == list(deque_collections_2) True """ - for val in iter: + for val in iterable: self.append(val) - def extendleft(self, iter: Iterable[Any]) -> None: + def extendleft(self, iterable: Iterable[Any]) -> None: """ - Appends every value of iter to the beginning of the deque. + Appends every value of iterable to the beginning of the deque. Time complexity: O(n) >>> our_deque_1 = Deque([1, 2, 3]) >>> our_deque_1.extendleft([0, -1]) @@ -234,7 +234,7 @@ def extendleft(self, iter: Iterable[Any]) -> None: >>> list(our_deque_2) == list(deque_collections_2) True """ - for val in iter: + for val in iterable: self.appendleft(val) def pop(self) -> Any: diff --git a/data_structures/stacks/next_greater_element.py b/data_structures/stacks/next_greater_element.py index 5bab7c609b67..7d76d1f47dfa 100644 --- a/data_structures/stacks/next_greater_element.py +++ b/data_structures/stacks/next_greater_element.py @@ -17,12 +17,12 @@ def next_greatest_element_slow(arr: list[float]) -> list[float]: arr_size = len(arr) for i in range(arr_size): - next: float = -1 + next_element: float = -1 for j in range(i + 1, arr_size): if arr[i] < arr[j]: - next = arr[j] + next_element = arr[j] break - result.append(next) + result.append(next_element) return result @@ -36,12 +36,12 @@ def next_greatest_element_fast(arr: list[float]) -> list[float]: """ result = [] for i, outer in enumerate(arr): - next: float = -1 + next_item: float = -1 for inner in arr[i + 1 :]: if outer < inner: - next = inner + next_item = inner break - result.append(next) + result.append(next_item) return result diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index 2f8fdc066919..01cd79fc18ff 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -497,9 +497,9 @@ def s(self): https://www.indexdatabase.de/db/i-single.php?id=77 :return: index """ - max = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)]) - min = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)]) - return (max - min) / max + max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)]) + min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)]) + return (max_value - min_value) / max_value def _if(self): """ diff --git a/dynamic_programming/optimal_binary_search_tree.py b/dynamic_programming/optimal_binary_search_tree.py index 0d94c1b61d39..b4f1181ac11c 100644 --- a/dynamic_programming/optimal_binary_search_tree.py +++ b/dynamic_programming/optimal_binary_search_tree.py @@ -104,7 +104,7 @@ def find_optimal_binary_search_tree(nodes): dp = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)] # sum[i][j] stores the sum of key frequencies between i and j inclusive in nodes # array - sum = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)] + total = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)] # stores tree roots that will be used later for constructing binary search tree root = [[i if i == j else 0 for j in range(n)] for i in range(n)] @@ -113,14 +113,14 @@ def find_optimal_binary_search_tree(nodes): j = i + interval_length - 1 dp[i][j] = sys.maxsize # set the value to "infinity" - sum[i][j] = sum[i][j - 1] + freqs[j] + total[i][j] = total[i][j - 1] + freqs[j] # Apply Knuth's optimization # Loop 
without optimization: for r in range(i, j + 1): for r in range(root[i][j - 1], root[i + 1][j] + 1): # r is a temporal root left = dp[i][r - 1] if r != i else 0 # optimal cost for left subtree right = dp[r + 1][j] if r != j else 0 # optimal cost for right subtree - cost = left + sum[i][j] + right + cost = left + total[i][j] + right if dp[i][j] > cost: dp[i][j] = cost diff --git a/graphs/a_star.py b/graphs/a_star.py index e0f24734a4cb..793ba3bda6b2 100644 --- a/graphs/a_star.py +++ b/graphs/a_star.py @@ -40,10 +40,10 @@ def search( else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() - next = cell.pop() - x = next[2] - y = next[3] - g = next[1] + next_cell = cell.pop() + x = next_cell[2] + y = next_cell[3] + g = next_cell[1] if x == goal[0] and y == goal[1]: found = True diff --git a/graphs/dijkstra.py b/graphs/dijkstra.py index 62c60f2c6be6..b0bdfab60649 100644 --- a/graphs/dijkstra.py +++ b/graphs/dijkstra.py @@ -56,8 +56,8 @@ def dijkstra(graph, start, end): for v, c in graph[u]: if v in visited: continue - next = cost + c - heapq.heappush(heap, (next, v)) + next_item = cost + c + heapq.heappush(heap, (next_item, v)) return -1 diff --git a/graphs/finding_bridges.py b/graphs/finding_bridges.py index 3813c4ebbd2a..c17606745ad8 100644 --- a/graphs/finding_bridges.py +++ b/graphs/finding_bridges.py @@ -72,22 +72,22 @@ def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]: [] """ - id = 0 + id_ = 0 n = len(graph) # No of vertices in graph low = [0] * n visited = [False] * n - def dfs(at, parent, bridges, id): + def dfs(at, parent, bridges, id_): visited[at] = True - low[at] = id - id += 1 + low[at] = id_ + id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: - dfs(to, at, bridges, id) + dfs(to, at, bridges, id_) low[at] = min(low[at], low[to]) - if id <= low[to]: + if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at)) else: # This edge is a back edge and cannot be a bridge @@ -96,7 +96,7 @@ def dfs(at, parent, bridges, id): bridges: list[tuple[int, int]] = [] for i in range(n): if not visited[i]: - dfs(i, -1, bridges, id) + dfs(i, -1, bridges, id_) return bridges diff --git a/graphs/prim.py b/graphs/prim.py index 55d0fbfa8e96..6cb1a6def359 100644 --- a/graphs/prim.py +++ b/graphs/prim.py @@ -13,7 +13,7 @@ class Vertex: """Class Vertex.""" - def __init__(self, id): + def __init__(self, id_): """ Arguments: id - input an id to identify the vertex @@ -21,7 +21,7 @@ def __init__(self, id): neighbors - a list of the vertices it is linked to edges - a dict to store the edges's weight """ - self.id = str(id) + self.id = str(id_) self.key = None self.pi = None self.neighbors = [] diff --git a/hashes/djb2.py b/hashes/djb2.py index 2d1c9aabb1fb..4c84635098f2 100644 --- a/hashes/djb2.py +++ b/hashes/djb2.py @@ -29,7 +29,7 @@ def djb2(s: str) -> int: >>> djb2('scramble bits') 1609059040 """ - hash = 5381 + hash_value = 5381 for x in s: - hash = ((hash << 5) + hash) + ord(x) - return hash & 0xFFFFFFFF + hash_value = ((hash_value << 5) + hash_value) + ord(x) + return hash_value & 0xFFFFFFFF diff --git a/hashes/sdbm.py b/hashes/sdbm.py index daf292717f75..a5432874ba7d 100644 --- a/hashes/sdbm.py +++ b/hashes/sdbm.py @@ -31,7 +31,9 @@ def sdbm(plain_text: str) -> int: >>> sdbm('scramble bits') 730247649148944819640658295400555317318720608290373040936089 """ - hash = 0 + hash_value = 0 for plain_chr in plain_text: - hash = ord(plain_chr) + (hash << 6) + (hash << 16) - hash - return hash + hash_value = ( 
+ ord(plain_chr) + (hash_value << 6) + (hash_value << 16) - hash_value + ) + return hash_value diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py index 65aebe93722e..f62991b7415b 100644 --- a/maths/armstrong_numbers.py +++ b/maths/armstrong_numbers.py @@ -25,7 +25,7 @@ def armstrong_number(n: int) -> bool: return False # Initialization of sum and number of digits. - sum = 0 + total = 0 number_of_digits = 0 temp = n # Calculation of digits of the number @@ -36,9 +36,9 @@ def armstrong_number(n: int) -> bool: temp = n while temp > 0: rem = temp % 10 - sum += rem**number_of_digits + total += rem**number_of_digits temp //= 10 - return n == sum + return n == total def pluperfect_number(n: int) -> bool: @@ -55,7 +55,7 @@ def pluperfect_number(n: int) -> bool: # Init a "histogram" of the digits digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] digit_total = 0 - sum = 0 + total = 0 temp = n while temp > 0: temp, rem = divmod(temp, 10) @@ -63,9 +63,9 @@ def pluperfect_number(n: int) -> bool: digit_total += 1 for (cnt, i) in zip(digit_histogram, range(len(digit_histogram))): - sum += cnt * i**digit_total + total += cnt * i**digit_total - return n == sum + return n == total def narcissistic_number(n: int) -> bool: diff --git a/maths/bailey_borwein_plouffe.py b/maths/bailey_borwein_plouffe.py index b647ae56dbac..389b1566e9de 100644 --- a/maths/bailey_borwein_plouffe.py +++ b/maths/bailey_borwein_plouffe.py @@ -67,7 +67,7 @@ def _subsum( @param precision: same as precision in main function @return: floating-point number whose integer part is not important """ - sum = 0.0 + total = 0.0 for sum_index in range(digit_pos_to_extract + precision): denominator = 8 * sum_index + denominator_addend if sum_index < digit_pos_to_extract: @@ -79,8 +79,8 @@ def _subsum( ) else: exponential_term = pow(16, digit_pos_to_extract - 1 - sum_index) - sum += exponential_term / denominator - return sum + total += exponential_term / denominator + return total if __name__ == "__main__": diff --git a/maths/kadanes.py b/maths/kadanes.py index d239d4a2589b..b23409e2b978 100644 --- a/maths/kadanes.py +++ b/maths/kadanes.py @@ -14,13 +14,13 @@ def negative_exist(arr: list) -> int: [-2, 0, 0, 0, 0] """ arr = arr or [0] - max = arr[0] + max_number = arr[0] for i in arr: if i >= 0: return 0 - elif max <= i: - max = i - return max + elif max_number <= i: + max_number = i + return max_number def kadanes(arr: list) -> int: diff --git a/maths/prime_numbers.py b/maths/prime_numbers.py index 7be4d3d95b0e..4e076fe317b4 100644 --- a/maths/prime_numbers.py +++ b/maths/prime_numbers.py @@ -2,7 +2,7 @@ from collections.abc import Generator -def slow_primes(max: int) -> Generator[int, None, None]: +def slow_primes(max_n: int) -> Generator[int, None, None]: """ Return a list of all primes numbers up to max. >>> list(slow_primes(0)) @@ -20,7 +20,7 @@ def slow_primes(max: int) -> Generator[int, None, None]: >>> list(slow_primes(10000))[-1] 9973 """ - numbers: Generator = (i for i in range(1, (max + 1))) + numbers: Generator = (i for i in range(1, (max_n + 1))) for i in (n for n in numbers if n > 1): for j in range(2, i): if (i % j) == 0: @@ -29,7 +29,7 @@ def slow_primes(max: int) -> Generator[int, None, None]: yield i -def primes(max: int) -> Generator[int, None, None]: +def primes(max_n: int) -> Generator[int, None, None]: """ Return a list of all primes numbers up to max. 
>>> list(primes(0)) @@ -47,7 +47,7 @@ def primes(max: int) -> Generator[int, None, None]: >>> list(primes(10000))[-1] 9973 """ - numbers: Generator = (i for i in range(1, (max + 1))) + numbers: Generator = (i for i in range(1, (max_n + 1))) for i in (n for n in numbers if n > 1): # only need to check for factors up to sqrt(i) bound = int(math.sqrt(i)) + 1 @@ -58,7 +58,7 @@ def primes(max: int) -> Generator[int, None, None]: yield i -def fast_primes(max: int) -> Generator[int, None, None]: +def fast_primes(max_n: int) -> Generator[int, None, None]: """ Return a list of all primes numbers up to max. >>> list(fast_primes(0)) @@ -76,9 +76,9 @@ def fast_primes(max: int) -> Generator[int, None, None]: >>> list(fast_primes(10000))[-1] 9973 """ - numbers: Generator = (i for i in range(1, (max + 1), 2)) + numbers: Generator = (i for i in range(1, (max_n + 1), 2)) # It's useless to test even numbers as they will not be prime - if max > 2: + if max_n > 2: yield 2 # Because 2 will not be tested, it's necessary to yield it now for i in (n for n in numbers if n > 1): bound = int(math.sqrt(i)) + 1 diff --git a/maths/sum_of_arithmetic_series.py b/maths/sum_of_arithmetic_series.py index e0e22760bfbe..3e381b8c20a8 100644 --- a/maths/sum_of_arithmetic_series.py +++ b/maths/sum_of_arithmetic_series.py @@ -8,9 +8,9 @@ def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float >>> sum_of_series(1, 10, 100) 49600.0 """ - sum = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) + total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series - return sum + return total def main(): diff --git a/neural_network/2_hidden_layers_neural_network.py b/neural_network/2_hidden_layers_neural_network.py index 1cf78ec4c7c0..9c5772326165 100644 --- a/neural_network/2_hidden_layers_neural_network.py +++ b/neural_network/2_hidden_layers_neural_network.py @@ -182,7 +182,7 @@ def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None loss = numpy.mean(numpy.square(output - self.feedforward())) print(f"Iteration {iteration} Loss: {loss}") - def predict(self, input: numpy.ndarray) -> int: + def predict(self, input_arr: numpy.ndarray) -> int: """ Predict's the output for the given input values using the trained neural network. @@ -201,7 +201,7 @@ def predict(self, input: numpy.ndarray) -> int: """ # Input values for which the predictions are to be made. - self.array = input + self.array = input_arr self.layer_between_input_and_first_hidden_layer = sigmoid( numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights) @@ -264,7 +264,7 @@ def example() -> int: True """ # Input values. - input = numpy.array( + test_input = numpy.array( ( [0, 0, 0], [0, 0, 1], @@ -282,7 +282,9 @@ def example() -> int: output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64) # Calling neural network class. - neural_network = TwoHiddenLayerNeuralNetwork(input_array=input, output_array=output) + neural_network = TwoHiddenLayerNeuralNetwork( + input_array=test_input, output_array=output + ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. 
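Every rename in the hunks above and below targets flake8-builtins codes A001/A002: a local variable or argument was shadowing a Python builtin (`sum`, `max`, `input`, `id`, `next`, `filter`). A small sketch of the failure mode and of the two fixes this patch applies, a descriptive rename or a PEP 8 trailing underscore; the functions below are illustrative only and not taken from the repository:

def mean_shadowed(values: list[float]) -> float:
    sum = 0.0  # A001: rebinds `sum`, hiding the builtin for the rest of this scope
    for value in values:
        sum += value
    # calling sum(values) here would raise TypeError: 'float' object is not callable
    return sum / len(values)


def mean(values: list[float]) -> float:
    total = sum(values)  # renamed local, so the builtin stays callable
    return total / len(values)


def node_label(id_: int) -> str:  # A002 fix used above: trailing underscore per PEP 8
    return f"node-{id_}"
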
diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index bbade1c417d0..9dfb6d091412 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -140,24 +140,24 @@ def convolute(self, data, convs, w_convs, thre_convs, conv_step): focus_list = np.asarray(focus1_list) return focus_list, data_featuremap - def pooling(self, featuremaps, size_pooling, type="average_pool"): + def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"): # pooling process size_map = len(featuremaps[0]) size_pooled = int(size_map / size_pooling) featuremap_pooled = [] for i_map in range(len(featuremaps)): - map = featuremaps[i_map] + feature_map = featuremaps[i_map] map_pooled = [] for i_focus in range(0, size_map, size_pooling): for j_focus in range(0, size_map, size_pooling): - focus = map[ + focus = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] - if type == "average_pool": + if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(focus)) - elif type == "max_pooling": + elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(focus)) map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled) diff --git a/neural_network/perceptron.py b/neural_network/perceptron.py index 063be5ea554c..a2bfdb326d77 100644 --- a/neural_network/perceptron.py +++ b/neural_network/perceptron.py @@ -182,7 +182,7 @@ def sign(self, u: float) -> int: [0.2012, 0.2611, 5.4631], ] -exit = [ +target = [ -1, -1, -1, @@ -222,7 +222,7 @@ def sign(self, u: float) -> int: doctest.testmod() network = Perceptron( - sample=samples, target=exit, learning_rate=0.01, epoch_number=1000, bias=-1 + sample=samples, target=target, learning_rate=0.01, epoch_number=1000, bias=-1 ) network.training() print("Finished training perceptron") diff --git a/project_euler/problem_065/sol1.py b/project_euler/problem_065/sol1.py index 229769a77d07..0a00cf4773d7 100644 --- a/project_euler/problem_065/sol1.py +++ b/project_euler/problem_065/sol1.py @@ -71,7 +71,7 @@ def sum_digits(num: int) -> int: return digit_sum -def solution(max: int = 100) -> int: +def solution(max_n: int = 100) -> int: """ Returns the sum of the digits in the numerator of the max-th convergent of the continued fraction for e. @@ -86,7 +86,7 @@ def solution(max: int = 100) -> int: pre_numerator = 1 cur_numerator = 2 - for i in range(2, max + 1): + for i in range(2, max_n + 1): temp = pre_numerator e_cont = 2 * i // 3 if i % 3 == 0 else 1 pre_numerator = cur_numerator diff --git a/project_euler/problem_070/sol1.py b/project_euler/problem_070/sol1.py index d42b017cc476..273f37efc5fc 100644 --- a/project_euler/problem_070/sol1.py +++ b/project_euler/problem_070/sol1.py @@ -72,7 +72,7 @@ def has_same_digits(num1: int, num2: int) -> bool: return sorted(str(num1)) == sorted(str(num2)) -def solution(max: int = 10000000) -> int: +def solution(max_n: int = 10000000) -> int: """ Finds the value of n from 1 to max such that n/φ(n) produces a minimum. 
@@ -85,9 +85,9 @@ def solution(max: int = 10000000) -> int: min_numerator = 1 # i min_denominator = 0 # φ(i) - totients = get_totients(max + 1) + totients = get_totients(max_n + 1) - for i in range(2, max + 1): + for i in range(2, max_n + 1): t = totients[i] if i * min_denominator < min_numerator * t and has_same_digits(i, t): diff --git a/sorts/odd_even_sort.py b/sorts/odd_even_sort.py index 557337ee77bc..532f829499e8 100644 --- a/sorts/odd_even_sort.py +++ b/sorts/odd_even_sort.py @@ -20,21 +20,21 @@ def odd_even_sort(input_list: list) -> list: >>> odd_even_sort([1 ,2 ,3 ,4]) [1, 2, 3, 4] """ - sorted = False - while sorted is False: # Until all the indices are traversed keep looping - sorted = True + is_sorted = False + while is_sorted is False: # Until all the indices are traversed keep looping + is_sorted = True for i in range(0, len(input_list) - 1, 2): # iterating over all even indices if input_list[i] > input_list[i + 1]: input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i] # swapping if elements not in order - sorted = False + is_sorted = False for i in range(1, len(input_list) - 1, 2): # iterating over all odd indices if input_list[i] > input_list[i + 1]: input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i] # swapping if elements not in order - sorted = False + is_sorted = False return input_list diff --git a/strings/snake_case_to_camel_pascal_case.py b/strings/snake_case_to_camel_pascal_case.py index 7b2b61d1d1cf..eaabdcb87a0f 100644 --- a/strings/snake_case_to_camel_pascal_case.py +++ b/strings/snake_case_to_camel_pascal_case.py @@ -1,4 +1,4 @@ -def snake_to_camel_case(input: str, use_pascal: bool = False) -> str: +def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str: """ Transforms a snake_case given string to camelCase (or PascalCase if indicated) (defaults to not use Pascal) @@ -26,14 +26,14 @@ def snake_to_camel_case(input: str, use_pascal: bool = False) -> str: ValueError: Expected boolean as use_pascal parameter, found """ - if not isinstance(input, str): - raise ValueError(f"Expected string as input, found {type(input)}") + if not isinstance(input_str, str): + raise ValueError(f"Expected string as input, found {type(input_str)}") if not isinstance(use_pascal, bool): raise ValueError( f"Expected boolean as use_pascal parameter, found {type(use_pascal)}" ) - words = input.split("_") + words = input_str.split("_") start_index = 0 if use_pascal else 1 From f176786d12ead5796644a9b37d96786cdaa55391 Mon Sep 17 00:00:00 2001 From: Praveen Date: Thu, 13 Oct 2022 21:04:52 +0530 Subject: [PATCH 0539/1543] Update open_google_results.py (#7085) * update crawl_google_results.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename crawl_google_results.py to open_google_results.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create crawl_google_results.py * Update web_programming/open_google_results.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update open_google_results.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- web_programming/open_google_results.py | 42 ++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 web_programming/open_google_results.py diff --git a/web_programming/open_google_results.py b/web_programming/open_google_results.py 
new file mode 100644 index 000000000000..0e1dba8c5856 --- /dev/null +++ b/web_programming/open_google_results.py @@ -0,0 +1,42 @@ +import webbrowser +from sys import argv +from urllib.parse import quote, parse_qs +from fake_useragent import UserAgent + +import requests +from bs4 import BeautifulSoup + +if __name__ == "__main__": + if len(argv) > 1: + query = "%20".join(argv[1:]) + else: + query = quote(str(input("Search: "))) + + print("Googling.....") + + url = f"https://www.google.com/search?q={query}&num=100" + + res = requests.get( + url, + headers={ + "User-Agent": str(UserAgent().random) + }, + ) + + try: + link = ( + BeautifulSoup(res.text, "html.parser") + .find("div", attrs={"class": "yuRUbf"}) + .find("a") + .get("href") + ) + + except AttributeError: + link = parse_qs( + BeautifulSoup(res.text, "html.parser") + .find("div", attrs={"class": "kCrYT"}) + .find("a") + .get("href") + )["url"][0] + + webbrowser.open(link) From 4d0c830d2c7a4a535501887a8eb97966a370ef57 Mon Sep 17 00:00:00 2001 From: Caeden Date: Thu, 13 Oct 2022 17:03:06 +0100 Subject: [PATCH 0540/1543] Add flake8 pluin flake8 bugbear to pre-commit (#7132) * ci(pre-commit): Add ``flake8-builtins`` additional dependency to ``pre-commit`` (#7104) * refactor: Fix ``flake8-builtins`` (#7104) * fix(lru_cache): Fix naming conventions in docstrings (#7104) * ci(pre-commit): Order additional dependencies alphabetically (#7104) * fix(lfu_cache): Correct function name in docstring (#7104) * Update strings/snake_case_to_camel_pascal_case.py Co-authored-by: Christian Clauss * Update data_structures/stacks/next_greater_element.py Co-authored-by: Christian Clauss * Update digital_image_processing/index_calculation.py Co-authored-by: Christian Clauss * Update graphs/prim.py Co-authored-by: Christian Clauss * Update hashes/djb2.py Co-authored-by: Christian Clauss * refactor: Rename `_builtin` to `builtin_` ( #7104) * fix: Rename all instances (#7104) * refactor: Update variable names (#7104) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ci: Create ``tox.ini`` and ignore ``A003`` (#7123) * revert: Remove function name changes (#7104) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename tox.ini to .flake8 * Update data_structures/heap/heap.py Co-authored-by: Dhruv Manilawala * refactor: Rename `next_` to `next_item` (#7104) * ci(pre-commit): Add `flake8` plugin `flake8-bugbear` (#7127) * refactor: Follow `flake8-bugbear` plugin (#7127) * fix: Correct `knapsack` code (#7127) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Dhruv Manilawala --- .pre-commit-config.yaml | 5 +++- .../jacobi_iteration_method.py | 2 +- .../newton_forward_interpolation.py | 2 +- arithmetic_analysis/secant_method.py | 2 +- audio_filters/butterworth_filter.py | 23 +++++++++++++------ backtracking/sum_of_subsets.py | 8 +++---- boolean_algebra/quine_mc_cluskey.py | 2 +- ciphers/mixed_keyword_cypher.py | 2 +- ciphers/rabin_miller.py | 2 +- compression/burrows_wheeler.py | 2 +- .../binary_search_tree_recursive.py | 8 +++---- .../linked_list/circular_linked_list.py | 8 +++---- .../linked_list/doubly_linked_list.py | 8 +++---- .../middle_element_of_linked_list.py | 2 +- .../linked_list/singly_linked_list.py | 6 ++--- data_structures/linked_list/skip_list.py | 4 
++-- data_structures/queue/queue_on_list.py | 2 +- .../queue/queue_on_pseudo_stack.py | 2 +- data_structures/stacks/stack.py | 6 ++--- divide_and_conquer/convex_hull.py | 8 +++---- .../strassen_matrix_multiplication.py | 6 ++--- dynamic_programming/all_construct.py | 2 +- dynamic_programming/knapsack.py | 10 ++++---- fractals/julia_sets.py | 2 +- fractals/koch_snowflake.py | 2 +- fractals/mandelbrot.py | 2 +- genetic_algorithm/basic_string.py | 7 ++++-- graphs/basic_graphs.py | 4 ++-- graphs/bellman_ford.py | 2 +- graphs/dijkstra_2.py | 18 +++++++-------- graphs/frequent_pattern_graph_miner.py | 2 +- graphs/kahns_algorithm_long.py | 2 +- graphs/kahns_algorithm_topo.py | 2 +- graphs/minimum_spanning_tree_prims.py | 2 +- graphs/page_rank.py | 2 +- graphs/scc_kosaraju.py | 4 ++-- greedy_methods/optimal_merge_pattern.py | 2 +- hashes/chaos_machine.py | 2 +- hashes/enigma_machine.py | 2 +- machine_learning/self_organizing_map.py | 2 +- maths/area_under_curve.py | 2 +- maths/line_length.py | 2 +- maths/lucas_lehmer_primality_test.py | 2 +- maths/lucas_series.py | 2 +- maths/miller_rabin.py | 2 +- maths/monte_carlo_dice.py | 2 +- maths/numerical_integration.py | 2 +- maths/pi_monte_carlo_estimation.py | 2 +- maths/pollard_rho.py | 2 +- maths/primelib.py | 8 +++---- maths/proth_number.py | 2 +- maths/square_root.py | 2 +- maths/ugly_numbers.py | 2 +- matrix/matrix_class.py | 2 +- ...h_fibonacci_using_matrix_exponentiation.py | 2 +- .../back_propagation_neural_network.py | 2 +- neural_network/perceptron.py | 2 +- other/lfu_cache.py | 2 +- other/lru_cache.py | 2 +- other/magicdiamondpattern.py | 8 +++---- other/scoring_algorithm.py | 2 +- physics/lorentz_transformation_four_vector.py | 2 +- physics/n_body_simulation.py | 2 +- project_euler/problem_011/sol2.py | 2 +- project_euler/problem_025/sol3.py | 2 +- project_euler/problem_026/sol1.py | 2 +- project_euler/problem_188/sol1.py | 2 +- project_euler/problem_203/sol1.py | 2 +- scheduling/multi_level_feedback_queue.py | 2 +- sorts/double_sort.py | 2 +- web_programming/open_google_results.py | 8 +++---- 71 files changed, 137 insertions(+), 124 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e0de70b01883..d2558b90abb1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,7 +40,10 @@ repos: - --ignore=E203,W503 - --max-complexity=25 - --max-line-length=88 - additional_dependencies: [flake8-builtins, pep8-naming] + additional_dependencies: + - flake8-bugbear + - flake8-builtins + - pep8-naming - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py index 0aab4db20595..3087309e8c3d 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -110,7 +110,7 @@ def jacobi_iteration_method( strictly_diagonally_dominant(table) # Iterates the whole matrix for given number of times - for i in range(iterations): + for _ in range(iterations): new_val = [] for row in range(rows): temp = 0 diff --git a/arithmetic_analysis/newton_forward_interpolation.py b/arithmetic_analysis/newton_forward_interpolation.py index 490e0687f15f..466f6c18cf59 100644 --- a/arithmetic_analysis/newton_forward_interpolation.py +++ b/arithmetic_analysis/newton_forward_interpolation.py @@ -23,7 +23,7 @@ def ucal(u: float, p: int) -> float: def main() -> None: n = int(input("enter the numbers of values: ")) y: list[list[float]] = [] - for i in range(n): + for _ in 
range(n): y.append([]) for i in range(n): for j in range(n): diff --git a/arithmetic_analysis/secant_method.py b/arithmetic_analysis/secant_method.py index 45bcb185fc3e..d28a46206d40 100644 --- a/arithmetic_analysis/secant_method.py +++ b/arithmetic_analysis/secant_method.py @@ -20,7 +20,7 @@ def secant_method(lower_bound: float, upper_bound: float, repeats: int) -> float """ x0 = lower_bound x1 = upper_bound - for i in range(0, repeats): + for _ in range(0, repeats): x0, x1 = x1, x1 - (f(x1) * (x1 - x0)) / (f(x1) - f(x0)) return x1 diff --git a/audio_filters/butterworth_filter.py b/audio_filters/butterworth_filter.py index 409cfeb1d95c..cffedb7a68fd 100644 --- a/audio_filters/butterworth_filter.py +++ b/audio_filters/butterworth_filter.py @@ -11,7 +11,7 @@ def make_lowpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) + frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 ) -> IIRFilter: """ Creates a low-pass filter @@ -39,7 +39,7 @@ def make_lowpass( def make_highpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) + frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 ) -> IIRFilter: """ Creates a high-pass filter @@ -67,7 +67,7 @@ def make_highpass( def make_bandpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) + frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 ) -> IIRFilter: """ Creates a band-pass filter @@ -96,7 +96,7 @@ def make_bandpass( def make_allpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) + frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 ) -> IIRFilter: """ Creates an all-pass filter @@ -121,7 +121,10 @@ def make_allpass( def make_peak( - frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2) + frequency: int, + samplerate: int, + gain_db: float, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates a peak filter @@ -150,7 +153,10 @@ def make_peak( def make_lowshelf( - frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2) + frequency: int, + samplerate: int, + gain_db: float, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates a low-shelf filter @@ -184,7 +190,10 @@ def make_lowshelf( def make_highshelf( - frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2) + frequency: int, + samplerate: int, + gain_db: float, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates a high-shelf filter diff --git a/backtracking/sum_of_subsets.py b/backtracking/sum_of_subsets.py index 8348544c0175..128e290718cd 100644 --- a/backtracking/sum_of_subsets.py +++ b/backtracking/sum_of_subsets.py @@ -39,14 +39,14 @@ def create_state_space_tree( if sum(path) == max_sum: result.append(path) return - for num_index in range(num_index, len(nums)): + for index in range(num_index, len(nums)): create_state_space_tree( nums, max_sum, - num_index + 1, - path + [nums[num_index]], + index + 1, + path + [nums[index]], result, - remaining_nums_sum - nums[num_index], + remaining_nums_sum - nums[index], ) diff --git a/boolean_algebra/quine_mc_cluskey.py b/boolean_algebra/quine_mc_cluskey.py index 9aa9b10c8429..5bd7117bb3e7 100644 --- a/boolean_algebra/quine_mc_cluskey.py +++ b/boolean_algebra/quine_mc_cluskey.py @@ -56,7 +56,7 @@ def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[st temp = [] for minterm in minterms: string = "" - for i in range(no_of_variable): + for _ 
in range(no_of_variable): string = str(minterm % 2) + string minterm //= 2 temp.append(string) diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index 178902173477..f55c9c4286df 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -40,7 +40,7 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: k = 0 for _ in range(r): s = [] - for j in range(len_temp): + for _ in range(len_temp): s.append(temp[k]) if not (k < 25): break diff --git a/ciphers/rabin_miller.py b/ciphers/rabin_miller.py index 0aab80eb9175..410d559d4315 100644 --- a/ciphers/rabin_miller.py +++ b/ciphers/rabin_miller.py @@ -11,7 +11,7 @@ def rabin_miller(num: int) -> bool: s = s // 2 t += 1 - for trials in range(5): + for _ in range(5): a = random.randrange(2, num - 1) v = pow(a, s, num) if v != 1: diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py index 4ad99a642e49..0916b8a654d2 100644 --- a/compression/burrows_wheeler.py +++ b/compression/burrows_wheeler.py @@ -154,7 +154,7 @@ def reverse_bwt(bwt_string: str, idx_original_string: int) -> str: ) ordered_rotations = [""] * len(bwt_string) - for x in range(len(bwt_string)): + for _ in range(len(bwt_string)): for i in range(len(bwt_string)): ordered_rotations[i] = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index 0d0ac8fd1e22..97eb8e25bedd 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -357,7 +357,7 @@ def test_put(self) -> None: assert t.root.left.left.parent == t.root.left assert t.root.left.left.label == 1 - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017 t.put(1) def test_search(self) -> None: @@ -369,7 +369,7 @@ def test_search(self) -> None: node = t.search(13) assert node.label == 13 - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017 t.search(2) def test_remove(self) -> None: @@ -515,7 +515,7 @@ def test_get_max_label(self) -> None: assert t.get_max_label() == 14 t.empty() - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017 t.get_max_label() def test_get_min_label(self) -> None: @@ -524,7 +524,7 @@ def test_get_min_label(self) -> None: assert t.get_min_label() == 1 t.empty() - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017 t.get_min_label() def test_inorder_traversal(self) -> None: diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 6fec0a12542f..67a63cd55e19 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -94,25 +94,25 @@ def test_circular_linked_list() -> None: try: circular_linked_list.delete_front() - assert False # This should not happen + raise AssertionError() # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() - assert False # This should not happen + raise AssertionError() # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1) - assert False + raise AssertionError() except IndexError: assert True try: circular_linked_list.delete_nth(0) - assert False + raise AssertionError() except IndexError: 
assert True diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 0eb3cf101a3e..9e996ef0fb9d 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -96,7 +96,7 @@ def insert_at_nth(self, index: int, data): self.tail = new_node else: temp = self.head - for i in range(0, index): + for _ in range(0, index): temp = temp.next temp.previous.next = new_node new_node.previous = temp.previous @@ -145,7 +145,7 @@ def delete_at_nth(self, index: int): self.tail.next = None else: temp = self.head - for i in range(0, index): + for _ in range(0, index): temp = temp.next delete_node = temp temp.next.previous = temp.previous @@ -194,13 +194,13 @@ def test_doubly_linked_list() -> None: try: linked_list.delete_head() - assert False # This should not happen. + raise AssertionError() # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() - assert False # This should not happen. + raise AssertionError() # This should not happen. except IndexError: assert True # This should happen. diff --git a/data_structures/linked_list/middle_element_of_linked_list.py b/data_structures/linked_list/middle_element_of_linked_list.py index 0c6250f3b731..86dad6b41d73 100644 --- a/data_structures/linked_list/middle_element_of_linked_list.py +++ b/data_structures/linked_list/middle_element_of_linked_list.py @@ -62,7 +62,7 @@ def middle_element(self) -> int | None: if __name__ == "__main__": link = LinkedList() - for i in range(int(input().strip())): + for _ in range(int(input().strip())): data = int(input().strip()) link.push(data) print(link.middle_element()) diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index 59d7c512bad7..89a05ae81d4c 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -132,7 +132,7 @@ def __setitem__(self, index: int, data: Any) -> None: if not 0 <= index < len(self): raise ValueError("list index out of range.") current = self.head - for i in range(index): + for _ in range(index): current = current.next current.data = data @@ -352,13 +352,13 @@ def test_singly_linked_list() -> None: try: linked_list.delete_head() - assert False # This should not happen. + raise AssertionError() # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() - assert False # This should not happen. + raise AssertionError() # This should not happen. except IndexError: assert True # This should happen. diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index 176049120aab..a667e3e9bc84 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -205,7 +205,7 @@ def insert(self, key: KT, value: VT): if level > self.level: # After level increase we have to add additional nodes to head. 
- for i in range(self.level - 1, level): + for _ in range(self.level - 1, level): update_vector.append(self.head) self.level = level @@ -407,7 +407,7 @@ def is_sorted(lst): def pytests(): - for i in range(100): + for _ in range(100): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() diff --git a/data_structures/queue/queue_on_list.py b/data_structures/queue/queue_on_list.py index 485cf0b6f7a3..71fca6b2f5f4 100644 --- a/data_structures/queue/queue_on_list.py +++ b/data_structures/queue/queue_on_list.py @@ -37,7 +37,7 @@ def get(self): number of times to rotate queue""" def rotate(self, rotation): - for i in range(rotation): + for _ in range(rotation): self.put(self.get()) """Enqueues {@code item} diff --git a/data_structures/queue/queue_on_pseudo_stack.py b/data_structures/queue/queue_on_pseudo_stack.py index 9a0c16f61eb4..d9845100008e 100644 --- a/data_structures/queue/queue_on_pseudo_stack.py +++ b/data_structures/queue/queue_on_pseudo_stack.py @@ -37,7 +37,7 @@ def get(self) -> Any: number of times to rotate queue""" def rotate(self, rotation: int) -> None: - for i in range(rotation): + for _ in range(rotation): temp = self.stack[0] self.stack = self.stack[1:] self.put(temp) diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index d1c73df43067..55d424d5018b 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -92,13 +92,13 @@ def test_stack() -> None: try: _ = stack.pop() - assert False # This should not happen + raise AssertionError() # This should not happen except StackUnderflowError: assert True # This should happen try: _ = stack.peek() - assert False # This should not happen + raise AssertionError() # This should not happen except StackUnderflowError: assert True # This should happen @@ -118,7 +118,7 @@ def test_stack() -> None: try: stack.push(200) - assert False # This should not happen + raise AssertionError() # This should not happen except StackOverflowError: assert True # This should happen diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index 72da116398a9..39e78be04a71 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -458,16 +458,16 @@ def convex_hull_melkman(points: list[Point]) -> list[Point]: convex_hull[1] = points[i] i += 1 - for i in range(i, n): + for j in range(i, n): if ( - _det(convex_hull[0], convex_hull[-1], points[i]) > 0 + _det(convex_hull[0], convex_hull[-1], points[j]) > 0 and _det(convex_hull[-1], convex_hull[0], points[1]) < 0 ): # The point lies within the convex hull continue - convex_hull.insert(0, points[i]) - convex_hull.append(points[i]) + convex_hull.insert(0, points[j]) + convex_hull.append(points[j]) while _det(convex_hull[0], convex_hull[1], convex_hull[2]) >= 0: del convex_hull[1] while _det(convex_hull[-1], convex_hull[-2], convex_hull[-3]) <= 0: diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py index 17efcfc7c8ee..0ee426e4b39a 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -132,12 +132,12 @@ def strassen(matrix1: list, matrix2: list) -> list: # power of 2 for i in range(0, maxim): if i < dimension1[0]: - for j in range(dimension1[1], maxim): + for _ in range(dimension1[1], maxim): new_matrix1[i].append(0) else: new_matrix1.append([0] * maxim) if i < dimension2[0]: - for j in range(dimension2[1], 
maxim): + for _ in range(dimension2[1], maxim): new_matrix2[i].append(0) else: new_matrix2.append([0] * maxim) @@ -147,7 +147,7 @@ def strassen(matrix1: list, matrix2: list) -> list: # Removing the additional zeros for i in range(0, maxim): if i < dimension1[0]: - for j in range(dimension2[1], maxim): + for _ in range(dimension2[1], maxim): final_matrix[i].pop() else: final_matrix.pop() diff --git a/dynamic_programming/all_construct.py b/dynamic_programming/all_construct.py index 5ffed2caa182..3839d01e6db0 100644 --- a/dynamic_programming/all_construct.py +++ b/dynamic_programming/all_construct.py @@ -21,7 +21,7 @@ def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[ table_size: int = len(target) + 1 table: list[list[list[str]]] = [] - for i in range(table_size): + for _ in range(table_size): table.append([]) # seed value table[0] = [[]] # because empty string has empty combination diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py index 9efb60bab98b..093e15f49ba0 100644 --- a/dynamic_programming/knapsack.py +++ b/dynamic_programming/knapsack.py @@ -30,13 +30,13 @@ def knapsack(w, wt, val, n): dp = [[0 for i in range(w + 1)] for j in range(n + 1)] for i in range(1, n + 1): - for w in range(1, w + 1): - if wt[i - 1] <= w: - dp[i][w] = max(val[i - 1] + dp[i - 1][w - wt[i - 1]], dp[i - 1][w]) + for w_ in range(1, w + 1): + if wt[i - 1] <= w_: + dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_]) else: - dp[i][w] = dp[i - 1][w] + dp[i][w_] = dp[i - 1][w_] - return dp[n][w], dp + return dp[n][w_], dp def knapsack_with_example_solution(w: int, wt: list, val: list): diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index f273943851fc..28c675c750bc 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -118,7 +118,7 @@ def iterate_function( """ z_n = z_0.astype("complex64") - for i in range(nb_iterations): + for _ in range(nb_iterations): z_n = eval_function(function_params, z_n) if infinity is not None: numpy.nan_to_num(z_n, copy=False, nan=infinity) diff --git a/fractals/koch_snowflake.py b/fractals/koch_snowflake.py index 07c1835b41ed..b0aaa86b11d8 100644 --- a/fractals/koch_snowflake.py +++ b/fractals/koch_snowflake.py @@ -46,7 +46,7 @@ def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndar 0.28867513]), array([0.66666667, 0. 
]), array([1, 0])] """ vectors = initial_vectors - for i in range(steps): + for _ in range(steps): vectors = iteration_step(vectors) return vectors diff --git a/fractals/mandelbrot.py b/fractals/mandelbrot.py index 5d61b72e172f..f97bcd17031c 100644 --- a/fractals/mandelbrot.py +++ b/fractals/mandelbrot.py @@ -36,7 +36,7 @@ def get_distance(x: float, y: float, max_step: int) -> float: """ a = x b = y - for step in range(max_step): + for step in range(max_step): # noqa: B007 a_new = a * a - b * b + x b = 2 * a * b + y a = a_new diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 97dbe182bc82..bd7d8026866c 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -80,7 +80,7 @@ def evaluate(item: str, main_target: str = target) -> tuple[str, float]: score = len( [g for position, g in enumerate(item) if g == main_target[position]] ) - return (item, float(score)) + return (item, float(score)) # noqa: B023 # Adding a bit of concurrency can make everything faster, # @@ -129,7 +129,10 @@ def select(parent_1: tuple[str, float]) -> list[str]: child_n = int(parent_1[1] * 100) + 1 child_n = 10 if child_n >= 10 else child_n for _ in range(child_n): - parent_2 = population_score[random.randint(0, N_SELECTED)][0] + parent_2 = population_score[ # noqa: B023 + random.randint(0, N_SELECTED) + ][0] + child_1, child_2 = crossover(parent_1[0], parent_2) # Append new string to the population list pop.append(mutate(child_1)) diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index b02e9af65846..298a97bf0e17 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -188,7 +188,7 @@ def topo(g, ind=None, q=None): def adjm(): n = input().strip() a = [] - for i in range(n): + for _ in range(n): a.append(map(int, input().strip().split())) return a, n @@ -264,7 +264,7 @@ def prim(g, s): def edglist(): n, m = map(int, input().split(" ")) edges = [] - for i in range(m): + for _ in range(m): edges.append(map(int, input().split(" "))) return edges, n diff --git a/graphs/bellman_ford.py b/graphs/bellman_ford.py index 0f654a510b59..eb2cd25bf682 100644 --- a/graphs/bellman_ford.py +++ b/graphs/bellman_ford.py @@ -36,7 +36,7 @@ def bellman_ford( distance = [float("inf")] * vertex_count distance[src] = 0.0 - for i in range(vertex_count - 1): + for _ in range(vertex_count - 1): for j in range(edge_count): u, v, w = (graph[j][k] for k in ["src", "dst", "weight"]) diff --git a/graphs/dijkstra_2.py b/graphs/dijkstra_2.py index 3170765bc8a8..f548463ff7bd 100644 --- a/graphs/dijkstra_2.py +++ b/graphs/dijkstra_2.py @@ -19,23 +19,23 @@ def min_dist(mdist, vset, v): def dijkstra(graph, v, src): - mdist = [float("inf") for i in range(v)] - vset = [False for i in range(v)] + mdist = [float("inf") for _ in range(v)] + vset = [False for _ in range(v)] mdist[src] = 0.0 - for i in range(v - 1): + for _ in range(v - 1): u = min_dist(mdist, vset, v) vset[u] = True - for v in range(v): + for i in range(v): if ( - (not vset[v]) - and graph[u][v] != float("inf") - and mdist[u] + graph[u][v] < mdist[v] + (not vset[i]) + and graph[u][i] != float("inf") + and mdist[u] + graph[u][i] < mdist[i] ): - mdist[v] = mdist[u] + graph[u][v] + mdist[i] = mdist[u] + graph[u][i] - print_dist(mdist, v) + print_dist(mdist, i) if __name__ == "__main__": diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index 50081afa6728..a5ecbe6e8223 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ 
-79,7 +79,7 @@ def get_nodes(frequency_table): {'11111': ['ab', 'ac', 'df', 'bd', 'bc']} """ nodes = {} - for i, item in enumerate(frequency_table): + for _, item in enumerate(frequency_table): nodes.setdefault(item[2], []).append(item[0]) return nodes diff --git a/graphs/kahns_algorithm_long.py b/graphs/kahns_algorithm_long.py index 776ae3a2f903..63cbeb909a8a 100644 --- a/graphs/kahns_algorithm_long.py +++ b/graphs/kahns_algorithm_long.py @@ -4,7 +4,7 @@ def longest_distance(graph): queue = [] long_dist = [1] * len(graph) - for key, values in graph.items(): + for values in graph.values(): for i in values: indegree[i] += 1 diff --git a/graphs/kahns_algorithm_topo.py b/graphs/kahns_algorithm_topo.py index 6879b047fe35..b1260bd5bd9b 100644 --- a/graphs/kahns_algorithm_topo.py +++ b/graphs/kahns_algorithm_topo.py @@ -8,7 +8,7 @@ def topological_sort(graph): topo = [] cnt = 0 - for key, values in graph.items(): + for values in graph.values(): for i in values: indegree[i] += 1 diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 9b2c645f16df..5b2eaa4bff40 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -91,7 +91,7 @@ def delete_minimum(heap, positions): distance_tv[x[0]] = x[1] heapify(distance_tv, positions) - for i in range(1, len(l)): + for _ in range(1, len(l)): vertex = delete_minimum(distance_tv, positions) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex)) diff --git a/graphs/page_rank.py b/graphs/page_rank.py index 672405b7345b..e1af35b34749 100644 --- a/graphs/page_rank.py +++ b/graphs/page_rank.py @@ -41,7 +41,7 @@ def page_rank(nodes, limit=3, d=0.85): for i in range(limit): print(f"======= Iteration {i + 1} =======") - for j, node in enumerate(nodes): + for _, node in enumerate(nodes): ranks[node.name] = (1 - d) + d * sum( ranks[ib] / outbounds[ib] for ib in node.inbound ) diff --git a/graphs/scc_kosaraju.py b/graphs/scc_kosaraju.py index ea9d35282858..39211c64b687 100644 --- a/graphs/scc_kosaraju.py +++ b/graphs/scc_kosaraju.py @@ -39,10 +39,10 @@ def kosaraju(): # n - no of nodes, m - no of edges n, m = list(map(int, input().strip().split())) - graph: list[list[int]] = [[] for i in range(n)] # graph + graph: list[list[int]] = [[] for _ in range(n)] # graph reversed_graph: list[list[int]] = [[] for i in range(n)] # reversed graph # input graph data (edges) - for i in range(m): + for _ in range(m): u, v = list(map(int, input().strip().split())) graph[u].append(v) reversed_graph[v].append(u) diff --git a/greedy_methods/optimal_merge_pattern.py b/greedy_methods/optimal_merge_pattern.py index 911e1966f3b9..a1c934f84498 100644 --- a/greedy_methods/optimal_merge_pattern.py +++ b/greedy_methods/optimal_merge_pattern.py @@ -41,7 +41,7 @@ def optimal_merge_pattern(files: list) -> float: while len(files) > 1: temp = 0 # Consider two files with minimum cost to be merged - for i in range(2): + for _ in range(2): min_index = files.index(min(files)) temp += files[min_index] files.pop(min_index) diff --git a/hashes/chaos_machine.py b/hashes/chaos_machine.py index 69313fbb2065..238fdb1c0634 100644 --- a/hashes/chaos_machine.py +++ b/hashes/chaos_machine.py @@ -53,7 +53,7 @@ def xorshift(x, y): key = machine_time % m # Evolution (Time Length) - for i in range(0, t): + for _ in range(0, t): # Variables (Position + Parameters) r = params_space[key] value = buffer_space[key] diff --git a/hashes/enigma_machine.py b/hashes/enigma_machine.py index b0d45718e286..0194f7da7d6f 100644 --- 
a/hashes/enigma_machine.py +++ b/hashes/enigma_machine.py @@ -48,7 +48,7 @@ def engine(input_character): break except Exception as error: print(error) - for i in range(token): + for _ in range(token): rotator() for j in decode: engine(j) diff --git a/machine_learning/self_organizing_map.py b/machine_learning/self_organizing_map.py index bd3d388f910f..057c2a76b8ac 100644 --- a/machine_learning/self_organizing_map.py +++ b/machine_learning/self_organizing_map.py @@ -47,7 +47,7 @@ def main() -> None: epochs = 3 alpha = 0.5 - for i in range(epochs): + for _ in range(epochs): for j in range(len(training_samples)): # training sample diff --git a/maths/area_under_curve.py b/maths/area_under_curve.py index d345398b4c2c..b557b2029657 100644 --- a/maths/area_under_curve.py +++ b/maths/area_under_curve.py @@ -35,7 +35,7 @@ def trapezoidal_area( x1 = x_start fx1 = fnc(x_start) area = 0.0 - for i in range(steps): + for _ in range(steps): # Approximates small segments of curve as linear and solve # for trapezoidal area x2 = (x_end - x_start) / steps + x1 diff --git a/maths/line_length.py b/maths/line_length.py index ad12a816b93e..ea27ee904a24 100644 --- a/maths/line_length.py +++ b/maths/line_length.py @@ -40,7 +40,7 @@ def line_length( fx1 = fnc(x_start) length = 0.0 - for i in range(steps): + for _ in range(steps): # Approximates curve as a sequence of linear lines and sums their length x2 = (x_end - x_start) / steps + x1 diff --git a/maths/lucas_lehmer_primality_test.py b/maths/lucas_lehmer_primality_test.py index 916abfcc175e..0a5621aacd79 100644 --- a/maths/lucas_lehmer_primality_test.py +++ b/maths/lucas_lehmer_primality_test.py @@ -31,7 +31,7 @@ def lucas_lehmer_test(p: int) -> bool: s = 4 m = (1 << p) - 1 - for i in range(p - 2): + for _ in range(p - 2): s = ((s * s) - 2) % m return s == 0 diff --git a/maths/lucas_series.py b/maths/lucas_series.py index 6b32c2022e13..cae6c2815aec 100644 --- a/maths/lucas_series.py +++ b/maths/lucas_series.py @@ -50,7 +50,7 @@ def dynamic_lucas_number(n_th_number: int) -> int: if not isinstance(n_th_number, int): raise TypeError("dynamic_lucas_number accepts only integer arguments.") a, b = 2, 1 - for i in range(n_th_number): + for _ in range(n_th_number): a, b = b, a + b return a diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py index b4dfed1290de..9f2668dbab14 100644 --- a/maths/miller_rabin.py +++ b/maths/miller_rabin.py @@ -33,7 +33,7 @@ def is_prime_big(n, prec=1000): b = bin_exp_mod(a, d, n) if b != 1: flag = True - for i in range(exp): + for _ in range(exp): if b == n - 1: flag = False break diff --git a/maths/monte_carlo_dice.py b/maths/monte_carlo_dice.py index 17cedbdbcb18..c4150b88f6cc 100644 --- a/maths/monte_carlo_dice.py +++ b/maths/monte_carlo_dice.py @@ -35,7 +35,7 @@ def throw_dice(num_throws: int, num_dice: int = 2) -> list[float]: """ dices = [Dice() for i in range(num_dice)] count_of_sum = [0] * (len(dices) * Dice.NUM_SIDES + 1) - for i in range(num_throws): + for _ in range(num_throws): count_of_sum[sum(dice.roll() for dice in dices)] += 1 probability = [round((count * 100) / num_throws, 2) for count in count_of_sum] return probability[num_dice:] # remove probability of sums that never appear diff --git a/maths/numerical_integration.py b/maths/numerical_integration.py index a2bfce5b911d..8f32fd3564df 100644 --- a/maths/numerical_integration.py +++ b/maths/numerical_integration.py @@ -39,7 +39,7 @@ def trapezoidal_area( fx1 = fnc(x_start) area = 0.0 - for i in range(steps): + for _ in range(steps): # Approximates small segments of 
curve as linear and solve # for trapezoidal area diff --git a/maths/pi_monte_carlo_estimation.py b/maths/pi_monte_carlo_estimation.py index 81be083787bd..29b679907239 100644 --- a/maths/pi_monte_carlo_estimation.py +++ b/maths/pi_monte_carlo_estimation.py @@ -47,7 +47,7 @@ def estimate_pi(number_of_simulations: int) -> float: raise ValueError("At least one simulation is necessary to estimate PI.") number_in_unit_circle = 0 - for simulation_index in range(number_of_simulations): + for _ in range(number_of_simulations): random_point = Point.random_unit_square() if random_point.is_in_unit_circle(): diff --git a/maths/pollard_rho.py b/maths/pollard_rho.py index 0fc80cd4280b..5082f54f71a8 100644 --- a/maths/pollard_rho.py +++ b/maths/pollard_rho.py @@ -73,7 +73,7 @@ def rand_fn(value: int, step: int, modulus: int) -> int: """ return (pow(value, 2) + step) % modulus - for attempt in range(attempts): + for _ in range(attempts): # These track the position within the cycle detection logic. tortoise = seed hare = seed diff --git a/maths/primelib.py b/maths/primelib.py index 7d2a22f39c59..eb72a9f8ae6a 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -406,14 +406,14 @@ def kg_v(number1, number2): count1 = prime_fac_1.count(n) count2 = prime_fac_2.count(n) - for i in range(max(count1, count2)): + for _ in range(max(count1, count2)): ans *= n else: count1 = prime_fac_1.count(n) - for i in range(count1): + for _ in range(count1): ans *= n done.append(n) @@ -425,7 +425,7 @@ def kg_v(number1, number2): count2 = prime_fac_2.count(n) - for i in range(count2): + for _ in range(count2): ans *= n done.append(n) @@ -637,7 +637,7 @@ def fib(n): fib1 = 1 ans = 1 # this will be return - for i in range(n - 1): + for _ in range(n - 1): tmp = ans ans += fib1 diff --git a/maths/proth_number.py b/maths/proth_number.py index e175031435b0..6b15190249f0 100644 --- a/maths/proth_number.py +++ b/maths/proth_number.py @@ -49,7 +49,7 @@ def proth(number: int) -> int: proth_index = 2 increment = 3 for block in range(1, block_index): - for move in range(increment): + for _ in range(increment): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1]) proth_index += 1 increment *= 2 diff --git a/maths/square_root.py b/maths/square_root.py index b324c723037c..2cbf14beae18 100644 --- a/maths/square_root.py +++ b/maths/square_root.py @@ -49,7 +49,7 @@ def square_root_iterative( value = get_initial_point(a) - for i in range(max_iter): + for _ in range(max_iter): prev_value = value value = value - fx(value, a) / fx_derivative(value) if abs(prev_value - value) < tolerance: diff --git a/maths/ugly_numbers.py b/maths/ugly_numbers.py index 4451a68cdaad..81bd928c6b3d 100644 --- a/maths/ugly_numbers.py +++ b/maths/ugly_numbers.py @@ -32,7 +32,7 @@ def ugly_numbers(n: int) -> int: next_3 = ugly_nums[i3] * 3 next_5 = ugly_nums[i5] * 5 - for i in range(1, n): + for _ in range(1, n): next_num = min(next_2, next_3, next_5) ugly_nums.append(next_num) if next_num == next_2: diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py index 305cad0a5a9c..6495bd8fc88d 100644 --- a/matrix/matrix_class.py +++ b/matrix/matrix_class.py @@ -351,7 +351,7 @@ def __pow__(self, other: int) -> Matrix: "Only invertable matrices can be raised to a negative power" ) result = self - for i in range(other - 1): + for _ in range(other - 1): result *= self return result diff --git a/matrix/nth_fibonacci_using_matrix_exponentiation.py b/matrix/nth_fibonacci_using_matrix_exponentiation.py index 7c964d884617..65f10c90d07a 100644 --- 
a/matrix/nth_fibonacci_using_matrix_exponentiation.py +++ b/matrix/nth_fibonacci_using_matrix_exponentiation.py @@ -65,7 +65,7 @@ def nth_fibonacci_bruteforce(n: int) -> int: return n fib0 = 0 fib1 = 1 - for i in range(2, n + 1): + for _ in range(2, n + 1): fib0, fib1 = fib1, fib0 + fib1 return fib1 diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index 43e796e77be3..23b818b0f3cf 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -128,7 +128,7 @@ def train(self, xdata, ydata, train_round, accuracy): self.ax_loss.hlines(self.accuracy, 0, self.train_round * 1.1) x_shape = np.shape(xdata) - for round_i in range(train_round): + for _ in range(train_round): all_loss = 0 for row in range(x_shape[0]): _xdata = np.asmatrix(xdata[row, :]).T diff --git a/neural_network/perceptron.py b/neural_network/perceptron.py index a2bfdb326d77..f04c81424c81 100644 --- a/neural_network/perceptron.py +++ b/neural_network/perceptron.py @@ -69,7 +69,7 @@ def training(self) -> None: for sample in self.sample: sample.insert(0, self.bias) - for i in range(self.col_sample): + for _ in range(self.col_sample): self.weight.append(random.random()) self.weight.insert(0, self.bias) diff --git a/other/lfu_cache.py b/other/lfu_cache.py index 072d00ab58c8..2f26bb6cc74a 100644 --- a/other/lfu_cache.py +++ b/other/lfu_cache.py @@ -303,7 +303,7 @@ def cache_decorator_wrapper(*args: T) -> U: def cache_info() -> LFUCache[T, U]: return cls.decorator_function_to_instance_map[func] - setattr(cache_decorator_wrapper, "cache_info", cache_info) + setattr(cache_decorator_wrapper, "cache_info", cache_info) # noqa: B010 return cache_decorator_wrapper diff --git a/other/lru_cache.py b/other/lru_cache.py index b68ae0a8e296..aa910e487406 100644 --- a/other/lru_cache.py +++ b/other/lru_cache.py @@ -321,7 +321,7 @@ def cache_decorator_wrapper(*args: T) -> U: def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] - setattr(cache_decorator_wrapper, "cache_info", cache_info) + setattr(cache_decorator_wrapper, "cache_info", cache_info) # noqa: B010 return cache_decorator_wrapper diff --git a/other/magicdiamondpattern.py b/other/magicdiamondpattern.py index 71bc50b51fc2..0fc41d7a25d8 100644 --- a/other/magicdiamondpattern.py +++ b/other/magicdiamondpattern.py @@ -8,9 +8,9 @@ def floyd(n): n : size of pattern """ for i in range(0, n): - for j in range(0, n - i - 1): # printing spaces + for _ in range(0, n - i - 1): # printing spaces print(" ", end="") - for k in range(0, i + 1): # printing stars + for _ in range(0, i + 1): # printing stars print("* ", end="") print() @@ -22,10 +22,10 @@ def reverse_floyd(n): n : size of pattern """ for i in range(n, 0, -1): - for j in range(i, 0, -1): # printing stars + for _ in range(i, 0, -1): # printing stars print("* ", end="") print() - for k in range(n - i + 1, 0, -1): # printing spaces + for _ in range(n - i + 1, 0, -1): # printing spaces print(" ", end="") diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index aecd19c55927..1e6293f8465c 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -77,7 +77,7 @@ def procentual_proximity( final_scores: list[float] = [0 for i in range(len(score_lists[0]))] # generate final scores - for i, slist in enumerate(score_lists): + for slist in score_lists: for j, ele in enumerate(slist): final_scores[j] = final_scores[j] + ele diff --git 
a/physics/lorentz_transformation_four_vector.py b/physics/lorentz_transformation_four_vector.py index bda852c25520..f58b40e5906b 100644 --- a/physics/lorentz_transformation_four_vector.py +++ b/physics/lorentz_transformation_four_vector.py @@ -145,7 +145,7 @@ def transformation_matrix(velocity: float) -> np.array: def transform( - velocity: float, event: np.array = np.zeros(4), symbolic: bool = True + velocity: float, event: np.array = np.zeros(4), symbolic: bool = True # noqa: B008 ) -> np.array: """ >>> transform(29979245,np.array([1,2,3,4]), False) diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index 7e9fc1642c84..2f8153782663 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -310,7 +310,7 @@ def example_3() -> BodySystem: """ bodies = [] - for i in range(10): + for _ in range(10): velocity_x = random.uniform(-0.5, 0.5) velocity_y = random.uniform(-0.5, 0.5) diff --git a/project_euler/problem_011/sol2.py b/project_euler/problem_011/sol2.py index 839ca6717571..9ea0db991aaf 100644 --- a/project_euler/problem_011/sol2.py +++ b/project_euler/problem_011/sol2.py @@ -36,7 +36,7 @@ def solution(): """ with open(os.path.dirname(__file__) + "/grid.txt") as f: l = [] # noqa: E741 - for i in range(20): + for _ in range(20): l.append([int(x) for x in f.readline().split()]) maximum = 0 diff --git a/project_euler/problem_025/sol3.py b/project_euler/problem_025/sol3.py index c66411dc55fc..0b9f3a0c84ef 100644 --- a/project_euler/problem_025/sol3.py +++ b/project_euler/problem_025/sol3.py @@ -45,7 +45,7 @@ def solution(n: int = 1000) -> int: f = f1 + f2 f1, f2 = f2, f index += 1 - for j in str(f): + for _ in str(f): i += 1 if i == n: break diff --git a/project_euler/problem_026/sol1.py b/project_euler/problem_026/sol1.py index 75d48df7910c..ccf2c111d2c5 100644 --- a/project_euler/problem_026/sol1.py +++ b/project_euler/problem_026/sol1.py @@ -41,7 +41,7 @@ def solution(numerator: int = 1, digit: int = 1000) -> int: for divide_by_number in range(numerator, digit + 1): has_been_divided: list[int] = [] now_divide = numerator - for division_cycle in range(1, digit + 1): + for _ in range(1, digit + 1): if now_divide in has_been_divided: if longest_list_length < len(has_been_divided): longest_list_length = len(has_been_divided) diff --git a/project_euler/problem_188/sol1.py b/project_euler/problem_188/sol1.py index dd4360adb32b..88bd1327e917 100644 --- a/project_euler/problem_188/sol1.py +++ b/project_euler/problem_188/sol1.py @@ -58,7 +58,7 @@ def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int: # calculate base↑↑height by right-assiciative repeated modular # exponentiation result = base - for i in range(1, height): + for _ in range(1, height): result = _modexpt(base, result, 10**digits) return result diff --git a/project_euler/problem_203/sol1.py b/project_euler/problem_203/sol1.py index dc93683da535..713b530b6af2 100644 --- a/project_euler/problem_203/sol1.py +++ b/project_euler/problem_203/sol1.py @@ -49,7 +49,7 @@ def get_pascal_triangle_unique_coefficients(depth: int) -> set[int]: """ coefficients = {1} previous_coefficients = [1] - for step in range(2, depth + 1): + for _ in range(2, depth + 1): coefficients_begins_one = previous_coefficients + [0] coefficients_ends_one = [0] + previous_coefficients previous_coefficients = [] diff --git a/scheduling/multi_level_feedback_queue.py b/scheduling/multi_level_feedback_queue.py index b54cc8719039..a3ba1b340e9b 100644 --- a/scheduling/multi_level_feedback_queue.py +++ 
b/scheduling/multi_level_feedback_queue.py @@ -205,7 +205,7 @@ def round_robin( """ finished: deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue - for i in range(len(ready_queue)): + for _ in range(len(ready_queue)): cp = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time diff --git a/sorts/double_sort.py b/sorts/double_sort.py index 4e08e27b3c21..5ca88a6745d5 100644 --- a/sorts/double_sort.py +++ b/sorts/double_sort.py @@ -15,7 +15,7 @@ def double_sort(lst): True """ no_of_elements = len(lst) - for i in range( + for _ in range( 0, int(((no_of_elements - 1) / 2) + 1) ): # we don't need to traverse to end of list as for j in range(0, no_of_elements - 1): diff --git a/web_programming/open_google_results.py b/web_programming/open_google_results.py index 0e1dba8c5856..2685bf62114d 100644 --- a/web_programming/open_google_results.py +++ b/web_programming/open_google_results.py @@ -1,10 +1,10 @@ import webbrowser from sys import argv -from urllib.parse import quote, parse_qs -from fake_useragent import UserAgent +from urllib.parse import parse_qs, quote import requests from bs4 import BeautifulSoup +from fake_useragent import UserAgent if __name__ == "__main__": if len(argv) > 1: @@ -18,9 +18,7 @@ res = requests.get( url, - headers={ - "User-Agent": str(UserAgent().random) - }, + headers={"User-Agent": str(UserAgent().random)}, ) try: From 71353ed79787cbbe3800ee32a1fb3d82c1335d19 Mon Sep 17 00:00:00 2001 From: Advik Sharma <70201060+advik-student-dev@users.noreply.github.com> Date: Thu, 13 Oct 2022 10:09:48 -0700 Subject: [PATCH 0541/1543] refined readme.md (#7081) * refined readme.md added some refinements to readme.md * Update README.md Co-authored-by: Christian Clauss --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index c499c14e12b9..da80c012b0c6 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@
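Most of the long cleanup patch above renames loop indices that are never read inside the loop body to the conventional throwaway name `_`, which is the pattern flake8-bugbear's B007 check looks for; where a named index is deliberately kept, the patch silences the warning with `# noqa: B007` instead. A minimal sketch of the convention, illustrative only and not taken from any file in the patch:

    for _ in range(3):  # the counter itself is never needed
        print("tick")

    total = 0
    for step in range(3):  # keep a real name when the value is read
        total += step
    print(total)  # 3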

All algorithms implemented in Python - for education

-Implementations are for learning purposes only. As they may be less efficient than the implementations in the Python standard library, use them at your discretion. +Implementations are for learning purposes only. They may be less efficient than the implementations in the Python standard library. Use them at your discretion. ## Getting Started @@ -42,8 +42,8 @@ Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribut ## Community Channels -We're on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms)! Community channels are great for you to ask questions and get help. Please join us! +We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms)! Community channels are a great way for you to ask questions and get help. Please join us! ## List of Algorithms -See our [directory](DIRECTORY.md) for easier navigation and better overview of the project. +See our [directory](DIRECTORY.md) for easier navigation and a better overview of the project. From 3deb4a3042438007df7373c07c6280e55d3511da Mon Sep 17 00:00:00 2001 From: Anurag Shukla <76862299+anuragshuklajec@users.noreply.github.com> Date: Fri, 14 Oct 2022 01:33:15 +0530 Subject: [PATCH 0542/1543] Create binary_search_matrix.py (#6995) * Create binary_search_matrix.py Added an algorithm to search in matrix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update binary_search_matrix.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix Indentation * Update matrix/binary_search_matrix.py Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- matrix/binary_search_matrix.py | 57 ++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 matrix/binary_search_matrix.py diff --git a/matrix/binary_search_matrix.py b/matrix/binary_search_matrix.py new file mode 100644 index 000000000000..6f203b7a3484 --- /dev/null +++ b/matrix/binary_search_matrix.py @@ -0,0 +1,57 @@ +def binary_search(array: list, lower_bound: int, upper_bound: int, value: int) -> int: + """ + This function carries out Binary search on a 1d array and + return -1 if it do not exist + array: A 1d sorted array + value : the value meant to be searched + >>> matrix = [1, 4, 7, 11, 15] + >>> binary_search(matrix, 0, len(matrix) - 1, 1) + 0 + >>> binary_search(matrix, 0, len(matrix) - 1, 23) + -1 + """ + + r = int((lower_bound + upper_bound) // 2) + if array[r] == value: + return r + if lower_bound >= upper_bound: + return -1 + if array[r] < value: + return binary_search(array, r + 1, upper_bound, value) + else: + return binary_search(array, lower_bound, r - 1, value) + + +def mat_bin_search(value: int, matrix: list) -> list: + """ + This function loops over a 2d matrix and calls binarySearch on + the selected 1d array and returns [-1, -1] is it do not exist + value : value meant to be searched + matrix = a sorted 2d matrix + >>> matrix = [[1, 4, 7, 11, 15], + ... [2, 5, 8, 12, 19], + ... [3, 6, 9, 16, 22], + ... [10, 13, 14, 17, 24], + ... 
[18, 21, 23, 26, 30]] + >>> target = 1 + >>> mat_bin_search(target, matrix) + [0, 0] + >>> target = 34 + >>> mat_bin_search(target, matrix) + [-1, -1] + """ + index = 0 + if matrix[index][0] == value: + return [index, 0] + while index < len(matrix) and matrix[index][0] < value: + r = binary_search(matrix[index], 0, len(matrix[index]) - 1, value) + if r != -1: + return [index, r] + index += 1 + return [-1, -1] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 05e19128f7fd1bee9c8d037b3f84cd42374aad0d Mon Sep 17 00:00:00 2001 From: AkshajV1309 <79909101+AkshajV1309@users.noreply.github.com> Date: Fri, 14 Oct 2022 01:54:31 +0530 Subject: [PATCH 0543/1543] Create norgate.py (#7133) * Create norgate.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create norgate.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update boolean_algebra/norgate.py * Update boolean_algebra/norgate.py * Update boolean_algebra/norgate.py * Update boolean_algebra/norgate.py * Update boolean_algebra/norgate.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- boolean_algebra/norgate.py | 46 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 boolean_algebra/norgate.py diff --git a/boolean_algebra/norgate.py b/boolean_algebra/norgate.py new file mode 100644 index 000000000000..82a1fb2e33e5 --- /dev/null +++ b/boolean_algebra/norgate.py @@ -0,0 +1,46 @@ +""" A NOR Gate is a logic gate in boolean algebra which results to false(0) + if any of the input is 1, and True(1) if both the inputs are 0. + Following is the truth table of an NOR Gate: + | Input 1 | Input 2 | Output | + | 0 | 0 | 1 | + | 0 | 1 | 0 | + | 1 | 0 | 0 | + | 1 | 1 | 0 | +""" +"""Following is the code implementation of the NOR Gate""" + + +def nor_gate(input_1: int, input_2: int) -> int: + """ + >>> nor_gate(0, 0) + 1 + >>> nor_gate(0, 1) + 0 + >>> nor_gate(1, 0) + 0 + >>> nor_gate(1, 1) + 0 + >>> nor_gate(0.0, 0.0) + 1 + >>> nor_gate(0, -7) + 0 + """ + return int(bool(input_1 == input_2 == 0)) + + +def main() -> None: + print("Truth Table of NOR Gate:") + print("| Input 1 |", " Input 2 |", " Output |") + print("| 0 |", " 0 | ", nor_gate(0, 0), " |") + print("| 0 |", " 1 | ", nor_gate(0, 1), " |") + print("| 1 |", " 0 | ", nor_gate(1, 0), " |") + print("| 1 |", " 1 | ", nor_gate(1, 1), " |") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() +"""Code provided by Akshaj Vishwanathan""" +"""Reference: https://www.geeksforgeeks.org/logic-gates-in-python/""" From 26fe4c65390b7a2bfe2722b674943b64820d8442 Mon Sep 17 00:00:00 2001 From: Md Mahiuddin <68785084+mahiuddin-dev@users.noreply.github.com> Date: Fri, 14 Oct 2022 13:20:40 +0600 Subject: [PATCH 0544/1543] Remove extra Semicolon (#7152) --- data_structures/queue/linked_queue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/queue/linked_queue.py b/data_structures/queue/linked_queue.py index c6e9f53908dd..3675da7db78a 100644 --- a/data_structures/queue/linked_queue.py +++ b/data_structures/queue/linked_queue.py @@ -22,7 +22,7 @@ class LinkedQueue: >>> queue.put(5) >>> queue.put(9) >>> queue.put('python') - >>> queue.is_empty(); + >>> queue.is_empty() False >>> queue.get() 5 From e40c7b4bf1794c94993715c99e2a97b9d8f5e590 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Fri, 14 Oct 2022 
20:04:44 +0530 Subject: [PATCH 0545/1543] refactor: move flake8 config (#7167) * refactor: move flake8 config * Update .pre-commit-config.yaml Co-authored-by: Christian Clauss --- .flake8 | 5 +++++ .pre-commit-config.yaml | 8 ++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.flake8 b/.flake8 index 9a5863c9cd0b..0d9ef18d142b 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,8 @@ [flake8] +max-line-length = 88 +max-complexity = 25 extend-ignore = A003 # Class attribute is shadowing a python builtin + # Formatting style for `black` + E203 # Whitespace before ':' + W503 # Line break occurred before a binary operator diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d2558b90abb1..d3ea9722f8f3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -35,11 +35,7 @@ repos: - repo: https://github.com/PyCQA/flake8 rev: 5.0.4 hooks: - - id: flake8 - args: - - --ignore=E203,W503 - - --max-complexity=25 - - --max-line-length=88 + - id: flake8 # See .flake8 for args additional_dependencies: - flake8-bugbear - flake8-builtins @@ -51,7 +47,7 @@ repos: - id: mypy args: - --ignore-missing-imports - - --install-types # See mirrors-mypy README.md + - --install-types # See mirrors-mypy README.md - --non-interactive additional_dependencies: [types-requests] From fd5ab454921b687af94927015d4ab06d3a84886b Mon Sep 17 00:00:00 2001 From: Abinash Satapathy Date: Fri, 14 Oct 2022 17:47:39 +0200 Subject: [PATCH 0546/1543] Doctest output simpler version (#7116) * Update README.md Added Google Cirq references * Create barcode_validator.py Barcode/EAN validator * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Included docstring and updated variables to snake_case * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Included docset and updated bugs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Implemented the changes asked in review. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Updated with f-string format * Update barcode_validator.py * Update volume_conversions.py Simpler doctest output * Update volume_conversions.py Fixed indentation Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- conversions/volume_conversions.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/conversions/volume_conversions.py b/conversions/volume_conversions.py index de2290196fc2..44d29009120c 100644 --- a/conversions/volume_conversions.py +++ b/conversions/volume_conversions.py @@ -52,11 +52,7 @@ def volume_conversion(value: float, from_type: str, to_type: str) -> float: 0.000236588 >>> volume_conversion(4, "wrongUnit", "litre") Traceback (most recent call last): - File "/usr/lib/python3.8/doctest.py", line 1336, in __run - exec(compile(example.source, filename, "single", - File "", line 1, in - volume_conversion(4, "wrongUnit", "litre") - File "", line 62, in volume_conversion + ... 
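This doctest cleanup leans on the fact that doctest ignores the traceback body: only the `Traceback (most recent call last):` header and the final exception line have to match, with `...` standing in for the stack frames. A minimal illustration of the accepted form, using an unrelated example exception rather than anything from the patched file:

    >>> int("oops")
    Traceback (most recent call last):
        ...
    ValueError: invalid literal for int() with base 10: 'oops'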
ValueError: Invalid 'from_type' value: 'wrongUnit' Supported values are: cubicmeter, litre, kilolitre, gallon, cubicyard, cubicfoot, cup """ From 0c06b255822905512b9fa9c12cb09dabf8fa405f Mon Sep 17 00:00:00 2001 From: Abinash Satapathy Date: Fri, 14 Oct 2022 23:42:41 +0200 Subject: [PATCH 0547/1543] Create speed_conversions.py (#7128) * Update README.md Added Google Cirq references * Create barcode_validator.py Barcode/EAN validator * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Included docstring and updated variables to snake_case * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Included docset and updated bugs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Implemented the changes asked in review. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update barcode_validator.py Updated with f-string format * Update barcode_validator.py * Update volume_conversions.py Simpler doctest output * Create speed_conversions.py Conversion of speed units * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update speed_conversions.py Doctests updated, dictionary implemented. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update speed_conversions.py Reduced LOC * Update volume_conversions.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- conversions/speed_conversions.py | 70 ++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 conversions/speed_conversions.py diff --git a/conversions/speed_conversions.py b/conversions/speed_conversions.py new file mode 100644 index 000000000000..62da9e137bc7 --- /dev/null +++ b/conversions/speed_conversions.py @@ -0,0 +1,70 @@ +""" +Convert speed units + +https://en.wikipedia.org/wiki/Kilometres_per_hour +https://en.wikipedia.org/wiki/Miles_per_hour +https://en.wikipedia.org/wiki/Knot_(unit) +https://en.wikipedia.org/wiki/Metre_per_second +""" + +speed_chart: dict[str, float] = { + "km/h": 1.0, + "m/s": 3.6, + "mph": 1.609344, + "knot": 1.852, +} + +speed_chart_inverse: dict[str, float] = { + "km/h": 1.0, + "m/s": 0.277777778, + "mph": 0.621371192, + "knot": 0.539956803, +} + + +def convert_speed(speed: float, unit_from: str, unit_to: str) -> float: + """ + Convert speed from one unit to another using the speed_chart above. 
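convert_speed() does the conversion in two multiplications: the value is scaled into km/h with `speed_chart`, then scaled back out of km/h with `speed_chart_inverse`. A worked instance of that arithmetic, matching the 100 mph doctests below (illustrative lines, not part of the patched file):

    mph_to_kmh = 100 * 1.609344            # 160.9344, rounded to 160.934 km/h
    mph_to_ms = mph_to_kmh * 0.277777778   # about 44.704 m/s
    print(round(mph_to_kmh, 3), round(mph_to_ms, 3))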
+ + "km/h": 1.0, + "m/s": 3.6, + "mph": 1.609344, + "knot": 1.852, + + >>> convert_speed(100, "km/h", "m/s") + 27.778 + >>> convert_speed(100, "km/h", "mph") + 62.137 + >>> convert_speed(100, "km/h", "knot") + 53.996 + >>> convert_speed(100, "m/s", "km/h") + 360.0 + >>> convert_speed(100, "m/s", "mph") + 223.694 + >>> convert_speed(100, "m/s", "knot") + 194.384 + >>> convert_speed(100, "mph", "km/h") + 160.934 + >>> convert_speed(100, "mph", "m/s") + 44.704 + >>> convert_speed(100, "mph", "knot") + 86.898 + >>> convert_speed(100, "knot", "km/h") + 185.2 + >>> convert_speed(100, "knot", "m/s") + 51.444 + >>> convert_speed(100, "knot", "mph") + 115.078 + """ + if unit_to not in speed_chart or unit_from not in speed_chart_inverse: + raise ValueError( + f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n" + f"Valid values are: {', '.join(speed_chart_inverse)}" + ) + return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 20587750051c3699b051579d7e97e5508958ea5a Mon Sep 17 00:00:00 2001 From: Caeden Date: Fri, 14 Oct 2022 23:25:15 +0100 Subject: [PATCH 0548/1543] refactor: Make code more understandable (#7196) * refactor: Make code more understandable * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../binary_tree/binary_tree_traversals.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 378598bb096d..54b1dc536f32 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -15,7 +15,20 @@ class Node: def make_tree() -> Node | None: - return Node(1, Node(2, Node(4), Node(5)), Node(3)) + r""" + The below tree + 1 + / \ + 2 3 + / \ + 4 5 + """ + tree = Node(1) + tree.left = Node(2) + tree.right = Node(3) + tree.left.left = Node(4) + tree.left.right = Node(5) + return tree def preorder(root: Node | None) -> list[int]: From 5dc0dc4d23eb1efa4564c0531402af3d2419012d Mon Sep 17 00:00:00 2001 From: Lukas Esc <55601315+Luk-ESC@users.noreply.github.com> Date: Fri, 14 Oct 2022 17:37:15 -0500 Subject: [PATCH 0549/1543] remove useless bool() call (#7189) --- boolean_algebra/norgate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/boolean_algebra/norgate.py b/boolean_algebra/norgate.py index 82a1fb2e33e5..1c341e8a707b 100644 --- a/boolean_algebra/norgate.py +++ b/boolean_algebra/norgate.py @@ -25,7 +25,7 @@ def nor_gate(input_1: int, input_2: int) -> int: >>> nor_gate(0, -7) 0 """ - return int(bool(input_1 == input_2 == 0)) + return int(input_1 == input_2 == 0) def main() -> None: From dcca5351c9185bf8c568615782ffb28319a6539d Mon Sep 17 00:00:00 2001 From: Claudio Lucisano <43884655+Claudiocli@users.noreply.github.com> Date: Sat, 15 Oct 2022 00:45:12 +0200 Subject: [PATCH 0550/1543] Added astronomical_length_scale_conversion.py (#7183) --- .../astronomical_length_scale_conversion.py | 104 ++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 conversions/astronomical_length_scale_conversion.py diff --git a/conversions/astronomical_length_scale_conversion.py b/conversions/astronomical_length_scale_conversion.py new file mode 100644 index 000000000000..804d82487a25 --- /dev/null +++ 
b/conversions/astronomical_length_scale_conversion.py @@ -0,0 +1,104 @@ +""" +Conversion of length units. +Available Units: +Metre, Kilometre, Megametre, Gigametre, +Terametre, Petametre, Exametre, Zettametre, Yottametre + +USAGE : +-> Import this file into their respective project. +-> Use the function length_conversion() for conversion of length units. +-> Parameters : + -> value : The number of from units you want to convert + -> from_type : From which type you want to convert + -> to_type : To which type you want to convert + +REFERENCES : +-> Wikipedia reference: https://en.wikipedia.org/wiki/Meter +-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilometer +-> Wikipedia reference: https://en.wikipedia.org/wiki/Orders_of_magnitude_(length) +""" + +UNIT_SYMBOL = { + "meter": "m", + "kilometer": "km", + "megametre": "Mm", + "gigametre": "Gm", + "terametre": "Tm", + "petametre": "Pm", + "exametre": "Em", + "zettametre": "Zm", + "yottametre": "Ym", +} +# Exponent of the factor(meter) +METRIC_CONVERSION = { + "m": 0, + "km": 3, + "Mm": 6, + "Gm": 9, + "Tm": 12, + "Pm": 15, + "Em": 18, + "Zm": 21, + "Ym": 24, +} + + +def length_conversion(value: float, from_type: str, to_type: str) -> float: + """ + Conversion between astronomical length units. + + >>> length_conversion(1, "meter", "kilometer") + 0.001 + >>> length_conversion(1, "meter", "megametre") + 1e-06 + >>> length_conversion(1, "gigametre", "meter") + 1000000000 + >>> length_conversion(1, "gigametre", "terametre") + 0.001 + >>> length_conversion(1, "petametre", "terametre") + 1000 + >>> length_conversion(1, "petametre", "exametre") + 0.001 + >>> length_conversion(1, "terametre", "zettametre") + 1e-09 + >>> length_conversion(1, "yottametre", "zettametre") + 1000 + >>> length_conversion(4, "wrongUnit", "inch") + Traceback (most recent call last): + ... + ValueError: Invalid 'from_type' value: 'wrongUnit'. 
+ Conversion abbreviations are: m, km, Mm, Gm, Tm, Pm, Em, Zm, Ym + """ + + from_sanitized = from_type.lower().strip("s") + to_sanitized = to_type.lower().strip("s") + + from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized) + to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized) + + if from_sanitized not in METRIC_CONVERSION: + raise ValueError( + f"Invalid 'from_type' value: {from_type!r}.\n" + f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" + ) + if to_sanitized not in METRIC_CONVERSION: + raise ValueError( + f"Invalid 'to_type' value: {to_type!r}.\n" + f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" + ) + from_exponent = METRIC_CONVERSION[from_sanitized] + to_exponent = METRIC_CONVERSION[to_sanitized] + exponent = 1 + + if from_exponent > to_exponent: + exponent = from_exponent - to_exponent + else: + exponent = -(to_exponent - from_exponent) + + return value * pow(10, exponent) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 6e69181d1f592a08806717058720bf63e241eef2 Mon Sep 17 00:00:00 2001 From: Caeden Date: Sat, 15 Oct 2022 02:07:03 +0100 Subject: [PATCH 0551/1543] refactor: Replace `list()` and `dict()` calls with literals (#7198) --- data_structures/binary_tree/binary_search_tree.py | 2 +- data_structures/heap/heap_generic.py | 2 +- data_structures/trie/trie.py | 2 +- graphs/frequent_pattern_graph_miner.py | 2 +- maths/greedy_coin_change.py | 2 +- other/davisb_putnamb_logemannb_loveland.py | 4 ++-- project_euler/problem_107/sol1.py | 2 +- searches/tabu_search.py | 6 +++--- sorts/msd_radix_sort.py | 4 ++-- strings/aho_corasick.py | 2 +- 10 files changed, 14 insertions(+), 14 deletions(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index b9af23dc8b00..51a651be0f82 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -160,7 +160,7 @@ def postorder(curr_node): """ postOrder (left, right, self) """ - node_list = list() + node_list = [] if curr_node is not None: node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node] return node_list diff --git a/data_structures/heap/heap_generic.py b/data_structures/heap/heap_generic.py index e7831cd45b43..b4d7019f41f9 100644 --- a/data_structures/heap/heap_generic.py +++ b/data_structures/heap/heap_generic.py @@ -9,7 +9,7 @@ class Heap: def __init__(self, key: Callable | None = None) -> None: # Stores actual heap items. - self.arr: list = list() + self.arr: list = [] # Stores indexes of each item for supporting updates and deletion. self.pos_map: dict = {} # Stores current size of heap. 
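Every hunk in this patch has the same shape: an empty-container constructor call becomes a literal, which is both the idiomatic spelling and marginally cheaper in CPython because `[]` and `{}` need no name lookup or function call. A small before/after sketch, not taken from any of the files above:

    names: list[str] = []        # instead of names = list()
    index: dict[str, int] = {}   # instead of index = dict()
    print(names, index)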
diff --git a/data_structures/trie/trie.py b/data_structures/trie/trie.py index 162d08d1d678..46b93a499d14 100644 --- a/data_structures/trie/trie.py +++ b/data_structures/trie/trie.py @@ -8,7 +8,7 @@ class TrieNode: def __init__(self) -> None: - self.nodes: dict[str, TrieNode] = dict() # Mapping from char to TrieNode + self.nodes: dict[str, TrieNode] = {} # Mapping from char to TrieNode self.is_leaf = False def insert_many(self, words: list[str]) -> None: diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index a5ecbe6e8223..1d26702a480e 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -54,7 +54,7 @@ def get_frequency_table(edge_array): Returns Frequency Table """ distinct_edge = get_distinct_edge(edge_array) - frequency_table = dict() + frequency_table = {} for item in distinct_edge: bit = get_bitcode(edge_array, item) diff --git a/maths/greedy_coin_change.py b/maths/greedy_coin_change.py index 5233ee1cbc12..29c2f1803d5c 100644 --- a/maths/greedy_coin_change.py +++ b/maths/greedy_coin_change.py @@ -74,7 +74,7 @@ def find_minimum_change(denominations: list[int], value: str) -> list[int]: # Driver Code if __name__ == "__main__": - denominations = list() + denominations = [] value = "0" if ( diff --git a/other/davisb_putnamb_logemannb_loveland.py b/other/davisb_putnamb_logemannb_loveland.py index 03d60a9a1aaf..3110515d5874 100644 --- a/other/davisb_putnamb_logemannb_loveland.py +++ b/other/davisb_putnamb_logemannb_loveland.py @@ -199,7 +199,7 @@ def find_pure_symbols( {'A1': True, 'A2': False, 'A3': True, 'A5': False} """ pure_symbols = [] - assignment: dict[str, bool | None] = dict() + assignment: dict[str, bool | None] = {} literals = [] for clause in clauses: @@ -264,7 +264,7 @@ def find_unit_clauses( n_count += 1 if f_count == len(clause) - 1 and n_count == 1: unit_symbols.append(sym) - assignment: dict[str, bool | None] = dict() + assignment: dict[str, bool | None] = {} for i in unit_symbols: symbol = i[:2] assignment[symbol] = len(i) == 2 diff --git a/project_euler/problem_107/sol1.py b/project_euler/problem_107/sol1.py index 048cf033dc2e..b3f5685b95ef 100644 --- a/project_euler/problem_107/sol1.py +++ b/project_euler/problem_107/sol1.py @@ -100,7 +100,7 @@ def solution(filename: str = "p107_network.txt") -> int: script_dir: str = os.path.abspath(os.path.dirname(__file__)) network_file: str = os.path.join(script_dir, filename) adjacency_matrix: list[list[str]] - edges: dict[EdgeT, int] = dict() + edges: dict[EdgeT, int] = {} data: list[str] edge1: int edge2: int diff --git a/searches/tabu_search.py b/searches/tabu_search.py index 45ce19d46b23..3e1728286d98 100644 --- a/searches/tabu_search.py +++ b/searches/tabu_search.py @@ -51,7 +51,7 @@ def generate_neighbours(path): with open(path) as f: for line in f: if line.split()[0] not in dict_of_neighbours: - _list = list() + _list = [] _list.append([line.split()[1], line.split()[2]]) dict_of_neighbours[line.split()[0]] = _list else: @@ -59,7 +59,7 @@ def generate_neighbours(path): [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: - _list = list() + _list = [] _list.append([line.split()[0], line.split()[2]]) dict_of_neighbours[line.split()[1]] = _list else: @@ -206,7 +206,7 @@ def tabu_search( """ count = 1 solution = first_solution - tabu_list = list() + tabu_list = [] best_cost = distance_of_first_solution best_solution_ever = solution diff --git a/sorts/msd_radix_sort.py b/sorts/msd_radix_sort.py index 
3cdec4bd0711..7430fc5a63c8 100644 --- a/sorts/msd_radix_sort.py +++ b/sorts/msd_radix_sort.py @@ -52,8 +52,8 @@ def _msd_radix_sort(list_of_ints: list[int], bit_position: int) -> list[int]: if bit_position == 0 or len(list_of_ints) in [0, 1]: return list_of_ints - zeros = list() - ones = list() + zeros = [] + ones = [] # Split numbers based on bit at bit_position from the right for number in list_of_ints: if (number >> (bit_position - 1)) & 1: diff --git a/strings/aho_corasick.py b/strings/aho_corasick.py index b9a6a80728f6..2d2f562df951 100644 --- a/strings/aho_corasick.py +++ b/strings/aho_corasick.py @@ -5,7 +5,7 @@ class Automaton: def __init__(self, keywords: list[str]): - self.adlist: list[dict] = list() + self.adlist: list[dict] = [] self.adlist.append( {"value": "", "next_states": [], "fail_state": 0, "output": []} ) From 70b60dc3231e1df72622db64f9b97fef772181e5 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Sat, 15 Oct 2022 12:07:59 +0530 Subject: [PATCH 0552/1543] chore: remove inactive user from CODEOWNERS (#7205) * chore: remove inactive user from CODEOWNERS * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/CODEOWNERS | 6 +++--- DIRECTORY.md | 11 +++++++++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index fdce879f80c4..abf99ab227be 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -35,7 +35,7 @@ # /divide_and_conquer/ -/dynamic_programming/ @Kush1101 +# /dynamic_programming/ # /file_transfer/ @@ -59,7 +59,7 @@ # /machine_learning/ -/maths/ @Kush1101 +# /maths/ # /matrix/ @@ -69,7 +69,7 @@ # /other/ @cclauss # TODO: Uncomment this line after Hacktoberfest -/project_euler/ @dhruvmanila @Kush1101 +/project_euler/ @dhruvmanila # /quantum/ diff --git a/DIRECTORY.md b/DIRECTORY.md index 2786e1f82de8..239dafa65f2b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -52,6 +52,7 @@ * [Modular Division](blockchain/modular_division.py) ## Boolean Algebra + * [Norgate](boolean_algebra/norgate.py) * [Quine Mc Cluskey](boolean_algebra/quine_mc_cluskey.py) ## Cellular Automata @@ -121,6 +122,7 @@ * [Pooling Functions](computer_vision/pooling_functions.py) ## Conversions + * [Astronomical Length Scale Conversion](conversions/astronomical_length_scale_conversion.py) * [Binary To Decimal](conversions/binary_to_decimal.py) * [Binary To Hexadecimal](conversions/binary_to_hexadecimal.py) * [Binary To Octal](conversions/binary_to_octal.py) @@ -140,6 +142,7 @@ * [Pressure Conversions](conversions/pressure_conversions.py) * [Rgb Hsv Conversion](conversions/rgb_hsv_conversion.py) * [Roman Numerals](conversions/roman_numerals.py) + * [Speed Conversions](conversions/speed_conversions.py) * [Temperature Conversions](conversions/temperature_conversions.py) * [Volume Conversions](conversions/volume_conversions.py) * [Weight Conversion](conversions/weight_conversion.py) @@ -448,6 +451,7 @@ * [Random Forest Classifier](machine_learning/random_forest_classifier.py) * [Random Forest Regressor](machine_learning/random_forest_regressor.py) * [Scoring Functions](machine_learning/scoring_functions.py) + * [Self Organizing Map](machine_learning/self_organizing_map.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) * [Similarity Search](machine_learning/similarity_search.py) * [Support Vector Machines](machine_learning/support_vector_machines.py) @@ -586,9 +590,11 @@ * [Two Sum](maths/two_sum.py) * [Ugly Numbers](maths/ugly_numbers.py) * 
[Volume](maths/volume.py) + * [Weird Number](maths/weird_number.py) * [Zellers Congruence](maths/zellers_congruence.py) ## Matrix + * [Binary Search Matrix](matrix/binary_search_matrix.py) * [Count Islands In Matrix](matrix/count_islands_in_matrix.py) * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Matrix Class](matrix/matrix_class.py) @@ -854,8 +860,6 @@ * [Sol1](project_euler/problem_101/sol1.py) * Problem 102 * [Sol1](project_euler/problem_102/sol1.py) - * Problem 104 - * [Sol](project_euler/problem_104/sol.py) * Problem 107 * [Sol1](project_euler/problem_107/sol1.py) * Problem 109 @@ -1010,6 +1014,7 @@ * [Alternative String Arrange](strings/alternative_string_arrange.py) * [Anagrams](strings/anagrams.py) * [Autocomplete Using Trie](strings/autocomplete_using_trie.py) + * [Barcode Validator](strings/barcode_validator.py) * [Boyer Moore Search](strings/boyer_moore_search.py) * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py) * [Capitalize](strings/capitalize.py) @@ -1039,6 +1044,7 @@ * [Reverse Letters](strings/reverse_letters.py) * [Reverse Long Words](strings/reverse_long_words.py) * [Reverse Words](strings/reverse_words.py) + * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) @@ -1073,6 +1079,7 @@ * [Instagram Pic](web_programming/instagram_pic.py) * [Instagram Video](web_programming/instagram_video.py) * [Nasa Data](web_programming/nasa_data.py) + * [Open Google Results](web_programming/open_google_results.py) * [Random Anime Character](web_programming/random_anime_character.py) * [Recaptcha Verification](web_programming/recaptcha_verification.py) * [Reddit](web_programming/reddit.py) From 6be9500b2fb5d2e51432f9966e76a107dd604a41 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 15 Oct 2022 09:02:07 +0200 Subject: [PATCH 0553/1543] chore: remove checkbox in feature issue template (#7212) We do not assign issues in this repo Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/ISSUE_TEMPLATE/feature_request.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index bed3e8ab54ae..09a159b2193e 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -17,10 +17,3 @@ body: implementations. validations: required: true - - - type: checkboxes - attributes: - label: Would you like to work on this feature? - options: - - label: Yes, I want to work on this feature! 
- required: false From 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 Mon Sep 17 00:00:00 2001 From: Caeden Date: Sat, 15 Oct 2022 13:58:09 +0100 Subject: [PATCH 0554/1543] feat: Binary tree node sum (#7020) (#7162) * feat: Binary tree node sum (#7020) * feat: Sum of all nodes in binary tree explanation (#7020) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/binary_tree/binary_tree_node_sum.py Co-authored-by: Christian Clauss * refactor: Change replace method with `__iter__` overriding (#7020) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + .../binary_tree/binary_tree_node_sum.py | 76 +++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 data_structures/binary_tree/binary_tree_node_sum.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 239dafa65f2b..92bed9cb4c6e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -154,6 +154,7 @@ * [Binary Search Tree](data_structures/binary_tree/binary_search_tree.py) * [Binary Search Tree Recursive](data_structures/binary_tree/binary_search_tree_recursive.py) * [Binary Tree Mirror](data_structures/binary_tree/binary_tree_mirror.py) + * [Binary Tree Node Sum](data_structures/binary_tree/binary_tree_node_sum.py) * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) diff --git a/data_structures/binary_tree/binary_tree_node_sum.py b/data_structures/binary_tree/binary_tree_node_sum.py new file mode 100644 index 000000000000..5a13e74e3c9f --- /dev/null +++ b/data_structures/binary_tree/binary_tree_node_sum.py @@ -0,0 +1,76 @@ +""" +Sum of all nodes in a binary tree. + +Python implementation: + O(n) time complexity - Recurses through :meth:`depth_first_search` + with each element. + O(n) space complexity - At any point in time maximum number of stack + frames that could be in memory is `n` +""" + + +from __future__ import annotations + +from collections.abc import Iterator + + +class Node: + """ + A Node has a value variable and pointers to Nodes to its left and right. 
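The BinaryTreeNodeSum class defined just below recurses into the left and right child of every node and exposes the total through `__iter__`, which is why the doctests wrap it in `sum()`; for the example tree in those doctests the recursion unwinds to 10 + (5 + 12) + (-3 + 8 + 0) = 32. A standalone sketch of the same recursion over a nested-tuple tree, a hypothetical helper that is not part of the patched file:

    def tree_sum(node: tuple | None) -> int:
        # node is (value, left, right); None marks a missing child
        if node is None:
            return 0
        value, left, right = node
        return value + tree_sum(left) + tree_sum(right)

    tree = (10, (5, (12, None, None), None), (-3, (8, None, None), (0, None, None)))
    print(tree_sum(tree))  # 32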
+ """ + + def __init__(self, value: int) -> None: + self.value = value + self.left: Node | None = None + self.right: Node | None = None + + +class BinaryTreeNodeSum: + r""" + The below tree looks like this + 10 + / \ + 5 -3 + / / \ + 12 8 0 + + >>> tree = Node(10) + >>> sum(BinaryTreeNodeSum(tree)) + 10 + + >>> tree.left = Node(5) + >>> sum(BinaryTreeNodeSum(tree)) + 15 + + >>> tree.right = Node(-3) + >>> sum(BinaryTreeNodeSum(tree)) + 12 + + >>> tree.left.left = Node(12) + >>> sum(BinaryTreeNodeSum(tree)) + 24 + + >>> tree.right.left = Node(8) + >>> tree.right.right = Node(0) + >>> sum(BinaryTreeNodeSum(tree)) + 32 + """ + + def __init__(self, tree: Node) -> None: + self.tree = tree + + def depth_first_search(self, node: Node | None) -> int: + if node is None: + return 0 + return node.value + ( + self.depth_first_search(node.left) + self.depth_first_search(node.right) + ) + + def __iter__(self) -> Iterator[int]: + yield self.depth_first_search(self.tree) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a652905b605ddcc43626072366d1130315801dc9 Mon Sep 17 00:00:00 2001 From: Caeden Date: Sat, 15 Oct 2022 18:29:42 +0100 Subject: [PATCH 0555/1543] Add Flake8 comprehensions to pre-commit (#7235) * ci(pre-commit): Add ``flake8-comprehensions`` to ``pre-commit`` (#7233) * refactor: Fix ``flake8-comprehensions`` errors * fix: Replace `map` with generator (#7233) * fix: Cast `range` objects to `list` --- .pre-commit-config.yaml | 1 + ciphers/onepad_cipher.py | 2 +- ciphers/rail_fence_cipher.py | 2 +- data_structures/hashing/hash_table.py | 2 +- data_structures/linked_list/merge_two_lists.py | 2 +- dynamic_programming/fractional_knapsack.py | 2 +- graphs/bellman_ford.py | 2 +- graphs/frequent_pattern_graph_miner.py | 10 +++++----- hashes/enigma_machine.py | 8 ++++---- maths/primelib.py | 2 +- matrix/spiral_print.py | 4 ++-- other/davisb_putnamb_logemannb_loveland.py | 4 ++-- project_euler/problem_042/solution42.py | 4 ++-- project_euler/problem_052/sol1.py | 12 ++++++------ project_euler/problem_062/sol1.py | 2 +- project_euler/problem_067/sol1.py | 4 ++-- project_euler/problem_109/sol1.py | 2 +- project_euler/problem_551/sol1.py | 2 +- sorts/radix_sort.py | 2 +- strings/aho_corasick.py | 4 +--- 20 files changed, 36 insertions(+), 37 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d3ea9722f8f3..3455135653cf 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,6 +39,7 @@ repos: additional_dependencies: - flake8-bugbear - flake8-builtins + - flake8-comprehensions - pep8-naming - repo: https://github.com/pre-commit/mirrors-mypy diff --git a/ciphers/onepad_cipher.py b/ciphers/onepad_cipher.py index 3ace9b098cba..4bfe35b7180a 100644 --- a/ciphers/onepad_cipher.py +++ b/ciphers/onepad_cipher.py @@ -22,7 +22,7 @@ def decrypt(cipher: list[int], key: list[int]) -> str: for i in range(len(key)): p = int((cipher[i] - (key[i]) ** 2) / key[i]) plain.append(chr(p)) - return "".join([i for i in plain]) + return "".join(plain) if __name__ == "__main__": diff --git a/ciphers/rail_fence_cipher.py b/ciphers/rail_fence_cipher.py index cba593ca7335..47ee7db89831 100644 --- a/ciphers/rail_fence_cipher.py +++ b/ciphers/rail_fence_cipher.py @@ -72,7 +72,7 @@ def decrypt(input_string: str, key: int) -> str: counter = 0 for row in temp_grid: # fills in the characters splice = input_string[counter : counter + len(row)] - grid.append([character for character in splice]) + grid.append(list(splice)) counter += len(row) output_string = "" # reads 
as zigzag diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index 1cd71cc4baf3..607454c8255f 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -34,7 +34,7 @@ def hash_function(self, key): def _step_by_step(self, step_ord): print(f"step {step_ord}") - print([i for i in range(len(self.values))]) + print(list(range(len(self.values)))) print(self.values) def bulk_insert(self, values): diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py index 43dd461867f1..93cf7a7e1602 100644 --- a/data_structures/linked_list/merge_two_lists.py +++ b/data_structures/linked_list/merge_two_lists.py @@ -19,7 +19,7 @@ class Node: class SortedLinkedList: def __init__(self, ints: Iterable[int]) -> None: self.head: Node | None = None - for i in reversed(sorted(ints)): + for i in sorted(ints, reverse=True): self.head = Node(i, self.head) def __iter__(self) -> Iterator[int]: diff --git a/dynamic_programming/fractional_knapsack.py b/dynamic_programming/fractional_knapsack.py index 6f7a2a08cf9b..58976d40c02b 100644 --- a/dynamic_programming/fractional_knapsack.py +++ b/dynamic_programming/fractional_knapsack.py @@ -8,7 +8,7 @@ def frac_knapsack(vl, wt, w, n): 240.0 """ - r = list(sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)) + r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True) vl, wt = [i[0] for i in r], [i[1] for i in r] acc = list(accumulate(wt)) k = bisect(acc, w) diff --git a/graphs/bellman_ford.py b/graphs/bellman_ford.py index eb2cd25bf682..9ac8bae85d4f 100644 --- a/graphs/bellman_ford.py +++ b/graphs/bellman_ford.py @@ -58,7 +58,7 @@ def bellman_ford( V = int(input("Enter number of vertices: ").strip()) E = int(input("Enter number of edges: ").strip()) - graph: list[dict[str, int]] = [dict() for j in range(E)] + graph: list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print("Edge ", i + 1) diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index 1d26702a480e..87d5605a0bc8 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -155,12 +155,12 @@ def construct_graph(cluster, nodes): cluster[max(cluster.keys()) + 1] = "Header" graph = {} for i in x: - if tuple(["Header"]) in graph: - graph[tuple(["Header"])].append(x[i]) + if (["Header"],) in graph: + graph[(["Header"],)].append(x[i]) else: - graph[tuple(["Header"])] = [x[i]] + graph[(["Header"],)] = [x[i]] for i in x: - graph[tuple(x[i])] = [["Header"]] + graph[(x[i],)] = [["Header"]] i = 1 while i < max(cluster) - 1: create_edge(nodes, graph, cluster, i) @@ -186,7 +186,7 @@ def find_freq_subgraph_given_support(s, cluster, graph): """ k = int(s / 100 * (len(cluster) - 1)) for i in cluster[k].keys(): - my_dfs(graph, tuple(cluster[k][i]), tuple(["Header"])) + my_dfs(graph, tuple(cluster[k][i]), (["Header"],)) def freq_subgraphs_edge_list(paths): diff --git a/hashes/enigma_machine.py b/hashes/enigma_machine.py index 0194f7da7d6f..d95437d12c34 100644 --- a/hashes/enigma_machine.py +++ b/hashes/enigma_machine.py @@ -1,8 +1,8 @@ alphabets = [chr(i) for i in range(32, 126)] -gear_one = [i for i in range(len(alphabets))] -gear_two = [i for i in range(len(alphabets))] -gear_three = [i for i in range(len(alphabets))] -reflector = [i for i in reversed(range(len(alphabets)))] +gear_one = list(range(len(alphabets))) +gear_two = list(range(len(alphabets))) +gear_three = list(range(len(alphabets))) +reflector 
= list(reversed(range(len(alphabets)))) code = [] gear_one_pos = gear_two_pos = gear_three_pos = 0 diff --git a/maths/primelib.py b/maths/primelib.py index eb72a9f8ae6a..9586227ea3ca 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -89,7 +89,7 @@ def sieve_er(n): assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N - begin_list = [x for x in range(2, n + 1)] + begin_list = list(range(2, n + 1)) ans = [] # this list will be returns. diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index 2441f05d15ef..0cf732d60ca8 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -9,7 +9,7 @@ def check_matrix(matrix: list[list[int]]) -> bool: # must be - matrix = list(list(row) for row in matrix) + matrix = [list(row) for row in matrix] if matrix and isinstance(matrix, list): if isinstance(matrix[0], list): prev_len = 0 @@ -44,7 +44,7 @@ def spiral_print_clockwise(a: list[list[int]]) -> None: 7 """ if check_matrix(a) and len(a) > 0: - a = list(list(row) for row in a) + a = [list(row) for row in a] mat_row = len(a) if isinstance(a[0], list): mat_col = len(a[0]) diff --git a/other/davisb_putnamb_logemannb_loveland.py b/other/davisb_putnamb_logemannb_loveland.py index 3110515d5874..a1bea5b3992e 100644 --- a/other/davisb_putnamb_logemannb_loveland.py +++ b/other/davisb_putnamb_logemannb_loveland.py @@ -317,7 +317,7 @@ def dpll_algorithm( if p: tmp_model = model tmp_model[p] = value - tmp_symbols = [i for i in symbols] + tmp_symbols = list(symbols) if p in tmp_symbols: tmp_symbols.remove(p) return dpll_algorithm(clauses, tmp_symbols, tmp_model) @@ -329,7 +329,7 @@ def dpll_algorithm( if p: tmp_model = model tmp_model[p] = value - tmp_symbols = [i for i in symbols] + tmp_symbols = list(symbols) if p in tmp_symbols: tmp_symbols.remove(p) return dpll_algorithm(clauses, tmp_symbols, tmp_model) diff --git a/project_euler/problem_042/solution42.py b/project_euler/problem_042/solution42.py index 6d22a8dfb655..c0fb2ad50c11 100644 --- a/project_euler/problem_042/solution42.py +++ b/project_euler/problem_042/solution42.py @@ -33,11 +33,11 @@ def solution(): with open(words_file_path) as f: words = f.readline() - words = list(map(lambda word: word.strip('"'), words.strip("\r\n").split(","))) + words = [word.strip('"') for word in words.strip("\r\n").split(",")] words = list( filter( lambda word: word in TRIANGULAR_NUMBERS, - map(lambda word: sum(map(lambda x: ord(x) - 64, word)), words), + (sum(ord(x) - 64 for x in word) for word in words), ) ) return len(words) diff --git a/project_euler/problem_052/sol1.py b/project_euler/problem_052/sol1.py index df5c46ae05d1..21acfb633696 100644 --- a/project_euler/problem_052/sol1.py +++ b/project_euler/problem_052/sol1.py @@ -21,12 +21,12 @@ def solution(): while True: if ( - sorted(list(str(i))) - == sorted(list(str(2 * i))) - == sorted(list(str(3 * i))) - == sorted(list(str(4 * i))) - == sorted(list(str(5 * i))) - == sorted(list(str(6 * i))) + sorted(str(i)) + == sorted(str(2 * i)) + == sorted(str(3 * i)) + == sorted(str(4 * i)) + == sorted(str(5 * i)) + == sorted(str(6 * i)) ): return i diff --git a/project_euler/problem_062/sol1.py b/project_euler/problem_062/sol1.py index 0c9baf880497..3efdb3513bf6 100644 --- a/project_euler/problem_062/sol1.py +++ b/project_euler/problem_062/sol1.py @@ -55,7 +55,7 @@ def get_digits(num: int) -> str: >>> get_digits(123) '0166788' """ - return "".join(sorted(list(str(num**3)))) + return "".join(sorted(str(num**3))) if __name__ == 
"__main__": diff --git a/project_euler/problem_067/sol1.py b/project_euler/problem_067/sol1.py index 527d4dc592ac..ab305684dd0d 100644 --- a/project_euler/problem_067/sol1.py +++ b/project_euler/problem_067/sol1.py @@ -28,8 +28,8 @@ def solution(): with open(triangle) as f: triangle = f.readlines() - a = map(lambda x: x.rstrip("\r\n").split(" "), triangle) - a = list(map(lambda x: list(map(int, x)), a)) + a = (x.rstrip("\r\n").split(" ") for x in triangle) + a = [list(map(int, x)) for x in a] for i in range(1, len(a)): for j in range(len(a[i])): diff --git a/project_euler/problem_109/sol1.py b/project_euler/problem_109/sol1.py index 91c71eb9f4cb..852f001d38af 100644 --- a/project_euler/problem_109/sol1.py +++ b/project_euler/problem_109/sol1.py @@ -65,7 +65,7 @@ def solution(limit: int = 100) -> int: >>> solution(50) 12577 """ - singles: list[int] = [x for x in range(1, 21)] + [25] + singles: list[int] = list(range(1, 21)) + [25] doubles: list[int] = [2 * x for x in range(1, 21)] + [50] triples: list[int] = [3 * x for x in range(1, 21)] all_values: list[int] = singles + doubles + triples + [0] diff --git a/project_euler/problem_551/sol1.py b/project_euler/problem_551/sol1.py index c15445e4d7b0..2cd75efbb68d 100644 --- a/project_euler/problem_551/sol1.py +++ b/project_euler/problem_551/sol1.py @@ -13,7 +13,7 @@ """ -ks = [k for k in range(2, 20 + 1)] +ks = range(2, 20 + 1) base = [10**k for k in range(ks[-1] + 1)] memo: dict[int, dict[int, list[list[int]]]] = {} diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py index afe62bc7ec30..a496cdc0c743 100644 --- a/sorts/radix_sort.py +++ b/sorts/radix_sort.py @@ -24,7 +24,7 @@ def radix_sort(list_of_ints: list[int]) -> list[int]: max_digit = max(list_of_ints) while placement <= max_digit: # declare and initialize empty buckets - buckets: list[list] = [list() for _ in range(RADIX)] + buckets: list[list] = [[] for _ in range(RADIX)] # split list_of_ints between the buckets for i in list_of_ints: tmp = int((i / placement) % RADIX) diff --git a/strings/aho_corasick.py b/strings/aho_corasick.py index 2d2f562df951..25ed649ce645 100644 --- a/strings/aho_corasick.py +++ b/strings/aho_corasick.py @@ -70,9 +70,7 @@ def search_in(self, string: str) -> dict[str, list[int]]: >>> A.search_in("whatever, err ... 
, wherever") {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]} """ - result: dict = ( - dict() - ) # returns a dict with keywords and list of its occurrences + result: dict = {} # returns a dict with keywords and list of its occurrences current_state = 0 for i in range(len(string)): while ( From 553624fcd4d7e8a4c561b182967291a1cc44ade9 Mon Sep 17 00:00:00 2001 From: Paul <56065602+ZeroDayOwl@users.noreply.github.com> Date: Sat, 15 Oct 2022 23:39:27 +0600 Subject: [PATCH 0556/1543] Add algorithm for Casimir Effect (#7141) * Add algorithm for Casimir Effect * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix the line length * Fix the line length * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Import math module and use Pi * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update doctest results * from math import pi Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- physics/casimir_effect.py | 121 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 physics/casimir_effect.py diff --git a/physics/casimir_effect.py b/physics/casimir_effect.py new file mode 100644 index 000000000000..ee8a6c1eba53 --- /dev/null +++ b/physics/casimir_effect.py @@ -0,0 +1,121 @@ +""" +Title : Finding the value of magnitude of either the Casimir force, the surface area +of one of the plates or distance between the plates provided that the other +two parameters are given. + +Description : In quantum field theory, the Casimir effect is a physical force +acting on the macroscopic boundaries of a confined space which arises from the +quantum fluctuations of the field. It is a physical force exerted between separate +objects, which is due to neither charge, gravity, nor the exchange of particles, +but instead is due to resonance of all-pervasive energy fields in the intervening +space between the objects. Since the strength of the force falls off rapidly with +distance it is only measurable when the distance between the objects is extremely +small. On a submicron scale, this force becomes so strong that it becomes the +dominant force between uncharged conductors. + +Dutch physicist Hendrik B. G. Casimir first proposed the existence of the force, +and he formulated an experiment to detect it in 1948 while participating in research +at Philips Research Labs. The classic form of his experiment used a pair of uncharged +parallel metal plates in a vacuum, and successfully demonstrated the force to within +15% of the value he had predicted according to his theory. + +The Casimir force F for idealized, perfectly conducting plates of surface area +A square meter and placed at a distance of a meter apart with vacuum between +them is expressed as - + +F = - ((Reduced Planck Constant ℏ) * c * Pi^2 * A) / (240 * a^4) + +Here, the negative sign indicates the force is attractive in nature. For the ease +of calculation, only the magnitude of the force is considered. + +Source : +- https://en.wikipedia.org/wiki/Casimir_effect +- https://www.cs.mcgill.ca/~rwest/wikispeedia/wpcd/wp/c/Casimir_effect.htm +- Casimir, H. B. ; Polder, D. (1948) "The Influence of Retardation on the + London-van der Waals Forces", Physical Review, vol. 73, Issue 4, pp. 
360-372 +""" + +from __future__ import annotations + +from math import pi + +# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of +# Pi and the function +REDUCED_PLANCK_CONSTANT = 1.054571817e-34 # unit of ℏ : J * s + +SPEED_OF_LIGHT = 3e8 # unit of c : m * s^-1 + + +def casimir_force(force: float, area: float, distance: float) -> dict[str, float]: + + """ + Input Parameters + ---------------- + force -> Casimir Force : magnitude in Newtons + + area -> Surface area of each plate : magnitude in square meters + + distance -> Distance between two plates : distance in Meters + + Returns + ------- + result : dict name, value pair of the parameter having Zero as it's value + + Returns the value of one of the parameters specified as 0, provided the values of + other parameters are given. + >>> casimir_force(force = 0, area = 4, distance = 0.03) + {'force': 6.4248189174864216e-21} + + >>> casimir_force(force = 2635e-13, area = 0.0023, distance = 0) + {'distance': 1.0323056015031114e-05} + + >>> casimir_force(force = 2737e-21, area = 0, distance = 0.0023746) + {'area': 0.06688838837354052} + + >>> casimir_force(force = 3457e-12, area = 0, distance = 0) + Traceback (most recent call last): + ... + ValueError: One and only one argument must be 0 + + >>> casimir_force(force = 3457e-12, area = 0, distance = -0.00344) + Traceback (most recent call last): + ... + ValueError: Distance can not be negative + + >>> casimir_force(force = -912e-12, area = 0, distance = 0.09374) + Traceback (most recent call last): + ... + ValueError: Magnitude of force can not be negative + """ + + if (force, area, distance).count(0) != 1: + raise ValueError("One and only one argument must be 0") + if force < 0: + raise ValueError("Magnitude of force can not be negative") + if distance < 0: + raise ValueError("Distance can not be negative") + if area < 0: + raise ValueError("Area can not be negative") + if force == 0: + force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( + 240 * (distance) ** 4 + ) + return {"force": force} + elif area == 0: + area = (240 * force * (distance) ** 4) / ( + REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 + ) + return {"area": area} + elif distance == 0: + distance = ( + (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) + ) ** (1 / 4) + return {"distance": distance} + raise ValueError("One and only one argument must be 0") + + +# Run doctest +if __name__ == "__main__": + import doctest + + doctest.testmod() From c94e215c8dbdfe1f349eab5708be6b5f337b6ddd Mon Sep 17 00:00:00 2001 From: Caeden Date: Sat, 15 Oct 2022 23:51:23 +0100 Subject: [PATCH 0557/1543] types: Update binary search tree typehints (#7197) * types: Update binary search tree typehints * refactor: Don't return `self` in `:meth:insert` * test: Fix failing doctests * Apply suggestions from code review Co-authored-by: Dhruv Manilawala --- .../binary_tree/binary_search_tree.py | 77 +++++++++++-------- 1 file changed, 44 insertions(+), 33 deletions(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 51a651be0f82..fc60540a1f3b 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -2,15 +2,18 @@ A binary search Tree """ +from collections.abc import Iterable +from typing import Any + class Node: - def __init__(self, value, parent): + def __init__(self, value: int | None = None): self.value = value - self.parent = parent # Added in order to 
delete a node easier - self.left = None - self.right = None + self.parent: Node | None = None # Added in order to delete a node easier + self.left: Node | None = None + self.right: Node | None = None - def __repr__(self): + def __repr__(self) -> str: from pprint import pformat if self.left is None and self.right is None: @@ -19,16 +22,16 @@ def __repr__(self): class BinarySearchTree: - def __init__(self, root=None): + def __init__(self, root: Node | None = None): self.root = root - def __str__(self): + def __str__(self) -> str: """ Return a string of all the Nodes using in order traversal """ return str(self.root) - def __reassign_nodes(self, node, new_children): + def __reassign_nodes(self, node: Node, new_children: Node | None) -> None: if new_children is not None: # reset its kids new_children.parent = node.parent if node.parent is not None: # reset its parent @@ -37,23 +40,27 @@ def __reassign_nodes(self, node, new_children): else: node.parent.left = new_children else: - self.root = new_children + self.root = None - def is_right(self, node): - return node == node.parent.right + def is_right(self, node: Node) -> bool: + if node.parent and node.parent.right: + return node == node.parent.right + return False - def empty(self): + def empty(self) -> bool: return self.root is None - def __insert(self, value): + def __insert(self, value) -> None: """ Insert a new node in Binary Search Tree with value label """ - new_node = Node(value, None) # create a new Node + new_node = Node(value) # create a new Node if self.empty(): # if Tree is empty self.root = new_node # set its root else: # Tree is not empty parent_node = self.root # from root + if parent_node is None: + return None while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: @@ -69,12 +76,11 @@ def __insert(self, value): parent_node = parent_node.right new_node.parent = parent_node - def insert(self, *values): + def insert(self, *values) -> None: for value in values: self.__insert(value) - return self - def search(self, value): + def search(self, value) -> Node | None: if self.empty(): raise IndexError("Warning: Tree is empty! 
please use another.") else: @@ -84,30 +90,35 @@ def search(self, value): node = node.left if value < node.value else node.right return node - def get_max(self, node=None): + def get_max(self, node: Node | None = None) -> Node | None: """ We go deep on the right branch """ if node is None: + if self.root is None: + return None node = self.root + if not self.empty(): while node.right is not None: node = node.right return node - def get_min(self, node=None): + def get_min(self, node: Node | None = None) -> Node | None: """ We go deep on the left branch """ if node is None: node = self.root + if self.root is None: + return None if not self.empty(): node = self.root while node.left is not None: node = node.left return node - def remove(self, value): + def remove(self, value: int) -> None: node = self.search(value) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children @@ -120,18 +131,18 @@ def remove(self, value): tmp_node = self.get_max( node.left ) # Gets the max value of the left branch - self.remove(tmp_node.value) + self.remove(tmp_node.value) # type: ignore node.value = ( - tmp_node.value + tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure - def preorder_traverse(self, node): + def preorder_traverse(self, node: Node | None) -> Iterable: if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left) yield from self.preorder_traverse(node.right) - def traversal_tree(self, traversal_function=None): + def traversal_tree(self, traversal_function=None) -> Any: """ This function traversal the tree. You can pass a function to traversal the tree as needed by client code @@ -141,7 +152,7 @@ def traversal_tree(self, traversal_function=None): else: return traversal_function(self.root) - def inorder(self, arr: list, node: Node): + def inorder(self, arr: list, node: Node | None) -> None: """Perform an inorder traversal and append values of the nodes to a list named arr""" if node: @@ -151,12 +162,12 @@ def inorder(self, arr: list, node: Node): def find_kth_smallest(self, k: int, node: Node) -> int: """Return the kth smallest element in a binary search tree""" - arr: list = [] + arr: list[int] = [] self.inorder(arr, node) # append all values to list using inorder traversal return arr[k - 1] -def postorder(curr_node): +def postorder(curr_node: Node | None) -> list[Node]: """ postOrder (left, right, self) """ @@ -166,7 +177,7 @@ def postorder(curr_node): return node_list -def binary_search_tree(): +def binary_search_tree() -> None: r""" Example 8 @@ -177,7 +188,8 @@ def binary_search_tree(): / \ / 4 7 13 - >>> t = BinarySearchTree().insert(8, 3, 6, 1, 10, 14, 13, 4, 7) + >>> t = BinarySearchTree() + >>> t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7) >>> print(" ".join(repr(i.value) for i in t.traversal_tree())) 8 3 1 6 4 7 10 14 13 >>> print(" ".join(repr(i.value) for i in t.traversal_tree(postorder))) @@ -206,8 +218,8 @@ def binary_search_tree(): print("The value -1 doesn't exist") if not t.empty(): - print("Max Value: ", t.get_max().value) - print("Min Value: ", t.get_min().value) + print("Max Value: ", t.get_max().value) # type: ignore + print("Min Value: ", t.get_min().value) # type: ignore for i in testlist: t.remove(i) @@ -217,5 +229,4 @@ def binary_search_tree(): if __name__ == "__main__": import doctest - doctest.testmod() - # binary_search_tree() + doctest.testmod(verbose=True) From 04698538d816fc5f70c850e8b89c6d1f5599fa84 Mon Sep 17 00:00:00 2001 From: 
CenTdemeern1 Date: Sat, 15 Oct 2022 22:25:38 -0700 Subject: [PATCH 0558/1543] Misc fixes across multiple algorithms (#6912) Source: Snyk code quality Add scikit-fuzzy to requirements Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Dhruv Manilawala --- compression/huffman.py | 2 +- data_structures/linked_list/is_palindrome.py | 2 +- .../filters/local_binary_pattern.py | 2 +- fuzzy_logic/fuzzy_operations.py | 6 +----- graphs/dijkstra_algorithm.py | 4 ++-- .../directed_and_undirected_(weighted)_graph.py | 7 ------- hashes/hamming_code.py | 3 +-- linear_algebra/src/test_linear_algebra.py | 2 +- maths/extended_euclidean_algorithm.py | 5 +++-- maths/jaccard_similarity.py | 15 ++++++++------- matrix/matrix_class.py | 2 +- project_euler/problem_001/sol7.py | 4 +--- project_euler/problem_042/solution42.py | 11 +++++------ project_euler/problem_067/sol1.py | 8 ++++++-- project_euler/problem_089/sol1.py | 5 +++-- requirements.txt | 2 +- scheduling/first_come_first_served.py | 4 ++-- scheduling/multi_level_feedback_queue.py | 2 +- web_programming/emails_from_url.py | 2 +- 19 files changed, 40 insertions(+), 48 deletions(-) diff --git a/compression/huffman.py b/compression/huffman.py index d5d78b753c3f..f619ed82c764 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -31,7 +31,7 @@ def parse_file(file_path: str) -> list[Letter]: c = f.read(1) if not c: break - chars[c] = chars[c] + 1 if c in chars.keys() else 1 + chars[c] = chars[c] + 1 if c in chars else 1 return sorted((Letter(c, f) for c, f in chars.items()), key=lambda l: l.freq) diff --git a/data_structures/linked_list/is_palindrome.py b/data_structures/linked_list/is_palindrome.py index acc87c1c272b..ec19e99f78c0 100644 --- a/data_structures/linked_list/is_palindrome.py +++ b/data_structures/linked_list/is_palindrome.py @@ -55,7 +55,7 @@ def is_palindrome_dict(head): d = {} pos = 0 while head: - if head.val in d.keys(): + if head.val in d: d[head.val].append(pos) else: d[head.val] = [pos] diff --git a/digital_image_processing/filters/local_binary_pattern.py b/digital_image_processing/filters/local_binary_pattern.py index e73aa59bfa53..e92e554a3e5f 100644 --- a/digital_image_processing/filters/local_binary_pattern.py +++ b/digital_image_processing/filters/local_binary_pattern.py @@ -60,7 +60,7 @@ def local_binary_value(image: np.ndarray, x_coordinate: int, y_coordinate: int) ) -if __name__ == "main": +if __name__ == "__main__": # Reading the image and converting it to grayscale. 
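# Aside (a minimal illustration, not part of the patch): the local_binary_pattern.py
# fix above works because Python names a module "__main__" only when the file is run
# as a script, so a guard spelled "main" is never true in that situation and the demo
# code beneath it silently never executed.
if __name__ == "main":  # False when this file is run directly -- the old, broken guard
    print("unreachable when run as a script")
if __name__ == "__main__":  # True when run directly, False when imported -- the fix
    print("runs when the file is executed directly")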
image = cv2.imread( diff --git a/fuzzy_logic/fuzzy_operations.py b/fuzzy_logic/fuzzy_operations.py index fbaca9421327..0786ef8b0c67 100644 --- a/fuzzy_logic/fuzzy_operations.py +++ b/fuzzy_logic/fuzzy_operations.py @@ -8,11 +8,7 @@ - 3.5 """ import numpy as np - -try: - import skfuzzy as fuzz -except ImportError: - fuzz = None +import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py index 122821a376ed..1845dad05db2 100644 --- a/graphs/dijkstra_algorithm.py +++ b/graphs/dijkstra_algorithm.py @@ -89,13 +89,13 @@ def add_edge(self, u, v, w): # Edge going from node u to v and v to u with weight w # u (w)-> v, v (w) -> u # Check if u already in graph - if u in self.adjList.keys(): + if u in self.adjList: self.adjList[u].append((v, w)) else: self.adjList[u] = [(v, w)] # Assuming undirected graph - if v in self.adjList.keys(): + if v in self.adjList: self.adjList[v].append((u, w)) else: self.adjList[v] = [(u, w)] diff --git a/graphs/directed_and_undirected_(weighted)_graph.py b/graphs/directed_and_undirected_(weighted)_graph.py index 5cfa9e13edd9..43a72b89e3a7 100644 --- a/graphs/directed_and_undirected_(weighted)_graph.py +++ b/graphs/directed_and_undirected_(weighted)_graph.py @@ -226,9 +226,6 @@ def has_cycle(self): break else: return True - # TODO:The following code is unreachable. - anticipating_nodes.add(stack[len_stack_minus_one]) - len_stack_minus_one -= 1 if visited.count(node[1]) < 1: stack.append(node[1]) visited.append(node[1]) @@ -454,10 +451,6 @@ def has_cycle(self): break else: return True - # TODO: the following code is unreachable - # is this meant to be called in the else ? - anticipating_nodes.add(stack[len_stack_minus_one]) - len_stack_minus_one -= 1 if visited.count(node[1]) < 1: stack.append(node[1]) visited.append(node[1]) diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index a62d092a172f..481a6750773a 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -79,8 +79,7 @@ def emitter_converter(size_par, data): ['1', '1', '1', '1', '0', '1', '0', '0', '1', '0', '1', '1', '1', '1', '1', '1'] """ if size_par + len(data) <= 2**size_par - (len(data) - 1): - print("ERROR - size of parity don't match with size of data") - exit(0) + raise ValueError("size of parity don't match with size of data") data_out = [] parity = [] diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index 97c06cb44e15..50d079572e0f 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -89,7 +89,7 @@ def test_zero_vector(self) -> None: """ test for global function zero_vector() """ - self.assertTrue(str(zero_vector(10)).count("0") == 10) + self.assertEqual(str(zero_vector(10)).count("0"), 10) def test_unit_basis_vector(self) -> None: """ diff --git a/maths/extended_euclidean_algorithm.py b/maths/extended_euclidean_algorithm.py index 72afd40aa707..c54909e19101 100644 --- a/maths/extended_euclidean_algorithm.py +++ b/maths/extended_euclidean_algorithm.py @@ -75,11 +75,12 @@ def main(): """Call Extended Euclidean Algorithm.""" if len(sys.argv) < 3: print("2 integer arguments required") - exit(1) + return 1 a = int(sys.argv[1]) b = int(sys.argv[2]) print(extended_euclidean_algorithm(a, b)) + return 0 if __name__ == "__main__": - main() + raise SystemExit(main()) diff --git a/maths/jaccard_similarity.py b/maths/jaccard_similarity.py index 77f4b90ea79f..b299a81476ab 
100644 --- a/maths/jaccard_similarity.py +++ b/maths/jaccard_similarity.py @@ -14,7 +14,7 @@ """ -def jaccard_similariy(set_a, set_b, alternative_union=False): +def jaccard_similarity(set_a, set_b, alternative_union=False): """ Finds the jaccard similarity between two sets. Essentially, its intersection over union. @@ -35,18 +35,18 @@ def jaccard_similariy(set_a, set_b, alternative_union=False): Examples: >>> set_a = {'a', 'b', 'c', 'd', 'e'} >>> set_b = {'c', 'd', 'e', 'f', 'h', 'i'} - >>> jaccard_similariy(set_a, set_b) + >>> jaccard_similarity(set_a, set_b) 0.375 - >>> jaccard_similariy(set_a, set_a) + >>> jaccard_similarity(set_a, set_a) 1.0 - >>> jaccard_similariy(set_a, set_a, True) + >>> jaccard_similarity(set_a, set_a, True) 0.5 >>> set_a = ['a', 'b', 'c', 'd', 'e'] >>> set_b = ('c', 'd', 'e', 'f', 'h', 'i') - >>> jaccard_similariy(set_a, set_b) + >>> jaccard_similarity(set_a, set_b) 0.375 """ @@ -67,14 +67,15 @@ def jaccard_similariy(set_a, set_b, alternative_union=False): if alternative_union: union = len(set_a) + len(set_b) + return len(intersection) / union else: union = set_a + [element for element in set_b if element not in set_a] + return len(intersection) / len(union) return len(intersection) / len(union) if __name__ == "__main__": - set_a = {"a", "b", "c", "d", "e"} set_b = {"c", "d", "e", "f", "h", "i"} - print(jaccard_similariy(set_a, set_b)) + print(jaccard_similarity(set_a, set_b)) diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py index 6495bd8fc88d..8b6fefa2124b 100644 --- a/matrix/matrix_class.py +++ b/matrix/matrix_class.py @@ -286,7 +286,7 @@ def add_column(self, column: list[int], position: int | None = None) -> None: # MATRIX OPERATIONS def __eq__(self, other: object) -> bool: if not isinstance(other, Matrix): - raise TypeError("A Matrix can only be compared with another Matrix") + return NotImplemented return self.rows == other.rows def __ne__(self, other: object) -> bool: diff --git a/project_euler/problem_001/sol7.py b/project_euler/problem_001/sol7.py index 8f5d1977fdde..6ada70c12dbd 100644 --- a/project_euler/problem_001/sol7.py +++ b/project_euler/problem_001/sol7.py @@ -26,9 +26,7 @@ def solution(n: int = 1000) -> int: result = 0 for i in range(n): - if i % 3 == 0: - result += i - elif i % 5 == 0: + if i % 3 == 0 or i % 5 == 0: result += i return result diff --git a/project_euler/problem_042/solution42.py b/project_euler/problem_042/solution42.py index c0fb2ad50c11..f8a54e40eaab 100644 --- a/project_euler/problem_042/solution42.py +++ b/project_euler/problem_042/solution42.py @@ -34,12 +34,11 @@ def solution(): words = f.readline() words = [word.strip('"') for word in words.strip("\r\n").split(",")] - words = list( - filter( - lambda word: word in TRIANGULAR_NUMBERS, - (sum(ord(x) - 64 for x in word) for word in words), - ) - ) + words = [ + word + for word in [sum(ord(x) - 64 for x in word) for word in words] + if word in TRIANGULAR_NUMBERS + ] return len(words) diff --git a/project_euler/problem_067/sol1.py b/project_euler/problem_067/sol1.py index ab305684dd0d..f20c206cca11 100644 --- a/project_euler/problem_067/sol1.py +++ b/project_euler/problem_067/sol1.py @@ -28,8 +28,12 @@ def solution(): with open(triangle) as f: triangle = f.readlines() - a = (x.rstrip("\r\n").split(" ") for x in triangle) - a = [list(map(int, x)) for x in a] + a = [] + for line in triangle: + numbers_from_line = [] + for number in line.strip().split(" "): + numbers_from_line.append(int(number)) + a.append(numbers_from_line) for i in range(1, len(a)): for j in 
range(len(a[i])): diff --git a/project_euler/problem_089/sol1.py b/project_euler/problem_089/sol1.py index 1c4e2600f847..83609cd236e1 100644 --- a/project_euler/problem_089/sol1.py +++ b/project_euler/problem_089/sol1.py @@ -125,8 +125,9 @@ def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int: savings = 0 - file1 = open(os.path.dirname(__file__) + roman_numerals_filename) - lines = file1.readlines() + with open(os.path.dirname(__file__) + roman_numerals_filename) as file1: + lines = file1.readlines() + for line in lines: original = line.strip() num = parse_roman_numerals(original) diff --git a/requirements.txt b/requirements.txt index 0fbc1cc4b45c..b14a3eb0157c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ pandas pillow qiskit requests -# scikit-fuzzy # Causing broken builds +scikit-fuzzy sklearn statsmodels sympy diff --git a/scheduling/first_come_first_served.py b/scheduling/first_come_first_served.py index c5f61720f97e..06cdb8ddf821 100644 --- a/scheduling/first_come_first_served.py +++ b/scheduling/first_come_first_served.py @@ -79,7 +79,7 @@ def calculate_average_waiting_time(waiting_times: list[int]) -> float: # ensure that we actually have processes if len(processes) == 0: print("Zero amount of processes") - exit() + raise SystemExit(0) # duration time of all processes duration_times = [19, 8, 9] @@ -87,7 +87,7 @@ def calculate_average_waiting_time(waiting_times: list[int]) -> float: # ensure we can match each id to a duration time if len(duration_times) != len(processes): print("Unable to match all id's with their duration time") - exit() + raise SystemExit(0) # get the waiting times and the turnaround times waiting_times = calculate_waiting_times(duration_times) diff --git a/scheduling/multi_level_feedback_queue.py b/scheduling/multi_level_feedback_queue.py index a3ba1b340e9b..abee3c85c5a5 100644 --- a/scheduling/multi_level_feedback_queue.py +++ b/scheduling/multi_level_feedback_queue.py @@ -276,7 +276,7 @@ def multi_level_feedback_queue(self) -> deque[Process]: queue = deque([P1, P2, P3, P4]) if len(time_slices) != number_of_queues - 1: - exit() + raise SystemExit(0) doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])}) diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index afaee5bbe854..074ef878c0d7 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -93,7 +93,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]: except ValueError: pass except ValueError: - exit(-1) + raise SystemExit(1) # Finally return a sorted list of email addresses with no duplicates. return sorted(valid_emails) From e7b6d2824a65985790d0044262f717898ffbeb4d Mon Sep 17 00:00:00 2001 From: Sagar Giri Date: Sun, 16 Oct 2022 16:43:29 +0900 Subject: [PATCH 0559/1543] Change to https. (#7277) * Change to https. * Revert the py_tf file. 
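Aside, not part of the patch: a rough way to spot-check a link-rewriting commit like this one is to confirm that a rewritten URL really resolves over HTTPS. This is only a sketch; it assumes the `requests` package already listed in requirements.txt and uses one of the URLs this commit touches as the sample.

import requests


def serves_https(url: str) -> bool:
    # follow any redirects and inspect the scheme of the final resolved URL
    response = requests.head(url, allow_redirects=True, timeout=10)
    return response.url.startswith("https://")


if __name__ == "__main__":
    print(serves_https("https://en.wikipedia.org/wiki/Letter_frequency"))  # expected: True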
--- fractals/julia_sets.py | 2 +- fractals/sierpinski_triangle.py | 2 +- machine_learning/lstm/lstm_prediction.py | 2 +- machine_learning/sequential_minimum_optimization.py | 4 ++-- maths/matrix_exponentiation.py | 2 +- maths/test_prime_check.py | 2 +- physics/n_body_simulation.py | 4 ++-- strings/frequency_finder.py | 2 +- web_programming/crawl_google_results.py | 2 +- web_programming/crawl_google_scholar_citation.py | 2 +- web_programming/current_weather.py | 2 +- web_programming/giphy.py | 2 +- 12 files changed, 14 insertions(+), 14 deletions(-) diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 28c675c750bc..35fdc45d020a 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -12,7 +12,7 @@ https://en.wikipedia.org/wiki/File:Julia_z2%2B0,25.png - Other examples from https://en.wikipedia.org/wiki/Julia_set - An exponential map Julia set, ambiantly homeomorphic to the examples in -http://www.math.univ-toulouse.fr/~cheritat/GalII/galery.html +https://www.math.univ-toulouse.fr/~cheritat/GalII/galery.html and https://ddd.uab.cat/pub/pubmat/02141493v43n1/02141493v43n1p27.pdf diff --git a/fractals/sierpinski_triangle.py b/fractals/sierpinski_triangle.py index 8be2897c152a..084f6661f425 100644 --- a/fractals/sierpinski_triangle.py +++ b/fractals/sierpinski_triangle.py @@ -24,7 +24,7 @@ - $python sierpinski_triangle.py Credits: This code was written by editing the code from -http://www.riannetrujillo.com/blog/python-fractal/ +https://www.riannetrujillo.com/blog/python-fractal/ """ import sys diff --git a/machine_learning/lstm/lstm_prediction.py b/machine_learning/lstm/lstm_prediction.py index 6fd3cf29131d..74197c46a0ad 100644 --- a/machine_learning/lstm/lstm_prediction.py +++ b/machine_learning/lstm/lstm_prediction.py @@ -1,7 +1,7 @@ """ Create a Long Short Term Memory (LSTM) network model An LSTM is a type of Recurrent Neural Network (RNN) as discussed at: - * http://colah.github.io/posts/2015-08-Understanding-LSTMs + * https://colah.github.io/posts/2015-08-Understanding-LSTMs * https://en.wikipedia.org/wiki/Long_short-term_memory """ import numpy as np diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index fb4b35f31289..40adca7e0828 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -28,7 +28,7 @@ Reference: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf - http://web.cs.iastate.edu/~honavar/smo-svm.pdf + https://web.cs.iastate.edu/~honavar/smo-svm.pdf """ @@ -43,7 +43,7 @@ from sklearn.preprocessing import StandardScaler CANCER_DATASET_URL = ( - "http://archive.ics.uci.edu/ml/machine-learning-databases/" + "https://archive.ics.uci.edu/ml/machine-learning-databases/" "breast-cancer-wisconsin/wdbc.data" ) diff --git a/maths/matrix_exponentiation.py b/maths/matrix_exponentiation.py index 033ceb3f28a0..7c37151c87ca 100644 --- a/maths/matrix_exponentiation.py +++ b/maths/matrix_exponentiation.py @@ -5,7 +5,7 @@ """ Matrix Exponentiation is a technique to solve linear recurrences in logarithmic time. 
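# Aside (a self-contained sketch, not part of the patch): the sentence above is the whole
# idea behind maths/matrix_exponentiation.py -- a linear recurrence such as Fibonacci can
# be evaluated with O(log n) 2x2 matrix multiplications, because
# [[1, 1], [1, 0]] ** n == [[F(n+1), F(n)], [F(n), F(n-1)]].
def mat_mult(a: list[list[int]], b: list[list[int]]) -> list[list[int]]:
    # plain 2x2 matrix product
    return [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]


def mat_pow(matrix: list[list[int]], n: int) -> list[list[int]]:
    # binary (square-and-multiply) exponentiation: O(log n) multiplications
    result = [[1, 0], [0, 1]]  # 2x2 identity
    while n:
        if n & 1:
            result = mat_mult(result, matrix)
        matrix = mat_mult(matrix, matrix)
        n >>= 1
    return result


def fibonacci(n: int) -> int:
    return mat_pow([[1, 1], [1, 0]], n)[0][1]


print([fibonacci(i) for i in range(10)])  # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]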
You read more about it here: -http://zobayer.blogspot.com/2010/11/matrix-exponentiation.html +https://zobayer.blogspot.com/2010/11/matrix-exponentiation.html https://www.hackerearth.com/practice/notes/matrix-exponentiation-1/ """ diff --git a/maths/test_prime_check.py b/maths/test_prime_check.py index b6389684af9e..3ea3b2f1f88b 100644 --- a/maths/test_prime_check.py +++ b/maths/test_prime_check.py @@ -1,6 +1,6 @@ """ Minimalist file that allows pytest to find and run the Test unittest. For details, see: -http://doc.pytest.org/en/latest/goodpractices.html#conventions-for-python-test-discovery +https://doc.pytest.org/en/latest/goodpractices.html#conventions-for-python-test-discovery """ from .prime_check import Test diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index 2f8153782663..e62e1de62757 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -8,7 +8,7 @@ numerical divergences when a particle comes too close to another (and the force goes to infinity). (Description adapted from https://en.wikipedia.org/wiki/N-body_simulation ) -(See also http://www.shodor.org/refdesk/Resources/Algorithms/EulersMethod/ ) +(See also https://www.shodor.org/refdesk/Resources/Algorithms/EulersMethod/ ) """ @@ -258,7 +258,7 @@ def example_1() -> BodySystem: Example 1: figure-8 solution to the 3-body-problem This example can be seen as a test of the implementation: given the right initial conditions, the bodies should move in a figure-8. - (initial conditions taken from http://www.artcompsci.org/vol_1/v1_web/node56.html) + (initial conditions taken from https://www.artcompsci.org/vol_1/v1_web/node56.html) >>> body_system = example_1() >>> len(body_system) 3 diff --git a/strings/frequency_finder.py b/strings/frequency_finder.py index 7024be17b8ab..19f97afbbe37 100644 --- a/strings/frequency_finder.py +++ b/strings/frequency_finder.py @@ -2,7 +2,7 @@ import string -# frequency taken from http://en.wikipedia.org/wiki/Letter_frequency +# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency english_letter_freq = { "E": 12.70, "T": 9.06, diff --git a/web_programming/crawl_google_results.py b/web_programming/crawl_google_results.py index a33a3f3bbe5c..1f5e6d31992b 100644 --- a/web_programming/crawl_google_results.py +++ b/web_programming/crawl_google_results.py @@ -21,4 +21,4 @@ if link.text == "Maps": webbrowser.open(link.get("href")) else: - webbrowser.open(f"http://google.com{link.get('href')}") + webbrowser.open(f"https://google.com{link.get('href')}") diff --git a/web_programming/crawl_google_scholar_citation.py b/web_programming/crawl_google_scholar_citation.py index d023380c0818..f92a3d139520 100644 --- a/web_programming/crawl_google_scholar_citation.py +++ b/web_programming/crawl_google_scholar_citation.py @@ -29,4 +29,4 @@ def get_citation(base_url: str, params: dict) -> str: "year": 2018, "hl": "en", } - print(get_citation("http://scholar.google.com/scholar_lookup", params=params)) + print(get_citation("https://scholar.google.com/scholar_lookup", params=params)) diff --git a/web_programming/current_weather.py b/web_programming/current_weather.py index e043b438473f..3ed4c8a95a0c 100644 --- a/web_programming/current_weather.py +++ b/web_programming/current_weather.py @@ -1,7 +1,7 @@ import requests APPID = "" # <-- Put your OpenWeatherMap appid here! 
-URL_BASE = "http://api.openweathermap.org/data/2.5/" +URL_BASE = "https://api.openweathermap.org/data/2.5/" def current_weather(q: str = "Chicago", appid: str = APPID) -> dict: diff --git a/web_programming/giphy.py b/web_programming/giphy.py index dc8c6be08caa..a5c3f8f7493e 100644 --- a/web_programming/giphy.py +++ b/web_programming/giphy.py @@ -10,7 +10,7 @@ def get_gifs(query: str, api_key: str = giphy_api_key) -> list: Get a list of URLs of GIFs based on a given query.. """ formatted_query = "+".join(query.split()) - url = f"http://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}" + url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}" gifs = requests.get(url).json()["data"] return [gif["url"] for gif in gifs] From 77764116217708933bdc65b29801092fa291398e Mon Sep 17 00:00:00 2001 From: Kevin Joven <59969678+KevinJoven11@users.noreply.github.com> Date: Sun, 16 Oct 2022 02:47:54 -0500 Subject: [PATCH 0560/1543] Create q_full_adder.py (#6735) * Create q_full_adder.py This is for the #Hacktoberfest. This circuit is the quantum full adder. I saw that in the repo is the half adder so I decided to build the full adder to complete the set of adders. I hope that this is enough to be consider a contribution. Best, Kevin * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Erase the unused numpy library * Create the doctest. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * doctest for negative numbers, float, etc. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- quantum/q_full_adder.py | 112 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 quantum/q_full_adder.py diff --git a/quantum/q_full_adder.py b/quantum/q_full_adder.py new file mode 100644 index 000000000000..597efb8342e1 --- /dev/null +++ b/quantum/q_full_adder.py @@ -0,0 +1,112 @@ +""" +Build the quantum full adder (QFA) for any sum of +two quantum registers and one carry in. This circuit +is designed using the Qiskit framework. This +experiment run in IBM Q simulator with 1000 shots. +. +References: +https://www.quantum-inspire.com/kbase/full-adder/ +""" + +import math + +import qiskit +from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute + + +def quantum_full_adder( + input_1: int = 1, input_2: int = 1, carry_in: int = 1 +) -> qiskit.result.counts.Counts: + """ + # >>> q_full_adder(inp_1, inp_2, cin) + # the inputs can be 0/1 for qubits in define + # values, or can be in a superposition of both + # states with hadamard gate using the input value 2. + # result for default values: {11: 1000} + qr_0: ──■────■──────────────■── + │ ┌─┴─┐ ┌─┴─┐ + qr_1: ──■──┤ X ├──■────■──┤ X ├ + │ └───┘ │ ┌─┴─┐└───┘ + qr_2: ──┼─────────■──┤ X ├───── + ┌─┴─┐ ┌─┴─┐└───┘ + qr_3: ┤ X ├─────┤ X ├────────── + └───┘ └───┘ + cr: 2/═════════════════════════ + Args: + input_1: input 1 for the circuit. + input_2: input 2 for the circuit. + carry_in: carry in for the circuit. + Returns: + qiskit.result.counts.Counts: sum result counts. + >>> quantum_full_adder(1,1,1) + {'11': 1000} + >>> quantum_full_adder(0,0,1) + {'01': 1000} + >>> quantum_full_adder(1,0,1) + {'10': 1000} + >>> quantum_full_adder(1,-4,1) + Traceback (most recent call last): + ... + ValueError: inputs must be positive. 
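Aside, not part of the submitted file: for readers comparing against the quantum circuit, this is the classical full-adder logic it is meant to reproduce -- the sum bit is the XOR of the three inputs and the carry-out is their majority. Reading the measured bit strings in the doctests below as (carry, sum) appears to line up with these values, but that mapping is an inference from the doctests, not a statement by the author.

def classical_full_adder(a: int, b: int, carry_in: int) -> tuple[int, int]:
    # add three single bits; the quotient is the carry-out, the remainder is the sum bit
    total = a + b + carry_in
    return total // 2, total % 2


print(classical_full_adder(1, 1, 1))  # (1, 1) -- compare the '11' count for the defaults
print(classical_full_adder(0, 0, 1))  # (0, 1)
print(classical_full_adder(1, 0, 1))  # (1, 0)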
+ >>> quantum_full_adder('q',0,1) + Traceback (most recent call last): + ... + TypeError: inputs must be integers. + >>> quantum_full_adder(0.5,0,1) + Traceback (most recent call last): + ... + ValueError: inputs must be exact integers. + >>> quantum_full_adder(0,1,3) + Traceback (most recent call last): + ... + ValueError: inputs must be less or equal to 2. + """ + if (type(input_1) == str) or (type(input_2) == str) or (type(carry_in) == str): + raise TypeError("inputs must be integers.") + + if (input_1 < 0) or (input_2 < 0) or (carry_in < 0): + raise ValueError("inputs must be positive.") + + if ( + (math.floor(input_1) != input_1) + or (math.floor(input_2) != input_2) + or (math.floor(carry_in) != carry_in) + ): + raise ValueError("inputs must be exact integers.") + + if (input_1 > 2) or (input_2 > 2) or (carry_in > 2): + raise ValueError("inputs must be less or equal to 2.") + + # build registers + qr = QuantumRegister(4, "qr") + cr = ClassicalRegister(2, "cr") + # list the entries + entry = [input_1, input_2, carry_in] + + quantum_circuit = QuantumCircuit(qr, cr) + + for i in range(0, 3): + if entry[i] == 2: + quantum_circuit.h(i) # for hadamard entries + elif entry[i] == 1: + quantum_circuit.x(i) # for 1 entries + elif entry[i] == 0: + quantum_circuit.i(i) # for 0 entries + + # build the circuit + quantum_circuit.ccx(0, 1, 3) # ccx = toffoli gate + quantum_circuit.cx(0, 1) + quantum_circuit.ccx(1, 2, 3) + quantum_circuit.cx(1, 2) + quantum_circuit.cx(0, 1) + + quantum_circuit.measure([2, 3], cr) # measure the last two qbits + + backend = Aer.get_backend("qasm_simulator") + job = execute(quantum_circuit, backend, shots=1000) + + return job.result().get_counts(quantum_circuit) + + +if __name__ == "__main__": + print(f"Total sum count for state is: {quantum_full_adder(1,1,1)}") From c6582b35bf8b8aba622c63096e3ab2f01aa36854 Mon Sep 17 00:00:00 2001 From: Caeden Date: Sun, 16 Oct 2022 10:33:29 +0100 Subject: [PATCH 0561/1543] refactor: Move constants outside of variable scope (#7262) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Dhruv Manilawala Co-authored-by: Christian Clauss --- ciphers/bifid.py | 15 ++++---- ciphers/brute_force_caesar_cipher.py | 14 ++++--- ciphers/polybius.py | 16 ++++---- compression/peak_signal_to_noise_ratio.py | 13 ++++--- conversions/binary_to_hexadecimal.py | 39 ++++++++++---------- conversions/decimal_to_any.py | 11 ++---- conversions/roman_numerals.py | 32 ++++++++-------- geodesy/haversine_distance.py | 7 ++-- geodesy/lamberts_ellipsoidal_distance.py | 8 ++-- hashes/adler32.py | 3 +- physics/n_body_simulation.py | 12 +++--- project_euler/problem_054/test_poker_hand.py | 6 +-- project_euler/problem_064/sol1.py | 8 ++-- project_euler/problem_097/sol1.py | 6 +-- project_euler/problem_125/sol1.py | 3 +- sorts/radix_sort.py | 3 +- web_programming/fetch_quotes.py | 8 ++-- 17 files changed, 107 insertions(+), 97 deletions(-) diff --git a/ciphers/bifid.py b/ciphers/bifid.py index 54d55574cdca..c005e051a6ba 100644 --- a/ciphers/bifid.py +++ b/ciphers/bifid.py @@ -9,16 +9,17 @@ import numpy as np +SQUARE = [ + ["a", "b", "c", "d", "e"], + ["f", "g", "h", "i", "k"], + ["l", "m", "n", "o", "p"], + ["q", "r", "s", "t", "u"], + ["v", "w", "x", "y", "z"], +] + class BifidCipher: def __init__(self) -> None: - SQUARE = [ # noqa: N806 - ["a", "b", "c", "d", "e"], - ["f", "g", "h", "i", "k"], - ["l", "m", "n", "o", "p"], - ["q", "r", "s", "t", "u"], - ["v", "w", "x", "y", "z"], - ] self.SQUARE = np.array(SQUARE) def 
letter_to_numbers(self, letter: str) -> np.ndarray: diff --git a/ciphers/brute_force_caesar_cipher.py b/ciphers/brute_force_caesar_cipher.py index cc97111e05a7..458d08db2628 100644 --- a/ciphers/brute_force_caesar_cipher.py +++ b/ciphers/brute_force_caesar_cipher.py @@ -1,3 +1,6 @@ +import string + + def decrypt(message: str) -> None: """ >>> decrypt('TMDETUX PMDVU') @@ -28,16 +31,15 @@ def decrypt(message: str) -> None: Decryption using Key #24: VOFGVWZ ROFXW Decryption using Key #25: UNEFUVY QNEWV """ - LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # noqa: N806 - for key in range(len(LETTERS)): + for key in range(len(string.ascii_uppercase)): translated = "" for symbol in message: - if symbol in LETTERS: - num = LETTERS.find(symbol) + if symbol in string.ascii_uppercase: + num = string.ascii_uppercase.find(symbol) num = num - key if num < 0: - num = num + len(LETTERS) - translated = translated + LETTERS[num] + num = num + len(string.ascii_uppercase) + translated = translated + string.ascii_uppercase[num] else: translated = translated + symbol print(f"Decryption using Key #{key}: {translated}") diff --git a/ciphers/polybius.py b/ciphers/polybius.py index bf5d62f8d33e..c81c1d39533f 100644 --- a/ciphers/polybius.py +++ b/ciphers/polybius.py @@ -8,16 +8,18 @@ import numpy as np +SQUARE = [ + ["a", "b", "c", "d", "e"], + ["f", "g", "h", "i", "k"], + ["l", "m", "n", "o", "p"], + ["q", "r", "s", "t", "u"], + ["v", "w", "x", "y", "z"], +] + class PolybiusCipher: def __init__(self) -> None: - SQUARE = [ # noqa: N806 - ["a", "b", "c", "d", "e"], - ["f", "g", "h", "i", "k"], - ["l", "m", "n", "o", "p"], - ["q", "r", "s", "t", "u"], - ["v", "w", "x", "y", "z"], - ] + self.SQUARE = np.array(SQUARE) def letter_to_numbers(self, letter: str) -> np.ndarray: diff --git a/compression/peak_signal_to_noise_ratio.py b/compression/peak_signal_to_noise_ratio.py index 66b18b50b028..284f2904a21d 100644 --- a/compression/peak_signal_to_noise_ratio.py +++ b/compression/peak_signal_to_noise_ratio.py @@ -11,14 +11,15 @@ import cv2 import numpy as np +PIXEL_MAX = 255.0 -def psnr(original: float, contrast: float) -> float: + +def peak_signal_to_noise_ratio(original: float, contrast: float) -> float: mse = np.mean((original - contrast) ** 2) if mse == 0: return 100 - PIXEL_MAX = 255.0 # noqa: N806 - PSNR = 20 * math.log10(PIXEL_MAX / math.sqrt(mse)) # noqa: N806 - return PSNR + + return 20 * math.log10(PIXEL_MAX / math.sqrt(mse)) def main() -> None: @@ -34,11 +35,11 @@ def main() -> None: # Value expected: 29.73dB print("-- First Test --") - print(f"PSNR value is {psnr(original, contrast)} dB") + print(f"PSNR value is {peak_signal_to_noise_ratio(original, contrast)} dB") # # Value expected: 31.53dB (Wikipedia Example) print("\n-- Second Test --") - print(f"PSNR value is {psnr(original2, contrast2)} dB") + print(f"PSNR value is {peak_signal_to_noise_ratio(original2, contrast2)} dB") if __name__ == "__main__": diff --git a/conversions/binary_to_hexadecimal.py b/conversions/binary_to_hexadecimal.py index 61f335a4c465..89f7af696357 100644 --- a/conversions/binary_to_hexadecimal.py +++ b/conversions/binary_to_hexadecimal.py @@ -1,3 +1,23 @@ +BITS_TO_HEX = { + "0000": "0", + "0001": "1", + "0010": "2", + "0011": "3", + "0100": "4", + "0101": "5", + "0110": "6", + "0111": "7", + "1000": "8", + "1001": "9", + "1010": "a", + "1011": "b", + "1100": "c", + "1101": "d", + "1110": "e", + "1111": "f", +} + + def bin_to_hexadecimal(binary_str: str) -> str: """ Converting a binary string into hexadecimal using Grouping Method @@ -17,25 +37,6 
@@ def bin_to_hexadecimal(binary_str: str) -> str: ... ValueError: Empty string was passed to the function """ - BITS_TO_HEX = { # noqa: N806 - "0000": "0", - "0001": "1", - "0010": "2", - "0011": "3", - "0100": "4", - "0101": "5", - "0110": "6", - "0111": "7", - "1000": "8", - "1001": "9", - "1010": "a", - "1011": "b", - "1100": "c", - "1101": "d", - "1110": "e", - "1111": "f", - } - # Sanitising parameter binary_str = str(binary_str).strip() diff --git a/conversions/decimal_to_any.py b/conversions/decimal_to_any.py index e54fa154a0f7..908c89e8fb6b 100644 --- a/conversions/decimal_to_any.py +++ b/conversions/decimal_to_any.py @@ -1,5 +1,9 @@ """Convert a positive Decimal Number to Any Other Representation""" +from string import ascii_uppercase + +ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase} + def decimal_to_any(num: int, base: int) -> str: """ @@ -65,13 +69,6 @@ def decimal_to_any(num: int, base: int) -> str: raise ValueError("base must be >= 2") if base > 36: raise ValueError("base must be <= 36") - # fmt: off - ALPHABET_VALUES = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F', # noqa: N806, E501 - '16': 'G', '17': 'H', '18': 'I', '19': 'J', '20': 'K', '21': 'L', - '22': 'M', '23': 'N', '24': 'O', '25': 'P', '26': 'Q', '27': 'R', - '28': 'S', '29': 'T', '30': 'U', '31': 'V', '32': 'W', '33': 'X', - '34': 'Y', '35': 'Z'} - # fmt: on new_value = "" mod = 0 div = 0 diff --git a/conversions/roman_numerals.py b/conversions/roman_numerals.py index 960d41342276..61215a0c0730 100644 --- a/conversions/roman_numerals.py +++ b/conversions/roman_numerals.py @@ -1,3 +1,20 @@ +ROMAN = [ + (1000, "M"), + (900, "CM"), + (500, "D"), + (400, "CD"), + (100, "C"), + (90, "XC"), + (50, "L"), + (40, "XL"), + (10, "X"), + (9, "IX"), + (5, "V"), + (4, "IV"), + (1, "I"), +] + + def roman_to_int(roman: str) -> int: """ LeetCode No. 
13 Roman to Integer @@ -29,21 +46,6 @@ def int_to_roman(number: int) -> str: >>> all(int_to_roman(value) == key for key, value in tests.items()) True """ - ROMAN = [ # noqa: N806 - (1000, "M"), - (900, "CM"), - (500, "D"), - (400, "CD"), - (100, "C"), - (90, "XC"), - (50, "L"), - (40, "XL"), - (10, "X"), - (9, "IX"), - (5, "V"), - (4, "IV"), - (1, "I"), - ] result = [] for (arabic, roman) in ROMAN: (factor, number) = divmod(number, arabic) diff --git a/geodesy/haversine_distance.py b/geodesy/haversine_distance.py index b601d2fd1983..93e625770f9d 100644 --- a/geodesy/haversine_distance.py +++ b/geodesy/haversine_distance.py @@ -1,5 +1,9 @@ from math import asin, atan, cos, radians, sin, sqrt, tan +AXIS_A = 6378137.0 +AXIS_B = 6356752.314245 +RADIUS = 6378137 + def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float: """ @@ -30,9 +34,6 @@ def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> fl """ # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System # Distance in metres(m) - AXIS_A = 6378137.0 # noqa: N806 - AXIS_B = 6356752.314245 # noqa: N806 - RADIUS = 6378137 # noqa: N806 # Equation parameters # Equation https://en.wikipedia.org/wiki/Haversine_formula#Formulation flattening = (AXIS_A - AXIS_B) / AXIS_A diff --git a/geodesy/lamberts_ellipsoidal_distance.py b/geodesy/lamberts_ellipsoidal_distance.py index d36d399538de..62ce59bb476f 100644 --- a/geodesy/lamberts_ellipsoidal_distance.py +++ b/geodesy/lamberts_ellipsoidal_distance.py @@ -2,6 +2,10 @@ from .haversine_distance import haversine_distance +AXIS_A = 6378137.0 +AXIS_B = 6356752.314245 +EQUATORIAL_RADIUS = 6378137 + def lamberts_ellipsoidal_distance( lat1: float, lon1: float, lat2: float, lon2: float @@ -45,10 +49,6 @@ def lamberts_ellipsoidal_distance( # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System # Distance in metres(m) - AXIS_A = 6378137.0 # noqa: N806 - AXIS_B = 6356752.314245 # noqa: N806 - EQUATORIAL_RADIUS = 6378137 # noqa: N806 - # Equation Parameters # https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines flattening = (AXIS_A - AXIS_B) / AXIS_A diff --git a/hashes/adler32.py b/hashes/adler32.py index 80229f04620a..611ebc88b80f 100644 --- a/hashes/adler32.py +++ b/hashes/adler32.py @@ -8,6 +8,8 @@ source: https://en.wikipedia.org/wiki/Adler-32 """ +MOD_ADLER = 65521 + def adler32(plain_text: str) -> int: """ @@ -20,7 +22,6 @@ def adler32(plain_text: str) -> int: >>> adler32('go adler em all') 708642122 """ - MOD_ADLER = 65521 # noqa: N806 a = 1 b = 0 for plain_chr in plain_text: diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index e62e1de62757..f6efb0fec81c 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -19,6 +19,12 @@ from matplotlib import animation from matplotlib import pyplot as plt +# Frame rate of the animation +INTERVAL = 20 + +# Time between time steps in seconds +DELTA_TIME = INTERVAL / 1000 + class Body: def __init__( @@ -219,12 +225,6 @@ def plot( Utility function to plot how the given body-system evolves over time. No doctest provided since this function does not have a return value. 
""" - # Frame rate of the animation - INTERVAL = 20 # noqa: N806 - - # Time between time steps in seconds - DELTA_TIME = INTERVAL / 1000 # noqa: N806 - fig = plt.figure() fig.canvas.set_window_title(title) ax = plt.axes( diff --git a/project_euler/problem_054/test_poker_hand.py b/project_euler/problem_054/test_poker_hand.py index bf5a20a8e862..5735bfc37947 100644 --- a/project_euler/problem_054/test_poker_hand.py +++ b/project_euler/problem_054/test_poker_hand.py @@ -185,12 +185,12 @@ def test_compare_random(hand, other, expected): def test_hand_sorted(): - POKER_HANDS = [PokerHand(hand) for hand in SORTED_HANDS] # noqa: N806 - list_copy = POKER_HANDS.copy() + poker_hands = [PokerHand(hand) for hand in SORTED_HANDS] + list_copy = poker_hands.copy() shuffle(list_copy) user_sorted = chain(sorted(list_copy)) for index, hand in enumerate(user_sorted): - assert hand == POKER_HANDS[index] + assert hand == poker_hands[index] def test_custom_sort_five_high_straight(): diff --git a/project_euler/problem_064/sol1.py b/project_euler/problem_064/sol1.py index 9edd9a1e7a64..81ebcc7b73c3 100644 --- a/project_euler/problem_064/sol1.py +++ b/project_euler/problem_064/sol1.py @@ -33,13 +33,13 @@ def continuous_fraction_period(n: int) -> int: """ numerator = 0.0 denominator = 1.0 - ROOT = int(sqrt(n)) # noqa: N806 - integer_part = ROOT + root = int(sqrt(n)) + integer_part = root period = 0 - while integer_part != 2 * ROOT: + while integer_part != 2 * root: numerator = denominator * integer_part - numerator denominator = (n - numerator**2) / denominator - integer_part = int((ROOT + numerator) / denominator) + integer_part = int((root + numerator) / denominator) period += 1 return period diff --git a/project_euler/problem_097/sol1.py b/project_euler/problem_097/sol1.py index 94a43894ee07..2807e893ded0 100644 --- a/project_euler/problem_097/sol1.py +++ b/project_euler/problem_097/sol1.py @@ -34,9 +34,9 @@ def solution(n: int = 10) -> str: """ if not isinstance(n, int) or n < 0: raise ValueError("Invalid input") - MODULUS = 10**n # noqa: N806 - NUMBER = 28433 * (pow(2, 7830457, MODULUS)) + 1 # noqa: N806 - return str(NUMBER % MODULUS) + modulus = 10**n + number = 28433 * (pow(2, 7830457, modulus)) + 1 + return str(number % modulus) if __name__ == "__main__": diff --git a/project_euler/problem_125/sol1.py b/project_euler/problem_125/sol1.py index 1812df36132e..616f6f122f97 100644 --- a/project_euler/problem_125/sol1.py +++ b/project_euler/problem_125/sol1.py @@ -13,6 +13,8 @@ be written as the sum of consecutive squares. """ +LIMIT = 10**8 + def is_palindrome(n: int) -> bool: """ @@ -35,7 +37,6 @@ def solution() -> int: Returns the sum of all numbers less than 1e8 that are both palindromic and can be written as the sum of consecutive squares. 
""" - LIMIT = 10**8 # noqa: N806 answer = set() first_square = 1 sum_squares = 5 diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py index a496cdc0c743..832b6162f349 100644 --- a/sorts/radix_sort.py +++ b/sorts/radix_sort.py @@ -5,6 +5,8 @@ """ from __future__ import annotations +RADIX = 10 + def radix_sort(list_of_ints: list[int]) -> list[int]: """ @@ -19,7 +21,6 @@ def radix_sort(list_of_ints: list[int]) -> list[int]: >>> radix_sort([1,100,10,1000]) == sorted([1,100,10,1000]) True """ - RADIX = 10 # noqa: N806 placement = 1 max_digit = max(list_of_ints) while placement <= max_digit: diff --git a/web_programming/fetch_quotes.py b/web_programming/fetch_quotes.py index a45f6ea0eaf1..d557e2d95e74 100644 --- a/web_programming/fetch_quotes.py +++ b/web_programming/fetch_quotes.py @@ -10,15 +10,15 @@ import requests +API_ENDPOINT_URL = "https://zenquotes.io/api" + def quote_of_the_day() -> list: - API_ENDPOINT_URL = "https://zenquotes.io/api/today/" # noqa: N806 - return requests.get(API_ENDPOINT_URL).json() + return requests.get(API_ENDPOINT_URL + "/today").json() def random_quotes() -> list: - API_ENDPOINT_URL = "https://zenquotes.io/api/random/" # noqa: N806 - return requests.get(API_ENDPOINT_URL).json() + return requests.get(API_ENDPOINT_URL + "/random").json() if __name__ == "__main__": From d728f5a96bce1cb748d903de2f7dff2e2a2b54eb Mon Sep 17 00:00:00 2001 From: Advik Sharma <70201060+advik-student-dev@users.noreply.github.com> Date: Sun, 16 Oct 2022 06:28:10 -0700 Subject: [PATCH 0562/1543] Added some more comments to volume.py in maths folder (#7080) * Added some more comments added some more comments (to formulas which need it) which make the code more readable and understandable. might make a list of all the formulas on the top, later * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * The order changes the result * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix long line * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/volume.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/maths/volume.py b/maths/volume.py index 97c06d7e1c3a..a594e1b90feb 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -1,6 +1,7 @@ """ -Find Volumes of Various Shapes. -Wikipedia reference: https://en.wikipedia.org/wiki/Volume +Find the volume of various shapes. +* https://en.wikipedia.org/wiki/Volume +* https://en.wikipedia.org/wiki/Spherical_cap """ from __future__ import annotations @@ -30,8 +31,7 @@ def vol_cube(side_length: int | float) -> float: def vol_spherical_cap(height: float, radius: float) -> float: """ - Calculate the Volume of the spherical cap. - :return 1/3 pi * height ^ 2 * (3 * radius - height) + Calculate the volume of the spherical cap. 
>>> vol_spherical_cap(1, 2) 5.235987755982988 >>> vol_spherical_cap(1.6, 2.6) @@ -49,6 +49,7 @@ def vol_spherical_cap(height: float, radius: float) -> float: """ if height < 0 or radius < 0: raise ValueError("vol_spherical_cap() only accepts non-negative values") + # Volume is 1/3 pi * height squared * (3 * radius - height) return 1 / 3 * pi * pow(height, 2) * (3 * radius - height) @@ -263,6 +264,7 @@ def vol_sphere(radius: float) -> float: """ if radius < 0: raise ValueError("vol_sphere() only accepts non-negative values") + # Volume is 4/3 * pi * radius cubed return 4 / 3 * pi * pow(radius, 3) @@ -274,7 +276,7 @@ def vol_hemisphere(radius: float) -> float: >>> vol_hemisphere(1) 2.0943951023931953 >>> vol_hemisphere(7) - 718.3775201208659 + 718.377520120866 >>> vol_hemisphere(1.6) 8.57864233940253 >>> vol_hemisphere(0) @@ -286,7 +288,8 @@ def vol_hemisphere(radius: float) -> float: """ if radius < 0: raise ValueError("vol_hemisphere() only accepts non-negative values") - return 2 / 3 * pi * pow(radius, 3) + # Volume is radius cubed * pi * 2/3 + return pow(radius, 3) * pi * 2 / 3 def vol_circular_cylinder(radius: float, height: float) -> float: @@ -312,7 +315,8 @@ def vol_circular_cylinder(radius: float, height: float) -> float: """ if height < 0 or radius < 0: raise ValueError("vol_circular_cylinder() only accepts non-negative values") - return pi * pow(radius, 2) * height + # Volume is radius squared * height * pi + return pow(radius, 2) * height * pi def vol_hollow_circular_cylinder( @@ -344,6 +348,7 @@ def vol_hollow_circular_cylinder( ... ValueError: outer_radius must be greater than inner_radius """ + # Volume - (outer_radius squared - inner_radius squared) * pi * height if inner_radius < 0 or outer_radius < 0 or height < 0: raise ValueError( "vol_hollow_circular_cylinder() only accepts non-negative values" @@ -356,7 +361,7 @@ def vol_hollow_circular_cylinder( def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> float: """Calculate the Volume of a Conical Frustum. Wikipedia reference: https://en.wikipedia.org/wiki/Frustum - :return 1/3 * pi * height * (radius_1^2 + radius_top^2 + radius_1 * radius_2) + >>> vol_conical_frustum(45, 7, 28) 48490.482608158454 >>> vol_conical_frustum(1, 1, 2) @@ -378,6 +383,8 @@ def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> floa ... ValueError: vol_conical_frustum() only accepts non-negative values """ + # Volume is 1/3 * pi * height * + # (radius_1 squared + radius_2 squared + radius_1 * radius_2) if radius_1 < 0 or radius_2 < 0 or height < 0: raise ValueError("vol_conical_frustum() only accepts non-negative values") return ( From b5b1eb2f00f942955217ef6968fe8016476690ba Mon Sep 17 00:00:00 2001 From: Sagar Giri Date: Sun, 16 Oct 2022 22:45:25 +0900 Subject: [PATCH 0563/1543] Fix broken links by PR #7277 (#7319) --- bit_manipulation/count_1s_brian_kernighan_method.py | 2 +- machine_learning/sequential_minimum_optimization.py | 1 - physics/n_body_simulation.py | 4 ++-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/bit_manipulation/count_1s_brian_kernighan_method.py b/bit_manipulation/count_1s_brian_kernighan_method.py index d217af90b3d9..e6d6d65345c4 100644 --- a/bit_manipulation/count_1s_brian_kernighan_method.py +++ b/bit_manipulation/count_1s_brian_kernighan_method.py @@ -1,7 +1,7 @@ def get_1s_count(number: int) -> int: """ Count the number of set bits in a 32 bit integer using Brian Kernighan's way. 
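The trick the docstring refers to is the classic n &= n - 1 loop, which clears one set bit per iteration, so the loop body runs exactly once per set bit. A minimal standalone sketch (the function name here is illustrative, not the repository's):

def count_set_bits(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # clear the lowest set bit
        count += 1
    return count

print(count_set_bits(25))  # 3, since 25 == 0b11001 -- matching the doctest below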
- Ref - http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan + Ref - https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan >>> get_1s_count(25) 3 >>> get_1s_count(37) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 40adca7e0828..df5b03790804 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -28,7 +28,6 @@ Reference: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf - https://web.cs.iastate.edu/~honavar/smo-svm.pdf """ diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index f6efb0fec81c..2b701283f166 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -8,7 +8,7 @@ numerical divergences when a particle comes too close to another (and the force goes to infinity). (Description adapted from https://en.wikipedia.org/wiki/N-body_simulation ) -(See also https://www.shodor.org/refdesk/Resources/Algorithms/EulersMethod/ ) +(See also http://www.shodor.org/refdesk/Resources/Algorithms/EulersMethod/ ) """ @@ -258,7 +258,7 @@ def example_1() -> BodySystem: Example 1: figure-8 solution to the 3-body-problem This example can be seen as a test of the implementation: given the right initial conditions, the bodies should move in a figure-8. - (initial conditions taken from https://www.artcompsci.org/vol_1/v1_web/node56.html) + (initial conditions taken from http://www.artcompsci.org/vol_1/v1_web/node56.html) >>> body_system = example_1() >>> len(body_system) 3 From 6d20e2b750839d978873f6a89ce6d844ba3cc0b8 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 16 Oct 2022 20:50:48 +0100 Subject: [PATCH 0564/1543] Add `flake8-broken-line` to `pre-commit` (#7338) * ci: Add ``flake8-broken-line`` plugin to ``pre-commit`` * refactor: Fix errors from ``flake8-broken-line`` --- .pre-commit-config.yaml | 1 + project_euler/problem_008/sol1.py | 42 ++++++++++++++++--------------- project_euler/problem_008/sol3.py | 42 ++++++++++++++++--------------- 3 files changed, 45 insertions(+), 40 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3455135653cf..39af0f3b4370 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,6 +39,7 @@ repos: additional_dependencies: - flake8-bugbear - flake8-builtins + - flake8-broken-line - flake8-comprehensions - pep8-naming diff --git a/project_euler/problem_008/sol1.py b/project_euler/problem_008/sol1.py index 796080127778..69dd1b4736c1 100644 --- a/project_euler/problem_008/sol1.py +++ b/project_euler/problem_008/sol1.py @@ -33,26 +33,28 @@ import sys -N = """73167176531330624919225119674426574742355349194934\ -96983520312774506326239578318016984801869478851843\ -85861560789112949495459501737958331952853208805511\ -12540698747158523863050715693290963295227443043557\ -66896648950445244523161731856403098711121722383113\ -62229893423380308135336276614282806444486645238749\ -30358907296290491560440772390713810515859307960866\ -70172427121883998797908792274921901699720888093776\ -65727333001053367881220235421809751254540594752243\ -52584907711670556013604839586446706324415722155397\ -53697817977846174064955149290862569321978468622482\ -83972241375657056057490261407972968652414535100474\ -82166370484403199890008895243450658541227588666881\ 
-16427171479924442928230863465674813919123162824586\ -17866458359124566529476545682848912883142607690042\ -24219022671055626321111109370544217506941658960408\ -07198403850962455444362981230987879927244284909188\ -84580156166097919133875499200524063689912560717606\ -05886116467109405077541002256983155200055935729725\ -71636269561882670428252483600823257530420752963450""" +N = ( + "73167176531330624919225119674426574742355349194934" + "96983520312774506326239578318016984801869478851843" + "85861560789112949495459501737958331952853208805511" + "12540698747158523863050715693290963295227443043557" + "66896648950445244523161731856403098711121722383113" + "62229893423380308135336276614282806444486645238749" + "30358907296290491560440772390713810515859307960866" + "70172427121883998797908792274921901699720888093776" + "65727333001053367881220235421809751254540594752243" + "52584907711670556013604839586446706324415722155397" + "53697817977846174064955149290862569321978468622482" + "83972241375657056057490261407972968652414535100474" + "82166370484403199890008895243450658541227588666881" + "16427171479924442928230863465674813919123162824586" + "17866458359124566529476545682848912883142607690042" + "24219022671055626321111109370544217506941658960408" + "07198403850962455444362981230987879927244284909188" + "84580156166097919133875499200524063689912560717606" + "05886116467109405077541002256983155200055935729725" + "71636269561882670428252483600823257530420752963450" +) def solution(n: str = N) -> int: diff --git a/project_euler/problem_008/sol3.py b/project_euler/problem_008/sol3.py index 4b99d0ea6e76..c6081aa05e2c 100644 --- a/project_euler/problem_008/sol3.py +++ b/project_euler/problem_008/sol3.py @@ -32,26 +32,28 @@ """ import sys -N = """73167176531330624919225119674426574742355349194934\ -96983520312774506326239578318016984801869478851843\ -85861560789112949495459501737958331952853208805511\ -12540698747158523863050715693290963295227443043557\ -66896648950445244523161731856403098711121722383113\ -62229893423380308135336276614282806444486645238749\ -30358907296290491560440772390713810515859307960866\ -70172427121883998797908792274921901699720888093776\ -65727333001053367881220235421809751254540594752243\ -52584907711670556013604839586446706324415722155397\ -53697817977846174064955149290862569321978468622482\ -83972241375657056057490261407972968652414535100474\ -82166370484403199890008895243450658541227588666881\ -16427171479924442928230863465674813919123162824586\ -17866458359124566529476545682848912883142607690042\ -24219022671055626321111109370544217506941658960408\ -07198403850962455444362981230987879927244284909188\ -84580156166097919133875499200524063689912560717606\ -05886116467109405077541002256983155200055935729725\ -71636269561882670428252483600823257530420752963450""" +N = ( + "73167176531330624919225119674426574742355349194934" + "96983520312774506326239578318016984801869478851843" + "85861560789112949495459501737958331952853208805511" + "12540698747158523863050715693290963295227443043557" + "66896648950445244523161731856403098711121722383113" + "62229893423380308135336276614282806444486645238749" + "30358907296290491560440772390713810515859307960866" + "70172427121883998797908792274921901699720888093776" + "65727333001053367881220235421809751254540594752243" + "52584907711670556013604839586446706324415722155397" + "53697817977846174064955149290862569321978468622482" + "83972241375657056057490261407972968652414535100474" + "82166370484403199890008895243450658541227588666881" + 
"16427171479924442928230863465674813919123162824586" + "17866458359124566529476545682848912883142607690042" + "24219022671055626321111109370544217506941658960408" + "07198403850962455444362981230987879927244284909188" + "84580156166097919133875499200524063689912560717606" + "05886116467109405077541002256983155200055935729725" + "71636269561882670428252483600823257530420752963450" +) def str_eval(s: str) -> int: From 7f6e0b656f6362e452b11d06acde50b8b81cb31a Mon Sep 17 00:00:00 2001 From: SudhanshuSuman <51868273+SudhanshuSuman@users.noreply.github.com> Date: Mon, 17 Oct 2022 02:11:28 +0530 Subject: [PATCH 0565/1543] Corrected the directory of Fractional Knapsack algorithm (#7086) * Moved fractional knapsack from 'dynamic_programming' to 'greedy_methods' * Updated DIRECTORY.md --- DIRECTORY.md | 4 +- .../fractional_knapsack.py | 0 .../fractional_knapsack_2.py | 106 +++++++++--------- 3 files changed, 55 insertions(+), 55 deletions(-) rename {dynamic_programming => greedy_methods}/fractional_knapsack.py (100%) rename {dynamic_programming => greedy_methods}/fractional_knapsack_2.py (96%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 92bed9cb4c6e..fae9a5183f04 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -279,8 +279,6 @@ * [Fast Fibonacci](dynamic_programming/fast_fibonacci.py) * [Fibonacci](dynamic_programming/fibonacci.py) * [Floyd Warshall](dynamic_programming/floyd_warshall.py) - * [Fractional Knapsack](dynamic_programming/fractional_knapsack.py) - * [Fractional Knapsack 2](dynamic_programming/fractional_knapsack_2.py) * [Integer Partition](dynamic_programming/integer_partition.py) * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) * [Knapsack](dynamic_programming/knapsack.py) @@ -396,6 +394,8 @@ * [Test Min Spanning Tree Prim](graphs/tests/test_min_spanning_tree_prim.py) ## Greedy Methods + * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) + * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) ## Hashes diff --git a/dynamic_programming/fractional_knapsack.py b/greedy_methods/fractional_knapsack.py similarity index 100% rename from dynamic_programming/fractional_knapsack.py rename to greedy_methods/fractional_knapsack.py diff --git a/dynamic_programming/fractional_knapsack_2.py b/greedy_methods/fractional_knapsack_2.py similarity index 96% rename from dynamic_programming/fractional_knapsack_2.py rename to greedy_methods/fractional_knapsack_2.py index bd776723c146..6d9ed2ec3b6b 100644 --- a/dynamic_programming/fractional_knapsack_2.py +++ b/greedy_methods/fractional_knapsack_2.py @@ -1,53 +1,53 @@ -# https://en.wikipedia.org/wiki/Continuous_knapsack_problem -# https://www.guru99.com/fractional-knapsack-problem-greedy.html -# https://medium.com/walkinthecode/greedy-algorithm-fractional-knapsack-problem-9aba1daecc93 - -from __future__ import annotations - - -def fractional_knapsack( - value: list[int], weight: list[int], capacity: int -) -> tuple[float, list[float]]: - """ - >>> value = [1, 3, 5, 7, 9] - >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1] - >>> fractional_knapsack(value, weight, 5) - (25, [1, 1, 1, 1, 1]) - >>> fractional_knapsack(value, weight, 15) - (25, [1, 1, 1, 1, 1]) - >>> fractional_knapsack(value, weight, 25) - (25, [1, 1, 1, 1, 1]) - >>> fractional_knapsack(value, weight, 26) - (25, [1, 1, 1, 1, 1]) - >>> fractional_knapsack(value, weight, -1) - (-90.0, [0, 0, 0, 0, -10.0]) - >>> fractional_knapsack([1, 3, 5, 7], weight, 30) - (16, [1, 1, 1, 1]) - >>> 
fractional_knapsack(value, [0.9, 0.7, 0.5, 0.3, 0.1], 30) - (25, [1, 1, 1, 1, 1]) - >>> fractional_knapsack([], [], 30) - (0, []) - """ - index = list(range(len(value))) - ratio = [v / w for v, w in zip(value, weight)] - index.sort(key=lambda i: ratio[i], reverse=True) - - max_value: float = 0 - fractions: list[float] = [0] * len(value) - for i in index: - if weight[i] <= capacity: - fractions[i] = 1 - max_value += value[i] - capacity -= weight[i] - else: - fractions[i] = capacity / weight[i] - max_value += value[i] * capacity / weight[i] - break - - return max_value, fractions - - -if __name__ == "__main__": - import doctest - - doctest.testmod() +# https://en.wikipedia.org/wiki/Continuous_knapsack_problem +# https://www.guru99.com/fractional-knapsack-problem-greedy.html +# https://medium.com/walkinthecode/greedy-algorithm-fractional-knapsack-problem-9aba1daecc93 + +from __future__ import annotations + + +def fractional_knapsack( + value: list[int], weight: list[int], capacity: int +) -> tuple[float, list[float]]: + """ + >>> value = [1, 3, 5, 7, 9] + >>> weight = [0.9, 0.7, 0.5, 0.3, 0.1] + >>> fractional_knapsack(value, weight, 5) + (25, [1, 1, 1, 1, 1]) + >>> fractional_knapsack(value, weight, 15) + (25, [1, 1, 1, 1, 1]) + >>> fractional_knapsack(value, weight, 25) + (25, [1, 1, 1, 1, 1]) + >>> fractional_knapsack(value, weight, 26) + (25, [1, 1, 1, 1, 1]) + >>> fractional_knapsack(value, weight, -1) + (-90.0, [0, 0, 0, 0, -10.0]) + >>> fractional_knapsack([1, 3, 5, 7], weight, 30) + (16, [1, 1, 1, 1]) + >>> fractional_knapsack(value, [0.9, 0.7, 0.5, 0.3, 0.1], 30) + (25, [1, 1, 1, 1, 1]) + >>> fractional_knapsack([], [], 30) + (0, []) + """ + index = list(range(len(value))) + ratio = [v / w for v, w in zip(value, weight)] + index.sort(key=lambda i: ratio[i], reverse=True) + + max_value: float = 0 + fractions: list[float] = [0] * len(value) + for i in index: + if weight[i] <= capacity: + fractions[i] = 1 + max_value += value[i] + capacity -= weight[i] + else: + fractions[i] = capacity / weight[i] + max_value += value[i] * capacity / weight[i] + break + + return max_value, fractions + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f15cc2f01c2a4124ff6dc0843c728a546f9d9f79 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 16 Oct 2022 21:50:11 +0100 Subject: [PATCH 0566/1543] Follow Flake8 pep3101 and remove modulo formatting (#7339) * ci: Add ``flake8-pep3101`` plugin to ``pre-commit`` * refactor: Remove all modulo string formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor: Remove ``flake8-pep3101`` plugin from ``pre-commit`` * revert: Revert to modulo formatting * refactor: Use f-string instead of `join` Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ciphers/elgamal_key_generator.py | 9 +++------ ciphers/rsa_key_generator.py | 3 +-- dynamic_programming/edit_distance.py | 4 ++-- genetic_algorithm/basic_string.py | 4 ++-- graphs/minimum_spanning_tree_boruvka.py | 2 +- machine_learning/linear_regression.py | 2 +- matrix/sherman_morrison.py | 6 +++--- neural_network/back_propagation_neural_network.py | 2 +- neural_network/convolution_neural_network.py | 2 +- 9 files changed, 15 insertions(+), 19 deletions(-) diff --git a/ciphers/elgamal_key_generator.py b/ciphers/elgamal_key_generator.py index 4d72128aed52..17ba55c0d013 100644 --- a/ciphers/elgamal_key_generator.py +++ b/ciphers/elgamal_key_generator.py @@ -41,22 +41,19 @@ def 
make_key_files(name: str, key_size: int) -> None: if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"): print("\nWARNING:") print( - '"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n' + f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' "Use a different name or delete these files and re-run this program." - % (name, name) ) sys.exit() public_key, private_key = generate_key(key_size) print(f"\nWriting public key to file {name}_pubkey.txt...") with open(f"{name}_pubkey.txt", "w") as fo: - fo.write( - "%d,%d,%d,%d" % (public_key[0], public_key[1], public_key[2], public_key[3]) - ) + fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}") print(f"Writing private key to file {name}_privkey.txt...") with open(f"{name}_privkey.txt", "w") as fo: - fo.write("%d,%d" % (private_key[0], private_key[1])) + fo.write(f"{private_key[0]},{private_key[1]}") def main() -> None: diff --git a/ciphers/rsa_key_generator.py b/ciphers/rsa_key_generator.py index f64bc7dd0557..2573ed01387b 100644 --- a/ciphers/rsa_key_generator.py +++ b/ciphers/rsa_key_generator.py @@ -37,9 +37,8 @@ def make_key_files(name: str, key_size: int) -> None: if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"): print("\nWARNING:") print( - '"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n' + f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' "Use a different name or delete these files and re-run this program." - % (name, name) ) sys.exit() diff --git a/dynamic_programming/edit_distance.py b/dynamic_programming/edit_distance.py index d63e559e30da..fe23431a7ea6 100644 --- a/dynamic_programming/edit_distance.py +++ b/dynamic_programming/edit_distance.py @@ -99,7 +99,7 @@ def min_distance_bottom_up(word1: str, word2: str) -> int: S2 = input("Enter the second string: ").strip() print() - print("The minimum Edit Distance is: %d" % (solver.solve(S1, S2))) - print("The minimum Edit Distance is: %d" % (min_distance_bottom_up(S1, S2))) + print(f"The minimum Edit Distance is: {solver.solve(S1, S2)}") + print(f"The minimum Edit Distance is: {min_distance_bottom_up(S1, S2)}") print() print("*************** End of Testing Edit Distance DP Algorithm ***************") diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index bd7d8026866c..d2d305189983 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -172,7 +172,7 @@ def mutate(child: str) -> str: " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm" "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\" ) + generation, population, target = basic(target_str, genes_list) print( - "\nGeneration: %s\nTotal Population: %s\nTarget: %s" - % basic(target_str, genes_list) + f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}" ) diff --git a/graphs/minimum_spanning_tree_boruvka.py b/graphs/minimum_spanning_tree_boruvka.py index 32548b2ecb6c..6c72615cc729 100644 --- a/graphs/minimum_spanning_tree_boruvka.py +++ b/graphs/minimum_spanning_tree_boruvka.py @@ -63,7 +63,7 @@ def __str__(self): for tail in self.adjacency: for head in self.adjacency[tail]: weight = self.adjacency[head][tail] - string += "%d -> %d == %d\n" % (head, tail, weight) + string += f"{head} -> {tail} == {weight}\n" return string.rstrip("\n") def get_edges(self): diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index 85fdfb0005ac..92ab91c01b95 100644 --- a/machine_learning/linear_regression.py +++ 
b/machine_learning/linear_regression.py @@ -82,7 +82,7 @@ def run_linear_regression(data_x, data_y): for i in range(0, iterations): theta = run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta) error = sum_of_square_error(data_x, data_y, len_data, theta) - print("At Iteration %d - Error is %.5f " % (i + 1, error)) + print(f"At Iteration {i + 1} - Error is {error:.5f}") return theta diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 29c9b3381b55..39eddfed81f3 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -31,14 +31,14 @@ def __str__(self) -> str: """ # Prefix - s = "Matrix consist of %d rows and %d columns\n" % (self.row, self.column) + s = f"Matrix consist of {self.row} rows and {self.column} columns\n" # Make string identifier max_element_length = 0 for row_vector in self.array: for obj in row_vector: max_element_length = max(max_element_length, len(str(obj))) - string_format_identifier = "%%%ds" % (max_element_length,) + string_format_identifier = f"%{max_element_length}s" # Make string and return def single_line(row_vector: list[float]) -> str: @@ -252,7 +252,7 @@ def test1() -> None: v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5 print(f"u is {u}") print(f"v is {v}") - print("uv^T is %s" % (u * v.transpose())) + print(f"uv^T is {u * v.transpose()}") # Sherman Morrison print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}") diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index 23b818b0f3cf..cb47b829010c 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -117,7 +117,7 @@ def build(self): def summary(self): for i, layer in enumerate(self.layers[:]): - print("------- layer %d -------" % i) + print(f"------- layer {i} -------") print("weight.shape ", np.shape(layer.weight)) print("bias.shape ", np.shape(layer.bias)) diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index 9dfb6d091412..bd0550212157 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -219,7 +219,7 @@ def train( mse = 10000 while rp < n_repeat and mse >= error_accuracy: error_count = 0 - print("-------------Learning Time %d--------------" % rp) + print(f"-------------Learning Time {rp}--------------") for p in range(len(datas_train)): # print('------------Learning Image: %d--------------'%p) data_train = np.asmatrix(datas_train[p]) From a34b756fd40e5cdfb69abc06dcd42f5f1b5fa21e Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 16 Oct 2022 21:51:40 +0100 Subject: [PATCH 0567/1543] ci: Add ``yesqa`` (flake8-plugin) to ``pre-commit`` (#7340) --- .pre-commit-config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 39af0f3b4370..aea82d12cd13 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,6 +42,7 @@ repos: - flake8-broken-line - flake8-comprehensions - pep8-naming + - yesqa - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 From 0c7c5fa7b0161a7433467240155356c93ae106b8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 17 Oct 2022 21:59:25 +0200 Subject: [PATCH 0568/1543] [pre-commit.ci] pre-commit autoupdate (#7387) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate 
updates: - [github.com/asottile/pyupgrade: v3.0.0 → v3.1.0](https://github.com/asottile/pyupgrade/compare/v3.0.0...v3.1.0) - [github.com/codespell-project/codespell: v2.2.1 → v2.2.2](https://github.com/codespell-project/codespell/compare/v2.2.1...v2.2.2) * updating DIRECTORY.md * Fix typo discovered by codespell * Fix typo discovered by codespell * Update .pre-commit-config.yaml * Update .pre-commit-config.yaml * Update .pre-commit-config.yaml Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 2 ++ .../local_weighted_learning/local_weighted_learning.md | 2 +- maths/is_square_free.py | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index aea82d12cd13..5bdda50be0c4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,7 +26,7 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v3.0.0 + rev: v3.1.0 hooks: - id: pyupgrade args: @@ -55,14 +55,14 @@ repos: additional_dependencies: [types-requests] - repo: https://github.com/codespell-project/codespell - rev: v2.2.1 + rev: v2.2.2 hooks: - id: codespell args: - --ignore-words-list=ans,crate,damon,fo,followings,hist,iff,mater,secant,som,sur,tim,zar - - --skip="./.*,./strings/dictionary.txt,./strings/words.txt,./project_euler/problem_022/p022_names.txt" exclude: | (?x)^( + ciphers/prehistoric_men.txt | strings/dictionary.txt | strings/words.txt | project_euler/problem_022/p022_names.txt diff --git a/DIRECTORY.md b/DIRECTORY.md index fae9a5183f04..94ec42832e41 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -642,6 +642,7 @@ * [Tower Of Hanoi](other/tower_of_hanoi.py) ## Physics + * [Casimir Effect](physics/casimir_effect.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [N Body Simulation](physics/n_body_simulation.py) @@ -928,6 +929,7 @@ * [Deutsch Jozsa](quantum/deutsch_jozsa.py) * [Half Adder](quantum/half_adder.py) * [Not Gate](quantum/not_gate.py) + * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.md b/machine_learning/local_weighted_learning/local_weighted_learning.md index 5c7895e75104..ef4dbc958600 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.md +++ b/machine_learning/local_weighted_learning/local_weighted_learning.md @@ -29,7 +29,7 @@ This training phase is possible when data points are linear, but there again com So, here comes the role of non-parametric algorithm which doesn't compute predictions based on fixed set of params. Rather parameters $\theta$ are computed individually for each query point/data point x.
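As the following lines of this document describe, the weighting scheme is what makes the model local: points near the query x dominate the fit. A rough numpy sketch of the usual Gaussian-style weights (an illustration only, not the repository code):

import numpy as np

def local_weights(x_train: np.ndarray, x_query: float, tau: float) -> np.ndarray:
    # w_i = exp(-(x_i - x_query)^2 / (2 * tau^2)); tau sets the neighbourhood width
    return np.exp(-((x_train - x_query) ** 2) / (2 * tau**2))

print(local_weights(np.array([1.0, 2.0, 3.0, 10.0]), x_query=2.0, tau=1.0))
# ~[0.6065, 1.0, 0.6065, 1.3e-14]: nearby points carry the fit, the far point barely counts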

-While Computing $\theta$ , a higher "preferance" is given to points in the vicinity of x than points farther from x. +While Computing $\theta$ , a higher preference is given to points in the vicinity of x than points farther from x. Cost Function J($\theta$) = $\sum_{i=1}^m$ $w^i$ (($\theta$)$^T$ $x^i$ - $y^i$)$^2$ diff --git a/maths/is_square_free.py b/maths/is_square_free.py index 8d83d95ffb67..4134398d258b 100644 --- a/maths/is_square_free.py +++ b/maths/is_square_free.py @@ -15,7 +15,7 @@ def is_square_free(factors: list[int]) -> bool: False These are wrong but should return some value - it simply checks for repition in the numbers. + it simply checks for repetition in the numbers. >>> is_square_free([1, 3, 4, 'sd', 0.0]) True From 3448ae5cec868d4a03349cb952765e9abff41243 Mon Sep 17 00:00:00 2001 From: Shubham Kondekar <40213815+kondekarshubham123@users.noreply.github.com> Date: Tue, 18 Oct 2022 02:00:01 +0530 Subject: [PATCH 0569/1543] [Binary Tree] Different views of binary tree added (#6965) * Different views of binary tree added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * mypy errors resolved * doc test for remaining functions * Flake8 comments resolved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Example moved in if block * doctest cases added * Cases from if block removed * Update data_structures/binary_tree/diff_views_of_binary_tree.py Co-authored-by: Christian Clauss * Update data_structures/binary_tree/diff_views_of_binary_tree.py Co-authored-by: Christian Clauss * PR Comments resolved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * flake8 warning resolved * Changes revered * flake8 issue resolved * Put the diagrams just above the doctests * Update diff_views_of_binary_tree.py * Update diff_views_of_binary_tree.py * I love mypy * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/diff_views_of_binary_tree.py | 210 ++++++++++++++++++ 1 file changed, 210 insertions(+) create mode 100644 data_structures/binary_tree/diff_views_of_binary_tree.py diff --git a/data_structures/binary_tree/diff_views_of_binary_tree.py b/data_structures/binary_tree/diff_views_of_binary_tree.py new file mode 100644 index 000000000000..3198d8065918 --- /dev/null +++ b/data_structures/binary_tree/diff_views_of_binary_tree.py @@ -0,0 +1,210 @@ +r""" +Problem: Given root of a binary tree, return the: +1. binary-tree-right-side-view +2. binary-tree-left-side-view +3. binary-tree-top-side-view +4. binary-tree-bottom-side-view +""" + +from __future__ import annotations + +from collections import defaultdict +from dataclasses import dataclass + + +@dataclass +class TreeNode: + val: int + left: TreeNode | None = None + right: TreeNode | None = None + + +def make_tree() -> TreeNode: + """ + >>> make_tree().val + 3 + """ + return TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7))) + + +def binary_tree_right_side_view(root: TreeNode) -> list[int]: + r""" + Function returns the right side view of binary tree. 
+ + 3 <- 3 + / \ + 9 20 <- 20 + / \ + 15 7 <- 7 + + >>> binary_tree_right_side_view(make_tree()) + [3, 20, 7] + >>> binary_tree_right_side_view(None) + [] + """ + + def depth_first_search( + root: TreeNode | None, depth: int, right_view: list[int] + ) -> None: + """ + A depth first search preorder traversal to append the values at + right side of tree. + """ + if not root: + return + + if depth == len(right_view): + right_view.append(root.val) + + depth_first_search(root.right, depth + 1, right_view) + depth_first_search(root.left, depth + 1, right_view) + + right_view: list = [] + if not root: + return right_view + + depth_first_search(root, 0, right_view) + return right_view + + +def binary_tree_left_side_view(root: TreeNode) -> list[int]: + r""" + Function returns the left side view of binary tree. + + 3 -> 3 + / \ + 9 -> 9 20 + / \ + 15 -> 15 7 + + >>> binary_tree_left_side_view(make_tree()) + [3, 9, 15] + >>> binary_tree_left_side_view(None) + [] + """ + + def depth_first_search( + root: TreeNode | None, depth: int, left_view: list[int] + ) -> None: + """ + A depth first search preorder traversal to append the values + at left side of tree. + """ + if not root: + return + + if depth == len(left_view): + left_view.append(root.val) + + depth_first_search(root.left, depth + 1, left_view) + depth_first_search(root.right, depth + 1, left_view) + + left_view: list = [] + if not root: + return left_view + + depth_first_search(root, 0, left_view) + return left_view + + +def binary_tree_top_side_view(root: TreeNode) -> list[int]: + r""" + Function returns the top side view of binary tree. + + 9 3 20 7 + ⬇ ⬇ ⬇ ⬇ + + 3 + / \ + 9 20 + / \ + 15 7 + + >>> binary_tree_top_side_view(make_tree()) + [9, 3, 20, 7] + >>> binary_tree_top_side_view(None) + [] + """ + + def breadth_first_search(root: TreeNode, top_view: list[int]) -> None: + """ + A breadth first search traversal with defaultdict ds to append + the values of tree from top view + """ + queue = [(root, 0)] + lookup = defaultdict(list) + + while queue: + first = queue.pop(0) + node, hd = first + + lookup[hd].append(node.val) + + if node.left: + queue.append((node.left, hd - 1)) + if node.right: + queue.append((node.right, hd + 1)) + + for pair in sorted(lookup.items(), key=lambda each: each[0]): + top_view.append(pair[1][0]) + + top_view: list = [] + if not root: + return top_view + + breadth_first_search(root, top_view) + return top_view + + +def binary_tree_bottom_side_view(root: TreeNode) -> list[int]: + r""" + Function returns the bottom side view of binary tree + + 3 + / \ + 9 20 + / \ + 15 7 + ↑ ↑ ↑ ↑ + 9 15 20 7 + + >>> binary_tree_bottom_side_view(make_tree()) + [9, 15, 20, 7] + >>> binary_tree_bottom_side_view(None) + [] + """ + from collections import defaultdict + + def breadth_first_search(root: TreeNode, bottom_view: list[int]) -> None: + """ + A breadth first search traversal with defaultdict ds to append + the values of tree from bottom view + """ + queue = [(root, 0)] + lookup = defaultdict(list) + + while queue: + first = queue.pop(0) + node, hd = first + lookup[hd].append(node.val) + + if node.left: + queue.append((node.left, hd - 1)) + if node.right: + queue.append((node.right, hd + 1)) + + for pair in sorted(lookup.items(), key=lambda each: each[0]): + bottom_view.append(pair[1][-1]) + + bottom_view: list = [] + if not root: + return bottom_view + + breadth_first_search(root, bottom_view) + return bottom_view + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 49cd46acea37350c8c22488316f8cf3f5ea88925 
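As a usage note (not part of the patch): with the definitions from diff_views_of_binary_tree.py in scope, the four helpers agree with the doctests above, for example:

root = make_tree()                          # the example tree rooted at 3
print(binary_tree_right_side_view(root))    # [3, 20, 7]
print(binary_tree_left_side_view(root))     # [3, 9, 15]
print(binary_tree_top_side_view(root))      # [9, 3, 20, 7]
print(binary_tree_bottom_side_view(root))   # [9, 15, 20, 7]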
Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 18 Oct 2022 02:09:41 -0400 Subject: [PATCH 0570/1543] Update convolve function namespace (#7390) --- computer_vision/horn_schunck.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/computer_vision/horn_schunck.py b/computer_vision/horn_schunck.py index 2a153d06ddae..b63e0268294c 100644 --- a/computer_vision/horn_schunck.py +++ b/computer_vision/horn_schunck.py @@ -12,7 +12,7 @@ from typing import SupportsIndex import numpy as np -from scipy.ndimage.filters import convolve +from scipy.ndimage import convolve def warp( From 6d1e009f35dd172ef51d484d0310919cdbab189d Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 18 Oct 2022 05:57:03 -0400 Subject: [PATCH 0571/1543] Remove depreciated np.float (#7394) --- machine_learning/decision_tree.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index 4a86e5322a27..7cd1b02c4181 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -24,13 +24,13 @@ def mean_squared_error(self, labels, prediction): estimate the labels >>> tester = DecisionTree() >>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10]) - >>> test_prediction = np.float(6) + >>> test_prediction = float(6) >>> tester.mean_squared_error(test_labels, test_prediction) == ( ... TestDecisionTree.helper_mean_squared_error_test(test_labels, ... test_prediction)) True >>> test_labels = np.array([1,2,3]) - >>> test_prediction = np.float(2) + >>> test_prediction = float(2) >>> tester.mean_squared_error(test_labels, test_prediction) == ( ... TestDecisionTree.helper_mean_squared_error_test(test_labels, ... test_prediction)) @@ -145,11 +145,11 @@ def helper_mean_squared_error_test(labels, prediction): @param prediction: a floating point value return value: helper_mean_squared_error_test calculates the mean squared error """ - squared_error_sum = np.float(0) + squared_error_sum = float(0) for label in labels: squared_error_sum += (label - prediction) ** 2 - return np.float(squared_error_sum / labels.size) + return float(squared_error_sum / labels.size) def main(): From 2ca695b0fe28519d3449106bff9f9004d93a0b3f Mon Sep 17 00:00:00 2001 From: Shubham Kondekar <40213815+kondekarshubham123@users.noreply.github.com> Date: Tue, 18 Oct 2022 23:35:18 +0530 Subject: [PATCH 0572/1543] [Matrix] Max area of island problem solved DFS algorithm (#6918) * Maximum area of island program added * Update matrix/max_area_of_island.py Co-authored-by: Caeden * Update matrix/max_area_of_island.py Co-authored-by: Caeden * Update matrix/max_area_of_island.py Co-authored-by: Caeden * Review's comment resolved * max area of island * PR Comments resolved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Test case fail fix * Grammer correction * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * flake8 issue resolved * some variable name fix * Update matrix/max_area_of_island.py Co-authored-by: Caeden Perelli-Harris * Update matrix/max_area_of_island.py Co-authored-by: Caeden Perelli-Harris * PR, comments resolved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update matrix/max_area_of_island.py Co-authored-by: Christian Clauss * Update matrix/max_area_of_island.py Co-authored-by: Christian Clauss * PR, comments resolved * Update max_area_of_island.py * [pre-commit.ci] 
auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Typo Co-authored-by: Caeden Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- matrix/max_area_of_island.py | 112 +++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 matrix/max_area_of_island.py diff --git a/matrix/max_area_of_island.py b/matrix/max_area_of_island.py new file mode 100644 index 000000000000..40950c303795 --- /dev/null +++ b/matrix/max_area_of_island.py @@ -0,0 +1,112 @@ +""" +Given an two dimensional binary matrix grid. An island is a group of 1's (representing +land) connected 4-directionally (horizontal or vertical.) You may assume all four edges +of the grid are surrounded by water. The area of an island is the number of cells with +a value 1 in the island. Return the maximum area of an island in a grid. If there is no +island, return 0. +""" + +matrix = [ + [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0], + [0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0], +] + + +def is_safe(row: int, col: int, rows: int, cols: int) -> bool: + """ + Checking whether coordinate (row, col) is valid or not. + + >>> is_safe(0, 0, 5, 5) + True + >>> is_safe(-1,-1, 5, 5) + False + """ + return 0 <= row < rows and 0 <= col < cols + + +def depth_first_search(row: int, col: int, seen: set, mat: list[list[int]]) -> int: + """ + Returns the current area of the island + + >>> depth_first_search(0, 0, set(), matrix) + 0 + """ + rows = len(mat) + cols = len(mat[0]) + if is_safe(row, col, rows, cols) and (row, col) not in seen and mat[row][col] == 1: + seen.add((row, col)) + return ( + 1 + + depth_first_search(row + 1, col, seen, mat) + + depth_first_search(row - 1, col, seen, mat) + + depth_first_search(row, col + 1, seen, mat) + + depth_first_search(row, col - 1, seen, mat) + ) + else: + return 0 + + +def find_max_area(mat: list[list[int]]) -> int: + """ + Finds the area of all islands and returns the maximum area. + + >>> find_max_area(matrix) + 6 + """ + seen: set = set() + + max_area = 0 + for row, line in enumerate(mat): + for col, item in enumerate(line): + if item == 1 and (row, col) not in seen: + # Maximizing the area + max_area = max(max_area, depth_first_search(row, col, seen, mat)) + return max_area + + +if __name__ == "__main__": + import doctest + + print(find_max_area(matrix)) # Output -> 6 + + """ + Explanation: + We are allowed to move in four directions (horizontal or vertical) so the possible + in a matrix if we are at x and y position the possible moving are + + Directions are [(x, y+1), (x, y-1), (x+1, y), (x-1, y)] but we need to take care of + boundary cases as well which are x and y can not be smaller than 0 and greater than + the number of rows and columns respectively. 
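Before the larger visualization below, a smaller grid makes the flood fill easy to follow. This mini-grid is an added illustration that reuses find_max_area from above; it is not part of the submitted file.

small = [
    [0, 1],
    [1, 1],
]
# (0, 1), (1, 0) and (1, 1) are 4-directionally connected: one island of area 3
print(find_max_area(small))  # 3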
+ + Visualization + mat = [ + [0,0,A,0,0,0,0,B,0,0,0,0,0], + [0,0,0,0,0,0,0,B,B,B,0,0,0], + [0,C,C,0,D,0,0,0,0,0,0,0,0], + [0,C,0,0,D,D,0,0,E,0,E,0,0], + [0,C,0,0,D,D,0,0,E,E,E,0,0], + [0,0,0,0,0,0,0,0,0,0,E,0,0], + [0,0,0,0,0,0,0,F,F,F,0,0,0], + [0,0,0,0,0,0,0,F,F,0,0,0,0] + ] + + For visualization, I have defined the connected island with letters + by observation, we can see that + A island is of area 1 + B island is of area 4 + C island is of area 4 + D island is of area 5 + E island is of area 6 and + F island is of area 5 + + it has 6 unique islands of mentioned areas + and the maximum of all of them is 6 so we return 6. + """ + + doctest.testmod() From 5bfcab1aa4392e4e3f43927a7fbd8bf6c6815c88 Mon Sep 17 00:00:00 2001 From: Manish Kumar <73126278+ManishKumar219@users.noreply.github.com> Date: Wed, 19 Oct 2022 00:52:38 +0530 Subject: [PATCH 0573/1543] Create minmax.py (#7409) * Create minmax.py * Update minmax.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- backtracking/minmax.py | 69 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 backtracking/minmax.py diff --git a/backtracking/minmax.py b/backtracking/minmax.py new file mode 100644 index 000000000000..9b87183cfdb7 --- /dev/null +++ b/backtracking/minmax.py @@ -0,0 +1,69 @@ +""" +Minimax helps to achieve maximum score in a game by checking all possible moves. + +""" +from __future__ import annotations + +import math + + +def minimax( + depth: int, node_index: int, is_max: bool, scores: list[int], height: float +) -> int: + """ + depth is current depth in game tree. + node_index is index of current node in scores[]. + scores[] contains the leaves of game tree. + height is maximum height of game tree. + + >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423] + >>> height = math.log(len(scores), 2) + >>> minimax(0, 0, True, scores, height) + 65 + >>> minimax(-1, 0, True, scores, height) + Traceback (most recent call last): + ... + ValueError: Depth cannot be less than 0 + >>> minimax(0, 0, True, [], 2) + Traceback (most recent call last): + ... 
+ ValueError: Scores cannot be empty + >>> scores = [3, 5, 2, 9, 12, 5, 23, 23] + >>> height = math.log(len(scores), 2) + >>> minimax(0, 0, True, scores, height) + 12 + """ + + if depth < 0: + raise ValueError("Depth cannot be less than 0") + + if not scores: + raise ValueError("Scores cannot be empty") + + if depth == height: + return scores[node_index] + + return ( + max( + minimax(depth + 1, node_index * 2, False, scores, height), + minimax(depth + 1, node_index * 2 + 1, False, scores, height), + ) + if is_max + else min( + minimax(depth + 1, node_index * 2, True, scores, height), + minimax(depth + 1, node_index * 2 + 1, True, scores, height), + ) + ) + + +def main() -> None: + scores = [90, 23, 6, 33, 21, 65, 123, 34423] + height = math.log(len(scores), 2) + print(f"Optimal value : {minimax(0, 0, True, scores, height)}") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() From b90ec303989b864996e31e021863f8b2c8852054 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nadirhan=20=C5=9Eahin?= Date: Tue, 18 Oct 2022 22:55:43 +0300 Subject: [PATCH 0574/1543] Create combination_sum.py (#7403) * Create combination_sum.py * Update DIRECTORY.md * Adds doctests Co-authored-by: Christian Clauss * Update combination_sum.py * Update combination_sum.py Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + backtracking/combination_sum.py | 66 +++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) create mode 100644 backtracking/combination_sum.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 94ec42832e41..c1fad8d9d794 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -23,6 +23,7 @@ * [All Permutations](backtracking/all_permutations.py) * [All Subsequences](backtracking/all_subsequences.py) * [Coloring](backtracking/coloring.py) + * [Combination Sum](backtracking/combination_sum.py) * [Hamiltonian Cycle](backtracking/hamiltonian_cycle.py) * [Knight Tour](backtracking/knight_tour.py) * [Minimax](backtracking/minimax.py) diff --git a/backtracking/combination_sum.py b/backtracking/combination_sum.py new file mode 100644 index 000000000000..f555adb751d0 --- /dev/null +++ b/backtracking/combination_sum.py @@ -0,0 +1,66 @@ +""" +In the Combination Sum problem, we are given a list consisting of distinct integers. +We need to find all the combinations whose sum equals to target given. +We can use an element more than one. + +Time complexity(Average Case): O(n!) + +Constraints: +1 <= candidates.length <= 30 +2 <= candidates[i] <= 40 +All elements of candidates are distinct. +1 <= target <= 40 +""" + + +def backtrack( + candidates: list, path: list, answer: list, target: int, previous_index: int +) -> None: + """ + A recursive function that searches for possible combinations. Backtracks in case + of a bigger current combination value than the target value. + + Parameters + ---------- + previous_index: Last index from the previous search + target: The value we need to obtain by summing our integers in the path list. + answer: A list of possible combinations + path: Current combination + candidates: A list of integers we can use. 
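To make the recursion concrete, the second doctest below, combination_sum([2, 3, 6, 7], 7), is explored roughly as follows (a hand trace added here, not generated output):

take 2 -> target 5
    take 2 -> target 3
        take 2 -> target 1: nothing fits, backtrack
        take 3 -> target 0: record [2, 2, 3]
    take 3 -> target 2: nothing from index 1 onward fits, backtrack
take 3 -> target 4
    take 3 -> target 1: dead end (6 and 7 are too large as well)
take 6 -> target 1: dead end
take 7 -> target 0: record [7]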
+ """ + if target == 0: + answer.append(path.copy()) + else: + for index in range(previous_index, len(candidates)): + if target >= candidates[index]: + path.append(candidates[index]) + backtrack(candidates, path, answer, target - candidates[index], index) + path.pop(len(path) - 1) + + +def combination_sum(candidates: list, target: int) -> list: + """ + >>> combination_sum([2, 3, 5], 8) + [[2, 2, 2, 2], [2, 3, 3], [3, 5]] + >>> combination_sum([2, 3, 6, 7], 7) + [[2, 2, 3], [7]] + >>> combination_sum([-8, 2.3, 0], 1) + Traceback (most recent call last): + ... + RecursionError: maximum recursion depth exceeded in comparison + """ + path = [] # type: list[int] + answer = [] # type: list[int] + backtrack(candidates, path, answer, target, 0) + return answer + + +def main() -> None: + print(combination_sum([-8, 2.3, 0], 1)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() From 80ff25ed38e62bcf2e51a4a51bf7bf8f9b03ea11 Mon Sep 17 00:00:00 2001 From: Sai Ganesh Manda <89340753+mvsg2@users.noreply.github.com> Date: Wed, 19 Oct 2022 17:13:26 +0530 Subject: [PATCH 0575/1543] Update gaussian_naive_bayes.py (#7406) * Update gaussian_naive_bayes.py Just adding in a final metric of accuracy to declare... * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- machine_learning/gaussian_naive_bayes.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/machine_learning/gaussian_naive_bayes.py b/machine_learning/gaussian_naive_bayes.py index 77e7326626c4..7e9a8d7f6dcf 100644 --- a/machine_learning/gaussian_naive_bayes.py +++ b/machine_learning/gaussian_naive_bayes.py @@ -1,7 +1,9 @@ # Gaussian Naive Bayes Example +import time + from matplotlib import pyplot as plt from sklearn.datasets import load_iris -from sklearn.metrics import plot_confusion_matrix +from sklearn.metrics import accuracy_score, plot_confusion_matrix from sklearn.model_selection import train_test_split from sklearn.naive_bayes import GaussianNB @@ -25,7 +27,9 @@ def main(): # Gaussian Naive Bayes nb_model = GaussianNB() - nb_model.fit(x_train, y_train) + time.sleep(2.9) + model_fit = nb_model.fit(x_train, y_train) + y_pred = model_fit.predict(x_test) # Predictions on the test set # Display Confusion Matrix plot_confusion_matrix( @@ -33,12 +37,16 @@ def main(): x_test, y_test, display_labels=iris["target_names"], - cmap="Blues", + cmap="Blues", # although, Greys_r has a better contrast... 
normalize="true", ) plt.title("Normalized Confusion Matrix - IRIS Dataset") plt.show() + time.sleep(1.8) + final_accuracy = 100 * accuracy_score(y_true=y_test, y_pred=y_pred) + print(f"The overall accuracy of the model is: {round(final_accuracy, 2)}%") + if __name__ == "__main__": main() From b8281d79ef6fdfa11bdd697be3f4a1ef7824cf7f Mon Sep 17 00:00:00 2001 From: Kuldeep Borkar <74557588+KuldeepBorkar@users.noreply.github.com> Date: Wed, 19 Oct 2022 17:16:56 +0530 Subject: [PATCH 0576/1543] Fixed a typo of 'a' and 'an' and used f string in print statement (#7398) --- boolean_algebra/norgate.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/boolean_algebra/norgate.py b/boolean_algebra/norgate.py index 1c341e8a707b..2c27b80afdbe 100644 --- a/boolean_algebra/norgate.py +++ b/boolean_algebra/norgate.py @@ -1,13 +1,15 @@ -""" A NOR Gate is a logic gate in boolean algebra which results to false(0) - if any of the input is 1, and True(1) if both the inputs are 0. - Following is the truth table of an NOR Gate: +""" +A NOR Gate is a logic gate in boolean algebra which results to false(0) +if any of the input is 1, and True(1) if both the inputs are 0. +Following is the truth table of a NOR Gate: | Input 1 | Input 2 | Output | | 0 | 0 | 1 | | 0 | 1 | 0 | | 1 | 0 | 0 | | 1 | 1 | 0 | + +Following is the code implementation of the NOR Gate """ -"""Following is the code implementation of the NOR Gate""" def nor_gate(input_1: int, input_2: int) -> int: @@ -30,11 +32,11 @@ def nor_gate(input_1: int, input_2: int) -> int: def main() -> None: print("Truth Table of NOR Gate:") - print("| Input 1 |", " Input 2 |", " Output |") - print("| 0 |", " 0 | ", nor_gate(0, 0), " |") - print("| 0 |", " 1 | ", nor_gate(0, 1), " |") - print("| 1 |", " 0 | ", nor_gate(1, 0), " |") - print("| 1 |", " 1 | ", nor_gate(1, 1), " |") + print("| Input 1 | Input 2 | Output |") + print(f"| 0 | 0 | {nor_gate(0, 0)} |") + print(f"| 0 | 1 | {nor_gate(0, 1)} |") + print(f"| 1 | 0 | {nor_gate(1, 0)} |") + print(f"| 1 | 1 | {nor_gate(1, 1)} |") if __name__ == "__main__": From 50da472ddcdc2d79d1ad325ec05cda3558802fda Mon Sep 17 00:00:00 2001 From: Kuldeep Borkar <74557588+KuldeepBorkar@users.noreply.github.com> Date: Wed, 19 Oct 2022 22:48:33 +0530 Subject: [PATCH 0577/1543] Implemented Gelu Function (#7368) * Implemented Gelu Function * Renamed file and added more description to function * Extended the name GELU * Update gaussian_error_linear_unit.py Co-authored-by: Christian Clauss --- maths/gaussian_error_linear_unit.py | 53 +++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 maths/gaussian_error_linear_unit.py diff --git a/maths/gaussian_error_linear_unit.py b/maths/gaussian_error_linear_unit.py new file mode 100644 index 000000000000..7b5f875143b9 --- /dev/null +++ b/maths/gaussian_error_linear_unit.py @@ -0,0 +1,53 @@ +""" +This script demonstrates an implementation of the Gaussian Error Linear Unit function. +* https://en.wikipedia.org/wiki/Activation_function#Comparison_of_activation_functions + +The function takes a vector of K real numbers as input and returns x * sigmoid(1.702*x). +Gaussian Error Linear Unit (GELU) is a high-performing neural network activation +function. + +This script is inspired by a corresponding research paper. +* https://arxiv.org/abs/1606.08415 +""" + +import numpy as np + + +def sigmoid(vector: np.array) -> np.array: + """ + Mathematical function sigmoid takes a vector x of K real numbers as input and + returns 1/ (1 + e^-x). 
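A quick numeric sanity check of the doctest values that follow (values rounded; this is an added aside, not part of the file):

import numpy as np

print(1 / (1 + np.exp(-2.0)))      # ~0.88079708, i.e. sigmoid(2) in the doctest below
print(1 / (1 + np.exp(1.0)))       # ~0.26894142, i.e. sigmoid(-1)
print(1.0 / (1 + np.exp(-1.702)))  # ~0.84579577, cf. gaussian_error_linear_unit(1.0) later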
+ https://en.wikipedia.org/wiki/Sigmoid_function + + >>> sigmoid(np.array([-1.0, 1.0, 2.0])) + array([0.26894142, 0.73105858, 0.88079708]) + """ + return 1 / (1 + np.exp(-vector)) + + +def gaussian_error_linear_unit(vector: np.array) -> np.array: + """ + Implements the Gaussian Error Linear Unit (GELU) function + + Parameters: + vector (np.array): A numpy array of shape (1,n) + consisting of real values + + Returns: + gelu_vec (np.array): The input numpy array, after applying + gelu. + + Examples: + >>> gaussian_error_linear_unit(np.array([-1.0, 1.0, 2.0])) + array([-0.15420423, 0.84579577, 1.93565862]) + + >>> gaussian_error_linear_unit(np.array([-3])) + array([-0.01807131]) + """ + return vector * sigmoid(1.702 * vector) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 2859d4bf3aa96737a4715c65d4a9051d9c62d24d Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Wed, 19 Oct 2022 16:12:44 -0400 Subject: [PATCH 0578/1543] Remove references to depreciated QasmSimulator (#7417) * Fix typos * Replace depreciated QasmSimulator in Deutsch-Jozsa algorithm * Replace depreciated QasmSimulator in half adder algorithm * Replace depreciated QasmSimulator in not gate algorithm * Replace depreciated QasmSimulator in full adder algorithm * Simplify qiskit import * Make formatting more consistent * Replace depreciated QasmSimulator in quantum entanglement algorithm * Replace depreciated QasmSimulator in ripple adder algorithm * Replace depreciated QasmSimulator in qubit measure algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updating DIRECTORY.md * updating DIRECTORY.md * Remove qiskit import alias for clarity Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 4 ++++ quantum/deutsch_jozsa.py | 28 +++++++++++++++------------- quantum/half_adder.py | 14 +++++++------- quantum/not_gate.py | 14 ++++++++------ quantum/q_full_adder.py | 27 +++++++++++++-------------- quantum/quantum_entanglement.py | 6 +++--- quantum/ripple_adder_classic.py | 16 ++++++++-------- quantum/single_qubit_measure.py | 16 +++++++++------- 8 files changed, 67 insertions(+), 58 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index c1fad8d9d794..1fad287988c4 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -27,6 +27,7 @@ * [Hamiltonian Cycle](backtracking/hamiltonian_cycle.py) * [Knight Tour](backtracking/knight_tour.py) * [Minimax](backtracking/minimax.py) + * [Minmax](backtracking/minmax.py) * [N Queens](backtracking/n_queens.py) * [N Queens Math](backtracking/n_queens_math.py) * [Rat In Maze](backtracking/rat_in_maze.py) @@ -157,6 +158,7 @@ * [Binary Tree Mirror](data_structures/binary_tree/binary_tree_mirror.py) * [Binary Tree Node Sum](data_structures/binary_tree/binary_tree_node_sum.py) * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) + * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) @@ -513,6 +515,7 @@ * [Gamma](maths/gamma.py) * [Gamma Recursive](maths/gamma_recursive.py) * [Gaussian](maths/gaussian.py) + * [Gaussian Error Linear Unit](maths/gaussian_error_linear_unit.py) * [Greatest Common 
Divisor](maths/greatest_common_divisor.py) * [Greedy Coin Change](maths/greedy_coin_change.py) * [Hamming Numbers](maths/hamming_numbers.py) @@ -601,6 +604,7 @@ * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Matrix Class](matrix/matrix_class.py) * [Matrix Operation](matrix/matrix_operation.py) + * [Max Area Of Island](matrix/max_area_of_island.py) * [Nth Fibonacci Using Matrix Exponentiation](matrix/nth_fibonacci_using_matrix_exponentiation.py) * [Rotate Matrix](matrix/rotate_matrix.py) * [Searching In Sorted Matrix](matrix/searching_in_sorted_matrix.py) diff --git a/quantum/deutsch_jozsa.py b/quantum/deutsch_jozsa.py index d7e2d8335fb9..95c3e65b5edf 100755 --- a/quantum/deutsch_jozsa.py +++ b/quantum/deutsch_jozsa.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 """ -Deutsch-Josza Algorithm is one of the first examples of a quantum +Deutsch-Jozsa Algorithm is one of the first examples of a quantum algorithm that is exponentially faster than any possible deterministic classical algorithm @@ -22,10 +22,10 @@ """ import numpy as np -import qiskit as q +import qiskit -def dj_oracle(case: str, num_qubits: int) -> q.QuantumCircuit: +def dj_oracle(case: str, num_qubits: int) -> qiskit.QuantumCircuit: """ Returns a Quantum Circuit for the Oracle function. The circuit returned can represent balanced or constant function, @@ -33,7 +33,7 @@ def dj_oracle(case: str, num_qubits: int) -> q.QuantumCircuit: """ # This circuit has num_qubits+1 qubits: the size of the input, # plus one output qubit - oracle_qc = q.QuantumCircuit(num_qubits + 1) + oracle_qc = qiskit.QuantumCircuit(num_qubits + 1) # First, let's deal with the case in which oracle is balanced if case == "balanced": @@ -43,7 +43,7 @@ def dj_oracle(case: str, num_qubits: int) -> q.QuantumCircuit: # Next, format 'b' as a binary string of length 'n', padded with zeros: b_str = format(b, f"0{num_qubits}b") # Next, we place the first X-gates. 
Each digit in our binary string - # correspopnds to a qubit, if the digit is 0, we do nothing, if it's 1 + # corresponds to a qubit, if the digit is 0, we do nothing, if it's 1 # we apply an X-gate to that qubit: for index, bit in enumerate(b_str): if bit == "1": @@ -70,13 +70,15 @@ def dj_oracle(case: str, num_qubits: int) -> q.QuantumCircuit: return oracle_gate -def dj_algorithm(oracle: q.QuantumCircuit, num_qubits: int) -> q.QuantumCircuit: +def dj_algorithm( + oracle: qiskit.QuantumCircuit, num_qubits: int +) -> qiskit.QuantumCircuit: """ - Returns the complete Deustch-Jozsa Quantum Circuit, + Returns the complete Deutsch-Jozsa Quantum Circuit, adding Input & Output registers and Hadamard & Measurement Gates, to the Oracle Circuit passed in arguments """ - dj_circuit = q.QuantumCircuit(num_qubits + 1, num_qubits) + dj_circuit = qiskit.QuantumCircuit(num_qubits + 1, num_qubits) # Set up the output qubit: dj_circuit.x(num_qubits) dj_circuit.h(num_qubits) @@ -95,7 +97,7 @@ def dj_algorithm(oracle: q.QuantumCircuit, num_qubits: int) -> q.QuantumCircuit: return dj_circuit -def deutsch_jozsa(case: str, num_qubits: int) -> q.result.counts.Counts: +def deutsch_jozsa(case: str, num_qubits: int) -> qiskit.result.counts.Counts: """ Main function that builds the circuit using other helper functions, runs the experiment 1000 times & returns the resultant qubit counts @@ -104,14 +106,14 @@ def deutsch_jozsa(case: str, num_qubits: int) -> q.result.counts.Counts: >>> deutsch_jozsa("balanced", 3) {'111': 1000} """ - # Use Aer's qasm_simulator - simulator = q.Aer.get_backend("qasm_simulator") + # Use Aer's simulator + simulator = qiskit.Aer.get_backend("aer_simulator") oracle_gate = dj_oracle(case, num_qubits) dj_circuit = dj_algorithm(oracle_gate, num_qubits) - # Execute the circuit on the qasm simulator - job = q.execute(dj_circuit, simulator, shots=1000) + # Execute the circuit on the simulator + job = qiskit.execute(dj_circuit, simulator, shots=1000) # Return the histogram data of the results of the experiment. return job.result().get_counts(dj_circuit) diff --git a/quantum/half_adder.py b/quantum/half_adder.py index 4af704e640be..21a57ddcf2dd 100755 --- a/quantum/half_adder.py +++ b/quantum/half_adder.py @@ -10,10 +10,10 @@ https://qiskit.org/textbook/ch-states/atoms-computation.html#4.2-Remembering-how-to-add- """ -import qiskit as q +import qiskit -def half_adder(bit0: int, bit1: int) -> q.result.counts.Counts: +def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts: """ >>> half_adder(0, 0) {'00': 1000} @@ -24,10 +24,10 @@ def half_adder(bit0: int, bit1: int) -> q.result.counts.Counts: >>> half_adder(1, 1) {'10': 1000} """ - # Use Aer's qasm_simulator - simulator = q.Aer.get_backend("qasm_simulator") + # Use Aer's simulator + simulator = qiskit.Aer.get_backend("aer_simulator") - qc_ha = q.QuantumCircuit(4, 2) + qc_ha = qiskit.QuantumCircuit(4, 2) # encode inputs in qubits 0 and 1 if bit0 == 1: qc_ha.x(0) @@ -48,9 +48,9 @@ def half_adder(bit0: int, bit1: int) -> q.result.counts.Counts: qc_ha.measure(3, 1) # extract AND value # Execute the circuit on the qasm simulator - job = q.execute(qc_ha, simulator, shots=1000) + job = qiskit.execute(qc_ha, simulator, shots=1000) - # Return the histogram data of the results of the experiment. 
+ # Return the histogram data of the results of the experiment return job.result().get_counts(qc_ha) diff --git a/quantum/not_gate.py b/quantum/not_gate.py index e68a780091c7..ee23272d7a08 100644 --- a/quantum/not_gate.py +++ b/quantum/not_gate.py @@ -6,21 +6,23 @@ Qiskit Docs: https://qiskit.org/documentation/getting_started.html """ -import qiskit as q +import qiskit -def single_qubit_measure(qubits: int, classical_bits: int) -> q.result.counts.Counts: +def single_qubit_measure( + qubits: int, classical_bits: int +) -> qiskit.result.counts.Counts: """ >>> single_qubit_measure(2, 2) {'11': 1000} >>> single_qubit_measure(4, 4) {'0011': 1000} """ - # Use Aer's qasm_simulator - simulator = q.Aer.get_backend("qasm_simulator") + # Use Aer's simulator + simulator = qiskit.Aer.get_backend("aer_simulator") # Create a Quantum Circuit acting on the q register - circuit = q.QuantumCircuit(qubits, classical_bits) + circuit = qiskit.QuantumCircuit(qubits, classical_bits) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0) @@ -30,7 +32,7 @@ def single_qubit_measure(qubits: int, classical_bits: int) -> q.result.counts.Co circuit.measure([0, 1], [0, 1]) # Execute the circuit on the qasm simulator - job = q.execute(circuit, simulator, shots=1000) + job = qiskit.execute(circuit, simulator, shots=1000) # Return the histogram data of the results of the experiment. return job.result().get_counts(circuit) diff --git a/quantum/q_full_adder.py b/quantum/q_full_adder.py index 597efb8342e1..c6d03d170659 100644 --- a/quantum/q_full_adder.py +++ b/quantum/q_full_adder.py @@ -11,7 +11,6 @@ import math import qiskit -from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def quantum_full_adder( @@ -38,25 +37,25 @@ def quantum_full_adder( carry_in: carry in for the circuit. Returns: qiskit.result.counts.Counts: sum result counts. - >>> quantum_full_adder(1,1,1) + >>> quantum_full_adder(1, 1, 1) {'11': 1000} - >>> quantum_full_adder(0,0,1) + >>> quantum_full_adder(0, 0, 1) {'01': 1000} - >>> quantum_full_adder(1,0,1) + >>> quantum_full_adder(1, 0, 1) {'10': 1000} - >>> quantum_full_adder(1,-4,1) + >>> quantum_full_adder(1, -4, 1) Traceback (most recent call last): ... ValueError: inputs must be positive. - >>> quantum_full_adder('q',0,1) + >>> quantum_full_adder('q', 0, 1) Traceback (most recent call last): ... TypeError: inputs must be integers. - >>> quantum_full_adder(0.5,0,1) + >>> quantum_full_adder(0.5, 0, 1) Traceback (most recent call last): ... ValueError: inputs must be exact integers. - >>> quantum_full_adder(0,1,3) + >>> quantum_full_adder(0, 1, 3) Traceback (most recent call last): ... ValueError: inputs must be less or equal to 2. 
@@ -78,12 +77,12 @@ def quantum_full_adder( raise ValueError("inputs must be less or equal to 2.") # build registers - qr = QuantumRegister(4, "qr") - cr = ClassicalRegister(2, "cr") + qr = qiskit.QuantumRegister(4, "qr") + cr = qiskit.ClassicalRegister(2, "cr") # list the entries entry = [input_1, input_2, carry_in] - quantum_circuit = QuantumCircuit(qr, cr) + quantum_circuit = qiskit.QuantumCircuit(qr, cr) for i in range(0, 3): if entry[i] == 2: @@ -102,11 +101,11 @@ def quantum_full_adder( quantum_circuit.measure([2, 3], cr) # measure the last two qbits - backend = Aer.get_backend("qasm_simulator") - job = execute(quantum_circuit, backend, shots=1000) + backend = qiskit.Aer.get_backend("aer_simulator") + job = qiskit.execute(quantum_circuit, backend, shots=1000) return job.result().get_counts(quantum_circuit) if __name__ == "__main__": - print(f"Total sum count for state is: {quantum_full_adder(1,1,1)}") + print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}") diff --git a/quantum/quantum_entanglement.py b/quantum/quantum_entanglement.py index 3d8e2771361c..08fc32e493b2 100644 --- a/quantum/quantum_entanglement.py +++ b/quantum/quantum_entanglement.py @@ -29,8 +29,8 @@ def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts: """ classical_bits = qubits - # Using Aer's qasm_simulator - simulator = qiskit.Aer.get_backend("qasm_simulator") + # Using Aer's simulator + simulator = qiskit.Aer.get_backend("aer_simulator") # Creating a Quantum Circuit acting on the q register circuit = qiskit.QuantumCircuit(qubits, classical_bits) @@ -48,7 +48,7 @@ def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts: # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. 
- # Executing the circuit on the qasm simulator + # Executing the circuit on the simulator job = qiskit.execute(circuit, simulator, shots=1000) return job.result().get_counts(circuit) diff --git a/quantum/ripple_adder_classic.py b/quantum/ripple_adder_classic.py index 1d3724476068..c07757af7fff 100644 --- a/quantum/ripple_adder_classic.py +++ b/quantum/ripple_adder_classic.py @@ -2,11 +2,11 @@ # https://en.wikipedia.org/wiki/Adder_(electronics)#Full_adder # https://en.wikipedia.org/wiki/Controlled_NOT_gate -from qiskit import Aer, QuantumCircuit, execute +import qiskit from qiskit.providers import Backend -def store_two_classics(val1: int, val2: int) -> tuple[QuantumCircuit, str, str]: +def store_two_classics(val1: int, val2: int) -> tuple[qiskit.QuantumCircuit, str, str]: """ Generates a Quantum Circuit which stores two classical integers Returns the circuit and binary representation of the integers @@ -21,10 +21,10 @@ def store_two_classics(val1: int, val2: int) -> tuple[QuantumCircuit, str, str]: # We need (3 * number of bits in the larger number)+1 qBits # The second parameter is the number of classical registers, to measure the result - circuit = QuantumCircuit((len(x) * 3) + 1, len(x) + 1) + circuit = qiskit.QuantumCircuit((len(x) * 3) + 1, len(x) + 1) # We are essentially "not-ing" the bits that are 1 - # Reversed because its easier to perform ops on more significant bits + # Reversed because it's easier to perform ops on more significant bits for i in range(len(x)): if x[::-1][i] == "1": circuit.x(i) @@ -36,7 +36,7 @@ def store_two_classics(val1: int, val2: int) -> tuple[QuantumCircuit, str, str]: def full_adder( - circuit: QuantumCircuit, + circuit: qiskit.QuantumCircuit, input1_loc: int, input2_loc: int, carry_in: int, @@ -55,14 +55,14 @@ def full_adder( # The default value for **backend** is the result of a function call which is not # normally recommended and causes flake8-bugbear to raise a B008 error. However, -# in this case, this is accptable because `Aer.get_backend()` is called when the +# in this case, this is acceptable because `Aer.get_backend()` is called when the # function is defined and that same backend is then reused for all function calls. def ripple_adder( val1: int, val2: int, - backend: Backend = Aer.get_backend("qasm_simulator"), # noqa: B008 + backend: Backend = qiskit.Aer.get_backend("aer_simulator"), # noqa: B008 ) -> int: """ Quantum Equivalent of a Ripple Adder Circuit @@ -104,7 +104,7 @@ def ripple_adder( for i in range(len(x) + 1): circuit.measure([(len(x) * 2) + i], [i]) - res = execute(circuit, backend, shots=1).result() + res = qiskit.execute(circuit, backend, shots=1).result() # The result is in binary. 
Convert it back to int return int(list(res.get_counts())[0], 2) diff --git a/quantum/single_qubit_measure.py b/quantum/single_qubit_measure.py index 7f058c2179a9..605bd804314a 100755 --- a/quantum/single_qubit_measure.py +++ b/quantum/single_qubit_measure.py @@ -6,25 +6,27 @@ Qiskit Docs: https://qiskit.org/documentation/getting_started.html """ -import qiskit as q +import qiskit -def single_qubit_measure(qubits: int, classical_bits: int) -> q.result.counts.Counts: +def single_qubit_measure( + qubits: int, classical_bits: int +) -> qiskit.result.counts.Counts: """ >>> single_qubit_measure(1, 1) {'0': 1000} """ - # Use Aer's qasm_simulator - simulator = q.Aer.get_backend("qasm_simulator") + # Use Aer's simulator + simulator = qiskit.Aer.get_backend("aer_simulator") # Create a Quantum Circuit acting on the q register - circuit = q.QuantumCircuit(qubits, classical_bits) + circuit = qiskit.QuantumCircuit(qubits, classical_bits) # Map the quantum measurement to the classical bits circuit.measure([0], [0]) - # Execute the circuit on the qasm simulator - job = q.execute(circuit, simulator, shots=1000) + # Execute the circuit on the simulator + job = qiskit.execute(circuit, simulator, shots=1000) # Return the histogram data of the results of the experiment. return job.result().get_counts(circuit) From 4829fea24dc2c75ffc49571538fc40bce2d7e64b Mon Sep 17 00:00:00 2001 From: Atul Rajput <92659293+AtulRajput01@users.noreply.github.com> Date: Thu, 20 Oct 2022 13:18:28 +0530 Subject: [PATCH 0579/1543] Create graphs/dijkstra_alternate.py (#7405) * Update dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dijkstra.py * Update graphs/dijkstra.py Co-authored-by: Christian Clauss * Update graphs/dijkstra.py Co-authored-by: Christian Clauss * Update graphs/dijkstra.py Co-authored-by: Christian Clauss * Update dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dijkstra.py * Apply suggestions from code review * Create dijkstra_alternate.py * Update dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * int(1e7) * Update dijkstra_alternate.py * Update graphs/dijkstra_alternate.py * sptset --> visited Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- graphs/dijkstra_alternate.py | 98 ++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 graphs/dijkstra_alternate.py diff --git a/graphs/dijkstra_alternate.py b/graphs/dijkstra_alternate.py new file mode 100644 index 000000000000..7beef6b04da1 --- /dev/null +++ b/graphs/dijkstra_alternate.py @@ -0,0 +1,98 @@ +from __future__ import annotations + + +class Graph: + def __init__(self, vertices: int) -> None: + """ + >>> graph = Graph(2) + >>> graph.vertices + 2 + >>> len(graph.graph) + 2 + >>> len(graph.graph[0]) + 2 + """ + self.vertices = vertices + self.graph = [[0] * vertices for _ in range(vertices)] + + def print_solution(self, distances_from_source: list[int]) -> None: + """ + >>> Graph(0).print_solution([]) # doctest: +NORMALIZE_WHITESPACE + Vertex Distance from Source + """ + print("Vertex \t Distance from Source") + for vertex in range(self.vertices): + print(vertex, "\t\t", distances_from_source[vertex]) + + def minimum_distance( + self, distances_from_source: list[int], visited: list[bool] + ) -> int: + """ + A utility function to find the 
vertex with minimum distance value, from the set + of vertices not yet included in shortest path tree. + + >>> Graph(3).minimum_distance([1, 2, 3], [False, False, True]) + 0 + """ + + # Initialize minimum distance for next node + minimum = 1e7 + min_index = 0 + + # Search not nearest vertex not in the shortest path tree + for vertex in range(self.vertices): + if distances_from_source[vertex] < minimum and visited[vertex] is False: + minimum = distances_from_source[vertex] + min_index = vertex + return min_index + + def dijkstra(self, source: int) -> None: + """ + Function that implements Dijkstra's single source shortest path algorithm for a + graph represented using adjacency matrix representation. + + >>> Graph(4).dijkstra(1) # doctest: +NORMALIZE_WHITESPACE + Vertex Distance from Source + 0 10000000 + 1 0 + 2 10000000 + 3 10000000 + """ + + distances = [int(1e7)] * self.vertices # distances from the source + distances[source] = 0 + visited = [False] * self.vertices + + for _ in range(self.vertices): + u = self.minimum_distance(distances, visited) + visited[u] = True + + # Update dist value of the adjacent vertices + # of the picked vertex only if the current + # distance is greater than new distance and + # the vertex in not in the shortest path tree + for v in range(self.vertices): + if ( + self.graph[u][v] > 0 + and visited[v] is False + and distances[v] > distances[u] + self.graph[u][v] + ): + distances[v] = distances[u] + self.graph[u][v] + + self.print_solution(distances) + + +if __name__ == "__main__": + graph = Graph(9) + graph.graph = [ + [0, 4, 0, 0, 0, 0, 0, 8, 0], + [4, 0, 8, 0, 0, 0, 0, 11, 0], + [0, 8, 0, 7, 0, 4, 0, 0, 2], + [0, 0, 7, 0, 9, 14, 0, 0, 0], + [0, 0, 0, 9, 0, 10, 0, 0, 0], + [0, 0, 4, 14, 10, 0, 2, 0, 0], + [0, 0, 0, 0, 0, 2, 0, 1, 6], + [8, 11, 0, 0, 0, 0, 1, 0, 7], + [0, 0, 2, 0, 0, 0, 6, 7, 0], + ] + graph.dijkstra(0) From 831280ceddb1e37bb0215fd32899a52acbbccf2d Mon Sep 17 00:00:00 2001 From: Alan Paul <57307037+Alanzz@users.noreply.github.com> Date: Thu, 20 Oct 2022 15:57:13 +0530 Subject: [PATCH 0580/1543] Add quantum_random.py (#7446) * Create quantum_random.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum_random.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum_random.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum/quantum_random.py Co-authored-by: Christian Clauss * Update quantum/quantum_random.py Co-authored-by: Christian Clauss * Update quantum/quantum_random.py Co-authored-by: Christian Clauss * Update quantum_random.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum_random.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * requirements.txt: Add projectq * Update quantum_random.py * Update quantum/quantum_random.py Co-authored-by: Christian Clauss * Update quantum_random.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum_random.py * Update quantum_random.py * Update quantum/quantum_random.py * Update quantum/quantum_random.py * Update quantum_random.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- quantum/quantum_random.py | 30 
++++++++++++++++++++++++++++++ requirements.txt | 1 + 2 files changed, 31 insertions(+) create mode 100644 quantum/quantum_random.py diff --git a/quantum/quantum_random.py b/quantum/quantum_random.py new file mode 100644 index 000000000000..01c8faa12ac0 --- /dev/null +++ b/quantum/quantum_random.py @@ -0,0 +1,30 @@ +import doctest + +import projectq +from projectq.ops import H, Measure + + +def get_random_number(quantum_engine: projectq.cengines._main.MainEngine) -> int: + """ + >>> isinstance(get_random_number(projectq.MainEngine()), int) + True + """ + qubit = quantum_engine.allocate_qubit() + H | qubit + Measure | qubit + return int(qubit) + + +if __name__ == "__main__": + doctest.testmod() + + # initialises a new quantum backend + quantum_engine = projectq.MainEngine() + + # Generate a list of 10 random numbers + random_numbers_list = [get_random_number(quantum_engine) for _ in range(10)] + + # Flushes the quantum engine from memory + quantum_engine.flush() + + print("Random numbers", random_numbers_list) diff --git a/requirements.txt b/requirements.txt index b14a3eb0157c..25d2b4ef93d5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,6 +7,7 @@ numpy opencv-python pandas pillow +projectq qiskit requests scikit-fuzzy From 42b56f2345ed4566ea48306d3a727f1aa5c88218 Mon Sep 17 00:00:00 2001 From: Modassir Afzal <60973906+Moddy2024@users.noreply.github.com> Date: Fri, 21 Oct 2022 03:29:11 +0530 Subject: [PATCH 0581/1543] XGBoost Classifier (#7106) * Fixes: #{6551} * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostclassifier.py * Update xgboostclassifier.py * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: #{6551} * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostclassifier.py * Fixes : #6551 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes : #6551 * Fixes : #6551 * Fixes: #6551 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostclassifier.py * Update xgboostclassifier.py * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: #6551 * Fixes #6551 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: {#6551} * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: {#6551} * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: #6551 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * FIXES: {#6551} * Fixes : { #6551} * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes : { #6551} * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: { #6551] * [pre-commit.ci] auto fixes from pre-commit.com hooks 
for more information, see https://pre-commit.ci * Update xgboostclassifier.py * Update xgboostclassifier.py * Apply suggestions from code review * Update xgboostclassifier.py * Update xgboostclassifier.py * Update xgboostclassifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: { #6551} * Update xgboostclassifier.py * Fixes: { #6551} * Update xgboostclassifier.py * Fixes: ( #6551) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: { #6551} Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- machine_learning/xgboostclassifier.py | 82 +++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 machine_learning/xgboostclassifier.py diff --git a/machine_learning/xgboostclassifier.py b/machine_learning/xgboostclassifier.py new file mode 100644 index 000000000000..bb5b48b7ab23 --- /dev/null +++ b/machine_learning/xgboostclassifier.py @@ -0,0 +1,82 @@ +# XGBoost Classifier Example +import numpy as np +from matplotlib import pyplot as plt +from sklearn.datasets import load_iris +from sklearn.metrics import plot_confusion_matrix +from sklearn.model_selection import train_test_split +from xgboost import XGBClassifier + + +def data_handling(data: dict) -> tuple: + # Split dataset into features and target + # data is features + """ + >>> data_handling(({'data':'[5.1, 3.5, 1.4, 0.2]','target':([0])})) + ('[5.1, 3.5, 1.4, 0.2]', [0]) + >>> data_handling( + ... {'data': '[4.9, 3.0, 1.4, 0.2], [4.7, 3.2, 1.3, 0.2]', 'target': ([0, 0])} + ... ) + ('[4.9, 3.0, 1.4, 0.2], [4.7, 3.2, 1.3, 0.2]', [0, 0]) + """ + return (data["data"], data["target"]) + + +def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier: + """ + >>> xgboost(np.array([[5.1, 3.6, 1.4, 0.2]]), np.array([0])) + XGBClassifier(base_score=0.5, booster='gbtree', callbacks=None, + colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1, + early_stopping_rounds=None, enable_categorical=False, + eval_metric=None, gamma=0, gpu_id=-1, grow_policy='depthwise', + importance_type=None, interaction_constraints='', + learning_rate=0.300000012, max_bin=256, max_cat_to_onehot=4, + max_delta_step=0, max_depth=6, max_leaves=0, min_child_weight=1, + missing=nan, monotone_constraints='()', n_estimators=100, + n_jobs=0, num_parallel_tree=1, predictor='auto', random_state=0, + reg_alpha=0, reg_lambda=1, ...) + """ + classifier = XGBClassifier() + classifier.fit(features, target) + return classifier + + +def main() -> None: + + """ + >>> main() + + Url for the algorithm: + https://xgboost.readthedocs.io/en/stable/ + Iris type dataset is used to demonstrate algorithm. 
+ """ + + # Load Iris dataset + iris = load_iris() + features, targets = data_handling(iris) + x_train, x_test, y_train, y_test = train_test_split( + features, targets, test_size=0.25 + ) + + names = iris["target_names"] + + # Create an XGBoost Classifier from the training data + xgboost_classifier = xgboost(x_train, y_train) + + # Display the confusion matrix of the classifier with both training and test sets + plot_confusion_matrix( + xgboost_classifier, + x_test, + y_test, + display_labels=names, + cmap="Blues", + normalize="true", + ) + plt.title("Normalized Confusion Matrix - IRIS Dataset") + plt.show() + + +if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) + main() From 717f0e46d950060f2147f022f65b7e44e72cfdd8 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Fri, 21 Oct 2022 20:03:57 +1300 Subject: [PATCH 0582/1543] Maclaurin series approximation of sin (#7451) * added maclaurin_sin.py function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added type hints and fixed line overflows * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed incompatable type examples * Update maths/maclaurin_sin.py Co-authored-by: Caeden Perelli-Harris * changed error details * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed grammatical errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * improved function accuracy and added test case * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/maclaurin_sin.py Co-authored-by: Christian Clauss * removed redundant return * fixed pytest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss --- maths/maclaurin_sin.py | 64 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 maths/maclaurin_sin.py diff --git a/maths/maclaurin_sin.py b/maths/maclaurin_sin.py new file mode 100644 index 000000000000..3c27ccf63d70 --- /dev/null +++ b/maths/maclaurin_sin.py @@ -0,0 +1,64 @@ +""" +https://en.wikipedia.org/wiki/Taylor_series#Trigonometric_functions +""" +from math import factorial, pi + + +def maclaurin_sin(theta: float, accuracy: int = 30) -> float: + """ + Finds the maclaurin approximation of sin + + :param theta: the angle to which sin is found + :param accuracy: the degree of accuracy wanted minimum ~ 1.5 theta + :return: the value of sine in radians + + + >>> from math import isclose, sin + >>> all(isclose(maclaurin_sin(x, 50), sin(x)) for x in range(-25, 25)) + True + >>> maclaurin_sin(10) + -0.544021110889369 + >>> maclaurin_sin(-10) + 0.5440211108893703 + >>> maclaurin_sin(10, 15) + -0.5440211108893689 + >>> maclaurin_sin(-10, 15) + 0.5440211108893703 + >>> maclaurin_sin("10") + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires either an int or float for theta + >>> maclaurin_sin(10, -30) + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires a positive int for accuracy + >>> maclaurin_sin(10, 30.5) + Traceback (most recent call last): + ... 
+ ValueError: maclaurin_sin() requires a positive int for accuracy + >>> maclaurin_sin(10, "30") + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires a positive int for accuracy + """ + + if not isinstance(theta, (int, float)): + raise ValueError("maclaurin_sin() requires either an int or float for theta") + + if not isinstance(accuracy, int) or accuracy <= 0: + raise ValueError("maclaurin_sin() requires a positive int for accuracy") + + theta = float(theta) + div = theta // (2 * pi) + theta -= 2 * div * pi + return sum( + (((-1) ** r) * ((theta ** (2 * r + 1)) / factorial(2 * r + 1))) + for r in range(accuracy) + ) + + +if __name__ == "__main__": + print(maclaurin_sin(10)) + print(maclaurin_sin(-10)) + print(maclaurin_sin(10, 15)) + print(maclaurin_sin(-10, 15)) From cc10b20beb8f0b10b50c84bd523bf41095fe9f37 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 22 Oct 2022 07:33:51 -0400 Subject: [PATCH 0583/1543] Remove some print statements within algorithmic functions (#7499) * Remove commented-out print statements in algorithmic functions * Encapsulate non-algorithmic code in __main__ * Remove unused print_matrix function * Remove print statement in __init__ * Remove print statement from doctest * Encapsulate non-algorithmic code in __main__ * Modify algorithm to return instead of print * Encapsulate non-algorithmic code in __main__ * Refactor data_safety_checker to return instead of print * updating DIRECTORY.md * updating DIRECTORY.md * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 4 + cellular_automata/game_of_life.py | 1 - digital_image_processing/index_calculation.py | 1 - divide_and_conquer/max_subarray_sum.py | 12 ++- .../strassen_matrix_multiplication.py | 3 +- dynamic_programming/longest_sub_array.py | 1 - dynamic_programming/max_non_adjacent_sum.py | 2 +- dynamic_programming/subset_generation.py | 9 +- dynamic_programming/sum_of_subset.py | 14 ++- machine_learning/forecasting/run.py | 96 ++++++++++--------- 10 files changed, 74 insertions(+), 69 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 1fad287988c4..70644d0639dc 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -360,6 +360,7 @@ * [Dijkstra](graphs/dijkstra.py) * [Dijkstra 2](graphs/dijkstra_2.py) * [Dijkstra Algorithm](graphs/dijkstra_algorithm.py) + * [Dijkstra Alternate](graphs/dijkstra_alternate.py) * [Dinic](graphs/dinic.py) * [Directed And Undirected (Weighted) Graph](graphs/directed_and_undirected_(weighted)_graph.py) * [Edmonds Karp Multiple Source And Sink](graphs/edmonds_karp_multiple_source_and_sink.py) @@ -460,6 +461,7 @@ * [Similarity Search](machine_learning/similarity_search.py) * [Support Vector Machines](machine_learning/support_vector_machines.py) * [Word Frequency Functions](machine_learning/word_frequency_functions.py) + * [Xgboostclassifier](machine_learning/xgboostclassifier.py) ## Maths * [3N Plus 1](maths/3n_plus_1.py) @@ -534,6 +536,7 @@ * [Line Length](maths/line_length.py) * [Lucas Lehmer Primality Test](maths/lucas_lehmer_primality_test.py) * [Lucas Series](maths/lucas_series.py) + * [Maclaurin Sin](maths/maclaurin_sin.py) * [Matrix Exponentiation](maths/matrix_exponentiation.py) * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) * [Median Of 
Two Arrays](maths/median_of_two_arrays.py) @@ -936,6 +939,7 @@ * [Not Gate](quantum/not_gate.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) + * [Quantum Random](quantum/quantum_random.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index c5324da73dbf..8e54702519b9 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -66,7 +66,6 @@ def run(canvas: list[list[bool]]) -> list[list[bool]]: next_gen_canvas = np.array(create_canvas(current_canvas.shape[0])) for r, row in enumerate(current_canvas): for c, pt in enumerate(row): - # print(r-1,r+2,c-1,c+2) next_gen_canvas[r][c] = __judge_point( pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2] ) diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index 01cd79fc18ff..be1855e99d10 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -105,7 +105,6 @@ class IndexCalculation: """ def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None): - # print("Numpy version: " + np.__version__) self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir) def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None): diff --git a/divide_and_conquer/max_subarray_sum.py b/divide_and_conquer/max_subarray_sum.py index 43f58086e078..f23e81719025 100644 --- a/divide_and_conquer/max_subarray_sum.py +++ b/divide_and_conquer/max_subarray_sum.py @@ -69,8 +69,10 @@ def max_subarray_sum(array, left, right): return max(left_half_sum, right_half_sum, cross_sum) -array = [-2, -5, 6, -2, -3, 1, 5, -6] -array_length = len(array) -print( - "Maximum sum of contiguous subarray:", max_subarray_sum(array, 0, array_length - 1) -) +if __name__ == "__main__": + array = [-2, -5, 6, -2, -3, 1, 5, -6] + array_length = len(array) + print( + "Maximum sum of contiguous subarray:", + max_subarray_sum(array, 0, array_length - 1), + ) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py index 0ee426e4b39a..371605d6d4d4 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -68,8 +68,7 @@ def matrix_dimensions(matrix: list) -> tuple[int, int]: def print_matrix(matrix: list) -> None: - for i in range(len(matrix)): - print(matrix[i]) + print("\n".join(str(line) for line in matrix)) def actual_strassen(matrix_a: list, matrix_b: list) -> list: diff --git a/dynamic_programming/longest_sub_array.py b/dynamic_programming/longest_sub_array.py index 30159a1386c3..b477acf61e66 100644 --- a/dynamic_programming/longest_sub_array.py +++ b/dynamic_programming/longest_sub_array.py @@ -14,7 +14,6 @@ class SubArray: def __init__(self, arr): # we need a list not a string, so do something to change the type self.array = arr.split(",") - print(("the input array is:", self.array)) def solve_sub_array(self): rear = [int(self.array[0])] * len(self.array) diff --git a/dynamic_programming/max_non_adjacent_sum.py b/dynamic_programming/max_non_adjacent_sum.py index 5362b22ca9dc..e3cc23f4983e 100644 --- a/dynamic_programming/max_non_adjacent_sum.py +++ b/dynamic_programming/max_non_adjacent_sum.py @@ -7,7 +7,7 @@ def maximum_non_adjacent_sum(nums: list[int]) -> int: """ Find 
the maximum non-adjacent sum of the integers in the nums input list - >>> print(maximum_non_adjacent_sum([1, 2, 3])) + >>> maximum_non_adjacent_sum([1, 2, 3]) 4 >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) 18 diff --git a/dynamic_programming/subset_generation.py b/dynamic_programming/subset_generation.py index 4781b23b32eb..819fd8106def 100644 --- a/dynamic_programming/subset_generation.py +++ b/dynamic_programming/subset_generation.py @@ -37,7 +37,8 @@ def print_combination(arr, n, r): combination_util(arr, n, r, 0, data, 0) -# Driver function to check for above function -arr = [10, 20, 30, 40, 50] -print_combination(arr, len(arr), 3) -# This code is contributed by Ambuj sahu +if __name__ == "__main__": + # Driver code to check the function above + arr = [10, 20, 30, 40, 50] + print_combination(arr, len(arr), 3) + # This code is contributed by Ambuj sahu diff --git a/dynamic_programming/sum_of_subset.py b/dynamic_programming/sum_of_subset.py index 77672b0b83e5..96ebcf583a4b 100644 --- a/dynamic_programming/sum_of_subset.py +++ b/dynamic_programming/sum_of_subset.py @@ -1,13 +1,14 @@ -def is_sum_subset(arr, arr_len, required_sum): +def is_sum_subset(arr: list[int], required_sum: int) -> bool: """ - >>> is_sum_subset([2, 4, 6, 8], 4, 5) + >>> is_sum_subset([2, 4, 6, 8], 5) False - >>> is_sum_subset([2, 4, 6, 8], 4, 14) + >>> is_sum_subset([2, 4, 6, 8], 14) True """ # a subset value says 1 if that subset sum can be formed else 0 # initially no subsets can be formed hence False/0 - subset = [[False for i in range(required_sum + 1)] for i in range(arr_len + 1)] + arr_len = len(arr) + subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 @@ -25,10 +26,7 @@ def is_sum_subset(arr, arr_len, required_sum): if arr[i - 1] <= j: subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] - # uncomment to print the subset - # for i in range(arrLen+1): - # print(subset[i]) - print(subset[arr_len][required_sum]) + return subset[arr_len][required_sum] if __name__ == "__main__": diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py index b11a230129eb..0909b76d8907 100644 --- a/machine_learning/forecasting/run.py +++ b/machine_learning/forecasting/run.py @@ -1,7 +1,7 @@ """ this is code for forecasting but i modified it and used it for safety checker of data -for ex: you have a online shop and for some reason some data are +for ex: you have an online shop and for some reason some data are missing (the amount of data that u expected are not supposed to be) then we can use it *ps : 1. ofc we can use normal statistic method but in this case @@ -91,14 +91,14 @@ def interquartile_range_checker(train_user: list) -> float: return low_lim -def data_safety_checker(list_vote: list, actual_result: float) -> None: +def data_safety_checker(list_vote: list, actual_result: float) -> bool: """ Used to review all the votes (list result prediction) and compare it to the actual result. input : list of predictions output : print whether it's safe or not - >>> data_safety_checker([2,3,4],5.0) - Today's data is not safe. 
+ >>> data_safety_checker([2, 3, 4], 5.0) + False """ safe = 0 not_safe = 0 @@ -107,50 +107,54 @@ def data_safety_checker(list_vote: list, actual_result: float) -> None: safe = not_safe + 1 else: if abs(abs(i) - abs(actual_result)) <= 0.1: - safe = safe + 1 + safe += 1 else: - not_safe = not_safe + 1 - print(f"Today's data is {'not ' if safe <= not_safe else ''}safe.") + not_safe += 1 + return safe > not_safe -# data_input_df = pd.read_csv("ex_data.csv", header=None) -data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]] -data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"]) +if __name__ == "__main__": + # data_input_df = pd.read_csv("ex_data.csv", header=None) + data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]] + data_input_df = pd.DataFrame( + data_input, columns=["total_user", "total_even", "days"] + ) -""" -data column = total user in a day, how much online event held in one day, -what day is that(sunday-saturday) -""" + """ + data column = total user in a day, how much online event held in one day, + what day is that(sunday-saturday) + """ -# start normalization -normalize_df = Normalizer().fit_transform(data_input_df.values) -# split data -total_date = normalize_df[:, 2].tolist() -total_user = normalize_df[:, 0].tolist() -total_match = normalize_df[:, 1].tolist() - -# for svr (input variable = total date and total match) -x = normalize_df[:, [1, 2]].tolist() -x_train = x[: len(x) - 1] -x_test = x[len(x) - 1 :] - -# for linear reression & sarimax -trn_date = total_date[: len(total_date) - 1] -trn_user = total_user[: len(total_user) - 1] -trn_match = total_match[: len(total_match) - 1] - -tst_date = total_date[len(total_date) - 1 :] -tst_user = total_user[len(total_user) - 1 :] -tst_match = total_match[len(total_match) - 1 :] - - -# voting system with forecasting -res_vote = [] -res_vote.append( - linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match) -) -res_vote.append(sarimax_predictor(trn_user, trn_match, tst_match)) -res_vote.append(support_vector_regressor(x_train, x_test, trn_user)) - -# check the safety of todays'data^^ -data_safety_checker(res_vote, tst_user) + # start normalization + normalize_df = Normalizer().fit_transform(data_input_df.values) + # split data + total_date = normalize_df[:, 2].tolist() + total_user = normalize_df[:, 0].tolist() + total_match = normalize_df[:, 1].tolist() + + # for svr (input variable = total date and total match) + x = normalize_df[:, [1, 2]].tolist() + x_train = x[: len(x) - 1] + x_test = x[len(x) - 1 :] + + # for linear regression & sarimax + trn_date = total_date[: len(total_date) - 1] + trn_user = total_user[: len(total_user) - 1] + trn_match = total_match[: len(total_match) - 1] + + tst_date = total_date[len(total_date) - 1 :] + tst_user = total_user[len(total_user) - 1 :] + tst_match = total_match[len(total_match) - 1 :] + + # voting system with forecasting + res_vote = [ + linear_regression_prediction( + trn_date, trn_user, trn_match, tst_date, tst_match + ), + sarimax_predictor(trn_user, trn_match, tst_match), + support_vector_regressor(x_train, x_test, trn_user), + ] + + # check the safety of today's data + not_str = "" if data_safety_checker(res_vote, tst_user) else "not " + print("Today's data is {not_str}safe.") From a5dd07c3707a0d3ebde0321ce7984082b3d322ff Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Sun, 23 Oct 2022 05:17:07 +1300 Subject: [PATCH 0584/1543] 
Maclaurin approximation of cos (#7507) * renamed maclaurin_sin.py to maclaurin_series.py and included function for cos approximation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * attempt to fix pytest error Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/maclaurin_series.py | 121 ++++++++++++++++++++++++++++++++++++++ maths/maclaurin_sin.py | 64 -------------------- 2 files changed, 121 insertions(+), 64 deletions(-) create mode 100644 maths/maclaurin_series.py delete mode 100644 maths/maclaurin_sin.py diff --git a/maths/maclaurin_series.py b/maths/maclaurin_series.py new file mode 100644 index 000000000000..57edc90bf676 --- /dev/null +++ b/maths/maclaurin_series.py @@ -0,0 +1,121 @@ +""" +https://en.wikipedia.org/wiki/Taylor_series#Trigonometric_functions +""" +from math import factorial, pi + + +def maclaurin_sin(theta: float, accuracy: int = 30) -> float: + """ + Finds the maclaurin approximation of sin + + :param theta: the angle to which sin is found + :param accuracy: the degree of accuracy wanted minimum + :return: the value of sine in radians + + + >>> from math import isclose, sin + >>> all(isclose(maclaurin_sin(x, 50), sin(x)) for x in range(-25, 25)) + True + >>> maclaurin_sin(10) + -0.544021110889369 + >>> maclaurin_sin(-10) + 0.5440211108893703 + >>> maclaurin_sin(10, 15) + -0.5440211108893689 + >>> maclaurin_sin(-10, 15) + 0.5440211108893703 + >>> maclaurin_sin("10") + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires either an int or float for theta + >>> maclaurin_sin(10, -30) + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires a positive int for accuracy + >>> maclaurin_sin(10, 30.5) + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires a positive int for accuracy + >>> maclaurin_sin(10, "30") + Traceback (most recent call last): + ... + ValueError: maclaurin_sin() requires a positive int for accuracy + """ + + if not isinstance(theta, (int, float)): + raise ValueError("maclaurin_sin() requires either an int or float for theta") + + if not isinstance(accuracy, int) or accuracy <= 0: + raise ValueError("maclaurin_sin() requires a positive int for accuracy") + + theta = float(theta) + div = theta // (2 * pi) + theta -= 2 * div * pi + return sum( + (((-1) ** r) * ((theta ** (2 * r + 1)) / factorial(2 * r + 1))) + for r in range(accuracy) + ) + + +def maclaurin_cos(theta: float, accuracy: int = 30) -> float: + """ + Finds the maclaurin approximation of cos + + :param theta: the angle to which cos is found + :param accuracy: the degree of accuracy wanted + :return: the value of cosine in radians + + + >>> from math import isclose, cos + >>> all(isclose(maclaurin_cos(x, 50), cos(x)) for x in range(-25, 25)) + True + >>> maclaurin_cos(5) + 0.28366218546322675 + >>> maclaurin_cos(-5) + 0.2836621854632266 + >>> maclaurin_cos(10, 15) + -0.8390715290764525 + >>> maclaurin_cos(-10, 15) + -0.8390715290764521 + >>> maclaurin_cos("10") + Traceback (most recent call last): + ... + ValueError: maclaurin_cos() requires either an int or float for theta + >>> maclaurin_cos(10, -30) + Traceback (most recent call last): + ... + ValueError: maclaurin_cos() requires a positive int for accuracy + >>> maclaurin_cos(10, 30.5) + Traceback (most recent call last): + ... 
+ ValueError: maclaurin_cos() requires a positive int for accuracy + >>> maclaurin_cos(10, "30") + Traceback (most recent call last): + ... + ValueError: maclaurin_cos() requires a positive int for accuracy + """ + + if not isinstance(theta, (int, float)): + raise ValueError("maclaurin_cos() requires either an int or float for theta") + + if not isinstance(accuracy, int) or accuracy <= 0: + raise ValueError("maclaurin_cos() requires a positive int for accuracy") + + theta = float(theta) + div = theta // (2 * pi) + theta -= 2 * div * pi + return sum( + (((-1) ** r) * ((theta ** (2 * r)) / factorial(2 * r))) for r in range(accuracy) + ) + + +if __name__ == "__main__": + print(maclaurin_sin(10)) + print(maclaurin_sin(-10)) + print(maclaurin_sin(10, 15)) + print(maclaurin_sin(-10, 15)) + + print(maclaurin_cos(5)) + print(maclaurin_cos(-5)) + print(maclaurin_cos(10, 15)) + print(maclaurin_cos(-10, 15)) diff --git a/maths/maclaurin_sin.py b/maths/maclaurin_sin.py deleted file mode 100644 index 3c27ccf63d70..000000000000 --- a/maths/maclaurin_sin.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -https://en.wikipedia.org/wiki/Taylor_series#Trigonometric_functions -""" -from math import factorial, pi - - -def maclaurin_sin(theta: float, accuracy: int = 30) -> float: - """ - Finds the maclaurin approximation of sin - - :param theta: the angle to which sin is found - :param accuracy: the degree of accuracy wanted minimum ~ 1.5 theta - :return: the value of sine in radians - - - >>> from math import isclose, sin - >>> all(isclose(maclaurin_sin(x, 50), sin(x)) for x in range(-25, 25)) - True - >>> maclaurin_sin(10) - -0.544021110889369 - >>> maclaurin_sin(-10) - 0.5440211108893703 - >>> maclaurin_sin(10, 15) - -0.5440211108893689 - >>> maclaurin_sin(-10, 15) - 0.5440211108893703 - >>> maclaurin_sin("10") - Traceback (most recent call last): - ... - ValueError: maclaurin_sin() requires either an int or float for theta - >>> maclaurin_sin(10, -30) - Traceback (most recent call last): - ... - ValueError: maclaurin_sin() requires a positive int for accuracy - >>> maclaurin_sin(10, 30.5) - Traceback (most recent call last): - ... - ValueError: maclaurin_sin() requires a positive int for accuracy - >>> maclaurin_sin(10, "30") - Traceback (most recent call last): - ... - ValueError: maclaurin_sin() requires a positive int for accuracy - """ - - if not isinstance(theta, (int, float)): - raise ValueError("maclaurin_sin() requires either an int or float for theta") - - if not isinstance(accuracy, int) or accuracy <= 0: - raise ValueError("maclaurin_sin() requires a positive int for accuracy") - - theta = float(theta) - div = theta // (2 * pi) - theta -= 2 * div * pi - return sum( - (((-1) ** r) * ((theta ** (2 * r + 1)) / factorial(2 * r + 1))) - for r in range(accuracy) - ) - - -if __name__ == "__main__": - print(maclaurin_sin(10)) - print(maclaurin_sin(-10)) - print(maclaurin_sin(10, 15)) - print(maclaurin_sin(-10, 15)) From ed127032b303d06f2c1ceefd58a8680bb4c2ce50 Mon Sep 17 00:00:00 2001 From: Akshit Gulyan <103456810+AkshitGulyan@users.noreply.github.com> Date: Sun, 23 Oct 2022 09:59:10 +0530 Subject: [PATCH 0585/1543] Created sum_of_harmonic_series.py (#7504) * Created sum_of_harmonic_series.py Here in this code the formula for Harmonic sum is not used, Sum of the series is calculated by creating a list of the elements in the given Harmonic series and adding all the elements of that list ! 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/sum_of_harmonic_series.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update maths/sum_of_harmonic_series.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update maths/sum_of_harmonic_series.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/sum_of_harmonic_series.py Co-authored-by: Christian Clauss * Update maths/sum_of_harmonic_series.py Co-authored-by: Christian Clauss * Update maths/sum_of_harmonic_series.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sum_of_harmonic_series.py * Add doctests * Update sum_of_harmonic_series.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/sum_of_harmonic_series.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 maths/sum_of_harmonic_series.py diff --git a/maths/sum_of_harmonic_series.py b/maths/sum_of_harmonic_series.py new file mode 100644 index 000000000000..9e0d6b19b95a --- /dev/null +++ b/maths/sum_of_harmonic_series.py @@ -0,0 +1,29 @@ +def sum_of_harmonic_progression( + first_term: float, common_difference: float, number_of_terms: int +) -> float: + """ + https://en.wikipedia.org/wiki/Harmonic_progression_(mathematics) + + Find the sum of n terms in an harmonic progression. The calculation starts with the + first_term and loops adding the common difference of Arithmetic Progression by which + the given Harmonic Progression is linked. + + >>> sum_of_harmonic_progression(1 / 2, 2, 2) + 0.75 + >>> sum_of_harmonic_progression(1 / 5, 5, 5) + 0.45666666666666667 + """ + arithmetic_progression = [1 / first_term] + first_term = 1 / first_term + for _ in range(number_of_terms - 1): + first_term += common_difference + arithmetic_progression.append(first_term) + harmonic_series = [1 / step for step in arithmetic_progression] + return sum(harmonic_series) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(sum_of_harmonic_progression(1 / 2, 2, 2)) From f32f78a9e0a4c2c1e2e9c985fd2375e7ede8925c Mon Sep 17 00:00:00 2001 From: Abhishek Chakraborty Date: Sun, 23 Oct 2022 03:42:02 -0700 Subject: [PATCH 0586/1543] Basic string grammar fix (#7534) * Grammar edit * Flake8 consistency fix * Apply suggestions from code review Co-authored-by: Christian Clauss --- genetic_algorithm/basic_string.py | 54 +++++++++++++++---------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index d2d305189983..3227adf53ae4 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -9,15 +9,15 @@ import random -# Maximum size of the population. bigger could be faster but is more memory expensive +# Maximum size of the population. Bigger could be faster but is more memory expensive. N_POPULATION = 200 -# Number of elements selected in every generation for evolution the selection takes -# place from the best to the worst of that generation must be smaller than N_POPULATION +# Number of elements selected in every generation of evolution. 
The selection takes +# place from best to worst of that generation and must be smaller than N_POPULATION. N_SELECTED = 50 -# Probability that an element of a generation can mutate changing one of its genes this -# guarantees that all genes will be used during evolution +# Probability that an element of a generation can mutate, changing one of its genes. +# This will guarantee that all genes will be used during evolution. MUTATION_PROBABILITY = 0.4 -# just a seed to improve randomness required by the algorithm +# Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) @@ -56,20 +56,20 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, f"{not_in_genes_list} is not in genes list, evolution cannot converge" ) - # Generate random starting population + # Generate random starting population. population = [] for _ in range(N_POPULATION): population.append("".join([random.choice(genes) for i in range(len(target))])) - # Just some logs to know what the algorithms is doing + # Just some logs to know what the algorithms is doing. generation, total_population = 0, 0 - # This loop will end when we will find a perfect match for our target + # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(population) - # Random population created now it's time to evaluate + # Random population created. Now it's time to evaluate. def evaluate(item: str, main_target: str = target) -> tuple[str, float]: """ Evaluate how similar the item is with the target by just @@ -92,17 +92,17 @@ def evaluate(item: str, main_target: str = target) -> tuple[str, float]: # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # - # but with a simple algorithm like this will probably be slower - # we just need to call evaluate for every item inside population + # but with a simple algorithm like this, it will probably be slower. + # We just need to call evaluate for every item inside the population. population_score = [evaluate(item) for item in population] - # Check if there is a matching evolution + # Check if there is a matching evolution. population_score = sorted(population_score, key=lambda x: x[1], reverse=True) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) - # Print the Best result every 10 generation - # just to know that the algorithm is working + # Print the best result every 10 generation. + # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f"\nGeneration: {generation}" @@ -111,21 +111,21 @@ def evaluate(item: str, main_target: str = target) -> tuple[str, float]: f"\nBest string: {population_score[0][0]}" ) - # Flush the old population keeping some of the best evolutions - # Keeping this avoid regression of evolution + # Flush the old population, keeping some of the best evolutions. + # Keeping this avoid regression of evolution. population_best = population[: int(N_POPULATION / 3)] population.clear() population.extend(population_best) - # Normalize population score from 0 to 1 + # Normalize population score to be between 0 and 1. population_score = [ (item, score / len(target)) for item, score in population_score ] - # Select, Crossover and Mutate a new population + # Select, crossover and mutate a new population. 
def select(parent_1: tuple[str, float]) -> list[str]: """Select the second parent and generate new population""" pop = [] - # Generate more child proportionally to the fitness score + # Generate more children proportionally to the fitness score. child_n = int(parent_1[1] * 100) + 1 child_n = 10 if child_n >= 10 else child_n for _ in range(child_n): @@ -134,32 +134,32 @@ def select(parent_1: tuple[str, float]) -> list[str]: ][0] child_1, child_2 = crossover(parent_1[0], parent_2) - # Append new string to the population list + # Append new string to the population list. pop.append(mutate(child_1)) pop.append(mutate(child_2)) return pop def crossover(parent_1: str, parent_2: str) -> tuple[str, str]: - """Slice and combine two string in a random point""" + """Slice and combine two string at a random point.""" random_slice = random.randint(0, len(parent_1) - 1) child_1 = parent_1[:random_slice] + parent_2[random_slice:] child_2 = parent_2[:random_slice] + parent_1[random_slice:] return (child_1, child_2) def mutate(child: str) -> str: - """Mutate a random gene of a child with another one from the list""" + """Mutate a random gene of a child with another one from the list.""" child_list = list(child) if random.uniform(0, 1) < MUTATION_PROBABILITY: child_list[random.randint(0, len(child)) - 1] = random.choice(genes) return "".join(child_list) - # This is Selection + # This is selection for i in range(N_SELECTED): population.extend(select(population_score[int(i)])) # Check if the population has already reached the maximum value and if so, - # break the cycle. if this check is disabled the algorithm will take - # forever to compute large strings but will also calculate small string in - # a lot fewer generations + # break the cycle. If this check is disabled, the algorithm will take + # forever to compute large strings, but will also calculate small strings in + # a far fewer generations. 
if len(population) > N_POPULATION: break From a0cbc2056e9b9ff4f8c5da682061996e783b13e3 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 23 Oct 2022 12:01:51 +0100 Subject: [PATCH 0587/1543] refactor: Make code more simple in maclaurin_series (#7522) --- maths/maclaurin_series.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/maths/maclaurin_series.py b/maths/maclaurin_series.py index 57edc90bf676..a2619d4e6b92 100644 --- a/maths/maclaurin_series.py +++ b/maths/maclaurin_series.py @@ -52,8 +52,7 @@ def maclaurin_sin(theta: float, accuracy: int = 30) -> float: div = theta // (2 * pi) theta -= 2 * div * pi return sum( - (((-1) ** r) * ((theta ** (2 * r + 1)) / factorial(2 * r + 1))) - for r in range(accuracy) + (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy) ) @@ -104,12 +103,14 @@ def maclaurin_cos(theta: float, accuracy: int = 30) -> float: theta = float(theta) div = theta // (2 * pi) theta -= 2 * div * pi - return sum( - (((-1) ** r) * ((theta ** (2 * r)) / factorial(2 * r))) for r in range(accuracy) - ) + return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy)) if __name__ == "__main__": + import doctest + + doctest.testmod() + print(maclaurin_sin(10)) print(maclaurin_sin(-10)) print(maclaurin_sin(10, 15)) From 1bbb0092f3fc311fac9e56e12c1fa223dbe16465 Mon Sep 17 00:00:00 2001 From: Arjit Arora <42044030+arjitarora26@users.noreply.github.com> Date: Sun, 23 Oct 2022 16:47:30 +0530 Subject: [PATCH 0588/1543] Add signum function (#7526) * Add signum function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add typehints for functions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update signum.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/signum.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 maths/signum.py diff --git a/maths/signum.py b/maths/signum.py new file mode 100644 index 000000000000..148f931767c1 --- /dev/null +++ b/maths/signum.py @@ -0,0 +1,34 @@ +""" +Signum function -- https://en.wikipedia.org/wiki/Sign_function +""" + + +def signum(num: float) -> int: + """ + Applies signum function on the number + + >>> signum(-10) + -1 + >>> signum(10) + 1 + >>> signum(0) + 0 + """ + if num < 0: + return -1 + return 1 if num else 0 + + +def test_signum() -> None: + """ + Tests the signum function + """ + assert signum(5) == 1 + assert signum(-5) == -1 + assert signum(0) == 0 + + +if __name__ == "__main__": + print(signum(12)) + print(signum(-12)) + print(signum(0)) From b092f9979f5afd3bd86cb46e891eb1f318b351d9 Mon Sep 17 00:00:00 2001 From: Modassir Afzal <60973906+Moddy2024@users.noreply.github.com> Date: Sun, 23 Oct 2022 17:17:19 +0530 Subject: [PATCH 0589/1543] XGB Regressor (#7107) * Fixes: #{6551} * Fixes: #{6551} * Update xgboostclassifier.py * Delete xgboostclassifier.py * Update xgboostregressor.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: #{6551} * Fixes : {#6551} * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes: {#6551] * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update xgboostregressor.py * [pre-commit.ci] auto fixes from pre-commit.com 
hooks for more information, see https://pre-commit.ci * Update xgboostregressor.py * Update xgboostregressor.py * Fixes: { #6551} * Update xgboostregressor.py * Fixes: { #6551} * Fixes: { #6551} * Update and rename xgboostregressor.py to xgboost_regressor.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- machine_learning/xgboost_regressor.py | 64 +++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 machine_learning/xgboost_regressor.py diff --git a/machine_learning/xgboost_regressor.py b/machine_learning/xgboost_regressor.py new file mode 100644 index 000000000000..023984fc1f59 --- /dev/null +++ b/machine_learning/xgboost_regressor.py @@ -0,0 +1,64 @@ +# XGBoost Regressor Example +import numpy as np +from sklearn.datasets import fetch_california_housing +from sklearn.metrics import mean_absolute_error, mean_squared_error +from sklearn.model_selection import train_test_split +from xgboost import XGBRegressor + + +def data_handling(data: dict) -> tuple: + # Split dataset into features and target. Data is features. + """ + >>> data_handling(( + ... {'data':'[ 8.3252 41. 6.9841269 1.02380952 322. 2.55555556 37.88 -122.23 ]' + ... ,'target':([4.526])})) + ('[ 8.3252 41. 6.9841269 1.02380952 322. 2.55555556 37.88 -122.23 ]', [4.526]) + """ + return (data["data"], data["target"]) + + +def xgboost( + features: np.ndarray, target: np.ndarray, test_features: np.ndarray +) -> np.ndarray: + """ + >>> xgboost(np.array([[ 2.3571 , 52. , 6.00813008, 1.06775068, + ... 907. , 2.45799458, 40.58 , -124.26]]),np.array([1.114]), + ... np.array([[1.97840000e+00, 3.70000000e+01, 4.98858447e+00, 1.03881279e+00, + ... 1.14300000e+03, 2.60958904e+00, 3.67800000e+01, -1.19780000e+02]])) + array([[1.1139996]], dtype=float32) + """ + xgb = XGBRegressor(verbosity=0, random_state=42) + xgb.fit(features, target) + # Predict target for test data + predictions = xgb.predict(test_features) + predictions = predictions.reshape(len(predictions), 1) + return predictions + + +def main() -> None: + """ + >>> main() + Mean Absolute Error : 0.30957163379906033 + Mean Square Error : 0.22611560196662744 + + The URL for this algorithm + https://xgboost.readthedocs.io/en/stable/ + California house price dataset is used to demonstrate the algorithm. 
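    Note: the error values above are illustrative; they can shift slightly with
    different xgboost/scikit-learn versions or hardware, even though
    random_state is fixed.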
+ """ + # Load California house price dataset + california = fetch_california_housing() + data, target = data_handling(california) + x_train, x_test, y_train, y_test = train_test_split( + data, target, test_size=0.25, random_state=1 + ) + predictions = xgboost(x_train, y_train, x_test) + # Error printing + print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}") + print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}") + + +if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) + main() From a3383ce3fd6bc30b01681503a4307df2462c8bd4 Mon Sep 17 00:00:00 2001 From: Pradyumn Singh Rahar Date: Sun, 23 Oct 2022 17:56:40 +0530 Subject: [PATCH 0590/1543] Reduced Time Complexity to O(sqrt(n)) (#7429) * Reduced Time Complexity to O(sqrt(n)) * Added testmod * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/factors.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/maths/factors.py b/maths/factors.py index e2fdc4063a13..ae2e5316cf65 100644 --- a/maths/factors.py +++ b/maths/factors.py @@ -1,3 +1,7 @@ +from doctest import testmod +from math import sqrt + + def factors_of_a_number(num: int) -> list: """ >>> factors_of_a_number(1) @@ -9,10 +13,22 @@ def factors_of_a_number(num: int) -> list: >>> factors_of_a_number(-24) [] """ - return [i for i in range(1, num + 1) if num % i == 0] + facs: list[int] = [] + if num < 1: + return facs + facs.append(1) + if num == 1: + return facs + facs.append(num) + for i in range(2, int(sqrt(num)) + 1): + if num % i == 0: # If i is a factor of num + facs.append(i) + d = num // i # num//i is the other factor of num + if d != i: # If d and i are distinct + facs.append(d) # we have found another factor + facs.sort() + return facs if __name__ == "__main__": - num = int(input("Enter a number to find its factors: ")) - factors = factors_of_a_number(num) - print(f"{num} has {len(factors)} factors: {', '.join(str(f) for f in factors)}") + testmod(name="factors_of_a_number", verbose=True) From a5362799a5e73e199cda7f1acec71d1e97addc97 Mon Sep 17 00:00:00 2001 From: Kevin Joven <59969678+KevinJoven11@users.noreply.github.com> Date: Sun, 23 Oct 2022 08:54:27 -0400 Subject: [PATCH 0591/1543] Create superdense_coding.py (#7349) * Create superdense_coding.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- quantum/superdense_coding.py | 102 +++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 quantum/superdense_coding.py diff --git a/quantum/superdense_coding.py b/quantum/superdense_coding.py new file mode 100644 index 000000000000..c8eda381158b --- /dev/null +++ b/quantum/superdense_coding.py @@ -0,0 +1,102 @@ +""" +Build the superdense coding protocol. This quantum +circuit can send two classical bits using one quantum +bit. This circuit is designed using the Qiskit +framework. This experiment run in IBM Q simulator +with 1000 shots. +. 
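The protocol assumes the two parties share an entangled Bell pair beforehand;
by applying one of four single-qubit operations (I, X, Z, or X followed by Z)
to her qubit alone, the sender encodes two classical bits, which the receiver
reads out by undoing the entanglement and measuring both qubits.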
+References: +https://qiskit.org/textbook/ch-algorithms/superdense-coding.html +https://en.wikipedia.org/wiki/Superdense_coding +""" + +import math + +import qiskit +from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute + + +def superdense_coding(bit_1: int = 1, bit_2: int = 1) -> qiskit.result.counts.Counts: + """ + The input refer to the classical message + that you wants to send. {'00','01','10','11'} + result for default values: {11: 1000} + ┌───┐ ┌───┐ + qr_0: ─────┤ X ├──────────┤ X ├───── + ┌───┐└─┬─┘┌───┐┌───┐└─┬─┘┌───┐ + qr_1: ┤ H ├──■──┤ X ├┤ Z ├──■──┤ H ├ + └───┘ └───┘└───┘ └───┘ + cr: 2/══════════════════════════════ + Args: + bit_1: bit 1 of classical information to send. + bit_2: bit 2 of classical information to send. + Returns: + qiskit.result.counts.Counts: counts of send state. + >>> superdense_coding(0,0) + {'00': 1000} + >>> superdense_coding(0,1) + {'01': 1000} + >>> superdense_coding(-1,0) + Traceback (most recent call last): + ... + ValueError: inputs must be positive. + >>> superdense_coding(1,'j') + Traceback (most recent call last): + ... + TypeError: inputs must be integers. + >>> superdense_coding(1,0.5) + Traceback (most recent call last): + ... + ValueError: inputs must be exact integers. + >>> superdense_coding(2,1) + Traceback (most recent call last): + ... + ValueError: inputs must be less or equal to 1. + """ + if (type(bit_1) == str) or (type(bit_2) == str): + raise TypeError("inputs must be integers.") + if (bit_1 < 0) or (bit_2 < 0): + raise ValueError("inputs must be positive.") + if (math.floor(bit_1) != bit_1) or (math.floor(bit_2) != bit_2): + raise ValueError("inputs must be exact integers.") + if (bit_1 > 1) or (bit_2 > 1): + raise ValueError("inputs must be less or equal to 1.") + + # build registers + qr = QuantumRegister(2, "qr") + cr = ClassicalRegister(2, "cr") + + quantum_circuit = QuantumCircuit(qr, cr) + + # entanglement the qubits + quantum_circuit.h(1) + quantum_circuit.cx(1, 0) + + # send the information + c_information = str(bit_1) + str(bit_2) + + if c_information == "11": + quantum_circuit.x(1) + quantum_circuit.z(1) + elif c_information == "10": + quantum_circuit.z(1) + elif c_information == "01": + quantum_circuit.x(1) + else: + quantum_circuit.i(1) + + # unentangled the circuit + quantum_circuit.cx(1, 0) + quantum_circuit.h(1) + + # measure the circuit + quantum_circuit.measure(qr, cr) + + backend = Aer.get_backend("qasm_simulator") + job = execute(quantum_circuit, backend, shots=1000) + + return job.result().get_counts(quantum_circuit) + + +if __name__ == "__main__": + print(f"Counts for classical state send: {superdense_coding(1,1)}") From d5f322f5764f42fc846fbcdaefac238a9ab62c7f Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 23 Oct 2022 14:06:12 +0100 Subject: [PATCH 0592/1543] fix: Replace deprecated `qasm_simulator` with `aer_simulator` (#7308) (#7556) --- quantum/superdense_coding.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quantum/superdense_coding.py b/quantum/superdense_coding.py index c8eda381158b..10ebc2d3593c 100644 --- a/quantum/superdense_coding.py +++ b/quantum/superdense_coding.py @@ -92,7 +92,7 @@ def superdense_coding(bit_1: int = 1, bit_2: int = 1) -> qiskit.result.counts.Co # measure the circuit quantum_circuit.measure(qr, cr) - backend = Aer.get_backend("qasm_simulator") + backend = Aer.get_backend("aer_simulator") job = execute(quantum_circuit, backend, shots=1000) return job.result().get_counts(quantum_circuit) From 
81ccf54c75edbf52cd2b5bd4e139cba3b6e5e5ab Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 23 Oct 2022 15:09:25 +0200 Subject: [PATCH 0593/1543] Rename xgboostclassifier.py to xgboost_classifier.py (#7550) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 7 +++++-- .../{xgboostclassifier.py => xgboost_classifier.py} | 0 2 files changed, 5 insertions(+), 2 deletions(-) rename machine_learning/{xgboostclassifier.py => xgboost_classifier.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 70644d0639dc..3fd1a3c383d7 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -461,7 +461,8 @@ * [Similarity Search](machine_learning/similarity_search.py) * [Support Vector Machines](machine_learning/support_vector_machines.py) * [Word Frequency Functions](machine_learning/word_frequency_functions.py) - * [Xgboostclassifier](machine_learning/xgboostclassifier.py) + * [Xgboost Classifier](machine_learning/xgboost_classifier.py) + * [Xgboost Regressor](machine_learning/xgboost_regressor.py) ## Maths * [3N Plus 1](maths/3n_plus_1.py) @@ -536,7 +537,7 @@ * [Line Length](maths/line_length.py) * [Lucas Lehmer Primality Test](maths/lucas_lehmer_primality_test.py) * [Lucas Series](maths/lucas_series.py) - * [Maclaurin Sin](maths/maclaurin_sin.py) + * [Maclaurin Series](maths/maclaurin_series.py) * [Matrix Exponentiation](maths/matrix_exponentiation.py) * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) * [Median Of Two Arrays](maths/median_of_two_arrays.py) @@ -582,6 +583,7 @@ * [P Series](maths/series/p_series.py) * [Sieve Of Eratosthenes](maths/sieve_of_eratosthenes.py) * [Sigmoid](maths/sigmoid.py) + * [Signum](maths/signum.py) * [Simpson Rule](maths/simpson_rule.py) * [Sin](maths/sin.py) * [Sock Merchant](maths/sock_merchant.py) @@ -590,6 +592,7 @@ * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) * [Sum Of Digits](maths/sum_of_digits.py) * [Sum Of Geometric Progression](maths/sum_of_geometric_progression.py) + * [Sum Of Harmonic Series](maths/sum_of_harmonic_series.py) * [Sylvester Sequence](maths/sylvester_sequence.py) * [Test Prime Check](maths/test_prime_check.py) * [Trapezoidal Rule](maths/trapezoidal_rule.py) diff --git a/machine_learning/xgboostclassifier.py b/machine_learning/xgboost_classifier.py similarity index 100% rename from machine_learning/xgboostclassifier.py rename to machine_learning/xgboost_classifier.py From 0f06a0b5ff43c4cfa98db33926d21ce688b69a10 Mon Sep 17 00:00:00 2001 From: Sagar Giri Date: Sun, 23 Oct 2022 23:35:27 +0900 Subject: [PATCH 0594/1543] Add web program to fetch top 10 real time billionaires using the forbes API. (#7538) * Add web program to fetch top 10 realtime billioners using forbes API. * Provide return type to function. * Use rich for tables and minor refactors. * Fix tiny typo. * Add the top {LIMIT} in rich table title. * Update web_programming/get_top_billioners.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change the API path. 
* Update get_top_billioners.py Co-authored-by: Caeden Perelli-Harris Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- requirements.txt | 1 + web_programming/get_top_billioners.py | 84 +++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) create mode 100644 web_programming/get_top_billioners.py diff --git a/requirements.txt b/requirements.txt index 25d2b4ef93d5..9ffe784c945d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,7 @@ pillow projectq qiskit requests +rich scikit-fuzzy sklearn statsmodels diff --git a/web_programming/get_top_billioners.py b/web_programming/get_top_billioners.py new file mode 100644 index 000000000000..514ea1db9789 --- /dev/null +++ b/web_programming/get_top_billioners.py @@ -0,0 +1,84 @@ +""" +CAUTION: You may get a json.decoding error. This works for some of us but fails for others. +""" + +from datetime import datetime + +import requests +from rich import box +from rich import console as rich_console +from rich import table as rich_table + +LIMIT = 10 +TODAY = datetime.now() + +API_URL = ( + "https://www.forbes.com/forbesapi/person/rtb/0/position/true.json" + "?fields=personName,gender,source,countryOfCitizenship,birthDate,finalWorth" + f"&limit={LIMIT}" +) + + +def calculate_age(unix_date: int) -> str: + """Calculates age from given unix time format. + + Returns: + Age as string + + >>> calculate_age(-657244800000) + '73' + >>> calculate_age(46915200000) + '51' + """ + birthdate = datetime.fromtimestamp(unix_date / 1000).date() + return str( + TODAY.year + - birthdate.year + - ((TODAY.month, TODAY.day) < (birthdate.month, birthdate.day)) + ) + + +def get_forbes_real_time_billionaires() -> list[dict[str, str]]: + """Get top 10 realtime billionaires using forbes API. + + Returns: + List of top 10 realtime billionaires data. + """ + response_json = requests.get(API_URL).json() + return [ + { + "Name": person["personName"], + "Source": person["source"], + "Country": person["countryOfCitizenship"], + "Gender": person["gender"], + "Worth ($)": f"{person['finalWorth'] / 1000:.1f} Billion", + "Age": calculate_age(person["birthDate"]), + } + for person in response_json["personList"]["personsLists"] + ] + + +def display_billionaires(forbes_billionaires: list[dict[str, str]]) -> None: + """Display Forbes real time billionaires in a rich table. 
+ + Args: + forbes_billionaires (list): Forbes top 10 real time billionaires + """ + + table = rich_table.Table( + title=f"Forbes Top {LIMIT} Real Time Billionaires at {TODAY:%Y-%m-%d %H:%M}", + style="green", + highlight=True, + box=box.SQUARE, + ) + for key in forbes_billionaires[0]: + table.add_column(key) + + for billionaire in forbes_billionaires: + table.add_row(*billionaire.values()) + + rich_console.Console().print(table) + + +if __name__ == "__main__": + display_billionaires(get_forbes_real_time_billionaires()) From 393b9605259fe19e03bdaac2b0866151e1a2afc2 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 23 Oct 2022 15:36:10 +0100 Subject: [PATCH 0595/1543] refactor: Replace doctest traceback with `...` (#7558) --- conversions/pressure_conversions.py | 6 +----- electronics/carrier_concentration.py | 8 ++++---- electronics/electric_power.py | 6 +++--- maths/nevilles_method.py | 3 +-- 4 files changed, 9 insertions(+), 14 deletions(-) diff --git a/conversions/pressure_conversions.py b/conversions/pressure_conversions.py index 2018080b9327..e0cd18d234ba 100644 --- a/conversions/pressure_conversions.py +++ b/conversions/pressure_conversions.py @@ -56,11 +56,7 @@ def pressure_conversion(value: float, from_type: str, to_type: str) -> float: 0.019336718261000002 >>> pressure_conversion(4, "wrongUnit", "atm") Traceback (most recent call last): - File "/usr/lib/python3.8/doctest.py", line 1336, in __run - exec(compile(example.source, filename, "single", - File "", line 1, in - pressure_conversion(4, "wrongUnit", "atm") - File "", line 67, in pressure_conversion + ... ValueError: Invalid 'from_type' value: 'wrongUnit' Supported values are: atm, pascal, bar, kilopascal, megapascal, psi, inHg, torr """ diff --git a/electronics/carrier_concentration.py b/electronics/carrier_concentration.py index 03482f1e336e..1fb9f2430dcd 100644 --- a/electronics/carrier_concentration.py +++ b/electronics/carrier_concentration.py @@ -25,19 +25,19 @@ def carrier_concentration( ('hole_conc', 1440.0) >>> carrier_concentration(electron_conc=1000, hole_conc=400, intrinsic_conc=1200) Traceback (most recent call last): - File "", line 37, in + ... ValueError: You cannot supply more or less than 2 values >>> carrier_concentration(electron_conc=-1000, hole_conc=0, intrinsic_conc=1200) Traceback (most recent call last): - File "", line 40, in + ... ValueError: Electron concentration cannot be negative in a semiconductor >>> carrier_concentration(electron_conc=0, hole_conc=-400, intrinsic_conc=1200) Traceback (most recent call last): - File "", line 44, in + ... ValueError: Hole concentration cannot be negative in a semiconductor >>> carrier_concentration(electron_conc=0, hole_conc=400, intrinsic_conc=-1200) Traceback (most recent call last): - File "", line 48, in + ... ValueError: Intrinsic concentration cannot be negative in a semiconductor """ if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1: diff --git a/electronics/electric_power.py b/electronics/electric_power.py index ac673d7e3a94..e59795601791 100644 --- a/electronics/electric_power.py +++ b/electronics/electric_power.py @@ -17,15 +17,15 @@ def electric_power(voltage: float, current: float, power: float) -> tuple: result(name='power', value=6.0) >>> electric_power(voltage=2, current=4, power=2) Traceback (most recent call last): - File "", line 15, in + ... ValueError: Only one argument must be 0 >>> electric_power(voltage=0, current=0, power=2) Traceback (most recent call last): - File "", line 19, in + ... 
ValueError: Only one argument must be 0 >>> electric_power(voltage=0, current=2, power=-4) Traceback (most recent call last): - File "", line 23, in >> electric_power(voltage=2.2, current=2.2, power=0) result(name='power', value=4.84) diff --git a/maths/nevilles_method.py b/maths/nevilles_method.py index 5583e4269b32..1f48b43fbd22 100644 --- a/maths/nevilles_method.py +++ b/maths/nevilles_method.py @@ -31,8 +31,7 @@ def neville_interpolate(x_points: list, y_points: list, x0: int) -> list: 104.0 >>> neville_interpolate((1,2,3,4,6), (6,7,8,9,11), '') Traceback (most recent call last): - File "", line 1, in - ... + ... TypeError: unsupported operand type(s) for -: 'str' and 'int' """ n = len(x_points) From 10b6e7a658c4664ce823cc1d0f159cd717b506db Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 23 Oct 2022 16:14:45 +0100 Subject: [PATCH 0596/1543] fix: Fix line too long in doctest (#7566) --- web_programming/get_top_billioners.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web_programming/get_top_billioners.py b/web_programming/get_top_billioners.py index 514ea1db9789..6a8054e26270 100644 --- a/web_programming/get_top_billioners.py +++ b/web_programming/get_top_billioners.py @@ -1,5 +1,6 @@ """ -CAUTION: You may get a json.decoding error. This works for some of us but fails for others. +CAUTION: You may get a json.decoding error. +This works for some of us but fails for others. """ from datetime import datetime From 0dc95c0a6be06f33153e8fcd84d2c854dac7a353 Mon Sep 17 00:00:00 2001 From: SwayamSahu <91021799+SwayamSahu@users.noreply.github.com> Date: Sun, 23 Oct 2022 21:30:59 +0530 Subject: [PATCH 0597/1543] Update comments in check_pangram.py script (#7564) * Update comments in check_pangram.py script * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename check_pangram.py to is_pangram.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- strings/check_pangram.py | 74 ------------------------------- strings/is_pangram.py | 95 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 95 insertions(+), 74 deletions(-) delete mode 100644 strings/check_pangram.py create mode 100644 strings/is_pangram.py diff --git a/strings/check_pangram.py b/strings/check_pangram.py deleted file mode 100644 index 81384bfd4cc6..000000000000 --- a/strings/check_pangram.py +++ /dev/null @@ -1,74 +0,0 @@ -""" -wiki: https://en.wikipedia.org/wiki/Pangram -""" - - -def check_pangram( - input_str: str = "The quick brown fox jumps over the lazy dog", -) -> bool: - """ - A Pangram String contains all the alphabets at least once. 
- >>> check_pangram("The quick brown fox jumps over the lazy dog") - True - >>> check_pangram("Waltz, bad nymph, for quick jigs vex.") - True - >>> check_pangram("Jived fox nymph grabs quick waltz.") - True - >>> check_pangram("My name is Unknown") - False - >>> check_pangram("The quick brown fox jumps over the la_y dog") - False - >>> check_pangram() - True - """ - frequency = set() - input_str = input_str.replace( - " ", "" - ) # Replacing all the Whitespaces in our sentence - for alpha in input_str: - if "a" <= alpha.lower() <= "z": - frequency.add(alpha.lower()) - - return True if len(frequency) == 26 else False - - -def check_pangram_faster( - input_str: str = "The quick brown fox jumps over the lazy dog", -) -> bool: - """ - >>> check_pangram_faster("The quick brown fox jumps over the lazy dog") - True - >>> check_pangram_faster("Waltz, bad nymph, for quick jigs vex.") - True - >>> check_pangram_faster("Jived fox nymph grabs quick waltz.") - True - >>> check_pangram_faster("The quick brown fox jumps over the la_y dog") - False - >>> check_pangram_faster() - True - """ - flag = [False] * 26 - for char in input_str: - if char.islower(): - flag[ord(char) - 97] = True - elif char.isupper(): - flag[ord(char) - 65] = True - return all(flag) - - -def benchmark() -> None: - """ - Benchmark code comparing different version. - """ - from timeit import timeit - - setup = "from __main__ import check_pangram, check_pangram_faster" - print(timeit("check_pangram()", setup=setup)) - print(timeit("check_pangram_faster()", setup=setup)) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() - benchmark() diff --git a/strings/is_pangram.py b/strings/is_pangram.py new file mode 100644 index 000000000000..c8b894b7ea31 --- /dev/null +++ b/strings/is_pangram.py @@ -0,0 +1,95 @@ +""" +wiki: https://en.wikipedia.org/wiki/Pangram +""" + + +def is_pangram( + input_str: str = "The quick brown fox jumps over the lazy dog", +) -> bool: + """ + A Pangram String contains all the alphabets at least once. 
+ >>> is_pangram("The quick brown fox jumps over the lazy dog") + True + >>> is_pangram("Waltz, bad nymph, for quick jigs vex.") + True + >>> is_pangram("Jived fox nymph grabs quick waltz.") + True + >>> is_pangram("My name is Unknown") + False + >>> is_pangram("The quick brown fox jumps over the la_y dog") + False + >>> is_pangram() + True + """ + # Declare frequency as a set to have unique occurrences of letters + frequency = set() + + # Replace all the whitespace in our sentence + input_str = input_str.replace(" ", "") + for alpha in input_str: + if "a" <= alpha.lower() <= "z": + frequency.add(alpha.lower()) + return len(frequency) == 26 + + +def is_pangram_faster( + input_str: str = "The quick brown fox jumps over the lazy dog", +) -> bool: + """ + >>> is_pangram_faster("The quick brown fox jumps over the lazy dog") + True + >>> is_pangram_faster("Waltz, bad nymph, for quick jigs vex.") + True + >>> is_pangram_faster("Jived fox nymph grabs quick waltz.") + True + >>> is_pangram_faster("The quick brown fox jumps over the la_y dog") + False + >>> is_pangram_faster() + True + """ + flag = [False] * 26 + for char in input_str: + if char.islower(): + flag[ord(char) - 97] = True + elif char.isupper(): + flag[ord(char) - 65] = True + return all(flag) + + +def is_pangram_fastest( + input_str: str = "The quick brown fox jumps over the lazy dog", +) -> bool: + """ + >>> is_pangram_fastest("The quick brown fox jumps over the lazy dog") + True + >>> is_pangram_fastest("Waltz, bad nymph, for quick jigs vex.") + True + >>> is_pangram_fastest("Jived fox nymph grabs quick waltz.") + True + >>> is_pangram_fastest("The quick brown fox jumps over the la_y dog") + False + >>> is_pangram_fastest() + True + """ + return len({char for char in input_str.lower() if char.isalpha()}) == 26 + + +def benchmark() -> None: + """ + Benchmark code comparing different version. 
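    Each timeit() call below uses the library default of number=1_000_000, so
    the printed figures are total seconds for one million calls of each variant.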
+ """ + from timeit import timeit + + setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest" + print(timeit("is_pangram()", setup=setup)) + print(timeit("is_pangram_faster()", setup=setup)) + print(timeit("is_pangram_fastest()", setup=setup)) + # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 + # 5.036091582966037, 2.644472333951853, 1.8869528750656173 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + benchmark() From b8b63469efff57b8cb3c6e4aec4279c8e864b8db Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 23 Oct 2022 18:12:49 +0200 Subject: [PATCH 0598/1543] My favorite palindrome (#7455) * My favorite palindrome * updating DIRECTORY.md * Update is_palindrome.py * Update is_palindrome.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update strings/is_palindrome.py Co-authored-by: Caeden Perelli-Harris * Update is_palindrome.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- strings/is_palindrome.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/strings/is_palindrome.py b/strings/is_palindrome.py index 4776a5fc29c4..5758af0cef9b 100644 --- a/strings/is_palindrome.py +++ b/strings/is_palindrome.py @@ -1,9 +1,8 @@ def is_palindrome(s: str) -> bool: """ - Determine whether the string is palindrome - :param s: - :return: Boolean - >>> is_palindrome("a man a plan a canal panama".replace(" ", "")) + Determine if the string s is a palindrome. + + >>> is_palindrome("A man, A plan, A canal -- Panama!") True >>> is_palindrome("Hello") False @@ -14,15 +13,15 @@ def is_palindrome(s: str) -> bool: >>> is_palindrome("Mr. Owl ate my metal worm?") True """ - # Since Punctuation, capitalization, and spaces are usually ignored while checking - # Palindrome, we first remove them from our string. - s = "".join([character for character in s.lower() if character.isalnum()]) + # Since punctuation, capitalization, and spaces are often ignored while checking + # palindromes, we first remove them from our string. 
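    # e.g. "A man, A plan, A canal -- Panama!" -> "amanaplanacanalpanama"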
+ s = "".join(character for character in s.lower() if character.isalnum()) return s == s[::-1] if __name__ == "__main__": - s = input("Enter string to determine whether its palindrome or not: ").strip() + s = input("Please enter a string to see if it is a palindrome: ") if is_palindrome(s): - print("Given string is palindrome") + print(f"'{s}' is a palindrome.") else: - print("Given string is not palindrome") + print(f"'{s}' is not a palindrome.") From 39a99b46f5e9b2c56951c22189a8ac3ea0730b01 Mon Sep 17 00:00:00 2001 From: Laukik Chahande <103280327+luciferx48@users.noreply.github.com> Date: Sun, 23 Oct 2022 22:56:22 +0530 Subject: [PATCH 0599/1543] check whether integer is even or odd using bit manupulation (#7099) * even_or_not file added * Updated DIRECTORY.md * modified DIRECTORY.md * Update bit_manipulation/even_or_not.py * updating DIRECTORY.md * Rename even_or_not.py to is_even.py * updating DIRECTORY.md Co-authored-by: luciferx48 Co-authored-by: Christian Clauss Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + bit_manipulation/is_even.py | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 bit_manipulation/is_even.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 3fd1a3c383d7..10e78a92c00f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -45,6 +45,7 @@ * [Count 1S Brian Kernighan Method](bit_manipulation/count_1s_brian_kernighan_method.py) * [Count Number Of One Bits](bit_manipulation/count_number_of_one_bits.py) * [Gray Code Sequence](bit_manipulation/gray_code_sequence.py) + * [Is Even](bit_manipulation/is_even.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) diff --git a/bit_manipulation/is_even.py b/bit_manipulation/is_even.py new file mode 100644 index 000000000000..b7b0841a1427 --- /dev/null +++ b/bit_manipulation/is_even.py @@ -0,0 +1,37 @@ +def is_even(number: int) -> bool: + """ + return true if the input integer is even + Explanation: Lets take a look at the following deicmal to binary conversions + 2 => 10 + 14 => 1110 + 100 => 1100100 + 3 => 11 + 13 => 1101 + 101 => 1100101 + from the above examples we can observe that + for all the odd integers there is always 1 set bit at the end + also, 1 in binary can be represented as 001, 00001, or 0000001 + so for any odd integer n => n&1 is always equlas 1 else the integer is even + + >>> is_even(1) + False + >>> is_even(4) + True + >>> is_even(9) + False + >>> is_even(15) + False + >>> is_even(40) + True + >>> is_even(100) + True + >>> is_even(101) + False + """ + return number & 1 == 0 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From e2a83b3bc66630cb2667375fba9de5c5baac3aca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nadirhan=20=C5=9Eahin?= Date: Sun, 23 Oct 2022 22:28:11 +0300 Subject: [PATCH 0600/1543] Update knapsack.py (#7271) * Update knapsack.py * Update dynamic_programming/knapsack.py Co-authored-by: Christian Clauss * Update knapsack.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/knapsack.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py index 093e15f49ba0..b12d30313e31 100644 --- 
a/dynamic_programming/knapsack.py +++ b/dynamic_programming/knapsack.py @@ -1,9 +1,9 @@ """ Given weights and values of n items, put these items in a knapsack of - capacity W to get the maximum total value in the knapsack. +capacity W to get the maximum total value in the knapsack. Note that only the integer weights 0-1 knapsack problem is solvable - using dynamic programming. +using dynamic programming. """ @@ -27,7 +27,7 @@ def mf_knapsack(i, wt, val, j): def knapsack(w, wt, val, n): - dp = [[0 for i in range(w + 1)] for j in range(n + 1)] + dp = [[0] * (w + 1) for _ in range(n + 1)] for i in range(1, n + 1): for w_ in range(1, w + 1): @@ -108,7 +108,7 @@ def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set): dp: list of list, the table of a solved integer weight dynamic programming problem wt: list or tuple, the vector of weights of the items - i: int, the index of the item under consideration + i: int, the index of the item under consideration j: int, the current possible maximum weight optimal_set: set, the optimal subset so far. This gets modified by the function. @@ -136,7 +136,7 @@ def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set): wt = [4, 3, 2, 3] n = 4 w = 6 - f = [[0] * (w + 1)] + [[0] + [-1 for i in range(w + 1)] for j in range(n + 1)] + f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] optimal_solution, _ = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w From bd490614a69cc9cdff367cb4a1775dd063c6e617 Mon Sep 17 00:00:00 2001 From: Arjit Arora <42044030+arjitarora26@users.noreply.github.com> Date: Mon, 24 Oct 2022 15:43:01 +0530 Subject: [PATCH 0601/1543] Add function for AND gate (#7593) --- boolean_algebra/and_gate.py | 48 +++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 boolean_algebra/and_gate.py diff --git a/boolean_algebra/and_gate.py b/boolean_algebra/and_gate.py new file mode 100644 index 000000000000..cbbcfde79f33 --- /dev/null +++ b/boolean_algebra/and_gate.py @@ -0,0 +1,48 @@ +""" +An AND Gate is a logic gate in boolean algebra which results to 1 (True) if both the +inputs are 1, and 0 (False) otherwise. + +Following is the truth table of an AND Gate: + ------------------------------ + | Input 1 | Input 2 | Output | + ------------------------------ + | 0 | 0 | 0 | + | 0 | 1 | 0 | + | 1 | 0 | 0 | + | 1 | 1 | 1 | + ------------------------------ + +Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ +""" + + +def and_gate(input_1: int, input_2: int) -> int: + """ + Calculate AND of the input values + + >>> and_gate(0, 0) + 0 + >>> and_gate(0, 1) + 0 + >>> and_gate(1, 0) + 0 + >>> and_gate(1, 1) + 1 + """ + return int((input_1, input_2).count(0) == 0) + + +def test_and_gate() -> None: + """ + Tests the and_gate function + """ + assert and_gate(0, 0) == 0 + assert and_gate(0, 1) == 0 + assert and_gate(1, 0) == 0 + assert and_gate(1, 1) == 1 + + +if __name__ == "__main__": + print(and_gate(0, 0)) + print(and_gate(0, 1)) + print(and_gate(1, 1)) From bb078541dd030b4957ee1b5ac87b7a31bf1a7235 Mon Sep 17 00:00:00 2001 From: JatinR05 <71865805+JatinR05@users.noreply.github.com> Date: Mon, 24 Oct 2022 15:43:39 +0530 Subject: [PATCH 0602/1543] Update count_number_of_one_bits.py (#7589) * Update count_number_of_one_bits.py removed the modulo operator as it is very time consuming in comparison to the and operator * Update count_number_of_one_bits.py Updated with the timeit library to compare. 
Moreover I have updated my code which helps us in reaching the output comparatively faster. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bit_manipulation/count_number_of_one_bits.py Co-authored-by: Christian Clauss * Update count_number_of_one_bits.py Updated the code * Update count_number_of_one_bits.py Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Run the tests before running the benchmarks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * consistently Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- bit_manipulation/count_number_of_one_bits.py | 79 +++++++++++++++++--- 1 file changed, 68 insertions(+), 11 deletions(-) diff --git a/bit_manipulation/count_number_of_one_bits.py b/bit_manipulation/count_number_of_one_bits.py index 51fd2b630483..a1687503a383 100644 --- a/bit_manipulation/count_number_of_one_bits.py +++ b/bit_manipulation/count_number_of_one_bits.py @@ -1,34 +1,91 @@ -def get_set_bits_count(number: int) -> int: +from timeit import timeit + + +def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int: """ Count the number of set bits in a 32 bit integer - >>> get_set_bits_count(25) + >>> get_set_bits_count_using_brian_kernighans_algorithm(25) 3 - >>> get_set_bits_count(37) + >>> get_set_bits_count_using_brian_kernighans_algorithm(37) 3 - >>> get_set_bits_count(21) + >>> get_set_bits_count_using_brian_kernighans_algorithm(21) 3 - >>> get_set_bits_count(58) + >>> get_set_bits_count_using_brian_kernighans_algorithm(58) 4 - >>> get_set_bits_count(0) + >>> get_set_bits_count_using_brian_kernighans_algorithm(0) 0 - >>> get_set_bits_count(256) + >>> get_set_bits_count_using_brian_kernighans_algorithm(256) 1 - >>> get_set_bits_count(-1) + >>> get_set_bits_count_using_brian_kernighans_algorithm(-1) Traceback (most recent call last): ... - ValueError: the value of input must be positive + ValueError: the value of input must not be negative """ if number < 0: - raise ValueError("the value of input must be positive") + raise ValueError("the value of input must not be negative") + result = 0 + while number: + number &= number - 1 + result += 1 + return result + + +def get_set_bits_count_using_modulo_operator(number: int) -> int: + """ + Count the number of set bits in a 32 bit integer + >>> get_set_bits_count_using_modulo_operator(25) + 3 + >>> get_set_bits_count_using_modulo_operator(37) + 3 + >>> get_set_bits_count_using_modulo_operator(21) + 3 + >>> get_set_bits_count_using_modulo_operator(58) + 4 + >>> get_set_bits_count_using_modulo_operator(0) + 0 + >>> get_set_bits_count_using_modulo_operator(256) + 1 + >>> get_set_bits_count_using_modulo_operator(-1) + Traceback (most recent call last): + ... + ValueError: the value of input must not be negative + """ + if number < 0: + raise ValueError("the value of input must not be negative") result = 0 while number: if number % 2 == 1: result += 1 - number = number >> 1 + number >>= 1 return result +def benchmark() -> None: + """ + Benchmark code for comparing 2 functions, with different length int values. + Brian Kernighan's algorithm is consistently faster than using modulo_operator. 
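    It is faster because number &= number - 1 clears the lowest set bit on each
    pass (e.g. 0b101000 & 0b100111 == 0b100000), so its loop runs once per set
    bit, while the modulo/shift version loops once per bit of the number.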
+ """ + + def do_benchmark(number: int) -> None: + setup = "import __main__ as z" + print(f"Benchmark when {number = }:") + print(f"{get_set_bits_count_using_modulo_operator(number) = }") + timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup) + print(f"timeit() runs in {timing} seconds") + print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }") + timing = timeit( + "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", + setup=setup, + ) + print(f"timeit() runs in {timing} seconds") + + for number in (25, 37, 58, 0): + do_benchmark(number) + print() + + if __name__ == "__main__": import doctest doctest.testmod() + benchmark() From d8ab8a0a0ebcb05783c93fe4ed04a940fc0b857f Mon Sep 17 00:00:00 2001 From: Carlos Villar Date: Mon, 24 Oct 2022 13:33:56 +0200 Subject: [PATCH 0603/1543] Add Spain National ID validator (#7574) (#7575) * Add Spain National ID validator (#7574) * is_spain_national_id() * Update is_spain_national_id.py * Some systems add a dash Co-authored-by: Christian Clauss --- strings/is_spain_national_id.py | 72 +++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 strings/is_spain_national_id.py diff --git a/strings/is_spain_national_id.py b/strings/is_spain_national_id.py new file mode 100644 index 000000000000..67f49755f412 --- /dev/null +++ b/strings/is_spain_national_id.py @@ -0,0 +1,72 @@ +NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter" +LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE" + + +def is_spain_national_id(spanish_id: str) -> bool: + """ + Spain National Id is a string composed by 8 numbers plus a letter + The letter in fact is not part of the ID, it acts as a validator, + checking you didn't do a mistake when entering it on a system or + are giving a fake one. + + https://en.wikipedia.org/wiki/Documento_Nacional_de_Identidad_(Spain)#Number + + >>> is_spain_national_id("12345678Z") + True + >>> is_spain_national_id("12345678z") # It is case-insensitive + True + >>> is_spain_national_id("12345678x") + False + >>> is_spain_national_id("12345678I") + False + >>> is_spain_national_id("12345678-Z") # Some systems add a dash + True + >>> is_spain_national_id("12345678") + Traceback (most recent call last): + ... + ValueError: Input must be a string of 8 numbers plus letter + >>> is_spain_national_id("123456709") + Traceback (most recent call last): + ... + ValueError: Input must be a string of 8 numbers plus letter + >>> is_spain_national_id("1234567--Z") + Traceback (most recent call last): + ... + ValueError: Input must be a string of 8 numbers plus letter + >>> is_spain_national_id("1234Z") + Traceback (most recent call last): + ... + ValueError: Input must be a string of 8 numbers plus letter + >>> is_spain_national_id("1234ZzZZ") + Traceback (most recent call last): + ... + ValueError: Input must be a string of 8 numbers plus letter + >>> is_spain_national_id(12345678) + Traceback (most recent call last): + ... 
+ TypeError: Expected string as input, found int + """ + + if not isinstance(spanish_id, str): + raise TypeError(f"Expected string as input, found {type(spanish_id).__name__}") + + spanish_id_clean = spanish_id.replace("-", "").upper() + if len(spanish_id_clean) != 9: + raise ValueError(NUMBERS_PLUS_LETTER) + + try: + number = int(spanish_id_clean[0:8]) + letter = spanish_id_clean[8] + except ValueError as ex: + raise ValueError(NUMBERS_PLUS_LETTER) from ex + + if letter.isdigit(): + raise ValueError(NUMBERS_PLUS_LETTER) + + return letter == LOOKUP_LETTERS[number % 23] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a041b64f7aaf7dd54f154ba1fb5cd10e3110c1eb Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 24 Oct 2022 16:29:49 +0300 Subject: [PATCH 0604/1543] feat: add Project Euler problem 073 solution 1 (#6273) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 ++ project_euler/problem_073/__init__.py | 0 project_euler/problem_073/sol1.py | 46 +++++++++++++++++++++++++++ 3 files changed, 48 insertions(+) create mode 100644 project_euler/problem_073/__init__.py create mode 100644 project_euler/problem_073/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 10e78a92c00f..16e6b7ae3e3e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -839,6 +839,8 @@ * Problem 072 * [Sol1](project_euler/problem_072/sol1.py) * [Sol2](project_euler/problem_072/sol2.py) + * Problem 073 + * [Sol1](project_euler/problem_073/sol1.py) * Problem 074 * [Sol1](project_euler/problem_074/sol1.py) * [Sol2](project_euler/problem_074/sol2.py) diff --git a/project_euler/problem_073/__init__.py b/project_euler/problem_073/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_073/sol1.py b/project_euler/problem_073/sol1.py new file mode 100644 index 000000000000..2b66b7d8769b --- /dev/null +++ b/project_euler/problem_073/sol1.py @@ -0,0 +1,46 @@ +""" +Project Euler Problem 73: https://projecteuler.net/problem=73 + +Consider the fraction, n/d, where n and d are positive integers. 
+If n int: + """ + Returns number of fractions lie between 1/3 and 1/2 in the sorted set + of reduced proper fractions for d ≤ max_d + + >>> solution(4) + 0 + + >>> solution(5) + 1 + + >>> solution(8) + 3 + """ + + fractions_number = 0 + for d in range(max_d + 1): + for n in range(d // 3 + 1, (d + 1) // 2): + if gcd(n, d) == 1: + fractions_number += 1 + return fractions_number + + +if __name__ == "__main__": + print(f"{solution() = }") From d407476531dd85db79e58aa2dd13d3b3031d8185 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 25 Oct 2022 03:57:03 +0300 Subject: [PATCH 0605/1543] fix: increase str conversion limit where required (#7604) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 8 +++++++- project_euler/problem_104/{sol.py.FIXME => sol1.py} | 10 +++++++--- 2 files changed, 14 insertions(+), 4 deletions(-) rename project_euler/problem_104/{sol.py.FIXME => sol1.py} (95%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 16e6b7ae3e3e..3e722a8784e5 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -55,6 +55,7 @@ * [Modular Division](blockchain/modular_division.py) ## Boolean Algebra + * [And Gate](boolean_algebra/and_gate.py) * [Norgate](boolean_algebra/norgate.py) * [Quine Mc Cluskey](boolean_algebra/quine_mc_cluskey.py) @@ -876,6 +877,8 @@ * [Sol1](project_euler/problem_101/sol1.py) * Problem 102 * [Sol1](project_euler/problem_102/sol1.py) + * Problem 104 + * [Sol1](project_euler/problem_104/sol1.py) * Problem 107 * [Sol1](project_euler/problem_107/sol1.py) * Problem 109 @@ -948,6 +951,7 @@ * [Quantum Random](quantum/quantum_random.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) + * [Superdense Coding](quantum/superdense_coding.py) ## Scheduling * [First Come First Served](scheduling/first_come_first_served.py) @@ -1037,7 +1041,6 @@ * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py) * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) - * [Check Pangram](strings/check_pangram.py) * [Credit Card Validator](strings/credit_card_validator.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) * [Dna](strings/dna.py) @@ -1046,6 +1049,8 @@ * [Indian Phone Validator](strings/indian_phone_validator.py) * [Is Contains Unique Chars](strings/is_contains_unique_chars.py) * [Is Palindrome](strings/is_palindrome.py) + * [Is Pangram](strings/is_pangram.py) + * [Is Spain National Id](strings/is_spain_national_id.py) * [Jaro Winkler](strings/jaro_winkler.py) * [Join](strings/join.py) * [Knuth Morris Pratt](strings/knuth_morris_pratt.py) @@ -1090,6 +1095,7 @@ * [Fetch Well Rx Price](web_programming/fetch_well_rx_price.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) + * [Get Top Billioners](web_programming/get_top_billioners.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) * [Get User Tweets](web_programming/get_user_tweets.py) * [Giphy](web_programming/giphy.py) diff --git a/project_euler/problem_104/sol.py.FIXME b/project_euler/problem_104/sol1.py similarity index 95% rename from project_euler/problem_104/sol.py.FIXME rename to project_euler/problem_104/sol1.py index 0818ac401c3a..60fd6fe99adb 100644 --- a/project_euler/problem_104/sol.py.FIXME +++ b/project_euler/problem_104/sol1.py @@ -13,6 +13,10 @@ the last nine digits are 1-9 pandigital, find k. 
""" +import sys + +sys.set_int_max_str_digits(0) # type: ignore + def check(number: int) -> bool: """ @@ -34,7 +38,7 @@ def check(number: int) -> bool: check_front = [0] * 11 # mark last 9 numbers - for x in range(9): + for _ in range(9): check_last[int(number % 10)] = 1 number = number // 10 # flag @@ -51,7 +55,7 @@ def check(number: int) -> bool: # mark first 9 numbers number = int(str(number)[:9]) - for x in range(9): + for _ in range(9): check_front[int(number % 10)] = 1 number = number // 10 @@ -81,7 +85,7 @@ def check1(number: int) -> bool: check_last = [0] * 11 # mark last 9 numbers - for x in range(9): + for _ in range(9): check_last[int(number % 10)] = 1 number = number // 10 # flag From a662d96196d58c2415d6a6933fa78a59996cc3fa Mon Sep 17 00:00:00 2001 From: Arjit Arora <42044030+arjitarora26@users.noreply.github.com> Date: Wed, 26 Oct 2022 00:56:53 +0530 Subject: [PATCH 0606/1543] Add function for xor gate (#7588) * Add function for xor gate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add test case for xor functions * Update boolean_algebra/xor_gate.py Co-authored-by: Christian Clauss * Update boolean_algebra/xor_gate.py Co-authored-by: Christian Clauss * Split long comment line into two lines * 88 characters per line Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- boolean_algebra/xor_gate.py | 46 +++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 boolean_algebra/xor_gate.py diff --git a/boolean_algebra/xor_gate.py b/boolean_algebra/xor_gate.py new file mode 100644 index 000000000000..db4f5b45c3c6 --- /dev/null +++ b/boolean_algebra/xor_gate.py @@ -0,0 +1,46 @@ +""" +A XOR Gate is a logic gate in boolean algebra which results to 1 (True) if only one of +the two inputs is 1, and 0 (False) if an even number of inputs are 1. 
+Following is the truth table of a XOR Gate: + ------------------------------ + | Input 1 | Input 2 | Output | + ------------------------------ + | 0 | 0 | 0 | + | 0 | 1 | 1 | + | 1 | 0 | 1 | + | 1 | 1 | 0 | + ------------------------------ + +Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ +""" + + +def xor_gate(input_1: int, input_2: int) -> int: + """ + calculate xor of the input values + + >>> xor_gate(0, 0) + 0 + >>> xor_gate(0, 1) + 1 + >>> xor_gate(1, 0) + 1 + >>> xor_gate(1, 1) + 0 + """ + return (input_1, input_2).count(0) % 2 + + +def test_xor_gate() -> None: + """ + Tests the xor_gate function + """ + assert xor_gate(0, 0) == 0 + assert xor_gate(0, 1) == 1 + assert xor_gate(1, 0) == 1 + assert xor_gate(1, 1) == 0 + + +if __name__ == "__main__": + print(xor_gate(0, 0)) + print(xor_gate(0, 1)) From cbdbe07ffd07619f1c3c5ab63ae6b2775e3c235d Mon Sep 17 00:00:00 2001 From: SparshRastogi <75373475+SparshRastogi@users.noreply.github.com> Date: Wed, 26 Oct 2022 01:13:02 +0530 Subject: [PATCH 0607/1543] Create kinetic_energy.py (#7620) * Create kinetic_energy.py Finding the kinetic energy of an object,by taking its mass and velocity as input * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update kinetic_energy.py * Update kinetic_energy.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- physics/kinetic_energy.py | 47 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 physics/kinetic_energy.py diff --git a/physics/kinetic_energy.py b/physics/kinetic_energy.py new file mode 100644 index 000000000000..535ffc219251 --- /dev/null +++ b/physics/kinetic_energy.py @@ -0,0 +1,47 @@ +""" +Find the kinetic energy of an object, give its mass and velocity +Description : In physics, the kinetic energy of an object is the energy that it +possesses due to its motion. It is defined as the work needed to accelerate a body of a +given mass from rest to its stated velocity. Having gained this energy during its +acceleration, the body maintains this kinetic energy unless its speed changes. The same +amount of work is done by the body when decelerating from its current speed to a state +of rest. Formally, a kinetic energy is any term in a system's Lagrangian which includes +a derivative with respect to time. + +In classical mechanics, the kinetic energy of a non-rotating object of mass m traveling +at a speed v is ½mv². In relativistic mechanics, this is a good approximation only when +v is much less than the speed of light. The standard unit of kinetic energy is the +joule, while the English unit of kinetic energy is the foot-pound. 
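For example, a 10 kg body moving at 10 m/s carries 0.5 * 10 * 10**2 = 500 J of
kinetic energy, which matches the first doctest below.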
+ +Reference : https://en.m.wikipedia.org/wiki/Kinetic_energy +""" + + +def kinetic_energy(mass: float, velocity: float) -> float: + """ + The kinetic energy of a non-rotating object of mass m traveling at a speed v is ½mv² + + >>> kinetic_energy(10,10) + 500.0 + >>> kinetic_energy(0,10) + 0.0 + >>> kinetic_energy(10,0) + 0.0 + >>> kinetic_energy(20,-20) + 4000.0 + >>> kinetic_energy(0,0) + 0.0 + >>> kinetic_energy(2,2) + 4.0 + >>> kinetic_energy(100,100) + 500000.0 + """ + if mass < 0: + raise ValueError("The mass of a body cannot be negative") + return 0.5 * mass * abs(velocity) * abs(velocity) + + +if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) From 450842321d30ab072a84aee15dfdbf199f9914dc Mon Sep 17 00:00:00 2001 From: Havish <100441982+havishs9@users.noreply.github.com> Date: Tue, 25 Oct 2022 12:47:52 -0700 Subject: [PATCH 0608/1543] Arc Length Algorithm (#7610) * Create decimal_conversions.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create arc_length.py * Delete decimal_conversions.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed redundant statement, fixed line overflow * Update arc_length.py Changed rad to radius as not to get confused with radians * Update arc_length.py * Update arc_length.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> --- maths/arc_length.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 maths/arc_length.py diff --git a/maths/arc_length.py b/maths/arc_length.py new file mode 100644 index 000000000000..9e87ca38cc7d --- /dev/null +++ b/maths/arc_length.py @@ -0,0 +1,15 @@ +from math import pi + + +def arc_length(angle: int, radius: int) -> float: + """ + >>> arc_length(45, 5) + 3.9269908169872414 + >>> arc_length(120, 15) + 31.415926535897928 + """ + return 2 * pi * radius * (angle / 360) + + +if __name__ == "__main__": + print(arc_length(90, 10)) From 103c9e0876490d6cf683ba2d3f89e5198647bc32 Mon Sep 17 00:00:00 2001 From: Karthik S <73390717+karthiks2611@users.noreply.github.com> Date: Wed, 26 Oct 2022 01:23:21 +0530 Subject: [PATCH 0609/1543] Added Implementation of NAND, OR ,XNOR and NOT gates in python (#7596) * Added Implementation for XNOR gate * Added Implementation for OR gate * Added implementation of NAND gate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added Implementation of NAND gate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated nand_gate.py * updated xnor_gate.py after some changes * Delete due to duplicate file * Updated xnor_gate.py * Added Implementation of NOT gate in python * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed a typo error * Updated to a new logic * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated nand_gate.py file Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- boolean_algebra/nand_gate.py | 47 +++++++++++++++++++++++++++++++++++ boolean_algebra/not_gate.py | 37 +++++++++++++++++++++++++++ boolean_algebra/or_gate.py | 46 ++++++++++++++++++++++++++++++++++ boolean_algebra/xnor_gate.py | 48 ++++++++++++++++++++++++++++++++++++ 4 files changed, 
178 insertions(+)
 create mode 100644 boolean_algebra/nand_gate.py
 create mode 100644 boolean_algebra/not_gate.py
 create mode 100644 boolean_algebra/or_gate.py
 create mode 100644 boolean_algebra/xnor_gate.py

diff --git a/boolean_algebra/nand_gate.py b/boolean_algebra/nand_gate.py
new file mode 100644
index 000000000000..ea3303d16b25
--- /dev/null
+++ b/boolean_algebra/nand_gate.py
@@ -0,0 +1,47 @@
+"""
+A NAND Gate is a logic gate in boolean algebra which results to 0 (False) if both
+the inputs are 1, and 1 (True) otherwise. It's similar to adding
+a NOT gate along with an AND gate.
+Following is the truth table of a NAND Gate:
+    ------------------------------
+    | Input 1 | Input 2 | Output |
+    ------------------------------
+    | 0 | 0 | 1 |
+    | 0 | 1 | 1 |
+    | 1 | 0 | 1 |
+    | 1 | 1 | 0 |
+    ------------------------------
+Refer - https://www.geeksforgeeks.org/logic-gates-in-python/
+"""
+
+
+def nand_gate(input_1: int, input_2: int) -> int:
+    """
+    Calculate NAND of the input values
+    >>> nand_gate(0, 0)
+    1
+    >>> nand_gate(0, 1)
+    1
+    >>> nand_gate(1, 0)
+    1
+    >>> nand_gate(1, 1)
+    0
+    """
+    return int((input_1, input_2).count(0) != 0)
+
+
+def test_nand_gate() -> None:
+    """
+    Tests the nand_gate function
+    """
+    assert nand_gate(0, 0) == 1
+    assert nand_gate(0, 1) == 1
+    assert nand_gate(1, 0) == 1
+    assert nand_gate(1, 1) == 0
+
+
+if __name__ == "__main__":
+    print(nand_gate(0, 0))
+    print(nand_gate(0, 1))
+    print(nand_gate(1, 0))
+    print(nand_gate(1, 1))
diff --git a/boolean_algebra/not_gate.py b/boolean_algebra/not_gate.py
new file mode 100644
index 000000000000..b41da602d936
--- /dev/null
+++ b/boolean_algebra/not_gate.py
@@ -0,0 +1,37 @@
+"""
+A NOT Gate is a logic gate in boolean algebra which results to 0 (False) if the
+input is high, and 1 (True) if the input is low.
+Following is the truth table of a NOT Gate:
+    ------------------------------
+    | Input | Output |
+    ------------------------------
+    | 0 | 1 |
+    | 1 | 0 |
+    ------------------------------
+Refer - https://www.geeksforgeeks.org/logic-gates-in-python/
+"""
+
+
+def not_gate(input_1: int) -> int:
+    """
+    Calculate NOT of the input values
+    >>> not_gate(0)
+    1
+    >>> not_gate(1)
+    0
+    """
+
+    return 1 if input_1 == 0 else 0
+
+
+def test_not_gate() -> None:
+    """
+    Tests the not_gate function
+    """
+    assert not_gate(0) == 1
+    assert not_gate(1) == 0
+
+
+if __name__ == "__main__":
+    print(not_gate(0))
+    print(not_gate(1))
diff --git a/boolean_algebra/or_gate.py b/boolean_algebra/or_gate.py
new file mode 100644
index 000000000000..aa7e6645e33f
--- /dev/null
+++ b/boolean_algebra/or_gate.py
@@ -0,0 +1,46 @@
+"""
+An OR Gate is a logic gate in boolean algebra which results to 0 (False) if both the
+inputs are 0, and 1 (True) otherwise.
+Following is the truth table of an OR Gate:
+    ------------------------------
+    | Input 1 | Input 2 | Output |
+    ------------------------------
+    | 0 | 0 | 0 |
+    | 0 | 1 | 1 |
+    | 1 | 0 | 1 |
+    | 1 | 1 | 1 |
+    ------------------------------
+Refer - https://www.geeksforgeeks.org/logic-gates-in-python/
+"""
+
+
+def or_gate(input_1: int, input_2: int) -> int:
+    """
+    Calculate OR of the input values
+    >>> or_gate(0, 0)
+    0
+    >>> or_gate(0, 1)
+    1
+    >>> or_gate(1, 0)
+    1
+    >>> or_gate(1, 1)
+    1
+    """
+    return int((input_1, input_2).count(1) != 0)
+
+
+def test_or_gate() -> None:
+    """
+    Tests the or_gate function
+    """
+    assert or_gate(0, 0) == 0
+    assert or_gate(0, 1) == 1
+    assert or_gate(1, 0) == 1
+    assert or_gate(1, 1) == 1
+
+
+if __name__ == "__main__":
+    print(or_gate(0, 1))
+    print(or_gate(1, 0))
+    print(or_gate(0, 0))
+    print(or_gate(1, 1))
diff --git a/boolean_algebra/xnor_gate.py b/boolean_algebra/xnor_gate.py
new file mode 100644
index 000000000000..45ab2700ec35
--- /dev/null
+++ b/boolean_algebra/xnor_gate.py
@@ -0,0 +1,48 @@
+"""
+A XNOR Gate is a logic gate in boolean algebra which results to 0 (False) if both the
+inputs are different, and 1 (True), if the inputs are same.
+It's similar to adding a NOT gate to an XOR gate
+
+Following is the truth table of a XNOR Gate:
+    ------------------------------
+    | Input 1 | Input 2 | Output |
+    ------------------------------
+    | 0 | 0 | 1 |
+    | 0 | 1 | 0 |
+    | 1 | 0 | 0 |
+    | 1 | 1 | 1 |
+    ------------------------------
+Refer - https://www.geeksforgeeks.org/logic-gates-in-python/
+"""
+
+
+def xnor_gate(input_1: int, input_2: int) -> int:
+    """
+    Calculate XNOR of the input values
+    >>> xnor_gate(0, 0)
+    1
+    >>> xnor_gate(0, 1)
+    0
+    >>> xnor_gate(1, 0)
+    0
+    >>> xnor_gate(1, 1)
+    1
+    """
+    return 1 if input_1 == input_2 else 0
+
+
+def test_xnor_gate() -> None:
+    """
+    Tests the xnor_gate function
+    """
+    assert xnor_gate(0, 0) == 1
+    assert xnor_gate(0, 1) == 0
+    assert xnor_gate(1, 0) == 0
+    assert xnor_gate(1, 1) == 1
+
+
+if __name__ == "__main__":
+    print(xnor_gate(0, 0))
+    print(xnor_gate(0, 1))
+    print(xnor_gate(1, 0))
+    print(xnor_gate(1, 1))
From d25187eb7f27227381a03ba800890af7848b57d5 Mon Sep 17 00:00:00 2001
From: Tianyi Zheng
Date: Tue, 25 Oct 2022 16:34:46 -0400
Subject: [PATCH 0610/1543] Remove type cast in combinations algorithm (#7607)

* Remove commented-out print statements in algorithmic functions
* Encapsulate non-algorithmic code in __main__
* Remove unused print_matrix function
* Remove print statement in __init__
* Remove print statement from doctest
* Encapsulate non-algorithmic code in __main__
* Modify algorithm to return instead of print
* Encapsulate non-algorithmic code in __main__
* Refactor data_safety_checker to return instead of print
* updating DIRECTORY.md
* updating DIRECTORY.md
* Apply suggestions from code review
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci
* updating DIRECTORY.md
* Remove int cast and change float division to int division
* Move new-line chars
* updating DIRECTORY.md

Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
Co-authored-by: Christian Clauss
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 maths/combinations.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/maths/combinations.py b/maths/combinations.py
index 40f4f7a9f850..6db1d773faa6 100644
--- a/maths/combinations.py
+++ b/maths/combinations.py
@@ -35,18 +35,18 @@
def combinations(n: int, k: int) -> int: # to calculate a factorial of a negative number, which is not possible if n < k or k < 0: raise ValueError("Please enter positive integers for n and k where n >= k") - return int(factorial(n) / ((factorial(k)) * (factorial(n - k)))) + return factorial(n) // (factorial(k) * factorial(n - k)) if __name__ == "__main__": print( - "\nThe number of five-card hands possible from a standard", - f"fifty-two card deck is: {combinations(52, 5)}", + "The number of five-card hands possible from a standard", + f"fifty-two card deck is: {combinations(52, 5)}\n", ) print( - "\nIf a class of 40 students must be arranged into groups of", + "If a class of 40 students must be arranged into groups of", f"4 for group projects, there are {combinations(40, 4)} ways", "to arrange them.\n", ) @@ -54,5 +54,5 @@ def combinations(n: int, k: int) -> int: print( "If 10 teams are competing in a Formula One race, there", f"are {combinations(10, 3)} ways that first, second and", - "third place can be awarded.\n", + "third place can be awarded.", ) From 7e3dff17c5046aad1c67fa689e5146a13e8cc052 Mon Sep 17 00:00:00 2001 From: Arya Samik <90042953+AryaSamik@users.noreply.github.com> Date: Wed, 26 Oct 2022 03:12:49 +0530 Subject: [PATCH 0611/1543] Docs: correct the shape of trajectory (#6255) The shape of the trajectory of projectile is a parabola. --- physics/horizontal_projectile_motion.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/physics/horizontal_projectile_motion.py b/physics/horizontal_projectile_motion.py index a747acd72072..dbde3660f62f 100644 --- a/physics/horizontal_projectile_motion.py +++ b/physics/horizontal_projectile_motion.py @@ -3,13 +3,13 @@ This algorithm solves a specific problem in which the motion starts from the ground as can be seen below: (v = 0) - ** - * * - * * - * * - * * - * * -GROUND GROUND + * * + * * + * * + * * + * * + * * +GROUND GROUND For more info: https://en.wikipedia.org/wiki/Projectile_motion """ From 2c959a749163365705a53b049aa1a3e093ee4e7a Mon Sep 17 00:00:00 2001 From: harshyadavcs <108284583+harshyadavcs@users.noreply.github.com> Date: Wed, 26 Oct 2022 03:13:45 +0530 Subject: [PATCH 0612/1543] Update documentation of cnn_classification.py (#7486) * Updated documentation of cnn_classification.py for much better understanding * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update computer_vision/cnn_classification.py Co-authored-by: Caeden Perelli-Harris Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- computer_vision/cnn_classification.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py index 6d4f19639c24..59e4556e069b 100644 --- a/computer_vision/cnn_classification.py +++ b/computer_vision/cnn_classification.py @@ -30,9 +30,12 @@ if __name__ == "__main__": # Initialising the CNN + # (Sequential- Building the model layer by layer) classifier = models.Sequential() # Step 1 - Convolution + # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel + # (3,3) is the kernel size (filter matrix) classifier.add( layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu") ) From c3bcfbf19d43e20e9145d8968659101c1fd8b747 Mon Sep 17 00:00:00 2001 From: Karthik Ayangar <66073214+kituuu@users.noreply.github.com> Date: Wed, 26 Oct 2022 03:25:31 +0530 Subject: [PATCH 
0613/1543] Add Cramer's rule for solving system of linear equations in two variables (#7547) * added script for solving system of linear equations in two variables * implemented all the suggested changes * changed RuntimeError to ValueError * Update matrix/system_of_linear_equation_in_2_variables.py * Update matrix/system_of_linear_equation_in_2_variables.py * Update and rename system_of_linear_equation_in_2_variables.py to cramers_rule_2x2.py Co-authored-by: Christian Clauss --- matrix/cramers_rule_2x2.py | 82 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 matrix/cramers_rule_2x2.py diff --git a/matrix/cramers_rule_2x2.py b/matrix/cramers_rule_2x2.py new file mode 100644 index 000000000000..a635d66fbb6c --- /dev/null +++ b/matrix/cramers_rule_2x2.py @@ -0,0 +1,82 @@ +# https://www.chilimath.com/lessons/advanced-algebra/cramers-rule-with-two-variables +# https://en.wikipedia.org/wiki/Cramer%27s_rule + + +def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> str: + """ + Solves the system of linear equation in 2 variables. + :param: equation1: list of 3 numbers + :param: equation2: list of 3 numbers + :return: String of result + input format : [a1, b1, d1], [a2, b2, d2] + determinant = [[a1, b1], [a2, b2]] + determinant_x = [[d1, b1], [d2, b2]] + determinant_y = [[a1, d1], [a2, d2]] + + >>> cramers_rule_2x2([2, 3, 0], [5, 1, 0]) + 'Trivial solution. (Consistent system) x = 0 and y = 0' + >>> cramers_rule_2x2([0, 4, 50], [2, 0, 26]) + 'Non-Trivial Solution (Consistent system) x = 13.0, y = 12.5' + >>> cramers_rule_2x2([11, 2, 30], [1, 0, 4]) + 'Non-Trivial Solution (Consistent system) x = 4.0, y = -7.0' + >>> cramers_rule_2x2([4, 7, 1], [1, 2, 0]) + 'Non-Trivial Solution (Consistent system) x = 2.0, y = -1.0' + + >>> cramers_rule_2x2([1, 2, 3], [2, 4, 6]) + Traceback (most recent call last): + ... + ValueError: Infinite solutions. (Consistent system) + >>> cramers_rule_2x2([1, 2, 3], [2, 4, 7]) + Traceback (most recent call last): + ... + ValueError: No solution. (Inconsistent system) + >>> cramers_rule_2x2([1, 2, 3], [11, 22]) + Traceback (most recent call last): + ... + ValueError: Please enter a valid equation. + >>> cramers_rule_2x2([0, 1, 6], [0, 0, 3]) + Traceback (most recent call last): + ... + ValueError: No solution. (Inconsistent system) + >>> cramers_rule_2x2([0, 0, 6], [0, 0, 3]) + Traceback (most recent call last): + ... + ValueError: Both a & b of two equations can't be zero. + >>> cramers_rule_2x2([1, 2, 3], [1, 2, 3]) + Traceback (most recent call last): + ... + ValueError: Infinite solutions. (Consistent system) + >>> cramers_rule_2x2([0, 4, 50], [0, 3, 99]) + Traceback (most recent call last): + ... + ValueError: No solution. (Inconsistent system) + """ + + # Check if the input is valid + if not len(equation1) == len(equation2) == 3: + raise ValueError("Please enter a valid equation.") + if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0: + raise ValueError("Both a & b of two equations can't be zero.") + + # Extract the coefficients + a1, b1, c1 = equation1 + a2, b2, c2 = equation2 + + # Calculate the determinants of the matrices + determinant = a1 * b2 - a2 * b1 + determinant_x = c1 * b2 - c2 * b1 + determinant_y = a1 * c2 - a2 * c1 + + # Check if the system of linear equations has a solution (using Cramer's rule) + if determinant == 0: + if determinant_x == determinant_y == 0: + raise ValueError("Infinite solutions. (Consistent system)") + else: + raise ValueError("No solution. 
(Inconsistent system)") + else: + if determinant_x == determinant_y == 0: + return "Trivial solution. (Consistent system) x = 0 and y = 0" + else: + x = determinant_x / determinant + y = determinant_y / determinant + return f"Non-Trivial Solution (Consistent system) x = {x}, y = {y}" From c31ef5e7782803b07e6d7eb4dca3b038cbdb095d Mon Sep 17 00:00:00 2001 From: RohitSingh107 <64142943+RohitSingh107@users.noreply.github.com> Date: Wed, 26 Oct 2022 03:25:48 +0530 Subject: [PATCH 0614/1543] Add longest common substring (#7488) * added longest common substring * added retrun type hint * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * changed t1, t2 to text1, text2 * Update longest_common_substring.py * Update dynamic_programming/longest_common_substring.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update dynamic_programming/longest_common_substring.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * applied suggested changes * Update dynamic_programming/longest_common_substring.py Co-authored-by: Caeden Perelli-Harris * removed space between line * return longest common substring * Update dynamic_programming/longest_common_substring.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Caeden Perelli-Harris Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../longest_common_substring.py | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 dynamic_programming/longest_common_substring.py diff --git a/dynamic_programming/longest_common_substring.py b/dynamic_programming/longest_common_substring.py new file mode 100644 index 000000000000..84a9f18609f9 --- /dev/null +++ b/dynamic_programming/longest_common_substring.py @@ -0,0 +1,63 @@ +""" +Longest Common Substring Problem Statement: Given two sequences, find the +longest common substring present in both of them. A substring is +necessarily continuous. +Example: "abcdef" and "xabded" have two longest common substrings, "ab" or "de". +Therefore, algorithm should return any one of them. +""" + + +def longest_common_substring(text1: str, text2: str) -> str: + """ + Finds the longest common substring between two strings. 
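# Illustrative standalone mini-trace of the dynamic-programming idea used by this
# function (inputs here are arbitrary): dp[i][j] holds the length of the common
# substring ending at text1[i - 1] and text2[j - 1].
text_a, text_b = "xab", "ab"
dp = [[0] * (len(text_b) + 1) for _ in range(len(text_a) + 1)]
best = 0
for i in range(1, len(text_a) + 1):
    for j in range(1, len(text_b) + 1):
        if text_a[i - 1] == text_b[j - 1]:
            dp[i][j] = dp[i - 1][j - 1] + 1
            best = max(best, dp[i][j])
assert best == 2  # the longest common substring of "xab" and "ab" is "ab"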
+ >>> longest_common_substring("", "") + '' + >>> longest_common_substring("a","") + '' + >>> longest_common_substring("", "a") + '' + >>> longest_common_substring("a", "a") + 'a' + >>> longest_common_substring("abcdef", "bcd") + 'bcd' + >>> longest_common_substring("abcdef", "xabded") + 'ab' + >>> longest_common_substring("GeeksforGeeks", "GeeksQuiz") + 'Geeks' + >>> longest_common_substring("abcdxyz", "xyzabcd") + 'abcd' + >>> longest_common_substring("zxabcdezy", "yzabcdezx") + 'abcdez' + >>> longest_common_substring("OldSite:GeeksforGeeks.org", "NewSite:GeeksQuiz.com") + 'Site:Geeks' + >>> longest_common_substring(1, 1) + Traceback (most recent call last): + ... + ValueError: longest_common_substring() takes two strings for inputs + """ + + if not (isinstance(text1, str) and isinstance(text2, str)): + raise ValueError("longest_common_substring() takes two strings for inputs") + + text1_length = len(text1) + text2_length = len(text2) + + dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)] + ans_index = 0 + ans_length = 0 + + for i in range(1, text1_length + 1): + for j in range(1, text2_length + 1): + if text1[i - 1] == text2[j - 1]: + dp[i][j] = 1 + dp[i - 1][j - 1] + if dp[i][j] > ans_length: + ans_index = i + ans_length = dp[i][j] + + return text1[ans_index - ans_length : ans_index] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 505c5e20fa7efec9f6c4cb5b8bafd8ff2001e3b7 Mon Sep 17 00:00:00 2001 From: Mislah <76743829+mislah@users.noreply.github.com> Date: Wed, 26 Oct 2022 03:56:05 +0530 Subject: [PATCH 0615/1543] Included area of n sided regular polygon (#7438) * Included area of n sided regular polygon Added a function to calculate the area of n sided regular polygons * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * code standard fixes as per PR comments * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/area.py | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-) diff --git a/maths/area.py b/maths/area.py index abbf7aa85da5..5db7dac38973 100644 --- a/maths/area.py +++ b/maths/area.py @@ -1,12 +1,14 @@ """ Find the area of various geometric shapes +Wikipedia reference: https://en.wikipedia.org/wiki/Area """ -from math import pi, sqrt +from math import pi, sqrt, tan def surface_area_cube(side_length: float) -> float: """ Calculate the Surface Area of a Cube. + >>> surface_area_cube(1) 6 >>> surface_area_cube(1.6) @@ -28,6 +30,7 @@ def surface_area_cube(side_length: float) -> float: def surface_area_cuboid(length: float, breadth: float, height: float) -> float: """ Calculate the Surface Area of a Cuboid. + >>> surface_area_cuboid(1, 2, 3) 22 >>> surface_area_cuboid(0, 0, 0) @@ -57,6 +60,7 @@ def surface_area_sphere(radius: float) -> float: Calculate the Surface Area of a Sphere. Wikipedia reference: https://en.wikipedia.org/wiki/Sphere Formula: 4 * pi * r^2 + >>> surface_area_sphere(5) 314.1592653589793 >>> surface_area_sphere(1) @@ -79,6 +83,7 @@ def surface_area_hemisphere(radius: float) -> float: """ Calculate the Surface Area of a Hemisphere. Formula: 3 * pi * r^2 + >>> surface_area_hemisphere(5) 235.61944901923448 >>> surface_area_hemisphere(1) @@ -102,6 +107,7 @@ def surface_area_cone(radius: float, height: float) -> float: Calculate the Surface Area of a Cone. 
Wikipedia reference: https://en.wikipedia.org/wiki/Cone Formula: pi * r * (r + (h ** 2 + r ** 2) ** 0.5) + >>> surface_area_cone(10, 24) 1130.9733552923256 >>> surface_area_cone(6, 8) @@ -133,6 +139,7 @@ def surface_area_conical_frustum( ) -> float: """ Calculate the Surface Area of a Conical Frustum. + >>> surface_area_conical_frustum(1, 2, 3) 45.511728065337266 >>> surface_area_conical_frustum(4, 5, 6) @@ -167,6 +174,7 @@ def surface_area_cylinder(radius: float, height: float) -> float: Calculate the Surface Area of a Cylinder. Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder Formula: 2 * pi * r * (h + r) + >>> surface_area_cylinder(7, 10) 747.6990515543707 >>> surface_area_cylinder(1.6, 2.6) @@ -196,6 +204,7 @@ def surface_area_cylinder(radius: float, height: float) -> float: def area_rectangle(length: float, width: float) -> float: """ Calculate the area of a rectangle. + >>> area_rectangle(10, 20) 200 >>> area_rectangle(1.6, 2.6) @@ -223,6 +232,7 @@ def area_rectangle(length: float, width: float) -> float: def area_square(side_length: float) -> float: """ Calculate the area of a square. + >>> area_square(10) 100 >>> area_square(0) @@ -242,6 +252,7 @@ def area_square(side_length: float) -> float: def area_triangle(base: float, height: float) -> float: """ Calculate the area of a triangle given the base and height. + >>> area_triangle(10, 10) 50.0 >>> area_triangle(1.6, 2.6) @@ -270,6 +281,7 @@ def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float """ Calculate area of triangle when the length of 3 sides are known. This function uses Heron's formula: https://en.wikipedia.org/wiki/Heron%27s_formula + >>> area_triangle_three_sides(5, 12, 13) 30.0 >>> area_triangle_three_sides(10, 11, 12) @@ -316,6 +328,7 @@ def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float def area_parallelogram(base: float, height: float) -> float: """ Calculate the area of a parallelogram. + >>> area_parallelogram(10, 20) 200 >>> area_parallelogram(1.6, 2.6) @@ -343,6 +356,7 @@ def area_parallelogram(base: float, height: float) -> float: def area_trapezium(base1: float, base2: float, height: float) -> float: """ Calculate the area of a trapezium. + >>> area_trapezium(10, 20, 30) 450.0 >>> area_trapezium(1.6, 2.6, 3.6) @@ -386,6 +400,7 @@ def area_trapezium(base1: float, base2: float, height: float) -> float: def area_circle(radius: float) -> float: """ Calculate the area of a circle. + >>> area_circle(20) 1256.6370614359173 >>> area_circle(1.6) @@ -405,6 +420,7 @@ def area_circle(radius: float) -> float: def area_ellipse(radius_x: float, radius_y: float) -> float: """ Calculate the area of a ellipse. + >>> area_ellipse(10, 10) 314.1592653589793 >>> area_ellipse(10, 20) @@ -434,6 +450,7 @@ def area_ellipse(radius_x: float, radius_y: float) -> float: def area_rhombus(diagonal_1: float, diagonal_2: float) -> float: """ Calculate the area of a rhombus. + >>> area_rhombus(10, 20) 100.0 >>> area_rhombus(1.6, 2.6) @@ -458,6 +475,51 @@ def area_rhombus(diagonal_1: float, diagonal_2: float) -> float: return 1 / 2 * diagonal_1 * diagonal_2 +def area_reg_polygon(sides: int, length: float) -> float: + """ + Calculate the area of a regular polygon. + Wikipedia reference: https://en.wikipedia.org/wiki/Polygon#Regular_polygons + Formula: (n*s^2*cot(pi/n))/4 + + >>> area_reg_polygon(3, 10) + 43.301270189221945 + >>> area_reg_polygon(4, 10) + 100.00000000000001 + >>> area_reg_polygon(0, 0) + Traceback (most recent call last): + ... 
+ ValueError: area_reg_polygon() only accepts integers greater than or equal to \ +three as number of sides + >>> area_reg_polygon(-1, -2) + Traceback (most recent call last): + ... + ValueError: area_reg_polygon() only accepts integers greater than or equal to \ +three as number of sides + >>> area_reg_polygon(5, -2) + Traceback (most recent call last): + ... + ValueError: area_reg_polygon() only accepts non-negative values as \ +length of a side + >>> area_reg_polygon(-1, 2) + Traceback (most recent call last): + ... + ValueError: area_reg_polygon() only accepts integers greater than or equal to \ +three as number of sides + """ + if not isinstance(sides, int) or sides < 3: + raise ValueError( + "area_reg_polygon() only accepts integers greater than or \ +equal to three as number of sides" + ) + elif length < 0: + raise ValueError( + "area_reg_polygon() only accepts non-negative values as \ +length of a side" + ) + return (sides * length**2) / (4 * tan(pi / sides)) + return (sides * length**2) / (4 * tan(pi / sides)) + + if __name__ == "__main__": import doctest @@ -481,3 +543,6 @@ def area_rhombus(diagonal_1: float, diagonal_2: float) -> float: print(f"Cone: {surface_area_cone(10, 20) = }") print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }") print(f"Cylinder: {surface_area_cylinder(10, 20) = }") + print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }") + print(f"Square: {area_reg_polygon(4, 10) = }") + print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }") From 68f6e9ac305b75a7aa8455977e35eeb942051959 Mon Sep 17 00:00:00 2001 From: M3talM0nk3y Date: Tue, 25 Oct 2022 23:31:16 -0400 Subject: [PATCH 0616/1543] Added function that checks if a string is an isogram (#7608) * Added function that checks if a string is an isogram. * Added wiki reference and fixed comments. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Made function name more self-documenting. Raise ValueError if string contains 1 or more digits. Renamed file. Lowercase string inside function. * Removed check_isogram.py (file renamed). * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed test failure. * Raise ValueError when string has non-alpha characters. Removed import. Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- strings/is_isogram.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 strings/is_isogram.py diff --git a/strings/is_isogram.py b/strings/is_isogram.py new file mode 100644 index 000000000000..a9d9acc8138e --- /dev/null +++ b/strings/is_isogram.py @@ -0,0 +1,30 @@ +""" +wiki: https://en.wikipedia.org/wiki/Heterogram_(literature)#Isograms +""" + + +def is_isogram(string: str) -> bool: + """ + An isogram is a word in which no letter is repeated. + Examples of isograms are uncopyrightable and ambidextrously. + >>> is_isogram('Uncopyrightable') + True + >>> is_isogram('allowance') + False + >>> is_isogram('copy1') + Traceback (most recent call last): + ... + ValueError: String must only contain alphabetic characters. 
+ """ + if not all(x.isalpha() for x in string): + raise ValueError("String must only contain alphabetic characters.") + + letters = sorted(string.lower()) + return len(letters) == len(set(letters)) + + +if __name__ == "__main__": + input_str = input("Enter a string ").strip() + + isogram = is_isogram(input_str) + print(f"{input_str} is {'an' if isogram else 'not an'} isogram.") From abf0909b6877d64c3adc9d666b85aa38bcd98566 Mon Sep 17 00:00:00 2001 From: CenTdemeern1 Date: Tue, 25 Oct 2022 23:09:28 -0700 Subject: [PATCH 0617/1543] Write a proper implementation for base16 (#6909) According to CONTRIBUTING.md: "Algorithms in this repo should not be how-to examples for existing Python packages." --- ciphers/base16.py | 75 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 52 insertions(+), 23 deletions(-) diff --git a/ciphers/base16.py b/ciphers/base16.py index a149a6d8c5bf..6cd62846fc87 100644 --- a/ciphers/base16.py +++ b/ciphers/base16.py @@ -1,34 +1,63 @@ -import base64 - - -def base16_encode(inp: str) -> bytes: +def base16_encode(data: bytes) -> str: """ - Encodes a given utf-8 string into base-16. + Encodes the given bytes into base16. - >>> base16_encode('Hello World!') - b'48656C6C6F20576F726C6421' - >>> base16_encode('HELLO WORLD!') - b'48454C4C4F20574F524C4421' - >>> base16_encode('') - b'' + >>> base16_encode(b'Hello World!') + '48656C6C6F20576F726C6421' + >>> base16_encode(b'HELLO WORLD!') + '48454C4C4F20574F524C4421' + >>> base16_encode(b'') + '' """ - # encode the input into a bytes-like object and then encode b16encode that - return base64.b16encode(inp.encode("utf-8")) + # Turn the data into a list of integers (where each integer is a byte), + # Then turn each byte into its hexadecimal representation, make sure + # it is uppercase, and then join everything together and return it. + return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)]) -def base16_decode(b16encoded: bytes) -> str: +def base16_decode(data: str) -> bytes: """ - Decodes from base-16 to a utf-8 string. + Decodes the given base16 encoded data into bytes. - >>> base16_decode(b'48656C6C6F20576F726C6421') - 'Hello World!' - >>> base16_decode(b'48454C4C4F20574F524C4421') - 'HELLO WORLD!' - >>> base16_decode(b'') - '' + >>> base16_decode('48656C6C6F20576F726C6421') + b'Hello World!' + >>> base16_decode('48454C4C4F20574F524C4421') + b'HELLO WORLD!' + >>> base16_decode('') + b'' + >>> base16_decode('486') + Traceback (most recent call last): + ... + ValueError: Base16 encoded data is invalid: + Data does not have an even number of hex digits. + >>> base16_decode('48656c6c6f20576f726c6421') + Traceback (most recent call last): + ... + ValueError: Base16 encoded data is invalid: + Data is not uppercase hex or it contains invalid characters. + >>> base16_decode('This is not base64 encoded data.') + Traceback (most recent call last): + ... + ValueError: Base16 encoded data is invalid: + Data is not uppercase hex or it contains invalid characters. 
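# Illustrative standalone sketch (values chosen arbitrarily): the byte <-> two-hex-digit
# mapping that the encode and decode functions in this file rely on.
assert hex(72)[2:].zfill(2).upper() == "48"  # encoding one byte as uppercase hex
assert int("48", 16) == 72                   # decoding two hex digits back to a byte
assert bytes([72, 105]) == b"Hi"             # re-assembling decoded bytes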
""" - # b16decode the input into bytes and decode that into a human readable string - return base64.b16decode(b16encoded).decode("utf-8") + # Check data validity, following RFC3548 + # https://www.ietf.org/rfc/rfc3548.txt + if (len(data) % 2) != 0: + raise ValueError( + """Base16 encoded data is invalid: +Data does not have an even number of hex digits.""" + ) + # Check the character set - the standard base16 alphabet + # is uppercase according to RFC3548 section 6 + if not set(data) <= set("0123456789ABCDEF"): + raise ValueError( + """Base16 encoded data is invalid: +Data is not uppercase hex or it contains invalid characters.""" + ) + # For every two hexadecimal digits (= a byte), turn it into an integer. + # Then, string the result together into bytes, and return it. + return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2)) if __name__ == "__main__": From 93905653506c684e393d984ad814af66af8ee0e9 Mon Sep 17 00:00:00 2001 From: Karthik Ayangar <66073214+kituuu@users.noreply.github.com> Date: Wed, 26 Oct 2022 14:06:40 +0530 Subject: [PATCH 0618/1543] added support for inverse of 3x3 matrix (#7355) * added support for inverse of 3x3 matrix * Modified Docstring and improved code * fixed an error * Modified docstring * Apply all suggestions from code review Co-authored-by: Caeden Perelli-Harris Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> --- matrix/inverse_of_matrix.py | 137 ++++++++++++++++++++++++++++++++---- 1 file changed, 122 insertions(+), 15 deletions(-) diff --git a/matrix/inverse_of_matrix.py b/matrix/inverse_of_matrix.py index 770ce39b584f..e53d90df8253 100644 --- a/matrix/inverse_of_matrix.py +++ b/matrix/inverse_of_matrix.py @@ -2,22 +2,25 @@ from decimal import Decimal +from numpy import array + def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]: """ A matrix multiplied with its inverse gives the identity matrix. - This function finds the inverse of a 2x2 matrix. + This function finds the inverse of a 2x2 and 3x3 matrix. If the determinant of a matrix is 0, its inverse does not exist. Sources for fixing inaccurate float arithmetic: https://stackoverflow.com/questions/6563058/how-do-i-use-accurate-float-arithmetic-in-python https://docs.python.org/3/library/decimal.html + Doctests for 2x2 >>> inverse_of_matrix([[2, 5], [2, 0]]) [[0.0, 0.5], [0.2, -0.2]] >>> inverse_of_matrix([[2.5, 5], [1, 2]]) Traceback (most recent call last): - ... + ... ValueError: This matrix has no inverse. >>> inverse_of_matrix([[12, -16], [-9, 0]]) [[0.0, -0.1111111111111111], [-0.0625, -0.08333333333333333]] @@ -25,24 +28,128 @@ def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]: [[0.16666666666666666, -0.0625], [-0.3333333333333333, 0.25]] >>> inverse_of_matrix([[10, 5], [3, 2.5]]) [[0.25, -0.5], [-0.3, 1.0]] + + Doctests for 3x3 + >>> inverse_of_matrix([[2, 5, 7], [2, 0, 1], [1, 2, 3]]) + [[2.0, 5.0, -4.0], [1.0, 1.0, -1.0], [-5.0, -12.0, 10.0]] + >>> inverse_of_matrix([[1, 2, 2], [1, 2, 2], [3, 2, -1]]) + Traceback (most recent call last): + ... + ValueError: This matrix has no inverse. + + >>> inverse_of_matrix([[],[]]) + Traceback (most recent call last): + ... + ValueError: Please provide a matrix of size 2x2 or 3x3. + + >>> inverse_of_matrix([[1, 2], [3, 4], [5, 6]]) + Traceback (most recent call last): + ... + ValueError: Please provide a matrix of size 2x2 or 3x3. 
+ + >>> inverse_of_matrix([[1, 2, 1], [0,3, 4]]) + Traceback (most recent call last): + ... + ValueError: Please provide a matrix of size 2x2 or 3x3. + + >>> inverse_of_matrix([[1, 2, 3], [7, 8, 9], [7, 8, 9]]) + Traceback (most recent call last): + ... + ValueError: This matrix has no inverse. + + >>> inverse_of_matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] """ - d = Decimal # An abbreviation for conciseness + d = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices - if len(matrix) != 2 or len(matrix[0]) != 2 or len(matrix[1]) != 2: - raise ValueError("Please provide a matrix of size 2x2.") + if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2: + # Calculate the determinant of the matrix + determinant = float( + d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]) + ) + if determinant == 0: + raise ValueError("This matrix has no inverse.") + + # Creates a copy of the matrix with swapped positions of the elements + swapped_matrix = [[0.0, 0.0], [0.0, 0.0]] + swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0] + swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1] + + # Calculate the inverse of the matrix + return [ + [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix + ] + elif ( + len(matrix) == 3 + and len(matrix[0]) == 3 + and len(matrix[1]) == 3 + and len(matrix[2]) == 3 + ): + # Calculate the determinant of the matrix using Sarrus rule + determinant = float( + ( + (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2])) + + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0])) + + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1])) + ) + - ( + (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0])) + + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2])) + + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1])) + ) + ) + if determinant == 0: + raise ValueError("This matrix has no inverse.") + + # Creating cofactor matrix + cofactor_matrix = [ + [d(0.0), d(0.0), d(0.0)], + [d(0.0), d(0.0), d(0.0)], + [d(0.0), d(0.0), d(0.0)], + ] + cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - ( + d(matrix[1][2]) * d(matrix[2][1]) + ) + cofactor_matrix[0][1] = -( + (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])) + ) + cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - ( + d(matrix[1][1]) * d(matrix[2][0]) + ) + cofactor_matrix[1][0] = -( + (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])) + ) + cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - ( + d(matrix[0][2]) * d(matrix[2][0]) + ) + cofactor_matrix[1][2] = -( + (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])) + ) + cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - ( + d(matrix[0][2]) * d(matrix[1][1]) + ) + cofactor_matrix[2][1] = -( + (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])) + ) + cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - ( + d(matrix[0][1]) * d(matrix[1][0]) + ) - # Calculate the determinant of the matrix - determinant = d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]) - if determinant == 0: - raise ValueError("This matrix has no inverse.") + # Transpose the cofactor matrix (Adjoint matrix) + adjoint_matrix = array(cofactor_matrix) + for i in range(3): + for j in range(3): + adjoint_matrix[i][j] = cofactor_matrix[j][i] - # Creates a copy 
of the matrix with swapped positions of the elements - swapped_matrix = [[0.0, 0.0], [0.0, 0.0]] - swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0] - swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1] + # Inverse of the matrix using the formula (1/determinant) * adjoint matrix + inverse_matrix = array(cofactor_matrix) + for i in range(3): + for j in range(3): + inverse_matrix[i][j] /= d(determinant) - # Calculate the inverse of the matrix - return [[float(d(n) / determinant) or 0.0 for n in row] for row in swapped_matrix] + # Calculate the inverse of the matrix + return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix] + raise ValueError("Please provide a matrix of size 2x2 or 3x3.") From 8fd06efe22ec3e870ac1fa375bd4600cb30baad4 Mon Sep 17 00:00:00 2001 From: JatinR05 <71865805+JatinR05@users.noreply.github.com> Date: Wed, 26 Oct 2022 20:13:01 +0530 Subject: [PATCH 0619/1543] Create minimums_squares_to_represent_a_number.py (#7595) * Create minimums_squares_to_represent_a_number.py added a dynamic programming approach of finding the minimum number of square to represent a number. eg : 25 = 5*5 37 = 6*6 + 1*1 21 = 4*4 + 2*2 + 1*1 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename minimums_squares_to_represent_a_number.py to minimum_squares_to_represent_a_number.py updated the code * Update minimum_squares_to_represent_a_number.py I have added the appropriate checks for 0 and 12.34. It would be great if you could suggest a name for the dp array * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_squares_to_represent_a_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_squares_to_represent_a_number.py updated * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_squares_to_represent_a_number.py updated * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../minimum_squares_to_represent_a_number.py | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 dynamic_programming/minimum_squares_to_represent_a_number.py diff --git a/dynamic_programming/minimum_squares_to_represent_a_number.py b/dynamic_programming/minimum_squares_to_represent_a_number.py new file mode 100644 index 000000000000..bf5849f5bcb3 --- /dev/null +++ b/dynamic_programming/minimum_squares_to_represent_a_number.py @@ -0,0 +1,48 @@ +import math +import sys + + +def minimum_squares_to_represent_a_number(number: int) -> int: + """ + Count the number of minimum squares to represent a number + >>> minimum_squares_to_represent_a_number(25) + 1 + >>> minimum_squares_to_represent_a_number(37) + 2 + >>> minimum_squares_to_represent_a_number(21) + 3 + >>> minimum_squares_to_represent_a_number(58) + 2 + >>> minimum_squares_to_represent_a_number(-1) + Traceback (most recent call last): + ... + ValueError: the value of input must not be a negative number + >>> minimum_squares_to_represent_a_number(0) + 1 + >>> minimum_squares_to_represent_a_number(12.34) + Traceback (most recent call last): + ... 
+ ValueError: the value of input must be a natural number + """ + if number != int(number): + raise ValueError("the value of input must be a natural number") + if number < 0: + raise ValueError("the value of input must not be a negative number") + if number == 0: + return 1 + answers = [-1] * (number + 1) + answers[0] = 0 + for i in range(1, number + 1): + answer = sys.maxsize + root = int(math.sqrt(i)) + for j in range(1, root + 1): + current_answer = 1 + answers[i - (j**2)] + answer = min(answer, current_answer) + answers[i] = answer + return answers[number] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 5c8a939c5a51104fce4b22ef56d29720c6ce47bb Mon Sep 17 00:00:00 2001 From: Shubham Kondekar <40213815+kondekarshubham123@users.noreply.github.com> Date: Wed, 26 Oct 2022 20:36:15 +0530 Subject: [PATCH 0620/1543] Create largest_square_area_in_matrix.py (#7673) * Create largest_square_area_in_matrix.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update matrix/largest_square_area_in_matrix.py Co-authored-by: Caeden Perelli-Harris * Update matrix/largest_square_area_in_matrix.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update largest_square_area_in_matrix.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- matrix/largest_square_area_in_matrix.py | 191 ++++++++++++++++++++++++ 1 file changed, 191 insertions(+) create mode 100644 matrix/largest_square_area_in_matrix.py diff --git a/matrix/largest_square_area_in_matrix.py b/matrix/largest_square_area_in_matrix.py new file mode 100644 index 000000000000..cf975cb7ce1f --- /dev/null +++ b/matrix/largest_square_area_in_matrix.py @@ -0,0 +1,191 @@ +""" +Question: +Given a binary matrix mat of size n * m, find out the maximum size square +sub-matrix with all 1s. + +--- +Example 1: + +Input: +n = 2, m = 2 +mat = [[1, 1], + [1, 1]] + +Output: +2 + +Explanation: The maximum size of the square +sub-matrix is 2. The matrix itself is the +maximum sized sub-matrix in this case. +--- +Example 2 + +Input: +n = 2, m = 2 +mat = [[0, 0], + [0, 0]] +Output: 0 + +Explanation: There is no 1 in the matrix. + + +Approach: +We initialize another matrix (dp) with the same dimensions +as the original one initialized with all 0’s. + +dp_array(i,j) represents the side length of the maximum square whose +bottom right corner is the cell with index (i,j) in the original matrix. + +Starting from index (0,0), for every 1 found in the original matrix, +we update the value of the current element as + +dp_array(i,j)=dp_array(dp(i−1,j),dp_array(i−1,j−1),dp_array(i,j−1)) + 1. +""" + + +def largest_square_area_in_matrix_top_down_approch( + rows: int, cols: int, mat: list[list[int]] +) -> int: + """ + Function updates the largest_square_area[0], if recursive call found + square with maximum area. + + We aren't using dp_array here, so the time complexity would be exponential. 
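# Illustrative standalone sketch (arbitrary names, tiny input): the recurrence from the
# module docstring above, filled in iteratively with a one-cell border of zeros.
mat = [[1, 1], [1, 1]]
rows, cols = 2, 2
dp = [[0] * (cols + 1) for _ in range(rows + 1)]
best = 0
for row in range(1, rows + 1):
    for col in range(1, cols + 1):
        if mat[row - 1][col - 1]:
            dp[row][col] = 1 + min(
                dp[row - 1][col], dp[row][col - 1], dp[row - 1][col - 1]
            )
            best = max(best, dp[row][col])
assert best == 2  # the whole 2x2 block of ones forms the largest square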
+ + >>> largest_square_area_in_matrix_top_down_approch(2, 2, [[1,1], [1,1]]) + 2 + >>> largest_square_area_in_matrix_top_down_approch(2, 2, [[0,0], [0,0]]) + 0 + """ + + def update_area_of_max_square(row: int, col: int) -> int: + + # BASE CASE + if row >= rows or col >= cols: + return 0 + + right = update_area_of_max_square(row, col + 1) + diagonal = update_area_of_max_square(row + 1, col + 1) + down = update_area_of_max_square(row + 1, col) + + if mat[row][col]: + sub_problem_sol = 1 + min([right, diagonal, down]) + largest_square_area[0] = max(largest_square_area[0], sub_problem_sol) + return sub_problem_sol + else: + return 0 + + largest_square_area = [0] + update_area_of_max_square(0, 0) + return largest_square_area[0] + + +def largest_square_area_in_matrix_top_down_approch_with_dp( + rows: int, cols: int, mat: list[list[int]] +) -> int: + """ + Function updates the largest_square_area[0], if recursive call found + square with maximum area. + + We are using dp_array here, so the time complexity would be O(N^2). + + >>> largest_square_area_in_matrix_top_down_approch_with_dp(2, 2, [[1,1], [1,1]]) + 2 + >>> largest_square_area_in_matrix_top_down_approch_with_dp(2, 2, [[0,0], [0,0]]) + 0 + """ + + def update_area_of_max_square_using_dp_array( + row: int, col: int, dp_array: list[list[int]] + ) -> int: + if row >= rows or col >= cols: + return 0 + if dp_array[row][col] != -1: + return dp_array[row][col] + + right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array) + diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array) + down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array) + + if mat[row][col]: + sub_problem_sol = 1 + min([right, diagonal, down]) + largest_square_area[0] = max(largest_square_area[0], sub_problem_sol) + dp_array[row][col] = sub_problem_sol + return sub_problem_sol + else: + return 0 + + largest_square_area = [0] + dp_array = [[-1] * cols for _ in range(rows)] + update_area_of_max_square_using_dp_array(0, 0, dp_array) + + return largest_square_area[0] + + +def largest_square_area_in_matrix_bottom_up( + rows: int, cols: int, mat: list[list[int]] +) -> int: + """ + Function updates the largest_square_area, using bottom up approach. + + >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1,1], [1,1]]) + 2 + >>> largest_square_area_in_matrix_bottom_up(2, 2, [[0,0], [0,0]]) + 0 + + """ + dp_array = [[0] * (cols + 1) for _ in range(rows + 1)] + largest_square_area = 0 + for row in range(rows - 1, -1, -1): + for col in range(cols - 1, -1, -1): + + right = dp_array[row][col + 1] + diagonal = dp_array[row + 1][col + 1] + bottom = dp_array[row + 1][col] + + if mat[row][col] == 1: + dp_array[row][col] = 1 + min(right, diagonal, bottom) + largest_square_area = max(dp_array[row][col], largest_square_area) + else: + dp_array[row][col] = 0 + + return largest_square_area + + +def largest_square_area_in_matrix_bottom_up_space_optimization( + rows: int, cols: int, mat: list[list[int]] +) -> int: + """ + Function updates the largest_square_area, using bottom up + approach. with space optimization. 
+ + >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1,1], [1,1]]) + 2 + >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[0,0], [0,0]]) + 0 + """ + current_row = [0] * (cols + 1) + next_row = [0] * (cols + 1) + largest_square_area = 0 + for row in range(rows - 1, -1, -1): + for col in range(cols - 1, -1, -1): + + right = current_row[col + 1] + diagonal = next_row[col + 1] + bottom = next_row[col] + + if mat[row][col] == 1: + current_row[col] = 1 + min(right, diagonal, bottom) + largest_square_area = max(current_row[col], largest_square_area) + else: + current_row[col] = 0 + next_row = current_row + + return largest_square_area + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])) From 614274a9dc996f64dd470d2029847cc229f19346 Mon Sep 17 00:00:00 2001 From: Shubham Kondekar <40213815+kondekarshubham123@users.noreply.github.com> Date: Wed, 26 Oct 2022 22:28:33 +0530 Subject: [PATCH 0621/1543] Update spiral_print.py (#7674) * Update spiral_print.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update matrix/spiral_print.py Co-authored-by: Caeden Perelli-Harris * Update matrix/spiral_print.py Co-authored-by: Caeden Perelli-Harris * Update matrix/spiral_print.py Co-authored-by: Caeden Perelli-Harris * Update matrix/spiral_print.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update spiral_print.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update spiral_print.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update spiral_print.py * Update spiral_print.py * Update spiral_print.py * Update spiral_print.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- matrix/spiral_print.py | 49 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index 0cf732d60ca8..0d0be1527aec 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -76,7 +76,56 @@ def spiral_print_clockwise(a: list[list[int]]) -> None: return +# Other Easy to understand Approach + + +def spiral_traversal(matrix: list[list]) -> list[int]: + """ + >>> spiral_traversal([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7] + + Example: + matrix = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + Algorithm: + Step 1. first pop the 0 index list. (which is [1,2,3,4] and concatenate the + output of [step 2]) + Step 2. Now perform matrix’s Transpose operation (Change rows to column + and vice versa) and reverse the resultant matrix. + Step 3. Pass the output of [2nd step], to same recursive function till + base case hits. + Dry Run: + Stage 1. + [1, 2, 3, 4] + spiral_traversal([ + [8, 12], [7, 11], [6, 10], [5, 9]] + ]) + Stage 2. + [1, 2, 3, 4, 8, 12] + spiral_traversal([ + [11, 10, 9], [7, 6, 5] + ]) + Stage 3. + [1, 2, 3, 4, 8, 12, 11, 10, 9] + spiral_traversal([ + [5], [6], [7] + ]) + Stage 4. + [1, 2, 3, 4, 8, 12, 11, 10, 9, 5] + spiral_traversal([ + [5], [6], [7] + ]) + Stage 5. + [1, 2, 3, 4, 8, 12, 11, 10, 9, 5] + spiral_traversal([[6, 7]]) + Stage 6. 
+ [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7] + spiral_traversal([]) + """ + if matrix: + return list(matrix.pop(0)) + spiral_traversal(list(zip(*matrix))[::-1]) + else: + return [] + + # driver code if __name__ == "__main__": + import doctest + + doctest.testmod() + a = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] spiral_print_clockwise(a) From 74325d079cf4394f7b75c26b334a81e98b7e25b1 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 26 Oct 2022 22:08:53 +0200 Subject: [PATCH 0622/1543] Rename quantum_random.py to quantum_random.py.DISABLED.txt (#7683) * Rename quantum_random.py to quantum_random.py.DISABLED.txt #7682 * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 13 ++++++++++++- ...tum_random.py => quantum_random.py.DISABLED.txt} | 0 2 files changed, 12 insertions(+), 1 deletion(-) rename quantum/{quantum_random.py => quantum_random.py.DISABLED.txt} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 3e722a8784e5..ba7d3e62a9e1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -56,8 +56,13 @@ ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) + * [Nand Gate](boolean_algebra/nand_gate.py) * [Norgate](boolean_algebra/norgate.py) + * [Not Gate](boolean_algebra/not_gate.py) + * [Or Gate](boolean_algebra/or_gate.py) * [Quine Mc Cluskey](boolean_algebra/quine_mc_cluskey.py) + * [Xnor Gate](boolean_algebra/xnor_gate.py) + * [Xor Gate](boolean_algebra/xor_gate.py) ## Cellular Automata * [Conways Game Of Life](cellular_automata/conways_game_of_life.py) @@ -288,6 +293,7 @@ * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) * [Knapsack](dynamic_programming/knapsack.py) * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) + * [Longest Common Substring](dynamic_programming/longest_common_substring.py) * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py) * [Longest Sub Array](dynamic_programming/longest_sub_array.py) @@ -298,6 +304,7 @@ * [Minimum Coin Change](dynamic_programming/minimum_coin_change.py) * [Minimum Cost Path](dynamic_programming/minimum_cost_path.py) * [Minimum Partition](dynamic_programming/minimum_partition.py) + * [Minimum Squares To Represent A Number](dynamic_programming/minimum_squares_to_represent_a_number.py) * [Minimum Steps To One](dynamic_programming/minimum_steps_to_one.py) * [Optimal Binary Search Tree](dynamic_programming/optimal_binary_search_tree.py) * [Rod Cutting](dynamic_programming/rod_cutting.py) @@ -474,6 +481,7 @@ * [Add](maths/add.py) * [Aliquot Sum](maths/aliquot_sum.py) * [Allocation Number](maths/allocation_number.py) + * [Arc Length](maths/arc_length.py) * [Area](maths/area.py) * [Area Under Curve](maths/area_under_curve.py) * [Armstrong Numbers](maths/armstrong_numbers.py) @@ -609,7 +617,9 @@ ## Matrix * [Binary Search Matrix](matrix/binary_search_matrix.py) * [Count Islands In Matrix](matrix/count_islands_in_matrix.py) + * [Cramers Rule 2X2](matrix/cramers_rule_2x2.py) * [Inverse Of Matrix](matrix/inverse_of_matrix.py) + * [Largest Square Area In Matrix](matrix/largest_square_area_in_matrix.py) * [Matrix Class](matrix/matrix_class.py) * [Matrix Operation](matrix/matrix_operation.py) * [Max Area Of Island](matrix/max_area_of_island.py) @@ -657,6 +667,7 @@ ## Physics * [Casimir Effect](physics/casimir_effect.py) * [Horizontal Projectile 
Motion](physics/horizontal_projectile_motion.py) + * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [N Body Simulation](physics/n_body_simulation.py) * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) @@ -948,7 +959,6 @@ * [Not Gate](quantum/not_gate.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) - * [Quantum Random](quantum/quantum_random.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) * [Superdense Coding](quantum/superdense_coding.py) @@ -1048,6 +1058,7 @@ * [Hamming Distance](strings/hamming_distance.py) * [Indian Phone Validator](strings/indian_phone_validator.py) * [Is Contains Unique Chars](strings/is_contains_unique_chars.py) + * [Is Isogram](strings/is_isogram.py) * [Is Palindrome](strings/is_palindrome.py) * [Is Pangram](strings/is_pangram.py) * [Is Spain National Id](strings/is_spain_national_id.py) diff --git a/quantum/quantum_random.py b/quantum/quantum_random.py.DISABLED.txt similarity index 100% rename from quantum/quantum_random.py rename to quantum/quantum_random.py.DISABLED.txt From b46b92a9160360ea09848893b90dd6022f371ffe Mon Sep 17 00:00:00 2001 From: Arjit Arora <42044030+arjitarora26@users.noreply.github.com> Date: Thu, 27 Oct 2022 01:39:23 +0530 Subject: [PATCH 0623/1543] Add function for highest set bit location (#7586) * Add function for highest set bit location * Address review comments --- bit_manipulation/highest_set_bit.py | 34 +++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 bit_manipulation/highest_set_bit.py diff --git a/bit_manipulation/highest_set_bit.py b/bit_manipulation/highest_set_bit.py new file mode 100644 index 000000000000..21d92dcb9492 --- /dev/null +++ b/bit_manipulation/highest_set_bit.py @@ -0,0 +1,34 @@ +def get_highest_set_bit_position(number: int) -> int: + """ + Returns position of the highest set bit of a number. + Ref - https://graphics.stanford.edu/~seander/bithacks.html#IntegerLogObvious + >>> get_highest_set_bit_position(25) + 5 + >>> get_highest_set_bit_position(37) + 6 + >>> get_highest_set_bit_position(1) + 1 + >>> get_highest_set_bit_position(4) + 3 + >>> get_highest_set_bit_position(0) + 0 + >>> get_highest_set_bit_position(0.8) + Traceback (most recent call last): + ... 
+ TypeError: Input value must be an 'int' type + """ + if not isinstance(number, int): + raise TypeError("Input value must be an 'int' type") + + position = 0 + while number: + position += 1 + number >>= 1 + + return position + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 71c7c0bd3592225c027d07a10d1c71946c0f677a Mon Sep 17 00:00:00 2001 From: SwayamSahu <91021799+SwayamSahu@users.noreply.github.com> Date: Thu, 27 Oct 2022 01:50:00 +0530 Subject: [PATCH 0624/1543] Updated a typo in print statement (#7696) * Updated a typo in print statement * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- strings/barcode_validator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/strings/barcode_validator.py b/strings/barcode_validator.py index 05670007665c..2e1ea87039ef 100644 --- a/strings/barcode_validator.py +++ b/strings/barcode_validator.py @@ -83,6 +83,6 @@ def get_barcode(barcode: str) -> int: barcode = get_barcode(input("Barcode: ").strip()) if is_valid(barcode): - print(f"'{barcode}' is a valid Barcode") + print(f"'{barcode}' is a valid barcode.") else: - print(f"'{barcode}' is NOT is valid Barcode.") + print(f"'{barcode}' is NOT a valid barcode.") From d33f9b31fe96acf5201c39f565015444526a3e38 Mon Sep 17 00:00:00 2001 From: Sushant Srivastav <63559772+sushant4191@users.noreply.github.com> Date: Thu, 27 Oct 2022 02:45:02 +0530 Subject: [PATCH 0625/1543] Calculate GST Amount (#7694) * Calculate GST Amount The program helps to get the net amount after GST is added to it. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update financial/calculating GST.py Thanks! Co-authored-by: Christian Clauss * Update and rename calculating GST.py to price_plus_tax.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update price_plus_tax.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- financial/price_plus_tax.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 financial/price_plus_tax.py diff --git a/financial/price_plus_tax.py b/financial/price_plus_tax.py new file mode 100644 index 000000000000..43876d35e57c --- /dev/null +++ b/financial/price_plus_tax.py @@ -0,0 +1,18 @@ +""" +Calculate price plus tax of a good or service given its price and a tax rate. 
+""" + + +def price_plus_tax(price: float, tax_rate: float) -> float: + """ + >>> price_plus_tax(100, 0.25) + 125.0 + >>> price_plus_tax(125.50, 0.05) + 131.775 + """ + return price * (1 + tax_rate) + + +if __name__ == "__main__": + print(f"{price_plus_tax(100, 0.25) = }") + print(f"{price_plus_tax(125.50, 0.05) = }") From e906a5149a0a9c116e1a3dbade6eb6ea659ac68a Mon Sep 17 00:00:00 2001 From: SparshRastogi <75373475+SparshRastogi@users.noreply.github.com> Date: Thu, 27 Oct 2022 16:52:10 +0530 Subject: [PATCH 0626/1543] Create malus_law.py (#7710) * Create malus_law.py Finding the intensity of light transmitted through a polariser using Malus Law and by taking initial intensity and angle between polariser and axis as input * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update physics/malus_law.py Co-authored-by: Caeden Perelli-Harris * Update physics/malus_law.py Co-authored-by: Caeden Perelli-Harris * Update physics/malus_law.py Co-authored-by: Caeden Perelli-Harris * Update physics/malus_law.py Co-authored-by: Caeden Perelli-Harris * Update malus_law.py Made some changes in the error messages and the docstring testcases * Update malus_law.py Made changes for the passing the precommit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- physics/malus_law.py | 80 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 physics/malus_law.py diff --git a/physics/malus_law.py b/physics/malus_law.py new file mode 100644 index 000000000000..ae77d45cf614 --- /dev/null +++ b/physics/malus_law.py @@ -0,0 +1,80 @@ +import math + +""" +Finding the intensity of light transmitted through a polariser using Malus Law +and by taking initial intensity and angle between polariser and axis as input + +Description : Malus's law, which is named after Étienne-Louis Malus, +says that when a perfect polarizer is placed in a polarized +beam of light, the irradiance, I, of the light that passes +through is given by + I=I'cos²θ +where I' is the initial intensity and θ is the angle between the light's +initial polarization direction and the axis of the polarizer. +A beam of unpolarized light can be thought of as containing a +uniform mixture of linear polarizations at all possible angles. +Since the average value of cos²θ is 1/2, the transmission coefficient becomes +I/I' = 1/2 +In practice, some light is lost in the polarizer and the actual transmission +will be somewhat lower than this, around 38% for Polaroid-type polarizers but +considerably higher (>49.9%) for some birefringent prism types. +If two polarizers are placed one after another (the second polarizer is +generally called an analyzer), the mutual angle between their polarizing axes +gives the value of θ in Malus's law. If the two axes are orthogonal, the +polarizers are crossed and in theory no light is transmitted, though again +practically speaking no polarizer is perfect and the transmission is not exactly +zero (for example, crossed Polaroid sheets appear slightly blue in colour because +their extinction ratio is better in the red). If a transparent object is placed +between the crossed polarizers, any polarization effects present in the sample +(such as birefringence) will be shown as an increase in transmission. 
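A quick standalone check of the cos² dependence and of the 1/2 average transmission described here (a minimal sketch with illustrative values, separate from the file added in this patch):

import math

# Malus's law: I = I0 * cos(theta)**2, where theta is the angle between the
# light's polarization direction and the polarizer's axis. (Illustrative
# values only; this snippet is not part of physics/malus_law.py.)
i0 = 100.0
assert math.isclose(i0 * math.cos(math.radians(0)) ** 2, 100.0)  # aligned axes: full transmission
assert math.isclose(i0 * math.cos(math.radians(60)) ** 2, 25.0)  # cos(60 deg)**2 == 1/4
assert i0 * math.cos(math.radians(90)) ** 2 < 1e-10  # crossed axes: essentially blocked

# Unpolarized light averages cos**2 over all angles, which gives the 1/2
# transmission coefficient stated in the docstring.
average = sum(math.cos(math.radians(a)) ** 2 for a in range(360)) / 360
assert math.isclose(average, 0.5)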
+This effect is used in polarimetry to measure the optical activity of a sample. +Real polarizers are also not perfect blockers of the polarization orthogonal to +their polarization axis; the ratio of the transmission of the unwanted component +to the wanted component is called the extinction ratio, and varies from around +1:500 for Polaroid to about 1:106 for Glan–Taylor prism polarizers. + +Reference : "https://en.wikipedia.org/wiki/Polarizer#Malus's_law_and_other_properties" +""" + + +def malus_law(initial_intensity: float, angle: float) -> float: + """ + >>> round(malus_law(10,45),2) + 5.0 + >>> round(malus_law(100,60),2) + 25.0 + >>> round(malus_law(50,150),2) + 37.5 + >>> round(malus_law(75,270),2) + 0.0 + >>> round(malus_law(10,-900),2) + Traceback (most recent call last): + ... + ValueError: In Malus Law, the angle is in the range 0-360 degrees + >>> round(malus_law(10,900),2) + Traceback (most recent call last): + ... + ValueError: In Malus Law, the angle is in the range 0-360 degrees + >>> round(malus_law(-100,900),2) + Traceback (most recent call last): + ... + ValueError: The value of intensity cannot be negative + >>> round(malus_law(100,180),2) + 100.0 + >>> round(malus_law(100,360),2) + 100.0 + """ + + if initial_intensity < 0: + raise ValueError("The value of intensity cannot be negative") + # handling of negative values of initial intensity + if angle < 0 or angle > 360: + raise ValueError("In Malus Law, the angle is in the range 0-360 degrees") + # handling of values out of allowed range + return initial_intensity * (math.cos(math.radians(angle)) ** 2) + + +if __name__ == "__main__": + import doctest + + doctest.testmod(name="malus_law") From e8915097c4a632419acc77c1ce08aae3e3c3b864 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 27 Oct 2022 14:15:15 +0100 Subject: [PATCH 0627/1543] refactor: Fix matrix display deprecation (#7729) --- machine_learning/xgboost_classifier.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/machine_learning/xgboost_classifier.py b/machine_learning/xgboost_classifier.py index bb5b48b7ab23..62a1b331baaf 100644 --- a/machine_learning/xgboost_classifier.py +++ b/machine_learning/xgboost_classifier.py @@ -2,7 +2,7 @@ import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris -from sklearn.metrics import plot_confusion_matrix +from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier @@ -63,7 +63,7 @@ def main() -> None: xgboost_classifier = xgboost(x_train, y_train) # Display the confusion matrix of the classifier with both training and test sets - plot_confusion_matrix( + ConfusionMatrixDisplay.from_estimator( xgboost_classifier, x_test, y_test, From 9bba42eca8c679a32f99984bbb5bb53795f4e71f Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 27 Oct 2022 18:42:30 +0100 Subject: [PATCH 0628/1543] refactor: Indent ... 
for visual purposes (#7744) --- arithmetic_analysis/bisection.py | 4 +-- arithmetic_analysis/intersection.py | 4 +-- .../jacobi_iteration_method.py | 10 +++--- arithmetic_analysis/lu_decomposition.py | 2 +- arithmetic_analysis/newton_method.py | 2 +- arithmetic_analysis/newton_raphson_new.py | 2 +- backtracking/knight_tour.py | 2 +- conversions/binary_to_decimal.py | 6 ++-- conversions/binary_to_hexadecimal.py | 4 +-- conversions/binary_to_octal.py | 4 +-- conversions/decimal_to_any.py | 12 +++---- conversions/decimal_to_binary.py | 4 +-- conversions/decimal_to_binary_recursion.py | 6 ++-- conversions/decimal_to_hexadecimal.py | 4 +-- conversions/hex_to_bin.py | 4 +-- conversions/hexadecimal_to_decimal.py | 6 ++-- conversions/octal_to_decimal.py | 20 ++++++------ conversions/temperature_conversions.py | 32 +++++++++---------- .../binary_tree/binary_search_tree.py | 2 +- .../binary_tree/binary_tree_mirror.py | 4 +-- .../number_of_possible_binary_trees.py | 2 +- .../linked_list/doubly_linked_list.py | 10 +++--- .../linked_list/singly_linked_list.py | 16 +++++----- data_structures/queue/linked_queue.py | 4 +-- .../queue/priority_queue_using_list.py | 2 +- .../stacks/infix_to_postfix_conversion.py | 2 +- .../stacks/stack_with_singly_linked_list.py | 2 +- .../longest_common_substring.py | 2 +- genetic_algorithm/basic_string.py | 6 ++-- linear_algebra/src/lib.py | 4 +-- machine_learning/similarity_search.py | 6 ++-- maths/bisection.py | 2 +- maths/catalan_number.py | 6 ++-- maths/fibonacci.py | 10 +++--- maths/maclaurin_series.py | 16 +++++----- maths/proth_number.py | 6 ++-- maths/sylvester_sequence.py | 4 +-- maths/zellers_congruence.py | 4 +-- neural_network/perceptron.py | 6 ++-- project_euler/problem_004/sol1.py | 2 +- project_euler/problem_010/sol3.py | 6 ++-- searches/interpolation_search.py | 2 +- sorts/bead_sort.py | 4 +-- sorts/msd_radix_sort.py | 4 +-- strings/barcode_validator.py | 4 +-- strings/join.py | 2 +- 46 files changed, 134 insertions(+), 134 deletions(-) diff --git a/arithmetic_analysis/bisection.py b/arithmetic_analysis/bisection.py index 640913a7acc0..e359cc170072 100644 --- a/arithmetic_analysis/bisection.py +++ b/arithmetic_analysis/bisection.py @@ -8,7 +8,7 @@ def bisection(function: Callable[[float], float], a: float, b: float) -> float: 1.0000000149011612 >>> bisection(lambda x: x ** 3 - 1, 2, 1000) Traceback (most recent call last): - ... + ... ValueError: could not find root in given interval. >>> bisection(lambda x: x ** 2 - 4 * x + 3, 0, 2) 1.0 @@ -16,7 +16,7 @@ def bisection(function: Callable[[float], float], a: float, b: float) -> float: 3.0 >>> bisection(lambda x: x ** 2 - 4 * x + 3, 4, 1000) Traceback (most recent call last): - ... + ... ValueError: could not find root in given interval. """ start: float = a diff --git a/arithmetic_analysis/intersection.py b/arithmetic_analysis/intersection.py index 49213dd05988..826c0ead0a00 100644 --- a/arithmetic_analysis/intersection.py +++ b/arithmetic_analysis/intersection.py @@ -10,7 +10,7 @@ def intersection(function: Callable[[float], float], x0: float, x1: float) -> fl 0.9999999999954654 >>> intersection(lambda x: x ** 3 - 1, 5, 5) Traceback (most recent call last): - ... + ... ZeroDivisionError: float division by zero, could not find root >>> intersection(lambda x: x ** 3 - 1, 100, 200) 1.0000000000003888 @@ -24,7 +24,7 @@ def intersection(function: Callable[[float], float], x0: float, x1: float) -> fl 0.0 >>> intersection(math.cos, -math.pi, math.pi) Traceback (most recent call last): - ... + ... 
ZeroDivisionError: float division by zero, could not find root """ x_n: float = x0 diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py index 3087309e8c3d..fe506a94a65d 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -42,7 +42,7 @@ def jacobi_iteration_method( >>> iterations = 3 >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) Traceback (most recent call last): - ... + ... ValueError: Coefficient matrix dimensions must be nxn but received 2x3 >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) @@ -51,7 +51,7 @@ def jacobi_iteration_method( >>> iterations = 3 >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) Traceback (most recent call last): - ... + ... ValueError: Coefficient and constant matrices dimensions must be nxn and nx1 but received 3x3 and 2x1 @@ -61,7 +61,7 @@ def jacobi_iteration_method( >>> iterations = 3 >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) Traceback (most recent call last): - ... + ... ValueError: Number of initial values must be equal to number of rows in coefficient matrix but received 2 and 3 @@ -71,7 +71,7 @@ def jacobi_iteration_method( >>> iterations = 0 >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) Traceback (most recent call last): - ... + ... ValueError: Iterations must be at least 1 """ @@ -138,7 +138,7 @@ def strictly_diagonally_dominant(table: NDArray[float64]) -> bool: >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 3, -4]]) >>> strictly_diagonally_dominant(table) Traceback (most recent call last): - ... + ... ValueError: Coefficient matrix is not strictly diagonally dominant """ diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index 1e98b9066c3f..217719cf4da1 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -31,7 +31,7 @@ def lower_upper_decomposition( >>> matrix = np.array([[2, -2, 1], [0, 1, 2]]) >>> lower_upper_decomposition(matrix) Traceback (most recent call last): - ... + ... ValueError: 'table' has to be of square shaped array but got a 2x3 array: [[ 2 -2 1] [ 0 1 2]] diff --git a/arithmetic_analysis/newton_method.py b/arithmetic_analysis/newton_method.py index c4018a0f260c..5127bfcafd9a 100644 --- a/arithmetic_analysis/newton_method.py +++ b/arithmetic_analysis/newton_method.py @@ -28,7 +28,7 @@ def newton( 1.5707963267948966 >>> newton(math.cos, lambda x: -math.sin(x), 0) Traceback (most recent call last): - ... + ... ZeroDivisionError: Could not find root """ prev_guess = float(starting_int) diff --git a/arithmetic_analysis/newton_raphson_new.py b/arithmetic_analysis/newton_raphson_new.py index 19ea4ce21806..dd1d7e0929cf 100644 --- a/arithmetic_analysis/newton_raphson_new.py +++ b/arithmetic_analysis/newton_raphson_new.py @@ -32,7 +32,7 @@ def newton_raphson( 1.2186556186174883e-10 >>> newton_raphson('cos(x)', 0) Traceback (most recent call last): - ... + ... ZeroDivisionError: Could not find root """ diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py index 6e9b31bd1133..bb650ece3f5e 100644 --- a/backtracking/knight_tour.py +++ b/backtracking/knight_tour.py @@ -78,7 +78,7 @@ def open_knight_tour(n: int) -> list[list[int]]: >>> open_knight_tour(2) Traceback (most recent call last): - ... + ... 
ValueError: Open Kight Tour cannot be performed on a board of size 2 """ diff --git a/conversions/binary_to_decimal.py b/conversions/binary_to_decimal.py index a7625e475bdc..914a9318c225 100644 --- a/conversions/binary_to_decimal.py +++ b/conversions/binary_to_decimal.py @@ -12,15 +12,15 @@ def bin_to_decimal(bin_string: str) -> int: 0 >>> bin_to_decimal("a") Traceback (most recent call last): - ... + ... ValueError: Non-binary value was passed to the function >>> bin_to_decimal("") Traceback (most recent call last): - ... + ... ValueError: Empty string was passed to the function >>> bin_to_decimal("39") Traceback (most recent call last): - ... + ... ValueError: Non-binary value was passed to the function """ bin_string = str(bin_string).strip() diff --git a/conversions/binary_to_hexadecimal.py b/conversions/binary_to_hexadecimal.py index 89f7af696357..a3855bb70b52 100644 --- a/conversions/binary_to_hexadecimal.py +++ b/conversions/binary_to_hexadecimal.py @@ -30,11 +30,11 @@ def bin_to_hexadecimal(binary_str: str) -> str: '-0x1d' >>> bin_to_hexadecimal('a') Traceback (most recent call last): - ... + ... ValueError: Non-binary value was passed to the function >>> bin_to_hexadecimal('') Traceback (most recent call last): - ... + ... ValueError: Empty string was passed to the function """ # Sanitising parameter diff --git a/conversions/binary_to_octal.py b/conversions/binary_to_octal.py index 35ede95b134d..82f81e06234a 100644 --- a/conversions/binary_to_octal.py +++ b/conversions/binary_to_octal.py @@ -9,11 +9,11 @@ >>> bin_to_octal("") Traceback (most recent call last): -... + ... ValueError: Empty string was passed to the function >>> bin_to_octal("a-1") Traceback (most recent call last): -... + ... ValueError: Non-binary value was passed to the function """ diff --git a/conversions/decimal_to_any.py b/conversions/decimal_to_any.py index 908c89e8fb6b..11a2af294829 100644 --- a/conversions/decimal_to_any.py +++ b/conversions/decimal_to_any.py @@ -29,32 +29,32 @@ def decimal_to_any(num: int, base: int) -> str: >>> # negatives will error >>> decimal_to_any(-45, 8) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... ValueError: parameter must be positive int >>> # floats will error >>> decimal_to_any(34.4, 6) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... TypeError: int() can't convert non-string with explicit base >>> # a float base will error >>> decimal_to_any(5, 2.5) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... TypeError: 'float' object cannot be interpreted as an integer >>> # a str base will error >>> decimal_to_any(10, '16') # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... TypeError: 'str' object cannot be interpreted as an integer >>> # a base less than 2 will error >>> decimal_to_any(7, 0) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... ValueError: base must be >= 2 >>> # a base greater than 36 will error >>> decimal_to_any(34, 37) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... ValueError: base must be <= 36 """ if isinstance(num, float): diff --git a/conversions/decimal_to_binary.py b/conversions/decimal_to_binary.py index c21cdbcaec68..cfda57ca714a 100644 --- a/conversions/decimal_to_binary.py +++ b/conversions/decimal_to_binary.py @@ -19,12 +19,12 @@ def decimal_to_binary(num: int) -> str: >>> # other floats will error >>> decimal_to_binary(16.16) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... 
TypeError: 'float' object cannot be interpreted as an integer >>> # strings will error as well >>> decimal_to_binary('0xfffff') # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... TypeError: 'str' object cannot be interpreted as an integer """ diff --git a/conversions/decimal_to_binary_recursion.py b/conversions/decimal_to_binary_recursion.py index c149ea86592f..05833ca670c3 100644 --- a/conversions/decimal_to_binary_recursion.py +++ b/conversions/decimal_to_binary_recursion.py @@ -7,7 +7,7 @@ def binary_recursive(decimal: int) -> str: '1001000' >>> binary_recursive("number") Traceback (most recent call last): - ... + ... ValueError: invalid literal for int() with base 10: 'number' """ decimal = int(decimal) @@ -30,11 +30,11 @@ def main(number: str) -> str: '-0b101000' >>> main(40.8) Traceback (most recent call last): - ... + ... ValueError: Input value is not an integer >>> main("forty") Traceback (most recent call last): - ... + ... ValueError: Input value is not an integer """ number = str(number).strip() diff --git a/conversions/decimal_to_hexadecimal.py b/conversions/decimal_to_hexadecimal.py index 2389c6d1f2a1..5ea48401f488 100644 --- a/conversions/decimal_to_hexadecimal.py +++ b/conversions/decimal_to_hexadecimal.py @@ -46,12 +46,12 @@ def decimal_to_hexadecimal(decimal: float) -> str: >>> # other floats will error >>> decimal_to_hexadecimal(16.16) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... AssertionError >>> # strings will error as well >>> decimal_to_hexadecimal('0xfffff') # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... AssertionError >>> # results are the same when compared to Python's default hex function >>> decimal_to_hexadecimal(-256) == hex(-256) diff --git a/conversions/hex_to_bin.py b/conversions/hex_to_bin.py index e358d810b581..b872ab5cbce6 100644 --- a/conversions/hex_to_bin.py +++ b/conversions/hex_to_bin.py @@ -21,11 +21,11 @@ def hex_to_bin(hex_num: str) -> int: -1111111111111111 >>> hex_to_bin("F-f") Traceback (most recent call last): - ... + ... ValueError: Invalid value was passed to the function >>> hex_to_bin("") Traceback (most recent call last): - ... + ... ValueError: No value was passed to the function """ diff --git a/conversions/hexadecimal_to_decimal.py b/conversions/hexadecimal_to_decimal.py index beb1c2c3ded6..209e4aebb368 100644 --- a/conversions/hexadecimal_to_decimal.py +++ b/conversions/hexadecimal_to_decimal.py @@ -18,15 +18,15 @@ def hex_to_decimal(hex_string: str) -> int: -255 >>> hex_to_decimal("F-f") Traceback (most recent call last): - ... + ... ValueError: Non-hexadecimal value was passed to the function >>> hex_to_decimal("") Traceback (most recent call last): - ... + ... ValueError: Empty string was passed to the function >>> hex_to_decimal("12m") Traceback (most recent call last): - ... + ... ValueError: Non-hexadecimal value was passed to the function """ hex_string = hex_string.strip().lower() diff --git a/conversions/octal_to_decimal.py b/conversions/octal_to_decimal.py index 551311e2651e..7f006f20e0c8 100644 --- a/conversions/octal_to_decimal.py +++ b/conversions/octal_to_decimal.py @@ -4,27 +4,27 @@ def oct_to_decimal(oct_string: str) -> int: >>> oct_to_decimal("") Traceback (most recent call last): - ... + ... ValueError: Empty string was passed to the function >>> oct_to_decimal("-") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function >>> oct_to_decimal("e") Traceback (most recent call last): - ... + ... 
ValueError: Non-octal value was passed to the function >>> oct_to_decimal("8") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function >>> oct_to_decimal("-e") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function >>> oct_to_decimal("-8") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function >>> oct_to_decimal("1") 1 @@ -38,7 +38,7 @@ def oct_to_decimal(oct_string: str) -> int: -37 >>> oct_to_decimal("-") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function >>> oct_to_decimal("0") 0 @@ -46,15 +46,15 @@ def oct_to_decimal(oct_string: str) -> int: -2093 >>> oct_to_decimal("2-0Fm") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function >>> oct_to_decimal("") Traceback (most recent call last): - ... + ... ValueError: Empty string was passed to the function >>> oct_to_decimal("19") Traceback (most recent call last): - ... + ... ValueError: Non-octal value was passed to the function """ oct_string = str(oct_string).strip() diff --git a/conversions/temperature_conversions.py b/conversions/temperature_conversions.py index 167c9dc64727..e5af465561f9 100644 --- a/conversions/temperature_conversions.py +++ b/conversions/temperature_conversions.py @@ -23,7 +23,7 @@ def celsius_to_fahrenheit(celsius: float, ndigits: int = 2) -> float: 104.0 >>> celsius_to_fahrenheit("celsius") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'celsius' """ return round((float(celsius) * 9 / 5) + 32, ndigits) @@ -47,7 +47,7 @@ def celsius_to_kelvin(celsius: float, ndigits: int = 2) -> float: 313.15 >>> celsius_to_kelvin("celsius") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'celsius' """ return round(float(celsius) + 273.15, ndigits) @@ -71,7 +71,7 @@ def celsius_to_rankine(celsius: float, ndigits: int = 2) -> float: 563.67 >>> celsius_to_rankine("celsius") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'celsius' """ return round((float(celsius) * 9 / 5) + 491.67, ndigits) @@ -101,7 +101,7 @@ def fahrenheit_to_celsius(fahrenheit: float, ndigits: int = 2) -> float: 37.78 >>> fahrenheit_to_celsius("fahrenheit") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'fahrenheit' """ return round((float(fahrenheit) - 32) * 5 / 9, ndigits) @@ -131,7 +131,7 @@ def fahrenheit_to_kelvin(fahrenheit: float, ndigits: int = 2) -> float: 310.93 >>> fahrenheit_to_kelvin("fahrenheit") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'fahrenheit' """ return round(((float(fahrenheit) - 32) * 5 / 9) + 273.15, ndigits) @@ -161,7 +161,7 @@ def fahrenheit_to_rankine(fahrenheit: float, ndigits: int = 2) -> float: 559.67 >>> fahrenheit_to_rankine("fahrenheit") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'fahrenheit' """ return round(float(fahrenheit) + 459.67, ndigits) @@ -185,7 +185,7 @@ def kelvin_to_celsius(kelvin: float, ndigits: int = 2) -> float: 42.35 >>> kelvin_to_celsius("kelvin") Traceback (most recent call last): - ... + ... 
ValueError: could not convert string to float: 'kelvin' """ return round(float(kelvin) - 273.15, ndigits) @@ -209,7 +209,7 @@ def kelvin_to_fahrenheit(kelvin: float, ndigits: int = 2) -> float: 108.23 >>> kelvin_to_fahrenheit("kelvin") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'kelvin' """ return round(((float(kelvin) - 273.15) * 9 / 5) + 32, ndigits) @@ -233,7 +233,7 @@ def kelvin_to_rankine(kelvin: float, ndigits: int = 2) -> float: 72.0 >>> kelvin_to_rankine("kelvin") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'kelvin' """ return round((float(kelvin) * 9 / 5), ndigits) @@ -257,7 +257,7 @@ def rankine_to_celsius(rankine: float, ndigits: int = 2) -> float: -97.87 >>> rankine_to_celsius("rankine") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'rankine' """ return round((float(rankine) - 491.67) * 5 / 9, ndigits) @@ -277,7 +277,7 @@ def rankine_to_fahrenheit(rankine: float, ndigits: int = 2) -> float: -144.17 >>> rankine_to_fahrenheit("rankine") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'rankine' """ return round(float(rankine) - 459.67, ndigits) @@ -297,7 +297,7 @@ def rankine_to_kelvin(rankine: float, ndigits: int = 2) -> float: 22.22 >>> rankine_to_kelvin("rankine") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'rankine' """ return round((float(rankine) * 5 / 9), ndigits) @@ -316,7 +316,7 @@ def reaumur_to_kelvin(reaumur: float, ndigits: int = 2) -> float: 323.15 >>> reaumur_to_kelvin("reaumur") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'reaumur' """ return round((float(reaumur) * 1.25 + 273.15), ndigits) @@ -335,7 +335,7 @@ def reaumur_to_fahrenheit(reaumur: float, ndigits: int = 2) -> float: 122.0 >>> reaumur_to_fahrenheit("reaumur") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'reaumur' """ return round((float(reaumur) * 2.25 + 32), ndigits) @@ -354,7 +354,7 @@ def reaumur_to_celsius(reaumur: float, ndigits: int = 2) -> float: 50.0 >>> reaumur_to_celsius("reaumur") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'reaumur' """ return round((float(reaumur) * 1.25), ndigits) @@ -373,7 +373,7 @@ def reaumur_to_rankine(reaumur: float, ndigits: int = 2) -> float: 581.67 >>> reaumur_to_rankine("reaumur") Traceback (most recent call last): - ... + ... ValueError: could not convert string to float: 'reaumur' """ return round((float(reaumur) * 2.25 + 32 + 459.67), ndigits) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index fc60540a1f3b..fc512944eb50 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -196,7 +196,7 @@ def binary_search_tree() -> None: 1 4 7 6 3 13 14 10 8 >>> BinarySearchTree().search(6) Traceback (most recent call last): - ... + ... IndexError: Warning: Tree is empty! please use another. 
""" testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7) diff --git a/data_structures/binary_tree/binary_tree_mirror.py b/data_structures/binary_tree/binary_tree_mirror.py index cdd56e35d765..1ef950ad62d7 100644 --- a/data_structures/binary_tree/binary_tree_mirror.py +++ b/data_structures/binary_tree/binary_tree_mirror.py @@ -21,11 +21,11 @@ def binary_tree_mirror(binary_tree: dict, root: int = 1) -> dict: {1: [3, 2], 2: [5, 4], 3: [7, 6], 4: [11, 10]} >>> binary_tree_mirror({ 1: [2,3], 2: [4,5], 3: [6,7], 4: [10,11]}, 5) Traceback (most recent call last): - ... + ... ValueError: root 5 is not present in the binary_tree >>> binary_tree_mirror({}, 5) Traceback (most recent call last): - ... + ... ValueError: binary tree cannot be empty """ if not binary_tree: diff --git a/data_structures/binary_tree/number_of_possible_binary_trees.py b/data_structures/binary_tree/number_of_possible_binary_trees.py index 1ad8f2ed4287..684c518b1eb6 100644 --- a/data_structures/binary_tree/number_of_possible_binary_trees.py +++ b/data_structures/binary_tree/number_of_possible_binary_trees.py @@ -67,7 +67,7 @@ def factorial(n: int) -> int: True >>> factorial(-5) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... ValueError: factorial() not defined for negative values """ if n < 0: diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 9e996ef0fb9d..90b6b6eb2a32 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -64,11 +64,11 @@ def insert_at_nth(self, index: int, data): >>> linked_list = DoublyLinkedList() >>> linked_list.insert_at_nth(-1, 666) Traceback (most recent call last): - .... + .... IndexError: list index out of range >>> linked_list.insert_at_nth(1, 666) Traceback (most recent call last): - .... + .... IndexError: list index out of range >>> linked_list.insert_at_nth(0, 2) >>> linked_list.insert_at_nth(0, 1) @@ -78,7 +78,7 @@ def insert_at_nth(self, index: int, data): '1->2->3->4' >>> linked_list.insert_at_nth(5, 5) Traceback (most recent call last): - .... + .... IndexError: list index out of range """ if not 0 <= index <= len(self): @@ -114,7 +114,7 @@ def delete_at_nth(self, index: int): >>> linked_list = DoublyLinkedList() >>> linked_list.delete_at_nth(0) Traceback (most recent call last): - .... + .... IndexError: list index out of range >>> for i in range(0, 5): ... linked_list.insert_at_nth(i, i + 1) @@ -128,7 +128,7 @@ def delete_at_nth(self, index: int): '2->4' >>> linked_list.delete_at_nth(2) Traceback (most recent call last): - .... + .... IndexError: list index out of range """ if not 0 <= index <= len(self) - 1: diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index 89a05ae81d4c..3e52c7e43cf5 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -95,11 +95,11 @@ def __getitem__(self, index: int) -> Any: True >>> linked_list[-10] Traceback (most recent call last): - ... + ... ValueError: list index out of range. >>> linked_list[len(linked_list)] Traceback (most recent call last): - ... + ... ValueError: list index out of range. """ if not 0 <= index < len(self): @@ -122,11 +122,11 @@ def __setitem__(self, index: int, data: Any) -> None: -666 >>> linked_list[-10] = 666 Traceback (most recent call last): - ... + ... ValueError: list index out of range. 
>>> linked_list[len(linked_list)] = 666 Traceback (most recent call last): - ... + ... ValueError: list index out of range. """ if not 0 <= index < len(self): @@ -233,7 +233,7 @@ def delete_head(self) -> Any: 'third' >>> linked_list.delete_head() Traceback (most recent call last): - ... + ... IndexError: List index out of range. """ return self.delete_nth(0) @@ -260,7 +260,7 @@ def delete_tail(self) -> Any: # delete from tail 'first' >>> linked_list.delete_tail() Traceback (most recent call last): - ... + ... IndexError: List index out of range. """ return self.delete_nth(len(self) - 1) @@ -281,11 +281,11 @@ def delete_nth(self, index: int = 0) -> Any: first->third >>> linked_list.delete_nth(5) # this raises error Traceback (most recent call last): - ... + ... IndexError: List index out of range. >>> linked_list.delete_nth(-1) # this also raises error Traceback (most recent call last): - ... + ... IndexError: List index out of range. """ if not 0 <= index <= len(self) - 1: # test if index is valid diff --git a/data_structures/queue/linked_queue.py b/data_structures/queue/linked_queue.py index 3675da7db78a..3af97d28e4f7 100644 --- a/data_structures/queue/linked_queue.py +++ b/data_structures/queue/linked_queue.py @@ -96,7 +96,7 @@ def put(self, item: Any) -> None: >>> queue = LinkedQueue() >>> queue.get() Traceback (most recent call last): - ... + ... IndexError: dequeue from empty queue >>> for i in range(1, 6): ... queue.put(i) @@ -116,7 +116,7 @@ def get(self) -> Any: >>> queue = LinkedQueue() >>> queue.get() Traceback (most recent call last): - ... + ... IndexError: dequeue from empty queue >>> queue = LinkedQueue() >>> for i in range(1, 6): diff --git a/data_structures/queue/priority_queue_using_list.py b/data_structures/queue/priority_queue_using_list.py index c5cf26433fff..f61b5e8e664d 100644 --- a/data_structures/queue/priority_queue_using_list.py +++ b/data_structures/queue/priority_queue_using_list.py @@ -58,7 +58,7 @@ class FixedPriorityQueue: 4 >>> fpq.dequeue() Traceback (most recent call last): - ... + ... data_structures.queue.priority_queue_using_list.UnderFlowError: All queues are empty >>> print(fpq) Priority 0: [] diff --git a/data_structures/stacks/infix_to_postfix_conversion.py b/data_structures/stacks/infix_to_postfix_conversion.py index b812d108e290..9017443091cf 100644 --- a/data_structures/stacks/infix_to_postfix_conversion.py +++ b/data_structures/stacks/infix_to_postfix_conversion.py @@ -21,7 +21,7 @@ def infix_to_postfix(expression_str: str) -> str: """ >>> infix_to_postfix("(1*(2+3)+4))") Traceback (most recent call last): - ... + ... ValueError: Mismatched parentheses >>> infix_to_postfix("") '' diff --git a/data_structures/stacks/stack_with_singly_linked_list.py b/data_structures/stacks/stack_with_singly_linked_list.py index 903ae39db4b5..f5ce83b863ce 100644 --- a/data_structures/stacks/stack_with_singly_linked_list.py +++ b/data_structures/stacks/stack_with_singly_linked_list.py @@ -109,7 +109,7 @@ def pop(self) -> T: >>> stack = LinkedStack() >>> stack.pop() Traceback (most recent call last): - ... + ... 
IndexError: pop from empty stack >>> stack.push("c") >>> stack.push("b") diff --git a/dynamic_programming/longest_common_substring.py b/dynamic_programming/longest_common_substring.py index 84a9f18609f9..e2f944a5e336 100644 --- a/dynamic_programming/longest_common_substring.py +++ b/dynamic_programming/longest_common_substring.py @@ -32,7 +32,7 @@ def longest_common_substring(text1: str, text2: str) -> str: 'Site:Geeks' >>> longest_common_substring(1, 1) Traceback (most recent call last): - ... + ... ValueError: longest_common_substring() takes two strings for inputs """ diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 3227adf53ae4..5cf8d691b1d7 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -32,17 +32,17 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, >>> genes.remove("e") >>> basic("test", genes) Traceback (most recent call last): - ... + ... ValueError: ['e'] is not in genes list, evolution cannot converge >>> genes.remove("s") >>> basic("test", genes) Traceback (most recent call last): - ... + ... ValueError: ['e', 's'] is not in genes list, evolution cannot converge >>> genes.remove("t") >>> basic("test", genes) Traceback (most recent call last): - ... + ... ValueError: ['e', 's', 't'] is not in genes list, evolution cannot converge """ diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index b9791c860a74..079731487b3a 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -168,7 +168,7 @@ def euclidean_length(self) -> float: 9.539392014169456 >>> Vector([]).euclidean_length() Traceback (most recent call last): - ... + ... Exception: Vector is empty """ if len(self.__components) == 0: @@ -186,7 +186,7 @@ def angle(self, other: Vector, deg: bool = False) -> float: 85.40775111366095 >>> Vector([3, 4, -1]).angle(Vector([2, -1])) Traceback (most recent call last): - ... + ... Exception: invalid operand! """ num = self * other diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index ec1b9f9e3e13..2f5fc46c065e 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -70,7 +70,7 @@ def similarity_search( >>> value_array = np.array([1]) >>> similarity_search(dataset, value_array) Traceback (most recent call last): - ... + ... ValueError: Wrong input data's dimensions... dataset : 2, value_array : 1 2. If data's shapes are different. @@ -80,7 +80,7 @@ def similarity_search( >>> value_array = np.array([[0, 0, 0], [0, 0, 1]]) >>> similarity_search(dataset, value_array) Traceback (most recent call last): - ... + ... ValueError: Wrong input data's shape... dataset : 2, value_array : 3 3. If data types are different. @@ -90,7 +90,7 @@ def similarity_search( >>> value_array = np.array([[0, 0], [0, 1]], dtype=np.int32) >>> similarity_search(dataset, value_array) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): - ... + ... TypeError: Input data have different datatype... dataset : float32, value_array : int32 """ diff --git a/maths/bisection.py b/maths/bisection.py index 93cc2247b64e..45f26d8d88e4 100644 --- a/maths/bisection.py +++ b/maths/bisection.py @@ -32,7 +32,7 @@ def bisection(a: float, b: float) -> float: 3.158203125 >>> bisection(2, 3) Traceback (most recent call last): - ... + ... ValueError: Wrong space! 
""" # Bolzano theory in order to find if there is a root between a and b diff --git a/maths/catalan_number.py b/maths/catalan_number.py index 4a1280a45bf2..85607dc1eca4 100644 --- a/maths/catalan_number.py +++ b/maths/catalan_number.py @@ -18,15 +18,15 @@ def catalan(number: int) -> int: 14 >>> catalan(0) Traceback (most recent call last): - ... + ... ValueError: Input value of [number=0] must be > 0 >>> catalan(-1) Traceback (most recent call last): - ... + ... ValueError: Input value of [number=-1] must be > 0 >>> catalan(5.0) Traceback (most recent call last): - ... + ... TypeError: Input value of [number=5.0] must be an integer """ diff --git a/maths/fibonacci.py b/maths/fibonacci.py index 07bd6d2ece51..e0da66ee5e3b 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -47,7 +47,7 @@ def fib_iterative(n: int) -> list[int]: [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] >>> fib_iterative(-1) Traceback (most recent call last): - ... + ... Exception: n is negative """ if n < 0: @@ -73,7 +73,7 @@ def fib_recursive(n: int) -> list[int]: [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] >>> fib_iterative(-1) Traceback (most recent call last): - ... + ... Exception: n is negative """ @@ -105,7 +105,7 @@ def fib_memoization(n: int) -> list[int]: [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] >>> fib_iterative(-1) Traceback (most recent call last): - ... + ... Exception: n is negative """ if n < 0: @@ -146,11 +146,11 @@ def fib_binet(n: int) -> list[int]: [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] >>> fib_binet(-1) Traceback (most recent call last): - ... + ... Exception: n is negative >>> fib_binet(1475) Traceback (most recent call last): - ... + ... Exception: n is too large """ if n < 0: diff --git a/maths/maclaurin_series.py b/maths/maclaurin_series.py index a2619d4e6b92..e55839bc15ba 100644 --- a/maths/maclaurin_series.py +++ b/maths/maclaurin_series.py @@ -26,19 +26,19 @@ def maclaurin_sin(theta: float, accuracy: int = 30) -> float: 0.5440211108893703 >>> maclaurin_sin("10") Traceback (most recent call last): - ... + ... ValueError: maclaurin_sin() requires either an int or float for theta >>> maclaurin_sin(10, -30) Traceback (most recent call last): - ... + ... ValueError: maclaurin_sin() requires a positive int for accuracy >>> maclaurin_sin(10, 30.5) Traceback (most recent call last): - ... + ... ValueError: maclaurin_sin() requires a positive int for accuracy >>> maclaurin_sin(10, "30") Traceback (most recent call last): - ... + ... ValueError: maclaurin_sin() requires a positive int for accuracy """ @@ -78,19 +78,19 @@ def maclaurin_cos(theta: float, accuracy: int = 30) -> float: -0.8390715290764521 >>> maclaurin_cos("10") Traceback (most recent call last): - ... + ... ValueError: maclaurin_cos() requires either an int or float for theta >>> maclaurin_cos(10, -30) Traceback (most recent call last): - ... + ... ValueError: maclaurin_cos() requires a positive int for accuracy >>> maclaurin_cos(10, 30.5) Traceback (most recent call last): - ... + ... ValueError: maclaurin_cos() requires a positive int for accuracy >>> maclaurin_cos(10, "30") Traceback (most recent call last): - ... + ... ValueError: maclaurin_cos() requires a positive int for accuracy """ diff --git a/maths/proth_number.py b/maths/proth_number.py index 6b15190249f0..ce911473a2d2 100644 --- a/maths/proth_number.py +++ b/maths/proth_number.py @@ -16,15 +16,15 @@ def proth(number: int) -> int: 25 >>> proth(0) Traceback (most recent call last): - ... + ... 
ValueError: Input value of [number=0] must be > 0 >>> proth(-1) Traceback (most recent call last): - ... + ... ValueError: Input value of [number=-1] must be > 0 >>> proth(6.0) Traceback (most recent call last): - ... + ... TypeError: Input value of [number=6.0] must be an integer """ diff --git a/maths/sylvester_sequence.py b/maths/sylvester_sequence.py index 0cd99affe046..114c9dd58582 100644 --- a/maths/sylvester_sequence.py +++ b/maths/sylvester_sequence.py @@ -18,12 +18,12 @@ def sylvester(number: int) -> int: >>> sylvester(-1) Traceback (most recent call last): - ... + ... ValueError: The input value of [n=-1] has to be > 0 >>> sylvester(8.0) Traceback (most recent call last): - ... + ... AssertionError: The input value of [n=8.0] is not an integer """ assert isinstance(number, int), f"The input value of [n={number}] is not an integer" diff --git a/maths/zellers_congruence.py b/maths/zellers_congruence.py index 2d4a22a0a5ba..624bbfe1061c 100644 --- a/maths/zellers_congruence.py +++ b/maths/zellers_congruence.py @@ -14,11 +14,11 @@ def zeller(date_input: str) -> str: Validate out of range month >>> zeller('13-31-2010') Traceback (most recent call last): - ... + ... ValueError: Month must be between 1 - 12 >>> zeller('.2-31-2010') Traceback (most recent call last): - ... + ... ValueError: invalid literal for int() with base 10: '.2' Validate out of range date: diff --git a/neural_network/perceptron.py b/neural_network/perceptron.py index f04c81424c81..487842067ca3 100644 --- a/neural_network/perceptron.py +++ b/neural_network/perceptron.py @@ -29,15 +29,15 @@ def __init__( >>> p = Perceptron([], (0, 1, 2)) Traceback (most recent call last): - ... + ... ValueError: Sample data can not be empty >>> p = Perceptron(([0], 1, 2), []) Traceback (most recent call last): - ... + ... ValueError: Target data can not be empty >>> p = Perceptron(([0], 1, 2), (0, 1)) Traceback (most recent call last): - ... + ... ValueError: Sample data and Target data do not have matching lengths """ self.sample = sample diff --git a/project_euler/problem_004/sol1.py b/project_euler/problem_004/sol1.py index db6133a1a1d2..b1e229289988 100644 --- a/project_euler/problem_004/sol1.py +++ b/project_euler/problem_004/sol1.py @@ -26,7 +26,7 @@ def solution(n: int = 998001) -> int: 39893 >>> solution(10000) Traceback (most recent call last): - ... + ... ValueError: That number is larger than our acceptable range. """ diff --git a/project_euler/problem_010/sol3.py b/project_euler/problem_010/sol3.py index 72e2894df293..60abbd57194b 100644 --- a/project_euler/problem_010/sol3.py +++ b/project_euler/problem_010/sol3.py @@ -30,15 +30,15 @@ def solution(n: int = 2000000) -> int: 10 >>> solution(7.1) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... TypeError: 'float' object cannot be interpreted as an integer >>> solution(-7) # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... IndexError: list assignment index out of range >>> solution("seven") # doctest: +ELLIPSIS Traceback (most recent call last): - ... + ... TypeError: can only concatenate str (not "int") to str """ diff --git a/searches/interpolation_search.py b/searches/interpolation_search.py index f4fa8e1203df..35e6bc506661 100644 --- a/searches/interpolation_search.py +++ b/searches/interpolation_search.py @@ -101,7 +101,7 @@ def __assert_sorted(collection): True >>> __assert_sorted([10, -1, 5]) Traceback (most recent call last): - ... + ... 
ValueError: Collection must be ascending sorted """ if collection != sorted(collection): diff --git a/sorts/bead_sort.py b/sorts/bead_sort.py index d22367c52fa9..e51173643d81 100644 --- a/sorts/bead_sort.py +++ b/sorts/bead_sort.py @@ -20,12 +20,12 @@ def bead_sort(sequence: list) -> list: >>> bead_sort([1, .9, 0.0, 0, -1, -.9]) Traceback (most recent call last): - ... + ... TypeError: Sequence must be list of non-negative integers >>> bead_sort("Hello world") Traceback (most recent call last): - ... + ... TypeError: Sequence must be list of non-negative integers """ if any(not isinstance(x, int) or x < 0 for x in sequence): diff --git a/sorts/msd_radix_sort.py b/sorts/msd_radix_sort.py index 7430fc5a63c8..84460e47b440 100644 --- a/sorts/msd_radix_sort.py +++ b/sorts/msd_radix_sort.py @@ -23,7 +23,7 @@ def msd_radix_sort(list_of_ints: list[int]) -> list[int]: [1, 45, 1209, 540402, 834598] >>> msd_radix_sort([-1, 34, 45]) Traceback (most recent call last): - ... + ... ValueError: All numbers must be positive """ if not list_of_ints: @@ -93,7 +93,7 @@ def msd_radix_sort_inplace(list_of_ints: list[int]): >>> lst = [-1, 34, 23, 4, -42] >>> msd_radix_sort_inplace(lst) Traceback (most recent call last): - ... + ... ValueError: All numbers must be positive """ diff --git a/strings/barcode_validator.py b/strings/barcode_validator.py index 2e1ea87039ef..e050cd337d74 100644 --- a/strings/barcode_validator.py +++ b/strings/barcode_validator.py @@ -47,7 +47,7 @@ def is_valid(barcode: int) -> bool: False >>> is_valid(dwefgiweuf) Traceback (most recent call last): - ... + ... NameError: name 'dwefgiweuf' is not defined """ return len(str(barcode)) == 13 and get_check_digit(barcode) == barcode % 10 @@ -61,7 +61,7 @@ def get_barcode(barcode: str) -> int: 8718452538119 >>> get_barcode("dwefgiweuf") Traceback (most recent call last): - ... + ... ValueError: Barcode 'dwefgiweuf' has alphabetic characters. """ if str(barcode).isalpha(): diff --git a/strings/join.py b/strings/join.py index c17ddd144597..739856c1aa93 100644 --- a/strings/join.py +++ b/strings/join.py @@ -15,7 +15,7 @@ def join(separator: str, separated: list[str]) -> str: 'You are amazing!' >>> join("#", ["a", "b", "c", 1]) Traceback (most recent call last): - ... + ... 
Exception: join() accepts only strings to be joined """ joined = "" From 71e8ed81aeb24820a03b968633884ac10b047ad4 Mon Sep 17 00:00:00 2001 From: Matteo Messmer <40521259+matteomessmer@users.noreply.github.com> Date: Thu, 27 Oct 2022 19:45:58 +0200 Subject: [PATCH 0629/1543] Added spheres union (#6879) * Spheres union * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update volume.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update volume.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * f-strings * Update maths/volume.py Co-authored-by: Christian Clauss * more tests * fix non negative * fix 0 radius * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix tests * fix print * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix comment * fix comment * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update volume.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/volume.py | 52 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/maths/volume.py b/maths/volume.py index a594e1b90feb..da4054646659 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -108,6 +108,51 @@ def vol_spheres_intersect( return vol_spherical_cap(h1, radius_2) + vol_spherical_cap(h2, radius_1) +def vol_spheres_union( + radius_1: float, radius_2: float, centers_distance: float +) -> float: + """ + Calculate the volume of the union of two spheres that possibly intersect. + It is the sum of sphere A and sphere B minus their intersection. + First, it calculates the volumes (v1, v2) of the spheres, + then the volume of the intersection (i) and it returns the sum v1+v2-i. + If centers_distance is 0 then it returns the volume of the larger sphere + :return vol_sphere(radius_1) + vol_sphere(radius_2) + - vol_spheres_intersect(radius_1, radius_2, centers_distance) + + >>> vol_spheres_union(2, 2, 1) + 45.814892864851146 + >>> vol_spheres_union(1.56, 2.2, 1.4) + 48.77802773671288 + >>> vol_spheres_union(0, 2, 1) + Traceback (most recent call last): + ... + ValueError: vol_spheres_union() only accepts non-negative values, non-zero radius + >>> vol_spheres_union('1.56', '2.2', '1.4') + Traceback (most recent call last): + ... + TypeError: '<=' not supported between instances of 'str' and 'int' + >>> vol_spheres_union(1, None, 1) + Traceback (most recent call last): + ... + TypeError: '<=' not supported between instances of 'NoneType' and 'int' + """ + + if radius_1 <= 0 or radius_2 <= 0 or centers_distance < 0: + raise ValueError( + "vol_spheres_union() only accepts non-negative values, non-zero radius" + ) + + if centers_distance == 0: + return vol_sphere(max(radius_1, radius_2)) + + return ( + vol_sphere(radius_1) + + vol_sphere(radius_2) + - vol_spheres_intersect(radius_1, radius_2, centers_distance) + ) + + def vol_cuboid(width: float, height: float, length: float) -> float: """ Calculate the Volume of a Cuboid. 
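The vol_spheres_union() function added above applies inclusion-exclusion, V(A ∪ B) = V(A) + V(B) - V(A ∩ B). A minimal standalone check of the degenerate case it short-circuits (centers_distance == 0, where the intersection is the smaller sphere and the union collapses to the larger one); sphere_volume() below is an illustrative stand-in, not the helper from maths/volume.py:

from math import isclose, pi


def sphere_volume(radius: float) -> float:
    # V = (4 / 3) * pi * r**3 (illustrative stand-in for vol_sphere)
    return 4 / 3 * pi * radius**3


# Fully overlapping spheres: the intersection equals the smaller sphere, so
# V(A) + V(B) - V(A ∩ B) reduces to the volume of the larger sphere alone.
larger, smaller = 2.0, 1.0
union = sphere_volume(larger) + sphere_volume(smaller) - sphere_volume(smaller)
assert isclose(union, sphere_volume(larger))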
@@ -408,12 +453,13 @@ def main(): print(f"Sphere: {vol_sphere(2) = }") # ~= 33.5 print(f"Hemisphere: {vol_hemisphere(2) = }") # ~= 16.75 print(f"Circular Cylinder: {vol_circular_cylinder(2, 2) = }") # ~= 25.1 - print( - f"Hollow Circular Cylinder: {vol_hollow_circular_cylinder(1, 2, 3) = }" - ) # ~= 28.3 print(f"Conical Frustum: {vol_conical_frustum(2, 2, 4) = }") # ~= 58.6 print(f"Spherical cap: {vol_spherical_cap(1, 2) = }") # ~= 5.24 print(f"Spheres intersetion: {vol_spheres_intersect(2, 2, 1) = }") # ~= 21.21 + print(f"Spheres union: {vol_spheres_union(2, 2, 1) = }") # ~= 45.81 + print( + f"Hollow Circular Cylinder: {vol_hollow_circular_cylinder(1, 2, 3) = }" + ) # ~= 28.3 if __name__ == "__main__": From 501a1cf0c7b31773fb02bc2966f5c1db99311b36 Mon Sep 17 00:00:00 2001 From: Alexandre Velloso <4320811+AlexandreVelloso@users.noreply.github.com> Date: Thu, 27 Oct 2022 21:51:14 +0100 Subject: [PATCH 0630/1543] Remove unnecessary else statement (#7759) * Remove unnecessary else statement * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/karatsuba.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/maths/karatsuba.py b/maths/karatsuba.py index b772c0d77039..4bf4aecdc068 100644 --- a/maths/karatsuba.py +++ b/maths/karatsuba.py @@ -10,18 +10,18 @@ def karatsuba(a, b): """ if len(str(a)) == 1 or len(str(b)) == 1: return a * b - else: - m1 = max(len(str(a)), len(str(b))) - m2 = m1 // 2 - a1, a2 = divmod(a, 10**m2) - b1, b2 = divmod(b, 10**m2) + m1 = max(len(str(a)), len(str(b))) + m2 = m1 // 2 - x = karatsuba(a2, b2) - y = karatsuba((a1 + a2), (b1 + b2)) - z = karatsuba(a1, b1) + a1, a2 = divmod(a, 10**m2) + b1, b2 = divmod(b, 10**m2) - return (z * 10 ** (2 * m2)) + ((y - z - x) * 10 ** (m2)) + (x) + x = karatsuba(a2, b2) + y = karatsuba((a1 + a2), (b1 + b2)) + z = karatsuba(a1, b1) + + return (z * 10 ** (2 * m2)) + ((y - z - x) * 10 ** (m2)) + (x) def main(): From 61eedc16c392823e46ef37cc2a86864fa15e89fe Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 27 Oct 2022 21:52:00 +0100 Subject: [PATCH 0631/1543] Remove useless code in doctests (#7733) * refactor: Fix matrix display deprecation * refactor: Remove useless `print` and `pass` statements * revert: Replace broken doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * revert: Fix failing doctests * chore: Satisfy pre-commit Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- backtracking/hamiltonian_cycle.py | 4 ++-- computer_vision/flip_augmentation.py | 3 --- computer_vision/mosaic_augmentation.py | 3 --- data_structures/heap/binomial_heap.py | 4 ++-- data_structures/heap/heap.py | 8 ++++---- data_structures/heap/min_heap.py | 2 +- data_structures/linked_list/skip_list.py | 3 +++ graphs/gale_shapley_bigraph.py | 2 +- graphs/graph_list.py | 6 +++--- graphs/minimum_spanning_tree_prims2.py | 8 ++++---- graphs/random_graph_generator.py | 2 +- .../local_weighted_learning.py | 2 -- maths/polynomial_evaluation.py | 2 +- maths/radix2_fft.py | 2 +- matrix/matrix_class.py | 6 +++--- searches/simple_binary_search.py | 20 +++++++++---------- sorts/bitonic_sort.py | 12 +++++------ sorts/normal_distribution_quick_sort.md | 4 ++-- sorts/recursive_insertion_sort.py | 12 +++++------ web_programming/reddit.py | 2 -- web_programming/search_books_by_isbn.py | 
5 +---- 21 files changed, 51 insertions(+), 61 deletions(-) diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py index 4c6ae46799f4..4a4156d70b32 100644 --- a/backtracking/hamiltonian_cycle.py +++ b/backtracking/hamiltonian_cycle.py @@ -71,7 +71,7 @@ def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) >>> curr_ind = 1 >>> util_hamilton_cycle(graph, path, curr_ind) True - >>> print(path) + >>> path [0, 1, 2, 4, 3, 0] Case 2: Use exact graph as in previous case, but in the properties taken from @@ -85,7 +85,7 @@ def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) >>> curr_ind = 3 >>> util_hamilton_cycle(graph, path, curr_ind) True - >>> print(path) + >>> path [0, 1, 2, 4, 3, 0] """ diff --git a/computer_vision/flip_augmentation.py b/computer_vision/flip_augmentation.py index 1272357fd03e..93b4e3f6da79 100644 --- a/computer_vision/flip_augmentation.py +++ b/computer_vision/flip_augmentation.py @@ -22,7 +22,6 @@ def main() -> None: Get images list and annotations list from input dir. Update new images and annotations. Save images and annotations in output dir. - >>> pass # A doctest is not possible for this function. """ img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR) print("Processing...") @@ -48,7 +47,6 @@ def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]: - label_dir : Path to label include annotation of images - img_dir : Path to folder contain images Return : List of images path and labels - >>> pass # A doctest is not possible for this function. """ img_paths = [] labels = [] @@ -88,7 +86,6 @@ def update_image_and_anno( - new_imgs_list : image after resize - new_annos_lists : list of new annotation after scale - path_list : list the name of image file - >>> pass # A doctest is not possible for this function. """ new_annos_lists = [] path_list = [] diff --git a/computer_vision/mosaic_augmentation.py b/computer_vision/mosaic_augmentation.py index 4fd81957ce2a..e2953749753f 100644 --- a/computer_vision/mosaic_augmentation.py +++ b/computer_vision/mosaic_augmentation.py @@ -23,7 +23,6 @@ def main() -> None: Get images list and annotations list from input dir. Update new images and annotations. Save images and annotations in output dir. - >>> pass # A doctest is not possible for this function. """ img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR) for index in range(NUMBER_IMAGES): @@ -60,7 +59,6 @@ def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]: - label_dir : Path to label include annotation of images - img_dir : Path to folder contain images Return : List of images path and labels - >>> pass # A doctest is not possible for this function. """ img_paths = [] labels = [] @@ -105,7 +103,6 @@ def update_image_and_anno( - output_img : image after resize - new_anno : list of new annotation after scale - path[0] : get the name of image file - >>> pass # A doctest is not possible for this function. """ output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8) scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index 334b444eaaff..6398c99439cd 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -71,7 +71,7 @@ class BinomialHeap: ... 
first_heap.insert(number) Size test - >>> print(first_heap.size) + >>> first_heap.size 30 Deleting - delete() test @@ -97,7 +97,7 @@ class BinomialHeap: # # # # preOrder() test - >>> print(second_heap.preOrder()) + >>> second_heap.preOrder() [(17, 0), ('#', 1), (31, 1), (20, 2), ('#', 3), ('#', 3), (34, 2), ('#', 3), ('#', 3)] printing Heap - __str__() test diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index 4c19747ec823..071790d18448 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -9,20 +9,20 @@ class Heap: >>> unsorted = [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5] >>> h = Heap() >>> h.build_max_heap(unsorted) - >>> print(h) + >>> h [209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5] >>> >>> h.extract_max() 209 - >>> print(h) + >>> h [201, 107, 25, 103, 11, 15, 1, 9, 7, 5] >>> >>> h.insert(100) - >>> print(h) + >>> h [201, 107, 25, 103, 100, 15, 1, 9, 7, 5, 11] >>> >>> h.heap_sort() - >>> print(h) + >>> h [1, 5, 7, 9, 11, 15, 25, 100, 103, 107, 201] """ diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index d8975eb2dcc7..0403624f285a 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -27,7 +27,7 @@ class MinHeap: >>> myMinHeap.decrease_key(b, -17) >>> print(b) Node(B, -17) - >>> print(myMinHeap["B"]) + >>> myMinHeap["B"] -17 """ diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index a667e3e9bc84..96b0db7c896b 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -443,4 +443,7 @@ def main(): if __name__ == "__main__": + import doctest + + doctest.testmod() main() diff --git a/graphs/gale_shapley_bigraph.py b/graphs/gale_shapley_bigraph.py index 56b8c6c77bcb..f4b3153817c4 100644 --- a/graphs/gale_shapley_bigraph.py +++ b/graphs/gale_shapley_bigraph.py @@ -17,7 +17,7 @@ def stable_matching( >>> donor_pref = [[0, 1, 3, 2], [0, 2, 3, 1], [1, 0, 2, 3], [0, 3, 1, 2]] >>> recipient_pref = [[3, 1, 2, 0], [3, 1, 0, 2], [0, 3, 1, 2], [1, 0, 3, 2]] - >>> print(stable_matching(donor_pref, recipient_pref)) + >>> stable_matching(donor_pref, recipient_pref) [1, 2, 3, 0] """ assert len(donor_pref) == len(recipient_pref) diff --git a/graphs/graph_list.py b/graphs/graph_list.py index f04b7a92390d..e871f3b8a9d6 100644 --- a/graphs/graph_list.py +++ b/graphs/graph_list.py @@ -18,7 +18,7 @@ class GraphAdjacencyList(Generic[T]): Directed graph example: >>> d_graph = GraphAdjacencyList() - >>> d_graph + >>> print(d_graph) {} >>> d_graph.add_edge(0, 1) {0: [1], 1: []} @@ -26,7 +26,7 @@ class GraphAdjacencyList(Generic[T]): {0: [1], 1: [2, 4, 5], 2: [], 4: [], 5: []} >>> d_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7) {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []} - >>> print(d_graph) + >>> d_graph {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []} >>> print(repr(d_graph)) {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []} @@ -68,7 +68,7 @@ class GraphAdjacencyList(Generic[T]): {'a': ['b'], 'b': ['a']} >>> char_graph.add_edge('b', 'c').add_edge('b', 'e').add_edge('b', 'f') {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']} - >>> print(char_graph) + >>> char_graph {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']} """ diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py index d924ee3db1e5..707be783d087 100644 --- a/graphs/minimum_spanning_tree_prims2.py +++ 
b/graphs/minimum_spanning_tree_prims2.py @@ -69,16 +69,16 @@ class MinPriorityQueue(Generic[T]): >>> queue.push(3, 4000) >>> queue.push(4, 3000) - >>> print(queue.extract_min()) + >>> queue.extract_min() 2 >>> queue.update_key(4, 50) - >>> print(queue.extract_min()) + >>> queue.extract_min() 4 - >>> print(queue.extract_min()) + >>> queue.extract_min() 1 - >>> print(queue.extract_min()) + >>> queue.extract_min() 3 """ diff --git a/graphs/random_graph_generator.py b/graphs/random_graph_generator.py index 15ccee5b399c..0e7e18bc8fd9 100644 --- a/graphs/random_graph_generator.py +++ b/graphs/random_graph_generator.py @@ -53,7 +53,7 @@ def complete_graph(vertices_number: int) -> dict: @input: vertices_number (number of vertices), directed (False if the graph is undirected, True otherwise) @example: - >>> print(complete_graph(3)) + >>> complete_graph(3) {0: [1, 2], 1: [0, 2], 2: [0, 1]} """ return { diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py index 6c542ab825aa..df03fe0a178d 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.py +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -71,7 +71,6 @@ def local_weight_regression( def load_data(dataset_name: str, cola_name: str, colb_name: str) -> np.mat: """ Function used for loading data from the seaborn splitting into x and y points - >>> pass # this function has no doctest """ import seaborn as sns @@ -112,7 +111,6 @@ def plot_preds( ) -> plt.plot: """ This function used to plot predictions and display the graph - >>> pass #this function has no doctest """ xsort = training_data_x.copy() xsort.sort(axis=0) diff --git a/maths/polynomial_evaluation.py b/maths/polynomial_evaluation.py index 8ee82467efa1..90a51f521e01 100644 --- a/maths/polynomial_evaluation.py +++ b/maths/polynomial_evaluation.py @@ -45,7 +45,7 @@ def horner(poly: Sequence[float], x: float) -> float: >>> poly = (0.0, 0.0, 5.0, 9.3, 7.0) # f(x) = 7.0x^4 + 9.3x^3 + 5.0x^2 >>> x = -13.0 >>> # f(-13) = 7.0(-13)^4 + 9.3(-13)^3 + 5.0(-13)^2 = 180339.9 - >>> print(evaluate_poly(poly, x)) + >>> evaluate_poly(poly, x) 180339.9 """ poly = (0.0, 0.0, 5.0, 9.3, 7.0) diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index 52442134de59..1def58e1f226 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -39,7 +39,7 @@ class FFT: >>> x = FFT(A, B) Print product - >>> print(x.product) # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5 + >>> x.product # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5 [(-0+0j), (2+0j), (3+0j), (8+0j), (6+0j), (8+0j)] __str__ test diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py index 8b6fefa2124b..0c3078fe6dc8 100644 --- a/matrix/matrix_class.py +++ b/matrix/matrix_class.py @@ -21,9 +21,9 @@ class Matrix: [7. 8. 9.]] Matrix rows and columns are available as 2D arrays - >>> print(matrix.rows) + >>> matrix.rows [[1, 2, 3], [4, 5, 6], [7, 8, 9]] - >>> print(matrix.columns()) + >>> matrix.columns() [[1, 4, 7], [2, 5, 8], [3, 6, 9]] Order is returned as a tuple @@ -55,7 +55,7 @@ class Matrix: [[-3. 6. -3.] [6. -12. 6.] [-3. 6. -3.]] - >>> print(matrix.inverse()) + >>> matrix.inverse() Traceback (most recent call last): ... 
TypeError: Only matrices with a non-zero determinant have an inverse diff --git a/searches/simple_binary_search.py b/searches/simple_binary_search.py index d1f7f7a51cbc..ff043d7369af 100644 --- a/searches/simple_binary_search.py +++ b/searches/simple_binary_search.py @@ -13,25 +13,25 @@ def binary_search(a_list: list[int], item: int) -> bool: """ >>> test_list = [0, 1, 2, 8, 13, 17, 19, 32, 42] - >>> print(binary_search(test_list, 3)) + >>> binary_search(test_list, 3) False - >>> print(binary_search(test_list, 13)) + >>> binary_search(test_list, 13) True - >>> print(binary_search([4, 4, 5, 6, 7], 4)) + >>> binary_search([4, 4, 5, 6, 7], 4) True - >>> print(binary_search([4, 4, 5, 6, 7], -10)) + >>> binary_search([4, 4, 5, 6, 7], -10) False - >>> print(binary_search([-18, 2], -18)) + >>> binary_search([-18, 2], -18) True - >>> print(binary_search([5], 5)) + >>> binary_search([5], 5) True - >>> print(binary_search(['a', 'c', 'd'], 'c')) + >>> binary_search(['a', 'c', 'd'], 'c') True - >>> print(binary_search(['a', 'c', 'd'], 'f')) + >>> binary_search(['a', 'c', 'd'], 'f') False - >>> print(binary_search([], 1)) + >>> binary_search([], 1) False - >>> print(binary_search([-.1, .1 , .8], .1)) + >>> binary_search([-.1, .1 , .8], .1) True >>> binary_search(range(-5000, 5000, 10), 80) True diff --git a/sorts/bitonic_sort.py b/sorts/bitonic_sort.py index 201fecd2ce86..b65f877a45e3 100644 --- a/sorts/bitonic_sort.py +++ b/sorts/bitonic_sort.py @@ -16,19 +16,19 @@ def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> >>> arr = [12, 42, -21, 1] >>> comp_and_swap(arr, 1, 2, 1) - >>> print(arr) + >>> arr [12, -21, 42, 1] >>> comp_and_swap(arr, 1, 2, 0) - >>> print(arr) + >>> arr [12, 42, -21, 1] >>> comp_and_swap(arr, 0, 3, 1) - >>> print(arr) + >>> arr [1, 42, -21, 12] >>> comp_and_swap(arr, 0, 3, 0) - >>> print(arr) + >>> arr [12, 42, -21, 1] """ if (direction == 1 and array[index1] > array[index2]) or ( @@ -46,11 +46,11 @@ def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> No >>> arr = [12, 42, -21, 1] >>> bitonic_merge(arr, 0, 4, 1) - >>> print(arr) + >>> arr [-21, 1, 12, 42] >>> bitonic_merge(arr, 0, 4, 0) - >>> print(arr) + >>> arr [42, 12, 1, -21] """ if length > 1: diff --git a/sorts/normal_distribution_quick_sort.md b/sorts/normal_distribution_quick_sort.md index c073f2cbc81c..27aca340fb3b 100644 --- a/sorts/normal_distribution_quick_sort.md +++ b/sorts/normal_distribution_quick_sort.md @@ -17,8 +17,8 @@ The array elements are taken from a Standard Normal Distribution, having mean = >>> mu, sigma = 0, 1 # mean and standard deviation >>> X = np.random.normal(mu, sigma, p) >>> np.save(outfile, X) ->>> print('The array is') ->>> print(X) +>>> 'The array is' +>>> X ``` diff --git a/sorts/recursive_insertion_sort.py b/sorts/recursive_insertion_sort.py index ab2716f8eae5..297dbe9457e6 100644 --- a/sorts/recursive_insertion_sort.py +++ b/sorts/recursive_insertion_sort.py @@ -14,17 +14,17 @@ def rec_insertion_sort(collection: list, n: int): >>> col = [1, 2, 1] >>> rec_insertion_sort(col, len(col)) - >>> print(col) + >>> col [1, 1, 2] >>> col = [2, 1, 0, -1, -2] >>> rec_insertion_sort(col, len(col)) - >>> print(col) + >>> col [-2, -1, 0, 1, 2] >>> col = [1] >>> rec_insertion_sort(col, len(col)) - >>> print(col) + >>> col [1] """ # Checks if the entire collection has been sorted @@ -41,17 +41,17 @@ def insert_next(collection: list, index: int): >>> col = [3, 2, 4, 2] >>> insert_next(col, 1) - >>> print(col) + >>> col [2, 3, 4, 2] >>> col = [3, 2, 3] 
>>> insert_next(col, 2) - >>> print(col) + >>> col [3, 2, 3] >>> col = [] >>> insert_next(col, 1) - >>> print(col) + >>> col [] """ # Checks order between adjacent elements diff --git a/web_programming/reddit.py b/web_programming/reddit.py index 672109f1399d..6a31c81c34bd 100644 --- a/web_programming/reddit.py +++ b/web_programming/reddit.py @@ -23,8 +23,6 @@ def get_subreddit_data( limit : Number of posts to fetch age : ["new", "top", "hot"] wanted_data : Get only the required data in the list - - >>> pass """ wanted_data = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)): diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py index 22a31dcb1db4..abac3c70b22e 100644 --- a/web_programming/search_books_by_isbn.py +++ b/web_programming/search_books_by_isbn.py @@ -19,7 +19,6 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict: {'publishers': ['Puffin'], 'number_of_pages': 96, 'isbn_10': ['0140328726'], ... # >>> get_openlibrary_data(olid='/authors/OL7353617A') # doctest: +ELLIPSIS {'name': 'Adrian Brisku', 'created': {'type': '/type/datetime', ... - >>> pass # Placate https://github.com/apps/algorithms-keeper """ new_olid = olid.strip().strip("/") # Remove leading/trailing whitespace & slashes if new_olid.count("/") != 1: @@ -29,9 +28,7 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict: def summarize_book(ol_book_data: dict) -> dict: """ - Given Open Library book data, return a summary as a Python dict. - - >>> pass # Placate https://github.com/apps/algorithms-keeper + Given Open Library book data, return a summary as a Python dict. """ desired_keys = { "title": "Title", From de3271ec80c76a8b79c913f68a94f693e8a00a0b Mon Sep 17 00:00:00 2001 From: SwayamSahu <91021799+SwayamSahu@users.noreply.github.com> Date: Fri, 28 Oct 2022 02:32:15 +0530 Subject: [PATCH 0632/1543] Refactoring the syntax using list comprehension (#7749) * Refactoring the syntax using list comprehension * Update detecting_english_programmatically.py * Update detecting_english_programmatically.py Co-authored-by: Christian Clauss --- strings/detecting_english_programmatically.py | 20 ++++--------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/strings/detecting_english_programmatically.py b/strings/detecting_english_programmatically.py index aa18db21027a..b9000101beb4 100644 --- a/strings/detecting_english_programmatically.py +++ b/strings/detecting_english_programmatically.py @@ -1,7 +1,7 @@ import os +from string import ascii_letters -UPPERLETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" -LETTERS_AND_SPACE = UPPERLETTERS + UPPERLETTERS.lower() + " \t\n" +LETTERS_AND_SPACE = ascii_letters + " \t\n" def load_dictionary() -> dict[str, None]: @@ -20,24 +20,12 @@ def get_english_count(message: str) -> float: message = message.upper() message = remove_non_letters(message) possible_words = message.split() - - if possible_words == []: - return 0.0 - - matches = 0 - for word in possible_words: - if word in ENGLISH_WORDS: - matches += 1 - + matches = len([word for word in possible_words if word in ENGLISH_WORDS]) return float(matches) / len(possible_words) def remove_non_letters(message: str) -> str: - letters_only = [] - for symbol in message: - if symbol in LETTERS_AND_SPACE: - letters_only.append(symbol) - return "".join(letters_only) + return "".join(symbol for symbol in message if symbol in LETTERS_AND_SPACE) def is_english( From 25757e697cfbb5bc7abf47c1ffa13061cb1534e1 Mon Sep 17 00:00:00 2001 From: 
Caeden Perelli-Harris Date: Thu, 27 Oct 2022 22:03:01 +0100 Subject: [PATCH 0633/1543] Binary tree path sum (#7748) * feat: Implement binary tree path sum (#7135) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/binary_tree/binary_tree_path_sum.py Co-authored-by: Christian Clauss * refactor: Rename `dfs` to `depth_first_search` Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/binary_tree_path_sum.py | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 data_structures/binary_tree/binary_tree_path_sum.py diff --git a/data_structures/binary_tree/binary_tree_path_sum.py b/data_structures/binary_tree/binary_tree_path_sum.py new file mode 100644 index 000000000000..a3fe9ca7a7e2 --- /dev/null +++ b/data_structures/binary_tree/binary_tree_path_sum.py @@ -0,0 +1,88 @@ +""" +Given the root of a binary tree and an integer target, +find the number of paths where the sum of the values +along the path equals target. + + +Leetcode reference: https://leetcode.com/problems/path-sum-iii/ +""" + +from __future__ import annotations + + +class Node: + """ + A Node has value variable and pointers to Nodes to its left and right. + """ + + def __init__(self, value: int) -> None: + self.value = value + self.left: Node | None = None + self.right: Node | None = None + + +class BinaryTreePathSum: + r""" + The below tree looks like this + 10 + / \ + 5 -3 + / \ \ + 3 2 11 + / \ \ + 3 -2 1 + + + >>> tree = Node(10) + >>> tree.left = Node(5) + >>> tree.right = Node(-3) + >>> tree.left.left = Node(3) + >>> tree.left.right = Node(2) + >>> tree.right.right = Node(11) + >>> tree.left.left.left = Node(3) + >>> tree.left.left.right = Node(-2) + >>> tree.left.right.right = Node(1) + + >>> BinaryTreePathSum().path_sum(tree, 8) + 3 + >>> BinaryTreePathSum().path_sum(tree, 7) + 2 + >>> tree.right.right = Node(10) + >>> BinaryTreePathSum().path_sum(tree, 8) + 2 + """ + + target: int + + def __init__(self) -> None: + self.paths = 0 + + def depth_first_search(self, node: Node | None, path_sum: int) -> None: + if node is None: + return + + if path_sum == self.target: + self.paths += 1 + + if node.left: + self.depth_first_search(node.left, path_sum + node.left.value) + if node.right: + self.depth_first_search(node.right, path_sum + node.right.value) + + def path_sum(self, node: Node | None, target: int | None = None) -> int: + if node is None: + return 0 + if target is not None: + self.target = target + + self.depth_first_search(node, node.value) + self.path_sum(node.left) + self.path_sum(node.right) + + return self.paths + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 15c93e5f4bc5b03cecc000506bdf45c100b8f0b3 Mon Sep 17 00:00:00 2001 From: MoPaMo <67760881+MoPaMo@users.noreply.github.com> Date: Thu, 27 Oct 2022 23:03:34 +0200 Subject: [PATCH 0634/1543] fix typo in caesar_cipher.py (#7761) very character-> every character --- ciphers/caesar_cipher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ciphers/caesar_cipher.py b/ciphers/caesar_cipher.py index 8cd9fab58471..d19b9a337221 100644 --- a/ciphers/caesar_cipher.py +++ b/ciphers/caesar_cipher.py @@ -27,7 +27,7 @@ def encrypt(input_string: str, key: int, alphabet: str | None = None) -> str: ========================= The caesar cipher is named after Julius Caesar who used it when sending secret military messages to his troops. 
This is a simple substitution cipher - where very character in the plain-text is shifted by a certain number known + where every character in the plain-text is shifted by a certain number known as the "key" or "shift". Example: From 19bff003aa1c365bec86d3f4a13a9c3d6c36d230 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 28 Oct 2022 15:54:54 +0200 Subject: [PATCH 0635/1543] Adopt Python >= 3.8 assignment expressions using auto-walrus (#7737) * Adopt Python >= 3.8 assignment expressions using auto-walrus * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 5 + DIRECTORY.md | 3 + ciphers/enigma_machine2.py | 3 +- .../linked_list/doubly_linked_list_two.py | 3 +- dynamic_programming/fibonacci.py | 3 +- .../sequential_minimum_optimization.py | 1261 ++++++++--------- strings/indian_phone_validator.py | 3 +- 7 files changed, 642 insertions(+), 639 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5bdda50be0c4..7f6c206b49bc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,6 +13,11 @@ repos: )$ - id: requirements-txt-fixer + - repo: https://github.com/MarcoGorelli/auto-walrus + rev: v0.2.1 + hooks: + - id: auto-walrus + - repo: https://github.com/psf/black rev: 22.10.0 hooks: diff --git a/DIRECTORY.md b/DIRECTORY.md index ba7d3e62a9e1..7621427a6c34 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -45,6 +45,7 @@ * [Count 1S Brian Kernighan Method](bit_manipulation/count_1s_brian_kernighan_method.py) * [Count Number Of One Bits](bit_manipulation/count_number_of_one_bits.py) * [Gray Code Sequence](bit_manipulation/gray_code_sequence.py) + * [Highest Set Bit](bit_manipulation/highest_set_bit.py) * [Is Even](bit_manipulation/is_even.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) @@ -326,6 +327,7 @@ ## Financial * [Equated Monthly Installments](financial/equated_monthly_installments.py) * [Interest](financial/interest.py) + * [Price Plus Tax](financial/price_plus_tax.py) ## Fractals * [Julia Sets](fractals/julia_sets.py) @@ -669,6 +671,7 @@ * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) + * [Malus Law](physics/malus_law.py) * [N Body Simulation](physics/n_body_simulation.py) * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index 9f9dbe6f7cd0..a877256ebeeb 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -86,8 +86,7 @@ def _validator( """ # Checks if there are 3 unique rotors - unique_rotsel = len(set(rotsel)) - if unique_rotsel < 3: + if (unique_rotsel := len(set(rotsel))) < 3: raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})") # Checks if rotor positions are valid diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py index 184b6966b5a9..94b916a623f6 100644 --- a/data_structures/linked_list/doubly_linked_list_two.py +++ 
b/data_structures/linked_list/doubly_linked_list_two.py @@ -143,9 +143,8 @@ def get_node(self, item: int) -> Node: raise Exception("Node not found") def delete_value(self, value): - node = self.get_node(value) - if node is not None: + if (node := self.get_node(value)) is not None: if node == self.head: self.head = self.head.get_next() diff --git a/dynamic_programming/fibonacci.py b/dynamic_programming/fibonacci.py index 4abc60d4f3cc..7ec5993ef38d 100644 --- a/dynamic_programming/fibonacci.py +++ b/dynamic_programming/fibonacci.py @@ -18,8 +18,7 @@ def get(self, index: int) -> list: >>> Fibonacci().get(5) [0, 1, 1, 2, 3] """ - difference = index - (len(self.sequence) - 2) - if difference >= 1: + if (difference := index - (len(self.sequence) - 2)) >= 1: for _ in range(difference): self.sequence.append(self.sequence[-1] + self.sequence[-2]) return self.sequence[:index] diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index df5b03790804..66535e806c43 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -1,631 +1,630 @@ -""" - Implementation of sequential minimal optimization (SMO) for support vector machines - (SVM). - - Sequential minimal optimization (SMO) is an algorithm for solving the quadratic - programming (QP) problem that arises during the training of support vector - machines. - It was invented by John Platt in 1998. - -Input: - 0: type: numpy.ndarray. - 1: first column of ndarray must be tags of samples, must be 1 or -1. - 2: rows of ndarray represent samples. - -Usage: - Command: - python3 sequential_minimum_optimization.py - Code: - from sequential_minimum_optimization import SmoSVM, Kernel - - kernel = Kernel(kernel='poly', degree=3., coef0=1., gamma=0.5) - init_alphas = np.zeros(train.shape[0]) - SVM = SmoSVM(train=train, alpha_list=init_alphas, kernel_func=kernel, cost=0.4, - b=0.0, tolerance=0.001) - SVM.fit() - predict = SVM.predict(test_samples) - -Reference: - https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf - https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf -""" - - -import os -import sys -import urllib.request - -import numpy as np -import pandas as pd -from matplotlib import pyplot as plt -from sklearn.datasets import make_blobs, make_circles -from sklearn.preprocessing import StandardScaler - -CANCER_DATASET_URL = ( - "https://archive.ics.uci.edu/ml/machine-learning-databases/" - "breast-cancer-wisconsin/wdbc.data" -) - - -class SmoSVM: - def __init__( - self, - train, - kernel_func, - alpha_list=None, - cost=0.4, - b=0.0, - tolerance=0.001, - auto_norm=True, - ): - self._init = True - self._auto_norm = auto_norm - self._c = np.float64(cost) - self._b = np.float64(b) - self._tol = np.float64(tolerance) if tolerance > 0.0001 else np.float64(0.001) - - self.tags = train[:, 0] - self.samples = self._norm(train[:, 1:]) if self._auto_norm else train[:, 1:] - self.alphas = alpha_list if alpha_list is not None else np.zeros(train.shape[0]) - self.Kernel = kernel_func - - self._eps = 0.001 - self._all_samples = list(range(self.length)) - self._K_matrix = self._calculate_k_matrix() - self._error = np.zeros(self.length) - self._unbound = [] - - self.choose_alpha = self._choose_alphas() - - # Calculate alphas using SMO algorithm - def fit(self): - k = self._k - state = None - while True: - - # 1: Find alpha1, alpha2 - try: - i1, i2 = self.choose_alpha.send(state) - state = None 
- except StopIteration: - print("Optimization done!\nEvery sample satisfy the KKT condition!") - break - - # 2: calculate new alpha2 and new alpha1 - y1, y2 = self.tags[i1], self.tags[i2] - a1, a2 = self.alphas[i1].copy(), self.alphas[i2].copy() - e1, e2 = self._e(i1), self._e(i2) - args = (i1, i2, a1, a2, e1, e2, y1, y2) - a1_new, a2_new = self._get_new_alpha(*args) - if not a1_new and not a2_new: - state = False - continue - self.alphas[i1], self.alphas[i2] = a1_new, a2_new - - # 3: update threshold(b) - b1_new = np.float64( - -e1 - - y1 * k(i1, i1) * (a1_new - a1) - - y2 * k(i2, i1) * (a2_new - a2) - + self._b - ) - b2_new = np.float64( - -e2 - - y2 * k(i2, i2) * (a2_new - a2) - - y1 * k(i1, i2) * (a1_new - a1) - + self._b - ) - if 0.0 < a1_new < self._c: - b = b1_new - if 0.0 < a2_new < self._c: - b = b2_new - if not (np.float64(0) < a2_new < self._c) and not ( - np.float64(0) < a1_new < self._c - ): - b = (b1_new + b2_new) / 2.0 - b_old = self._b - self._b = b - - # 4: update error value,here we only calculate those non-bound samples' - # error - self._unbound = [i for i in self._all_samples if self._is_unbound(i)] - for s in self.unbound: - if s == i1 or s == i2: - continue - self._error[s] += ( - y1 * (a1_new - a1) * k(i1, s) - + y2 * (a2_new - a2) * k(i2, s) - + (self._b - b_old) - ) - - # if i1 or i2 is non-bound,update there error value to zero - if self._is_unbound(i1): - self._error[i1] = 0 - if self._is_unbound(i2): - self._error[i2] = 0 - - # Predict test samples - def predict(self, test_samples, classify=True): - - if test_samples.shape[1] > self.samples.shape[1]: - raise ValueError( - "Test samples' feature length does not equal to that of train samples" - ) - - if self._auto_norm: - test_samples = self._norm(test_samples) - - results = [] - for test_sample in test_samples: - result = self._predict(test_sample) - if classify: - results.append(1 if result > 0 else -1) - else: - results.append(result) - return np.array(results) - - # Check if alpha violate KKT condition - def _check_obey_kkt(self, index): - alphas = self.alphas - tol = self._tol - r = self._e(index) * self.tags[index] - c = self._c - - return (r < -tol and alphas[index] < c) or (r > tol and alphas[index] > 0.0) - - # Get value calculated from kernel function - def _k(self, i1, i2): - # for test samples,use Kernel function - if isinstance(i2, np.ndarray): - return self.Kernel(self.samples[i1], i2) - # for train samples,Kernel values have been saved in matrix - else: - return self._K_matrix[i1, i2] - - # Get sample's error - def _e(self, index): - """ - Two cases: - 1:Sample[index] is non-bound,Fetch error from list: _error - 2:sample[index] is bound,Use predicted value deduct true value: g(xi) - yi - - """ - # get from error data - if self._is_unbound(index): - return self._error[index] - # get by g(xi) - yi - else: - gx = np.dot(self.alphas * self.tags, self._K_matrix[:, index]) + self._b - yi = self.tags[index] - return gx - yi - - # Calculate Kernel matrix of all possible i1,i2 ,saving time - def _calculate_k_matrix(self): - k_matrix = np.zeros([self.length, self.length]) - for i in self._all_samples: - for j in self._all_samples: - k_matrix[i, j] = np.float64( - self.Kernel(self.samples[i, :], self.samples[j, :]) - ) - return k_matrix - - # Predict test sample's tag - def _predict(self, sample): - k = self._k - predicted_value = ( - np.sum( - [ - self.alphas[i1] * self.tags[i1] * k(i1, sample) - for i1 in self._all_samples - ] - ) - + self._b - ) - return predicted_value - - # Choose alpha1 and alpha2 - 
def _choose_alphas(self): - locis = yield from self._choose_a1() - if not locis: - return - return locis - - def _choose_a1(self): - """ - Choose first alpha ;steps: - 1:First loop over all sample - 2:Second loop over all non-bound samples till all non-bound samples does not - voilate kkt condition. - 3:Repeat this two process endlessly,till all samples does not voilate kkt - condition samples after first loop. - """ - while True: - all_not_obey = True - # all sample - print("scanning all sample!") - for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]: - all_not_obey = False - yield from self._choose_a2(i1) - - # non-bound sample - print("scanning non-bound sample!") - while True: - not_obey = True - for i1 in [ - i - for i in self._all_samples - if self._check_obey_kkt(i) and self._is_unbound(i) - ]: - not_obey = False - yield from self._choose_a2(i1) - if not_obey: - print("all non-bound samples fit the KKT condition!") - break - if all_not_obey: - print("all samples fit the KKT condition! Optimization done!") - break - return False - - def _choose_a2(self, i1): - """ - Choose the second alpha by using heuristic algorithm ;steps: - 1: Choose alpha2 which gets the maximum step size (|E1 - E2|). - 2: Start in a random point,loop over all non-bound samples till alpha1 and - alpha2 are optimized. - 3: Start in a random point,loop over all samples till alpha1 and alpha2 are - optimized. - """ - self._unbound = [i for i in self._all_samples if self._is_unbound(i)] - - if len(self.unbound) > 0: - tmp_error = self._error.copy().tolist() - tmp_error_dict = { - index: value - for index, value in enumerate(tmp_error) - if self._is_unbound(index) - } - if self._e(i1) >= 0: - i2 = min(tmp_error_dict, key=lambda index: tmp_error_dict[index]) - else: - i2 = max(tmp_error_dict, key=lambda index: tmp_error_dict[index]) - cmd = yield i1, i2 - if cmd is None: - return - - for i2 in np.roll(self.unbound, np.random.choice(self.length)): - cmd = yield i1, i2 - if cmd is None: - return - - for i2 in np.roll(self._all_samples, np.random.choice(self.length)): - cmd = yield i1, i2 - if cmd is None: - return - - # Get the new alpha2 and new alpha1 - def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): - k = self._k - if i1 == i2: - return None, None - - # calculate L and H which bound the new alpha2 - s = y1 * y2 - if s == -1: - l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) - else: - l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) - if l == h: # noqa: E741 - return None, None - - # calculate eta - k11 = k(i1, i1) - k22 = k(i2, i2) - k12 = k(i1, i2) - eta = k11 + k22 - 2.0 * k12 - - # select the new alpha2 which could get the minimal objectives - if eta > 0.0: - a2_new_unc = a2 + (y2 * (e1 - e2)) / eta - # a2_new has a boundary - if a2_new_unc >= h: - a2_new = h - elif a2_new_unc <= l: - a2_new = l - else: - a2_new = a2_new_unc - else: - b = self._b - l1 = a1 + s * (a2 - l) - h1 = a1 + s * (a2 - h) - - # way 1 - f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2) - f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2) - ol = ( - l1 * f1 - + l * f2 - + 1 / 2 * l1**2 * k(i1, i1) - + 1 / 2 * l**2 * k(i2, i2) - + s * l * l1 * k(i1, i2) - ) - oh = ( - h1 * f1 - + h * f2 - + 1 / 2 * h1**2 * k(i1, i1) - + 1 / 2 * h**2 * k(i2, i2) - + s * h * h1 * k(i1, i2) - ) - """ - # way 2 - Use objective function check which alpha2 new could get the minimal - objectives - """ - if ol < (oh - self._eps): - a2_new = l - elif ol > oh + self._eps: - a2_new = h - else: - a2_new = a2 - - # 
a1_new has a boundary too - a1_new = a1 + s * (a2 - a2_new) - if a1_new < 0: - a2_new += s * a1_new - a1_new = 0 - if a1_new > self._c: - a2_new += s * (a1_new - self._c) - a1_new = self._c - - return a1_new, a2_new - - # Normalise data using min_max way - def _norm(self, data): - if self._init: - self._min = np.min(data, axis=0) - self._max = np.max(data, axis=0) - self._init = False - return (data - self._min) / (self._max - self._min) - else: - return (data - self._min) / (self._max - self._min) - - def _is_unbound(self, index): - if 0.0 < self.alphas[index] < self._c: - return True - else: - return False - - def _is_support(self, index): - if self.alphas[index] > 0: - return True - else: - return False - - @property - def unbound(self): - return self._unbound - - @property - def support(self): - return [i for i in range(self.length) if self._is_support(i)] - - @property - def length(self): - return self.samples.shape[0] - - -class Kernel: - def __init__(self, kernel, degree=1.0, coef0=0.0, gamma=1.0): - self.degree = np.float64(degree) - self.coef0 = np.float64(coef0) - self.gamma = np.float64(gamma) - self._kernel_name = kernel - self._kernel = self._get_kernel(kernel_name=kernel) - self._check() - - def _polynomial(self, v1, v2): - return (self.gamma * np.inner(v1, v2) + self.coef0) ** self.degree - - def _linear(self, v1, v2): - return np.inner(v1, v2) + self.coef0 - - def _rbf(self, v1, v2): - return np.exp(-1 * (self.gamma * np.linalg.norm(v1 - v2) ** 2)) - - def _check(self): - if self._kernel == self._rbf: - if self.gamma < 0: - raise ValueError("gamma value must greater than 0") - - def _get_kernel(self, kernel_name): - maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf} - return maps[kernel_name] - - def __call__(self, v1, v2): - return self._kernel(v1, v2) - - def __repr__(self): - return self._kernel_name - - -def count_time(func): - def call_func(*args, **kwargs): - import time - - start_time = time.time() - func(*args, **kwargs) - end_time = time.time() - print(f"smo algorithm cost {end_time - start_time} seconds") - - return call_func - - -@count_time -def test_cancel_data(): - print("Hello!\nStart test svm by smo algorithm!") - # 0: download dataset and load into pandas' dataframe - if not os.path.exists(r"cancel_data.csv"): - request = urllib.request.Request( - CANCER_DATASET_URL, - headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}, - ) - response = urllib.request.urlopen(request) - content = response.read().decode("utf-8") - with open(r"cancel_data.csv", "w") as f: - f.write(content) - - data = pd.read_csv(r"cancel_data.csv", header=None) - - # 1: pre-processing data - del data[data.columns.tolist()[0]] - data = data.dropna(axis=0) - data = data.replace({"M": np.float64(1), "B": np.float64(-1)}) - samples = np.array(data)[:, :] - - # 2: dividing data into train_data data and test_data data - train_data, test_data = samples[:328, :], samples[328:, :] - test_tags, test_samples = test_data[:, 0], test_data[:, 1:] - - # 3: choose kernel function,and set initial alphas to zero(optional) - mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) - al = np.zeros(train_data.shape[0]) - - # 4: calculating best alphas using SMO algorithm and predict test_data samples - mysvm = SmoSVM( - train=train_data, - kernel_func=mykernel, - alpha_list=al, - cost=0.4, - b=0.0, - tolerance=0.001, - ) - mysvm.fit() - predict = mysvm.predict(test_samples) - - # 5: check accuracy - score = 0 - test_num = test_tags.shape[0] - for i in 
range(test_tags.shape[0]): - if test_tags[i] == predict[i]: - score += 1 - print(f"\nall: {test_num}\nright: {score}\nfalse: {test_num - score}") - print(f"Rough Accuracy: {score / test_tags.shape[0]}") - - -def test_demonstration(): - # change stdout - print("\nStart plot,please wait!!!") - sys.stdout = open(os.devnull, "w") - - ax1 = plt.subplot2grid((2, 2), (0, 0)) - ax2 = plt.subplot2grid((2, 2), (0, 1)) - ax3 = plt.subplot2grid((2, 2), (1, 0)) - ax4 = plt.subplot2grid((2, 2), (1, 1)) - ax1.set_title("linear svm,cost:0.1") - test_linear_kernel(ax1, cost=0.1) - ax2.set_title("linear svm,cost:500") - test_linear_kernel(ax2, cost=500) - ax3.set_title("rbf kernel svm,cost:0.1") - test_rbf_kernel(ax3, cost=0.1) - ax4.set_title("rbf kernel svm,cost:500") - test_rbf_kernel(ax4, cost=500) - - sys.stdout = sys.__stdout__ - print("Plot done!!!") - - -def test_linear_kernel(ax, cost): - train_x, train_y = make_blobs( - n_samples=500, centers=2, n_features=2, random_state=1 - ) - train_y[train_y == 0] = -1 - scaler = StandardScaler() - train_x_scaled = scaler.fit_transform(train_x, train_y) - train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled)) - mykernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5) - mysvm = SmoSVM( - train=train_data, - kernel_func=mykernel, - cost=cost, - tolerance=0.001, - auto_norm=False, - ) - mysvm.fit() - plot_partition_boundary(mysvm, train_data, ax=ax) - - -def test_rbf_kernel(ax, cost): - train_x, train_y = make_circles( - n_samples=500, noise=0.1, factor=0.1, random_state=1 - ) - train_y[train_y == 0] = -1 - scaler = StandardScaler() - train_x_scaled = scaler.fit_transform(train_x, train_y) - train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled)) - mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) - mysvm = SmoSVM( - train=train_data, - kernel_func=mykernel, - cost=cost, - tolerance=0.001, - auto_norm=False, - ) - mysvm.fit() - plot_partition_boundary(mysvm, train_data, ax=ax) - - -def plot_partition_boundary( - model, train_data, ax, resolution=100, colors=("b", "k", "r") -): - """ - We can not get the optimum w of our kernel svm model which is different from linear - svm. For this reason, we generate randomly distributed points with high desity and - prediced values of these points are calculated by using our tained model. Then we - could use this prediced values to draw contour map. - And this contour map can represent svm's partition boundary. 
- """ - train_data_x = train_data[:, 1] - train_data_y = train_data[:, 2] - train_data_tags = train_data[:, 0] - xrange = np.linspace(train_data_x.min(), train_data_x.max(), resolution) - yrange = np.linspace(train_data_y.min(), train_data_y.max(), resolution) - test_samples = np.array([(x, y) for x in xrange for y in yrange]).reshape( - resolution * resolution, 2 - ) - - test_tags = model.predict(test_samples, classify=False) - grid = test_tags.reshape((len(xrange), len(yrange))) - - # Plot contour map which represents the partition boundary - ax.contour( - xrange, - yrange, - np.mat(grid).T, - levels=(-1, 0, 1), - linestyles=("--", "-", "--"), - linewidths=(1, 1, 1), - colors=colors, - ) - # Plot all train samples - ax.scatter( - train_data_x, - train_data_y, - c=train_data_tags, - cmap=plt.cm.Dark2, - lw=0, - alpha=0.5, - ) - - # Plot support vectors - support = model.support - ax.scatter( - train_data_x[support], - train_data_y[support], - c=train_data_tags[support], - cmap=plt.cm.Dark2, - ) - - -if __name__ == "__main__": - test_cancel_data() - test_demonstration() - plt.show() +""" + Implementation of sequential minimal optimization (SMO) for support vector machines + (SVM). + + Sequential minimal optimization (SMO) is an algorithm for solving the quadratic + programming (QP) problem that arises during the training of support vector + machines. + It was invented by John Platt in 1998. + +Input: + 0: type: numpy.ndarray. + 1: first column of ndarray must be tags of samples, must be 1 or -1. + 2: rows of ndarray represent samples. + +Usage: + Command: + python3 sequential_minimum_optimization.py + Code: + from sequential_minimum_optimization import SmoSVM, Kernel + + kernel = Kernel(kernel='poly', degree=3., coef0=1., gamma=0.5) + init_alphas = np.zeros(train.shape[0]) + SVM = SmoSVM(train=train, alpha_list=init_alphas, kernel_func=kernel, cost=0.4, + b=0.0, tolerance=0.001) + SVM.fit() + predict = SVM.predict(test_samples) + +Reference: + https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf + https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf +""" + + +import os +import sys +import urllib.request + +import numpy as np +import pandas as pd +from matplotlib import pyplot as plt +from sklearn.datasets import make_blobs, make_circles +from sklearn.preprocessing import StandardScaler + +CANCER_DATASET_URL = ( + "https://archive.ics.uci.edu/ml/machine-learning-databases/" + "breast-cancer-wisconsin/wdbc.data" +) + + +class SmoSVM: + def __init__( + self, + train, + kernel_func, + alpha_list=None, + cost=0.4, + b=0.0, + tolerance=0.001, + auto_norm=True, + ): + self._init = True + self._auto_norm = auto_norm + self._c = np.float64(cost) + self._b = np.float64(b) + self._tol = np.float64(tolerance) if tolerance > 0.0001 else np.float64(0.001) + + self.tags = train[:, 0] + self.samples = self._norm(train[:, 1:]) if self._auto_norm else train[:, 1:] + self.alphas = alpha_list if alpha_list is not None else np.zeros(train.shape[0]) + self.Kernel = kernel_func + + self._eps = 0.001 + self._all_samples = list(range(self.length)) + self._K_matrix = self._calculate_k_matrix() + self._error = np.zeros(self.length) + self._unbound = [] + + self.choose_alpha = self._choose_alphas() + + # Calculate alphas using SMO algorithm + def fit(self): + k = self._k + state = None + while True: + + # 1: Find alpha1, alpha2 + try: + i1, i2 = self.choose_alpha.send(state) + state = None + except StopIteration: + print("Optimization done!\nEvery sample satisfy 
the KKT condition!") + break + + # 2: calculate new alpha2 and new alpha1 + y1, y2 = self.tags[i1], self.tags[i2] + a1, a2 = self.alphas[i1].copy(), self.alphas[i2].copy() + e1, e2 = self._e(i1), self._e(i2) + args = (i1, i2, a1, a2, e1, e2, y1, y2) + a1_new, a2_new = self._get_new_alpha(*args) + if not a1_new and not a2_new: + state = False + continue + self.alphas[i1], self.alphas[i2] = a1_new, a2_new + + # 3: update threshold(b) + b1_new = np.float64( + -e1 + - y1 * k(i1, i1) * (a1_new - a1) + - y2 * k(i2, i1) * (a2_new - a2) + + self._b + ) + b2_new = np.float64( + -e2 + - y2 * k(i2, i2) * (a2_new - a2) + - y1 * k(i1, i2) * (a1_new - a1) + + self._b + ) + if 0.0 < a1_new < self._c: + b = b1_new + if 0.0 < a2_new < self._c: + b = b2_new + if not (np.float64(0) < a2_new < self._c) and not ( + np.float64(0) < a1_new < self._c + ): + b = (b1_new + b2_new) / 2.0 + b_old = self._b + self._b = b + + # 4: update error value,here we only calculate those non-bound samples' + # error + self._unbound = [i for i in self._all_samples if self._is_unbound(i)] + for s in self.unbound: + if s == i1 or s == i2: + continue + self._error[s] += ( + y1 * (a1_new - a1) * k(i1, s) + + y2 * (a2_new - a2) * k(i2, s) + + (self._b - b_old) + ) + + # if i1 or i2 is non-bound,update there error value to zero + if self._is_unbound(i1): + self._error[i1] = 0 + if self._is_unbound(i2): + self._error[i2] = 0 + + # Predict test samples + def predict(self, test_samples, classify=True): + + if test_samples.shape[1] > self.samples.shape[1]: + raise ValueError( + "Test samples' feature length does not equal to that of train samples" + ) + + if self._auto_norm: + test_samples = self._norm(test_samples) + + results = [] + for test_sample in test_samples: + result = self._predict(test_sample) + if classify: + results.append(1 if result > 0 else -1) + else: + results.append(result) + return np.array(results) + + # Check if alpha violate KKT condition + def _check_obey_kkt(self, index): + alphas = self.alphas + tol = self._tol + r = self._e(index) * self.tags[index] + c = self._c + + return (r < -tol and alphas[index] < c) or (r > tol and alphas[index] > 0.0) + + # Get value calculated from kernel function + def _k(self, i1, i2): + # for test samples,use Kernel function + if isinstance(i2, np.ndarray): + return self.Kernel(self.samples[i1], i2) + # for train samples,Kernel values have been saved in matrix + else: + return self._K_matrix[i1, i2] + + # Get sample's error + def _e(self, index): + """ + Two cases: + 1:Sample[index] is non-bound,Fetch error from list: _error + 2:sample[index] is bound,Use predicted value deduct true value: g(xi) - yi + + """ + # get from error data + if self._is_unbound(index): + return self._error[index] + # get by g(xi) - yi + else: + gx = np.dot(self.alphas * self.tags, self._K_matrix[:, index]) + self._b + yi = self.tags[index] + return gx - yi + + # Calculate Kernel matrix of all possible i1,i2 ,saving time + def _calculate_k_matrix(self): + k_matrix = np.zeros([self.length, self.length]) + for i in self._all_samples: + for j in self._all_samples: + k_matrix[i, j] = np.float64( + self.Kernel(self.samples[i, :], self.samples[j, :]) + ) + return k_matrix + + # Predict test sample's tag + def _predict(self, sample): + k = self._k + predicted_value = ( + np.sum( + [ + self.alphas[i1] * self.tags[i1] * k(i1, sample) + for i1 in self._all_samples + ] + ) + + self._b + ) + return predicted_value + + # Choose alpha1 and alpha2 + def _choose_alphas(self): + locis = yield from self._choose_a1() + if not 
locis: + return + return locis + + def _choose_a1(self): + """ + Choose first alpha ;steps: + 1:First loop over all sample + 2:Second loop over all non-bound samples till all non-bound samples does not + voilate kkt condition. + 3:Repeat this two process endlessly,till all samples does not voilate kkt + condition samples after first loop. + """ + while True: + all_not_obey = True + # all sample + print("scanning all sample!") + for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]: + all_not_obey = False + yield from self._choose_a2(i1) + + # non-bound sample + print("scanning non-bound sample!") + while True: + not_obey = True + for i1 in [ + i + for i in self._all_samples + if self._check_obey_kkt(i) and self._is_unbound(i) + ]: + not_obey = False + yield from self._choose_a2(i1) + if not_obey: + print("all non-bound samples fit the KKT condition!") + break + if all_not_obey: + print("all samples fit the KKT condition! Optimization done!") + break + return False + + def _choose_a2(self, i1): + """ + Choose the second alpha by using heuristic algorithm ;steps: + 1: Choose alpha2 which gets the maximum step size (|E1 - E2|). + 2: Start in a random point,loop over all non-bound samples till alpha1 and + alpha2 are optimized. + 3: Start in a random point,loop over all samples till alpha1 and alpha2 are + optimized. + """ + self._unbound = [i for i in self._all_samples if self._is_unbound(i)] + + if len(self.unbound) > 0: + tmp_error = self._error.copy().tolist() + tmp_error_dict = { + index: value + for index, value in enumerate(tmp_error) + if self._is_unbound(index) + } + if self._e(i1) >= 0: + i2 = min(tmp_error_dict, key=lambda index: tmp_error_dict[index]) + else: + i2 = max(tmp_error_dict, key=lambda index: tmp_error_dict[index]) + cmd = yield i1, i2 + if cmd is None: + return + + for i2 in np.roll(self.unbound, np.random.choice(self.length)): + cmd = yield i1, i2 + if cmd is None: + return + + for i2 in np.roll(self._all_samples, np.random.choice(self.length)): + cmd = yield i1, i2 + if cmd is None: + return + + # Get the new alpha2 and new alpha1 + def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): + k = self._k + if i1 == i2: + return None, None + + # calculate L and H which bound the new alpha2 + s = y1 * y2 + if s == -1: + l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) + else: + l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) + if l == h: # noqa: E741 + return None, None + + # calculate eta + k11 = k(i1, i1) + k22 = k(i2, i2) + k12 = k(i1, i2) + + # select the new alpha2 which could get the minimal objectives + if (eta := k11 + k22 - 2.0 * k12) > 0.0: + a2_new_unc = a2 + (y2 * (e1 - e2)) / eta + # a2_new has a boundary + if a2_new_unc >= h: + a2_new = h + elif a2_new_unc <= l: + a2_new = l + else: + a2_new = a2_new_unc + else: + b = self._b + l1 = a1 + s * (a2 - l) + h1 = a1 + s * (a2 - h) + + # way 1 + f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2) + f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2) + ol = ( + l1 * f1 + + l * f2 + + 1 / 2 * l1**2 * k(i1, i1) + + 1 / 2 * l**2 * k(i2, i2) + + s * l * l1 * k(i1, i2) + ) + oh = ( + h1 * f1 + + h * f2 + + 1 / 2 * h1**2 * k(i1, i1) + + 1 / 2 * h**2 * k(i2, i2) + + s * h * h1 * k(i1, i2) + ) + """ + # way 2 + Use objective function check which alpha2 new could get the minimal + objectives + """ + if ol < (oh - self._eps): + a2_new = l + elif ol > oh + self._eps: + a2_new = h + else: + a2_new = a2 + + # a1_new has a boundary too + a1_new = a1 + s * (a2 - a2_new) + if a1_new < 0: 
+ a2_new += s * a1_new + a1_new = 0 + if a1_new > self._c: + a2_new += s * (a1_new - self._c) + a1_new = self._c + + return a1_new, a2_new + + # Normalise data using min_max way + def _norm(self, data): + if self._init: + self._min = np.min(data, axis=0) + self._max = np.max(data, axis=0) + self._init = False + return (data - self._min) / (self._max - self._min) + else: + return (data - self._min) / (self._max - self._min) + + def _is_unbound(self, index): + if 0.0 < self.alphas[index] < self._c: + return True + else: + return False + + def _is_support(self, index): + if self.alphas[index] > 0: + return True + else: + return False + + @property + def unbound(self): + return self._unbound + + @property + def support(self): + return [i for i in range(self.length) if self._is_support(i)] + + @property + def length(self): + return self.samples.shape[0] + + +class Kernel: + def __init__(self, kernel, degree=1.0, coef0=0.0, gamma=1.0): + self.degree = np.float64(degree) + self.coef0 = np.float64(coef0) + self.gamma = np.float64(gamma) + self._kernel_name = kernel + self._kernel = self._get_kernel(kernel_name=kernel) + self._check() + + def _polynomial(self, v1, v2): + return (self.gamma * np.inner(v1, v2) + self.coef0) ** self.degree + + def _linear(self, v1, v2): + return np.inner(v1, v2) + self.coef0 + + def _rbf(self, v1, v2): + return np.exp(-1 * (self.gamma * np.linalg.norm(v1 - v2) ** 2)) + + def _check(self): + if self._kernel == self._rbf: + if self.gamma < 0: + raise ValueError("gamma value must greater than 0") + + def _get_kernel(self, kernel_name): + maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf} + return maps[kernel_name] + + def __call__(self, v1, v2): + return self._kernel(v1, v2) + + def __repr__(self): + return self._kernel_name + + +def count_time(func): + def call_func(*args, **kwargs): + import time + + start_time = time.time() + func(*args, **kwargs) + end_time = time.time() + print(f"smo algorithm cost {end_time - start_time} seconds") + + return call_func + + +@count_time +def test_cancel_data(): + print("Hello!\nStart test svm by smo algorithm!") + # 0: download dataset and load into pandas' dataframe + if not os.path.exists(r"cancel_data.csv"): + request = urllib.request.Request( + CANCER_DATASET_URL, + headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}, + ) + response = urllib.request.urlopen(request) + content = response.read().decode("utf-8") + with open(r"cancel_data.csv", "w") as f: + f.write(content) + + data = pd.read_csv(r"cancel_data.csv", header=None) + + # 1: pre-processing data + del data[data.columns.tolist()[0]] + data = data.dropna(axis=0) + data = data.replace({"M": np.float64(1), "B": np.float64(-1)}) + samples = np.array(data)[:, :] + + # 2: dividing data into train_data data and test_data data + train_data, test_data = samples[:328, :], samples[328:, :] + test_tags, test_samples = test_data[:, 0], test_data[:, 1:] + + # 3: choose kernel function,and set initial alphas to zero(optional) + mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) + al = np.zeros(train_data.shape[0]) + + # 4: calculating best alphas using SMO algorithm and predict test_data samples + mysvm = SmoSVM( + train=train_data, + kernel_func=mykernel, + alpha_list=al, + cost=0.4, + b=0.0, + tolerance=0.001, + ) + mysvm.fit() + predict = mysvm.predict(test_samples) + + # 5: check accuracy + score = 0 + test_num = test_tags.shape[0] + for i in range(test_tags.shape[0]): + if test_tags[i] == predict[i]: + score += 1 + 
print(f"\nall: {test_num}\nright: {score}\nfalse: {test_num - score}") + print(f"Rough Accuracy: {score / test_tags.shape[0]}") + + +def test_demonstration(): + # change stdout + print("\nStart plot,please wait!!!") + sys.stdout = open(os.devnull, "w") + + ax1 = plt.subplot2grid((2, 2), (0, 0)) + ax2 = plt.subplot2grid((2, 2), (0, 1)) + ax3 = plt.subplot2grid((2, 2), (1, 0)) + ax4 = plt.subplot2grid((2, 2), (1, 1)) + ax1.set_title("linear svm,cost:0.1") + test_linear_kernel(ax1, cost=0.1) + ax2.set_title("linear svm,cost:500") + test_linear_kernel(ax2, cost=500) + ax3.set_title("rbf kernel svm,cost:0.1") + test_rbf_kernel(ax3, cost=0.1) + ax4.set_title("rbf kernel svm,cost:500") + test_rbf_kernel(ax4, cost=500) + + sys.stdout = sys.__stdout__ + print("Plot done!!!") + + +def test_linear_kernel(ax, cost): + train_x, train_y = make_blobs( + n_samples=500, centers=2, n_features=2, random_state=1 + ) + train_y[train_y == 0] = -1 + scaler = StandardScaler() + train_x_scaled = scaler.fit_transform(train_x, train_y) + train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled)) + mykernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5) + mysvm = SmoSVM( + train=train_data, + kernel_func=mykernel, + cost=cost, + tolerance=0.001, + auto_norm=False, + ) + mysvm.fit() + plot_partition_boundary(mysvm, train_data, ax=ax) + + +def test_rbf_kernel(ax, cost): + train_x, train_y = make_circles( + n_samples=500, noise=0.1, factor=0.1, random_state=1 + ) + train_y[train_y == 0] = -1 + scaler = StandardScaler() + train_x_scaled = scaler.fit_transform(train_x, train_y) + train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled)) + mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) + mysvm = SmoSVM( + train=train_data, + kernel_func=mykernel, + cost=cost, + tolerance=0.001, + auto_norm=False, + ) + mysvm.fit() + plot_partition_boundary(mysvm, train_data, ax=ax) + + +def plot_partition_boundary( + model, train_data, ax, resolution=100, colors=("b", "k", "r") +): + """ + We can not get the optimum w of our kernel svm model which is different from linear + svm. For this reason, we generate randomly distributed points with high desity and + prediced values of these points are calculated by using our tained model. Then we + could use this prediced values to draw contour map. + And this contour map can represent svm's partition boundary. 
+ """ + train_data_x = train_data[:, 1] + train_data_y = train_data[:, 2] + train_data_tags = train_data[:, 0] + xrange = np.linspace(train_data_x.min(), train_data_x.max(), resolution) + yrange = np.linspace(train_data_y.min(), train_data_y.max(), resolution) + test_samples = np.array([(x, y) for x in xrange for y in yrange]).reshape( + resolution * resolution, 2 + ) + + test_tags = model.predict(test_samples, classify=False) + grid = test_tags.reshape((len(xrange), len(yrange))) + + # Plot contour map which represents the partition boundary + ax.contour( + xrange, + yrange, + np.mat(grid).T, + levels=(-1, 0, 1), + linestyles=("--", "-", "--"), + linewidths=(1, 1, 1), + colors=colors, + ) + # Plot all train samples + ax.scatter( + train_data_x, + train_data_y, + c=train_data_tags, + cmap=plt.cm.Dark2, + lw=0, + alpha=0.5, + ) + + # Plot support vectors + support = model.support + ax.scatter( + train_data_x[support], + train_data_y[support], + c=train_data_tags[support], + cmap=plt.cm.Dark2, + ) + + +if __name__ == "__main__": + test_cancel_data() + test_demonstration() + plt.show() diff --git a/strings/indian_phone_validator.py b/strings/indian_phone_validator.py index 7f3fda5db949..07161a63a7af 100644 --- a/strings/indian_phone_validator.py +++ b/strings/indian_phone_validator.py @@ -20,8 +20,7 @@ def indian_phone_validator(phone: str) -> bool: True """ pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$") - match = re.search(pat, phone) - if match: + if match := re.search(pat, phone): return match.string == phone return False From 3a671b57a29e3c2b4e973b01bc5bbe1554aa5da2 Mon Sep 17 00:00:00 2001 From: Kuldeep Borkar <74557588+KuldeepBorkar@users.noreply.github.com> Date: Fri, 28 Oct 2022 19:57:16 +0530 Subject: [PATCH 0636/1543] Implemented Swish Function (#7357) * Implemented Swish Function * Added more description and return hint in def * Changed the name and added more descrition including test for sigmoid function * Added * in front of links --- maths/sigmoid_linear_unit.py | 57 ++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 maths/sigmoid_linear_unit.py diff --git a/maths/sigmoid_linear_unit.py b/maths/sigmoid_linear_unit.py new file mode 100644 index 000000000000..a8ada10dd8ec --- /dev/null +++ b/maths/sigmoid_linear_unit.py @@ -0,0 +1,57 @@ +""" +This script demonstrates the implementation of the Sigmoid Linear Unit (SiLU) +or swish function. +* https://en.wikipedia.org/wiki/Rectifier_(neural_networks) +* https://en.wikipedia.org/wiki/Swish_function + +The function takes a vector x of K real numbers as input and returns x * sigmoid(x). +Swish is a smooth, non-monotonic function defined as f(x) = x * sigmoid(x). +Extensive experiments shows that Swish consistently matches or outperforms ReLU +on deep networks applied to a variety of challenging domains such as +image classification and machine translation. + +This script is inspired by a corresponding research paper. +* https://arxiv.org/abs/1710.05941 +""" + +import numpy as np + + +def sigmoid(vector: np.array) -> np.array: + """ + Mathematical function sigmoid takes a vector x of K real numbers as input and + returns 1/ (1 + e^-x). 
+ https://en.wikipedia.org/wiki/Sigmoid_function + + >>> sigmoid(np.array([-1.0, 1.0, 2.0])) + array([0.26894142, 0.73105858, 0.88079708]) + """ + return 1 / (1 + np.exp(-vector)) + + +def sigmoid_linear_unit(vector: np.array) -> np.array: + """ + Implements the Sigmoid Linear Unit (SiLU) or swish function + + Parameters: + vector (np.array): A numpy array consisting of real + values. + + Returns: + swish_vec (np.array): The input numpy array, after applying + swish. + + Examples: + >>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0])) + array([-0.26894142, 0.73105858, 1.76159416]) + + >>> sigmoid_linear_unit(np.array([-2])) + array([-0.23840584]) + """ + return vector * sigmoid(vector) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 26cecea27198848e2c1c0bc6d7f887d4ed7adb87 Mon Sep 17 00:00:00 2001 From: SparshRastogi <75373475+SparshRastogi@users.noreply.github.com> Date: Fri, 28 Oct 2022 20:03:21 +0530 Subject: [PATCH 0637/1543] Create fetch_amazon_product_data.py (#7585) * Create fetch_amazon_product_data.py This file provides a function which will take a product name as input from the user,and fetch the necessary information about that kind of products from Amazon like the product title,link to that product,price of the product,the ratings of the product and the discount available on the product in the form of a csv file,this will help the users by improving searchability and navigability and find the right product easily and in a short period of time, it will also be beneficial for performing better analysis on products * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fetch_amazon_product_data.py Added type hints and modified files to pass precommit test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fetch_amazon_product_data.py Added type hints and made changes to pass the precommit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fetch_amazon_product_data.py Modified function to return the data in the form of Pandas Dataframe,modified type hints and added a functionality to let the user determine if they need the data in a csv file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fetch_amazon_product_data.py Made some bug fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename fetch_amazon_product_data.py to get_amazon_product_data.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update get_amazon_product_data.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- web_programming/get_amazon_product_data.py | 100 +++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 web_programming/get_amazon_product_data.py diff --git a/web_programming/get_amazon_product_data.py b/web_programming/get_amazon_product_data.py new file mode 100644 index 000000000000..c796793f2205 --- /dev/null +++ b/web_programming/get_amazon_product_data.py @@ -0,0 +1,100 @@ +""" +This file provides a function which will take a product name as input from the user, +and fetch from Amazon information about products of this name or category. 
The product +information will include title, URL, price, ratings, and the discount available. +""" + + +from itertools import zip_longest + +import requests +from bs4 import BeautifulSoup +from pandas import DataFrame + + +def get_amazon_product_data(product: str = "laptop") -> DataFrame: + """ + Take a product name or category as input and return product information from Amazon + including title, URL, price, ratings, and the discount available. + """ + url = f"https://www.amazon.in/laptop/s?k={product}" + header = { + "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 + (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""", + "Accept-Language": "en-US, en;q=0.5", + } + soup = BeautifulSoup(requests.get(url, headers=header).text) + # Initialize a Pandas dataframe with the column titles + data_frame = DataFrame( + columns=[ + "Product Title", + "Product Link", + "Current Price of the product", + "Product Rating", + "MRP of the product", + "Discount", + ] + ) + # Loop through each entry and store them in the dataframe + for item, _ in zip_longest( + soup.find_all( + "div", + attrs={"class": "s-result-item", "data-component-type": "s-search-result"}, + ), + soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}), + ): + try: + product_title = item.h2.text + product_link = "https://www.amazon.in/" + item.h2.a["href"] + product_price = item.find("span", attrs={"class": "a-offscreen"}).text + try: + product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text + except AttributeError: + product_rating = "Not available" + try: + product_mrp = ( + "₹" + + item.find( + "span", attrs={"class": "a-price a-text-price"} + ).text.split("₹")[1] + ) + except AttributeError: + product_mrp = "" + try: + discount = float( + ( + ( + float(product_mrp.strip("₹").replace(",", "")) + - float(product_price.strip("₹").replace(",", "")) + ) + / float(product_mrp.strip("₹").replace(",", "")) + ) + * 100 + ) + except ValueError: + discount = float("nan") + except AttributeError: + pass + data_frame.loc[len(data_frame.index)] = [ + product_title, + product_link, + product_price, + product_rating, + product_mrp, + discount, + ] + data_frame.loc[ + data_frame["Current Price of the product"] > data_frame["MRP of the product"], + "MRP of the product", + ] = " " + data_frame.loc[ + data_frame["Current Price of the product"] > data_frame["MRP of the product"], + "Discount", + ] = " " + data_frame.index += 1 + return data_frame + + +if __name__ == "__main__": + product = "headphones" + get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv") From d9efd7e25bbe937893a9818cfda62ca3f72ffe0d Mon Sep 17 00:00:00 2001 From: Andrey Date: Fri, 28 Oct 2022 21:54:44 +0300 Subject: [PATCH 0638/1543] Update PR template (#7794) * Update PR template * Revert changes, reword line --- .github/pull_request_template.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 4d2265968612..b3ba8baf9c34 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -16,5 +16,5 @@ * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. 
-* [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. +* [ ] All new algorithms include at least one URL that points to Wikipedia or another similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`. From 528b1290194da09c2e762c2232502d2cfcdb1e3d Mon Sep 17 00:00:00 2001 From: Pronoy Mandal Date: Sat, 29 Oct 2022 00:38:41 +0530 Subject: [PATCH 0639/1543] Update maximum_subarray.py (#7757) * Update maximum_subarray.py 1. Rectify documentation to indicate the correct output: function doesn't return the subarray, but rather returns a sum. 2. Make the function more Pythonic and optimal. 3. Make function annotation generic i.e. can accept any sequence. 4. Raise value error when the input sequence is empty. * Update maximum_subarray.py 1. Use the conventions as mentioned in pep-0257. 2. Use negative infinity as the initial value for the current maximum and the answer. * Update maximum_subarray.py Avoid type conflict by returning the answer cast to an integer. * Update other/maximum_subarray.py Co-authored-by: Andrey * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maximum_subarray.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maximum_subarray.py Remove typecast to int for the final answer Co-authored-by: Andrey Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- other/maximum_subarray.py | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/other/maximum_subarray.py b/other/maximum_subarray.py index 756e009444fe..1c8c8cabcd2d 100644 --- a/other/maximum_subarray.py +++ b/other/maximum_subarray.py @@ -1,20 +1,26 @@ -def max_subarray(nums: list[int]) -> int: - """ - Returns the subarray with maximum sum - >>> max_subarray([1,2,3,4,-2]) +from collections.abc import Sequence + + +def max_subarray_sum(nums: Sequence[int]) -> int: + """Return the maximum possible sum amongst all non - empty subarrays. + + Raises: + ValueError: when nums is empty. 
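+
+ The implementation below is Kadane's algorithm: it keeps the best sum of a subarray ending at each position, so the scan runs in O(n) time.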
+ + >>> max_subarray_sum([1,2,3,4,-2]) 10 - >>> max_subarray([-2,1,-3,4,-1,2,1,-5,4]) + >>> max_subarray_sum([-2,1,-3,4,-1,2,1,-5,4]) 6 """ + if not nums: + raise ValueError("Input sequence should not be empty") curr_max = ans = nums[0] + nums_len = len(nums) - for i in range(1, len(nums)): - if curr_max >= 0: - curr_max = curr_max + nums[i] - else: - curr_max = nums[i] - + for i in range(1, nums_len): + num = nums[i] + curr_max = max(curr_max + num, num) ans = max(curr_max, ans) return ans @@ -23,4 +29,4 @@ def max_subarray(nums: list[int]) -> int: if __name__ == "__main__": n = int(input("Enter number of elements : ").strip()) array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n] - print(max_subarray(array)) + print(max_subarray_sum(array)) From fe5819c872abcbe1a96ee7bd20ab930b2892bbf5 Mon Sep 17 00:00:00 2001 From: Shubham Kondekar <40213815+kondekarshubham123@users.noreply.github.com> Date: Sat, 29 Oct 2022 01:02:32 +0530 Subject: [PATCH 0640/1543] Create combination_sum_iv.py (#7672) * Create combination_sum_iv.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/combination_sum_iv.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/combination_sum_iv.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/combination_sum_iv.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update combination_sum_iv.py * Update combination_sum_iv.py * Resolved PR Comments * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * minor change, argument missing in function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/combination_sum_iv.py Co-authored-by: Christian Clauss * minor change Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss --- dynamic_programming/combination_sum_iv.py | 102 ++++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 dynamic_programming/combination_sum_iv.py diff --git a/dynamic_programming/combination_sum_iv.py b/dynamic_programming/combination_sum_iv.py new file mode 100644 index 000000000000..b2aeb0824f64 --- /dev/null +++ b/dynamic_programming/combination_sum_iv.py @@ -0,0 +1,102 @@ +""" +Question: +You are given an array of distinct integers and you have to tell how many +different ways of selecting the elements from the array are there such that +the sum of chosen elements is equal to the target number tar. + +Example + +Input: +N = 3 +target = 5 +array = [1, 2, 5] + +Output: +9 + +Approach: +The basic idea is to go over recursively to find the way such that the sum +of chosen elements is “tar”. For every element, we have two choices + 1. Include the element in our set of chosen elements. + 2. Don’t include the element in our set of chosen elements. +""" + + +def combination_sum_iv(n: int, array: list[int], target: int) -> int: + """ + Function checks the all possible combinations, and returns the count + of possible combination in exponential Time Complexity. 
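+ (Each recursive call tries every element of the array again, which is what makes the running time exponential in the target.)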
+ + >>> combination_sum_iv(3, [1,2,5], 5) + 9 + """ + + def count_of_possible_combinations(target: int) -> int: + if target < 0: + return 0 + if target == 0: + return 1 + return sum(count_of_possible_combinations(target - item) for item in array) + + return count_of_possible_combinations(target) + + +def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int: + """ + Function checks the all possible combinations, and returns the count + of possible combination in O(N^2) Time Complexity as we are using Dynamic + programming array here. + + >>> combination_sum_iv_dp_array(3, [1,2,5], 5) + 9 + """ + + def count_of_possible_combinations_with_dp_array( + target: int, dp_array: list[int] + ) -> int: + if target < 0: + return 0 + if target == 0: + return 1 + if dp_array[target] != -1: + return dp_array[target] + answer = sum( + count_of_possible_combinations_with_dp_array(target - item, dp_array) + for item in array + ) + dp_array[target] = answer + return answer + + dp_array = [-1] * (target + 1) + return count_of_possible_combinations_with_dp_array(target, dp_array) + + +def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int: + """ + Function checks the all possible combinations with using bottom up approach, + and returns the count of possible combination in O(N^2) Time Complexity + as we are using Dynamic programming array here. + + >>> combination_sum_iv_bottom_up(3, [1,2,5], 5) + 9 + """ + + dp_array = [0] * (target + 1) + dp_array[0] = 1 + + for i in range(1, target + 1): + for j in range(n): + if i - array[j] >= 0: + dp_array[i] += dp_array[i - array[j]] + + return dp_array[target] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + n = 3 + target = 5 + array = [1, 2, 5] + print(combination_sum_iv(n, array, target)) From 762afc086f065f1d8fe1afcde8c8ad3fa46898a7 Mon Sep 17 00:00:00 2001 From: Andrey Date: Fri, 28 Oct 2022 23:27:39 +0300 Subject: [PATCH 0641/1543] Update breadth_first_search_2.py (#7765) * Cleanup the BFS * Add both functions and timeit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add performace results as comment * Update breadth_first_search_2.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- graphs/breadth_first_search_2.py | 45 +++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/graphs/breadth_first_search_2.py b/graphs/breadth_first_search_2.py index 2f060a90d40d..a0b92b90b456 100644 --- a/graphs/breadth_first_search_2.py +++ b/graphs/breadth_first_search_2.py @@ -14,7 +14,9 @@ """ from __future__ import annotations +from collections import deque from queue import Queue +from timeit import timeit G = { "A": ["B", "C"], @@ -26,12 +28,15 @@ } -def breadth_first_search(graph: dict, start: str) -> set[str]: +def breadth_first_search(graph: dict, start: str) -> list[str]: """ - >>> ''.join(sorted(breadth_first_search(G, 'A'))) + Implementation of breadth first search using queue.Queue. 
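+ queue.Queue is synchronized for thread safety and so carries locking overhead, which is why the collections.deque version below is noticeably faster in the benchmark at the bottom of this file.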
+ + >>> ''.join(breadth_first_search(G, 'A')) 'ABCDEF' """ explored = {start} + result = [start] queue: Queue = Queue() queue.put(start) while not queue.empty(): @@ -39,12 +44,44 @@ def breadth_first_search(graph: dict, start: str) -> set[str]: for w in graph[v]: if w not in explored: explored.add(w) + result.append(w) queue.put(w) - return explored + return result + + +def breadth_first_search_with_deque(graph: dict, start: str) -> list[str]: + """ + Implementation of breadth first search using collection.queue. + + >>> ''.join(breadth_first_search_with_deque(G, 'A')) + 'ABCDEF' + """ + visited = {start} + result = [start] + queue = deque([start]) + while queue: + v = queue.popleft() + for child in graph[v]: + if child not in visited: + visited.add(child) + result.append(child) + queue.append(child) + return result + + +def benchmark_function(name: str) -> None: + setup = f"from __main__ import G, {name}" + number = 10000 + res = timeit(f"{name}(G, 'A')", setup=setup, number=number) + print(f"{name:<35} finished {number} runs in {res:.5f} seconds") if __name__ == "__main__": import doctest doctest.testmod() - print(breadth_first_search(G, "A")) + + benchmark_function("breadth_first_search") + benchmark_function("breadth_first_search_with_deque") + # breadth_first_search finished 10000 runs in 0.20999 seconds + # breadth_first_search_with_deque finished 10000 runs in 0.01421 seconds From cf08d9f5e7afdcfb9406032abcad328aa79c566a Mon Sep 17 00:00:00 2001 From: Andrey Date: Sat, 29 Oct 2022 09:26:19 +0300 Subject: [PATCH 0642/1543] Format docs (#7821) * Reformat docs for odd_even_sort.py * Fix docstring formatting * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris Co-authored-by: Caeden Perelli-Harris --- machine_learning/data_transformations.py | 10 +++++++--- physics/kinetic_energy.py | 5 ++++- sorts/merge_sort.py | 9 ++++++--- sorts/odd_even_sort.py | 9 +++++++-- 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/machine_learning/data_transformations.py b/machine_learning/data_transformations.py index 9e0d747e93fa..ecfd3b9e27c2 100644 --- a/machine_learning/data_transformations.py +++ b/machine_learning/data_transformations.py @@ -1,5 +1,7 @@ """ -Normalization Wikipedia: https://en.wikipedia.org/wiki/Normalization +Normalization. + +Wikipedia: https://en.wikipedia.org/wiki/Normalization Normalization is the process of converting numerical data to a standard range of values. This range is typically between [0, 1] or [-1, 1]. The equation for normalization is x_norm = (x - x_min)/(x_max - x_min) where x_norm is the normalized value, x is the @@ -28,7 +30,8 @@ def normalization(data: list, ndigits: int = 3) -> list: """ - Returns a normalized list of values + Return a normalized list of values. + @params: data, a list of values to normalize @returns: a list of normalized values (rounded to ndigits decimal places) @examples: @@ -46,7 +49,8 @@ def normalization(data: list, ndigits: int = 3) -> list: def standardization(data: list, ndigits: int = 3) -> list: """ - Returns a standardized list of values + Return a standardized list of values. 
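+
+ Standardization typically rescales the data to zero mean and unit variance, i.e. x_std = (x - mean) / std.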
+ @params: data, a list of values to standardize @returns: a list of standardized values (rounded to ndigits decimal places) @examples: diff --git a/physics/kinetic_energy.py b/physics/kinetic_energy.py index 535ffc219251..8863919ac79f 100644 --- a/physics/kinetic_energy.py +++ b/physics/kinetic_energy.py @@ -1,5 +1,6 @@ """ -Find the kinetic energy of an object, give its mass and velocity +Find the kinetic energy of an object, given its mass and velocity. + Description : In physics, the kinetic energy of an object is the energy that it possesses due to its motion. It is defined as the work needed to accelerate a body of a given mass from rest to its stated velocity. Having gained this energy during its @@ -19,6 +20,8 @@ def kinetic_energy(mass: float, velocity: float) -> float: """ + Calculate kinetick energy. + The kinetic energy of a non-rotating object of mass m traveling at a speed v is ½mv² >>> kinetic_energy(10,10) diff --git a/sorts/merge_sort.py b/sorts/merge_sort.py index 4da29f32a36d..e80b1cb226ec 100644 --- a/sorts/merge_sort.py +++ b/sorts/merge_sort.py @@ -1,5 +1,6 @@ """ -This is a pure Python implementation of the merge sort algorithm +This is a pure Python implementation of the merge sort algorithm. + For doctests run following command: python -m doctest -v merge_sort.py or @@ -10,7 +11,7 @@ def merge_sort(collection: list) -> list: - """Pure implementation of the merge sort algorithm in Python + """ :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending @@ -24,7 +25,9 @@ def merge_sort(collection: list) -> list: """ def merge(left: list, right: list) -> list: - """merge left and right + """ + Merge left and right. + :param left: left collection :param right: right collection :return: merge result diff --git a/sorts/odd_even_sort.py b/sorts/odd_even_sort.py index 532f829499e8..9ef4462c72c0 100644 --- a/sorts/odd_even_sort.py +++ b/sorts/odd_even_sort.py @@ -1,10 +1,15 @@ -"""For reference +""" +Odd even sort implementation. + https://en.wikipedia.org/wiki/Odd%E2%80%93even_sort """ def odd_even_sort(input_list: list) -> list: - """this algorithm uses the same idea of bubblesort, + """ + Sort input with odd even sort. + + This algorithm uses the same idea of bubblesort, but by first dividing in two phase (odd and even). Originally developed for use on parallel processors with local interconnections. 
From 301a520f0362261cddadc87e1bcfe20310308030 Mon Sep 17 00:00:00 2001 From: SparshRastogi <75373475+SparshRastogi@users.noreply.github.com> Date: Sat, 29 Oct 2022 16:44:44 +0530 Subject: [PATCH 0643/1543] Create potential_energy.py (#7666) * Create potential_energy.py Finding the gravitational potential energy of an object with reference to the earth, by taking its mass and height above the ground as input * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update physics/potential_energy.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update physics/potential_energy.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update physics/potential_energy.py Co-authored-by: Caeden Perelli-Harris * Update physics/potential_energy.py Co-authored-by: Caeden Perelli-Harris * Update physics/potential_energy.py Co-authored-by: Caeden Perelli-Harris * Update physics/potential_energy.py Co-authored-by: Caeden Perelli-Harris * Update physics/potential_energy.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- physics/potential_energy.py | 61 +++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 physics/potential_energy.py diff --git a/physics/potential_energy.py b/physics/potential_energy.py new file mode 100644 index 000000000000..c6544f6f76d8 --- /dev/null +++ b/physics/potential_energy.py @@ -0,0 +1,61 @@ +from scipy.constants import g + +""" +Finding the gravitational potential energy of an object with reference +to the earth,by taking its mass and height above the ground as input + + +Description : Gravitational energy or gravitational potential energy +is the potential energy a massive object has in relation to another +massive object due to gravity. It is the potential energy associated +with the gravitational field, which is released (converted into +kinetic energy) when the objects fall towards each other. +Gravitational potential energy increases when two objects +are brought further apart. + +For two pairwise interacting point particles, the gravitational +potential energy U is given by +U=-GMm/R +where M and m are the masses of the two particles, R is the distance +between them, and G is the gravitational constant. +Close to the Earth's surface, the gravitational field is approximately +constant, and the gravitational potential energy of an object reduces to +U=mgh +where m is the object's mass, g=GM/R² is the gravity of Earth, and h is +the height of the object's center of mass above a chosen reference level. 
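+
+For example, with m = 10 kg and h = 10 m, U = 10 * 9.80665 * 10 = 980.665 J,
+which matches the first doctest below.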
+ +Reference : "https://en.m.wikipedia.org/wiki/Gravitational_energy" +""" + + +def potential_energy(mass: float, height: float) -> float: + # function will accept mass and height as parameters and return potential energy + """ + >>> potential_energy(10,10) + 980.665 + >>> potential_energy(0,5) + 0.0 + >>> potential_energy(8,0) + 0.0 + >>> potential_energy(10,5) + 490.3325 + >>> potential_energy(0,0) + 0.0 + >>> potential_energy(2,8) + 156.9064 + >>> potential_energy(20,100) + 19613.3 + """ + if mass < 0: + # handling of negative values of mass + raise ValueError("The mass of a body cannot be negative") + if height < 0: + # handling of negative values of height + raise ValueError("The height above the ground cannot be negative") + return mass * g * height + + +if __name__ == "__main__": + from doctest import testmod + + testmod(name="potential_energy") From a9bd68d96e519d0919c2e4385dbe433ff44b4c4f Mon Sep 17 00:00:00 2001 From: Andrey Date: Sat, 29 Oct 2022 15:27:47 +0300 Subject: [PATCH 0644/1543] Add running doctest to pytest default (#7840) * Add default options for pytest * updating DIRECTORY.md * Move pytest settings to pyproject.toml * Move coverage settings to the pyproject.toml * Return --doctest-continue-on-failure to pytest * Convert pytest args to list * Update pyproject.toml Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .coveragerc | 4 ---- .github/workflows/build.yml | 2 +- DIRECTORY.md | 4 ++++ pyproject.toml | 20 ++++++++++++++++++++ pytest.ini | 5 ----- 5 files changed, 25 insertions(+), 10 deletions(-) delete mode 100644 .coveragerc create mode 100644 pyproject.toml delete mode 100644 pytest.ini diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index f7e6eb212bc8..000000000000 --- a/.coveragerc +++ /dev/null @@ -1,4 +0,0 @@ -[report] -sort = Cover -omit = - .env/* diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8481b962a256..159ce13b3fff 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,6 +22,6 @@ jobs: python -m pip install --upgrade pip setuptools six wheel python -m pip install pytest-cov -r requirements.txt - name: Run tests - run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . + run: pytest --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . 
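+ # --doctest-modules is no longer passed here; it is supplied via addopts in pyproject.toml later in this commit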
- if: ${{ success() }} run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md diff --git a/DIRECTORY.md b/DIRECTORY.md index 7621427a6c34..1fa6af75d9c3 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -165,6 +165,7 @@ * [Binary Search Tree Recursive](data_structures/binary_tree/binary_search_tree_recursive.py) * [Binary Tree Mirror](data_structures/binary_tree/binary_tree_mirror.py) * [Binary Tree Node Sum](data_structures/binary_tree/binary_tree_node_sum.py) + * [Binary Tree Path Sum](data_structures/binary_tree/binary_tree_path_sum.py) * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) @@ -285,6 +286,7 @@ * [Bitmask](dynamic_programming/bitmask.py) * [Catalan Numbers](dynamic_programming/catalan_numbers.py) * [Climbing Stairs](dynamic_programming/climbing_stairs.py) + * [Combination Sum Iv](dynamic_programming/combination_sum_iv.py) * [Edit Distance](dynamic_programming/edit_distance.py) * [Factorial](dynamic_programming/factorial.py) * [Fast Fibonacci](dynamic_programming/fast_fibonacci.py) @@ -595,6 +597,7 @@ * [P Series](maths/series/p_series.py) * [Sieve Of Eratosthenes](maths/sieve_of_eratosthenes.py) * [Sigmoid](maths/sigmoid.py) + * [Sigmoid Linear Unit](maths/sigmoid_linear_unit.py) * [Signum](maths/signum.py) * [Simpson Rule](maths/simpson_rule.py) * [Sin](maths/sin.py) @@ -1107,6 +1110,7 @@ * [Fetch Jobs](web_programming/fetch_jobs.py) * [Fetch Quotes](web_programming/fetch_quotes.py) * [Fetch Well Rx Price](web_programming/fetch_well_rx_price.py) + * [Get Amazon Product Data](web_programming/get_amazon_product_data.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) * [Get Top Billioners](web_programming/get_top_billioners.py) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000000..410e7655b2b5 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,20 @@ +[tool.pytest.ini_options] +markers = [ + "mat_ops: mark a test as utilizing matrix operations.", +] +addopts = [ + "--durations=10", + "--doctest-modules", + "--showlocals", +] + + +[tool.coverage.report] +omit = [".env/*"] +sort = "Cover" + +#[report] +#sort = Cover +#omit = +# .env/* +# backtracking/* diff --git a/pytest.ini b/pytest.ini deleted file mode 100644 index 488379278230..000000000000 --- a/pytest.ini +++ /dev/null @@ -1,5 +0,0 @@ -# Setup for pytest -[pytest] -markers = - mat_ops: mark a test as utilizing matrix operations. 
-addopts = --durations=10 From 6e809a25e33e2da07e03921bbf6614523a939e94 Mon Sep 17 00:00:00 2001 From: Andrey Date: Sat, 29 Oct 2022 15:31:56 +0300 Subject: [PATCH 0645/1543] Rename files (#7819) --- ...s_shortest_path.py => breadth_first_search_shortest_path_2.py} | 0 ...est_path.py => breadth_first_search_zero_one_shortest_path.py} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename graphs/{bfs_shortest_path.py => breadth_first_search_shortest_path_2.py} (100%) rename graphs/{bfs_zero_one_shortest_path.py => breadth_first_search_zero_one_shortest_path.py} (100%) diff --git a/graphs/bfs_shortest_path.py b/graphs/breadth_first_search_shortest_path_2.py similarity index 100% rename from graphs/bfs_shortest_path.py rename to graphs/breadth_first_search_shortest_path_2.py diff --git a/graphs/bfs_zero_one_shortest_path.py b/graphs/breadth_first_search_zero_one_shortest_path.py similarity index 100% rename from graphs/bfs_zero_one_shortest_path.py rename to graphs/breadth_first_search_zero_one_shortest_path.py From 327c38d6f0c6b79b46465406373ea7048bfec55e Mon Sep 17 00:00:00 2001 From: Sineth Sankalpa <66241389+sinsankio@users.noreply.github.com> Date: Sat, 29 Oct 2022 18:10:14 +0530 Subject: [PATCH 0646/1543] Srilankan phone number validation (#7706) * Add is_srilankan_phone_number.py * Update is_srilankan_phone_number.py --- strings/is_srilankan_phone_number.py | 35 ++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 strings/is_srilankan_phone_number.py diff --git a/strings/is_srilankan_phone_number.py b/strings/is_srilankan_phone_number.py new file mode 100644 index 000000000000..7bded93f7f1d --- /dev/null +++ b/strings/is_srilankan_phone_number.py @@ -0,0 +1,35 @@ +import re + + +def is_sri_lankan_phone_number(phone: str) -> bool: + """ + Determine whether the string is a valid sri lankan mobile phone number or not + References: https://aye.sh/blog/sri-lankan-phone-number-regex + + >>> is_sri_lankan_phone_number("+94773283048") + True + >>> is_sri_lankan_phone_number("+9477-3283048") + True + >>> is_sri_lankan_phone_number("0718382399") + True + >>> is_sri_lankan_phone_number("0094702343221") + True + >>> is_sri_lankan_phone_number("075 3201568") + True + >>> is_sri_lankan_phone_number("07779209245") + False + >>> is_sri_lankan_phone_number("0957651234") + False + """ + + pattern = re.compile( + r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" + ) + + return bool(re.search(pattern, phone)) + + +if __name__ == "__main__": + phone = "0094702343221" + + print(is_sri_lankan_phone_number(phone)) From b0f68a0248d3eb48f3baf7e18f6420dc983bdb19 Mon Sep 17 00:00:00 2001 From: tarushirastogi <108577219+tarushirastogi@users.noreply.github.com> Date: Sat, 29 Oct 2022 18:13:51 +0530 Subject: [PATCH 0647/1543] Create centripetal_force.py (#7778) * Create centripetal_force.py Centripetal force is the force acting on an object in curvilinear motion directed towards the axis of rotation or centre of curvature. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update centripetal_force.py The value error should also handle negative values of the radius and using more descriptive names will be more beneficial for the users * Update centripetal_force.py Made some bug fixes Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: SparshRastogi <75373475+SparshRastogi@users.noreply.github.com> --- physics/centripetal_force.py | 49 ++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 physics/centripetal_force.py diff --git a/physics/centripetal_force.py b/physics/centripetal_force.py new file mode 100644 index 000000000000..04069d256468 --- /dev/null +++ b/physics/centripetal_force.py @@ -0,0 +1,49 @@ +""" +Description : Centripetal force is the force acting on an object in +curvilinear motion directed towards the axis of rotation +or centre of curvature. + +The unit of centripetal force is newton. + +The centripetal force is always directed perpendicular to the +direction of the object’s displacement. Using Newton’s second +law of motion, it is found that the centripetal force of an object +moving in a circular path always acts towards the centre of the circle. +The Centripetal Force Formula is given as the product of mass (in kg) +and tangential velocity (in meters per second) squared, divided by the +radius (in meters) that implies that on doubling the tangential velocity, +the centripetal force will be quadrupled. Mathematically it is written as: +F = mv²/r +Where, F is the Centripetal force, m is the mass of the object, v is the +speed or velocity of the object and r is the radius. + +Reference: https://byjus.com/physics/centripetal-and-centrifugal-force/ +""" + + +def centripetal(mass: float, velocity: float, radius: float) -> float: + """ + The Centripetal Force formula is given as: (m*v*v)/r + + >>> round(centripetal(15.5,-30,10),2) + 1395.0 + >>> round(centripetal(10,15,5),2) + 450.0 + >>> round(centripetal(20,-50,15),2) + 3333.33 + >>> round(centripetal(12.25,40,25),2) + 784.0 + >>> round(centripetal(50,100,50),2) + 10000.0 + """ + if mass < 0: + raise ValueError("The mass of the body cannot be negative") + if radius <= 0: + raise ValueError("The radius is always a positive non zero integer") + return (mass * (velocity) ** 2) / radius + + +if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) From 18ffc4dec85a85837f71cd6c9b1e630b9d185001 Mon Sep 17 00:00:00 2001 From: Pronoy Mandal Date: Sat, 29 Oct 2022 18:24:13 +0530 Subject: [PATCH 0648/1543] Update password_generator.py (#7745) * Update password_generator.py 1. Use secrets module instead of random for passwords as it gives a secure source of randomness 2. Add type annotations for functions 3. Replace ctbi (variable for the characters to be included) with a more meaningful and short name 4. 
Use integer division instead of obtaining the integer part of a division computing a floating point * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- other/password_generator.py | 40 +++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/other/password_generator.py b/other/password_generator.py index c09afd7e6125..8f9d58a33b82 100644 --- a/other/password_generator.py +++ b/other/password_generator.py @@ -1,9 +1,10 @@ """Password Generator allows you to generate a random password of length N.""" -from random import choice, shuffle +import secrets +from random import shuffle from string import ascii_letters, digits, punctuation -def password_generator(length=8): +def password_generator(length: int = 8) -> str: """ >>> len(password_generator()) 8 @@ -17,58 +18,59 @@ def password_generator(length=8): 0 """ chars = ascii_letters + digits + punctuation - return "".join(choice(chars) for x in range(length)) + return "".join(secrets.choice(chars) for _ in range(length)) # ALTERNATIVE METHODS -# ctbi= characters that must be in password +# chars_incl= characters that must be in password # i= how many letters or characters the password length will be -def alternative_password_generator(ctbi, i): +def alternative_password_generator(chars_incl: str, i: int) -> str: # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... - i = i - len(ctbi) - quotient = int(i / 3) + i -= len(chars_incl) + quotient = i // 3 remainder = i % 3 - # chars = ctbi + random_letters(ascii_letters, i / 3 + remainder) + + # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) chars = ( - ctbi + chars_incl + random(ascii_letters, quotient + remainder) + random(digits, quotient) + random(punctuation, quotient) ) - chars = list(chars) - shuffle(chars) - return "".join(chars) + list_of_chars = list(chars) + shuffle(list_of_chars) + return "".join(list_of_chars) # random is a generalised function for letters, characters and numbers -def random(ctbi, i): - return "".join(choice(ctbi) for x in range(i)) +def random(chars_incl: str, i: int) -> str: + return "".join(secrets.choice(chars_incl) for _ in range(i)) -def random_number(ctbi, i): +def random_number(chars_incl, i): pass # Put your code here... -def random_letters(ctbi, i): +def random_letters(chars_incl, i): pass # Put your code here... -def random_characters(ctbi, i): +def random_characters(chars_incl, i): pass # Put your code here... 
def main(): length = int(input("Please indicate the max length of your password: ").strip()) - ctbi = input( + chars_incl = input( "Please indicate the characters that must be in your password: " ).strip() print("Password generated:", password_generator(length)) print( - "Alternative Password generated:", alternative_password_generator(ctbi, length) + "Alternative Password generated:", + alternative_password_generator(chars_incl, length), ) print("[If you are thinking of using this passsword, You better save it.]") From 584e743422565decd35b1b6f94cef3ced840698b Mon Sep 17 00:00:00 2001 From: Andrey Date: Sat, 29 Oct 2022 16:07:02 +0300 Subject: [PATCH 0649/1543] Fix yesqa hook (#7843) * fix yesqa hook * Remove redundant noqa * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 10 ++++++++-- DIRECTORY.md | 5 +++-- .../binary_tree/non_recursive_segment_tree.py | 2 +- digital_image_processing/index_calculation.py | 2 +- genetic_algorithm/basic_string.py | 2 +- maths/prime_sieve_eratosthenes.py | 2 -- 6 files changed, 14 insertions(+), 9 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7f6c206b49bc..56946f5f240f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -41,13 +41,19 @@ repos: rev: 5.0.4 hooks: - id: flake8 # See .flake8 for args - additional_dependencies: + additional_dependencies: &flake8-plugins - flake8-bugbear - flake8-builtins - flake8-broken-line - flake8-comprehensions - pep8-naming - - yesqa + + - repo: https://github.com/asottile/yesqa + rev: v1.4.0 + hooks: + - id: yesqa + additional_dependencies: + *flake8-plugins - repo: https://github.com/pre-commit/mirrors-mypy rev: v0.982 diff --git a/DIRECTORY.md b/DIRECTORY.md index 1fa6af75d9c3..198cc7077d2b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -356,14 +356,14 @@ * [Articulation Points](graphs/articulation_points.py) * [Basic Graphs](graphs/basic_graphs.py) * [Bellman Ford](graphs/bellman_ford.py) - * [Bfs Shortest Path](graphs/bfs_shortest_path.py) - * [Bfs Zero One Shortest Path](graphs/bfs_zero_one_shortest_path.py) * [Bidirectional A Star](graphs/bidirectional_a_star.py) * [Bidirectional Breadth First Search](graphs/bidirectional_breadth_first_search.py) * [Boruvka](graphs/boruvka.py) * [Breadth First Search](graphs/breadth_first_search.py) * [Breadth First Search 2](graphs/breadth_first_search_2.py) * [Breadth First Search Shortest Path](graphs/breadth_first_search_shortest_path.py) + * [Breadth First Search Shortest Path 2](graphs/breadth_first_search_shortest_path_2.py) + * [Breadth First Search Zero One Shortest Path](graphs/breadth_first_search_zero_one_shortest_path.py) * [Check Bipartite Graph Bfs](graphs/check_bipartite_graph_bfs.py) * [Check Bipartite Graph Dfs](graphs/check_bipartite_graph_dfs.py) * [Check Cycle](graphs/check_cycle.py) @@ -678,6 +678,7 @@ * [N Body Simulation](physics/n_body_simulation.py) * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) + * [Potential Energy](physics/potential_energy.py) ## Project Euler * Problem 001 diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index c29adefffd20..075ff6c912ff 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -103,7 +103,7 @@ def query(self, l: int, r: int) -> T | 
None: # noqa: E741 >>> st.query(2, 3) 7 """ - l, r = l + self.N, r + self.N # noqa: E741 + l, r = l + self.N, r + self.N res: T | None = None while l <= r: # noqa: E741 diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index be1855e99d10..67830668b0da 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -413,7 +413,7 @@ def ipvi(self): """ return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) - def i(self): # noqa: E741,E743 + def i(self): """ Intensity https://www.indexdatabase.de/db/i-single.php?id=36 diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 5cf8d691b1d7..45b8be651f6e 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -80,7 +80,7 @@ def evaluate(item: str, main_target: str = target) -> tuple[str, float]: score = len( [g for position, g in enumerate(item) if g == main_target[position]] ) - return (item, float(score)) # noqa: B023 + return (item, float(score)) # Adding a bit of concurrency can make everything faster, # diff --git a/maths/prime_sieve_eratosthenes.py b/maths/prime_sieve_eratosthenes.py index 8d60e48c2140..3a3c55085218 100644 --- a/maths/prime_sieve_eratosthenes.py +++ b/maths/prime_sieve_eratosthenes.py @@ -1,5 +1,3 @@ -# flake8: noqa - """ Sieve of Eratosthenes From 93ad7db97fa211b6e9f77025513a45df83400f88 Mon Sep 17 00:00:00 2001 From: JatinR05 <71865805+JatinR05@users.noreply.github.com> Date: Sat, 29 Oct 2022 18:58:12 +0530 Subject: [PATCH 0650/1543] Create recursive_approach_knapsack.py (#7587) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Create recursive_approach_knapsack.py Added a new naïve recursive approach to solve the knapsack problem. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update recursive_approach_knapsack.py Updated the code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update recursive_approach_knapsack.py Updated * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- knapsack/recursive_approach_knapsack.py | 52 +++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 knapsack/recursive_approach_knapsack.py diff --git a/knapsack/recursive_approach_knapsack.py b/knapsack/recursive_approach_knapsack.py new file mode 100644 index 000000000000..d813981cb79c --- /dev/null +++ b/knapsack/recursive_approach_knapsack.py @@ -0,0 +1,52 @@ +# To get an insight into naive recursive way to solve the Knapsack problem + + +""" +A shopkeeper has bags of wheat that each have different weights and different profits. +eg. +no_of_items 4 +profit 5 4 8 6 +weight 1 2 4 5 +max_weight 5 +Constraints: +max_weight > 0 +profit[i] >= 0 +weight[i] >= 0 +Calculate the maximum profit that the shopkeeper can make given maxmum weight that can +be carried. 
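+Because every item is either taken or skipped, this naive recursion explores up to 2^n combinations.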
+""" + + +def knapsack( + weights: list, values: list, number_of_items: int, max_weight: int, index: int +) -> int: + """ + Function description is as follows- + :param weights: Take a list of weights + :param values: Take a list of profits corresponding to the weights + :param number_of_items: number of items available to pick from + :param max_weight: Maximum weight that could be carried + :param index: the element we are looking at + :return: Maximum expected gain + >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) + 13 + >>> knapsack([3 ,4 , 5], [10, 9 , 8], 3, 25, 0) + 27 + """ + if index == number_of_items: + return 0 + ans1 = 0 + ans2 = 0 + ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1) + if weights[index] <= max_weight: + ans2 = values[index] + knapsack( + weights, values, number_of_items, max_weight - weights[index], index + 1 + ) + return max(ans1, ans2) + + +if __name__ == "__main__": + + import doctest + + doctest.testmod() From efb4a3aee842e1db855e678f28b79588734ff146 Mon Sep 17 00:00:00 2001 From: Anshraj Shrivastava <42239140+rajansh87@users.noreply.github.com> Date: Sat, 29 Oct 2022 18:59:15 +0530 Subject: [PATCH 0651/1543] added algo for finding permutations of an array (#7614) * Add files via upload * Delete permutations.cpython-310.pyc * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update permutations.py * Update permutations.py * Add files via upload * Delete permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/arrays/permutations.py Co-authored-by: Christian Clauss * Update permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/arrays/permutations.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update permutations.py * Update permutations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update permutations.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> --- data_structures/arrays/permutations.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 data_structures/arrays/permutations.py diff --git a/data_structures/arrays/permutations.py b/data_structures/arrays/permutations.py new file mode 100644 index 000000000000..eb3f26517863 --- /dev/null +++ b/data_structures/arrays/permutations.py @@ -0,0 +1,26 @@ +def permute(nums: list[int]) -> list[list[int]]: + """ + Return all permutations. 
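+
+ The function fixes each element in turn and recursively permutes the remaining ones, so a list of length n yields n! permutations.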
+ + >>> from itertools import permutations + >>> numbers= [1,2,3] + >>> all(list(nums) in permute(numbers) for nums in permutations(numbers)) + True + """ + result = [] + if len(nums) == 1: + return [nums.copy()] + for _ in range(len(nums)): + n = nums.pop(0) + permutations = permute(nums) + for perm in permutations: + perm.append(n) + result.extend(permutations) + nums.append(n) + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 7b521b66cfe3d16960c3fa8e01ff947794cc44a6 Mon Sep 17 00:00:00 2001 From: Carlos Villar Date: Sat, 29 Oct 2022 15:44:18 +0200 Subject: [PATCH 0652/1543] Add Viterbi algorithm (#7509) * Added Viterbi algorithm Fixes: #7465 Squashed commits * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added doctest for validators * moved all extracted functions to the main function * Forgot a type hint Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/viterbi.py | 400 +++++++++++++++++++++++++++++++++ 1 file changed, 400 insertions(+) create mode 100644 dynamic_programming/viterbi.py diff --git a/dynamic_programming/viterbi.py b/dynamic_programming/viterbi.py new file mode 100644 index 000000000000..93ab845e2ae8 --- /dev/null +++ b/dynamic_programming/viterbi.py @@ -0,0 +1,400 @@ +from typing import Any + + +def viterbi( + observations_space: list, + states_space: list, + initial_probabilities: dict, + transition_probabilities: dict, + emission_probabilities: dict, +) -> list: + """ + Viterbi Algorithm, to find the most likely path of + states from the start and the expected output. + https://en.wikipedia.org/wiki/Viterbi_algorithm + sdafads + Wikipedia example + >>> observations = ["normal", "cold", "dizzy"] + >>> states = ["Healthy", "Fever"] + >>> start_p = {"Healthy": 0.6, "Fever": 0.4} + >>> trans_p = { + ... "Healthy": {"Healthy": 0.7, "Fever": 0.3}, + ... "Fever": {"Healthy": 0.4, "Fever": 0.6}, + ... } + >>> emit_p = { + ... "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1}, + ... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}, + ... } + >>> viterbi(observations, states, start_p, trans_p, emit_p) + ['Healthy', 'Healthy', 'Fever'] + + >>> viterbi((), states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + + >>> viterbi(observations, (), start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + + >>> viterbi(observations, states, {}, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + + >>> viterbi(observations, states, start_p, {}, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + + >>> viterbi(observations, states, start_p, trans_p, {}) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + + >>> viterbi("invalid", states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: observations_space must be a list + + >>> viterbi(["valid", 123], states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: observations_space must be a list of strings + + >>> viterbi(observations, "invalid", start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: states_space must be a list + + >>> viterbi(observations, ["valid", 123], start_p, trans_p, emit_p) + Traceback (most recent call last): + ... 
+ ValueError: states_space must be a list of strings + + >>> viterbi(observations, states, "invalid", trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: initial_probabilities must be a dict + + >>> viterbi(observations, states, {2:2}, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: initial_probabilities all keys must be strings + + >>> viterbi(observations, states, {"a":2}, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: initial_probabilities all values must be float + + >>> viterbi(observations, states, start_p, "invalid", emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities must be a dict + + >>> viterbi(observations, states, start_p, {"a":2}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all values must be dict + + >>> viterbi(observations, states, start_p, {2:{2:2}}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all keys must be strings + + >>> viterbi(observations, states, start_p, {"a":{2:2}}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all keys must be strings + + >>> viterbi(observations, states, start_p, {"a":{"b":2}}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities nested dictionary all values must be float + + >>> viterbi(observations, states, start_p, trans_p, "invalid") + Traceback (most recent call last): + ... + ValueError: emission_probabilities must be a dict + + >>> viterbi(observations, states, start_p, trans_p, None) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + + """ + _validation( + observations_space, + states_space, + initial_probabilities, + transition_probabilities, + emission_probabilities, + ) + # Creates data structures and fill initial step + probabilities: dict = {} + pointers: dict = {} + for state in states_space: + observation = observations_space[0] + probabilities[(state, observation)] = ( + initial_probabilities[state] * emission_probabilities[state][observation] + ) + pointers[(state, observation)] = None + + # Fills the data structure with the probabilities of + # different transitions and pointers to previous states + for o in range(1, len(observations_space)): + observation = observations_space[o] + prior_observation = observations_space[o - 1] + for state in states_space: + # Calculates the argmax for probability function + arg_max = "" + max_probability = -1 + for k_state in states_space: + probability = ( + probabilities[(k_state, prior_observation)] + * transition_probabilities[k_state][state] + * emission_probabilities[state][observation] + ) + if probability > max_probability: + max_probability = probability + arg_max = k_state + + # Update probabilities and pointers dicts + probabilities[(state, observation)] = ( + probabilities[(arg_max, prior_observation)] + * transition_probabilities[arg_max][state] + * emission_probabilities[state][observation] + ) + + pointers[(state, observation)] = arg_max + + # The final observation + final_observation = observations_space[len(observations_space) - 1] + + # argmax for given final observation + arg_max = "" + max_probability = -1 + for k_state in states_space: + probability = probabilities[(k_state, final_observation)] + if probability > max_probability: + max_probability = probability + arg_max = k_state + last_state = arg_max + + # Process pointers backwards + previous = 
last_state + result = [] + for o in range(len(observations_space) - 1, -1, -1): + result.append(previous) + previous = pointers[previous, observations_space[o]] + result.reverse() + + return result + + +def _validation( + observations_space: Any, + states_space: Any, + initial_probabilities: Any, + transition_probabilities: Any, + emission_probabilities: Any, +) -> None: + """ + >>> observations = ["normal", "cold", "dizzy"] + >>> states = ["Healthy", "Fever"] + >>> start_p = {"Healthy": 0.6, "Fever": 0.4} + >>> trans_p = { + ... "Healthy": {"Healthy": 0.7, "Fever": 0.3}, + ... "Fever": {"Healthy": 0.4, "Fever": 0.6}, + ... } + >>> emit_p = { + ... "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1}, + ... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}, + ... } + >>> _validation(observations, states, start_p, trans_p, emit_p) + + >>> _validation([], states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + """ + _validate_not_empty( + observations_space, + states_space, + initial_probabilities, + transition_probabilities, + emission_probabilities, + ) + _validate_lists(observations_space, states_space) + _validate_dicts( + initial_probabilities, transition_probabilities, emission_probabilities + ) + + +def _validate_not_empty( + observations_space: Any, + states_space: Any, + initial_probabilities: Any, + transition_probabilities: Any, + emission_probabilities: Any, +) -> None: + """ + >>> _validate_not_empty(["a"], ["b"], {"c":0.5}, + ... {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) + + >>> _validate_not_empty(["a"], ["b"], {"c":0.5}, {}, {"f": {"g": 0.7}}) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + >>> _validate_not_empty(["a"], ["b"], None, {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + """ + if not all( + [ + observations_space, + states_space, + initial_probabilities, + transition_probabilities, + emission_probabilities, + ] + ): + raise ValueError("There's an empty parameter") + + +def _validate_lists(observations_space: Any, states_space: Any) -> None: + """ + >>> _validate_lists(["a"], ["b"]) + + >>> _validate_lists(1234, ["b"]) + Traceback (most recent call last): + ... + ValueError: observations_space must be a list + + >>> _validate_lists(["a"], [3]) + Traceback (most recent call last): + ... + ValueError: states_space must be a list of strings + """ + _validate_list(observations_space, "observations_space") + _validate_list(states_space, "states_space") + + +def _validate_list(_object: Any, var_name: str) -> None: + """ + >>> _validate_list(["a"], "mock_name") + + >>> _validate_list("a", "mock_name") + Traceback (most recent call last): + ... + ValueError: mock_name must be a list + >>> _validate_list([0.5], "mock_name") + Traceback (most recent call last): + ... + ValueError: mock_name must be a list of strings + + """ + if not isinstance(_object, list): + raise ValueError(f"{var_name} must be a list") + else: + for x in _object: + if not isinstance(x, str): + raise ValueError(f"{var_name} must be a list of strings") + + +def _validate_dicts( + initial_probabilities: Any, + transition_probabilities: Any, + emission_probabilities: Any, +) -> None: + """ + >>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) + + >>> _validate_dicts("invalid", {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) + Traceback (most recent call last): + ... 
+ ValueError: initial_probabilities must be a dict + >>> _validate_dicts({"c":0.5}, {2: {"e": 0.6}}, {"f": {"g": 0.7}}) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all keys must be strings + >>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {2: 0.7}}) + Traceback (most recent call last): + ... + ValueError: emission_probabilities all keys must be strings + >>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {"g": "h"}}) + Traceback (most recent call last): + ... + ValueError: emission_probabilities nested dictionary all values must be float + """ + _validate_dict(initial_probabilities, "initial_probabilities", float) + _validate_nested_dict(transition_probabilities, "transition_probabilities") + _validate_nested_dict(emission_probabilities, "emission_probabilities") + + +def _validate_nested_dict(_object: Any, var_name: str) -> None: + """ + >>> _validate_nested_dict({"a":{"b": 0.5}}, "mock_name") + + >>> _validate_nested_dict("invalid", "mock_name") + Traceback (most recent call last): + ... + ValueError: mock_name must be a dict + >>> _validate_nested_dict({"a": 8}, "mock_name") + Traceback (most recent call last): + ... + ValueError: mock_name all values must be dict + >>> _validate_nested_dict({"a":{2: 0.5}}, "mock_name") + Traceback (most recent call last): + ... + ValueError: mock_name all keys must be strings + >>> _validate_nested_dict({"a":{"b": 4}}, "mock_name") + Traceback (most recent call last): + ... + ValueError: mock_name nested dictionary all values must be float + """ + _validate_dict(_object, var_name, dict) + for x in _object.values(): + _validate_dict(x, var_name, float, True) + + +def _validate_dict( + _object: Any, var_name: str, value_type: type, nested: bool = False +) -> None: + """ + >>> _validate_dict({"b": 0.5}, "mock_name", float) + + >>> _validate_dict("invalid", "mock_name", float) + Traceback (most recent call last): + ... + ValueError: mock_name must be a dict + >>> _validate_dict({"a": 8}, "mock_name", dict) + Traceback (most recent call last): + ... + ValueError: mock_name all values must be dict + >>> _validate_dict({2: 0.5}, "mock_name",float, True) + Traceback (most recent call last): + ... + ValueError: mock_name all keys must be strings + >>> _validate_dict({"b": 4}, "mock_name", float,True) + Traceback (most recent call last): + ... 
+ ValueError: mock_name nested dictionary all values must be float + """ + if not isinstance(_object, dict): + raise ValueError(f"{var_name} must be a dict") + if not all(isinstance(x, str) for x in _object): + raise ValueError(f"{var_name} all keys must be strings") + if not all(isinstance(x, value_type) for x in _object.values()): + nested_text = "nested dictionary " if nested else "" + raise ValueError( + f"{var_name} {nested_text}all values must be {value_type.__name__}" + ) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 038f8a00e56bda8e8e2903fe4acf2ca7e3c83a57 Mon Sep 17 00:00:00 2001 From: sadiqebrahim <75269485+sadiqebrahim@users.noreply.github.com> Date: Sat, 29 Oct 2022 19:22:19 +0530 Subject: [PATCH 0653/1543] add electric conductivity algorithm (#7449) * add electric conductivity algorithm * Update electric_conductivity.py * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris * Update electric_conductivity.py * Update electric_conductivity.py * Update electric_conductivity.py * add algorithm Co-authored-by: Caeden Perelli-Harris --- electronics/electric_conductivity.py | 53 ++++++++++++++++++++++++++++ physics/sheer_stress.py | 51 ++++++++++++++++++++++++++ 2 files changed, 104 insertions(+) create mode 100644 electronics/electric_conductivity.py create mode 100644 physics/sheer_stress.py diff --git a/electronics/electric_conductivity.py b/electronics/electric_conductivity.py new file mode 100644 index 000000000000..11f2a607d214 --- /dev/null +++ b/electronics/electric_conductivity.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +ELECTRON_CHARGE = 1.6021e-19 # units = C + + +def electric_conductivity( + conductivity: float, + electron_conc: float, + mobility: float, +) -> tuple[str, float]: + """ + This function can calculate any one of the three - + 1. Conductivity + 2. Electron Concentration + 3. Electron Mobility + This is calculated from the other two provided values + Examples - + >>> electric_conductivity(conductivity=25, electron_conc=100, mobility=0) + ('mobility', 1.5604519068722301e+18) + >>> electric_conductivity(conductivity=0, electron_conc=1600, mobility=200) + ('conductivity', 5.12672e-14) + >>> electric_conductivity(conductivity=1000, electron_conc=0, mobility=1200) + ('electron_conc', 5.201506356240767e+18) + """ + if (conductivity, electron_conc, mobility).count(0) != 1: + raise ValueError("You cannot supply more or less than 2 values") + elif conductivity < 0: + raise ValueError("Conductivity cannot be negative") + elif electron_conc < 0: + raise ValueError("Electron concentration cannot be negative") + elif mobility < 0: + raise ValueError("mobility cannot be negative") + elif conductivity == 0: + return ( + "conductivity", + mobility * electron_conc * ELECTRON_CHARGE, + ) + elif electron_conc == 0: + return ( + "electron_conc", + conductivity / (mobility * ELECTRON_CHARGE), + ) + else: + return ( + "mobility", + conductivity / (electron_conc * ELECTRON_CHARGE), + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/physics/sheer_stress.py b/physics/sheer_stress.py new file mode 100644 index 000000000000..74a2d36b1f45 --- /dev/null +++ b/physics/sheer_stress.py @@ -0,0 +1,51 @@ +from __future__ import annotations + + +def sheer_stress( + stress: float, + tangential_force: float, + area: float, +) -> tuple[str, float]: + """ + This function can calculate any one of the three - + 1. Sheer Stress + 2. Tangential Force + 3. 
Cross-sectional Area + This is calculated from the other two provided values + Examples - + >>> sheer_stress(stress=25, tangential_force=100, area=0) + ('area', 4.0) + >>> sheer_stress(stress=0, tangential_force=1600, area=200) + ('stress', 8.0) + >>> sheer_stress(stress=1000, tangential_force=0, area=1200) + ('tangential_force', 1200000) + """ + if (stress, tangential_force, area).count(0) != 1: + raise ValueError("You cannot supply more or less than 2 values") + elif stress < 0: + raise ValueError("Stress cannot be negative") + elif tangential_force < 0: + raise ValueError("Tangential Force cannot be negative") + elif area < 0: + raise ValueError("Area cannot be negative") + elif stress == 0: + return ( + "stress", + tangential_force / area, + ) + elif tangential_force == 0: + return ( + "tangential_force", + stress * area, + ) + else: + return ( + "area", + tangential_force / stress, + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a02e7a1331583829b2768f02c4b9c412bf26251b Mon Sep 17 00:00:00 2001 From: Harsh Verma <53353745+TheLameOne@users.noreply.github.com> Date: Sat, 29 Oct 2022 19:24:32 +0530 Subject: [PATCH 0654/1543] Added algorithm for Text Justification in Strings (#7354) * Added algorithm for Text Justification in Strings * Added algorithm for Text Justification in Strings --- strings/text_justification.py | 92 +++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 strings/text_justification.py diff --git a/strings/text_justification.py b/strings/text_justification.py new file mode 100644 index 000000000000..5e86456c2456 --- /dev/null +++ b/strings/text_justification.py @@ -0,0 +1,92 @@ +def text_justification(word: str, max_width: int) -> list: + """ + Will format the string such that each line has exactly + (max_width) characters and is fully (left and right) justified, + and return the list of justified text. + + example 1: + string = "This is an example of text justification." + max_width = 16 + + output = ['This is an', + 'example of text', + 'justification. '] + + >>> text_justification("This is an example of text justification.", 16) + ['This is an', 'example of text', 'justification. 
'] + + example 2: + string = "Two roads diverged in a yellow wood" + max_width = 16 + output = ['Two roads', + 'diverged in a', + 'yellow wood '] + + >>> text_justification("Two roads diverged in a yellow wood", 16) + ['Two roads', 'diverged in a', 'yellow wood '] + + Time complexity: O(m*n) + Space complexity: O(m*n) + """ + + # Converting string into list of strings split by a space + words = word.split() + + def justify(line: list, width: int, max_width: int) -> str: + + overall_spaces_count = max_width - width + words_count = len(line) + if len(line) == 1: + # if there is only word in line + # just insert overall_spaces_count for the remainder of line + return line[0] + " " * overall_spaces_count + else: + spaces_to_insert_between_words = words_count - 1 + # num_spaces_between_words_list[i] : tells you to insert + # num_spaces_between_words_list[i] spaces + # after word on line[i] + num_spaces_between_words_list = spaces_to_insert_between_words * [ + overall_spaces_count // spaces_to_insert_between_words + ] + spaces_count_in_locations = ( + overall_spaces_count % spaces_to_insert_between_words + ) + # distribute spaces via round robin to the left words + for i in range(spaces_count_in_locations): + num_spaces_between_words_list[i] += 1 + aligned_words_list = [] + for i in range(spaces_to_insert_between_words): + # add the word + aligned_words_list.append(line[i]) + # add the spaces to insert + aligned_words_list.append(num_spaces_between_words_list[i] * " ") + # just add the last word to the sentence + aligned_words_list.append(line[-1]) + # join the aligned words list to form a justified line + return "".join(aligned_words_list) + + answer = [] + line: list[str] = [] + width = 0 + for word in words: + if width + len(word) + len(line) <= max_width: + # keep adding words until we can fill out max_width + # width = sum of length of all words (without overall_spaces_count) + # len(word) = length of current word + # len(line) = number of overall_spaces_count to insert between words + line.append(word) + width += len(word) + else: + # justify the line and add it to result + answer.append(justify(line, width, max_width)) + # reset new line and new width + line, width = [word], len(word) + remaining_spaces = max_width - width - len(line) + answer.append(" ".join(line) + (remaining_spaces + 1) * " ") + return answer + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From d84452344ae1931c635245b1311a10e330223fc6 Mon Sep 17 00:00:00 2001 From: dmorozov001 <116645674+dmorozov001@users.noreply.github.com> Date: Sat, 29 Oct 2022 15:43:03 +0100 Subject: [PATCH 0655/1543] Correcting typos in CONTRIBUTING.md (#7845) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b5a07af100ee..5cbb24e563da 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,7 @@ Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Befo ### Contributor -We are very happy that you consider implementing algorithms and data structures for others! This repository is referenced and used by learners from all over the globe. Being one of our contributors, you agree and confirm that: +We are very happy that you are considering implementing algorithms and data structures for others! This repository is referenced and used by learners from all over the globe. Being one of our contributors, you agree and confirm that: - You did your work - no plagiarism allowed - Any plagiarized work will not be merged. 
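A minimal usage sketch for the text justification helper added in PATCH 0654 above. The import path and the assumption that it is run from the repository root are illustrative only, not taken from any commit; the sample input and the padding behaviour follow the doctests shipped in strings/text_justification.py, where every returned line is exactly max_width characters wide.

# Illustrative sketch, assuming strings/text_justification.py from PATCH 0654 is
# importable (e.g. when run from the repository root); not part of any patch.
from strings.text_justification import text_justification

if __name__ == "__main__":
    for line in text_justification("Two roads diverged in a yellow wood", 16):
        # full justification pads every line to exactly 16 characters
        print(f"|{line}|")
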
From bd50a3068270261fe845aac0daf309c7134e2477 Mon Sep 17 00:00:00 2001 From: Shashank Kashyap <50551759+SKVKPandey@users.noreply.github.com> Date: Sat, 29 Oct 2022 20:55:26 +0530 Subject: [PATCH 0656/1543] Resonant Frequency & Electrical Impedance (#6983) * Resonant Frequency * Resonant Frequency of LC Circuit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update electronics/resonant_frequency.py Co-authored-by: Caeden * Update electronics/resonant_frequency.py Co-authored-by: Caeden * Update electronics/resonant_frequency.py Co-authored-by: Caeden * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated resonant_frequency.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update electronics/resonant_frequency.py Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> * Fixed doctest issues in resonant_frequency.py * Algorithm for Electrical Impedance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated Algorithm for Electrical Impedance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update resonant_frequency.py * Update electrical_impedance.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update resonant_frequency.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update electronics/electrical_impedance.py Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> * Update electronics/electrical_impedance.py Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> * Update electronics/resonant_frequency.py Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> --- electronics/electrical_impedance.py | 46 ++++++++++++++++++++++++++ electronics/resonant_frequency.py | 50 +++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 electronics/electrical_impedance.py create mode 100644 electronics/resonant_frequency.py diff --git a/electronics/electrical_impedance.py b/electronics/electrical_impedance.py new file mode 100644 index 000000000000..44041ff790b6 --- /dev/null +++ b/electronics/electrical_impedance.py @@ -0,0 +1,46 @@ +"""Electrical impedance is the measure of the opposition that a +circuit presents to a current when a voltage is applied. +Impedance extends the concept of resistance to alternating current (AC) circuits. +Source: https://en.wikipedia.org/wiki/Electrical_impedance +""" + +from __future__ import annotations + +from math import pow, sqrt + + +def electrical_impedance( + resistance: float, reactance: float, impedance: float +) -> dict[str, float]: + """ + Apply Electrical Impedance formula, on any two given electrical values, + which can be resistance, reactance, and impedance, and then in a Python dict + return name/value pair of the zero value. 
+
+    >>> electrical_impedance(3,4,0)
+    {'impedance': 5.0}
+    >>> electrical_impedance(0,4,5)
+    {'resistance': 3.0}
+    >>> electrical_impedance(3,0,5)
+    {'reactance': 4.0}
+    >>> electrical_impedance(3,4,5)
+    Traceback (most recent call last):
+    ...
+    ValueError: One and only one argument must be 0
+    """
+    if (resistance, reactance, impedance).count(0) != 1:
+        raise ValueError("One and only one argument must be 0")
+    if resistance == 0:
+        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
+    elif reactance == 0:
+        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
+    elif impedance == 0:
+        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
+    else:
+        raise ValueError("Exactly one argument must be 0")
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
diff --git a/electronics/resonant_frequency.py b/electronics/resonant_frequency.py
new file mode 100644
index 000000000000..4f95043b600a
--- /dev/null
+++ b/electronics/resonant_frequency.py
@@ -0,0 +1,50 @@
+# https://en.wikipedia.org/wiki/LC_circuit
+
+"""An LC circuit, also called a resonant circuit, tank circuit, or tuned circuit,
+is an electric circuit consisting of an inductor, represented by the letter L,
+and a capacitor, represented by the letter C, connected together.
+The circuit can act as an electrical resonator, an electrical analogue of a
+tuning fork, storing energy oscillating at the circuit's resonant frequency.
+Source: https://en.wikipedia.org/wiki/LC_circuit
+"""
+
+from __future__ import annotations
+
+from math import pi, sqrt
+
+
+def resonant_frequency(inductance: float, capacitance: float) -> tuple:
+    """
+    This function can calculate the resonant frequency of an LC circuit,
+    for the given values of inductance and capacitance.
+
+    Examples are given below:
+    >>> resonant_frequency(inductance=10, capacitance=5)
+    ('Resonant frequency', 0.022507907903927652)
+    >>> resonant_frequency(inductance=0, capacitance=5)
+    Traceback (most recent call last):
+    ...
+    ValueError: Inductance cannot be 0 or negative
+    >>> resonant_frequency(inductance=10, capacitance=0)
+    Traceback (most recent call last):
+    ...
+ ValueError: Capacitance cannot be 0 or negative + """ + + if inductance <= 0: + raise ValueError("Inductance cannot be 0 or negative") + + elif capacitance <= 0: + raise ValueError("Capacitance cannot be 0 or negative") + + else: + return ( + "Resonant frequency", + float(1 / (2 * pi * (sqrt(inductance * capacitance)))), + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 47ddba1d914bf5955a244056e794e718dee9ead1 Mon Sep 17 00:00:00 2001 From: Kushagra Makharia Date: Sat, 29 Oct 2022 21:08:40 +0530 Subject: [PATCH 0657/1543] Added cosine similarity (#7001) * Added cosine similarity * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- machine_learning/similarity_search.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index 2f5fc46c065e..72979181f67c 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -12,6 +12,7 @@ import math import numpy as np +from numpy.linalg import norm def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float: @@ -135,6 +136,22 @@ def similarity_search( return answer +def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float: + """ + Calculates cosine similarity between two data. + :param input_a: ndarray of first vector. + :param input_b: ndarray of second vector. + :return: Cosine similarity of input_a and input_b. By using math.sqrt(), + result will be float. + + >>> cosine_similarity(np.array([1]), np.array([1])) + 1.0 + >>> cosine_similarity(np.array([1, 2]), np.array([6, 32])) + 0.9615239476408232 + """ + return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b)) + + if __name__ == "__main__": import doctest From 1550731cb7457ddae216da2ffe0bc1587f5234f3 Mon Sep 17 00:00:00 2001 From: Andrey Date: Sat, 29 Oct 2022 23:45:21 +0300 Subject: [PATCH 0658/1543] Remove file-level flake8 suppression (#7844) * Remove file-level flake8 suppression * updating DIRECTORY.md * Fix tests Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 ++ data_structures/heap/binomial_heap.py | 50 +++++++++++++-------------- other/activity_selection.py | 8 ++--- searches/binary_tree_traversal.py | 10 +++--- 4 files changed, 34 insertions(+), 36 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 198cc7077d2b..9ea8f3140f35 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -671,6 +671,7 @@ ## Physics * [Casimir Effect](physics/casimir_effect.py) + * [Centripetal Force](physics/centripetal_force.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) @@ -1069,6 +1070,7 @@ * [Is Palindrome](strings/is_palindrome.py) * [Is Pangram](strings/is_pangram.py) * [Is Spain National Id](strings/is_spain_national_id.py) + * [Is Srilankan Phone Number](strings/is_srilankan_phone_number.py) * [Jaro Winkler](strings/jaro_winkler.py) * [Join](strings/join.py) * [Knuth Morris Pratt](strings/knuth_morris_pratt.py) diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index 6398c99439cd..d79fac7a99d5 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -1,5 +1,3 @@ -# flake8: noqa - """ 
Binomial Heap Reference: Advanced Data Structures, Peter Brass @@ -22,7 +20,7 @@ def __init__(self, val): self.right = None self.parent = None - def mergeTrees(self, other): + def merge_trees(self, other): """ In-place merge of two binomial trees of equal size. Returns the root of the resulting tree @@ -75,9 +73,8 @@ class BinomialHeap: 30 Deleting - delete() test - >>> for i in range(25): - ... print(first_heap.deleteMin(), end=" ") - 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 + >>> [first_heap.delete_min() for _ in range(20)] + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] Create a new Heap >>> second_heap = BinomialHeap() @@ -97,8 +94,8 @@ class BinomialHeap: # # # # preOrder() test - >>> second_heap.preOrder() - [(17, 0), ('#', 1), (31, 1), (20, 2), ('#', 3), ('#', 3), (34, 2), ('#', 3), ('#', 3)] + >>> " ".join(str(x) for x in second_heap.pre_order()) + "(17, 0) ('#', 1) (31, 1) (20, 2) ('#', 3) ('#', 3) (34, 2) ('#', 3) ('#', 3)" printing Heap - __str__() test >>> print(second_heap) @@ -113,14 +110,17 @@ class BinomialHeap: ---# mergeHeaps() test - >>> merged = second_heap.mergeHeaps(first_heap) + >>> + >>> merged = second_heap.merge_heaps(first_heap) >>> merged.peek() 17 values in merged heap; (merge is inplace) - >>> while not first_heap.isEmpty(): - ... print(first_heap.deleteMin(), end=" ") - 17 20 25 26 27 28 29 31 34 + >>> results = [] + >>> while not first_heap.is_empty(): + ... results.append(first_heap.delete_min()) + >>> results + [17, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34] """ def __init__(self, bottom_root=None, min_node=None, heap_size=0): @@ -128,7 +128,7 @@ def __init__(self, bottom_root=None, min_node=None, heap_size=0): self.bottom_root = bottom_root self.min_node = min_node - def mergeHeaps(self, other): + def merge_heaps(self, other): """ In-place merge of two binomial heaps. 
Both of them become the resulting merged heap @@ -180,7 +180,7 @@ def mergeHeaps(self, other): next_node = i.parent.parent # Merging trees - i = i.mergeTrees(i.parent) + i = i.merge_trees(i.parent) # Updating links i.left = previous_node @@ -238,7 +238,7 @@ def insert(self, val): next_node = self.bottom_root.parent.parent # Merge - self.bottom_root = self.bottom_root.mergeTrees(self.bottom_root.parent) + self.bottom_root = self.bottom_root.merge_trees(self.bottom_root.parent) # Update Links self.bottom_root.parent = next_node @@ -252,10 +252,10 @@ def peek(self): """ return self.min_node.val - def isEmpty(self): + def is_empty(self): return self.size == 0 - def deleteMin(self): + def delete_min(self): """ delete min element and return it """ @@ -317,7 +317,7 @@ def deleteMin(self): return min_value # Remaining cases # Construct heap of right subtree - newHeap = BinomialHeap( + new_heap = BinomialHeap( bottom_root=bottom_of_new, min_node=min_of_new, heap_size=size_of_new ) @@ -354,11 +354,11 @@ def deleteMin(self): self.min_node = i i = i.parent # Merge heaps - self.mergeHeaps(newHeap) + self.merge_heaps(new_heap) return min_value - def preOrder(self): + def pre_order(self): """ Returns the Pre-order representation of the heap including values of nodes plus their level distance from the root; @@ -369,9 +369,9 @@ def preOrder(self): while top_root.parent: top_root = top_root.parent # preorder - heap_preOrder = [] - self.__traversal(top_root, heap_preOrder) - return heap_preOrder + heap_pre_order = [] + self.__traversal(top_root, heap_pre_order) + return heap_pre_order def __traversal(self, curr_node, preorder, level=0): """ @@ -389,9 +389,9 @@ def __str__(self): Overwriting str for a pre-order print of nodes in heap; Performance is poor, so use only for small examples """ - if self.isEmpty(): + if self.is_empty(): return "" - preorder_heap = self.preOrder() + preorder_heap = self.pre_order() return "\n".join(("-" * level + str(value)) for value, level in preorder_heap) diff --git a/other/activity_selection.py b/other/activity_selection.py index d809bf90a3f3..18ff6a24c32a 100644 --- a/other/activity_selection.py +++ b/other/activity_selection.py @@ -1,5 +1,3 @@ -# flake8: noqa - """The following implementation assumes that the activities are already sorted according to their finish time""" @@ -10,11 +8,11 @@ # finish[] --> An array that contains finish time of all activities -def printMaxActivities(start: list[int], finish: list[int]) -> None: +def print_max_activities(start: list[int], finish: list[int]) -> None: """ >>> start = [1, 3, 0, 5, 8, 5] >>> finish = [2, 4, 6, 7, 9, 9] - >>> printMaxActivities(start, finish) + >>> print_max_activities(start, finish) The following activities are selected: 0,1,3,4, """ @@ -43,4 +41,4 @@ def printMaxActivities(start: list[int], finish: list[int]) -> None: start = [1, 3, 0, 5, 8, 5] finish = [2, 4, 6, 7, 9, 9] - printMaxActivities(start, finish) + print_max_activities(start, finish) diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index 033db83d789e..66814b47883d 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -1,5 +1,3 @@ -# flake8: noqa - """ This is pure Python implementation of tree traversal algorithms """ @@ -157,16 +155,16 @@ def level_order_actual(node: TreeNode) -> None: q: queue.Queue = queue.Queue() q.put(node) while not q.empty(): - list = [] + list_ = [] while not q.empty(): node_dequeued = q.get() print(node_dequeued.data, end=",") if node_dequeued.left: - 
list.append(node_dequeued.left) + list_.append(node_dequeued.left) if node_dequeued.right: - list.append(node_dequeued.right) + list_.append(node_dequeued.right) print() - for node in list: + for node in list_: q.put(node) From 3ec0aa85c0074d838d97dc030e582743586cd80e Mon Sep 17 00:00:00 2001 From: SparshRastogi <75373475+SparshRastogi@users.noreply.github.com> Date: Sun, 30 Oct 2022 02:54:59 +0530 Subject: [PATCH 0659/1543] Update kinetic_energy.py (#7848) Fixed a typo error in docstrings --- physics/kinetic_energy.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/physics/kinetic_energy.py b/physics/kinetic_energy.py index 8863919ac79f..77016e223c16 100644 --- a/physics/kinetic_energy.py +++ b/physics/kinetic_energy.py @@ -2,16 +2,16 @@ Find the kinetic energy of an object, given its mass and velocity. Description : In physics, the kinetic energy of an object is the energy that it -possesses due to its motion. It is defined as the work needed to accelerate a body of a -given mass from rest to its stated velocity. Having gained this energy during its -acceleration, the body maintains this kinetic energy unless its speed changes. The same +possesses due to its motion.It is defined as the work needed to accelerate a body of a +given mass from rest to its stated velocity.Having gained this energy during its +acceleration, the body maintains this kinetic energy unless its speed changes.The same amount of work is done by the body when decelerating from its current speed to a state -of rest. Formally, a kinetic energy is any term in a system's Lagrangian which includes +of rest.Formally, a kinetic energy is any term in a system's Lagrangian which includes a derivative with respect to time. In classical mechanics, the kinetic energy of a non-rotating object of mass m traveling -at a speed v is ½mv². In relativistic mechanics, this is a good approximation only when -v is much less than the speed of light. The standard unit of kinetic energy is the +at a speed v is ½mv².In relativistic mechanics, this is a good approximation only when +v is much less than the speed of light.The standard unit of kinetic energy is the joule, while the English unit of kinetic energy is the foot-pound. Reference : https://en.m.wikipedia.org/wiki/Kinetic_energy @@ -20,7 +20,7 @@ def kinetic_energy(mass: float, velocity: float) -> float: """ - Calculate kinetick energy. + Calculate kinetic energy. 
The kinetic energy of a non-rotating object of mass m traveling at a speed v is ½mv² From 7b7b3dd086eb3d8f6a82aa94b4398c0b95a7f186 Mon Sep 17 00:00:00 2001 From: Jason Devers <74424054+jdevers1@users.noreply.github.com> Date: Sun, 30 Oct 2022 01:20:07 -0400 Subject: [PATCH 0660/1543] matrix/count_paths.py (#7533) * added recursive dfs backtracking for count paths with doctests * fixed doc testing * added type hints * redefined r as row, c as col * fixed naming conventions, ran mypy, only tests that didn't pass were using List[], rathan list() * added another doctest, as well as a explanation above * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update matrix/count_paths.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * Update matrix/count_paths.py Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris Co-authored-by: J Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- matrix/count_paths.py | 75 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 matrix/count_paths.py diff --git a/matrix/count_paths.py b/matrix/count_paths.py new file mode 100644 index 000000000000..4861ad5fd0aa --- /dev/null +++ b/matrix/count_paths.py @@ -0,0 +1,75 @@ +""" +Given a grid, where you start from the top left position [0, 0], +you want to find how many paths you can take to get to the bottom right position. + +start here -> 0 0 0 0 + 1 1 0 0 + 0 0 0 1 + 0 1 0 0 <- finish here +how many 'distinct' paths can you take to get to the finish? +Using a recursive depth-first search algorithm below, you are able to +find the number of distinct unique paths (count). + +'*' will demonstrate a path +In the example above, there are two distinct paths: +1. 2. + * * * 0 * * * * + 1 1 * 0 1 1 * * + 0 0 * 1 0 0 * 1 + 0 1 * * 0 1 * * +""" + + +def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int: + """ + Recursive Backtracking Depth First Search Algorithm + + Starting from top left of a matrix, count the number of + paths that can reach the bottom right of a matrix. 
+ 1 represents a block (inaccessible) + 0 represents a valid space (accessible) + + 0 0 0 0 + 1 1 0 0 + 0 0 0 1 + 0 1 0 0 + >>> grid = [[0, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0]] + >>> depth_first_search(grid, 0, 0, set()) + 2 + + 0 0 0 0 0 + 0 1 1 1 0 + 0 1 1 1 0 + 0 0 0 0 0 + >>> grid = [[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 1, 0], [0, 0, 0, 0, 0]] + >>> depth_first_search(grid, 0, 0, set()) + 2 + """ + row_length, col_length = len(grid), len(grid[0]) + if ( + min(row, col) < 0 + or row == row_length + or col == col_length + or (row, col) in visit + or grid[row][col] == 1 + ): + return 0 + if row == row_length - 1 and col == col_length - 1: + return 1 + + visit.add((row, col)) + + count = 0 + count += depth_first_search(grid, row + 1, col, visit) + count += depth_first_search(grid, row - 1, col, visit) + count += depth_first_search(grid, row, col + 1, visit) + count += depth_first_search(grid, row, col - 1, visit) + + visit.remove((row, col)) + return count + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 2d3985006f0c88e339a900caa4974493bc6fa861 Mon Sep 17 00:00:00 2001 From: Itssxxsalman <114142076+Itssxxsalman@users.noreply.github.com> Date: Sun, 30 Oct 2022 12:03:28 +0500 Subject: [PATCH 0661/1543] Fix grammatical mistakes in `simple_keyword_cypher.py` (#6385) * Fixed grammitical mistake * Update ciphers/simple_keyword_cypher.py Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- ciphers/simple_keyword_cypher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ciphers/simple_keyword_cypher.py b/ciphers/simple_keyword_cypher.py index 447bacfc2e6c..1635471aebd1 100644 --- a/ciphers/simple_keyword_cypher.py +++ b/ciphers/simple_keyword_cypher.py @@ -21,7 +21,7 @@ def create_cipher_map(key: str) -> dict[str, str]: :param key: keyword to use :return: dictionary cipher map """ - # Create alphabet list + # Create a list of the letters in the alphabet alphabet = [chr(i + 65) for i in range(26)] # Remove duplicate characters from key key = remove_duplicates(key.upper()) From f340bde6e047d86171385b90a023ac01e8914d0c Mon Sep 17 00:00:00 2001 From: Caio Cordeiro Date: Sun, 30 Oct 2022 04:05:44 -0300 Subject: [PATCH 0662/1543] Add simple neural network (#6452) * feat: add simple foward propagation implementation * fix: add PR requested changes * feat: add code example * fix: solve pre-commit failure * feat: add doctest inside code execution * fix: PR requested changes * fix: pr requested changes Co-authored-by: Caio Cordeiro --- neural_network/simple_neural_network.py | 63 +++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 neural_network/simple_neural_network.py diff --git a/neural_network/simple_neural_network.py b/neural_network/simple_neural_network.py new file mode 100644 index 000000000000..f2a3234873b5 --- /dev/null +++ b/neural_network/simple_neural_network.py @@ -0,0 +1,63 @@ +""" +Forward propagation explanation: +https://towardsdatascience.com/forward-propagation-in-neural-networks-simplified-math-and-code-version-bbcfef6f9250 +""" + +import math +import random + + +# Sigmoid +def sigmoid_function(value: float, deriv: bool = False) -> float: + """Return the sigmoid function of a float. 
+ + >>> sigmoid_function(3.5) + 0.9706877692486436 + >>> sigmoid_function(3.5, True) + -8.75 + """ + if deriv: + return value * (1 - value) + return 1 / (1 + math.exp(-value)) + + +# Initial Value +INITIAL_VALUE = 0.02 + + +def forward_propagation(expected: int, number_propagations: int) -> float: + """Return the value found after the forward propagation training. + + >>> res = forward_propagation(32, 10000000) + >>> res > 31 and res < 33 + True + + >>> res = forward_propagation(32, 1000) + >>> res > 31 and res < 33 + False + """ + + # Random weight + weight = float(2 * (random.randint(1, 100)) - 1) + + for _ in range(number_propagations): + # Forward propagation + layer_1 = sigmoid_function(INITIAL_VALUE * weight) + # How much did we miss? + layer_1_error = (expected / 100) - layer_1 + # Error delta + layer_1_delta = layer_1_error * sigmoid_function(layer_1, True) + # Update weight + weight += INITIAL_VALUE * layer_1_delta + + return layer_1 * 100 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + expected = int(input("Expected value: ")) + number_propagations = int(input("Number of propagations: ")) + print(forward_propagation(expected, number_propagations)) From 0c5f1c01302c8208251f61730ba74e078bfd0ac8 Mon Sep 17 00:00:00 2001 From: ok-open-sc <114725648+ok-open-sc@users.noreply.github.com> Date: Sun, 30 Oct 2022 03:11:17 -0400 Subject: [PATCH 0663/1543] Increased Readability Of Variables (#6400) * Increased Readability Of Variables * Update anagrams.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update anagrams.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- strings/anagrams.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/strings/anagrams.py b/strings/anagrams.py index b671d3f3d531..fb9ac0bd1f45 100644 --- a/strings/anagrams.py +++ b/strings/anagrams.py @@ -26,15 +26,15 @@ def anagram(my_word: str) -> list[str]: >>> anagram('final') ['final'] """ - return word_bysig[signature(my_word)] + return word_by_signature[signature(my_word)] data: str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8") word_list = sorted({word.strip().lower() for word in data.splitlines()}) -word_bysig = collections.defaultdict(list) +word_by_signature = collections.defaultdict(list) for word in word_list: - word_bysig[signature(word)].append(word) + word_by_signature[signature(word)].append(word) if __name__ == "__main__": all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} From f87de60b6d1cd6e9ce412503f48727015f46ada2 Mon Sep 17 00:00:00 2001 From: lostybtw <58177990+lostybtw@users.noreply.github.com> Date: Sun, 30 Oct 2022 07:22:52 +0000 Subject: [PATCH 0664/1543] fizzbuzz complete (#6504) * fizzbuzz * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * added doctests and function to fizzbuzz * Update fizz_buzz.py * Update fizz_buzz.py * Fixed FizzBuzz * fizzbuzz passing test * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: 
Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update dynamic_programming/fizz_buzz.py Co-authored-by: Caeden * Update fizz_buzz.py * Update fizz_buzz.py * Update fizz_buzz.py * fixed fizzbuzz * Add files via upload * added mechanical energy calculation * Delete mechanical_energy.py * Update fizz_buzz.py * Update dynamic_programming/fizz_buzz.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fizz_buzz.py Co-authored-by: Caeden Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/fizz_buzz.py | 65 ++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 dynamic_programming/fizz_buzz.py diff --git a/dynamic_programming/fizz_buzz.py b/dynamic_programming/fizz_buzz.py new file mode 100644 index 000000000000..dd1d21b1075e --- /dev/null +++ b/dynamic_programming/fizz_buzz.py @@ -0,0 +1,65 @@ +# https://en.wikipedia.org/wiki/Fizz_buzz#Programming + + +def fizz_buzz(number: int, iterations: int) -> str: + """ + Plays FizzBuzz. + Prints Fizz if number is a multiple of 3. + Prints Buzz if its a multiple of 5. + Prints FizzBuzz if its a multiple of both 3 and 5 or 15. + Else Prints The Number Itself. + >>> fizz_buzz(1,7) + '1 2 Fizz 4 Buzz Fizz 7 ' + >>> fizz_buzz(1,0) + Traceback (most recent call last): + ... + ValueError: Iterations must be done more than 0 times to play FizzBuzz + >>> fizz_buzz(-5,5) + Traceback (most recent call last): + ... + ValueError: starting number must be + and integer and be more than 0 + >>> fizz_buzz(10,-5) + Traceback (most recent call last): + ... + ValueError: Iterations must be done more than 0 times to play FizzBuzz + >>> fizz_buzz(1.5,5) + Traceback (most recent call last): + ... + ValueError: starting number must be + and integer and be more than 0 + >>> fizz_buzz(1,5.5) + Traceback (most recent call last): + ... 
+ ValueError: iterations must be defined as integers + """ + + if not type(iterations) == int: + raise ValueError("iterations must be defined as integers") + if not type(number) == int or not number >= 1: + raise ValueError( + """starting number must be + and integer and be more than 0""" + ) + if not iterations >= 1: + raise ValueError("Iterations must be done more than 0 times to play FizzBuzz") + + out = "" + while number <= iterations: + if number % 3 == 0: + out += "Fizz" + if number % 5 == 0: + out += "Buzz" + if not number % 3 == 0 and not number % 5 == 0: + out += str(number) + + # print(out) + number += 1 + out += " " + return out + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 17d93cab783095dd1def3c382866cd94296db455 Mon Sep 17 00:00:00 2001 From: Carlos Villar Date: Sun, 30 Oct 2022 10:00:47 +0100 Subject: [PATCH 0665/1543] Added Manhattan distance algorithm (#7790) * Added Manhattan distance algorithm, Fixes: #7776 * Forgot that isinstance can accept a tuple * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update manhattan_distance.py * Update manhattan_distance.py Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/manhattan_distance.py | 126 ++++++++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 maths/manhattan_distance.py diff --git a/maths/manhattan_distance.py b/maths/manhattan_distance.py new file mode 100644 index 000000000000..2711d4c8ccd6 --- /dev/null +++ b/maths/manhattan_distance.py @@ -0,0 +1,126 @@ +def manhattan_distance(point_a: list, point_b: list) -> float: + """ + Expectts two list of numbers representing two points in the same + n-dimensional space + + https://en.wikipedia.org/wiki/Taxicab_geometry + + >>> manhattan_distance([1,1], [2,2]) + 2.0 + >>> manhattan_distance([1.5,1.5], [2,2]) + 1.0 + >>> manhattan_distance([1.5,1.5], [2.5,2]) + 1.5 + >>> manhattan_distance([-3, -3, -3], [0, 0, 0]) + 9.0 + >>> manhattan_distance([1,1], None) + Traceback (most recent call last): + ... + ValueError: Missing an input + >>> manhattan_distance([1,1], [2, 2, 2]) + Traceback (most recent call last): + ... + ValueError: Both points must be in the same n-dimensional space + >>> manhattan_distance([1,"one"], [2, 2, 2]) + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found str + >>> manhattan_distance(1, [2, 2, 2]) + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found int + >>> manhattan_distance([1,1], "not_a_list") + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found str + """ + + _validate_point(point_a) + _validate_point(point_b) + if len(point_a) != len(point_b): + raise ValueError("Both points must be in the same n-dimensional space") + + return float(sum(abs(a - b) for a, b in zip(point_a, point_b))) + + +def _validate_point(point: list[float]) -> None: + """ + >>> _validate_point(None) + Traceback (most recent call last): + ... + ValueError: Missing an input + >>> _validate_point([1,"one"]) + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found str + >>> _validate_point(1) + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found int + >>> _validate_point("not_a_list") + Traceback (most recent call last): + ... 
+ TypeError: Expected a list of numbers as input, found str + """ + if point: + if isinstance(point, list): + for item in point: + if not isinstance(item, (int, float)): + raise TypeError( + f"Expected a list of numbers as input, " + f"found {type(item).__name__}" + ) + else: + raise TypeError( + f"Expected a list of numbers as input, found {type(point).__name__}" + ) + else: + raise ValueError("Missing an input") + + +def manhattan_distance_one_liner(point_a: list, point_b: list) -> float: + """ + Version with one liner + + >>> manhattan_distance_one_liner([1,1], [2,2]) + 2.0 + >>> manhattan_distance_one_liner([1.5,1.5], [2,2]) + 1.0 + >>> manhattan_distance_one_liner([1.5,1.5], [2.5,2]) + 1.5 + >>> manhattan_distance_one_liner([-3, -3, -3], [0, 0, 0]) + 9.0 + >>> manhattan_distance_one_liner([1,1], None) + Traceback (most recent call last): + ... + ValueError: Missing an input + >>> manhattan_distance_one_liner([1,1], [2, 2, 2]) + Traceback (most recent call last): + ... + ValueError: Both points must be in the same n-dimensional space + >>> manhattan_distance_one_liner([1,"one"], [2, 2, 2]) + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found str + >>> manhattan_distance_one_liner(1, [2, 2, 2]) + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found int + >>> manhattan_distance_one_liner([1,1], "not_a_list") + Traceback (most recent call last): + ... + TypeError: Expected a list of numbers as input, found str + """ + + _validate_point(point_a) + _validate_point(point_b) + if len(point_a) != len(point_b): + raise ValueError("Both points must be in the same n-dimensional space") + + return float(sum(abs(x - y) for x, y in zip(point_a, point_b))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 57ccabbaeb0f32165271e3a218bc9c6dcfc21823 Mon Sep 17 00:00:00 2001 From: Andrey Date: Sun, 30 Oct 2022 11:01:58 +0200 Subject: [PATCH 0666/1543] Update docs (#7867) * Update docs, remove unused excludes from pre-commit * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ---- CONTRIBUTING.md | 2 +- DIRECTORY.md | 12 ++++++++++++ 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 56946f5f240f..004def5e4e8b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,10 +7,6 @@ repos: - id: end-of-file-fixer types: [python] - id: trailing-whitespace - exclude: | - (?x)^( - data_structures/heap/binomial_heap.py - )$ - id: requirements-txt-fixer - repo: https://github.com/MarcoGorelli/auto-walrus diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5cbb24e563da..37e020b8fd8a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -66,7 +66,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.9+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. +- Please write in Python 3.10+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. 
- Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. diff --git a/DIRECTORY.md b/DIRECTORY.md index 9ea8f3140f35..8ac9c3be713a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -158,6 +158,8 @@ * [Weight Conversion](conversions/weight_conversion.py) ## Data Structures + * Arrays + * [Permutations](data_structures/arrays/permutations.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) @@ -291,6 +293,7 @@ * [Factorial](dynamic_programming/factorial.py) * [Fast Fibonacci](dynamic_programming/fast_fibonacci.py) * [Fibonacci](dynamic_programming/fibonacci.py) + * [Fizz Buzz](dynamic_programming/fizz_buzz.py) * [Floyd Warshall](dynamic_programming/floyd_warshall.py) * [Integer Partition](dynamic_programming/integer_partition.py) * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) @@ -313,12 +316,16 @@ * [Rod Cutting](dynamic_programming/rod_cutting.py) * [Subset Generation](dynamic_programming/subset_generation.py) * [Sum Of Subset](dynamic_programming/sum_of_subset.py) + * [Viterbi](dynamic_programming/viterbi.py) ## Electronics * [Carrier Concentration](electronics/carrier_concentration.py) * [Coulombs Law](electronics/coulombs_law.py) + * [Electric Conductivity](electronics/electric_conductivity.py) * [Electric Power](electronics/electric_power.py) + * [Electrical Impedance](electronics/electrical_impedance.py) * [Ohms Law](electronics/ohms_law.py) + * [Resonant Frequency](electronics/resonant_frequency.py) ## File Transfer * [Receive File](file_transfer/receive_file.py) @@ -430,6 +437,7 @@ ## Knapsack * [Greedy Knapsack](knapsack/greedy_knapsack.py) * [Knapsack](knapsack/knapsack.py) + * [Recursive Approach Knapsack](knapsack/recursive_approach_knapsack.py) * Tests * [Test Greedy Knapsack](knapsack/tests/test_greedy_knapsack.py) * [Test Knapsack](knapsack/tests/test_knapsack.py) @@ -622,6 +630,7 @@ ## Matrix * [Binary Search Matrix](matrix/binary_search_matrix.py) * [Count Islands In Matrix](matrix/count_islands_in_matrix.py) + * [Count Paths](matrix/count_paths.py) * [Cramers Rule 2X2](matrix/cramers_rule_2x2.py) * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Largest Square Area In Matrix](matrix/largest_square_area_in_matrix.py) @@ -645,6 +654,7 @@ * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Perceptron](neural_network/perceptron.py) + * [Simple Neural Network](neural_network/simple_neural_network.py) ## Other * [Activity Selection](other/activity_selection.py) @@ -680,6 +690,7 @@ * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) * [Potential Energy](physics/potential_energy.py) + * [Sheer Stress](physics/sheer_stress.py) ## Project Euler * Problem 001 @@ -1089,6 +1100,7 @@ * [Reverse Words](strings/reverse_words.py) * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) + * [Text Justification](strings/text_justification.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) * [Wildcard Pattern Matching](strings/wildcard_pattern_matching.py) From 5ba5c548584f44bac0bc3c0cb4e95233560627cf Mon Sep 17 00:00:00 2001 From: Sushant Srivastav 
<63559772+sushant4191@users.noreply.github.com> Date: Sun, 30 Oct 2022 14:38:54 +0530 Subject: [PATCH 0667/1543] Updated info (#7866) * Updated info Updated the readme section for sorts. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sorts/README.md Co-authored-by: Caeden Perelli-Harris * Update README.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss --- sorts/README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 sorts/README.md diff --git a/sorts/README.md b/sorts/README.md new file mode 100644 index 000000000000..ceb0207c2be4 --- /dev/null +++ b/sorts/README.md @@ -0,0 +1,11 @@ +# Sorting Algorithms +Sorting is the process of putting data in a specific order. The way to arrange data in a specific order +is specified by the sorting algorithm. The most typical orders are lexical or numerical. The significance +of sorting lies in the fact that, if data is stored in a sorted manner, data searching can be highly optimised. +Another use for sorting is to represent data in a more readable manner. + +This section contains a lot of important algorithms that helps us to use sorting algorithms in various scenarios. +## References +* +* +* From 87a5d919761e9ccb05e19e68a5307348c6264cd0 Mon Sep 17 00:00:00 2001 From: Kevin Joven <59969678+KevinJoven11@users.noreply.github.com> Date: Sun, 30 Oct 2022 05:49:33 -0400 Subject: [PATCH 0668/1543] quantum_teleportation.py (#6632) * quantum_teleportation.py This code is for the #Hacktoberfest. This file run the quantum teleportation circuit using Qiskit. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum/quantum_teleportation.py Co-authored-by: Caeden * Update quantum/quantum_teleportation.py Co-authored-by: Caeden * Update Corrected some typos. Add more comments for adding the gates. Update the variable qc with quantum_circuit in the simulator and execute. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * python return typehint solved. * Fix long line Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Co-authored-by: Christian Clauss --- quantum/quantum_teleportation.py | 70 ++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 quantum/quantum_teleportation.py diff --git a/quantum/quantum_teleportation.py b/quantum/quantum_teleportation.py new file mode 100644 index 000000000000..5fbc57a66821 --- /dev/null +++ b/quantum/quantum_teleportation.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +""" +Build quantum teleportation circuit using three quantum bits +and 1 classical bit. The main idea is to send one qubit from +Alice to Bob using the entanglement properties. This experiment +run in IBM Q simulator with 1000 shots. +. 
+References: +https://en.wikipedia.org/wiki/Quantum_teleportation +https://qiskit.org/textbook/ch-algorithms/teleportation.html +""" + +import numpy as np +import qiskit +from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute + + +def quantum_teleportation( + theta: float = np.pi / 2, phi: float = np.pi / 2, lam: float = np.pi / 2 +) -> qiskit.result.counts.Counts: + + """ + # >>> quantum_teleportation() + #{'00': 500, '11': 500} # ideally + # ┌─────────────────┐ ┌───┐ + #qr_0: ┤ U(π/2,π/2,π/2) ├───────■──┤ H ├─■───────── + # └──────┬───┬──────┘ ┌─┴─┐└───┘ │ + #qr_1: ───────┤ H ├─────────■──┤ X ├──────┼───■───── + # └───┘ ┌─┴─┐└───┘ │ ┌─┴─┐┌─┐ + #qr_2: ───────────────────┤ X ├───────────■─┤ X ├┤M├ + # └───┘ └───┘└╥┘ + #cr: 1/═══════════════════════════════════════════╩═ + Args: + theta (float): Single qubit rotation U Gate theta parameter. Default to np.pi/2 + phi (float): Single qubit rotation U Gate phi parameter. Default to np.pi/2 + lam (float): Single qubit rotation U Gate lam parameter. Default to np.pi/2 + Returns: + qiskit.result.counts.Counts: Teleported qubit counts. + """ + + qr = QuantumRegister(3, "qr") # Define the number of quantum bits + cr = ClassicalRegister(1, "cr") # Define the number of classical bits + + quantum_circuit = QuantumCircuit(qr, cr) # Define the quantum circuit. + + # Build the circuit + quantum_circuit.u(theta, phi, lam, 0) # Quantum State to teleport + quantum_circuit.h(1) # add hadamard gate + quantum_circuit.cx( + 1, 2 + ) # add control gate with qubit 1 as control and 2 as target. + quantum_circuit.cx(0, 1) + quantum_circuit.h(0) + quantum_circuit.cz(0, 2) # add control z gate. + quantum_circuit.cx(1, 2) + + quantum_circuit.measure([2], [0]) # measure the qubit. + + # Simulate the circuit using qasm simulator + backend = Aer.get_backend("qasm_simulator") + job = execute(quantum_circuit, backend, shots=1000) + + return job.result().get_counts(quantum_circuit) + + +if __name__ == "__main__": + print( + "Total count for teleported state is: " + f"{quantum_teleportation(np.pi/2, np.pi/2, np.pi/2)}" + ) From 00dfad9d20abf755a91abc0ba35f5d92fcab9149 Mon Sep 17 00:00:00 2001 From: giladwo <25708271+giladwo@users.noreply.github.com> Date: Sun, 30 Oct 2022 11:59:10 +0200 Subject: [PATCH 0669/1543] Simplify climbing stairs and use constant memory (#6628) * Simplify climbing stairs and use constant memory * number_of_steps Co-authored-by: Christian Clauss --- dynamic_programming/climbing_stairs.py | 29 +++++++++++++------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/dynamic_programming/climbing_stairs.py b/dynamic_programming/climbing_stairs.py index 048d57aed1be..d6273d025f08 100644 --- a/dynamic_programming/climbing_stairs.py +++ b/dynamic_programming/climbing_stairs.py @@ -1,20 +1,20 @@ #!/usr/bin/env python3 -def climb_stairs(n: int) -> int: +def climb_stairs(number_of_steps: int) -> int: """ LeetCdoe No.70: Climbing Stairs - Distinct ways to climb a n step staircase where - each time you can either climb 1 or 2 steps. + Distinct ways to climb a number_of_steps staircase where each time you can either + climb 1 or 2 steps. 
Args: - n: number of steps of staircase + number_of_steps: number of steps on the staircase Returns: - Distinct ways to climb a n step staircase + Distinct ways to climb a number_of_steps staircase Raises: - AssertionError: n not positive integer + AssertionError: number_of_steps not positive integer >>> climb_stairs(3) 3 @@ -23,18 +23,17 @@ def climb_stairs(n: int) -> int: >>> climb_stairs(-7) # doctest: +ELLIPSIS Traceback (most recent call last): ... - AssertionError: n needs to be positive integer, your input -7 + AssertionError: number_of_steps needs to be positive integer, your input -7 """ assert ( - isinstance(n, int) and n > 0 - ), f"n needs to be positive integer, your input {n}" - if n == 1: + isinstance(number_of_steps, int) and number_of_steps > 0 + ), f"number_of_steps needs to be positive integer, your input {number_of_steps}" + if number_of_steps == 1: return 1 - dp = [0] * (n + 1) - dp[0], dp[1] = (1, 1) - for i in range(2, n + 1): - dp[i] = dp[i - 1] + dp[i - 2] - return dp[n] + previous, current = 1, 1 + for _ in range(number_of_steps - 1): + current, previous = current + previous, current + return current if __name__ == "__main__": From 84facb78b20be6a9a90307c79e318c65a04987ac Mon Sep 17 00:00:00 2001 From: Saksham1970 <45041294+Saksham1970@users.noreply.github.com> Date: Sun, 30 Oct 2022 15:40:16 +0530 Subject: [PATCH 0670/1543] Project Euler: 092 decreased the time (#6627) * Added explanation and increased speed of the solution of problem 092 * updating DIRECTORY.md * Added temporary fix to the failing of problem 104 * Reduced few seconds by minor improvements * Update sol.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- project_euler/problem_092/sol1.py | 42 +++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/project_euler/problem_092/sol1.py b/project_euler/problem_092/sol1.py index d326fc33fcca..33a6c06946f7 100644 --- a/project_euler/problem_092/sol1.py +++ b/project_euler/problem_092/sol1.py @@ -11,11 +11,11 @@ How many starting numbers below ten million will arrive at 89? """ - -DIGITS_SQUARED = [digit**2 for digit in range(10)] +DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)] def next_number(number: int) -> int: + """ Returns the next number of the chain by adding the square of each digit to form a new number. @@ -28,15 +28,29 @@ def next_number(number: int) -> int: >>> next_number(32) 13 """ + sum_of_digits_squared = 0 while number: - sum_of_digits_squared += DIGITS_SQUARED[number % 10] - number //= 10 + + # Increased Speed Slightly by checking every 5 digits together. + sum_of_digits_squared += DIGITS_SQUARED[number % 100000] + number //= 100000 return sum_of_digits_squared -CHAINS = {1: True, 58: False} +# There are 2 Chains made, +# One ends with 89 with the chain member 58 being the one which when declared first, +# there will be the least number of iterations for all the members to be checked. + +# The other one ends with 1 and has only one element 1. + +# So 58 and 1 are chosen to be declared at the starting. 
+ +# Changed dictionary to an array to quicken the solution +CHAINS: list[bool | None] = [None] * 10000000 +CHAINS[0] = True +CHAINS[57] = False def chain(number: int) -> bool: @@ -54,11 +68,16 @@ def chain(number: int) -> bool: >>> chain(1) True """ - if number in CHAINS: - return CHAINS[number] + + if CHAINS[number - 1] is not None: + return CHAINS[number - 1] # type: ignore number_chain = chain(next_number(number)) - CHAINS[number] = number_chain + CHAINS[number - 1] = number_chain + + while number < 10000000: + CHAINS[number - 1] = number_chain + number *= 10 return number_chain @@ -74,12 +93,15 @@ def solution(number: int = 10000000) -> int: >>> solution(10000000) 8581146 """ - return sum(1 for i in range(1, number) if not chain(i)) + for i in range(1, number): + if CHAINS[i] is None: + chain(i + 1) + + return CHAINS[:number].count(False) if __name__ == "__main__": import doctest doctest.testmod() - print(f"{solution() = }") From 48a73a28d477a1b634479001bc04e0886b265bfb Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 30 Oct 2022 10:11:29 +0000 Subject: [PATCH 0671/1543] fix(quantum): Correct simulator deprecation (#7869) --- quantum/quantum_teleportation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/quantum/quantum_teleportation.py b/quantum/quantum_teleportation.py index 5fbc57a66821..d04b44d15a05 100644 --- a/quantum/quantum_teleportation.py +++ b/quantum/quantum_teleportation.py @@ -57,7 +57,7 @@ def quantum_teleportation( quantum_circuit.measure([2], [0]) # measure the qubit. # Simulate the circuit using qasm simulator - backend = Aer.get_backend("qasm_simulator") + backend = Aer.get_backend("aer_simulator") job = execute(quantum_circuit, backend, shots=1000) return job.result().get_counts(quantum_circuit) From ba576a9a0b0a41405cfa11606c39908a1bc2b01b Mon Sep 17 00:00:00 2001 From: Devesh Swarnkar <71492529+devesh-0419@users.noreply.github.com> Date: Sun, 30 Oct 2022 15:44:02 +0530 Subject: [PATCH 0672/1543] Create README.md (#6642) for blockchain file --- blockchain/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 blockchain/README.md diff --git a/blockchain/README.md b/blockchain/README.md new file mode 100644 index 000000000000..5ae7f95ec981 --- /dev/null +++ b/blockchain/README.md @@ -0,0 +1,8 @@ +# Blockchain + +A Blockchain is a type of distributed ledger technology (DLT) that consists of growing list of records, called blocks, that are securely linked together using cryptography. + +* +* +* +* From ca923389c0330b6b7afc935bdd7fa9a15d377079 Mon Sep 17 00:00:00 2001 From: Si Lam Date: Sun, 30 Oct 2022 05:25:51 -0500 Subject: [PATCH 0673/1543] Description of Double hasing (#6467) * Description of DOuble hasing * Fix sheebang * Update double_hash.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update double_hash.py Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/hashing/double_hash.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py index bd1355fca65d..453e0d13106d 100644 --- a/data_structures/hashing/double_hash.py +++ b/data_structures/hashing/double_hash.py @@ -1,4 +1,16 @@ #!/usr/bin/env python3 +""" +Double hashing is a collision resolving technique in Open Addressed Hash tables. 
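+(In open addressing, every entry is stored in the table itself rather than in
+per-slot chains, so a probe sequence is needed whenever the slot chosen by the
+first hash function is already occupied.)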
+Double hashing uses the idea of applying a second hash function to key when a collision +occurs. The advantage of Double hashing is that it is one of the best form of probing, +producing a uniform distribution of records throughout a hash table. This technique +does not yield any clusters. It is one of effective method for resolving collisions. + +Double hashing can be done using: (hash1(key) + i * hash2(key)) % TABLE_SIZE +Where hash1() and hash2() are hash functions and TABLE_SIZE is size of hash table. + +Reference: https://en.wikipedia.org/wiki/Double_hashing +""" from .hash_table import HashTable from .number_theory.prime_numbers import is_prime, next_prime From c0b0b128b7ad4a5a75ed866bc7c114c3cf7a89ef Mon Sep 17 00:00:00 2001 From: Kavienan J <45987371+kavienanj@users.noreply.github.com> Date: Sun, 30 Oct 2022 15:56:46 +0530 Subject: [PATCH 0674/1543] Add Ideal Gas Law for physics (#6503) * add physics ideal gas law * run pre commit * Update physics/ideal_gas_law.py Suggestion #1 Co-authored-by: Caeden * Update physics/ideal_gas_law.py Suggestion #2 Co-authored-by: Caeden * run pre commit * Update volume return line sugesstion Co-authored-by: Caeden Perelli-Harris * Add suggestions * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris Co-authored-by: Caeden Co-authored-by: Christian Clauss --- physics/ideal_gas_law.py | 59 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 physics/ideal_gas_law.py diff --git a/physics/ideal_gas_law.py b/physics/ideal_gas_law.py new file mode 100644 index 000000000000..805da47b0079 --- /dev/null +++ b/physics/ideal_gas_law.py @@ -0,0 +1,59 @@ +""" +The ideal gas law, also called the general gas equation, is the +equation of state of a hypothetical ideal gas. It is a good approximation +of the behavior of many gases under many conditions, although it has +several limitations. It was first stated by Benoît Paul Émile Clapeyron +in 1834 as a combination of the empirical Boyle's law, Charles's law, +Avogadro's law, and Gay-Lussac's law.[1] The ideal gas law is often written +in an empirical form: + ------------ + | PV = nRT | + ------------ +P = Pressure (Pa) +V = Volume (m^3) +n = Amount of substance (mol) +R = Universal gas constant +T = Absolute temperature (Kelvin) + +(Description adapted from https://en.wikipedia.org/wiki/Ideal_gas_law ) +""" + +UNIVERSAL_GAS_CONSTANT = 8.314462 # Unit - J mol-1 K-1 + + +def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float: + """ + >>> pressure_of_gas_system(2, 100, 5) + 332.57848 + >>> pressure_of_gas_system(0.5, 273, 0.004) + 283731.01575 + >>> pressure_of_gas_system(3, -0.46, 23.5) + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter positive value. + """ + if moles < 0 or kelvin < 0 or volume < 0: + raise ValueError("Invalid inputs. Enter positive value.") + return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume + + +def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float: + """ + >>> volume_of_gas_system(2, 100, 5) + 332.57848 + >>> volume_of_gas_system(0.5, 273, 0.004) + 283731.01575 + >>> volume_of_gas_system(3, -0.46, 23.5) + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter positive value. + """ + if moles < 0 or kelvin < 0 or pressure < 0: + raise ValueError("Invalid inputs. 
Enter positive value.") + return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From b32903d22f3a0fc8985a3dd1e4c4645f12b9f961 Mon Sep 17 00:00:00 2001 From: Kavienan J <45987371+kavienanj@users.noreply.github.com> Date: Sun, 30 Oct 2022 15:59:00 +0530 Subject: [PATCH 0675/1543] Add root mean square speed of gas molecules to physics (#6569) * add rms speed of molecule to physics * Update physics/rms_speed_of_molecule.py Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- physics/rms_speed_of_molecule.py | 52 ++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 physics/rms_speed_of_molecule.py diff --git a/physics/rms_speed_of_molecule.py b/physics/rms_speed_of_molecule.py new file mode 100644 index 000000000000..478cee01c7fd --- /dev/null +++ b/physics/rms_speed_of_molecule.py @@ -0,0 +1,52 @@ +""" +The root-mean-square speed is essential in measuring the average speed of particles +contained in a gas, defined as, + ----------------- + | Vrms = √3RT/M | + ----------------- + +In Kinetic Molecular Theory, gasified particles are in a condition of constant random +motion; each particle moves at a completely different pace, perpetually clashing and +changing directions consistently velocity is used to describe the movement of gas +particles, thereby taking into account both speed and direction. Although the velocity +of gaseous particles is constantly changing, the distribution of velocities does not +change. +We cannot gauge the velocity of every individual particle, thus we frequently reason +in terms of the particles average behavior. Particles moving in opposite directions +have velocities of opposite signs. Since gas particles are in random motion, it's +plausible that there'll be about as several moving in one direction as within the other +way, which means that the average velocity for a collection of gas particles equals +zero; as this value is unhelpful, the average of velocities can be determined using an +alternative method. +""" + + +UNIVERSAL_GAS_CONSTANT = 8.3144598 + + +def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float: + """ + >>> rms_speed_of_molecule(100, 2) + 35.315279554323226 + >>> rms_speed_of_molecule(273, 12) + 23.821458421977443 + """ + if temperature < 0: + raise Exception("Temperature cannot be less than 0 K") + if molar_mass <= 0: + raise Exception("Molar mass cannot be less than or equal to 0 kg/mol") + else: + return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 + + +if __name__ == "__main__": + import doctest + + # run doctest + doctest.testmod() + + # example + temperature = 300 + molar_mass = 28 + vrms = rms_speed_of_molecule(temperature, molar_mass) + print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s") From fcfe35c3d8ed15037c0f20e3ee2268eea840b1ff Mon Sep 17 00:00:00 2001 From: samyakpagariya <72349392+samyakpagariya@users.noreply.github.com> Date: Sun, 30 Oct 2022 16:13:41 +0530 Subject: [PATCH 0676/1543] For the better understanding of time taken. (#6583) * For the better understanding of time taken. In this change I have initialized a variable p with the value of (1e9+7) and then took the modulus of process time with it . This modification gives better time taken by the process . Firstly it was giving answer in the exponential now it gives in the integer form. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- sorts/bubble_sort.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sorts/bubble_sort.py b/sorts/bubble_sort.py index d4f0d25ca77c..aef2da272bd0 100644 --- a/sorts/bubble_sort.py +++ b/sorts/bubble_sort.py @@ -49,4 +49,4 @@ def bubble_sort(collection): unsorted = [int(item) for item in user_input.split(",")] start = time.process_time() print(*bubble_sort(unsorted), sep=",") - print(f"Processing time: {time.process_time() - start}") + print(f"Processing time: {(time.process_time() - start)%1e9 + 7}") From 00fc53de9709648b495ecf707549d6068592fb76 Mon Sep 17 00:00:00 2001 From: happiestbee <87628038+happiestbee@users.noreply.github.com> Date: Sun, 30 Oct 2022 06:49:05 -0400 Subject: [PATCH 0677/1543] added sumset.py Fixes: #{6563} (#6742) * Create sumset.py * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add descriptive var names * Update maths/sumset.py Co-authored-by: Caeden * Update sumset.py * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Co-authored-by: Christian Clauss --- DIRECTORY.md | 5 +++++ maths/sumset.py | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 maths/sumset.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 8ac9c3be713a..38fd1d656488 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -560,6 +560,7 @@ * [Lucas Lehmer Primality Test](maths/lucas_lehmer_primality_test.py) * [Lucas Series](maths/lucas_series.py) * [Maclaurin Series](maths/maclaurin_series.py) + * [Manhattan Distance](maths/manhattan_distance.py) * [Matrix Exponentiation](maths/matrix_exponentiation.py) * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) * [Median Of Two Arrays](maths/median_of_two_arrays.py) @@ -616,6 +617,7 @@ * [Sum Of Digits](maths/sum_of_digits.py) * [Sum Of Geometric Progression](maths/sum_of_geometric_progression.py) * [Sum Of Harmonic Series](maths/sum_of_harmonic_series.py) + * [Sumset](maths/sumset.py) * [Sylvester Sequence](maths/sylvester_sequence.py) * [Test Prime Check](maths/test_prime_check.py) * [Trapezoidal Rule](maths/trapezoidal_rule.py) @@ -683,6 +685,7 @@ * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) + * [Ideal Gas Law](physics/ideal_gas_law.py) * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [Malus Law](physics/malus_law.py) @@ -690,6 +693,7 @@ * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) * [Potential Energy](physics/potential_energy.py) + * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Sheer Stress](physics/sheer_stress.py) ## Project Euler @@ -978,6 +982,7 @@ * [Not Gate](quantum/not_gate.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) + * [Quantum 
Teleportation](quantum/quantum_teleportation.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) * [Superdense Coding](quantum/superdense_coding.py) diff --git a/maths/sumset.py b/maths/sumset.py new file mode 100644 index 000000000000..fa18f9e24b4c --- /dev/null +++ b/maths/sumset.py @@ -0,0 +1,37 @@ +""" + +Calculates the SumSet of two sets of numbers (A and B) + +Source: + https://en.wikipedia.org/wiki/Sumset + +""" + + +def sumset(set_a: set, set_b: set) -> set: + """ + :param first set: a set of numbers + :param second set: a set of numbers + :return: the nth number in Sylvester's sequence + + >>> sumset({1, 2, 3}, {4, 5, 6}) + {5, 6, 7, 8, 9} + + >>> sumset({1, 2, 3}, {4, 5, 6, 7}) + {5, 6, 7, 8, 9, 10} + + >>> sumset({1, 2, 3, 4}, 3) + Traceback (most recent call last): + ... + AssertionError: The input value of [set_b=3] is not a set + """ + assert isinstance(set_a, set), f"The input value of [set_a={set_a}] is not a set" + assert isinstance(set_b, set), f"The input value of [set_b={set_b}] is not a set" + + return {a + b for a in set_a for b in set_b} + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 6b6d8cc1110b16b38c7e6aafe91cb6f9583669ae Mon Sep 17 00:00:00 2001 From: Micael Pereira <8707982+micaelalex@users.noreply.github.com> Date: Sun, 30 Oct 2022 10:49:22 +0000 Subject: [PATCH 0678/1543] Adding ELFHash Algorithm (#6731) * Adding ELFHash Algorithm Adding a new Hash Algorithm. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update elf.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update elf.py * Update elf.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update elf.py * Apply suggestions from code review Co-authored-by: Caeden Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden --- hashes/elf.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 hashes/elf.py diff --git a/hashes/elf.py b/hashes/elf.py new file mode 100644 index 000000000000..87fe339da44d --- /dev/null +++ b/hashes/elf.py @@ -0,0 +1,23 @@ +def elf_hash(data: str) -> int: + """ + Implementation of ElfHash Algorithm, a variant of PJW hash function. + + Returns: + [int] -- [32 bit binary int] + >>> elf_hash('lorem ipsum') + 253956621 + """ + hash = x = 0 + for letter in data: + hash = (hash << 4) + ord(letter) + x = hash & 0xF0000000 + if x != 0: + hash ^= x >> 24 + hash &= ~x + return hash + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From cc423007800b8707ea87353be808a90bef13ba18 Mon Sep 17 00:00:00 2001 From: Pravin Date: Sun, 30 Oct 2022 16:20:08 +0530 Subject: [PATCH 0679/1543] Added Readme file to document the hashing algorithm. (#6743) * Added Readme file to document the hashing algorithm. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- hashes/README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 hashes/README.md diff --git a/hashes/README.md b/hashes/README.md new file mode 100644 index 000000000000..6df9a2fb6360 --- /dev/null +++ b/hashes/README.md @@ -0,0 +1,17 @@ +# Hashes +Hashing is the process of mapping any amount of data to a specified size using an algorithm. This is known as a hash value (or, if you're feeling fancy, a hash code, hash sums, or even a hash digest). Hashing is a one-way function, whereas encryption is a two-way function. While it is functionally conceivable to reverse-hash stuff, the required computing power makes it impractical. Hashing is a one-way street. +Unlike encryption, which is intended to protect data in transit, hashing is intended to authenticate that a file or piece of data has not been altered—that it is authentic. In other words, it functions as a checksum. + +## Common hashing algorithms +### MD5 +This is one of the first algorithms that has gained widespread acceptance. MD5 is hashing algorithm made by Ray Rivest that is known to suffer vulnerabilities. It was created in 1992 as the successor to MD4. Currently MD6 is in the works, but as of 2009 Rivest had removed it from NIST consideration for SHA-3. + +### SHA +SHA stands for Security Hashing Algorithm and it’s probably best known as the hashing algorithm used in most SSL/TLS cipher suites. A cipher suite is a collection of ciphers and algorithms that are used for SSL/TLS connections. SHA handles the hashing aspects. SHA-1, as we mentioned earlier, is now deprecated. SHA-2 is now mandatory. SHA-2 is sometimes known has SHA-256, though variants with longer bit lengths are also available. + +### SHA256 +SHA 256 is a member of the SHA 2 algorithm family, under which SHA stands for Secure Hash Algorithm. It was a collaborative effort between both the NSA and NIST to implement a successor to the SHA 1 family, which was beginning to lose potency against brute force attacks. It was published in 2001. +The importance of the 256 in the name refers to the final hash digest value, i.e. the hash value will remain 256 bits regardless of the size of the plaintext/cleartext. Other algorithms in the SHA family are similar to SHA 256 in some ways. + +### Luhn +The Luhn algorithm, also renowned as the modulus 10 or mod 10 algorithm, is a straightforward checksum formula used to validate a wide range of identification numbers, including credit card numbers, IMEI numbers, and Canadian Social Insurance Numbers. A community of mathematicians developed the LUHN formula in the late 1960s. Companies offering credit cards quickly followed suit. Since the algorithm is in the public interest, anyone can use it. The algorithm is used by most credit cards and many government identification numbers as a simple method of differentiating valid figures from mistyped or otherwise incorrect numbers. It was created to guard against unintentional errors, not malicious attacks. 
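+
+For illustration only, a minimal sketch of the mod-10 check (not necessarily how any implementation elsewhere in this repository does it):
+
+```python
+def luhn_is_valid(number: str) -> bool:
+    """Return True if the digit string passes the Luhn mod-10 check."""
+    digits = [int(d) for d in reversed(number)]
+    total = sum(digits[0::2])  # digits in odd positions, counted from the right
+    # double every second digit and add the digits of each product
+    total += sum(sum(divmod(2 * d, 10)) for d in digits[1::2])
+    return total % 10 == 0
+
+
+assert luhn_is_valid("79927398713")  # a commonly cited valid test number
+```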
\ No newline at end of file From b5d7f186f4c93e0a00635e9efabe33971b161fc6 Mon Sep 17 00:00:00 2001 From: Emmanuel Bauma Murairi <40155399+Emmastro@users.noreply.github.com> Date: Sun, 30 Oct 2022 14:52:50 +0400 Subject: [PATCH 0680/1543] Polynomial (#6745) * implement function to handle polynomial operations * edit documentation * fix type hint and linter errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix short variable name * fix spelling Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/polynomials/__init__.py | 0 .../single_indeterminate_operations.py | 188 ++++++++++++++++++ 2 files changed, 188 insertions(+) create mode 100644 maths/polynomials/__init__.py create mode 100644 maths/polynomials/single_indeterminate_operations.py diff --git a/maths/polynomials/__init__.py b/maths/polynomials/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/maths/polynomials/single_indeterminate_operations.py b/maths/polynomials/single_indeterminate_operations.py new file mode 100644 index 000000000000..8bafdb591793 --- /dev/null +++ b/maths/polynomials/single_indeterminate_operations.py @@ -0,0 +1,188 @@ +""" + +This module implements a single indeterminate polynomials class +with some basic operations + +Reference: https://en.wikipedia.org/wiki/Polynomial + +""" + +from __future__ import annotations + +from collections.abc import MutableSequence + + +class Polynomial: + def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None: + """ + The coefficients should be in order of degree, from smallest to largest. + >>> p = Polynomial(2, [1, 2, 3]) + >>> p = Polynomial(2, [1, 2, 3, 4]) + Traceback (most recent call last): + ... + ValueError: The number of coefficients should be equal to the degree + 1. + + """ + if len(coefficients) != degree + 1: + raise ValueError( + "The number of coefficients should be equal to the degree + 1." 
+ ) + + self.coefficients: list[float] = list(coefficients) + self.degree = degree + + def __add__(self, polynomial_2: Polynomial) -> Polynomial: + """ + Polynomial addition + >>> p = Polynomial(2, [1, 2, 3]) + >>> q = Polynomial(2, [1, 2, 3]) + >>> p + q + 6x^2 + 4x + 2 + """ + + if self.degree > polynomial_2.degree: + coefficients = self.coefficients[:] + for i in range(polynomial_2.degree + 1): + coefficients[i] += polynomial_2.coefficients[i] + return Polynomial(self.degree, coefficients) + else: + coefficients = polynomial_2.coefficients[:] + for i in range(self.degree + 1): + coefficients[i] += self.coefficients[i] + return Polynomial(polynomial_2.degree, coefficients) + + def __sub__(self, polynomial_2: Polynomial) -> Polynomial: + """ + Polynomial subtraction + >>> p = Polynomial(2, [1, 2, 4]) + >>> q = Polynomial(2, [1, 2, 3]) + >>> p - q + 1x^2 + """ + return self + polynomial_2 * Polynomial(0, [-1]) + + def __neg__(self) -> Polynomial: + """ + Polynomial negation + >>> p = Polynomial(2, [1, 2, 3]) + >>> -p + - 3x^2 - 2x - 1 + """ + return Polynomial(self.degree, [-c for c in self.coefficients]) + + def __mul__(self, polynomial_2: Polynomial) -> Polynomial: + """ + Polynomial multiplication + >>> p = Polynomial(2, [1, 2, 3]) + >>> q = Polynomial(2, [1, 2, 3]) + >>> p * q + 9x^4 + 12x^3 + 10x^2 + 4x + 1 + """ + coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1) + for i in range(self.degree + 1): + for j in range(polynomial_2.degree + 1): + coefficients[i + j] += ( + self.coefficients[i] * polynomial_2.coefficients[j] + ) + + return Polynomial(self.degree + polynomial_2.degree, coefficients) + + def evaluate(self, substitution: int | float) -> int | float: + """ + Evaluates the polynomial at x. + >>> p = Polynomial(2, [1, 2, 3]) + >>> p.evaluate(2) + 17 + """ + result: int | float = 0 + for i in range(self.degree + 1): + result += self.coefficients[i] * (substitution**i) + return result + + def __str__(self) -> str: + """ + >>> p = Polynomial(2, [1, 2, 3]) + >>> print(p) + 3x^2 + 2x + 1 + """ + polynomial = "" + for i in range(self.degree, -1, -1): + if self.coefficients[i] == 0: + continue + elif self.coefficients[i] > 0: + if polynomial: + polynomial += " + " + else: + polynomial += " - " + + if i == 0: + polynomial += str(abs(self.coefficients[i])) + elif i == 1: + polynomial += str(abs(self.coefficients[i])) + "x" + else: + polynomial += str(abs(self.coefficients[i])) + "x^" + str(i) + + return polynomial + + def __repr__(self) -> str: + """ + >>> p = Polynomial(2, [1, 2, 3]) + >>> p + 3x^2 + 2x + 1 + """ + return self.__str__() + + def derivative(self) -> Polynomial: + """ + Returns the derivative of the polynomial. + >>> p = Polynomial(2, [1, 2, 3]) + >>> p.derivative() + 6x + 2 + """ + coefficients: list[float] = [0] * self.degree + for i in range(self.degree): + coefficients[i] = self.coefficients[i + 1] * (i + 1) + return Polynomial(self.degree - 1, coefficients) + + def integral(self, constant: int | float = 0) -> Polynomial: + """ + Returns the integral of the polynomial. + >>> p = Polynomial(2, [1, 2, 3]) + >>> p.integral() + 1.0x^3 + 1.0x^2 + 1.0x + """ + coefficients: list[float] = [0] * (self.degree + 2) + coefficients[0] = constant + for i in range(self.degree + 1): + coefficients[i + 1] = self.coefficients[i] / (i + 1) + return Polynomial(self.degree + 1, coefficients) + + def __eq__(self, polynomial_2: object) -> bool: + """ + Checks if two polynomials are equal. 
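+        Two polynomials compare equal only if they have the same degree and
+        identical coefficients: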
+ >>> p = Polynomial(2, [1, 2, 3]) + >>> q = Polynomial(2, [1, 2, 3]) + >>> p == q + True + """ + if not isinstance(polynomial_2, Polynomial): + return False + + if self.degree != polynomial_2.degree: + return False + + for i in range(self.degree + 1): + if self.coefficients[i] != polynomial_2.coefficients[i]: + return False + + return True + + def __ne__(self, polynomial_2: object) -> bool: + """ + Checks if two polynomials are not equal. + >>> p = Polynomial(2, [1, 2, 3]) + >>> q = Polynomial(2, [1, 2, 3]) + >>> p != q + False + """ + return not self.__eq__(polynomial_2) From 9278d0c6cdaa30115dbfef510e31a805bd3027dd Mon Sep 17 00:00:00 2001 From: Dima I <79413560+DIvkov575@users.noreply.github.com> Date: Sun, 30 Oct 2022 06:54:23 -0400 Subject: [PATCH 0681/1543] Added archimedes principle (physics) (#7143) * Added archimedes principle (physics) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * reformated * reformatted archimedes principles Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- physics/archimedes_principle.py | 49 +++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 physics/archimedes_principle.py diff --git a/physics/archimedes_principle.py b/physics/archimedes_principle.py new file mode 100644 index 000000000000..6ecfc65e7461 --- /dev/null +++ b/physics/archimedes_principle.py @@ -0,0 +1,49 @@ +""" +Calculates buoyant force on object submerged within static fluid. +Discovered by greek mathematician, Archimedes. The principle is named after him. + +Equation for calculating buoyant force: +Fb = ρ * V * g + +Source: +- https://en.wikipedia.org/wiki/Archimedes%27_principle +""" + + +# Acceleration Constant on Earth (unit m/s^2) +g = 9.80665 + + +def archimedes_principle( + fluid_density: float, volume: float, gravity: float = g +) -> float: + """ + Args: + fluid_density: density of fluid (kg/m^3) + volume: volume of object / liquid being displaced by object + gravity: Acceleration from gravity. 
Gravitational force on system, + Default is Earth Gravity + returns: + buoyant force on object in Newtons + + >>> archimedes_principle(fluid_density=997, volume=0.5, gravity=9.8) + 4885.3 + >>> archimedes_principle(fluid_density=997, volume=0.7) + 6844.061035 + """ + + if fluid_density <= 0: + raise ValueError("Impossible fluid density") + if volume < 0: + raise ValueError("Impossible Object volume") + if gravity <= 0: + raise ValueError("Impossible Gravity") + + return fluid_density * gravity * volume + + +if __name__ == "__main__": + import doctest + + # run doctest + doctest.testmod() From cafbbab125ebcdac4294f4cbda024b840d230b9a Mon Sep 17 00:00:00 2001 From: Lukas Esc <55601315+Luk-ESC@users.noreply.github.com> Date: Sun, 30 Oct 2022 11:56:54 +0100 Subject: [PATCH 0682/1543] shortened code using abs() and inplace ops (#7191) n = -n if n < 0 else n --> n = abs(n) n = n // 10 --> n //= 10 --- maths/sum_of_digits.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/maths/sum_of_digits.py b/maths/sum_of_digits.py index 64da00d4634c..5ad5fe6c9877 100644 --- a/maths/sum_of_digits.py +++ b/maths/sum_of_digits.py @@ -14,11 +14,11 @@ def sum_of_digits(n: int) -> int: >>> sum_of_digits(0) 0 """ - n = -n if n < 0 else n + n = abs(n) res = 0 while n > 0: res += n % 10 - n = n // 10 + n //= 10 return res @@ -35,7 +35,7 @@ def sum_of_digits_recursion(n: int) -> int: >>> sum_of_digits_recursion(0) 0 """ - n = -n if n < 0 else n + n = abs(n) return n if n < 10 else n % 10 + sum_of_digits(n // 10) From ab9d8f3874ba550bea0103e0891160b8d9145208 Mon Sep 17 00:00:00 2001 From: Jeremias Moreira Gomes Date: Sun, 30 Oct 2022 08:09:23 -0300 Subject: [PATCH 0683/1543] Adding a Quine in Python. (#6807) * Adding a Quine in Python. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- other/quine.py | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 other/quine.py diff --git a/other/quine.py b/other/quine.py new file mode 100644 index 000000000000..01e03bbb02cb --- /dev/null +++ b/other/quine.py @@ -0,0 +1,10 @@ +#!/bin/python3 +""" +Quine: + +A quine is a computer program which takes no input and produces a copy of its +own source code as its only output (disregarding this docstring and the shebang). 
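+The trick is string self-application: the %r placeholder substitutes the repr of the
+format string into itself and %% collapses to a single percent sign, so formatting
+the string with itself reproduces the print statement.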
+ +More info on: https://en.wikipedia.org/wiki/Quine_(computing) +""" +print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))")) From 94b51f6a91def387b82369401a42710cae4ee4e0 Mon Sep 17 00:00:00 2001 From: sadiqebrahim <75269485+sadiqebrahim@users.noreply.github.com> Date: Sun, 30 Oct 2022 17:22:20 +0530 Subject: [PATCH 0684/1543] Added Builtin Voltage (#7850) * Added Builtin Voltage * Update builtin_voltage.py * Update electronics/builtin_voltage.py Co-authored-by: Caeden Perelli-Harris * Update electronics/builtin_voltage.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create elf.py Co-authored-by: Caeden Perelli-Harris Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- electronics/builtin_voltage.py | 67 ++++++++++++++++++++++++++++++++++ hashes/elf.py | 14 +++---- 2 files changed, 73 insertions(+), 8 deletions(-) create mode 100644 electronics/builtin_voltage.py diff --git a/electronics/builtin_voltage.py b/electronics/builtin_voltage.py new file mode 100644 index 000000000000..38fde4524d1a --- /dev/null +++ b/electronics/builtin_voltage.py @@ -0,0 +1,67 @@ +from math import log + +from scipy.constants import Boltzmann, physical_constants + +T = 300 # TEMPERATURE (unit = K) + + +def builtin_voltage( + donor_conc: float, # donor concentration + acceptor_conc: float, # acceptor concentration + intrinsic_conc: float, # intrinsic concentration +) -> float: + """ + This function can calculate the Builtin Voltage of a pn junction diode. + This is calculated from the given three values. + Examples - + >>> builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10) + 0.833370010652644 + >>> builtin_voltage(donor_conc=0, acceptor_conc=1600, intrinsic_conc=200) + Traceback (most recent call last): + ... + ValueError: Donor concentration should be positive + >>> builtin_voltage(donor_conc=1000, acceptor_conc=0, intrinsic_conc=1200) + Traceback (most recent call last): + ... + ValueError: Acceptor concentration should be positive + >>> builtin_voltage(donor_conc=1000, acceptor_conc=1000, intrinsic_conc=0) + Traceback (most recent call last): + ... + ValueError: Intrinsic concentration should be positive + >>> builtin_voltage(donor_conc=1000, acceptor_conc=3000, intrinsic_conc=2000) + Traceback (most recent call last): + ... + ValueError: Donor concentration should be greater than intrinsic concentration + >>> builtin_voltage(donor_conc=3000, acceptor_conc=1000, intrinsic_conc=2000) + Traceback (most recent call last): + ... 
+ ValueError: Acceptor concentration should be greater than intrinsic concentration + """ + + if donor_conc <= 0: + raise ValueError("Donor concentration should be positive") + elif acceptor_conc <= 0: + raise ValueError("Acceptor concentration should be positive") + elif intrinsic_conc <= 0: + raise ValueError("Intrinsic concentration should be positive") + elif donor_conc <= intrinsic_conc: + raise ValueError( + "Donor concentration should be greater than intrinsic concentration" + ) + elif acceptor_conc <= intrinsic_conc: + raise ValueError( + "Acceptor concentration should be greater than intrinsic concentration" + ) + else: + return ( + Boltzmann + * T + * log((donor_conc * acceptor_conc) / intrinsic_conc**2) + / physical_constants["electron volt"][0] + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/hashes/elf.py b/hashes/elf.py index 87fe339da44d..e4bfcec22c22 100644 --- a/hashes/elf.py +++ b/hashes/elf.py @@ -2,19 +2,17 @@ def elf_hash(data: str) -> int: """ Implementation of ElfHash Algorithm, a variant of PJW hash function. - Returns: - [int] -- [32 bit binary int] >>> elf_hash('lorem ipsum') 253956621 """ - hash = x = 0 + hash_ = x = 0 for letter in data: - hash = (hash << 4) + ord(letter) - x = hash & 0xF0000000 + hash_ = (hash_ << 4) + ord(letter) + x = hash_ & 0xF0000000 if x != 0: - hash ^= x >> 24 - hash &= ~x - return hash + hash_ ^= x >> 24 + hash_ &= ~x + return hash_ if __name__ == "__main__": From 69d04ff64468d5b2815c0f22190b741393496a9e Mon Sep 17 00:00:00 2001 From: Kushagra Makharia Date: Sun, 30 Oct 2022 18:12:59 +0530 Subject: [PATCH 0685/1543] Added mean absolute error in linear regression (#7003) * Added mean absolute error in linear regression * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Code feedback changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris * Apply suggestions from code review * Update linear_regression.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- machine_learning/linear_regression.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index 92ab91c01b95..75943ac9f2ad 100644 --- a/machine_learning/linear_regression.py +++ b/machine_learning/linear_regression.py @@ -17,9 +17,8 @@ def collect_dataset(): :return : dataset obtained from the link, as matrix """ response = requests.get( - "https://raw.githubusercontent.com/yashLadha/" - + "The_Math_of_Intelligence/master/Week1/ADRvs" - + "Rating.csv" + "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/" + "master/Week1/ADRvsRating.csv" ) lines = response.text.splitlines() data = [] @@ -87,6 +86,16 @@ def run_linear_regression(data_x, data_y): return theta +def mean_absolute_error(predicted_y, original_y): + """Return sum of square error for error calculation + :param predicted_y : contains the output of prediction (result vector) + :param original_y : contains values of expected outcome + :return : mean absolute error computed from given feature's + """ + total = sum(abs(y - predicted_y[i]) for i, y in enumerate(original_y)) + return total / len(original_y) + + def main(): """Driver function""" data = collect_dataset() From 
2c65597093efa80a572a6a739d8f13a8d3579c18 Mon Sep 17 00:00:00 2001 From: kumarsurajsk <104374726+kumarsurajsk@users.noreply.github.com> Date: Sun, 30 Oct 2022 18:22:37 +0530 Subject: [PATCH 0686/1543] addition_without_arithmetic (#6830) * Addition_without_arithmetic * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added_param * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added_param_in_first_sec * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * change_align * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update Addition_without_arithmetic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename Addition_without_arithmetic.py to addition_without_arithmetic.py * Update addition_without_arithmetic.py * Update addition_without_arithmetic.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/addition_without_arithmetic.py | 39 ++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 maths/addition_without_arithmetic.py diff --git a/maths/addition_without_arithmetic.py b/maths/addition_without_arithmetic.py new file mode 100644 index 000000000000..409604e4c08a --- /dev/null +++ b/maths/addition_without_arithmetic.py @@ -0,0 +1,39 @@ +""" +Illustrate how to add the integer without arithmetic operation +Author: suraj Kumar +Time Complexity: 1 +https://en.wikipedia.org/wiki/Bitwise_operation +""" + + +def add(first: int, second: int) -> int: + """ + Implementation of addition of integer + + Examples: + >>> add(3, 5) + 8 + >>> add(13, 5) + 18 + >>> add(-7, 2) + -5 + >>> add(0, -7) + -7 + >>> add(-321, 0) + -321 + """ + while second != 0: + c = first & second + first ^= second + second = c << 1 + return first + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + first = int(input("Enter the first number: ").strip()) + second = int(input("Enter the second number: ").strip()) + print(f"{add(first, second) = }") From cf915e704285b1b40b6d0f180d60791204486fd3 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Sun, 30 Oct 2022 17:00:16 +0400 Subject: [PATCH 0687/1543] add Levinstein distance with Dynamic Programming: up -> down approach (#7171) * add Levinstein distance with Dynamic Programming: up -> down approach * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add type hint * fix flake8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/min_distance_up_bottom.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update min_distance_up_bottom.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- dynamic_programming/min_distance_up_bottom.py | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 dynamic_programming/min_distance_up_bottom.py diff --git a/dynamic_programming/min_distance_up_bottom.py b/dynamic_programming/min_distance_up_bottom.py new file mode 100644 index 000000000000..49c361f24d45 --- 
/dev/null +++ b/dynamic_programming/min_distance_up_bottom.py @@ -0,0 +1,55 @@ +""" +Author : Alexander Pantyukhin +Date : October 14, 2022 +This is implementation Dynamic Programming up bottom approach +to find edit distance. +The aim is to demonstate up bottom approach for solving the task. +The implementation was tested on the +leetcode: https://leetcode.com/problems/edit-distance/ +""" + +""" +Levinstein distance +Dynamic Programming: up -> down. +""" + + +def min_distance_up_bottom(word1: str, word2: str) -> int: + """ + >>> min_distance_up_bottom("intention", "execution") + 5 + >>> min_distance_up_bottom("intention", "") + 9 + >>> min_distance_up_bottom("", "") + 0 + >>> min_distance_up_bottom("zooicoarchaeologist", "zoologist") + 10 + """ + + from functools import lru_cache + + len_word1 = len(word1) + len_word2 = len(word2) + + @lru_cache(maxsize=None) + def min_distance(index1: int, index2: int) -> int: + # if first word index is overflow - delete all from the second word + if index1 >= len_word1: + return len_word2 - index2 + # if second word index is overflow - delete all from the first word + if index2 >= len_word2: + return len_word1 - index1 + diff = int(word1[index1] != word2[index2]) # current letters not identical + return min( + 1 + min_distance(index1 + 1, index2), + 1 + min_distance(index1, index2 + 1), + diff + min_distance(index1 + 1, index2 + 1), + ) + + return min_distance(0, 0) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d1430aa36b0a15a9e018367db210061e7a76dec4 Mon Sep 17 00:00:00 2001 From: Wissam Fawaz <55150850+wissamfawaz@users.noreply.github.com> Date: Sun, 30 Oct 2022 15:14:22 +0200 Subject: [PATCH 0688/1543] Implemented a Pascal triangle generator (#7317) * Added a Pascal triangle implementation to the other folder * Added Pascal triangle implementation to the other folder. * Added Pascal triangle implementation to the other folder. * Added Pascal triangle implementation to the other folder. * Implemented a Pascal triangle generator. * Reversed Changes to DIRECTORY.md * Reversed changed to .md files * Update other/pascal_triangle.py Removed personal info Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> * Update pascal_triangle.py Expanded the description of the algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Printed output in triangular form * Update CONTRIBUTING.md Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- other/pascal_triangle.py | 96 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 other/pascal_triangle.py diff --git a/other/pascal_triangle.py b/other/pascal_triangle.py new file mode 100644 index 000000000000..5cc3cee8af56 --- /dev/null +++ b/other/pascal_triangle.py @@ -0,0 +1,96 @@ +""" +This implementation demonstrates how to generate the +elements of a Pascal's triangle. The element having +a row index of r and column index of c can be derived +as follows: +triangle[r][c] = triangle[r-1][c-1]+triangle[r-1][c] +What is Pascal's triangle? +- It is a triangular array containing binomial coefficients. +Refer to (https://en.wikipedia.org/wiki/Pascal%27s_triangle) +for more info about this triangle. 
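+For example, the 6 in row [1, 4, 6, 4, 1] is the sum of the two 3s sitting above it
+in row [1, 3, 3, 1].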
+""" + + +def print_pascal_triangle(num_rows: int) -> None: + """ + Print Pascal's triangle for different number of rows + >>> print_pascal_triangle(5) + 1 + 1 1 + 1 2 1 + 1 3 3 1 + 1 4 6 4 1 + """ + triangle = generate_pascal_triangle(num_rows) + for row_idx in range(num_rows): + # Print left spaces + for _ in range(num_rows - row_idx - 1): + print(end=" ") + # Print row values + for col_idx in range(row_idx + 1): + if col_idx != row_idx: + print(triangle[row_idx][col_idx], end=" ") + else: + print(triangle[row_idx][col_idx], end="") + print() + + +def generate_pascal_triangle(num_rows: int) -> list[list[int]]: + """ + Create Pascal's triangle for different number of rows + >>> generate_pascal_triangle(1) + [[1]] + >>> generate_pascal_triangle(2) + [[1], [1, 1]] + >>> generate_pascal_triangle(3) + [[1], [1, 1], [1, 2, 1]] + >>> generate_pascal_triangle(4) + [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]] + >>> generate_pascal_triangle(5) + [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]] + """ + triangle: list[list[int]] = [] + for current_row_idx in range(num_rows): + current_row = populate_current_row(triangle, current_row_idx) + triangle.append(current_row) + return triangle + + +def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]: + """ + >>> triangle = [[1]] + >>> populate_current_row(triangle, 1) + [1, 1] + """ + current_row = [-1] * (current_row_idx + 1) + # first and last elements of current row are equal to 1 + current_row[0], current_row[-1] = 1, 1 + for current_col_idx in range(1, current_row_idx): + calculate_current_element( + triangle, current_row, current_row_idx, current_col_idx + ) + return current_row + + +def calculate_current_element( + triangle: list[list[int]], + current_row: list[int], + current_row_idx: int, + current_col_idx: int, +) -> None: + """ + >>> triangle = [[1], [1, 1]] + >>> current_row = [1, -1, 1] + >>> calculate_current_element(triangle, current_row, 2, 1) + >>> current_row + [1, 2, 1] + """ + above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1] + above_to_right_elt = triangle[current_row_idx - 1][current_col_idx] + current_row[current_col_idx] = above_to_left_elt + above_to_right_elt + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 47100b992aef2fd5a7ae001155e3d0411db99ec9 Mon Sep 17 00:00:00 2001 From: Agniv Ghosh <73717822+agnivg@users.noreply.github.com> Date: Sun, 30 Oct 2022 18:45:46 +0530 Subject: [PATCH 0689/1543] Added code for palindrome partitioning problem under dynamic programming (#7222) * Added code for palindrome partitioning problem under dynamic programming * Updated return type for function * Updated Line 24 according to suggestions * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris * Update palindrome_partitioning.py * Update palindrome_partitioning.py * is_palindromic Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- .../palindrome_partitioning.py | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 dynamic_programming/palindrome_partitioning.py diff --git a/dynamic_programming/palindrome_partitioning.py b/dynamic_programming/palindrome_partitioning.py new file mode 100644 index 000000000000..c1629440ef2e --- /dev/null +++ b/dynamic_programming/palindrome_partitioning.py @@ -0,0 +1,39 @@ +""" +Given a string s, partition s such that every substring of the +partition is a palindrome. +Find the minimum cuts needed for a palindrome partitioning of s. 
+ +Time Complexity: O(n^2) +Space Complexity: O(n^2) +For other explanations refer to: https://www.youtube.com/watch?v=_H8V5hJUGd0 +""" + + +def find_minimum_partitions(string: str) -> int: + """ + Returns the minimum cuts needed for a palindrome partitioning of string + + >>> find_minimum_partitions("aab") + 1 + >>> find_minimum_partitions("aaa") + 0 + >>> find_minimum_partitions("ababbbabbababa") + 3 + """ + length = len(string) + cut = [0] * length + is_palindromic = [[False for i in range(length)] for j in range(length)] + for i, c in enumerate(string): + mincut = i + for j in range(i + 1): + if c == string[j] and (i - j < 2 or is_palindromic[j + 1][i - 1]): + is_palindromic[j][i] = True + mincut = min(mincut, 0 if j == 0 else (cut[j - 1] + 1)) + cut[i] = mincut + return cut[length - 1] + + +if __name__ == "__main__": + s = input("Enter the string: ").strip() + ans = find_minimum_partitions(s) + print(f"Minimum number of partitions required for the '{s}' is {ans}") From 11e6c6fcc485bf78e5d28c7cf311278a013685d5 Mon Sep 17 00:00:00 2001 From: Gautam Chaurasia <64725629+GautamChaurasia@users.noreply.github.com> Date: Sun, 30 Oct 2022 18:58:27 +0530 Subject: [PATCH 0690/1543] Added algorithm for finding index of rightmost set bit (#7234) * Added algorithm for finding index of rightmost set bit * applied suggested changes * applied suggested changes * Fixed failing Testcases * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../index_of_rightmost_set_bit.py | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 bit_manipulation/index_of_rightmost_set_bit.py diff --git a/bit_manipulation/index_of_rightmost_set_bit.py b/bit_manipulation/index_of_rightmost_set_bit.py new file mode 100644 index 000000000000..eb52ea4e63e3 --- /dev/null +++ b/bit_manipulation/index_of_rightmost_set_bit.py @@ -0,0 +1,43 @@ +# Reference: https://www.geeksforgeeks.org/position-of-rightmost-set-bit/ + + +def get_index_of_rightmost_set_bit(number: int) -> int: + """ + Take in a positive integer 'number'. + Returns the zero-based index of first set bit in that 'number' from right. + Returns -1, If no set bit found. + + >>> get_index_of_rightmost_set_bit(0) + -1 + >>> get_index_of_rightmost_set_bit(5) + 0 + >>> get_index_of_rightmost_set_bit(36) + 2 + >>> get_index_of_rightmost_set_bit(8) + 3 + >>> get_index_of_rightmost_set_bit(-18) + Traceback (most recent call last): + ... + ValueError: Input must be a non-negative integer + """ + + if number < 0 or not isinstance(number, int): + raise ValueError("Input must be a non-negative integer") + + intermediate = number & ~(number - 1) + index = 0 + while intermediate: + intermediate >>= 1 + index += 1 + return index - 1 + + +if __name__ == "__main__": + """ + Finding the index of rightmost set bit has some very peculiar use-cases, + especially in finding missing or/and repeating numbers in a list of + positive integers. 
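+    The key step, number & ~(number - 1), isolates the lowest set bit: subtracting
+    1 flips every bit up to and including that bit, so for 36 (0b100100) the result
+    is 0b100, whose zero-based position is 2.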
+ """ + import doctest + + doctest.testmod(verbose=True) From e12516debb977e0b3ec9b67d1ddc8770450ae8d1 Mon Sep 17 00:00:00 2001 From: Abhishek Chakraborty Date: Sun, 30 Oct 2022 14:11:05 -0700 Subject: [PATCH 0691/1543] Shear stress: typo + WIkipedia URL (#7896) --- physics/{sheer_stress.py => shear_stress.py} | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) rename physics/{sheer_stress.py => shear_stress.py} (70%) diff --git a/physics/sheer_stress.py b/physics/shear_stress.py similarity index 70% rename from physics/sheer_stress.py rename to physics/shear_stress.py index 74a2d36b1f45..129148943893 100644 --- a/physics/sheer_stress.py +++ b/physics/shear_stress.py @@ -1,23 +1,31 @@ from __future__ import annotations +""" +Shear stress is a component of stress that is coplanar to the material cross-section. +It arises due to a shear force, the component of the force vector parallel to the +material cross-section. -def sheer_stress( +https://en.wikipedia.org/wiki/Shear_stress +""" + + +def shear_stress( stress: float, tangential_force: float, area: float, ) -> tuple[str, float]: """ This function can calculate any one of the three - - 1. Sheer Stress + 1. Shear Stress 2. Tangential Force 3. Cross-sectional Area This is calculated from the other two provided values Examples - - >>> sheer_stress(stress=25, tangential_force=100, area=0) + >>> shear_stress(stress=25, tangential_force=100, area=0) ('area', 4.0) - >>> sheer_stress(stress=0, tangential_force=1600, area=200) + >>> shear_stress(stress=0, tangential_force=1600, area=200) ('stress', 8.0) - >>> sheer_stress(stress=1000, tangential_force=0, area=1200) + >>> shear_stress(stress=1000, tangential_force=0, area=1200) ('tangential_force', 1200000) """ if (stress, tangential_force, area).count(0) != 1: From c0168cd33f6670f7e32eaa04d77b6be70b3588d4 Mon Sep 17 00:00:00 2001 From: Gmuslow <54784260+Gmuslow@users.noreply.github.com> Date: Sun, 30 Oct 2022 16:33:13 -0500 Subject: [PATCH 0692/1543] Created equivalent_resistance under Electronics (#6782) * Create resistor_equivalence.py * Update resistor_equivalence.py * Update electronics/resistor_equivalence.py removed an unnecessary space Co-authored-by: Caeden * Update resistor_equivalence.py fixed the snake_case requirement * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update resistor_equivalence.py finalize the naming convention errors (hopefully) * Update resistor_equivalence.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Caeden Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- electronics/resistor_equivalence.py | 58 +++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 electronics/resistor_equivalence.py diff --git a/electronics/resistor_equivalence.py b/electronics/resistor_equivalence.py new file mode 100644 index 000000000000..7142f838a065 --- /dev/null +++ b/electronics/resistor_equivalence.py @@ -0,0 +1,58 @@ +# https://byjus.com/equivalent-resistance-formula/ + +from __future__ import annotations + + +def resistor_parallel(resistors: list[float]) -> float: + """ + Req = 1/ (1/R1 + 1/R2 + ... + 1/Rn) + + >>> resistor_parallel([3.21389, 2, 3]) + 0.8737571620498019 + >>> resistor_parallel([3.21389, 2, -3]) + Traceback (most recent call last): + ... + ValueError: Resistor at index 2 has a negative or zero value! 
+ >>> resistor_parallel([3.21389, 2, 0.000]) + Traceback (most recent call last): + ... + ValueError: Resistor at index 2 has a negative or zero value! + """ + + first_sum = 0.00 + index = 0 + for resistor in resistors: + if resistor <= 0: + raise ValueError(f"Resistor at index {index} has a negative or zero value!") + first_sum += 1 / float(resistor) + index += 1 + return 1 / first_sum + + +def resistor_series(resistors: list[float]) -> float: + """ + Req = R1 + R2 + ... + Rn + + Calculate the equivalent resistance for any number of resistors in parallel. + + >>> resistor_series([3.21389, 2, 3]) + 8.21389 + >>> resistor_series([3.21389, 2, -3]) + Traceback (most recent call last): + ... + ValueError: Resistor at index 2 has a negative value! + """ + sum_r = 0.00 + index = 0 + for resistor in resistors: + sum_r += resistor + if resistor < 0: + raise ValueError(f"Resistor at index {index} has a negative value!") + index += 1 + return sum_r + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f8958ebe20522f5b0d32f33fd78870185912a67a Mon Sep 17 00:00:00 2001 From: himanshit0304 <70479061+himanshit0304@users.noreply.github.com> Date: Mon, 31 Oct 2022 04:25:11 +0530 Subject: [PATCH 0693/1543] Add print_multiplication_table.py (#6607) * Add print_multiplication_table.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added return type description * Update print_multiplication_table.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/print_multiplication_table.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 maths/print_multiplication_table.py diff --git a/maths/print_multiplication_table.py b/maths/print_multiplication_table.py new file mode 100644 index 000000000000..dbe4a4be0ee8 --- /dev/null +++ b/maths/print_multiplication_table.py @@ -0,0 +1,26 @@ +def multiplication_table(number: int, number_of_terms: int) -> str: + """ + Prints the multiplication table of a given number till the given number of terms + + >>> print(multiplication_table(3, 5)) + 3 * 1 = 3 + 3 * 2 = 6 + 3 * 3 = 9 + 3 * 4 = 12 + 3 * 5 = 15 + + >>> print(multiplication_table(-4, 6)) + -4 * 1 = -4 + -4 * 2 = -8 + -4 * 3 = -12 + -4 * 4 = -16 + -4 * 5 = -20 + -4 * 6 = -24 + """ + return "\n".join( + f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1) + ) + + +if __name__ == "__main__": + print(multiplication_table(number=5, number_of_terms=10)) From 39e5bc5980254582362ad02bb6616aaa58bfac8a Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 31 Oct 2022 01:13:21 -0400 Subject: [PATCH 0694/1543] Refactor bottom-up edit distance function to be class method (#7347) * Refactor bottom-up function to be class method * Add type hints * Update convolve function namespace * Remove depreciated np.float * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Renamed function for consistency * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> --- DIRECTORY.md | 15 ++- dynamic_programming/edit_distance.py | 134 +++++++++++++-------------- 2 files changed, 80 insertions(+), 69 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 38fd1d656488..be3a121c80bd 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -46,6 +46,7 @@ * [Count Number Of One 
Bits](bit_manipulation/count_number_of_one_bits.py) * [Gray Code Sequence](bit_manipulation/gray_code_sequence.py) * [Highest Set Bit](bit_manipulation/highest_set_bit.py) + * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) * [Is Even](bit_manipulation/is_even.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) @@ -307,24 +308,28 @@ * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) * [Max Sub Array](dynamic_programming/max_sub_array.py) * [Max Sum Contiguous Subsequence](dynamic_programming/max_sum_contiguous_subsequence.py) + * [Min Distance Up Bottom](dynamic_programming/min_distance_up_bottom.py) * [Minimum Coin Change](dynamic_programming/minimum_coin_change.py) * [Minimum Cost Path](dynamic_programming/minimum_cost_path.py) * [Minimum Partition](dynamic_programming/minimum_partition.py) * [Minimum Squares To Represent A Number](dynamic_programming/minimum_squares_to_represent_a_number.py) * [Minimum Steps To One](dynamic_programming/minimum_steps_to_one.py) * [Optimal Binary Search Tree](dynamic_programming/optimal_binary_search_tree.py) + * [Palindrome Partitioning](dynamic_programming/palindrome_partitioning.py) * [Rod Cutting](dynamic_programming/rod_cutting.py) * [Subset Generation](dynamic_programming/subset_generation.py) * [Sum Of Subset](dynamic_programming/sum_of_subset.py) * [Viterbi](dynamic_programming/viterbi.py) ## Electronics + * [Builtin Voltage](electronics/builtin_voltage.py) * [Carrier Concentration](electronics/carrier_concentration.py) * [Coulombs Law](electronics/coulombs_law.py) * [Electric Conductivity](electronics/electric_conductivity.py) * [Electric Power](electronics/electric_power.py) * [Electrical Impedance](electronics/electrical_impedance.py) * [Ohms Law](electronics/ohms_law.py) + * [Resistor Equivalence](electronics/resistor_equivalence.py) * [Resonant Frequency](electronics/resonant_frequency.py) ## File Transfer @@ -426,6 +431,7 @@ * [Adler32](hashes/adler32.py) * [Chaos Machine](hashes/chaos_machine.py) * [Djb2](hashes/djb2.py) + * [Elf](hashes/elf.py) * [Enigma Machine](hashes/enigma_machine.py) * [Hamming Code](hashes/hamming_code.py) * [Luhn](hashes/luhn.py) @@ -491,6 +497,7 @@ * [Abs Max](maths/abs_max.py) * [Abs Min](maths/abs_min.py) * [Add](maths/add.py) + * [Addition Without Arithmetic](maths/addition_without_arithmetic.py) * [Aliquot Sum](maths/aliquot_sum.py) * [Allocation Number](maths/allocation_number.py) * [Arc Length](maths/arc_length.py) @@ -581,12 +588,15 @@ * [Points Are Collinear 3D](maths/points_are_collinear_3d.py) * [Pollard Rho](maths/pollard_rho.py) * [Polynomial Evaluation](maths/polynomial_evaluation.py) + * Polynomials + * [Single Indeterminate Operations](maths/polynomials/single_indeterminate_operations.py) * [Power Using Recursion](maths/power_using_recursion.py) * [Prime Check](maths/prime_check.py) * [Prime Factors](maths/prime_factors.py) * [Prime Numbers](maths/prime_numbers.py) * [Prime Sieve Eratosthenes](maths/prime_sieve_eratosthenes.py) * [Primelib](maths/primelib.py) + * [Print Multiplication Table](maths/print_multiplication_table.py) * [Proth Number](maths/proth_number.py) * [Pythagoras](maths/pythagoras.py) * [Qr Decomposition](maths/qr_decomposition.py) @@ -676,12 +686,15 @@ * [Magicdiamondpattern](other/magicdiamondpattern.py) * [Maximum Subarray](other/maximum_subarray.py) * [Nested Brackets](other/nested_brackets.py) + * [Pascal 
Triangle](other/pascal_triangle.py) * [Password Generator](other/password_generator.py) + * [Quine](other/quine.py) * [Scoring Algorithm](other/scoring_algorithm.py) * [Sdes](other/sdes.py) * [Tower Of Hanoi](other/tower_of_hanoi.py) ## Physics + * [Archimedes Principle](physics/archimedes_principle.py) * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) @@ -694,7 +707,7 @@ * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) * [Potential Energy](physics/potential_energy.py) * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) - * [Sheer Stress](physics/sheer_stress.py) + * [Shear Stress](physics/shear_stress.py) ## Project Euler * Problem 001 diff --git a/dynamic_programming/edit_distance.py b/dynamic_programming/edit_distance.py index fe23431a7ea6..774aa047326e 100644 --- a/dynamic_programming/edit_distance.py +++ b/dynamic_programming/edit_distance.py @@ -19,74 +19,72 @@ class EditDistance: """ def __init__(self): - self.__prepare__() - - def __prepare__(self, n=0, m=0): - self.dp = [[-1 for y in range(0, m)] for x in range(0, n)] - - def __solve_dp(self, x, y): - if x == -1: - return y + 1 - elif y == -1: - return x + 1 - elif self.dp[x][y] > -1: - return self.dp[x][y] + self.word1 = "" + self.word2 = "" + self.dp = [] + + def __min_dist_top_down_dp(self, m: int, n: int) -> int: + if m == -1: + return n + 1 + elif n == -1: + return m + 1 + elif self.dp[m][n] > -1: + return self.dp[m][n] else: - if self.a[x] == self.b[y]: - self.dp[x][y] = self.__solve_dp(x - 1, y - 1) + if self.word1[m] == self.word2[n]: + self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1) else: - self.dp[x][y] = 1 + min( - self.__solve_dp(x, y - 1), - self.__solve_dp(x - 1, y), - self.__solve_dp(x - 1, y - 1), - ) - - return self.dp[x][y] - - def solve(self, a, b): - if isinstance(a, bytes): - a = a.decode("ascii") - - if isinstance(b, bytes): - b = b.decode("ascii") - - self.a = str(a) - self.b = str(b) - - self.__prepare__(len(a), len(b)) - - return self.__solve_dp(len(a) - 1, len(b) - 1) - - -def min_distance_bottom_up(word1: str, word2: str) -> int: - """ - >>> min_distance_bottom_up("intention", "execution") - 5 - >>> min_distance_bottom_up("intention", "") - 9 - >>> min_distance_bottom_up("", "") - 0 - """ - m = len(word1) - n = len(word2) - dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)] - for i in range(m + 1): - for j in range(n + 1): - - if i == 0: # first string is empty - dp[i][j] = j - elif j == 0: # second string is empty - dp[i][j] = i - elif ( - word1[i - 1] == word2[j - 1] - ): # last character of both substing is equal - dp[i][j] = dp[i - 1][j - 1] - else: - insert = dp[i][j - 1] - delete = dp[i - 1][j] - replace = dp[i - 1][j - 1] - dp[i][j] = 1 + min(insert, delete, replace) - return dp[m][n] + insert = self.__min_dist_top_down_dp(m, n - 1) + delete = self.__min_dist_top_down_dp(m - 1, n) + replace = self.__min_dist_top_down_dp(m - 1, n - 1) + self.dp[m][n] = 1 + min(insert, delete, replace) + + return self.dp[m][n] + + def min_dist_top_down(self, word1: str, word2: str) -> int: + """ + >>> EditDistance().min_dist_top_down("intention", "execution") + 5 + >>> EditDistance().min_dist_top_down("intention", "") + 9 + >>> EditDistance().min_dist_top_down("", "") + 0 + """ + self.word1 = word1 + self.word2 = word2 + self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))] + + return self.__min_dist_top_down_dp(len(word1) - 1, 
len(word2) - 1) + + def min_dist_bottom_up(self, word1: str, word2: str) -> int: + """ + >>> EditDistance().min_dist_bottom_up("intention", "execution") + 5 + >>> EditDistance().min_dist_bottom_up("intention", "") + 9 + >>> EditDistance().min_dist_bottom_up("", "") + 0 + """ + self.word1 = word1 + self.word2 = word2 + m = len(word1) + n = len(word2) + self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)] + + for i in range(m + 1): + for j in range(n + 1): + if i == 0: # first string is empty + self.dp[i][j] = j + elif j == 0: # second string is empty + self.dp[i][j] = i + elif word1[i - 1] == word2[j - 1]: # last characters are equal + self.dp[i][j] = self.dp[i - 1][j - 1] + else: + insert = self.dp[i][j - 1] + delete = self.dp[i - 1][j] + replace = self.dp[i - 1][j - 1] + self.dp[i][j] = 1 + min(insert, delete, replace) + return self.dp[m][n] if __name__ == "__main__": @@ -99,7 +97,7 @@ def min_distance_bottom_up(word1: str, word2: str) -> int: S2 = input("Enter the second string: ").strip() print() - print(f"The minimum Edit Distance is: {solver.solve(S1, S2)}") - print(f"The minimum Edit Distance is: {min_distance_bottom_up(S1, S2)}") + print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}") + print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}") print() print("*************** End of Testing Edit Distance DP Algorithm ***************") From 0fd1ccb13358feff2d6ea8dd62200cabe363ee8e Mon Sep 17 00:00:00 2001 From: Roberts Date: Mon, 31 Oct 2022 13:31:15 +0200 Subject: [PATCH 0695/1543] Adding inductive reactance calculation (#6625) * Adding inductive reactance calculation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * from math import pi * 0007957747154594767 * 36420441699332 * 2199114857512855 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- electronics/ind_reactance.py | 69 ++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 electronics/ind_reactance.py diff --git a/electronics/ind_reactance.py b/electronics/ind_reactance.py new file mode 100644 index 000000000000..3f77ef628203 --- /dev/null +++ b/electronics/ind_reactance.py @@ -0,0 +1,69 @@ +# https://en.wikipedia.org/wiki/Electrical_reactance#Inductive_reactance +from __future__ import annotations + +from math import pi + + +def ind_reactance( + inductance: float, frequency: float, reactance: float +) -> dict[str, float]: + """ + Calculate inductive reactance, frequency or inductance from two given electrical + properties then return name/value pair of the zero value in a Python dict. + + Parameters + ---------- + inductance : float with units in Henries + + frequency : float with units in Hertz + + reactance : float with units in Ohms + + >>> ind_reactance(-35e-6, 1e3, 0) + Traceback (most recent call last): + ... + ValueError: Inductance cannot be negative + + >>> ind_reactance(35e-6, -1e3, 0) + Traceback (most recent call last): + ... + ValueError: Frequency cannot be negative + + >>> ind_reactance(35e-6, 0, -1) + Traceback (most recent call last): + ... 
+ ValueError: Inductive reactance cannot be negative + + >>> ind_reactance(0, 10e3, 50) + {'inductance': 0.0007957747154594767} + + >>> ind_reactance(35e-3, 0, 50) + {'frequency': 227.36420441699332} + + >>> ind_reactance(35e-6, 1e3, 0) + {'reactance': 0.2199114857512855} + + """ + + if (inductance, frequency, reactance).count(0) != 1: + raise ValueError("One and only one argument must be 0") + if inductance < 0: + raise ValueError("Inductance cannot be negative") + if frequency < 0: + raise ValueError("Frequency cannot be negative") + if reactance < 0: + raise ValueError("Inductive reactance cannot be negative") + if inductance == 0: + return {"inductance": reactance / (2 * pi * frequency)} + elif frequency == 0: + return {"frequency": reactance / (2 * pi * inductance)} + elif reactance == 0: + return {"reactance": 2 * pi * frequency * inductance} + else: + raise ValueError("Exactly one argument must be 0") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b2165a65fcf1a087236d2a1527b10b64a12f69e6 Mon Sep 17 00:00:00 2001 From: Alex de la Cruz <46356295+acrulopez@users.noreply.github.com> Date: Mon, 31 Oct 2022 14:14:33 +0100 Subject: [PATCH 0696/1543] Added Radix Tree in data structures (#6616) * added radix tree to data structures * added doctests * solved flake8 * added type hints * added description for delete function * Update data_structures/trie/radix_tree.py * Update radix_tree.py * Update radix_tree.py * Update radix_tree.py Co-authored-by: Alex de la Cruz Co-authored-by: Christian Clauss --- data_structures/trie/radix_tree.py | 223 +++++++++++++++++++++++++++++ 1 file changed, 223 insertions(+) create mode 100644 data_structures/trie/radix_tree.py diff --git a/data_structures/trie/radix_tree.py b/data_structures/trie/radix_tree.py new file mode 100644 index 000000000000..66890346ec2b --- /dev/null +++ b/data_structures/trie/radix_tree.py @@ -0,0 +1,223 @@ +""" +A Radix Tree is a data structure that represents a space-optimized +trie (prefix tree) in whicheach node that is the only child is merged +with its parent [https://en.wikipedia.org/wiki/Radix_tree] +""" + + +class RadixNode: + def __init__(self, prefix: str = "", is_leaf: bool = False) -> None: + # Mapping from the first character of the prefix of the node + self.nodes: dict[str, RadixNode] = {} + + # A node will be a leaf if the tree contains its word + self.is_leaf = is_leaf + + self.prefix = prefix + + def match(self, word: str) -> tuple[str, str, str]: + """Compute the common substring of the prefix of the node and a word + + Args: + word (str): word to compare + + Returns: + (str, str, str): common substring, remaining prefix, remaining word + + >>> RadixNode("myprefix").match("mystring") + ('my', 'prefix', 'string') + """ + x = 0 + for q, w in zip(self.prefix, word): + if q != w: + break + + x += 1 + + return self.prefix[:x], self.prefix[x:], word[x:] + + def insert_many(self, words: list[str]) -> None: + """Insert many words in the tree + + Args: + words (list[str]): list of words + + >>> RadixNode("myprefix").insert_many(["mystring", "hello"]) + """ + for word in words: + self.insert(word) + + def insert(self, word: str) -> None: + """Insert a word into the tree + + Args: + word (str): word to insert + + >>> RadixNode("myprefix").insert("mystring") + """ + # Case 1: If the word is the prefix of the node + # Solution: We set the current node as leaf + if self.prefix == word: + self.is_leaf = True + + # Case 2: The node has no edges that have a prefix to the word + # Solution: We create an 
edge from the current node to a new one + # containing the word + elif word[0] not in self.nodes: + self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True) + + else: + incoming_node = self.nodes[word[0]] + matching_string, remaining_prefix, remaining_word = incoming_node.match( + word + ) + + # Case 3: The node prefix is equal to the matching + # Solution: We insert remaining word on the next node + if remaining_prefix == "": + self.nodes[matching_string[0]].insert(remaining_word) + + # Case 4: The word is greater equal to the matching + # Solution: Create a node in between both nodes, change + # prefixes and add the new node for the remaining word + else: + incoming_node.prefix = remaining_prefix + + aux_node = self.nodes[matching_string[0]] + self.nodes[matching_string[0]] = RadixNode(matching_string, False) + self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node + + if remaining_word == "": + self.nodes[matching_string[0]].is_leaf = True + else: + self.nodes[matching_string[0]].insert(remaining_word) + + def find(self, word: str) -> bool: + """Returns if the word is on the tree + + Args: + word (str): word to check + + Returns: + bool: True if the word appears on the tree + + >>> RadixNode("myprefix").find("mystring") + False + """ + incoming_node = self.nodes.get(word[0], None) + if not incoming_node: + return False + else: + matching_string, remaining_prefix, remaining_word = incoming_node.match( + word + ) + # If there is remaining prefix, the word can't be on the tree + if remaining_prefix != "": + return False + # This applies when the word and the prefix are equal + elif remaining_word == "": + return incoming_node.is_leaf + # We have word remaining so we check the next node + else: + return incoming_node.find(remaining_word) + + def delete(self, word: str) -> bool: + """Deletes a word from the tree if it exists + + Args: + word (str): word to be deleted + + Returns: + bool: True if the word was found and deleted. 
False if word is not found + + >>> RadixNode("myprefix").delete("mystring") + False + """ + incoming_node = self.nodes.get(word[0], None) + if not incoming_node: + return False + else: + matching_string, remaining_prefix, remaining_word = incoming_node.match( + word + ) + # If there is remaining prefix, the word can't be on the tree + if remaining_prefix != "": + return False + # We have word remaining so we check the next node + elif remaining_word != "": + return incoming_node.delete(remaining_word) + else: + # If it is not a leaf, we don't have to delete + if not incoming_node.is_leaf: + return False + else: + # We delete the nodes if no edges go from it + if len(incoming_node.nodes) == 0: + del self.nodes[word[0]] + # We merge the current node with its only child + if len(self.nodes) == 1 and not self.is_leaf: + merging_node = list(self.nodes.values())[0] + self.is_leaf = merging_node.is_leaf + self.prefix += merging_node.prefix + self.nodes = merging_node.nodes + # If there is more than 1 edge, we just mark it as non-leaf + elif len(incoming_node.nodes) > 1: + incoming_node.is_leaf = False + # If there is 1 edge, we merge it with its child + else: + merging_node = list(incoming_node.nodes.values())[0] + incoming_node.is_leaf = merging_node.is_leaf + incoming_node.prefix += merging_node.prefix + incoming_node.nodes = merging_node.nodes + + return True + + def print_tree(self, height: int = 0) -> None: + """Print the tree + + Args: + height (int, optional): Height of the printed node + """ + if self.prefix != "": + print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "") + + for value in self.nodes.values(): + value.print_tree(height + 1) + + +def test_trie() -> bool: + words = "banana bananas bandana band apple all beast".split() + root = RadixNode() + root.insert_many(words) + + assert all(root.find(word) for word in words) + assert not root.find("bandanas") + assert not root.find("apps") + root.delete("all") + assert not root.find("all") + root.delete("banana") + assert not root.find("banana") + assert root.find("bananas") + + return True + + +def pytests() -> None: + assert test_trie() + + +def main() -> None: + """ + >>> pytests() + """ + root = RadixNode() + words = "banana bananas bandanas bandana band apple all beast".split() + root.insert_many(words) + + print("Words:", words) + print("Tree:") + root.print_tree() + + +if __name__ == "__main__": + main() From a31edd4477af958adb840dadd568c38eecc9567b Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 31 Oct 2022 14:50:03 +0100 Subject: [PATCH 0697/1543] Test on Python 3.11 (#6591) * Test on Python 3.11 release candidate 2 * tensorflow; python<3.11 * tensorflow; python_version < 3.11 * tensorflow; python_version < "3.11" * sympy, tensorflow; python_version < "3.11" * sklearn; python_version < "3.11" * matplotlib, pandas, qiskit * statsmodels; python_version < "3.11" * Bring back Pandas * Problem deps are qiskit, statsmodels, and tensorflow * updating DIRECTORY.md * python-version: 3.11-dev --> 3.11 * updating DIRECTORY.md * Add pytest --ignore to pyproject.toml * Update build.yml * Update pyproject.toml * Update pyproject.toml * Python 3.11 Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 13 +++++++++++-- CONTRIBUTING.md | 2 +- DIRECTORY.md | 1 + requirements.txt | 6 +++--- 4 files changed, 16 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 159ce13b3fff..1069c68d215f 100644 --- 
a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 with: - python-version: 3.x + python-version: 3.11 - uses: actions/cache@v3 with: path: ~/.cache/pip @@ -22,6 +22,15 @@ jobs: python -m pip install --upgrade pip setuptools six wheel python -m pip install pytest-cov -r requirements.txt - name: Run tests - run: pytest --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . + # See: #6591 for re-enabling tests on Python v3.11 + run: pytest + --ignore=computer_vision/cnn_classification.py + --ignore=machine_learning/forecasting/run.py + --ignore=machine_learning/lstm/lstm_prediction.py + --ignore=quantum/ + --ignore=project_euler/ + --ignore=scripts/validate_solutions.py + --cov-report=term-missing:skip-covered + --cov=. . - if: ${{ success() }} run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 37e020b8fd8a..3ce5bd1edf68 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -66,7 +66,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.10+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. +- Please write in Python 3.11+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. diff --git a/DIRECTORY.md b/DIRECTORY.md index be3a121c80bd..0b0d1e6a7c9d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -328,6 +328,7 @@ * [Electric Conductivity](electronics/electric_conductivity.py) * [Electric Power](electronics/electric_power.py) * [Electrical Impedance](electronics/electrical_impedance.py) + * [Ind Reactance](electronics/ind_reactance.py) * [Ohms Law](electronics/ohms_law.py) * [Resistor Equivalence](electronics/resistor_equivalence.py) * [Resonant Frequency](electronics/resonant_frequency.py) diff --git a/requirements.txt b/requirements.txt index 9ffe784c945d..ae62039988e6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,14 +8,14 @@ opencv-python pandas pillow projectq -qiskit +qiskit; python_version < "3.11" requests rich scikit-fuzzy sklearn -statsmodels +statsmodels; python_version < "3.11" sympy -tensorflow +tensorflow; python_version < "3.11" texttable tweepy xgboost From fecbf59436702b34b987773aa872d79f5df466df Mon Sep 17 00:00:00 2001 From: TechFreak107 <62158210+TechFreak107@users.noreply.github.com> Date: Mon, 31 Oct 2022 22:28:42 +0530 Subject: [PATCH 0698/1543] Modified 'pascal_triangle.py' program (#7901) * Added pascals_triangle.py program to maths directory * Deleted 'pascals_triangle.py' because of duplication. Added a optimized function to generate pascal's triangle to 'pascal_triangle.py' program. Added some aadditional doctests to the existing function. Added some type check functionality to the existing function. 
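[Editor's note -- an illustration added for this write-up, not part of the commit message: the "optimized" generator mentioned in the bullet above exploits the left-right symmetry of each Pascal row, so only about half of the entries need to be computed and the rest can be mirrored. A minimal sketch of that idea, using a hypothetical helper name next_pascal_row:]

# Editorial sketch, not from the patch: build one Pascal row from the previous
# one, computing only the first half and mirroring it (the same idea as the
# generate_pascal_triangle_optimized() function added below).
def next_pascal_row(prev_row: list[int]) -> list[int]:
    padded = [0] + prev_row + [0]
    n = len(prev_row) + 1  # length of the new row
    half = [padded[i] + padded[i + 1] for i in range(sum(divmod(n, 2)))]
    return half + half[: n // 2][::-1]

print(next_pascal_row([1, 3, 3, 1]))  # [1, 4, 6, 4, 1]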
* Modified type check hints in 'generate_pascal_triangle_optimized' function'
q
* Modified 'pascal_triangle' prgram
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci
* Update pascal_triangle.py

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Christian Clauss
---
 other/pascal_triangle.py | 109 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 101 insertions(+), 8 deletions(-)

diff --git a/other/pascal_triangle.py b/other/pascal_triangle.py
index 5cc3cee8af56..7f6555f9c8b9 100644
--- a/other/pascal_triangle.py
+++ b/other/pascal_triangle.py
@@ -1,13 +1,10 @@
 """
-This implementation demonstrates how to generate the
-elements of a Pascal's triangle. The element having
-a row index of r and column index of c can be derived
-as follows:
+This implementation demonstrates how to generate the elements of a Pascal's triangle.
+The element having a row index of r and column index of c can be derived as follows:
 triangle[r][c] = triangle[r-1][c-1]+triangle[r-1][c]
-What is Pascal's triangle?
-- It is a triangular array containing binomial coefficients.
-Refer to (https://en.wikipedia.org/wiki/Pascal%27s_triangle)
-for more info about this triangle.
+
+A Pascal's triangle is a triangular array containing binomial coefficients.
+https://en.wikipedia.org/wiki/Pascal%27s_triangle
 """
@@ -38,6 +35,8 @@ def print_pascal_triangle(num_rows: int) -> None:
 def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
     """
     Create Pascal's triangle for different number of rows
+    >>> generate_pascal_triangle(0)
+    []
     >>> generate_pascal_triangle(1)
     [[1]]
     >>> generate_pascal_triangle(2)
@@ -48,7 +47,26 @@ def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
     [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
     >>> generate_pascal_triangle(5)
     [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
+    >>> generate_pascal_triangle(-5)
+    Traceback (most recent call last):
+    ...
+    ValueError: The input value of 'num_rows' should be greater than or equal to 0
+    >>> generate_pascal_triangle(7.89)
+    Traceback (most recent call last):
+    ...
+    TypeError: The input value of 'num_rows' should be 'int'
     """
+
+    if not isinstance(num_rows, int):
+        raise TypeError("The input value of 'num_rows' should be 'int'")
+
+    if num_rows == 0:
+        return []
+    elif num_rows < 0:
+        raise ValueError(
+            "The input value of 'num_rows' should be greater than or equal to 0"
+        )
+
     triangle: list[list[int]] = []
     for current_row_idx in range(num_rows):
         current_row = populate_current_row(triangle, current_row_idx)
@@ -90,7 +108,82 @@ def calculate_current_element(
     current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


+def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
+    """
+    This function returns a matrix representing the corresponding pascal's triangle
+    according to the given input of number of rows of Pascal's triangle to be generated.
+    It reduces the operations done to generate a row by half
+    by eliminating redundant calculations.
+
+    :param num_rows: Integer specifying the number of rows in the Pascal's triangle
+    :return: 2-D List (matrix) representing the Pascal's triangle
+
+    Return the Pascal's triangle of given rows
+    >>> generate_pascal_triangle_optimized(3)
+    [[1], [1, 1], [1, 2, 1]]
+    >>> generate_pascal_triangle_optimized(1)
+    [[1]]
+    >>> generate_pascal_triangle_optimized(0)
+    []
+    >>> generate_pascal_triangle_optimized(-5)
+    Traceback (most recent call last):
+    ...
+ ValueError: The input value of 'num_rows' should be greater than or equal to 0 + >>> generate_pascal_triangle_optimized(7.89) + Traceback (most recent call last): + ... + TypeError: The input value of 'num_rows' should be 'int' + """ + + if not isinstance(num_rows, int): + raise TypeError("The input value of 'num_rows' should be 'int'") + + if num_rows == 0: + return [] + elif num_rows < 0: + raise ValueError( + "The input value of 'num_rows' should be greater than or equal to 0" + ) + + result: list[list[int]] = [[1]] + + for row_index in range(1, num_rows): + temp_row = [0] + result[-1] + [0] + row_length = row_index + 1 + # Calculate the number of distinct elements in a row + distinct_elements = sum(divmod(row_length, 2)) + row_first_half = [ + temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1) + ] + row_second_half = row_first_half[: (row_index + 1) // 2] + row_second_half.reverse() + row = row_first_half + row_second_half + result.append(row) + + return result + + +def benchmark() -> None: + """ + Benchmark multiple functions, with three different length int values. + """ + from collections.abc import Callable + from timeit import timeit + + def benchmark_a_function(func: Callable, value: int) -> None: + call = f"{func.__name__}({value})" + timing = timeit(f"__main__.{call}", setup="import __main__") + # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") + print(f"{call:38} -- {timing:.4f} seconds") + + for value in range(15): # (1, 7, 14): + for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): + benchmark_a_function(func, value) + print() + + if __name__ == "__main__": import doctest doctest.testmod() + benchmark() From 506b63f02da11691f19c4fd86c120e1d54842ea4 Mon Sep 17 00:00:00 2001 From: Shreyas Kamath <42207943+s18k@users.noreply.github.com> Date: Mon, 31 Oct 2022 22:34:42 +0530 Subject: [PATCH 0699/1543] Create convert_number_to_words.py (#6788) * Create convert_number_to_words.py A Python Program to convert numerical digits to English words. An Application of this can be in a Payment Application for confirmation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- web_programming/convert_number_to_words.py | 111 +++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 web_programming/convert_number_to_words.py diff --git a/web_programming/convert_number_to_words.py b/web_programming/convert_number_to_words.py new file mode 100644 index 000000000000..50612dec20dd --- /dev/null +++ b/web_programming/convert_number_to_words.py @@ -0,0 +1,111 @@ +import math + + +def convert(number: int) -> str: + """ + Given a number return the number in words. 
+ + >>> convert(123) + 'OneHundred,TwentyThree' + """ + if number == 0: + words = "Zero" + return words + else: + digits = math.log10(number) + digits = digits + 1 + singles = {} + singles[0] = "" + singles[1] = "One" + singles[2] = "Two" + singles[3] = "Three" + singles[4] = "Four" + singles[5] = "Five" + singles[6] = "Six" + singles[7] = "Seven" + singles[8] = "Eight" + singles[9] = "Nine" + + doubles = {} + doubles[0] = "" + doubles[2] = "Twenty" + doubles[3] = "Thirty" + doubles[4] = "Forty" + doubles[5] = "Fifty" + doubles[6] = "Sixty" + doubles[7] = "Seventy" + doubles[8] = "Eighty" + doubles[9] = "Ninety" + + teens = {} + teens[0] = "Ten" + teens[1] = "Eleven" + teens[2] = "Twelve" + teens[3] = "Thirteen" + teens[4] = "Fourteen" + teens[5] = "Fifteen" + teens[6] = "Sixteen" + teens[7] = "Seventeen" + teens[8] = "Eighteen" + teens[9] = "Nineteen" + + placevalue = {} + placevalue[2] = "Hundred," + placevalue[3] = "Thousand," + placevalue[5] = "Lakh," + placevalue[7] = "Crore," + + temp_num = number + words = "" + counter = 0 + digits = int(digits) + while counter < digits: + current = temp_num % 10 + if counter % 2 == 0: + addition = "" + if counter in placevalue.keys() and current != 0: + addition = placevalue[counter] + if counter == 2: + words = singles[current] + addition + words + elif counter == 0: + if ((temp_num % 100) // 10) == 1: + words = teens[current] + addition + words + temp_num = temp_num // 10 + counter += 1 + else: + words = singles[current] + addition + words + + else: + words = doubles[current] + addition + words + + else: + if counter == 1: + if current == 1: + words = teens[number % 10] + words + else: + addition = "" + if counter in placevalue.keys(): + addition = placevalue[counter] + words = doubles[current] + addition + words + else: + addition = "" + if counter in placevalue.keys(): + if current == 0 and ((temp_num % 100) // 10) == 0: + addition = "" + else: + addition = placevalue[counter] + if ((temp_num % 100) // 10) == 1: + words = teens[current] + addition + words + temp_num = temp_num // 10 + counter += 1 + else: + words = singles[current] + addition + words + counter += 1 + temp_num = temp_num // 10 + return words + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From ded5deabe9f9f1c8f2a57da8657056480f142b55 Mon Sep 17 00:00:00 2001 From: Shriyans Gandhi <41372639+shri30yans@users.noreply.github.com> Date: Mon, 31 Oct 2022 22:45:37 +0530 Subject: [PATCH 0700/1543] Dodecahedron surface area and volume (#6606) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Hexagonal number sequence A hexagonal number sequence is a sequence of figurate numbers where the nth hexagonal number hₙ is the number of distinct dots in a pattern of dots consisting of the outlines of regular hexagons with sides up to n dots, when the hexagons are overlaid so that they share one vertex. This program returns the hexagonal number sequence of n length. 
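[Editor's note -- not part of the commit message: the hexagonal numbers described in the bullet above have the closed form h_n = n(2n - 1), which gives a quick way to check any generated sequence.]

# Editorial sketch, not from the patch: first five hexagonal numbers via h_n = n * (2n - 1)
print([n * (2 * n - 1) for n in range(1, 6)])  # [1, 6, 15, 28, 45]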
* Update hexagonalnumbers.py * Update hexagonalnumbers.py * Update hexagonalnumbers.py * Update hexagonalnumbers.py * Update and rename hexagonalnumbers.py to hexagonal_numbers.py * Length must be a positive integer * Create dodecahedron.py * Update dodecahedron.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dodecahedron.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dodecahedron.py * Update dodecahedron.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dodecahedron.py * Update dodecahedron.py * Apply suggestions from code review Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Update dodecahedron.py Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Paul <56065602+ZeroDayOwl@users.noreply.github.com> --- maths/dodecahedron.py | 73 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 maths/dodecahedron.py diff --git a/maths/dodecahedron.py b/maths/dodecahedron.py new file mode 100644 index 000000000000..856245f4a868 --- /dev/null +++ b/maths/dodecahedron.py @@ -0,0 +1,73 @@ +# dodecahedron.py + +""" +A regular dodecahedron is a three-dimensional figure made up of +12 pentagon faces having the same equal size. +""" + + +def dodecahedron_surface_area(edge: float) -> float: + """ + Calculates the surface area of a regular dodecahedron + a = 3 * ((25 + 10 * (5** (1 / 2))) ** (1 / 2 )) * (e**2) + where: + a --> is the area of the dodecahedron + e --> is the length of the edge + reference-->"Dodecahedron" Study.com + + + :param edge: length of the edge of the dodecahedron + :type edge: float + :return: the surface area of the dodecahedron as a float + + + Tests: + >>> dodecahedron_surface_area(5) + 516.1432201766901 + >>> dodecahedron_surface_area(10) + 2064.5728807067603 + >>> dodecahedron_surface_area(-1) + Traceback (most recent call last): + ... + ValueError: Length must be a positive. + """ + + if edge <= 0 or not isinstance(edge, int): + raise ValueError("Length must be a positive.") + return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) + + +def dodecahedron_volume(edge: float) -> float: + """ + Calculates the volume of a regular dodecahedron + v = ((15 + (7 * (5** (1 / 2)))) / 4) * (e**3) + where: + v --> is the volume of the dodecahedron + e --> is the length of the edge + reference-->"Dodecahedron" Study.com + + + :param edge: length of the edge of the dodecahedron + :type edge: float + :return: the volume of the dodecahedron as a float + + Tests: + >>> dodecahedron_volume(5) + 957.8898700780791 + >>> dodecahedron_volume(10) + 7663.118960624633 + >>> dodecahedron_volume(-1) + Traceback (most recent call last): + ... + ValueError: Length must be a positive. 
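[Editor's note -- an illustration for this write-up, not part of the patch: the doctest value 516.14... above can be reproduced directly from the closed-form area A = 3 * sqrt(25 + 10 * sqrt(5)) * e**2.]

# Editorial sketch, not from the patch: closed-form dodecahedron surface area for edge = 5
from math import sqrt
print(3 * sqrt(25 + 10 * sqrt(5)) * 5**2)  # ~516.1432201766901, matching the doctest above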
+ """ + + if edge <= 0 or not isinstance(edge, int): + raise ValueError("Length must be a positive.") + return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 21601a4070830069101bbb0ddc2d662eac68d627 Mon Sep 17 00:00:00 2001 From: Kevin Joven <59969678+KevinJoven11@users.noreply.github.com> Date: Mon, 31 Oct 2022 13:32:54 -0400 Subject: [PATCH 0701/1543] create quantum_fourier_transform (#6682) * create quantum_fourier_transform This is part of the #Hacktoberfest. I build the quantum fourier transform for N qubits. (n = 3 in the example) Best, Kevin * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update q_fourier_transform.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add the doctest! * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update q_fourier_transform.py * Pass first then fail Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- quantum/q_fourier_transform.py | 97 ++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 quantum/q_fourier_transform.py diff --git a/quantum/q_fourier_transform.py b/quantum/q_fourier_transform.py new file mode 100644 index 000000000000..d138dfb452ee --- /dev/null +++ b/quantum/q_fourier_transform.py @@ -0,0 +1,97 @@ +""" +Build the quantum fourier transform (qft) for a desire +number of quantum bits using Qiskit framework. This +experiment run in IBM Q simulator with 10000 shots. +This circuit can be use as a building block to design +the Shor's algorithm in quantum computing. As well as, +quantum phase estimation among others. +. +References: +https://en.wikipedia.org/wiki/Quantum_Fourier_transform +https://qiskit.org/textbook/ch-algorithms/quantum-fourier-transform.html +""" + +import math + +import numpy as np +import qiskit +from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute + + +def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts: + """ + # >>> quantum_fourier_transform(2) + # {'00': 2500, '01': 2500, '11': 2500, '10': 2500} + # quantum circuit for number_of_qubits = 3: + ┌───┐ + qr_0: ──────■──────────────────────■───────┤ H ├─X─ + │ ┌───┐ │P(π/2) └───┘ │ + qr_1: ──────┼────────■───────┤ H ├─■─────────────┼─ + ┌───┐ │P(π/4) │P(π/2) └───┘ │ + qr_2: ┤ H ├─■────────■───────────────────────────X─ + └───┘ + cr: 3/═════════════════════════════════════════════ + Args: + n : number of qubits + Returns: + qiskit.result.counts.Counts: distribute counts. + + >>> quantum_fourier_transform(2) + {'00': 2500, '01': 2500, '10': 2500, '11': 2500} + >>> quantum_fourier_transform(-1) + Traceback (most recent call last): + ... + ValueError: number of qubits must be > 0. + >>> quantum_fourier_transform('a') + Traceback (most recent call last): + ... + TypeError: number of qubits must be a integer. + >>> quantum_fourier_transform(100) + Traceback (most recent call last): + ... + ValueError: number of qubits too large to simulate(>10). + >>> quantum_fourier_transform(0.5) + Traceback (most recent call last): + ... + ValueError: number of qubits must be exact integer. 
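[Editor's note -- not part of the patch: the doctest above reflects that the QFT of the all-zero state is a uniform superposition, so with 10000 shots each of the 2**n basis states is expected to appear about 10000 / 2**n times, i.e. 2500 each for n = 2.]

# Editorial sketch, not from the patch: expected ideal counts for n = 2 qubits and 10000 shots
n, shots = 2, 10000
print({format(i, "02b"): shots // 2**n for i in range(2**n)})  # {'00': 2500, '01': 2500, '10': 2500, '11': 2500}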
+ """ + if type(number_of_qubits) == str: + raise TypeError("number of qubits must be a integer.") + if not number_of_qubits > 0: + raise ValueError("number of qubits must be > 0.") + if math.floor(number_of_qubits) != number_of_qubits: + raise ValueError("number of qubits must be exact integer.") + if number_of_qubits > 10: + raise ValueError("number of qubits too large to simulate(>10).") + + qr = QuantumRegister(number_of_qubits, "qr") + cr = ClassicalRegister(number_of_qubits, "cr") + + quantum_circuit = QuantumCircuit(qr, cr) + + counter = number_of_qubits + + for i in range(counter): + + quantum_circuit.h(number_of_qubits - i - 1) + counter -= 1 + for j in range(counter): + quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter) + + for k in range(number_of_qubits // 2): + quantum_circuit.swap(k, number_of_qubits - k - 1) + + # measure all the qubits + quantum_circuit.measure(qr, cr) + # simulate with 10000 shots + backend = Aer.get_backend("qasm_simulator") + job = execute(quantum_circuit, backend, shots=10000) + + return job.result().get_counts(quantum_circuit) + + +if __name__ == "__main__": + print( + f"Total count for quantum fourier transform state is: \ + {quantum_fourier_transform(3)}" + ) From 6cd7c49525b520fc5fe44ac0568fe39393ff85b4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 31 Oct 2022 21:33:08 +0100 Subject: [PATCH 0702/1543] [pre-commit.ci] pre-commit autoupdate (#7920) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/asottile/pyupgrade: v3.1.0 → v3.2.0](https://github.com/asottile/pyupgrade/compare/v3.1.0...v3.2.0) * updating DIRECTORY.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 004def5e4e8b..a0ea03b9b8cd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,7 +27,7 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v3.1.0 + rev: v3.2.0 hooks: - id: pyupgrade args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 0b0d1e6a7c9d..5c4a032db6cd 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -238,6 +238,7 @@ * [Stack With Singly Linked List](data_structures/stacks/stack_with_singly_linked_list.py) * [Stock Span Problem](data_structures/stacks/stock_span_problem.py) * Trie + * [Radix Tree](data_structures/trie/radix_tree.py) * [Trie](data_structures/trie/trie.py) ## Digital Image Processing @@ -526,6 +527,7 @@ * [Collatz Sequence](maths/collatz_sequence.py) * [Combinations](maths/combinations.py) * [Decimal Isolate](maths/decimal_isolate.py) + * [Dodecahedron](maths/dodecahedron.py) * [Double Factorial Iterative](maths/double_factorial_iterative.py) * [Double Factorial Recursive](maths/double_factorial_recursive.py) * [Entropy](maths/entropy.py) @@ -994,6 +996,7 @@ * [Deutsch Jozsa](quantum/deutsch_jozsa.py) * [Half Adder](quantum/half_adder.py) * [Not Gate](quantum/not_gate.py) + * [Q Fourier Transform](quantum/q_fourier_transform.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) * [Quantum Teleportation](quantum/quantum_teleportation.py) @@ -1129,6 +1132,7 @@ ## Web Programming * [Co2 
Emission](web_programming/co2_emission.py) + * [Convert Number To Words](web_programming/convert_number_to_words.py) * [Covid Stats Via Xpath](web_programming/covid_stats_via_xpath.py) * [Crawl Google Results](web_programming/crawl_google_results.py) * [Crawl Google Scholar Citation](web_programming/crawl_google_scholar_citation.py) From 6c15f526e58dbb3d3a67e613323781df39b58620 Mon Sep 17 00:00:00 2001 From: Paradact <44441385+Paradact@users.noreply.github.com> Date: Mon, 31 Oct 2022 22:50:50 +0100 Subject: [PATCH 0703/1543] Added Torus surface area (#7906) * Added Torus surface area * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed error in test Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/area.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/maths/area.py b/maths/area.py index 5db7dac38973..ea7216c8fe3f 100644 --- a/maths/area.py +++ b/maths/area.py @@ -201,6 +201,40 @@ def surface_area_cylinder(radius: float, height: float) -> float: return 2 * pi * radius * (height + radius) +def surface_area_torus(torus_radius: float, tube_radius: float) -> float: + """Calculate the Area of a Torus. + Wikipedia reference: https://en.wikipedia.org/wiki/Torus + :return 4pi^2 * torus_radius * tube_radius + >>> surface_area_torus(1, 1) + 39.47841760435743 + >>> surface_area_torus(4, 3) + 473.7410112522892 + >>> surface_area_torus(3, 4) + Traceback (most recent call last): + ... + ValueError: surface_area_torus() does not support spindle or self intersecting tori + >>> surface_area_torus(1.6, 1.6) + 101.06474906715503 + >>> surface_area_torus(0, 0) + 0.0 + >>> surface_area_torus(-1, 1) + Traceback (most recent call last): + ... + ValueError: surface_area_torus() only accepts non-negative values + >>> surface_area_torus(1, -1) + Traceback (most recent call last): + ... + ValueError: surface_area_torus() only accepts non-negative values + """ + if torus_radius < 0 or tube_radius < 0: + raise ValueError("surface_area_torus() only accepts non-negative values") + if torus_radius < tube_radius: + raise ValueError( + "surface_area_torus() does not support spindle or self intersecting tori" + ) + return 4 * pow(pi, 2) * torus_radius * tube_radius + + def area_rectangle(length: float, width: float) -> float: """ Calculate the area of a rectangle. 
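[Editor's note -- an illustration for this write-up, not part of the patch: the doctest value 473.74... above follows directly from the torus surface-area formula A = 4 * pi**2 * R * r.]

# Editorial sketch, not from the patch: torus surface area for torus radius R = 4, tube radius r = 3
from math import pi
print(4 * pi**2 * 4 * 3)  # ~473.7410112522892, matching surface_area_torus(4, 3) above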
@@ -543,6 +577,7 @@ def area_reg_polygon(sides: int, length: float) -> float: print(f"Cone: {surface_area_cone(10, 20) = }") print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }") print(f"Cylinder: {surface_area_cylinder(10, 20) = }") + print(f"Torus: {surface_area_torus(20, 10) = }") print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }") print(f"Square: {area_reg_polygon(4, 10) = }") print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }") From 7addbccee72d2b18e6d095ab6675cbcb290412ce Mon Sep 17 00:00:00 2001 From: Paradact <44441385+Paradact@users.noreply.github.com> Date: Mon, 31 Oct 2022 22:51:45 +0100 Subject: [PATCH 0704/1543] Torus volume (#7905) * Added Torus volume algorithm * Updated Torus volume for simplicity (removed ref to vol_sphere()) * Refactoring * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/volume.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/maths/volume.py b/maths/volume.py index da4054646659..1da4584c893e 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -441,6 +441,34 @@ def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> floa ) +def vol_torus(torus_radius: float, tube_radius: float) -> float: + """Calculate the Volume of a Torus. + Wikipedia reference: https://en.wikipedia.org/wiki/Torus + :return 2pi^2 * torus_radius * tube_radius^2 + >>> vol_torus(1, 1) + 19.739208802178716 + >>> vol_torus(4, 3) + 710.6115168784338 + >>> vol_torus(3, 4) + 947.4820225045784 + >>> vol_torus(1.6, 1.6) + 80.85179925372404 + >>> vol_torus(0, 0) + 0.0 + >>> vol_torus(-1, 1) + Traceback (most recent call last): + ... + ValueError: vol_torus() only accepts non-negative values + >>> vol_torus(1, -1) + Traceback (most recent call last): + ... + ValueError: vol_torus() only accepts non-negative values + """ + if torus_radius < 0 or tube_radius < 0: + raise ValueError("vol_torus() only accepts non-negative values") + return 2 * pow(pi, 2) * torus_radius * pow(tube_radius, 2) + + def main(): """Print the Results of Various Volume Calculations.""" print("Volumes:") @@ -453,6 +481,7 @@ def main(): print(f"Sphere: {vol_sphere(2) = }") # ~= 33.5 print(f"Hemisphere: {vol_hemisphere(2) = }") # ~= 16.75 print(f"Circular Cylinder: {vol_circular_cylinder(2, 2) = }") # ~= 25.1 + print(f"Torus: {vol_torus(2, 2) = }") # ~= 157.9 print(f"Conical Frustum: {vol_conical_frustum(2, 2, 4) = }") # ~= 58.6 print(f"Spherical cap: {vol_spherical_cap(1, 2) = }") # ~= 5.24 print(f"Spheres intersetion: {vol_spheres_intersect(2, 2, 1) = }") # ~= 21.21 From 74aa9efa1d164e7dba56a88b4b3546232f3c3024 Mon Sep 17 00:00:00 2001 From: Gustavobflh <43830003+Gustavobflh@users.noreply.github.com> Date: Mon, 31 Oct 2022 19:04:42 -0300 Subject: [PATCH 0705/1543] Added a Hubble Parameter calculator file (#7921) --- physics/hubble_parameter.py | 110 ++++++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 physics/hubble_parameter.py diff --git a/physics/hubble_parameter.py b/physics/hubble_parameter.py new file mode 100644 index 000000000000..7985647222c9 --- /dev/null +++ b/physics/hubble_parameter.py @@ -0,0 +1,110 @@ +""" +Title : Calculating the Hubble Parameter + +Description : The Hubble parameter H is the Universe expansion rate +in any time. 
In cosmology it is customary to use the redshift
+in place of time, because the redshift is directly measured
+in the light of galaxies moving away from us.
+
+So, the general relation that we obtain is
+
+H = hubble_constant*(radiation_density*(redshift+1)**4
+                     + matter_density*(redshift+1)**3
+                     + curvature*(redshift+1)**2 + dark_energy)**(1/2)
+
+where radiation_density, matter_density, dark_energy are the relative
+(percentage) energy densities that exist
+in the Universe today. Here, matter_density is the
+sum of the baryon density and the
+dark matter density. Curvature is the curvature parameter and can be written
+in terms of the densities by the completeness relation
+
+
+curvature = 1 - (matter_density + radiation_density + dark_energy)
+
+Source :
+https://www.sciencedirect.com/topics/mathematics/hubble-parameter
+"""
+
+
+def hubble_parameter(
+    hubble_constant: float,
+    radiation_density: float,
+    matter_density: float,
+    dark_energy: float,
+    redshift: float,
+) -> float:
+
+    """
+    Input Parameters
+    ----------------
+    hubble_constant: Hubble constant is the expansion rate today, usually
+    given in km/(s*Mpc)
+
+    radiation_density: relative radiation density today
+
+    matter_density: relative mass density today
+
+    dark_energy: relative dark energy density today
+
+    redshift: the light redshift
+
+    Returns
+    -------
+    result : Hubble parameter in the unit km/s/Mpc (the unit can be
+    changed if you want; just change the unit of the Hubble constant)
+
+    >>> hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
+    ... matter_density=-0.3, dark_energy=0.7, redshift=1)
+    Traceback (most recent call last):
+    ...
+    ValueError: All input parameters must be positive
+
+    >>> hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
+    ... matter_density= 1.2, dark_energy=0.7, redshift=1)
+    Traceback (most recent call last):
+    ...
+    ValueError: Relative densities cannot be greater than one
+
+    >>> hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
+    ...
matter_density= 0.3, dark_energy=0.7, redshift=0) + 68.3 + """ + parameters = [redshift, radiation_density, matter_density, dark_energy] + if any(0 > p for p in parameters): + raise ValueError("All input parameters must be positive") + + if any(1 < p for p in parameters[1:4]): + raise ValueError("Relative densities cannot be greater than one") + else: + curvature = 1 - (matter_density + radiation_density + dark_energy) + + e_2 = ( + radiation_density * (redshift + 1) ** 4 + + matter_density * (redshift + 1) ** 3 + + curvature * (redshift + 1) ** 2 + + dark_energy + ) + + hubble = hubble_constant * e_2 ** (1 / 2) + return hubble + + +if __name__ == "__main__": + import doctest + + # run doctest + doctest.testmod() + + # demo LCDM approximation + matter_density = 0.3 + + print( + hubble_parameter( + hubble_constant=68.3, + radiation_density=1e-4, + matter_density=matter_density, + dark_energy=1 - matter_density, + redshift=0, + ) + ) From 7d139ee7f1e48648cc8cf176b293d23d2ba85d13 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Tue, 1 Nov 2022 06:50:43 +0000 Subject: [PATCH 0706/1543] refactor(abs): Condense `abs_min` and `abs_max` (#7881) * refactor(abs): Condense `abs_min` and `abs_max` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/abs.py | 66 ++++++++++++++++++++++++++++++++++++++++++++++++ maths/abs_max.py | 50 ------------------------------------ maths/abs_min.py | 35 ------------------------- 3 files changed, 66 insertions(+), 85 deletions(-) delete mode 100644 maths/abs_max.py delete mode 100644 maths/abs_min.py diff --git a/maths/abs.py b/maths/abs.py index dfea52dfbb97..cb0ffc8a5b61 100644 --- a/maths/abs.py +++ b/maths/abs.py @@ -15,6 +15,62 @@ def abs_val(num: float) -> float: return -num if num < 0 else num +def abs_min(x: list[int]) -> int: + """ + >>> abs_min([0,5,1,11]) + 0 + >>> abs_min([3,-10,-2]) + -2 + >>> abs_min([]) + Traceback (most recent call last): + ... + ValueError: abs_min() arg is an empty sequence + """ + if len(x) == 0: + raise ValueError("abs_min() arg is an empty sequence") + j = x[0] + for i in x: + if abs_val(i) < abs_val(j): + j = i + return j + + +def abs_max(x: list[int]) -> int: + """ + >>> abs_max([0,5,1,11]) + 11 + >>> abs_max([3,-10,-2]) + -10 + >>> abs_max([]) + Traceback (most recent call last): + ... + ValueError: abs_max() arg is an empty sequence + """ + if len(x) == 0: + raise ValueError("abs_max() arg is an empty sequence") + j = x[0] + for i in x: + if abs(i) > abs(j): + j = i + return j + + +def abs_max_sort(x: list[int]) -> int: + """ + >>> abs_max_sort([0,5,1,11]) + 11 + >>> abs_max_sort([3,-10,-2]) + -10 + >>> abs_max_sort([]) + Traceback (most recent call last): + ... 
+ ValueError: abs_max_sort() arg is an empty sequence + """ + if len(x) == 0: + raise ValueError("abs_max_sort() arg is an empty sequence") + return sorted(x, key=abs)[-1] + + def test_abs_val(): """ >>> test_abs_val() @@ -23,6 +79,16 @@ def test_abs_val(): assert 34 == abs_val(34) assert 100000000000 == abs_val(-100000000000) + a = [-3, -1, 2, -11] + assert abs_max(a) == -11 + assert abs_max_sort(a) == -11 + assert abs_min(a) == -1 + if __name__ == "__main__": + import doctest + + doctest.testmod() + + test_abs_val() print(abs_val(-34)) # --> 34 diff --git a/maths/abs_max.py b/maths/abs_max.py deleted file mode 100644 index 4a4b4d9ebca3..000000000000 --- a/maths/abs_max.py +++ /dev/null @@ -1,50 +0,0 @@ -from __future__ import annotations - - -def abs_max(x: list[int]) -> int: - """ - >>> abs_max([0,5,1,11]) - 11 - >>> abs_max([3,-10,-2]) - -10 - >>> abs_max([]) - Traceback (most recent call last): - ... - ValueError: abs_max() arg is an empty sequence - """ - if len(x) == 0: - raise ValueError("abs_max() arg is an empty sequence") - j = x[0] - for i in x: - if abs(i) > abs(j): - j = i - return j - - -def abs_max_sort(x: list[int]) -> int: - """ - >>> abs_max_sort([0,5,1,11]) - 11 - >>> abs_max_sort([3,-10,-2]) - -10 - >>> abs_max_sort([]) - Traceback (most recent call last): - ... - ValueError: abs_max_sort() arg is an empty sequence - """ - if len(x) == 0: - raise ValueError("abs_max_sort() arg is an empty sequence") - return sorted(x, key=abs)[-1] - - -def main(): - a = [1, 2, -11] - assert abs_max(a) == -11 - assert abs_max_sort(a) == -11 - - -if __name__ == "__main__": - import doctest - - doctest.testmod(verbose=True) - main() diff --git a/maths/abs_min.py b/maths/abs_min.py deleted file mode 100644 index 00dbcb025cfb..000000000000 --- a/maths/abs_min.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import annotations - -from .abs import abs_val - - -def abs_min(x: list[int]) -> int: - """ - >>> abs_min([0,5,1,11]) - 0 - >>> abs_min([3,-10,-2]) - -2 - >>> abs_min([]) - Traceback (most recent call last): - ... 
- ValueError: abs_min() arg is an empty sequence - """ - if len(x) == 0: - raise ValueError("abs_min() arg is an empty sequence") - j = x[0] - for i in x: - if abs_val(i) < abs_val(j): - j = i - return j - - -def main(): - a = [-3, -1, 2, -11] - print(abs_min(a)) # = -1 - - -if __name__ == "__main__": - import doctest - - doctest.testmod(verbose=True) - main() From d23e709aea75647540e6ba57b3a5979854e80117 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 1 Nov 2022 14:07:11 +0100 Subject: [PATCH 0707/1543] maths/sum_of_digits.py: Streamline benchmarks (#7914) * maths/sum_of_digits.py: Streamline benchmarks ``` sum_of_digits(262144): 19 -- 0.3128329170285724 seconds sum_of_digits_recursion(262144): 19 -- 0.34008108399575576 seconds sum_of_digits_compact(262144): 19 -- 0.6086010000435635 seconds sum_of_digits(1125899906842624): 76 -- 0.8079068749793805 seconds sum_of_digits_recursion(1125899906842624): 76 -- 0.8435653329943307 seconds sum_of_digits_compact(1125899906842624): 76 -- 1.247976207989268 seconds sum_of_digits(1267650600228229401496703205376): 115 -- 1.6441589999594726 seconds sum_of_digits_recursion(1267650600228229401496703205376): 115 -- 1.713684624992311 seconds sum_of_digits_compact(1267650600228229401496703205376): 115 -- 2.2197747920290567 seconds ``` * updating DIRECTORY.md * Update sum_of_digits.py * Update sum_of_digits.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- maths/sum_of_digits.py | 99 +++++------------------------------------- 1 file changed, 12 insertions(+), 87 deletions(-) diff --git a/maths/sum_of_digits.py b/maths/sum_of_digits.py index 5ad5fe6c9877..d5488bb9e9e0 100644 --- a/maths/sum_of_digits.py +++ b/maths/sum_of_digits.py @@ -1,10 +1,6 @@ -from timeit import timeit - - def sum_of_digits(n: int) -> int: """ Find the sum of digits of a number. - >>> sum_of_digits(12345) 15 >>> sum_of_digits(123) @@ -25,7 +21,6 @@ def sum_of_digits(n: int) -> int: def sum_of_digits_recursion(n: int) -> int: """ Find the sum of digits of a number using recursion - >>> sum_of_digits_recursion(12345) 15 >>> sum_of_digits_recursion(123) @@ -42,7 +37,6 @@ def sum_of_digits_recursion(n: int) -> int: def sum_of_digits_compact(n: int) -> int: """ Find the sum of digits of a number - >>> sum_of_digits_compact(12345) 15 >>> sum_of_digits_compact(123) @@ -57,93 +51,24 @@ def sum_of_digits_compact(n: int) -> int: def benchmark() -> None: """ - Benchmark code for comparing 3 functions, - with 3 different length int values. + Benchmark multiple functions, with three different length int values. 
""" - print("\nFor small_num = ", small_num, ":") - print( - "> sum_of_digits()", - "\t\tans =", - sum_of_digits(small_num), - "\ttime =", - timeit("z.sum_of_digits(z.small_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> sum_of_digits_recursion()", - "\tans =", - sum_of_digits_recursion(small_num), - "\ttime =", - timeit("z.sum_of_digits_recursion(z.small_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> sum_of_digits_compact()", - "\tans =", - sum_of_digits_compact(small_num), - "\ttime =", - timeit("z.sum_of_digits_compact(z.small_num)", setup="import __main__ as z"), - "seconds", - ) + from collections.abc import Callable + from timeit import timeit - print("\nFor medium_num = ", medium_num, ":") - print( - "> sum_of_digits()", - "\t\tans =", - sum_of_digits(medium_num), - "\ttime =", - timeit("z.sum_of_digits(z.medium_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> sum_of_digits_recursion()", - "\tans =", - sum_of_digits_recursion(medium_num), - "\ttime =", - timeit("z.sum_of_digits_recursion(z.medium_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> sum_of_digits_compact()", - "\tans =", - sum_of_digits_compact(medium_num), - "\ttime =", - timeit("z.sum_of_digits_compact(z.medium_num)", setup="import __main__ as z"), - "seconds", - ) + def benchmark_a_function(func: Callable, value: int) -> None: + call = f"{func.__name__}({value})" + timing = timeit(f"__main__.{call}", setup="import __main__") + print(f"{call:56} = {func(value)} -- {timing:.4f} seconds") - print("\nFor large_num = ", large_num, ":") - print( - "> sum_of_digits()", - "\t\tans =", - sum_of_digits(large_num), - "\ttime =", - timeit("z.sum_of_digits(z.large_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> sum_of_digits_recursion()", - "\tans =", - sum_of_digits_recursion(large_num), - "\ttime =", - timeit("z.sum_of_digits_recursion(z.large_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> sum_of_digits_compact()", - "\tans =", - sum_of_digits_compact(large_num), - "\ttime =", - timeit("z.sum_of_digits_compact(z.large_num)", setup="import __main__ as z"), - "seconds", - ) + for value in (262144, 1125899906842624, 1267650600228229401496703205376): + for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): + benchmark_a_function(func, value) + print() if __name__ == "__main__": - small_num = 262144 - medium_num = 1125899906842624 - large_num = 1267650600228229401496703205376 - benchmark() import doctest doctest.testmod() + benchmark() From 4e6c1c049dffdc984232fe1fce1e4791fc527d11 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Tue, 1 Nov 2022 21:43:03 +0400 Subject: [PATCH 0708/1543] Is power of two (#7936) * add is power of two * fix comment * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Deal with negative numbers * Spelling: negative Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- bit_manipulation/is_power_of_two.py | 57 +++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 bit_manipulation/is_power_of_two.py diff --git a/bit_manipulation/is_power_of_two.py b/bit_manipulation/is_power_of_two.py new file mode 100644 index 000000000000..023e979fe51c --- /dev/null +++ b/bit_manipulation/is_power_of_two.py @@ -0,0 +1,57 @@ +""" +Author : Alexander Pantyukhin +Date : November 1, 2022 + +Task: +Given a positive int 
number. Return True if this number is power of 2 +or False otherwise. + +Implementation notes: Use bit manipulation. +For example if the number is the power of two it's bits representation: +n = 0..100..00 +n - 1 = 0..011..11 + +n & (n - 1) - no intersections = 0 +""" + + +def is_power_of_two(number: int) -> bool: + """ + Return True if this number is power of 2 or False otherwise. + + >>> is_power_of_two(0) + True + >>> is_power_of_two(1) + True + >>> is_power_of_two(2) + True + >>> is_power_of_two(4) + True + >>> is_power_of_two(6) + False + >>> is_power_of_two(8) + True + >>> is_power_of_two(17) + False + >>> is_power_of_two(-1) + Traceback (most recent call last): + ... + ValueError: number must not be negative + >>> is_power_of_two(1.2) + Traceback (most recent call last): + ... + TypeError: unsupported operand type(s) for &: 'float' and 'float' + + # Test all powers of 2 from 0 to 10,000 + >>> all(is_power_of_two(int(2 ** i)) for i in range(10000)) + True + """ + if number < 0: + raise ValueError("number must not be negative") + return number & (number - 1) == 0 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f512b4d105b6f3188deced19761b6ed288378f0d Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Tue, 1 Nov 2022 19:25:39 +0000 Subject: [PATCH 0709/1543] refactor: Move pascals triange to maths/ (#7932) * refactor: Move pascals triange to maths/ * Update xgboost_classifier.py * statsmodels is now compatible with Python 3.11 * statsmodels is now compatible with Python 3.11 * cython>=0.29.28 * cython>=0.29.28 # For statsmodels on Python 3.11 Co-authored-by: Christian Clauss --- .github/workflows/build.yml | 1 - machine_learning/xgboost_classifier.py | 2 +- {other => matrix}/pascal_triangle.py | 0 requirements.txt | 3 ++- 4 files changed, 3 insertions(+), 3 deletions(-) rename {other => matrix}/pascal_triangle.py (100%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1069c68d215f..6b9cc890b6af 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,7 +25,6 @@ jobs: # See: #6591 for re-enabling tests on Python v3.11 run: pytest --ignore=computer_vision/cnn_classification.py - --ignore=machine_learning/forecasting/run.py --ignore=machine_learning/lstm/lstm_prediction.py --ignore=quantum/ --ignore=project_euler/ diff --git a/machine_learning/xgboost_classifier.py b/machine_learning/xgboost_classifier.py index 62a1b331baaf..08967f1715a1 100644 --- a/machine_learning/xgboost_classifier.py +++ b/machine_learning/xgboost_classifier.py @@ -23,7 +23,7 @@ def data_handling(data: dict) -> tuple: def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier: """ - >>> xgboost(np.array([[5.1, 3.6, 1.4, 0.2]]), np.array([0])) + # THIS TEST IS BROKEN!! 
>>> xgboost(np.array([[5.1, 3.6, 1.4, 0.2]]), np.array([0])) XGBClassifier(base_score=0.5, booster='gbtree', callbacks=None, colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1, early_stopping_rounds=None, enable_categorical=False, diff --git a/other/pascal_triangle.py b/matrix/pascal_triangle.py similarity index 100% rename from other/pascal_triangle.py rename to matrix/pascal_triangle.py diff --git a/requirements.txt b/requirements.txt index ae62039988e6..2e278245541d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ beautifulsoup4 +cython>=0.29.28 # For statsmodels on Python 3.11 fake_useragent keras lxml @@ -13,7 +14,7 @@ requests rich scikit-fuzzy sklearn -statsmodels; python_version < "3.11" +statsmodels sympy tensorflow; python_version < "3.11" texttable From f05baa2b2b9aeb5a9ae8184ff418a5ccdc56960a Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Wed, 2 Nov 2022 16:25:19 +0400 Subject: [PATCH 0710/1543] add dp up - down minimum cost for tickets (#7934) * add dp up - down minimum cost for tickets * add typints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add new tests and checks. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add more tests * add types for the dp function * Update dynamic_programming/minimum_tickets_cost.py Co-authored-by: Christian Clauss * fix review notes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * small fix * Update dynamic_programming/minimum_tickets_cost.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_tickets_cost.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix tests * Update dynamic_programming/minimum_tickets_cost.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_tickets_cost.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- dynamic_programming/minimum_tickets_cost.py | 129 ++++++++++++++++++++ 1 file changed, 129 insertions(+) create mode 100644 dynamic_programming/minimum_tickets_cost.py diff --git a/dynamic_programming/minimum_tickets_cost.py b/dynamic_programming/minimum_tickets_cost.py new file mode 100644 index 000000000000..261a5a7cf42a --- /dev/null +++ b/dynamic_programming/minimum_tickets_cost.py @@ -0,0 +1,129 @@ +""" +Author : Alexander Pantyukhin +Date : November 1, 2022 + +Task: +Given a list of days when you need to travel. Each day is integer from 1 to 365. +You are able to use tickets for 1 day, 7 days and 30 days. +Each ticket has a cost. + +Find the minimum cost you need to travel every day in the given list of days. + +Implementation notes: +implementation Dynamic Programming up bottom approach. + +Runtime complexity: O(n) + +The implementation was tested on the +leetcode: https://leetcode.com/problems/minimum-cost-for-tickets/ + + +Minimum Cost For Tickets +Dynamic Programming: up -> down. 
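# Editorial aside, not part of the patch: the first doctest below, worked by hand.
# days = [1, 4, 6, 7, 8, 20], costs = [2, 7, 15] for 1-, 7- and 30-day passes:
#   one 7-day pass bought on day 1 covers days 1, 4, 6 and 7  -> cost 7
#   one 1-day pass for day 8                                  -> cost 2
#   one 1-day pass for day 20                                 -> cost 2
# Total = 11, which is exactly what mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
# returns in the doctest.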
+""" + +from functools import lru_cache + + +def mincost_tickets(days: list[int], costs: list[int]) -> int: + """ + >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) + 11 + + >>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15]) + 17 + + >>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 90, 150]) + 24 + + >>> mincost_tickets([2], [2, 90, 150]) + 2 + + >>> mincost_tickets([], [2, 90, 150]) + 0 + + >>> mincost_tickets('hello', [2, 90, 150]) + Traceback (most recent call last): + ... + ValueError: The parameter days should be a list of integers + + >>> mincost_tickets([], 'world') + Traceback (most recent call last): + ... + ValueError: The parameter costs should be a list of three integers + + >>> mincost_tickets([0.25, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 90, 150]) + Traceback (most recent call last): + ... + ValueError: The parameter days should be a list of integers + + >>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 0.9, 150]) + Traceback (most recent call last): + ... + ValueError: The parameter costs should be a list of three integers + + >>> mincost_tickets([-1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 90, 150]) + Traceback (most recent call last): + ... + ValueError: All days elements should be greater than 0 + + >>> mincost_tickets([2, 367], [2, 90, 150]) + Traceback (most recent call last): + ... + ValueError: All days elements should be less than 366 + + >>> mincost_tickets([2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], []) + Traceback (most recent call last): + ... + ValueError: The parameter costs should be a list of three integers + + >>> mincost_tickets([], []) + Traceback (most recent call last): + ... + ValueError: The parameter costs should be a list of three integers + + >>> mincost_tickets([2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [1, 2, 3, 4]) + Traceback (most recent call last): + ... 
+ ValueError: The parameter costs should be a list of three integers + """ + + # Validation + if not isinstance(days, list) or not all(isinstance(day, int) for day in days): + raise ValueError("The parameter days should be a list of integers") + + if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs): + raise ValueError("The parameter costs should be a list of three integers") + + if len(days) == 0: + return 0 + + if min(days) <= 0: + raise ValueError("All days elements should be greater than 0") + + if max(days) >= 366: + raise ValueError("All days elements should be less than 366") + + days_set = set(days) + + @lru_cache(maxsize=None) + def dynamic_programming(index: int) -> int: + if index > 365: + return 0 + + if index not in days_set: + return dp(index + 1) + + return min( + costs[0] + dp(index + 1), + costs[1] + dp(index + 7), + costs[2] + dp(index + 30), + ) + + return dynamic_programming(1) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 598f6a26a14d815f5fd079f43787995b0f076c03 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Wed, 2 Nov 2022 16:20:57 +0000 Subject: [PATCH 0711/1543] refactor: Condense `password` related files in one (#7939) * refactor: Condense `password` related files in one * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update other/password.py Co-authored-by: Christian Clauss * dynamic_programming * test: Make test input `str` * requirements.txt: Remove cython>=0.29.28 # For statsmodels on Python 3.11 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- dynamic_programming/minimum_tickets_cost.py | 8 ++-- other/check_strong_password.py | 47 -------------------- other/{password_generator.py => password.py} | 44 +++++++++++++++++- requirements.txt | 1 - 4 files changed, 46 insertions(+), 54 deletions(-) delete mode 100644 other/check_strong_password.py rename other/{password_generator.py => password.py} (58%) diff --git a/dynamic_programming/minimum_tickets_cost.py b/dynamic_programming/minimum_tickets_cost.py index 261a5a7cf42a..d07056d9217f 100644 --- a/dynamic_programming/minimum_tickets_cost.py +++ b/dynamic_programming/minimum_tickets_cost.py @@ -112,12 +112,12 @@ def dynamic_programming(index: int) -> int: return 0 if index not in days_set: - return dp(index + 1) + return dynamic_programming(index + 1) return min( - costs[0] + dp(index + 1), - costs[1] + dp(index + 7), - costs[2] + dp(index + 30), + costs[0] + dynamic_programming(index + 1), + costs[1] + dynamic_programming(index + 7), + costs[2] + dynamic_programming(index + 30), ) return dynamic_programming(1) diff --git a/other/check_strong_password.py b/other/check_strong_password.py deleted file mode 100644 index 95bb327addf4..000000000000 --- a/other/check_strong_password.py +++ /dev/null @@ -1,47 +0,0 @@ -# This Will Check Whether A Given Password Is Strong Or Not -# It Follows The Rule that Length Of Password Should Be At Least 8 Characters -# And At Least 1 Lower, 1 Upper, 1 Number And 1 Special Character - -from string import ascii_lowercase, ascii_uppercase, digits, punctuation - - -def strong_password_detector(password: str, min_length: int = 8) -> str: - """ - >>> strong_password_detector('Hwea7$2!') - 'This is a strong Password' - - >>> strong_password_detector('Sh0r1') - 'Your Password must be at least 8 characters long' - - >>> strong_password_detector('Hello123') - 'Password should contain UPPERCASE, 
lowercase, numbers, special characters' - - >>> strong_password_detector('Hello1238udfhiaf038fajdvjjf!jaiuFhkqi1') - 'This is a strong Password' - - >>> strong_password_detector(0) - 'Your Password must be at least 8 characters long' - """ - - if len(str(password)) < 8: - return "Your Password must be at least 8 characters long" - - upper = any(char in ascii_uppercase for char in password) - lower = any(char in ascii_lowercase for char in password) - num = any(char in digits for char in password) - spec_char = any(char in punctuation for char in password) - - if upper and lower and num and spec_char: - return "This is a strong Password" - - else: - return ( - "Password should contain UPPERCASE, lowercase, " - "numbers, special characters" - ) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/other/password_generator.py b/other/password.py similarity index 58% rename from other/password_generator.py rename to other/password.py index 8f9d58a33b82..8f6833073288 100644 --- a/other/password_generator.py +++ b/other/password.py @@ -1,11 +1,12 @@ -"""Password Generator allows you to generate a random password of length N.""" import secrets from random import shuffle -from string import ascii_letters, digits, punctuation +from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def password_generator(length: int = 8) -> str: """ + Password Generator allows you to generate a random password of length N. + >>> len(password_generator()) 8 >>> len(password_generator(length=16)) @@ -62,6 +63,45 @@ def random_characters(chars_incl, i): pass # Put your code here... +# This Will Check Whether A Given Password Is Strong Or Not +# It Follows The Rule that Length Of Password Should Be At Least 8 Characters +# And At Least 1 Lower, 1 Upper, 1 Number And 1 Special Character +def strong_password_detector(password: str, min_length: int = 8) -> str: + """ + >>> strong_password_detector('Hwea7$2!') + 'This is a strong Password' + + >>> strong_password_detector('Sh0r1') + 'Your Password must be at least 8 characters long' + + >>> strong_password_detector('Hello123') + 'Password should contain UPPERCASE, lowercase, numbers, special characters' + + >>> strong_password_detector('Hello1238udfhiaf038fajdvjjf!jaiuFhkqi1') + 'This is a strong Password' + + >>> strong_password_detector('0') + 'Your Password must be at least 8 characters long' + """ + + if len(password) < min_length: + return "Your Password must be at least 8 characters long" + + upper = any(char in ascii_uppercase for char in password) + lower = any(char in ascii_lowercase for char in password) + num = any(char in digits for char in password) + spec_char = any(char in punctuation for char in password) + + if upper and lower and num and spec_char: + return "This is a strong Password" + + else: + return ( + "Password should contain UPPERCASE, lowercase, " + "numbers, special characters" + ) + + def main(): length = int(input("Please indicate the max length of your password: ").strip()) chars_incl = input( diff --git a/requirements.txt b/requirements.txt index 2e278245541d..00f31b85e404 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,4 @@ beautifulsoup4 -cython>=0.29.28 # For statsmodels on Python 3.11 fake_useragent keras lxml From 45b3383c3952f646e985972d1fcd772d3d9f5d3f Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 2 Nov 2022 19:20:45 +0100 Subject: [PATCH 0712/1543] Flake8: Drop ignore of issue A003 (#7949) * Flake8: Drop ignore of issue A003 * updating 
DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .flake8 | 4 +-- DIRECTORY.md | 10 +++--- data_structures/binary_tree/fenwick_tree.py | 8 ++--- data_structures/heap/heap.py | 7 ----- .../linked_list/merge_two_lists.py | 4 +-- data_structures/queue/double_ended_queue.py | 31 +++++++++---------- linear_algebra/src/lib.py | 12 ------- other/lfu_cache.py | 14 ++++----- other/lru_cache.py | 14 ++++----- 9 files changed, 42 insertions(+), 62 deletions(-) diff --git a/.flake8 b/.flake8 index 0d9ef18d142b..2bb36b71a271 100644 --- a/.flake8 +++ b/.flake8 @@ -1,8 +1,8 @@ [flake8] max-line-length = 88 -max-complexity = 25 +# max-complexity should be 10 +max-complexity = 23 extend-ignore = - A003 # Class attribute is shadowing a python builtin # Formatting style for `black` E203 # Whitespace before ':' W503 # Line break occurred before a binary operator diff --git a/DIRECTORY.md b/DIRECTORY.md index 5c4a032db6cd..a2112bcfb7b4 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -48,6 +48,7 @@ * [Highest Set Bit](bit_manipulation/highest_set_bit.py) * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) * [Is Even](bit_manipulation/is_even.py) + * [Is Power Of Two](bit_manipulation/is_power_of_two.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) @@ -315,6 +316,7 @@ * [Minimum Partition](dynamic_programming/minimum_partition.py) * [Minimum Squares To Represent A Number](dynamic_programming/minimum_squares_to_represent_a_number.py) * [Minimum Steps To One](dynamic_programming/minimum_steps_to_one.py) + * [Minimum Tickets Cost](dynamic_programming/minimum_tickets_cost.py) * [Optimal Binary Search Tree](dynamic_programming/optimal_binary_search_tree.py) * [Palindrome Partitioning](dynamic_programming/palindrome_partitioning.py) * [Rod Cutting](dynamic_programming/rod_cutting.py) @@ -496,8 +498,6 @@ ## Maths * [3N Plus 1](maths/3n_plus_1.py) * [Abs](maths/abs.py) - * [Abs Max](maths/abs_max.py) - * [Abs Min](maths/abs_min.py) * [Add](maths/add.py) * [Addition Without Arithmetic](maths/addition_without_arithmetic.py) * [Aliquot Sum](maths/aliquot_sum.py) @@ -653,6 +653,7 @@ * [Matrix Operation](matrix/matrix_operation.py) * [Max Area Of Island](matrix/max_area_of_island.py) * [Nth Fibonacci Using Matrix Exponentiation](matrix/nth_fibonacci_using_matrix_exponentiation.py) + * [Pascal Triangle](matrix/pascal_triangle.py) * [Rotate Matrix](matrix/rotate_matrix.py) * [Searching In Sorted Matrix](matrix/searching_in_sorted_matrix.py) * [Sherman Morrison](matrix/sherman_morrison.py) @@ -674,7 +675,6 @@ ## Other * [Activity Selection](other/activity_selection.py) * [Alternative List Arrange](other/alternative_list_arrange.py) - * [Check Strong Password](other/check_strong_password.py) * [Davisb Putnamb Logemannb Loveland](other/davisb_putnamb_logemannb_loveland.py) * [Dijkstra Bankers Algorithm](other/dijkstra_bankers_algorithm.py) * [Doomsday](other/doomsday.py) @@ -689,8 +689,7 @@ * [Magicdiamondpattern](other/magicdiamondpattern.py) * [Maximum Subarray](other/maximum_subarray.py) * [Nested Brackets](other/nested_brackets.py) - * [Pascal Triangle](other/pascal_triangle.py) - * [Password Generator](other/password_generator.py) + * [Password](other/password.py) * [Quine](other/quine.py) * [Scoring Algorithm](other/scoring_algorithm.py) * [Sdes](other/sdes.py) @@ -701,6 +700,7 @@ * [Casimir Effect](physics/casimir_effect.py) * [Centripetal 
Force](physics/centripetal_force.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) + * [Hubble Parameter](physics/hubble_parameter.py) * [Ideal Gas Law](physics/ideal_gas_law.py) * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) diff --git a/data_structures/binary_tree/fenwick_tree.py b/data_structures/binary_tree/fenwick_tree.py index 96020d1427af..babd75ac4b31 100644 --- a/data_structures/binary_tree/fenwick_tree.py +++ b/data_structures/binary_tree/fenwick_tree.py @@ -46,7 +46,7 @@ def init(self, arr: list[int]) -> None: self.size = len(arr) self.tree = deepcopy(arr) for i in range(1, self.size): - j = self.next(i) + j = self.next_(i) if j < self.size: self.tree[j] += self.tree[i] @@ -64,13 +64,13 @@ def get_array(self) -> list[int]: """ arr = self.tree[:] for i in range(self.size - 1, 0, -1): - j = self.next(i) + j = self.next_(i) if j < self.size: arr[j] -= arr[i] return arr @staticmethod - def next(index: int) -> int: + def next_(index: int) -> int: return index + (index & (-index)) @staticmethod @@ -102,7 +102,7 @@ def add(self, index: int, value: int) -> None: return while index < self.size: self.tree[index] += value - index = self.next(index) + index = self.next_(index) def update(self, index: int, value: int) -> None: """ diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index 071790d18448..b14c55d9db4c 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -88,13 +88,6 @@ def build_max_heap(self, collection: Iterable[float]) -> None: for i in range(self.heap_size // 2 - 1, -1, -1): self.max_heapify(i) - def max(self) -> float: - """return the max in the heap""" - if self.heap_size >= 1: - return self.h[0] - else: - raise Exception("Empty heap") - def extract_max(self) -> float: """get and remove max from heap""" if self.heap_size >= 2: diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py index 93cf7a7e1602..61e2412aa7fd 100644 --- a/data_structures/linked_list/merge_two_lists.py +++ b/data_structures/linked_list/merge_two_lists.py @@ -13,7 +13,7 @@ @dataclass class Node: data: int - next: Node | None + next_node: Node | None class SortedLinkedList: @@ -32,7 +32,7 @@ def __iter__(self) -> Iterator[int]: node = self.head while node: yield node.data - node = node.next + node = node.next_node def __len__(self) -> int: """ diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 7053879d4512..11942db8305c 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -42,8 +42,8 @@ class _Node: """ val: Any = None - next: Deque._Node | None = None - prev: Deque._Node | None = None + next_node: Deque._Node | None = None + prev_node: Deque._Node | None = None class _Iterator: """ @@ -81,7 +81,7 @@ def __next__(self) -> Any: # finished iterating raise StopIteration val = self._cur.val - self._cur = self._cur.next + self._cur = self._cur.next_node return val @@ -128,8 +128,8 @@ def append(self, val: Any) -> None: self._len = 1 else: # connect nodes - self._back.next = node - node.prev = self._back + self._back.next_node = node + node.prev_node = self._back self._back = node # assign new back to the new node self._len += 1 @@ -170,8 +170,8 @@ def appendleft(self, val: Any) -> None: self._len = 1 else: # connect nodes - node.next = self._front - self._front.prev = node + 
node.next_node = self._front + self._front.prev_node = node self._front = node # assign new front to the new node self._len += 1 @@ -264,10 +264,9 @@ def pop(self) -> Any: assert not self.is_empty(), "Deque is empty." topop = self._back - self._back = self._back.prev # set new back - self._back.next = ( - None # drop the last node - python will deallocate memory automatically - ) + self._back = self._back.prev_node # set new back + # drop the last node - python will deallocate memory automatically + self._back.next_node = None self._len -= 1 @@ -300,8 +299,8 @@ def popleft(self) -> Any: assert not self.is_empty(), "Deque is empty." topop = self._front - self._front = self._front.next # set new front and drop the first node - self._front.prev = None + self._front = self._front.next_node # set new front and drop the first node + self._front.prev_node = None self._len -= 1 @@ -385,8 +384,8 @@ def __eq__(self, other: object) -> bool: # compare every value if me.val != oth.val: return False - me = me.next - oth = oth.next + me = me.next_node + oth = oth.next_node return True @@ -424,7 +423,7 @@ def __repr__(self) -> str: while aux is not None: # append the values in a list to display values_list.append(aux.val) - aux = aux.next + aux = aux.next_node return "[" + ", ".join(repr(val) for val in values_list) + "]" diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 079731487b3a..775e0244abb2 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -40,7 +40,6 @@ class Vector: __sub__(other: Vector): vector subtraction __mul__(other: float): scalar multiplication __mul__(other: Vector): dot product - set(components: Collection[float]): changes the vector components copy(): copies this vector and returns it component(i): gets the i-th component (0-indexed) change_component(pos: int, value: float): changes specified component @@ -119,17 +118,6 @@ def __mul__(self, other: float | Vector) -> float | Vector: else: # error case raise Exception("invalid operand!") - def set(self, components: Collection[float]) -> None: - """ - input: new components - changes the components of the vector. - replaces the components with newer one. - """ - if len(components) > 0: - self.__components = list(components) - else: - raise Exception("please give any vector") - def copy(self) -> Vector: """ copies this vector and returns it. diff --git a/other/lfu_cache.py b/other/lfu_cache.py index 2f26bb6cc74a..b68ba3a4605c 100644 --- a/other/lfu_cache.py +++ b/other/lfu_cache.py @@ -166,14 +166,14 @@ class LFUCache(Generic[T, U]): or as a function decorator. 
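# Editorial aside, not part of the patch: with A003 no longer ignored, the
# method named `set` (which shadowed the builtin set type) becomes `put`.
# Usage after this commit, mirroring the doctest that follows; it assumes the
# LFUCache class defined in this file.
cache = LFUCache(2)
cache.put(1, 1)
cache.put(2, 2)
assert cache.get(1) == 1
cache.put(3, 3)  # capacity is 2, so the least frequently used key (2) is evicted
assert cache.get(2) is None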
>>> cache = LFUCache(2) - >>> cache.set(1, 1) - >>> cache.set(2, 2) + >>> cache.put(1, 1) + >>> cache.put(2, 2) >>> cache.get(1) 1 - >>> cache.set(3, 3) + >>> cache.put(3, 3) >>> cache.get(2) is None True - >>> cache.set(4, 4) + >>> cache.put(4, 4) >>> cache.get(1) is None True >>> cache.get(3) @@ -224,7 +224,7 @@ def __contains__(self, key: T) -> bool: >>> 1 in cache False - >>> cache.set(1, 1) + >>> cache.put(1, 1) >>> 1 in cache True """ @@ -250,7 +250,7 @@ def get(self, key: T) -> U | None: self.miss += 1 return None - def set(self, key: T, value: U) -> None: + def put(self, key: T, value: U) -> None: """ Sets the value for the input key and updates the Double Linked List """ @@ -297,7 +297,7 @@ def cache_decorator_wrapper(*args: T) -> U: result = cls.decorator_function_to_instance_map[func].get(args[0]) if result is None: result = func(*args) - cls.decorator_function_to_instance_map[func].set(args[0], result) + cls.decorator_function_to_instance_map[func].put(args[0], result) return result def cache_info() -> LFUCache[T, U]: diff --git a/other/lru_cache.py b/other/lru_cache.py index aa910e487406..1e5eeac45b4e 100644 --- a/other/lru_cache.py +++ b/other/lru_cache.py @@ -150,8 +150,8 @@ class LRUCache(Generic[T, U]): >>> cache = LRUCache(2) - >>> cache.set(1, 1) - >>> cache.set(2, 2) + >>> cache.put(1, 1) + >>> cache.put(2, 2) >>> cache.get(1) 1 @@ -166,7 +166,7 @@ class LRUCache(Generic[T, U]): {1: Node: key: 1, val: 1, has next: True, has prev: True, \ 2: Node: key: 2, val: 2, has next: True, has prev: True} - >>> cache.set(3, 3) + >>> cache.put(3, 3) >>> cache.list DoubleLinkedList, @@ -182,7 +182,7 @@ class LRUCache(Generic[T, U]): >>> cache.get(2) is None True - >>> cache.set(4, 4) + >>> cache.put(4, 4) >>> cache.get(1) is None True @@ -238,7 +238,7 @@ def __contains__(self, key: T) -> bool: >>> 1 in cache False - >>> cache.set(1, 1) + >>> cache.put(1, 1) >>> 1 in cache True @@ -266,7 +266,7 @@ def get(self, key: T) -> U | None: self.miss += 1 return None - def set(self, key: T, value: U) -> None: + def put(self, key: T, value: U) -> None: """ Sets the value for the input key and updates the Double Linked List """ @@ -315,7 +315,7 @@ def cache_decorator_wrapper(*args: T) -> U: result = cls.decorator_function_to_instance_map[func].get(args[0]) if result is None: result = func(*args) - cls.decorator_function_to_instance_map[func].set(args[0], result) + cls.decorator_function_to_instance_map[func].put(args[0], result) return result def cache_info() -> LRUCache[T, U]: From db5215f60e31820dba5525e8b5fbf3e73b76b8df Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 2 Nov 2022 21:40:25 +0300 Subject: [PATCH 0713/1543] Reduce the complexity of linear_algebra/src/polynom_for_points.py (#7948) * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Lower the --max-complexity threshold in the file .flake8 * Reduce the complexity of linear_algebra/src/polynom_for_points.py * Update linear_algebra/src/polynom_for_points.py Co-authored-by: Christian Clauss * Update linear_algebra/src/polynom_for_points.py Co-authored-by: Christian Clauss * Fix Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .flake8 | 2 +- linear_algebra/src/polynom_for_points.py | 139 ++++++++++------------- 2 files changed, 62 insertions(+), 79 deletions(-) diff --git a/.flake8 b/.flake8 index 2bb36b71a271..1a62d57f9a57 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,7 @@ [flake8] max-line-length = 88 # max-complexity should be 10 
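# Editorial aside, not part of the patch: flake8's --max-complexity option is
# enforced by the bundled mccabe plugin (violations are reported as C901).
# This and the next two commits lower the threshold step by step; the usual fix,
# visible in the refactor below, is up-front validation with early returns
# instead of one large try/except block. A hypothetical minimal shape:
def _validate(coordinates: list[list[int]]) -> bool:
    if not coordinates or any(len(pair) != 2 for pair in coordinates):
        return False
    # the refactored function also rejects duplicate coordinate pairs
    return len({tuple(pair) for pair in coordinates}) == len(coordinates)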
-max-complexity = 23 +max-complexity = 21 extend-ignore = # Formatting style for `black` E203 # Whitespace before ':' diff --git a/linear_algebra/src/polynom_for_points.py b/linear_algebra/src/polynom_for_points.py index 091849542ffe..1d702deb1e99 100644 --- a/linear_algebra/src/polynom_for_points.py +++ b/linear_algebra/src/polynom_for_points.py @@ -24,96 +24,79 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: >>> print(points_to_polynomial([[1, 5], [2, 2], [3, 9]])) f(x)=x^2*5.0+x^1*-18.0+x^0*18.0 """ - try: - check = 1 - more_check = 0 - d = coordinates[0][0] - for j in range(len(coordinates)): - if j == 0: - continue - if d == coordinates[j][0]: - more_check += 1 - solved = "x=" + str(coordinates[j][0]) - if more_check == len(coordinates) - 1: - check = 2 - break - elif more_check > 0 and more_check != len(coordinates) - 1: - check = 3 - else: - check = 1 + if len(coordinates) == 0 or not all(len(pair) == 2 for pair in coordinates): + return "The program cannot work out a fitting polynomial." + + if len({tuple(pair) for pair in coordinates}) != len(coordinates): + return "The program cannot work out a fitting polynomial." - if len(coordinates) == 1 and coordinates[0][0] == 0: - check = 2 - solved = "x=0" - except Exception: - check = 3 + set_x = {x for x, _ in coordinates} + if len(set_x) == 1: + return f"x={coordinates[0][0]}" + + if len(set_x) != len(coordinates): + return "The program cannot work out a fitting polynomial." x = len(coordinates) - if check == 1: - count_of_line = 0 - matrix: list[list[float]] = [] - # put the x and x to the power values in a matrix - while count_of_line < x: - count_in_line = 0 - a = coordinates[count_of_line][0] - count_line: list[float] = [] - while count_in_line < x: - count_line.append(a ** (x - (count_in_line + 1))) - count_in_line += 1 - matrix.append(count_line) - count_of_line += 1 + count_of_line = 0 + matrix: list[list[float]] = [] + # put the x and x to the power values in a matrix + while count_of_line < x: + count_in_line = 0 + a = coordinates[count_of_line][0] + count_line: list[float] = [] + while count_in_line < x: + count_line.append(a ** (x - (count_in_line + 1))) + count_in_line += 1 + matrix.append(count_line) + count_of_line += 1 - count_of_line = 0 - # put the y values into a vector - vector: list[float] = [] - while count_of_line < x: - vector.append(coordinates[count_of_line][1]) - count_of_line += 1 + count_of_line = 0 + # put the y values into a vector + vector: list[float] = [] + while count_of_line < x: + vector.append(coordinates[count_of_line][1]) + count_of_line += 1 - count = 0 + count = 0 - while count < x: - zahlen = 0 - while zahlen < x: - if count == zahlen: - zahlen += 1 - if zahlen == x: - break - bruch = matrix[zahlen][count] / matrix[count][count] - for counting_columns, item in enumerate(matrix[count]): - # manipulating all the values in the matrix - matrix[zahlen][counting_columns] -= item * bruch - # manipulating the values in the vector - vector[zahlen] -= vector[count] * bruch + while count < x: + zahlen = 0 + while zahlen < x: + if count == zahlen: zahlen += 1 - count += 1 - - count = 0 - # make solutions - solution: list[str] = [] - while count < x: - solution.append(str(vector[count] / matrix[count][count])) - count += 1 + if zahlen == x: + break + bruch = matrix[zahlen][count] / matrix[count][count] + for counting_columns, item in enumerate(matrix[count]): + # manipulating all the values in the matrix + matrix[zahlen][counting_columns] -= item * bruch + # manipulating the values in the 
vector + vector[zahlen] -= vector[count] * bruch + zahlen += 1 + count += 1 - count = 0 - solved = "f(x)=" + count = 0 + # make solutions + solution: list[str] = [] + while count < x: + solution.append(str(vector[count] / matrix[count][count])) + count += 1 - while count < x: - remove_e: list[str] = solution[count].split("E") - if len(remove_e) > 1: - solution[count] = remove_e[0] + "*10^" + remove_e[1] - solved += "x^" + str(x - (count + 1)) + "*" + str(solution[count]) - if count + 1 != x: - solved += "+" - count += 1 + count = 0 + solved = "f(x)=" - return solved + while count < x: + remove_e: list[str] = solution[count].split("E") + if len(remove_e) > 1: + solution[count] = f"{remove_e[0]}*10^{remove_e[1]}" + solved += f"x^{x - (count + 1)}*{solution[count]}" + if count + 1 != x: + solved += "+" + count += 1 - elif check == 2: - return solved - else: - return "The program cannot work out a fitting polynomial." + return solved if __name__ == "__main__": From a02de964d137b803aad9bb9c9d7096eff62539fd Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 3 Nov 2022 00:16:44 +0300 Subject: [PATCH 0714/1543] Reduce the complexity of graphs/minimum_spanning_tree_prims.py (#7952) * Lower the --max-complexity threshold in the file .flake8 * Add test * Reduce the complexity of graphs/minimum_spanning_tree_prims.py * Remove backslashes * Remove # noqa: E741 * Fix the flake8 E741 issues * Refactor * Fix --- .flake8 | 2 +- graphs/minimum_spanning_tree_prims.py | 127 +++++++++++++++----------- 2 files changed, 76 insertions(+), 53 deletions(-) diff --git a/.flake8 b/.flake8 index 1a62d57f9a57..834d1f63d13e 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,7 @@ [flake8] max-line-length = 88 # max-complexity should be 10 -max-complexity = 21 +max-complexity = 20 extend-ignore = # Formatting style for `black` E203 # Whitespace before ':' diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 5b2eaa4bff40..f577866f0da6 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -2,40 +2,45 @@ from collections import defaultdict -def prisms_algorithm(l): # noqa: E741 +class Heap: + def __init__(self): + self.node_position = [] - node_position = [] + def get_position(self, vertex): + return self.node_position[vertex] - def get_position(vertex): - return node_position[vertex] + def set_position(self, vertex, pos): + self.node_position[vertex] = pos - def set_position(vertex, pos): - node_position[vertex] = pos - - def top_to_bottom(heap, start, size, positions): + def top_to_bottom(self, heap, start, size, positions): if start > size // 2 - 1: return else: if 2 * start + 2 >= size: - m = 2 * start + 1 + smallest_child = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: - m = 2 * start + 1 + smallest_child = 2 * start + 1 else: - m = 2 * start + 2 - if heap[m] < heap[start]: - temp, temp1 = heap[m], positions[m] - heap[m], positions[m] = heap[start], positions[start] + smallest_child = 2 * start + 2 + if heap[smallest_child] < heap[start]: + temp, temp1 = heap[smallest_child], positions[smallest_child] + heap[smallest_child], positions[smallest_child] = ( + heap[start], + positions[start], + ) heap[start], positions[start] = temp, temp1 - temp = get_position(positions[m]) - set_position(positions[m], get_position(positions[start])) - set_position(positions[start], temp) + temp = self.get_position(positions[smallest_child]) + self.set_position( + positions[smallest_child], self.get_position(positions[start]) + ) + 
self.set_position(positions[start], temp) - top_to_bottom(heap, m, size, positions) + self.top_to_bottom(heap, smallest_child, size, positions) # Update function if value of any node in min-heap decreases - def bottom_to_top(val, index, heap, position): + def bottom_to_top(self, val, index, heap, position): temp = position[index] while index != 0: @@ -47,70 +52,88 @@ def bottom_to_top(val, index, heap, position): if val < heap[parent]: heap[index] = heap[parent] position[index] = position[parent] - set_position(position[parent], index) + self.set_position(position[parent], index) else: heap[index] = val position[index] = temp - set_position(temp, index) + self.set_position(temp, index) break index = parent else: heap[0] = val position[0] = temp - set_position(temp, 0) + self.set_position(temp, 0) - def heapify(heap, positions): + def heapify(self, heap, positions): start = len(heap) // 2 - 1 for i in range(start, -1, -1): - top_to_bottom(heap, i, len(heap), positions) + self.top_to_bottom(heap, i, len(heap), positions) - def delete_minimum(heap, positions): + def delete_minimum(self, heap, positions): temp = positions[0] heap[0] = sys.maxsize - top_to_bottom(heap, 0, len(heap), positions) + self.top_to_bottom(heap, 0, len(heap), positions) return temp - visited = [0 for i in range(len(l))] - nbr_tv = [-1 for i in range(len(l))] # Neighboring Tree Vertex of selected vertex + +def prisms_algorithm(adjacency_list): + """ + >>> adjacency_list = {0: [[1, 1], [3, 3]], + ... 1: [[0, 1], [2, 6], [3, 5], [4, 1]], + ... 2: [[1, 6], [4, 5], [5, 2]], + ... 3: [[0, 3], [1, 5], [4, 1]], + ... 4: [[1, 1], [2, 5], [3, 1], [5, 4]], + ... 5: [[2, 2], [4, 4]]} + >>> prisms_algorithm(adjacency_list) + [(0, 1), (1, 4), (4, 3), (4, 5), (5, 2)] + """ + + heap = Heap() + + visited = [0] * len(adjacency_list) + nbr_tv = [-1] * len(adjacency_list) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph distance_tv = [] # Heap of Distance of vertices from their neighboring vertex positions = [] - for x in range(len(l)): - p = sys.maxsize - distance_tv.append(p) - positions.append(x) - node_position.append(x) + for vertex in range(len(adjacency_list)): + distance_tv.append(sys.maxsize) + positions.append(vertex) + heap.node_position.append(vertex) tree_edges = [] visited[0] = 1 distance_tv[0] = sys.maxsize - for x in l[0]: - nbr_tv[x[0]] = 0 - distance_tv[x[0]] = x[1] - heapify(distance_tv, positions) + for neighbor, distance in adjacency_list[0]: + nbr_tv[neighbor] = 0 + distance_tv[neighbor] = distance + heap.heapify(distance_tv, positions) - for _ in range(1, len(l)): - vertex = delete_minimum(distance_tv, positions) + for _ in range(1, len(adjacency_list)): + vertex = heap.delete_minimum(distance_tv, positions) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex)) visited[vertex] = 1 - for v in l[vertex]: - if visited[v[0]] == 0 and v[1] < distance_tv[get_position(v[0])]: - distance_tv[get_position(v[0])] = v[1] - bottom_to_top(v[1], get_position(v[0]), distance_tv, positions) - nbr_tv[v[0]] = vertex + for neighbor, distance in adjacency_list[vertex]: + if ( + visited[neighbor] == 0 + and distance < distance_tv[heap.get_position(neighbor)] + ): + distance_tv[heap.get_position(neighbor)] = distance + heap.bottom_to_top( + distance, heap.get_position(neighbor), distance_tv, positions + ) + nbr_tv[neighbor] = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- 
> - n = int(input("Enter number of vertices: ").strip()) - e = int(input("Enter number of edges: ").strip()) - adjlist = defaultdict(list) - for x in range(e): - l = [int(x) for x in input().strip().split()] # noqa: E741 - adjlist[l[0]].append([l[1], l[2]]) - adjlist[l[1]].append([l[0], l[2]]) - print(prisms_algorithm(adjlist)) + edges_number = int(input("Enter number of edges: ").strip()) + adjacency_list = defaultdict(list) + for _ in range(edges_number): + edge = [int(x) for x in input().strip().split()] + adjacency_list[edge[0]].append([edge[1], edge[2]]) + adjacency_list[edge[1]].append([edge[0], edge[2]]) + print(prisms_algorithm(adjacency_list)) From 978414bd50ae294352e0e4d93566f49074450857 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 3 Nov 2022 01:56:30 +0300 Subject: [PATCH 0715/1543] Reduce the complexity of other/graham_scan.py (#7953) * Reduce the complexity of other/graham_scan.py * Lower the --max-complexity threshold in the file .flake8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix tests * Update other/graham_scan.py Co-authored-by: Christian Clauss * Update graham_scan.py * Update other/graham_scan.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .flake8 | 2 +- other/graham_scan.py | 150 ++++++++++++++++++++++--------------------- 2 files changed, 77 insertions(+), 75 deletions(-) diff --git a/.flake8 b/.flake8 index 834d1f63d13e..2f74f421d020 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,7 @@ [flake8] max-line-length = 88 # max-complexity should be 10 -max-complexity = 20 +max-complexity = 19 extend-ignore = # Formatting style for `black` E203 # Whitespace before ':' diff --git a/other/graham_scan.py b/other/graham_scan.py index 91bb6812fefc..8e83bfcf4c49 100644 --- a/other/graham_scan.py +++ b/other/graham_scan.py @@ -14,6 +14,82 @@ from sys import maxsize +# traversal from the lowest and the most left point in anti-clockwise direction +# if direction gets right, the previous point is not the convex hull. 
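# Editorial aside, not part of the patch: check_direction() below decides
# left/straight/right with atan2 angles; a common alternative formulation is
# the sign of the 2-D cross product (> 0 left turn, < 0 right turn, 0 collinear).
def cross_sign(o: tuple[int, int], a: tuple[int, int], b: tuple[int, int]) -> int:
    # cross product of vectors o->a and o->b
    return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])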
+class Direction(Enum): + left = 1 + straight = 2 + right = 3 + + def __repr__(self): + return f"{self.__class__.__name__}.{self.name}" + + +def angle_comparer(point: tuple[int, int], minx: int, miny: int) -> float: + """Return the angle toward to point from (minx, miny) + + :param point: The target point + minx: The starting point's x + miny: The starting point's y + :return: the angle + + Examples: + >>> angle_comparer((1,1), 0, 0) + 45.0 + + >>> angle_comparer((100,1), 10, 10) + -5.710593137499642 + + >>> angle_comparer((5,5), 2, 3) + 33.690067525979785 + """ + # sort the points accorgind to the angle from the lowest and the most left point + x, y = point + return degrees(atan2(y - miny, x - minx)) + + +def check_direction( + starting: tuple[int, int], via: tuple[int, int], target: tuple[int, int] +) -> Direction: + """Return the direction toward to the line from via to target from starting + + :param starting: The starting point + via: The via point + target: The target point + :return: the Direction + + Examples: + >>> check_direction((1,1), (2,2), (3,3)) + Direction.straight + + >>> check_direction((60,1), (-50,199), (30,2)) + Direction.left + + >>> check_direction((0,0), (5,5), (10,0)) + Direction.right + """ + x0, y0 = starting + x1, y1 = via + x2, y2 = target + via_angle = degrees(atan2(y1 - y0, x1 - x0)) + via_angle %= 360 + target_angle = degrees(atan2(y2 - y0, x2 - x0)) + target_angle %= 360 + # t- + # \ \ + # \ v + # \| + # s + # via_angle is always lower than target_angle, if direction is left. + # If they are same, it means they are on a same line of convex hull. + if target_angle > via_angle: + return Direction.left + elif target_angle == via_angle: + return Direction.straight + else: + return Direction.right + + def graham_scan(points: list[tuple[int, int]]) -> list[tuple[int, int]]: """Pure implementation of graham scan algorithm in Python @@ -57,86 +133,12 @@ def graham_scan(points: list[tuple[int, int]]) -> list[tuple[int, int]]: # remove the lowest and the most left point from points for preparing for sort points.pop(minidx) - def angle_comparer(point: tuple[int, int], minx: int, miny: int) -> float: - """Return the angle toward to point from (minx, miny) - - :param point: The target point - minx: The starting point's x - miny: The starting point's y - :return: the angle - - Examples: - >>> angle_comparer((1,1), 0, 0) - 45.0 - - >>> angle_comparer((100,1), 10, 10) - -5.710593137499642 - - >>> angle_comparer((5,5), 2, 3) - 33.690067525979785 - """ - # sort the points accorgind to the angle from the lowest and the most left point - x = point[0] - y = point[1] - angle = degrees(atan2(y - miny, x - minx)) - return angle - sorted_points = sorted(points, key=lambda point: angle_comparer(point, minx, miny)) # This insert actually costs complexity, # and you should instead add (minx, miny) into stack later. # I'm using insert just for easy understanding. sorted_points.insert(0, (minx, miny)) - # traversal from the lowest and the most left point in anti-clockwise direction - # if direction gets right, the previous point is not the convex hull. 
- class Direction(Enum): - left = 1 - straight = 2 - right = 3 - - def check_direction( - starting: tuple[int, int], via: tuple[int, int], target: tuple[int, int] - ) -> Direction: - """Return the direction toward to the line from via to target from starting - - :param starting: The starting point - via: The via point - target: The target point - :return: the Direction - - Examples: - >>> check_direction((1,1), (2,2), (3,3)) - Direction.straight - - >>> check_direction((60,1), (-50,199), (30,2)) - Direction.left - - >>> check_direction((0,0), (5,5), (10,0)) - Direction.right - """ - x0, y0 = starting - x1, y1 = via - x2, y2 = target - via_angle = degrees(atan2(y1 - y0, x1 - x0)) - if via_angle < 0: - via_angle += 360 - target_angle = degrees(atan2(y2 - y0, x2 - x0)) - if target_angle < 0: - target_angle += 360 - # t- - # \ \ - # \ v - # \| - # s - # via_angle is always lower than target_angle, if direction is left. - # If they are same, it means they are on a same line of convex hull. - if target_angle > via_angle: - return Direction.left - elif target_angle == via_angle: - return Direction.straight - else: - return Direction.right - stack: deque[tuple[int, int]] = deque() stack.append(sorted_points[0]) stack.append(sorted_points[1]) From 3e1cb70abf9997af3a4903f77cb3506a301de893 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Fri, 4 Nov 2022 00:03:37 +0400 Subject: [PATCH 0716/1543] add algorithm to check binary search tree (#7947) * add algorithm to check binary search tree * add tests * add leetcode link * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix typehints * typehints fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/binary_tree/is_bst.py Co-authored-by: Caeden Perelli-Harris * Update data_structures/binary_tree/is_bst.py Co-authored-by: Caeden Perelli-Harris * Update data_structures/binary_tree/is_bst.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix flake8 * fix typehint * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add TreeNode resolving * Update data_structures/binary_tree/is_bst.py Co-authored-by: Caeden Perelli-Harris * Update data_structures/binary_tree/is_bst.py Co-authored-by: Caeden Perelli-Harris * Update data_structures/binary_tree/is_bst.py Co-authored-by: Caeden Perelli-Harris * Update data_structures/binary_tree/is_bst.py Co-authored-by: Christian Clauss * change func name * Update data_structures/binary_tree/is_bst.py Co-authored-by: Christian Clauss * review notes fixes. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix flake8 * fix flake 8 * fix doctest * Update data_structures/binary_tree/is_bst.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss --- data_structures/binary_tree/is_bst.py | 131 ++++++++++++++++++++++++++ 1 file changed, 131 insertions(+) create mode 100644 data_structures/binary_tree/is_bst.py diff --git a/data_structures/binary_tree/is_bst.py b/data_structures/binary_tree/is_bst.py new file mode 100644 index 000000000000..0b2ef8c9ffde --- /dev/null +++ b/data_structures/binary_tree/is_bst.py @@ -0,0 +1,131 @@ +""" +Author : Alexander Pantyukhin +Date : November 2, 2022 + +Task: +Given the root of a binary tree, determine if it is a valid binary search +tree (BST). + +A valid binary search tree is defined as follows: + +- The left subtree of a node contains only nodes with keys less than the node's key. +- The right subtree of a node contains only nodes with keys greater than the node's key. +- Both the left and right subtrees must also be binary search trees. + +Implementation notes: +Depth-first search approach. + +leetcode: https://leetcode.com/problems/validate-binary-search-tree/ + +Let n is the number of nodes in tree +Runtime: O(n) +Space: O(1) +""" + +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass +class TreeNode: + data: float + left: TreeNode | None = None + right: TreeNode | None = None + + +def is_binary_search_tree(root: TreeNode | None) -> bool: + """ + >>> is_binary_search_tree(TreeNode(data=2, + ... left=TreeNode(data=1), + ... right=TreeNode(data=3)) + ... ) + True + + >>> is_binary_search_tree(TreeNode(data=0, + ... left=TreeNode(data=-11), + ... right=TreeNode(data=3)) + ... ) + True + + >>> is_binary_search_tree(TreeNode(data=5, + ... left=TreeNode(data=1), + ... right=TreeNode(data=4, left=TreeNode(data=3))) + ... ) + False + + >>> is_binary_search_tree(TreeNode(data='a', + ... left=TreeNode(data=1), + ... right=TreeNode(data=4, left=TreeNode(data=3))) + ... ) + Traceback (most recent call last): + ... + ValueError: Each node should be type of TreeNode and data should be float. + + >>> is_binary_search_tree(TreeNode(data=2, + ... left=TreeNode([]), + ... right=TreeNode(data=4, left=TreeNode(data=3))) + ... ) + Traceback (most recent call last): + ... + ValueError: Each node should be type of TreeNode and data should be float. + """ + + # Validation + def is_valid_tree(node: TreeNode | None) -> bool: + """ + >>> is_valid_tree(None) + True + >>> is_valid_tree('abc') + False + >>> is_valid_tree(TreeNode(data='not a float')) + False + >>> is_valid_tree(TreeNode(data=1, left=TreeNode('123'))) + False + """ + if node is None: + return True + + if not isinstance(node, TreeNode): + return False + + try: + float(node.data) + except (TypeError, ValueError): + return False + + return is_valid_tree(node.left) and is_valid_tree(node.right) + + if not is_valid_tree(root): + raise ValueError( + "Each node should be type of TreeNode and data should be float." 
+ ) + + def is_binary_search_tree_recursive_check( + node: TreeNode | None, left_bound: float, right_bound: float + ) -> bool: + """ + >>> is_binary_search_tree_recursive_check(None) + True + >>> is_binary_search_tree_recursive_check(TreeNode(data=1), 10, 20) + False + """ + + if node is None: + return True + + return ( + left_bound < node.data < right_bound + and is_binary_search_tree_recursive_check(node.left, left_bound, node.data) + and is_binary_search_tree_recursive_check( + node.right, node.data, right_bound + ) + ) + + return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf")) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 7f1a5521f4b73d15df409a81f3da48427f9c6cdc Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Fri, 4 Nov 2022 11:30:32 +0400 Subject: [PATCH 0717/1543] add prefix sum (#7959) * add prefix sum * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 + data_structures/arrays/prefix_sum.py | 78 ++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+) create mode 100644 data_structures/arrays/prefix_sum.py diff --git a/DIRECTORY.md b/DIRECTORY.md index a2112bcfb7b4..76c7f9dea4e3 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -162,6 +162,7 @@ ## Data Structures * Arrays * [Permutations](data_structures/arrays/permutations.py) + * [Prefix Sum](data_structures/arrays/prefix_sum.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) @@ -174,6 +175,7 @@ * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) + * [Is Bst](data_structures/binary_tree/is_bst.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) diff --git a/data_structures/arrays/prefix_sum.py b/data_structures/arrays/prefix_sum.py new file mode 100644 index 000000000000..2243a5308937 --- /dev/null +++ b/data_structures/arrays/prefix_sum.py @@ -0,0 +1,78 @@ +""" +Author : Alexander Pantyukhin +Date : November 3, 2022 + +Implement the class of prefix sum with useful functions based on it. + +""" + + +class PrefixSum: + def __init__(self, array: list[int]) -> None: + len_array = len(array) + self.prefix_sum = [0] * len_array + + if len_array > 0: + self.prefix_sum[0] = array[0] + + for i in range(1, len_array): + self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i] + + def get_sum(self, start: int, end: int) -> int: + """ + The function returns the sum of array from the start to the end indexes. + Runtime : O(1) + Space: O(1) + + >>> PrefixSum([1,2,3]).get_sum(0, 2) + 6 + >>> PrefixSum([1,2,3]).get_sum(1, 2) + 5 + >>> PrefixSum([1,2,3]).get_sum(2, 2) + 3 + >>> PrefixSum([1,2,3]).get_sum(2, 3) + Traceback (most recent call last): + ... 
+ IndexError: list index out of range + """ + if start == 0: + return self.prefix_sum[end] + + return self.prefix_sum[end] - self.prefix_sum[start - 1] + + def contains_sum(self, target_sum: int) -> bool: + """ + The function returns True if array contains the target_sum, + False otherwise. + + Runtime : O(n) + Space: O(n) + + >>> PrefixSum([1,2,3]).contains_sum(6) + True + >>> PrefixSum([1,2,3]).contains_sum(5) + True + >>> PrefixSum([1,2,3]).contains_sum(3) + True + >>> PrefixSum([1,2,3]).contains_sum(4) + False + >>> PrefixSum([1,2,3]).contains_sum(7) + False + >>> PrefixSum([1,-2,3]).contains_sum(2) + True + """ + + sums = {0} + for sum_item in self.prefix_sum: + if sum_item - target_sum in sums: + return True + + sums.add(sum_item) + + return False + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 51708530b6a46a5e53d12e750521a11c6bf5c986 Mon Sep 17 00:00:00 2001 From: Sanders Lin Date: Sun, 6 Nov 2022 17:35:40 +0800 Subject: [PATCH 0718/1543] Update 3n_plus_1.py (#7966) * Update 3n_plus_1.py 1. Minor issue with ValueError message: Given integer should be positive, not greater than 1, as 1 is allowed. 2. += calls underlying list extend method which might be slower. Calling apend seems more appropriate. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/3n_plus_1.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/maths/3n_plus_1.py b/maths/3n_plus_1.py index e455a158e619..59fdec48e100 100644 --- a/maths/3n_plus_1.py +++ b/maths/3n_plus_1.py @@ -11,15 +11,15 @@ def n31(a: int) -> tuple[list[int], int]: if not isinstance(a, int): raise TypeError(f"Must be int, not {type(a).__name__}") if a < 1: - raise ValueError(f"Given integer must be greater than 1, not {a}") + raise ValueError(f"Given integer must be positive, not {a}") path = [a] while a != 1: if a % 2 == 0: - a = a // 2 + a //= 2 else: a = 3 * a + 1 - path += [a] + path.append(a) return path, len(path) From daa1c7529ac6491338adb81622d5041a4ba1f446 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 6 Nov 2022 14:54:44 +0000 Subject: [PATCH 0719/1543] Raise error not string (#7945) * ci: Add `B023` to `.flake8` ignores * refactor: Return `bool`/raise Exception * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * revert: Remove previous branch commit * Update data_structures/binary_tree/segment_tree_other.py Co-authored-by: Christian Clauss * feat: Apply `__repr__` changes * chore: Fix failing tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/binary_tree/segment_tree_other.py Co-authored-by: Christian Clauss * test: Fix doctests * random.choice(population_score[:N_SELECTED])[0] * Update basic_string.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- boolean_algebra/quine_mc_cluskey.py | 11 +- ciphers/shuffled_shift_cipher.py | 2 +- computer_vision/harris_corner.py | 3 +- .../binary_tree/segment_tree_other.py | 121 +++++++++--------- data_structures/binary_tree/wavelet_tree.py | 6 +- .../linked_list/doubly_linked_list.py | 2 +- data_structures/queue/double_ended_queue.py | 2 +- graphs/breadth_first_search_shortest_path.py | 8 +- graphs/page_rank.py | 2 +- linear_algebra/src/polynom_for_points.py | 14 +- 
maths/monte_carlo_dice.py | 3 - matrix/cramers_rule_2x2.py | 16 ++- other/password.py | 38 +++--- strings/dna.py | 15 ++- 14 files changed, 123 insertions(+), 120 deletions(-) diff --git a/boolean_algebra/quine_mc_cluskey.py b/boolean_algebra/quine_mc_cluskey.py index 5bd7117bb3e7..6788dfb28ba1 100644 --- a/boolean_algebra/quine_mc_cluskey.py +++ b/boolean_algebra/quine_mc_cluskey.py @@ -1,15 +1,16 @@ from __future__ import annotations from collections.abc import Sequence +from typing import Literal -def compare_string(string1: str, string2: str) -> str: +def compare_string(string1: str, string2: str) -> str | Literal[False]: """ >>> compare_string('0010','0110') '0_10' >>> compare_string('0110','1101') - 'X' + False """ list1 = list(string1) list2 = list(string2) @@ -19,7 +20,7 @@ def compare_string(string1: str, string2: str) -> str: count += 1 list1[i] = "_" if count > 1: - return "X" + return False else: return "".join(list1) @@ -36,10 +37,10 @@ def check(binary: list[str]) -> list[str]: for i in range(len(binary)): for j in range(i + 1, len(binary)): k = compare_string(binary[i], binary[j]) - if k != "X": + if k is False: check1[i] = "*" check1[j] = "*" - temp.append(k) + temp.append("X") for i in range(len(binary)): if check1[i] == "$": pi.append(binary[i]) diff --git a/ciphers/shuffled_shift_cipher.py b/ciphers/shuffled_shift_cipher.py index 714acd4b1afc..08b2cab97c69 100644 --- a/ciphers/shuffled_shift_cipher.py +++ b/ciphers/shuffled_shift_cipher.py @@ -42,7 +42,7 @@ def __str__(self) -> str: """ :return: passcode of the cipher object """ - return "Passcode is: " + "".join(self.__passcode) + return "".join(self.__passcode) def __neg_pos(self, iterlist: list[int]) -> list[int]: """ diff --git a/computer_vision/harris_corner.py b/computer_vision/harris_corner.py index 7850085f8935..c8905bb6a9cd 100644 --- a/computer_vision/harris_corner.py +++ b/computer_vision/harris_corner.py @@ -22,8 +22,7 @@ def __init__(self, k: float, window_size: int): raise ValueError("invalid k value") def __str__(self) -> str: - - return f"Harris Corner detection with k : {self.k}" + return str(self.k) def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]: diff --git a/data_structures/binary_tree/segment_tree_other.py b/data_structures/binary_tree/segment_tree_other.py index 90afd7ca8b71..cc77c4951f1a 100644 --- a/data_structures/binary_tree/segment_tree_other.py +++ b/data_structures/binary_tree/segment_tree_other.py @@ -16,40 +16,36 @@ def __init__(self, start, end, val, left=None, right=None): self.left = left self.right = right - def __str__(self): - return f"val: {self.val}, start: {self.start}, end: {self.end}" + def __repr__(self): + return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})" class SegmentTree: """ >>> import operator >>> num_arr = SegmentTree([2, 1, 5, 3, 4], operator.add) - >>> for node in num_arr.traverse(): - ... print(node) - ... 
- val: 15, start: 0, end: 4 - val: 8, start: 0, end: 2 - val: 7, start: 3, end: 4 - val: 3, start: 0, end: 1 - val: 5, start: 2, end: 2 - val: 3, start: 3, end: 3 - val: 4, start: 4, end: 4 - val: 2, start: 0, end: 0 - val: 1, start: 1, end: 1 + >>> tuple(num_arr.traverse()) # doctest: +NORMALIZE_WHITESPACE + (SegmentTreeNode(start=0, end=4, val=15), + SegmentTreeNode(start=0, end=2, val=8), + SegmentTreeNode(start=3, end=4, val=7), + SegmentTreeNode(start=0, end=1, val=3), + SegmentTreeNode(start=2, end=2, val=5), + SegmentTreeNode(start=3, end=3, val=3), + SegmentTreeNode(start=4, end=4, val=4), + SegmentTreeNode(start=0, end=0, val=2), + SegmentTreeNode(start=1, end=1, val=1)) >>> >>> num_arr.update(1, 5) - >>> for node in num_arr.traverse(): - ... print(node) - ... - val: 19, start: 0, end: 4 - val: 12, start: 0, end: 2 - val: 7, start: 3, end: 4 - val: 7, start: 0, end: 1 - val: 5, start: 2, end: 2 - val: 3, start: 3, end: 3 - val: 4, start: 4, end: 4 - val: 2, start: 0, end: 0 - val: 5, start: 1, end: 1 + >>> tuple(num_arr.traverse()) # doctest: +NORMALIZE_WHITESPACE + (SegmentTreeNode(start=0, end=4, val=19), + SegmentTreeNode(start=0, end=2, val=12), + SegmentTreeNode(start=3, end=4, val=7), + SegmentTreeNode(start=0, end=1, val=7), + SegmentTreeNode(start=2, end=2, val=5), + SegmentTreeNode(start=3, end=3, val=3), + SegmentTreeNode(start=4, end=4, val=4), + SegmentTreeNode(start=0, end=0, val=2), + SegmentTreeNode(start=1, end=1, val=5)) >>> >>> num_arr.query_range(3, 4) 7 @@ -62,29 +58,29 @@ class SegmentTree: >>> for node in max_arr.traverse(): ... print(node) ... - val: 5, start: 0, end: 4 - val: 5, start: 0, end: 2 - val: 4, start: 3, end: 4 - val: 2, start: 0, end: 1 - val: 5, start: 2, end: 2 - val: 3, start: 3, end: 3 - val: 4, start: 4, end: 4 - val: 2, start: 0, end: 0 - val: 1, start: 1, end: 1 + SegmentTreeNode(start=0, end=4, val=5) + SegmentTreeNode(start=0, end=2, val=5) + SegmentTreeNode(start=3, end=4, val=4) + SegmentTreeNode(start=0, end=1, val=2) + SegmentTreeNode(start=2, end=2, val=5) + SegmentTreeNode(start=3, end=3, val=3) + SegmentTreeNode(start=4, end=4, val=4) + SegmentTreeNode(start=0, end=0, val=2) + SegmentTreeNode(start=1, end=1, val=1) >>> >>> max_arr.update(1, 5) >>> for node in max_arr.traverse(): ... print(node) ... - val: 5, start: 0, end: 4 - val: 5, start: 0, end: 2 - val: 4, start: 3, end: 4 - val: 5, start: 0, end: 1 - val: 5, start: 2, end: 2 - val: 3, start: 3, end: 3 - val: 4, start: 4, end: 4 - val: 2, start: 0, end: 0 - val: 5, start: 1, end: 1 + SegmentTreeNode(start=0, end=4, val=5) + SegmentTreeNode(start=0, end=2, val=5) + SegmentTreeNode(start=3, end=4, val=4) + SegmentTreeNode(start=0, end=1, val=5) + SegmentTreeNode(start=2, end=2, val=5) + SegmentTreeNode(start=3, end=3, val=3) + SegmentTreeNode(start=4, end=4, val=4) + SegmentTreeNode(start=0, end=0, val=2) + SegmentTreeNode(start=1, end=1, val=5) >>> >>> max_arr.query_range(3, 4) 4 @@ -97,29 +93,29 @@ class SegmentTree: >>> for node in min_arr.traverse(): ... print(node) ... 
- val: 1, start: 0, end: 4 - val: 1, start: 0, end: 2 - val: 3, start: 3, end: 4 - val: 1, start: 0, end: 1 - val: 5, start: 2, end: 2 - val: 3, start: 3, end: 3 - val: 4, start: 4, end: 4 - val: 2, start: 0, end: 0 - val: 1, start: 1, end: 1 + SegmentTreeNode(start=0, end=4, val=1) + SegmentTreeNode(start=0, end=2, val=1) + SegmentTreeNode(start=3, end=4, val=3) + SegmentTreeNode(start=0, end=1, val=1) + SegmentTreeNode(start=2, end=2, val=5) + SegmentTreeNode(start=3, end=3, val=3) + SegmentTreeNode(start=4, end=4, val=4) + SegmentTreeNode(start=0, end=0, val=2) + SegmentTreeNode(start=1, end=1, val=1) >>> >>> min_arr.update(1, 5) >>> for node in min_arr.traverse(): ... print(node) ... - val: 2, start: 0, end: 4 - val: 2, start: 0, end: 2 - val: 3, start: 3, end: 4 - val: 2, start: 0, end: 1 - val: 5, start: 2, end: 2 - val: 3, start: 3, end: 3 - val: 4, start: 4, end: 4 - val: 2, start: 0, end: 0 - val: 5, start: 1, end: 1 + SegmentTreeNode(start=0, end=4, val=2) + SegmentTreeNode(start=0, end=2, val=2) + SegmentTreeNode(start=3, end=4, val=3) + SegmentTreeNode(start=0, end=1, val=2) + SegmentTreeNode(start=2, end=2, val=5) + SegmentTreeNode(start=3, end=3, val=3) + SegmentTreeNode(start=4, end=4, val=4) + SegmentTreeNode(start=0, end=0, val=2) + SegmentTreeNode(start=1, end=1, val=5) >>> >>> min_arr.query_range(3, 4) 3 @@ -128,7 +124,6 @@ class SegmentTree: >>> min_arr.query_range(1, 3) 3 >>> - """ def __init__(self, collection: Sequence, function): diff --git a/data_structures/binary_tree/wavelet_tree.py b/data_structures/binary_tree/wavelet_tree.py index 8d7145189018..041e140f5b15 100644 --- a/data_structures/binary_tree/wavelet_tree.py +++ b/data_structures/binary_tree/wavelet_tree.py @@ -24,11 +24,11 @@ def __repr__(self) -> str: """ >>> node = Node(length=27) >>> repr(node) - 'min_value: -1, max_value: -1' + 'Node(min_value=-1 max_value=-1)' >>> repr(node) == str(node) True """ - return f"min_value: {self.minn}, max_value: {self.maxx}" + return f"Node(min_value={self.minn} max_value={self.maxx})" def build_tree(arr: list[int]) -> Node | None: @@ -37,7 +37,7 @@ def build_tree(arr: list[int]) -> Node | None: of the constructed tree >>> build_tree(test_array) - min_value: 0, max_value: 9 + Node(min_value=0 max_value=9) """ root = Node(len(arr)) root.minn, root.maxx = min(arr), max(arr) diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 90b6b6eb2a32..6c81493fff85 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -159,7 +159,7 @@ def delete(self, data) -> str: if current.next: current = current.next else: # We have reached the end an no value matches - return "No data matching given value" + raise ValueError("No data matching given value") if current == self.head: self.delete_head() diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 11942db8305c..637b7f62fd2c 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -425,7 +425,7 @@ def __repr__(self) -> str: values_list.append(aux.val) aux = aux.next_node - return "[" + ", ".join(repr(val) for val in values_list) + "]" + return f"[{', '.join(repr(val) for val in values_list)}]" if __name__ == "__main__": diff --git a/graphs/breadth_first_search_shortest_path.py b/graphs/breadth_first_search_shortest_path.py index 697a8c634859..cb21076f91d2 100644 --- 
a/graphs/breadth_first_search_shortest_path.py +++ b/graphs/breadth_first_search_shortest_path.py @@ -58,7 +58,9 @@ def shortest_path(self, target_vertex: str) -> str: Case 1 - No path is found. >>> g.shortest_path("Foo") - 'No path from vertex:G to vertex:Foo' + Traceback (most recent call last): + ... + ValueError: No path from vertex: G to vertex: Foo Case 2 - The path is found. >>> g.shortest_path("D") @@ -71,7 +73,9 @@ def shortest_path(self, target_vertex: str) -> str: target_vertex_parent = self.parent.get(target_vertex) if target_vertex_parent is None: - return f"No path from vertex:{self.source_vertex} to vertex:{target_vertex}" + raise ValueError( + f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" + ) return self.shortest_path(target_vertex_parent) + f"->{target_vertex}" diff --git a/graphs/page_rank.py b/graphs/page_rank.py index e1af35b34749..b9e4c4a72a93 100644 --- a/graphs/page_rank.py +++ b/graphs/page_rank.py @@ -27,7 +27,7 @@ def add_outbound(self, node): self.outbound.append(node) def __repr__(self): - return f"Node {self.name}: Inbound: {self.inbound} ; Outbound: {self.outbound}" + return f"" def page_rank(nodes, limit=3, d=0.85): diff --git a/linear_algebra/src/polynom_for_points.py b/linear_algebra/src/polynom_for_points.py index 1d702deb1e99..f5e3db0cbb13 100644 --- a/linear_algebra/src/polynom_for_points.py +++ b/linear_algebra/src/polynom_for_points.py @@ -4,9 +4,13 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: number of points you want to use >>> print(points_to_polynomial([])) - The program cannot work out a fitting polynomial. + Traceback (most recent call last): + ... + ValueError: The program cannot work out a fitting polynomial. >>> print(points_to_polynomial([[]])) - The program cannot work out a fitting polynomial. + Traceback (most recent call last): + ... + ValueError: The program cannot work out a fitting polynomial. >>> print(points_to_polynomial([[1, 0], [2, 0], [3, 0]])) f(x)=x^2*0.0+x^1*-0.0+x^0*0.0 >>> print(points_to_polynomial([[1, 1], [2, 1], [3, 1]])) @@ -25,17 +29,17 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: f(x)=x^2*5.0+x^1*-18.0+x^0*18.0 """ if len(coordinates) == 0 or not all(len(pair) == 2 for pair in coordinates): - return "The program cannot work out a fitting polynomial." + raise ValueError("The program cannot work out a fitting polynomial.") if len({tuple(pair) for pair in coordinates}) != len(coordinates): - return "The program cannot work out a fitting polynomial." + raise ValueError("The program cannot work out a fitting polynomial.") set_x = {x for x, _ in coordinates} if len(set_x) == 1: return f"x={coordinates[0][0]}" if len(set_x) != len(coordinates): - return "The program cannot work out a fitting polynomial." 
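The pattern throughout this patch is to raise an exception instead of returning a sentinel string. A sketch of how calling code adapts (a hypothetical wrapper, not repository code; it assumes the repository root is importable):

from linear_algebra.src.polynom_for_points import points_to_polynomial

def fit_or_none(points: list[list[int]]) -> str | None:
    # Failure is now signalled by ValueError, so callers branch with
    # try/except instead of comparing against an exact message string.
    try:
        return points_to_polynomial(points)
    except ValueError:
        return None

print(fit_or_none([[1, 0], [2, 0], [3, 0]]))  # f(x)=x^2*0.0+x^1*-0.0+x^0*0.0
print(fit_or_none([]))                        # None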
+ raise ValueError("The program cannot work out a fitting polynomial.") x = len(coordinates) diff --git a/maths/monte_carlo_dice.py b/maths/monte_carlo_dice.py index c4150b88f6cc..362f70b49828 100644 --- a/maths/monte_carlo_dice.py +++ b/maths/monte_carlo_dice.py @@ -13,9 +13,6 @@ def __init__(self): def roll(self): return random.choice(self.sides) - def _str_(self): - return "Fair Dice" - def throw_dice(num_throws: int, num_dice: int = 2) -> list[float]: """ diff --git a/matrix/cramers_rule_2x2.py b/matrix/cramers_rule_2x2.py index a635d66fbb6c..4f52dbe646ad 100644 --- a/matrix/cramers_rule_2x2.py +++ b/matrix/cramers_rule_2x2.py @@ -2,7 +2,7 @@ # https://en.wikipedia.org/wiki/Cramer%27s_rule -def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> str: +def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]: """ Solves the system of linear equation in 2 variables. :param: equation1: list of 3 numbers @@ -14,13 +14,13 @@ def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> str: determinant_y = [[a1, d1], [a2, d2]] >>> cramers_rule_2x2([2, 3, 0], [5, 1, 0]) - 'Trivial solution. (Consistent system) x = 0 and y = 0' + (0.0, 0.0) >>> cramers_rule_2x2([0, 4, 50], [2, 0, 26]) - 'Non-Trivial Solution (Consistent system) x = 13.0, y = 12.5' + (13.0, 12.5) >>> cramers_rule_2x2([11, 2, 30], [1, 0, 4]) - 'Non-Trivial Solution (Consistent system) x = 4.0, y = -7.0' + (4.0, -7.0) >>> cramers_rule_2x2([4, 7, 1], [1, 2, 0]) - 'Non-Trivial Solution (Consistent system) x = 2.0, y = -1.0' + (2.0, -1.0) >>> cramers_rule_2x2([1, 2, 3], [2, 4, 6]) Traceback (most recent call last): @@ -75,8 +75,10 @@ def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> str: raise ValueError("No solution. (Inconsistent system)") else: if determinant_x == determinant_y == 0: - return "Trivial solution. 
(Consistent system) x = 0 and y = 0" + # Trivial solution (Inconsistent system) + return (0.0, 0.0) else: x = determinant_x / determinant y = determinant_y / determinant - return f"Non-Trivial Solution (Consistent system) x = {x}, y = {y}" + # Non-Trivial Solution (Consistent system) + return (x, y) diff --git a/other/password.py b/other/password.py index 8f6833073288..f463c7564536 100644 --- a/other/password.py +++ b/other/password.py @@ -66,26 +66,23 @@ def random_characters(chars_incl, i): # This Will Check Whether A Given Password Is Strong Or Not # It Follows The Rule that Length Of Password Should Be At Least 8 Characters # And At Least 1 Lower, 1 Upper, 1 Number And 1 Special Character -def strong_password_detector(password: str, min_length: int = 8) -> str: +def is_strong_password(password: str, min_length: int = 8) -> bool: """ - >>> strong_password_detector('Hwea7$2!') - 'This is a strong Password' - - >>> strong_password_detector('Sh0r1') - 'Your Password must be at least 8 characters long' - - >>> strong_password_detector('Hello123') - 'Password should contain UPPERCASE, lowercase, numbers, special characters' - - >>> strong_password_detector('Hello1238udfhiaf038fajdvjjf!jaiuFhkqi1') - 'This is a strong Password' - - >>> strong_password_detector('0') - 'Your Password must be at least 8 characters long' + >>> is_strong_password('Hwea7$2!') + True + >>> is_strong_password('Sh0r1') + False + >>> is_strong_password('Hello123') + False + >>> is_strong_password('Hello1238udfhiaf038fajdvjjf!jaiuFhkqi1') + True + >>> is_strong_password('0') + False """ if len(password) < min_length: - return "Your Password must be at least 8 characters long" + # Your Password must be at least 8 characters long + return False upper = any(char in ascii_uppercase for char in password) lower = any(char in ascii_lowercase for char in password) @@ -93,13 +90,12 @@ def strong_password_detector(password: str, min_length: int = 8) -> str: spec_char = any(char in punctuation for char in password) if upper and lower and num and spec_char: - return "This is a strong Password" + return True else: - return ( - "Password should contain UPPERCASE, lowercase, " - "numbers, special characters" - ) + # Passwords should contain UPPERCASE, lowerase + # numbers, and special characters + return False def main(): diff --git a/strings/dna.py b/strings/dna.py index 46e271d689db..c2b96110b893 100644 --- a/strings/dna.py +++ b/strings/dna.py @@ -14,13 +14,18 @@ def dna(dna: str) -> str: >>> dna("CTGA") 'GACT' >>> dna("GFGG") - 'Invalid Strand' + Traceback (most recent call last): + ... 
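For the Cramer's rule change above, a quick hand check of the (13.0, 12.5) doctest, using the same determinant layout as the function (illustrative only):

# System: 0*x + 4*y = 50 and 2*x + 0*y = 26
a1, b1, c1 = 0, 4, 50
a2, b2, c2 = 2, 0, 26
determinant = a1 * b2 - a2 * b1      # -8
determinant_x = c1 * b2 - c2 * b1    # -104
determinant_y = a1 * c2 - a2 * c1    # -100
assert (determinant_x / determinant, determinant_y / determinant) == (13.0, 12.5)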
+ ValueError: Invalid Strand """ - r = len(re.findall("[ATCG]", dna)) != len(dna) - val = dna.translate(dna.maketrans("ATCG", "TAGC")) - return "Invalid Strand" if r else val + if len(re.findall("[ATCG]", dna)) != len(dna): + raise ValueError("Invalid Strand") + + return dna.translate(dna.maketrans("ATCG", "TAGC")) if __name__ == "__main__": - __import__("doctest").testmod() + import doctest + + doctest.testmod() From 6aaf0a2c77b671f3e35e71dfccc569f51d8e3b00 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 8 Nov 2022 12:49:47 +0100 Subject: [PATCH 0720/1543] maths/number_of_digits.py: Streamline benchmarks (#7913) * maths/number_of_digits.py: Streamline benchmarks ``` num_digits(262144): 6 -- 0.2226011250168085 seconds num_digits_fast(262144): 6 -- 0.13145116699161008 seconds num_digits_faster(262144): 6 -- 0.09273383300751448 seconds num_digits(1125899906842624): 16 -- 0.6056742920191027 seconds num_digits_fast(1125899906842624): 16 -- 0.15698366600554436 seconds num_digits_faster(1125899906842624): 16 -- 0.1027024170034565 seconds num_digits(1267650600228229401496703205376): 31 -- 1.1957934170495719 seconds num_digits_fast(1267650600228229401496703205376): 31 -- 0.15552304196171463 seconds num_digits_faster(1267650600228229401496703205376): 31 -- 0.13062308297958225 seconds ``` * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update number_of_digits.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/number_of_digits.py | 96 ++++++--------------------------------- 1 file changed, 13 insertions(+), 83 deletions(-) diff --git a/maths/number_of_digits.py b/maths/number_of_digits.py index 3c0eb7b3863f..86bc67f72490 100644 --- a/maths/number_of_digits.py +++ b/maths/number_of_digits.py @@ -67,93 +67,23 @@ def num_digits_faster(n: int) -> int: def benchmark() -> None: """ - Benchmark code for comparing 3 functions, - with 3 different length int values. + Benchmark multiple functions, with three different length int values. 
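The streamlined benchmark below relies on timeit's string interface plus setup="import __main__"; an equivalent form (a small illustrative sketch, not part of the patch) passes the current namespace via the globals argument instead:

from timeit import timeit

def square(n: int) -> int:
    return n * n

# timeit evaluates the statement against this module's globals directly,
# so no "import __main__" setup string is needed.
print(timeit("square(262144)", globals=globals(), number=100_000))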
""" - print("\nFor small_num = ", small_num, ":") - print( - "> num_digits()", - "\t\tans =", - num_digits(small_num), - "\ttime =", - timeit("z.num_digits(z.small_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> num_digits_fast()", - "\tans =", - num_digits_fast(small_num), - "\ttime =", - timeit("z.num_digits_fast(z.small_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> num_digits_faster()", - "\tans =", - num_digits_faster(small_num), - "\ttime =", - timeit("z.num_digits_faster(z.small_num)", setup="import __main__ as z"), - "seconds", - ) - - print("\nFor medium_num = ", medium_num, ":") - print( - "> num_digits()", - "\t\tans =", - num_digits(medium_num), - "\ttime =", - timeit("z.num_digits(z.medium_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> num_digits_fast()", - "\tans =", - num_digits_fast(medium_num), - "\ttime =", - timeit("z.num_digits_fast(z.medium_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> num_digits_faster()", - "\tans =", - num_digits_faster(medium_num), - "\ttime =", - timeit("z.num_digits_faster(z.medium_num)", setup="import __main__ as z"), - "seconds", - ) - - print("\nFor large_num = ", large_num, ":") - print( - "> num_digits()", - "\t\tans =", - num_digits(large_num), - "\ttime =", - timeit("z.num_digits(z.large_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> num_digits_fast()", - "\tans =", - num_digits_fast(large_num), - "\ttime =", - timeit("z.num_digits_fast(z.large_num)", setup="import __main__ as z"), - "seconds", - ) - print( - "> num_digits_faster()", - "\tans =", - num_digits_faster(large_num), - "\ttime =", - timeit("z.num_digits_faster(z.large_num)", setup="import __main__ as z"), - "seconds", - ) + from collections.abc import Callable + + def benchmark_a_function(func: Callable, value: int) -> None: + call = f"{func.__name__}({value})" + timing = timeit(f"__main__.{call}", setup="import __main__") + print(f"{call}: {func(value)} -- {timing} seconds") + + for value in (262144, 1125899906842624, 1267650600228229401496703205376): + for func in (num_digits, num_digits_fast, num_digits_faster): + benchmark_a_function(func, value) + print() if __name__ == "__main__": - small_num = 262144 - medium_num = 1125899906842624 - large_num = 1267650600228229401496703205376 - benchmark() import doctest doctest.testmod() + benchmark() From 8951d857fea2f30d30f64e63d906dc986c32308a Mon Sep 17 00:00:00 2001 From: Abhishek Chakraborty Date: Tue, 8 Nov 2022 09:24:21 -0800 Subject: [PATCH 0721/1543] BB84 QKD algorithm (#7898) * Added BB84 algorithm. * Function name lowercase + imports fix I thought uppercase was appropriate because they're initials. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update quantum/bb84.py Co-authored-by: Christian Clauss * Removed python < 3.11 restriction from qiskit * Removed python < 3.11 restriction from qiskit * scikit-learn * Update quantum/bb84.py Correct typo in `default_rng()` call Co-authored-by: Maxim Smolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Maxim Smolskiy --- quantum/bb84.py | 133 +++++++++++++++++++++++++++++++++++++++++++++++ requirements.txt | 4 +- 2 files changed, 135 insertions(+), 2 deletions(-) create mode 100644 quantum/bb84.py diff --git a/quantum/bb84.py b/quantum/bb84.py new file mode 100644 index 000000000000..60d64371fe63 --- /dev/null +++ b/quantum/bb84.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +""" +Simulation of the Quantum Key Distribution (QKD) protocol called BB84, +created by Charles Bennett and Gilles Brassard in 1984. + +BB84 is a key-distribution protocol that ensures secure key distribution +using qubits instead of classical bits. The generated key is the result +of simulating a quantum circuit. Our algorithm to construct the circuit +is as follows: + +Alice generates two binary strings. One encodes the basis for each qubit: + + - 0 -> {0,1} basis. + - 1 -> {+,-} basis. + +The other encodes the state: + + - 0 -> |0> or |+>. + - 1 -> |1> or |->. + +Bob also generates a binary string and uses the same convention to choose +a basis for measurement. Based on the following results, we follow the +algorithm below: + +X|0> = |1> + +H|0> = |+> + +HX|0> = |-> + +1. Whenever Alice wants to encode 1 in a qubit, she applies an +X (NOT) gate to the qubit. To encode 0, no action is needed. + +2. Wherever she wants to encode it in the {+,-} basis, she applies +an H (Hadamard) gate. No action is necessary to encode a qubit in +the {0,1} basis. + +3. She then sends the qubits to Bob (symbolically represented in +this circuit using wires). + +4. Bob measures the qubits according to his binary string for +measurement. To measure a qubit in the {+,-} basis, he applies +an H gate to the corresponding qubit and then performs a measurement. + +References: +https://en.wikipedia.org/wiki/BB84 +https://qiskit.org/textbook/ch-algorithms/quantum-key-distribution.html +""" +import numpy as np +import qiskit + + +def bb84(key_len: int = 8, seed: int | None = None) -> str: + """ + Performs the BB84 protocol using a key made of `key_len` bits. + The two parties in the key distribution are called Alice and Bob. + Args: + key_len: The length of the generated key in bits. The default is 8. + + seed: Seed for the random number generator. + Mostly used for testing. Default is None. + + Returns: + key: The key generated using BB84 protocol. + + >>> bb84(16, seed=0) + '1101101100010000' + + >>> bb84(8, seed=0) + '01011011' + """ + # Set up the random number generator. + rng = np.random.default_rng(seed=seed) + + # Roughly 25% of the qubits will contribute to the key. + # So we take more than we need. + num_qubits = 6 * key_len + # Measurement basis for Alice's qubits. + alice_basis = rng.integers(2, size=num_qubits) + # The set of states Alice will prepare. + alice_state = rng.integers(2, size=num_qubits) + # Measurement basis for Bob's qubits. 
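The key-sifting rule that the docstring above describes (only positions where Alice's and Bob's bases agree contribute to the key) can be seen without any quantum simulation; a numpy-only sketch, illustrative and separate from the patch's qiskit circuit:

import numpy as np

rng = np.random.default_rng(seed=0)
alice_basis = rng.integers(2, size=16)
alice_bits = rng.integers(2, size=16)
bob_basis = rng.integers(2, size=16)

# Keep only the positions where both parties happened to pick the same basis;
# in expectation about half of the transmitted qubits survive this sifting.
sifted = "".join(
    str(bit)
    for a, b, bit in zip(alice_basis, bob_basis, alice_bits)
    if a == b
)
print(len(sifted), "of 16 positions kept")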
+ bob_basis = rng.integers(2, size=num_qubits) + + # Quantum Circuit to simulate BB84 + bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84") + + # Alice prepares her qubits according to rules above. + for index, _ in enumerate(alice_basis): + if alice_state[index] == 1: + bb84_circ.x(index) + if alice_basis[index] == 1: + bb84_circ.h(index) + bb84_circ.barrier() + + # Bob measures the received qubits according to rules above. + for index, _ in enumerate(bob_basis): + if bob_basis[index] == 1: + bb84_circ.h(index) + + bb84_circ.barrier() + bb84_circ.measure_all() + + # Simulate the quantum circuit. + sim = qiskit.Aer.get_backend("aer_simulator") + # We only need to run one shot because the key is unique. + # Multiple shots will produce the same key. + job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed) + # Returns the result of measurement. + result = job.result().get_counts(bb84_circ).most_frequent() + + # Extracting the generated key from the simulation results. + # Only keep measurement results where Alice and Bob chose the same basis. + gen_key = "".join( + [ + result_bit + for alice_basis_bit, bob_basis_bit, result_bit in zip( + alice_basis, bob_basis, result + ) + if alice_basis_bit == bob_basis_bit + ] + ) + + # Get final key. Pad with 0 if too short, otherwise truncate. + key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0") + return key + + +if __name__ == "__main__": + print(f"The generated key is : {bb84(8, seed=0)}") + from doctest import testmod + + testmod() diff --git a/requirements.txt b/requirements.txt index 00f31b85e404..a1d607df07e1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,11 +8,11 @@ opencv-python pandas pillow projectq -qiskit; python_version < "3.11" +qiskit requests rich scikit-fuzzy -sklearn +scikit-learn statsmodels sympy tensorflow; python_version < "3.11" From 3f9aae149dba5c9b68ff6f7fd83cadf3fd6b1d7d Mon Sep 17 00:00:00 2001 From: Akshay Dubey <38462415+itsAkshayDubey@users.noreply.github.com> Date: Wed, 9 Nov 2022 21:06:38 +0530 Subject: [PATCH 0722/1543] feat: Add automorphic number implementation (#7978) * feat: Add automorphic number implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor: Add type checking for number * refactor: Rename variable n to number * test: Add doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * test: Add unit test for number=0 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/automorphic_number.py | 58 +++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 maths/automorphic_number.py diff --git a/maths/automorphic_number.py b/maths/automorphic_number.py new file mode 100644 index 000000000000..103fc7301831 --- /dev/null +++ b/maths/automorphic_number.py @@ -0,0 +1,58 @@ +""" +== Automorphic Numbers == +A number n is said to be a Automorphic number if +the square of n "ends" in the same digits as n itself. + +Examples of Automorphic Numbers: 0, 1, 5, 6, 25, 76, 376, 625, 9376, 90625, ... +https://en.wikipedia.org/wiki/Automorphic_number +""" + +# Author : Akshay Dubey (https://github.com/itsAkshayDubey) +# Time Complexity : O(log10n) + + +def is_automorphic_number(number: int) -> bool: + """ + # doctest: +NORMALIZE_WHITESPACE + This functions takes an integer number as input. + returns True if the number is automorphic. 
+ >>> is_automorphic_number(-1) + False + >>> is_automorphic_number(0) + True + >>> is_automorphic_number(5) + True + >>> is_automorphic_number(6) + True + >>> is_automorphic_number(7) + False + >>> is_automorphic_number(25) + True + >>> is_automorphic_number(259918212890625) + True + >>> is_automorphic_number(259918212890636) + False + >>> is_automorphic_number(740081787109376) + True + >>> is_automorphic_number(5.0) + Traceback (most recent call last): + ... + TypeError: Input value of [number=5.0] must be an integer + """ + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + if number < 0: + return False + number_square = number * number + while number > 0: + if number % 10 != number_square % 10: + return False + number //= 10 + number_square //= 10 + return True + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 076193eefa161a2030ca4b1ee60b285d4a50e4c6 Mon Sep 17 00:00:00 2001 From: Akshay Dubey <38462415+itsAkshayDubey@users.noreply.github.com> Date: Thu, 10 Nov 2022 08:09:47 +0530 Subject: [PATCH 0723/1543] feat: Add pronic number implementation (#7979) * feat: Add pronic number implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/pronic_number.py | 54 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 maths/pronic_number.py diff --git a/maths/pronic_number.py b/maths/pronic_number.py new file mode 100644 index 000000000000..8b554dbbd602 --- /dev/null +++ b/maths/pronic_number.py @@ -0,0 +1,54 @@ +""" +== Pronic Number == +A number n is said to be a Proic number if +there exists an integer m such that n = m * (m + 1) + +Examples of Proic Numbers: 0, 2, 6, 12, 20, 30, 42, 56, 72, 90, 110 ... +https://en.wikipedia.org/wiki/Pronic_number +""" + +# Author : Akshay Dubey (https://github.com/itsAkshayDubey) + + +def is_pronic(number: int) -> bool: + """ + # doctest: +NORMALIZE_WHITESPACE + This functions takes an integer number as input. + returns True if the number is pronic. + >>> is_pronic(-1) + False + >>> is_pronic(0) + True + >>> is_pronic(2) + True + >>> is_pronic(5) + False + >>> is_pronic(6) + True + >>> is_pronic(8) + False + >>> is_pronic(30) + True + >>> is_pronic(32) + False + >>> is_pronic(2147441940) + True + >>> is_pronic(9223372033963249500) + True + >>> is_pronic(6.0) + Traceback (most recent call last): + ... 
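Two quick cross-checks of the predicates above (the helper names here are made up for illustration and are not repository functions):

import math

def is_automorphic_alt(n: int) -> bool:
    # n is automorphic exactly when n*n ends with the decimal digits of n.
    return n >= 0 and str(n * n).endswith(str(n))

def is_pronic_alt(n: int) -> bool:
    # A pronic number is m*(m+1); math.isqrt keeps the check in integers.
    if n < 0:
        return False
    m = math.isqrt(n)
    return n == m * (m + 1)

assert all(is_automorphic_alt(n) for n in (0, 1, 5, 6, 25, 76, 376, 625))
assert all(is_pronic_alt(n) for n in (0, 2, 6, 12, 20, 30, 42, 56, 72, 90, 110))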
+ TypeError: Input value of [number=6.0] must be an integer + """ + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + if number < 0 or number % 2 == 1: + return False + number_sqrt = int(number**0.5) + return number == number_sqrt * (number_sqrt + 1) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 4cddb26908bde48047e4b6e383c4b061c289a5e5 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 10 Nov 2022 03:41:28 +0100 Subject: [PATCH 0724/1543] atbash.py: Tighten up the benchmarks (#7977) * atbash.py: Tighten up the benchmarks * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + ciphers/atbash.py | 21 ++++----------------- 2 files changed, 5 insertions(+), 17 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 76c7f9dea4e3..5f314c31745d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -995,6 +995,7 @@ * [Sol1](project_euler/problem_686/sol1.py) ## Quantum + * [Bb84](quantum/bb84.py) * [Deutsch Jozsa](quantum/deutsch_jozsa.py) * [Half Adder](quantum/half_adder.py) * [Not Gate](quantum/not_gate.py) diff --git a/ciphers/atbash.py b/ciphers/atbash.py index 5c2aea610bff..0a86a800c51a 100644 --- a/ciphers/atbash.py +++ b/ciphers/atbash.py @@ -38,26 +38,13 @@ def atbash(sequence: str) -> str: def benchmark() -> None: - """Let's benchmark them side-by-side...""" + """Let's benchmark our functions side-by-side...""" from timeit import timeit print("Running performance benchmarks...") - print( - "> atbash_slow()", - timeit( - "atbash_slow(printable)", - setup="from string import printable ; from __main__ import atbash_slow", - ), - "seconds", - ) - print( - "> atbash()", - timeit( - "atbash(printable)", - setup="from string import printable ; from __main__ import atbash", - ), - "seconds", - ) + setup = "from string import printable ; from __main__ import atbash, atbash_slow" + print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds") + print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds") if __name__ == "__main__": From 5c92b7390e650494f49e1f9298c1a79421673385 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 10 Nov 2022 03:42:14 +0100 Subject: [PATCH 0725/1543] prime_numbers.py: Tighten up the benchmarks (#7976) * prime_numbers.py: Tighten up the benchmarks * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- maths/prime_numbers.py | 38 +++++++++++++------------------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/maths/prime_numbers.py b/maths/prime_numbers.py index 4e076fe317b4..c5297ed9264c 100644 --- a/maths/prime_numbers.py +++ b/maths/prime_numbers.py @@ -90,32 +90,20 @@ def fast_primes(max_n: int) -> Generator[int, None, None]: yield i +def benchmark(): + """ + Let's benchmark our functions side-by-side... + """ + from timeit import timeit + + setup = "from __main__ import slow_primes, primes, fast_primes" + print(timeit("slow_primes(1_000_000_000_000)", setup=setup, number=1_000_000)) + print(timeit("primes(1_000_000_000_000)", setup=setup, number=1_000_000)) + print(timeit("fast_primes(1_000_000_000_000)", setup=setup, number=1_000_000)) + + if __name__ == "__main__": number = int(input("Calculate primes up to:\n>> ").strip()) for ret in primes(number): print(ret) - - # Let's benchmark them side-by-side... 
- from timeit import timeit - - print( - timeit( - "slow_primes(1_000_000_000_000)", - setup="from __main__ import slow_primes", - number=1_000_000, - ) - ) - print( - timeit( - "primes(1_000_000_000_000)", - setup="from __main__ import primes", - number=1_000_000, - ) - ) - print( - timeit( - "fast_primes(1_000_000_000_000)", - setup="from __main__ import fast_primes", - number=1_000_000, - ) - ) + benchmark() From 7b2eca0243f5c4454875e17971cb527037d2e281 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Thu, 10 Nov 2022 06:49:38 +0400 Subject: [PATCH 0726/1543] add distribute coins (#7975) * add distribute coins * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix review notes * fix typehint * fix type in TreeNode Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + .../binary_tree/distribute_coins.py | 135 ++++++++++++++++++ 2 files changed, 136 insertions(+) create mode 100644 data_structures/binary_tree/distribute_coins.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 5f314c31745d..74243cd0687b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -173,6 +173,7 @@ * [Binary Tree Path Sum](data_structures/binary_tree/binary_tree_path_sum.py) * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py) + * [Distribute Coins](data_structures/binary_tree/distribute_coins.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) * [Is Bst](data_structures/binary_tree/is_bst.py) diff --git a/data_structures/binary_tree/distribute_coins.py b/data_structures/binary_tree/distribute_coins.py new file mode 100644 index 000000000000..ea02afc2cea6 --- /dev/null +++ b/data_structures/binary_tree/distribute_coins.py @@ -0,0 +1,135 @@ +""" +Author : Alexander Pantyukhin +Date : November 7, 2022 + +Task: +You are given a tree root of a binary tree with n nodes, where each node has +node.data coins. There are exactly n coins in whole tree. + +In one move, we may choose two adjacent nodes and move one coin from one node +to another. A move may be from parent to child, or from child to parent. + +Return the minimum number of moves required to make every node have exactly one coin. + +Example 1: + + 3 + / \ + 0 0 + +Result: 2 + +Example 2: + + 0 + / \ + 3 0 + +Result 3 + +leetcode: https://leetcode.com/problems/distribute-coins-in-binary-tree/ + +Implementation notes: +User depth-first search approach. + +Let n is the number of nodes in tree +Runtime: O(n) +Space: O(1) +""" + +from __future__ import annotations + +from collections import namedtuple +from dataclasses import dataclass + + +@dataclass +class TreeNode: + data: int + left: TreeNode | None = None + right: TreeNode | None = None + + +CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess") + + +def distribute_coins(root: TreeNode | None) -> int: + """ + >>> distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) + 2 + >>> distribute_coins(TreeNode(0, TreeNode(3), TreeNode(0))) + 3 + >>> distribute_coins(TreeNode(0, TreeNode(0), TreeNode(3))) + 3 + >>> distribute_coins(None) + 0 + >>> distribute_coins(TreeNode(0, TreeNode(0), TreeNode(0))) + Traceback (most recent call last): + ... 
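A hand-worked check of the coin bookkeeping used by the DFS below: every edge is crossed |coins in the subtree - nodes in the subtree| times (illustrative only):

# Example 1 above: the root holds 3 coins, its two leaf children hold 0.
#   left leaf : excess = coins - nodes = 0 - 1 = -1 -> one coin crosses its edge
#   right leaf: excess = 0 - 1 = -1                 -> one coin crosses its edge
# Total moves = |-1| + |-1| = 2, matching the expected result of 2.
assert abs(0 - 1) + abs(0 - 1) == 2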
+ ValueError: The nodes number should be same as the number of coins + >>> distribute_coins(TreeNode(0, TreeNode(1), TreeNode(1))) + Traceback (most recent call last): + ... + ValueError: The nodes number should be same as the number of coins + """ + + if root is None: + return 0 + + # Validation + def count_nodes(node: TreeNode | None) -> int: + """ + >>> count_nodes(None): + 0 + """ + if node is None: + return 0 + + return count_nodes(node.left) + count_nodes(node.right) + 1 + + def count_coins(node: TreeNode | None) -> int: + """ + >>> count_coins(None): + 0 + """ + if node is None: + return 0 + + return count_coins(node.left) + count_coins(node.right) + node.data + + if count_nodes(root) != count_coins(root): + raise ValueError("The nodes number should be same as the number of coins") + + # Main calculation + def get_distrib(node: TreeNode | None) -> CoinsDistribResult: + """ + >>> get_distrib(None) + namedtuple("CoinsDistribResult", "0 2") + """ + + if node is None: + return CoinsDistribResult(0, 1) + + left_distrib_moves, left_distrib_excess = get_distrib(node.left) + right_distrib_moves, right_distrib_excess = get_distrib(node.right) + + coins_to_left = 1 - left_distrib_excess + coins_to_right = 1 - right_distrib_excess + + result_moves = ( + left_distrib_moves + + right_distrib_moves + + abs(coins_to_left) + + abs(coins_to_right) + ) + result_excess = node.data - coins_to_left - coins_to_right + + return CoinsDistribResult(result_moves, result_excess) + + return get_distrib(root)[0] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From e1be882f72f85d5f7267b46f0ffd5203a6d81e2e Mon Sep 17 00:00:00 2001 From: Akshay Dubey <38462415+itsAkshayDubey@users.noreply.github.com> Date: Thu, 10 Nov 2022 16:25:50 +0530 Subject: [PATCH 0727/1543] algorithm: Twin prime (#7980) * feat: Add twin prime algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: Fix broken import statement Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/twin_prime.py | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 maths/twin_prime.py diff --git a/maths/twin_prime.py b/maths/twin_prime.py new file mode 100644 index 000000000000..e6ac0cc7805b --- /dev/null +++ b/maths/twin_prime.py @@ -0,0 +1,45 @@ +""" +== Twin Prime == +A number n+2 is said to be a Twin prime of number n if +both n and n+2 are prime. + +Examples of Twin pairs: (3, 5), (5, 7), (11, 13), (17, 19), (29, 31), (41, 43), ... +https://en.wikipedia.org/wiki/Twin_prime +""" + +# Author : Akshay Dubey (https://github.com/itsAkshayDubey) +from maths.prime_check import is_prime + + +def twin_prime(number: int) -> int: + """ + # doctest: +NORMALIZE_WHITESPACE + This functions takes an integer number as input. + returns n+2 if n and n+2 are prime numbers and -1 otherwise. + >>> twin_prime(3) + 5 + >>> twin_prime(4) + -1 + >>> twin_prime(5) + 7 + >>> twin_prime(17) + 19 + >>> twin_prime(0) + -1 + >>> twin_prime(6.0) + Traceback (most recent call last): + ... 
+    TypeError: Input value of [number=6.0] must be an integer
+    """
+    if not isinstance(number, int):
+        raise TypeError(f"Input value of [number={number}] must be an integer")
+    if is_prime(number) and is_prime(number + 2):
+        return number + 2
+    else:
+        return -1
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
From 316e71b03448b6adb8a32d96cb4d6488ee7b7787 Mon Sep 17 00:00:00 2001
From: Gayathri Krishnan
Date: Tue, 15 Nov 2022 19:07:59 +0530
Subject: [PATCH 0728/1543] Additional intro blockchain doc (#7974)

* A deeper introduction to blockchain technology

* Update README.md

Rectified errors as image was not visible

* Delete img1.jpg

Deleting the image as it is not getting accepted in PR merge

* Delete img2.jpg

Deleting the image as it is not getting accepted in PR merge

* Update README.md

Removed all images in the document

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update README.md

Committed the suggested changes and submitting for review.

* Update README.md

Changed a sentence that needed grammatical correction.

* Update README.md

Added the changes suggested by review panel

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 blockchain/README.md | 39 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 38 insertions(+), 1 deletion(-)

diff --git a/blockchain/README.md b/blockchain/README.md
index 5ae7f95ec981..b5fab7b36eaa 100644
--- a/blockchain/README.md
+++ b/blockchain/README.md
@@ -1,7 +1,44 @@
 # Blockchain

-A Blockchain is a type of distributed ledger technology (DLT) that consists of growing list of records, called blocks, that are securely linked together using cryptography.
+A Blockchain is a type of **distributed ledger** technology (DLT) that consists of a growing list of records, called **blocks**, that are securely linked together using **cryptography**.
+
+Let's break down the terminology in the definition above:
+
+- Digital Ledger Technology (DLT)
+- Blocks
+- Cryptography
+
+## Digital Ledger Technology
+
+ It is also called distributed ledger technology, and it is essentially the opposite of a centralized database. Firstly, what is a **ledger**? A ledger is a book or collection of accounts that records account transactions.
+
+ *Why is Blockchain addressed as a digital ledger if it can record more than account transactions? What other transaction details and information can it hold?*
+
+Digital Ledger Technology is simply a ledger that is shared among multiple nodes, so there is no need for a central authority to hold the information. How does it differ from a centralized database, and what are its benefits?
+
+Imagine an organization with 4 branches whose data are stored in a centralized database. Even when one branch needs data from the ledger, it must get approval from the database's administrator. And if someone hacks the central database, they can tamper with and control all the data.
+
+Now let's assume every branch has a copy of the ledger: once anything is added to the ledger by any branch, it is automatically reflected in the ledgers of all the other branches. This is done using a peer-to-peer network.
+
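The "securely linked together using cryptography" part can be made concrete in a few lines of Python (an illustrative sketch, not code from this repository): each block stores the hash of its predecessor, so altering any earlier record breaks every later link.

import hashlib
import json

def block_hash(block: dict) -> str:
    # Hash a canonical JSON encoding of the block's contents.
    return hashlib.sha256(json.dumps(block, sort_keys=True).encode()).hexdigest()

genesis = {"index": 0, "transactions": [], "previous_hash": "0" * 64}
block_1 = {"index": 1, "transactions": ["A pays B 5"], "previous_hash": block_hash(genesis)}

# Tampering with the earlier block changes its hash, so the link no longer matches.
genesis["transactions"].append("forged entry")
assert block_1["previous_hash"] != block_hash(genesis)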
+So this means that even if information is tampered with in one branch, we can find out. If one branch is hacked, we can be alerted and safeguard the other branches. Now, think of these branches as computers or nodes, and of the ledger as a transaction record or digital receipt. If the ledger at one node is tampered with, we can detect it because it will no longer match the information held by the other nodes. This is the concept of Digital Ledger Technology.
+
+*Is it required for all nodes to have access to all information in other nodes? Wouldn't this require enormous storage space in each node?*
+
+## Blocks
+
+In short, a block is nothing but a collection of records with a labelled header. Blocks are connected cryptographically: once a new block is added to the chain, the previous block is linked to it, or more precisely locked, and hence remains unaltered. This becomes clearer once we understand the working mechanism of a blockchain.
+
+## Cryptography
+
+Cryptography is the practice and study of secure communication techniques in the presence of adversarial behavior. More broadly, it is the creation and analysis of protocols that prevent third parties or the general public from accessing private messages.
+
+*Which cryptography technology is most widely used in blockchain and why?*
+
+So, in general, blockchain technology is a distributed record keeper that records information about the ownership of an asset. To define it precisely,
+> Blockchain is a distributed, immutable ledger that makes it easier to record transactions and track assets in a corporate network.
+An asset can be tangible (such as a house, car, cash, or land) or intangible (such as intellectual property, patents, copyrights, or branding). A blockchain network can track and trade almost anything of value, lowering risk and costs for everyone involved.
+
+So this is all about the introduction to blockchain technology. To learn more about the topic, refer to the links below.
 * * *
From 3bf86b91e7d438eb2b9ecbab68060c007d270332 Mon Sep 17 00:00:00 2001
From: Dhruv Manilawala
Date: Tue, 15 Nov 2022 19:25:14 +0530
Subject: [PATCH 0729/1543] fix: no implicit optional (#7984)

---
 data_structures/binary_tree/fenwick_tree.py      | 2 +-
 fractals/julia_sets.py                           | 2 +-
 linear_algebra/src/schur_complement.py           | 2 +-
 machine_learning/linear_discriminant_analysis.py | 2 +-
 project_euler/problem_074/sol1.py                | 2 +-
 sorts/strand_sort.py                             | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/data_structures/binary_tree/fenwick_tree.py b/data_structures/binary_tree/fenwick_tree.py
index babd75ac4b31..88b0873a10fb 100644
--- a/data_structures/binary_tree/fenwick_tree.py
+++ b/data_structures/binary_tree/fenwick_tree.py
@@ -8,7 +8,7 @@ class FenwickTree:
     More info: https://en.wikipedia.org/wiki/Fenwick_tree
     """

-    def __init__(self, arr: list[int] = None, size: int = None) -> None:
+    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
         """
         Constructor for the Fenwick tree

diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py
index 35fdc45d020a..77d1d7c042ba 100644
--- a/fractals/julia_sets.py
+++ b/fractals/julia_sets.py
@@ -89,7 +89,7 @@ def iterate_function(
     function_params: Any,
     nb_iterations: int,
     z_0: numpy.ndarray,
-    infinity: float = None,
+    infinity: float | None = None,
 ) -> numpy.ndarray:
     """
     Iterate the function "eval_function" exactly nb_iterations times.
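The change in this patch is mechanical: a parameter annotated like `arr: list[int] = None` relies on implicit Optional, which newer mypy defaults reject, so `| None` is spelled out. A minimal sketch of the accepted form (illustrative, not repository code):

def build(arr: list[int] | None = None) -> list[int]:
    # The None default is now part of the declared type, so mypy no longer
    # has to widen list[int] to Optional[list[int]] behind the scenes.
    return [] if arr is None else list(arr)

print(build())        # []
print(build([1, 2]))  # [1, 2]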
diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py index f3cb736d9084..3a5f4443afd3 100644 --- a/linear_algebra/src/schur_complement.py +++ b/linear_algebra/src/schur_complement.py @@ -7,7 +7,7 @@ def schur_complement( mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, - pseudo_inv: np.ndarray = None, + pseudo_inv: np.ndarray | None = None, ) -> np.ndarray: """ Schur complement of a symmetric matrix X given as a 2x2 block matrix diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index 9ef42ed19bab..f4fb5ba76b64 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -256,7 +256,7 @@ def valid_input( input_msg: str, err_msg: str, condition: Callable[[num], bool] = lambda x: True, - default: str = None, + default: str | None = None, ) -> num: """ Ask for user value and validate that it fulfill a condition. diff --git a/project_euler/problem_074/sol1.py b/project_euler/problem_074/sol1.py index a40a629033fa..a257d4d94fa8 100644 --- a/project_euler/problem_074/sol1.py +++ b/project_euler/problem_074/sol1.py @@ -71,7 +71,7 @@ def sum_digit_factorials(n: int) -> int: return ret -def chain_length(n: int, previous: set = None) -> int: +def chain_length(n: int, previous: set | None = None) -> int: """ Calculate the length of the chain of non-repeating terms starting with n. Previous is a set containing the previous member of the chain. diff --git a/sorts/strand_sort.py b/sorts/strand_sort.py index a89135a0691f..4cadd396178e 100644 --- a/sorts/strand_sort.py +++ b/sorts/strand_sort.py @@ -1,7 +1,7 @@ import operator -def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list: +def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list: """ Strand sort implementation source: https://en.wikipedia.org/wiki/Strand_sort From 4ce8ad9ce6e554360089e77e088df6dd8b4a69df Mon Sep 17 00:00:00 2001 From: Akshay Dubey <38462415+itsAkshayDubey@users.noreply.github.com> Date: Tue, 15 Nov 2022 22:58:49 +0530 Subject: [PATCH 0730/1543] algorithm: Liouville lambda function (#7986) * feat: Add liouville lambda function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor: Refactor if-else block * refactor: Refactor error handling for -ve numbers * refactor: Remove # doctest: +NORMALIZE_WHITESPACE Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/liouville_lambda.py | 45 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 maths/liouville_lambda.py diff --git a/maths/liouville_lambda.py b/maths/liouville_lambda.py new file mode 100644 index 000000000000..5993efa42d66 --- /dev/null +++ b/maths/liouville_lambda.py @@ -0,0 +1,45 @@ +""" +== Liouville Lambda Function == +The Liouville Lambda function, denoted by λ(n) +and λ(n) is 1 if n is the product of an even number of prime numbers, +and -1 if it is the product of an odd number of primes. + +https://en.wikipedia.org/wiki/Liouville_function +""" + +# Author : Akshay Dubey (https://github.com/itsAkshayDubey) +from maths.prime_factors import prime_factors + + +def liouville_lambda(number: int) -> int: + """ + This functions takes an integer number as input. + returns 1 if n has even number of prime factors and -1 otherwise. 
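Equivalently, the Liouville function is λ(n) = (-1)**Ω(n), where Ω(n) counts prime factors with multiplicity; a small standalone cross-check (illustrative only):

def big_omega(n: int) -> int:
    # Count prime factors with multiplicity by trial division.
    count, d = 0, 2
    while d * d <= n:
        while n % d == 0:
            n //= d
            count += 1
        d += 1
    return count + (1 if n > 1 else 0)

assert (-1) ** big_omega(10) == 1    # 10 = 2 * 5 has two prime factors
assert (-1) ** big_omega(11) == -1   # 11 is prime, one factor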
+ >>> liouville_lambda(10) + 1 + >>> liouville_lambda(11) + -1 + >>> liouville_lambda(0) + Traceback (most recent call last): + ... + ValueError: Input must be a positive integer + >>> liouville_lambda(-1) + Traceback (most recent call last): + ... + ValueError: Input must be a positive integer + >>> liouville_lambda(11.0) + Traceback (most recent call last): + ... + TypeError: Input value of [number=11.0] must be an integer + """ + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + if number < 1: + raise ValueError("Input must be a positive integer") + return -1 if len(prime_factors(number)) % 2 else 1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 8bfd1c844b388cb78b03952c7da28f07f3838fd1 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Tue, 15 Nov 2022 22:59:14 +0530 Subject: [PATCH 0731/1543] fix: mypy 0.991 issues (#7988) * fix: mypy 0.991 issues * fix: invalid condition for base case --- conversions/decimal_to_any.py | 5 +- data_structures/linked_list/__init__.py | 2 +- matrix/matrix_class.py | 732 ++++++++++++------------ 3 files changed, 370 insertions(+), 369 deletions(-) diff --git a/conversions/decimal_to_any.py b/conversions/decimal_to_any.py index 11a2af294829..c9c2e9a5fb71 100644 --- a/conversions/decimal_to_any.py +++ b/conversions/decimal_to_any.py @@ -76,8 +76,9 @@ def decimal_to_any(num: int, base: int) -> str: div, mod = divmod(num, base) if base >= 11 and 9 < mod < 36: actual_value = ALPHABET_VALUES[str(mod)] - mod = actual_value - new_value += str(mod) + else: + actual_value = str(mod) + new_value += actual_value div = num // base num = div if div == 0: diff --git a/data_structures/linked_list/__init__.py b/data_structures/linked_list/__init__.py index 85660a6d2c27..56b0e51baa93 100644 --- a/data_structures/linked_list/__init__.py +++ b/data_structures/linked_list/__init__.py @@ -49,7 +49,7 @@ def __str__(self) -> str: >>> print(linked_list) 9 --> 14 --> 23 """ - if not self.is_empty: + if self.is_empty(): return "" else: iterate = self.head diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py index 0c3078fe6dc8..a73e8b92a286 100644 --- a/matrix/matrix_class.py +++ b/matrix/matrix_class.py @@ -1,366 +1,366 @@ -# An OOP approach to representing and manipulating matrices - -from __future__ import annotations - - -class Matrix: - """ - Matrix object generated from a 2D array where each element is an array representing - a row. - Rows can contain type int or float. - Common operations and information available. - >>> rows = [ - ... [1, 2, 3], - ... [4, 5, 6], - ... [7, 8, 9] - ... ] - >>> matrix = Matrix(rows) - >>> print(matrix) - [[1. 2. 3.] - [4. 5. 6.] - [7. 8. 9.]] - - Matrix rows and columns are available as 2D arrays - >>> matrix.rows - [[1, 2, 3], [4, 5, 6], [7, 8, 9]] - >>> matrix.columns() - [[1, 4, 7], [2, 5, 8], [3, 6, 9]] - - Order is returned as a tuple - >>> matrix.order - (3, 3) - - Squareness and invertability are represented as bool - >>> matrix.is_square - True - >>> matrix.is_invertable() - False - - Identity, Minors, Cofactors and Adjugate are returned as Matrices. Inverse can be - a Matrix or Nonetype - >>> print(matrix.identity()) - [[1. 0. 0.] - [0. 1. 0.] - [0. 0. 1.]] - >>> print(matrix.minors()) - [[-3. -6. -3.] - [-6. -12. -6.] - [-3. -6. -3.]] - >>> print(matrix.cofactors()) - [[-3. 6. -3.] - [6. -12. 6.] - [-3. 6. -3.]] - >>> # won't be apparent due to the nature of the cofactor matrix - >>> print(matrix.adjugate()) - [[-3. 6. -3.] - [6. -12. 6.] 
- [-3. 6. -3.]] - >>> matrix.inverse() - Traceback (most recent call last): - ... - TypeError: Only matrices with a non-zero determinant have an inverse - - Determinant is an int, float, or Nonetype - >>> matrix.determinant() - 0 - - Negation, scalar multiplication, addition, subtraction, multiplication and - exponentiation are available and all return a Matrix - >>> print(-matrix) - [[-1. -2. -3.] - [-4. -5. -6.] - [-7. -8. -9.]] - >>> matrix2 = matrix * 3 - >>> print(matrix2) - [[3. 6. 9.] - [12. 15. 18.] - [21. 24. 27.]] - >>> print(matrix + matrix2) - [[4. 8. 12.] - [16. 20. 24.] - [28. 32. 36.]] - >>> print(matrix - matrix2) - [[-2. -4. -6.] - [-8. -10. -12.] - [-14. -16. -18.]] - >>> print(matrix ** 3) - [[468. 576. 684.] - [1062. 1305. 1548.] - [1656. 2034. 2412.]] - - Matrices can also be modified - >>> matrix.add_row([10, 11, 12]) - >>> print(matrix) - [[1. 2. 3.] - [4. 5. 6.] - [7. 8. 9.] - [10. 11. 12.]] - >>> matrix2.add_column([8, 16, 32]) - >>> print(matrix2) - [[3. 6. 9. 8.] - [12. 15. 18. 16.] - [21. 24. 27. 32.]] - >>> print(matrix * matrix2) - [[90. 108. 126. 136.] - [198. 243. 288. 304.] - [306. 378. 450. 472.] - [414. 513. 612. 640.]] - """ - - def __init__(self, rows: list[list[int]]): - error = TypeError( - "Matrices must be formed from a list of zero or more lists containing at " - "least one and the same number of values, each of which must be of type " - "int or float." - ) - if len(rows) != 0: - cols = len(rows[0]) - if cols == 0: - raise error - for row in rows: - if len(row) != cols: - raise error - for value in row: - if not isinstance(value, (int, float)): - raise error - self.rows = rows - else: - self.rows = [] - - # MATRIX INFORMATION - def columns(self) -> list[list[int]]: - return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))] - - @property - def num_rows(self) -> int: - return len(self.rows) - - @property - def num_columns(self) -> int: - return len(self.rows[0]) - - @property - def order(self) -> tuple[int, int]: - return (self.num_rows, self.num_columns) - - @property - def is_square(self) -> bool: - return self.order[0] == self.order[1] - - def identity(self) -> Matrix: - values = [ - [0 if column_num != row_num else 1 for column_num in range(self.num_rows)] - for row_num in range(self.num_rows) - ] - return Matrix(values) - - def determinant(self) -> int: - if not self.is_square: - return 0 - if self.order == (0, 0): - return 1 - if self.order == (1, 1): - return int(self.rows[0][0]) - if self.order == (2, 2): - return int( - (self.rows[0][0] * self.rows[1][1]) - - (self.rows[0][1] * self.rows[1][0]) - ) - else: - return sum( - self.rows[0][column] * self.cofactors().rows[0][column] - for column in range(self.num_columns) - ) - - def is_invertable(self) -> bool: - return bool(self.determinant()) - - def get_minor(self, row: int, column: int) -> int: - values = [ - [ - self.rows[other_row][other_column] - for other_column in range(self.num_columns) - if other_column != column - ] - for other_row in range(self.num_rows) - if other_row != row - ] - return Matrix(values).determinant() - - def get_cofactor(self, row: int, column: int) -> int: - if (row + column) % 2 == 0: - return self.get_minor(row, column) - return -1 * self.get_minor(row, column) - - def minors(self) -> Matrix: - return Matrix( - [ - [self.get_minor(row, column) for column in range(self.num_columns)] - for row in range(self.num_rows) - ] - ) - - def cofactors(self) -> Matrix: - return Matrix( - [ - [ - self.minors().rows[row][column] - if (row + column) % 2 == 0 - 
else self.minors().rows[row][column] * -1 - for column in range(self.minors().num_columns) - ] - for row in range(self.minors().num_rows) - ] - ) - - def adjugate(self) -> Matrix: - values = [ - [self.cofactors().rows[column][row] for column in range(self.num_columns)] - for row in range(self.num_rows) - ] - return Matrix(values) - - def inverse(self) -> Matrix: - determinant = self.determinant() - if not determinant: - raise TypeError("Only matrices with a non-zero determinant have an inverse") - return self.adjugate() * (1 / determinant) - - def __repr__(self) -> str: - return str(self.rows) - - def __str__(self) -> str: - if self.num_rows == 0: - return "[]" - if self.num_rows == 1: - return "[[" + ". ".join(str(self.rows[0])) + "]]" - return ( - "[" - + "\n ".join( - [ - "[" + ". ".join([str(value) for value in row]) + ".]" - for row in self.rows - ] - ) - + "]" - ) - - # MATRIX MANIPULATION - def add_row(self, row: list[int], position: int | None = None) -> None: - type_error = TypeError("Row must be a list containing all ints and/or floats") - if not isinstance(row, list): - raise type_error - for value in row: - if not isinstance(value, (int, float)): - raise type_error - if len(row) != self.num_columns: - raise ValueError( - "Row must be equal in length to the other rows in the matrix" - ) - if position is None: - self.rows.append(row) - else: - self.rows = self.rows[0:position] + [row] + self.rows[position:] - - def add_column(self, column: list[int], position: int | None = None) -> None: - type_error = TypeError( - "Column must be a list containing all ints and/or floats" - ) - if not isinstance(column, list): - raise type_error - for value in column: - if not isinstance(value, (int, float)): - raise type_error - if len(column) != self.num_rows: - raise ValueError( - "Column must be equal in length to the other columns in the matrix" - ) - if position is None: - self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)] - else: - self.rows = [ - self.rows[i][0:position] + [column[i]] + self.rows[i][position:] - for i in range(self.num_rows) - ] - - # MATRIX OPERATIONS - def __eq__(self, other: object) -> bool: - if not isinstance(other, Matrix): - return NotImplemented - return self.rows == other.rows - - def __ne__(self, other: object) -> bool: - return not self == other - - def __neg__(self) -> Matrix: - return self * -1 - - def __add__(self, other: Matrix) -> Matrix: - if self.order != other.order: - raise ValueError("Addition requires matrices of the same order") - return Matrix( - [ - [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)] - for i in range(self.num_rows) - ] - ) - - def __sub__(self, other: Matrix) -> Matrix: - if self.order != other.order: - raise ValueError("Subtraction requires matrices of the same order") - return Matrix( - [ - [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)] - for i in range(self.num_rows) - ] - ) - - def __mul__(self, other: Matrix | int | float) -> Matrix: - if isinstance(other, (int, float)): - return Matrix( - [[int(element * other) for element in row] for row in self.rows] - ) - elif isinstance(other, Matrix): - if self.num_columns != other.num_rows: - raise ValueError( - "The number of columns in the first matrix must " - "be equal to the number of rows in the second" - ) - return Matrix( - [ - [Matrix.dot_product(row, column) for column in other.columns()] - for row in self.rows - ] - ) - else: - raise TypeError( - "A Matrix can only be multiplied by an int, float, or another matrix" 
- ) - - def __pow__(self, other: int) -> Matrix: - if not isinstance(other, int): - raise TypeError("A Matrix can only be raised to the power of an int") - if not self.is_square: - raise ValueError("Only square matrices can be raised to a power") - if other == 0: - return self.identity() - if other < 0: - if self.is_invertable: - return self.inverse() ** (-other) - raise ValueError( - "Only invertable matrices can be raised to a negative power" - ) - result = self - for _ in range(other - 1): - result *= self - return result - - @classmethod - def dot_product(cls, row: list[int], column: list[int]) -> int: - return sum(row[i] * column[i] for i in range(len(row))) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() +# An OOP approach to representing and manipulating matrices + +from __future__ import annotations + + +class Matrix: + """ + Matrix object generated from a 2D array where each element is an array representing + a row. + Rows can contain type int or float. + Common operations and information available. + >>> rows = [ + ... [1, 2, 3], + ... [4, 5, 6], + ... [7, 8, 9] + ... ] + >>> matrix = Matrix(rows) + >>> print(matrix) + [[1. 2. 3.] + [4. 5. 6.] + [7. 8. 9.]] + + Matrix rows and columns are available as 2D arrays + >>> matrix.rows + [[1, 2, 3], [4, 5, 6], [7, 8, 9]] + >>> matrix.columns() + [[1, 4, 7], [2, 5, 8], [3, 6, 9]] + + Order is returned as a tuple + >>> matrix.order + (3, 3) + + Squareness and invertability are represented as bool + >>> matrix.is_square + True + >>> matrix.is_invertable() + False + + Identity, Minors, Cofactors and Adjugate are returned as Matrices. Inverse can be + a Matrix or Nonetype + >>> print(matrix.identity()) + [[1. 0. 0.] + [0. 1. 0.] + [0. 0. 1.]] + >>> print(matrix.minors()) + [[-3. -6. -3.] + [-6. -12. -6.] + [-3. -6. -3.]] + >>> print(matrix.cofactors()) + [[-3. 6. -3.] + [6. -12. 6.] + [-3. 6. -3.]] + >>> # won't be apparent due to the nature of the cofactor matrix + >>> print(matrix.adjugate()) + [[-3. 6. -3.] + [6. -12. 6.] + [-3. 6. -3.]] + >>> matrix.inverse() + Traceback (most recent call last): + ... + TypeError: Only matrices with a non-zero determinant have an inverse + + Determinant is an int, float, or Nonetype + >>> matrix.determinant() + 0 + + Negation, scalar multiplication, addition, subtraction, multiplication and + exponentiation are available and all return a Matrix + >>> print(-matrix) + [[-1. -2. -3.] + [-4. -5. -6.] + [-7. -8. -9.]] + >>> matrix2 = matrix * 3 + >>> print(matrix2) + [[3. 6. 9.] + [12. 15. 18.] + [21. 24. 27.]] + >>> print(matrix + matrix2) + [[4. 8. 12.] + [16. 20. 24.] + [28. 32. 36.]] + >>> print(matrix - matrix2) + [[-2. -4. -6.] + [-8. -10. -12.] + [-14. -16. -18.]] + >>> print(matrix ** 3) + [[468. 576. 684.] + [1062. 1305. 1548.] + [1656. 2034. 2412.]] + + Matrices can also be modified + >>> matrix.add_row([10, 11, 12]) + >>> print(matrix) + [[1. 2. 3.] + [4. 5. 6.] + [7. 8. 9.] + [10. 11. 12.]] + >>> matrix2.add_column([8, 16, 32]) + >>> print(matrix2) + [[3. 6. 9. 8.] + [12. 15. 18. 16.] + [21. 24. 27. 32.]] + >>> print(matrix * matrix2) + [[90. 108. 126. 136.] + [198. 243. 288. 304.] + [306. 378. 450. 472.] + [414. 513. 612. 640.]] + """ + + def __init__(self, rows: list[list[int]]): + error = TypeError( + "Matrices must be formed from a list of zero or more lists containing at " + "least one and the same number of values, each of which must be of type " + "int or float." 
+ ) + if len(rows) != 0: + cols = len(rows[0]) + if cols == 0: + raise error + for row in rows: + if len(row) != cols: + raise error + for value in row: + if not isinstance(value, (int, float)): + raise error + self.rows = rows + else: + self.rows = [] + + # MATRIX INFORMATION + def columns(self) -> list[list[int]]: + return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))] + + @property + def num_rows(self) -> int: + return len(self.rows) + + @property + def num_columns(self) -> int: + return len(self.rows[0]) + + @property + def order(self) -> tuple[int, int]: + return (self.num_rows, self.num_columns) + + @property + def is_square(self) -> bool: + return self.order[0] == self.order[1] + + def identity(self) -> Matrix: + values = [ + [0 if column_num != row_num else 1 for column_num in range(self.num_rows)] + for row_num in range(self.num_rows) + ] + return Matrix(values) + + def determinant(self) -> int: + if not self.is_square: + return 0 + if self.order == (0, 0): + return 1 + if self.order == (1, 1): + return int(self.rows[0][0]) + if self.order == (2, 2): + return int( + (self.rows[0][0] * self.rows[1][1]) + - (self.rows[0][1] * self.rows[1][0]) + ) + else: + return sum( + self.rows[0][column] * self.cofactors().rows[0][column] + for column in range(self.num_columns) + ) + + def is_invertable(self) -> bool: + return bool(self.determinant()) + + def get_minor(self, row: int, column: int) -> int: + values = [ + [ + self.rows[other_row][other_column] + for other_column in range(self.num_columns) + if other_column != column + ] + for other_row in range(self.num_rows) + if other_row != row + ] + return Matrix(values).determinant() + + def get_cofactor(self, row: int, column: int) -> int: + if (row + column) % 2 == 0: + return self.get_minor(row, column) + return -1 * self.get_minor(row, column) + + def minors(self) -> Matrix: + return Matrix( + [ + [self.get_minor(row, column) for column in range(self.num_columns)] + for row in range(self.num_rows) + ] + ) + + def cofactors(self) -> Matrix: + return Matrix( + [ + [ + self.minors().rows[row][column] + if (row + column) % 2 == 0 + else self.minors().rows[row][column] * -1 + for column in range(self.minors().num_columns) + ] + for row in range(self.minors().num_rows) + ] + ) + + def adjugate(self) -> Matrix: + values = [ + [self.cofactors().rows[column][row] for column in range(self.num_columns)] + for row in range(self.num_rows) + ] + return Matrix(values) + + def inverse(self) -> Matrix: + determinant = self.determinant() + if not determinant: + raise TypeError("Only matrices with a non-zero determinant have an inverse") + return self.adjugate() * (1 / determinant) + + def __repr__(self) -> str: + return str(self.rows) + + def __str__(self) -> str: + if self.num_rows == 0: + return "[]" + if self.num_rows == 1: + return "[[" + ". ".join(str(self.rows[0])) + "]]" + return ( + "[" + + "\n ".join( + [ + "[" + ". 
".join([str(value) for value in row]) + ".]" + for row in self.rows + ] + ) + + "]" + ) + + # MATRIX MANIPULATION + def add_row(self, row: list[int], position: int | None = None) -> None: + type_error = TypeError("Row must be a list containing all ints and/or floats") + if not isinstance(row, list): + raise type_error + for value in row: + if not isinstance(value, (int, float)): + raise type_error + if len(row) != self.num_columns: + raise ValueError( + "Row must be equal in length to the other rows in the matrix" + ) + if position is None: + self.rows.append(row) + else: + self.rows = self.rows[0:position] + [row] + self.rows[position:] + + def add_column(self, column: list[int], position: int | None = None) -> None: + type_error = TypeError( + "Column must be a list containing all ints and/or floats" + ) + if not isinstance(column, list): + raise type_error + for value in column: + if not isinstance(value, (int, float)): + raise type_error + if len(column) != self.num_rows: + raise ValueError( + "Column must be equal in length to the other columns in the matrix" + ) + if position is None: + self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)] + else: + self.rows = [ + self.rows[i][0:position] + [column[i]] + self.rows[i][position:] + for i in range(self.num_rows) + ] + + # MATRIX OPERATIONS + def __eq__(self, other: object) -> bool: + if not isinstance(other, Matrix): + return NotImplemented + return self.rows == other.rows + + def __ne__(self, other: object) -> bool: + return not self == other + + def __neg__(self) -> Matrix: + return self * -1 + + def __add__(self, other: Matrix) -> Matrix: + if self.order != other.order: + raise ValueError("Addition requires matrices of the same order") + return Matrix( + [ + [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)] + for i in range(self.num_rows) + ] + ) + + def __sub__(self, other: Matrix) -> Matrix: + if self.order != other.order: + raise ValueError("Subtraction requires matrices of the same order") + return Matrix( + [ + [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)] + for i in range(self.num_rows) + ] + ) + + def __mul__(self, other: Matrix | int | float) -> Matrix: + if isinstance(other, (int, float)): + return Matrix( + [[int(element * other) for element in row] for row in self.rows] + ) + elif isinstance(other, Matrix): + if self.num_columns != other.num_rows: + raise ValueError( + "The number of columns in the first matrix must " + "be equal to the number of rows in the second" + ) + return Matrix( + [ + [Matrix.dot_product(row, column) for column in other.columns()] + for row in self.rows + ] + ) + else: + raise TypeError( + "A Matrix can only be multiplied by an int, float, or another matrix" + ) + + def __pow__(self, other: int) -> Matrix: + if not isinstance(other, int): + raise TypeError("A Matrix can only be raised to the power of an int") + if not self.is_square: + raise ValueError("Only square matrices can be raised to a power") + if other == 0: + return self.identity() + if other < 0: + if self.is_invertable(): + return self.inverse() ** (-other) + raise ValueError( + "Only invertable matrices can be raised to a negative power" + ) + result = self + for _ in range(other - 1): + result *= self + return result + + @classmethod + def dot_product(cls, row: list[int], column: list[int]) -> int: + return sum(row[i] * column[i] for i in range(len(row))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0684ccdd69c62d5dc816bdc488bc079d06b9685a Mon 
Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 15 Nov 2022 18:34:17 +0100 Subject: [PATCH 0732/1543] [pre-commit.ci] pre-commit autoupdate (#7983) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/asottile/pyupgrade: v3.2.0 → v3.2.2](https://github.com/asottile/pyupgrade/compare/v3.2.0...v3.2.2) - [github.com/pre-commit/mirrors-mypy: v0.982 → v0.990](https://github.com/pre-commit/mirrors-mypy/compare/v0.982...v0.990) * updating DIRECTORY.md * Update .pre-commit-config.yaml * Downgrade to mypy v0.991 --> v0.990 * mpyp v0.991 * Update DIRECTORY.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a0ea03b9b8cd..324a021ee205 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,11 +27,11 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v3.2.0 + rev: v3.2.2 hooks: - id: pyupgrade args: - - --py310-plus + - --py311-plus - repo: https://github.com/PyCQA/flake8 rev: 5.0.4 @@ -52,7 +52,7 @@ repos: *flake8-plugins - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.982 + rev: v0.991 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 74243cd0687b..e2fffec57380 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -509,6 +509,7 @@ * [Area](maths/area.py) * [Area Under Curve](maths/area_under_curve.py) * [Armstrong Numbers](maths/armstrong_numbers.py) + * [Automorphic Number](maths/automorphic_number.py) * [Average Absolute Deviation](maths/average_absolute_deviation.py) * [Average Mean](maths/average_mean.py) * [Average Median](maths/average_median.py) @@ -603,6 +604,7 @@ * [Prime Sieve Eratosthenes](maths/prime_sieve_eratosthenes.py) * [Primelib](maths/primelib.py) * [Print Multiplication Table](maths/print_multiplication_table.py) + * [Pronic Number](maths/pronic_number.py) * [Proth Number](maths/proth_number.py) * [Pythagoras](maths/pythagoras.py) * [Qr Decomposition](maths/qr_decomposition.py) @@ -638,6 +640,7 @@ * [Test Prime Check](maths/test_prime_check.py) * [Trapezoidal Rule](maths/trapezoidal_rule.py) * [Triplet Sum](maths/triplet_sum.py) + * [Twin Prime](maths/twin_prime.py) * [Two Pointer](maths/two_pointer.py) * [Two Sum](maths/two_sum.py) * [Ugly Numbers](maths/ugly_numbers.py) From b33ea81a7437eaf7d048d92a9b75330c9d9e165e Mon Sep 17 00:00:00 2001 From: Akshay Dubey <38462415+itsAkshayDubey@users.noreply.github.com> Date: Fri, 18 Nov 2022 13:48:47 +0530 Subject: [PATCH 0733/1543] algorithm: Add juggler sequence (#7985) * feat: Add juggler sequence * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor: Remove temp variable * refactor: Change error type for negative numbers * refactor: Remove # doctest: +NORMALIZE_WHITESPACE * refactor: Remove int typecasting * test: Add unit tests for n=10 and n=25 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/juggler_sequence.py | 61 +++++++++++++++++++++++++++++++++++++++ 1 file 
changed, 61 insertions(+) create mode 100644 maths/juggler_sequence.py diff --git a/maths/juggler_sequence.py b/maths/juggler_sequence.py new file mode 100644 index 000000000000..9daba8bc0e8a --- /dev/null +++ b/maths/juggler_sequence.py @@ -0,0 +1,61 @@ +""" +== Juggler Sequence == +Juggler sequence start with any positive integer n. The next term is +obtained as follows: + If n term is even, the next term is floor value of square root of n . + If n is odd, the next term is floor value of 3 time the square root of n. + +https://en.wikipedia.org/wiki/Juggler_sequence +""" + +# Author : Akshay Dubey (https://github.com/itsAkshayDubey) +import math + + +def juggler_sequence(number: int) -> list[int]: + """ + >>> juggler_sequence(0) + Traceback (most recent call last): + ... + ValueError: Input value of [number=0] must be a positive integer + >>> juggler_sequence(1) + [1] + >>> juggler_sequence(2) + [2, 1] + >>> juggler_sequence(3) + [3, 5, 11, 36, 6, 2, 1] + >>> juggler_sequence(5) + [5, 11, 36, 6, 2, 1] + >>> juggler_sequence(10) + [10, 3, 5, 11, 36, 6, 2, 1] + >>> juggler_sequence(25) + [25, 125, 1397, 52214, 228, 15, 58, 7, 18, 4, 2, 1] + >>> juggler_sequence(6.0) + Traceback (most recent call last): + ... + TypeError: Input value of [number=6.0] must be an integer + >>> juggler_sequence(-1) + Traceback (most recent call last): + ... + ValueError: Input value of [number=-1] must be a positive integer + """ + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + if number < 1: + raise ValueError(f"Input value of [number={number}] must be a positive integer") + sequence = [number] + while number != 1: + if number % 2 == 0: + number = math.floor(math.sqrt(number)) + else: + number = math.floor( + math.sqrt(number) * math.sqrt(number) * math.sqrt(number) + ) + sequence.append(number) + return sequence + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f01a1af1df28ba53fc4727ea0bb703b5744100a7 Mon Sep 17 00:00:00 2001 From: Swayam <74960567+practice404@users.noreply.github.com> Date: Sun, 20 Nov 2022 16:25:58 +0530 Subject: [PATCH 0734/1543] Bi directional dijkstra (#7982) * Added Bi-Directional Dijkstra * Added Bi-Directional Dijkstra * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added doctest and type hints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename Bi_directional_Dijkstra.py to bi_directional_dijkstra.py * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see 
https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bi_directional_dijkstra.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- graphs/bi_directional_dijkstra.py | 130 ++++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+) create mode 100644 graphs/bi_directional_dijkstra.py diff --git a/graphs/bi_directional_dijkstra.py b/graphs/bi_directional_dijkstra.py new file mode 100644 index 000000000000..fc53e2f0d8f3 --- /dev/null +++ b/graphs/bi_directional_dijkstra.py @@ -0,0 +1,130 @@ +""" +Bi-directional Dijkstra's algorithm. + +A bi-directional approach is an efficient and +less time consuming optimization for Dijkstra's +searching algorithm + +Reference: shorturl.at/exHM7 +""" + +# Author: Swayam Singh (https://github.com/practice404) + + +from queue import PriorityQueue +from typing import Any + +import numpy as np + + +def bidirectional_dij( + source: str, destination: str, graph_forward: dict, graph_backward: dict +) -> int: + """ + Bi-directional Dijkstra's algorithm. + + Returns: + shortest_path_distance (int): length of the shortest path. + + Warnings: + If the destination is not reachable, function returns -1 + + >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd) + 3 + """ + shortest_path_distance = -1 + + visited_forward = set() + visited_backward = set() + cst_fwd = {source: 0} + cst_bwd = {destination: 0} + parent_forward = {source: None} + parent_backward = {destination: None} + queue_forward: PriorityQueue[Any] = PriorityQueue() + queue_backward: PriorityQueue[Any] = PriorityQueue() + + shortest_distance = np.inf + + queue_forward.put((0, source)) + queue_backward.put((0, destination)) + + if source == destination: + return 0 + + while queue_forward and queue_backward: + while not queue_forward.empty(): + _, v_fwd = queue_forward.get() + + if v_fwd not in visited_forward: + break + else: + break + visited_forward.add(v_fwd) + + while not queue_backward.empty(): + _, v_bwd = queue_backward.get() + + if v_bwd not in visited_backward: + break + else: + break + visited_backward.add(v_bwd) + + # forward pass and relaxation + for nxt_fwd, d_forward in graph_forward[v_fwd]: + if nxt_fwd in visited_forward: + continue + old_cost_f = cst_fwd.get(nxt_fwd, np.inf) + new_cost_f = cst_fwd[v_fwd] + d_forward + if new_cost_f < old_cost_f: + queue_forward.put((new_cost_f, nxt_fwd)) + cst_fwd[nxt_fwd] = new_cost_f + parent_forward[nxt_fwd] = v_fwd + if nxt_fwd in visited_backward: + if cst_fwd[v_fwd] + d_forward + cst_bwd[nxt_fwd] < shortest_distance: + shortest_distance = cst_fwd[v_fwd] + d_forward + cst_bwd[nxt_fwd] + + # backward pass and relaxation + for nxt_bwd, d_backward in graph_backward[v_bwd]: + if nxt_bwd in visited_backward: + continue + old_cost_b = cst_bwd.get(nxt_bwd, np.inf) + new_cost_b = cst_bwd[v_bwd] + d_backward + if new_cost_b < old_cost_b: + queue_backward.put((new_cost_b, nxt_bwd)) + cst_bwd[nxt_bwd] = new_cost_b + parent_backward[nxt_bwd] = v_bwd + + if nxt_bwd in visited_forward: + if cst_bwd[v_bwd] + d_backward + cst_fwd[nxt_bwd] < shortest_distance: + shortest_distance = cst_bwd[v_bwd] + d_backward + cst_fwd[nxt_bwd] + + if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= 
shortest_distance: + break + + if shortest_distance != np.inf: + shortest_path_distance = shortest_distance + return shortest_path_distance + + +graph_fwd = { + "B": [["C", 1]], + "C": [["D", 1]], + "D": [["F", 1]], + "E": [["B", 1], ["G", 2]], + "F": [], + "G": [["F", 1]], +} +graph_bwd = { + "B": [["E", 1]], + "C": [["B", 1]], + "D": [["C", 1]], + "F": [["D", 1], ["G", 1]], + "E": [[None, np.inf]], + "G": [["E", 2]], +} + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a25c53e8b0cc73ff718ec406ac04cca0c2ddbb02 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Sun, 20 Nov 2022 14:59:25 +0400 Subject: [PATCH 0735/1543] Fix argument validation for count_1s_brian_kernighan_method (#7994) * Fix argument validation for count_1s_brian_kernighan_method * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + .../count_1s_brian_kernighan_method.py | 15 +++++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index e2fffec57380..83da4b76abca 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -571,6 +571,7 @@ * [Largest Subarray Sum](maths/largest_subarray_sum.py) * [Least Common Multiple](maths/least_common_multiple.py) * [Line Length](maths/line_length.py) + * [Liouville Lambda](maths/liouville_lambda.py) * [Lucas Lehmer Primality Test](maths/lucas_lehmer_primality_test.py) * [Lucas Series](maths/lucas_series.py) * [Maclaurin Series](maths/maclaurin_series.py) diff --git a/bit_manipulation/count_1s_brian_kernighan_method.py b/bit_manipulation/count_1s_brian_kernighan_method.py index e6d6d65345c4..2ed81b09d675 100644 --- a/bit_manipulation/count_1s_brian_kernighan_method.py +++ b/bit_manipulation/count_1s_brian_kernighan_method.py @@ -17,16 +17,19 @@ def get_1s_count(number: int) -> int: >>> get_1s_count(-1) Traceback (most recent call last): ... - ValueError: the value of input must be positive + ValueError: Input must be a non-negative integer >>> get_1s_count(0.8) Traceback (most recent call last): ... - TypeError: Input value must be an 'int' type + ValueError: Input must be a non-negative integer + >>> get_1s_count("25") + Traceback (most recent call last): + ... 
+ ValueError: Input must be a non-negative integer """ - if number < 0: - raise ValueError("the value of input must be positive") - elif isinstance(number, float): - raise TypeError("Input value must be an 'int' type") + if not isinstance(number, int) or number < 0: + raise ValueError("Input must be a non-negative integer") + count = 0 while number: # This way we arrive at next set bit (next 1) instead of looping From f32d611689dc72bda67f1c4636ab1599c60d27a4 Mon Sep 17 00:00:00 2001 From: Mark Mayo Date: Mon, 21 Nov 2022 00:00:27 +1300 Subject: [PATCH 0736/1543] clean of unnecessary checks, imports, calls (#7993) --- backtracking/rat_in_maze.py | 4 ++-- boolean_algebra/not_gate.py | 2 +- cellular_automata/nagel_schrekenberg.py | 3 +-- ciphers/mixed_keyword_cypher.py | 4 ++-- compression/huffman.py | 2 +- data_structures/heap/min_heap.py | 2 +- .../test_digital_image_processing.py | 2 +- dynamic_programming/fizz_buzz.py | 5 ++--- dynamic_programming/max_sub_array.py | 3 +-- graphs/directed_and_undirected_(weighted)_graph.py | 8 ++++---- graphs/multi_heuristic_astar.py | 3 ++- linear_algebra/src/lib.py | 2 +- machine_learning/sequential_minimum_optimization.py | 10 ++-------- maths/find_min.py | 3 +-- maths/kadanes.py | 6 ++---- maths/largest_subarray_sum.py | 6 ++---- maths/series/geometric_series.py | 2 +- networking_flow/ford_fulkerson.py | 2 +- networking_flow/minimum_cut.py | 2 +- other/password.py | 10 +++------- project_euler/problem_025/sol1.py | 2 +- project_euler/problem_036/sol1.py | 2 +- quantum/q_fourier_transform.py | 4 ++-- quantum/q_full_adder.py | 6 +++++- quantum/superdense_coding.py | 2 +- sorts/msd_radix_sort.py | 2 +- strings/aho_corasick.py | 2 +- 27 files changed, 44 insertions(+), 57 deletions(-) diff --git a/backtracking/rat_in_maze.py b/backtracking/rat_in_maze.py index 2860880db540..7bde886dd558 100644 --- a/backtracking/rat_in_maze.py +++ b/backtracking/rat_in_maze.py @@ -88,12 +88,12 @@ def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) solutions[i][j] = 1 return True - lower_flag = (not (i < 0)) and (not (j < 0)) # Check lower bounds + lower_flag = (not i < 0) and (not j < 0) # Check lower bounds upper_flag = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. 
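+        # `not` binds more loosely than comparisons and subscription, so
+        # `not i < 0` and `not solutions[i][j]` read as `not (i < 0)` and
+        # `not (solutions[i][j])` respectively.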
- block_flag = (not (solutions[i][j])) and (not (maze[i][j])) + block_flag = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited solutions[i][j] = 1 diff --git a/boolean_algebra/not_gate.py b/boolean_algebra/not_gate.py index b41da602d936..eb85e9e44cd3 100644 --- a/boolean_algebra/not_gate.py +++ b/boolean_algebra/not_gate.py @@ -34,4 +34,4 @@ def test_not_gate() -> None: if __name__ == "__main__": print(not_gate(0)) - print(not_gate(1)) + print(not_gate(1)) diff --git a/cellular_automata/nagel_schrekenberg.py b/cellular_automata/nagel_schrekenberg.py index be44761ecf82..3fd6afca0153 100644 --- a/cellular_automata/nagel_schrekenberg.py +++ b/cellular_automata/nagel_schrekenberg.py @@ -45,8 +45,7 @@ def construct_highway( highway = [[-1] * number_of_cells] # Create a highway without any car i = 0 - if initial_speed < 0: - initial_speed = 0 + initial_speed = max(initial_speed, 0) while i < number_of_cells: highway[0][i] = ( randint(0, max_speed) if random_speed else initial_speed diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index f55c9c4286df..806004faa079 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -42,7 +42,7 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: s = [] for _ in range(len_temp): s.append(temp[k]) - if not (k < 25): + if k >= 25: break k += 1 modalpha.append(s) @@ -52,7 +52,7 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: k = 0 for j in range(len_temp): for m in modalpha: - if not (len(m) - 1 >= j): + if not len(m) - 1 >= j: break d[alpha[k]] = m[j] if not k < 25: diff --git a/compression/huffman.py b/compression/huffman.py index f619ed82c764..b337ac3ec3ff 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -56,7 +56,7 @@ def traverse_tree(root: Letter | TreeNode, bitstring: str) -> list[Letter]: Recursively traverse the Huffman Tree to set each Letter's bitstring dictionary, and return the list of Letters """ - if type(root) is Letter: + if isinstance(root, Letter): root.bitstring[root.letter] = bitstring return [root] treenode: TreeNode = root # type: ignore diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index 0403624f285a..ecb1876493b0 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -121,7 +121,7 @@ def insert(self, node): self.sift_up(len(self.heap) - 1) def is_empty(self): - return True if len(self.heap) == 0 else False + return len(self.heap) == 0 def decrease_key(self, node, new_value): assert ( diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index fdcebfdad161..c999464ce85e 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -10,7 +10,7 @@ from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs -from digital_image_processing.edge_detection import canny as canny +from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp diff --git a/dynamic_programming/fizz_buzz.py b/dynamic_programming/fizz_buzz.py index dd1d21b1075e..e77ab3de7b4b 100644 --- 
a/dynamic_programming/fizz_buzz.py +++ b/dynamic_programming/fizz_buzz.py @@ -33,10 +33,9 @@ def fizz_buzz(number: int, iterations: int) -> str: ... ValueError: iterations must be defined as integers """ - - if not type(iterations) == int: + if not isinstance(iterations, int): raise ValueError("iterations must be defined as integers") - if not type(number) == int or not number >= 1: + if not isinstance(number, int) or not number >= 1: raise ValueError( """starting number must be and integer and be more than 0""" diff --git a/dynamic_programming/max_sub_array.py b/dynamic_programming/max_sub_array.py index 42eca79a931e..07717fba4172 100644 --- a/dynamic_programming/max_sub_array.py +++ b/dynamic_programming/max_sub_array.py @@ -62,8 +62,7 @@ def max_sub_array(nums: list[int]) -> int: current = 0 for i in nums: current += i - if current < 0: - current = 0 + current = max(current, 0) best = max(best, current) return best diff --git a/graphs/directed_and_undirected_(weighted)_graph.py b/graphs/directed_and_undirected_(weighted)_graph.py index 43a72b89e3a7..b29485031083 100644 --- a/graphs/directed_and_undirected_(weighted)_graph.py +++ b/graphs/directed_and_undirected_(weighted)_graph.py @@ -167,7 +167,7 @@ def cycle_nodes(self): and not on_the_way_back ): len_stack = len(stack) - 1 - while True and len_stack >= 0: + while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1]) break @@ -220,7 +220,7 @@ def has_cycle(self): and not on_the_way_back ): len_stack_minus_one = len(stack) - 1 - while True and len_stack_minus_one >= 0: + while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1]) break @@ -392,7 +392,7 @@ def cycle_nodes(self): and not on_the_way_back ): len_stack = len(stack) - 1 - while True and len_stack >= 0: + while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1]) break @@ -445,7 +445,7 @@ def has_cycle(self): and not on_the_way_back ): len_stack_minus_one = len(stack) - 1 - while True and len_stack_minus_one >= 0: + while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1]) break diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index e16a983932d0..cd8e37b0099b 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -1,4 +1,5 @@ import heapq +import sys import numpy as np @@ -116,7 +117,7 @@ def do_something(back_pointer, goal, start): print(x, end=" ") x = back_pointer[x] print(x) - quit() + sys.exit() def valid(p: TPos): diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 775e0244abb2..ac0398a31a07 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -129,7 +129,7 @@ def component(self, i: int) -> float: input: index (0-indexed) output: the i-th component of the vector. 
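     Negative indices are accepted and count from the end, as with Python lists.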
""" - if type(i) is int and -len(self.__components) <= i < len(self.__components): + if isinstance(i, int) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception("index out of range") diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 66535e806c43..3864f6421fcb 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -388,16 +388,10 @@ def _norm(self, data): return (data - self._min) / (self._max - self._min) def _is_unbound(self, index): - if 0.0 < self.alphas[index] < self._c: - return True - else: - return False + return bool(0.0 < self.alphas[index] < self._c) def _is_support(self, index): - if self.alphas[index] > 0: - return True - else: - return False + return bool(self.alphas[index] > 0) @property def unbound(self): diff --git a/maths/find_min.py b/maths/find_min.py index 228205ed7feb..2eac087c6388 100644 --- a/maths/find_min.py +++ b/maths/find_min.py @@ -24,8 +24,7 @@ def find_min(nums: list[int | float]) -> int | float: raise ValueError("find_min() arg is an empty sequence") min_num = nums[0] for num in nums: - if min_num > num: - min_num = num + min_num = min(min_num, num) return min_num diff --git a/maths/kadanes.py b/maths/kadanes.py index b23409e2b978..c2ea53a6cc84 100644 --- a/maths/kadanes.py +++ b/maths/kadanes.py @@ -49,10 +49,8 @@ def kadanes(arr: list) -> int: for i in arr: max_till_element += i - if max_sum <= max_till_element: - max_sum = max_till_element - if max_till_element < 0: - max_till_element = 0 + max_sum = max(max_sum, max_till_element) + max_till_element = max(max_till_element, 0) return max_sum diff --git a/maths/largest_subarray_sum.py b/maths/largest_subarray_sum.py index 0449e72e64e3..90f92c7127bf 100644 --- a/maths/largest_subarray_sum.py +++ b/maths/largest_subarray_sum.py @@ -11,10 +11,8 @@ def max_sub_array_sum(a: list, size: int = 0): max_ending_here = 0 for i in range(0, size): max_ending_here = max_ending_here + a[i] - if max_so_far < max_ending_here: - max_so_far = max_ending_here - if max_ending_here < 0: - max_ending_here = 0 + max_so_far = max(max_so_far, max_ending_here) + max_ending_here = max(max_ending_here, 0) return max_so_far diff --git a/maths/series/geometric_series.py b/maths/series/geometric_series.py index a875ab89a0c5..90c9fe77b733 100644 --- a/maths/series/geometric_series.py +++ b/maths/series/geometric_series.py @@ -52,7 +52,7 @@ def geometric_series( power = 1 multiple = common_ratio_r for _ in range(int(nth_term)): - if series == []: + if not series: series.append(start_term_a) else: power += 1 diff --git a/networking_flow/ford_fulkerson.py b/networking_flow/ford_fulkerson.py index 370e3848222a..716ed508e679 100644 --- a/networking_flow/ford_fulkerson.py +++ b/networking_flow/ford_fulkerson.py @@ -21,7 +21,7 @@ def bfs(graph, s, t, parent): visited[ind] = True parent[ind] = u - return True if visited[t] else False + return visited[t] def ford_fulkerson(graph, source, sink): diff --git a/networking_flow/minimum_cut.py b/networking_flow/minimum_cut.py index 33131315f4e1..164b45f1012a 100644 --- a/networking_flow/minimum_cut.py +++ b/networking_flow/minimum_cut.py @@ -24,7 +24,7 @@ def bfs(graph, s, t, parent): visited[ind] = True parent[ind] = u - return True if visited[t] else False + return visited[t] def mincut(graph, source, sink): diff --git a/other/password.py b/other/password.py index f463c7564536..9a6161af87d7 100644 --- 
a/other/password.py +++ b/other/password.py @@ -89,13 +89,9 @@ def is_strong_password(password: str, min_length: int = 8) -> bool: num = any(char in digits for char in password) spec_char = any(char in punctuation for char in password) - if upper and lower and num and spec_char: - return True - - else: - # Passwords should contain UPPERCASE, lowerase - # numbers, and special characters - return False + return upper and lower and num and spec_char + # Passwords should contain UPPERCASE, lowerase + # numbers, and special characters def main(): diff --git a/project_euler/problem_025/sol1.py b/project_euler/problem_025/sol1.py index c30a74a43cb0..803464b5d786 100644 --- a/project_euler/problem_025/sol1.py +++ b/project_euler/problem_025/sol1.py @@ -43,7 +43,7 @@ def fibonacci(n: int) -> int: 144 """ - if n == 1 or type(n) is not int: + if n == 1 or not isinstance(n, int): return 0 elif n == 2: return 1 diff --git a/project_euler/problem_036/sol1.py b/project_euler/problem_036/sol1.py index 425c41221395..1d27356ec51e 100644 --- a/project_euler/problem_036/sol1.py +++ b/project_euler/problem_036/sol1.py @@ -32,7 +32,7 @@ def is_palindrome(n: int | str) -> bool: False """ n = str(n) - return True if n == n[::-1] else False + return n == n[::-1] def solution(n: int = 1000000): diff --git a/quantum/q_fourier_transform.py b/quantum/q_fourier_transform.py index d138dfb452ee..07a257579529 100644 --- a/quantum/q_fourier_transform.py +++ b/quantum/q_fourier_transform.py @@ -55,9 +55,9 @@ def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts ... ValueError: number of qubits must be exact integer. """ - if type(number_of_qubits) == str: + if isinstance(number_of_qubits, str): raise TypeError("number of qubits must be a integer.") - if not number_of_qubits > 0: + if number_of_qubits <= 0: raise ValueError("number of qubits must be > 0.") if math.floor(number_of_qubits) != number_of_qubits: raise ValueError("number of qubits must be exact integer.") diff --git a/quantum/q_full_adder.py b/quantum/q_full_adder.py index c6d03d170659..66d93198519e 100644 --- a/quantum/q_full_adder.py +++ b/quantum/q_full_adder.py @@ -60,7 +60,11 @@ def quantum_full_adder( ... ValueError: inputs must be less or equal to 2. """ - if (type(input_1) == str) or (type(input_2) == str) or (type(carry_in) == str): + if ( + isinstance(input_1, str) + or isinstance(input_2, str) + or isinstance(carry_in, str) + ): raise TypeError("inputs must be integers.") if (input_1 < 0) or (input_2 < 0) or (carry_in < 0): diff --git a/quantum/superdense_coding.py b/quantum/superdense_coding.py index 10ebc2d3593c..1087312f9f5d 100644 --- a/quantum/superdense_coding.py +++ b/quantum/superdense_coding.py @@ -53,7 +53,7 @@ def superdense_coding(bit_1: int = 1, bit_2: int = 1) -> qiskit.result.counts.Co ... ValueError: inputs must be less or equal to 1. 
""" - if (type(bit_1) == str) or (type(bit_2) == str): + if isinstance(bit_1, str) or isinstance(bit_2, str): raise TypeError("inputs must be integers.") if (bit_1 < 0) or (bit_2 < 0): raise ValueError("inputs must be positive.") diff --git a/sorts/msd_radix_sort.py b/sorts/msd_radix_sort.py index 84460e47b440..74ce21762906 100644 --- a/sorts/msd_radix_sort.py +++ b/sorts/msd_radix_sort.py @@ -133,7 +133,7 @@ def _msd_radix_sort_inplace( j = end_index - 1 while i <= j: changed = False - if not ((list_of_ints[i] >> bit_position) & 1): + if not (list_of_ints[i] >> bit_position) & 1: # found zero at the beginning i += 1 changed = True diff --git a/strings/aho_corasick.py b/strings/aho_corasick.py index 25ed649ce645..e32a4ba64fac 100644 --- a/strings/aho_corasick.py +++ b/strings/aho_corasick.py @@ -84,7 +84,7 @@ def search_in(self, string: str) -> dict[str, list[int]]: else: current_state = next_state for key in self.adlist[current_state]["output"]: - if not (key in result): + if key not in result: result[key] = [] result[key].append(i - len(key) + 1) return result From 08c22457058207dc465b9ba9fd95659d33b3f1dd Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 29 Nov 2022 16:56:41 +0100 Subject: [PATCH 0737/1543] Upgrade to flake8 v6 (#8007) * Upgrade to flake8 v6 * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .flake8 | 6 ++++-- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 2 ++ compression/huffman.py | 4 ++-- data_structures/binary_tree/non_recursive_segment_tree.py | 2 +- data_structures/binary_tree/segment_tree.py | 6 +++--- machine_learning/sequential_minimum_optimization.py | 2 +- project_euler/problem_107/sol1.py | 1 - 8 files changed, 16 insertions(+), 13 deletions(-) diff --git a/.flake8 b/.flake8 index 2f74f421d020..b68ee8533a61 100644 --- a/.flake8 +++ b/.flake8 @@ -4,5 +4,7 @@ max-line-length = 88 max-complexity = 19 extend-ignore = # Formatting style for `black` - E203 # Whitespace before ':' - W503 # Line break occurred before a binary operator + # E203 is whitespace before ':' + E203, + # W503 is line break occurred before a binary operator + W503 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 324a021ee205..74502b3ea757 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v4.4.0 hooks: - id: check-executables-have-shebangs - id: check-yaml @@ -34,13 +34,13 @@ repos: - --py311-plus - repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 + rev: 6.0.0 hooks: - id: flake8 # See .flake8 for args additional_dependencies: &flake8-plugins - flake8-bugbear - flake8-builtins - - flake8-broken-line + # - flake8-broken-line - flake8-comprehensions - pep8-naming diff --git a/DIRECTORY.md b/DIRECTORY.md index 83da4b76abca..b3b484f7358f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -375,6 +375,7 @@ * [Articulation Points](graphs/articulation_points.py) * [Basic Graphs](graphs/basic_graphs.py) * [Bellman Ford](graphs/bellman_ford.py) + * [Bi Directional Dijkstra](graphs/bi_directional_dijkstra.py) * [Bidirectional A Star](graphs/bidirectional_a_star.py) * [Bidirectional Breadth First Search](graphs/bidirectional_breadth_first_search.py) * [Boruvka](graphs/boruvka.py) @@ -563,6 +564,7 @@ * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) + * [Juggler Sequence](maths/juggler_sequence.py) * 
[Kadanes](maths/kadanes.py) * [Karatsuba](maths/karatsuba.py) * [Krishnamurthy Number](maths/krishnamurthy_number.py) diff --git a/compression/huffman.py b/compression/huffman.py index b337ac3ec3ff..65e5c2f25385 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -32,7 +32,7 @@ def parse_file(file_path: str) -> list[Letter]: if not c: break chars[c] = chars[c] + 1 if c in chars else 1 - return sorted((Letter(c, f) for c, f in chars.items()), key=lambda l: l.freq) + return sorted((Letter(c, f) for c, f in chars.items()), key=lambda x: x.freq) def build_tree(letters: list[Letter]) -> Letter | TreeNode: @@ -47,7 +47,7 @@ def build_tree(letters: list[Letter]) -> Letter | TreeNode: total_freq = left.freq + right.freq node = TreeNode(total_freq, left, right) response.append(node) - response.sort(key=lambda l: l.freq) + response.sort(key=lambda x: x.freq) return response[0] diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index 075ff6c912ff..04164e5cba4e 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -106,7 +106,7 @@ def query(self, l: int, r: int) -> T | None: # noqa: E741 l, r = l + self.N, r + self.N res: T | None = None - while l <= r: # noqa: E741 + while l <= r: if l % 2 == 1: res = self.st[l] if res is None else self.fn(res, self.st[l]) if r % 2 == 0: diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index 949a3ecdd32c..b0580386954a 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -16,7 +16,7 @@ def right(self, idx): return idx * 2 + 1 def build(self, idx, l, r): # noqa: E741 - if l == r: # noqa: E741 + if l == r: self.st[idx] = A[l] else: mid = (l + r) // 2 @@ -33,7 +33,7 @@ def update_recursive(self, idx, l, r, a, b, val): # noqa: E741 """ if r < a or l > b: return True - if l == r: # noqa: E741 + if l == r: self.st[idx] = val return True mid = (l + r) // 2 @@ -51,7 +51,7 @@ def query_recursive(self, idx, l, r, a, b): # noqa: E741 """ if r < a or l > b: return -math.inf - if l >= a and r <= b: # noqa: E741 + if l >= a and r <= b: return self.st[idx] mid = (l + r) // 2 q1 = self.query_recursive(self.left(idx), l, mid, a, b) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 3864f6421fcb..f5185e1d9576 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -314,7 +314,7 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) else: l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) - if l == h: # noqa: E741 + if l == h: return None, None # calculate eta diff --git a/project_euler/problem_107/sol1.py b/project_euler/problem_107/sol1.py index b3f5685b95ef..4659eac24bd3 100644 --- a/project_euler/problem_107/sol1.py +++ b/project_euler/problem_107/sol1.py @@ -99,7 +99,6 @@ def solution(filename: str = "p107_network.txt") -> int: """ script_dir: str = os.path.abspath(os.path.dirname(__file__)) network_file: str = os.path.join(script_dir, filename) - adjacency_matrix: list[list[str]] edges: dict[EdgeT, int] = {} data: list[str] edge1: int From 361ddaf29e8b8b2a1e6d2107ad41ee9c7f704325 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> 
Date: Tue, 29 Nov 2022 21:55:30 +0530 Subject: [PATCH 0738/1543] [pre-commit.ci] pre-commit autoupdate (#8006) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.3.0 → v4.4.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.3.0...v4.4.0) - [github.com/PyCQA/flake8: 5.0.4 → 6.0.0](https://github.com/PyCQA/flake8/compare/5.0.4...6.0.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss From 47bf3f58e04873ef609301b1e654f6ddcc02b0fa Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Tue, 29 Nov 2022 22:07:27 +0400 Subject: [PATCH 0739/1543] fix validation condition and add tests (#7997) * fix validation condition and add tests * updating DIRECTORY.md Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- bit_manipulation/index_of_rightmost_set_bit.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/bit_manipulation/index_of_rightmost_set_bit.py b/bit_manipulation/index_of_rightmost_set_bit.py index eb52ea4e63e3..c9c911660b08 100644 --- a/bit_manipulation/index_of_rightmost_set_bit.py +++ b/bit_manipulation/index_of_rightmost_set_bit.py @@ -19,9 +19,17 @@ def get_index_of_rightmost_set_bit(number: int) -> int: Traceback (most recent call last): ... ValueError: Input must be a non-negative integer + >>> get_index_of_rightmost_set_bit('test') + Traceback (most recent call last): + ... + ValueError: Input must be a non-negative integer + >>> get_index_of_rightmost_set_bit(1.25) + Traceback (most recent call last): + ... + ValueError: Input must be a non-negative integer """ - if number < 0 or not isinstance(number, int): + if not isinstance(number, int) or number < 0: raise ValueError("Input must be a non-negative integer") intermediate = number & ~(number - 1) From 6a86fe48671adb90504412acc2589c3ab1b18564 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Tue, 29 Nov 2022 22:28:47 +0400 Subject: [PATCH 0740/1543] Add backtrack word search in matrix (#8005) * add backtracking word search * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * review notes fixes * additional fixes * add tests * small cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * small cleanup 2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update backtracking/word_search.py Co-authored-by: Christian Clauss * Update backtracking/word_search.py Co-authored-by: Christian Clauss * Update backtracking/word_search.py Co-authored-by: Christian Clauss * Update backtracking/word_search.py Co-authored-by: Christian Clauss Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + backtracking/word_search.py | 160 ++++++++++++++++++++++++++++++++++++ 2 files changed, 161 insertions(+) create mode 100644 backtracking/word_search.py diff --git a/DIRECTORY.md b/DIRECTORY.md index b3b484f7358f..51430a1e159e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -33,6 +33,7 @@ * [Rat In Maze](backtracking/rat_in_maze.py) * [Sudoku](backtracking/sudoku.py) * [Sum Of 
Subsets](backtracking/sum_of_subsets.py) + * [Word Search](backtracking/word_search.py) ## Bit Manipulation * [Binary And Operator](bit_manipulation/binary_and_operator.py) diff --git a/backtracking/word_search.py b/backtracking/word_search.py new file mode 100644 index 000000000000..25d1436be36e --- /dev/null +++ b/backtracking/word_search.py @@ -0,0 +1,160 @@ +""" +Author : Alexander Pantyukhin +Date : November 24, 2022 + +Task: +Given an m x n grid of characters board and a string word, +return true if word exists in the grid. + +The word can be constructed from letters of sequentially adjacent cells, +where adjacent cells are horizontally or vertically neighboring. +The same letter cell may not be used more than once. + +Example: + +Matrix: +--------- +|A|B|C|E| +|S|F|C|S| +|A|D|E|E| +--------- + +Word: +"ABCCED" + +Result: +True + +Implementation notes: Use backtracking approach. +At each point, check all neighbors to try to find the next letter of the word. + +leetcode: https://leetcode.com/problems/word-search/ + +""" + + +def word_exists(board: list[list[str]], word: str) -> bool: + """ + >>> word_exists([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "ABCCED") + True + >>> word_exists([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "SEE") + True + >>> word_exists([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "ABCB") + False + >>> word_exists([["A"]], "A") + True + >>> word_exists([["A","A","A","A","A","A"], + ... ["A","A","A","A","A","A"], + ... ["A","A","A","A","A","A"], + ... ["A","A","A","A","A","A"], + ... ["A","A","A","A","A","B"], + ... ["A","A","A","A","B","A"]], + ... "AAAAAAAAAAAAABB") + False + >>> word_exists([["A"]], 123) + Traceback (most recent call last): + ... + ValueError: The word parameter should be a string of length greater than 0. + >>> word_exists([["A"]], "") + Traceback (most recent call last): + ... + ValueError: The word parameter should be a string of length greater than 0. + >>> word_exists([[]], "AB") + Traceback (most recent call last): + ... + ValueError: The board should be a non empty matrix of single chars strings. + >>> word_exists([], "AB") + Traceback (most recent call last): + ... + ValueError: The board should be a non empty matrix of single chars strings. + >>> word_exists([["A"], [21]], "AB") + Traceback (most recent call last): + ... + ValueError: The board should be a non empty matrix of single chars strings. + """ + + # Validate board + board_error_message = ( + "The board should be a non empty matrix of single chars strings." + ) + if not isinstance(board, list) or len(board) == 0: + raise ValueError(board_error_message) + + for row in board: + if not isinstance(row, list) or len(row) == 0: + raise ValueError(board_error_message) + + for item in row: + if not isinstance(item, str) or len(item) != 1: + raise ValueError(board_error_message) + + # Validate word + if not isinstance(word, str) or len(word) == 0: + raise ValueError( + "The word parameter should be a string of length greater than 0." + ) + + traverts_directions = [(0, 1), (0, -1), (-1, 0), (1, 0)] + len_word = len(word) + len_board = len(board) + len_board_column = len(board[0]) + + # Returns the hash key of matrix indexes. + def get_point_key(row: int, column: int) -> int: + """ + >>> len_board=10 + >>> len_board_column=20 + >>> get_point_key(0, 0) + 200 + """ + + return len_board * len_board_column * row + column + + # Return True if it's possible to search the word suffix + # starting from the word_index. 
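+    # `visited_points_set` holds the hashed coordinates already used on the
+    # current path, so the same letter cell is never reused; a key is removed
+    # again when the search backtracks.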
+ def exits_word( + row: int, column: int, word_index: int, visited_points_set: set[int] + ) -> bool: + """ + >>> board=[["A"]] + >>> word="B" + >>> exits_word(0, 0, 0, set()) + False + """ + + if board[row][column] != word[word_index]: + return False + + if word_index == len_word - 1: + return True + + for direction in traverts_directions: + next_i = row + direction[0] + next_j = column + direction[1] + if not (0 <= next_i < len_board and 0 <= next_j < len_board_column): + continue + + key = get_point_key(next_i, next_j) + if key in visited_points_set: + continue + + visited_points_set.add(key) + if exits_word(next_i, next_j, word_index + 1, visited_points_set): + return True + + visited_points_set.remove(key) + + return False + + for i in range(len_board): + for j in range(len_board_column): + if exits_word(i, j, 0, {get_point_key(i, j)}): + return True + + return False + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 5654c6242ed5974fa8f2aa89d9689efa012bdafc Mon Sep 17 00:00:00 2001 From: Akshay Dubey <38462415+itsAkshayDubey@users.noreply.github.com> Date: Tue, 29 Nov 2022 23:59:21 +0530 Subject: [PATCH 0741/1543] algorithm: Hexagonal number (#8003) * feat: Add hexagonal number * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/hexagonal_number.py | 48 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 maths/hexagonal_number.py diff --git a/maths/hexagonal_number.py b/maths/hexagonal_number.py new file mode 100644 index 000000000000..28735c638f80 --- /dev/null +++ b/maths/hexagonal_number.py @@ -0,0 +1,48 @@ +""" +== Hexagonal Number == +The nth hexagonal number hn is the number of distinct dots +in a pattern of dots consisting of the outlines of regular +hexagons with sides up to n dots, when the hexagons are +overlaid so that they share one vertex. + +https://en.wikipedia.org/wiki/Hexagonal_number +""" + +# Author : Akshay Dubey (https://github.com/itsAkshayDubey) + + +def hexagonal(number: int) -> int: + """ + :param number: nth hexagonal number to calculate + :return: the nth hexagonal number + Note: A hexagonal number is only defined for positive integers + >>> hexagonal(4) + 28 + >>> hexagonal(11) + 231 + >>> hexagonal(22) + 946 + >>> hexagonal(0) + Traceback (most recent call last): + ... + ValueError: Input must be a positive integer + >>> hexagonal(-1) + Traceback (most recent call last): + ... + ValueError: Input must be a positive integer + >>> hexagonal(11.0) + Traceback (most recent call last): + ... 
+ TypeError: Input value of [number=11.0] must be an integer + """ + if not isinstance(number, int): + raise TypeError(f"Input value of [number={number}] must be an integer") + if number < 1: + raise ValueError("Input must be a positive integer") + return number * (2 * number - 1) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d141fa8838369fafb3b28a8dd825ec1b20d34e03 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 21:34:24 +0100 Subject: [PATCH 0742/1543] [pre-commit.ci] pre-commit autoupdate (#8017) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/asottile/pyupgrade: v3.2.2 → v3.3.0](https://github.com/asottile/pyupgrade/compare/v3.2.2...v3.3.0) * updating DIRECTORY.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 74502b3ea757..3d83499f0e71 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,7 +27,7 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v3.2.2 + rev: v3.3.0 hooks: - id: pyupgrade args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 51430a1e159e..382ff3a6fb25 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -561,6 +561,7 @@ * [Greedy Coin Change](maths/greedy_coin_change.py) * [Hamming Numbers](maths/hamming_numbers.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) + * [Hexagonal Number](maths/hexagonal_number.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) * [Is Square Free](maths/is_square_free.py) From b25915adf91cc39c98c597fce1eef9422f4e7d0d Mon Sep 17 00:00:00 2001 From: Aaryan Raj <97806283+iaaryanraj@users.noreply.github.com> Date: Sun, 11 Dec 2022 12:34:04 +0530 Subject: [PATCH 0743/1543] Add algorithm to convert decimal number to its simplest fraction form (#8001) * Added algorithm to convert decimal number to its simplest fraction form * Apply suggested changes --- maths/decimal_to_fraction.py | 48 ++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 maths/decimal_to_fraction.py diff --git a/maths/decimal_to_fraction.py b/maths/decimal_to_fraction.py new file mode 100644 index 000000000000..9462bafe0171 --- /dev/null +++ b/maths/decimal_to_fraction.py @@ -0,0 +1,48 @@ +def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]: + """ + Return a decimal number in its simplest fraction form + >>> decimal_to_fraction(2) + (2, 1) + >>> decimal_to_fraction(89.) 
+ (89, 1) + >>> decimal_to_fraction("67") + (67, 1) + >>> decimal_to_fraction("45.0") + (45, 1) + >>> decimal_to_fraction(1.5) + (3, 2) + >>> decimal_to_fraction("6.25") + (25, 4) + >>> decimal_to_fraction("78td") + Traceback (most recent call last): + ValueError: Please enter a valid number + """ + try: + decimal = float(decimal) + except ValueError: + raise ValueError("Please enter a valid number") + fractional_part = decimal - int(decimal) + if fractional_part == 0: + return int(decimal), 1 + else: + number_of_frac_digits = len(str(decimal).split(".")[1]) + numerator = int(decimal * (10**number_of_frac_digits)) + denominator = 10**number_of_frac_digits + divisor, dividend = denominator, numerator + while True: + remainder = dividend % divisor + if remainder == 0: + break + dividend, divisor = divisor, remainder + numerator, denominator = numerator / divisor, denominator / divisor + return int(numerator), int(denominator) + + +if __name__ == "__main__": + print(f"{decimal_to_fraction(2) = }") + print(f"{decimal_to_fraction(89.0) = }") + print(f"{decimal_to_fraction('67') = }") + print(f"{decimal_to_fraction('45.0') = }") + print(f"{decimal_to_fraction(1.5) = }") + print(f"{decimal_to_fraction('6.25') = }") + print(f"{decimal_to_fraction('78td') = }") From 40f165b789e9a2475415768db5acadf63e021e46 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 21:29:50 +0100 Subject: [PATCH 0744/1543] [pre-commit.ci] pre-commit autoupdate (#8026) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/psf/black: 22.10.0 → 22.12.0](https://github.com/psf/black/compare/22.10.0...22.12.0) - [github.com/asottile/pyupgrade: v3.3.0 → v3.3.1](https://github.com/asottile/pyupgrade/compare/v3.3.0...v3.3.1) * updating DIRECTORY.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3d83499f0e71..7cf4bedd7dac 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,7 +15,7 @@ repos: - id: auto-walrus - repo: https://github.com/psf/black - rev: 22.10.0 + rev: 22.12.0 hooks: - id: black @@ -27,7 +27,7 @@ repos: - --profile=black - repo: https://github.com/asottile/pyupgrade - rev: v3.3.0 + rev: v3.3.1 hooks: - id: pyupgrade args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 382ff3a6fb25..0624eda2c585 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -533,6 +533,7 @@ * [Collatz Sequence](maths/collatz_sequence.py) * [Combinations](maths/combinations.py) * [Decimal Isolate](maths/decimal_isolate.py) + * [Decimal To Fraction](maths/decimal_to_fraction.py) * [Dodecahedron](maths/dodecahedron.py) * [Double Factorial Iterative](maths/double_factorial_iterative.py) * [Double Factorial Recursive](maths/double_factorial_recursive.py) From af8d52092232e1104154b733000716036e668444 Mon Sep 17 00:00:00 2001 From: Roberto Garcia <37519995+rga2@users.noreply.github.com> Date: Wed, 14 Dec 2022 22:10:09 -0600 Subject: [PATCH 0745/1543] Update is_even.py (#8028) --- bit_manipulation/is_even.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bit_manipulation/is_even.py b/bit_manipulation/is_even.py index b7b0841a1427..ba036f35aa1e 100644 --- 
a/bit_manipulation/is_even.py +++ b/bit_manipulation/is_even.py @@ -11,7 +11,7 @@ def is_even(number: int) -> bool: from the above examples we can observe that for all the odd integers there is always 1 set bit at the end also, 1 in binary can be represented as 001, 00001, or 0000001 - so for any odd integer n => n&1 is always equlas 1 else the integer is even + so for any odd integer n => n&1 is always equals 1 else the integer is even >>> is_even(1) False From 30277f8590a7bf636477fa4c4ad22cedf10588f5 Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Thu, 15 Dec 2022 08:11:32 +0400 Subject: [PATCH 0746/1543] add numbers different signs algorithm. (#8008) --- DIRECTORY.md | 1 + bit_manipulation/numbers_different_signs.py | 39 +++++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 bit_manipulation/numbers_different_signs.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 0624eda2c585..34ce88a4f2ab 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -50,6 +50,7 @@ * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) * [Is Even](bit_manipulation/is_even.py) * [Is Power Of Two](bit_manipulation/is_power_of_two.py) + * [Numbers Different Signs](bit_manipulation/numbers_different_signs.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) diff --git a/bit_manipulation/numbers_different_signs.py b/bit_manipulation/numbers_different_signs.py new file mode 100644 index 000000000000..cf8b6d86f1eb --- /dev/null +++ b/bit_manipulation/numbers_different_signs.py @@ -0,0 +1,39 @@ +""" +Author : Alexander Pantyukhin +Date : November 30, 2022 + +Task: +Given two int numbers. Return True these numbers have opposite signs +or False otherwise. + +Implementation notes: Use bit manipulation. +Use XOR for two numbers. +""" + + +def different_signs(num1: int, num2: int) -> bool: + """ + Return True if numbers have opposite signs False otherwise. 
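+ + num1 ^ num2 < 0 works because Python evaluates bitwise XOR as if the integers + were written in two's complement, so the result is negative exactly when the + sign bits (and hence the signs) of the two operands differ.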
+ + >>> different_signs(1, -1) + True + >>> different_signs(1, 1) + False + >>> different_signs(1000000000000000000000000000, -1000000000000000000000000000) + True + >>> different_signs(-1000000000000000000000000000, 1000000000000000000000000000) + True + >>> different_signs(50, 278) + False + >>> different_signs(0, 2) + False + >>> different_signs(2, 0) + False + """ + return num1 ^ num2 < 0 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 3f8b2af14bd3b64b838098f9e1830c0fea926a1a Mon Sep 17 00:00:00 2001 From: Victor Rodrigues da Silva <63797831+VictorRS27@users.noreply.github.com> Date: Sun, 18 Dec 2022 19:26:39 -0300 Subject: [PATCH 0747/1543] Add autoclave cipher (#8029) * Add autoclave cipher * Update autoclave with the given suggestions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixing errors * Another fixes * Update and rename autoclave.py to autokey.py * Rename gaussian_naive_bayes.py to gaussian_naive_bayes.py.broken.txt * Rename gradient_boosting_regressor.py to gradient_boosting_regressor.py.broken.txt * Rename random_forest_classifier.py to random_forest_classifier.py.broken.txt * Rename random_forest_regressor.py to random_forest_regressor.py.broken.txt * Rename equal_loudness_filter.py to equal_loudness_filter.py.broken.txt Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ...py => equal_loudness_filter.py.broken.txt} | 0 ciphers/autokey.py | 131 ++++++++++++++++++ ....py => gaussian_naive_bayes.py.broken.txt} | 0 ...gradient_boosting_regressor.py.broken.txt} | 0 ...=> random_forest_classifier.py.broken.txt} | 0 ... => random_forest_regressor.py.broken.txt} | 0 6 files changed, 131 insertions(+) rename audio_filters/{equal_loudness_filter.py => equal_loudness_filter.py.broken.txt} (100%) create mode 100644 ciphers/autokey.py rename machine_learning/{gaussian_naive_bayes.py => gaussian_naive_bayes.py.broken.txt} (100%) rename machine_learning/{gradient_boosting_regressor.py => gradient_boosting_regressor.py.broken.txt} (100%) rename machine_learning/{random_forest_classifier.py => random_forest_classifier.py.broken.txt} (100%) rename machine_learning/{random_forest_regressor.py => random_forest_regressor.py.broken.txt} (100%) diff --git a/audio_filters/equal_loudness_filter.py b/audio_filters/equal_loudness_filter.py.broken.txt similarity index 100% rename from audio_filters/equal_loudness_filter.py rename to audio_filters/equal_loudness_filter.py.broken.txt diff --git a/ciphers/autokey.py b/ciphers/autokey.py new file mode 100644 index 000000000000..8683e6d37001 --- /dev/null +++ b/ciphers/autokey.py @@ -0,0 +1,131 @@ +""" +https://en.wikipedia.org/wiki/Autokey_cipher +An autokey cipher (also known as the autoclave cipher) is a cipher that +incorporates the message (the plaintext) into the key. +The key is generated from the message in some automated fashion, +sometimes by selecting certain letters from the text or, more commonly, +by adding a short primer key to the front of the message. +""" + + +def encrypt(plaintext: str, key: str) -> str: + """ + Encrypt a given plaintext (string) and key (string), returning the + encrypted ciphertext. + >>> encrypt("hello world", "coffee") + 'jsqqs avvwo' + >>> encrypt("coffee is good as python", "TheAlgorithms") + 'vvjfpk wj ohvp su ddylsv' + >>> encrypt("coffee is good as python", 2) + Traceback (most recent call last): + ... 
+ TypeError: key must be a string + >>> encrypt("", "TheAlgorithms") + Traceback (most recent call last): + ... + ValueError: plaintext is empty + """ + if not isinstance(plaintext, str): + raise TypeError("plaintext must be a string") + if not isinstance(key, str): + raise TypeError("key must be a string") + + if not plaintext: + raise ValueError("plaintext is empty") + if not key: + raise ValueError("key is empty") + + key += plaintext + plaintext = plaintext.lower() + key = key.lower() + plaintext_iterator = 0 + key_iterator = 0 + ciphertext = "" + while plaintext_iterator < len(plaintext): + if ( + ord(plaintext[plaintext_iterator]) < 97 + or ord(plaintext[plaintext_iterator]) > 122 + ): + ciphertext += plaintext[plaintext_iterator] + plaintext_iterator += 1 + elif ord(key[key_iterator]) < 97 or ord(key[key_iterator]) > 122: + key_iterator += 1 + else: + ciphertext += chr( + ( + (ord(plaintext[plaintext_iterator]) - 97 + ord(key[key_iterator])) + - 97 + ) + % 26 + + 97 + ) + key_iterator += 1 + plaintext_iterator += 1 + return ciphertext + + +def decrypt(ciphertext: str, key: str) -> str: + """ + Decrypt a given ciphertext (string) and key (string), returning the decrypted + ciphertext. + >>> decrypt("jsqqs avvwo", "coffee") + 'hello world' + >>> decrypt("vvjfpk wj ohvp su ddylsv", "TheAlgorithms") + 'coffee is good as python' + >>> decrypt("vvjfpk wj ohvp su ddylsv", "") + Traceback (most recent call last): + ... + ValueError: key is empty + >>> decrypt(527.26, "TheAlgorithms") + Traceback (most recent call last): + ... + TypeError: ciphertext must be a string + """ + if not isinstance(ciphertext, str): + raise TypeError("ciphertext must be a string") + if not isinstance(key, str): + raise TypeError("key must be a string") + + if not ciphertext: + raise ValueError("ciphertext is empty") + if not key: + raise ValueError("key is empty") + + key = key.lower() + ciphertext_iterator = 0 + key_iterator = 0 + plaintext = "" + while ciphertext_iterator < len(ciphertext): + if ( + ord(ciphertext[ciphertext_iterator]) < 97 + or ord(ciphertext[ciphertext_iterator]) > 122 + ): + plaintext += ciphertext[ciphertext_iterator] + else: + plaintext += chr( + (ord(ciphertext[ciphertext_iterator]) - ord(key[key_iterator])) % 26 + + 97 + ) + key += chr( + (ord(ciphertext[ciphertext_iterator]) - ord(key[key_iterator])) % 26 + + 97 + ) + key_iterator += 1 + ciphertext_iterator += 1 + return plaintext + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + operation = int(input("Type 1 to encrypt or 2 to decrypt:")) + if operation == 1: + plaintext = input("Typeplaintext to be encrypted:\n") + key = input("Type the key:\n") + print(encrypt(plaintext, key)) + elif operation == 2: + ciphertext = input("Type the ciphertext to be decrypted:\n") + key = input("Type the key:\n") + print(decrypt(ciphertext, key)) + decrypt("jsqqs avvwo", "coffee") diff --git a/machine_learning/gaussian_naive_bayes.py b/machine_learning/gaussian_naive_bayes.py.broken.txt similarity index 100% rename from machine_learning/gaussian_naive_bayes.py rename to machine_learning/gaussian_naive_bayes.py.broken.txt diff --git a/machine_learning/gradient_boosting_regressor.py b/machine_learning/gradient_boosting_regressor.py.broken.txt similarity index 100% rename from machine_learning/gradient_boosting_regressor.py rename to machine_learning/gradient_boosting_regressor.py.broken.txt diff --git a/machine_learning/random_forest_classifier.py b/machine_learning/random_forest_classifier.py.broken.txt similarity index 100% rename 
from machine_learning/random_forest_classifier.py rename to machine_learning/random_forest_classifier.py.broken.txt diff --git a/machine_learning/random_forest_regressor.py b/machine_learning/random_forest_regressor.py.broken.txt similarity index 100% rename from machine_learning/random_forest_regressor.py rename to machine_learning/random_forest_regressor.py.broken.txt From d4c5b22424d05d3198dc2e5a49427e929b058ccf Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 19 Dec 2022 23:04:34 +0100 Subject: [PATCH 0748/1543] [pre-commit.ci] pre-commit autoupdate (#8037) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/MarcoGorelli/auto-walrus: v0.2.1 → v0.2.2](https://github.com/MarcoGorelli/auto-walrus/compare/v0.2.1...v0.2.2) - [github.com/PyCQA/isort: 5.10.1 → v5.11.3](https://github.com/PyCQA/isort/compare/5.10.1...v5.11.3) * updating DIRECTORY.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 6 +----- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7cf4bedd7dac..0f5fe20a8854 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,7 +10,7 @@ repos: - id: requirements-txt-fixer - repo: https://github.com/MarcoGorelli/auto-walrus - rev: v0.2.1 + rev: v0.2.2 hooks: - id: auto-walrus @@ -20,7 +20,7 @@ repos: - id: black - repo: https://github.com/PyCQA/isort - rev: 5.10.1 + rev: v5.11.3 hooks: - id: isort args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 34ce88a4f2ab..bec857a38b69 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -14,7 +14,6 @@ ## Audio Filters * [Butterworth Filter](audio_filters/butterworth_filter.py) - * [Equal Loudness Filter](audio_filters/equal_loudness_filter.py) * [Iir Filter](audio_filters/iir_filter.py) * [Show Response](audio_filters/show_response.py) @@ -79,6 +78,7 @@ * [A1Z26](ciphers/a1z26.py) * [Affine Cipher](ciphers/affine_cipher.py) * [Atbash](ciphers/atbash.py) + * [Autokey](ciphers/autokey.py) * [Baconian Cipher](ciphers/baconian_cipher.py) * [Base16](ciphers/base16.py) * [Base32](ciphers/base32.py) @@ -475,8 +475,6 @@ * [Decision Tree](machine_learning/decision_tree.py) * Forecasting * [Run](machine_learning/forecasting/run.py) - * [Gaussian Naive Bayes](machine_learning/gaussian_naive_bayes.py) - * [Gradient Boosting Regressor](machine_learning/gradient_boosting_regressor.py) * [Gradient Descent](machine_learning/gradient_descent.py) * [K Means Clust](machine_learning/k_means_clust.py) * [K Nearest Neighbours](machine_learning/k_nearest_neighbours.py) @@ -490,8 +488,6 @@ * [Lstm Prediction](machine_learning/lstm/lstm_prediction.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polymonial Regression](machine_learning/polymonial_regression.py) - * [Random Forest Classifier](machine_learning/random_forest_classifier.py) - * [Random Forest Regressor](machine_learning/random_forest_regressor.py) * [Scoring Functions](machine_learning/scoring_functions.py) * [Self Organizing Map](machine_learning/self_organizing_map.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) From 79ef431cec53020709268507b6515ff1e7e47680 Mon Sep 17 00:00:00 2001 From: Maxim 
Smolskiy Date: Sat, 24 Dec 2022 17:57:28 +0300 Subject: [PATCH 0749/1543] Reduce the complexity of sorts/merge_insertion_sort.py (#7954) * Reduce the complexity of sorts/merge_insertion_sort.py * Add tests * Lower the --max-complexity threshold in the file .flake8 --- .flake8 | 2 +- sorts/merge_insertion_sort.py | 79 +++++++++++++++++++++-------------- 2 files changed, 48 insertions(+), 33 deletions(-) diff --git a/.flake8 b/.flake8 index b68ee8533a61..77ca7a328a77 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,7 @@ [flake8] max-line-length = 88 # max-complexity should be 10 -max-complexity = 19 +max-complexity = 17 extend-ignore = # Formatting style for `black` # E203 is whitespace before ':' diff --git a/sorts/merge_insertion_sort.py b/sorts/merge_insertion_sort.py index ecaa535457f4..4a5bdea0a33f 100644 --- a/sorts/merge_insertion_sort.py +++ b/sorts/merge_insertion_sort.py @@ -14,6 +14,53 @@ from __future__ import annotations +def binary_search_insertion(sorted_list, item): + """ + >>> binary_search_insertion([1, 2, 7, 9, 10], 4) + [1, 2, 4, 7, 9, 10] + """ + left = 0 + right = len(sorted_list) - 1 + while left <= right: + middle = (left + right) // 2 + if left == right: + if sorted_list[middle] < item: + left = middle + 1 + break + elif sorted_list[middle] < item: + left = middle + 1 + else: + right = middle - 1 + sorted_list.insert(left, item) + return sorted_list + + +def merge(left, right): + """ + >>> merge([[1, 6], [9, 10]], [[2, 3], [4, 5], [7, 8]]) + [[1, 6], [2, 3], [4, 5], [7, 8], [9, 10]] + """ + result = [] + while left and right: + if left[0][0] < right[0][0]: + result.append(left.pop(0)) + else: + result.append(right.pop(0)) + return result + left + right + + +def sortlist_2d(list_2d): + """ + >>> sortlist_2d([[9, 10], [1, 6], [7, 8], [2, 3], [4, 5]]) + [[1, 6], [2, 3], [4, 5], [7, 8], [9, 10]] + """ + length = len(list_2d) + if length <= 1: + return list_2d + middle = length // 2 + return merge(sortlist_2d(list_2d[:middle]), sortlist_2d(list_2d[middle:])) + + def merge_insertion_sort(collection: list[int]) -> list[int]: """Pure implementation of merge-insertion sort algorithm in Python @@ -38,38 +85,6 @@ def merge_insertion_sort(collection: list[int]) -> list[int]: True """ - def binary_search_insertion(sorted_list, item): - left = 0 - right = len(sorted_list) - 1 - while left <= right: - middle = (left + right) // 2 - if left == right: - if sorted_list[middle] < item: - left = middle + 1 - break - elif sorted_list[middle] < item: - left = middle + 1 - else: - right = middle - 1 - sorted_list.insert(left, item) - return sorted_list - - def sortlist_2d(list_2d): - def merge(left, right): - result = [] - while left and right: - if left[0][0] < right[0][0]: - result.append(left.pop(0)) - else: - result.append(right.pop(0)) - return result + left + right - - length = len(list_2d) - if length <= 1: - return list_2d - middle = length // 2 - return merge(sortlist_2d(list_2d[:middle]), sortlist_2d(list_2d[middle:])) - if len(collection) <= 1: return collection From 27d56ba3932d2ca2951a45232790794b2b0838d8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 26 Dec 2022 22:02:50 +0100 Subject: [PATCH 0750/1543] [pre-commit.ci] pre-commit autoupdate (#8047) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/PyCQA/isort: v5.11.3 → 5.11.4](https://github.com/PyCQA/isort/compare/v5.11.3...5.11.4) * Update .flake8 
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .flake8 | 2 +- .pre-commit-config.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.flake8 b/.flake8 index 77ca7a328a77..b68ee8533a61 100644 --- a/.flake8 +++ b/.flake8 @@ -1,7 +1,7 @@ [flake8] max-line-length = 88 # max-complexity should be 10 -max-complexity = 17 +max-complexity = 19 extend-ignore = # Formatting style for `black` # E203 is whitespace before ':' diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0f5fe20a8854..8eb6d297e831 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,7 +20,7 @@ repos: - id: black - repo: https://github.com/PyCQA/isort - rev: v5.11.3 + rev: 5.11.4 hooks: - id: isort args: From 90686e39b9fd3b599a8cd77810e0fdbb74eae064 Mon Sep 17 00:00:00 2001 From: Lucia Harcekova <119792460+LuciaHarcekova@users.noreply.github.com> Date: Wed, 28 Dec 2022 17:34:35 +0000 Subject: [PATCH 0751/1543] Add LZ77 compression algorithm (#8059) * - add "lz77_compressor" class with compress and decompress methods using LZ77 compression algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * - use "list" instead "List", formatting * - fix spelling * - add Python type hints * - add 'Token' class to represent triplet (offset, length, indicator) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * - add test, hange type rom List to list * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * - remove extra import * - remove extra types in comments * - better test * - edit comments * - add return types * - add tests for __str__ and __repr__ * Update lz77.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- compression/lz77.py | 227 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 227 insertions(+) create mode 100644 compression/lz77.py diff --git a/compression/lz77.py b/compression/lz77.py new file mode 100644 index 000000000000..7c1a6f6a4c19 --- /dev/null +++ b/compression/lz77.py @@ -0,0 +1,227 @@ +""" +LZ77 compression algorithm +- lossless data compression published in papers by Abraham Lempel and Jacob Ziv in 1977 +- also known as LZ1 or sliding-window compression +- form the basis for many variations including LZW, LZSS, LZMA and others + +It uses a “sliding window” method. Within the sliding window we have: + - search buffer + - look ahead buffer +len(sliding_window) = len(search_buffer) + len(look_ahead_buffer) + +LZ77 manages a dictionary that uses triples composed of: + - Offset into search buffer, it's the distance between the start of a phrase and + the beginning of a file. + - Length of the match, it's the number of characters that make up a phrase. + - The indicator is represented by a character that is going to be encoded next. + +As a file is parsed, the dictionary is dynamically updated to reflect the compressed +data contents and size. 
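+ +Decompression reverses this: for example, for the last token (3, 5, 'd') of the +first example below, the decoder has already rebuilt "cabracadabrar"; five times +in a row it copies the character 3 positions back in the output built so far +(freshly written characters are reused, which is how length may exceed offset), +yielding "rarra", and then appends the indicator 'd' to give "cabracadabrarrarrad".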
+ +Examples: +"cabracadabrarrarrad" <-> [(0, 0, 'c'), (0, 0, 'a'), (0, 0, 'b'), (0, 0, 'r'), + (3, 1, 'c'), (2, 1, 'd'), (7, 4, 'r'), (3, 5, 'd')] +"ababcbababaa" <-> [(0, 0, 'a'), (0, 0, 'b'), (2, 2, 'c'), (4, 3, 'a'), (2, 2, 'a')] +"aacaacabcabaaac" <-> [(0, 0, 'a'), (1, 1, 'c'), (3, 4, 'b'), (3, 3, 'a'), (1, 2, 'c')] + +Sources: +en.wikipedia.org/wiki/LZ77_and_LZ78 +""" + + +from dataclasses import dataclass + +__version__ = "0.1" +__author__ = "Lucia Harcekova" + + +@dataclass +class Token: + """ + Dataclass representing triplet called token consisting of length, offset + and indicator. This triplet is used during LZ77 compression. + """ + + offset: int + length: int + indicator: str + + def __repr__(self) -> str: + """ + >>> token = Token(1, 2, "c") + >>> repr(token) + '(1, 2, c)' + >>> str(token) + '(1, 2, c)' + """ + return f"({self.offset}, {self.length}, {self.indicator})" + + +class LZ77Compressor: + """ + Class containing compress and decompress methods using LZ77 compression algorithm. + """ + + def __init__(self, window_size: int = 13, lookahead_buffer_size: int = 6) -> None: + self.window_size = window_size + self.lookahead_buffer_size = lookahead_buffer_size + self.search_buffer_size = self.window_size - self.lookahead_buffer_size + + def compress(self, text: str) -> list[Token]: + """ + Compress the given string text using LZ77 compression algorithm. + + Args: + text: string to be compressed + + Returns: + output: the compressed text as a list of Tokens + + >>> lz77_compressor = LZ77Compressor() + >>> str(lz77_compressor.compress("ababcbababaa")) + '[(0, 0, a), (0, 0, b), (2, 2, c), (4, 3, a), (2, 2, a)]' + >>> str(lz77_compressor.compress("aacaacabcabaaac")) + '[(0, 0, a), (1, 1, c), (3, 4, b), (3, 3, a), (1, 2, c)]' + """ + + output = [] + search_buffer = "" + + # while there are still characters in text to compress + while text: + + # find the next encoding phrase + # - triplet with offset, length, indicator (the next encoding character) + token = self._find_encoding_token(text, search_buffer) + + # update the search buffer: + # - add new characters from text into it + # - check if size exceed the max search buffer size, if so, drop the + # oldest elements + search_buffer += text[: token.length + 1] + if len(search_buffer) > self.search_buffer_size: + search_buffer = search_buffer[-self.search_buffer_size :] + + # update the text + text = text[token.length + 1 :] + + # append the token to output + output.append(token) + + return output + + def decompress(self, tokens: list[Token]) -> str: + """ + Convert the list of tokens into an output string. + + Args: + tokens: list containing triplets (offset, length, char) + + Returns: + output: decompressed text + + Tests: + >>> lz77_compressor = LZ77Compressor() + >>> lz77_compressor.decompress([Token(0, 0, 'c'), Token(0, 0, 'a'), + ... Token(0, 0, 'b'), Token(0, 0, 'r'), Token(3, 1, 'c'), + ... Token(2, 1, 'd'), Token(7, 4, 'r'), Token(3, 5, 'd')]) + 'cabracadabrarrarrad' + >>> lz77_compressor.decompress([Token(0, 0, 'a'), Token(0, 0, 'b'), + ... Token(2, 2, 'c'), Token(4, 3, 'a'), Token(2, 2, 'a')]) + 'ababcbababaa' + >>> lz77_compressor.decompress([Token(0, 0, 'a'), Token(1, 1, 'c'), + ... 
Token(3, 4, 'b'), Token(3, 3, 'a'), Token(1, 2, 'c')]) + 'aacaacabcabaaac' + """ + + output = "" + + for token in tokens: + for _ in range(token.length): + output += output[-token.offset] + output += token.indicator + + return output + + def _find_encoding_token(self, text: str, search_buffer: str) -> Token: + """Finds the encoding token for the first character in the text. + + Tests: + >>> lz77_compressor = LZ77Compressor() + >>> lz77_compressor._find_encoding_token("abrarrarrad", "abracad").offset + 7 + >>> lz77_compressor._find_encoding_token("adabrarrarrad", "cabrac").length + 1 + >>> lz77_compressor._find_encoding_token("abc", "xyz").offset + 0 + >>> lz77_compressor._find_encoding_token("", "xyz").offset + Traceback (most recent call last): + ... + ValueError: We need some text to work with. + >>> lz77_compressor._find_encoding_token("abc", "").offset + 0 + """ + + if not text: + raise ValueError("We need some text to work with.") + + # Initialise result parameters to default values + length, offset = 0, 0 + + if not search_buffer: + return Token(offset, length, text[length]) + + for i, character in enumerate(search_buffer): + found_offset = len(search_buffer) - i + if character == text[0]: + found_length = self._match_length_from_index(text, search_buffer, 0, i) + # if the found length is bigger than the current or if it's equal, + # which means it's offset is smaller: update offset and length + if found_length >= length: + offset, length = found_offset, found_length + + return Token(offset, length, text[length]) + + def _match_length_from_index( + self, text: str, window: str, text_index: int, window_index: int + ) -> int: + """Calculate the longest possible match of text and window characters from + text_index in text and window_index in window. + + Args: + text: _description_ + window: sliding window + text_index: index of character in text + window_index: index of character in sliding window + + Returns: + The maximum match between text and window, from given indexes. + + Tests: + >>> lz77_compressor = LZ77Compressor(13, 6) + >>> lz77_compressor._match_length_from_index("rarrad", "adabrar", 0, 4) + 5 + >>> lz77_compressor._match_length_from_index("adabrarrarrad", + ... "cabrac", 0, 1) + 1 + """ + if not text or text[text_index] != window[window_index]: + return 0 + return 1 + self._match_length_from_index( + text, window + text[text_index], text_index + 1, window_index + 1 + ) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + # Initialize compressor class + lz77_compressor = LZ77Compressor(window_size=13, lookahead_buffer_size=6) + + # Example + TEXT = "cabracadabrarrarrad" + compressed_text = lz77_compressor.compress(TEXT) + print(lz77_compressor.compress("ababcbababaa")) + decompressed_text = lz77_compressor.decompress(compressed_text) + assert decompressed_text == TEXT, "The LZ77 algorithm returned the invalid result." 
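A minimal usage sketch of the class added in this patch (it assumes the repository root is on sys.path so that compression.lz77 is importable; the expected token list comes from the compress() doctest above):

from compression.lz77 import LZ77Compressor

compressor = LZ77Compressor(window_size=13, lookahead_buffer_size=6)  # the class defaults, written out
tokens = compressor.compress("ababcbababaa")
print(tokens)  # [(0, 0, a), (0, 0, b), (2, 2, c), (4, 3, a), (2, 2, a)]
assert compressor.decompress(tokens) == "ababcbababaa"  # lossless round trip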
From b72d0681ec8fd6c02ee10ba04bae3fe97ffaebc6 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 29 Dec 2022 09:06:26 -0800 Subject: [PATCH 0752/1543] Remove extra imports in gamma.py doctests (#8060) * Refactor bottom-up function to be class method * Add type hints * Update convolve function namespace * Remove depreciated np.float * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Renamed function for consistency * updating DIRECTORY.md * Remove extra imports in gamma.py doctests Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Chris O <46587501+ChrisO345@users.noreply.github.com> --- maths/gamma.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/maths/gamma.py b/maths/gamma.py index 69cd819ef186..d5debc58764b 100644 --- a/maths/gamma.py +++ b/maths/gamma.py @@ -11,42 +11,27 @@ def gamma(num: float) -> float: used extension of the factorial function to complex numbers. The gamma function is defined for all complex numbers except the non-positive integers - - >>> gamma(-1) Traceback (most recent call last): ... ValueError: math domain error - - - >>> gamma(0) Traceback (most recent call last): ... ValueError: math domain error - - >>> gamma(9) 40320.0 - >>> from math import gamma as math_gamma >>> all(.99999999 < gamma(i) / math_gamma(i) <= 1.000000001 ... for i in range(1, 50)) True - - - >>> from math import gamma as math_gamma >>> gamma(-1)/math_gamma(-1) <= 1.000000001 Traceback (most recent call last): ... ValueError: math domain error - - - >>> from math import gamma as math_gamma >>> gamma(3.3) - math_gamma(3.3) <= 0.00000001 True """ - if num <= 0: raise ValueError("math domain error") From c6223c71d82c7ba57f3de9eed23963ec96de01bb Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Fri, 30 Dec 2022 09:47:40 +0400 Subject: [PATCH 0753/1543] add word_break dynamic approach up -> down. (#8039) * add word_break dynamic approach up -> down. * updating DIRECTORY.md * Update word_break.py fix review notes. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update word_break.py fix review notes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix review notes * add trie type * Update word_break.py add typing Any to trie. 
* Update dynamic_programming/word_break.py Co-authored-by: Caeden Perelli-Harris * Update dynamic_programming/word_break.py Co-authored-by: Christian Clauss * Update dynamic_programming/word_break.py Co-authored-by: Christian Clauss * Update dynamic_programming/word_break.py Co-authored-by: Christian Clauss * Update dynamic_programming/word_break.py Co-authored-by: Christian Clauss * fix review notes Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + dynamic_programming/word_break.py | 111 ++++++++++++++++++++++++++++++ 2 files changed, 112 insertions(+) create mode 100644 dynamic_programming/word_break.py diff --git a/DIRECTORY.md b/DIRECTORY.md index bec857a38b69..3437df12cbf5 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -328,6 +328,7 @@ * [Subset Generation](dynamic_programming/subset_generation.py) * [Sum Of Subset](dynamic_programming/sum_of_subset.py) * [Viterbi](dynamic_programming/viterbi.py) + * [Word Break](dynamic_programming/word_break.py) ## Electronics * [Builtin Voltage](electronics/builtin_voltage.py) diff --git a/dynamic_programming/word_break.py b/dynamic_programming/word_break.py new file mode 100644 index 000000000000..642ea0edf40d --- /dev/null +++ b/dynamic_programming/word_break.py @@ -0,0 +1,111 @@ +""" +Author : Alexander Pantyukhin +Date : December 12, 2022 + +Task: +Given a string and a list of words, return true if the string can be +segmented into a space-separated sequence of one or more words. + +Note that the same word may be reused +multiple times in the segmentation. + +Implementation notes: Trie + Dynamic programming up -> down. +The Trie will be used to store the words. It will be useful for scanning +available words for the current position in the string. + +Leetcode: +https://leetcode.com/problems/word-break/description/ + +Runtime: O(n * n) +Space: O(n) +""" + +from functools import lru_cache +from typing import Any + + +def word_break(string: str, words: list[str]) -> bool: + """ + Return True if the string can be segmented into the given words, False otherwise. + + >>> word_break("applepenapple", ["apple","pen"]) + True + >>> word_break("catsandog", ["cats","dog","sand","and","cat"]) + False + >>> word_break("cars", ["car","ca","rs"]) + True + >>> word_break('abc', []) + False + >>> word_break(123, ['a']) + Traceback (most recent call last): + ... + ValueError: the string should be not empty string + >>> word_break('', ['a']) + Traceback (most recent call last): + ... + ValueError: the string should be not empty string + >>> word_break('abc', [123]) + Traceback (most recent call last): + ... + ValueError: the words should be a list of non-empty strings + >>> word_break('abc', ['']) + Traceback (most recent call last): + ...
+ ValueError: the words should be a list of non-empty strings + """ + + # Validation + if not isinstance(string, str) or len(string) == 0: + raise ValueError("the string should be not empty string") + + if not isinstance(words, list) or not all( + isinstance(item, str) and len(item) > 0 for item in words + ): + raise ValueError("the words should be a list of non-empty strings") + + # Build trie + trie: dict[str, Any] = {} + word_keeper_key = "WORD_KEEPER" + + for word in words: + trie_node = trie + for c in word: + if c not in trie_node: + trie_node[c] = {} + + trie_node = trie_node[c] + + trie_node[word_keeper_key] = True + + len_string = len(string) + + # Dynamic programming method + @lru_cache(maxsize=None) + def is_breakable(index: int) -> bool: + """ + >>> string = 'a' + >>> is_breakable(1) + True + """ + if index == len_string: + return True + + trie_node = trie + for i in range(index, len_string): + trie_node = trie_node.get(string[i], None) + + if trie_node is None: + return False + + if trie_node.get(word_keeper_key, False) and is_breakable(i + 1): + return True + + return False + + return is_breakable(0) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d29afca93b278e7885f2395c1640aa90d109cc12 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 1 Jan 2023 05:30:14 -0800 Subject: [PATCH 0754/1543] Fix get_top_billioners.py file name typo (#8066) --- .../{get_top_billioners.py => get_top_billionaires.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename web_programming/{get_top_billioners.py => get_top_billionaires.py} (100%) diff --git a/web_programming/get_top_billioners.py b/web_programming/get_top_billionaires.py similarity index 100% rename from web_programming/get_top_billioners.py rename to web_programming/get_top_billionaires.py From 7c1d23d4485904634a6755d5978d406be534421d Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 1 Jan 2023 17:10:59 -0800 Subject: [PATCH 0755/1543] Change prime_sieve_eratosthenes.py to return list (#8062) --- maths/prime_sieve_eratosthenes.py | 35 +++++++++++++++++++------------ 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/maths/prime_sieve_eratosthenes.py b/maths/prime_sieve_eratosthenes.py index 3a3c55085218..32eef9165bba 100644 --- a/maths/prime_sieve_eratosthenes.py +++ b/maths/prime_sieve_eratosthenes.py @@ -1,10 +1,10 @@ """ Sieve of Eratosthenes -Input : n =10 +Input: n = 10 Output: 2 3 5 7 -Input : n = 20 +Input: n = 20 Output: 2 3 5 7 11 13 17 19 you can read in detail about this at @@ -12,34 +12,43 @@ """ -def prime_sieve_eratosthenes(num): +def prime_sieve_eratosthenes(num: int) -> list[int]: """ - print the prime numbers up to n + Print the prime numbers up to n >>> prime_sieve_eratosthenes(10) - 2,3,5,7, + [2, 3, 5, 7] >>> prime_sieve_eratosthenes(20) - 2,3,5,7,11,13,17,19, + [2, 3, 5, 7, 11, 13, 17, 19] + >>> prime_sieve_eratosthenes(2) + [2] + >>> prime_sieve_eratosthenes(1) + [] + >>> prime_sieve_eratosthenes(-1) + Traceback (most recent call last): + ... 
+ ValueError: Input must be a positive integer """ - primes = [True for i in range(num + 1)] - p = 2 + if num <= 0: + raise ValueError("Input must be a positive integer") + + primes = [True] * (num + 1) + p = 2 while p * p <= num: if primes[p]: for i in range(p * p, num + 1, p): primes[i] = False p += 1 - for prime in range(2, num + 1): - if primes[prime]: - print(prime, end=",") + return [prime for prime in range(2, num + 1) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() - num = int(input()) - prime_sieve_eratosthenes(num) + user_num = int(input("Enter a positive integer: ").strip()) + print(prime_sieve_eratosthenes(user_num)) From 725731c8d289f742bfde3f159a538a47d19c27dc Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 2 Jan 2023 05:07:39 -0800 Subject: [PATCH 0756/1543] Refactor `local_weighted_learning.py` to use `np.array` (#8069) * updating DIRECTORY.md * Format local_weighted_learning.py doctests for clarity * Refactor local_weighted_learning.py to use np.array instead of np.mat The np.matrix class is planned to be eventually depreciated in favor of np.array, and current use of the class raises warnings in pytest * Update local_weighted_learning.py documentation Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 +- .../local_weighted_learning.py | 116 ++++++++++-------- 2 files changed, 68 insertions(+), 51 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 3437df12cbf5..5ce9dca74c06 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -123,6 +123,7 @@ * [Huffman](compression/huffman.py) * [Lempel Ziv](compression/lempel_ziv.py) * [Lempel Ziv Decompress](compression/lempel_ziv_decompress.py) + * [Lz77](compression/lz77.py) * [Peak Signal To Noise Ratio](compression/peak_signal_to_noise_ratio.py) * [Run Length Encoding](compression/run_length_encoding.py) @@ -1162,7 +1163,7 @@ * [Get Amazon Product Data](web_programming/get_amazon_product_data.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) - * [Get Top Billioners](web_programming/get_top_billioners.py) + * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) * [Get User Tweets](web_programming/get_user_tweets.py) * [Giphy](web_programming/giphy.py) diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py index df03fe0a178d..6260e9ac6bfe 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.py +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -1,76 +1,86 @@ -# Required imports to run this file import matplotlib.pyplot as plt import numpy as np -# weighted matrix -def weighted_matrix(point: np.mat, training_data_x: np.mat, bandwidth: float) -> np.mat: +def weighted_matrix( + point: np.array, training_data_x: np.array, bandwidth: float +) -> np.array: """ - Calculate the weight for every point in the - data set. It takes training_point , query_point, and tau - Here Tau is not a fixed value it can be varied depends on output. - tau --> bandwidth - xmat -->Training data - point --> the x where we want to make predictions - >>> weighted_matrix(np.array([1., 1.]),np.mat([[16.99, 10.34], [21.01,23.68], - ... 
[24.59,25.69]]), 0.6) - matrix([[1.43807972e-207, 0.00000000e+000, 0.00000000e+000], - [0.00000000e+000, 0.00000000e+000, 0.00000000e+000], - [0.00000000e+000, 0.00000000e+000, 0.00000000e+000]]) + Calculate the weight for every point in the data set. + point --> the x value at which we want to make predictions + >>> weighted_matrix( + ... np.array([1., 1.]), + ... np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]), + ... 0.6 + ... ) + array([[1.43807972e-207, 0.00000000e+000, 0.00000000e+000], + [0.00000000e+000, 0.00000000e+000, 0.00000000e+000], + [0.00000000e+000, 0.00000000e+000, 0.00000000e+000]]) """ - # m is the number of training samples - m, n = np.shape(training_data_x) - # Initializing weights as identity matrix - weights = np.mat(np.eye(m)) + m, _ = np.shape(training_data_x) # m is the number of training samples + weights = np.eye(m) # Initializing weights as identity matrix + # calculating weights for all training examples [x(i)'s] for j in range(m): diff = point - training_data_x[j] - weights[j, j] = np.exp(diff * diff.T / (-2.0 * bandwidth**2)) + weights[j, j] = np.exp(diff @ diff.T / (-2.0 * bandwidth**2)) return weights def local_weight( - point: np.mat, training_data_x: np.mat, training_data_y: np.mat, bandwidth: float -) -> np.mat: + point: np.array, + training_data_x: np.array, + training_data_y: np.array, + bandwidth: float, +) -> np.array: """ Calculate the local weights using the weight_matrix function on training data. Return the weighted matrix. - >>> local_weight(np.array([1., 1.]),np.mat([[16.99, 10.34], [21.01,23.68], - ... [24.59,25.69]]),np.mat([[1.01, 1.66, 3.5]]), 0.6) - matrix([[0.00873174], - [0.08272556]]) + >>> local_weight( + ... np.array([1., 1.]), + ... np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]), + ... np.array([[1.01, 1.66, 3.5]]), + ... 0.6 + ... ) + array([[0.00873174], + [0.08272556]]) """ weight = weighted_matrix(point, training_data_x, bandwidth) - w = (training_data_x.T * (weight * training_data_x)).I * ( - training_data_x.T * weight * training_data_y.T + w = np.linalg.inv(training_data_x.T @ (weight @ training_data_x)) @ ( + training_data_x.T @ weight @ training_data_y.T ) return w def local_weight_regression( - training_data_x: np.mat, training_data_y: np.mat, bandwidth: float -) -> np.mat: + training_data_x: np.array, training_data_y: np.array, bandwidth: float +) -> np.array: """ - Calculate predictions for each data point on axis. - >>> local_weight_regression(np.mat([[16.99, 10.34], [21.01,23.68], - ... [24.59,25.69]]),np.mat([[1.01, 1.66, 3.5]]), 0.6) + Calculate predictions for each data point on axis + >>> local_weight_regression( + ... np.array([[16.99, 10.34], [21.01, 23.68], [24.59, 25.69]]), + ... np.array([[1.01, 1.66, 3.5]]), + ... 0.6 + ... 
) array([1.07173261, 1.65970737, 3.50160179]) """ - m, n = np.shape(training_data_x) + m, _ = np.shape(training_data_x) ypred = np.zeros(m) for i, item in enumerate(training_data_x): - ypred[i] = item * local_weight( + ypred[i] = item @ local_weight( item, training_data_x, training_data_y, bandwidth ) return ypred -def load_data(dataset_name: str, cola_name: str, colb_name: str) -> np.mat: +def load_data( + dataset_name: str, cola_name: str, colb_name: str +) -> tuple[np.array, np.array, np.array, np.array]: """ - Function used for loading data from the seaborn splitting into x and y points + Load data from seaborn and split it into x and y points """ import seaborn as sns @@ -78,23 +88,25 @@ def load_data(dataset_name: str, cola_name: str, colb_name: str) -> np.mat: col_a = np.array(data[cola_name]) # total_bill col_b = np.array(data[colb_name]) # tip - mcol_a = np.mat(col_a) - mcol_b = np.mat(col_b) + mcol_a = col_a.copy() + mcol_b = col_b.copy() - m = np.shape(mcol_b)[1] - one = np.ones((1, m), dtype=int) + one = np.ones(np.shape(mcol_b)[0], dtype=int) - # horizontal stacking - training_data_x = np.hstack((one.T, mcol_a.T)) + # pairing elements of one and mcol_a + training_data_x = np.column_stack((one, mcol_a)) return training_data_x, mcol_b, col_a, col_b -def get_preds(training_data_x: np.mat, mcol_b: np.mat, tau: float) -> np.ndarray: +def get_preds(training_data_x: np.array, mcol_b: np.array, tau: float) -> np.array: """ Get predictions with minimum error for each training data - >>> get_preds(np.mat([[16.99, 10.34], [21.01,23.68], - ... [24.59,25.69]]),np.mat([[1.01, 1.66, 3.5]]), 0.6) + >>> get_preds( + ... np.array([[16.99, 10.34], [21.01, 23.68], [24.59, 25.69]]), + ... np.array([[1.01, 1.66, 3.5]]), + ... 0.6 + ... ) array([1.07173261, 1.65970737, 3.50160179]) """ ypred = local_weight_regression(training_data_x, mcol_b, tau) @@ -102,15 +114,15 @@ def get_preds(training_data_x: np.mat, mcol_b: np.mat, tau: float) -> np.ndarray def plot_preds( - training_data_x: np.mat, - predictions: np.ndarray, - col_x: np.ndarray, - col_y: np.ndarray, + training_data_x: np.array, + predictions: np.array, + col_x: np.array, + col_y: np.array, cola_name: str, colb_name: str, ) -> plt.plot: """ - This function used to plot predictions and display the graph + Plot predictions and display the graph """ xsort = training_data_x.copy() xsort.sort(axis=0) @@ -128,6 +140,10 @@ def plot_preds( if __name__ == "__main__": + import doctest + + doctest.testmod() + training_data_x, mcol_b, col_a, col_b = load_data("tips", "total_bill", "tip") predictions = get_preds(training_data_x, mcol_b, 0.5) plot_preds(training_data_x, predictions, col_a, col_b, "total_bill", "tip") From 9f041e9cc82dab21401359d4cfa1b966fc30ddc4 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 2 Jan 2023 05:15:14 -0800 Subject: [PATCH 0757/1543] Refactor `sierpinski_triangle.py` (#8068) * updating DIRECTORY.md * Update sierpinski_triangle.py header doc * Remove unused PROGNAME var in sierpinski_triangle.py The PROGNAME var was used to print an image description in the reference code that this implementation was taken from, but it's entirely unused here * Refactor triangle() function to not use list of vertices Since the number of vertices is always fixed at 3, there's no need to pass in the vertices as a list, and it's clearer to give the vertices distinct names rather than index them from the list * Refactor sierpinski_triangle.py to use tuples Tuples make more sense than lists for storing coordinate pairs * Flip if-statement 
condition in sierpinski_triangle.py to avoid nesting * Add type hints to sierpinski_triangle.py * Add doctests to sierpinski_triangle.py * Fix return types in doctests * Update fractals/sierpinski_triangle.py Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- fractals/sierpinski_triangle.py | 114 +++++++++++++++++--------------- 1 file changed, 61 insertions(+), 53 deletions(-) diff --git a/fractals/sierpinski_triangle.py b/fractals/sierpinski_triangle.py index 084f6661f425..c28ec00b27fe 100644 --- a/fractals/sierpinski_triangle.py +++ b/fractals/sierpinski_triangle.py @@ -1,76 +1,84 @@ -#!/usr/bin/python - -"""Author Anurag Kumar | anuragkumarak95@gmail.com | git/anuragkumarak95 - -Simple example of Fractal generation using recursive function. - -What is Sierpinski Triangle? ->>The Sierpinski triangle (also with the original orthography Sierpinski), also called -the Sierpinski gasket or the Sierpinski Sieve, is a fractal and attractive fixed set -with the overall shape of an equilateral triangle, subdivided recursively into smaller -equilateral triangles. Originally constructed as a curve, this is one of the basic -examples of self-similar sets, i.e., it is a mathematically generated pattern that can -be reproducible at any magnification or reduction. It is named after the Polish -mathematician Wacław Sierpinski, but appeared as a decorative pattern many centuries -prior to the work of Sierpinski. +""" +Author Anurag Kumar | anuragkumarak95@gmail.com | git/anuragkumarak95 -Requirements(pip): - - turtle +Simple example of fractal generation using recursion. -Python: - - 2.6 +What is the Sierpiński Triangle? + The Sierpiński triangle (sometimes spelled Sierpinski), also called the +Sierpiński gasket or Sierpiński sieve, is a fractal attractive fixed set with +the overall shape of an equilateral triangle, subdivided recursively into +smaller equilateral triangles. Originally constructed as a curve, this is one of +the basic examples of self-similar sets—that is, it is a mathematically +generated pattern that is reproducible at any magnification or reduction. It is +named after the Polish mathematician Wacław Sierpiński, but appeared as a +decorative pattern many centuries before the work of Sierpiński. 
-Usage: - - $python sierpinski_triangle.py -Credits: This code was written by editing the code from -https://www.riannetrujillo.com/blog/python-fractal/ +Usage: python sierpinski_triangle.py +Credits: + The above description is taken from + https://en.wikipedia.org/wiki/Sierpi%C5%84ski_triangle + This code was written by editing the code from + https://www.riannetrujillo.com/blog/python-fractal/ """ import sys import turtle -PROGNAME = "Sierpinski Triangle" - -points = [[-175, -125], [0, 175], [175, -125]] # size of triangle - - -def get_mid(p1, p2): - return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2) # find midpoint - - -def triangle(points, depth): +def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]: + """ + Find the midpoint of two points + + >>> get_mid((0, 0), (2, 2)) + (1.0, 1.0) + >>> get_mid((-3, -3), (3, 3)) + (0.0, 0.0) + >>> get_mid((1, 0), (3, 2)) + (2.0, 1.0) + >>> get_mid((0, 0), (1, 1)) + (0.5, 0.5) + >>> get_mid((0, 0), (0, 0)) + (0.0, 0.0) + """ + return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2 + + +def triangle( + vertex1: tuple[float, float], + vertex2: tuple[float, float], + vertex3: tuple[float, float], + depth: int, +) -> None: + """ + Recursively draw the Sierpinski triangle given the vertices of the triangle + and the recursion depth + """ my_pen.up() - my_pen.goto(points[0][0], points[0][1]) + my_pen.goto(vertex1[0], vertex1[1]) my_pen.down() - my_pen.goto(points[1][0], points[1][1]) - my_pen.goto(points[2][0], points[2][1]) - my_pen.goto(points[0][0], points[0][1]) + my_pen.goto(vertex2[0], vertex2[1]) + my_pen.goto(vertex3[0], vertex3[1]) + my_pen.goto(vertex1[0], vertex1[1]) - if depth > 0: - triangle( - [points[0], get_mid(points[0], points[1]), get_mid(points[0], points[2])], - depth - 1, - ) - triangle( - [points[1], get_mid(points[0], points[1]), get_mid(points[1], points[2])], - depth - 1, - ) - triangle( - [points[2], get_mid(points[2], points[1]), get_mid(points[0], points[2])], - depth - 1, - ) + if depth == 0: + return + + triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1) + triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1) + triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( - "right format for using this script: " - "$python fractals.py " + "Correct format for using this script: " + "python fractals.py " ) my_pen = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor("red") - triangle(points, int(sys.argv[1])) + + vertices = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle + triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1])) From 32a1ff9359b4de80b94ef26c55a5b24204d35382 Mon Sep 17 00:00:00 2001 From: Abhishek Mulik Date: Wed, 4 Jan 2023 06:17:15 +0530 Subject: [PATCH 0758/1543] Update is_palindrome.py (#8022) --- strings/is_palindrome.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/strings/is_palindrome.py b/strings/is_palindrome.py index 5758af0cef9b..9bf2abd98486 100644 --- a/strings/is_palindrome.py +++ b/strings/is_palindrome.py @@ -16,7 +16,24 @@ def is_palindrome(s: str) -> bool: # Since punctuation, capitalization, and spaces are often ignored while checking # palindromes, we first remove them from our string. 
s = "".join(character for character in s.lower() if character.isalnum()) - return s == s[::-1] + # return s == s[::-1] is the slicing method; + # it builds an extra reversed copy of the string, so we do + # better with the iteration method below. + + end = len(s) // 2 + n = len(s) + + # We only need to traverse up to half the length of the string, + # because the i'th element from the end can be reached + # from the i'th index. + # e.g. in [0,1,2,3,4,5], index 1 is compared + # with index n - i - 1 == 6 - 1 - 1 == 4, + # where n is the length of the string + + for i in range(end): + if s[i] != s[n - i - 1]: + return False + return True if __name__ == "__main__": From 4939e8463fc34c936a309d513cfe8153343cb9d5 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 7 Jan 2023 16:56:39 +0000 Subject: [PATCH 0759/1543] Create cached fibonacci algorithm (#8084) * feat: Add `fib_recursive_cached` func * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * doc: Show difference in time when caching algorithm Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/fibonacci.py | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index e0da66ee5e3b..d58c9fc68c67 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -16,6 +16,7 @@ fib_binet runtime: 0.0174 ms """ +from functools import lru_cache from math import sqrt from time import time @@ -92,6 +93,39 @@ def fib_recursive_term(i: int) -> int: return [fib_recursive_term(i) for i in range(n + 1)] +def fib_recursive_cached(n: int) -> list[int]: + """ + Calculates the first n (0-indexed) Fibonacci numbers using recursion + >>> fib_recursive_cached(0) + [0] + >>> fib_recursive_cached(1) + [0, 1] + >>> fib_recursive_cached(5) + [0, 1, 1, 2, 3, 5] + >>> fib_recursive_cached(10) + [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] + >>> fib_recursive_cached(-1) + Traceback (most recent call last): + ...
+ Exception: n is negative + """ + + @lru_cache(maxsize=None) + def fib_recursive_term(i: int) -> int: + """ + Calculates the i-th (0-indexed) Fibonacci number using recursion + """ + if i < 0: + raise Exception("n is negative") + if i < 2: + return i + return fib_recursive_term(i - 1) + fib_recursive_term(i - 2) + + if n < 0: + raise Exception("n is negative") + return [fib_recursive_term(i) for i in range(n + 1)] + + def fib_memoization(n: int) -> list[int]: """ Calculates the first n (0-indexed) Fibonacci numbers using memoization @@ -163,8 +197,9 @@ def fib_binet(n: int) -> list[int]: if __name__ == "__main__": - num = 20 + num = 30 time_func(fib_iterative, num) - time_func(fib_recursive, num) + time_func(fib_recursive, num) # Around 3s runtime + time_func(fib_recursive_cached, num) # Around 0ms runtime time_func(fib_memoization, num) time_func(fib_binet, num) From 1a27258bd6c3a35a403629b4ea7fc0228bcc892d Mon Sep 17 00:00:00 2001 From: MohammadReza Balakhaniyan <51448587+balakhaniyan@users.noreply.github.com> Date: Wed, 11 Jan 2023 02:17:02 +0330 Subject: [PATCH 0760/1543] gcd_of_n_numbers (#8057) * add maths/Gcd of N Numbers * add maths/Gcd of N Numbers * add maths/Gcd of N Numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add maths/Gcd of N Numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add maths/Gcd of N Numbers * add maths/Gcd of N Numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add maths/Gcd of N Numbers * add maths/Gcd of N Numbers * more pythonic * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * more pythonic * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * merged * merged * more readable * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/gcd_of_n_numbers.py | 109 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 maths/gcd_of_n_numbers.py diff --git a/maths/gcd_of_n_numbers.py b/maths/gcd_of_n_numbers.py new file mode 100644 index 000000000000..63236c236ada --- /dev/null +++ b/maths/gcd_of_n_numbers.py @@ -0,0 +1,109 @@ +""" +Gcd of N Numbers +Reference: https://en.wikipedia.org/wiki/Greatest_common_divisor +""" + +from collections import Counter + + +def get_factors( + number: int, factors: Counter | None = None, factor: int = 2 +) -> Counter: + """ + this is a recursive function for get all factors of number + >>> get_factors(45) + Counter({3: 2, 5: 1}) + >>> get_factors(2520) + Counter({2: 3, 3: 2, 5: 1, 7: 1}) + >>> get_factors(23) + Counter({23: 1}) + >>> get_factors(0) + Traceback (most recent call last): + ... + TypeError: number must be integer and greater than zero + >>> get_factors(-1) + Traceback (most recent call last): + ... + TypeError: number must be integer and greater than zero + >>> get_factors(1.5) + Traceback (most recent call last): + ... 
+ TypeError: number must be integer and greater than zero + + factor can be all numbers from 2 to number that we check if number % factor == 0 + if it is equal to zero, we check again with number // factor + else we increase factor by one + """ + + match number: + case int(number) if number == 1: + return Counter({1: 1}) + case int(num) if number > 0: + number = num + case _: + raise TypeError("number must be integer and greater than zero") + + factors = factors or Counter() + + if number == factor: # break condition + # all numbers are factors of itself + factors[factor] += 1 + return factors + + if number % factor > 0: + # if it is greater than zero + # so it is not a factor of number and we check next number + return get_factors(number, factors, factor + 1) + + factors[factor] += 1 + # else we update factors (that is Counter(dict-like) type) and check again + return get_factors(number // factor, factors, factor) + + +def get_greatest_common_divisor(*numbers: int) -> int: + """ + get gcd of n numbers: + >>> get_greatest_common_divisor(18, 45) + 9 + >>> get_greatest_common_divisor(23, 37) + 1 + >>> get_greatest_common_divisor(2520, 8350) + 10 + >>> get_greatest_common_divisor(-10, 20) + Traceback (most recent call last): + ... + Exception: numbers must be integer and greater than zero + >>> get_greatest_common_divisor(1.5, 2) + Traceback (most recent call last): + ... + Exception: numbers must be integer and greater than zero + >>> get_greatest_common_divisor(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) + 1 + >>> get_greatest_common_divisor("1", 2, 3, 4, 5, 6, 7, 8, 9, 10) + Traceback (most recent call last): + ... + Exception: numbers must be integer and greater than zero + """ + + # we just need factors, not numbers itself + try: + same_factors, *factors = map(get_factors, numbers) + except TypeError as e: + raise Exception("numbers must be integer and greater than zero") from e + + for factor in factors: + same_factors &= factor + # get common factor between all + # `&` return common elements with smaller value (for Counter type) + + # now, same_factors is something like {2: 2, 3: 4} that means 2 * 2 * 3 * 3 * 3 * 3 + mult = 1 + # power each factor and multiply + # for {2: 2, 3: 4}, it is [4, 81] and then 324 + for m in [factor**power for factor, power in same_factors.items()]: + mult *= m + return mult + + +if __name__ == "__main__": + print(get_greatest_common_divisor(18, 45)) # 9 From c00af459fe0a18ae6adca2aec5ca8c7ff64864c8 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 26 Jan 2023 07:12:11 +0000 Subject: [PATCH 0761/1543] feat: Concatenate both factorial implementations (#8099) * feat: Concatenate both factorial implementations * fix: Rename factorial recursive method --- .../{factorial_iterative.py => factorial.py} | 24 ++++++++++++++++ maths/factorial_recursive.py | 28 ------------------- 2 files changed, 24 insertions(+), 28 deletions(-) rename maths/{factorial_iterative.py => factorial.py} (58%) delete mode 100644 maths/factorial_recursive.py diff --git a/maths/factorial_iterative.py b/maths/factorial.py similarity index 58% rename from maths/factorial_iterative.py rename to maths/factorial.py index c6cf7de57ab2..bbf0efc011d8 100644 --- a/maths/factorial_iterative.py +++ b/maths/factorial.py @@ -34,6 +34,30 @@ def factorial(number: int) -> int: return value +def factorial_recursive(n: int) -> int: + """ + Calculate the factorial of a positive integer + https://en.wikipedia.org/wiki/Factorial + + >>> import math + >>> all(factorial(i) == math.factorial(i) for i in range(20)) 
+ True + >>> factorial(0.1) + Traceback (most recent call last): + ... + ValueError: factorial() only accepts integral values + >>> factorial(-1) + Traceback (most recent call last): + ... + ValueError: factorial() not defined for negative values + """ + if not isinstance(n, int): + raise ValueError("factorial() only accepts integral values") + if n < 0: + raise ValueError("factorial() not defined for negative values") + return 1 if n == 0 or n == 1 else n * factorial(n - 1) + + if __name__ == "__main__": import doctest diff --git a/maths/factorial_recursive.py b/maths/factorial_recursive.py deleted file mode 100644 index 137112738905..000000000000 --- a/maths/factorial_recursive.py +++ /dev/null @@ -1,28 +0,0 @@ -def factorial(n: int) -> int: - """ - Calculate the factorial of a positive integer - https://en.wikipedia.org/wiki/Factorial - - >>> import math - >>> all(factorial(i) == math.factorial(i) for i in range(20)) - True - >>> factorial(0.1) - Traceback (most recent call last): - ... - ValueError: factorial() only accepts integral values - >>> factorial(-1) - Traceback (most recent call last): - ... - ValueError: factorial() not defined for negative values - """ - if not isinstance(n, int): - raise ValueError("factorial() only accepts integral values") - if n < 0: - raise ValueError("factorial() not defined for negative values") - return 1 if n == 0 or n == 1 else n * factorial(n - 1) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 57c12fab2822df33b8da5a1fd9b95f2f7d64f130 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 26 Jan 2023 02:13:03 -0500 Subject: [PATCH 0762/1543] Fix `mypy` errors in `lorentz_transformation_four_vector.py` (#8075) * updating DIRECTORY.md * Fix mypy errors in lorentz_transformation_four_vector.py * Remove unused symbol vars * Add function documentation and rewrite algorithm explanation Previous explanation was misleading, as the code only calculates Lorentz transformations for movement in the x direction (0 velocity in the y and z directions) and not movement in any direction * updating DIRECTORY.md * Update error message for speed Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + physics/lorentz_transformation_four_vector.py | 144 ++++++++---------- 2 files changed, 65 insertions(+), 80 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 5ce9dca74c06..31e86ea59b79 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -557,6 +557,7 @@ * [Gamma Recursive](maths/gamma_recursive.py) * [Gaussian](maths/gaussian.py) * [Gaussian Error Linear Unit](maths/gaussian_error_linear_unit.py) + * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) * [Greedy Coin Change](maths/greedy_coin_change.py) * [Hamming Numbers](maths/hamming_numbers.py) diff --git a/physics/lorentz_transformation_four_vector.py b/physics/lorentz_transformation_four_vector.py index f58b40e5906b..64be97245f29 100644 --- a/physics/lorentz_transformation_four_vector.py +++ b/physics/lorentz_transformation_four_vector.py @@ -1,39 +1,33 @@ """ -Lorentz transformation describes the transition from a reference frame P -to another reference frame P', each of which is moving in a direction with -respect to the other. The Lorentz transformation implemented in this code -is the relativistic version using a four vector described by Minkowsky Space: -x0 = ct, x1 = x, x2 = y, and x3 = z - -NOTE: Please note that x0 is c (speed of light) times t (time). 
- -So, the Lorentz transformation using a four vector is defined as: - -|ct'| | γ -γβ 0 0| |ct| -|x' | = |-γβ γ 0 0| *|x | -|y' | | 0 0 1 0| |y | -|z' | | 0 0 0 1| |z | - -Where: - 1 -γ = --------------- - ----------- - / v^2 | - /(1 - --- - -/ c^2 - - v -β = ----- - c +Lorentz transformations describe the transition between two inertial reference +frames F and F', each of which is moving in some direction with respect to the +other. This code only calculates Lorentz transformations for movement in the x +direction with no spacial rotation (i.e., a Lorentz boost in the x direction). +The Lorentz transformations are calculated here as linear transformations of +four-vectors [ct, x, y, z] described by Minkowski space. Note that t (time) is +multiplied by c (the speed of light) in the first entry of each four-vector. + +Thus, if X = [ct; x; y; z] and X' = [ct'; x'; y'; z'] are the four-vectors for +two inertial reference frames and X' moves in the x direction with velocity v +with respect to X, then the Lorentz transformation from X to X' is X' = BX, +where + + | γ -γβ 0 0| +B = |-γβ γ 0 0| + | 0 0 1 0| + | 0 0 0 1| + +is the matrix describing the Lorentz boost between X and X', +γ = 1 / √(1 - v²/c²) is the Lorentz factor, and β = v/c is the velocity as +a fraction of c. Reference: https://en.wikipedia.org/wiki/Lorentz_transformation """ -from __future__ import annotations from math import sqrt -import numpy as np # type: ignore -from sympy import symbols # type: ignore +import numpy as np +from sympy import symbols # Coefficient # Speed of light (m/s) @@ -41,79 +35,77 @@ # Symbols ct, x, y, z = symbols("ct x y z") -ct_p, x_p, y_p, z_p = symbols("ct' x' y' z'") # Vehicle's speed divided by speed of light (no units) def beta(velocity: float) -> float: """ + Calculates β = v/c, the given velocity as a fraction of c >>> beta(c) 1.0 - >>> beta(199792458) 0.666435904801848 - >>> beta(1e5) 0.00033356409519815205 - >>> beta(0.2) Traceback (most recent call last): ... - ValueError: Speed must be greater than 1! + ValueError: Speed must be greater than or equal to 1! """ if velocity > c: - raise ValueError("Speed must not exceed Light Speed 299,792,458 [m/s]!") - - # Usually the speed u should be much higher than 1 (c order of magnitude) + raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!") elif velocity < 1: - raise ValueError("Speed must be greater than 1!") + # Usually the speed should be much higher than 1 (c order of magnitude) + raise ValueError("Speed must be greater than or equal to 1!") + return velocity / c def gamma(velocity: float) -> float: """ + Calculate the Lorentz factor γ = 1 / √(1 - v²/c²) for a given velocity >>> gamma(4) 1.0000000000000002 - >>> gamma(1e5) 1.0000000556325075 - >>> gamma(3e7) 1.005044845777813 - >>> gamma(2.8e8) 2.7985595722318277 - >>> gamma(299792451) 4627.49902669495 - >>> gamma(0.3) Traceback (most recent call last): ... - ValueError: Speed must be greater than 1! - - >>> gamma(2*c) + ValueError: Speed must be greater than or equal to 1! + >>> gamma(2 * c) Traceback (most recent call last): ... - ValueError: Speed must not exceed Light Speed 299,792,458 [m/s]! + ValueError: Speed must not exceed light speed 299,792,458 [m/s]! 
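
    For example, at half the speed of light (β = 0.5) the factor is
    γ = 1 / √(1 - 0.25) ≈ 1.1547.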
""" - return 1 / (sqrt(1 - beta(velocity) ** 2)) + return 1 / sqrt(1 - beta(velocity) ** 2) -def transformation_matrix(velocity: float) -> np.array: +def transformation_matrix(velocity: float) -> np.ndarray: """ + Calculate the Lorentz transformation matrix for movement in the x direction: + + | γ -γβ 0 0| + |-γβ γ 0 0| + | 0 0 1 0| + | 0 0 0 1| + + where γ is the Lorentz factor and β is the velocity as a fraction of c >>> transformation_matrix(29979245) array([[ 1.00503781, -0.10050378, 0. , 0. ], [-0.10050378, 1.00503781, 0. , 0. ], [ 0. , 0. , 1. , 0. ], [ 0. , 0. , 0. , 1. ]]) - >>> transformation_matrix(19979245.2) array([[ 1.00222811, -0.06679208, 0. , 0. ], [-0.06679208, 1.00222811, 0. , 0. ], [ 0. , 0. , 1. , 0. ], [ 0. , 0. , 0. , 1. ]]) - >>> transformation_matrix(1) array([[ 1.00000000e+00, -3.33564095e-09, 0.00000000e+00, 0.00000000e+00], @@ -123,16 +115,14 @@ def transformation_matrix(velocity: float) -> np.array: 0.00000000e+00], [ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]) - >>> transformation_matrix(0) Traceback (most recent call last): ... - ValueError: Speed must be greater than 1! - + ValueError: Speed must be greater than or equal to 1! >>> transformation_matrix(c * 1.5) Traceback (most recent call last): ... - ValueError: Speed must not exceed Light Speed 299,792,458 [m/s]! + ValueError: Speed must not exceed light speed 299,792,458 [m/s]! """ return np.array( [ @@ -144,44 +134,39 @@ def transformation_matrix(velocity: float) -> np.array: ) -def transform( - velocity: float, event: np.array = np.zeros(4), symbolic: bool = True # noqa: B008 -) -> np.array: +def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray: """ - >>> transform(29979245,np.array([1,2,3,4]), False) - array([ 3.01302757e+08, -3.01302729e+07, 3.00000000e+00, 4.00000000e+00]) + Calculate a Lorentz transformation for movement in the x direction given a + velocity and a four-vector for an inertial reference frame + If no four-vector is given, then calculate the transformation symbolically + with variables + >>> transform(29979245, np.array([1, 2, 3, 4])) + array([ 3.01302757e+08, -3.01302729e+07, 3.00000000e+00, 4.00000000e+00]) >>> transform(29979245) array([1.00503781498831*ct - 0.100503778816875*x, -0.100503778816875*ct + 1.00503781498831*x, 1.0*y, 1.0*z], dtype=object) - >>> transform(19879210.2) array([1.0022057787097*ct - 0.066456172618675*x, -0.066456172618675*ct + 1.0022057787097*x, 1.0*y, 1.0*z], dtype=object) - - >>> transform(299792459, np.array([1,1,1,1])) + >>> transform(299792459, np.array([1, 1, 1, 1])) Traceback (most recent call last): ... - ValueError: Speed must not exceed Light Speed 299,792,458 [m/s]! - - >>> transform(-1, np.array([1,1,1,1])) + ValueError: Speed must not exceed light speed 299,792,458 [m/s]! + >>> transform(-1, np.array([1, 1, 1, 1])) Traceback (most recent call last): ... - ValueError: Speed must be greater than 1! + ValueError: Speed must be greater than or equal to 1! 
""" - # Ensure event is not a vector of zeros - if not symbolic: - - # x0 is ct (speed of ligt * time) - event[0] = event[0] * c + # Ensure event is not empty + if event is None: + event = np.array([ct, x, y, z]) # Symbolic four vector else: + event[0] *= c # x0 is ct (speed of light * time) - # Symbolic four vector - event = np.array([ct, x, y, z]) - - return transformation_matrix(velocity).dot(event) + return transformation_matrix(velocity) @ event if __name__ == "__main__": @@ -197,9 +182,8 @@ def transform( print(f"y' = {four_vector[2]}") print(f"z' = {four_vector[3]}") - # Substitute symbols with numerical values: - values = np.array([1, 1, 1, 1]) - sub_dict = {ct: c * values[0], x: values[1], y: values[2], z: values[3]} - numerical_vector = [four_vector[i].subs(sub_dict) for i in range(0, 4)] + # Substitute symbols with numerical values + sub_dict = {ct: c, x: 1, y: 1, z: 1} + numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)] print(f"\n{numerical_vector}") From ed0a581f9347b8fddc1928e52232eea250108573 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 30 Jan 2023 23:42:15 +0100 Subject: [PATCH 0763/1543] [pre-commit.ci] pre-commit autoupdate (#8107) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/PyCQA/isort: 5.11.4 → 5.12.0](https://github.com/PyCQA/isort/compare/5.11.4...5.12.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8eb6d297e831..b97ef288981b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,7 +20,7 @@ repos: - id: black - repo: https://github.com/PyCQA/isort - rev: 5.11.4 + rev: 5.12.0 hooks: - id: isort args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 31e86ea59b79..a8786cc2591f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -543,8 +543,7 @@ * [Euler Modified](maths/euler_modified.py) * [Eulers Totient](maths/eulers_totient.py) * [Extended Euclidean Algorithm](maths/extended_euclidean_algorithm.py) - * [Factorial Iterative](maths/factorial_iterative.py) - * [Factorial Recursive](maths/factorial_recursive.py) + * [Factorial](maths/factorial.py) * [Factors](maths/factors.py) * [Fermat Little Theorem](maths/fermat_little_theorem.py) * [Fibonacci](maths/fibonacci.py) From c909da9b085957fcd16b6b30b6bdc0cf2855a150 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 1 Feb 2023 14:14:54 +0100 Subject: [PATCH 0764/1543] pre-commit: Upgrade psf/black for stable style 2023 (#8110) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * pre-commit: Upgrade psf/black for stable style 2023 Updating https://github.com/psf/black ... updating 22.12.0 -> 23.1.0 for their `2023 stable style`. * https://github.com/psf/black/blob/main/CHANGES.md#2310 > This is the first [psf/black] release of 2023, and following our stability policy, it comes with a number of improvements to our stable style… Also, add https://github.com/tox-dev/pyproject-fmt and https://github.com/abravalheri/validate-pyproject to pre-commit. I only modified `.pre-commit-config.yaml` and all other files were modified by pre-commit.ci and psf/black. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 12 +++++++++- arithmetic_analysis/newton_raphson_new.py | 1 - backtracking/n_queens_math.py | 1 - blockchain/chinese_remainder_theorem.py | 1 + ciphers/enigma_machine2.py | 1 - ciphers/playfair_cipher.py | 1 - ciphers/polybius.py | 1 - ciphers/xor_cipher.py | 2 -- compression/lz77.py | 1 - computer_vision/cnn_classification.py | 1 - computer_vision/harris_corner.py | 3 --- conversions/decimal_to_binary.py | 1 - conversions/molecular_chemistry.py | 1 - conversions/roman_numerals.py | 2 +- conversions/temperature_conversions.py | 1 - conversions/weight_conversion.py | 1 - .../binary_tree/binary_tree_traversals.py | 1 - .../inorder_tree_traversal_2022.py | 1 - data_structures/hashing/double_hash.py | 1 - data_structures/hashing/hash_table.py | 2 -- data_structures/heap/binomial_heap.py | 2 -- data_structures/heap/skew_heap.py | 1 - .../linked_list/doubly_linked_list_two.py | 2 -- data_structures/stacks/prefix_evaluation.py | 1 - .../stacks/stack_with_doubly_linked_list.py | 1 - data_structures/stacks/stock_span_problem.py | 2 -- .../filters/bilateral_filter.py | 1 - .../filters/local_binary_pattern.py | 1 - dynamic_programming/bitmask.py | 5 ---- .../iterating_through_submasks.py | 1 - electronics/coulombs_law.py | 1 - fractals/julia_sets.py | 1 - fractals/mandelbrot.py | 1 - geodesy/lamberts_ellipsoidal_distance.py | 1 - graphs/a_star.py | 1 - graphs/check_bipartite_graph_bfs.py | 1 - graphs/graph_matrix.py | 1 - graphs/karger.py | 1 - graphs/minimum_spanning_tree_boruvka.py | 1 - graphs/multi_heuristic_astar.py | 4 ++-- knapsack/recursive_approach_knapsack.py | 1 - linear_algebra/src/conjugate_gradient.py | 1 - machine_learning/k_means_clust.py | 3 --- machine_learning/self_organizing_map.py | 1 - .../sequential_minimum_optimization.py | 2 -- machine_learning/xgboost_classifier.py | 1 - maths/armstrong_numbers.py | 2 +- maths/binary_exponentiation.py | 1 - maths/combinations.py | 1 - maths/decimal_isolate.py | 1 - maths/fermat_little_theorem.py | 1 - maths/greedy_coin_change.py | 2 -- maths/integration_by_simpson_approx.py | 1 - maths/jaccard_similarity.py | 2 -- maths/least_common_multiple.py | 1 - maths/line_length.py | 2 -- maths/monte_carlo.py | 1 + maths/newton_raphson.py | 1 - maths/numerical_integration.py | 2 -- maths/primelib.py | 23 ------------------- maths/segmented_sieve.py | 1 - maths/two_pointer.py | 1 - maths/zellers_congruence.py | 1 - matrix/largest_square_area_in_matrix.py | 3 --- other/activity_selection.py | 1 - other/nested_brackets.py | 2 -- other/scoring_algorithm.py | 1 - other/sdes.py | 1 - physics/casimir_effect.py | 1 - physics/hubble_parameter.py | 1 - physics/newtons_law_of_gravitation.py | 1 - project_euler/problem_004/sol1.py | 2 -- project_euler/problem_074/sol2.py | 1 - project_euler/problem_089/sol1.py | 1 - project_euler/problem_092/sol1.py | 2 -- quantum/q_fourier_transform.py | 1 - quantum/quantum_teleportation.py | 1 - scheduling/highest_response_ratio_next.py | 2 -- searches/binary_search.py | 1 - searches/interpolation_search.py | 1 - searches/tabu_search.py | 1 - searches/ternary_search.py | 1 - sorts/comb_sort.py | 1 - sorts/odd_even_sort.py | 1 - sorts/odd_even_transposition_parallel.py | 1 - sorts/random_normal_distribution_quicksort.py | 2 -- sorts/shrink_shell_sort.py | 1 - sorts/stooge_sort.py | 1 - 
sorts/tim_sort.py | 1 - strings/dna.py | 1 - strings/hamming_distance.py | 1 - strings/levenshtein_distance.py | 2 -- strings/prefix_function.py | 1 - strings/text_justification.py | 1 - web_programming/fetch_anime_and_play.py | 9 ++------ web_programming/fetch_well_rx_price.py | 5 ---- web_programming/get_user_tweets.py | 1 - 97 files changed, 19 insertions(+), 154 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b97ef288981b..f8d1a65db27b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,7 +15,7 @@ repos: - id: auto-walrus - repo: https://github.com/psf/black - rev: 22.12.0 + rev: 23.1.0 hooks: - id: black @@ -26,6 +26,16 @@ repos: args: - --profile=black + - repo: https://github.com/tox-dev/pyproject-fmt + rev: "0.6.0" + hooks: + - id: pyproject-fmt + + - repo: https://github.com/abravalheri/validate-pyproject + rev: v0.12.1 + hooks: + - id: validate-pyproject + - repo: https://github.com/asottile/pyupgrade rev: v3.3.1 hooks: diff --git a/arithmetic_analysis/newton_raphson_new.py b/arithmetic_analysis/newton_raphson_new.py index dd1d7e0929cf..472cb5b5ac54 100644 --- a/arithmetic_analysis/newton_raphson_new.py +++ b/arithmetic_analysis/newton_raphson_new.py @@ -59,7 +59,6 @@ def newton_raphson( # Let's Execute if __name__ == "__main__": - # Find root of trigonometric function # Find value of pi print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") diff --git a/backtracking/n_queens_math.py b/backtracking/n_queens_math.py index 2de784ded06b..23bd1590618b 100644 --- a/backtracking/n_queens_math.py +++ b/backtracking/n_queens_math.py @@ -107,7 +107,6 @@ def depth_first_search( # We iterate each column in the row to find all possible results in each row for col in range(n): - # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. 
Then we apply the two formulas we diff --git a/blockchain/chinese_remainder_theorem.py b/blockchain/chinese_remainder_theorem.py index 54d861dd9f10..d3e75e77922a 100644 --- a/blockchain/chinese_remainder_theorem.py +++ b/blockchain/chinese_remainder_theorem.py @@ -53,6 +53,7 @@ def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int: # ----------SAME SOLUTION USING InvertModulo instead ExtendedEuclid---------------- + # This function find the inverses of a i.e., a^(-1) def invert_modulo(a: int, n: int) -> int: """ diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index a877256ebeeb..07d21893f192 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -230,7 +230,6 @@ def enigma( # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: - # 1st plugboard -------------------------- if symbol in plugboard: symbol = plugboard[symbol] diff --git a/ciphers/playfair_cipher.py b/ciphers/playfair_cipher.py index 89aedb7afdb8..7279fb23ecb2 100644 --- a/ciphers/playfair_cipher.py +++ b/ciphers/playfair_cipher.py @@ -39,7 +39,6 @@ def prepare_input(dirty: str) -> str: def generate_table(key: str) -> list[str]: - # I and J are used interchangeably to allow # us to use a 5x5 table (25 letters) alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ" diff --git a/ciphers/polybius.py b/ciphers/polybius.py index c81c1d39533f..3539ab70c303 100644 --- a/ciphers/polybius.py +++ b/ciphers/polybius.py @@ -19,7 +19,6 @@ class PolybiusCipher: def __init__(self) -> None: - self.SQUARE = np.array(SQUARE) def letter_to_numbers(self, letter: str) -> np.ndarray: diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py index ca9dfe20f7b6..379ef0ef7e50 100644 --- a/ciphers/xor_cipher.py +++ b/ciphers/xor_cipher.py @@ -130,7 +130,6 @@ def encrypt_file(self, file: str, key: int = 0) -> bool: try: with open(file) as fin: with open("encrypt.out", "w+") as fout: - # actual encrypt-process for line in fin: fout.write(self.encrypt_string(line, key)) @@ -155,7 +154,6 @@ def decrypt_file(self, file: str, key: int) -> bool: try: with open(file) as fin: with open("decrypt.out", "w+") as fout: - # actual encrypt-process for line in fin: fout.write(self.decrypt_string(line, key)) diff --git a/compression/lz77.py b/compression/lz77.py index 7c1a6f6a4c19..1b201c59f186 100644 --- a/compression/lz77.py +++ b/compression/lz77.py @@ -89,7 +89,6 @@ def compress(self, text: str) -> list[Token]: # while there are still characters in text to compress while text: - # find the next encoding phrase # - triplet with offset, length, indicator (the next encoding character) token = self._find_encoding_token(text, search_buffer) diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py index 59e4556e069b..1c193fcbb50b 100644 --- a/computer_vision/cnn_classification.py +++ b/computer_vision/cnn_classification.py @@ -28,7 +28,6 @@ from tensorflow.keras import layers, models if __name__ == "__main__": - # Initialising the CNN # (Sequential- Building the model layer by layer) classifier = models.Sequential() diff --git a/computer_vision/harris_corner.py b/computer_vision/harris_corner.py index c8905bb6a9cd..0cc7522bc3af 100644 --- a/computer_vision/harris_corner.py +++ b/computer_vision/harris_corner.py @@ -9,7 +9,6 @@ class HarrisCorner: def __init__(self, k: float, window_size: int): - """ k : is an empirically determined constant in [0.04,0.06] window_size : neighbourhoods considered @@ -25,7 +24,6 @@ def __str__(self) -> str: return 
str(self.k) def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]: - """ Returns the image with corners identified img_path : path of the image @@ -68,7 +66,6 @@ def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]: if __name__ == "__main__": - edge_detect = HarrisCorner(0.04, 3) color_img, _ = edge_detect.detect("path_to_image") cv2.imwrite("detect.png", color_img) diff --git a/conversions/decimal_to_binary.py b/conversions/decimal_to_binary.py index cfda57ca714a..973c47c8af67 100644 --- a/conversions/decimal_to_binary.py +++ b/conversions/decimal_to_binary.py @@ -2,7 +2,6 @@ def decimal_to_binary(num: int) -> str: - """ Convert an Integer Decimal Number to a Binary Number as str. >>> decimal_to_binary(0) diff --git a/conversions/molecular_chemistry.py b/conversions/molecular_chemistry.py index 0024eb5cb5b8..51ffe534dd0d 100644 --- a/conversions/molecular_chemistry.py +++ b/conversions/molecular_chemistry.py @@ -86,7 +86,6 @@ def pressure_and_volume_to_temperature( if __name__ == "__main__": - import doctest doctest.testmod() diff --git a/conversions/roman_numerals.py b/conversions/roman_numerals.py index 61215a0c0730..75af2ac72882 100644 --- a/conversions/roman_numerals.py +++ b/conversions/roman_numerals.py @@ -47,7 +47,7 @@ def int_to_roman(number: int) -> str: True """ result = [] - for (arabic, roman) in ROMAN: + for arabic, roman in ROMAN: (factor, number) = divmod(number, arabic) result.append(roman * factor) if number == 0: diff --git a/conversions/temperature_conversions.py b/conversions/temperature_conversions.py index e5af465561f9..f7af6c8f1e2b 100644 --- a/conversions/temperature_conversions.py +++ b/conversions/temperature_conversions.py @@ -380,7 +380,6 @@ def reaumur_to_rankine(reaumur: float, ndigits: int = 2) -> float: if __name__ == "__main__": - import doctest doctest.testmod() diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py index 18c4037317da..5c032a497a7b 100644 --- a/conversions/weight_conversion.py +++ b/conversions/weight_conversion.py @@ -307,7 +307,6 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float: if __name__ == "__main__": - import doctest doctest.testmod() diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 54b1dc536f32..24dd1bd8cdc8 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -105,7 +105,6 @@ def populate_output(root: Node | None, level: int) -> None: if not root: return if level == 1: - output.append(root.data) elif level > 1: populate_output(root.left, level - 1) diff --git a/data_structures/binary_tree/inorder_tree_traversal_2022.py b/data_structures/binary_tree/inorder_tree_traversal_2022.py index 08001738f53d..e94ba7013a82 100644 --- a/data_structures/binary_tree/inorder_tree_traversal_2022.py +++ b/data_structures/binary_tree/inorder_tree_traversal_2022.py @@ -58,7 +58,6 @@ def inorder(node: None | BinaryTreeNode) -> list[int]: # if node is None,return def make_tree() -> BinaryTreeNode | None: - root = insert(None, 15) insert(root, 10) insert(root, 25) diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py index 453e0d13106d..be21e74cadd0 100644 --- a/data_structures/hashing/double_hash.py +++ b/data_structures/hashing/double_hash.py @@ -24,7 +24,6 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def __hash_function_2(self, value, data): - 
next_prime_gt = ( next_prime(value % self.size_table) if not is_prime(value % self.size_table) diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index 607454c8255f..7ca2f7c401cf 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -32,7 +32,6 @@ def hash_function(self, key): return key % self.size_table def _step_by_step(self, step_ord): - print(f"step {step_ord}") print(list(range(len(self.values)))) print(self.values) @@ -53,7 +52,6 @@ def _collision_resolution(self, key, data=None): new_key = self.hash_function(key + 1) while self.values[new_key] is not None and self.values[new_key] != key: - if self.values.count(None) > 0: new_key = self.hash_function(new_key + 1) else: diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index d79fac7a99d5..2e05c5c80a22 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -174,7 +174,6 @@ def merge_heaps(self, other): i.left_tree_size == i.parent.left_tree_size and i.left_tree_size != i.parent.parent.left_tree_size ): - # Neighbouring Nodes previous_node = i.left next_node = i.parent.parent @@ -233,7 +232,6 @@ def insert(self, val): and self.bottom_root.left_tree_size == self.bottom_root.parent.left_tree_size ): - # Next node next_node = self.bottom_root.parent.parent diff --git a/data_structures/heap/skew_heap.py b/data_structures/heap/skew_heap.py index 490db061deac..c4c13b08276a 100644 --- a/data_structures/heap/skew_heap.py +++ b/data_structures/heap/skew_heap.py @@ -71,7 +71,6 @@ class SkewHeap(Generic[T]): """ def __init__(self, data: Iterable[T] | None = ()) -> None: - """ >>> sh = SkewHeap([3, 1, 3, 7]) >>> list(sh) diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py index 94b916a623f6..c19309c9f5a7 100644 --- a/data_structures/linked_list/doubly_linked_list_two.py +++ b/data_structures/linked_list/doubly_linked_list_two.py @@ -80,7 +80,6 @@ def get_tail_data(self): return None def set_head(self, node: Node) -> None: - if self.head is None: self.head = node self.tail = node @@ -143,7 +142,6 @@ def get_node(self, item: int) -> Node: raise Exception("Node not found") def delete_value(self, value): - if (node := self.get_node(value)) is not None: if node == self.head: self.head = self.head.get_next() diff --git a/data_structures/stacks/prefix_evaluation.py b/data_structures/stacks/prefix_evaluation.py index 00df2c1e63b0..f48eca23d7b5 100644 --- a/data_structures/stacks/prefix_evaluation.py +++ b/data_structures/stacks/prefix_evaluation.py @@ -36,7 +36,6 @@ def evaluate(expression): # iterate over the string in reverse order for c in expression.split()[::-1]: - # push operand to stack if is_operand(c): stack.append(int(c)) diff --git a/data_structures/stacks/stack_with_doubly_linked_list.py b/data_structures/stacks/stack_with_doubly_linked_list.py index a129665f209f..50c5236e073c 100644 --- a/data_structures/stacks/stack_with_doubly_linked_list.py +++ b/data_structures/stacks/stack_with_doubly_linked_list.py @@ -92,7 +92,6 @@ def print_stack(self) -> None: # Code execution starts here if __name__ == "__main__": - # Start with the empty stack stack: Stack[int] = Stack() diff --git a/data_structures/stacks/stock_span_problem.py b/data_structures/stacks/stock_span_problem.py index 19a81bd368de..de423c1ebf66 100644 --- a/data_structures/stacks/stock_span_problem.py +++ b/data_structures/stacks/stock_span_problem.py 
@@ -9,7 +9,6 @@ def calculation_span(price, s): - n = len(price) # Create a stack and push index of fist element to it st = [] @@ -20,7 +19,6 @@ def calculation_span(price, s): # Calculate span values for rest of the elements for i in range(1, n): - # Pop elements from stack while stack is not # empty and top of stack is smaller than price[i] while len(st) > 0 and price[st[0]] <= price[i]: diff --git a/digital_image_processing/filters/bilateral_filter.py b/digital_image_processing/filters/bilateral_filter.py index 1afa01d3fc1a..565da73f6b0e 100644 --- a/digital_image_processing/filters/bilateral_filter.py +++ b/digital_image_processing/filters/bilateral_filter.py @@ -50,7 +50,6 @@ def bilateral_filter( size_x, size_y = img.shape for i in range(kernel_size // 2, size_x - kernel_size // 2): for j in range(kernel_size // 2, size_y - kernel_size // 2): - img_s = get_slice(img, i, j, kernel_size) img_i = img_s - img_s[kernel_size // 2, kernel_size // 2] img_ig = vec_gaussian(img_i, intensity_variance) diff --git a/digital_image_processing/filters/local_binary_pattern.py b/digital_image_processing/filters/local_binary_pattern.py index e92e554a3e5f..907fe2cb0555 100644 --- a/digital_image_processing/filters/local_binary_pattern.py +++ b/digital_image_processing/filters/local_binary_pattern.py @@ -61,7 +61,6 @@ def local_binary_value(image: np.ndarray, x_coordinate: int, y_coordinate: int) if __name__ == "__main__": - # Reading the image and converting it to grayscale. image = cv2.imread( "digital_image_processing/image_data/lena.jpg", cv2.IMREAD_GRAYSCALE diff --git a/dynamic_programming/bitmask.py b/dynamic_programming/bitmask.py index f45250c9cb84..56bb8e96ba02 100644 --- a/dynamic_programming/bitmask.py +++ b/dynamic_programming/bitmask.py @@ -13,7 +13,6 @@ class AssignmentUsingBitmask: def __init__(self, task_performed, total): - self.total_tasks = total # total no of tasks (N) # DP table will have a dimension of (2^M)*N @@ -29,7 +28,6 @@ def __init__(self, task_performed, total): self.final_mask = (1 << len(task_performed)) - 1 def count_ways_until(self, mask, task_no): - # if mask == self.finalmask all persons are distributed tasks, return 1 if mask == self.final_mask: return 1 @@ -49,7 +47,6 @@ def count_ways_until(self, mask, task_no): # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: - # if p is already given a task if mask & (1 << p): continue @@ -64,7 +61,6 @@ def count_ways_until(self, mask, task_no): return self.dp[mask][task_no] def count_no_of_ways(self, task_performed): - # Store the list of persons for each task for i in range(len(task_performed)): for j in task_performed[i]: @@ -75,7 +71,6 @@ def count_no_of_ways(self, task_performed): if __name__ == "__main__": - total_tasks = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. 
diff --git a/dynamic_programming/iterating_through_submasks.py b/dynamic_programming/iterating_through_submasks.py index 21c64dba4ecc..4d0a250e8dfe 100644 --- a/dynamic_programming/iterating_through_submasks.py +++ b/dynamic_programming/iterating_through_submasks.py @@ -9,7 +9,6 @@ def list_of_submasks(mask: int) -> list[int]: - """ Args: mask : number which shows mask ( always integer > 0, zero does not have any diff --git a/electronics/coulombs_law.py b/electronics/coulombs_law.py index e41c0410cc9e..18c1a8179eb6 100644 --- a/electronics/coulombs_law.py +++ b/electronics/coulombs_law.py @@ -8,7 +8,6 @@ def couloumbs_law( force: float, charge1: float, charge2: float, distance: float ) -> dict[str, float]: - """ Apply Coulomb's Law on any three given values. These can be force, charge1, charge2, or distance, and then in a Python dict return name/value pair of diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 77d1d7c042ba..482e1eddfecc 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -170,7 +170,6 @@ def ignore_overflow_warnings() -> None: if __name__ == "__main__": - z_0 = prepare_grid(window_size, nb_pixels) ignore_overflow_warnings() # See file header for explanations diff --git a/fractals/mandelbrot.py b/fractals/mandelbrot.py index f97bcd17031c..84dbda997562 100644 --- a/fractals/mandelbrot.py +++ b/fractals/mandelbrot.py @@ -114,7 +114,6 @@ def get_image( # loop through the image-coordinates for image_x in range(image_width): for image_y in range(image_height): - # determine the figure-coordinates based on the image-coordinates figure_height = figure_width / image_width * image_height figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width diff --git a/geodesy/lamberts_ellipsoidal_distance.py b/geodesy/lamberts_ellipsoidal_distance.py index 62ce59bb476f..4805674e51ab 100644 --- a/geodesy/lamberts_ellipsoidal_distance.py +++ b/geodesy/lamberts_ellipsoidal_distance.py @@ -10,7 +10,6 @@ def lamberts_ellipsoidal_distance( lat1: float, lon1: float, lat2: float, lon2: float ) -> float: - """ Calculate the shortest distance along the surface of an ellipsoid between two points on the surface of earth given longitudes and latitudes diff --git a/graphs/a_star.py b/graphs/a_star.py index 793ba3bda6b2..e8735179eab9 100644 --- a/graphs/a_star.py +++ b/graphs/a_star.py @@ -16,7 +16,6 @@ def search( cost: int, heuristic: list[list[int]], ) -> tuple[list[list[int]], list[list[int]]]: - closed = [ [0 for col in range(len(grid[0]))] for row in range(len(grid)) ] # the reference grid diff --git a/graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py index 552b7eee283d..7fc57cbc78bd 100644 --- a/graphs/check_bipartite_graph_bfs.py +++ b/graphs/check_bipartite_graph_bfs.py @@ -20,7 +20,6 @@ def bfs(): visited[u] = True for neighbour in graph[u]: - if neighbour == u: return False diff --git a/graphs/graph_matrix.py b/graphs/graph_matrix.py index 987168426ba5..4adc6c0bb93b 100644 --- a/graphs/graph_matrix.py +++ b/graphs/graph_matrix.py @@ -8,7 +8,6 @@ def add_edge(self, u, v): self.graph[v - 1][u - 1] = 1 def show(self): - for i in self.graph: for j in i: print(j, end=" ") diff --git a/graphs/karger.py b/graphs/karger.py index f72128c8178a..3ef65c0d6d32 100644 --- a/graphs/karger.py +++ b/graphs/karger.py @@ -47,7 +47,6 @@ def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]: graph_copy = {node: graph[node][:] for node in graph} while len(graph_copy) > 2: - # Choose a random edge. 
u = random.choice(list(graph_copy.keys())) v = random.choice(graph_copy[u]) diff --git a/graphs/minimum_spanning_tree_boruvka.py b/graphs/minimum_spanning_tree_boruvka.py index 6c72615cc729..663d8e26cfad 100644 --- a/graphs/minimum_spanning_tree_boruvka.py +++ b/graphs/minimum_spanning_tree_boruvka.py @@ -4,7 +4,6 @@ class Graph: """ def __init__(self): - self.num_vertices = 0 self.num_edges = 0 self.adjacency = {} diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index cd8e37b0099b..0a18ede6ed41 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -33,7 +33,7 @@ def put(self, item, priority): temp.append((pri, x)) (pri, x) = heapq.heappop(self.elements) temp.append((priority, item)) - for (pro, xxx) in temp: + for pro, xxx in temp: heapq.heappush(self.elements, (pro, xxx)) def remove_element(self, item): @@ -44,7 +44,7 @@ def remove_element(self, item): while x != item: temp.append((pro, x)) (pro, x) = heapq.heappop(self.elements) - for (prito, yyy) in temp: + for prito, yyy in temp: heapq.heappush(self.elements, (prito, yyy)) def top_show(self): diff --git a/knapsack/recursive_approach_knapsack.py b/knapsack/recursive_approach_knapsack.py index d813981cb79c..9a8ed1886a5b 100644 --- a/knapsack/recursive_approach_knapsack.py +++ b/knapsack/recursive_approach_knapsack.py @@ -46,7 +46,6 @@ def knapsack( if __name__ == "__main__": - import doctest doctest.testmod() diff --git a/linear_algebra/src/conjugate_gradient.py b/linear_algebra/src/conjugate_gradient.py index 418ae88a5f41..4cf566ec9e36 100644 --- a/linear_algebra/src/conjugate_gradient.py +++ b/linear_algebra/src/conjugate_gradient.py @@ -115,7 +115,6 @@ def conjugate_gradient( iterations = 0 while error > tol: - # Save this value so we only calculate the matrix-vector product once. w = np.dot(spd_matrix, p0) diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 5dc2b7118b56..b6305469ed7d 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -74,7 +74,6 @@ def centroid_pairwise_dist(x, centroids): def assign_clusters(data, centroids): - # Compute distances between each data point and the set of centroids: # Fill in the blank (RHS only) distances_from_centroids = centroid_pairwise_dist(data, centroids) @@ -100,10 +99,8 @@ def revise_centroids(data, k, cluster_assignment): def compute_heterogeneity(data, k, centroids, cluster_assignment): - heterogeneity = 0.0 for i in range(k): - # Select all data points that belong to cluster i. 
Fill in the blank (RHS only) member_data_points = data[cluster_assignment == i, :] diff --git a/machine_learning/self_organizing_map.py b/machine_learning/self_organizing_map.py index 057c2a76b8ac..32fdf1d2b41d 100644 --- a/machine_learning/self_organizing_map.py +++ b/machine_learning/self_organizing_map.py @@ -49,7 +49,6 @@ def main() -> None: for _ in range(epochs): for j in range(len(training_samples)): - # training sample sample = training_samples[j] diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index f5185e1d9576..9c45c351272f 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -82,7 +82,6 @@ def fit(self): k = self._k state = None while True: - # 1: Find alpha1, alpha2 try: i1, i2 = self.choose_alpha.send(state) @@ -146,7 +145,6 @@ def fit(self): # Predict test samples def predict(self, test_samples, classify=True): - if test_samples.shape[1] > self.samples.shape[1]: raise ValueError( "Test samples' feature length does not equal to that of train samples" diff --git a/machine_learning/xgboost_classifier.py b/machine_learning/xgboost_classifier.py index 08967f1715a1..1da933cf690f 100644 --- a/machine_learning/xgboost_classifier.py +++ b/machine_learning/xgboost_classifier.py @@ -41,7 +41,6 @@ def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier: def main() -> None: - """ >>> main() diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py index f62991b7415b..26709b428b78 100644 --- a/maths/armstrong_numbers.py +++ b/maths/armstrong_numbers.py @@ -62,7 +62,7 @@ def pluperfect_number(n: int) -> bool: digit_histogram[rem] += 1 digit_total += 1 - for (cnt, i) in zip(digit_histogram, range(len(digit_histogram))): + for cnt, i in zip(digit_histogram, range(len(digit_histogram))): total += cnt * i**digit_total return n == total diff --git a/maths/binary_exponentiation.py b/maths/binary_exponentiation.py index 8dda5245cf44..147b4285ffa1 100644 --- a/maths/binary_exponentiation.py +++ b/maths/binary_exponentiation.py @@ -5,7 +5,6 @@ def binary_exponentiation(a, n): - if n == 0: return 1 diff --git a/maths/combinations.py b/maths/combinations.py index 6db1d773faa6..a2324012c01f 100644 --- a/maths/combinations.py +++ b/maths/combinations.py @@ -39,7 +39,6 @@ def combinations(n: int, k: int) -> int: if __name__ == "__main__": - print( "The number of five-card hands possible from a standard", f"fifty-two card deck is: {combinations(52, 5)}\n", diff --git a/maths/decimal_isolate.py b/maths/decimal_isolate.py index cdf43ea5d0ef..058ed1bb90d1 100644 --- a/maths/decimal_isolate.py +++ b/maths/decimal_isolate.py @@ -5,7 +5,6 @@ def decimal_isolate(number: float, digit_amount: int) -> float: - """ Isolates the decimal part of a number. If digitAmount > 0 round to that decimal place, else print the entire decimal. 
diff --git a/maths/fermat_little_theorem.py b/maths/fermat_little_theorem.py index 73af3e28c618..eea03be245cb 100644 --- a/maths/fermat_little_theorem.py +++ b/maths/fermat_little_theorem.py @@ -6,7 +6,6 @@ def binary_exponentiation(a, n, mod): - if n == 0: return 1 diff --git a/maths/greedy_coin_change.py b/maths/greedy_coin_change.py index 29c2f1803d5c..7cf669bcb8cb 100644 --- a/maths/greedy_coin_change.py +++ b/maths/greedy_coin_change.py @@ -62,7 +62,6 @@ def find_minimum_change(denominations: list[int], value: str) -> list[int]: # Traverse through all denomination for denomination in reversed(denominations): - # Find denominations while int(total_value) >= int(denomination): total_value -= int(denomination) @@ -73,7 +72,6 @@ def find_minimum_change(denominations: list[int], value: str) -> list[int]: # Driver Code if __name__ == "__main__": - denominations = [] value = "0" diff --git a/maths/integration_by_simpson_approx.py b/maths/integration_by_simpson_approx.py index 408041de93f1..f77ae76135ee 100644 --- a/maths/integration_by_simpson_approx.py +++ b/maths/integration_by_simpson_approx.py @@ -35,7 +35,6 @@ def f(x: float) -> float: def simpson_integration(function, a: float, b: float, precision: int = 4) -> float: - """ Args: function : the function which's integration is desired diff --git a/maths/jaccard_similarity.py b/maths/jaccard_similarity.py index b299a81476ab..eab25188b2fd 100644 --- a/maths/jaccard_similarity.py +++ b/maths/jaccard_similarity.py @@ -51,7 +51,6 @@ def jaccard_similarity(set_a, set_b, alternative_union=False): """ if isinstance(set_a, set) and isinstance(set_b, set): - intersection = len(set_a.intersection(set_b)) if alternative_union: @@ -62,7 +61,6 @@ def jaccard_similarity(set_a, set_b, alternative_union=False): return intersection / union if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)): - intersection = [element for element in set_a if element in set_b] if alternative_union: diff --git a/maths/least_common_multiple.py b/maths/least_common_multiple.py index 0d087643e869..621d93720c41 100644 --- a/maths/least_common_multiple.py +++ b/maths/least_common_multiple.py @@ -67,7 +67,6 @@ def benchmark(): class TestLeastCommonMultiple(unittest.TestCase): - test_inputs = [ (10, 20), (13, 15), diff --git a/maths/line_length.py b/maths/line_length.py index ea27ee904a24..b810f2d9ad1f 100644 --- a/maths/line_length.py +++ b/maths/line_length.py @@ -10,7 +10,6 @@ def line_length( x_end: int | float, steps: int = 100, ) -> float: - """ Approximates the arc length of a line segment by treating the curve as a sequence of linear lines and summing their lengths @@ -41,7 +40,6 @@ def line_length( length = 0.0 for _ in range(steps): - # Approximates curve as a sequence of linear lines and sums their length x2 = (x_end - x_start) / steps + x1 fx2 = fnc(x2) diff --git a/maths/monte_carlo.py b/maths/monte_carlo.py index c13b8d0a4f6b..474f1f65deb4 100644 --- a/maths/monte_carlo.py +++ b/maths/monte_carlo.py @@ -18,6 +18,7 @@ def pi_estimator(iterations: int): 5. Multiply this value by 4 to get your estimate of pi. 6. Print the estimated and numpy value of pi """ + # A local function to see if a dot lands in the circle. 
def is_in_circle(x: float, y: float) -> bool: distance_from_centre = sqrt((x**2) + (y**2)) diff --git a/maths/newton_raphson.py b/maths/newton_raphson.py index f2b7cb9766d2..2c9cd1de95b0 100644 --- a/maths/newton_raphson.py +++ b/maths/newton_raphson.py @@ -19,7 +19,6 @@ def calc_derivative(f, a, h=0.001): def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=False): - a = x0 # set the initial guess steps = [a] error = abs(f(a)) diff --git a/maths/numerical_integration.py b/maths/numerical_integration.py index 8f32fd3564df..f2d65f89e390 100644 --- a/maths/numerical_integration.py +++ b/maths/numerical_integration.py @@ -12,7 +12,6 @@ def trapezoidal_area( x_end: int | float, steps: int = 100, ) -> float: - """ Treats curve as a collection of linear lines and sums the area of the trapezium shape they form @@ -40,7 +39,6 @@ def trapezoidal_area( area = 0.0 for _ in range(steps): - # Approximates small segments of curve as linear and solve # for trapezoidal area x2 = (x_end - x_start) / steps + x1 diff --git a/maths/primelib.py b/maths/primelib.py index 9586227ea3ca..81d5737063f0 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -59,7 +59,6 @@ def is_prime(number: int) -> bool: status = False for divisor in range(2, int(round(sqrt(number))) + 1): - # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: @@ -95,9 +94,7 @@ def sieve_er(n): # actual sieve of erathostenes for i in range(len(begin_list)): - for j in range(i + 1, len(begin_list)): - if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): begin_list[j] = 0 @@ -128,9 +125,7 @@ def get_prime_numbers(n): # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2, n + 1): - if is_prime(number): - ans.append(number) # precondition @@ -160,14 +155,11 @@ def prime_factorization(number): quotient = number if number == 0 or number == 1: - ans.append(number) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(number): - while quotient != 1: - if is_prime(factor) and (quotient % factor == 0): ans.append(factor) quotient /= factor @@ -298,11 +290,9 @@ def goldbach(number): loop = True while i < len_pn and loop: - j = i + 1 while j < len_pn and loop: - if prime_numbers[i] + prime_numbers[j] == number: loop = False ans.append(prime_numbers[i]) @@ -345,7 +335,6 @@ def gcd(number1, number2): rest = 0 while number2 != 0: - rest = number1 % number2 number1 = number2 number2 = rest @@ -380,13 +369,11 @@ def kg_v(number1, number2): # for kgV (x,1) if number1 > 1 and number2 > 1: - # builds the prime factorization of 'number1' and 'number2' prime_fac_1 = prime_factorization(number1) prime_fac_2 = prime_factorization(number2) elif number1 == 1 or number2 == 1: - prime_fac_1 = [] prime_fac_2 = [] ans = max(number1, number2) @@ -398,11 +385,8 @@ def kg_v(number1, number2): # iterates through primeFac1 for n in prime_fac_1: - if n not in done: - if n in prime_fac_2: - count1 = prime_fac_1.count(n) count2 = prime_fac_2.count(n) @@ -410,7 +394,6 @@ def kg_v(number1, number2): ans *= n else: - count1 = prime_fac_1.count(n) for _ in range(count1): @@ -420,9 +403,7 @@ def kg_v(number1, number2): # iterates through primeFac2 for n in prime_fac_2: - if n not in done: - count2 = prime_fac_2.count(n) for _ in range(count2): @@ -455,7 +436,6 @@ def get_prime(n): ans = 2 # this variable holds the answer while index < n: - index += 1 ans += 1 # counts to the next number 
@@ -499,7 +479,6 @@ def get_primes_between(p_number_1, p_number_2): number += 1 while number < p_number_2: - ans.append(number) number += 1 @@ -534,7 +513,6 @@ def get_divisors(n): ans = [] # will be returned. for divisor in range(1, n + 1): - if n % divisor == 0: ans.append(divisor) @@ -638,7 +616,6 @@ def fib(n): ans = 1 # this will be return for _ in range(n - 1): - tmp = ans ans += fib1 fib1 = tmp diff --git a/maths/segmented_sieve.py b/maths/segmented_sieve.py index 35ed9702b3be..e950a83b752a 100644 --- a/maths/segmented_sieve.py +++ b/maths/segmented_sieve.py @@ -25,7 +25,6 @@ def sieve(n: int) -> list[int]: while low <= n: temp = [True] * (high - low + 1) for each in in_prime: - t = math.floor(low / each) * each if t < low: t += each diff --git a/maths/two_pointer.py b/maths/two_pointer.py index ff234cddc9e4..d0fb0fc9c2f1 100644 --- a/maths/two_pointer.py +++ b/maths/two_pointer.py @@ -43,7 +43,6 @@ def two_pointer(nums: list[int], target: int) -> list[int]: j = len(nums) - 1 while i < j: - if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: diff --git a/maths/zellers_congruence.py b/maths/zellers_congruence.py index 624bbfe1061c..483fb000f86b 100644 --- a/maths/zellers_congruence.py +++ b/maths/zellers_congruence.py @@ -3,7 +3,6 @@ def zeller(date_input: str) -> str: - """ Zellers Congruence Algorithm Find the day of the week for nearly any Gregorian or Julian calendar date diff --git a/matrix/largest_square_area_in_matrix.py b/matrix/largest_square_area_in_matrix.py index cf975cb7ce1f..a93369c56bbd 100644 --- a/matrix/largest_square_area_in_matrix.py +++ b/matrix/largest_square_area_in_matrix.py @@ -59,7 +59,6 @@ def largest_square_area_in_matrix_top_down_approch( """ def update_area_of_max_square(row: int, col: int) -> int: - # BASE CASE if row >= rows or col >= cols: return 0 @@ -138,7 +137,6 @@ def largest_square_area_in_matrix_bottom_up( largest_square_area = 0 for row in range(rows - 1, -1, -1): for col in range(cols - 1, -1, -1): - right = dp_array[row][col + 1] diagonal = dp_array[row + 1][col + 1] bottom = dp_array[row + 1][col] @@ -169,7 +167,6 @@ def largest_square_area_in_matrix_bottom_up_space_optimization( largest_square_area = 0 for row in range(rows - 1, -1, -1): for col in range(cols - 1, -1, -1): - right = current_row[col + 1] diagonal = next_row[col + 1] bottom = next_row[col] diff --git a/other/activity_selection.py b/other/activity_selection.py index 18ff6a24c32a..2cc08d959862 100644 --- a/other/activity_selection.py +++ b/other/activity_selection.py @@ -25,7 +25,6 @@ def print_max_activities(start: list[int], finish: list[int]) -> None: # Consider rest of the activities for j in range(n): - # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it diff --git a/other/nested_brackets.py b/other/nested_brackets.py index 9dd9a0f042ed..3f61a4e7006c 100644 --- a/other/nested_brackets.py +++ b/other/nested_brackets.py @@ -15,14 +15,12 @@ def is_balanced(s): - stack = [] open_brackets = set({"(", "[", "{"}) closed_brackets = set({")", "]", "}"}) open_to_closed = dict({"{": "}", "[": "]", "(": ")"}) for i in range(len(s)): - if s[i] in open_brackets: stack.append(s[i]) diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index 1e6293f8465c..00d87cfc0b73 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -26,7 +26,6 @@ def procentual_proximity( source_data: list[list[float]], weights: list[int] ) -> list[list[float]]: - """ weights - 
int list possible values - 0 / 1 diff --git a/other/sdes.py b/other/sdes.py index 695675000632..31105984b9bb 100644 --- a/other/sdes.py +++ b/other/sdes.py @@ -54,7 +54,6 @@ def function(expansion, s0, s1, key, message): if __name__ == "__main__": - key = input("Enter 10 bit key: ") message = input("Enter 8 bit message: ") diff --git a/physics/casimir_effect.py b/physics/casimir_effect.py index ee8a6c1eba53..e4a77e5b593f 100644 --- a/physics/casimir_effect.py +++ b/physics/casimir_effect.py @@ -47,7 +47,6 @@ def casimir_force(force: float, area: float, distance: float) -> dict[str, float]: - """ Input Parameters ---------------- diff --git a/physics/hubble_parameter.py b/physics/hubble_parameter.py index 7985647222c9..6bc62e7131c5 100644 --- a/physics/hubble_parameter.py +++ b/physics/hubble_parameter.py @@ -34,7 +34,6 @@ def hubble_parameter( dark_energy: float, redshift: float, ) -> float: - """ Input Parameters ---------------- diff --git a/physics/newtons_law_of_gravitation.py b/physics/newtons_law_of_gravitation.py index 0bb27bb2415d..4bbeddd61d5b 100644 --- a/physics/newtons_law_of_gravitation.py +++ b/physics/newtons_law_of_gravitation.py @@ -28,7 +28,6 @@ def gravitational_law( force: float, mass_1: float, mass_2: float, distance: float ) -> dict[str, float]: - """ Input Parameters ---------------- diff --git a/project_euler/problem_004/sol1.py b/project_euler/problem_004/sol1.py index b1e229289988..f237afdd942d 100644 --- a/project_euler/problem_004/sol1.py +++ b/project_euler/problem_004/sol1.py @@ -32,12 +32,10 @@ def solution(n: int = 998001) -> int: # fetches the next number for number in range(n - 1, 9999, -1): - str_number = str(number) # checks whether 'str_number' is a palindrome. if str_number == str_number[::-1]: - divisor = 999 # if 'number' is a product of two 3-digit numbers diff --git a/project_euler/problem_074/sol2.py b/project_euler/problem_074/sol2.py index d76bb014d629..b54bc023e387 100644 --- a/project_euler/problem_074/sol2.py +++ b/project_euler/problem_074/sol2.py @@ -111,7 +111,6 @@ def solution(chain_length: int = 60, number_limit: int = 1000000) -> int: chain_sets_lengths: dict[int, int] = {} for start_chain_element in range(1, number_limit): - # The temporary set will contain the elements of the chain chain_set = set() chain_set_length = 0 diff --git a/project_euler/problem_089/sol1.py b/project_euler/problem_089/sol1.py index 83609cd236e1..123159bdce09 100644 --- a/project_euler/problem_089/sol1.py +++ b/project_euler/problem_089/sol1.py @@ -138,5 +138,4 @@ def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int: if __name__ == "__main__": - print(f"{solution() = }") diff --git a/project_euler/problem_092/sol1.py b/project_euler/problem_092/sol1.py index 33a6c06946f7..8d3f0c9ddd7b 100644 --- a/project_euler/problem_092/sol1.py +++ b/project_euler/problem_092/sol1.py @@ -15,7 +15,6 @@ def next_number(number: int) -> int: - """ Returns the next number of the chain by adding the square of each digit to form a new number. @@ -31,7 +30,6 @@ def next_number(number: int) -> int: sum_of_digits_squared = 0 while number: - # Increased Speed Slightly by checking every 5 digits together. 
sum_of_digits_squared += DIGITS_SQUARED[number % 100000] number //= 100000 diff --git a/quantum/q_fourier_transform.py b/quantum/q_fourier_transform.py index 07a257579529..762ac408190e 100644 --- a/quantum/q_fourier_transform.py +++ b/quantum/q_fourier_transform.py @@ -72,7 +72,6 @@ def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts counter = number_of_qubits for i in range(counter): - quantum_circuit.h(number_of_qubits - i - 1) counter -= 1 for j in range(counter): diff --git a/quantum/quantum_teleportation.py b/quantum/quantum_teleportation.py index d04b44d15a05..5da79ed20183 100644 --- a/quantum/quantum_teleportation.py +++ b/quantum/quantum_teleportation.py @@ -18,7 +18,6 @@ def quantum_teleportation( theta: float = np.pi / 2, phi: float = np.pi / 2, lam: float = np.pi / 2 ) -> qiskit.result.counts.Counts: - """ # >>> quantum_teleportation() #{'00': 500, '11': 500} # ideally diff --git a/scheduling/highest_response_ratio_next.py b/scheduling/highest_response_ratio_next.py index a5c62ddbe952..9c999ec65053 100644 --- a/scheduling/highest_response_ratio_next.py +++ b/scheduling/highest_response_ratio_next.py @@ -37,7 +37,6 @@ def calculate_turn_around_time( arrival_time.sort() while no_of_process > finished_process_count: - """ If the current time is less than the arrival time of the process that arrives first among the processes that have not been performed, @@ -94,7 +93,6 @@ def calculate_waiting_time( if __name__ == "__main__": - no_of_process = 5 process_name = ["A", "B", "C", "D", "E"] arrival_time = [1, 2, 3, 4, 5] diff --git a/searches/binary_search.py b/searches/binary_search.py index 88fee47157c6..05dadd4fe965 100644 --- a/searches/binary_search.py +++ b/searches/binary_search.py @@ -261,7 +261,6 @@ def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None def binary_search_by_recursion( sorted_collection: list[int], item: int, left: int, right: int ) -> int | None: - """Pure implementation of binary search algorithm in Python by recursion Be careful collection must be ascending sorted, otherwise result will be diff --git a/searches/interpolation_search.py b/searches/interpolation_search.py index 35e6bc506661..49194c2600a0 100644 --- a/searches/interpolation_search.py +++ b/searches/interpolation_search.py @@ -49,7 +49,6 @@ def interpolation_search(sorted_collection, item): def interpolation_search_by_recursion(sorted_collection, item, left, right): - """Pure implementation of interpolation search algorithm in Python by recursion Be careful collection must be ascending sorted, otherwise result will be unpredictable diff --git a/searches/tabu_search.py b/searches/tabu_search.py index 3e1728286d98..d998ddc55976 100644 --- a/searches/tabu_search.py +++ b/searches/tabu_search.py @@ -220,7 +220,6 @@ def tabu_search( while not found: i = 0 while i < len(best_solution): - if best_solution[i] != solution[i]: first_exchange_node = best_solution[i] second_exchange_node = solution[i] diff --git a/searches/ternary_search.py b/searches/ternary_search.py index 9830cce36000..cb36e72faac6 100644 --- a/searches/ternary_search.py +++ b/searches/ternary_search.py @@ -103,7 +103,6 @@ def ite_ternary_search(array: list[int], target: int) -> int: left = two_third + 1 else: - left = one_third + 1 right = two_third - 1 else: diff --git a/sorts/comb_sort.py b/sorts/comb_sort.py index 16bd10c78fe5..3c8b1e99a454 100644 --- a/sorts/comb_sort.py +++ b/sorts/comb_sort.py @@ -37,7 +37,6 @@ def comb_sort(data: list) -> list: completed = False while not 
completed: - # Update the gap value for a next comb gap = int(gap / shrink_factor) if gap <= 1: diff --git a/sorts/odd_even_sort.py b/sorts/odd_even_sort.py index 9ef4462c72c0..7dfe03054bc3 100644 --- a/sorts/odd_even_sort.py +++ b/sorts/odd_even_sort.py @@ -30,7 +30,6 @@ def odd_even_sort(input_list: list) -> list: is_sorted = True for i in range(0, len(input_list) - 1, 2): # iterating over all even indices if input_list[i] > input_list[i + 1]: - input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i] # swapping if elements not in order is_sorted = False diff --git a/sorts/odd_even_transposition_parallel.py b/sorts/odd_even_transposition_parallel.py index b656df3a3a90..87b0e4d1e20f 100644 --- a/sorts/odd_even_transposition_parallel.py +++ b/sorts/odd_even_transposition_parallel.py @@ -34,7 +34,6 @@ def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0, 10): - if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() diff --git a/sorts/random_normal_distribution_quicksort.py b/sorts/random_normal_distribution_quicksort.py index 5777d5cb2e7a..f7f60903c546 100644 --- a/sorts/random_normal_distribution_quicksort.py +++ b/sorts/random_normal_distribution_quicksort.py @@ -19,7 +19,6 @@ def _in_place_quick_sort(a, start, end): def _in_place_partition(a, start, end): - count = 0 pivot = randint(start, end) temp = a[end] @@ -27,7 +26,6 @@ def _in_place_partition(a, start, end): a[pivot] = temp new_pivot_index = start - 1 for index in range(start, end): - count += 1 if a[index] < a[end]: # check if current val is less than pivot value new_pivot_index = new_pivot_index + 1 diff --git a/sorts/shrink_shell_sort.py b/sorts/shrink_shell_sort.py index 69992bfb75bc..f77b73d013a7 100644 --- a/sorts/shrink_shell_sort.py +++ b/sorts/shrink_shell_sort.py @@ -44,7 +44,6 @@ def shell_sort(collection: list) -> list: # Continue sorting until the gap is 1 while gap > 1: - # Decrease the gap value gap = int(gap / shrink) diff --git a/sorts/stooge_sort.py b/sorts/stooge_sort.py index de997a85df12..9a5bedeae21b 100644 --- a/sorts/stooge_sort.py +++ b/sorts/stooge_sort.py @@ -12,7 +12,6 @@ def stooge_sort(arr): def stooge(arr, i, h): - if i >= h: return diff --git a/sorts/tim_sort.py b/sorts/tim_sort.py index b95ff34cf384..c90c7e80390b 100644 --- a/sorts/tim_sort.py +++ b/sorts/tim_sort.py @@ -73,7 +73,6 @@ def tim_sort(lst): def main(): - lst = [5, 9, 10, 3, -4, 5, 178, 92, 46, -18, 0, 7] sorted_lst = tim_sort(lst) print(sorted_lst) diff --git a/strings/dna.py b/strings/dna.py index c2b96110b893..33e1063f4124 100644 --- a/strings/dna.py +++ b/strings/dna.py @@ -2,7 +2,6 @@ def dna(dna: str) -> str: - """ https://en.wikipedia.org/wiki/DNA Returns the second side of a DNA strand diff --git a/strings/hamming_distance.py b/strings/hamming_distance.py index 5de27dc77f44..a28949172aa4 100644 --- a/strings/hamming_distance.py +++ b/strings/hamming_distance.py @@ -35,7 +35,6 @@ def hamming_distance(string1: str, string2: str) -> int: if __name__ == "__main__": - import doctest doctest.testmod() diff --git a/strings/levenshtein_distance.py b/strings/levenshtein_distance.py index 9f7a7e3e65c4..7be4074dc39b 100644 --- a/strings/levenshtein_distance.py +++ b/strings/levenshtein_distance.py @@ -44,11 +44,9 @@ def levenshtein_distance(first_word: str, second_word: str) -> int: 
previous_row = list(range(len(second_word) + 1)) for i, c1 in enumerate(first_word): - current_row = [i + 1] for j, c2 in enumerate(second_word): - # Calculate insertions, deletions and substitutions insertions = previous_row[j + 1] + 1 deletions = current_row[j] + 1 diff --git a/strings/prefix_function.py b/strings/prefix_function.py index 6eca01635fe3..65bbe9100735 100644 --- a/strings/prefix_function.py +++ b/strings/prefix_function.py @@ -29,7 +29,6 @@ def prefix_function(input_string: str) -> list: prefix_result = [0] * len(input_string) for i in range(1, len(input_string)): - # use last results for better performance - dynamic programming j = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: diff --git a/strings/text_justification.py b/strings/text_justification.py index 5e86456c2456..b0ef12231224 100644 --- a/strings/text_justification.py +++ b/strings/text_justification.py @@ -33,7 +33,6 @@ def text_justification(word: str, max_width: int) -> list: words = word.split() def justify(line: list, width: int, max_width: int) -> str: - overall_spaces_count = max_width - width words_count = len(line) if len(line) == 1: diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py index e11948d0ae78..3bd4f704dd8d 100644 --- a/web_programming/fetch_anime_and_play.py +++ b/web_programming/fetch_anime_and_play.py @@ -8,7 +8,6 @@ def search_scraper(anime_name: str) -> list: - """[summary] Take an url and @@ -66,7 +65,6 @@ def search_scraper(anime_name: str) -> list: def search_anime_episode_list(episode_endpoint: str) -> list: - """[summary] Take an url and @@ -116,7 +114,6 @@ def search_anime_episode_list(episode_endpoint: str) -> list: def get_anime_episode(episode_endpoint: str) -> list: - """[summary] Get click url and download url from episode url @@ -153,7 +150,6 @@ def get_anime_episode(episode_endpoint: str) -> list: if __name__ == "__main__": - anime_name = input("Enter anime name: ").strip() anime_list = search_scraper(anime_name) print("\n") @@ -161,9 +157,8 @@ def get_anime_episode(episode_endpoint: str) -> list: if len(anime_list) == 0: print("No anime found with this name") else: - print(f"Found {len(anime_list)} results: ") - for (i, anime) in enumerate(anime_list): + for i, anime in enumerate(anime_list): anime_title = anime["title"] print(f"{i+1}. {anime_title}") @@ -176,7 +171,7 @@ def get_anime_episode(episode_endpoint: str) -> list: print("No episode found for this anime") else: print(f"Found {len(episode_list)} results: ") - for (i, episode) in enumerate(episode_list): + for i, episode in enumerate(episode_list): print(f"{i+1}. {episode['title']}") episode_choice = int(input("\nChoose an episode by serial no: ").strip()) diff --git a/web_programming/fetch_well_rx_price.py b/web_programming/fetch_well_rx_price.py index 5174f39f9532..ee51b9a5051b 100644 --- a/web_programming/fetch_well_rx_price.py +++ b/web_programming/fetch_well_rx_price.py @@ -37,7 +37,6 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None: """ try: - # Has user provided both inputs? if not drug_name or not zip_code: return None @@ -58,7 +57,6 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None: grid_list = soup.find_all("div", {"class": "grid-x pharmCard"}) if grid_list and len(grid_list) > 0: for grid in grid_list: - # Get the pharmacy price. 
pharmacy_name = grid.find("p", {"class": "list-title"}).text @@ -79,7 +77,6 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None: if __name__ == "__main__": - # Enter a drug name and a zip code drug_name = input("Enter drug name: ").strip() zip_code = input("Enter zip code: ").strip() @@ -89,10 +86,8 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None: ) if pharmacy_price_list: - print(f"\nSearch results for {drug_name} at location {zip_code}:") for pharmacy_price in pharmacy_price_list: - name = pharmacy_price["pharmacy_name"] price = pharmacy_price["price"] diff --git a/web_programming/get_user_tweets.py b/web_programming/get_user_tweets.py index 28cf85541dc4..3abc69715727 100644 --- a/web_programming/get_user_tweets.py +++ b/web_programming/get_user_tweets.py @@ -10,7 +10,6 @@ def get_all_tweets(screen_name: str) -> None: - # authorize twitter, initialize tweepy auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_key, access_secret) From 77b4fa8b3f2070ff708405cca1381b7860e316ab Mon Sep 17 00:00:00 2001 From: Damon Gregory <46330424+SheriffHobo@users.noreply.github.com> Date: Sun, 12 Feb 2023 07:55:25 -0800 Subject: [PATCH 0765/1543] fix_ci_badge (#8134) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index da80c012b0c6..68a6e5e6fbce 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@
- GitHub Workflow Status + GitHub Workflow Status pre-commit From 126e89d8a3983c1ffc9b3eefa1fbaff0f6fe4ead Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 13 Feb 2023 22:05:56 +0100 Subject: [PATCH 0766/1543] [pre-commit.ci] pre-commit autoupdate (#8141) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/tox-dev/pyproject-fmt: 0.6.0 → 0.8.0](https://github.com/tox-dev/pyproject-fmt/compare/0.6.0...0.8.0) - [github.com/pre-commit/mirrors-mypy: v0.991 → v1.0.0](https://github.com/pre-commit/mirrors-mypy/compare/v0.991...v1.0.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f8d1a65db27b..a1496984f950 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,7 +27,7 @@ repos: - --profile=black - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.6.0" + rev: "0.8.0" hooks: - id: pyproject-fmt @@ -62,7 +62,7 @@ repos: *flake8-plugins - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.991 + rev: v1.0.0 hooks: - id: mypy args: From 1bf03889c5e34420001e72b5d26cc0846dcd122a Mon Sep 17 00:00:00 2001 From: Jan Wojciechowski <96974442+yanvoi@users.noreply.github.com> Date: Sun, 19 Feb 2023 23:14:01 +0100 Subject: [PATCH 0767/1543] Update bogo_sort.py (#8144) --- sorts/bogo_sort.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/sorts/bogo_sort.py b/sorts/bogo_sort.py index b72f2089f3d2..9c133f0d8a55 100644 --- a/sorts/bogo_sort.py +++ b/sorts/bogo_sort.py @@ -31,8 +31,6 @@ def bogo_sort(collection): """ def is_sorted(collection): - if len(collection) < 2: - return True for i in range(len(collection) - 1): if collection[i] > collection[i + 1]: return False From 67676c3b790d9631ea99c89f71dc2bf65e9aa2ca Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 21 Feb 2023 08:33:44 +0100 Subject: [PATCH 0768/1543] [pre-commit.ci] pre-commit autoupdate (#8149) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/tox-dev/pyproject-fmt: 0.8.0 → 0.9.1](https://github.com/tox-dev/pyproject-fmt/compare/0.8.0...0.9.1) - [github.com/pre-commit/mirrors-mypy: v1.0.0 → v1.0.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.0.0...v1.0.1) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- pyproject.toml | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a1496984f950..93064949e194 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,7 +27,7 @@ repos: - --profile=black - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.8.0" + rev: "0.9.1" hooks: - id: pyproject-fmt @@ -62,7 +62,7 @@ repos: *flake8-plugins - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.0.0 + rev: v1.0.1 hooks: - id: mypy args: diff --git a/pyproject.toml b/pyproject.toml index 410e7655b2b5..5f9b1aa06c0e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,6 @@ addopts = [ "--showlocals", ] - [tool.coverage.report] omit = 
[".env/*"] sort = "Cover" From 1c15cdff70893bc27ced2b390959e1d9cc493628 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 23:08:40 +0100 Subject: [PATCH 0769/1543] [pre-commit.ci] pre-commit autoupdate (#8160) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/tox-dev/pyproject-fmt: 0.9.1 → 0.9.2](https://github.com/tox-dev/pyproject-fmt/compare/0.9.1...0.9.2) * pre-commit: Add ruff --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 93064949e194..9f27f985bb6a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,7 +27,7 @@ repos: - --profile=black - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.9.1" + rev: "0.9.2" hooks: - id: pyproject-fmt @@ -43,6 +43,13 @@ repos: args: - --py311-plus + - repo: https://github.com/charliermarsh/ruff-pre-commit + rev: v0.0.253 + hooks: + - id: ruff + args: + - --ignore=E741 + - repo: https://github.com/PyCQA/flake8 rev: 6.0.0 hooks: From 64543faa980b526f79d287a073ebb7554749faf9 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 1 Mar 2023 17:23:33 +0100 Subject: [PATCH 0770/1543] Make some ruff fixes (#8154) * Make some ruff fixes * Undo manual fix * Undo manual fix * Updates from ruff=0.0.251 --- audio_filters/iir_filter.py | 2 +- backtracking/n_queens_math.py | 6 +++--- backtracking/sum_of_subsets.py | 2 +- ciphers/bifid.py | 2 +- ciphers/diffie_hellman.py | 16 ++++++++-------- ciphers/polybius.py | 2 +- ciphers/xor_cipher.py | 18 ++++++++---------- computer_vision/mosaic_augmentation.py | 2 +- .../binary_tree/binary_search_tree.py | 2 +- .../binary_tree/binary_tree_traversals.py | 4 ++-- .../binary_tree/inorder_tree_traversal_2022.py | 2 +- data_structures/binary_tree/red_black_tree.py | 5 ++--- .../hashing/number_theory/prime_numbers.py | 2 +- data_structures/heap/binomial_heap.py | 4 ++-- .../linked_list/doubly_linked_list_two.py | 2 +- .../linked_list/singly_linked_list.py | 1 + data_structures/linked_list/skip_list.py | 5 +---- .../queue/circular_queue_linked_list.py | 2 +- .../dilation_operation.py | 2 +- .../erosion_operation.py | 2 +- dynamic_programming/all_construct.py | 2 +- dynamic_programming/fizz_buzz.py | 2 +- .../longest_common_subsequence.py | 10 ++-------- .../longest_increasing_subsequence.py | 2 +- graphs/basic_graphs.py | 14 ++++++-------- graphs/check_cycle.py | 9 ++++----- graphs/connected_components.py | 2 +- graphs/dijkstra_algorithm.py | 2 +- .../edmonds_karp_multiple_source_and_sink.py | 5 ++--- graphs/frequent_pattern_graph_miner.py | 6 +++--- graphs/minimum_spanning_tree_boruvka.py | 1 + graphs/minimum_spanning_tree_prims.py | 5 +---- graphs/minimum_spanning_tree_prims2.py | 16 +++++++--------- hashes/hamming_code.py | 5 ++--- linear_algebra/src/lib.py | 7 ++++--- machine_learning/gradient_descent.py | 2 ++ machine_learning/k_means_clust.py | 4 ++-- .../sequential_minimum_optimization.py | 9 ++++----- maths/abs.py | 6 +++--- maths/binary_exp_mod.py | 2 +- maths/jaccard_similarity.py | 1 + maths/largest_of_very_large_numbers.py | 1 + maths/radix2_fft.py | 5 +---- .../back_propagation_neural_network.py | 1 + other/graham_scan.py | 7 +++---- other/nested_brackets.py | 9 
++++----- physics/hubble_parameter.py | 4 ++-- project_euler/problem_005/sol1.py | 1 + project_euler/problem_009/sol1.py | 5 ++--- project_euler/problem_014/sol2.py | 5 +---- project_euler/problem_018/solution.py | 10 ++-------- project_euler/problem_019/sol1.py | 2 +- project_euler/problem_033/sol1.py | 8 +++----- project_euler/problem_064/sol1.py | 5 ++--- project_euler/problem_067/sol1.py | 10 ++-------- project_euler/problem_109/sol1.py | 2 +- project_euler/problem_203/sol1.py | 4 ++-- scheduling/shortest_job_first.py | 11 +++++------ scripts/build_directory_md.py | 5 ++--- searches/binary_tree_traversal.py | 1 + sorts/circle_sort.py | 13 ++++++------- sorts/counting_sort.py | 2 +- sorts/msd_radix_sort.py | 2 +- sorts/quick_sort.py | 2 +- sorts/recursive_quick_sort.py | 10 +++++----- sorts/tim_sort.py | 4 ++-- strings/autocomplete_using_trie.py | 5 +---- strings/check_anagrams.py | 5 +---- strings/is_palindrome.py | 5 +---- strings/snake_case_to_camel_pascal_case.py | 2 +- web_programming/convert_number_to_words.py | 6 +++--- web_programming/instagram_crawler.py | 2 +- web_programming/open_google_results.py | 5 +---- 73 files changed, 151 insertions(+), 203 deletions(-) diff --git a/audio_filters/iir_filter.py b/audio_filters/iir_filter.py index aae320365012..bd448175f6f3 100644 --- a/audio_filters/iir_filter.py +++ b/audio_filters/iir_filter.py @@ -47,7 +47,7 @@ def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None >>> filt.set_coefficients(a_coeffs, b_coeffs) """ if len(a_coeffs) < self.order: - a_coeffs = [1.0] + a_coeffs + a_coeffs = [1.0, *a_coeffs] if len(a_coeffs) != self.order + 1: raise ValueError( diff --git a/backtracking/n_queens_math.py b/backtracking/n_queens_math.py index 23bd1590618b..f3b08ab0a05f 100644 --- a/backtracking/n_queens_math.py +++ b/backtracking/n_queens_math.py @@ -129,9 +129,9 @@ def depth_first_search( # If it is False we call dfs function again and we update the inputs depth_first_search( - possible_board + [col], - diagonal_right_collisions + [row - col], - diagonal_left_collisions + [row + col], + [*possible_board, col], + [*diagonal_right_collisions, row - col], + [*diagonal_left_collisions, row + col], boards, n, ) diff --git a/backtracking/sum_of_subsets.py b/backtracking/sum_of_subsets.py index 128e290718cd..c5e23321cb0c 100644 --- a/backtracking/sum_of_subsets.py +++ b/backtracking/sum_of_subsets.py @@ -44,7 +44,7 @@ def create_state_space_tree( nums, max_sum, index + 1, - path + [nums[index]], + [*path, nums[index]], result, remaining_nums_sum - nums[index], ) diff --git a/ciphers/bifid.py b/ciphers/bifid.py index c005e051a6ba..a15b381640aa 100644 --- a/ciphers/bifid.py +++ b/ciphers/bifid.py @@ -33,7 +33,7 @@ def letter_to_numbers(self, letter: str) -> np.ndarray: >>> np.array_equal(BifidCipher().letter_to_numbers('u'), [4,5]) True """ - index1, index2 = np.where(self.SQUARE == letter) + index1, index2 = np.where(letter == self.SQUARE) indexes = np.concatenate([index1 + 1, index2 + 1]) return indexes diff --git a/ciphers/diffie_hellman.py b/ciphers/diffie_hellman.py index 072f4aaaa6da..cd40a6b9c3b3 100644 --- a/ciphers/diffie_hellman.py +++ b/ciphers/diffie_hellman.py @@ -228,10 +228,10 @@ def generate_public_key(self) -> str: def is_valid_public_key(self, key: int) -> bool: # check if the other public key is valid based on NIST SP800-56 - if 2 <= key and key <= self.prime - 2: - if pow(key, (self.prime - 1) // 2, self.prime) == 1: - return True - return False + return ( + 2 <= key <= self.prime - 2 + and pow(key, 
(self.prime - 1) // 2, self.prime) == 1 + ) def generate_shared_key(self, other_key_str: str) -> str: other_key = int(other_key_str, base=16) @@ -243,10 +243,10 @@ def generate_shared_key(self, other_key_str: str) -> str: @staticmethod def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool: # check if the other public key is valid based on NIST SP800-56 - if 2 <= remote_public_key_str and remote_public_key_str <= prime - 2: - if pow(remote_public_key_str, (prime - 1) // 2, prime) == 1: - return True - return False + return ( + 2 <= remote_public_key_str <= prime - 2 + and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1 + ) @staticmethod def generate_shared_key_static( diff --git a/ciphers/polybius.py b/ciphers/polybius.py index 3539ab70c303..d83badf4ac0a 100644 --- a/ciphers/polybius.py +++ b/ciphers/polybius.py @@ -31,7 +31,7 @@ def letter_to_numbers(self, letter: str) -> np.ndarray: >>> np.array_equal(PolybiusCipher().letter_to_numbers('u'), [4,5]) True """ - index1, index2 = np.where(self.SQUARE == letter) + index1, index2 = np.where(letter == self.SQUARE) indexes = np.concatenate([index1 + 1, index2 + 1]) return indexes diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py index 379ef0ef7e50..0f369e38f85f 100644 --- a/ciphers/xor_cipher.py +++ b/ciphers/xor_cipher.py @@ -128,11 +128,10 @@ def encrypt_file(self, file: str, key: int = 0) -> bool: assert isinstance(file, str) and isinstance(key, int) try: - with open(file) as fin: - with open("encrypt.out", "w+") as fout: - # actual encrypt-process - for line in fin: - fout.write(self.encrypt_string(line, key)) + with open(file) as fin, open("encrypt.out", "w+") as fout: + # actual encrypt-process + for line in fin: + fout.write(self.encrypt_string(line, key)) except OSError: return False @@ -152,11 +151,10 @@ def decrypt_file(self, file: str, key: int) -> bool: assert isinstance(file, str) and isinstance(key, int) try: - with open(file) as fin: - with open("decrypt.out", "w+") as fout: - # actual encrypt-process - for line in fin: - fout.write(self.decrypt_string(line, key)) + with open(file) as fin, open("decrypt.out", "w+") as fout: + # actual encrypt-process + for line in fin: + fout.write(self.decrypt_string(line, key)) except OSError: return False diff --git a/computer_vision/mosaic_augmentation.py b/computer_vision/mosaic_augmentation.py index e2953749753f..c150126d6bfb 100644 --- a/computer_vision/mosaic_augmentation.py +++ b/computer_vision/mosaic_augmentation.py @@ -159,7 +159,7 @@ def update_image_and_anno( new_anno.append([bbox[0], xmin, ymin, xmax, ymax]) # Remove bounding box small than scale of filter - if 0 < filter_scale: + if filter_scale > 0: new_anno = [ anno for anno in new_anno diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index fc512944eb50..cd88cc10e697 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -60,7 +60,7 @@ def __insert(self, value) -> None: else: # Tree is not empty parent_node = self.root # from root if parent_node is None: - return None + return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 24dd1bd8cdc8..71a895e76ce4 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ 
b/data_structures/binary_tree/binary_tree_traversals.py @@ -37,7 +37,7 @@ def preorder(root: Node | None) -> list[int]: >>> preorder(make_tree()) [1, 2, 4, 5, 3] """ - return [root.data] + preorder(root.left) + preorder(root.right) if root else [] + return [root.data, *preorder(root.left), *preorder(root.right)] if root else [] def postorder(root: Node | None) -> list[int]: @@ -55,7 +55,7 @@ def inorder(root: Node | None) -> list[int]: >>> inorder(make_tree()) [4, 2, 5, 1, 3] """ - return inorder(root.left) + [root.data] + inorder(root.right) if root else [] + return [*inorder(root.left), root.data, *inorder(root.right)] if root else [] def height(root: Node | None) -> int: diff --git a/data_structures/binary_tree/inorder_tree_traversal_2022.py b/data_structures/binary_tree/inorder_tree_traversal_2022.py index e94ba7013a82..1357527d2953 100644 --- a/data_structures/binary_tree/inorder_tree_traversal_2022.py +++ b/data_structures/binary_tree/inorder_tree_traversal_2022.py @@ -50,7 +50,7 @@ def inorder(node: None | BinaryTreeNode) -> list[int]: # if node is None,return """ if node: inorder_array = inorder(node.left_child) - inorder_array = inorder_array + [node.data] + inorder_array = [*inorder_array, node.data] inorder_array = inorder_array + inorder(node.right_child) else: inorder_array = [] diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index a9dbd699c3c1..b50d75d33689 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -319,9 +319,8 @@ def check_coloring(self) -> bool: """A helper function to recursively check Property 4 of a Red-Black Tree. See check_color_properties for more info. """ - if self.color == 1: - if color(self.left) == 1 or color(self.right) == 1: - return False + if self.color == 1 and 1 in (color(self.left), color(self.right)): + return False if self.left and not self.left.check_coloring(): return False if self.right and not self.right.check_coloring(): diff --git a/data_structures/hashing/number_theory/prime_numbers.py b/data_structures/hashing/number_theory/prime_numbers.py index b88ab76ecc23..0c25896f9880 100644 --- a/data_structures/hashing/number_theory/prime_numbers.py +++ b/data_structures/hashing/number_theory/prime_numbers.py @@ -52,7 +52,7 @@ def next_prime(value, factor=1, **kwargs): first_value_val = value while not is_prime(value): - value += 1 if not ("desc" in kwargs.keys() and kwargs["desc"] is True) else -1 + value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1, **kwargs) diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index 2e05c5c80a22..099bd2871023 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -136,12 +136,12 @@ def merge_heaps(self, other): # Empty heaps corner cases if other.size == 0: - return + return None if self.size == 0: self.size = other.size self.bottom_root = other.bottom_root self.min_node = other.min_node - return + return None # Update size self.size = self.size + other.size diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py index c19309c9f5a7..e993cc5a20af 100644 --- a/data_structures/linked_list/doubly_linked_list_two.py +++ b/data_structures/linked_list/doubly_linked_list_two.py @@ -128,7 +128,7 @@ def insert_at_position(self, position: int, value: int) -> None: while node: if 
current_position == position: self.insert_before_node(node, new_node) - return None + return current_position += 1 node = node.next self.insert_after_node(self.tail, new_node) diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index 3e52c7e43cf5..bdeb5922ac67 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -107,6 +107,7 @@ def __getitem__(self, index: int) -> Any: for i, node in enumerate(self): if i == index: return node + return None # Used to change the data of a particular node def __setitem__(self, index: int, data: Any) -> None: diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index 96b0db7c896b..4413c53e520e 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -388,10 +388,7 @@ def traverse_keys(node): def test_iter_always_yields_sorted_values(): def is_sorted(lst): - for item, next_item in zip(lst, lst[1:]): - if next_item < item: - return False - return True + return all(next_item >= item for item, next_item in zip(lst, lst[1:])) skip_list = SkipList() for i in range(10): diff --git a/data_structures/queue/circular_queue_linked_list.py b/data_structures/queue/circular_queue_linked_list.py index e8c2b8bffc06..62042c4bce96 100644 --- a/data_structures/queue/circular_queue_linked_list.py +++ b/data_structures/queue/circular_queue_linked_list.py @@ -127,7 +127,7 @@ def dequeue(self) -> Any: """ self.check_can_perform_operation() if self.rear is None or self.front is None: - return + return None if self.front == self.rear: data = self.front.data self.front.data = None diff --git a/digital_image_processing/morphological_operations/dilation_operation.py b/digital_image_processing/morphological_operations/dilation_operation.py index 274880b0a50a..c8380737d219 100644 --- a/digital_image_processing/morphological_operations/dilation_operation.py +++ b/digital_image_processing/morphological_operations/dilation_operation.py @@ -32,7 +32,7 @@ def gray2binary(gray: np.array) -> np.array: [False, True, False], [False, True, False]]) """ - return (127 < gray) & (gray <= 255) + return (gray > 127) & (gray <= 255) def dilation(image: np.array, kernel: np.array) -> np.array: diff --git a/digital_image_processing/morphological_operations/erosion_operation.py b/digital_image_processing/morphological_operations/erosion_operation.py index 4b0a5eee8c03..c2cde2ea6990 100644 --- a/digital_image_processing/morphological_operations/erosion_operation.py +++ b/digital_image_processing/morphological_operations/erosion_operation.py @@ -32,7 +32,7 @@ def gray2binary(gray: np.array) -> np.array: [False, True, False], [False, True, False]]) """ - return (127 < gray) & (gray <= 255) + return (gray > 127) & (gray <= 255) def erosion(image: np.array, kernel: np.array) -> np.array: diff --git a/dynamic_programming/all_construct.py b/dynamic_programming/all_construct.py index 3839d01e6db0..6e53a702cbb1 100644 --- a/dynamic_programming/all_construct.py +++ b/dynamic_programming/all_construct.py @@ -34,7 +34,7 @@ def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[ # slice condition if target[i : i + len(word)] == word: new_combinations: list[list[str]] = [ - [word] + way for way in table[i] + [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] diff --git 
a/dynamic_programming/fizz_buzz.py b/dynamic_programming/fizz_buzz.py index e77ab3de7b4b..e29116437a93 100644 --- a/dynamic_programming/fizz_buzz.py +++ b/dynamic_programming/fizz_buzz.py @@ -49,7 +49,7 @@ def fizz_buzz(number: int, iterations: int) -> str: out += "Fizz" if number % 5 == 0: out += "Buzz" - if not number % 3 == 0 and not number % 5 == 0: + if 0 not in (number % 3, number % 5): out += str(number) # print(out) diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index 3468fd87da8d..178b4169b213 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -42,20 +42,14 @@ def longest_common_subsequence(x: str, y: str): for i in range(1, m + 1): for j in range(1, n + 1): - if x[i - 1] == y[j - 1]: - match = 1 - else: - match = 0 + match = 1 if x[i - 1] == y[j - 1] else 0 l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match) seq = "" i, j = m, n while i > 0 and j > 0: - if x[i - 1] == y[j - 1]: - match = 1 - else: - match = 0 + match = 1 if x[i - 1] == y[j - 1] else 0 if l[i][j] == l[i - 1][j - 1] + match: if match == 1: diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py index 6feed23529f1..d827893763c5 100644 --- a/dynamic_programming/longest_increasing_subsequence.py +++ b/dynamic_programming/longest_increasing_subsequence.py @@ -48,7 +48,7 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recu i += 1 temp_array = [element for element in array[1:] if element >= pivot] - temp_array = [pivot] + longest_subsequence(temp_array) + temp_array = [pivot, *longest_subsequence(temp_array)] if len(temp_array) > len(longest_subseq): return temp_array else: diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index 298a97bf0e17..065b6185c123 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -139,10 +139,9 @@ def dijk(g, s): u = i known.add(u) for v in g[u]: - if v[0] not in known: - if dist[u] + v[1] < dist.get(v[0], 100000): - dist[v[0]] = dist[u] + v[1] - path[v[0]] = u + if v[0] not in known and dist[u] + v[1] < dist.get(v[0], 100000): + dist[v[0]] = dist[u] + v[1] + path[v[0]] = u for i in dist: if i != s: print(dist[i]) @@ -243,10 +242,9 @@ def prim(g, s): u = i known.add(u) for v in g[u]: - if v[0] not in known: - if v[1] < dist.get(v[0], 100000): - dist[v[0]] = v[1] - path[v[0]] = u + if v[0] not in known and v[1] < dist.get(v[0], 100000): + dist[v[0]] = v[1] + path[v[0]] = u return dist diff --git a/graphs/check_cycle.py b/graphs/check_cycle.py index dcc864988ca5..9fd1cd80f116 100644 --- a/graphs/check_cycle.py +++ b/graphs/check_cycle.py @@ -15,11 +15,10 @@ def check_cycle(graph: dict) -> bool: visited: set[int] = set() # To detect a back edge, keep track of vertices currently in the recursion stack rec_stk: set[int] = set() - for node in graph: - if node not in visited: - if depth_first_search(graph, node, visited, rec_stk): - return True - return False + return any( + node not in visited and depth_first_search(graph, node, visited, rec_stk) + for node in graph + ) def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool: diff --git a/graphs/connected_components.py b/graphs/connected_components.py index 4af7803d74a7..15c7633e13e8 100644 --- a/graphs/connected_components.py +++ b/graphs/connected_components.py @@ -27,7 +27,7 @@ def dfs(graph: dict, vert: int, visited: list) -> list: if not 
visited[neighbour]: connected_verts += dfs(graph, neighbour, visited) - return [vert] + connected_verts + return [vert, *connected_verts] def connected_components(graph: dict) -> list: diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py index 1845dad05db2..452138fe904b 100644 --- a/graphs/dijkstra_algorithm.py +++ b/graphs/dijkstra_algorithm.py @@ -112,7 +112,7 @@ def dijkstra(self, src): self.dist[src] = 0 q = PriorityQueue() q.insert((0, src)) # (dist from src, node) - for u in self.adjList.keys(): + for u in self.adjList: if u != src: self.dist[u] = sys.maxsize # Infinity self.par[u] = -1 diff --git a/graphs/edmonds_karp_multiple_source_and_sink.py b/graphs/edmonds_karp_multiple_source_and_sink.py index 070d758e63b6..d0610804109f 100644 --- a/graphs/edmonds_karp_multiple_source_and_sink.py +++ b/graphs/edmonds_karp_multiple_source_and_sink.py @@ -163,9 +163,8 @@ def relabel(self, vertex_index): self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 - ): - if min_height is None or self.heights[to_index] < min_height: - min_height = self.heights[to_index] + ) and (min_height is None or self.heights[to_index] < min_height): + min_height = self.heights[to_index] if min_height is not None: self.heights[vertex_index] = min_height + 1 diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index 87d5605a0bc8..208e57f9b32f 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -130,11 +130,11 @@ def create_edge(nodes, graph, cluster, c1): """ create edge between the nodes """ - for i in cluster[c1].keys(): + for i in cluster[c1]: count = 0 c2 = c1 + 1 while c2 < max(cluster.keys()): - for j in cluster[c2].keys(): + for j in cluster[c2]: """ creates edge only if the condition satisfies """ @@ -185,7 +185,7 @@ def find_freq_subgraph_given_support(s, cluster, graph): find edges of multiple frequent subgraphs """ k = int(s / 100 * (len(cluster) - 1)) - for i in cluster[k].keys(): + for i in cluster[k]: my_dfs(graph, tuple(cluster[k][i]), (["Header"],)) diff --git a/graphs/minimum_spanning_tree_boruvka.py b/graphs/minimum_spanning_tree_boruvka.py index 663d8e26cfad..3c6888037948 100644 --- a/graphs/minimum_spanning_tree_boruvka.py +++ b/graphs/minimum_spanning_tree_boruvka.py @@ -144,6 +144,7 @@ def union(self, item1, item2): self.rank[root1] += 1 self.parent[root2] = root1 return root1 + return None @staticmethod def boruvka_mst(graph): diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index f577866f0da6..5a08ec57ff4d 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -44,10 +44,7 @@ def bottom_to_top(self, val, index, heap, position): temp = position[index] while index != 0: - if index % 2 == 0: - parent = int((index - 2) / 2) - else: - parent = int((index - 1) / 2) + parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: heap[index] = heap[parent] diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py index 707be783d087..81f30ef615fe 100644 --- a/graphs/minimum_spanning_tree_prims2.py +++ b/graphs/minimum_spanning_tree_prims2.py @@ -135,14 +135,14 @@ def _bubble_up(self, elem: T) -> None: # only] curr_pos = self.position_map[elem] if curr_pos == 0: - return + return None parent_position = get_parent_position(curr_pos) _, weight = self.heap[curr_pos] _, parent_weight = self.heap[parent_position] if 
parent_weight > weight: self._swap_nodes(parent_position, curr_pos) return self._bubble_up(elem) - return + return None def _bubble_down(self, elem: T) -> None: # Place a node at the proper position (downward movement) [to be used @@ -154,24 +154,22 @@ def _bubble_down(self, elem: T) -> None: if child_left_position < self.elements and child_right_position < self.elements: _, child_left_weight = self.heap[child_left_position] _, child_right_weight = self.heap[child_right_position] - if child_right_weight < child_left_weight: - if child_right_weight < weight: - self._swap_nodes(child_right_position, curr_pos) - return self._bubble_down(elem) + if child_right_weight < child_left_weight and child_right_weight < weight: + self._swap_nodes(child_right_position, curr_pos) + return self._bubble_down(elem) if child_left_position < self.elements: _, child_left_weight = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(child_left_position, curr_pos) return self._bubble_down(elem) else: - return + return None if child_right_position < self.elements: _, child_right_weight = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(child_right_position, curr_pos) return self._bubble_down(elem) - else: - return + return None def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None: # Swap the nodes at the given positions diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index 481a6750773a..dc93032183e0 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -126,9 +126,8 @@ def emitter_converter(size_par, data): aux = (bin_pos[cont_loop])[-1 * (bp)] except IndexError: aux = "0" - if aux == "1": - if x == "1": - cont_bo += 1 + if aux == "1" and x == "1": + cont_bo += 1 cont_loop += 1 parity.append(cont_bo % 2) diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index ac0398a31a07..e3556e74c3f3 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -108,7 +108,7 @@ def __mul__(self, other: float | Vector) -> float | Vector: mul implements the scalar multiplication and the dot-product """ - if isinstance(other, float) or isinstance(other, int): + if isinstance(other, (float, int)): ans = [c * other for c in self.__components] return Vector(ans) elif isinstance(other, Vector) and len(self) == len(other): @@ -216,7 +216,7 @@ def axpy(scalar: float, x: Vector, y: Vector) -> Vector: assert ( isinstance(x, Vector) and isinstance(y, Vector) - and (isinstance(scalar, int) or isinstance(scalar, float)) + and (isinstance(scalar, (int, float))) ) return x * scalar + y @@ -337,12 +337,13 @@ def __mul__(self, other: float | Vector) -> Vector | Matrix: "vector must have the same size as the " "number of columns of the matrix!" 
) - elif isinstance(other, int) or isinstance(other, float): # matrix-scalar + elif isinstance(other, (int, float)): # matrix-scalar matrix = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(matrix, self.__width, self.__height) + return None def height(self) -> int: """ diff --git a/machine_learning/gradient_descent.py b/machine_learning/gradient_descent.py index 9fa460a07562..5b74dad082e7 100644 --- a/machine_learning/gradient_descent.py +++ b/machine_learning/gradient_descent.py @@ -55,6 +55,7 @@ def output(example_no, data_set): return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] + return None def calculate_hypothesis_value(example_no, data_set): @@ -68,6 +69,7 @@ def calculate_hypothesis_value(example_no, data_set): return _hypothesis_value(train_data[example_no][0]) elif data_set == "test": return _hypothesis_value(test_data[example_no][0]) + return None def summation_of_cost_derivative(index, end=m): diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index b6305469ed7d..7c8142aab878 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -229,7 +229,7 @@ def report_generator( """ # Fill missing values with given rules if fill_missing_report: - df.fillna(value=fill_missing_report, inplace=True) + df = df.fillna(value=fill_missing_report) df["dummy"] = 1 numeric_cols = df.select_dtypes(np.number).columns report = ( @@ -338,7 +338,7 @@ def report_generator( ) report.columns.name = "" report = report.reset_index() - report.drop(columns=["index"], inplace=True) + report = report.drop(columns=["index"]) return report diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 9c45c351272f..37172c8e9bf6 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -129,7 +129,7 @@ def fit(self): # error self._unbound = [i for i in self._all_samples if self._is_unbound(i)] for s in self.unbound: - if s == i1 or s == i2: + if s in (i1, i2): continue self._error[s] += ( y1 * (a1_new - a1) * k(i1, s) @@ -225,7 +225,7 @@ def _predict(self, sample): def _choose_alphas(self): locis = yield from self._choose_a1() if not locis: - return + return None return locis def _choose_a1(self): @@ -423,9 +423,8 @@ def _rbf(self, v1, v2): return np.exp(-1 * (self.gamma * np.linalg.norm(v1 - v2) ** 2)) def _check(self): - if self._kernel == self._rbf: - if self.gamma < 0: - raise ValueError("gamma value must greater than 0") + if self._kernel == self._rbf and self.gamma < 0: + raise ValueError("gamma value must greater than 0") def _get_kernel(self, kernel_name): maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf} diff --git a/maths/abs.py b/maths/abs.py index cb0ffc8a5b61..b357e98d8680 100644 --- a/maths/abs.py +++ b/maths/abs.py @@ -75,9 +75,9 @@ def test_abs_val(): """ >>> test_abs_val() """ - assert 0 == abs_val(0) - assert 34 == abs_val(34) - assert 100000000000 == abs_val(-100000000000) + assert abs_val(0) == 0 + assert abs_val(34) == 34 + assert abs_val(-100000000000) == 100000000000 a = [-3, -1, 2, -11] assert abs_max(a) == -11 diff --git a/maths/binary_exp_mod.py b/maths/binary_exp_mod.py index 67dd1e728b18..df688892d690 100644 --- a/maths/binary_exp_mod.py +++ b/maths/binary_exp_mod.py @@ -6,7 +6,7 @@ def bin_exp_mod(a, n, b): 7 """ # mod b - assert not (b == 0), "This cannot accept modulo 
that is == 0" + assert b != 0, "This cannot accept modulo that is == 0" if n == 0: return 1 diff --git a/maths/jaccard_similarity.py b/maths/jaccard_similarity.py index eab25188b2fd..32054414c0c2 100644 --- a/maths/jaccard_similarity.py +++ b/maths/jaccard_similarity.py @@ -71,6 +71,7 @@ def jaccard_similarity(set_a, set_b, alternative_union=False): return len(intersection) / len(union) return len(intersection) / len(union) + return None if __name__ == "__main__": diff --git a/maths/largest_of_very_large_numbers.py b/maths/largest_of_very_large_numbers.py index d2dc0af18126..7e7fea004958 100644 --- a/maths/largest_of_very_large_numbers.py +++ b/maths/largest_of_very_large_numbers.py @@ -12,6 +12,7 @@ def res(x, y): return 0 elif y == 0: return 1 # any number raised to 0 is 1 + raise AssertionError("This should never happen") if __name__ == "__main__": # Main function diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index 1def58e1f226..af98f24f9538 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -80,10 +80,7 @@ def __init__(self, poly_a=None, poly_b=None): # Discrete fourier transform of A and B def __dft(self, which): - if which == "A": - dft = [[x] for x in self.polyA] - else: - dft = [[x] for x in self.polyB] + dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB] # Corner case if len(dft) <= 1: return dft[0] diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index cb47b829010c..9dd112115f5e 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -153,6 +153,7 @@ def train(self, xdata, ydata, train_round, accuracy): if mse < self.accuracy: print("----达到精度----") return mse + return None def cal_loss(self, ydata, ydata_): self.loss = np.sum(np.power((ydata - ydata_), 2)) diff --git a/other/graham_scan.py b/other/graham_scan.py index 8e83bfcf4c49..2eadb4e56668 100644 --- a/other/graham_scan.py +++ b/other/graham_scan.py @@ -125,10 +125,9 @@ def graham_scan(points: list[tuple[int, int]]) -> list[tuple[int, int]]: miny = y minx = x minidx = i - if y == miny: - if x < minx: - minx = x - minidx = i + if y == miny and x < minx: + minx = x + minidx = i # remove the lowest and the most left point from points for preparing for sort points.pop(minidx) diff --git a/other/nested_brackets.py b/other/nested_brackets.py index 3f61a4e7006c..ea48c0a5f532 100644 --- a/other/nested_brackets.py +++ b/other/nested_brackets.py @@ -24,11 +24,10 @@ def is_balanced(s): if s[i] in open_brackets: stack.append(s[i]) - elif s[i] in closed_brackets: - if len(stack) == 0 or ( - len(stack) > 0 and open_to_closed[stack.pop()] != s[i] - ): - return False + elif s[i] in closed_brackets and ( + len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i]) + ): + return False return len(stack) == 0 diff --git a/physics/hubble_parameter.py b/physics/hubble_parameter.py index 6bc62e7131c5..f7b2d28a6716 100644 --- a/physics/hubble_parameter.py +++ b/physics/hubble_parameter.py @@ -70,10 +70,10 @@ def hubble_parameter( 68.3 """ parameters = [redshift, radiation_density, matter_density, dark_energy] - if any(0 > p for p in parameters): + if any(p < 0 for p in parameters): raise ValueError("All input parameters must be positive") - if any(1 < p for p in parameters[1:4]): + if any(p > 1 for p in parameters[1:4]): raise ValueError("Relative densities cannot be greater than one") else: curvature = 1 - (matter_density + radiation_density + 
dark_energy) diff --git a/project_euler/problem_005/sol1.py b/project_euler/problem_005/sol1.py index f272c102d2bb..01cbd0e15ff7 100644 --- a/project_euler/problem_005/sol1.py +++ b/project_euler/problem_005/sol1.py @@ -63,6 +63,7 @@ def solution(n: int = 20) -> int: if i == 0: i = 1 return i + return None if __name__ == "__main__": diff --git a/project_euler/problem_009/sol1.py b/project_euler/problem_009/sol1.py index 1d908402b6b1..e65c9b857990 100644 --- a/project_euler/problem_009/sol1.py +++ b/project_euler/problem_009/sol1.py @@ -32,9 +32,8 @@ def solution() -> int: for a in range(300): for b in range(a + 1, 400): for c in range(b + 1, 500): - if (a + b + c) == 1000: - if (a**2) + (b**2) == (c**2): - return a * b * c + if (a + b + c) == 1000 and (a**2) + (b**2) == (c**2): + return a * b * c return -1 diff --git a/project_euler/problem_014/sol2.py b/project_euler/problem_014/sol2.py index d2a1d9f0e468..2448e652ce5b 100644 --- a/project_euler/problem_014/sol2.py +++ b/project_euler/problem_014/sol2.py @@ -34,10 +34,7 @@ def collatz_sequence_length(n: int) -> int: """Returns the Collatz sequence length for n.""" if n in COLLATZ_SEQUENCE_LENGTHS: return COLLATZ_SEQUENCE_LENGTHS[n] - if n % 2 == 0: - next_n = n // 2 - else: - next_n = 3 * n + 1 + next_n = n // 2 if n % 2 == 0 else 3 * n + 1 sequence_length = collatz_sequence_length(next_n) + 1 COLLATZ_SEQUENCE_LENGTHS[n] = sequence_length return sequence_length diff --git a/project_euler/problem_018/solution.py b/project_euler/problem_018/solution.py index 82fc3ce3c9db..70306148bb9e 100644 --- a/project_euler/problem_018/solution.py +++ b/project_euler/problem_018/solution.py @@ -48,14 +48,8 @@ def solution(): for i in range(1, len(a)): for j in range(len(a[i])): - if j != len(a[i - 1]): - number1 = a[i - 1][j] - else: - number1 = 0 - if j > 0: - number2 = a[i - 1][j - 1] - else: - number2 = 0 + number1 = a[i - 1][j] if j != len(a[i - 1]) else 0 + number2 = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(number1, number2) return max(a[-1]) diff --git a/project_euler/problem_019/sol1.py b/project_euler/problem_019/sol1.py index ab59365843b2..0e38137d4f01 100644 --- a/project_euler/problem_019/sol1.py +++ b/project_euler/problem_019/sol1.py @@ -39,7 +39,7 @@ def solution(): while year < 2001: day += 7 - if (year % 4 == 0 and not year % 100 == 0) or (year % 400 == 0): + if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 day = day - days_per_month[month - 2] diff --git a/project_euler/problem_033/sol1.py b/project_euler/problem_033/sol1.py index e0c9a058af53..32be424b6a7b 100644 --- a/project_euler/problem_033/sol1.py +++ b/project_euler/problem_033/sol1.py @@ -20,11 +20,9 @@ def is_digit_cancelling(num: int, den: int) -> bool: - if num != den: - if num % 10 == den // 10: - if (num // 10) / (den % 10) == num / den: - return True - return False + return ( + num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den + ) def fraction_list(digit_len: int) -> list[str]: diff --git a/project_euler/problem_064/sol1.py b/project_euler/problem_064/sol1.py index 81ebcc7b73c3..12769decc62f 100644 --- a/project_euler/problem_064/sol1.py +++ b/project_euler/problem_064/sol1.py @@ -67,9 +67,8 @@ def solution(n: int = 10000) -> int: count_odd_periods = 0 for i in range(2, n + 1): sr = sqrt(i) - if sr - floor(sr) != 0: - if continuous_fraction_period(i) % 2 == 1: - count_odd_periods += 1 + if sr - floor(sr) != 0 and continuous_fraction_period(i) % 2 == 1: + 
count_odd_periods += 1 return count_odd_periods diff --git a/project_euler/problem_067/sol1.py b/project_euler/problem_067/sol1.py index f20c206cca11..2b41fedc6784 100644 --- a/project_euler/problem_067/sol1.py +++ b/project_euler/problem_067/sol1.py @@ -37,14 +37,8 @@ def solution(): for i in range(1, len(a)): for j in range(len(a[i])): - if j != len(a[i - 1]): - number1 = a[i - 1][j] - else: - number1 = 0 - if j > 0: - number2 = a[i - 1][j - 1] - else: - number2 = 0 + number1 = a[i - 1][j] if j != len(a[i - 1]) else 0 + number2 = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(number1, number2) return max(a[-1]) diff --git a/project_euler/problem_109/sol1.py b/project_euler/problem_109/sol1.py index 852f001d38af..ef145dda590b 100644 --- a/project_euler/problem_109/sol1.py +++ b/project_euler/problem_109/sol1.py @@ -65,7 +65,7 @@ def solution(limit: int = 100) -> int: >>> solution(50) 12577 """ - singles: list[int] = list(range(1, 21)) + [25] + singles: list[int] = [*list(range(1, 21)), 25] doubles: list[int] = [2 * x for x in range(1, 21)] + [50] triples: list[int] = [3 * x for x in range(1, 21)] all_values: list[int] = singles + doubles + triples + [0] diff --git a/project_euler/problem_203/sol1.py b/project_euler/problem_203/sol1.py index 713b530b6af2..da9436246a7c 100644 --- a/project_euler/problem_203/sol1.py +++ b/project_euler/problem_203/sol1.py @@ -50,8 +50,8 @@ def get_pascal_triangle_unique_coefficients(depth: int) -> set[int]: coefficients = {1} previous_coefficients = [1] for _ in range(2, depth + 1): - coefficients_begins_one = previous_coefficients + [0] - coefficients_ends_one = [0] + previous_coefficients + coefficients_begins_one = [*previous_coefficients, 0] + coefficients_ends_one = [0, *previous_coefficients] previous_coefficients = [] for x, y in zip(coefficients_begins_one, coefficients_ends_one): coefficients.add(x + y) diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index b3f81bfd10e7..871de8207308 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -36,12 +36,11 @@ def calculate_waitingtime( # Process until all processes are completed while complete != no_of_processes: for j in range(no_of_processes): - if arrival_time[j] <= increment_time: - if remaining_time[j] > 0: - if remaining_time[j] < minm: - minm = remaining_time[j] - short = j - check = True + if arrival_time[j] <= increment_time and remaining_time[j] > 0: + if remaining_time[j] < minm: + minm = remaining_time[j] + short = j + check = True if not check: increment_time += 1 diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py index 7572ce342720..b95be9ebc254 100755 --- a/scripts/build_directory_md.py +++ b/scripts/build_directory_md.py @@ -21,9 +21,8 @@ def md_prefix(i): def print_path(old_path: str, new_path: str) -> str: old_parts = old_path.split(os.sep) for i, new_part in enumerate(new_path.split(os.sep)): - if i + 1 > len(old_parts) or old_parts[i] != new_part: - if new_part: - print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}") + if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part: + print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}") return new_path diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index 66814b47883d..76e80df25a13 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -37,6 +37,7 @@ def build_tree(): right_node = TreeNode(int(check)) node_found.right = right_node q.put(right_node) + return 
None def pre_order(node: TreeNode) -> None: diff --git a/sorts/circle_sort.py b/sorts/circle_sort.py index da3c59059516..271fa1e8d58a 100644 --- a/sorts/circle_sort.py +++ b/sorts/circle_sort.py @@ -58,14 +58,13 @@ def circle_sort_util(collection: list, low: int, high: int) -> bool: left += 1 right -= 1 - if left == right: - if collection[left] > collection[right + 1]: - collection[left], collection[right + 1] = ( - collection[right + 1], - collection[left], - ) + if left == right and collection[left] > collection[right + 1]: + collection[left], collection[right + 1] = ( + collection[right + 1], + collection[left], + ) - swapped = True + swapped = True mid = low + int((high - low) / 2) left_swap = circle_sort_util(collection, low, mid) diff --git a/sorts/counting_sort.py b/sorts/counting_sort.py index 892ec5d5f344..18c4b0323dcb 100644 --- a/sorts/counting_sort.py +++ b/sorts/counting_sort.py @@ -66,7 +66,7 @@ def counting_sort_string(string): if __name__ == "__main__": # Test string sort - assert "eghhiiinrsssttt" == counting_sort_string("thisisthestring") + assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt" user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] diff --git a/sorts/msd_radix_sort.py b/sorts/msd_radix_sort.py index 74ce21762906..03f84c75b9d8 100644 --- a/sorts/msd_radix_sort.py +++ b/sorts/msd_radix_sort.py @@ -147,7 +147,7 @@ def _msd_radix_sort_inplace( list_of_ints[i], list_of_ints[j] = list_of_ints[j], list_of_ints[i] j -= 1 - if not j == i: + if j != i: i += 1 _msd_radix_sort_inplace(list_of_ints, bit_position, begin_index, i) diff --git a/sorts/quick_sort.py b/sorts/quick_sort.py index 70cd19d7afe0..b79d3eac3e48 100644 --- a/sorts/quick_sort.py +++ b/sorts/quick_sort.py @@ -39,7 +39,7 @@ def quick_sort(collection: list) -> list: for element in collection[pivot_index + 1 :]: (greater if element > pivot else lesser).append(element) - return quick_sort(lesser) + [pivot] + quick_sort(greater) + return [*quick_sort(lesser), pivot, *quick_sort(greater)] if __name__ == "__main__": diff --git a/sorts/recursive_quick_sort.py b/sorts/recursive_quick_sort.py index c28a14e37ebd..c29009aca673 100644 --- a/sorts/recursive_quick_sort.py +++ b/sorts/recursive_quick_sort.py @@ -9,11 +9,11 @@ def quick_sort(data: list) -> list: if len(data) <= 1: return data else: - return ( - quick_sort([e for e in data[1:] if e <= data[0]]) - + [data[0]] - + quick_sort([e for e in data[1:] if e > data[0]]) - ) + return [ + *quick_sort([e for e in data[1:] if e <= data[0]]), + data[0], + *quick_sort([e for e in data[1:] if e > data[0]]), + ] if __name__ == "__main__": diff --git a/sorts/tim_sort.py b/sorts/tim_sort.py index c90c7e80390b..138f11c71bcc 100644 --- a/sorts/tim_sort.py +++ b/sorts/tim_sort.py @@ -32,9 +32,9 @@ def merge(left, right): return left if left[0] < right[0]: - return [left[0]] + merge(left[1:], right) + return [left[0], *merge(left[1:], right)] - return [right[0]] + merge(left, right[1:]) + return [right[0], *merge(left, right[1:])] def tim_sort(lst): diff --git a/strings/autocomplete_using_trie.py b/strings/autocomplete_using_trie.py index 758260292a30..77a3050ab15f 100644 --- a/strings/autocomplete_using_trie.py +++ b/strings/autocomplete_using_trie.py @@ -27,10 +27,7 @@ def find_word(self, prefix: str) -> tuple | list: def _elements(self, d: dict) -> tuple: result = [] for c, v in d.items(): - if c == END: - sub_result = [" "] - else: - sub_result = [c + s for s in self._elements(v)] + 
sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)] result.extend(sub_result) return tuple(result) diff --git a/strings/check_anagrams.py b/strings/check_anagrams.py index 0d2f8091a3f0..a364b98212ad 100644 --- a/strings/check_anagrams.py +++ b/strings/check_anagrams.py @@ -38,10 +38,7 @@ def check_anagrams(first_str: str, second_str: str) -> bool: count[first_str[i]] += 1 count[second_str[i]] -= 1 - for _count in count.values(): - if _count != 0: - return False - return True + return all(_count == 0 for _count in count.values()) if __name__ == "__main__": diff --git a/strings/is_palindrome.py b/strings/is_palindrome.py index 9bf2abd98486..406aa2e8d3c3 100644 --- a/strings/is_palindrome.py +++ b/strings/is_palindrome.py @@ -30,10 +30,7 @@ def is_palindrome(s: str) -> bool: # with the help of 1st index (i==n-i-1) # where n is length of string - for i in range(end): - if s[i] != s[n - i - 1]: - return False - return True + return all(s[i] == s[n - i - 1] for i in range(end)) if __name__ == "__main__": diff --git a/strings/snake_case_to_camel_pascal_case.py b/strings/snake_case_to_camel_pascal_case.py index eaabdcb87a0f..28a28b517a01 100644 --- a/strings/snake_case_to_camel_pascal_case.py +++ b/strings/snake_case_to_camel_pascal_case.py @@ -43,7 +43,7 @@ def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str: initial_word = "" if use_pascal else words[0] - return "".join([initial_word] + capitalized_words) + return "".join([initial_word, *capitalized_words]) if __name__ == "__main__": diff --git a/web_programming/convert_number_to_words.py b/web_programming/convert_number_to_words.py index 50612dec20dd..1e293df9660c 100644 --- a/web_programming/convert_number_to_words.py +++ b/web_programming/convert_number_to_words.py @@ -63,7 +63,7 @@ def convert(number: int) -> str: current = temp_num % 10 if counter % 2 == 0: addition = "" - if counter in placevalue.keys() and current != 0: + if counter in placevalue and current != 0: addition = placevalue[counter] if counter == 2: words = singles[current] + addition + words @@ -84,12 +84,12 @@ def convert(number: int) -> str: words = teens[number % 10] + words else: addition = "" - if counter in placevalue.keys(): + if counter in placevalue: addition = placevalue[counter] words = doubles[current] + addition + words else: addition = "" - if counter in placevalue.keys(): + if counter in placevalue: if current == 0 and ((temp_num % 100) // 10) == 0: addition = "" else: diff --git a/web_programming/instagram_crawler.py b/web_programming/instagram_crawler.py index 4536257a984e..0816cd181051 100644 --- a/web_programming/instagram_crawler.py +++ b/web_programming/instagram_crawler.py @@ -105,7 +105,7 @@ def test_instagram_user(username: str = "github") -> None: import os if os.environ.get("CI"): - return None # test failing on GitHub Actions + return # test failing on GitHub Actions instagram_user = InstagramUser(username) assert instagram_user.user_data assert isinstance(instagram_user.user_data, dict) diff --git a/web_programming/open_google_results.py b/web_programming/open_google_results.py index 2685bf62114d..f61e3666dd7e 100644 --- a/web_programming/open_google_results.py +++ b/web_programming/open_google_results.py @@ -7,10 +7,7 @@ from fake_useragent import UserAgent if __name__ == "__main__": - if len(argv) > 1: - query = "%20".join(argv[1:]) - else: - query = quote(str(input("Search: "))) + query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: "))) print("Googling.....") From 
069a14b1c55112bc4f4e08571fc3c2156bb69e5a Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 2 Mar 2023 07:55:47 +0300 Subject: [PATCH 0771/1543] Add Project Euler problem 082 solution 1 (#6282) Update DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_082/__init__.py | 0 project_euler/problem_082/input.txt | 80 +++++++++++++++++++++++ project_euler/problem_082/sol1.py | 65 ++++++++++++++++++ project_euler/problem_082/test_matrix.txt | 5 ++ 5 files changed, 152 insertions(+) create mode 100644 project_euler/problem_082/__init__.py create mode 100644 project_euler/problem_082/input.txt create mode 100644 project_euler/problem_082/sol1.py create mode 100644 project_euler/problem_082/test_matrix.txt diff --git a/DIRECTORY.md b/DIRECTORY.md index a8786cc2591f..3d1bc967e4b5 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -918,6 +918,8 @@ * [Sol1](project_euler/problem_080/sol1.py) * Problem 081 * [Sol1](project_euler/problem_081/sol1.py) + * Problem 082 + * [Sol1](project_euler/problem_082/sol1.py) * Problem 085 * [Sol1](project_euler/problem_085/sol1.py) * Problem 086 diff --git a/project_euler/problem_082/__init__.py b/project_euler/problem_082/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_082/input.txt b/project_euler/problem_082/input.txt new file mode 100644 index 000000000000..f65322a7e541 --- /dev/null +++ b/project_euler/problem_082/input.txt @@ -0,0 +1,80 @@ +4445,2697,5115,718,2209,2212,654,4348,3079,6821,7668,3276,8874,4190,3785,2752,9473,7817,9137,496,7338,3434,7152,4355,4552,7917,7827,2460,2350,691,3514,5880,3145,7633,7199,3783,5066,7487,3285,1084,8985,760,872,8609,8051,1134,9536,5750,9716,9371,7619,5617,275,9721,2997,2698,1887,8825,6372,3014,2113,7122,7050,6775,5948,2758,1219,3539,348,7989,2735,9862,1263,8089,6401,9462,3168,2758,3748,5870 +1096,20,1318,7586,5167,2642,1443,5741,7621,7030,5526,4244,2348,4641,9827,2448,6918,5883,3737,300,7116,6531,567,5997,3971,6623,820,6148,3287,1874,7981,8424,7672,7575,6797,6717,1078,5008,4051,8795,5820,346,1851,6463,2117,6058,3407,8211,117,4822,1317,4377,4434,5925,8341,4800,1175,4173,690,8978,7470,1295,3799,8724,3509,9849,618,3320,7068,9633,2384,7175,544,6583,1908,9983,481,4187,9353,9377 +9607,7385,521,6084,1364,8983,7623,1585,6935,8551,2574,8267,4781,3834,2764,2084,2669,4656,9343,7709,2203,9328,8004,6192,5856,3555,2260,5118,6504,1839,9227,1259,9451,1388,7909,5733,6968,8519,9973,1663,5315,7571,3035,4325,4283,2304,6438,3815,9213,9806,9536,196,5542,6907,2475,1159,5820,9075,9470,2179,9248,1828,4592,9167,3713,4640,47,3637,309,7344,6955,346,378,9044,8635,7466,5036,9515,6385,9230 +7206,3114,7760,1094,6150,5182,7358,7387,4497,955,101,1478,7777,6966,7010,8417,6453,4955,3496,107,449,8271,131,2948,6185,784,5937,8001,6104,8282,4165,3642,710,2390,575,715,3089,6964,4217,192,5949,7006,715,3328,1152,66,8044,4319,1735,146,4818,5456,6451,4113,1063,4781,6799,602,1504,6245,6550,1417,1343,2363,3785,5448,4545,9371,5420,5068,4613,4882,4241,5043,7873,8042,8434,3939,9256,2187 +3620,8024,577,9997,7377,7682,1314,1158,6282,6310,1896,2509,5436,1732,9480,706,496,101,6232,7375,2207,2306,110,6772,3433,2878,8140,5933,8688,1399,2210,7332,6172,6403,7333,4044,2291,1790,2446,7390,8698,5723,3678,7104,1825,2040,140,3982,4905,4160,2200,5041,2512,1488,2268,1175,7588,8321,8078,7312,977,5257,8465,5068,3453,3096,1651,7906,253,9250,6021,8791,8109,6651,3412,345,4778,5152,4883,7505 
+1074,5438,9008,2679,5397,5429,2652,3403,770,9188,4248,2493,4361,8327,9587,707,9525,5913,93,1899,328,2876,3604,673,8576,6908,7659,2544,3359,3883,5273,6587,3065,1749,3223,604,9925,6941,2823,8767,7039,3290,3214,1787,7904,3421,7137,9560,8451,2669,9219,6332,1576,5477,6755,8348,4164,4307,2984,4012,6629,1044,2874,6541,4942,903,1404,9125,5160,8836,4345,2581,460,8438,1538,5507,668,3352,2678,6942 +4295,1176,5596,1521,3061,9868,7037,7129,8933,6659,5947,5063,3653,9447,9245,2679,767,714,116,8558,163,3927,8779,158,5093,2447,5782,3967,1716,931,7772,8164,1117,9244,5783,7776,3846,8862,6014,2330,6947,1777,3112,6008,3491,1906,5952,314,4602,8994,5919,9214,3995,5026,7688,6809,5003,3128,2509,7477,110,8971,3982,8539,2980,4689,6343,5411,2992,5270,5247,9260,2269,7474,1042,7162,5206,1232,4556,4757 +510,3556,5377,1406,5721,4946,2635,7847,4251,8293,8281,6351,4912,287,2870,3380,3948,5322,3840,4738,9563,1906,6298,3234,8959,1562,6297,8835,7861,239,6618,1322,2553,2213,5053,5446,4402,6500,5182,8585,6900,5756,9661,903,5186,7687,5998,7997,8081,8955,4835,6069,2621,1581,732,9564,1082,1853,5442,1342,520,1737,3703,5321,4793,2776,1508,1647,9101,2499,6891,4336,7012,3329,3212,1442,9993,3988,4930,7706 +9444,3401,5891,9716,1228,7107,109,3563,2700,6161,5039,4992,2242,8541,7372,2067,1294,3058,1306,320,8881,5756,9326,411,8650,8824,5495,8282,8397,2000,1228,7817,2099,6473,3571,5994,4447,1299,5991,543,7874,2297,1651,101,2093,3463,9189,6872,6118,872,1008,1779,2805,9084,4048,2123,5877,55,3075,1737,9459,4535,6453,3644,108,5982,4437,5213,1340,6967,9943,5815,669,8074,1838,6979,9132,9315,715,5048 +3327,4030,7177,6336,9933,5296,2621,4785,2755,4832,2512,2118,2244,4407,2170,499,7532,9742,5051,7687,970,6924,3527,4694,5145,1306,2165,5940,2425,8910,3513,1909,6983,346,6377,4304,9330,7203,6605,3709,3346,970,369,9737,5811,4427,9939,3693,8436,5566,1977,3728,2399,3985,8303,2492,5366,9802,9193,7296,1033,5060,9144,2766,1151,7629,5169,5995,58,7619,7565,4208,1713,6279,3209,4908,9224,7409,1325,8540 +6882,1265,1775,3648,4690,959,5837,4520,5394,1378,9485,1360,4018,578,9174,2932,9890,3696,116,1723,1178,9355,7063,1594,1918,8574,7594,7942,1547,6166,7888,354,6932,4651,1010,7759,6905,661,7689,6092,9292,3845,9605,8443,443,8275,5163,7720,7265,6356,7779,1798,1754,5225,6661,1180,8024,5666,88,9153,1840,3508,1193,4445,2648,3538,6243,6375,8107,5902,5423,2520,1122,5015,6113,8859,9370,966,8673,2442 +7338,3423,4723,6533,848,8041,7921,8277,4094,5368,7252,8852,9166,2250,2801,6125,8093,5738,4038,9808,7359,9494,601,9116,4946,2702,5573,2921,9862,1462,1269,2410,4171,2709,7508,6241,7522,615,2407,8200,4189,5492,5649,7353,2590,5203,4274,710,7329,9063,956,8371,3722,4253,4785,1194,4828,4717,4548,940,983,2575,4511,2938,1827,2027,2700,1236,841,5760,1680,6260,2373,3851,1841,4968,1172,5179,7175,3509 +4420,1327,3560,2376,6260,2988,9537,4064,4829,8872,9598,3228,1792,7118,9962,9336,4368,9189,6857,1829,9863,6287,7303,7769,2707,8257,2391,2009,3975,4993,3068,9835,3427,341,8412,2134,4034,8511,6421,3041,9012,2983,7289,100,1355,7904,9186,6920,5856,2008,6545,8331,3655,5011,839,8041,9255,6524,3862,8788,62,7455,3513,5003,8413,3918,2076,7960,6108,3638,6999,3436,1441,4858,4181,1866,8731,7745,3744,1000 +356,8296,8325,1058,1277,4743,3850,2388,6079,6462,2815,5620,8495,5378,75,4324,3441,9870,1113,165,1544,1179,2834,562,6176,2313,6836,8839,2986,9454,5199,6888,1927,5866,8760,320,1792,8296,7898,6121,7241,5886,5814,2815,8336,1576,4314,3109,2572,6011,2086,9061,9403,3947,5487,9731,7281,3159,1819,1334,3181,5844,5114,9898,4634,2531,4412,6430,4262,8482,4546,4555,6804,2607,9421,686,8649,8860,7794,6672 
+9870,152,1558,4963,8750,4754,6521,6256,8818,5208,5691,9659,8377,9725,5050,5343,2539,6101,1844,9700,7750,8114,5357,3001,8830,4438,199,9545,8496,43,2078,327,9397,106,6090,8181,8646,6414,7499,5450,4850,6273,5014,4131,7639,3913,6571,8534,9703,4391,7618,445,1320,5,1894,6771,7383,9191,4708,9706,6939,7937,8726,9382,5216,3685,2247,9029,8154,1738,9984,2626,9438,4167,6351,5060,29,1218,1239,4785 +192,5213,8297,8974,4032,6966,5717,1179,6523,4679,9513,1481,3041,5355,9303,9154,1389,8702,6589,7818,6336,3539,5538,3094,6646,6702,6266,2759,4608,4452,617,9406,8064,6379,444,5602,4950,1810,8391,1536,316,8714,1178,5182,5863,5110,5372,4954,1978,2971,5680,4863,2255,4630,5723,2168,538,1692,1319,7540,440,6430,6266,7712,7385,5702,620,641,3136,7350,1478,3155,2820,9109,6261,1122,4470,14,8493,2095 +1046,4301,6082,474,4974,7822,2102,5161,5172,6946,8074,9716,6586,9962,9749,5015,2217,995,5388,4402,7652,6399,6539,1349,8101,3677,1328,9612,7922,2879,231,5887,2655,508,4357,4964,3554,5930,6236,7384,4614,280,3093,9600,2110,7863,2631,6626,6620,68,1311,7198,7561,1768,5139,1431,221,230,2940,968,5283,6517,2146,1646,869,9402,7068,8645,7058,1765,9690,4152,2926,9504,2939,7504,6074,2944,6470,7859 +4659,736,4951,9344,1927,6271,8837,8711,3241,6579,7660,5499,5616,3743,5801,4682,9748,8796,779,1833,4549,8138,4026,775,4170,2432,4174,3741,7540,8017,2833,4027,396,811,2871,1150,9809,2719,9199,8504,1224,540,2051,3519,7982,7367,2761,308,3358,6505,2050,4836,5090,7864,805,2566,2409,6876,3361,8622,5572,5895,3280,441,7893,8105,1634,2929,274,3926,7786,6123,8233,9921,2674,5340,1445,203,4585,3837 +5759,338,7444,7968,7742,3755,1591,4839,1705,650,7061,2461,9230,9391,9373,2413,1213,431,7801,4994,2380,2703,6161,6878,8331,2538,6093,1275,5065,5062,2839,582,1014,8109,3525,1544,1569,8622,7944,2905,6120,1564,1839,5570,7579,1318,2677,5257,4418,5601,7935,7656,5192,1864,5886,6083,5580,6202,8869,1636,7907,4759,9082,5854,3185,7631,6854,5872,5632,5280,1431,2077,9717,7431,4256,8261,9680,4487,4752,4286 +1571,1428,8599,1230,7772,4221,8523,9049,4042,8726,7567,6736,9033,2104,4879,4967,6334,6716,3994,1269,8995,6539,3610,7667,6560,6065,874,848,4597,1711,7161,4811,6734,5723,6356,6026,9183,2586,5636,1092,7779,7923,8747,6887,7505,9909,1792,3233,4526,3176,1508,8043,720,5212,6046,4988,709,5277,8256,3642,1391,5803,1468,2145,3970,6301,7767,2359,8487,9771,8785,7520,856,1605,8972,2402,2386,991,1383,5963 +1822,4824,5957,6511,9868,4113,301,9353,6228,2881,2966,6956,9124,9574,9233,1601,7340,973,9396,540,4747,8590,9535,3650,7333,7583,4806,3593,2738,8157,5215,8472,2284,9473,3906,6982,5505,6053,7936,6074,7179,6688,1564,1103,6860,5839,2022,8490,910,7551,7805,881,7024,1855,9448,4790,1274,3672,2810,774,7623,4223,4850,6071,9975,4935,1915,9771,6690,3846,517,463,7624,4511,614,6394,3661,7409,1395,8127 +8738,3850,9555,3695,4383,2378,87,6256,6740,7682,9546,4255,6105,2000,1851,4073,8957,9022,6547,5189,2487,303,9602,7833,1628,4163,6678,3144,8589,7096,8913,5823,4890,7679,1212,9294,5884,2972,3012,3359,7794,7428,1579,4350,7246,4301,7779,7790,3294,9547,4367,3549,1958,8237,6758,3497,3250,3456,6318,1663,708,7714,6143,6890,3428,6853,9334,7992,591,6449,9786,1412,8500,722,5468,1371,108,3939,4199,2535 +7047,4323,1934,5163,4166,461,3544,2767,6554,203,6098,2265,9078,2075,4644,6641,8412,9183,487,101,7566,5622,1975,5726,2920,5374,7779,5631,3753,3725,2672,3621,4280,1162,5812,345,8173,9785,1525,955,5603,2215,2580,5261,2765,2990,5979,389,3907,2484,1232,5933,5871,3304,1138,1616,5114,9199,5072,7442,7245,6472,4760,6359,9053,7876,2564,9404,3043,9026,2261,3374,4460,7306,2326,966,828,3274,1712,3446 
+3975,4565,8131,5800,4570,2306,8838,4392,9147,11,3911,7118,9645,4994,2028,6062,5431,2279,8752,2658,7836,994,7316,5336,7185,3289,1898,9689,2331,5737,3403,1124,2679,3241,7748,16,2724,5441,6640,9368,9081,5618,858,4969,17,2103,6035,8043,7475,2181,939,415,1617,8500,8253,2155,7843,7974,7859,1746,6336,3193,2617,8736,4079,6324,6645,8891,9396,5522,6103,1857,8979,3835,2475,1310,7422,610,8345,7615 +9248,5397,5686,2988,3446,4359,6634,9141,497,9176,6773,7448,1907,8454,916,1596,2241,1626,1384,2741,3649,5362,8791,7170,2903,2475,5325,6451,924,3328,522,90,4813,9737,9557,691,2388,1383,4021,1609,9206,4707,5200,7107,8104,4333,9860,5013,1224,6959,8527,1877,4545,7772,6268,621,4915,9349,5970,706,9583,3071,4127,780,8231,3017,9114,3836,7503,2383,1977,4870,8035,2379,9704,1037,3992,3642,1016,4303 +5093,138,4639,6609,1146,5565,95,7521,9077,2272,974,4388,2465,2650,722,4998,3567,3047,921,2736,7855,173,2065,4238,1048,5,6847,9548,8632,9194,5942,4777,7910,8971,6279,7253,2516,1555,1833,3184,9453,9053,6897,7808,8629,4877,1871,8055,4881,7639,1537,7701,2508,7564,5845,5023,2304,5396,3193,2955,1088,3801,6203,1748,3737,1276,13,4120,7715,8552,3047,2921,106,7508,304,1280,7140,2567,9135,5266 +6237,4607,7527,9047,522,7371,4883,2540,5867,6366,5301,1570,421,276,3361,527,6637,4861,2401,7522,5808,9371,5298,2045,5096,5447,7755,5115,7060,8529,4078,1943,1697,1764,5453,7085,960,2405,739,2100,5800,728,9737,5704,5693,1431,8979,6428,673,7540,6,7773,5857,6823,150,5869,8486,684,5816,9626,7451,5579,8260,3397,5322,6920,1879,2127,2884,5478,4977,9016,6165,6292,3062,5671,5968,78,4619,4763 +9905,7127,9390,5185,6923,3721,9164,9705,4341,1031,1046,5127,7376,6528,3248,4941,1178,7889,3364,4486,5358,9402,9158,8600,1025,874,1839,1783,309,9030,1843,845,8398,1433,7118,70,8071,2877,3904,8866,6722,4299,10,1929,5897,4188,600,1889,3325,2485,6473,4474,7444,6992,4846,6166,4441,2283,2629,4352,7775,1101,2214,9985,215,8270,9750,2740,8361,7103,5930,8664,9690,8302,9267,344,2077,1372,1880,9550 +5825,8517,7769,2405,8204,1060,3603,7025,478,8334,1997,3692,7433,9101,7294,7498,9415,5452,3850,3508,6857,9213,6807,4412,7310,854,5384,686,4978,892,8651,3241,2743,3801,3813,8588,6701,4416,6990,6490,3197,6838,6503,114,8343,5844,8646,8694,65,791,5979,2687,2621,2019,8097,1423,3644,9764,4921,3266,3662,5561,2476,8271,8138,6147,1168,3340,1998,9874,6572,9873,6659,5609,2711,3931,9567,4143,7833,8887 +6223,2099,2700,589,4716,8333,1362,5007,2753,2848,4441,8397,7192,8191,4916,9955,6076,3370,6396,6971,3156,248,3911,2488,4930,2458,7183,5455,170,6809,6417,3390,1956,7188,577,7526,2203,968,8164,479,8699,7915,507,6393,4632,1597,7534,3604,618,3280,6061,9793,9238,8347,568,9645,2070,5198,6482,5000,9212,6655,5961,7513,1323,3872,6170,3812,4146,2736,67,3151,5548,2781,9679,7564,5043,8587,1893,4531 +5826,3690,6724,2121,9308,6986,8106,6659,2142,1642,7170,2877,5757,6494,8026,6571,8387,9961,6043,9758,9607,6450,8631,8334,7359,5256,8523,2225,7487,1977,9555,8048,5763,2414,4948,4265,2427,8978,8088,8841,9208,9601,5810,9398,8866,9138,4176,5875,7212,3272,6759,5678,7649,4922,5422,1343,8197,3154,3600,687,1028,4579,2084,9467,4492,7262,7296,6538,7657,7134,2077,1505,7332,6890,8964,4879,7603,7400,5973,739 +1861,1613,4879,1884,7334,966,2000,7489,2123,4287,1472,3263,4726,9203,1040,4103,6075,6049,330,9253,4062,4268,1635,9960,577,1320,3195,9628,1030,4092,4979,6474,6393,2799,6967,8687,7724,7392,9927,2085,3200,6466,8702,265,7646,8665,7986,7266,4574,6587,612,2724,704,3191,8323,9523,3002,704,5064,3960,8209,2027,2758,8393,4875,4641,9584,6401,7883,7014,768,443,5490,7506,1852,2005,8850,5776,4487,4269 
+4052,6687,4705,7260,6645,6715,3706,5504,8672,2853,1136,8187,8203,4016,871,1809,1366,4952,9294,5339,6872,2645,6083,7874,3056,5218,7485,8796,7401,3348,2103,426,8572,4163,9171,3176,948,7654,9344,3217,1650,5580,7971,2622,76,2874,880,2034,9929,1546,2659,5811,3754,7096,7436,9694,9960,7415,2164,953,2360,4194,2397,1047,2196,6827,575,784,2675,8821,6802,7972,5996,6699,2134,7577,2887,1412,4349,4380 +4629,2234,6240,8132,7592,3181,6389,1214,266,1910,2451,8784,2790,1127,6932,1447,8986,2492,5476,397,889,3027,7641,5083,5776,4022,185,3364,5701,2442,2840,4160,9525,4828,6602,2614,7447,3711,4505,7745,8034,6514,4907,2605,7753,6958,7270,6936,3006,8968,439,2326,4652,3085,3425,9863,5049,5361,8688,297,7580,8777,7916,6687,8683,7141,306,9569,2384,1500,3346,4601,7329,9040,6097,2727,6314,4501,4974,2829 +8316,4072,2025,6884,3027,1808,5714,7624,7880,8528,4205,8686,7587,3230,1139,7273,6163,6986,3914,9309,1464,9359,4474,7095,2212,7302,2583,9462,7532,6567,1606,4436,8981,5612,6796,4385,5076,2007,6072,3678,8331,1338,3299,8845,4783,8613,4071,1232,6028,2176,3990,2148,3748,103,9453,538,6745,9110,926,3125,473,5970,8728,7072,9062,1404,1317,5139,9862,6496,6062,3338,464,1600,2532,1088,8232,7739,8274,3873 +2341,523,7096,8397,8301,6541,9844,244,4993,2280,7689,4025,4196,5522,7904,6048,2623,9258,2149,9461,6448,8087,7245,1917,8340,7127,8466,5725,6996,3421,5313,512,9164,9837,9794,8369,4185,1488,7210,1524,1016,4620,9435,2478,7765,8035,697,6677,3724,6988,5853,7662,3895,9593,1185,4727,6025,5734,7665,3070,138,8469,6748,6459,561,7935,8646,2378,462,7755,3115,9690,8877,3946,2728,8793,244,6323,8666,4271 +6430,2406,8994,56,1267,3826,9443,7079,7579,5232,6691,3435,6718,5698,4144,7028,592,2627,217,734,6194,8156,9118,58,2640,8069,4127,3285,694,3197,3377,4143,4802,3324,8134,6953,7625,3598,3584,4289,7065,3434,2106,7132,5802,7920,9060,7531,3321,1725,1067,3751,444,5503,6785,7937,6365,4803,198,6266,8177,1470,6390,1606,2904,7555,9834,8667,2033,1723,5167,1666,8546,8152,473,4475,6451,7947,3062,3281 +2810,3042,7759,1741,2275,2609,7676,8640,4117,1958,7500,8048,1757,3954,9270,1971,4796,2912,660,5511,3553,1012,5757,4525,6084,7198,8352,5775,7726,8591,7710,9589,3122,4392,6856,5016,749,2285,3356,7482,9956,7348,2599,8944,495,3462,3578,551,4543,7207,7169,7796,1247,4278,6916,8176,3742,8385,2310,1345,8692,2667,4568,1770,8319,3585,4920,3890,4928,7343,5385,9772,7947,8786,2056,9266,3454,2807,877,2660 +6206,8252,5928,5837,4177,4333,207,7934,5581,9526,8906,1498,8411,2984,5198,5134,2464,8435,8514,8674,3876,599,5327,826,2152,4084,2433,9327,9697,4800,2728,3608,3849,3861,3498,9943,1407,3991,7191,9110,5666,8434,4704,6545,5944,2357,1163,4995,9619,6754,4200,9682,6654,4862,4744,5953,6632,1054,293,9439,8286,2255,696,8709,1533,1844,6441,430,1999,6063,9431,7018,8057,2920,6266,6799,356,3597,4024,6665 +3847,6356,8541,7225,2325,2946,5199,469,5450,7508,2197,9915,8284,7983,6341,3276,3321,16,1321,7608,5015,3362,8491,6968,6818,797,156,2575,706,9516,5344,5457,9210,5051,8099,1617,9951,7663,8253,9683,2670,1261,4710,1068,8753,4799,1228,2621,3275,6188,4699,1791,9518,8701,5932,4275,6011,9877,2933,4182,6059,2930,6687,6682,9771,654,9437,3169,8596,1827,5471,8909,2352,123,4394,3208,8756,5513,6917,2056 +5458,8173,3138,3290,4570,4892,3317,4251,9699,7973,1163,1935,5477,6648,9614,5655,9592,975,9118,2194,7322,8248,8413,3462,8560,1907,7810,6650,7355,2939,4973,6894,3933,3784,3200,2419,9234,4747,2208,2207,1945,2899,1407,6145,8023,3484,5688,7686,2737,3828,3704,9004,5190,9740,8643,8650,5358,4426,1522,1707,3613,9887,6956,2447,2762,833,1449,9489,2573,1080,4167,3456,6809,2466,227,7125,2759,6250,6472,8089 
+3266,7025,9756,3914,1265,9116,7723,9788,6805,5493,2092,8688,6592,9173,4431,4028,6007,7131,4446,4815,3648,6701,759,3312,8355,4485,4187,5188,8746,7759,3528,2177,5243,8379,3838,7233,4607,9187,7216,2190,6967,2920,6082,7910,5354,3609,8958,6949,7731,494,8753,8707,1523,4426,3543,7085,647,6771,9847,646,5049,824,8417,5260,2730,5702,2513,9275,4279,2767,8684,1165,9903,4518,55,9682,8963,6005,2102,6523 +1998,8731,936,1479,5259,7064,4085,91,7745,7136,3773,3810,730,8255,2705,2653,9790,6807,2342,355,9344,2668,3690,2028,9679,8102,574,4318,6481,9175,5423,8062,2867,9657,7553,3442,3920,7430,3945,7639,3714,3392,2525,4995,4850,2867,7951,9667,486,9506,9888,781,8866,1702,3795,90,356,1483,4200,2131,6969,5931,486,6880,4404,1084,5169,4910,6567,8335,4686,5043,2614,3352,2667,4513,6472,7471,5720,1616 +8878,1613,1716,868,1906,2681,564,665,5995,2474,7496,3432,9491,9087,8850,8287,669,823,347,6194,2264,2592,7871,7616,8508,4827,760,2676,4660,4881,7572,3811,9032,939,4384,929,7525,8419,5556,9063,662,8887,7026,8534,3111,1454,2082,7598,5726,6687,9647,7608,73,3014,5063,670,5461,5631,3367,9796,8475,7908,5073,1565,5008,5295,4457,1274,4788,1728,338,600,8415,8535,9351,7750,6887,5845,1741,125 +3637,6489,9634,9464,9055,2413,7824,9517,7532,3577,7050,6186,6980,9365,9782,191,870,2497,8498,2218,2757,5420,6468,586,3320,9230,1034,1393,9886,5072,9391,1178,8464,8042,6869,2075,8275,3601,7715,9470,8786,6475,8373,2159,9237,2066,3264,5000,679,355,3069,4073,494,2308,5512,4334,9438,8786,8637,9774,1169,1949,6594,6072,4270,9158,7916,5752,6794,9391,6301,5842,3285,2141,3898,8027,4310,8821,7079,1307 +8497,6681,4732,7151,7060,5204,9030,7157,833,5014,8723,3207,9796,9286,4913,119,5118,7650,9335,809,3675,2597,5144,3945,5090,8384,187,4102,1260,2445,2792,4422,8389,9290,50,1765,1521,6921,8586,4368,1565,5727,7855,2003,4834,9897,5911,8630,5070,1330,7692,7557,7980,6028,5805,9090,8265,3019,3802,698,9149,5748,1965,9658,4417,5994,5584,8226,2937,272,5743,1278,5698,8736,2595,6475,5342,6596,1149,6920 +8188,8009,9546,6310,8772,2500,9846,6592,6872,3857,1307,8125,7042,1544,6159,2330,643,4604,7899,6848,371,8067,2062,3200,7295,1857,9505,6936,384,2193,2190,301,8535,5503,1462,7380,5114,4824,8833,1763,4974,8711,9262,6698,3999,2645,6937,7747,1128,2933,3556,7943,2885,3122,9105,5447,418,2899,5148,3699,9021,9501,597,4084,175,1621,1,1079,6067,5812,4326,9914,6633,5394,4233,6728,9084,1864,5863,1225 +9935,8793,9117,1825,9542,8246,8437,3331,9128,9675,6086,7075,319,1334,7932,3583,7167,4178,1726,7720,695,8277,7887,6359,5912,1719,2780,8529,1359,2013,4498,8072,1129,9998,1147,8804,9405,6255,1619,2165,7491,1,8882,7378,3337,503,5758,4109,3577,985,3200,7615,8058,5032,1080,6410,6873,5496,1466,2412,9885,5904,4406,3605,8770,4361,6205,9193,1537,9959,214,7260,9566,1685,100,4920,7138,9819,5637,976 +3466,9854,985,1078,7222,8888,5466,5379,3578,4540,6853,8690,3728,6351,7147,3134,6921,9692,857,3307,4998,2172,5783,3931,9417,2541,6299,13,787,2099,9131,9494,896,8600,1643,8419,7248,2660,2609,8579,91,6663,5506,7675,1947,6165,4286,1972,9645,3805,1663,1456,8853,5705,9889,7489,1107,383,4044,2969,3343,152,7805,4980,9929,5033,1737,9953,7197,9158,4071,1324,473,9676,3984,9680,3606,8160,7384,5432 +1005,4512,5186,3953,2164,3372,4097,3247,8697,3022,9896,4101,3871,6791,3219,2742,4630,6967,7829,5991,6134,1197,1414,8923,8787,1394,8852,5019,7768,5147,8004,8825,5062,9625,7988,1110,3992,7984,9966,6516,6251,8270,421,3723,1432,4830,6935,8095,9059,2214,6483,6846,3120,1587,6201,6691,9096,9627,6671,4002,3495,9939,7708,7465,5879,6959,6634,3241,3401,2355,9061,2611,7830,3941,2177,2146,5089,7079,519,6351 
+7280,8586,4261,2831,7217,3141,9994,9940,5462,2189,4005,6942,9848,5350,8060,6665,7519,4324,7684,657,9453,9296,2944,6843,7499,7847,1728,9681,3906,6353,5529,2822,3355,3897,7724,4257,7489,8672,4356,3983,1948,6892,7415,4153,5893,4190,621,1736,4045,9532,7701,3671,1211,1622,3176,4524,9317,7800,5638,6644,6943,5463,3531,2821,1347,5958,3436,1438,2999,994,850,4131,2616,1549,3465,5946,690,9273,6954,7991 +9517,399,3249,2596,7736,2142,1322,968,7350,1614,468,3346,3265,7222,6086,1661,5317,2582,7959,4685,2807,2917,1037,5698,1529,3972,8716,2634,3301,3412,8621,743,8001,4734,888,7744,8092,3671,8941,1487,5658,7099,2781,99,1932,4443,4756,4652,9328,1581,7855,4312,5976,7255,6480,3996,2748,1973,9731,4530,2790,9417,7186,5303,3557,351,7182,9428,1342,9020,7599,1392,8304,2070,9138,7215,2008,9937,1106,7110 +7444,769,9688,632,1571,6820,8743,4338,337,3366,3073,1946,8219,104,4210,6986,249,5061,8693,7960,6546,1004,8857,5997,9352,4338,6105,5008,2556,6518,6694,4345,3727,7956,20,3954,8652,4424,9387,2035,8358,5962,5304,5194,8650,8282,1256,1103,2138,6679,1985,3653,2770,2433,4278,615,2863,1715,242,3790,2636,6998,3088,1671,2239,957,5411,4595,6282,2881,9974,2401,875,7574,2987,4587,3147,6766,9885,2965 +3287,3016,3619,6818,9073,6120,5423,557,2900,2015,8111,3873,1314,4189,1846,4399,7041,7583,2427,2864,3525,5002,2069,748,1948,6015,2684,438,770,8367,1663,7887,7759,1885,157,7770,4520,4878,3857,1137,3525,3050,6276,5569,7649,904,4533,7843,2199,5648,7628,9075,9441,3600,7231,2388,5640,9096,958,3058,584,5899,8150,1181,9616,1098,8162,6819,8171,1519,1140,7665,8801,2632,1299,9192,707,9955,2710,7314 +1772,2963,7578,3541,3095,1488,7026,2634,6015,4633,4370,2762,1650,2174,909,8158,2922,8467,4198,4280,9092,8856,8835,5457,2790,8574,9742,5054,9547,4156,7940,8126,9824,7340,8840,6574,3547,1477,3014,6798,7134,435,9484,9859,3031,4,1502,4133,1738,1807,4825,463,6343,9701,8506,9822,9555,8688,8168,3467,3234,6318,1787,5591,419,6593,7974,8486,9861,6381,6758,194,3061,4315,2863,4665,3789,2201,1492,4416 +126,8927,6608,5682,8986,6867,1715,6076,3159,788,3140,4744,830,9253,5812,5021,7616,8534,1546,9590,1101,9012,9821,8132,7857,4086,1069,7491,2988,1579,2442,4321,2149,7642,6108,250,6086,3167,24,9528,7663,2685,1220,9196,1397,5776,1577,1730,5481,977,6115,199,6326,2183,3767,5928,5586,7561,663,8649,9688,949,5913,9160,1870,5764,9887,4477,6703,1413,4995,5494,7131,2192,8969,7138,3997,8697,646,1028 +8074,1731,8245,624,4601,8706,155,8891,309,2552,8208,8452,2954,3124,3469,4246,3352,1105,4509,8677,9901,4416,8191,9283,5625,7120,2952,8881,7693,830,4580,8228,9459,8611,4499,1179,4988,1394,550,2336,6089,6872,269,7213,1848,917,6672,4890,656,1478,6536,3165,4743,4990,1176,6211,7207,5284,9730,4738,1549,4986,4942,8645,3698,9429,1439,2175,6549,3058,6513,1574,6988,8333,3406,5245,5431,7140,7085,6407 +7845,4694,2530,8249,290,5948,5509,1588,5940,4495,5866,5021,4626,3979,3296,7589,4854,1998,5627,3926,8346,6512,9608,1918,7070,4747,4182,2858,2766,4606,6269,4107,8982,8568,9053,4244,5604,102,2756,727,5887,2566,7922,44,5986,621,1202,374,6988,4130,3627,6744,9443,4568,1398,8679,397,3928,9159,367,2917,6127,5788,3304,8129,911,2669,1463,9749,264,4478,8940,1109,7309,2462,117,4692,7724,225,2312 +4164,3637,2000,941,8903,39,3443,7172,1031,3687,4901,8082,4945,4515,7204,9310,9349,9535,9940,218,1788,9245,2237,1541,5670,6538,6047,5553,9807,8101,1925,8714,445,8332,7309,6830,5786,5736,7306,2710,3034,1838,7969,6318,7912,2584,2080,7437,6705,2254,7428,820,782,9861,7596,3842,3631,8063,5240,6666,394,4565,7865,4895,9890,6028,6117,4724,9156,4473,4552,602,470,6191,4927,5387,884,3146,1978,3000 
+4258,6880,1696,3582,5793,4923,2119,1155,9056,9698,6603,3768,5514,9927,9609,6166,6566,4536,4985,4934,8076,9062,6741,6163,7399,4562,2337,5600,2919,9012,8459,1308,6072,1225,9306,8818,5886,7243,7365,8792,6007,9256,6699,7171,4230,7002,8720,7839,4533,1671,478,7774,1607,2317,5437,4705,7886,4760,6760,7271,3081,2997,3088,7675,6208,3101,6821,6840,122,9633,4900,2067,8546,4549,2091,7188,5605,8599,6758,5229 +7854,5243,9155,3556,8812,7047,2202,1541,5993,4600,4760,713,434,7911,7426,7414,8729,322,803,7960,7563,4908,6285,6291,736,3389,9339,4132,8701,7534,5287,3646,592,3065,7582,2592,8755,6068,8597,1982,5782,1894,2900,6236,4039,6569,3037,5837,7698,700,7815,2491,7272,5878,3083,6778,6639,3589,5010,8313,2581,6617,5869,8402,6808,2951,2321,5195,497,2190,6187,1342,1316,4453,7740,4154,2959,1781,1482,8256 +7178,2046,4419,744,8312,5356,6855,8839,319,2962,5662,47,6307,8662,68,4813,567,2712,9931,1678,3101,8227,6533,4933,6656,92,5846,4780,6256,6361,4323,9985,1231,2175,7178,3034,9744,6155,9165,7787,5836,9318,7860,9644,8941,6480,9443,8188,5928,161,6979,2352,5628,6991,1198,8067,5867,6620,3778,8426,2994,3122,3124,6335,3918,8897,2655,9670,634,1088,1576,8935,7255,474,8166,7417,9547,2886,5560,3842 +6957,3111,26,7530,7143,1295,1744,6057,3009,1854,8098,5405,2234,4874,9447,2620,9303,27,7410,969,40,2966,5648,7596,8637,4238,3143,3679,7187,690,9980,7085,7714,9373,5632,7526,6707,3951,9734,4216,2146,3602,5371,6029,3039,4433,4855,4151,1449,3376,8009,7240,7027,4602,2947,9081,4045,8424,9352,8742,923,2705,4266,3232,2264,6761,363,2651,3383,7770,6730,7856,7340,9679,2158,610,4471,4608,910,6241 +4417,6756,1013,8797,658,8809,5032,8703,7541,846,3357,2920,9817,1745,9980,7593,4667,3087,779,3218,6233,5568,4296,2289,2654,7898,5021,9461,5593,8214,9173,4203,2271,7980,2983,5952,9992,8399,3468,1776,3188,9314,1720,6523,2933,621,8685,5483,8986,6163,3444,9539,4320,155,3992,2828,2150,6071,524,2895,5468,8063,1210,3348,9071,4862,483,9017,4097,6186,9815,3610,5048,1644,1003,9865,9332,2145,1944,2213 +9284,3803,4920,1927,6706,4344,7383,4786,9890,2010,5228,1224,3158,6967,8580,8990,8883,5213,76,8306,2031,4980,5639,9519,7184,5645,7769,3259,8077,9130,1317,3096,9624,3818,1770,695,2454,947,6029,3474,9938,3527,5696,4760,7724,7738,2848,6442,5767,6845,8323,4131,2859,7595,2500,4815,3660,9130,8580,7016,8231,4391,8369,3444,4069,4021,556,6154,627,2778,1496,4206,6356,8434,8491,3816,8231,3190,5575,1015 +3787,7572,1788,6803,5641,6844,1961,4811,8535,9914,9999,1450,8857,738,4662,8569,6679,2225,7839,8618,286,2648,5342,2294,3205,4546,176,8705,3741,6134,8324,8021,7004,5205,7032,6637,9442,5539,5584,4819,5874,5807,8589,6871,9016,983,1758,3786,1519,6241,185,8398,495,3370,9133,3051,4549,9674,7311,9738,3316,9383,2658,2776,9481,7558,619,3943,3324,6491,4933,153,9738,4623,912,3595,7771,7939,1219,4405 +2650,3883,4154,5809,315,7756,4430,1788,4451,1631,6461,7230,6017,5751,138,588,5282,2442,9110,9035,6349,2515,1570,6122,4192,4174,3530,1933,4186,4420,4609,5739,4135,2963,6308,1161,8809,8619,2796,3819,6971,8228,4188,1492,909,8048,2328,6772,8467,7671,9068,2226,7579,6422,7056,8042,3296,2272,3006,2196,7320,3238,3490,3102,37,1293,3212,4767,5041,8773,5794,4456,6174,7279,7054,2835,7053,9088,790,6640 +3101,1057,7057,3826,6077,1025,2955,1224,1114,6729,5902,4698,6239,7203,9423,1804,4417,6686,1426,6941,8071,1029,4985,9010,6122,6597,1622,1574,3513,1684,7086,5505,3244,411,9638,4150,907,9135,829,981,1707,5359,8781,9751,5,9131,3973,7159,1340,6955,7514,7993,6964,8198,1933,2797,877,3993,4453,8020,9349,8646,2779,8679,2961,3547,3374,3510,1129,3568,2241,2625,9138,5974,8206,7669,7678,1833,8700,4480 
+4865,9912,8038,8238,782,3095,8199,1127,4501,7280,2112,2487,3626,2790,9432,1475,6312,8277,4827,2218,5806,7132,8752,1468,7471,6386,739,8762,8323,8120,5169,9078,9058,3370,9560,7987,8585,8531,5347,9312,1058,4271,1159,5286,5404,6925,8606,9204,7361,2415,560,586,4002,2644,1927,2824,768,4409,2942,3345,1002,808,4941,6267,7979,5140,8643,7553,9438,7320,4938,2666,4609,2778,8158,6730,3748,3867,1866,7181 +171,3771,7134,8927,4778,2913,3326,2004,3089,7853,1378,1729,4777,2706,9578,1360,5693,3036,1851,7248,2403,2273,8536,6501,9216,613,9671,7131,7719,6425,773,717,8803,160,1114,7554,7197,753,4513,4322,8499,4533,2609,4226,8710,6627,644,9666,6260,4870,5744,7385,6542,6203,7703,6130,8944,5589,2262,6803,6381,7414,6888,5123,7320,9392,9061,6780,322,8975,7050,5089,1061,2260,3199,1150,1865,5386,9699,6501 +3744,8454,6885,8277,919,1923,4001,6864,7854,5519,2491,6057,8794,9645,1776,5714,9786,9281,7538,6916,3215,395,2501,9618,4835,8846,9708,2813,3303,1794,8309,7176,2206,1602,1838,236,4593,2245,8993,4017,10,8215,6921,5206,4023,5932,6997,7801,262,7640,3107,8275,4938,7822,2425,3223,3886,2105,8700,9526,2088,8662,8034,7004,5710,2124,7164,3574,6630,9980,4242,2901,9471,1491,2117,4562,1130,9086,4117,6698 +2810,2280,2331,1170,4554,4071,8387,1215,2274,9848,6738,1604,7281,8805,439,1298,8318,7834,9426,8603,6092,7944,1309,8828,303,3157,4638,4439,9175,1921,4695,7716,1494,1015,1772,5913,1127,1952,1950,8905,4064,9890,385,9357,7945,5035,7082,5369,4093,6546,5187,5637,2041,8946,1758,7111,6566,1027,1049,5148,7224,7248,296,6169,375,1656,7993,2816,3717,4279,4675,1609,3317,42,6201,3100,3144,163,9530,4531 +7096,6070,1009,4988,3538,5801,7149,3063,2324,2912,7911,7002,4338,7880,2481,7368,3516,2016,7556,2193,1388,3865,8125,4637,4096,8114,750,3144,1938,7002,9343,4095,1392,4220,3455,6969,9647,1321,9048,1996,1640,6626,1788,314,9578,6630,2813,6626,4981,9908,7024,4355,3201,3521,3864,3303,464,1923,595,9801,3391,8366,8084,9374,1041,8807,9085,1892,9431,8317,9016,9221,8574,9981,9240,5395,2009,6310,2854,9255 +8830,3145,2960,9615,8220,6061,3452,2918,6481,9278,2297,3385,6565,7066,7316,5682,107,7646,4466,68,1952,9603,8615,54,7191,791,6833,2560,693,9733,4168,570,9127,9537,1925,8287,5508,4297,8452,8795,6213,7994,2420,4208,524,5915,8602,8330,2651,8547,6156,1812,6271,7991,9407,9804,1553,6866,1128,2119,4691,9711,8315,5879,9935,6900,482,682,4126,1041,428,6247,3720,5882,7526,2582,4327,7725,3503,2631 +2738,9323,721,7434,1453,6294,2957,3786,5722,6019,8685,4386,3066,9057,6860,499,5315,3045,5194,7111,3137,9104,941,586,3066,755,4177,8819,7040,5309,3583,3897,4428,7788,4721,7249,6559,7324,825,7311,3760,6064,6070,9672,4882,584,1365,9739,9331,5783,2624,7889,1604,1303,1555,7125,8312,425,8936,3233,7724,1480,403,7440,1784,1754,4721,1569,652,3893,4574,5692,9730,4813,9844,8291,9199,7101,3391,8914 +6044,2928,9332,3328,8588,447,3830,1176,3523,2705,8365,6136,5442,9049,5526,8575,8869,9031,7280,706,2794,8814,5767,4241,7696,78,6570,556,5083,1426,4502,3336,9518,2292,1885,3740,3153,9348,9331,8051,2759,5407,9028,7840,9255,831,515,2612,9747,7435,8964,4971,2048,4900,5967,8271,1719,9670,2810,6777,1594,6367,6259,8316,3815,1689,6840,9437,4361,822,9619,3065,83,6344,7486,8657,8228,9635,6932,4864 +8478,4777,6334,4678,7476,4963,6735,3096,5860,1405,5127,7269,7793,4738,227,9168,2996,8928,765,733,1276,7677,6258,1528,9558,3329,302,8901,1422,8277,6340,645,9125,8869,5952,141,8141,1816,9635,4025,4184,3093,83,2344,2747,9352,7966,1206,1126,1826,218,7939,2957,2729,810,8752,5247,4174,4038,8884,7899,9567,301,5265,5752,7524,4381,1669,3106,8270,6228,6373,754,2547,4240,2313,5514,3022,1040,9738 
+2265,8192,1763,1369,8469,8789,4836,52,1212,6690,5257,8918,6723,6319,378,4039,2421,8555,8184,9577,1432,7139,8078,5452,9628,7579,4161,7490,5159,8559,1011,81,478,5840,1964,1334,6875,8670,9900,739,1514,8692,522,9316,6955,1345,8132,2277,3193,9773,3923,4177,2183,1236,6747,6575,4874,6003,6409,8187,745,8776,9440,7543,9825,2582,7381,8147,7236,5185,7564,6125,218,7991,6394,391,7659,7456,5128,5294 +2132,8992,8160,5782,4420,3371,3798,5054,552,5631,7546,4716,1332,6486,7892,7441,4370,6231,4579,2121,8615,1145,9391,1524,1385,2400,9437,2454,7896,7467,2928,8400,3299,4025,7458,4703,7206,6358,792,6200,725,4275,4136,7390,5984,4502,7929,5085,8176,4600,119,3568,76,9363,6943,2248,9077,9731,6213,5817,6729,4190,3092,6910,759,2682,8380,1254,9604,3011,9291,5329,9453,9746,2739,6522,3765,5634,1113,5789 +5304,5499,564,2801,679,2653,1783,3608,7359,7797,3284,796,3222,437,7185,6135,8571,2778,7488,5746,678,6140,861,7750,803,9859,9918,2425,3734,2698,9005,4864,9818,6743,2475,132,9486,3825,5472,919,292,4411,7213,7699,6435,9019,6769,1388,802,2124,1345,8493,9487,8558,7061,8777,8833,2427,2238,5409,4957,8503,3171,7622,5779,6145,2417,5873,5563,5693,9574,9491,1937,7384,4563,6842,5432,2751,3406,7981 diff --git a/project_euler/problem_082/sol1.py b/project_euler/problem_082/sol1.py new file mode 100644 index 000000000000..7b50dc887719 --- /dev/null +++ b/project_euler/problem_082/sol1.py @@ -0,0 +1,65 @@ +""" +Project Euler Problem 82: https://projecteuler.net/problem=82 + +The minimal path sum in the 5 by 5 matrix below, by starting in any cell +in the left column and finishing in any cell in the right column, +and only moving up, down, and right, is indicated in red and bold; +the sum is equal to 994. + + 131 673 [234] [103] [18] + [201] [96] [342] 965 150 + 630 803 746 422 111 + 537 699 497 121 956 + 805 732 524 37 331 + +Find the minimal path sum from the left column to the right column in matrix.txt +(https://projecteuler.net/project/resources/p082_matrix.txt) +(right click and "Save Link/Target As..."), +a 31K text file containing an 80 by 80 matrix. 
+""" + +import os + + +def solution(filename: str = "input.txt") -> int: + """ + Returns the minimal path sum in the matrix from the file, by starting in any cell + in the left column and finishing in any cell in the right column, + and only moving up, down, and right + + >>> solution("test_matrix.txt") + 994 + """ + + with open(os.path.join(os.path.dirname(__file__), filename)) as input_file: + matrix = [ + [int(element) for element in line.split(",")] + for line in input_file.readlines() + ] + + rows = len(matrix) + cols = len(matrix[0]) + + minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)] + for i in range(rows): + minimal_path_sums[i][0] = matrix[i][0] + + for j in range(1, cols): + for i in range(rows): + minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j] + + for i in range(1, rows): + minimal_path_sums[i][j] = min( + minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] + ) + + for i in range(rows - 2, -1, -1): + minimal_path_sums[i][j] = min( + minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] + ) + + return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums) + + +if __name__ == "__main__": + print(f"{solution() = }") diff --git a/project_euler/problem_082/test_matrix.txt b/project_euler/problem_082/test_matrix.txt new file mode 100644 index 000000000000..76167d9e7fc1 --- /dev/null +++ b/project_euler/problem_082/test_matrix.txt @@ -0,0 +1,5 @@ +131,673,234,103,18 +201,96,342,965,150 +630,803,746,422,111 +537,699,497,121,956 +805,732,524,37,331 From ee778128bdf8d4d6d386cfdc500f3b3173f56c06 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 2 Mar 2023 07:57:07 +0300 Subject: [PATCH 0772/1543] Reduce the complexity of other/scoring_algorithm.py (#8045) * Increase the --max-complexity threshold in the file .flake8 --- other/scoring_algorithm.py | 57 ++++++++++++++++++++++++++++---------- 1 file changed, 43 insertions(+), 14 deletions(-) diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index 00d87cfc0b73..8e04a8f30dd7 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -23,29 +23,29 @@ """ -def procentual_proximity( - source_data: list[list[float]], weights: list[int] -) -> list[list[float]]: +def get_data(source_data: list[list[float]]) -> list[list[float]]: """ - weights - int list - possible values - 0 / 1 - 0 if lower values have higher weight in the data set - 1 if higher values have higher weight in the data set - - >>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1]) - [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]] + >>> get_data([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]]) + [[20.0, 23.0, 22.0], [60.0, 90.0, 50.0], [2012.0, 2015.0, 2011.0]] """ - - # getting data data_lists: list[list[float]] = [] for data in source_data: for i, el in enumerate(data): if len(data_lists) < i + 1: data_lists.append([]) data_lists[i].append(float(el)) + return data_lists + +def calculate_each_score( + data_lists: list[list[float]], weights: list[int] +) -> list[list[float]]: + """ + >>> calculate_each_score([[20, 23, 22], [60, 90, 50], [2012, 2015, 2011]], + ... 
[0, 0, 1]) + [[1.0, 0.0, 0.33333333333333337], [0.75, 0.0, 1.0], [0.25, 1.0, 0.0]] + """ score_lists: list[list[float]] = [] - # calculating each score for dlist, weight in zip(data_lists, weights): mind = min(dlist) maxd = max(dlist) @@ -72,14 +72,43 @@ def procentual_proximity( score_lists.append(score) + return score_lists + + +def generate_final_scores(score_lists: list[list[float]]) -> list[float]: + """ + >>> generate_final_scores([[1.0, 0.0, 0.33333333333333337], + ... [0.75, 0.0, 1.0], + ... [0.25, 1.0, 0.0]]) + [2.0, 1.0, 1.3333333333333335] + """ # initialize final scores final_scores: list[float] = [0 for i in range(len(score_lists[0]))] - # generate final scores for slist in score_lists: for j, ele in enumerate(slist): final_scores[j] = final_scores[j] + ele + return final_scores + + +def procentual_proximity( + source_data: list[list[float]], weights: list[int] +) -> list[list[float]]: + """ + weights - int list + possible values - 0 / 1 + 0 if lower values have higher weight in the data set + 1 if higher values have higher weight in the data set + + >>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1]) + [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]] + """ + + data_lists = get_data(source_data) + score_lists = calculate_each_score(data_lists, weights) + final_scores = generate_final_scores(score_lists) + # append scores to source data for i, ele in enumerate(final_scores): source_data[i].append(ele) From 9720e6a6cf52e2395e2d7ef3ef6ae91a355d318e Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 2 Mar 2023 19:51:48 +0300 Subject: [PATCH 0773/1543] Add Project Euler problem 117 solution 1 (#6872) Update DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_117/__init__.py | 0 project_euler/problem_117/sol1.py | 53 +++++++++++++++++++++++++++ 3 files changed, 55 insertions(+) create mode 100644 project_euler/problem_117/__init__.py create mode 100644 project_euler/problem_117/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 3d1bc967e4b5..4844841040d9 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -956,6 +956,8 @@ * [Sol1](project_euler/problem_115/sol1.py) * Problem 116 * [Sol1](project_euler/problem_116/sol1.py) + * Problem 117 + * [Sol1](project_euler/problem_117/sol1.py) * Problem 119 * [Sol1](project_euler/problem_119/sol1.py) * Problem 120 diff --git a/project_euler/problem_117/__init__.py b/project_euler/problem_117/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_117/sol1.py b/project_euler/problem_117/sol1.py new file mode 100644 index 000000000000..e8214454fac5 --- /dev/null +++ b/project_euler/problem_117/sol1.py @@ -0,0 +1,53 @@ +""" +Project Euler Problem 117: https://projecteuler.net/problem=117 + +Using a combination of grey square tiles and oblong tiles chosen from: +red tiles (measuring two units), green tiles (measuring three units), +and blue tiles (measuring four units), +it is possible to tile a row measuring five units in length +in exactly fifteen different ways. 
+ + |grey|grey|grey|grey|grey| |red,red|grey|grey|grey| + + |grey|red,red|grey|grey| |grey|grey|red,red|grey| + + |grey|grey|grey|red,red| |red,red|red,red|grey| + + |red,red|grey|red,red| |grey|red,red|red,red| + + |green,green,green|grey|grey| |grey|green,green,green|grey| + + |grey|grey|green,green,green| |red,red|green,green,green| + + |green,green,green|red,red| |blue,blue,blue,blue|grey| + + |grey|blue,blue,blue,blue| + +How many ways can a row measuring fifty units in length be tiled? + +NOTE: This is related to Problem 116 (https://projecteuler.net/problem=116). +""" + + +def solution(length: int = 50) -> int: + """ + Returns the number of ways can a row of the given length be tiled + + >>> solution(5) + 15 + """ + + ways_number = [1] * (length + 1) + + for row_length in range(length + 1): + for tile_length in range(2, 5): + for tile_start in range(row_length - tile_length + 1): + ways_number[row_length] += ways_number[ + row_length - tile_start - tile_length + ] + + return ways_number[length] + + +if __name__ == "__main__": + print(f"{solution() = }") From 41b633a841084acac5a640042d365c985e23b357 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 00:10:39 +0100 Subject: [PATCH 0774/1543] [pre-commit.ci] pre-commit autoupdate (#8168) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.253 → v0.0.254](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.253...v0.0.254) * Rename get_top_billionaires.py to get_top_billionaires.py.disabled * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 1 - ...get_top_billionaires.py => get_top_billionaires.py.disabled} | 0 3 files changed, 1 insertion(+), 2 deletions(-) rename web_programming/{get_top_billionaires.py => get_top_billionaires.py.disabled} (100%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9f27f985bb6a..329407265a5a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -44,7 +44,7 @@ repos: - --py311-plus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.253 + rev: v0.0.254 hooks: - id: ruff args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 4844841040d9..f25b0c6ff4e3 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1167,7 +1167,6 @@ * [Get Amazon Product Data](web_programming/get_amazon_product_data.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) - * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) * [Get User Tweets](web_programming/get_user_tweets.py) * [Giphy](web_programming/giphy.py) diff --git a/web_programming/get_top_billionaires.py b/web_programming/get_top_billionaires.py.disabled similarity index 100% rename from web_programming/get_top_billionaires.py rename to web_programming/get_top_billionaires.py.disabled From 9e28ecca28176254c39bcc791733589c6091422e Mon Sep 17 00:00:00 2001 From: Subhendu Dash <71781104+subhendudash02@users.noreply.github.com> Date: Tue, 7 Mar 2023 21:46:25 +0530 Subject: [PATCH 0775/1543] Add circular convolution (#8158) * add circular 
convolution * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add type hint for __init__ * rounding off final values to 2 and minor changes * add test case for unequal signals * changes in list comprehension and enumeraton --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- electronics/circular_convolution.py | 99 +++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 electronics/circular_convolution.py diff --git a/electronics/circular_convolution.py b/electronics/circular_convolution.py new file mode 100644 index 000000000000..f2e35742e944 --- /dev/null +++ b/electronics/circular_convolution.py @@ -0,0 +1,99 @@ +# https://en.wikipedia.org/wiki/Circular_convolution + +""" +Circular convolution, also known as cyclic convolution, +is a special case of periodic convolution, which is the convolution of two +periodic functions that have the same period. Periodic convolution arises, +for example, in the context of the discrete-time Fourier transform (DTFT). +In particular, the DTFT of the product of two discrete sequences is the periodic +convolution of the DTFTs of the individual sequences. And each DTFT is a periodic +summation of a continuous Fourier transform function. + +Source: https://en.wikipedia.org/wiki/Circular_convolution +""" + +import doctest +from collections import deque + +import numpy as np + + +class CircularConvolution: + """ + This class stores the first and second signal and performs the circular convolution + """ + + def __init__(self) -> None: + """ + First signal and second signal are stored as 1-D array + """ + + self.first_signal = [2, 1, 2, -1] + self.second_signal = [1, 2, 3, 4] + + def circular_convolution(self) -> list[float]: + """ + This function performs the circular convolution of the first and second signal + using matrix method + + Usage: + >>> import circular_convolution as cc + >>> convolution = cc.CircularConvolution() + >>> convolution.circular_convolution() + [10, 10, 6, 14] + + >>> convolution.first_signal = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6] + >>> convolution.second_signal = [0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5] + >>> convolution.circular_convolution() + [5.2, 6.0, 6.48, 6.64, 6.48, 6.0, 5.2, 4.08] + + >>> convolution.first_signal = [-1, 1, 2, -2] + >>> convolution.second_signal = [0.5, 1, -1, 2, 0.75] + >>> convolution.circular_convolution() + [6.25, -3.0, 1.5, -2.0, -2.75] + + >>> convolution.first_signal = [1, -1, 2, 3, -1] + >>> convolution.second_signal = [1, 2, 3] + >>> convolution.circular_convolution() + [8, -2, 3, 4, 11] + + """ + + length_first_signal = len(self.first_signal) + length_second_signal = len(self.second_signal) + + max_length = max(length_first_signal, length_second_signal) + + # create a zero matrix of max_length x max_length + matrix = [[0] * max_length for i in range(max_length)] + + # fills the smaller signal with zeros to make both signals of same length + if length_first_signal < length_second_signal: + self.first_signal += [0] * (max_length - length_first_signal) + elif length_first_signal > length_second_signal: + self.second_signal += [0] * (max_length - length_second_signal) + + """ + Fills the matrix in the following way assuming 'x' is the signal of length 4 + [ + [x[0], x[3], x[2], x[1]], + [x[1], x[0], x[3], x[2]], + [x[2], x[1], x[0], x[3]], + [x[3], x[2], x[1], x[0]] + ] + """ + for i in range(max_length): + rotated_signal = deque(self.second_signal) + 
rotated_signal.rotate(i) + for j, item in enumerate(rotated_signal): + matrix[i][j] += item + + # multiply the matrix with the first signal + final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal)) + + # rounding-off to two decimal places + return [round(i, 2) for i in final_signal] + + +if __name__ == "__main__": + doctest.testmod() From f9cc25221c1521a0da9ee27d6a9bea1f14f4c986 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Fri, 10 Mar 2023 12:48:05 +0300 Subject: [PATCH 0776/1543] Reduce the complexity of backtracking/word_search.py (#8166) * Lower the --max-complexity threshold in the file .flake8 --- backtracking/word_search.py | 112 +++++++++++++++++++----------------- 1 file changed, 60 insertions(+), 52 deletions(-) diff --git a/backtracking/word_search.py b/backtracking/word_search.py index 25d1436be36e..c9d52012b42b 100644 --- a/backtracking/word_search.py +++ b/backtracking/word_search.py @@ -33,6 +33,61 @@ """ +def get_point_key(len_board: int, len_board_column: int, row: int, column: int) -> int: + """ + Returns the hash key of matrix indexes. + + >>> get_point_key(10, 20, 1, 0) + 200 + """ + + return len_board * len_board_column * row + column + + +def exits_word( + board: list[list[str]], + word: str, + row: int, + column: int, + word_index: int, + visited_points_set: set[int], +) -> bool: + """ + Return True if it's possible to search the word suffix + starting from the word_index. + + >>> exits_word([["A"]], "B", 0, 0, 0, set()) + False + """ + + if board[row][column] != word[word_index]: + return False + + if word_index == len(word) - 1: + return True + + traverts_directions = [(0, 1), (0, -1), (-1, 0), (1, 0)] + len_board = len(board) + len_board_column = len(board[0]) + for direction in traverts_directions: + next_i = row + direction[0] + next_j = column + direction[1] + if not (0 <= next_i < len_board and 0 <= next_j < len_board_column): + continue + + key = get_point_key(len_board, len_board_column, next_i, next_j) + if key in visited_points_set: + continue + + visited_points_set.add(key) + if exits_word(board, word, next_i, next_j, word_index + 1, visited_points_set): + return True + + visited_points_set.remove(key) + + return False + + def word_exists(board: list[list[str]], word: str) -> bool: """ >>> word_exists([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], "ABCCED") @@ -77,6 +132,8 @@ def word_exists(board: list[list[str]], word: str) -> bool: board_error_message = ( "The board should be a non empty matrix of single chars strings." ) + + len_board = len(board) if not isinstance(board, list) or len(board) == 0: raise ValueError(board_error_message) @@ -94,61 +151,12 @@ def word_exists(board: list[list[str]], word: str) -> bool: "The word parameter should be a string of length greater than 0." ) - traverts_directions = [(0, 1), (0, -1), (-1, 0), (1, 0)] - len_word = len(word) - len_board = len(board) len_board_column = len(board[0]) - - # Returns the hash key of matrix indexes. - def get_point_key(row: int, column: int) -> int: - """ - >>> len_board=10 - >>> len_board_column=20 - >>> get_point_key(0, 0) - 200 - """ - - return len_board * len_board_column * row + column - - # Return True if it's possible to search the word suffix - # starting from the word_index. 
- def exits_word( - row: int, column: int, word_index: int, visited_points_set: set[int] - ) -> bool: - """ - >>> board=[["A"]] - >>> word="B" - >>> exits_word(0, 0, 0, set()) - False - """ - - if board[row][column] != word[word_index]: - return False - - if word_index == len_word - 1: - return True - - for direction in traverts_directions: - next_i = row + direction[0] - next_j = column + direction[1] - if not (0 <= next_i < len_board and 0 <= next_j < len_board_column): - continue - - key = get_point_key(next_i, next_j) - if key in visited_points_set: - continue - - visited_points_set.add(key) - if exits_word(next_i, next_j, word_index + 1, visited_points_set): - return True - - visited_points_set.remove(key) - - return False - for i in range(len_board): for j in range(len_board_column): - if exits_word(i, j, 0, {get_point_key(i, j)}): + if exits_word( + board, word, i, j, 0, {get_point_key(len_board, len_board_column, i, j)} + ): return True return False From 8959211100ba7a612d42a6e7db4755303b78c5a7 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 13 Mar 2023 23:18:35 +0100 Subject: [PATCH 0777/1543] [pre-commit.ci] pre-commit autoupdate (#8177) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.254 → v0.0.255](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.254...v0.0.255) - [github.com/pre-commit/mirrors-mypy: v1.0.1 → v1.1.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.0.1...v1.1.1) - [github.com/codespell-project/codespell: v2.2.2 → v2.2.4](https://github.com/codespell-project/codespell/compare/v2.2.2...v2.2.4) * updating DIRECTORY.md * Fixes for new version of codespell --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 8 ++++---- DIRECTORY.md | 1 + machine_learning/sequential_minimum_optimization.py | 2 +- physics/lorentz_transformation_four_vector.py | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 329407265a5a..9aa965e42aec 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -44,7 +44,7 @@ repos: - --py311-plus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.254 + rev: v0.0.255 hooks: - id: ruff args: @@ -69,7 +69,7 @@ repos: *flake8-plugins - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.0.1 + rev: v1.1.1 hooks: - id: mypy args: @@ -79,11 +79,11 @@ repos: additional_dependencies: [types-requests] - repo: https://github.com/codespell-project/codespell - rev: v2.2.2 + rev: v2.2.4 hooks: - id: codespell args: - - --ignore-words-list=ans,crate,damon,fo,followings,hist,iff,mater,secant,som,sur,tim,zar + - --ignore-words-list=3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar exclude: | (?x)^( ciphers/prehistoric_men.txt | diff --git a/DIRECTORY.md b/DIRECTORY.md index f25b0c6ff4e3..b2daaaa9c47d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -334,6 +334,7 @@ ## Electronics * [Builtin Voltage](electronics/builtin_voltage.py) * [Carrier Concentration](electronics/carrier_concentration.py) + * [Circular Convolution](electronics/circular_convolution.py) * [Coulombs Law](electronics/coulombs_law.py) * [Electric 
Conductivity](electronics/electric_conductivity.py) * [Electric Power](electronics/electric_power.py) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 37172c8e9bf6..b68bd52f4de9 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -569,7 +569,7 @@ def plot_partition_boundary( """ We can not get the optimum w of our kernel svm model which is different from linear svm. For this reason, we generate randomly distributed points with high desity and - prediced values of these points are calculated by using our tained model. Then we + prediced values of these points are calculated by using our trained model. Then we could use this prediced values to draw contour map. And this contour map can represent svm's partition boundary. """ diff --git a/physics/lorentz_transformation_four_vector.py b/physics/lorentz_transformation_four_vector.py index 64be97245f29..f4fda4dff8cd 100644 --- a/physics/lorentz_transformation_four_vector.py +++ b/physics/lorentz_transformation_four_vector.py @@ -2,7 +2,7 @@ Lorentz transformations describe the transition between two inertial reference frames F and F', each of which is moving in some direction with respect to the other. This code only calculates Lorentz transformations for movement in the x -direction with no spacial rotation (i.e., a Lorentz boost in the x direction). +direction with no spatial rotation (i.e., a Lorentz boost in the x direction). The Lorentz transformations are calculated here as linear transformations of four-vectors [ct, x, y, z] described by Minkowski space. Note that t (time) is multiplied by c (the speed of light) in the first entry of each four-vector. From b797e437aeadcac50556d6606a547dc634cf5329 Mon Sep 17 00:00:00 2001 From: Andrey Date: Tue, 14 Mar 2023 01:31:27 +0100 Subject: [PATCH 0778/1543] Add hashmap implementation (#7967) --- data_structures/hashing/hash_map.py | 162 ++++++++++++++++++ .../hashing/tests/test_hash_map.py | 97 +++++++++++ 2 files changed, 259 insertions(+) create mode 100644 data_structures/hashing/hash_map.py create mode 100644 data_structures/hashing/tests/test_hash_map.py diff --git a/data_structures/hashing/hash_map.py b/data_structures/hashing/hash_map.py new file mode 100644 index 000000000000..1dfcc8bbf906 --- /dev/null +++ b/data_structures/hashing/hash_map.py @@ -0,0 +1,162 @@ +""" +Hash map with open addressing. + +https://en.wikipedia.org/wiki/Hash_table + +Another hash map implementation, with a good explanation. +Modern Dictionaries by Raymond Hettinger +https://www.youtube.com/watch?v=p33CVV29OG8 +""" +from collections.abc import Iterator, MutableMapping +from dataclasses import dataclass +from typing import Generic, TypeVar + +KEY = TypeVar("KEY") +VAL = TypeVar("VAL") + + +@dataclass(frozen=True, slots=True) +class _Item(Generic[KEY, VAL]): + key: KEY + val: VAL + + +class _DeletedItem(_Item): + def __init__(self) -> None: + super().__init__(None, None) + + def __bool__(self) -> bool: + return False + + +_deleted = _DeletedItem() + + +class HashMap(MutableMapping[KEY, VAL]): + """ + Hash map with open addressing. 
+ """ + + def __init__( + self, initial_block_size: int = 8, capacity_factor: float = 0.75 + ) -> None: + self._initial_block_size = initial_block_size + self._buckets: list[_Item | None] = [None] * initial_block_size + assert 0.0 < capacity_factor < 1.0 + self._capacity_factor = capacity_factor + self._len = 0 + + def _get_bucket_index(self, key: KEY) -> int: + return hash(key) % len(self._buckets) + + def _get_next_ind(self, ind: int) -> int: + """ + Get next index. + + Implements linear open addressing. + """ + return (ind + 1) % len(self._buckets) + + def _try_set(self, ind: int, key: KEY, val: VAL) -> bool: + """ + Try to add value to the bucket. + + If bucket is empty or key is the same, does insert and return True. + + If bucket has another key or deleted placeholder, + that means that we need to check next bucket. + """ + stored = self._buckets[ind] + if not stored: + self._buckets[ind] = _Item(key, val) + self._len += 1 + return True + elif stored.key == key: + self._buckets[ind] = _Item(key, val) + return True + else: + return False + + def _is_full(self) -> bool: + """ + Return true if we have reached safe capacity. + + So we need to increase the number of buckets to avoid collisions. + """ + limit = len(self._buckets) * self._capacity_factor + return len(self) >= int(limit) + + def _is_sparse(self) -> bool: + """Return true if we need twice fewer buckets when we have now.""" + if len(self._buckets) <= self._initial_block_size: + return False + limit = len(self._buckets) * self._capacity_factor / 2 + return len(self) < limit + + def _resize(self, new_size: int) -> None: + old_buckets = self._buckets + self._buckets = [None] * new_size + self._len = 0 + for item in old_buckets: + if item: + self._add_item(item.key, item.val) + + def _size_up(self) -> None: + self._resize(len(self._buckets) * 2) + + def _size_down(self) -> None: + self._resize(len(self._buckets) // 2) + + def _iterate_buckets(self, key: KEY) -> Iterator[int]: + ind = self._get_bucket_index(key) + for _ in range(len(self._buckets)): + yield ind + ind = self._get_next_ind(ind) + + def _add_item(self, key: KEY, val: VAL) -> None: + for ind in self._iterate_buckets(key): + if self._try_set(ind, key, val): + break + + def __setitem__(self, key: KEY, val: VAL) -> None: + if self._is_full(): + self._size_up() + + self._add_item(key, val) + + def __delitem__(self, key: KEY) -> None: + for ind in self._iterate_buckets(key): + item = self._buckets[ind] + if item is None: + raise KeyError(key) + if item is _deleted: + continue + if item.key == key: + self._buckets[ind] = _deleted + self._len -= 1 + break + if self._is_sparse(): + self._size_down() + + def __getitem__(self, key: KEY) -> VAL: + for ind in self._iterate_buckets(key): + item = self._buckets[ind] + if item is None: + break + if item is _deleted: + continue + if item.key == key: + return item.val + raise KeyError(key) + + def __len__(self) -> int: + return self._len + + def __iter__(self) -> Iterator[KEY]: + yield from (item.key for item in self._buckets if item) + + def __repr__(self) -> str: + val_string = " ,".join( + f"{item.key}: {item.val}" for item in self._buckets if item + ) + return f"HashMap({val_string})" diff --git a/data_structures/hashing/tests/test_hash_map.py b/data_structures/hashing/tests/test_hash_map.py new file mode 100644 index 000000000000..929e67311996 --- /dev/null +++ b/data_structures/hashing/tests/test_hash_map.py @@ -0,0 +1,97 @@ +from operator import delitem, getitem, setitem + +import pytest + +from data_structures.hashing.hash_map 
import HashMap + + +def _get(k): + return getitem, k + + +def _set(k, v): + return setitem, k, v + + +def _del(k): + return delitem, k + + +def _run_operation(obj, fun, *args): + try: + return fun(obj, *args), None + except Exception as e: + return None, e + + +_add_items = ( + _set("key_a", "val_a"), + _set("key_b", "val_b"), +) + +_overwrite_items = [ + _set("key_a", "val_a"), + _set("key_a", "val_b"), +] + +_delete_items = [ + _set("key_a", "val_a"), + _set("key_b", "val_b"), + _del("key_a"), + _del("key_b"), + _set("key_a", "val_a"), + _del("key_a"), +] + +_access_absent_items = [ + _get("key_a"), + _del("key_a"), + _set("key_a", "val_a"), + _del("key_a"), + _del("key_a"), + _get("key_a"), +] + +_add_with_resize_up = [ + *[_set(x, x) for x in range(5)], # guaranteed upsize +] + +_add_with_resize_down = [ + *[_set(x, x) for x in range(5)], # guaranteed upsize + *[_del(x) for x in range(5)], + _set("key_a", "val_b"), +] + + +@pytest.mark.parametrize( + "operations", + ( + pytest.param(_add_items, id="add items"), + pytest.param(_overwrite_items, id="overwrite items"), + pytest.param(_delete_items, id="delete items"), + pytest.param(_access_absent_items, id="access absent items"), + pytest.param(_add_with_resize_up, id="add with resize up"), + pytest.param(_add_with_resize_down, id="add with resize down"), + ), +) +def test_hash_map_is_the_same_as_dict(operations): + my = HashMap(initial_block_size=4) + py = {} + for _, (fun, *args) in enumerate(operations): + my_res, my_exc = _run_operation(my, fun, *args) + py_res, py_exc = _run_operation(py, fun, *args) + assert my_res == py_res + assert str(my_exc) == str(py_exc) + assert set(py) == set(my) + assert len(py) == len(my) + assert set(my.items()) == set(py.items()) + + +def test_no_new_methods_was_added_to_api(): + def is_public(name: str) -> bool: + return not name.startswith("_") + + dict_public_names = {name for name in dir({}) if is_public(name)} + hash_public_names = {name for name in dir(HashMap()) if is_public(name)} + + assert dict_public_names > hash_public_names From 9701e459e884e883fc720277452ec592eae305d0 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 14 Mar 2023 08:39:36 +0300 Subject: [PATCH 0779/1543] Add Project Euler problem 100 solution 1 (#8175) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 ++ project_euler/problem_100/__init__.py | 0 project_euler/problem_100/sol1.py | 48 +++++++++++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 project_euler/problem_100/__init__.py create mode 100644 project_euler/problem_100/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index b2daaaa9c47d..e1ce44eedce1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -937,6 +937,8 @@ * [Sol1](project_euler/problem_097/sol1.py) * Problem 099 * [Sol1](project_euler/problem_099/sol1.py) + * Problem 100 + * [Sol1](project_euler/problem_100/sol1.py) * Problem 101 * [Sol1](project_euler/problem_101/sol1.py) * Problem 102 diff --git a/project_euler/problem_100/__init__.py b/project_euler/problem_100/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_100/sol1.py b/project_euler/problem_100/sol1.py new file mode 100644 index 000000000000..367378e7ab17 --- /dev/null +++ b/project_euler/problem_100/sol1.py @@ -0,0 +1,48 @@ +""" +Project Euler Problem 100: https://projecteuler.net/problem=100 + +If a box contains twenty-one coloured discs, 
composed of fifteen blue discs and +six red discs, and two discs were taken at random, it can be seen that +the probability of taking two blue discs, P(BB) = (15/21) x (14/20) = 1/2. + +The next such arrangement, for which there is exactly 50% chance of taking two blue +discs at random, is a box containing eighty-five blue discs and thirty-five red discs. + +By finding the first arrangement to contain over 10^12 = 1,000,000,000,000 discs +in total, determine the number of blue discs that the box would contain. +""" + + +def solution(min_total: int = 10**12) -> int: + """ + Returns the number of blue discs for the first arrangement to contain + over min_total discs in total + + >>> solution(2) + 3 + + >>> solution(4) + 15 + + >>> solution(21) + 85 + """ + + prev_numerator = 1 + prev_denominator = 0 + + numerator = 1 + denominator = 1 + + while numerator <= 2 * min_total - 1: + prev_numerator += 2 * numerator + numerator += 2 * prev_numerator + + prev_denominator += 2 * denominator + denominator += 2 * prev_denominator + + return (denominator + 1) // 2 + + +if __name__ == "__main__": + print(f"{solution() = }") From 47b3c729826e864fb1d0a30b03cf95fa2adae591 Mon Sep 17 00:00:00 2001 From: David Leal Date: Mon, 13 Mar 2023 23:46:52 -0600 Subject: [PATCH 0780/1543] docs: add the other/miscellaneous form (#8163) Co-authored-by: Christian Clauss Co-authored-by: Dhruv Manilawala --- .github/ISSUE_TEMPLATE/other.yml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/other.yml diff --git a/.github/ISSUE_TEMPLATE/other.yml b/.github/ISSUE_TEMPLATE/other.yml new file mode 100644 index 000000000000..44d6ff541506 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/other.yml @@ -0,0 +1,19 @@ +name: Other +description: Use this for any other issues. PLEASE do not create blank issues +labels: ["awaiting triage"] +body: + - type: textarea + id: issuedescription + attributes: + label: What would you like to share? + description: Provide a clear and concise explanation of your issue. + validations: + required: true + + - type: textarea + id: extrainfo + attributes: + label: Additional information + description: Is there anything else we should know about this issue? 
+ validations: + required: false From adc3ccdabede375df5cff62c3c8f06d8a191a803 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 15 Mar 2023 15:56:03 +0300 Subject: [PATCH 0781/1543] Add Project Euler problem 131 solution 1 (#8179) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 5 +++ project_euler/problem_131/__init__.py | 0 project_euler/problem_131/sol1.py | 56 +++++++++++++++++++++++++++ 3 files changed, 61 insertions(+) create mode 100644 project_euler/problem_131/__init__.py create mode 100644 project_euler/problem_131/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index e1ce44eedce1..1d3177801a2c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -196,11 +196,14 @@ * [Disjoint Set](data_structures/disjoint_set/disjoint_set.py) * Hashing * [Double Hash](data_structures/hashing/double_hash.py) + * [Hash Map](data_structures/hashing/hash_map.py) * [Hash Table](data_structures/hashing/hash_table.py) * [Hash Table With Linked List](data_structures/hashing/hash_table_with_linked_list.py) * Number Theory * [Prime Numbers](data_structures/hashing/number_theory/prime_numbers.py) * [Quadratic Probing](data_structures/hashing/quadratic_probing.py) + * Tests + * [Test Hash Map](data_structures/hashing/tests/test_hash_map.py) * Heap * [Binomial Heap](data_structures/heap/binomial_heap.py) * [Heap](data_structures/heap/heap.py) @@ -973,6 +976,8 @@ * [Sol1](project_euler/problem_125/sol1.py) * Problem 129 * [Sol1](project_euler/problem_129/sol1.py) + * Problem 131 + * [Sol1](project_euler/problem_131/sol1.py) * Problem 135 * [Sol1](project_euler/problem_135/sol1.py) * Problem 144 diff --git a/project_euler/problem_131/__init__.py b/project_euler/problem_131/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_131/sol1.py b/project_euler/problem_131/sol1.py new file mode 100644 index 000000000000..f5302aac8644 --- /dev/null +++ b/project_euler/problem_131/sol1.py @@ -0,0 +1,56 @@ +""" +Project Euler Problem 131: https://projecteuler.net/problem=131 + +There are some prime values, p, for which there exists a positive integer, n, +such that the expression n^3 + n^2p is a perfect cube. + +For example, when p = 19, 8^3 + 8^2 x 19 = 12^3. + +What is perhaps most surprising is that for each prime with this property +the value of n is unique, and there are only four such primes below one-hundred. + +How many primes below one million have this remarkable property? 
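Solution outline: n^3 + n^2*p = n^2 x (n + p) is a perfect cube when n is itself a cube, n = k^3, and n + p is the next cube, (k + 1)^3, because then n^2 x (n + p) = (k^2 x (k + 1))^3; these turn out to be the only solutions, so every qualifying prime must be a difference of consecutive cubes, p = (k + 1)^3 - k^3 = 3k^2 + 3k + 1. For p = 19 this gives k = 2 and n = 8, matching 8^3 + 8^2 x 19 = 12^3. The search below therefore tests only candidates of this form, stepping from one candidate to the next by 6 x (k + 1).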
+""" + +from math import isqrt + + +def is_prime(number: int) -> bool: + """ + Determines whether number is prime + + >>> is_prime(3) + True + + >>> is_prime(4) + False + """ + + for divisor in range(2, isqrt(number) + 1): + if number % divisor == 0: + return False + return True + + +def solution(max_prime: int = 10**6) -> int: + """ + Returns number of primes below max_prime with the property + + >>> solution(100) + 4 + """ + + primes_count = 0 + cube_index = 1 + prime_candidate = 7 + while prime_candidate < max_prime: + primes_count += is_prime(prime_candidate) + + cube_index += 1 + prime_candidate += 6 * cube_index + + return primes_count + + +if __name__ == "__main__": + print(f"{solution() = }") From c96241b5a5052af466894ef90c7a7c749ba872eb Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 15 Mar 2023 13:58:25 +0100 Subject: [PATCH 0782/1543] Replace bandit, flake8, isort, and pyupgrade with ruff (#8178) * Replace bandit, flake8, isort, and pyupgrade with ruff * Comment on ruff rules * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .flake8 | 10 --- .github/workflows/ruff.yml | 16 ++++ .pre-commit-config.yaml | 78 +++++-------------- arithmetic_analysis/newton_raphson.py | 2 +- arithmetic_analysis/newton_raphson_new.py | 2 +- data_structures/heap/heap_generic.py | 1 - dynamic_programming/min_distance_up_bottom.py | 9 +-- dynamic_programming/minimum_tickets_cost.py | 4 +- dynamic_programming/word_break.py | 4 +- hashes/sha1.py | 12 +-- machine_learning/support_vector_machines.py | 4 +- maths/eulers_totient.py | 34 ++++---- maths/fibonacci.py | 4 +- maths/pythagoras.py | 6 +- other/quine.py | 1 + project_euler/problem_075/sol1.py | 3 +- pyproject.toml | 59 ++++++++++++-- sorts/external_sort.py | 2 +- strings/check_anagrams.py | 3 +- strings/word_occurrence.py | 3 +- web_programming/currency_converter.py | 2 +- 21 files changed, 127 insertions(+), 132 deletions(-) delete mode 100644 .flake8 create mode 100644 .github/workflows/ruff.yml diff --git a/.flake8 b/.flake8 deleted file mode 100644 index b68ee8533a61..000000000000 --- a/.flake8 +++ /dev/null @@ -1,10 +0,0 @@ -[flake8] -max-line-length = 88 -# max-complexity should be 10 -max-complexity = 19 -extend-ignore = - # Formatting style for `black` - # E203 is whitespace before ':' - E203, - # W503 is line break occurred before a binary operator - W503 diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml new file mode 100644 index 000000000000..ca2d5be47327 --- /dev/null +++ b/.github/workflows/ruff.yml @@ -0,0 +1,16 @@ +# https://beta.ruff.rs +name: ruff +on: + push: + branches: + - master + pull_request: + branches: + - master +jobs: + ruff: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - run: pip install --user ruff + - run: ruff --format=github . 
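The new workflow above simply runs `ruff --format=github .` over the whole repository on every push and pull request. A minimal local equivalent, assuming ruff has already been installed (for example with `python3 -m pip install ruff`), could be a small wrapper like the sketch below; the wrapper function and its name are illustrative and not part of the repository:

import subprocess
import sys


def run_ruff(path: str = ".") -> int:
    # Mirror the CI step: lint `path` with GitHub-style annotations and
    # return ruff's exit code (non-zero when any rule is violated).
    completed = subprocess.run(["ruff", "--format=github", path], check=False)
    return completed.returncode


if __name__ == "__main__":
    sys.exit(run_ruff())

Running this (or the plain `ruff .` command) before pushing should surface the same findings the workflow reports in CI, provided the locally installed ruff version matches the one pinned for pre-commit.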
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9aa965e42aec..82aad6c65a9b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,6 +3,7 @@ repos: rev: v4.4.0 hooks: - id: check-executables-have-shebangs + - id: check-toml - id: check-yaml - id: end-of-file-fixer types: [python] @@ -14,60 +15,41 @@ repos: hooks: - id: auto-walrus + - repo: https://github.com/charliermarsh/ruff-pre-commit + rev: v0.0.255 + hooks: + - id: ruff + - repo: https://github.com/psf/black rev: 23.1.0 hooks: - id: black - - repo: https://github.com/PyCQA/isort - rev: 5.12.0 + - repo: https://github.com/codespell-project/codespell + rev: v2.2.4 hooks: - - id: isort - args: - - --profile=black + - id: codespell + additional_dependencies: + - tomli - repo: https://github.com/tox-dev/pyproject-fmt rev: "0.9.2" hooks: - id: pyproject-fmt + - repo: local + hooks: + - id: validate-filenames + name: Validate filenames + entry: ./scripts/validate_filenames.py + language: script + pass_filenames: false + - repo: https://github.com/abravalheri/validate-pyproject rev: v0.12.1 hooks: - id: validate-pyproject - - repo: https://github.com/asottile/pyupgrade - rev: v3.3.1 - hooks: - - id: pyupgrade - args: - - --py311-plus - - - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.255 - hooks: - - id: ruff - args: - - --ignore=E741 - - - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 - hooks: - - id: flake8 # See .flake8 for args - additional_dependencies: &flake8-plugins - - flake8-bugbear - - flake8-builtins - # - flake8-broken-line - - flake8-comprehensions - - pep8-naming - - - repo: https://github.com/asottile/yesqa - rev: v1.4.0 - hooks: - - id: yesqa - additional_dependencies: - *flake8-plugins - - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.1.1 hooks: @@ -77,25 +59,3 @@ repos: - --install-types # See mirrors-mypy README.md - --non-interactive additional_dependencies: [types-requests] - - - repo: https://github.com/codespell-project/codespell - rev: v2.2.4 - hooks: - - id: codespell - args: - - --ignore-words-list=3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar - exclude: | - (?x)^( - ciphers/prehistoric_men.txt | - strings/dictionary.txt | - strings/words.txt | - project_euler/problem_022/p022_names.txt - )$ - - - repo: local - hooks: - - id: validate-filenames - name: Validate filenames - entry: ./scripts/validate_filenames.py - language: script - pass_filenames: false diff --git a/arithmetic_analysis/newton_raphson.py b/arithmetic_analysis/newton_raphson.py index 86ff9d350dde..aee2f07e5743 100644 --- a/arithmetic_analysis/newton_raphson.py +++ b/arithmetic_analysis/newton_raphson.py @@ -5,7 +5,7 @@ from __future__ import annotations from decimal import Decimal -from math import * # noqa: F401, F403 +from math import * # noqa: F403 from sympy import diff diff --git a/arithmetic_analysis/newton_raphson_new.py b/arithmetic_analysis/newton_raphson_new.py index 472cb5b5ac54..f61841e2eb84 100644 --- a/arithmetic_analysis/newton_raphson_new.py +++ b/arithmetic_analysis/newton_raphson_new.py @@ -8,7 +8,7 @@ # Newton's Method - https://en.wikipedia.org/wiki/Newton's_method from sympy import diff, lambdify, symbols -from sympy.functions import * # noqa: F401, F403 +from sympy.functions import * # noqa: F403 def newton_raphson( diff --git a/data_structures/heap/heap_generic.py b/data_structures/heap/heap_generic.py index b4d7019f41f9..ee92149e25a9 100644 --- a/data_structures/heap/heap_generic.py +++ 
b/data_structures/heap/heap_generic.py @@ -166,7 +166,6 @@ def test_heap() -> None: >>> h.get_top() [9, -40] """ - pass if __name__ == "__main__": diff --git a/dynamic_programming/min_distance_up_bottom.py b/dynamic_programming/min_distance_up_bottom.py index 49c361f24d45..4870c7ef4499 100644 --- a/dynamic_programming/min_distance_up_bottom.py +++ b/dynamic_programming/min_distance_up_bottom.py @@ -6,13 +6,13 @@ The aim is to demonstate up bottom approach for solving the task. The implementation was tested on the leetcode: https://leetcode.com/problems/edit-distance/ -""" -""" Levinstein distance Dynamic Programming: up -> down. """ +import functools + def min_distance_up_bottom(word1: str, word2: str) -> int: """ @@ -25,13 +25,10 @@ def min_distance_up_bottom(word1: str, word2: str) -> int: >>> min_distance_up_bottom("zooicoarchaeologist", "zoologist") 10 """ - - from functools import lru_cache - len_word1 = len(word1) len_word2 = len(word2) - @lru_cache(maxsize=None) + @functools.cache def min_distance(index1: int, index2: int) -> int: # if first word index is overflow - delete all from the second word if index1 >= len_word1: diff --git a/dynamic_programming/minimum_tickets_cost.py b/dynamic_programming/minimum_tickets_cost.py index d07056d9217f..6790c21f16ed 100644 --- a/dynamic_programming/minimum_tickets_cost.py +++ b/dynamic_programming/minimum_tickets_cost.py @@ -22,7 +22,7 @@ Dynamic Programming: up -> down. """ -from functools import lru_cache +import functools def mincost_tickets(days: list[int], costs: list[int]) -> int: @@ -106,7 +106,7 @@ def mincost_tickets(days: list[int], costs: list[int]) -> int: days_set = set(days) - @lru_cache(maxsize=None) + @functools.cache def dynamic_programming(index: int) -> int: if index > 365: return 0 diff --git a/dynamic_programming/word_break.py b/dynamic_programming/word_break.py index 642ea0edf40d..4d7ac869080c 100644 --- a/dynamic_programming/word_break.py +++ b/dynamic_programming/word_break.py @@ -20,7 +20,7 @@ Space: O(n) """ -from functools import lru_cache +import functools from typing import Any @@ -80,7 +80,7 @@ def word_break(string: str, words: list[str]) -> bool: len_string = len(string) # Dynamic programming method - @lru_cache(maxsize=None) + @functools.cache def is_breakable(index: int) -> bool: """ >>> string = 'a' diff --git a/hashes/sha1.py b/hashes/sha1.py index b19e0cfafea3..9f0437f208fa 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -26,7 +26,6 @@ import argparse import hashlib # hashlib is only used inside the Test class import struct -import unittest class SHA1Hash: @@ -128,14 +127,9 @@ def final_hash(self): return "%08x%08x%08x%08x%08x" % tuple(self.h) -class SHA1HashTest(unittest.TestCase): - """ - Test class for the SHA1Hash class. 
Inherits the TestCase class from unittest - """ - - def testMatchHashes(self): # noqa: N802 - msg = bytes("Test String", "utf-8") - self.assertEqual(SHA1Hash(msg).final_hash(), hashlib.sha1(msg).hexdigest()) +def test_sha1_hash(): + msg = b"Test String" + assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest() # noqa: S324 def main(): diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py index caec10175c50..df854cc850b1 100644 --- a/machine_learning/support_vector_machines.py +++ b/machine_learning/support_vector_machines.py @@ -56,7 +56,7 @@ def __init__( *, regularization: float = np.inf, kernel: str = "linear", - gamma: float = 0, + gamma: float = 0.0, ) -> None: self.regularization = regularization self.gamma = gamma @@ -65,7 +65,7 @@ def __init__( elif kernel == "rbf": if self.gamma == 0: raise ValueError("rbf kernel requires gamma") - if not (isinstance(self.gamma, float) or isinstance(self.gamma, int)): + if not isinstance(self.gamma, (float, int)): raise ValueError("gamma must be float or int") if not self.gamma > 0: raise ValueError("gamma must be > 0") diff --git a/maths/eulers_totient.py b/maths/eulers_totient.py index 6a35e69bde0b..a156647037b4 100644 --- a/maths/eulers_totient.py +++ b/maths/eulers_totient.py @@ -1,5 +1,20 @@ # Eulers Totient function finds the number of relative primes of a number n from 1 to n def totient(n: int) -> list: + """ + >>> n = 10 + >>> totient_calculation = totient(n) + >>> for i in range(1, n): + ... print(f"{i} has {totient_calculation[i]} relative primes.") + 1 has 0 relative primes. + 2 has 1 relative primes. + 3 has 2 relative primes. + 4 has 2 relative primes. + 5 has 4 relative primes. + 6 has 2 relative primes. + 7 has 6 relative primes. + 8 has 4 relative primes. + 9 has 6 relative primes. + """ is_prime = [True for i in range(n + 1)] totients = [i - 1 for i in range(n + 1)] primes = [] @@ -20,25 +35,6 @@ def totient(n: int) -> list: return totients -def test_totient() -> None: - """ - >>> n = 10 - >>> totient_calculation = totient(n) - >>> for i in range(1, n): - ... print(f"{i} has {totient_calculation[i]} relative primes.") - 1 has 0 relative primes. - 2 has 1 relative primes. - 3 has 2 relative primes. - 4 has 2 relative primes. - 5 has 4 relative primes. - 6 has 2 relative primes. - 7 has 6 relative primes. - 8 has 4 relative primes. - 9 has 6 relative primes. 
- """ - pass - - if __name__ == "__main__": import doctest diff --git a/maths/fibonacci.py b/maths/fibonacci.py index d58c9fc68c67..e810add69dc7 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -16,7 +16,7 @@ fib_binet runtime: 0.0174 ms """ -from functools import lru_cache +import functools from math import sqrt from time import time @@ -110,7 +110,7 @@ def fib_recursive_cached(n: int) -> list[int]: Exception: n is negative """ - @lru_cache(maxsize=None) + @functools.cache def fib_recursive_term(i: int) -> int: """ Calculates the i-th (0-indexed) Fibonacci number using recursion diff --git a/maths/pythagoras.py b/maths/pythagoras.py index 69a17731a0fd..7770e981d44d 100644 --- a/maths/pythagoras.py +++ b/maths/pythagoras.py @@ -14,17 +14,13 @@ def __repr__(self) -> str: def distance(a: Point, b: Point) -> float: - return math.sqrt(abs((b.x - a.x) ** 2 + (b.y - a.y) ** 2 + (b.z - a.z) ** 2)) - - -def test_distance() -> None: """ >>> point1 = Point(2, -1, 7) >>> point2 = Point(1, -3, 5) >>> print(f"Distance from {point1} to {point2} is {distance(point1, point2)}") Distance from Point(2, -1, 7) to Point(1, -3, 5) is 3.0 """ - pass + return math.sqrt(abs((b.x - a.x) ** 2 + (b.y - a.y) ** 2 + (b.z - a.z) ** 2)) if __name__ == "__main__": diff --git a/other/quine.py b/other/quine.py index 01e03bbb02cb..500a351d38dc 100644 --- a/other/quine.py +++ b/other/quine.py @@ -1,4 +1,5 @@ #!/bin/python3 +# ruff: noqa """ Quine: diff --git a/project_euler/problem_075/sol1.py b/project_euler/problem_075/sol1.py index b57604d76a86..0ccaf5dee7ec 100644 --- a/project_euler/problem_075/sol1.py +++ b/project_euler/problem_075/sol1.py @@ -29,7 +29,6 @@ from collections import defaultdict from math import gcd -from typing import DefaultDict def solution(limit: int = 1500000) -> int: @@ -43,7 +42,7 @@ def solution(limit: int = 1500000) -> int: >>> solution(50000) 5502 """ - frequencies: DefaultDict = defaultdict(int) + frequencies: defaultdict = defaultdict(int) euclid_m = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2): diff --git a/pyproject.toml b/pyproject.toml index 5f9b1aa06c0e..6552101d2faa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,8 +12,57 @@ addopts = [ omit = [".env/*"] sort = "Cover" -#[report] -#sort = Cover -#omit = -# .env/* -# backtracking/* +[tool.codespell] +ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar" +skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" + +[tool.ruff] +ignore = [ # `ruff rule S101` for a description of that rule + "B904", # B904: Within an `except` clause, raise exceptions with `raise ... 
from err` + "B905", # B905: `zip()` without an explicit `strict=` parameter + "E741", # E741: Ambiguous variable name 'l' + "G004", # G004 Logging statement uses f-string + "N999", # N999: Invalid module name + "PLC1901", # PLC1901: `{}` can be simplified to `{}` as an empty string is falsey + "PLR2004", # PLR2004: Magic value used in comparison + "PLR5501", # PLR5501: Consider using `elif` instead of `else` + "PLW0120", # PLW0120: `else` clause on loop without a `break` statement + "PLW060", # PLW060: Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable + "RUF00", # RUF00: Ambiguous unicode character -- DO NOT FIX + "RUF100", # RUF100: Unused `noqa` directive + "S101", # S101: Use of `assert` detected -- DO NOT FIX + "S105", # S105: Possible hardcoded password: 'password' + "S113", # S113: Probable use of requests call without timeout + "UP038", # UP038: Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +] +select = [ # https://beta.ruff.rs/docs/rules + "A", # A: builtins + "B", # B: bugbear + "C40", # C40: comprehensions + "C90", # C90: mccabe code complexity + "E", # E: pycodestyle errors + "F", # F: pyflakes + "G", # G: logging format + "I", # I: isort + "N", # N: pep8 naming + "PL", # PL: pylint + "PIE", # PIE: pie + "PYI", # PYI: type hinting stub files + "RUF", # RUF: ruff + "S", # S: bandit + "TID", # TID: tidy imports + "UP", # UP: pyupgrade + "W", # W: pycodestyle warnings + "YTT", # YTT: year 2020 +] +target-version = "py311" + +[tool.ruff.mccabe] # DO NOT INCREASE THIS VALUE +max-complexity = 20 # default: 10 + +[tool.ruff.pylint] # DO NOT INCREASE THESE VALUES +max-args = 10 # default: 5 +max-branches = 20 # default: 12 +max-returns = 8 # default: 6 +max-statements = 88 # default: 50 diff --git a/sorts/external_sort.py b/sorts/external_sort.py index 7af7dc0a609d..e6b0d47f79f5 100644 --- a/sorts/external_sort.py +++ b/sorts/external_sort.py @@ -104,7 +104,7 @@ def get_file_handles(self, filenames, buffer_size): files = {} for i in range(len(filenames)): - files[i] = open(filenames[i], "r", buffer_size) + files[i] = open(filenames[i], "r", buffer_size) # noqa: UP015 return files diff --git a/strings/check_anagrams.py b/strings/check_anagrams.py index a364b98212ad..9dcdffcfb921 100644 --- a/strings/check_anagrams.py +++ b/strings/check_anagrams.py @@ -2,7 +2,6 @@ wiki: https://en.wikipedia.org/wiki/Anagram """ from collections import defaultdict -from typing import DefaultDict def check_anagrams(first_str: str, second_str: str) -> bool: @@ -30,7 +29,7 @@ def check_anagrams(first_str: str, second_str: str) -> bool: return False # Default values for count should be 0 - count: DefaultDict[str, int] = defaultdict(int) + count: defaultdict[str, int] = defaultdict(int) # For each character in input strings, # increment count in the corresponding diff --git a/strings/word_occurrence.py b/strings/word_occurrence.py index 8260620c38a4..5a18ebf771e4 100644 --- a/strings/word_occurrence.py +++ b/strings/word_occurrence.py @@ -1,7 +1,6 @@ # Created by sarathkaul on 17/11/19 # Modified by Arkadip Bhattacharya(@darkmatter18) on 20/04/2020 from collections import defaultdict -from typing import DefaultDict def word_occurrence(sentence: str) -> dict: @@ -15,7 +14,7 @@ def word_occurrence(sentence: str) -> dict: >>> dict(word_occurrence("Two spaces")) {'Two': 1, 'spaces': 1} """ - occurrence: DefaultDict[str, int] = defaultdict(int) + occurrence: defaultdict[str, int] = defaultdict(int) # Creating a dictionary containing count 
of each word for word in sentence.split(): occurrence[word] += 1 diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py index 6fcc60e8feeb..69f2a2c4d421 100644 --- a/web_programming/currency_converter.py +++ b/web_programming/currency_converter.py @@ -8,7 +8,7 @@ import requests URL_BASE = "https://www.amdoren.com/api/currency.php" -TESTING = os.getenv("CI", False) +TESTING = os.getenv("CI", "") API_KEY = os.getenv("AMDOREN_API_KEY", "") if not API_KEY and not TESTING: From 521fbca61c6bdb84746564eb58c2ef2131260187 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 16 Mar 2023 13:31:29 +0100 Subject: [PATCH 0783/1543] Replace flake8 with ruff (#8184) --- CONTRIBUTING.md | 6 +++--- audio_filters/equal_loudness_filter.py.broken.txt | 2 +- data_structures/binary_tree/red_black_tree.py | 4 ++-- digital_image_processing/change_contrast.py | 4 ++-- maths/is_square_free.py | 4 ++-- maths/mobius_function.py | 4 ++-- other/linear_congruential_generator.py | 8 ++++---- pyproject.toml | 1 + quantum/ripple_adder_classic.py | 6 +++--- 9 files changed, 20 insertions(+), 19 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3ce5bd1edf68..6b6e4d21bfc7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -81,11 +81,11 @@ We want your work to be readable by others; therefore, we encourage you to note black . ``` -- All submissions will need to pass the test `flake8 . --ignore=E203,W503 --max-line-length=88` before they will be accepted so if possible, try this test locally on your Python file(s) before submitting your pull request. +- All submissions will need to pass the test `ruff .` before they will be accepted so if possible, try this test locally on your Python file(s) before submitting your pull request. ```bash - python3 -m pip install flake8 # only required the first time - flake8 . --ignore=E203,W503 --max-line-length=88 --show-source + python3 -m pip install ruff # only required the first time + ruff . ``` - Original code submission require docstrings or comments to describe your work. diff --git a/audio_filters/equal_loudness_filter.py.broken.txt b/audio_filters/equal_loudness_filter.py.broken.txt index b9a3c50e1c33..88cba8533cf7 100644 --- a/audio_filters/equal_loudness_filter.py.broken.txt +++ b/audio_filters/equal_loudness_filter.py.broken.txt @@ -20,7 +20,7 @@ class EqualLoudnessFilter: samplerate, use with caution. Code based on matlab implementation at https://bit.ly/3eqh2HU - (url shortened for flake8) + (url shortened for ruff) Target curve: https://i.imgur.com/3g2VfaM.png Yulewalk response: https://i.imgur.com/J9LnJ4C.png diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index b50d75d33689..3ebc8d63939b 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -1,6 +1,6 @@ """ -python/black : true -flake8 : passed +psf/black : true +ruff : passed """ from __future__ import annotations diff --git a/digital_image_processing/change_contrast.py b/digital_image_processing/change_contrast.py index 6a150400249f..7e49694708f8 100644 --- a/digital_image_processing/change_contrast.py +++ b/digital_image_processing/change_contrast.py @@ -4,8 +4,8 @@ This algorithm is used in https://noivce.pythonanywhere.com/ Python web app. 
-python/black: True -flake8 : True +psf/black: True +ruff : True """ from PIL import Image diff --git a/maths/is_square_free.py b/maths/is_square_free.py index 4134398d258b..08c70dc32c38 100644 --- a/maths/is_square_free.py +++ b/maths/is_square_free.py @@ -1,7 +1,7 @@ """ References: wikipedia:square free number -python/black : True -flake8 : True +psf/black : True +ruff : True """ from __future__ import annotations diff --git a/maths/mobius_function.py b/maths/mobius_function.py index 4fcf35f21813..8abdc4cafcb4 100644 --- a/maths/mobius_function.py +++ b/maths/mobius_function.py @@ -1,8 +1,8 @@ """ References: https://en.wikipedia.org/wiki/M%C3%B6bius_function References: wikipedia:square free number -python/black : True -flake8 : True +psf/black : True +ruff : True """ from maths.is_square_free import is_square_free diff --git a/other/linear_congruential_generator.py b/other/linear_congruential_generator.py index 777ee6355b9b..c016310f9cfa 100644 --- a/other/linear_congruential_generator.py +++ b/other/linear_congruential_generator.py @@ -9,10 +9,10 @@ class LinearCongruentialGenerator: """ # The default value for **seed** is the result of a function call which is not - # normally recommended and causes flake8-bugbear to raise a B008 error. However, - # in this case, it is accptable because `LinearCongruentialGenerator.__init__()` - # will only be called once per instance and it ensures that each instance will - # generate a unique sequence of numbers. + # normally recommended and causes ruff to raise a B008 error. However, in this case, + # it is accptable because `LinearCongruentialGenerator.__init__()` will only be + # called once per instance and it ensures that each instance will generate a unique + # sequence of numbers. def __init__(self, multiplier, increment, modulo, seed=int(time())): # noqa: B008 """ diff --git a/pyproject.toml b/pyproject.toml index 6552101d2faa..169c3a71ba6c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,6 +56,7 @@ select = [ # https://beta.ruff.rs/docs/rules "W", # W: pycodestyle warnings "YTT", # YTT: year 2020 ] +show-source = true target-version = "py311" [tool.ruff.mccabe] # DO NOT INCREASE THIS VALUE diff --git a/quantum/ripple_adder_classic.py b/quantum/ripple_adder_classic.py index c07757af7fff..b604395bc583 100644 --- a/quantum/ripple_adder_classic.py +++ b/quantum/ripple_adder_classic.py @@ -54,9 +54,9 @@ def full_adder( # The default value for **backend** is the result of a function call which is not -# normally recommended and causes flake8-bugbear to raise a B008 error. However, -# in this case, this is acceptable because `Aer.get_backend()` is called when the -# function is defined and that same backend is then reused for all function calls. +# normally recommended and causes ruff to raise a B008 error. However, in this case, +# this is acceptable because `Aer.get_backend()` is called when the function is defined +# and that same backend is then reused for all function calls. 
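# To illustrate the pitfall B008 normally guards against (a sketch, not code
# from this repository): in `def log_event(stamp=time()):` the call to time()
# runs once, when the function is defined, so every later call that omits the
# argument reuses that single timestamp. Reusing one Aer backend across calls
# relies on the same evaluate-once behaviour, but here it is intentional.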
def ripple_adder( From 3f9150c1b2dd15808a4962e03a1455f8d825512c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 20 Mar 2023 22:16:13 +0100 Subject: [PATCH 0784/1543] [pre-commit.ci] pre-commit autoupdate (#8294) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.255 → v0.0.257](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.255...v0.0.257) * Fix PLR1711 Useless statement at end of function --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- data_structures/binary_tree/avl_tree.py | 4 ---- machine_learning/polymonial_regression.py | 1 - 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 82aad6c65a9b..58cec4ff6ee6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.255 + rev: v0.0.257 hooks: - id: ruff diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 320e7ed0d792..4c1fb17afe86 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -60,19 +60,15 @@ def get_height(self) -> int: def set_data(self, data: Any) -> None: self.data = data - return def set_left(self, node: MyNode | None) -> None: self.left = node - return def set_right(self, node: MyNode | None) -> None: self.right = node - return def set_height(self, height: int) -> None: self.height = height - return def get_height(node: MyNode | None) -> int: diff --git a/machine_learning/polymonial_regression.py b/machine_learning/polymonial_regression.py index 374c35f7f905..487fb814526f 100644 --- a/machine_learning/polymonial_regression.py +++ b/machine_learning/polymonial_regression.py @@ -34,7 +34,6 @@ def viz_polymonial(): plt.xlabel("Position level") plt.ylabel("Salary") plt.show() - return if __name__ == "__main__": From 7cdb011ba440a07768179bfaea190bddefc890d8 Mon Sep 17 00:00:00 2001 From: Genesis <128913081+KaixLina@users.noreply.github.com> Date: Sun, 26 Mar 2023 20:49:18 +0530 Subject: [PATCH 0785/1543] New gitter link added or replaced (#8551) * New gitter link added * ruff==0.0.258 * noqa: S310 * noqa: S310 * Update ruff.yml * Add Ruff rule S311 * Ruff v0.0.259 * return ("{:08x}" * 5).format(*self.h) * pickle.load(f) # noqa: S301 --------- Co-authored-by: Christian Clauss --- .github/stale.yml | 4 ++-- .pre-commit-config.yaml | 2 +- CONTRIBUTING.md | 4 ++-- README.md | 4 ++-- hashes/sha1.py | 2 +- machine_learning/sequential_minimum_optimization.py | 2 +- neural_network/convolution_neural_network.py | 2 +- project_euler/README.md | 2 +- pyproject.toml | 1 + web_programming/download_images_from_google_query.py | 2 +- 10 files changed, 13 insertions(+), 12 deletions(-) diff --git a/.github/stale.yml b/.github/stale.yml index 36ca56266b26..813f688348d8 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -45,7 +45,7 @@ pulls: closeComment: > Please reopen this pull request once you commit the changes requested or make improvements on the code. 
If this is not the case and you need - some help, feel free to seek help from our [Gitter](https://gitter.im/TheAlgorithms) + some help, feel free to seek help from our [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) or ping one of the reviewers. Thank you for your contributions! issues: @@ -59,5 +59,5 @@ issues: closeComment: > Please reopen this issue once you add more information and updates here. If this is not the case and you need some help, feel free to seek help - from our [Gitter](https://gitter.im/TheAlgorithms) or ping one of the + from our [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) or ping one of the reviewers. Thank you for your contributions! diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 58cec4ff6ee6..72a878387e15 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.257 + rev: v0.0.259 hooks: - id: ruff diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6b6e4d21bfc7..75e4fb893723 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ ## Before contributing -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms). +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im). ## Contributing @@ -176,7 +176,7 @@ We want your work to be readable by others; therefore, we encourage you to note - Most importantly, - __Be consistent in the use of these guidelines when submitting.__ - - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms) __now!__ + - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) __now!__ - Happy coding! Writer [@poyea](https://github.com/poyea), Jun 2019. diff --git a/README.md b/README.md index 68a6e5e6fbce..3d2f1a110780 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Discord chat - + Gitter chat @@ -42,7 +42,7 @@ Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribut ## Community Channels -We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms)! Community channels are a great way for you to ask questions and get help. Please join us! +We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im)! Community channels are a great way for you to ask questions and get help. Please join us! 
## List of Algorithms diff --git a/hashes/sha1.py b/hashes/sha1.py index 9f0437f208fa..b325ce3e43bb 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -124,7 +124,7 @@ def final_hash(self): self.h[3] + d & 0xFFFFFFFF, self.h[4] + e & 0xFFFFFFFF, ) - return "%08x%08x%08x%08x%08x" % tuple(self.h) + return ("{:08x}" * 5).format(*self.h) def test_sha1_hash(): diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index b68bd52f4de9..b24f5669e2e8 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -458,7 +458,7 @@ def test_cancel_data(): CANCER_DATASET_URL, headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}, ) - response = urllib.request.urlopen(request) + response = urllib.request.urlopen(request) # noqa: S310 content = response.read().decode("utf-8") with open(r"cancel_data.csv", "w") as f: f.write(content) diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index bd0550212157..f5ec156f3593 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -77,7 +77,7 @@ def save_model(self, save_path): def read_model(cls, model_path): # read saved model with open(model_path, "rb") as f: - model_dic = pickle.load(f) + model_dic = pickle.load(f) # noqa: S301 conv_get = model_dic.get("conv1") conv_get.append(model_dic.get("step_conv1")) diff --git a/project_euler/README.md b/project_euler/README.md index e3dc035eee5e..4832d0078ebf 100644 --- a/project_euler/README.md +++ b/project_euler/README.md @@ -10,7 +10,7 @@ The solutions will be checked by our [automated testing on GitHub Actions](https ## Solution Guidelines -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before reading the solution guidelines, make sure you read the whole [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) as it won't be repeated in here. If you have any doubt on the guidelines, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms). You can use the [template](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#solution-template) we have provided below as your starting point but be sure to read the [Coding Style](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#coding-style) part first. +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before reading the solution guidelines, make sure you read the whole [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) as it won't be repeated in here. If you have any doubt on the guidelines, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im). You can use the [template](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#solution-template) we have provided below as your starting point but be sure to read the [Coding Style](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#coding-style) part first. 
### Coding Style diff --git a/pyproject.toml b/pyproject.toml index 169c3a71ba6c..23fe45e97d20 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,6 +34,7 @@ ignore = [ # `ruff rule S101` for a description of that rule "S101", # S101: Use of `assert` detected -- DO NOT FIX "S105", # S105: Possible hardcoded password: 'password' "S113", # S113: Probable use of requests call without timeout + "S311", # S311: Standard pseudo-random generators are not suitable for cryptographic purposes "UP038", # UP038: Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] select = [ # https://beta.ruff.rs/docs/rules diff --git a/web_programming/download_images_from_google_query.py b/web_programming/download_images_from_google_query.py index 9c0c21dc804e..441347459f8e 100644 --- a/web_programming/download_images_from_google_query.py +++ b/web_programming/download_images_from_google_query.py @@ -86,7 +86,7 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) path_name = f"query_{query.replace(' ', '_')}" if not os.path.exists(path_name): os.makedirs(path_name) - urllib.request.urlretrieve( + urllib.request.urlretrieve( # noqa: S310 original_size_img, f"{path_name}/original_size_img_{index}.jpg" ) return index From 86b2ab09aab359ef1b4bea58ed3c1fdf5b989500 Mon Sep 17 00:00:00 2001 From: Christian Veenhuis Date: Sun, 26 Mar 2023 18:20:47 +0200 Subject: [PATCH 0786/1543] Fix broken links to Gitter Community (Fixes: #8197) (#8546) Co-authored-by: Christian Clauss --- .github/stale.yml | 4 ++-- CONTRIBUTING.md | 4 ++-- README.md | 4 ++-- project_euler/README.md | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/stale.yml b/.github/stale.yml index 813f688348d8..0939e1f223ff 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -45,7 +45,7 @@ pulls: closeComment: > Please reopen this pull request once you commit the changes requested or make improvements on the code. If this is not the case and you need - some help, feel free to seek help from our [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) + some help, feel free to seek help from our [Gitter](https://gitter.im/TheAlgorithms/community) or ping one of the reviewers. Thank you for your contributions! issues: @@ -59,5 +59,5 @@ issues: closeComment: > Please reopen this issue once you add more information and updates here. If this is not the case and you need some help, feel free to seek help - from our [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) or ping one of the + from our [Gitter](https://gitter.im/TheAlgorithms/community) or ping one of the reviewers. Thank you for your contributions! diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 75e4fb893723..2bb0c2e39eee 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ ## Before contributing -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im). +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. 
If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms/community). ## Contributing @@ -176,7 +176,7 @@ We want your work to be readable by others; therefore, we encourage you to note - Most importantly, - __Be consistent in the use of these guidelines when submitting.__ - - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) __now!__ + - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms/community) __now!__ - Happy coding! Writer [@poyea](https://github.com/poyea), Jun 2019. diff --git a/README.md b/README.md index 3d2f1a110780..bf6e0ed3cf75 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Discord chat - + Gitter chat @@ -42,7 +42,7 @@ Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribut ## Community Channels -We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im)! Community channels are a great way for you to ask questions and get help. Please join us! +We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms/community)! Community channels are a great way for you to ask questions and get help. Please join us! ## List of Algorithms diff --git a/project_euler/README.md b/project_euler/README.md index 4832d0078ebf..16865edf2a67 100644 --- a/project_euler/README.md +++ b/project_euler/README.md @@ -10,7 +10,7 @@ The solutions will be checked by our [automated testing on GitHub Actions](https ## Solution Guidelines -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before reading the solution guidelines, make sure you read the whole [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) as it won't be repeated in here. If you have any doubt on the guidelines, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im). You can use the [template](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#solution-template) we have provided below as your starting point but be sure to read the [Coding Style](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#coding-style) part first. +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before reading the solution guidelines, make sure you read the whole [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) as it won't be repeated in here. If you have any doubt on the guidelines, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms/community). You can use the [template](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#solution-template) we have provided below as your starting point but be sure to read the [Coding Style](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#coding-style) part first. 
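A brief, hedged illustration of the `S311` entry added to `pyproject.toml` in the hunk further above (standard pseudo-random generators are not suitable for cryptographic purposes): `random` remains acceptable for the repository's algorithm demos and simulations, while anything security-sensitive should come from the `secrets` module. The variable names below are illustrative only:

    # hedged sketch of what S311 flags and the usual remedy
    import random
    import secrets

    simulation_roll = random.randint(1, 6)  # fine for algorithm demos; S311 is ignored repo-wide
    session_token = secrets.token_hex(16)   # cryptographically strong, 32 hex characters
    assert 1 <= simulation_roll <= 6 and len(session_token) == 32
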
### Coding Style From ac111ee463065e372ad148dbafba630045ecf94c Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 29 Mar 2023 00:41:54 +0300 Subject: [PATCH 0787/1543] Reduce the complexity of graphs/bi_directional_dijkstra.py (#8165) * Reduce the complexity of graphs/bi_directional_dijkstra.py * Try to lower the --max-complexity threshold in the file .flake8 * Lower the --max-complexity threshold in the file .flake8 * updating DIRECTORY.md * updating DIRECTORY.md * Try to lower max-complexity * Try to lower max-complexity * Try to lower max-complexity --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- graphs/bi_directional_dijkstra.py | 95 +++++++++++++++++-------------- pyproject.toml | 2 +- 2 files changed, 53 insertions(+), 44 deletions(-) diff --git a/graphs/bi_directional_dijkstra.py b/graphs/bi_directional_dijkstra.py index fc53e2f0d8f3..a4489026be80 100644 --- a/graphs/bi_directional_dijkstra.py +++ b/graphs/bi_directional_dijkstra.py @@ -17,6 +17,32 @@ import numpy as np +def pass_and_relaxation( + graph: dict, + v: str, + visited_forward: set, + visited_backward: set, + cst_fwd: dict, + cst_bwd: dict, + queue: PriorityQueue, + parent: dict, + shortest_distance: float | int, +) -> float | int: + for nxt, d in graph[v]: + if nxt in visited_forward: + continue + old_cost_f = cst_fwd.get(nxt, np.inf) + new_cost_f = cst_fwd[v] + d + if new_cost_f < old_cost_f: + queue.put((new_cost_f, nxt)) + cst_fwd[nxt] = new_cost_f + parent[nxt] = v + if nxt in visited_backward: + if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: + shortest_distance = cst_fwd[v] + d + cst_bwd[nxt] + return shortest_distance + + def bidirectional_dij( source: str, destination: str, graph_forward: dict, graph_backward: dict ) -> int: @@ -51,53 +77,36 @@ def bidirectional_dij( if source == destination: return 0 - while queue_forward and queue_backward: - while not queue_forward.empty(): - _, v_fwd = queue_forward.get() - - if v_fwd not in visited_forward: - break - else: - break + while not queue_forward.empty() and not queue_backward.empty(): + _, v_fwd = queue_forward.get() visited_forward.add(v_fwd) - while not queue_backward.empty(): - _, v_bwd = queue_backward.get() - - if v_bwd not in visited_backward: - break - else: - break + _, v_bwd = queue_backward.get() visited_backward.add(v_bwd) - # forward pass and relaxation - for nxt_fwd, d_forward in graph_forward[v_fwd]: - if nxt_fwd in visited_forward: - continue - old_cost_f = cst_fwd.get(nxt_fwd, np.inf) - new_cost_f = cst_fwd[v_fwd] + d_forward - if new_cost_f < old_cost_f: - queue_forward.put((new_cost_f, nxt_fwd)) - cst_fwd[nxt_fwd] = new_cost_f - parent_forward[nxt_fwd] = v_fwd - if nxt_fwd in visited_backward: - if cst_fwd[v_fwd] + d_forward + cst_bwd[nxt_fwd] < shortest_distance: - shortest_distance = cst_fwd[v_fwd] + d_forward + cst_bwd[nxt_fwd] - - # backward pass and relaxation - for nxt_bwd, d_backward in graph_backward[v_bwd]: - if nxt_bwd in visited_backward: - continue - old_cost_b = cst_bwd.get(nxt_bwd, np.inf) - new_cost_b = cst_bwd[v_bwd] + d_backward - if new_cost_b < old_cost_b: - queue_backward.put((new_cost_b, nxt_bwd)) - cst_bwd[nxt_bwd] = new_cost_b - parent_backward[nxt_bwd] = v_bwd - - if nxt_bwd in visited_forward: - if cst_bwd[v_bwd] + d_backward + cst_fwd[nxt_bwd] < shortest_distance: - shortest_distance = cst_bwd[v_bwd] + d_backward + cst_fwd[nxt_bwd] + shortest_distance = pass_and_relaxation( + graph_forward, + v_fwd, + visited_forward, + visited_backward, + cst_fwd, + cst_bwd, + 
queue_forward, + parent_forward, + shortest_distance, + ) + + shortest_distance = pass_and_relaxation( + graph_backward, + v_bwd, + visited_backward, + visited_forward, + cst_bwd, + cst_fwd, + queue_backward, + parent_backward, + shortest_distance, + ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break diff --git a/pyproject.toml b/pyproject.toml index 23fe45e97d20..48c3fbd4009d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -61,7 +61,7 @@ show-source = true target-version = "py311" [tool.ruff.mccabe] # DO NOT INCREASE THIS VALUE -max-complexity = 20 # default: 10 +max-complexity = 17 # default: 10 [tool.ruff.pylint] # DO NOT INCREASE THESE VALUES max-args = 10 # default: 5 From a71f22dae54f830dbf68b3bd5e5e8d540e338a4c Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Thu, 30 Mar 2023 10:39:21 +0530 Subject: [PATCH 0788/1543] Update cnn_classification.py (#8570) --- computer_vision/cnn_classification.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py index 1c193fcbb50b..9b5f8c95eebf 100644 --- a/computer_vision/cnn_classification.py +++ b/computer_vision/cnn_classification.py @@ -93,7 +93,7 @@ test_image = tf.keras.preprocessing.image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis=0) result = classifier.predict(test_image) - training_set.class_indices + # training_set.class_indices if result[0][0] == 0: prediction = "Normal" if result[0][0] == 1: From a00492911a949a1e59072367bbabee22cd884106 Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Fri, 31 Mar 2023 16:47:13 +0530 Subject: [PATCH 0789/1543] added a problem on kadane's algo and its solution. (#8569) * added kadane's algorithm directory with one problem's solution. 
* added type hints * Rename kaadne_algorithm/max_product_subarray.py to dynamic_programming/max_product_subarray.py * Update dynamic_programming/max_product_subarray.py Co-authored-by: Christian Clauss * Update max_product_subarray.py * Update max_product_subarray.py * Update dynamic_programming/max_product_subarray.py Co-authored-by: Christian Clauss * Update max_product_subarray.py * Update max_product_subarray.py * Update max_product_subarray.py * Update max_product_subarray.py * Update max_product_subarray.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update max_product_subarray.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update max_product_subarray.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update max_product_subarray.py * Update max_product_subarray.py * Update dynamic_programming/max_product_subarray.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/max_product_subarray.py Co-authored-by: Christian Clauss * Update max_product_subarray.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/max_product_subarray.py | 53 +++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 dynamic_programming/max_product_subarray.py diff --git a/dynamic_programming/max_product_subarray.py b/dynamic_programming/max_product_subarray.py new file mode 100644 index 000000000000..425859bc03e3 --- /dev/null +++ b/dynamic_programming/max_product_subarray.py @@ -0,0 +1,53 @@ +def max_product_subarray(numbers: list[int]) -> int: + """ + Returns the maximum product that can be obtained by multiplying a + contiguous subarray of the given integer list `nums`. + + Example: + >>> max_product_subarray([2, 3, -2, 4]) + 6 + >>> max_product_subarray((-2, 0, -1)) + 0 + >>> max_product_subarray([2, 3, -2, 4, -1]) + 48 + >>> max_product_subarray([-1]) + -1 + >>> max_product_subarray([0]) + 0 + >>> max_product_subarray([]) + 0 + >>> max_product_subarray("") + 0 + >>> max_product_subarray(None) + 0 + >>> max_product_subarray([2, 3, -2, 4.5, -1]) + Traceback (most recent call last): + ... + ValueError: numbers must be an iterable of integers + >>> max_product_subarray("ABC") + Traceback (most recent call last): + ... 
+ ValueError: numbers must be an iterable of integers + """ + if not numbers: + return 0 + + if not isinstance(numbers, (list, tuple)) or not all( + isinstance(number, int) for number in numbers + ): + raise ValueError("numbers must be an iterable of integers") + + max_till_now = min_till_now = max_prod = numbers[0] + + for i in range(1, len(numbers)): + # update the maximum and minimum subarray products + number = numbers[i] + if number < 0: + max_till_now, min_till_now = min_till_now, max_till_now + max_till_now = max(number, max_till_now * number) + min_till_now = min(number, min_till_now * number) + + # update the maximum product found till now + max_prod = max(max_prod, max_till_now) + + return max_prod From 238fe8c494ab5be80c96441095d1c8958f95c04d Mon Sep 17 00:00:00 2001 From: NIKITA PANDEY <113332472+nikitapandeyy@users.noreply.github.com> Date: Fri, 31 Mar 2023 19:38:13 +0530 Subject: [PATCH 0790/1543] Update receive_file.py (#8541) * Update receive_file.py Here are the changes I made: Added the main() function and called it from if __name__ == "__main__" block. This makes it easier to test the code and import it into other programs. Added socket.AF_INET as the first argument to socket.socket(). This specifies the address family to be used, which is necessary when using connect(). Changed print(f"{data = }") to print("Received:", len(data), "bytes"). This makes it clearer what's happening and how much data is being received. Changed the final print statement to "Successfully received the file". This makes it more accurate and descriptive. Moved the import statement to the top of the file. This is a common convention in Python. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- file_transfer/receive_file.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/file_transfer/receive_file.py b/file_transfer/receive_file.py index 37a503036dc2..f50ad9fe1107 100644 --- a/file_transfer/receive_file.py +++ b/file_transfer/receive_file.py @@ -1,8 +1,9 @@ -if __name__ == "__main__": - import socket # Import socket module +import socket + - sock = socket.socket() # Create a socket object - host = socket.gethostname() # Get local machine name +def main(): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + host = socket.gethostname() port = 12312 sock.connect((host, port)) @@ -13,11 +14,14 @@ print("Receiving data...") while True: data = sock.recv(1024) - print(f"{data = }") if not data: break - out_file.write(data) # Write data to a file + out_file.write(data) - print("Successfully got the file") + print("Successfully received the file") sock.close() print("Connection closed") + + +if __name__ == "__main__": + main() From 5ce63b5966b6ad9c7ce36c449fb31112c3e1d084 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 1 Apr 2023 01:11:24 -0400 Subject: [PATCH 0791/1543] Fix `mypy` errors in `lu_decomposition.py` (attempt 2) (#8100) * updating DIRECTORY.md * Fix mypy errors in lu_decomposition.py * Replace for-loops with comprehensions * Add explanation of LU decomposition and extra doctests Add an explanation of LU decomposition with conditions for when an LU decomposition exists Add extra doctests to handle each of the possible conditions for when a decomposition exists/doesn't exist * updating DIRECTORY.md * updating DIRECTORY.md --------- Co-authored-by: github-actions 
<${GITHUB_ACTOR}@users.noreply.github.com> --- arithmetic_analysis/lu_decomposition.py | 91 ++++++++++++++++++------- 1 file changed, 65 insertions(+), 26 deletions(-) diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index 217719cf4da1..941c1dadf556 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -1,62 +1,101 @@ -"""Lower-Upper (LU) Decomposition. +""" +Lower–upper (LU) decomposition factors a matrix as a product of a lower +triangular matrix and an upper triangular matrix. A square matrix has an LU +decomposition under the following conditions: + - If the matrix is invertible, then it has an LU decomposition if and only + if all of its leading principal minors are non-zero (see + https://en.wikipedia.org/wiki/Minor_(linear_algebra) for an explanation of + leading principal minors of a matrix). + - If the matrix is singular (i.e., not invertible) and it has a rank of k + (i.e., it has k linearly independent columns), then it has an LU + decomposition if its first k leading principal minors are non-zero. + +This algorithm will simply attempt to perform LU decomposition on any square +matrix and raise an error if no such decomposition exists. -Reference: -- https://en.wikipedia.org/wiki/LU_decomposition +Reference: https://en.wikipedia.org/wiki/LU_decomposition """ from __future__ import annotations import numpy as np -from numpy import float64 -from numpy.typing import ArrayLike - -def lower_upper_decomposition( - table: ArrayLike[float64], -) -> tuple[ArrayLike[float64], ArrayLike[float64]]: - """Lower-Upper (LU) Decomposition - - Example: +def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """ + Perform LU decomposition on a given matrix and raises an error if the matrix + isn't square or if no such decomposition exists >>> matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]]) - >>> outcome = lower_upper_decomposition(matrix) - >>> outcome[0] + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + >>> lower_mat array([[1. , 0. , 0. ], [0. , 1. , 0. ], [2.5, 8. , 1. ]]) - >>> outcome[1] + >>> upper_mat array([[ 2. , -2. , 1. ], [ 0. , 1. , 2. ], [ 0. , 0. , -17.5]]) + >>> matrix = np.array([[4, 3], [6, 3]]) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + >>> lower_mat + array([[1. , 0. ], + [1.5, 1. ]]) + >>> upper_mat + array([[ 4. , 3. ], + [ 0. , -1.5]]) + + # Matrix is not square >>> matrix = np.array([[2, -2, 1], [0, 1, 2]]) - >>> lower_upper_decomposition(matrix) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) Traceback (most recent call last): ... ValueError: 'table' has to be of square shaped array but got a 2x3 array: [[ 2 -2 1] [ 0 1 2]] + + # Matrix is invertible, but its first leading principal minor is 0 + >>> matrix = np.array([[0, 1], [1, 0]]) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + Traceback (most recent call last): + ... + ArithmeticError: No LU decomposition exists + + # Matrix is singular, but its first leading principal minor is 1 + >>> matrix = np.array([[1, 0], [1, 0]]) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + >>> lower_mat + array([[1., 0.], + [1., 1.]]) + >>> upper_mat + array([[1., 0.], + [0., 0.]]) + + # Matrix is singular, but its first leading principal minor is 0 + >>> matrix = np.array([[0, 1], [0, 1]]) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + Traceback (most recent call last): + ... 
+ ArithmeticError: No LU decomposition exists """ - # Table that contains our data - # Table has to be a square array so we need to check first + # Ensure that table is a square array rows, columns = np.shape(table) if rows != columns: raise ValueError( - f"'table' has to be of square shaped array but got a {rows}x{columns} " - + f"array:\n{table}" + f"'table' has to be of square shaped array but got a " + f"{rows}x{columns} array:\n{table}" ) + lower = np.zeros((rows, columns)) upper = np.zeros((rows, columns)) for i in range(columns): for j in range(i): - total = 0 - for k in range(j): - total += lower[i][k] * upper[k][j] + total = sum(lower[i][k] * upper[k][j] for k in range(j)) + if upper[j][j] == 0: + raise ArithmeticError("No LU decomposition exists") lower[i][j] = (table[i][j] - total) / upper[j][j] lower[i][i] = 1 for j in range(i, columns): - total = 0 - for k in range(i): - total += lower[i][k] * upper[k][j] + total = sum(lower[i][k] * upper[k][j] for k in range(j)) upper[i][j] = table[i][j] - total return lower, upper From dc4f603dad22eab31892855555999b552e97e9d8 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 08:47:24 +0300 Subject: [PATCH 0792/1543] Add Project Euler problem 187 solution 1 (#8182) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_187/__init__.py | 0 project_euler/problem_187/sol1.py | 58 +++++++++++++++++++++++++++ 3 files changed, 60 insertions(+) create mode 100644 project_euler/problem_187/__init__.py create mode 100644 project_euler/problem_187/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 1d3177801a2c..1a641d8ecb59 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -990,6 +990,8 @@ * [Sol1](project_euler/problem_174/sol1.py) * Problem 180 * [Sol1](project_euler/problem_180/sol1.py) + * Problem 187 + * [Sol1](project_euler/problem_187/sol1.py) * Problem 188 * [Sol1](project_euler/problem_188/sol1.py) * Problem 191 diff --git a/project_euler/problem_187/__init__.py b/project_euler/problem_187/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_187/sol1.py b/project_euler/problem_187/sol1.py new file mode 100644 index 000000000000..12f03e2a7023 --- /dev/null +++ b/project_euler/problem_187/sol1.py @@ -0,0 +1,58 @@ +""" +Project Euler Problem 187: https://projecteuler.net/problem=187 + +A composite is a number containing at least two prime factors. +For example, 15 = 3 x 5; 9 = 3 x 3; 12 = 2 x 2 x 3. + +There are ten composites below thirty containing precisely two, +not necessarily distinct, prime factors: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26. + +How many composite integers, n < 10^8, have precisely two, +not necessarily distinct, prime factors? 
+""" + +from math import isqrt + + +def calculate_prime_numbers(max_number: int) -> list[int]: + """ + Returns prime numbers below max_number + + >>> calculate_prime_numbers(10) + [2, 3, 5, 7] + """ + + is_prime = [True] * max_number + for i in range(2, isqrt(max_number - 1) + 1): + if is_prime[i]: + for j in range(i**2, max_number, i): + is_prime[j] = False + + return [i for i in range(2, max_number) if is_prime[i]] + + +def solution(max_number: int = 10**8) -> int: + """ + Returns the number of composite integers below max_number have precisely two, + not necessarily distinct, prime factors + + >>> solution(30) + 10 + """ + + prime_numbers = calculate_prime_numbers(max_number // 2) + + semiprimes_count = 0 + left = 0 + right = len(prime_numbers) - 1 + while left <= right: + while prime_numbers[left] * prime_numbers[right] >= max_number: + right -= 1 + semiprimes_count += right - left + 1 + left += 1 + + return semiprimes_count + + +if __name__ == "__main__": + print(f"{solution() = }") From e4d90e2d5b92fdcff558f1848843dfbe20d81035 Mon Sep 17 00:00:00 2001 From: amirsoroush <114881632+amirsoroush@users.noreply.github.com> Date: Sat, 1 Apr 2023 09:26:43 +0300 Subject: [PATCH 0793/1543] change space complexity of linked list's __len__ from O(n) to O(1) (#8183) --- data_structures/linked_list/circular_linked_list.py | 2 +- data_structures/linked_list/doubly_linked_list.py | 2 +- data_structures/linked_list/merge_two_lists.py | 2 +- data_structures/linked_list/singly_linked_list.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 67a63cd55e19..9092fb29e3ff 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -24,7 +24,7 @@ def __iter__(self) -> Iterator[Any]: break def __len__(self) -> int: - return len(tuple(iter(self))) + return sum(1 for _ in self) def __repr__(self): return "->".join(str(item) for item in iter(self)) diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 6c81493fff85..41d07d63e005 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -51,7 +51,7 @@ def __len__(self): >>> len(linked_list) == 5 True """ - return len(tuple(iter(self))) + return sum(1 for _ in self) def insert_at_head(self, data): self.insert_at_nth(0, data) diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py index 61e2412aa7fd..ca0d3bb48540 100644 --- a/data_structures/linked_list/merge_two_lists.py +++ b/data_structures/linked_list/merge_two_lists.py @@ -44,7 +44,7 @@ def __len__(self) -> int: >>> len(SortedLinkedList(test_data_odd)) 8 """ - return len(tuple(iter(self))) + return sum(1 for _ in self) def __str__(self) -> str: """ diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index bdeb5922ac67..a8f9e8ebb977 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -72,7 +72,7 @@ def __len__(self) -> int: >>> len(linked_list) 0 """ - return len(tuple(iter(self))) + return sum(1 for _ in self) def __repr__(self) -> str: """ From 9e0c357a57f76abc354d704012040f3f5511a941 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Sat, 1 Apr 2023 11:59:26 +0530 Subject: [PATCH 0794/1543] chore: 
additional Project Euler solution hash (#8593) --- scripts/project_euler_answers.json | 109 ++++++++++++++++++++++++++++- 1 file changed, 108 insertions(+), 1 deletion(-) diff --git a/scripts/project_euler_answers.json b/scripts/project_euler_answers.json index 6d354363ee5f..f2b876934766 100644 --- a/scripts/project_euler_answers.json +++ b/scripts/project_euler_answers.json @@ -723,5 +723,112 @@ "722": "9687101dfe209fd65f57a10603baa38ba83c9152e43a8b802b96f1e07f568e0e", "723": "74832787e7d4e0cb7991256c8f6d02775dffec0684de234786f25f898003f2de", "724": "fa05e2b497e7eafa64574017a4c45aadef6b163d907b03d63ba3f4021096d329", - "725": "005c873563f51bbebfdb1f8dbc383259e9a98e506bc87ae8d8c9044b81fc6418" + "725": "005c873563f51bbebfdb1f8dbc383259e9a98e506bc87ae8d8c9044b81fc6418", + "726": "93e41c533136bf4b436e493090fd4e7b277234db2a69c62a871f775ff26681bf", + "727": "c366f7426ca9351dcdde2e3bea01181897cda4d9b44977678ea3828419b84851", + "728": "8de62a644511d27c7c23c7722f56112b3c1ab9b05a078a98a0891f09f92464c6", + "729": "0ae82177174eef99fc80a2ec921295f61a6ac4dfed86a1bf333a50c26d01955c", + "730": "78cd876a176c8fbf7c2155b80dccbdededdbc43c28ef17b5a6e554d649325d38", + "731": "54afb9f829be51d29f90eecbfe40e5ba91f3a3bf538de62f3e34674af15eb542", + "732": "c4dc4610dcafc806b30e5d3f5560b57f462218a04397809843a7110838f0ebac", + "733": "bdde7d98d057d6a6ae360fd2f872d8bccb7e7f2971df37a3c5f20712ea3c618f", + "734": "9a514875bd9af26fcc565337771f852d311cd77033186e4d957e7b6c7b8ce018", + "735": "8bbc5a27c0031d8c44f3f73c99622a202cd6ea9a080049d615a7ae80ce6024f9", + "736": "e0d4c78b9b3dae51940877aff28275d036eccfc641111c8e34227ff6015a0fab", + "737": "a600884bcaa01797310c83b198bad58c98530289305af29b0bf75f679af38d3a", + "738": "c85f15fdaafe7d5525acff960afef7e4b8ffded5a7ee0d1dc2b0e8d0c26b9b46", + "739": "8716e9302f0fb90153e2f522bd88a710361a897480e4ccc0542473c704793518", + "740": "6ff41ee34b263b742cda109aee3be9ad6c95eec2ce31d6a9fc5353bba1b41afd", + "741": "99ac0eb9589b895e5755895206bbad5febd6bc29b2912df1c7544c547e26bca3", + "742": "7d2761a240aa577348df4813ea248088d0d6d8d421142c712ed576cdc90d4df9", + "743": "d93c42a129c0961b4e36738efae3b7e8ffae3a4daeced20e85bb740d3d72522d", + "744": "211f76700a010461486dde6c723720be85e68c192cd8a8ed0a88860b8ae9b0f0", + "745": "2d32dc1fea2f1b8600c0ada927b057b566870ceb5362cce71ac3693dcb7136ae", + "746": "2df1c2a0181f0c25e8d13d2a1eadba55a6b06267a2b22075fcf6867fb2e10c02", + "747": "a8d8f93142e320c6f0dd386c7a3bfb011bbdc15b85291a9be8f0266b3608175e", + "748": "7de937e04c10386b240afb8bb2ff590009946df8b7850a0329ccdb59fca8955f", + "749": "1a55f5484ccf964aeb186faedefa01db05d87180891dc2280b6eb85b6efb4779", + "750": "fa4318c213179e6af1c949be7cf47210f4383e0a44d191e2bad44228d3192f14", + "751": "12fe650fcb3afc214b3d647c655070e8142cfd397441fc7636ad7e6ffcaefde2", + "752": "e416c0123bc6b82df8726b328494db31aa4781d938a0a6e2107b1e44c73c0434", + "753": "0ee3299bc89e1e4c2fc79285fb1cd84c887456358a825e56be92244b7115f5af", + "754": "1370574b16207c41d3dafb62aa898379ec101ac36843634b1633b7b509d4c35a", + "755": "78bb4b18b13f5254cfafe872c0e93791ab5206b2851960dc6aebea8f62b9580c", + "756": "6becaabbda2e9ea22373e62e989b6b70467efa24fbe2f0d124d7a99a53e93f74", + "757": "fbfee0a5c4fa57a1dd6cf0c9bb2423cf7e7bcb130e67114aa360e42234987314", + "758": "8e4dfc259cec9dfd89d4b4ac8c33c75af6e0f5f7926526ee22ad4d45f93d3c18", + "759": "40bac0ed2e4f7861a6d9a2d87191a9034e177c319aa40a43638cc1b69572e5f2", + "760": "7ab50386a211f0815593389ab05b57a1a5eb5cbf5b9a85fe4afc517dcab74e06", + "761": "1cdb0318ac16e11c8d2ae7b1d7ca7138f7b1a461e9d75bd69be0f9cdd3add0c5", + 
"762": "84c4662267d5809380a540dfc2881665b3019047d74d5ef0a01f86e45f4b5b59", + "763": "f0def5903139447fabe7d106db5fff660d94b45af7b8b48d789596cf65ab2514", + "764": "7b4131f4d1e13d091ca7dd4d32317a14a2a24e6e1abd214df1c14c215287b330", + "765": "7558b775727426bccd945f5aa6b3e131e6034a7b1ff8576332329ef65d6a1663", + "766": "23c309430fa9546adb617457dbfd30fb7432904595c8c000e9b67ea23f32a53b", + "767": "70aef22ac2db8a5bdfcc42ff8dafbd2901e85e268f5f3c45085aa40c590b1d42", + "768": "b69a808dfc654b037e2f47ace16f48fe3bb553b3c8eed3e2b6421942fbf521d0", + "769": "78537a30577e806c6d8d94725e54d2d52e56f7f39f89c133cd5d0a2aad7e46e4", + "770": "c9d80c19c4895d1498bf809fcc37c447fa961fb325e5667eb35d6aa992966b41", + "771": "9803ace30c0d90d422e703fdf25a10a9342d0178a277ebc20c7bd6feac4c7a15", + "772": "f5a1e391af815ea6453db58a1bd71790f433c44ed63e5e93d8f5c045dfd5a464", + "773": "e1b93fc323c4d9c383100603339548e1e56ce9c38bcdcc425024c12b862ea8cb", + "774": "3646cd098b213014fb7bbc9597871585e62ee0cf2770e141f1df771237cc09ab", + "775": "d9d7d515ce7350c9e5696d85f68bbb42daa74b9e171a601dd04c823b18bb7757", + "776": "83286074d3bc86a5b449facb5fe5eafc91eb4c8031e2fb5e716443402cd8ed0f", + "777": "e62616a387d05b619d47cee3d49d5d2db19393736bf54b6cdd20933c0531cb7e", + "778": "d4de958ba44d25353de5b380e04d06c7968794ad50dbf6231ad0049ff53e106b", + "779": "c08ce54a59afc4af62f28b80a9c9a5190822d124eed8d73fd6db3e19c81e2157", + "780": "fc7ba646c16482f0f4f5ce2b06d21183dba2bdeaf9469b36b55bc7bc2d87baf3", + "781": "8fa5733f06838fb61b55b3e9d59c5061d922147e59947fe52e566dd975b2199f", + "782": "9f757d92df401ee049bc066bb2625c6287e5e4bcd38c958396a77a578f036a24", + "783": "270ff37f60c267a673bd4b223e44941f01ae9cfbf6bbdf99ca57af89b1e9a66f", + "784": "388b17c4c7b829cef767f83b4686c903faeec1241edfe5f58ee91d2b0c7f8dfc", + "785": "77cf600204c5265e1d5d3d26bf28ba1e92e6f24def040c16977450bec8b1cb99", + "786": "fb14022b7edbc6c7bfde27f35b49f6acaa4f0fc383af27614cb9d4a1980e626b", + "787": "7516ba0ac1951665723dcc4adcc52764d9497e7b6ed30bdb9937ac9df82b7c4f", + "788": "adede1d30258bb0f353af11f559b67f8b823304c71e967f52db52d002760c24f", + "789": "0c82e744a1f9bc57fd8ae8b2f479998455bc45126de971c59b68541c254e303a", + "790": "319847122251afd20d4d650047c55981a509fa2be78abd7c9c3caa0555e60a05", + "791": "2e0bbdcd0a8460e1e33c55668d0dc9752379a78b9f3561d7a17b922a5541a3fb", + "792": "5f77834c5a509023dd95dd98411eae1dd4bafd125deca590632f409f92fd257b", + "793": "dbfd900a3b31eeec2f14b916f5151611541cb716d80b7b9a1229de12293a02ea", + "794": "d019fe415aba832c4c761140d60c466c9aaad52b504df3167c17f2d3f0b277a7", + "795": "617b259349da44c2af2664acde113673ab3bb03a85d31f1be8f01027d0ebd4d3", + "796": "cba6b30a818d073398e5802211987f0897523e4752987bb445b2bca079670e22", + "797": "61e42cac3d7858b8850111a8c64c56432a18dd058dfb6afd773f07d703703b1a", + "798": "ae8b155d6b77522af79f7e4017fefe92aaa5d45eff132c83dc4d4bcfc9686020", + "799": "a41cb14ddf8f1948a01f590fbe53d9ca4e2faf48375ce1c306f91acf7c94e005", + "800": "c6a47bc6f02cf06be16728fb308c83f2f2ae350325ef7016867f5bdaea849d71", + "801": "d14b358c76b55106613f9c0a2112393338dfd01513b0fd231b79fc8db20e41f0", + "802": "22ae33e67fb48accfaa3b36e70c5a19066b974194c3130680de0c7cdce2d0f2e", + "803": "d95b3f9bbb7054042c1fba4db02f7223a2dad94977a36f08c8aaf92f373f9e78", + "804": "b0b1cf7253593eb2334c75e66dbe22b4b4540347485f1ea24e80226b4b18171c", + "805": "41b1ff5db0e70984ad20c50d1a9ac2b5a53ccd5f42796c8e948ae8880005fbb9", + "806": "b9c813beb39671adb8e1530555cadca44c21ddc7127932274918df2091dbd9ca", + "807": "745fd9ba97970d85a29877942839e41fc192794420e86f3bde39fd26db7a8bff", + "808": 
"6c73b947eb603602a7e8afadc83eaaa381a46db8b82a6fb89c9c1d93cb023fce", + "809": "eebac7753da4c1230dfce0f15fc124ffff01b0e432f0b74623b60cff71bbc9a9", + "810": "42be7899672a1a0046823603ce60dbeda7250a56fcb8d0913093850c85394307", + "811": "8698cd28ae4d93db36631870c33e4a8a527d970050d994666115f54260b64138", + "812": "dc2495924f37353db8b846323b8085fae9db502e890c513ed2e64ed7281f567f", + "813": "92179dde05aa6557baca65699fda50ca024d33a77078d8e128caa3c5db84064b", + "814": "344ed8cb7684307c00b7f03d751729a7f9d2a5f4a4cb4574594113d69593c0c1", + "815": "f642cf15345af3feab60e26a02aee038f759914906a5b2b469b46fdeee50ff59", + "816": "058178444e85f2aedb2f75d824a469747381f0bd3235d8c72df4385fec86eb07", + "817": "582fdc2233298192b09ceaf1463d6be06a09894075532630aa9d9efcfcb31da4", + "818": "67f6964d6ff114a43371b8375c44db2f1362df4f110b4a7ce8d79cf1b76621a0", + "819": "c7a82513ad48dfc87f2c1e0f2915b71464b7f5a16501c71df4ae4a8741dceef3", + "820": "9b23ae0181f320aadda2637ac2179c8b41b00715630c3acb643c7aee3b81cf90", + "821": "0941e396ff15b98fd7827de8e33ef94996d48ba719a88ba8e2da7f2605df3e5c", + "822": "ed8ef7f568939b9df1b77ae58344940b91c7e154a4367fe2b179bc7b9484d4e6", + "823": "05139328571a86096032b57e3a6a02a61acad4fb0d8f8e1b5d0ffb0d063ba697", + "826": "7f40f14ca65e5c06dd9ec9bbb212adb4d97a503199cb3c30ed921a04373bbe1c", + "827": "80461f02c63654c642382a6ffb7a44d0a3554434dfcfcea00ba91537724c7106", + "828": "520c196175625a0230afb76579ea26033372de3ef4c78aceb146b84322bfa871", + "829": "ed0089e61cf5540dd4a8fef1c468b96cf57f1d2bb79968755ba856d547ddafdf", + "831": "8ec445084427419ca6da405e0ded9814a4b4e11a2be84d88a8dea421f8e49992", + "832": "cfcb9ebef9308823f64798b5e12a59bf77ff6f92b0eae3790a61c0a26f577010", + "833": "e6ff3a5b257eb53366a32bfc8ea410a00a78bafa63650c76ac2bceddfbb42ff5", + "834": "b0d2a7e7d629ef14db9e7352a9a06d6ca66f750429170bb169ca52c172b8cc96", + "835": "bdfa1b1eecbad79f5de48bc6daee4d2b07689d7fb172aa306dd6094172b396f0" } From d66e1e873288bf399559c9ca40310d4b031aec50 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 15:18:13 +0300 Subject: [PATCH 0795/1543] Add Project Euler problem 800 solution 1 (#8567) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 3 ++ project_euler/problem_800/__init__.py | 0 project_euler/problem_800/sol1.py | 65 +++++++++++++++++++++++++++ 3 files changed, 68 insertions(+) create mode 100644 project_euler/problem_800/__init__.py create mode 100644 project_euler/problem_800/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 1a641d8ecb59..18c573909773 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -317,6 +317,7 @@ * [Longest Sub Array](dynamic_programming/longest_sub_array.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) + * [Max Product Subarray](dynamic_programming/max_product_subarray.py) * [Max Sub Array](dynamic_programming/max_sub_array.py) * [Max Sum Contiguous Subsequence](dynamic_programming/max_sum_contiguous_subsequence.py) * [Min Distance Up Bottom](dynamic_programming/min_distance_up_bottom.py) @@ -1016,6 +1017,8 @@ * [Sol1](project_euler/problem_587/sol1.py) * Problem 686 * [Sol1](project_euler/problem_686/sol1.py) + * Problem 800 + * [Sol1](project_euler/problem_800/sol1.py) ## Quantum * [Bb84](quantum/bb84.py) diff --git a/project_euler/problem_800/__init__.py b/project_euler/problem_800/__init__.py new file mode 100644 index 
000000000000..e69de29bb2d1 diff --git a/project_euler/problem_800/sol1.py b/project_euler/problem_800/sol1.py new file mode 100644 index 000000000000..f887787bcbc6 --- /dev/null +++ b/project_euler/problem_800/sol1.py @@ -0,0 +1,65 @@ +""" +Project Euler Problem 800: https://projecteuler.net/problem=800 + +An integer of the form p^q q^p with prime numbers p != q is called a hybrid-integer. +For example, 800 = 2^5 5^2 is a hybrid-integer. + +We define C(n) to be the number of hybrid-integers less than or equal to n. +You are given C(800) = 2 and C(800^800) = 10790 + +Find C(800800^800800) +""" + +from math import isqrt, log2 + + +def calculate_prime_numbers(max_number: int) -> list[int]: + """ + Returns prime numbers below max_number + + >>> calculate_prime_numbers(10) + [2, 3, 5, 7] + """ + + is_prime = [True] * max_number + for i in range(2, isqrt(max_number - 1) + 1): + if is_prime[i]: + for j in range(i**2, max_number, i): + is_prime[j] = False + + return [i for i in range(2, max_number) if is_prime[i]] + + +def solution(base: int = 800800, degree: int = 800800) -> int: + """ + Returns the number of hybrid-integers less than or equal to base^degree + + >>> solution(800, 1) + 2 + + >>> solution(800, 800) + 10790 + """ + + upper_bound = degree * log2(base) + max_prime = int(upper_bound) + prime_numbers = calculate_prime_numbers(max_prime) + + hybrid_integers_count = 0 + left = 0 + right = len(prime_numbers) - 1 + while left < right: + while ( + prime_numbers[right] * log2(prime_numbers[left]) + + prime_numbers[left] * log2(prime_numbers[right]) + > upper_bound + ): + right -= 1 + hybrid_integers_count += right - left + left += 1 + + return hybrid_integers_count + + +if __name__ == "__main__": + print(f"{solution() = }") From 3d2012c4ba3a9d9ddd80e518f0b5b9ba6c52df7d Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 15:20:08 +0300 Subject: [PATCH 0796/1543] Add Project Euler problem 94 solution 1 (#8599) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 ++ project_euler/problem_094/__init__.py | 0 project_euler/problem_094/sol1.py | 44 +++++++++++++++++++++++++++ 3 files changed, 46 insertions(+) create mode 100644 project_euler/problem_094/__init__.py create mode 100644 project_euler/problem_094/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 18c573909773..c781b17bf05f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -937,6 +937,8 @@ * [Sol1](project_euler/problem_091/sol1.py) * Problem 092 * [Sol1](project_euler/problem_092/sol1.py) + * Problem 094 + * [Sol1](project_euler/problem_094/sol1.py) * Problem 097 * [Sol1](project_euler/problem_097/sol1.py) * Problem 099 diff --git a/project_euler/problem_094/__init__.py b/project_euler/problem_094/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_094/sol1.py b/project_euler/problem_094/sol1.py new file mode 100644 index 000000000000..a41292fe26fd --- /dev/null +++ b/project_euler/problem_094/sol1.py @@ -0,0 +1,44 @@ +""" +Project Euler Problem 94: https://projecteuler.net/problem=94 + +It is easily proved that no equilateral triangle exists with integral length sides and +integral area. However, the almost equilateral triangle 5-5-6 has an area of 12 square +units. + +We shall define an almost equilateral triangle to be a triangle for which two sides are +equal and the third differs by no more than one unit. 
+ +Find the sum of the perimeters of all almost equilateral triangles with integral side +lengths and area and whose perimeters do not exceed one billion (1,000,000,000). +""" + + +def solution(max_perimeter: int = 10**9) -> int: + """ + Returns the sum of the perimeters of all almost equilateral triangles with integral + side lengths and area and whose perimeters do not exceed max_perimeter + + >>> solution(20) + 16 + """ + + prev_value = 1 + value = 2 + + perimeters_sum = 0 + i = 0 + perimeter = 0 + while perimeter <= max_perimeter: + perimeters_sum += perimeter + + prev_value += 2 * value + value += prev_value + + perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 + i += 1 + + return perimeters_sum + + +if __name__ == "__main__": + print(f"{solution() = }") From 63710883c8634772fadf0145899cea4a1eadc31d Mon Sep 17 00:00:00 2001 From: amirsoroush <114881632+amirsoroush@users.noreply.github.com> Date: Sat, 1 Apr 2023 15:23:21 +0300 Subject: [PATCH 0797/1543] Remove extra `len` calls in doubly-linked-list's methods (#8600) --- data_structures/linked_list/doubly_linked_list.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 41d07d63e005..69763d12da15 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -81,7 +81,9 @@ def insert_at_nth(self, index: int, data): .... IndexError: list index out of range """ - if not 0 <= index <= len(self): + length = len(self) + + if not 0 <= index <= length: raise IndexError("list index out of range") new_node = Node(data) if self.head is None: @@ -90,7 +92,7 @@ def insert_at_nth(self, index: int, data): self.head.previous = new_node new_node.next = self.head self.head = new_node - elif index == len(self): + elif index == length: self.tail.next = new_node new_node.previous = self.tail self.tail = new_node @@ -131,15 +133,17 @@ def delete_at_nth(self, index: int): .... 
IndexError: list index out of range """ - if not 0 <= index <= len(self) - 1: + length = len(self) + + if not 0 <= index <= length - 1: raise IndexError("list index out of range") delete_node = self.head # default first node - if len(self) == 1: + if length == 1: self.head = self.tail = None elif index == 0: self.head = self.head.next self.head.previous = None - elif index == len(self) - 1: + elif index == length - 1: delete_node = self.tail self.tail = self.tail.previous self.tail.next = None From 59cae167e0e6b830b7ff5c89f5f2b8c747fb84c2 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 19:22:33 +0300 Subject: [PATCH 0798/1543] Reduce the complexity of digital_image_processing/edge detection/canny.py (#8167) * Reduce the complexity of digital_image_processing/edge_detection/canny.py * Fix * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Fix review issues * Rename dst to destination --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .../edge_detection/canny.py | 129 ++++++++++-------- 1 file changed, 75 insertions(+), 54 deletions(-) diff --git a/digital_image_processing/edge_detection/canny.py b/digital_image_processing/edge_detection/canny.py index a830355267c4..f8cbeedb3874 100644 --- a/digital_image_processing/edge_detection/canny.py +++ b/digital_image_processing/edge_detection/canny.py @@ -18,105 +18,126 @@ def gen_gaussian_kernel(k_size, sigma): return g -def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255): - image_row, image_col = image.shape[0], image.shape[1] - # gaussian_filter - gaussian_out = img_convolve(image, gen_gaussian_kernel(9, sigma=1.4)) - # get the gradient and degree by sobel_filter - sobel_grad, sobel_theta = sobel_filter(gaussian_out) - gradient_direction = np.rad2deg(sobel_theta) - gradient_direction += PI - - dst = np.zeros((image_row, image_col)) - +def suppress_non_maximum(image_shape, gradient_direction, sobel_grad): """ Non-maximum suppression. If the edge strength of the current pixel is the largest compared to the other pixels in the mask with the same direction, the value will be preserved. Otherwise, the value will be suppressed. 
""" - for row in range(1, image_row - 1): - for col in range(1, image_col - 1): + destination = np.zeros(image_shape) + + for row in range(1, image_shape[0] - 1): + for col in range(1, image_shape[1] - 1): direction = gradient_direction[row, col] if ( - 0 <= direction < 22.5 + 0 <= direction < PI / 8 or 15 * PI / 8 <= direction <= 2 * PI or 7 * PI / 8 <= direction <= 9 * PI / 8 ): w = sobel_grad[row, col - 1] e = sobel_grad[row, col + 1] if sobel_grad[row, col] >= w and sobel_grad[row, col] >= e: - dst[row, col] = sobel_grad[row, col] + destination[row, col] = sobel_grad[row, col] - elif (PI / 8 <= direction < 3 * PI / 8) or ( - 9 * PI / 8 <= direction < 11 * PI / 8 + elif ( + PI / 8 <= direction < 3 * PI / 8 + or 9 * PI / 8 <= direction < 11 * PI / 8 ): sw = sobel_grad[row + 1, col - 1] ne = sobel_grad[row - 1, col + 1] if sobel_grad[row, col] >= sw and sobel_grad[row, col] >= ne: - dst[row, col] = sobel_grad[row, col] + destination[row, col] = sobel_grad[row, col] - elif (3 * PI / 8 <= direction < 5 * PI / 8) or ( - 11 * PI / 8 <= direction < 13 * PI / 8 + elif ( + 3 * PI / 8 <= direction < 5 * PI / 8 + or 11 * PI / 8 <= direction < 13 * PI / 8 ): n = sobel_grad[row - 1, col] s = sobel_grad[row + 1, col] if sobel_grad[row, col] >= n and sobel_grad[row, col] >= s: - dst[row, col] = sobel_grad[row, col] + destination[row, col] = sobel_grad[row, col] - elif (5 * PI / 8 <= direction < 7 * PI / 8) or ( - 13 * PI / 8 <= direction < 15 * PI / 8 + elif ( + 5 * PI / 8 <= direction < 7 * PI / 8 + or 13 * PI / 8 <= direction < 15 * PI / 8 ): nw = sobel_grad[row - 1, col - 1] se = sobel_grad[row + 1, col + 1] if sobel_grad[row, col] >= nw and sobel_grad[row, col] >= se: - dst[row, col] = sobel_grad[row, col] - - """ - High-Low threshold detection. If an edge pixel’s gradient value is higher - than the high threshold value, it is marked as a strong edge pixel. If an - edge pixel’s gradient value is smaller than the high threshold value and - larger than the low threshold value, it is marked as a weak edge pixel. If - an edge pixel's value is smaller than the low threshold value, it will be - suppressed. - """ - if dst[row, col] >= threshold_high: - dst[row, col] = strong - elif dst[row, col] <= threshold_low: - dst[row, col] = 0 + destination[row, col] = sobel_grad[row, col] + + return destination + + +def detect_high_low_threshold( + image_shape, destination, threshold_low, threshold_high, weak, strong +): + """ + High-Low threshold detection. If an edge pixel’s gradient value is higher + than the high threshold value, it is marked as a strong edge pixel. If an + edge pixel’s gradient value is smaller than the high threshold value and + larger than the low threshold value, it is marked as a weak edge pixel. If + an edge pixel's value is smaller than the low threshold value, it will be + suppressed. + """ + for row in range(1, image_shape[0] - 1): + for col in range(1, image_shape[1] - 1): + if destination[row, col] >= threshold_high: + destination[row, col] = strong + elif destination[row, col] <= threshold_low: + destination[row, col] = 0 else: - dst[row, col] = weak + destination[row, col] = weak + +def track_edge(image_shape, destination, weak, strong): """ Edge tracking. Usually a weak edge pixel caused from true edges will be connected to a strong edge pixel while noise responses are unconnected. As long as there is one strong edge pixel that is involved in its 8-connected neighborhood, that weak edge point can be identified as one that should be preserved. 
""" - for row in range(1, image_row): - for col in range(1, image_col): - if dst[row, col] == weak: + for row in range(1, image_shape[0]): + for col in range(1, image_shape[1]): + if destination[row, col] == weak: if 255 in ( - dst[row, col + 1], - dst[row, col - 1], - dst[row - 1, col], - dst[row + 1, col], - dst[row - 1, col - 1], - dst[row + 1, col - 1], - dst[row - 1, col + 1], - dst[row + 1, col + 1], + destination[row, col + 1], + destination[row, col - 1], + destination[row - 1, col], + destination[row + 1, col], + destination[row - 1, col - 1], + destination[row + 1, col - 1], + destination[row - 1, col + 1], + destination[row + 1, col + 1], ): - dst[row, col] = strong + destination[row, col] = strong else: - dst[row, col] = 0 + destination[row, col] = 0 + + +def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255): + # gaussian_filter + gaussian_out = img_convolve(image, gen_gaussian_kernel(9, sigma=1.4)) + # get the gradient and degree by sobel_filter + sobel_grad, sobel_theta = sobel_filter(gaussian_out) + gradient_direction = PI + np.rad2deg(sobel_theta) + + destination = suppress_non_maximum(image.shape, gradient_direction, sobel_grad) + + detect_high_low_threshold( + image.shape, destination, threshold_low, threshold_high, weak, strong + ) + + track_edge(image.shape, destination, weak, strong) - return dst + return destination if __name__ == "__main__": # read original image in gray mode lena = cv2.imread(r"../image_data/lena.jpg", 0) # canny edge detection - canny_dst = canny(lena) - cv2.imshow("canny", canny_dst) + canny_destination = canny(lena) + cv2.imshow("canny", canny_destination) cv2.waitKey(0) From a213cea5f5a74e0a6b19240526779a3b0b1f270d Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 1 Apr 2023 12:39:22 -0400 Subject: [PATCH 0799/1543] Fix `mypy` errors in `dilation_operation.py` (#8595) * updating DIRECTORY.md * Fix mypy errors in dilation_operation.py * Rename functions to use snake case * updating DIRECTORY.md * updating DIRECTORY.md * Replace raw file string with pathlib Path * Update digital_image_processing/morphological_operations/dilation_operation.py Co-authored-by: Christian Clauss --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../dilation_operation.py | 35 ++++++++++--------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/digital_image_processing/morphological_operations/dilation_operation.py b/digital_image_processing/morphological_operations/dilation_operation.py index c8380737d219..e49b955c1480 100644 --- a/digital_image_processing/morphological_operations/dilation_operation.py +++ b/digital_image_processing/morphological_operations/dilation_operation.py @@ -1,33 +1,35 @@ +from pathlib import Path + import numpy as np from PIL import Image -def rgb2gray(rgb: np.array) -> np.array: +def rgb_to_gray(rgb: np.ndarray) -> np.ndarray: """ Return gray image from rgb image - >>> rgb2gray(np.array([[[127, 255, 0]]])) + >>> rgb_to_gray(np.array([[[127, 255, 0]]])) array([[187.6453]]) - >>> rgb2gray(np.array([[[0, 0, 0]]])) + >>> rgb_to_gray(np.array([[[0, 0, 0]]])) array([[0.]]) - >>> rgb2gray(np.array([[[2, 4, 1]]])) + >>> rgb_to_gray(np.array([[[2, 4, 1]]])) array([[3.0598]]) - >>> rgb2gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]])) + >>> rgb_to_gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]])) array([[159.0524, 90.0635, 117.6989]]) """ r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 
0.1140 * b -def gray2binary(gray: np.array) -> np.array: +def gray_to_binary(gray: np.ndarray) -> np.ndarray: """ Return binary image from gray image - >>> gray2binary(np.array([[127, 255, 0]])) + >>> gray_to_binary(np.array([[127, 255, 0]])) array([[False, True, False]]) - >>> gray2binary(np.array([[0]])) + >>> gray_to_binary(np.array([[0]])) array([[False]]) - >>> gray2binary(np.array([[26.2409, 4.9315, 1.4729]])) + >>> gray_to_binary(np.array([[26.2409, 4.9315, 1.4729]])) array([[False, False, False]]) - >>> gray2binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]])) + >>> gray_to_binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]])) array([[False, True, False], [False, True, False], [False, True, False]]) @@ -35,7 +37,7 @@ def gray2binary(gray: np.array) -> np.array: return (gray > 127) & (gray <= 255) -def dilation(image: np.array, kernel: np.array) -> np.array: +def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray: """ Return dilated image >>> dilation(np.array([[True, False, True]]), np.array([[0, 1, 0]])) @@ -61,14 +63,13 @@ def dilation(image: np.array, kernel: np.array) -> np.array: return output -# kernel to be applied -structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) - - if __name__ == "__main__": # read original image - image = np.array(Image.open(r"..\image_data\lena.jpg")) - output = dilation(gray2binary(rgb2gray(image)), structuring_element) + lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg" + lena = np.array(Image.open(lena_path)) + # kernel to be applied + structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image pil_img = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png") From 84b6852de80bb51c185c30942bff47f9c451c74d Mon Sep 17 00:00:00 2001 From: Blake Reimer Date: Sat, 1 Apr 2023 10:43:07 -0600 Subject: [PATCH 0800/1543] Graham's Law (#8162) * grahams law * doctest and type hints * doctest formatting * peer review updates --- physics/grahams_law.py | 208 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 208 insertions(+) create mode 100644 physics/grahams_law.py diff --git a/physics/grahams_law.py b/physics/grahams_law.py new file mode 100644 index 000000000000..6e5d75127e83 --- /dev/null +++ b/physics/grahams_law.py @@ -0,0 +1,208 @@ +""" +Title: Graham's Law of Effusion + +Description: Graham's law of effusion states that the rate of effusion of a gas is +inversely proportional to the square root of the molar mass of its particles: + +r1/r2 = sqrt(m2/m1) + +r1 = Rate of effusion for the first gas. +r2 = Rate of effusion for the second gas. +m1 = Molar mass of the first gas. +m2 = Molar mass of the second gas. + +(Description adapted from https://en.wikipedia.org/wiki/Graham%27s_law) +""" + +from math import pow, sqrt + + +def validate(*values: float) -> bool: + """ + Input Parameters: + ----------------- + effusion_rate_1: Effustion rate of first gas (m^2/s, mm^2/s, etc.) + effusion_rate_2: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + molar_mass_1: Molar mass of the first gas (g/mol, kg/kmol, etc.) + molar_mass_2: Molar mass of the second gas (g/mol, kg/kmol, etc.) 
+ + Returns: + -------- + >>> validate(2.016, 4.002) + True + >>> validate(-2.016, 4.002) + False + >>> validate() + False + """ + result = len(values) > 0 and all(value > 0.0 for value in values) + return result + + +def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError: + """ + Input Parameters: + ----------------- + molar_mass_1: Molar mass of the first gas (g/mol, kg/kmol, etc.) + molar_mass_2: Molar mass of the second gas (g/mol, kg/kmol, etc.) + + Returns: + -------- + >>> effusion_ratio(2.016, 4.002) + 1.408943 + >>> effusion_ratio(-2.016, 4.002) + ValueError('Input Error: Molar mass values must greater than 0.') + >>> effusion_ratio(2.016) + Traceback (most recent call last): + ... + TypeError: effusion_ratio() missing 1 required positional argument: 'molar_mass_2' + """ + return ( + round(sqrt(molar_mass_2 / molar_mass_1), 6) + if validate(molar_mass_1, molar_mass_2) + else ValueError("Input Error: Molar mass values must greater than 0.") + ) + + +def first_effusion_rate( + effusion_rate: float, molar_mass_1: float, molar_mass_2: float +) -> float | ValueError: + """ + Input Parameters: + ----------------- + effusion_rate: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + molar_mass_1: Molar mass of the first gas (g/mol, kg/kmol, etc.) + molar_mass_2: Molar mass of the second gas (g/mol, kg/kmol, etc.) + + Returns: + -------- + >>> first_effusion_rate(1, 2.016, 4.002) + 1.408943 + >>> first_effusion_rate(-1, 2.016, 4.002) + ValueError('Input Error: Molar mass and effusion rate values must greater than 0.') + >>> first_effusion_rate(1) + Traceback (most recent call last): + ... + TypeError: first_effusion_rate() missing 2 required positional arguments: \ +'molar_mass_1' and 'molar_mass_2' + >>> first_effusion_rate(1, 2.016) + Traceback (most recent call last): + ... + TypeError: first_effusion_rate() missing 1 required positional argument: \ +'molar_mass_2' + """ + return ( + round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6) + if validate(effusion_rate, molar_mass_1, molar_mass_2) + else ValueError( + "Input Error: Molar mass and effusion rate values must greater than 0." + ) + ) + + +def second_effusion_rate( + effusion_rate: float, molar_mass_1: float, molar_mass_2: float +) -> float | ValueError: + """ + Input Parameters: + ----------------- + effusion_rate: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + molar_mass_1: Molar mass of the first gas (g/mol, kg/kmol, etc.) + molar_mass_2: Molar mass of the second gas (g/mol, kg/kmol, etc.) + + Returns: + -------- + >>> second_effusion_rate(1, 2.016, 4.002) + 0.709752 + >>> second_effusion_rate(-1, 2.016, 4.002) + ValueError('Input Error: Molar mass and effusion rate values must greater than 0.') + >>> second_effusion_rate(1) + Traceback (most recent call last): + ... + TypeError: second_effusion_rate() missing 2 required positional arguments: \ +'molar_mass_1' and 'molar_mass_2' + >>> second_effusion_rate(1, 2.016) + Traceback (most recent call last): + ... + TypeError: second_effusion_rate() missing 1 required positional argument: \ +'molar_mass_2' + """ + return ( + round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6) + if validate(effusion_rate, molar_mass_1, molar_mass_2) + else ValueError( + "Input Error: Molar mass and effusion rate values must greater than 0." 
+ ) + ) + + +def first_molar_mass( + molar_mass: float, effusion_rate_1: float, effusion_rate_2: float +) -> float | ValueError: + """ + Input Parameters: + ----------------- + molar_mass: Molar mass of the first gas (g/mol, kg/kmol, etc.) + effusion_rate_1: Effustion rate of first gas (m^2/s, mm^2/s, etc.) + effusion_rate_2: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + + Returns: + -------- + >>> first_molar_mass(2, 1.408943, 0.709752) + 0.507524 + >>> first_molar_mass(-1, 2.016, 4.002) + ValueError('Input Error: Molar mass and effusion rate values must greater than 0.') + >>> first_molar_mass(1) + Traceback (most recent call last): + ... + TypeError: first_molar_mass() missing 2 required positional arguments: \ +'effusion_rate_1' and 'effusion_rate_2' + >>> first_molar_mass(1, 2.016) + Traceback (most recent call last): + ... + TypeError: first_molar_mass() missing 1 required positional argument: \ +'effusion_rate_2' + """ + return ( + round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6) + if validate(molar_mass, effusion_rate_1, effusion_rate_2) + else ValueError( + "Input Error: Molar mass and effusion rate values must greater than 0." + ) + ) + + +def second_molar_mass( + molar_mass: float, effusion_rate_1: float, effusion_rate_2: float +) -> float | ValueError: + """ + Input Parameters: + ----------------- + molar_mass: Molar mass of the first gas (g/mol, kg/kmol, etc.) + effusion_rate_1: Effustion rate of first gas (m^2/s, mm^2/s, etc.) + effusion_rate_2: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + + Returns: + -------- + >>> second_molar_mass(2, 1.408943, 0.709752) + 1.970351 + >>> second_molar_mass(-2, 1.408943, 0.709752) + ValueError('Input Error: Molar mass and effusion rate values must greater than 0.') + >>> second_molar_mass(1) + Traceback (most recent call last): + ... + TypeError: second_molar_mass() missing 2 required positional arguments: \ +'effusion_rate_1' and 'effusion_rate_2' + >>> second_molar_mass(1, 2.016) + Traceback (most recent call last): + ... + TypeError: second_molar_mass() missing 1 required positional argument: \ +'effusion_rate_2' + """ + return ( + round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6) + if validate(molar_mass, effusion_rate_1, effusion_rate_2) + else ValueError( + "Input Error: Molar mass and effusion rate values must greater than 0." 
+ ) + ) From 56a40eb3ee9aa151defd97597f4e67acf294089f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 20:43:11 +0300 Subject: [PATCH 0801/1543] Reenable files when TensorFlow supports the current Python (#8602) * Remove python_version < "3.11" for tensorflow * Reenable neural_network/input_data.py_tf * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Try to fix ruff * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Try to fix ruff * Try to fix ruff * Try to fix ruff * Try to fix pre-commit * Try to fix * Fix * Fix * Reenable dynamic_programming/k_means_clustering_tensorflow.py_tf * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Try to fix ruff --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 + ...py_tf => k_means_clustering_tensorflow.py} | 9 +- .../{input_data.py_tf => input_data.py} | 98 +++++++++---------- requirements.txt | 2 +- 4 files changed, 55 insertions(+), 56 deletions(-) rename dynamic_programming/{k_means_clustering_tensorflow.py_tf => k_means_clustering_tensorflow.py} (98%) rename neural_network/{input_data.py_tf => input_data.py} (83%) diff --git a/DIRECTORY.md b/DIRECTORY.md index c781b17bf05f..34967082b359 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -309,6 +309,7 @@ * [Floyd Warshall](dynamic_programming/floyd_warshall.py) * [Integer Partition](dynamic_programming/integer_partition.py) * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) + * [K Means Clustering Tensorflow](dynamic_programming/k_means_clustering_tensorflow.py) * [Knapsack](dynamic_programming/knapsack.py) * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) * [Longest Common Substring](dynamic_programming/longest_common_substring.py) @@ -685,6 +686,7 @@ * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) + * [Input Data](neural_network/input_data.py) * [Perceptron](neural_network/perceptron.py) * [Simple Neural Network](neural_network/simple_neural_network.py) diff --git a/dynamic_programming/k_means_clustering_tensorflow.py_tf b/dynamic_programming/k_means_clustering_tensorflow.py similarity index 98% rename from dynamic_programming/k_means_clustering_tensorflow.py_tf rename to dynamic_programming/k_means_clustering_tensorflow.py index 4fbcedeaa0dc..8d3f6f0dfbcb 100644 --- a/dynamic_programming/k_means_clustering_tensorflow.py_tf +++ b/dynamic_programming/k_means_clustering_tensorflow.py @@ -1,9 +1,10 @@ -import tensorflow as tf from random import shuffle + +import tensorflow as tf from numpy import array -def TFKMeansCluster(vectors, noofclusters): +def tf_k_means_cluster(vectors, noofclusters): """ K-Means Clustering using TensorFlow. 'vectors' should be a n*k 2-D NumPy array, where n is the number @@ -30,7 +31,6 @@ def TFKMeansCluster(vectors, noofclusters): graph = tf.Graph() with graph.as_default(): - # SESSION OF COMPUTATION sess = tf.Session() @@ -95,8 +95,7 @@ def TFKMeansCluster(vectors, noofclusters): # iterations. 
To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. noofiterations = 100 - for iteration_n in range(noofiterations): - + for _ in range(noofiterations): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. diff --git a/neural_network/input_data.py_tf b/neural_network/input_data.py similarity index 83% rename from neural_network/input_data.py_tf rename to neural_network/input_data.py index 0e22ac0bcda5..2a32f0b82c37 100644 --- a/neural_network/input_data.py_tf +++ b/neural_network/input_data.py @@ -21,13 +21,10 @@ import collections import gzip import os +import urllib import numpy -from six.moves import urllib -from six.moves import xrange # pylint: disable=redefined-builtin - -from tensorflow.python.framework import dtypes -from tensorflow.python.framework import random_seed +from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated @@ -46,16 +43,16 @@ def _read32(bytestream): def _extract_images(f): """Extract the images into a 4D uint8 numpy array [index, y, x, depth]. - Args: - f: A file object that can be passed into a gzip reader. + Args: + f: A file object that can be passed into a gzip reader. - Returns: - data: A 4D uint8 numpy array [index, y, x, depth]. + Returns: + data: A 4D uint8 numpy array [index, y, x, depth]. - Raises: - ValueError: If the bytestream does not start with 2051. + Raises: + ValueError: If the bytestream does not start with 2051. - """ + """ print("Extracting", f.name) with gzip.GzipFile(fileobj=f) as bytestream: magic = _read32(bytestream) @@ -86,17 +83,17 @@ def _dense_to_one_hot(labels_dense, num_classes): def _extract_labels(f, one_hot=False, num_classes=10): """Extract the labels into a 1D uint8 numpy array [index]. - Args: - f: A file object that can be passed into a gzip reader. - one_hot: Does one hot encoding for the result. - num_classes: Number of classes for the one hot encoding. + Args: + f: A file object that can be passed into a gzip reader. + one_hot: Does one hot encoding for the result. + num_classes: Number of classes for the one hot encoding. - Returns: - labels: a 1D uint8 numpy array. + Returns: + labels: a 1D uint8 numpy array. - Raises: - ValueError: If the bystream doesn't start with 2049. - """ + Raises: + ValueError: If the bystream doesn't start with 2049. + """ print("Extracting", f.name) with gzip.GzipFile(fileobj=f) as bytestream: magic = _read32(bytestream) @@ -115,8 +112,8 @@ def _extract_labels(f, one_hot=False, num_classes=10): class _DataSet: """Container class for a _DataSet (deprecated). - THIS CLASS IS DEPRECATED. - """ + THIS CLASS IS DEPRECATED. + """ @deprecated( None, @@ -135,21 +132,21 @@ def __init__( ): """Construct a _DataSet. - one_hot arg is used only if fake_data is true. `dtype` can be either - `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into - `[0, 1]`. Seed arg provides for convenient deterministic testing. - - Args: - images: The images - labels: The labels - fake_data: Ignore inages and labels, use fake data. - one_hot: Bool, return the labels as one hot vectors (if True) or ints (if - False). - dtype: Output image dtype. One of [uint8, float32]. `uint8` output has - range [0,255]. float32 output has range [0,1]. - reshape: Bool. If True returned images are returned flattened to vectors. - seed: The random seed to use. 
- """ + one_hot arg is used only if fake_data is true. `dtype` can be either + `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into + `[0, 1]`. Seed arg provides for convenient deterministic testing. + + Args: + images: The images + labels: The labels + fake_data: Ignore inages and labels, use fake data. + one_hot: Bool, return the labels as one hot vectors (if True) or ints (if + False). + dtype: Output image dtype. One of [uint8, float32]. `uint8` output has + range [0,255]. float32 output has range [0,1]. + reshape: Bool. If True returned images are returned flattened to vectors. + seed: The random seed to use. + """ seed1, seed2 = random_seed.get_seed(seed) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seed1 if seed is None else seed2) @@ -206,8 +203,8 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): else: fake_label = 0 return ( - [fake_image for _ in xrange(batch_size)], - [fake_label for _ in xrange(batch_size)], + [fake_image for _ in range(batch_size)], + [fake_label for _ in range(batch_size)], ) start = self._index_in_epoch # Shuffle for the first epoch @@ -250,19 +247,19 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): def _maybe_download(filename, work_directory, source_url): """Download the data from source url, unless it's already here. - Args: - filename: string, name of the file in the directory. - work_directory: string, path to working directory. - source_url: url to download from if file doesn't exist. + Args: + filename: string, name of the file in the directory. + work_directory: string, path to working directory. + source_url: url to download from if file doesn't exist. - Returns: - Path to resulting file. - """ + Returns: + Path to resulting file. + """ if not gfile.Exists(work_directory): gfile.MakeDirs(work_directory) filepath = os.path.join(work_directory, filename) if not gfile.Exists(filepath): - urllib.request.urlretrieve(source_url, filepath) + urllib.request.urlretrieve(source_url, filepath) # noqa: S310 with gfile.GFile(filepath) as f: size = f.size() print("Successfully downloaded", filename, size, "bytes.") @@ -328,7 +325,8 @@ def fake(): if not 0 <= validation_size <= len(train_images): raise ValueError( - f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}." + f"Validation size should be between 0 and {len(train_images)}. " + f"Received: {validation_size}." 
) validation_images = train_images[:validation_size] @@ -336,7 +334,7 @@ def fake(): train_images = train_images[validation_size:] train_labels = train_labels[validation_size:] - options = dict(dtype=dtype, reshape=reshape, seed=seed) + options = {"dtype": dtype, "reshape": reshape, "seed": seed} train = _DataSet(train_images, train_labels, **options) validation = _DataSet(validation_images, validation_labels, **options) diff --git a/requirements.txt b/requirements.txt index a1d607df07e1..acfbc823e77f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ scikit-fuzzy scikit-learn statsmodels sympy -tensorflow; python_version < "3.11" +tensorflow texttable tweepy xgboost From 33114f0272bcc1fafa6ce0f40d92ded908747ce3 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 1 Apr 2023 16:05:01 -0400 Subject: [PATCH 0802/1543] Revamp `md5.py` (#8065) * Add type hints to md5.py * Rename some vars to snake case * Specify functions imported from math * Rename vars and functions to be more descriptive * Make tests from test function into doctests * Clarify more var names * Refactor some MD5 code into preprocess function * Simplify loop indices in get_block_words * Add more detailed comments, docs, and doctests * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Add type hints to md5.py * Rename some vars to snake case * Specify functions imported from math * Rename vars and functions to be more descriptive * Make tests from test function into doctests * Clarify more var names * Refactor some MD5 code into preprocess function * Simplify loop indices in get_block_words * Add more detailed comments, docs, and doctests * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Convert str types to bytes * Add tests comparing md5_me to hashlib's md5 * Replace line-break backslashes with parentheses --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + hashes/md5.py | 372 +++++++++++++++++++++++++++++++++++++++----------- 2 files changed, 290 insertions(+), 83 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 34967082b359..b1adc23f6e61 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -717,6 +717,7 @@ * [Archimedes Principle](physics/archimedes_principle.py) * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) + * [Grahams Law](physics/grahams_law.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Hubble Parameter](physics/hubble_parameter.py) * [Ideal Gas Law](physics/ideal_gas_law.py) diff --git a/hashes/md5.py b/hashes/md5.py index 2020bf2e53bf..2187006ec8a9 100644 --- a/hashes/md5.py +++ b/hashes/md5.py @@ -1,91 +1,223 @@ -import math +""" +The MD5 algorithm is a hash function that's commonly used as a checksum to +detect data corruption. The algorithm works by processing a given message in +blocks of 512 bits, padding the message as needed. It uses the blocks to operate +a 128-bit state and performs a total of 64 such operations. Note that all values +are little-endian, so inputs are converted as needed. +Although MD5 was used as a cryptographic hash function in the past, it's since +been cracked, so it shouldn't be used for security purposes. -def rearrange(bit_string_32): - """[summary] - Regroups the given binary string. 
+For more info, see https://en.wikipedia.org/wiki/MD5 +""" + +from collections.abc import Generator +from math import sin + + +def to_little_endian(string_32: bytes) -> bytes: + """ + Converts the given string to little-endian in groups of 8 chars. Arguments: - bitString32 {[string]} -- [32 bit binary] + string_32 {[string]} -- [32-char string] Raises: - ValueError -- [if the given string not are 32 bit binary string] + ValueError -- [input is not 32 char] Returns: - [string] -- [32 bit binary string] - >>> rearrange('1234567890abcdfghijklmnopqrstuvw') - 'pqrstuvwhijklmno90abcdfg12345678' + 32-char little-endian string + >>> to_little_endian(b'1234567890abcdfghijklmnopqrstuvw') + b'pqrstuvwhijklmno90abcdfg12345678' + >>> to_little_endian(b'1234567890') + Traceback (most recent call last): + ... + ValueError: Input must be of length 32 """ + if len(string_32) != 32: + raise ValueError("Input must be of length 32") - if len(bit_string_32) != 32: - raise ValueError("Need length 32") - new_string = "" + little_endian = b"" for i in [3, 2, 1, 0]: - new_string += bit_string_32[8 * i : 8 * i + 8] - return new_string + little_endian += string_32[8 * i : 8 * i + 8] + return little_endian + + +def reformat_hex(i: int) -> bytes: + """ + Converts the given non-negative integer to hex string. + Example: Suppose the input is the following: + i = 1234 -def reformat_hex(i): - """[summary] - Converts the given integer into 8-digit hex number. + The input is 0x000004d2 in hex, so the little-endian hex string is + "d2040000". Arguments: - i {[int]} -- [integer] + i {[int]} -- [integer] + + Raises: + ValueError -- [input is negative] + + Returns: + 8-char little-endian hex string + + >>> reformat_hex(1234) + b'd2040000' >>> reformat_hex(666) - '9a020000' + b'9a020000' + >>> reformat_hex(0) + b'00000000' + >>> reformat_hex(1234567890) + b'd2029649' + >>> reformat_hex(1234567890987654321) + b'b11c6cb1' + >>> reformat_hex(-1) + Traceback (most recent call last): + ... + ValueError: Input must be non-negative """ + if i < 0: + raise ValueError("Input must be non-negative") - hexrep = format(i, "08x") - thing = "" + hex_rep = format(i, "08x")[-8:] + little_endian_hex = b"" for i in [3, 2, 1, 0]: - thing += hexrep[2 * i : 2 * i + 2] - return thing + little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8") + return little_endian_hex -def pad(bit_string): - """[summary] - Fills up the binary string to a 512 bit binary string +def preprocess(message: bytes) -> bytes: + """ + Preprocesses the message string: + - Convert message to bit string + - Pad bit string to a multiple of 512 chars: + - Append a 1 + - Append 0's until length = 448 (mod 512) + - Append length of original message (64 chars) + + Example: Suppose the input is the following: + message = "a" + + The message bit string is "01100001", which is 8 bits long. Thus, the + bit string needs 439 bits of padding so that + (bit_string + "1" + padding) = 448 (mod 512). + The message length is "000010000...0" in 64-bit little-endian binary. + The combined bit string is then 512 bits long. Arguments: - bitString {[string]} -- [binary string] + message {[string]} -- [message string] Returns: - [string] -- [binary string] + processed bit string padded to a multiple of 512 chars + + >>> preprocess(b"a") == (b"01100001" + b"1" + + ... 
(b"0" * 439) + b"00001000" + (b"0" * 56)) + True + >>> preprocess(b"") == b"1" + (b"0" * 447) + (b"0" * 64) + True """ - start_length = len(bit_string) - bit_string += "1" + bit_string = b"" + for char in message: + bit_string += format(char, "08b").encode("utf-8") + start_len = format(len(bit_string), "064b").encode("utf-8") + + # Pad bit_string to a multiple of 512 chars + bit_string += b"1" while len(bit_string) % 512 != 448: - bit_string += "0" - last_part = format(start_length, "064b") - bit_string += rearrange(last_part[32:]) + rearrange(last_part[:32]) + bit_string += b"0" + bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32]) + return bit_string -def get_block(bit_string): - """[summary] - Iterator: - Returns by each call a list of length 16 with the 32 bit - integer blocks. +def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]: + """ + Splits bit string into blocks of 512 chars and yields each block as a list + of 32-bit words + + Example: Suppose the input is the following: + bit_string = + "000000000...0" + # 0x00 (32 bits, padded to the right) + "000000010...0" + # 0x01 (32 bits, padded to the right) + "000000100...0" + # 0x02 (32 bits, padded to the right) + "000000110...0" + # 0x03 (32 bits, padded to the right) + ... + "000011110...0" # 0x0a (32 bits, padded to the right) + + Then len(bit_string) == 512, so there'll be 1 block. The block is split + into 32-bit words, and each word is converted to little endian. The + first word is interpreted as 0 in decimal, the second word is + interpreted as 1 in decimal, etc. + + Thus, block_words == [[0, 1, 2, 3, ..., 15]]. Arguments: - bit_string {[string]} -- [binary string >= 512] + bit_string {[string]} -- [bit string with multiple of 512 as length] + + Raises: + ValueError -- [length of bit string isn't multiple of 512] + + Yields: + a list of 16 32-bit words + + >>> test_string = ("".join(format(n << 24, "032b") for n in range(16)) + ... .encode("utf-8")) + >>> list(get_block_words(test_string)) + [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]] + >>> list(get_block_words(test_string * 4)) == [list(range(16))] * 4 + True + >>> list(get_block_words(b"1" * 512)) == [[4294967295] * 16] + True + >>> list(get_block_words(b"")) + [] + >>> list(get_block_words(b"1111")) + Traceback (most recent call last): + ... + ValueError: Input must have length that's a multiple of 512 """ + if len(bit_string) % 512 != 0: + raise ValueError("Input must have length that's a multiple of 512") - curr_pos = 0 - while curr_pos < len(bit_string): - curr_part = bit_string[curr_pos : curr_pos + 512] - my_splits = [] - for i in range(16): - my_splits.append(int(rearrange(curr_part[32 * i : 32 * i + 32]), 2)) - yield my_splits - curr_pos += 512 + for pos in range(0, len(bit_string), 512): + block = bit_string[pos : pos + 512] + block_words = [] + for i in range(0, 512, 32): + block_words.append(int(to_little_endian(block[i : i + 32]), 2)) + yield block_words -def not32(i): +def not_32(i: int) -> int: """ - >>> not32(34) + Perform bitwise NOT on given int. + + Arguments: + i {[int]} -- [given int] + + Raises: + ValueError -- [input is negative] + + Returns: + Result of bitwise NOT on i + + >>> not_32(34) 4294967261 + >>> not_32(1234) + 4294966061 + >>> not_32(4294966061) + 1234 + >>> not_32(0) + 4294967295 + >>> not_32(1) + 4294967294 + >>> not_32(-1) + Traceback (most recent call last): + ... 
+ ValueError: Input must be non-negative """ + if i < 0: + raise ValueError("Input must be non-negative") + i_str = format(i, "032b") new_str = "" for c in i_str: @@ -93,35 +225,114 @@ def not32(i): return int(new_str, 2) -def sum32(a, b): +def sum_32(a: int, b: int) -> int: + """ + Add two numbers as 32-bit ints. + + Arguments: + a {[int]} -- [first given int] + b {[int]} -- [second given int] + + Returns: + (a + b) as an unsigned 32-bit int + + >>> sum_32(1, 1) + 2 + >>> sum_32(2, 3) + 5 + >>> sum_32(0, 0) + 0 + >>> sum_32(-1, -1) + 4294967294 + >>> sum_32(4294967295, 1) + 0 + """ return (a + b) % 2**32 -def leftrot32(i, s): - return (i << s) ^ (i >> (32 - s)) +def left_rotate_32(i: int, shift: int) -> int: + """ + Rotate the bits of a given int left by a given amount. + + Arguments: + i {[int]} -- [given int] + shift {[int]} -- [shift amount] + + Raises: + ValueError -- [either given int or shift is negative] + Returns: + `i` rotated to the left by `shift` bits + + >>> left_rotate_32(1234, 1) + 2468 + >>> left_rotate_32(1111, 4) + 17776 + >>> left_rotate_32(2147483648, 1) + 1 + >>> left_rotate_32(2147483648, 3) + 4 + >>> left_rotate_32(4294967295, 4) + 4294967295 + >>> left_rotate_32(1234, 0) + 1234 + >>> left_rotate_32(0, 0) + 0 + >>> left_rotate_32(-1, 0) + Traceback (most recent call last): + ... + ValueError: Input must be non-negative + >>> left_rotate_32(0, -1) + Traceback (most recent call last): + ... + ValueError: Shift must be non-negative + """ + if i < 0: + raise ValueError("Input must be non-negative") + if shift < 0: + raise ValueError("Shift must be non-negative") + return ((i << shift) ^ (i >> (32 - shift))) % 2**32 + + +def md5_me(message: bytes) -> bytes: + """ + Returns the 32-char MD5 hash of a given message. -def md5me(test_string): - """[summary] - Returns a 32-bit hash code of the string 'testString' + Reference: https://en.wikipedia.org/wiki/MD5#Algorithm Arguments: - testString {[string]} -- [message] + message {[string]} -- [message] + + Returns: + 32-char MD5 hash string + + >>> md5_me(b"") + b'd41d8cd98f00b204e9800998ecf8427e' + >>> md5_me(b"The quick brown fox jumps over the lazy dog") + b'9e107d9d372bb6826bd81d3542a419d6' + >>> md5_me(b"The quick brown fox jumps over the lazy dog.") + b'e4d909c290d0fb1ca068ffaddf22cbd0' + + >>> import hashlib + >>> from string import ascii_letters + >>> msgs = [b"", ascii_letters.encode("utf-8"), "Üñîçø∂é".encode("utf-8"), + ... 
b"The quick brown fox jumps over the lazy dog."] + >>> all(md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8") for msg in msgs) + True """ - bs = "" - for i in test_string: - bs += format(ord(i), "08b") - bs = pad(bs) + # Convert to bit string, add padding and append message length + bit_string = preprocess(message) - tvals = [int(2**32 * abs(math.sin(i + 1))) for i in range(64)] + added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)] + # Starting states a0 = 0x67452301 b0 = 0xEFCDAB89 c0 = 0x98BADCFE d0 = 0x10325476 - s = [ + shift_amounts = [ 7, 12, 17, @@ -188,51 +399,46 @@ def md5me(test_string): 21, ] - for m in get_block(bs): + # Process bit string in chunks, each with 16 32-char words + for block_words in get_block_words(bit_string): a = a0 b = b0 c = c0 d = d0 + + # Hash current chunk for i in range(64): if i <= 15: - # f = (B & C) | (not32(B) & D) + # f = (b & c) | (not_32(b) & d) # Alternate definition for f f = d ^ (b & (c ^ d)) g = i elif i <= 31: - # f = (D & B) | (not32(D) & C) + # f = (d & b) | (not_32(d) & c) # Alternate definition for f f = c ^ (d & (b ^ c)) g = (5 * i + 1) % 16 elif i <= 47: f = b ^ c ^ d g = (3 * i + 5) % 16 else: - f = c ^ (b | not32(d)) + f = c ^ (b | not_32(d)) g = (7 * i) % 16 - dtemp = d + f = (f + a + added_consts[i] + block_words[g]) % 2**32 + a = d d = c c = b - b = sum32(b, leftrot32((a + f + tvals[i] + m[g]) % 2**32, s[i])) - a = dtemp - a0 = sum32(a0, a) - b0 = sum32(b0, b) - c0 = sum32(c0, c) - d0 = sum32(d0, d) + b = sum_32(b, left_rotate_32(f, shift_amounts[i])) + + # Add hashed chunk to running total + a0 = sum_32(a0, a) + b0 = sum_32(b0, b) + c0 = sum_32(c0, c) + d0 = sum_32(d0, d) digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0) return digest -def test(): - assert md5me("") == "d41d8cd98f00b204e9800998ecf8427e" - assert ( - md5me("The quick brown fox jumps over the lazy dog") - == "9e107d9d372bb6826bd81d3542a419d6" - ) - print("Success.") - - if __name__ == "__main__": - test() import doctest doctest.testmod() From 5ca71895630719cc41f8171aba8be461fb8cc9d2 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 2 Apr 2023 06:48:19 +0200 Subject: [PATCH 0803/1543] Rename quantum_random.py.DISABLED.txt to quantum_random.py (#8601) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + quantum/{quantum_random.py.DISABLED.txt => quantum_random.py} | 0 2 files changed, 1 insertion(+) rename quantum/{quantum_random.py.DISABLED.txt => quantum_random.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index b1adc23f6e61..8dd3fb5d9af1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1033,6 +1033,7 @@ * [Q Fourier Transform](quantum/q_fourier_transform.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) + * [Quantum Random](quantum/quantum_random.py) * [Quantum Teleportation](quantum/quantum_teleportation.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) diff --git a/quantum/quantum_random.py.DISABLED.txt b/quantum/quantum_random.py similarity index 100% rename from quantum/quantum_random.py.DISABLED.txt rename to quantum/quantum_random.py From ebc2d5d79f837931e80f7d5e7e1dece9ef48f760 Mon Sep 17 00:00:00 2001 From: Ishab Date: Sun, 2 Apr 2023 13:04:11 +0100 Subject: [PATCH 0804/1543] Add Project Euler problem 79 solution 1 (#8607) Co-authored-by: Dhruv Manilawala --- project_euler/problem_079/__init__.py | 0 
project_euler/problem_079/keylog.txt | 50 ++++++++++++++++ project_euler/problem_079/keylog_test.txt | 16 ++++++ project_euler/problem_079/sol1.py | 69 +++++++++++++++++++++++ 4 files changed, 135 insertions(+) create mode 100644 project_euler/problem_079/__init__.py create mode 100644 project_euler/problem_079/keylog.txt create mode 100644 project_euler/problem_079/keylog_test.txt create mode 100644 project_euler/problem_079/sol1.py diff --git a/project_euler/problem_079/__init__.py b/project_euler/problem_079/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_079/keylog.txt b/project_euler/problem_079/keylog.txt new file mode 100644 index 000000000000..41f15673248d --- /dev/null +++ b/project_euler/problem_079/keylog.txt @@ -0,0 +1,50 @@ +319 +680 +180 +690 +129 +620 +762 +689 +762 +318 +368 +710 +720 +710 +629 +168 +160 +689 +716 +731 +736 +729 +316 +729 +729 +710 +769 +290 +719 +680 +318 +389 +162 +289 +162 +718 +729 +319 +790 +680 +890 +362 +319 +760 +316 +729 +380 +319 +728 +716 diff --git a/project_euler/problem_079/keylog_test.txt b/project_euler/problem_079/keylog_test.txt new file mode 100644 index 000000000000..2c7024bde948 --- /dev/null +++ b/project_euler/problem_079/keylog_test.txt @@ -0,0 +1,16 @@ +319 +680 +180 +690 +129 +620 +698 +318 +328 +310 +320 +610 +629 +198 +190 +631 diff --git a/project_euler/problem_079/sol1.py b/project_euler/problem_079/sol1.py new file mode 100644 index 000000000000..d34adcd243b0 --- /dev/null +++ b/project_euler/problem_079/sol1.py @@ -0,0 +1,69 @@ +""" +Project Euler Problem 79: https://projecteuler.net/problem=79 + +Passcode derivation + +A common security method used for online banking is to ask the user for three +random characters from a passcode. For example, if the passcode was 531278, +they may ask for the 2nd, 3rd, and 5th characters; the expected reply would +be: 317. + +The text file, keylog.txt, contains fifty successful login attempts. + +Given that the three characters are always asked for in order, analyse the file +so as to determine the shortest possible secret passcode of unknown length. +""" +import itertools +from pathlib import Path + + +def find_secret_passcode(logins: list[str]) -> int: + """ + Returns the shortest possible secret passcode of unknown length. + + >>> find_secret_passcode(["135", "259", "235", "189", "690", "168", "120", + ... "136", "289", "589", "160", "165", "580", "369", "250", "280"]) + 12365890 + + >>> find_secret_passcode(["426", "281", "061", "819" "268", "406", "420", + ... "428", "209", "689", "019", "421", "469", "261", "681", "201"]) + 4206819 + """ + + # Split each login by character e.g. '319' -> ('3', '1', '9') + split_logins = [tuple(login) for login in logins] + + unique_chars = {char for login in split_logins for char in login} + + for permutation in itertools.permutations(unique_chars): + satisfied = True + for login in logins: + if not ( + permutation.index(login[0]) + < permutation.index(login[1]) + < permutation.index(login[2]) + ): + satisfied = False + break + + if satisfied: + return int("".join(permutation)) + + raise Exception("Unable to find the secret passcode") + + +def solution(input_file: str = "keylog.txt") -> int: + """ + Returns the shortest possible secret passcode of unknown length + for successful login attempts given by `input_file` text file. 
+ + >>> solution("keylog_test.txt") + 6312980 + """ + logins = Path(__file__).parent.joinpath(input_file).read_text().splitlines() + + return find_secret_passcode(logins) + + +if __name__ == "__main__": + print(f"{solution() = }") From 740ecfb121009612310ab9e1bc9d6ffe22b62ae4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Apr 2023 07:00:31 +0530 Subject: [PATCH 0805/1543] [pre-commit.ci] pre-commit autoupdate (#8611) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.259 → v0.0.260](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.259...v0.0.260) - [github.com/psf/black: 23.1.0 → 23.3.0](https://github.com/psf/black/compare/23.1.0...23.3.0) - [github.com/abravalheri/validate-pyproject: v0.12.1 → v0.12.2](https://github.com/abravalheri/validate-pyproject/compare/v0.12.1...v0.12.2) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 72a878387e15..d54ce5adddce 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.259 + rev: v0.0.260 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.1.0 + rev: 23.3.0 hooks: - id: black @@ -46,7 +46,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.12.1 + rev: v0.12.2 hooks: - id: validate-pyproject diff --git a/DIRECTORY.md b/DIRECTORY.md index 8dd3fb5d9af1..3764c471ce70 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -922,6 +922,8 @@ * [Sol1](project_euler/problem_077/sol1.py) * Problem 078 * [Sol1](project_euler/problem_078/sol1.py) + * Problem 079 + * [Sol1](project_euler/problem_079/sol1.py) * Problem 080 * [Sol1](project_euler/problem_080/sol1.py) * Problem 081 From b2b8585e63664a0c7aa18b95528e345c2738c4ae Mon Sep 17 00:00:00 2001 From: Ishan Dutta Date: Fri, 7 Apr 2023 21:21:25 +0530 Subject: [PATCH 0806/1543] Add LeNet Implementation in PyTorch (#7070) * add torch to requirements * add lenet architecture in pytorch * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add type hints * remove file * add type hints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update variable name * add fail test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add newline * reformatting --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- computer_vision/lenet_pytorch.py | 82 ++++++++++++++++++++++++++++++++ requirements.txt | 1 + 2 files changed, 83 insertions(+) create mode 100644 computer_vision/lenet_pytorch.py diff --git a/computer_vision/lenet_pytorch.py b/computer_vision/lenet_pytorch.py new file mode 100644 index 000000000000..177a5ebfcdb4 --- /dev/null +++ b/computer_vision/lenet_pytorch.py @@ -0,0 +1,82 @@ +""" +LeNet Network + +Paper: http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf +""" + +import numpy +import torch +import 
torch.nn as nn + + +class LeNet(nn.Module): + def __init__(self) -> None: + super().__init__() + + self.tanh = nn.Tanh() + self.avgpool = nn.AvgPool2d(kernel_size=2, stride=2) + + self.conv1 = nn.Conv2d( + in_channels=1, + out_channels=6, + kernel_size=(5, 5), + stride=(1, 1), + padding=(0, 0), + ) + self.conv2 = nn.Conv2d( + in_channels=6, + out_channels=16, + kernel_size=(5, 5), + stride=(1, 1), + padding=(0, 0), + ) + self.conv3 = nn.Conv2d( + in_channels=16, + out_channels=120, + kernel_size=(5, 5), + stride=(1, 1), + padding=(0, 0), + ) + + self.linear1 = nn.Linear(120, 84) + self.linear2 = nn.Linear(84, 10) + + def forward(self, image_array: numpy.ndarray) -> numpy.ndarray: + image_array = self.tanh(self.conv1(image_array)) + image_array = self.avgpool(image_array) + image_array = self.tanh(self.conv2(image_array)) + image_array = self.avgpool(image_array) + image_array = self.tanh(self.conv3(image_array)) + + image_array = image_array.reshape(image_array.shape[0], -1) + image_array = self.tanh(self.linear1(image_array)) + image_array = self.linear2(image_array) + return image_array + + +def test_model(image_tensor: torch.tensor) -> bool: + """ + Test the model on an input batch of 64 images + + Args: + image_tensor (torch.tensor): Batch of Images for the model + + >>> test_model(torch.randn(64, 1, 32, 32)) + True + + """ + try: + model = LeNet() + output = model(image_tensor) + except RuntimeError: + return False + + return output.shape == torch.zeros([64, 10]).shape + + +if __name__ == "__main__": + random_image_1 = torch.randn(64, 1, 32, 32) + random_image_2 = torch.randn(1, 32, 32) + + print(f"random_image_1 Model Passed: {test_model(random_image_1)}") + print(f"\nrandom_image_2 Model Passed: {test_model(random_image_2)}") diff --git a/requirements.txt b/requirements.txt index acfbc823e77f..e159fe010dc4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,6 +17,7 @@ statsmodels sympy tensorflow texttable +torch tweepy xgboost yulewalker From 179298e3a291470ef30e850f23d98c2fb9055202 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 8 Apr 2023 02:52:26 +0200 Subject: [PATCH 0807/1543] Revert "Add LeNet Implementation in PyTorch (#7070)" (#8621) This reverts commit b2b8585e63664a0c7aa18b95528e345c2738c4ae. 
--- computer_vision/lenet_pytorch.py | 82 -------------------------------- requirements.txt | 1 - 2 files changed, 83 deletions(-) delete mode 100644 computer_vision/lenet_pytorch.py diff --git a/computer_vision/lenet_pytorch.py b/computer_vision/lenet_pytorch.py deleted file mode 100644 index 177a5ebfcdb4..000000000000 --- a/computer_vision/lenet_pytorch.py +++ /dev/null @@ -1,82 +0,0 @@ -""" -LeNet Network - -Paper: http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf -""" - -import numpy -import torch -import torch.nn as nn - - -class LeNet(nn.Module): - def __init__(self) -> None: - super().__init__() - - self.tanh = nn.Tanh() - self.avgpool = nn.AvgPool2d(kernel_size=2, stride=2) - - self.conv1 = nn.Conv2d( - in_channels=1, - out_channels=6, - kernel_size=(5, 5), - stride=(1, 1), - padding=(0, 0), - ) - self.conv2 = nn.Conv2d( - in_channels=6, - out_channels=16, - kernel_size=(5, 5), - stride=(1, 1), - padding=(0, 0), - ) - self.conv3 = nn.Conv2d( - in_channels=16, - out_channels=120, - kernel_size=(5, 5), - stride=(1, 1), - padding=(0, 0), - ) - - self.linear1 = nn.Linear(120, 84) - self.linear2 = nn.Linear(84, 10) - - def forward(self, image_array: numpy.ndarray) -> numpy.ndarray: - image_array = self.tanh(self.conv1(image_array)) - image_array = self.avgpool(image_array) - image_array = self.tanh(self.conv2(image_array)) - image_array = self.avgpool(image_array) - image_array = self.tanh(self.conv3(image_array)) - - image_array = image_array.reshape(image_array.shape[0], -1) - image_array = self.tanh(self.linear1(image_array)) - image_array = self.linear2(image_array) - return image_array - - -def test_model(image_tensor: torch.tensor) -> bool: - """ - Test the model on an input batch of 64 images - - Args: - image_tensor (torch.tensor): Batch of Images for the model - - >>> test_model(torch.randn(64, 1, 32, 32)) - True - - """ - try: - model = LeNet() - output = model(image_tensor) - except RuntimeError: - return False - - return output.shape == torch.zeros([64, 10]).shape - - -if __name__ == "__main__": - random_image_1 = torch.randn(64, 1, 32, 32) - random_image_2 = torch.randn(1, 32, 32) - - print(f"random_image_1 Model Passed: {test_model(random_image_1)}") - print(f"\nrandom_image_2 Model Passed: {test_model(random_image_2)}") diff --git a/requirements.txt b/requirements.txt index e159fe010dc4..acfbc823e77f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,7 +17,6 @@ statsmodels sympy tensorflow texttable -torch tweepy xgboost yulewalker From 5cb0a000c47398c6d8af1ac43e2f83ae018f7182 Mon Sep 17 00:00:00 2001 From: amirsoroush <114881632+amirsoroush@users.noreply.github.com> Date: Sat, 8 Apr 2023 14:41:08 +0300 Subject: [PATCH 0808/1543] Queue implementation using two Stacks (#8617) * Queue implementation using two Stacks * fix typo in queue/queue_on_two_stacks.py * add 'iterable' to queue_on_two_stacks initializer * make queue_on_two_stacks.py generic class * fix ruff-UP007 in queue_on_two_stacks.py * enhance readability in queue_on_two_stacks.py * Create queue_by_two_stacks.py --------- Co-authored-by: Christian Clauss --- data_structures/queue/queue_by_two_stacks.py | 115 ++++++++++++++++ data_structures/queue/queue_on_two_stacks.py | 137 +++++++++++++++++++ 2 files changed, 252 insertions(+) create mode 100644 data_structures/queue/queue_by_two_stacks.py create mode 100644 data_structures/queue/queue_on_two_stacks.py diff --git a/data_structures/queue/queue_by_two_stacks.py b/data_structures/queue/queue_by_two_stacks.py new file mode 100644 index 
000000000000..cd62f155a63b --- /dev/null +++ b/data_structures/queue/queue_by_two_stacks.py @@ -0,0 +1,115 @@ +"""Queue implementation using two stacks""" + +from collections.abc import Iterable +from typing import Generic, TypeVar + +_T = TypeVar("_T") + + +class QueueByTwoStacks(Generic[_T]): + def __init__(self, iterable: Iterable[_T] | None = None) -> None: + """ + >>> QueueByTwoStacks() + Queue(()) + >>> QueueByTwoStacks([10, 20, 30]) + Queue((10, 20, 30)) + >>> QueueByTwoStacks((i**2 for i in range(1, 4))) + Queue((1, 4, 9)) + """ + self._stack1: list[_T] = list(iterable or []) + self._stack2: list[_T] = [] + + def __len__(self) -> int: + """ + >>> len(QueueByTwoStacks()) + 0 + >>> from string import ascii_lowercase + >>> len(QueueByTwoStacks(ascii_lowercase)) + 26 + >>> queue = QueueByTwoStacks() + >>> for i in range(1, 11): + ... queue.put(i) + ... + >>> len(queue) + 10 + >>> for i in range(2): + ... queue.get() + 1 + 2 + >>> len(queue) + 8 + """ + + return len(self._stack1) + len(self._stack2) + + def __repr__(self) -> str: + """ + >>> queue = QueueByTwoStacks() + >>> queue + Queue(()) + >>> str(queue) + 'Queue(())' + >>> queue.put(10) + >>> queue + Queue((10,)) + >>> queue.put(20) + >>> queue.put(30) + >>> queue + Queue((10, 20, 30)) + """ + return f"Queue({tuple(self._stack2[::-1] + self._stack1)})" + + def put(self, item: _T) -> None: + """ + Put `item` into the Queue + + >>> queue = QueueByTwoStacks() + >>> queue.put(10) + >>> queue.put(20) + >>> len(queue) + 2 + >>> queue + Queue((10, 20)) + """ + + self._stack1.append(item) + + def get(self) -> _T: + """ + Get `item` from the Queue + + >>> queue = QueueByTwoStacks((10, 20, 30)) + >>> queue.get() + 10 + >>> queue.put(40) + >>> queue.get() + 20 + >>> queue.get() + 30 + >>> len(queue) + 1 + >>> queue.get() + 40 + >>> queue.get() + Traceback (most recent call last): + ... + IndexError: Queue is empty + """ + + # To reduce number of attribute look-ups in `while` loop. + stack1_pop = self._stack1.pop + stack2_append = self._stack2.append + + if not self._stack2: + while self._stack1: + stack2_append(stack1_pop()) + + if not self._stack2: + raise IndexError("Queue is empty") + return self._stack2.pop() + + +if __name__ == "__main__": + from doctest import testmod + + testmod() diff --git a/data_structures/queue/queue_on_two_stacks.py b/data_structures/queue/queue_on_two_stacks.py new file mode 100644 index 000000000000..61db2b512136 --- /dev/null +++ b/data_structures/queue/queue_on_two_stacks.py @@ -0,0 +1,137 @@ +"""Queue implementation using two stacks""" + +from collections.abc import Iterable +from typing import Generic, TypeVar + +_T = TypeVar("_T") + + +class QueueByTwoStacks(Generic[_T]): + def __init__(self, iterable: Iterable[_T] | None = None) -> None: + """ + >>> queue1 = QueueByTwoStacks() + >>> str(queue1) + 'Queue([])' + >>> queue2 = QueueByTwoStacks([10, 20, 30]) + >>> str(queue2) + 'Queue([10, 20, 30])' + >>> queue3 = QueueByTwoStacks((i**2 for i in range(1, 4))) + >>> str(queue3) + 'Queue([1, 4, 9])' + """ + + self._stack1: list[_T] = [] if iterable is None else list(iterable) + self._stack2: list[_T] = [] + + def __len__(self) -> int: + """ + >>> queue = QueueByTwoStacks() + >>> for i in range(1, 11): + ... queue.put(i) + ... + >>> len(queue) == 10 + True + >>> for i in range(2): + ... 
queue.get() + 1 + 2 + >>> len(queue) == 8 + True + """ + + return len(self._stack1) + len(self._stack2) + + def __repr__(self) -> str: + """ + >>> queue = QueueByTwoStacks() + >>> queue + Queue([]) + >>> str(queue) + 'Queue([])' + >>> queue.put(10) + >>> queue + Queue([10]) + >>> queue.put(20) + >>> queue.put(30) + >>> queue + Queue([10, 20, 30]) + """ + + items = self._stack2[::-1] + self._stack1 + return f"Queue({items})" + + def put(self, item: _T) -> None: + """ + Put `item` into the Queue + + >>> queue = QueueByTwoStacks() + >>> queue.put(10) + >>> queue.put(20) + >>> len(queue) == 2 + True + >>> str(queue) + 'Queue([10, 20])' + """ + + self._stack1.append(item) + + def get(self) -> _T: + """ + Get `item` from the Queue + + >>> queue = QueueByTwoStacks() + >>> for i in (10, 20, 30): + ... queue.put(i) + >>> queue.get() + 10 + >>> queue.put(40) + >>> queue.get() + 20 + >>> queue.get() + 30 + >>> len(queue) == 1 + True + >>> queue.get() + 40 + >>> queue.get() + Traceback (most recent call last): + ... + IndexError: Queue is empty + """ + + # To reduce number of attribute look-ups in `while` loop. + stack1_pop = self._stack1.pop + stack2_append = self._stack2.append + + if not self._stack2: + while self._stack1: + stack2_append(stack1_pop()) + + if not self._stack2: + raise IndexError("Queue is empty") + return self._stack2.pop() + + def size(self) -> int: + """ + Returns the length of the Queue + + >>> queue = QueueByTwoStacks() + >>> queue.size() + 0 + >>> queue.put(10) + >>> queue.put(20) + >>> queue.size() + 2 + >>> queue.get() + 10 + >>> queue.size() == 1 + True + """ + + return len(self) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 2f9b03393c75f3ab14b491becae4ac5caf26de17 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 8 Apr 2023 14:16:19 +0200 Subject: [PATCH 0809/1543] Delete queue_on_two_stacks.py which duplicates queue_by_two_stacks.py (#8624) * Delete queue_on_two_stacks.py which duplicates queue_by_two_stacks.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + data_structures/queue/queue_on_two_stacks.py | 137 ------------------- 2 files changed, 1 insertion(+), 137 deletions(-) delete mode 100644 data_structures/queue/queue_on_two_stacks.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 3764c471ce70..e3e0748ecf75 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -232,6 +232,7 @@ * [Double Ended Queue](data_structures/queue/double_ended_queue.py) * [Linked Queue](data_structures/queue/linked_queue.py) * [Priority Queue Using List](data_structures/queue/priority_queue_using_list.py) + * [Queue By Two Stacks](data_structures/queue/queue_by_two_stacks.py) * [Queue On List](data_structures/queue/queue_on_list.py) * [Queue On Pseudo Stack](data_structures/queue/queue_on_pseudo_stack.py) * Stacks diff --git a/data_structures/queue/queue_on_two_stacks.py b/data_structures/queue/queue_on_two_stacks.py deleted file mode 100644 index 61db2b512136..000000000000 --- a/data_structures/queue/queue_on_two_stacks.py +++ /dev/null @@ -1,137 +0,0 @@ -"""Queue implementation using two stacks""" - -from collections.abc import Iterable -from typing import Generic, TypeVar - -_T = TypeVar("_T") - - -class QueueByTwoStacks(Generic[_T]): - def __init__(self, iterable: Iterable[_T] | None = None) -> None: - """ - >>> queue1 = QueueByTwoStacks() - >>> str(queue1) - 'Queue([])' - >>> queue2 = QueueByTwoStacks([10, 20, 30]) - >>> str(queue2) - 'Queue([10, 20, 30])' - >>> queue3 
= QueueByTwoStacks((i**2 for i in range(1, 4))) - >>> str(queue3) - 'Queue([1, 4, 9])' - """ - - self._stack1: list[_T] = [] if iterable is None else list(iterable) - self._stack2: list[_T] = [] - - def __len__(self) -> int: - """ - >>> queue = QueueByTwoStacks() - >>> for i in range(1, 11): - ... queue.put(i) - ... - >>> len(queue) == 10 - True - >>> for i in range(2): - ... queue.get() - 1 - 2 - >>> len(queue) == 8 - True - """ - - return len(self._stack1) + len(self._stack2) - - def __repr__(self) -> str: - """ - >>> queue = QueueByTwoStacks() - >>> queue - Queue([]) - >>> str(queue) - 'Queue([])' - >>> queue.put(10) - >>> queue - Queue([10]) - >>> queue.put(20) - >>> queue.put(30) - >>> queue - Queue([10, 20, 30]) - """ - - items = self._stack2[::-1] + self._stack1 - return f"Queue({items})" - - def put(self, item: _T) -> None: - """ - Put `item` into the Queue - - >>> queue = QueueByTwoStacks() - >>> queue.put(10) - >>> queue.put(20) - >>> len(queue) == 2 - True - >>> str(queue) - 'Queue([10, 20])' - """ - - self._stack1.append(item) - - def get(self) -> _T: - """ - Get `item` from the Queue - - >>> queue = QueueByTwoStacks() - >>> for i in (10, 20, 30): - ... queue.put(i) - >>> queue.get() - 10 - >>> queue.put(40) - >>> queue.get() - 20 - >>> queue.get() - 30 - >>> len(queue) == 1 - True - >>> queue.get() - 40 - >>> queue.get() - Traceback (most recent call last): - ... - IndexError: Queue is empty - """ - - # To reduce number of attribute look-ups in `while` loop. - stack1_pop = self._stack1.pop - stack2_append = self._stack2.append - - if not self._stack2: - while self._stack1: - stack2_append(stack1_pop()) - - if not self._stack2: - raise IndexError("Queue is empty") - return self._stack2.pop() - - def size(self) -> int: - """ - Returns the length of the Queue - - >>> queue = QueueByTwoStacks() - >>> queue.size() - 0 - >>> queue.put(10) - >>> queue.put(20) - >>> queue.size() - 2 - >>> queue.get() - 10 - >>> queue.size() == 1 - True - """ - - return len(self) - - -if __name__ == "__main__": - from doctest import testmod - - testmod() From 14bdd174bba7828ac2bf476f3697aa13fa179492 Mon Sep 17 00:00:00 2001 From: isidroas Date: Sat, 8 Apr 2023 19:39:24 +0200 Subject: [PATCH 0810/1543] Bloom Filter (#8615) * Bloom filter with tests * has functions constant * fix type * isort * passing ruff * type hints * type hints * from fail to erro * captital leter * type hints requested by boot * descriptive name for m * more descriptibe arguments II * moved movies_test to doctest * commented doctest * removed test_probability * estimated error * added types * again hash_ * Update data_structures/hashing/bloom_filter.py Co-authored-by: Christian Clauss * from b to bloom * Update data_structures/hashing/bloom_filter.py Co-authored-by: Christian Clauss * Update data_structures/hashing/bloom_filter.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * syntax error in dict comprehension * from goodfather to godfather * removed Interestellar * forgot the last Godfather * Revert "removed Interestellar" This reverts commit 35fa5f5c4bf101d073aad43c37b0a423d8975071. 
* pretty dict * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bloom_filter.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/hashing/bloom_filter.py | 105 ++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 data_structures/hashing/bloom_filter.py diff --git a/data_structures/hashing/bloom_filter.py b/data_structures/hashing/bloom_filter.py new file mode 100644 index 000000000000..7fd0985bdc33 --- /dev/null +++ b/data_structures/hashing/bloom_filter.py @@ -0,0 +1,105 @@ +""" +See https://en.wikipedia.org/wiki/Bloom_filter + +The use of this data structure is to test membership in a set. +Compared to Python's built-in set() it is more space-efficient. +In the following example, only 8 bits of memory will be used: +>>> bloom = Bloom(size=8) + +Initially, the filter contains all zeros: +>>> bloom.bitstring +'00000000' + +When an element is added, two bits are set to 1 +since there are 2 hash functions in this implementation: +>>> "Titanic" in bloom +False +>>> bloom.add("Titanic") +>>> bloom.bitstring +'01100000' +>>> "Titanic" in bloom +True + +However, sometimes only one bit is added +because both hash functions return the same value +>>> bloom.add("Avatar") +>>> "Avatar" in bloom +True +>>> bloom.format_hash("Avatar") +'00000100' +>>> bloom.bitstring +'01100100' + +Not added elements should return False ... +>>> not_present_films = ("The Godfather", "Interstellar", "Parasite", "Pulp Fiction") +>>> { +... film: bloom.format_hash(film) for film in not_present_films +... } # doctest: +NORMALIZE_WHITESPACE +{'The Godfather': '00000101', + 'Interstellar': '00000011', + 'Parasite': '00010010', + 'Pulp Fiction': '10000100'} +>>> any(film in bloom for film in not_present_films) +False + +but sometimes there are false positives: +>>> "Ratatouille" in bloom +True +>>> bloom.format_hash("Ratatouille") +'01100000' + +The probability increases with the number of elements added. +The probability decreases with the number of bits in the bitarray. 
+>>> bloom.estimated_error_rate +0.140625 +>>> bloom.add("The Godfather") +>>> bloom.estimated_error_rate +0.25 +>>> bloom.bitstring +'01100101' +""" +from hashlib import md5, sha256 + +HASH_FUNCTIONS = (sha256, md5) + + +class Bloom: + def __init__(self, size: int = 8) -> None: + self.bitarray = 0b0 + self.size = size + + def add(self, value: str) -> None: + h = self.hash_(value) + self.bitarray |= h + + def exists(self, value: str) -> bool: + h = self.hash_(value) + return (h & self.bitarray) == h + + def __contains__(self, other: str) -> bool: + return self.exists(other) + + def format_bin(self, bitarray: int) -> str: + res = bin(bitarray)[2:] + return res.zfill(self.size) + + @property + def bitstring(self) -> str: + return self.format_bin(self.bitarray) + + def hash_(self, value: str) -> int: + res = 0b0 + for func in HASH_FUNCTIONS: + position = ( + int.from_bytes(func(value.encode()).digest(), "little") % self.size + ) + res |= 2**position + return res + + def format_hash(self, value: str) -> str: + return self.format_bin(self.hash_(value)) + + @property + def estimated_error_rate(self) -> float: + n_ones = bin(self.bitarray).count("1") + return (n_ones / self.size) ** len(HASH_FUNCTIONS) From d182f95646aa7c515afe0912a34e8c2a11a34ca3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 10 Apr 2023 23:43:17 +0200 Subject: [PATCH 0811/1543] [pre-commit.ci] pre-commit autoupdate (#8634) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.260 → v0.0.261](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.260...v0.0.261) - [github.com/pre-commit/mirrors-mypy: v1.1.1 → v1.2.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.1.1...v1.2.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d54ce5adddce..55345a574ce9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.260 + rev: v0.0.261 hooks: - id: ruff @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.1.1 + rev: v1.2.0 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index e3e0748ecf75..36f5a752c48b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -195,6 +195,7 @@ * [Alternate Disjoint Set](data_structures/disjoint_set/alternate_disjoint_set.py) * [Disjoint Set](data_structures/disjoint_set/disjoint_set.py) * Hashing + * [Bloom Filter](data_structures/hashing/bloom_filter.py) * [Double Hash](data_structures/hashing/double_hash.py) * [Hash Map](data_structures/hashing/hash_map.py) * [Hash Table](data_structures/hashing/hash_table.py) From 54dedf844a30d39bd42c66ebf9cd67ec186f47bb Mon Sep 17 00:00:00 2001 From: Diego Gasco <62801631+Diegomangasco@users.noreply.github.com> Date: Mon, 17 Apr 2023 00:34:22 +0200 Subject: [PATCH 0812/1543] Dimensionality reduction (#8590) --- machine_learning/dimensionality_reduction.py | 198 +++++++++++++++++++ 1 file changed, 198 insertions(+) create mode 100644 
machine_learning/dimensionality_reduction.py diff --git a/machine_learning/dimensionality_reduction.py b/machine_learning/dimensionality_reduction.py new file mode 100644 index 000000000000..d2046f81af04 --- /dev/null +++ b/machine_learning/dimensionality_reduction.py @@ -0,0 +1,198 @@ +# Copyright (c) 2023 Diego Gasco (diego.gasco99@gmail.com), Diegomangasco on GitHub + +""" +Requirements: + - numpy version 1.21 + - scipy version 1.3.3 +Notes: + - Each column of the features matrix corresponds to a class item +""" + +import logging + +import numpy as np +import pytest +from scipy.linalg import eigh + +logging.basicConfig(level=logging.INFO, format="%(message)s") + + +def column_reshape(input_array: np.ndarray) -> np.ndarray: + """Function to reshape a row Numpy array into a column Numpy array + >>> input_array = np.array([1, 2, 3]) + >>> column_reshape(input_array) + array([[1], + [2], + [3]]) + """ + + return input_array.reshape((input_array.size, 1)) + + +def covariance_within_classes( + features: np.ndarray, labels: np.ndarray, classes: int +) -> np.ndarray: + """Function to compute the covariance matrix inside each class. + >>> features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> labels = np.array([0, 1, 0]) + >>> covariance_within_classes(features, labels, 2) + array([[0.66666667, 0.66666667, 0.66666667], + [0.66666667, 0.66666667, 0.66666667], + [0.66666667, 0.66666667, 0.66666667]]) + """ + + covariance_sum = np.nan + for i in range(classes): + data = features[:, labels == i] + data_mean = data.mean(1) + # Centralize the data of class i + centered_data = data - column_reshape(data_mean) + if i > 0: + # If covariance_sum is not None + covariance_sum += np.dot(centered_data, centered_data.T) + else: + # If covariance_sum is np.nan (i.e. first loop) + covariance_sum = np.dot(centered_data, centered_data.T) + + return covariance_sum / features.shape[1] + + +def covariance_between_classes( + features: np.ndarray, labels: np.ndarray, classes: int +) -> np.ndarray: + """Function to compute the covariance matrix between multiple classes + >>> features = np.array([[9, 2, 3], [4, 3, 6], [1, 8, 9]]) + >>> labels = np.array([0, 1, 0]) + >>> covariance_between_classes(features, labels, 2) + array([[ 3.55555556, 1.77777778, -2.66666667], + [ 1.77777778, 0.88888889, -1.33333333], + [-2.66666667, -1.33333333, 2. ]]) + """ + + general_data_mean = features.mean(1) + covariance_sum = np.nan + for i in range(classes): + data = features[:, labels == i] + device_data = data.shape[1] + data_mean = data.mean(1) + if i > 0: + # If covariance_sum is not None + covariance_sum += device_data * np.dot( + column_reshape(data_mean) - column_reshape(general_data_mean), + (column_reshape(data_mean) - column_reshape(general_data_mean)).T, + ) + else: + # If covariance_sum is np.nan (i.e. first loop) + covariance_sum = device_data * np.dot( + column_reshape(data_mean) - column_reshape(general_data_mean), + (column_reshape(data_mean) - column_reshape(general_data_mean)).T, + ) + + return covariance_sum / features.shape[1] + + +def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray: + """ + Principal Component Analysis. + + For more details, see: https://en.wikipedia.org/wiki/Principal_component_analysis. 
+ Parameters: + * features: the features extracted from the dataset + * dimensions: to filter the projected data for the desired dimension + + >>> test_principal_component_analysis() + """ + + # Check if the features have been loaded + if features.any(): + data_mean = features.mean(1) + # Center the dataset + centered_data = features - np.reshape(data_mean, (data_mean.size, 1)) + covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1] + _, eigenvectors = np.linalg.eigh(covariance_matrix) + # Take all the columns in the reverse order (-1), and then takes only the first + filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions] + # Project the database on the new space + projected_data = np.dot(filtered_eigenvectors.T, features) + logging.info("Principal Component Analysis computed") + + return projected_data + else: + logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True) + logging.error("Dataset empty") + raise AssertionError + + +def linear_discriminant_analysis( + features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int +) -> np.ndarray: + """ + Linear Discriminant Analysis. + + For more details, see: https://en.wikipedia.org/wiki/Linear_discriminant_analysis. + Parameters: + * features: the features extracted from the dataset + * labels: the class labels of the features + * classes: the number of classes present in the dataset + * dimensions: to filter the projected data for the desired dimension + + >>> test_linear_discriminant_analysis() + """ + + # Check if the dimension desired is less than the number of classes + assert classes > dimensions + + # Check if features have been already loaded + if features.any: + _, eigenvectors = eigh( + covariance_between_classes(features, labels, classes), + covariance_within_classes(features, labels, classes), + ) + filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions] + svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors) + filtered_svd_matrix = svd_matrix[:, 0:dimensions] + projected_data = np.dot(filtered_svd_matrix.T, features) + logging.info("Linear Discriminant Analysis computed") + + return projected_data + else: + logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True) + logging.error("Dataset empty") + raise AssertionError + + +def test_linear_discriminant_analysis() -> None: + # Create dummy dataset with 2 classes and 3 features + features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]]) + labels = np.array([0, 0, 0, 1, 1]) + classes = 2 + dimensions = 2 + + # Assert that the function raises an AssertionError if dimensions > classes + with pytest.raises(AssertionError) as error_info: + projected_data = linear_discriminant_analysis( + features, labels, classes, dimensions + ) + if isinstance(projected_data, np.ndarray): + raise AssertionError( + "Did not raise AssertionError for dimensions > classes" + ) + assert error_info.type is AssertionError + + +def test_principal_component_analysis() -> None: + features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + dimensions = 2 + expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]]) + + with pytest.raises(AssertionError) as error_info: + output = principal_component_analysis(features, dimensions) + if not np.allclose(expected_output, output): + raise AssertionError + assert error_info.type is AssertionError + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 2b051a2de4adf711857f5453286dff47d1d87636 Mon Sep 17 00:00:00 2001 From: Rohan 
Anand <96521078+rohan472000@users.noreply.github.com> Date: Tue, 18 Apr 2023 03:47:48 +0530 Subject: [PATCH 0813/1543] Create real_and_reactive_power.py (#8665) --- electronics/real_and_reactive_power.py | 49 ++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 electronics/real_and_reactive_power.py diff --git a/electronics/real_and_reactive_power.py b/electronics/real_and_reactive_power.py new file mode 100644 index 000000000000..81dcba800e82 --- /dev/null +++ b/electronics/real_and_reactive_power.py @@ -0,0 +1,49 @@ +import math + + +def real_power(apparent_power: float, power_factor: float) -> float: + """ + Calculate real power from apparent power and power factor. + + Examples: + >>> real_power(100, 0.9) + 90.0 + >>> real_power(0, 0.8) + 0.0 + >>> real_power(100, -0.9) + -90.0 + """ + if ( + not isinstance(power_factor, (int, float)) + or power_factor < -1 + or power_factor > 1 + ): + raise ValueError("power_factor must be a valid float value between -1 and 1.") + return apparent_power * power_factor + + +def reactive_power(apparent_power: float, power_factor: float) -> float: + """ + Calculate reactive power from apparent power and power factor. + + Examples: + >>> reactive_power(100, 0.9) + 43.58898943540673 + >>> reactive_power(0, 0.8) + 0.0 + >>> reactive_power(100, -0.9) + 43.58898943540673 + """ + if ( + not isinstance(power_factor, (int, float)) + or power_factor < -1 + or power_factor > 1 + ): + raise ValueError("power_factor must be a valid float value between -1 and 1.") + return apparent_power * math.sqrt(1 - power_factor**2) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b5047cfa114c6343b92370419772b9cf0f13e634 Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Tue, 18 Apr 2023 13:00:01 +0530 Subject: [PATCH 0814/1543] Create apparent_power.py (#8664) * Create apparent_power.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update apparent_power.py * Update apparent_power.py * Update apparent_power.py * Update electronics/apparent_power.py Co-authored-by: Christian Clauss * Update electronics/apparent_power.py Co-authored-by: Christian Clauss * Update apparent_power.py * Update electronics/apparent_power.py Co-authored-by: Christian Clauss * Update apparent_power.py * Update apparent_power.py * Update apparent_power.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update apparent_power.py * Update apparent_power.py * Update apparent_power.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- electronics/apparent_power.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 electronics/apparent_power.py diff --git a/electronics/apparent_power.py b/electronics/apparent_power.py new file mode 100644 index 000000000000..a6f1a50822f7 --- /dev/null +++ b/electronics/apparent_power.py @@ -0,0 +1,35 @@ +import cmath +import math + + +def apparent_power( + voltage: float, current: float, voltage_angle: float, current_angle: float +) -> complex: + """ + Calculate the apparent power in a single-phase AC circuit. 
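# Illustrative aside, not part of the patch: a quick numeric check of the phasor
# product computed below. Magnitudes multiply and angles add, so the magnitude of
# the returned complex number is always voltage * current, whatever the two angles
# are. The variable names are invented for this sketch.
import cmath
import math

voltage_phasor = cmath.rect(100, math.radians(30))   # 100 V at 30 degrees
current_phasor = cmath.rect(5, math.radians(-10))    # 5 A at -10 degrees
product = voltage_phasor * current_phasor            # magnitude 500, angle 20 degrees
assert math.isclose(abs(product), 100 * 5)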
+ + >>> apparent_power(100, 5, 0, 0) + (500+0j) + >>> apparent_power(100, 5, 90, 0) + (3.061616997868383e-14+500j) + >>> apparent_power(100, 5, -45, -60) + (-129.40952255126027-482.9629131445341j) + >>> apparent_power(200, 10, -30, -90) + (-999.9999999999998-1732.0508075688776j) + """ + # Convert angles from degrees to radians + voltage_angle_rad = math.radians(voltage_angle) + current_angle_rad = math.radians(current_angle) + + # Convert voltage and current to rectangular form + voltage_rect = cmath.rect(voltage, voltage_angle_rad) + current_rect = cmath.rect(current, current_angle_rad) + + # Calculate apparent power + return voltage_rect * current_rect + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 93ce8cb75da2740089df8db23fa493ce104a011b Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Tue, 18 Apr 2023 13:14:06 +0530 Subject: [PATCH 0815/1543] added reference link. (#8667) * added reference link. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- electronics/apparent_power.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/electronics/apparent_power.py b/electronics/apparent_power.py index a6f1a50822f7..0ce1c2aa95b9 100644 --- a/electronics/apparent_power.py +++ b/electronics/apparent_power.py @@ -8,6 +8,8 @@ def apparent_power( """ Calculate the apparent power in a single-phase AC circuit. + Reference: https://en.wikipedia.org/wiki/AC_power#Apparent_power + >>> apparent_power(100, 5, 0, 0) (500+0j) >>> apparent_power(100, 5, 90, 0) From 458debc237d41752c6c4223264a4bb23efb2ecec Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Tue, 18 Apr 2023 13:32:20 +0530 Subject: [PATCH 0816/1543] added a problem with solution on sliding window. (#8566) * added a problem with solution on sliding window. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added hint for return type and parameter * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * removed un-necessary docs and added 2 test cases * Rename sliding_window/minimum_size_subarray_sum.py to dynamic_programming/minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../minimum_size_subarray_sum.py | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 dynamic_programming/minimum_size_subarray_sum.py diff --git a/dynamic_programming/minimum_size_subarray_sum.py b/dynamic_programming/minimum_size_subarray_sum.py new file mode 100644 index 000000000000..3868d73535fb --- /dev/null +++ b/dynamic_programming/minimum_size_subarray_sum.py @@ -0,0 +1,62 @@ +import sys + + +def minimum_subarray_sum(target: int, numbers: list[int]) -> int: + """ + Return the length of the shortest contiguous subarray in a list of numbers whose sum + is at least target. 
Reference: https://stackoverflow.com/questions/8269916 + + >>> minimum_subarray_sum(7, [2, 3, 1, 2, 4, 3]) + 2 + >>> minimum_subarray_sum(7, [2, 3, -1, 2, 4, -3]) + 4 + >>> minimum_subarray_sum(11, [1, 1, 1, 1, 1, 1, 1, 1]) + 0 + >>> minimum_subarray_sum(10, [1, 2, 3, 4, 5, 6, 7]) + 2 + >>> minimum_subarray_sum(5, [1, 1, 1, 1, 1, 5]) + 1 + >>> minimum_subarray_sum(0, []) + 0 + >>> minimum_subarray_sum(0, [1, 2, 3]) + 1 + >>> minimum_subarray_sum(10, [10, 20, 30]) + 1 + >>> minimum_subarray_sum(7, [1, 1, 1, 1, 1, 1, 10]) + 1 + >>> minimum_subarray_sum(6, []) + 0 + >>> minimum_subarray_sum(2, [1, 2, 3]) + 1 + >>> minimum_subarray_sum(-6, []) + 0 + >>> minimum_subarray_sum(-6, [3, 4, 5]) + 1 + >>> minimum_subarray_sum(8, None) + 0 + >>> minimum_subarray_sum(2, "ABC") + Traceback (most recent call last): + ... + ValueError: numbers must be an iterable of integers + """ + if not numbers: + return 0 + if target == 0 and target in numbers: + return 0 + if not isinstance(numbers, (list, tuple)) or not all( + isinstance(number, int) for number in numbers + ): + raise ValueError("numbers must be an iterable of integers") + + left = right = curr_sum = 0 + min_len = sys.maxsize + + while right < len(numbers): + curr_sum += numbers[right] + while curr_sum >= target and left <= right: + min_len = min(min_len, right - left + 1) + curr_sum -= numbers[left] + left += 1 + right += 1 + + return 0 if min_len == sys.maxsize else min_len From 11582943a555ae3b6a22938df6d3645b0327562e Mon Sep 17 00:00:00 2001 From: JulianStiebler <68881884+JulianStiebler@users.noreply.github.com> Date: Tue, 18 Apr 2023 11:57:48 +0200 Subject: [PATCH 0817/1543] Create maths/pi_generator.py (#8666) * Create pi_generator.py * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pi_generator.py * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated commentary on line 28, added math.pi comparison & math.isclose() test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed # noqa: E501 * printf() added as recommended by cclaus --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/pi_generator.py | 94 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 maths/pi_generator.py diff --git a/maths/pi_generator.py b/maths/pi_generator.py new file mode 100644 index 000000000000..dcd218aae309 --- /dev/null +++ b/maths/pi_generator.py @@ -0,0 +1,94 @@ +def calculate_pi(limit: int) -> str: + """ + https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80 + Leibniz Formula for Pi + + The Leibniz formula is the special case arctan 1 = 1/4 Pi . + Leibniz's formula converges extremely slowly: it exhibits sublinear convergence. + + Convergence (https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80#Convergence) + + We cannot try to prove against an interrupted, uncompleted generation. 
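# Illustrative aside, not part of the patch: the plain alternating Leibniz series the
# docstring above refers to, to make the "extremely slow" convergence concrete. The
# error after n terms is still on the order of 1/n, so a thousand terms only agree
# with math.pi to about 1e-3 (roughly 3.1406). The helper name is invented here.
def leibniz_partial_sum(terms: int) -> float:
    return 4 * sum((-1) ** k / (2 * k + 1) for k in range(terms))


assert abs(leibniz_partial_sum(1_000) - 3.14159) < 2e-3
# The digit-by-digit loop below avoids that loss of accuracy by emitting decimal
# digits of pi directly.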
+ https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80#Unusual_behaviour + The errors can in fact be predicted; + but those calculations also approach infinity for accuracy. + + Our output will always be a string since we can defintely store all digits in there. + For simplicity' sake, let's just compare against known values and since our outpit + is a string, we need to convert to float. + + >>> import math + >>> float(calculate_pi(15)) == math.pi + True + + Since we cannot predict errors or interrupt any infinite alternating + series generation since they approach infinity, + or interrupt any alternating series, we are going to need math.isclose() + + >>> math.isclose(float(calculate_pi(50)), math.pi) + True + + >>> math.isclose(float(calculate_pi(100)), math.pi) + True + + Since math.pi-constant contains only 16 digits, here some test with preknown values: + + >>> calculate_pi(50) + '3.14159265358979323846264338327950288419716939937510' + >>> calculate_pi(80) + '3.14159265358979323846264338327950288419716939937510582097494459230781640628620899' + + To apply the Leibniz formula for calculating pi, + the variables q, r, t, k, n, and l are used for the iteration process. + """ + q = 1 + r = 0 + t = 1 + k = 1 + n = 3 + l = 3 + decimal = limit + counter = 0 + + result = "" + + """ + We will avoid using yield since we otherwise get a Generator-Object, + which we can't just compare against anything. We would have to make a list out of it + after the generation, so we will just stick to plain return logic: + """ + while counter != decimal + 1: + if 4 * q + r - t < n * t: + result += str(n) + if counter == 0: + result += "." + + if decimal == counter: + break + + counter += 1 + nr = 10 * (r - n * t) + n = ((10 * (3 * q + r)) // t) - 10 * n + q *= 10 + r = nr + else: + nr = (2 * q + r) * l + nn = (q * (7 * k) + 2 + (r * l)) // (t * l) + q *= k + t *= l + l += 2 + k += 1 + n = nn + r = nr + return result + + +def main() -> None: + print(f"{calculate_pi(50) = }") + import doctest + + doctest.testmod() + + +if __name__ == "__main__": + main() From bf30b18192dd7ff9a43523ee6efe5c015ae6b99c Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Mon, 24 Apr 2023 10:58:30 +0530 Subject: [PATCH 0818/1543] Update linear_discriminant_analysis.py and rsa_cipher.py (#8680) * Update rsa_cipher.py by replacing %s with {} * Update rsa_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update machine_learning/linear_discriminant_analysis.py Co-authored-by: Christian Clauss * Update linear_discriminant_analysis.py * updated --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ciphers/rsa_cipher.py | 14 ++++++++------ machine_learning/linear_discriminant_analysis.py | 2 +- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py 
index de26992f5eeb..9c41cdc5d472 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ -76,10 +76,11 @@ def encrypt_and_write_to_file( key_size, n, e = read_key_file(key_filename) if key_size < block_size * 8: sys.exit( - "ERROR: Block size is %s bits and key size is %s bits. The RSA cipher " + "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher " "requires the block size to be equal to or greater than the key size. " - "Either decrease the block size or use different keys." - % (block_size * 8, key_size) + "Either decrease the block size or use different keys.".format( + block_size * 8, key_size + ) ) encrypted_blocks = [str(i) for i in encrypt_message(message, (n, e), block_size)] @@ -101,10 +102,11 @@ def read_from_file_and_decrypt(message_filename: str, key_filename: str) -> str: if key_size < block_size * 8: sys.exit( - "ERROR: Block size is %s bits and key size is %s bits. The RSA cipher " + "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher " "requires the block size to be equal to or greater than the key size. " - "Did you specify the correct key file and encrypted file?" - % (block_size * 8, key_size) + "Did you specify the correct key file and encrypted file?".format( + block_size * 8, key_size + ) ) encrypted_blocks = [] diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index f4fb5ba76b64..c0a477be10c7 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -399,7 +399,7 @@ def main(): if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q": print("\n" + "GoodBye!".center(100, "-") + "\n") break - system("cls" if name == "nt" else "clear") + system("clear" if name == "posix" else "cls") # noqa: S605 if __name__ == "__main__": From a650426350dc7833ff1110bc2e434763caed631e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 25 Apr 2023 06:05:45 +0200 Subject: [PATCH 0819/1543] [pre-commit.ci] pre-commit autoupdate (#8691) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.261 → v0.0.262](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.261...v0.0.262) - [github.com/tox-dev/pyproject-fmt: 0.9.2 → 0.10.0](https://github.com/tox-dev/pyproject-fmt/compare/0.9.2...0.10.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 55345a574ce9..288473ca365f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.261 + rev: v0.0.262 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.9.2" + rev: "0.10.0" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 36f5a752c48b..8e67c85c6fa8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -327,6 +327,7 @@ * [Minimum Coin Change](dynamic_programming/minimum_coin_change.py) * [Minimum Cost 
Path](dynamic_programming/minimum_cost_path.py) * [Minimum Partition](dynamic_programming/minimum_partition.py) + * [Minimum Size Subarray Sum](dynamic_programming/minimum_size_subarray_sum.py) * [Minimum Squares To Represent A Number](dynamic_programming/minimum_squares_to_represent_a_number.py) * [Minimum Steps To One](dynamic_programming/minimum_steps_to_one.py) * [Minimum Tickets Cost](dynamic_programming/minimum_tickets_cost.py) @@ -339,6 +340,7 @@ * [Word Break](dynamic_programming/word_break.py) ## Electronics + * [Apparent Power](electronics/apparent_power.py) * [Builtin Voltage](electronics/builtin_voltage.py) * [Carrier Concentration](electronics/carrier_concentration.py) * [Circular Convolution](electronics/circular_convolution.py) @@ -348,6 +350,7 @@ * [Electrical Impedance](electronics/electrical_impedance.py) * [Ind Reactance](electronics/ind_reactance.py) * [Ohms Law](electronics/ohms_law.py) + * [Real And Reactive Power](electronics/real_and_reactive_power.py) * [Resistor Equivalence](electronics/resistor_equivalence.py) * [Resonant Frequency](electronics/resonant_frequency.py) @@ -483,6 +486,7 @@ * [Astar](machine_learning/astar.py) * [Data Transformations](machine_learning/data_transformations.py) * [Decision Tree](machine_learning/decision_tree.py) + * [Dimensionality Reduction](machine_learning/dimensionality_reduction.py) * Forecasting * [Run](machine_learning/forecasting/run.py) * [Gradient Descent](machine_learning/gradient_descent.py) @@ -604,6 +608,7 @@ * [Perfect Number](maths/perfect_number.py) * [Perfect Square](maths/perfect_square.py) * [Persistence](maths/persistence.py) + * [Pi Generator](maths/pi_generator.py) * [Pi Monte Carlo Estimation](maths/pi_monte_carlo_estimation.py) * [Points Are Collinear 3D](maths/points_are_collinear_3d.py) * [Pollard Rho](maths/pollard_rho.py) From c1b3ea5355266bb47daba378ca10200c4d359453 Mon Sep 17 00:00:00 2001 From: Dipankar Mitra <50228537+Mitra-babu@users.noreply.github.com> Date: Tue, 25 Apr 2023 21:36:14 +0530 Subject: [PATCH 0820/1543] The tanh activation function is added (#8689) * tanh function been added * tanh function been added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function is added * tanh function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function added * tanh function added * tanh function is added * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/tanh.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 maths/tanh.py diff --git a/maths/tanh.py b/maths/tanh.py new file mode 100644 index 000000000000..ddab3e1ab717 --- /dev/null +++ b/maths/tanh.py @@ -0,0 +1,42 @@ +""" +This script demonstrates the implementation of the tangent hyperbolic +or tanh function. + +The function takes a vector of K real numbers as input and +then (e^x - e^(-x))/(e^x + e^(-x)). After through tanh, the +element of the vector mostly -1 between 1. + +Script inspired from its corresponding Wikipedia article +https://en.wikipedia.org/wiki/Activation_function +""" +import numpy as np + + +def tangent_hyperbolic(vector: np.array) -> np.array: + """ + Implements the tanh function + + Parameters: + vector: np.array + + Returns: + tanh (np.array): The input numpy array after applying tanh. 
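# Illustrative aside, not part of the patch: the identity quoted just below,
# tanh(x) = 2 / (1 + e^(-2x)) - 1, checked numerically against numpy's builtin.
import numpy as np

sample = np.array([1.0, 5.0, 6.0, -0.67])
assert np.allclose((2 / (1 + np.exp(-2 * sample))) - 1, np.tanh(sample))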
+ + mathematically (e^x - e^(-x))/(e^x + e^(-x)) can be written as (2/(1+e^(-2x))-1 + + Examples: + >>> tangent_hyperbolic(np.array([1,5,6,-0.67])) + array([ 0.76159416, 0.9999092 , 0.99998771, -0.58497988]) + + >>> tangent_hyperbolic(np.array([8,10,2,-0.98,13])) + array([ 0.99999977, 1. , 0.96402758, -0.7530659 , 1. ]) + + """ + + return (2 / (1 + np.exp(-2 * vector))) - 1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 4c1f876567673db0934ba65d662ea221465ec921 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 27 Apr 2023 19:32:07 +0200 Subject: [PATCH 0821/1543] Solving the `Top k most frequent words` problem using a max-heap (#8685) * Solving the `Top k most frequent words` problem using a max-heap * Mentioning Python standard library solution in `Top k most frequent words` docstring * ruff --fix . * updating DIRECTORY.md --------- Co-authored-by: Amos Paribocci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + data_structures/heap/heap.py | 31 ++++-- .../linear_discriminant_analysis.py | 2 +- strings/top_k_frequent_words.py | 101 ++++++++++++++++++ 4 files changed, 128 insertions(+), 7 deletions(-) create mode 100644 strings/top_k_frequent_words.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 8e67c85c6fa8..681d252b232d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1167,6 +1167,7 @@ * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) * [Text Justification](strings/text_justification.py) + * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) * [Wildcard Pattern Matching](strings/wildcard_pattern_matching.py) diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index b14c55d9db4c..c1004f349479 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -1,9 +1,28 @@ from __future__ import annotations +from abc import abstractmethod from collections.abc import Iterable +from typing import Generic, Protocol, TypeVar -class Heap: +class Comparable(Protocol): + @abstractmethod + def __lt__(self: T, other: T) -> bool: + pass + + @abstractmethod + def __gt__(self: T, other: T) -> bool: + pass + + @abstractmethod + def __eq__(self: T, other: object) -> bool: + pass + + +T = TypeVar("T", bound=Comparable) + + +class Heap(Generic[T]): """A Max Heap Implementation >>> unsorted = [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5] @@ -27,7 +46,7 @@ class Heap: """ def __init__(self) -> None: - self.h: list[float] = [] + self.h: list[T] = [] self.heap_size: int = 0 def __repr__(self) -> str: @@ -79,7 +98,7 @@ def max_heapify(self, index: int) -> None: # fix the subsequent violation recursively if any self.max_heapify(violation) - def build_max_heap(self, collection: Iterable[float]) -> None: + def build_max_heap(self, collection: Iterable[T]) -> None: """build max heap from an unsorted array""" self.h = list(collection) self.heap_size = len(self.h) @@ -88,7 +107,7 @@ def build_max_heap(self, collection: Iterable[float]) -> None: for i in range(self.heap_size // 2 - 1, -1, -1): self.max_heapify(i) - def extract_max(self) -> float: + def extract_max(self) -> T: """get and remove max from heap""" if self.heap_size >= 2: me = self.h[0] @@ -102,7 +121,7 @@ def extract_max(self) -> float: else: raise Exception("Empty heap") - def insert(self, value: float) -> None: + def insert(self, value: T) -> None: """insert a new value into the max heap""" self.h.append(value) idx = 
(self.heap_size - 1) // 2 @@ -144,7 +163,7 @@ def heap_sort(self) -> None: ]: print(f"unsorted array: {unsorted}") - heap = Heap() + heap: Heap[int] = Heap() heap.build_max_heap(unsorted) print(f"after build heap: {heap}") diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index c0a477be10c7..88c047157893 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -399,7 +399,7 @@ def main(): if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q": print("\n" + "GoodBye!".center(100, "-") + "\n") break - system("clear" if name == "posix" else "cls") # noqa: S605 + system("cls" if name == "nt" else "clear") # noqa: S605 if __name__ == "__main__": diff --git a/strings/top_k_frequent_words.py b/strings/top_k_frequent_words.py new file mode 100644 index 000000000000..f3d1e0cd5ca7 --- /dev/null +++ b/strings/top_k_frequent_words.py @@ -0,0 +1,101 @@ +""" +Finds the top K most frequent words from the provided word list. + +This implementation aims to show how to solve the problem using the Heap class +already present in this repository. +Computing order statistics is, in fact, a typical usage of heaps. + +This is mostly shown for educational purposes, since the problem can be solved +in a few lines using collections.Counter from the Python standard library: + +from collections import Counter +def top_k_frequent_words(words, k_value): + return [x[0] for x in Counter(words).most_common(k_value)] +""" + + +from collections import Counter +from functools import total_ordering + +from data_structures.heap.heap import Heap + + +@total_ordering +class WordCount: + def __init__(self, word: str, count: int) -> None: + self.word = word + self.count = count + + def __eq__(self, other: object) -> bool: + """ + >>> WordCount('a', 1).__eq__(WordCount('b', 1)) + True + >>> WordCount('a', 1).__eq__(WordCount('a', 1)) + True + >>> WordCount('a', 1).__eq__(WordCount('a', 2)) + False + >>> WordCount('a', 1).__eq__(WordCount('b', 2)) + False + >>> WordCount('a', 1).__eq__(1) + NotImplemented + """ + if not isinstance(other, WordCount): + return NotImplemented + return self.count == other.count + + def __lt__(self, other: object) -> bool: + """ + >>> WordCount('a', 1).__lt__(WordCount('b', 1)) + False + >>> WordCount('a', 1).__lt__(WordCount('a', 1)) + False + >>> WordCount('a', 1).__lt__(WordCount('a', 2)) + True + >>> WordCount('a', 1).__lt__(WordCount('b', 2)) + True + >>> WordCount('a', 2).__lt__(WordCount('a', 1)) + False + >>> WordCount('a', 2).__lt__(WordCount('b', 1)) + False + >>> WordCount('a', 1).__lt__(1) + NotImplemented + """ + if not isinstance(other, WordCount): + return NotImplemented + return self.count < other.count + + +def top_k_frequent_words(words: list[str], k_value: int) -> list[str]: + """ + Returns the `k_value` most frequently occurring words, + in non-increasing order of occurrence. + In this context, a word is defined as an element in the provided list. + + In case `k_value` is greater than the number of distinct words, a value of k equal + to the number of distinct words will be considered, instead. 
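# Illustrative aside, not part of the patch: the same order statistic via the
# standard library's heap helpers, complementing the Counter one-liner quoted in
# the module docstring above. The function name is invented for this sketch.
import heapq
from collections import Counter


def top_k_with_heapq(words: list[str], k_value: int) -> list[str]:
    counts = Counter(words)
    largest = heapq.nlargest(k_value, counts.items(), key=lambda item: item[1])
    return [word for word, _ in largest]


assert top_k_with_heapq(["a", "b", "c", "a", "c", "c"], 2) == ["c", "a"]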
+ + >>> top_k_frequent_words(['a', 'b', 'c', 'a', 'c', 'c'], 3) + ['c', 'a', 'b'] + >>> top_k_frequent_words(['a', 'b', 'c', 'a', 'c', 'c'], 2) + ['c', 'a'] + >>> top_k_frequent_words(['a', 'b', 'c', 'a', 'c', 'c'], 1) + ['c'] + >>> top_k_frequent_words(['a', 'b', 'c', 'a', 'c', 'c'], 0) + [] + >>> top_k_frequent_words([], 1) + [] + >>> top_k_frequent_words(['a', 'a'], 2) + ['a'] + """ + heap: Heap[WordCount] = Heap() + count_by_word = Counter(words) + heap.build_max_heap( + [WordCount(word, count) for word, count in count_by_word.items()] + ) + return [heap.extract_max().word for _ in range(min(k_value, len(count_by_word)))] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c4dcc44dd44f7e3e7c65debc8e173080fc693150 Mon Sep 17 00:00:00 2001 From: Sahil Goel <55365655+sahilg13@users.noreply.github.com> Date: Sun, 30 Apr 2023 13:33:22 -0400 Subject: [PATCH 0822/1543] Added an algorithm to calculate the present value of cash flows (#8700) * Added an algorithm to calculate the present value of cash flows * added doctest and reference * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Resolving deprecation issues with typing module * Fixing argument type checks and adding doctest case * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixing failing doctest case by requiring less precision due to floating point inprecision * Updating return type * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test cases for more coverage * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Make improvements based on Rohan's suggestions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update financial/present_value.py Committed first suggestion Co-authored-by: Christian Clauss * Update financial/present_value.py Committed second suggestion Co-authored-by: Christian Clauss * Update financial/present_value.py Committed third suggestion Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- financial/present_value.py | 41 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 financial/present_value.py diff --git a/financial/present_value.py b/financial/present_value.py new file mode 100644 index 000000000000..dc8191a6ef53 --- /dev/null +++ b/financial/present_value.py @@ -0,0 +1,41 @@ +""" +Reference: https://www.investopedia.com/terms/p/presentvalue.asp + +An algorithm that calculates the present value of a stream of yearly cash flows given... +1. The discount rate (as a decimal, not a percent) +2. 
An array of cash flows, with the index of the cash flow being the associated year + +Note: This algorithm assumes that cash flows are paid at the end of the specified year + + +def present_value(discount_rate: float, cash_flows: list[float]) -> float: + """ + >>> present_value(0.13, [10, 20.70, -293, 297]) + 4.69 + >>> present_value(0.07, [-109129.39, 30923.23, 15098.93, 29734,39]) + -42739.63 + >>> present_value(0.07, [109129.39, 30923.23, 15098.93, 29734,39]) + 175519.15 + >>> present_value(-1, [109129.39, 30923.23, 15098.93, 29734,39]) + Traceback (most recent call last): + ... + ValueError: Discount rate cannot be negative + >>> present_value(0.03, []) + Traceback (most recent call last): + ... + ValueError: Cash flows list cannot be empty + """ + if discount_rate < 0: + raise ValueError("Discount rate cannot be negative") + if not cash_flows: + raise ValueError("Cash flows list cannot be empty") + present_value = sum( + cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows) + ) + return round(present_value, ndigits=2) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f6df26bf0f5c05d53b6fd24552de9e3eec2334aa Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 1 May 2023 02:59:42 +0200 Subject: [PATCH 0823/1543] Fix docstring in present_value.py (#8702) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 ++ financial/present_value.py | 1 + 2 files changed, 3 insertions(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index 681d252b232d..167d062b4a9f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -363,6 +363,7 @@ ## Financial * [Equated Monthly Installments](financial/equated_monthly_installments.py) * [Interest](financial/interest.py) + * [Present Value](financial/present_value.py) * [Price Plus Tax](financial/price_plus_tax.py) ## Fractals @@ -655,6 +656,7 @@ * [Sum Of Harmonic Series](maths/sum_of_harmonic_series.py) * [Sumset](maths/sumset.py) * [Sylvester Sequence](maths/sylvester_sequence.py) + * [Tanh](maths/tanh.py) * [Test Prime Check](maths/test_prime_check.py) * [Trapezoidal Rule](maths/trapezoidal_rule.py) * [Triplet Sum](maths/triplet_sum.py) diff --git a/financial/present_value.py b/financial/present_value.py index dc8191a6ef53..f74612b923af 100644 --- a/financial/present_value.py +++ b/financial/present_value.py @@ -6,6 +6,7 @@ 2. 
An array of cash flows, with the index of the cash flow being the associated year Note: This algorithm assumes that cash flows are paid at the end of the specified year +""" def present_value(discount_rate: float, cash_flows: list[float]) -> float: From e966c5cc0f856afab11a8bb150ef3b48f0c63112 Mon Sep 17 00:00:00 2001 From: Himanshu Tomar Date: Mon, 1 May 2023 15:53:03 +0530 Subject: [PATCH 0824/1543] Added minimum waiting time problem solution using greedy algorithm (#8701) * Added minimum waiting time problem solution using greedy algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ruff --fix * Add type hints * Added two more doc test * Removed unnecessary comments * updated type hints * Updated the code as per the code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + greedy_methods/minimum_waiting_time.py | 48 ++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) create mode 100644 greedy_methods/minimum_waiting_time.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 167d062b4a9f..021669d13b4a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -450,6 +450,7 @@ * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) + * [Minimum Waiting Time ](greedy_methods/minimum_waiting_time.py) ## Hashes * [Adler32](hashes/adler32.py) diff --git a/greedy_methods/minimum_waiting_time.py b/greedy_methods/minimum_waiting_time.py new file mode 100644 index 000000000000..aaae8cf8f720 --- /dev/null +++ b/greedy_methods/minimum_waiting_time.py @@ -0,0 +1,48 @@ +""" +Calculate the minimum waiting time using a greedy algorithm. +reference: https://www.youtube.com/watch?v=Sf3eiO12eJs + +For doctests run following command: +python -m doctest -v minimum_waiting_time.py + +The minimum_waiting_time function uses a greedy algorithm to calculate the minimum +time for queries to complete. It sorts the list in non-decreasing order, calculates +the waiting time for each query by multiplying its position in the list with the +sum of all remaining query times, and returns the total waiting time. A doctest +ensures that the function produces the correct output. +""" + + +def minimum_waiting_time(queries: list[int]) -> int: + """ + This function takes a list of query times and returns the minimum waiting time + for all queries to be completed. 
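# Illustrative aside, not part of the patch: a hand trace of the greedy rule for the
# doctest input [3, 2, 1, 2, 6]. Sorted it is [1, 2, 2, 3, 6]; each query waits for
# everything scheduled before it, giving 0 + 1 + (1+2) + (1+2+2) + (1+2+2+3) = 17,
# exactly what the closed form sum(query * (n - i - 1)) further down evaluates to.
assert sum(t * (5 - i - 1) for i, t in enumerate(sorted([3, 2, 1, 2, 6]))) == 17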
+ + Args: + queries: A list of queries measured in picoseconds + + Returns: + total_waiting_time: Minimum waiting time measured in picoseconds + + Examples: + >>> minimum_waiting_time([3, 2, 1, 2, 6]) + 17 + >>> minimum_waiting_time([3, 2, 1]) + 4 + >>> minimum_waiting_time([1, 2, 3, 4]) + 10 + >>> minimum_waiting_time([5, 5, 5, 5]) + 30 + >>> minimum_waiting_time([]) + 0 + """ + n = len(queries) + if n in (0, 1): + return 0 + return sum(query * (n - i - 1) for i, query in enumerate(sorted(queries))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 777f966893d7042d350b44b05ce7f8431f561509 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 1 May 2023 23:48:56 +0200 Subject: [PATCH 0825/1543] [pre-commit.ci] pre-commit autoupdate (#8704) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.262 → v0.0.263](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.262...v0.0.263) - [github.com/tox-dev/pyproject-fmt: 0.10.0 → 0.11.1](https://github.com/tox-dev/pyproject-fmt/compare/0.10.0...0.11.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 288473ca365f..accb57da35d3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.262 + rev: v0.0.263 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.10.0" + rev: "0.11.1" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 021669d13b4a..826bd6fd39d4 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -449,8 +449,8 @@ ## Greedy Methods * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) + * [Minimum Waiting Time](greedy_methods/minimum_waiting_time.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) - * [Minimum Waiting Time ](greedy_methods/minimum_waiting_time.py) ## Hashes * [Adler32](hashes/adler32.py) From 73105145090f0ce972f6fa29cc5d71f012dd8c92 Mon Sep 17 00:00:00 2001 From: Dipankar Mitra <50228537+Mitra-babu@users.noreply.github.com> Date: Tue, 2 May 2023 20:06:28 +0530 Subject: [PATCH 0826/1543] The ELU activation is added (#8699) * tanh function been added * tanh function been added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function is added * tanh function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function added * tanh function added * tanh function is added * Apply suggestions from code review * ELU activation function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * elu activation is added * ELU activation is added * Update maths/elu_activation.py Co-authored-by: Christian Clauss * Exponential_linear_unit activation is added * Exponential_linear_unit activation is added --------- 
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../exponential_linear_unit.py | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 neural_network/activation_functions/exponential_linear_unit.py diff --git a/neural_network/activation_functions/exponential_linear_unit.py b/neural_network/activation_functions/exponential_linear_unit.py new file mode 100644 index 000000000000..7a3cf1d84e71 --- /dev/null +++ b/neural_network/activation_functions/exponential_linear_unit.py @@ -0,0 +1,40 @@ +""" +Implements the Exponential Linear Unit or ELU function. + +The function takes a vector of K real numbers and a real number alpha as +input and then applies the ELU function to each element of the vector. + +Script inspired from its corresponding Wikipedia article +https://en.wikipedia.org/wiki/Rectifier_(neural_networks) +""" + +import numpy as np + + +def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray: + """ + Implements the ELU activation function. + Parameters: + vector: the array containing input of elu activation + alpha: hyper-parameter + return: + elu (np.array): The input numpy array after applying elu. + + Mathematically, f(x) = x, x>0 else (alpha * (e^x -1)), x<=0, alpha >=0 + + Examples: + >>> exponential_linear_unit(vector=np.array([2.3,0.6,-2,-3.8]), alpha=0.3) + array([ 2.3 , 0.6 , -0.25939942, -0.29328877]) + + >>> exponential_linear_unit(vector=np.array([-9.2,-0.3,0.45,-4.56]), alpha=0.067) + array([-0.06699323, -0.01736518, 0.45 , -0.06629904]) + + + """ + return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 91cc3a240f05922024d4c5523422138857c48ae0 Mon Sep 17 00:00:00 2001 From: Pronoy Mandal Date: Wed, 10 May 2023 15:04:36 +0530 Subject: [PATCH 0827/1543] Update game_of_life.py (#8703) Rectify spelling in docstring --- cellular_automata/game_of_life.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index 8e54702519b9..3382af7b5db6 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -34,7 +34,7 @@ from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap -usage_doc = "Usage of script: script_nama " +usage_doc = "Usage of script: script_name " choice = [0] * 100 + [1] * 10 random.shuffle(choice) From 209a59ee562dd4b0358d8d1a12b112ec3f3e68ed Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Wed, 10 May 2023 15:08:52 +0530 Subject: [PATCH 0828/1543] Update and_gate.py (#8690) * Update and_gate.py addressing issue #8656 by calling `test_and_gate()` , ensuring that all the assertions are verified before the actual output is printed. 
* Update and_gate.py addressing issue #8632 --- boolean_algebra/and_gate.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/boolean_algebra/and_gate.py b/boolean_algebra/and_gate.py index cbbcfde79f33..834116772ee7 100644 --- a/boolean_algebra/and_gate.py +++ b/boolean_algebra/and_gate.py @@ -43,6 +43,8 @@ def test_and_gate() -> None: if __name__ == "__main__": + test_and_gate() + print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1)) From 44aa17fb86b0c04508580425b588c0f8a0cf4ce9 Mon Sep 17 00:00:00 2001 From: shricubed Date: Wed, 10 May 2023 14:50:32 -0400 Subject: [PATCH 0829/1543] Working binary insertion sort in Python (#8024) --- sorts/binary_insertion_sort.py | 61 ++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 sorts/binary_insertion_sort.py diff --git a/sorts/binary_insertion_sort.py b/sorts/binary_insertion_sort.py new file mode 100644 index 000000000000..8d41025583b1 --- /dev/null +++ b/sorts/binary_insertion_sort.py @@ -0,0 +1,61 @@ +""" +This is a pure Python implementation of the binary insertion sort algorithm + +For doctests run following command: +python -m doctest -v binary_insertion_sort.py +or +python3 -m doctest -v binary_insertion_sort.py + +For manual testing run: +python binary_insertion_sort.py +""" + + +def binary_insertion_sort(collection: list) -> list: + """Pure implementation of the binary insertion sort algorithm in Python + :param collection: some mutable ordered collection with heterogeneous + comparable items inside + :return: the same collection ordered by ascending + + Examples: + >>> binary_insertion_sort([0, 4, 1234, 4, 1]) + [0, 1, 4, 4, 1234] + >>> binary_insertion_sort([]) == sorted([]) + True + >>> binary_insertion_sort([-1, -2, -3]) == sorted([-1, -2, -3]) + True + >>> lst = ['d', 'a', 'b', 'e', 'c'] + >>> binary_insertion_sort(lst) == sorted(lst) + True + >>> import random + >>> collection = random.sample(range(-50, 50), 100) + >>> binary_insertion_sort(collection) == sorted(collection) + True + >>> import string + >>> collection = random.choices(string.ascii_letters + string.digits, k=100) + >>> binary_insertion_sort(collection) == sorted(collection) + True + """ + + n = len(collection) + for i in range(1, n): + val = collection[i] + low = 0 + high = i - 1 + + while low <= high: + mid = (low + high) // 2 + if val < collection[mid]: + high = mid - 1 + else: + low = mid + 1 + for j in range(i, low, -1): + collection[j] = collection[j - 1] + collection[low] = val + return collection + + +if __name__ == "__main__": + user_input = input("Enter numbers separated by a comma:\n").strip() + unsorted = [int(item) for item in user_input.split(",")] + print(binary_insertion_sort(unsorted)) From 997d56fb633e3bd726c1fac32a2d37277361d5e9 Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Wed, 10 May 2023 21:53:47 +0300 Subject: [PATCH 0830/1543] Switch case (#7995) --- strings/string_switch_case.py | 108 ++++++++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 strings/string_switch_case.py diff --git a/strings/string_switch_case.py b/strings/string_switch_case.py new file mode 100644 index 000000000000..9a07472dfd71 --- /dev/null +++ b/strings/string_switch_case.py @@ -0,0 +1,108 @@ +import re + +""" +general info: +https://en.wikipedia.org/wiki/Naming_convention_(programming)#Python_and_Ruby + +pascal case [ an upper Camel Case ]: https://en.wikipedia.org/wiki/Camel_case + +camel case: 
https://en.wikipedia.org/wiki/Camel_case + +kebab case [ can be found in general info ]: +https://en.wikipedia.org/wiki/Naming_convention_(programming)#Python_and_Ruby + +snake case: https://en.wikipedia.org/wiki/Snake_case +""" + + +# assistant functions +def split_input(str_: str) -> list: + """ + >>> split_input("one two 31235three4four") + [['one', 'two', '31235three4four']] + """ + return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)] + + +def to_simple_case(str_: str) -> str: + """ + >>> to_simple_case("one two 31235three4four") + 'OneTwo31235three4four' + """ + string_split = split_input(str_) + return "".join( + ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split] + ) + + +def to_complex_case(text: str, upper: bool, separator: str) -> str: + """ + >>> to_complex_case("one two 31235three4four", True, "_") + 'ONE_TWO_31235THREE4FOUR' + >>> to_complex_case("one two 31235three4four", False, "-") + 'one-two-31235three4four' + """ + try: + string_split = split_input(text) + if upper: + res_str = "".join( + [ + separator.join([char.upper() for char in sub_str]) + for sub_str in string_split + ] + ) + else: + res_str = "".join( + [ + separator.join([char.lower() for char in sub_str]) + for sub_str in string_split + ] + ) + return res_str + except IndexError: + return "not valid string" + + +# main content +def to_pascal_case(text: str) -> str: + """ + >>> to_pascal_case("one two 31235three4four") + 'OneTwo31235three4four' + """ + return to_simple_case(text) + + +def to_camel_case(text: str) -> str: + """ + >>> to_camel_case("one two 31235three4four") + 'oneTwo31235three4four' + """ + try: + res_str = to_simple_case(text) + return res_str[0].lower() + res_str[1:] + except IndexError: + return "not valid string" + + +def to_snake_case(text: str, upper: bool) -> str: + """ + >>> to_snake_case("one two 31235three4four", True) + 'ONE_TWO_31235THREE4FOUR' + >>> to_snake_case("one two 31235three4four", False) + 'one_two_31235three4four' + """ + return to_complex_case(text, upper, "_") + + +def to_kebab_case(text: str, upper: bool) -> str: + """ + >>> to_kebab_case("one two 31235three4four", True) + 'ONE-TWO-31235THREE4FOUR' + >>> to_kebab_case("one two 31235three4four", False) + 'one-two-31235three4four' + """ + return to_complex_case(text, upper, "-") + + +if __name__ == "__main__": + __import__("doctest").testmod() From 6939538a41202bf05f958c9c2d7c1c20e2f87430 Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Wed, 10 May 2023 21:55:48 +0300 Subject: [PATCH 0831/1543] adding the remove digit algorithm (#6708) --- maths/remove_digit.py | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 maths/remove_digit.py diff --git a/maths/remove_digit.py b/maths/remove_digit.py new file mode 100644 index 000000000000..db14ac902a6f --- /dev/null +++ b/maths/remove_digit.py @@ -0,0 +1,37 @@ +def remove_digit(num: int) -> int: + """ + + returns the biggest possible result + that can be achieved by removing + one digit from the given number + + >>> remove_digit(152) + 52 + >>> remove_digit(6385) + 685 + >>> remove_digit(-11) + 1 + >>> remove_digit(2222222) + 222222 + >>> remove_digit("2222222") + Traceback (most recent call last): + TypeError: only integers accepted as input + >>> remove_digit("string input") + Traceback (most recent call last): + TypeError: only integers accepted as input + """ + + if not isinstance(num, int): + raise TypeError("only integers accepted as input") + else: + 
num_str = str(abs(num)) + num_transpositions = [list(num_str) for char in range(len(num_str))] + for index in range(len(num_str)): + num_transpositions[index].pop(index) + return max( + int("".join(list(transposition))) for transposition in num_transpositions + ) + + +if __name__ == "__main__": + __import__("doctest").testmod() From 793e564e1d4bd6e00b6e2f80869c5fd1fd2872b3 Mon Sep 17 00:00:00 2001 From: Pronoy Mandal Date: Thu, 11 May 2023 00:30:59 +0530 Subject: [PATCH 0832/1543] Create maximum_subsequence.py (#7811) --- DIRECTORY.md | 1 + other/maximum_subsequence.py | 42 ++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 other/maximum_subsequence.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 826bd6fd39d4..a70ad6861d6f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -716,6 +716,7 @@ * [Lru Cache](other/lru_cache.py) * [Magicdiamondpattern](other/magicdiamondpattern.py) * [Maximum Subarray](other/maximum_subarray.py) + * [Maximum Subsequence](other/maximum_subsequence.py) * [Nested Brackets](other/nested_brackets.py) * [Password](other/password.py) * [Quine](other/quine.py) diff --git a/other/maximum_subsequence.py b/other/maximum_subsequence.py new file mode 100644 index 000000000000..f81717596532 --- /dev/null +++ b/other/maximum_subsequence.py @@ -0,0 +1,42 @@ +from collections.abc import Sequence + + +def max_subsequence_sum(nums: Sequence[int] | None = None) -> int: + """Return the maximum possible sum amongst all non - empty subsequences. + + Raises: + ValueError: when nums is empty. + + >>> max_subsequence_sum([1,2,3,4,-2]) + 10 + >>> max_subsequence_sum([-2, -3, -1, -4, -6]) + -1 + >>> max_subsequence_sum([]) + Traceback (most recent call last): + . . . + ValueError: Input sequence should not be empty + >>> max_subsequence_sum() + Traceback (most recent call last): + . . . 
+ ValueError: Input sequence should not be empty + """ + if nums is None or not nums: + raise ValueError("Input sequence should not be empty") + + ans = nums[0] + for i in range(1, len(nums)): + num = nums[i] + ans = max(ans, ans + num, num) + + return ans + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Try on a sample input from the user + n = int(input("Enter number of elements : ").strip()) + array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n] + print(max_subsequence_sum(array)) From 1faf10b5c2dff8cef3f5d59f60a126bd19bb1c44 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 14 May 2023 22:03:13 +0100 Subject: [PATCH 0833/1543] Correct ruff failures (#8732) * fix: Correct ruff problems * updating DIRECTORY.md * fix: Fix pre-commit errors * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 6 +++++- conversions/prefix_conversions_string.py | 4 ++-- conversions/rgb_hsv_conversion.py | 4 ++-- .../test_digital_image_processing.py | 2 +- ...ion.py => strassen_matrix_multiplication.py.BROKEN} | 2 +- dynamic_programming/fibonacci.py | 2 +- maths/euclidean_distance.py | 6 +++--- physics/horizontal_projectile_motion.py | 6 +++--- searches/binary_tree_traversal.py | 10 ++++------ 9 files changed, 22 insertions(+), 20 deletions(-) rename divide_and_conquer/{strassen_matrix_multiplication.py => strassen_matrix_multiplication.py.BROKEN} (99%) diff --git a/DIRECTORY.md b/DIRECTORY.md index a70ad6861d6f..fc6cbaf7ff41 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -294,7 +294,6 @@ * [Mergesort](divide_and_conquer/mergesort.py) * [Peak](divide_and_conquer/peak.py) * [Power](divide_and_conquer/power.py) - * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) ## Dynamic Programming * [Abbreviation](dynamic_programming/abbreviation.py) @@ -632,6 +631,7 @@ * [Radians](maths/radians.py) * [Radix2 Fft](maths/radix2_fft.py) * [Relu](maths/relu.py) + * [Remove Digit](maths/remove_digit.py) * [Runge Kutta](maths/runge_kutta.py) * [Segmented Sieve](maths/segmented_sieve.py) * Series @@ -694,6 +694,8 @@ ## Neural Network * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) + * Activation Functions + * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Input Data](neural_network/input_data.py) @@ -1080,6 +1082,7 @@ ## Sorts * [Bead Sort](sorts/bead_sort.py) + * [Binary Insertion Sort](sorts/binary_insertion_sort.py) * [Bitonic Sort](sorts/bitonic_sort.py) * [Bogo Sort](sorts/bogo_sort.py) * [Bubble Sort](sorts/bubble_sort.py) @@ -1170,6 +1173,7 @@ * [Reverse Words](strings/reverse_words.py) * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) + * [String Switch Case](strings/string_switch_case.py) * [Text Justification](strings/text_justification.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) diff --git a/conversions/prefix_conversions_string.py b/conversions/prefix_conversions_string.py index 3851d7c8b993..9344c9672a1f 100644 --- a/conversions/prefix_conversions_string.py +++ b/conversions/prefix_conversions_string.py @@ -96,7 +96,7 @@ def add_si_prefix(value: float) -> str: for name_prefix, value_prefix in 
prefixes.items(): numerical_part = value / (10**value_prefix) if numerical_part > 1: - return f"{str(numerical_part)} {name_prefix}" + return f"{numerical_part!s} {name_prefix}" return str(value) @@ -111,7 +111,7 @@ def add_binary_prefix(value: float) -> str: for prefix in BinaryUnit: numerical_part = value / (2**prefix.value) if numerical_part > 1: - return f"{str(numerical_part)} {prefix.name}" + return f"{numerical_part!s} {prefix.name}" return str(value) diff --git a/conversions/rgb_hsv_conversion.py b/conversions/rgb_hsv_conversion.py index 081cfe1d75e0..74b3d33e49e7 100644 --- a/conversions/rgb_hsv_conversion.py +++ b/conversions/rgb_hsv_conversion.py @@ -121,8 +121,8 @@ def rgb_to_hsv(red: int, green: int, blue: int) -> list[float]: float_red = red / 255 float_green = green / 255 float_blue = blue / 255 - value = max(max(float_red, float_green), float_blue) - chroma = value - min(min(float_red, float_green), float_blue) + value = max(float_red, float_green, float_blue) + chroma = value - min(float_red, float_green, float_blue) saturation = 0 if value == 0 else chroma / value if chroma == 0: diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index c999464ce85e..fee7ab247b55 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -96,7 +96,7 @@ def test_nearest_neighbour( def test_local_binary_pattern(): - file_path: str = "digital_image_processing/image_data/lena.jpg" + file_path = "digital_image_processing/image_data/lena.jpg" # Reading the image and converting it to grayscale. image = imread(file_path, 0) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN similarity index 99% rename from divide_and_conquer/strassen_matrix_multiplication.py rename to divide_and_conquer/strassen_matrix_multiplication.py.BROKEN index 371605d6d4d4..2ca91c63bf4c 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN @@ -122,7 +122,7 @@ def strassen(matrix1: list, matrix2: list) -> list: if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]: return [matrix1, matrix2] - maximum = max(max(dimension1), max(dimension2)) + maximum = max(dimension1, dimension2) maxim = int(math.pow(2, math.ceil(math.log2(maximum)))) new_matrix1 = matrix1 new_matrix2 = matrix2 diff --git a/dynamic_programming/fibonacci.py b/dynamic_programming/fibonacci.py index 7ec5993ef38d..c102493aa00b 100644 --- a/dynamic_programming/fibonacci.py +++ b/dynamic_programming/fibonacci.py @@ -24,7 +24,7 @@ def get(self, index: int) -> list: return self.sequence[:index] -def main(): +def main() -> None: print( "Fibonacci Series Using Dynamic Programming\n", "Enter the index of the Fibonacci number you want to calculate ", diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py index 22012e92c9cf..9b29b37b0ce6 100644 --- a/maths/euclidean_distance.py +++ b/maths/euclidean_distance.py @@ -1,12 +1,12 @@ from __future__ import annotations +import typing from collections.abc import Iterable -from typing import Union import numpy as np -Vector = Union[Iterable[float], Iterable[int], np.ndarray] -VectorOut = Union[np.float64, int, float] +Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 +VectorOut = typing.Union[np.float64, int, float] # noqa: UP007 def euclidean_distance(vector_1: 
Vector, vector_2: Vector) -> VectorOut: diff --git a/physics/horizontal_projectile_motion.py b/physics/horizontal_projectile_motion.py index dbde3660f62f..80f85a1b7146 100644 --- a/physics/horizontal_projectile_motion.py +++ b/physics/horizontal_projectile_motion.py @@ -147,6 +147,6 @@ def test_motion() -> None: # Print results print() print("Results: ") - print(f"Horizontal Distance: {str(horizontal_distance(init_vel, angle))} [m]") - print(f"Maximum Height: {str(max_height(init_vel, angle))} [m]") - print(f"Total Time: {str(total_time(init_vel, angle))} [s]") + print(f"Horizontal Distance: {horizontal_distance(init_vel, angle)!s} [m]") + print(f"Maximum Height: {max_height(init_vel, angle)!s} [m]") + print(f"Total Time: {total_time(init_vel, angle)!s} [s]") diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index 76e80df25a13..6fb841af4294 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -13,11 +13,9 @@ def __init__(self, data): self.left = None -def build_tree(): +def build_tree() -> TreeNode: print("\n********Press N to stop entering at any point of time********\n") - check = input("Enter the value of the root node: ").strip().lower() or "n" - if check == "n": - return None + check = input("Enter the value of the root node: ").strip().lower() q: queue.Queue = queue.Queue() tree_node = TreeNode(int(check)) q.put(tree_node) @@ -37,7 +35,7 @@ def build_tree(): right_node = TreeNode(int(check)) node_found.right = right_node q.put(right_node) - return None + raise def pre_order(node: TreeNode) -> None: @@ -272,7 +270,7 @@ def prompt(s: str = "", width=50, char="*") -> str: doctest.testmod() print(prompt("Binary Tree Traversals")) - node = build_tree() + node: TreeNode = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") From 2a57dafce096b51b4b28d1495116e79472c8a3f4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 May 2023 22:27:59 +0100 Subject: [PATCH 0834/1543] [pre-commit.ci] pre-commit autoupdate (#8716) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.263 → v0.0.267](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.263...v0.0.267) - [github.com/tox-dev/pyproject-fmt: 0.11.1 → 0.11.2](https://github.com/tox-dev/pyproject-fmt/compare/0.11.1...0.11.2) - [github.com/pre-commit/mirrors-mypy: v1.2.0 → v1.3.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.2.0...v1.3.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index accb57da35d3..6bdbc7370c9c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.263 + rev: v0.0.267 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.11.1" + rev: "0.11.2" hooks: - id: pyproject-fmt @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.2.0 + rev: v1.3.0 hooks: - id: mypy args: From c0892a06515b8ea5030db2e8344dee2292bb10ad Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 16 May 2023 00:47:50 +0300 Subject: [PATCH 0835/1543] 
Reduce the complexity of genetic_algorithm/basic_string.py (#8606) --- genetic_algorithm/basic_string.py | 95 ++++++++++++++++--------------- 1 file changed, 50 insertions(+), 45 deletions(-) diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 45b8be651f6e..388e7219f54b 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -21,6 +21,54 @@ random.seed(random.randint(0, 1000)) +def evaluate(item: str, main_target: str) -> tuple[str, float]: + """ + Evaluate how similar the item is with the target by just + counting each char in the right position + >>> evaluate("Helxo Worlx", "Hello World") + ('Helxo Worlx', 9.0) + """ + score = len([g for position, g in enumerate(item) if g == main_target[position]]) + return (item, float(score)) + + +def crossover(parent_1: str, parent_2: str) -> tuple[str, str]: + """Slice and combine two string at a random point.""" + random_slice = random.randint(0, len(parent_1) - 1) + child_1 = parent_1[:random_slice] + parent_2[random_slice:] + child_2 = parent_2[:random_slice] + parent_1[random_slice:] + return (child_1, child_2) + + +def mutate(child: str, genes: list[str]) -> str: + """Mutate a random gene of a child with another one from the list.""" + child_list = list(child) + if random.uniform(0, 1) < MUTATION_PROBABILITY: + child_list[random.randint(0, len(child)) - 1] = random.choice(genes) + return "".join(child_list) + + +# Select, crossover and mutate a new population. +def select( + parent_1: tuple[str, float], + population_score: list[tuple[str, float]], + genes: list[str], +) -> list[str]: + """Select the second parent and generate new population""" + pop = [] + # Generate more children proportionally to the fitness score. + child_n = int(parent_1[1] * 100) + 1 + child_n = 10 if child_n >= 10 else child_n + for _ in range(child_n): + parent_2 = population_score[random.randint(0, N_SELECTED)][0] + + child_1, child_2 = crossover(parent_1[0], parent_2) + # Append new string to the population list. + pop.append(mutate(child_1, genes)) + pop.append(mutate(child_2, genes)) + return pop + + def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]: """ Verify that the target contains no genes besides the ones inside genes variable. @@ -70,17 +118,6 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, total_population += len(population) # Random population created. Now it's time to evaluate. - def evaluate(item: str, main_target: str = target) -> tuple[str, float]: - """ - Evaluate how similar the item is with the target by just - counting each char in the right position - >>> evaluate("Helxo Worlx", Hello World) - ["Helxo Worlx", 9] - """ - score = len( - [g for position, g in enumerate(item) if g == main_target[position]] - ) - return (item, float(score)) # Adding a bit of concurrency can make everything faster, # @@ -94,7 +131,7 @@ def evaluate(item: str, main_target: str = target) -> tuple[str, float]: # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. - population_score = [evaluate(item) for item in population] + population_score = [evaluate(item, target) for item in population] # Check if there is a matching evolution. 
population_score = sorted(population_score, key=lambda x: x[1], reverse=True) @@ -121,41 +158,9 @@ def evaluate(item: str, main_target: str = target) -> tuple[str, float]: (item, score / len(target)) for item, score in population_score ] - # Select, crossover and mutate a new population. - def select(parent_1: tuple[str, float]) -> list[str]: - """Select the second parent and generate new population""" - pop = [] - # Generate more children proportionally to the fitness score. - child_n = int(parent_1[1] * 100) + 1 - child_n = 10 if child_n >= 10 else child_n - for _ in range(child_n): - parent_2 = population_score[ # noqa: B023 - random.randint(0, N_SELECTED) - ][0] - - child_1, child_2 = crossover(parent_1[0], parent_2) - # Append new string to the population list. - pop.append(mutate(child_1)) - pop.append(mutate(child_2)) - return pop - - def crossover(parent_1: str, parent_2: str) -> tuple[str, str]: - """Slice and combine two string at a random point.""" - random_slice = random.randint(0, len(parent_1) - 1) - child_1 = parent_1[:random_slice] + parent_2[random_slice:] - child_2 = parent_2[:random_slice] + parent_1[random_slice:] - return (child_1, child_2) - - def mutate(child: str) -> str: - """Mutate a random gene of a child with another one from the list.""" - child_list = list(child) - if random.uniform(0, 1) < MUTATION_PROBABILITY: - child_list[random.randint(0, len(child)) - 1] = random.choice(genes) - return "".join(child_list) - # This is selection for i in range(N_SELECTED): - population.extend(select(population_score[int(i)])) + population.extend(select(population_score[int(i)], population_score, genes)) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in From 8102424950f2d3801eda7817d7f69288fd984a63 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 16 May 2023 17:05:55 -0700 Subject: [PATCH 0836/1543] `local_weighted_learning.py`: fix `mypy` errors and more (#8073) --- .../local_weighted_learning.py | 188 +++++++++++------- 1 file changed, 112 insertions(+), 76 deletions(-) diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py index 6260e9ac6bfe..8dd0e55d41df 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.py +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -1,14 +1,55 @@ +""" +Locally weighted linear regression, also called local regression, is a type of +non-parametric linear regression that prioritizes data closest to a given +prediction point. The algorithm estimates the vector of model coefficients β +using weighted least squares regression: + +β = (XᵀWX)⁻¹(XᵀWy), + +where X is the design matrix, y is the response vector, and W is the diagonal +weight matrix. + +This implementation calculates wᵢ, the weight of the ith training sample, using +the Gaussian weight: + +wᵢ = exp(-‖xᵢ - x‖²/(2τ²)), + +where xᵢ is the ith training sample, x is the prediction point, τ is the +"bandwidth", and ‖x‖ is the Euclidean norm (also called the 2-norm or the L² +norm). The bandwidth τ controls how quickly the weight of a training sample +decreases as its distance from the prediction point increases. 
One can think of +the Gaussian weight as a bell curve centered around the prediction point: a +training sample is weighted lower if it's farther from the center, and τ +controls the spread of the bell curve. + +Other types of locally weighted regression such as locally estimated scatterplot +smoothing (LOESS) typically use different weight functions. + +References: + - https://en.wikipedia.org/wiki/Local_regression + - https://en.wikipedia.org/wiki/Weighted_least_squares + - https://cs229.stanford.edu/notes2022fall/main_notes.pdf +""" + import matplotlib.pyplot as plt import numpy as np -def weighted_matrix( - point: np.array, training_data_x: np.array, bandwidth: float -) -> np.array: +def weight_matrix(point: np.ndarray, x_train: np.ndarray, tau: float) -> np.ndarray: """ - Calculate the weight for every point in the data set. - point --> the x value at which we want to make predictions - >>> weighted_matrix( + Calculate the weight of every point in the training data around a given + prediction point + + Args: + point: x-value at which the prediction is being made + x_train: ndarray of x-values for training + tau: bandwidth value, controls how quickly the weight of training values + decreases as the distance from the prediction point increases + + Returns: + m x m weight matrix around the prediction point, where m is the size of + the training set + >>> weight_matrix( ... np.array([1., 1.]), ... np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]), ... 0.6 @@ -17,25 +58,30 @@ def weighted_matrix( [0.00000000e+000, 0.00000000e+000, 0.00000000e+000], [0.00000000e+000, 0.00000000e+000, 0.00000000e+000]]) """ - m, _ = np.shape(training_data_x) # m is the number of training samples - weights = np.eye(m) # Initializing weights as identity matrix - - # calculating weights for all training examples [x(i)'s] + m = len(x_train) # Number of training samples + weights = np.eye(m) # Initialize weights as identity matrix for j in range(m): - diff = point - training_data_x[j] - weights[j, j] = np.exp(diff @ diff.T / (-2.0 * bandwidth**2)) + diff = point - x_train[j] + weights[j, j] = np.exp(diff @ diff.T / (-2.0 * tau**2)) + return weights def local_weight( - point: np.array, - training_data_x: np.array, - training_data_y: np.array, - bandwidth: float, -) -> np.array: + point: np.ndarray, x_train: np.ndarray, y_train: np.ndarray, tau: float +) -> np.ndarray: """ - Calculate the local weights using the weight_matrix function on training data. - Return the weighted matrix. + Calculate the local weights at a given prediction point using the weight + matrix for that point + + Args: + point: x-value at which the prediction is being made + x_train: ndarray of x-values for training + y_train: ndarray of y-values for training + tau: bandwidth value, controls how quickly the weight of training values + decreases as the distance from the prediction point increases + Returns: + ndarray of local weights >>> local_weight( ... np.array([1., 1.]), ... 
np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]), @@ -45,19 +91,28 @@ def local_weight( array([[0.00873174], [0.08272556]]) """ - weight = weighted_matrix(point, training_data_x, bandwidth) - w = np.linalg.inv(training_data_x.T @ (weight @ training_data_x)) @ ( - training_data_x.T @ weight @ training_data_y.T + weight_mat = weight_matrix(point, x_train, tau) + weight = np.linalg.inv(x_train.T @ weight_mat @ x_train) @ ( + x_train.T @ weight_mat @ y_train.T ) - return w + return weight def local_weight_regression( - training_data_x: np.array, training_data_y: np.array, bandwidth: float -) -> np.array: + x_train: np.ndarray, y_train: np.ndarray, tau: float +) -> np.ndarray: """ - Calculate predictions for each data point on axis + Calculate predictions for each point in the training data + + Args: + x_train: ndarray of x-values for training + y_train: ndarray of y-values for training + tau: bandwidth value, controls how quickly the weight of training values + decreases as the distance from the prediction point increases + + Returns: + ndarray of predictions >>> local_weight_regression( ... np.array([[16.99, 10.34], [21.01, 23.68], [24.59, 25.69]]), ... np.array([[1.01, 1.66, 3.5]]), @@ -65,77 +120,57 @@ def local_weight_regression( ... ) array([1.07173261, 1.65970737, 3.50160179]) """ - m, _ = np.shape(training_data_x) - ypred = np.zeros(m) + y_pred = np.zeros(len(x_train)) # Initialize array of predictions + for i, item in enumerate(x_train): + y_pred[i] = item @ local_weight(item, x_train, y_train, tau) - for i, item in enumerate(training_data_x): - ypred[i] = item @ local_weight( - item, training_data_x, training_data_y, bandwidth - ) - - return ypred + return y_pred def load_data( - dataset_name: str, cola_name: str, colb_name: str -) -> tuple[np.array, np.array, np.array, np.array]: + dataset_name: str, x_name: str, y_name: str +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Load data from seaborn and split it into x and y points + >>> pass # No doctests, function is for demo purposes only """ import seaborn as sns data = sns.load_dataset(dataset_name) - col_a = np.array(data[cola_name]) # total_bill - col_b = np.array(data[colb_name]) # tip - - mcol_a = col_a.copy() - mcol_b = col_b.copy() - - one = np.ones(np.shape(mcol_b)[0], dtype=int) + x_data = np.array(data[x_name]) + y_data = np.array(data[y_name]) - # pairing elements of one and mcol_a - training_data_x = np.column_stack((one, mcol_a)) + one = np.ones(len(y_data)) - return training_data_x, mcol_b, col_a, col_b + # pairing elements of one and x_data + x_train = np.column_stack((one, x_data)) - -def get_preds(training_data_x: np.array, mcol_b: np.array, tau: float) -> np.array: - """ - Get predictions with minimum error for each training data - >>> get_preds( - ... np.array([[16.99, 10.34], [21.01, 23.68], [24.59, 25.69]]), - ... np.array([[1.01, 1.66, 3.5]]), - ... 0.6 - ... 
) - array([1.07173261, 1.65970737, 3.50160179]) - """ - ypred = local_weight_regression(training_data_x, mcol_b, tau) - return ypred + return x_train, x_data, y_data def plot_preds( - training_data_x: np.array, - predictions: np.array, - col_x: np.array, - col_y: np.array, - cola_name: str, - colb_name: str, -) -> plt.plot: + x_train: np.ndarray, + preds: np.ndarray, + x_data: np.ndarray, + y_data: np.ndarray, + x_name: str, + y_name: str, +) -> None: """ Plot predictions and display the graph + >>> pass # No doctests, function is for demo purposes only """ - xsort = training_data_x.copy() - xsort.sort(axis=0) - plt.scatter(col_x, col_y, color="blue") + x_train_sorted = np.sort(x_train, axis=0) + plt.scatter(x_data, y_data, color="blue") plt.plot( - xsort[:, 1], - predictions[training_data_x[:, 1].argsort(0)], + x_train_sorted[:, 1], + preds[x_train[:, 1].argsort(0)], color="yellow", linewidth=5, ) plt.title("Local Weighted Regression") - plt.xlabel(cola_name) - plt.ylabel(colb_name) + plt.xlabel(x_name) + plt.ylabel(y_name) plt.show() @@ -144,6 +179,7 @@ def plot_preds( doctest.testmod() - training_data_x, mcol_b, col_a, col_b = load_data("tips", "total_bill", "tip") - predictions = get_preds(training_data_x, mcol_b, 0.5) - plot_preds(training_data_x, predictions, col_a, col_b, "total_bill", "tip") + # Demo with a dataset from the seaborn module + training_data_x, total_bill, tip = load_data("tips", "total_bill", "tip") + predictions = local_weight_regression(training_data_x, tip, 5) + plot_preds(training_data_x, predictions, total_bill, tip, "total_bill", "tip") From 3dc143f7218a1221f346c0fccb516d1199850e18 Mon Sep 17 00:00:00 2001 From: Rohan Saraogi <62804340+r0sa2@users.noreply.github.com> Date: Wed, 17 May 2023 05:38:56 +0530 Subject: [PATCH 0837/1543] Added odd_sieve.py (#8740) --- maths/odd_sieve.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 maths/odd_sieve.py diff --git a/maths/odd_sieve.py b/maths/odd_sieve.py new file mode 100644 index 000000000000..60e92921a94c --- /dev/null +++ b/maths/odd_sieve.py @@ -0,0 +1,42 @@ +from itertools import compress, repeat +from math import ceil, sqrt + + +def odd_sieve(num: int) -> list[int]: + """ + Returns the prime numbers < `num`. The prime numbers are calculated using an + odd sieve implementation of the Sieve of Eratosthenes algorithm + (see for reference https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes). 
+ + >>> odd_sieve(2) + [] + >>> odd_sieve(3) + [2] + >>> odd_sieve(10) + [2, 3, 5, 7] + >>> odd_sieve(20) + [2, 3, 5, 7, 11, 13, 17, 19] + """ + + if num <= 2: + return [] + if num == 3: + return [2] + + # Odd sieve for numbers in range [3, num - 1] + sieve = bytearray(b"\x01") * ((num >> 1) - 1) + + for i in range(3, int(sqrt(num)) + 1, 2): + if sieve[(i >> 1) - 1]: + i_squared = i**2 + sieve[(i_squared >> 1) - 1 :: i] = repeat( + 0, ceil((num - i_squared) / (i << 1)) + ) + + return [2] + list(compress(range(3, num, 2), sieve)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 61cfb43d2b9246d1e2019ce7f03cb91f452ed2ba Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Wed, 17 May 2023 04:21:16 +0400 Subject: [PATCH 0838/1543] Add h index (#8036) --- DIRECTORY.md | 1 + other/h_index.py | 71 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 other/h_index.py diff --git a/DIRECTORY.md b/DIRECTORY.md index fc6cbaf7ff41..46bd51ce91ea 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -712,6 +712,7 @@ * [Gauss Easter](other/gauss_easter.py) * [Graham Scan](other/graham_scan.py) * [Greedy](other/greedy.py) + * [H Index](other/h_index.py) * [Least Recently Used](other/least_recently_used.py) * [Lfu Cache](other/lfu_cache.py) * [Linear Congruential Generator](other/linear_congruential_generator.py) diff --git a/other/h_index.py b/other/h_index.py new file mode 100644 index 000000000000..e91389675b16 --- /dev/null +++ b/other/h_index.py @@ -0,0 +1,71 @@ +""" +Task: +Given an array of integers citations where citations[i] is the number of +citations a researcher received for their ith paper, return compute the +researcher's h-index. + +According to the definition of h-index on Wikipedia: A scientist has an +index h if h of their n papers have at least h citations each, and the other +n - h papers have no more than h citations each. + +If there are several possible values for h, the maximum one is taken as the +h-index. + +H-Index link: https://en.wikipedia.org/wiki/H-index + +Implementation notes: +Use sorting of array + +Leetcode link: https://leetcode.com/problems/h-index/description/ + +n = len(citations) +Runtime Complexity: O(n * log(n)) +Space Complexity: O(1) + +""" + + +def h_index(citations: list[int]) -> int: + """ + Return H-index of citations + + >>> h_index([3, 0, 6, 1, 5]) + 3 + >>> h_index([1, 3, 1]) + 1 + >>> h_index([1, 2, 3]) + 2 + >>> h_index('test') + Traceback (most recent call last): + ... + ValueError: The citations should be a list of non negative integers. + >>> h_index([1,2,'3']) + Traceback (most recent call last): + ... + ValueError: The citations should be a list of non negative integers. + >>> h_index([1,2,-3]) + Traceback (most recent call last): + ... + ValueError: The citations should be a list of non negative integers. 
+ """ + + # validate: + if not isinstance(citations, list) or not all( + isinstance(item, int) and item >= 0 for item in citations + ): + raise ValueError("The citations should be a list of non negative integers.") + + citations.sort() + len_citations = len(citations) + + for i in range(len_citations): + if citations[len_citations - 1 - i] <= i: + return i + + return len_citations + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a2783c6597a154a87f60bb5878770d2f152a1d09 Mon Sep 17 00:00:00 2001 From: Harkishan Khuva <78949167+hakiKhuva@users.noreply.github.com> Date: Wed, 17 May 2023 05:52:24 +0530 Subject: [PATCH 0839/1543] Create guess_the_number_search.py (#7937) --- other/guess_the_number_search.py | 165 +++++++++++++++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 other/guess_the_number_search.py diff --git a/other/guess_the_number_search.py b/other/guess_the_number_search.py new file mode 100644 index 000000000000..0439223f2ec9 --- /dev/null +++ b/other/guess_the_number_search.py @@ -0,0 +1,165 @@ +""" +guess the number using lower,higher and the value to find or guess + +solution works by dividing lower and higher of number guessed + +suppose lower is 0, higher is 1000 and the number to guess is 355 + +>>> guess_the_number(10, 1000, 17) +started... +guess the number : 17 +details : [505, 257, 133, 71, 40, 25, 17] + +""" + + +def temp_input_value( + min_val: int = 10, max_val: int = 1000, option: bool = True +) -> int: + """ + Temporary input values for tests + + >>> temp_input_value(option=True) + 10 + + >>> temp_input_value(option=False) + 1000 + + >>> temp_input_value(min_val=100, option=True) + 100 + + >>> temp_input_value(min_val=100, max_val=50) + Traceback (most recent call last): + ... + ValueError: Invalid value for min_val or max_val (min_value < max_value) + + >>> temp_input_value("ten","fifty",1) + Traceback (most recent call last): + ... + AssertionError: Invalid type of value(s) specified to function! + + >>> temp_input_value(min_val=-100, max_val=500) + -100 + + >>> temp_input_value(min_val=-5100, max_val=-100) + -5100 + """ + assert ( + isinstance(min_val, int) + and isinstance(max_val, int) + and isinstance(option, bool) + ), "Invalid type of value(s) specified to function!" + + if min_val > max_val: + raise ValueError("Invalid value for min_val or max_val (min_value < max_value)") + return min_val if option else max_val + + +def get_avg(number_1: int, number_2: int) -> int: + """ + Return the mid-number(whole) of two integers a and b + + >>> get_avg(10, 15) + 12 + + >>> get_avg(20, 300) + 160 + + >>> get_avg("abcd", 300) + Traceback (most recent call last): + ... + TypeError: can only concatenate str (not "int") to str + + >>> get_avg(10.5,50.25) + 30 + """ + return int((number_1 + number_2) / 2) + + +def guess_the_number(lower: int, higher: int, to_guess: int) -> None: + """ + The `guess_the_number` function that guess the number by some operations + and using inner functions + + >>> guess_the_number(10, 1000, 17) + started... + guess the number : 17 + details : [505, 257, 133, 71, 40, 25, 17] + + >>> guess_the_number(-10000, 10000, 7) + started... + guess the number : 7 + details : [0, 5000, 2500, 1250, 625, 312, 156, 78, 39, 19, 9, 4, 6, 7] + + >>> guess_the_number(10, 1000, "a") + Traceback (most recent call last): + ... + AssertionError: argument values must be type of "int" + + >>> guess_the_number(10, 1000, 5) + Traceback (most recent call last): + ... 
+ ValueError: guess value must be within the range of lower and higher value + + >>> guess_the_number(10000, 100, 5) + Traceback (most recent call last): + ... + ValueError: argument value for lower and higher must be(lower > higher) + """ + assert ( + isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int) + ), 'argument values must be type of "int"' + + if lower > higher: + raise ValueError("argument value for lower and higher must be(lower > higher)") + + if not lower < to_guess < higher: + raise ValueError( + "guess value must be within the range of lower and higher value" + ) + + def answer(number: int) -> str: + """ + Returns value by comparing with entered `to_guess` number + """ + if number > to_guess: + return "high" + elif number < to_guess: + return "low" + else: + return "same" + + print("started...") + + last_lowest = lower + last_highest = higher + + last_numbers = [] + + while True: + number = get_avg(last_lowest, last_highest) + last_numbers.append(number) + + if answer(number) == "low": + last_lowest = number + elif answer(number) == "high": + last_highest = number + else: + break + + print(f"guess the number : {last_numbers[-1]}") + print(f"details : {str(last_numbers)}") + + +def main() -> None: + """ + starting point or function of script + """ + lower = int(input("Enter lower value : ").strip()) + higher = int(input("Enter high value : ").strip()) + guess = int(input("Enter value to guess : ").strip()) + guess_the_number(lower, higher, guess) + + +if __name__ == "__main__": + main() From 9b3e4028c6927a17656e590e878c2a101bc4e951 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Wed, 17 May 2023 07:47:23 +0100 Subject: [PATCH 0840/1543] Fixes broken "Create guess_the_number_search.py" (#8746) --- DIRECTORY.md | 2 ++ other/guess_the_number_search.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 46bd51ce91ea..82791cde183d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -605,6 +605,7 @@ * [Newton Raphson](maths/newton_raphson.py) * [Number Of Digits](maths/number_of_digits.py) * [Numerical Integration](maths/numerical_integration.py) + * [Odd Sieve](maths/odd_sieve.py) * [Perfect Cube](maths/perfect_cube.py) * [Perfect Number](maths/perfect_number.py) * [Perfect Square](maths/perfect_square.py) @@ -712,6 +713,7 @@ * [Gauss Easter](other/gauss_easter.py) * [Graham Scan](other/graham_scan.py) * [Greedy](other/greedy.py) + * [Guess The Number Search](other/guess_the_number_search.py) * [H Index](other/h_index.py) * [Least Recently Used](other/least_recently_used.py) * [Lfu Cache](other/lfu_cache.py) diff --git a/other/guess_the_number_search.py b/other/guess_the_number_search.py index 0439223f2ec9..01e8898bbb8a 100644 --- a/other/guess_the_number_search.py +++ b/other/guess_the_number_search.py @@ -148,7 +148,7 @@ def answer(number: int) -> str: break print(f"guess the number : {last_numbers[-1]}") - print(f"details : {str(last_numbers)}") + print(f"details : {last_numbers!s}") def main() -> None: From cf5e34d4794fbba04d18c98d5d09854029c83466 Mon Sep 17 00:00:00 2001 From: Rohan Saraogi <62804340+r0sa2@users.noreply.github.com> Date: Fri, 19 May 2023 05:18:22 +0530 Subject: [PATCH 0841/1543] Added is_palindrome.py (#8748) --- maths/is_palindrome.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 maths/is_palindrome.py diff --git a/maths/is_palindrome.py b/maths/is_palindrome.py new file mode 100644 index 000000000000..ba60573ab022 --- /dev/null 
+++ b/maths/is_palindrome.py @@ -0,0 +1,34 @@ +def is_palindrome(num: int) -> bool: + """ + Returns whether `num` is a palindrome or not + (see for reference https://en.wikipedia.org/wiki/Palindromic_number). + + >>> is_palindrome(-121) + False + >>> is_palindrome(0) + True + >>> is_palindrome(10) + False + >>> is_palindrome(11) + True + >>> is_palindrome(101) + True + >>> is_palindrome(120) + False + """ + if num < 0: + return False + + num_copy: int = num + rev_num: int = 0 + while num > 0: + rev_num = rev_num * 10 + (num % 10) + num //= 10 + + return num_copy == rev_num + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From edc17b60e00e704cb4109a0e6b18c6ad43234c26 Mon Sep 17 00:00:00 2001 From: Daniel Luo <103051750+DanielLuo7@users.noreply.github.com> Date: Thu, 18 May 2023 20:40:52 -0400 Subject: [PATCH 0842/1543] add __main__ around print (#8747) --- ciphers/mixed_keyword_cypher.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index 806004faa079..93a0e3acb7b1 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -65,4 +65,5 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: return cypher -print(mixed_keyword("college", "UNIVERSITY")) +if __name__ == "__main__": + print(mixed_keyword("college", "UNIVERSITY")) From ce43a8ac4ad14e1639014d374b1137906218cfe3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 23 May 2023 05:54:30 +0200 Subject: [PATCH 0843/1543] [pre-commit.ci] pre-commit autoupdate (#8759) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.267 → v0.0.269](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.267...v0.0.269) - [github.com/abravalheri/validate-pyproject: v0.12.2 → v0.13](https://github.com/abravalheri/validate-pyproject/compare/v0.12.2...v0.13) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6bdbc7370c9c..bd5bca8f05ab 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.267 + rev: v0.0.269 hooks: - id: ruff @@ -46,7 +46,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.12.2 + rev: v0.13 hooks: - id: validate-pyproject diff --git a/DIRECTORY.md b/DIRECTORY.md index 82791cde183d..3181a93f393d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -577,6 +577,7 @@ * [Hexagonal Number](maths/hexagonal_number.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) + * [Is Palindrome](maths/is_palindrome.py) * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) * [Juggler Sequence](maths/juggler_sequence.py) From df88771905e68c0639069a92144d6b7af1d491ce Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 25 May 2023 06:59:15 +0100 Subject: [PATCH 0844/1543] Mark fetch anime 
and play as broken (#8763) * updating DIRECTORY.md * updating DIRECTORY.md * fix: Correct ruff errors * fix: Mark anime algorithm as broken * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - .../{fetch_anime_and_play.py => fetch_anime_and_play.py.BROKEN} | 0 2 files changed, 1 deletion(-) rename web_programming/{fetch_anime_and_play.py => fetch_anime_and_play.py.BROKEN} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 3181a93f393d..71bdf30b2ddb 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1199,7 +1199,6 @@ * [Daily Horoscope](web_programming/daily_horoscope.py) * [Download Images From Google Query](web_programming/download_images_from_google_query.py) * [Emails From Url](web_programming/emails_from_url.py) - * [Fetch Anime And Play](web_programming/fetch_anime_and_play.py) * [Fetch Bbc News](web_programming/fetch_bbc_news.py) * [Fetch Github Info](web_programming/fetch_github_info.py) * [Fetch Jobs](web_programming/fetch_jobs.py) diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py.BROKEN similarity index 100% rename from web_programming/fetch_anime_and_play.py rename to web_programming/fetch_anime_and_play.py.BROKEN From 200429fc4739c3757180635016614b984cfd2206 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Thu, 25 May 2023 18:04:42 +1200 Subject: [PATCH 0845/1543] Dual Number Automatic Differentiation (#8760) * Added dual_number_automatic_differentiation.py * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/dual_number_automatic_differentiation.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + .../dual_number_automatic_differentiation.py | 141 ++++++++++++++++++ 2 files changed, 142 insertions(+) create mode 100644 maths/dual_number_automatic_differentiation.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 71bdf30b2ddb..a75723369b06 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -549,6 +549,7 @@ * [Dodecahedron](maths/dodecahedron.py) * [Double Factorial Iterative](maths/double_factorial_iterative.py) * [Double Factorial Recursive](maths/double_factorial_recursive.py) + * [Dual Number Automatic Differentiation](maths/dual_number_automatic_differentiation.py) * [Entropy](maths/entropy.py) * [Euclidean Distance](maths/euclidean_distance.py) * [Euclidean Gcd](maths/euclidean_gcd.py) diff --git a/maths/dual_number_automatic_differentiation.py b/maths/dual_number_automatic_differentiation.py new file mode 100644 index 000000000000..9aa75830c4a1 --- /dev/null +++ b/maths/dual_number_automatic_differentiation.py @@ -0,0 +1,141 @@ +from math import factorial + +""" +https://en.wikipedia.org/wiki/Automatic_differentiation#Automatic_differentiation_using_dual_numbers +https://blog.jliszka.org/2013/10/24/exact-numeric-nth-derivatives.html + +Note this only works for basic functions, f(x) where the power of x is positive. 
+""" + + +class Dual: + def __init__(self, real, rank): + self.real = real + if isinstance(rank, int): + self.duals = [1] * rank + else: + self.duals = rank + + def __repr__(self): + return ( + f"{self.real}+" + f"{'+'.join(str(dual)+'E'+str(n+1)for n,dual in enumerate(self.duals))}" + ) + + def reduce(self): + cur = self.duals.copy() + while cur[-1] == 0: + cur.pop(-1) + return Dual(self.real, cur) + + def __add__(self, other): + if not isinstance(other, Dual): + return Dual(self.real + other, self.duals) + s_dual = self.duals.copy() + o_dual = other.duals.copy() + if len(s_dual) > len(o_dual): + o_dual.extend([1] * (len(s_dual) - len(o_dual))) + elif len(s_dual) < len(o_dual): + s_dual.extend([1] * (len(o_dual) - len(s_dual))) + new_duals = [] + for i in range(len(s_dual)): + new_duals.append(s_dual[i] + o_dual[i]) + return Dual(self.real + other.real, new_duals) + + __radd__ = __add__ + + def __sub__(self, other): + return self + other * -1 + + def __mul__(self, other): + if not isinstance(other, Dual): + new_duals = [] + for i in self.duals: + new_duals.append(i * other) + return Dual(self.real * other, new_duals) + new_duals = [0] * (len(self.duals) + len(other.duals) + 1) + for i, item in enumerate(self.duals): + for j, jtem in enumerate(other.duals): + new_duals[i + j + 1] += item * jtem + for k in range(len(self.duals)): + new_duals[k] += self.duals[k] * other.real + for index in range(len(other.duals)): + new_duals[index] += other.duals[index] * self.real + return Dual(self.real * other.real, new_duals) + + __rmul__ = __mul__ + + def __truediv__(self, other): + if not isinstance(other, Dual): + new_duals = [] + for i in self.duals: + new_duals.append(i / other) + return Dual(self.real / other, new_duals) + raise ValueError() + + def __floordiv__(self, other): + if not isinstance(other, Dual): + new_duals = [] + for i in self.duals: + new_duals.append(i // other) + return Dual(self.real // other, new_duals) + raise ValueError() + + def __pow__(self, n): + if n < 0 or isinstance(n, float): + raise ValueError("power must be a positive integer") + if n == 0: + return 1 + if n == 1: + return self + x = self + for _ in range(n - 1): + x *= self + return x + + +def differentiate(func, position, order): + """ + >>> differentiate(lambda x: x**2, 2, 2) + 2 + >>> differentiate(lambda x: x**2 * x**4, 9, 2) + 196830 + >>> differentiate(lambda y: 0.5 * (y + 3) ** 6, 3.5, 4) + 7605.0 + >>> differentiate(lambda y: y ** 2, 4, 3) + 0 + >>> differentiate(8, 8, 8) + Traceback (most recent call last): + ... + ValueError: differentiate() requires a function as input for func + >>> differentiate(lambda x: x **2, "", 1) + Traceback (most recent call last): + ... + ValueError: differentiate() requires a float as input for position + >>> differentiate(lambda x: x**2, 3, "") + Traceback (most recent call last): + ... 
+ ValueError: differentiate() requires an int as input for order + """ + if not callable(func): + raise ValueError("differentiate() requires a function as input for func") + if not isinstance(position, (float, int)): + raise ValueError("differentiate() requires a float as input for position") + if not isinstance(order, int): + raise ValueError("differentiate() requires an int as input for order") + d = Dual(position, 1) + result = func(d) + if order == 0: + return result.real + return result.duals[order - 1] * factorial(order) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + def f(y): + return y**2 * y**4 + + print(differentiate(f, 9, 2)) From a6631487b0a9d6a310d8c45d211e8b7b7bd93cab Mon Sep 17 00:00:00 2001 From: Ratnesh Kumar <89133941+ratneshrt@users.noreply.github.com> Date: Thu, 25 May 2023 16:04:11 +0530 Subject: [PATCH 0846/1543] Fix CI badge in the README.md (#8137) From cfbbfd9896cc96379f7374a68ff04b245bb3527c Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 25 May 2023 11:56:23 +0100 Subject: [PATCH 0847/1543] Merge and add benchmarks to palindrome algorithms in the strings/ directory (#8749) * refactor: Merge and add benchmarks to palindrome * updating DIRECTORY.md * chore: Fix failing tests * Update strings/palindrome.py Co-authored-by: Christian Clauss * Update palindrome.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 - strings/is_palindrome.py | 41 ---------------------------------------- strings/palindrome.py | 40 ++++++++++++++++++++++++++++++++++++++- 3 files changed, 39 insertions(+), 43 deletions(-) delete mode 100644 strings/is_palindrome.py diff --git a/DIRECTORY.md b/DIRECTORY.md index a75723369b06..fe4baac863d0 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1156,7 +1156,6 @@ * [Indian Phone Validator](strings/indian_phone_validator.py) * [Is Contains Unique Chars](strings/is_contains_unique_chars.py) * [Is Isogram](strings/is_isogram.py) - * [Is Palindrome](strings/is_palindrome.py) * [Is Pangram](strings/is_pangram.py) * [Is Spain National Id](strings/is_spain_national_id.py) * [Is Srilankan Phone Number](strings/is_srilankan_phone_number.py) diff --git a/strings/is_palindrome.py b/strings/is_palindrome.py deleted file mode 100644 index 406aa2e8d3c3..000000000000 --- a/strings/is_palindrome.py +++ /dev/null @@ -1,41 +0,0 @@ -def is_palindrome(s: str) -> bool: - """ - Determine if the string s is a palindrome. - - >>> is_palindrome("A man, A plan, A canal -- Panama!") - True - >>> is_palindrome("Hello") - False - >>> is_palindrome("Able was I ere I saw Elba") - True - >>> is_palindrome("racecar") - True - >>> is_palindrome("Mr. Owl ate my metal worm?") - True - """ - # Since punctuation, capitalization, and spaces are often ignored while checking - # palindromes, we first remove them from our string. - s = "".join(character for character in s.lower() if character.isalnum()) - # return s == s[::-1] the slicing method - # uses extra spaces we can - # better with iteration method. - - end = len(s) // 2 - n = len(s) - - # We need to traverse till half of the length of string - # as we can get access of the i'th last element from - # i'th index. 
- # eg: [0,1,2,3,4,5] => 4th index can be accessed - # with the help of 1st index (i==n-i-1) - # where n is length of string - - return all(s[i] == s[n - i - 1] for i in range(end)) - - -if __name__ == "__main__": - s = input("Please enter a string to see if it is a palindrome: ") - if is_palindrome(s): - print(f"'{s}' is a palindrome.") - else: - print(f"'{s}' is not a palindrome.") diff --git a/strings/palindrome.py b/strings/palindrome.py index dd1fe316f479..bfdb3ddcf396 100644 --- a/strings/palindrome.py +++ b/strings/palindrome.py @@ -1,5 +1,7 @@ # Algorithms to determine if a string is palindrome +from timeit import timeit + test_data = { "MALAYALAM": True, "String": False, @@ -33,6 +35,25 @@ def is_palindrome(s: str) -> bool: return True +def is_palindrome_traversal(s: str) -> bool: + """ + Return True if s is a palindrome otherwise return False. + + >>> all(is_palindrome_traversal(key) is value for key, value in test_data.items()) + True + """ + end = len(s) // 2 + n = len(s) + + # We need to traverse till half of the length of string + # as we can get access of the i'th last element from + # i'th index. + # eg: [0,1,2,3,4,5] => 4th index can be accessed + # with the help of 1st index (i==n-i-1) + # where n is length of string + return all(s[i] == s[n - i - 1] for i in range(end)) + + def is_palindrome_recursive(s: str) -> bool: """ Return True if s is a palindrome otherwise return False. @@ -40,7 +61,7 @@ def is_palindrome_recursive(s: str) -> bool: >>> all(is_palindrome_recursive(key) is value for key, value in test_data.items()) True """ - if len(s) <= 1: + if len(s) <= 2: return True if s[0] == s[len(s) - 1]: return is_palindrome_recursive(s[1:-1]) @@ -58,9 +79,26 @@ def is_palindrome_slice(s: str) -> bool: return s == s[::-1] +def benchmark_function(name: str) -> None: + stmt = f"all({name}(key) is value for key, value in test_data.items())" + setup = f"from __main__ import test_data, {name}" + number = 500000 + result = timeit(stmt=stmt, setup=setup, number=number) + print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds") + + if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(f"{key:21} {value}") print("a man a plan a canal panama") + + # finished 500,000 runs in 0.46793 seconds + benchmark_function("is_palindrome_slice") + # finished 500,000 runs in 0.85234 seconds + benchmark_function("is_palindrome") + # finished 500,000 runs in 1.32028 seconds + benchmark_function("is_palindrome_recursive") + # finished 500,000 runs in 2.08679 seconds + benchmark_function("is_palindrome_traversal") From a17791d022bdc942c8badabc52307c354069a7ae Mon Sep 17 00:00:00 2001 From: Juyoung Kim <61103343+JadeKim042386@users.noreply.github.com> Date: Thu, 25 May 2023 21:54:18 +0900 Subject: [PATCH 0848/1543] fix: graphs/greedy_best_first typo (#8766) #8764 --- graphs/greedy_best_first.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py index d49e65b9d814..35f7ca9feeef 100644 --- a/graphs/greedy_best_first.py +++ b/graphs/greedy_best_first.py @@ -58,8 +58,8 @@ def calculate_heuristic(self) -> float: The heuristic here is the Manhattan Distance Could elaborate to offer more than one choice """ - dy = abs(self.pos_x - self.goal_x) - dx = abs(self.pos_y - self.goal_y) + dx = abs(self.pos_x - self.goal_x) + dy = abs(self.pos_y - self.goal_y) return dx + dy def __lt__(self, other) -> 
bool: From dd3b499bfa972507759d0705b77e2e1946f42596 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 26 May 2023 08:50:33 +0200 Subject: [PATCH 0849/1543] Rename is_palindrome.py to is_int_palindrome.py (#8768) * Rename is_palindrome.py to is_int_palindrome.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 +- maths/{is_palindrome.py => is_int_palindrome.py} | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) rename maths/{is_palindrome.py => is_int_palindrome.py} (67%) diff --git a/DIRECTORY.md b/DIRECTORY.md index fe4baac863d0..11ff93c91430 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -577,8 +577,8 @@ * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) * [Hexagonal Number](maths/hexagonal_number.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) + * [Is Int Palindrome](maths/is_int_palindrome.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) - * [Is Palindrome](maths/is_palindrome.py) * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) * [Juggler Sequence](maths/juggler_sequence.py) diff --git a/maths/is_palindrome.py b/maths/is_int_palindrome.py similarity index 67% rename from maths/is_palindrome.py rename to maths/is_int_palindrome.py index ba60573ab022..63dc9e2138e8 100644 --- a/maths/is_palindrome.py +++ b/maths/is_int_palindrome.py @@ -1,19 +1,19 @@ -def is_palindrome(num: int) -> bool: +def is_int_palindrome(num: int) -> bool: """ Returns whether `num` is a palindrome or not (see for reference https://en.wikipedia.org/wiki/Palindromic_number). - >>> is_palindrome(-121) + >>> is_int_palindrome(-121) False - >>> is_palindrome(0) + >>> is_int_palindrome(0) True - >>> is_palindrome(10) + >>> is_int_palindrome(10) False - >>> is_palindrome(11) + >>> is_int_palindrome(11) True - >>> is_palindrome(101) + >>> is_int_palindrome(101) True - >>> is_palindrome(120) + >>> is_int_palindrome(120) False """ if num < 0: From 4b79d771cd81a820c195e62430100c416a1618ea Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 26 May 2023 09:34:17 +0200 Subject: [PATCH 0850/1543] Add more ruff rules (#8767) * Add more ruff rules * Add more ruff rules * pre-commit: Update ruff v0.0.269 -> v0.0.270 * Apply suggestions from code review * Fix doctest * Fix doctest (ignore whitespace) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Dhruv Manilawala Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- .../jacobi_iteration_method.py | 30 ++-- arithmetic_analysis/lu_decomposition.py | 5 +- audio_filters/iir_filter.py | 14 +- backtracking/knight_tour.py | 3 +- bit_manipulation/reverse_bits.py | 3 +- ciphers/base64.py | 12 +- ciphers/beaufort_cipher.py | 2 +- ciphers/cryptomath_module.py | 3 +- ciphers/enigma_machine2.py | 30 ++-- ciphers/hill_cipher.py | 7 +- .../astronomical_length_scale_conversion.py | 6 +- conversions/length_conversion.py | 6 +- conversions/speed_conversions.py | 3 +- conversions/weight_conversion.py | 3 +- .../binary_search_tree_recursive.py | 6 +- .../binary_tree/binary_tree_mirror.py | 3 +- data_structures/disjoint_set/disjoint_set.py | 3 +- .../linked_list/circular_linked_list.py | 8 +- .../linked_list/doubly_linked_list.py | 4 +- .../linked_list/singly_linked_list.py | 4 +- data_structures/stacks/stack.py | 6 +- 
digital_image_processing/dithering/burkes.py | 3 +- divide_and_conquer/convex_hull.py | 8 +- dynamic_programming/knapsack.py | 15 +- dynamic_programming/minimum_steps_to_one.py | 3 +- dynamic_programming/rod_cutting.py | 10 +- dynamic_programming/viterbi.py | 17 ++- electronics/resistor_equivalence.py | 6 +- genetic_algorithm/basic_string.py | 8 +- graphics/vector3_for_2d_rendering.py | 8 +- graphs/breadth_first_search_shortest_path.py | 3 +- linear_algebra/src/schur_complement.py | 14 +- machine_learning/similarity_search.py | 21 +-- machine_learning/support_vector_machines.py | 3 +- maths/3n_plus_1.py | 6 +- maths/automorphic_number.py | 3 +- maths/catalan_number.py | 6 +- .../dual_number_automatic_differentiation.py | 4 +- maths/hexagonal_number.py | 3 +- maths/juggler_sequence.py | 6 +- maths/liouville_lambda.py | 3 +- maths/manhattan_distance.py | 18 +-- maths/pronic_number.py | 3 +- maths/proth_number.py | 6 +- maths/radix2_fft.py | 2 +- maths/sieve_of_eratosthenes.py | 3 +- maths/sylvester_sequence.py | 3 +- maths/twin_prime.py | 3 +- matrix/matrix_operation.py | 12 +- matrix/sherman_morrison.py | 3 +- neural_network/input_data.py | 12 +- other/nested_brackets.py | 2 +- other/scoring_algorithm.py | 3 +- project_euler/problem_054/sol1.py | 6 +- project_euler/problem_068/sol1.py | 3 +- project_euler/problem_131/sol1.py | 5 +- pyproject.toml | 139 +++++++++++++----- scripts/build_directory_md.py | 2 +- sorts/dutch_national_flag_sort.py | 5 +- strings/barcode_validator.py | 3 +- strings/capitalize.py | 2 +- strings/is_spain_national_id.py | 3 +- strings/snake_case_to_camel_pascal_case.py | 8 +- web_programming/reddit.py | 3 +- web_programming/search_books_by_isbn.py | 3 +- web_programming/slack_message.py | 7 +- 67 files changed, 349 insertions(+), 223 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bd5bca8f05ab..4c70ae219f74 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.269 + rev: v0.0.270 hooks: - id: ruff diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py index fe506a94a65d..17edf4bf4b8b 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -49,7 +49,9 @@ def jacobi_iteration_method( >>> constant = np.array([[2], [-6]]) >>> init_val = [0.5, -0.5, -0.5] >>> iterations = 3 - >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + >>> jacobi_iteration_method( + ... coefficient, constant, init_val, iterations + ... ) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Coefficient and constant matrices dimensions must be nxn and nx1 but @@ -59,7 +61,9 @@ def jacobi_iteration_method( >>> constant = np.array([[2], [-6], [-4]]) >>> init_val = [0.5, -0.5] >>> iterations = 3 - >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + >>> jacobi_iteration_method( + ... coefficient, constant, init_val, iterations + ... ) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... 
ValueError: Number of initial values must be equal to number of rows in coefficient @@ -79,24 +83,26 @@ def jacobi_iteration_method( rows2, cols2 = constant_matrix.shape if rows1 != cols1: - raise ValueError( - f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}" - ) + msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}" + raise ValueError(msg) if cols2 != 1: - raise ValueError(f"Constant matrix must be nx1 but received {rows2}x{cols2}") + msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}" + raise ValueError(msg) if rows1 != rows2: - raise ValueError( - f"""Coefficient and constant matrices dimensions must be nxn and nx1 but - received {rows1}x{cols1} and {rows2}x{cols2}""" + msg = ( + "Coefficient and constant matrices dimensions must be nxn and nx1 but " + f"received {rows1}x{cols1} and {rows2}x{cols2}" ) + raise ValueError(msg) if len(init_val) != rows1: - raise ValueError( - f"""Number of initial values must be equal to number of rows in coefficient - matrix but received {len(init_val)} and {rows1}""" + msg = ( + "Number of initial values must be equal to number of rows in coefficient " + f"matrix but received {len(init_val)} and {rows1}" ) + raise ValueError(msg) if iterations <= 0: raise ValueError("Iterations must be at least 1") diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index 941c1dadf556..eaabce5449c5 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -80,10 +80,11 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray # Ensure that table is a square array rows, columns = np.shape(table) if rows != columns: - raise ValueError( - f"'table' has to be of square shaped array but got a " + msg = ( + "'table' has to be of square shaped array but got a " f"{rows}x{columns} array:\n{table}" ) + raise ValueError(msg) lower = np.zeros((rows, columns)) upper = np.zeros((rows, columns)) diff --git a/audio_filters/iir_filter.py b/audio_filters/iir_filter.py index bd448175f6f3..f3c1ad43b001 100644 --- a/audio_filters/iir_filter.py +++ b/audio_filters/iir_filter.py @@ -50,16 +50,18 @@ def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None a_coeffs = [1.0, *a_coeffs] if len(a_coeffs) != self.order + 1: - raise ValueError( - f"Expected a_coeffs to have {self.order + 1} elements for {self.order}" - f"-order filter, got {len(a_coeffs)}" + msg = ( + f"Expected a_coeffs to have {self.order + 1} elements " + f"for {self.order}-order filter, got {len(a_coeffs)}" ) + raise ValueError(msg) if len(b_coeffs) != self.order + 1: - raise ValueError( - f"Expected b_coeffs to have {self.order + 1} elements for {self.order}" - f"-order filter, got {len(a_coeffs)}" + msg = ( + f"Expected b_coeffs to have {self.order + 1} elements " + f"for {self.order}-order filter, got {len(a_coeffs)}" ) + raise ValueError(msg) self.a_coeffs = a_coeffs self.b_coeffs = b_coeffs diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py index bb650ece3f5e..cc88307b7fe8 100644 --- a/backtracking/knight_tour.py +++ b/backtracking/knight_tour.py @@ -91,7 +91,8 @@ def open_knight_tour(n: int) -> list[list[int]]: return board board[i][j] = 0 - raise ValueError(f"Open Kight Tour cannot be performed on a board of size {n}") + msg = f"Open Kight Tour cannot be performed on a board of size {n}" + raise ValueError(msg) if __name__ == "__main__": diff --git a/bit_manipulation/reverse_bits.py 
b/bit_manipulation/reverse_bits.py index 55608ae12908..a8c77c11bfdd 100644 --- a/bit_manipulation/reverse_bits.py +++ b/bit_manipulation/reverse_bits.py @@ -14,10 +14,11 @@ def get_reverse_bit_string(number: int) -> str: TypeError: operation can not be conducted on a object of type str """ if not isinstance(number, int): - raise TypeError( + msg = ( "operation can not be conducted on a object of type " f"{type(number).__name__}" ) + raise TypeError(msg) bit_string = "" for _ in range(0, 32): bit_string += str(number % 2) diff --git a/ciphers/base64.py b/ciphers/base64.py index 38a952acc307..2b950b1be37d 100644 --- a/ciphers/base64.py +++ b/ciphers/base64.py @@ -34,9 +34,8 @@ def base64_encode(data: bytes) -> bytes: """ # Make sure the supplied data is a bytes-like object if not isinstance(data, bytes): - raise TypeError( - f"a bytes-like object is required, not '{data.__class__.__name__}'" - ) + msg = f"a bytes-like object is required, not '{data.__class__.__name__}'" + raise TypeError(msg) binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data) @@ -88,10 +87,11 @@ def base64_decode(encoded_data: str) -> bytes: """ # Make sure encoded_data is either a string or a bytes-like object if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str): - raise TypeError( - "argument should be a bytes-like object or ASCII string, not " - f"'{encoded_data.__class__.__name__}'" + msg = ( + "argument should be a bytes-like object or ASCII string, " + f"not '{encoded_data.__class__.__name__}'" ) + raise TypeError(msg) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object diff --git a/ciphers/beaufort_cipher.py b/ciphers/beaufort_cipher.py index 8eae847a7ff7..788fc72b89c3 100644 --- a/ciphers/beaufort_cipher.py +++ b/ciphers/beaufort_cipher.py @@ -5,7 +5,7 @@ from string import ascii_uppercase dict1 = {char: i for i, char in enumerate(ascii_uppercase)} -dict2 = {i: char for i, char in enumerate(ascii_uppercase)} +dict2 = dict(enumerate(ascii_uppercase)) # This function generates the key in diff --git a/ciphers/cryptomath_module.py b/ciphers/cryptomath_module.py index be8764ff38c3..6f15f7b733e6 100644 --- a/ciphers/cryptomath_module.py +++ b/ciphers/cryptomath_module.py @@ -6,7 +6,8 @@ def gcd(a: int, b: int) -> int: def find_mod_inverse(a: int, m: int) -> int: if gcd(a, m) != 1: - raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist") + msg = f"mod inverse of {a!r} and {m!r} does not exist" + raise ValueError(msg) u1, u2, u3 = 1, 0, a v1, v2, v3 = 0, 1, m while v3 != 0: diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index 07d21893f192..ec0d44e4a6c6 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -87,22 +87,20 @@ def _validator( # Checks if there are 3 unique rotors if (unique_rotsel := len(set(rotsel))) < 3: - raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})") + msg = f"Please use 3 unique rotors (not {unique_rotsel})" + raise Exception(msg) # Checks if rotor positions are valid rotorpos1, rotorpos2, rotorpos3 = rotpos if not 0 < rotorpos1 <= len(abc): - raise ValueError( - "First rotor position is not within range of 1..26 (" f"{rotorpos1}" - ) + msg = f"First rotor position is not within range of 1..26 ({rotorpos1}" + raise ValueError(msg) if not 0 < rotorpos2 <= len(abc): - raise ValueError( - "Second rotor position is not within range of 1..26 (" f"{rotorpos2})" - ) + msg = f"Second rotor position is not within range of 1..26 
({rotorpos2})" + raise ValueError(msg) if not 0 < rotorpos3 <= len(abc): - raise ValueError( - "Third rotor position is not within range of 1..26 (" f"{rotorpos3})" - ) + msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})" + raise ValueError(msg) # Validates string and returns dict pbdict = _plugboard(pb) @@ -130,9 +128,11 @@ def _plugboard(pbstring: str) -> dict[str, str]: # a) is type string # b) has even length (so pairs can be made) if not isinstance(pbstring, str): - raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})") + msg = f"Plugboard setting isn't type string ({type(pbstring)})" + raise TypeError(msg) elif len(pbstring) % 2 != 0: - raise Exception(f"Odd number of symbols ({len(pbstring)})") + msg = f"Odd number of symbols ({len(pbstring)})" + raise Exception(msg) elif pbstring == "": return {} @@ -142,9 +142,11 @@ def _plugboard(pbstring: str) -> dict[str, str]: tmppbl = set() for i in pbstring: if i not in abc: - raise Exception(f"'{i}' not in list of symbols") + msg = f"'{i}' not in list of symbols" + raise Exception(msg) elif i in tmppbl: - raise Exception(f"Duplicate symbol ({i})") + msg = f"Duplicate symbol ({i})" + raise Exception(msg) else: tmppbl.add(i) del tmppbl diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index f646d567b4c8..b4424e82298e 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -104,10 +104,11 @@ def check_determinant(self) -> None: req_l = len(self.key_string) if greatest_common_divisor(det, len(self.key_string)) != 1: - raise ValueError( - f"determinant modular {req_l} of encryption key({det}) is not co prime " - f"w.r.t {req_l}.\nTry another key." + msg = ( + f"determinant modular {req_l} of encryption key({det}) " + f"is not co prime w.r.t {req_l}.\nTry another key." 
) + raise ValueError(msg) def process_text(self, text: str) -> str: """ diff --git a/conversions/astronomical_length_scale_conversion.py b/conversions/astronomical_length_scale_conversion.py index 804d82487a25..0f413644906d 100644 --- a/conversions/astronomical_length_scale_conversion.py +++ b/conversions/astronomical_length_scale_conversion.py @@ -77,15 +77,17 @@ def length_conversion(value: float, from_type: str, to_type: str) -> float: to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized) if from_sanitized not in METRIC_CONVERSION: - raise ValueError( + msg = ( f"Invalid 'from_type' value: {from_type!r}.\n" f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) + raise ValueError(msg) if to_sanitized not in METRIC_CONVERSION: - raise ValueError( + msg = ( f"Invalid 'to_type' value: {to_type!r}.\n" f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) + raise ValueError(msg) from_exponent = METRIC_CONVERSION[from_sanitized] to_exponent = METRIC_CONVERSION[to_sanitized] exponent = 1 diff --git a/conversions/length_conversion.py b/conversions/length_conversion.py index 790d9c116845..d8f39515255e 100644 --- a/conversions/length_conversion.py +++ b/conversions/length_conversion.py @@ -104,15 +104,17 @@ def length_conversion(value: float, from_type: str, to_type: str) -> float: new_to = to_type.lower().rstrip("s") new_to = TYPE_CONVERSION.get(new_to, new_to) if new_from not in METRIC_CONVERSION: - raise ValueError( + msg = ( f"Invalid 'from_type' value: {from_type!r}.\n" f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) + raise ValueError(msg) if new_to not in METRIC_CONVERSION: - raise ValueError( + msg = ( f"Invalid 'to_type' value: {to_type!r}.\n" f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) + raise ValueError(msg) return value * METRIC_CONVERSION[new_from].from_ * METRIC_CONVERSION[new_to].to diff --git a/conversions/speed_conversions.py b/conversions/speed_conversions.py index 62da9e137bc7..ba497119d3f5 100644 --- a/conversions/speed_conversions.py +++ b/conversions/speed_conversions.py @@ -57,10 +57,11 @@ def convert_speed(speed: float, unit_from: str, unit_to: str) -> float: 115.078 """ if unit_to not in speed_chart or unit_from not in speed_chart_inverse: - raise ValueError( + msg = ( f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n" f"Valid values are: {', '.join(speed_chart_inverse)}" ) + raise ValueError(msg) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3) diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py index 5c032a497a7b..e8326e0b688f 100644 --- a/conversions/weight_conversion.py +++ b/conversions/weight_conversion.py @@ -299,10 +299,11 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float: 1.999999998903455 """ if to_type not in KILOGRAM_CHART or from_type not in WEIGHT_TYPE_CHART: - raise ValueError( + msg = ( f"Invalid 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n" f"Supported values are: {', '.join(WEIGHT_TYPE_CHART)}" ) + raise ValueError(msg) return value * KILOGRAM_CHART[to_type] * WEIGHT_TYPE_CHART[from_type] diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index 97eb8e25bedd..b5b983b9ba4c 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -77,7 +77,8 @@ def _put(self, node: Node | None, label: int, parent: 
Node | None = None) -> Nod elif label > node.label: node.right = self._put(node.right, label, node) else: - raise Exception(f"Node with label {label} already exists") + msg = f"Node with label {label} already exists" + raise Exception(msg) return node @@ -100,7 +101,8 @@ def search(self, label: int) -> Node: def _search(self, node: Node | None, label: int) -> Node: if node is None: - raise Exception(f"Node with label {label} does not exist") + msg = f"Node with label {label} does not exist" + raise Exception(msg) else: if label < node.label: node = self._search(node.left, label) diff --git a/data_structures/binary_tree/binary_tree_mirror.py b/data_structures/binary_tree/binary_tree_mirror.py index 1ef950ad62d7..b8548f4ec515 100644 --- a/data_structures/binary_tree/binary_tree_mirror.py +++ b/data_structures/binary_tree/binary_tree_mirror.py @@ -31,7 +31,8 @@ def binary_tree_mirror(binary_tree: dict, root: int = 1) -> dict: if not binary_tree: raise ValueError("binary tree cannot be empty") if root not in binary_tree: - raise ValueError(f"root {root} is not present in the binary_tree") + msg = f"root {root} is not present in the binary_tree" + raise ValueError(msg) binary_tree_mirror_dictionary = dict(binary_tree) binary_tree_mirror_dict(binary_tree_mirror_dictionary, root) return binary_tree_mirror_dictionary diff --git a/data_structures/disjoint_set/disjoint_set.py b/data_structures/disjoint_set/disjoint_set.py index f8500bf2c3af..12dafb2d935e 100644 --- a/data_structures/disjoint_set/disjoint_set.py +++ b/data_structures/disjoint_set/disjoint_set.py @@ -56,7 +56,8 @@ def find_python_set(node: Node) -> set: for s in sets: if node.data in s: return s - raise ValueError(f"{node.data} is not in {sets}") + msg = f"{node.data} is not in {sets}" + raise ValueError(msg) def test_disjoint_set() -> None: diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 9092fb29e3ff..325d91026137 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -94,25 +94,25 @@ def test_circular_linked_list() -> None: try: circular_linked_list.delete_front() - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1) - raise AssertionError() + raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0) - raise AssertionError() + raise AssertionError except IndexError: assert True diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 69763d12da15..1a6c48191c4e 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -198,13 +198,13 @@ def test_doubly_linked_list() -> None: try: linked_list.delete_head() - raise AssertionError() # This should not happen. + raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() - raise AssertionError() # This should not happen. + raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index a8f9e8ebb977..890e21c9b404 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -353,13 +353,13 @@ def test_singly_linked_list() -> None: try: linked_list.delete_head() - raise AssertionError() # This should not happen. + raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() - raise AssertionError() # This should not happen. + raise AssertionError # This should not happen. except IndexError: assert True # This should happen. diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index 55d424d5018b..a14f4648a399 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -92,13 +92,13 @@ def test_stack() -> None: try: _ = stack.pop() - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except StackUnderflowError: assert True # This should happen try: _ = stack.peek() - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except StackUnderflowError: assert True # This should happen @@ -118,7 +118,7 @@ def test_stack() -> None: try: stack.push(200) - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except StackOverflowError: assert True # This should happen diff --git a/digital_image_processing/dithering/burkes.py b/digital_image_processing/dithering/burkes.py index 2bf0bbe03225..0804104abe58 100644 --- a/digital_image_processing/dithering/burkes.py +++ b/digital_image_processing/dithering/burkes.py @@ -21,7 +21,8 @@ def __init__(self, input_img, threshold: int): self.max_threshold = int(self.get_greyscale(255, 255, 255)) if not self.min_threshold < threshold < self.max_threshold: - raise ValueError(f"Factor value should be from 0 to {self.max_threshold}") + msg = f"Factor value should be from 0 to {self.max_threshold}" + raise ValueError(msg) self.input_img = input_img self.threshold = threshold diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index 39e78be04a71..1ad933417da6 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -174,12 +174,12 @@ def _validate_input(points: list[Point] | list[list[float]]) -> list[Point]: """ if not hasattr(points, "__iter__"): - raise ValueError( - f"Expecting an iterable object but got an non-iterable type {points}" - ) + msg = f"Expecting an iterable object but got an non-iterable type {points}" + raise ValueError(msg) if not points: - raise ValueError(f"Expecting a list of points but got {points}") + msg = f"Expecting a list of points but got {points}" + raise ValueError(msg) return _construct_points(points) diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py index b12d30313e31..489b5ada450a 100644 --- a/dynamic_programming/knapsack.py +++ b/dynamic_programming/knapsack.py @@ -78,17 +78,18 @@ def knapsack_with_example_solution(w: int, wt: list, val: list): num_items = len(wt) if num_items != len(val): - raise ValueError( - "The number of weights must be the " - "same as the number of values.\nBut " - f"got {num_items} weights and {len(val)} values" + msg = ( + "The number of weights must be the same as the number of values.\n" + f"But got {num_items} weights and {len(val)} values" ) + raise ValueError(msg) for i in 
range(num_items): if not isinstance(wt[i], int): - raise TypeError( - "All weights must be integers but " - f"got weight of type {type(wt[i])} at index {i}" + msg = ( + "All weights must be integers but got weight of " + f"type {type(wt[i])} at index {i}" ) + raise TypeError(msg) optimal_val, dp_table = knapsack(w, wt, val, num_items) example_optional_set: set = set() diff --git a/dynamic_programming/minimum_steps_to_one.py b/dynamic_programming/minimum_steps_to_one.py index f4eb7033dd20..8785027fbff3 100644 --- a/dynamic_programming/minimum_steps_to_one.py +++ b/dynamic_programming/minimum_steps_to_one.py @@ -42,7 +42,8 @@ def min_steps_to_one(number: int) -> int: """ if number <= 0: - raise ValueError(f"n must be greater than 0. Got n = {number}") + msg = f"n must be greater than 0. Got n = {number}" + raise ValueError(msg) table = [number + 1] * (number + 1) diff --git a/dynamic_programming/rod_cutting.py b/dynamic_programming/rod_cutting.py index 79104d8f4044..f80fa440ae86 100644 --- a/dynamic_programming/rod_cutting.py +++ b/dynamic_programming/rod_cutting.py @@ -177,13 +177,15 @@ def _enforce_args(n: int, prices: list): the rod """ if n < 0: - raise ValueError(f"n must be greater than or equal to 0. Got n = {n}") + msg = f"n must be greater than or equal to 0. Got n = {n}" + raise ValueError(msg) if n > len(prices): - raise ValueError( - "Each integral piece of rod must have a corresponding " - f"price. Got n = {n} but length of prices = {len(prices)}" + msg = ( + "Each integral piece of rod must have a corresponding price. " + f"Got n = {n} but length of prices = {len(prices)}" ) + raise ValueError(msg) def main(): diff --git a/dynamic_programming/viterbi.py b/dynamic_programming/viterbi.py index 93ab845e2ae8..764d45dc2c05 100644 --- a/dynamic_programming/viterbi.py +++ b/dynamic_programming/viterbi.py @@ -297,11 +297,13 @@ def _validate_list(_object: Any, var_name: str) -> None: """ if not isinstance(_object, list): - raise ValueError(f"{var_name} must be a list") + msg = f"{var_name} must be a list" + raise ValueError(msg) else: for x in _object: if not isinstance(x, str): - raise ValueError(f"{var_name} must be a list of strings") + msg = f"{var_name} must be a list of strings" + raise ValueError(msg) def _validate_dicts( @@ -384,14 +386,15 @@ def _validate_dict( ValueError: mock_name nested dictionary all values must be float """ if not isinstance(_object, dict): - raise ValueError(f"{var_name} must be a dict") + msg = f"{var_name} must be a dict" + raise ValueError(msg) if not all(isinstance(x, str) for x in _object): - raise ValueError(f"{var_name} all keys must be strings") + msg = f"{var_name} all keys must be strings" + raise ValueError(msg) if not all(isinstance(x, value_type) for x in _object.values()): nested_text = "nested dictionary " if nested else "" - raise ValueError( - f"{var_name} {nested_text}all values must be {value_type.__name__}" - ) + msg = f"{var_name} {nested_text}all values must be {value_type.__name__}" + raise ValueError(msg) if __name__ == "__main__": diff --git a/electronics/resistor_equivalence.py b/electronics/resistor_equivalence.py index 7142f838a065..55e7f2d6b5d2 100644 --- a/electronics/resistor_equivalence.py +++ b/electronics/resistor_equivalence.py @@ -23,7 +23,8 @@ def resistor_parallel(resistors: list[float]) -> float: index = 0 for resistor in resistors: if resistor <= 0: - raise ValueError(f"Resistor at index {index} has a negative or zero value!") + msg = f"Resistor at index {index} has a negative or zero value!" 
+ raise ValueError(msg) first_sum += 1 / float(resistor) index += 1 return 1 / first_sum @@ -47,7 +48,8 @@ def resistor_series(resistors: list[float]) -> float: for resistor in resistors: sum_r += resistor if resistor < 0: - raise ValueError(f"Resistor at index {index} has a negative value!") + msg = f"Resistor at index {index} has a negative value!" + raise ValueError(msg) index += 1 return sum_r diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 388e7219f54b..089c5c99a1ec 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -96,13 +96,13 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: - raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}") + msg = f"{N_POPULATION} must be bigger than {N_SELECTED}" + raise ValueError(msg) # Verify that the target contains no genes besides the ones inside genes variable. not_in_genes_list = sorted({c for c in target if c not in genes}) if not_in_genes_list: - raise ValueError( - f"{not_in_genes_list} is not in genes list, evolution cannot converge" - ) + msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge" + raise ValueError(msg) # Generate random starting population. population = [] diff --git a/graphics/vector3_for_2d_rendering.py b/graphics/vector3_for_2d_rendering.py index dfa22262a8d8..a332206e67b6 100644 --- a/graphics/vector3_for_2d_rendering.py +++ b/graphics/vector3_for_2d_rendering.py @@ -28,9 +28,8 @@ def convert_to_2d( TypeError: Input values must either be float or int: ['1', 2, 3, 10, 10] """ if not all(isinstance(val, (float, int)) for val in locals().values()): - raise TypeError( - "Input values must either be float or int: " f"{list(locals().values())}" - ) + msg = f"Input values must either be float or int: {list(locals().values())}" + raise TypeError(msg) projected_x = ((x * distance) / (z + distance)) * scale projected_y = ((y * distance) / (z + distance)) * scale return projected_x, projected_y @@ -71,10 +70,11 @@ def rotate( input_variables = locals() del input_variables["axis"] if not all(isinstance(val, (float, int)) for val in input_variables.values()): - raise TypeError( + msg = ( "Input values except axis must either be float or int: " f"{list(input_variables.values())}" ) + raise TypeError(msg) angle = (angle % 360) / 450 * 180 / math.pi if axis == "z": new_x = x * math.cos(angle) - y * math.sin(angle) diff --git a/graphs/breadth_first_search_shortest_path.py b/graphs/breadth_first_search_shortest_path.py index cb21076f91d2..d489b110b3a7 100644 --- a/graphs/breadth_first_search_shortest_path.py +++ b/graphs/breadth_first_search_shortest_path.py @@ -73,9 +73,10 @@ def shortest_path(self, target_vertex: str) -> str: target_vertex_parent = self.parent.get(target_vertex) if target_vertex_parent is None: - raise ValueError( + msg = ( f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" ) + raise ValueError(msg) return self.shortest_path(target_vertex_parent) + f"->{target_vertex}" diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py index 3a5f4443afd3..750f4de5e397 100644 --- a/linear_algebra/src/schur_complement.py +++ b/linear_algebra/src/schur_complement.py @@ -31,16 +31,18 @@ def schur_complement( shape_c = np.shape(mat_c) if shape_a[0] != shape_b[0]: - raise ValueError( - f"Expected the same number of rows for A and B. 
\ - Instead found A of size {shape_a} and B of size {shape_b}" + msg = ( + "Expected the same number of rows for A and B. " + f"Instead found A of size {shape_a} and B of size {shape_b}" ) + raise ValueError(msg) if shape_b[1] != shape_c[1]: - raise ValueError( - f"Expected the same number of columns for B and C. \ - Instead found B of size {shape_b} and C of size {shape_c}" + msg = ( + "Expected the same number of columns for B and C. " + f"Instead found B of size {shape_b} and C of size {shape_c}" ) + raise ValueError(msg) a_inv = pseudo_inv if a_inv is None: diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index 72979181f67c..7a23ec463c8f 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -97,26 +97,29 @@ def similarity_search( """ if dataset.ndim != value_array.ndim: - raise ValueError( - f"Wrong input data's dimensions... dataset : {dataset.ndim}, " - f"value_array : {value_array.ndim}" + msg = ( + "Wrong input data's dimensions... " + f"dataset : {dataset.ndim}, value_array : {value_array.ndim}" ) + raise ValueError(msg) try: if dataset.shape[1] != value_array.shape[1]: - raise ValueError( - f"Wrong input data's shape... dataset : {dataset.shape[1]}, " - f"value_array : {value_array.shape[1]}" + msg = ( + "Wrong input data's shape... " + f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}" ) + raise ValueError(msg) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError("Wrong shape") if dataset.dtype != value_array.dtype: - raise TypeError( - f"Input data have different datatype... dataset : {dataset.dtype}, " - f"value_array : {value_array.dtype}" + msg = ( + "Input data have different datatype... " + f"dataset : {dataset.dtype}, value_array : {value_array.dtype}" ) + raise TypeError(msg) answer = [] diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py index df854cc850b1..24046115ebc4 100644 --- a/machine_learning/support_vector_machines.py +++ b/machine_learning/support_vector_machines.py @@ -74,7 +74,8 @@ def __init__( # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: - raise ValueError(f"Unknown kernel: {kernel}") + msg = f"Unknown kernel: {kernel}" + raise ValueError(msg) # kernels def __linear(self, vector1: ndarray, vector2: ndarray) -> float: diff --git a/maths/3n_plus_1.py b/maths/3n_plus_1.py index 59fdec48e100..f9f6dfeb9faa 100644 --- a/maths/3n_plus_1.py +++ b/maths/3n_plus_1.py @@ -9,9 +9,11 @@ def n31(a: int) -> tuple[list[int], int]: """ if not isinstance(a, int): - raise TypeError(f"Must be int, not {type(a).__name__}") + msg = f"Must be int, not {type(a).__name__}" + raise TypeError(msg) if a < 1: - raise ValueError(f"Given integer must be positive, not {a}") + msg = f"Given integer must be positive, not {a}" + raise ValueError(msg) path = [a] while a != 1: diff --git a/maths/automorphic_number.py b/maths/automorphic_number.py index 103fc7301831..8ed9375632a4 100644 --- a/maths/automorphic_number.py +++ b/maths/automorphic_number.py @@ -40,7 +40,8 @@ def is_automorphic_number(number: int) -> bool: TypeError: Input value of [number=5.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 0: return False number_square = number * number diff --git a/maths/catalan_number.py 
b/maths/catalan_number.py index 85607dc1eca4..20c2cfb17c06 100644 --- a/maths/catalan_number.py +++ b/maths/catalan_number.py @@ -31,10 +31,12 @@ def catalan(number: int) -> int: """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: - raise ValueError(f"Input value of [number={number}] must be > 0") + msg = f"Input value of [number={number}] must be > 0" + raise ValueError(msg) current_number = 1 diff --git a/maths/dual_number_automatic_differentiation.py b/maths/dual_number_automatic_differentiation.py index 9aa75830c4a1..f98997c8be4d 100644 --- a/maths/dual_number_automatic_differentiation.py +++ b/maths/dual_number_automatic_differentiation.py @@ -71,7 +71,7 @@ def __truediv__(self, other): for i in self.duals: new_duals.append(i / other) return Dual(self.real / other, new_duals) - raise ValueError() + raise ValueError def __floordiv__(self, other): if not isinstance(other, Dual): @@ -79,7 +79,7 @@ def __floordiv__(self, other): for i in self.duals: new_duals.append(i // other) return Dual(self.real // other, new_duals) - raise ValueError() + raise ValueError def __pow__(self, n): if n < 0 or isinstance(n, float): diff --git a/maths/hexagonal_number.py b/maths/hexagonal_number.py index 28735c638f80..3677ab95ee00 100644 --- a/maths/hexagonal_number.py +++ b/maths/hexagonal_number.py @@ -36,7 +36,8 @@ def hexagonal(number: int) -> int: TypeError: Input value of [number=11.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: raise ValueError("Input must be a positive integer") return number * (2 * number - 1) diff --git a/maths/juggler_sequence.py b/maths/juggler_sequence.py index 9daba8bc0e8a..7f65d1dff925 100644 --- a/maths/juggler_sequence.py +++ b/maths/juggler_sequence.py @@ -40,9 +40,11 @@ def juggler_sequence(number: int) -> list[int]: ValueError: Input value of [number=-1] must be a positive integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: - raise ValueError(f"Input value of [number={number}] must be a positive integer") + msg = f"Input value of [number={number}] must be a positive integer" + raise ValueError(msg) sequence = [number] while number != 1: if number % 2 == 0: diff --git a/maths/liouville_lambda.py b/maths/liouville_lambda.py index 5993efa42d66..1ed228dd5434 100644 --- a/maths/liouville_lambda.py +++ b/maths/liouville_lambda.py @@ -33,7 +33,8 @@ def liouville_lambda(number: int) -> int: TypeError: Input value of [number=11.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: raise ValueError("Input must be a positive integer") return -1 if len(prime_factors(number)) % 2 else 1 diff --git a/maths/manhattan_distance.py b/maths/manhattan_distance.py index 2711d4c8ccd6..413991468a49 100644 --- a/maths/manhattan_distance.py +++ b/maths/manhattan_distance.py @@ -15,15 +15,15 @@ def manhattan_distance(point_a: list, point_b: list) -> float: 9.0 >>> manhattan_distance([1,1], None) Traceback (most recent 
call last): - ... + ... ValueError: Missing an input >>> manhattan_distance([1,1], [2, 2, 2]) Traceback (most recent call last): - ... + ... ValueError: Both points must be in the same n-dimensional space >>> manhattan_distance([1,"one"], [2, 2, 2]) Traceback (most recent call last): - ... + ... TypeError: Expected a list of numbers as input, found str >>> manhattan_distance(1, [2, 2, 2]) Traceback (most recent call last): @@ -66,14 +66,14 @@ def _validate_point(point: list[float]) -> None: if isinstance(point, list): for item in point: if not isinstance(item, (int, float)): - raise TypeError( - f"Expected a list of numbers as input, " - f"found {type(item).__name__}" + msg = ( + "Expected a list of numbers as input, found " + f"{type(item).__name__}" ) + raise TypeError(msg) else: - raise TypeError( - f"Expected a list of numbers as input, found {type(point).__name__}" - ) + msg = f"Expected a list of numbers as input, found {type(point).__name__}" + raise TypeError(msg) else: raise ValueError("Missing an input") diff --git a/maths/pronic_number.py b/maths/pronic_number.py index 8b554dbbd602..cf4d3d2eb24b 100644 --- a/maths/pronic_number.py +++ b/maths/pronic_number.py @@ -41,7 +41,8 @@ def is_pronic(number: int) -> bool: TypeError: Input value of [number=6.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 0 or number % 2 == 1: return False number_sqrt = int(number**0.5) diff --git a/maths/proth_number.py b/maths/proth_number.py index ce911473a2d2..47747ed260f7 100644 --- a/maths/proth_number.py +++ b/maths/proth_number.py @@ -29,10 +29,12 @@ def proth(number: int) -> int: """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: - raise ValueError(f"Input value of [number={number}] must be > 0") + msg = f"Input value of [number={number}] must be > 0" + raise ValueError(msg) elif number == 1: return 3 elif number == 2: diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index af98f24f9538..2c5cdc004d1d 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -167,7 +167,7 @@ def __str__(self): f"{coef}*x^{i}" for coef, i in enumerate(self.product) ) - return "\n".join((a, b, c)) + return f"{a}\n{b}\n{c}" # Unit tests diff --git a/maths/sieve_of_eratosthenes.py b/maths/sieve_of_eratosthenes.py index 3cd6ce0b4d9d..a0520aa5cf50 100644 --- a/maths/sieve_of_eratosthenes.py +++ b/maths/sieve_of_eratosthenes.py @@ -34,7 +34,8 @@ def prime_sieve(num: int) -> list[int]: """ if num <= 0: - raise ValueError(f"{num}: Invalid input, please enter a positive integer.") + msg = f"{num}: Invalid input, please enter a positive integer." 
+ raise ValueError(msg) sieve = [True] * (num + 1) prime = [] diff --git a/maths/sylvester_sequence.py b/maths/sylvester_sequence.py index 114c9dd58582..607424c6a90b 100644 --- a/maths/sylvester_sequence.py +++ b/maths/sylvester_sequence.py @@ -31,7 +31,8 @@ def sylvester(number: int) -> int: if number == 1: return 2 elif number < 1: - raise ValueError(f"The input value of [n={number}] has to be > 0") + msg = f"The input value of [n={number}] has to be > 0" + raise ValueError(msg) else: num = sylvester(number - 1) lower = num - 1 diff --git a/maths/twin_prime.py b/maths/twin_prime.py index e6ac0cc7805b..912b10b366c0 100644 --- a/maths/twin_prime.py +++ b/maths/twin_prime.py @@ -32,7 +32,8 @@ def twin_prime(number: int) -> int: TypeError: Input value of [number=6.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if is_prime(number) and is_prime(number + 2): return number + 2 else: diff --git a/matrix/matrix_operation.py b/matrix/matrix_operation.py index 576094902af4..f189f1898d33 100644 --- a/matrix/matrix_operation.py +++ b/matrix/matrix_operation.py @@ -70,10 +70,11 @@ def multiply(matrix_a: list[list[int]], matrix_b: list[list[int]]) -> list[list[ rows, cols = _verify_matrix_sizes(matrix_a, matrix_b) if cols[0] != rows[1]: - raise ValueError( - f"Cannot multiply matrix of dimensions ({rows[0]},{cols[0]}) " - f"and ({rows[1]},{cols[1]})" + msg = ( + "Cannot multiply matrix of dimensions " + f"({rows[0]},{cols[0]}) and ({rows[1]},{cols[1]})" ) + raise ValueError(msg) return [ [sum(m * n for m, n in zip(i, j)) for j in zip(*matrix_b)] for i in matrix_a ] @@ -174,10 +175,11 @@ def _verify_matrix_sizes( ) -> tuple[tuple[int, int], tuple[int, int]]: shape = _shape(matrix_a) + _shape(matrix_b) if shape[0] != shape[3] or shape[1] != shape[2]: - raise ValueError( - f"operands could not be broadcast together with shape " + msg = ( + "operands could not be broadcast together with shape " f"({shape[0], shape[1]}), ({shape[2], shape[3]})" ) + raise ValueError(msg) return (shape[0], shape[2]), (shape[1], shape[3]) diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 39eddfed81f3..256271e8a87d 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -173,7 +173,8 @@ def __mul__(self, another: int | float | Matrix) -> Matrix: result[r, c] += self[r, i] * another[i, c] return result else: - raise TypeError(f"Unsupported type given for another ({type(another)})") + msg = f"Unsupported type given for another ({type(another)})" + raise TypeError(msg) def transpose(self) -> Matrix: """ diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 2a32f0b82c37..94c018ece9ba 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -198,10 +198,7 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): """Return the next `batch_size` examples from this data set.""" if fake_data: fake_image = [1] * 784 - if self.one_hot: - fake_label = [1] + [0] * 9 - else: - fake_label = 0 + fake_label = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)], @@ -324,10 +321,11 @@ def fake(): test_labels = _extract_labels(f, one_hot=one_hot) if not 0 <= validation_size <= len(train_images): - raise ValueError( - f"Validation size should be between 0 and {len(train_images)}. 
" - f"Received: {validation_size}." + msg = ( + "Validation size should be between 0 and " + f"{len(train_images)}. Received: {validation_size}." ) + raise ValueError(msg) validation_images = train_images[:validation_size] validation_labels = train_labels[:validation_size] diff --git a/other/nested_brackets.py b/other/nested_brackets.py index ea48c0a5f532..19c6dd53c8b2 100644 --- a/other/nested_brackets.py +++ b/other/nested_brackets.py @@ -18,7 +18,7 @@ def is_balanced(s): stack = [] open_brackets = set({"(", "[", "{"}) closed_brackets = set({")", "]", "}"}) - open_to_closed = dict({"{": "}", "[": "]", "(": ")"}) + open_to_closed = {"{": "}", "[": "]", "(": ")"} for i in range(len(s)): if s[i] in open_brackets: diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index 8e04a8f30dd7..af04f432e433 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -68,7 +68,8 @@ def calculate_each_score( # weight not 0 or 1 else: - raise ValueError(f"Invalid weight of {weight:f} provided") + msg = f"Invalid weight of {weight:f} provided" + raise ValueError(msg) score_lists.append(score) diff --git a/project_euler/problem_054/sol1.py b/project_euler/problem_054/sol1.py index 9af7aef5a716..74409f32c712 100644 --- a/project_euler/problem_054/sol1.py +++ b/project_euler/problem_054/sol1.py @@ -119,10 +119,12 @@ def __init__(self, hand: str) -> None: For example: "6S 4C KC AS TH" """ if not isinstance(hand, str): - raise TypeError(f"Hand should be of type 'str': {hand!r}") + msg = f"Hand should be of type 'str': {hand!r}" + raise TypeError(msg) # split removes duplicate whitespaces so no need of strip if len(hand.split(" ")) != 5: - raise ValueError(f"Hand should contain only 5 cards: {hand!r}") + msg = f"Hand should contain only 5 cards: {hand!r}" + raise ValueError(msg) self._hand = hand self._first_pair = 0 self._second_pair = 0 diff --git a/project_euler/problem_068/sol1.py b/project_euler/problem_068/sol1.py index 772be359f630..cf814b001d57 100644 --- a/project_euler/problem_068/sol1.py +++ b/project_euler/problem_068/sol1.py @@ -73,7 +73,8 @@ def solution(gon_side: int = 5) -> int: if is_magic_gon(numbers): return int("".join(str(n) for n in numbers)) - raise ValueError(f"Magic {gon_side}-gon ring is impossible") + msg = f"Magic {gon_side}-gon ring is impossible" + raise ValueError(msg) def generate_gon_ring(gon_side: int, perm: list[int]) -> list[int]: diff --git a/project_euler/problem_131/sol1.py b/project_euler/problem_131/sol1.py index f5302aac8644..be3ea9c81ae4 100644 --- a/project_euler/problem_131/sol1.py +++ b/project_euler/problem_131/sol1.py @@ -26,10 +26,7 @@ def is_prime(number: int) -> bool: False """ - for divisor in range(2, isqrt(number) + 1): - if number % divisor == 0: - return False - return True + return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1)) def solution(max_prime: int = 10**6) -> int: diff --git a/pyproject.toml b/pyproject.toml index 48c3fbd4009d..a526196685f5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,45 +17,88 @@ ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,sec skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" [tool.ruff] -ignore = [ # `ruff rule S101` for a description of that rule - "B904", # B904: Within an `except` clause, raise exceptions with `raise ... 
from err` - "B905", # B905: `zip()` without an explicit `strict=` parameter - "E741", # E741: Ambiguous variable name 'l' - "G004", # G004 Logging statement uses f-string - "N999", # N999: Invalid module name - "PLC1901", # PLC1901: `{}` can be simplified to `{}` as an empty string is falsey - "PLR2004", # PLR2004: Magic value used in comparison - "PLR5501", # PLR5501: Consider using `elif` instead of `else` - "PLW0120", # PLW0120: `else` clause on loop without a `break` statement - "PLW060", # PLW060: Using global for `{name}` but no assignment is done -- DO NOT FIX - "PLW2901", # PLW2901: Redefined loop variable - "RUF00", # RUF00: Ambiguous unicode character -- DO NOT FIX - "RUF100", # RUF100: Unused `noqa` directive - "S101", # S101: Use of `assert` detected -- DO NOT FIX - "S105", # S105: Possible hardcoded password: 'password' - "S113", # S113: Probable use of requests call without timeout - "S311", # S311: Standard pseudo-random generators are not suitable for cryptographic purposes - "UP038", # UP038: Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +ignore = [ # `ruff rule S101` for a description of that rule + "ARG001", # Unused function argument `amount` -- FIX ME? + "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME + "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME + "DTZ001", # The use of `datetime.datetime()` without `tzinfo` argument is not allowed -- FIX ME + "DTZ005", # The use of `datetime.datetime.now()` without `tzinfo` argument is not allowed -- FIX ME + "E741", # Ambiguous variable name 'l' -- FIX ME + "EM101", # Exception must not use a string literal, assign to variable first + "EXE001", # Shebang is present but file is not executable" -- FIX ME + "G004", # Logging statement uses f-string + "ICN001", # `matplotlib.pyplot` should be imported as `plt` -- FIX ME + "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. 
-- FIX ME + "N999", # Invalid module name -- FIX ME + "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME + "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME + "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey + "PLR5501", # Consider using `elif` instead of `else` -- FIX ME + "PLW0120", # `else` clause on loop without a `break` statement -- FIX ME + "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "RUF00", # Ambiguous unicode character and other rules + "RUF100", # Unused `noqa` directive -- FIX ME + "S101", # Use of `assert` detected -- DO NOT FIX + "S105", # Possible hardcoded password: 'password' + "S113", # Probable use of requests call without timeout -- FIX ME + "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SIM102", # Use a single `if` statement instead of nested `if` statements -- FIX ME + "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] -select = [ # https://beta.ruff.rs/docs/rules - "A", # A: builtins - "B", # B: bugbear - "C40", # C40: comprehensions - "C90", # C90: mccabe code complexity - "E", # E: pycodestyle errors - "F", # F: pyflakes - "G", # G: logging format - "I", # I: isort - "N", # N: pep8 naming - "PL", # PL: pylint - "PIE", # PIE: pie - "PYI", # PYI: type hinting stub files - "RUF", # RUF: ruff - "S", # S: bandit - "TID", # TID: tidy imports - "UP", # UP: pyupgrade - "W", # W: pycodestyle warnings - "YTT", # YTT: year 2020 +select = [ # https://beta.ruff.rs/docs/rules + "A", # flake8-builtins + "ARG", # flake8-unused-arguments + "ASYNC", # flake8-async + "B", # flake8-bugbear + "BLE", # flake8-blind-except + "C4", # flake8-comprehensions + "C90", # McCabe cyclomatic complexity + "DTZ", # flake8-datetimez + "E", # pycodestyle + "EM", # flake8-errmsg + "EXE", # flake8-executable + "F", # Pyflakes + "FA", # flake8-future-annotations + "FLY", # flynt + "G", # flake8-logging-format + "I", # isort + "ICN", # flake8-import-conventions + "INP", # flake8-no-pep420 + "INT", # flake8-gettext + "N", # pep8-naming + "NPY", # NumPy-specific rules + "PGH", # pygrep-hooks + "PIE", # flake8-pie + "PL", # Pylint + "PYI", # flake8-pyi + "RSE", # flake8-raise + "RUF", # Ruff-specific rules + "S", # flake8-bandit + "SIM", # flake8-simplify + "SLF", # flake8-self + "T10", # flake8-debugger + "TD", # flake8-todos + "TID", # flake8-tidy-imports + "UP", # pyupgrade + "W", # pycodestyle + "YTT", # flake8-2020 + # "ANN", # flake8-annotations # FIX ME? + # "COM", # flake8-commas + # "D", # pydocstyle -- FIX ME? + # "DJ", # flake8-django + # "ERA", # eradicate -- DO NOT FIX + # "FBT", # flake8-boolean-trap # FIX ME + # "ISC", # flake8-implicit-str-concat # FIX ME + # "PD", # pandas-vet + # "PT", # flake8-pytest-style + # "PTH", # flake8-use-pathlib # FIX ME + # "Q", # flake8-quotes + # "RET", # flake8-return # FIX ME? 
+ # "T20", # flake8-print + # "TCH", # flake8-type-checking + # "TRY", # tryceratops ] show-source = true target-version = "py311" @@ -63,7 +106,27 @@ target-version = "py311" [tool.ruff.mccabe] # DO NOT INCREASE THIS VALUE max-complexity = 17 # default: 10 +[tool.ruff.per-file-ignores] +"arithmetic_analysis/newton_raphson.py" = ["PGH001"] +"audio_filters/show_response.py" = ["ARG002"] +"data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] +"data_structures/binary_tree/treap.py" = ["SIM114"] +"data_structures/hashing/hash_table.py" = ["ARG002"] +"data_structures/hashing/quadratic_probing.py" = ["ARG002"] +"data_structures/hashing/tests/test_hash_map.py" = ["BLE001"] +"data_structures/heap/max_heap.py" = ["SIM114"] +"graphs/minimum_spanning_tree_prims.py" = ["SIM114"] +"hashes/enigma_machine.py" = ["BLE001"] +"machine_learning/decision_tree.py" = ["SIM114"] +"machine_learning/linear_discriminant_analysis.py" = ["ARG005"] +"machine_learning/sequential_minimum_optimization.py" = ["SIM115"] +"matrix/sherman_morrison.py" = ["SIM103", "SIM114"] +"physics/newtons_second_law_of_motion.py" = ["BLE001"] +"project_euler/problem_099/sol1.py" = ["SIM115"] +"sorts/external_sort.py" = ["SIM115"] + [tool.ruff.pylint] # DO NOT INCREASE THESE VALUES +allow-magic-value-types = ["float", "int", "str"] max-args = 10 # default: 5 max-branches = 20 # default: 12 max-returns = 8 # default: 6 diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py index b95be9ebc254..24bc00cd036f 100755 --- a/scripts/build_directory_md.py +++ b/scripts/build_directory_md.py @@ -33,7 +33,7 @@ def print_directory_md(top_dir: str = ".") -> None: if filepath != old_path: old_path = print_path(old_path, filepath) indent = (filepath.count(os.sep) + 1) if filepath else 0 - url = "/".join((filepath, filename)).replace(" ", "%20") + url = f"{filepath}/{filename}".replace(" ", "%20") filename = os.path.splitext(filename.replace("_", " ").title())[0] print(f"{md_prefix(indent)} [{filename}]({url})") diff --git a/sorts/dutch_national_flag_sort.py b/sorts/dutch_national_flag_sort.py index 79afefa73afe..758e3a887b84 100644 --- a/sorts/dutch_national_flag_sort.py +++ b/sorts/dutch_national_flag_sort.py @@ -84,9 +84,8 @@ def dutch_national_flag_sort(sequence: list) -> list: sequence[mid], sequence[high] = sequence[high], sequence[mid] high -= 1 else: - raise ValueError( - f"The elements inside the sequence must contains only {colors} values" - ) + msg = f"The elements inside the sequence must contains only {colors} values" + raise ValueError(msg) return sequence diff --git a/strings/barcode_validator.py b/strings/barcode_validator.py index e050cd337d74..b4f3864e2642 100644 --- a/strings/barcode_validator.py +++ b/strings/barcode_validator.py @@ -65,7 +65,8 @@ def get_barcode(barcode: str) -> int: ValueError: Barcode 'dwefgiweuf' has alphabetic characters. """ if str(barcode).isalpha(): - raise ValueError(f"Barcode '{barcode}' has alphabetic characters.") + msg = f"Barcode '{barcode}' has alphabetic characters." + raise ValueError(msg) elif int(barcode) < 0: raise ValueError("The entered barcode has a negative value. 
Try again.") else: diff --git a/strings/capitalize.py b/strings/capitalize.py index 63603aa07e2d..e7e97c2beb53 100644 --- a/strings/capitalize.py +++ b/strings/capitalize.py @@ -17,7 +17,7 @@ def capitalize(sentence: str) -> str: """ if not sentence: return "" - lower_to_upper = {lc: uc for lc, uc in zip(ascii_lowercase, ascii_uppercase)} + lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase)) return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:] diff --git a/strings/is_spain_national_id.py b/strings/is_spain_national_id.py index 67f49755f412..60d06e123aae 100644 --- a/strings/is_spain_national_id.py +++ b/strings/is_spain_national_id.py @@ -48,7 +48,8 @@ def is_spain_national_id(spanish_id: str) -> bool: """ if not isinstance(spanish_id, str): - raise TypeError(f"Expected string as input, found {type(spanish_id).__name__}") + msg = f"Expected string as input, found {type(spanish_id).__name__}" + raise TypeError(msg) spanish_id_clean = spanish_id.replace("-", "").upper() if len(spanish_id_clean) != 9: diff --git a/strings/snake_case_to_camel_pascal_case.py b/strings/snake_case_to_camel_pascal_case.py index 28a28b517a01..8219337a63b0 100644 --- a/strings/snake_case_to_camel_pascal_case.py +++ b/strings/snake_case_to_camel_pascal_case.py @@ -27,11 +27,11 @@ def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str: """ if not isinstance(input_str, str): - raise ValueError(f"Expected string as input, found {type(input_str)}") + msg = f"Expected string as input, found {type(input_str)}" + raise ValueError(msg) if not isinstance(use_pascal, bool): - raise ValueError( - f"Expected boolean as use_pascal parameter, found {type(use_pascal)}" - ) + msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}" + raise ValueError(msg) words = input_str.split("_") diff --git a/web_programming/reddit.py b/web_programming/reddit.py index 6a31c81c34bd..5ca5f828c0fb 100644 --- a/web_programming/reddit.py +++ b/web_programming/reddit.py @@ -26,7 +26,8 @@ def get_subreddit_data( """ wanted_data = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)): - raise ValueError(f"Invalid search term: {invalid_search_terms}") + msg = f"Invalid search term: {invalid_search_terms}" + raise ValueError(msg) response = requests.get( f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"User-agent": "A random string"}, diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py index abac3c70b22e..d5d4cfe92f20 100644 --- a/web_programming/search_books_by_isbn.py +++ b/web_programming/search_books_by_isbn.py @@ -22,7 +22,8 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict: """ new_olid = olid.strip().strip("/") # Remove leading/trailing whitespace & slashes if new_olid.count("/") != 1: - raise ValueError(f"{olid} is not a valid Open Library olid") + msg = f"{olid} is not a valid Open Library olid" + raise ValueError(msg) return requests.get(f"https://openlibrary.org/{new_olid}.json").json() diff --git a/web_programming/slack_message.py b/web_programming/slack_message.py index f35aa3ca587e..5e97d6b64c75 100644 --- a/web_programming/slack_message.py +++ b/web_programming/slack_message.py @@ -7,10 +7,11 @@ def send_slack_message(message_body: str, slack_url: str) -> None: headers = {"Content-Type": "application/json"} response = requests.post(slack_url, json={"text": message_body}, headers=headers) if response.status_code != 200: - raise ValueError( - f"Request to slack 
returned an error {response.status_code}, " - f"the response is:\n{response.text}" + msg = ( + "Request to slack returned an error " + f"{response.status_code}, the response is:\n{response.text}" ) + raise ValueError(msg) if __name__ == "__main__": From c93659d7ce65e3717f06333e3d049ebaa888e597 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 29 May 2023 17:37:54 -0700 Subject: [PATCH 0851/1543] Fix type error in `strassen_matrix_multiplication.py` (#8784) * Fix type error in strassen_matrix_multiplication.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + ...ion.py.BROKEN => strassen_matrix_multiplication.py} | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) rename divide_and_conquer/{strassen_matrix_multiplication.py.BROKEN => strassen_matrix_multiplication.py} (97%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 11ff93c91430..231b0e2f1d2f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -294,6 +294,7 @@ * [Mergesort](divide_and_conquer/mergesort.py) * [Peak](divide_and_conquer/peak.py) * [Power](divide_and_conquer/power.py) + * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) ## Dynamic Programming * [Abbreviation](dynamic_programming/abbreviation.py) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN b/divide_and_conquer/strassen_matrix_multiplication.py similarity index 97% rename from divide_and_conquer/strassen_matrix_multiplication.py.BROKEN rename to divide_and_conquer/strassen_matrix_multiplication.py index 2ca91c63bf4c..cbfc7e5655db 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -112,17 +112,19 @@ def strassen(matrix1: list, matrix2: list) -> list: [[139, 163], [121, 134], [100, 121]] """ if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]: - raise Exception( - "Unable to multiply these matrices, please check the dimensions. \n" - f"Matrix A:{matrix1} \nMatrix B:{matrix2}" + msg = ( + "Unable to multiply these matrices, please check the dimensions.\n" + f"Matrix A: {matrix1}\n" + f"Matrix B: {matrix2}" ) + raise Exception(msg) dimension1 = matrix_dimensions(matrix1) dimension2 = matrix_dimensions(matrix2) if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]: return [matrix1, matrix2] - maximum = max(dimension1, dimension2) + maximum = max(*dimension1, *dimension2) maxim = int(math.pow(2, math.ceil(math.log2(maximum)))) new_matrix1 = matrix1 new_matrix2 = matrix2 From 4a27b544303e6bab90ed57b72fa3acf3d785429e Mon Sep 17 00:00:00 2001 From: Sundaram Kumar Jha Date: Wed, 31 May 2023 06:26:59 +0530 Subject: [PATCH 0852/1543] Update permutations.py (#8102) --- data_structures/arrays/permutations.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/data_structures/arrays/permutations.py b/data_structures/arrays/permutations.py index eb3f26517863..4558bd8d468a 100644 --- a/data_structures/arrays/permutations.py +++ b/data_structures/arrays/permutations.py @@ -1,7 +1,6 @@ def permute(nums: list[int]) -> list[list[int]]: """ Return all permutations. - >>> from itertools import permutations >>> numbers= [1,2,3] >>> all(list(nums) in permute(numbers) for nums in permutations(numbers)) @@ -20,7 +19,32 @@ def permute(nums: list[int]) -> list[list[int]]: return result +def permute2(nums): + """ + Return all permutations of the given list. 
+ + >>> permute2([1, 2, 3]) + [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]] + """ + + def backtrack(start): + if start == len(nums) - 1: + output.append(nums[:]) + else: + for i in range(start, len(nums)): + nums[start], nums[i] = nums[i], nums[start] + backtrack(start + 1) + nums[start], nums[i] = nums[i], nums[start] # backtrack + + output = [] + backtrack(0) + return output + + if __name__ == "__main__": import doctest + # use res to print the data in permute2 function + res = permute2([1, 2, 3]) + print(res) doctest.testmod() From e871540e37b834673f9e6650b8e2281d7d36a8c3 Mon Sep 17 00:00:00 2001 From: Rudransh Bhardwaj <115872354+rudransh61@users.noreply.github.com> Date: Wed, 31 May 2023 20:33:02 +0530 Subject: [PATCH 0853/1543] Added rank of matrix in linear algebra (#8687) * Added rank of matrix in linear algebra * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Corrected name of function * Corrected Rank_of_Matrix.py * Completed rank_of_matrix.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * delete to rename Rank_of_Matrix.py * created rank_of_matrix * added more doctests in rank_of_matrix.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed some issues in rank_of_matrix.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added moreeee doctestsss in rank_of_mtrix.py and fixed some bugss * Update linear_algebra/src/rank_of_matrix.py Co-authored-by: Christian Clauss * Update linear_algebra/src/rank_of_matrix.py Co-authored-by: Christian Clauss * Update linear_algebra/src/rank_of_matrix.py Co-authored-by: Christian Clauss * Update rank_of_matrix.py * Update linear_algebra/src/rank_of_matrix.py Co-authored-by: Caeden Perelli-Harris --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Caeden Perelli-Harris --- linear_algebra/src/rank_of_matrix.py | 89 ++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 linear_algebra/src/rank_of_matrix.py diff --git a/linear_algebra/src/rank_of_matrix.py b/linear_algebra/src/rank_of_matrix.py new file mode 100644 index 000000000000..7ff3c1699a69 --- /dev/null +++ b/linear_algebra/src/rank_of_matrix.py @@ -0,0 +1,89 @@ +""" +Calculate the rank of a matrix. + +See: https://en.wikipedia.org/wiki/Rank_(linear_algebra) +""" + + +def rank_of_matrix(matrix: list[list[int | float]]) -> int: + """ + Finds the rank of a matrix. + Args: + matrix: The matrix as a list of lists. + Returns: + The rank of the matrix. + Example: + >>> matrix1 = [[1, 2, 3], + ... [4, 5, 6], + ... [7, 8, 9]] + >>> rank_of_matrix(matrix1) + 2 + >>> matrix2 = [[1, 0, 0], + ... [0, 1, 0], + ... [0, 0, 0]] + >>> rank_of_matrix(matrix2) + 2 + >>> matrix3 = [[1, 2, 3, 4], + ... [5, 6, 7, 8], + ... [9, 10, 11, 12]] + >>> rank_of_matrix(matrix3) + 2 + >>> rank_of_matrix([[2,3,-1,-1], + ... [1,-1,-2,4], + ... [3,1,3,-2], + ... [6,3,0,-7]]) + 4 + >>> rank_of_matrix([[2,1,-3,-6], + ... [3,-3,1,2], + ... [1,1,1,2]]) + 3 + >>> rank_of_matrix([[2,-1,0], + ... [1,3,4], + ... [4,1,-3]]) + 3 + >>> rank_of_matrix([[3,2,1], + ... 
[-6,-4,-2]]) + 1 + >>> rank_of_matrix([[],[]]) + 0 + >>> rank_of_matrix([[1]]) + 1 + >>> rank_of_matrix([[]]) + 0 + """ + + rows = len(matrix) + columns = len(matrix[0]) + rank = min(rows, columns) + + for row in range(rank): + # Check if diagonal element is not zero + if matrix[row][row] != 0: + # Eliminate all the elements below the diagonal + for col in range(row + 1, rows): + multiplier = matrix[col][row] / matrix[row][row] + for i in range(row, columns): + matrix[col][i] -= multiplier * matrix[row][i] + else: + # Find a non-zero diagonal element to swap rows + reduce = True + for i in range(row + 1, rows): + if matrix[i][row] != 0: + matrix[row], matrix[i] = matrix[i], matrix[row] + reduce = False + break + if reduce: + rank -= 1 + for i in range(rows): + matrix[i][row] = matrix[i][rank] + + # Reduce the row pointer by one to stay on the same row + row -= 1 + + return rank + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 4621b0bb4f5d3fff2fa4f0e53d6cb862fe002c60 Mon Sep 17 00:00:00 2001 From: nith2001 <75632283+nith2001@users.noreply.github.com> Date: Wed, 31 May 2023 13:06:12 -0700 Subject: [PATCH 0854/1543] Improved Graph Implementations (#8730) * Improved Graph Implementations Provides new implementation for graph_list.py and graph_matrix.py along with pytest suites for each. Fixes #8709 * Graph implementation style fixes, corrections, and refactored tests * Helpful docs about graph implementation * Refactored code to separate files and applied enumerate() * Renamed files and refactored code to fail fast * Error handling style fix * Fixed f-string code quality issue * Last f-string fix * Added return types to test functions and more style fixes * Added more function return types * Added more function return types pt2 * Fixed error messages --- graphs/graph_adjacency_list.py | 589 ++++++++++++++++++++++++++++++ graphs/graph_adjacency_matrix.py | 608 +++++++++++++++++++++++++++++++ graphs/graph_matrix.py | 24 -- graphs/tests/__init__.py | 0 4 files changed, 1197 insertions(+), 24 deletions(-) create mode 100644 graphs/graph_adjacency_list.py create mode 100644 graphs/graph_adjacency_matrix.py delete mode 100644 graphs/graph_matrix.py create mode 100644 graphs/tests/__init__.py diff --git a/graphs/graph_adjacency_list.py b/graphs/graph_adjacency_list.py new file mode 100644 index 000000000000..76f34f845860 --- /dev/null +++ b/graphs/graph_adjacency_list.py @@ -0,0 +1,589 @@ +#!/usr/bin/env python3 +""" +Author: Vikram Nithyanandam + +Description: +The following implementation is a robust unweighted Graph data structure +implemented using an adjacency list. This vertices and edges of this graph can be +effectively initialized and modified while storing your chosen generic +value in each vertex. + +Adjacency List: https://en.wikipedia.org/wiki/Adjacency_list + +Potential Future Ideas: +- Add a flag to set edge weights on and set edge weights +- Make edge weights and vertex values customizable to store whatever the client wants +- Support multigraph functionality if the client wants it +""" +from __future__ import annotations + +import random +import unittest +from pprint import pformat +from typing import Generic, TypeVar + +T = TypeVar("T") + + +class GraphAdjacencyList(Generic[T]): + def __init__( + self, vertices: list[T], edges: list[list[T]], directed: bool = True + ) -> None: + """ + Parameters: + - vertices: (list[T]) The list of vertex names the client wants to + pass in. Default is empty. 
+ - edges: (list[list[T]]) The list of edges the client wants to + pass in. Each edge is a 2-element list. Default is empty. + - directed: (bool) Indicates if graph is directed or undirected. + Default is True. + """ + self.adj_list: dict[T, list[T]] = {} # dictionary of lists of T + self.directed = directed + + # Falsey checks + edges = edges or [] + vertices = vertices or [] + + for vertex in vertices: + self.add_vertex(vertex) + + for edge in edges: + if len(edge) != 2: + msg = f"Invalid input: {edge} is the wrong length." + raise ValueError(msg) + self.add_edge(edge[0], edge[1]) + + def add_vertex(self, vertex: T) -> None: + """ + Adds a vertex to the graph. If the given vertex already exists, + a ValueError will be thrown. + """ + if self.contains_vertex(vertex): + msg = f"Incorrect input: {vertex} is already in the graph." + raise ValueError(msg) + self.adj_list[vertex] = [] + + def add_edge(self, source_vertex: T, destination_vertex: T) -> None: + """ + Creates an edge from source vertex to destination vertex. If any + given vertex doesn't exist or the edge already exists, a ValueError + will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} or " + f"{destination_vertex} does not exist" + ) + raise ValueError(msg) + if self.contains_edge(source_vertex, destination_vertex): + msg = ( + "Incorrect input: The edge already exists between " + f"{source_vertex} and {destination_vertex}" + ) + raise ValueError(msg) + + # add the destination vertex to the list associated with the source vertex + # and vice versa if not directed + self.adj_list[source_vertex].append(destination_vertex) + if not self.directed: + self.adj_list[destination_vertex].append(source_vertex) + + def remove_vertex(self, vertex: T) -> None: + """ + Removes the given vertex from the graph and deletes all incoming and + outgoing edges from the given vertex as well. If the given vertex + does not exist, a ValueError will be thrown. + """ + if not self.contains_vertex(vertex): + msg = f"Incorrect input: {vertex} does not exist in this graph." + raise ValueError(msg) + + if not self.directed: + # If not directed, find all neighboring vertices and delete all references + # of edges connecting to the given vertex + for neighbor in self.adj_list[vertex]: + self.adj_list[neighbor].remove(vertex) + else: + # If directed, search all neighbors of all vertices and delete all + # references of edges connecting to the given vertex + for edge_list in self.adj_list.values(): + if vertex in edge_list: + edge_list.remove(vertex) + + # Finally, delete the given vertex and all of its outgoing edge references + self.adj_list.pop(vertex) + + def remove_edge(self, source_vertex: T, destination_vertex: T) -> None: + """ + Removes the edge between the two vertices. If any given vertex + doesn't exist or the edge does not exist, a ValueError will be thrown. 
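Stripped to plain data (an illustration, not part of the diff), the structure the class above maintains is just a dict from each vertex to its neighbour list, mirrored in both directions for undirected graphs:

adjacency: dict[str, list[str]] = {"a": [], "b": [], "c": []}


def add_undirected_edge(adj: dict[str, list[str]], u: str, v: str) -> None:
    """Record the edge from both endpoints, as the undirected branch above does."""
    adj[u].append(v)
    adj[v].append(u)


add_undirected_edge(adjacency, "a", "b")
add_undirected_edge(adjacency, "b", "c")
assert "b" in adjacency["a"] and "a" in adjacency["b"]  # edge check is a membership test
assert "c" not in adjacency["a"]  # no a-c edge was added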
+ """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} or " + f"{destination_vertex} does not exist" + ) + raise ValueError(msg) + if not self.contains_edge(source_vertex, destination_vertex): + msg = ( + "Incorrect input: The edge does NOT exist between " + f"{source_vertex} and {destination_vertex}" + ) + raise ValueError(msg) + + # remove the destination vertex from the list associated with the source + # vertex and vice versa if not directed + self.adj_list[source_vertex].remove(destination_vertex) + if not self.directed: + self.adj_list[destination_vertex].remove(source_vertex) + + def contains_vertex(self, vertex: T) -> bool: + """ + Returns True if the graph contains the vertex, False otherwise. + """ + return vertex in self.adj_list + + def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool: + """ + Returns True if the graph contains the edge from the source_vertex to the + destination_vertex, False otherwise. If any given vertex doesn't exist, a + ValueError will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} " + f"or {destination_vertex} does not exist." + ) + raise ValueError(msg) + + return destination_vertex in self.adj_list[source_vertex] + + def clear_graph(self) -> None: + """ + Clears all vertices and edges. + """ + self.adj_list = {} + + def __repr__(self) -> str: + return pformat(self.adj_list) + + +class TestGraphAdjacencyList(unittest.TestCase): + def __assert_graph_edge_exists_check( + self, + undirected_graph: GraphAdjacencyList, + directed_graph: GraphAdjacencyList, + edge: list[int], + ) -> None: + self.assertTrue(undirected_graph.contains_edge(edge[0], edge[1])) + self.assertTrue(undirected_graph.contains_edge(edge[1], edge[0])) + self.assertTrue(directed_graph.contains_edge(edge[0], edge[1])) + + def __assert_graph_edge_does_not_exist_check( + self, + undirected_graph: GraphAdjacencyList, + directed_graph: GraphAdjacencyList, + edge: list[int], + ) -> None: + self.assertFalse(undirected_graph.contains_edge(edge[0], edge[1])) + self.assertFalse(undirected_graph.contains_edge(edge[1], edge[0])) + self.assertFalse(directed_graph.contains_edge(edge[0], edge[1])) + + def __assert_graph_vertex_exists_check( + self, + undirected_graph: GraphAdjacencyList, + directed_graph: GraphAdjacencyList, + vertex: int, + ) -> None: + self.assertTrue(undirected_graph.contains_vertex(vertex)) + self.assertTrue(directed_graph.contains_vertex(vertex)) + + def __assert_graph_vertex_does_not_exist_check( + self, + undirected_graph: GraphAdjacencyList, + directed_graph: GraphAdjacencyList, + vertex: int, + ) -> None: + self.assertFalse(undirected_graph.contains_vertex(vertex)) + self.assertFalse(directed_graph.contains_vertex(vertex)) + + def __generate_random_edges( + self, vertices: list[int], edge_pick_count: int + ) -> list[list[int]]: + self.assertTrue(edge_pick_count <= len(vertices)) + + random_source_vertices: list[int] = random.sample( + vertices[0 : int(len(vertices) / 2)], edge_pick_count + ) + random_destination_vertices: list[int] = random.sample( + vertices[int(len(vertices) / 2) :], edge_pick_count + ) + random_edges: list[list[int]] = [] + + for source in random_source_vertices: + for dest in random_destination_vertices: + random_edges.append([source, dest]) + + return random_edges + + def __generate_graphs( + self, 
vertex_count: int, min_val: int, max_val: int, edge_pick_count: int + ) -> tuple[GraphAdjacencyList, GraphAdjacencyList, list[int], list[list[int]]]: + if max_val - min_val + 1 < vertex_count: + raise ValueError( + "Will result in duplicate vertices. Either increase range " + "between min_val and max_val or decrease vertex count." + ) + + # generate graph input + random_vertices: list[int] = random.sample( + range(min_val, max_val + 1), vertex_count + ) + random_edges: list[list[int]] = self.__generate_random_edges( + random_vertices, edge_pick_count + ) + + # build graphs + undirected_graph = GraphAdjacencyList( + vertices=random_vertices, edges=random_edges, directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices, edges=random_edges, directed=True + ) + + return undirected_graph, directed_graph, random_vertices, random_edges + + def test_init_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # test graph initialization with vertices and edges + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + for edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + self.assertFalse(undirected_graph.directed) + self.assertTrue(directed_graph.directed) + + def test_contains_vertex(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # Build graphs WITHOUT edges + undirected_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=True + ) + + # Test contains_vertex + for num in range(101): + self.assertEqual( + num in random_vertices, undirected_graph.contains_vertex(num) + ) + self.assertEqual( + num in random_vertices, directed_graph.contains_vertex(num) + ) + + def test_add_vertices(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # build empty graphs + undirected_graph: GraphAdjacencyList = GraphAdjacencyList( + vertices=[], edges=[], directed=False + ) + directed_graph: GraphAdjacencyList = GraphAdjacencyList( + vertices=[], edges=[], directed=True + ) + + # run add_vertex + for num in random_vertices: + undirected_graph.add_vertex(num) + + for num in random_vertices: + directed_graph.add_vertex(num) + + # test add_vertex worked + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + def test_remove_vertices(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=True + ) + + # test remove_vertex worked + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + undirected_graph.remove_vertex(num) + directed_graph.remove_vertex(num) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, num + ) + + def test_add_and_remove_vertices_repeatedly(self) -> None: + random_vertices1: list[int] = random.sample(range(51), 20) + random_vertices2: list[int] = random.sample(range(51, 101), 20) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyList( + vertices=random_vertices1, edges=[], 
directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices1, edges=[], directed=True + ) + + # test adding and removing vertices + for i, _ in enumerate(random_vertices1): + undirected_graph.add_vertex(random_vertices2[i]) + directed_graph.add_vertex(random_vertices2[i]) + + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, random_vertices2[i] + ) + + undirected_graph.remove_vertex(random_vertices1[i]) + directed_graph.remove_vertex(random_vertices1[i]) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, random_vertices1[i] + ) + + # remove all vertices + for i, _ in enumerate(random_vertices1): + undirected_graph.remove_vertex(random_vertices2[i]) + directed_graph.remove_vertex(random_vertices2[i]) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, random_vertices2[i] + ) + + def test_contains_edge(self) -> None: + # generate graphs and graph input + vertex_count = 20 + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(vertex_count, 0, 100, 4) + + # generate all possible edges for testing + all_possible_edges: list[list[int]] = [] + for i in range(vertex_count - 1): + for j in range(i + 1, vertex_count): + all_possible_edges.append([random_vertices[i], random_vertices[j]]) + all_possible_edges.append([random_vertices[j], random_vertices[i]]) + + # test contains_edge function + for edge in all_possible_edges: + if edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + elif [edge[1], edge[0]] in random_edges: + # since this edge exists for undirected but the reverse + # may not exist for directed + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, [edge[1], edge[0]] + ) + else: + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, edge + ) + + def test_add_edge(self) -> None: + # generate graph input + random_vertices: list[int] = random.sample(range(101), 15) + random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyList( + vertices=random_vertices, edges=[], directed=True + ) + + # run and test add_edge + for edge in random_edges: + undirected_graph.add_edge(edge[0], edge[1]) + directed_graph.add_edge(edge[0], edge[1]) + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + + def test_remove_edge(self) -> None: + # generate graph input and graphs + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # run and test remove_edge + for edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + undirected_graph.remove_edge(edge[0], edge[1]) + directed_graph.remove_edge(edge[0], edge[1]) + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, edge + ) + + def test_add_and_remove_edges_repeatedly(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # make some more edge options! 
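The helper behind these tests, __generate_random_edges, samples sources and destinations from disjoint halves of the vertex list, so the generated pairs are distinct and never self-loops. A tiny standalone sketch of that sampling pattern (illustrative names only, not the test's API):

import random


def sample_edges(vertices: list[int], count: int) -> list[tuple[int, int]]:
    """Pair `count` sources from the first half with `count` destinations from the second."""
    half = len(vertices) // 2
    sources = random.sample(vertices[:half], count)
    destinations = random.sample(vertices[half:], count)
    return [(s, d) for s in sources for d in destinations]


edges = sample_edges(list(range(10)), 3)
assert len(edges) == 9  # 3 sources x 3 destinations
assert all(s != d for s, d in edges)  # disjoint halves, so no self-loops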
+ more_random_edges: list[list[int]] = [] + + while len(more_random_edges) != len(random_edges): + edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + for edge in edges: + if len(more_random_edges) == len(random_edges): + break + elif edge not in more_random_edges and edge not in random_edges: + more_random_edges.append(edge) + + for i, _ in enumerate(random_edges): + undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) + directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) + + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, more_random_edges[i] + ) + + undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1]) + directed_graph.remove_edge(random_edges[i][0], random_edges[i][1]) + + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, random_edges[i] + ) + + def test_add_vertex_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for vertex in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.add_vertex(vertex) + with self.assertRaises(ValueError): + directed_graph.add_vertex(vertex) + + def test_remove_vertex_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for i in range(101): + if i not in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.remove_vertex(i) + with self.assertRaises(ValueError): + directed_graph.remove_vertex(i) + + def test_add_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for edge in random_edges: + with self.assertRaises(ValueError): + undirected_graph.add_edge(edge[0], edge[1]) + with self.assertRaises(ValueError): + directed_graph.add_edge(edge[0], edge[1]) + + def test_remove_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + more_random_edges: list[list[int]] = [] + + while len(more_random_edges) != len(random_edges): + edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + for edge in edges: + if len(more_random_edges) == len(random_edges): + break + elif edge not in more_random_edges and edge not in random_edges: + more_random_edges.append(edge) + + for edge in more_random_edges: + with self.assertRaises(ValueError): + undirected_graph.remove_edge(edge[0], edge[1]) + with self.assertRaises(ValueError): + directed_graph.remove_edge(edge[0], edge[1]) + + def test_contains_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for vertex in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.contains_edge(vertex, 102) + with self.assertRaises(ValueError): + directed_graph.contains_edge(vertex, 102) + + with self.assertRaises(ValueError): + undirected_graph.contains_edge(103, 102) + with self.assertRaises(ValueError): + directed_graph.contains_edge(103, 102) + + +if __name__ == "__main__": + unittest.main() diff --git a/graphs/graph_adjacency_matrix.py b/graphs/graph_adjacency_matrix.py new file mode 100644 index 000000000000..4d2e02f737f9 --- /dev/null +++ b/graphs/graph_adjacency_matrix.py @@ -0,0 +1,608 @@ 
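Before the matrix-based twin of the class above begins, a quick contrast in plain data (illustrative only, independent of the committed classes): an adjacency matrix spends O(V^2) memory but answers edge queries in O(1), with a side dictionary mapping vertex labels to row/column indices.

vertex_to_index: dict[str, int] = {"a": 0, "b": 1, "c": 2}
matrix: list[list[int]] = [[0] * 3 for _ in range(3)]


def connect(u: str, v: str) -> None:
    """Set both symmetric cells so the edge is visible from either endpoint."""
    i, j = vertex_to_index[u], vertex_to_index[v]
    matrix[i][j] = matrix[j][i] = 1


connect("a", "b")
assert matrix[vertex_to_index["b"]][vertex_to_index["a"]] == 1  # O(1) edge lookup
assert matrix[vertex_to_index["a"]][vertex_to_index["c"]] == 0  # no a-c edge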
+#!/usr/bin/env python3 +""" +Author: Vikram Nithyanandam + +Description: +The following implementation is a robust unweighted Graph data structure +implemented using an adjacency matrix. This vertices and edges of this graph can be +effectively initialized and modified while storing your chosen generic +value in each vertex. + +Adjacency Matrix: https://mathworld.wolfram.com/AdjacencyMatrix.html + +Potential Future Ideas: +- Add a flag to set edge weights on and set edge weights +- Make edge weights and vertex values customizable to store whatever the client wants +- Support multigraph functionality if the client wants it +""" +from __future__ import annotations + +import random +import unittest +from pprint import pformat +from typing import Generic, TypeVar + +T = TypeVar("T") + + +class GraphAdjacencyMatrix(Generic[T]): + def __init__( + self, vertices: list[T], edges: list[list[T]], directed: bool = True + ) -> None: + """ + Parameters: + - vertices: (list[T]) The list of vertex names the client wants to + pass in. Default is empty. + - edges: (list[list[T]]) The list of edges the client wants to + pass in. Each edge is a 2-element list. Default is empty. + - directed: (bool) Indicates if graph is directed or undirected. + Default is True. + """ + self.directed = directed + self.vertex_to_index: dict[T, int] = {} + self.adj_matrix: list[list[int]] = [] + + # Falsey checks + edges = edges or [] + vertices = vertices or [] + + for vertex in vertices: + self.add_vertex(vertex) + + for edge in edges: + if len(edge) != 2: + msg = f"Invalid input: {edge} must have length 2." + raise ValueError(msg) + self.add_edge(edge[0], edge[1]) + + def add_edge(self, source_vertex: T, destination_vertex: T) -> None: + """ + Creates an edge from source vertex to destination vertex. If any + given vertex doesn't exist or the edge already exists, a ValueError + will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} or " + f"{destination_vertex} does not exist" + ) + raise ValueError(msg) + if self.contains_edge(source_vertex, destination_vertex): + msg = ( + "Incorrect input: The edge already exists between " + f"{source_vertex} and {destination_vertex}" + ) + raise ValueError(msg) + + # Get the indices of the corresponding vertices and set their edge value to 1. + u: int = self.vertex_to_index[source_vertex] + v: int = self.vertex_to_index[destination_vertex] + self.adj_matrix[u][v] = 1 + if not self.directed: + self.adj_matrix[v][u] = 1 + + def remove_edge(self, source_vertex: T, destination_vertex: T) -> None: + """ + Removes the edge between the two vertices. If any given vertex + doesn't exist or the edge does not exist, a ValueError will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} or " + f"{destination_vertex} does not exist" + ) + raise ValueError(msg) + if not self.contains_edge(source_vertex, destination_vertex): + msg = ( + "Incorrect input: The edge does NOT exist between " + f"{source_vertex} and {destination_vertex}" + ) + raise ValueError(msg) + + # Get the indices of the corresponding vertices and set their edge value to 0. 
+ u: int = self.vertex_to_index[source_vertex] + v: int = self.vertex_to_index[destination_vertex] + self.adj_matrix[u][v] = 0 + if not self.directed: + self.adj_matrix[v][u] = 0 + + def add_vertex(self, vertex: T) -> None: + """ + Adds a vertex to the graph. If the given vertex already exists, + a ValueError will be thrown. + """ + if self.contains_vertex(vertex): + msg = f"Incorrect input: {vertex} already exists in this graph." + raise ValueError(msg) + + # build column for vertex + for row in self.adj_matrix: + row.append(0) + + # build row for vertex and update other data structures + self.adj_matrix.append([0] * (len(self.adj_matrix) + 1)) + self.vertex_to_index[vertex] = len(self.adj_matrix) - 1 + + def remove_vertex(self, vertex: T) -> None: + """ + Removes the given vertex from the graph and deletes all incoming and + outgoing edges from the given vertex as well. If the given vertex + does not exist, a ValueError will be thrown. + """ + if not self.contains_vertex(vertex): + msg = f"Incorrect input: {vertex} does not exist in this graph." + raise ValueError(msg) + + # first slide up the rows by deleting the row corresponding to + # the vertex being deleted. + start_index = self.vertex_to_index[vertex] + self.adj_matrix.pop(start_index) + + # next, slide the columns to the left by deleting the values in + # the column corresponding to the vertex being deleted + for lst in self.adj_matrix: + lst.pop(start_index) + + # final clean up + self.vertex_to_index.pop(vertex) + + # decrement indices for vertices shifted by the deleted vertex in the adj matrix + for vertex in self.vertex_to_index: + if self.vertex_to_index[vertex] >= start_index: + self.vertex_to_index[vertex] = self.vertex_to_index[vertex] - 1 + + def contains_vertex(self, vertex: T) -> bool: + """ + Returns True if the graph contains the vertex, False otherwise. + """ + return vertex in self.vertex_to_index + + def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool: + """ + Returns True if the graph contains the edge from the source_vertex to the + destination_vertex, False otherwise. If any given vertex doesn't exist, a + ValueError will be thrown. + """ + if not ( + self.contains_vertex(source_vertex) + and self.contains_vertex(destination_vertex) + ): + msg = ( + f"Incorrect input: Either {source_vertex} " + f"or {destination_vertex} does not exist." + ) + raise ValueError(msg) + + u = self.vertex_to_index[source_vertex] + v = self.vertex_to_index[destination_vertex] + return self.adj_matrix[u][v] == 1 + + def clear_graph(self) -> None: + """ + Clears all vertices and edges. 
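The subtlest step in the class above is remove_vertex: the departing vertex's row and column are popped, and every stored index above it must slide down by one. The same bookkeeping in isolation (plain lists and dicts, not the committed class):

matrix = [
    [0, 1, 0],
    [1, 0, 1],
    [0, 1, 0],
]
vertex_to_index = {"a": 0, "b": 1, "c": 2}

k = vertex_to_index.pop("b")  # drop vertex "b", which lives at index 1
matrix.pop(k)  # remove its row
for row in matrix:
    row.pop(k)  # remove its column from every remaining row
for name, index in vertex_to_index.items():
    if index > k:
        vertex_to_index[name] = index - 1  # indices above k slide down

assert matrix == [[0, 0], [0, 0]]  # the a-b and b-c edges vanished with "b"
assert vertex_to_index == {"a": 0, "c": 1}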
+ """ + self.vertex_to_index = {} + self.adj_matrix = [] + + def __repr__(self) -> str: + first = "Adj Matrix:\n" + pformat(self.adj_matrix) + second = "\nVertex to index mapping:\n" + pformat(self.vertex_to_index) + return first + second + + +class TestGraphMatrix(unittest.TestCase): + def __assert_graph_edge_exists_check( + self, + undirected_graph: GraphAdjacencyMatrix, + directed_graph: GraphAdjacencyMatrix, + edge: list[int], + ) -> None: + self.assertTrue(undirected_graph.contains_edge(edge[0], edge[1])) + self.assertTrue(undirected_graph.contains_edge(edge[1], edge[0])) + self.assertTrue(directed_graph.contains_edge(edge[0], edge[1])) + + def __assert_graph_edge_does_not_exist_check( + self, + undirected_graph: GraphAdjacencyMatrix, + directed_graph: GraphAdjacencyMatrix, + edge: list[int], + ) -> None: + self.assertFalse(undirected_graph.contains_edge(edge[0], edge[1])) + self.assertFalse(undirected_graph.contains_edge(edge[1], edge[0])) + self.assertFalse(directed_graph.contains_edge(edge[0], edge[1])) + + def __assert_graph_vertex_exists_check( + self, + undirected_graph: GraphAdjacencyMatrix, + directed_graph: GraphAdjacencyMatrix, + vertex: int, + ) -> None: + self.assertTrue(undirected_graph.contains_vertex(vertex)) + self.assertTrue(directed_graph.contains_vertex(vertex)) + + def __assert_graph_vertex_does_not_exist_check( + self, + undirected_graph: GraphAdjacencyMatrix, + directed_graph: GraphAdjacencyMatrix, + vertex: int, + ) -> None: + self.assertFalse(undirected_graph.contains_vertex(vertex)) + self.assertFalse(directed_graph.contains_vertex(vertex)) + + def __generate_random_edges( + self, vertices: list[int], edge_pick_count: int + ) -> list[list[int]]: + self.assertTrue(edge_pick_count <= len(vertices)) + + random_source_vertices: list[int] = random.sample( + vertices[0 : int(len(vertices) / 2)], edge_pick_count + ) + random_destination_vertices: list[int] = random.sample( + vertices[int(len(vertices) / 2) :], edge_pick_count + ) + random_edges: list[list[int]] = [] + + for source in random_source_vertices: + for dest in random_destination_vertices: + random_edges.append([source, dest]) + + return random_edges + + def __generate_graphs( + self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int + ) -> tuple[GraphAdjacencyMatrix, GraphAdjacencyMatrix, list[int], list[list[int]]]: + if max_val - min_val + 1 < vertex_count: + raise ValueError( + "Will result in duplicate vertices. 
Either increase " + "range between min_val and max_val or decrease vertex count" + ) + + # generate graph input + random_vertices: list[int] = random.sample( + range(min_val, max_val + 1), vertex_count + ) + random_edges: list[list[int]] = self.__generate_random_edges( + random_vertices, edge_pick_count + ) + + # build graphs + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=random_edges, directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=random_edges, directed=True + ) + + return undirected_graph, directed_graph, random_vertices, random_edges + + def test_init_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # test graph initialization with vertices and edges + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + for edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + + self.assertFalse(undirected_graph.directed) + self.assertTrue(directed_graph.directed) + + def test_contains_vertex(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # Build graphs WITHOUT edges + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=True + ) + + # Test contains_vertex + for num in range(101): + self.assertEqual( + num in random_vertices, undirected_graph.contains_vertex(num) + ) + self.assertEqual( + num in random_vertices, directed_graph.contains_vertex(num) + ) + + def test_add_vertices(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # build empty graphs + undirected_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix( + vertices=[], edges=[], directed=False + ) + directed_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix( + vertices=[], edges=[], directed=True + ) + + # run add_vertex + for num in random_vertices: + undirected_graph.add_vertex(num) + + for num in random_vertices: + directed_graph.add_vertex(num) + + # test add_vertex worked + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + def test_remove_vertices(self) -> None: + random_vertices: list[int] = random.sample(range(101), 20) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=True + ) + + # test remove_vertex worked + for num in random_vertices: + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, num + ) + + undirected_graph.remove_vertex(num) + directed_graph.remove_vertex(num) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, num + ) + + def test_add_and_remove_vertices_repeatedly(self) -> None: + random_vertices1: list[int] = random.sample(range(51), 20) + random_vertices2: list[int] = random.sample(range(51, 101), 20) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices1, edges=[], directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices1, edges=[], directed=True + ) + + # test adding and removing vertices + for i, _ in enumerate(random_vertices1): + 
undirected_graph.add_vertex(random_vertices2[i]) + directed_graph.add_vertex(random_vertices2[i]) + + self.__assert_graph_vertex_exists_check( + undirected_graph, directed_graph, random_vertices2[i] + ) + + undirected_graph.remove_vertex(random_vertices1[i]) + directed_graph.remove_vertex(random_vertices1[i]) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, random_vertices1[i] + ) + + # remove all vertices + for i, _ in enumerate(random_vertices1): + undirected_graph.remove_vertex(random_vertices2[i]) + directed_graph.remove_vertex(random_vertices2[i]) + + self.__assert_graph_vertex_does_not_exist_check( + undirected_graph, directed_graph, random_vertices2[i] + ) + + def test_contains_edge(self) -> None: + # generate graphs and graph input + vertex_count = 20 + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(vertex_count, 0, 100, 4) + + # generate all possible edges for testing + all_possible_edges: list[list[int]] = [] + for i in range(vertex_count - 1): + for j in range(i + 1, vertex_count): + all_possible_edges.append([random_vertices[i], random_vertices[j]]) + all_possible_edges.append([random_vertices[j], random_vertices[i]]) + + # test contains_edge function + for edge in all_possible_edges: + if edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + elif [edge[1], edge[0]] in random_edges: + # since this edge exists for undirected but the reverse may + # not exist for directed + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, [edge[1], edge[0]] + ) + else: + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, edge + ) + + def test_add_edge(self) -> None: + # generate graph input + random_vertices: list[int] = random.sample(range(101), 15) + random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + + # build graphs WITHOUT edges + undirected_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=False + ) + directed_graph = GraphAdjacencyMatrix( + vertices=random_vertices, edges=[], directed=True + ) + + # run and test add_edge + for edge in random_edges: + undirected_graph.add_edge(edge[0], edge[1]) + directed_graph.add_edge(edge[0], edge[1]) + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + + def test_remove_edge(self) -> None: + # generate graph input and graphs + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # run and test remove_edge + for edge in random_edges: + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, edge + ) + undirected_graph.remove_edge(edge[0], edge[1]) + directed_graph.remove_edge(edge[0], edge[1]) + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, edge + ) + + def test_add_and_remove_edges_repeatedly(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + # make some more edge options! 
+ more_random_edges: list[list[int]] = [] + + while len(more_random_edges) != len(random_edges): + edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + for edge in edges: + if len(more_random_edges) == len(random_edges): + break + elif edge not in more_random_edges and edge not in random_edges: + more_random_edges.append(edge) + + for i, _ in enumerate(random_edges): + undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) + directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) + + self.__assert_graph_edge_exists_check( + undirected_graph, directed_graph, more_random_edges[i] + ) + + undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1]) + directed_graph.remove_edge(random_edges[i][0], random_edges[i][1]) + + self.__assert_graph_edge_does_not_exist_check( + undirected_graph, directed_graph, random_edges[i] + ) + + def test_add_vertex_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for vertex in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.add_vertex(vertex) + with self.assertRaises(ValueError): + directed_graph.add_vertex(vertex) + + def test_remove_vertex_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for i in range(101): + if i not in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.remove_vertex(i) + with self.assertRaises(ValueError): + directed_graph.remove_vertex(i) + + def test_add_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for edge in random_edges: + with self.assertRaises(ValueError): + undirected_graph.add_edge(edge[0], edge[1]) + with self.assertRaises(ValueError): + directed_graph.add_edge(edge[0], edge[1]) + + def test_remove_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + more_random_edges: list[list[int]] = [] + + while len(more_random_edges) != len(random_edges): + edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) + for edge in edges: + if len(more_random_edges) == len(random_edges): + break + elif edge not in more_random_edges and edge not in random_edges: + more_random_edges.append(edge) + + for edge in more_random_edges: + with self.assertRaises(ValueError): + undirected_graph.remove_edge(edge[0], edge[1]) + with self.assertRaises(ValueError): + directed_graph.remove_edge(edge[0], edge[1]) + + def test_contains_edge_exception_check(self) -> None: + ( + undirected_graph, + directed_graph, + random_vertices, + random_edges, + ) = self.__generate_graphs(20, 0, 100, 4) + + for vertex in random_vertices: + with self.assertRaises(ValueError): + undirected_graph.contains_edge(vertex, 102) + with self.assertRaises(ValueError): + directed_graph.contains_edge(vertex, 102) + + with self.assertRaises(ValueError): + undirected_graph.contains_edge(103, 102) + with self.assertRaises(ValueError): + directed_graph.contains_edge(103, 102) + + +if __name__ == "__main__": + unittest.main() diff --git a/graphs/graph_matrix.py b/graphs/graph_matrix.py deleted file mode 100644 index 4adc6c0bb93b..000000000000 --- a/graphs/graph_matrix.py +++ /dev/null @@ -1,24 +0,0 @@ -class Graph: - def 
__init__(self, vertex): - self.vertex = vertex - self.graph = [[0] * vertex for i in range(vertex)] - - def add_edge(self, u, v): - self.graph[u - 1][v - 1] = 1 - self.graph[v - 1][u - 1] = 1 - - def show(self): - for i in self.graph: - for j in i: - print(j, end=" ") - print(" ") - - -g = Graph(100) - -g.add_edge(1, 4) -g.add_edge(4, 2) -g.add_edge(4, 5) -g.add_edge(2, 5) -g.add_edge(5, 3) -g.show() diff --git a/graphs/tests/__init__.py b/graphs/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 From 3a9e5fa5ecea0df54ed3ffdcb74f46171199f552 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Fri, 2 Jun 2023 17:14:25 +1200 Subject: [PATCH 0855/1543] Create a Simultaneous Equation Solver Algorithm (#8773) * Added simultaneous_linear_equation_solver.py * Removed Augment class, replaced with recursive functions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed edge cases * Update settings.json --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .vscode/settings.json | 5 + maths/simultaneous_linear_equation_solver.py | 142 +++++++++++++++++++ 2 files changed, 147 insertions(+) create mode 100644 .vscode/settings.json create mode 100644 maths/simultaneous_linear_equation_solver.py diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000000..ef16fa1aa7ac --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "githubPullRequests.ignoredPullRequestBranches": [ + "master" + ] +} diff --git a/maths/simultaneous_linear_equation_solver.py b/maths/simultaneous_linear_equation_solver.py new file mode 100644 index 000000000000..1287b2002d00 --- /dev/null +++ b/maths/simultaneous_linear_equation_solver.py @@ -0,0 +1,142 @@ +""" +https://en.wikipedia.org/wiki/Augmented_matrix + +This algorithm solves simultaneous linear equations of the form +λa + λb + λc + λd + ... = γ as [λ, λ, λ, λ, ..., γ] +Where λ & γ are individual coefficients, the no. of equations = no. 
of coefficients - 1 + +Note in order to work there must exist 1 equation where all instances of λ and γ != 0 +""" + + +def simplify(current_set: list[list]) -> list[list]: + """ + >>> simplify([[1, 2, 3], [4, 5, 6]]) + [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]] + >>> simplify([[5, 2, 5], [5, 1, 10]]) + [[1.0, 0.4, 1.0], [0.0, 0.2, -1.0]] + """ + # Divide each row by magnitude of first term --> creates 'unit' matrix + duplicate_set = current_set.copy() + for row_index, row in enumerate(duplicate_set): + magnitude = row[0] + for column_index, column in enumerate(row): + if magnitude == 0: + current_set[row_index][column_index] = column + continue + current_set[row_index][column_index] = column / magnitude + # Subtract to cancel term + first_row = current_set[0] + final_set = [first_row] + current_set = current_set[1::] + for row in current_set: + temp_row = [] + # If first term is 0, it is already in form we want, so we preserve it + if row[0] == 0: + final_set.append(row) + continue + for column_index in range(len(row)): + temp_row.append(first_row[column_index] - row[column_index]) + final_set.append(temp_row) + # Create next recursion iteration set + if len(final_set[0]) != 3: + current_first_row = final_set[0] + current_first_column = [] + next_iteration = [] + for row in final_set[1::]: + current_first_column.append(row[0]) + next_iteration.append(row[1::]) + resultant = simplify(next_iteration) + for i in range(len(resultant)): + resultant[i].insert(0, current_first_column[i]) + resultant.insert(0, current_first_row) + final_set = resultant + return final_set + + +def solve_simultaneous(equations: list[list]) -> list: + """ + >>> solve_simultaneous([[1, 2, 3],[4, 5, 6]]) + [-1.0, 2.0] + >>> solve_simultaneous([[0, -3, 1, 7],[3, 2, -1, 11],[5, 1, -2, 12]]) + [6.4, 1.2, 10.6] + >>> solve_simultaneous([]) + Traceback (most recent call last): + ... + IndexError: solve_simultaneous() requires n lists of length n+1 + >>> solve_simultaneous([[1, 2, 3],[1, 2]]) + Traceback (most recent call last): + ... + IndexError: solve_simultaneous() requires n lists of length n+1 + >>> solve_simultaneous([[1, 2, 3],["a", 7, 8]]) + Traceback (most recent call last): + ... + ValueError: solve_simultaneous() requires lists of integers + >>> solve_simultaneous([[0, 2, 3],[4, 0, 6]]) + Traceback (most recent call last): + ... 
+ ValueError: solve_simultaneous() requires at least 1 full equation + """ + if len(equations) == 0: + raise IndexError("solve_simultaneous() requires n lists of length n+1") + _length = len(equations) + 1 + if any(len(item) != _length for item in equations): + raise IndexError("solve_simultaneous() requires n lists of length n+1") + for row in equations: + if any(not isinstance(column, (int, float)) for column in row): + raise ValueError("solve_simultaneous() requires lists of integers") + if len(equations) == 1: + return [equations[0][-1] / equations[0][0]] + data_set = equations.copy() + if any(0 in row for row in data_set): + temp_data = data_set.copy() + full_row = [] + for row_index, row in enumerate(temp_data): + if 0 not in row: + full_row = data_set.pop(row_index) + break + if not full_row: + raise ValueError("solve_simultaneous() requires at least 1 full equation") + data_set.insert(0, full_row) + useable_form = data_set.copy() + simplified = simplify(useable_form) + simplified = simplified[::-1] + solutions: list = [] + for row in simplified: + current_solution = row[-1] + if not solutions: + if row[-2] == 0: + solutions.append(0) + continue + solutions.append(current_solution / row[-2]) + continue + temp_row = row.copy()[: len(row) - 1 :] + while temp_row[0] == 0: + temp_row.pop(0) + if len(temp_row) == 0: + solutions.append(0) + continue + temp_row = temp_row[1::] + temp_row = temp_row[::-1] + for column_index, column in enumerate(temp_row): + current_solution -= column * solutions[column_index] + solutions.append(current_solution) + final = [] + for item in solutions: + final.append(float(round(item, 5))) + return final[::-1] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + eq = [ + [2, 1, 1, 1, 1, 4], + [1, 2, 1, 1, 1, 5], + [1, 1, 2, 1, 1, 6], + [1, 1, 1, 2, 1, 7], + [1, 1, 1, 1, 2, 8], + ] + print(solve_simultaneous(eq)) + print(solve_simultaneous([[4, 2]])) From 80d95fccc390d366a9f617d8628a546a7be7b2a3 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 3 Jun 2023 17:16:33 +0100 Subject: [PATCH 0856/1543] Pytest locally fails due to API_KEY env variable (#8738) * fix: Pytest locally fails due to API_KEY env variable (#8737) * chore: Fix ruff errors --- web_programming/currency_converter.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py index 69f2a2c4d421..3bbcafa8f89b 100644 --- a/web_programming/currency_converter.py +++ b/web_programming/currency_converter.py @@ -8,13 +8,7 @@ import requests URL_BASE = "https://www.amdoren.com/api/currency.php" -TESTING = os.getenv("CI", "") -API_KEY = os.getenv("AMDOREN_API_KEY", "") -if not API_KEY and not TESTING: - raise KeyError( - "API key must be provided in the 'AMDOREN_API_KEY' environment variable." 
- ) # Currency and their description list_of_currencies = """ @@ -175,20 +169,31 @@ def convert_currency( - from_: str = "USD", to: str = "INR", amount: float = 1.0, api_key: str = API_KEY + from_: str = "USD", to: str = "INR", amount: float = 1.0, api_key: str = "" ) -> str: """https://www.amdoren.com/currency-api/""" + # Instead of manually generating parameters params = locals() + # from is a reserved keyword params["from"] = params.pop("from_") res = requests.get(URL_BASE, params=params).json() return str(res["amount"]) if res["error"] == 0 else res["error_message"] if __name__ == "__main__": + TESTING = os.getenv("CI", "") + API_KEY = os.getenv("AMDOREN_API_KEY", "") + + if not API_KEY and not TESTING: + raise KeyError( + "API key must be provided in the 'AMDOREN_API_KEY' environment variable." + ) + print( convert_currency( input("Enter from currency: ").strip(), input("Enter to currency: ").strip(), float(input("Enter the amount: ").strip()), + API_KEY, ) ) From fa12b9a286bf42d250b30a772e8f226dc14953f4 Mon Sep 17 00:00:00 2001 From: ShivaDahal99 <130563462+ShivaDahal99@users.noreply.github.com> Date: Wed, 7 Jun 2023 23:47:27 +0200 Subject: [PATCH 0857/1543] Speed of sound (#8803) * Create TestShiva * Delete TestShiva * Add speed of sound * Update physics/speed_of_sound.py Co-authored-by: Christian Clauss * Update physics/speed_of_sound.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update speed_of_sound.py * Update speed_of_sound.py --------- Co-authored-by: jlhuhn <134317018+jlhuhn@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- physics/speed_of_sound.py | 52 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 physics/speed_of_sound.py diff --git a/physics/speed_of_sound.py b/physics/speed_of_sound.py new file mode 100644 index 000000000000..a4658366a36c --- /dev/null +++ b/physics/speed_of_sound.py @@ -0,0 +1,52 @@ +""" +Title : Calculating the speed of sound + +Description : + The speed of sound (c) is the speed that a sound wave travels + per unit time (m/s). During propagation, the sound wave propagates + through an elastic medium. Its SI unit is meter per second (m/s). + + Only longitudinal waves can propagate in liquids and gas other then + solid where they also travel in transverse wave. The following Algo- + rithem calculates the speed of sound in fluid depanding on the bulk + module and the density of the fluid. 
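A worked number for the relation just described (an annotation; the helper name and rounding below are illustrative, not the committed API):

def sound_speed(bulk_modulus_pa: float, density_kg_m3: float) -> float:
    # c = sqrt(K_s / rho): stiffer fluids are faster, denser fluids are slower
    return (bulk_modulus_pa / density_kg_m3) ** 0.5


water = sound_speed(2.15e9, 998)  # K_s ~ 2.15 GPa, rho ~ 998 kg/m^3 (water at 20 C)
assert round(water) == 1468  # roughly 1.47 km/s, consistent with the doctest further down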
+ + Equation for calculating speed od sound in fluid: + c_fluid = (K_s*p)**0.5 + + c_fluid: speed of sound in fluid + K_s: isentropic bulk modulus + p: density of fluid + + + +Source : https://en.wikipedia.org/wiki/Speed_of_sound +""" + + +def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float: + """ + This method calculates the speed of sound in fluid - + This is calculated from the other two provided values + Examples: + Example 1 --> Water 20°C: bulk_moduls= 2.15MPa, density=998kg/m³ + Example 2 --> Murcery 20°: bulk_moduls= 28.5MPa, density=13600kg/m³ + + >>> speed_of_sound_in_a_fluid(bulk_modulus=2.15*10**9, density=998) + 1467.7563207952705 + >>> speed_of_sound_in_a_fluid(bulk_modulus=28.5*10**9, density=13600) + 1447.614670861731 + """ + + if density <= 0: + raise ValueError("Impossible fluid density") + if bulk_modulus <= 0: + raise ValueError("Impossible bulk modulus") + + return (bulk_modulus / density) ** 0.5 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 7775de0ef779a28cec7d9f28af97a89b2bc29d7e Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 8 Jun 2023 13:40:38 +0100 Subject: [PATCH 0858/1543] Create number container system algorithm (#8808) * feat: Create number container system algorithm * updating DIRECTORY.md * chore: Fix failing tests * Update other/number_container_system.py Co-authored-by: Christian Clauss * Update other/number_container_system.py Co-authored-by: Christian Clauss * Update other/number_container_system.py Co-authored-by: Christian Clauss * chore: Add more tests * chore: Create binary_search_insert failing test * type: Update typehints to accept str, list and range --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 6 +- other/number_container_system.py | 180 +++++++++++++++++++++++++++++++ 2 files changed, 185 insertions(+), 1 deletion(-) create mode 100644 other/number_container_system.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 231b0e2f1d2f..6dac4a9a5783 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -419,8 +419,9 @@ * [Frequent Pattern Graph Miner](graphs/frequent_pattern_graph_miner.py) * [G Topological Sort](graphs/g_topological_sort.py) * [Gale Shapley Bigraph](graphs/gale_shapley_bigraph.py) + * [Graph Adjacency List](graphs/graph_adjacency_list.py) + * [Graph Adjacency Matrix](graphs/graph_adjacency_matrix.py) * [Graph List](graphs/graph_list.py) - * [Graph Matrix](graphs/graph_matrix.py) * [Graphs Floyd Warshall](graphs/graphs_floyd_warshall.py) * [Greedy Best First](graphs/greedy_best_first.py) * [Greedy Min Vertex Cover](graphs/greedy_min_vertex_cover.py) @@ -479,6 +480,7 @@ * [Lib](linear_algebra/src/lib.py) * [Polynom For Points](linear_algebra/src/polynom_for_points.py) * [Power Iteration](linear_algebra/src/power_iteration.py) + * [Rank Of Matrix](linear_algebra/src/rank_of_matrix.py) * [Rayleigh Quotient](linear_algebra/src/rayleigh_quotient.py) * [Schur Complement](linear_algebra/src/schur_complement.py) * [Test Linear Algebra](linear_algebra/src/test_linear_algebra.py) @@ -651,6 +653,7 @@ * [Sigmoid Linear Unit](maths/sigmoid_linear_unit.py) * [Signum](maths/signum.py) * [Simpson Rule](maths/simpson_rule.py) + * [Simultaneous Linear Equation Solver](maths/simultaneous_linear_equation_solver.py) * [Sin](maths/sin.py) * [Sock Merchant](maths/sock_merchant.py) * [Softmax](maths/softmax.py) @@ -726,6 +729,7 @@ * [Maximum Subarray](other/maximum_subarray.py) * [Maximum 
Subsequence](other/maximum_subsequence.py) * [Nested Brackets](other/nested_brackets.py) + * [Number Container System](other/number_container_system.py) * [Password](other/password.py) * [Quine](other/quine.py) * [Scoring Algorithm](other/scoring_algorithm.py) diff --git a/other/number_container_system.py b/other/number_container_system.py new file mode 100644 index 000000000000..f547bc8a229e --- /dev/null +++ b/other/number_container_system.py @@ -0,0 +1,180 @@ +""" +A number container system that uses binary search to delete and insert values into +arrays with O(n logn) write times and O(1) read times. + +This container system holds integers at indexes. + +Further explained in this leetcode problem +> https://leetcode.com/problems/minimum-cost-tree-from-leaf-values +""" + + +class NumberContainer: + def __init__(self) -> None: + # numbermap keys are the number and its values are lists of indexes sorted + # in ascending order + self.numbermap: dict[int, list[int]] = {} + # indexmap keys are an index and it's values are the number at that index + self.indexmap: dict[int, int] = {} + + def binary_search_delete(self, array: list | str | range, item: int) -> list[int]: + """ + Removes the item from the sorted array and returns + the new array. + + >>> NumberContainer().binary_search_delete([1,2,3], 2) + [1, 3] + >>> NumberContainer().binary_search_delete([0, 0, 0], 0) + [0, 0] + >>> NumberContainer().binary_search_delete([-1, -1, -1], -1) + [-1, -1] + >>> NumberContainer().binary_search_delete([-1, 0], 0) + [-1] + >>> NumberContainer().binary_search_delete([-1, 0], -1) + [0] + >>> NumberContainer().binary_search_delete(range(7), 3) + [0, 1, 2, 4, 5, 6] + >>> NumberContainer().binary_search_delete([1.1, 2.2, 3.3], 2.2) + [1.1, 3.3] + >>> NumberContainer().binary_search_delete("abcde", "c") + ['a', 'b', 'd', 'e'] + >>> NumberContainer().binary_search_delete([0, -1, 2, 4], 0) + Traceback (most recent call last): + ... + ValueError: Either the item is not in the array or the array was unsorted + >>> NumberContainer().binary_search_delete([2, 0, 4, -1, 11], -1) + Traceback (most recent call last): + ... + ValueError: Either the item is not in the array or the array was unsorted + >>> NumberContainer().binary_search_delete(125, 1) + Traceback (most recent call last): + ... + TypeError: binary_search_delete() only accepts either a list, range or str + """ + if isinstance(array, (range, str)): + array = list(array) + elif not isinstance(array, list): + raise TypeError( + "binary_search_delete() only accepts either a list, range or str" + ) + + low = 0 + high = len(array) - 1 + + while low <= high: + mid = (low + high) // 2 + if array[mid] == item: + array.pop(mid) + return array + elif array[mid] < item: + low = mid + 1 + else: + high = mid - 1 + raise ValueError( + "Either the item is not in the array or the array was unsorted" + ) + + def binary_search_insert(self, array: list | str | range, index: int) -> list[int]: + """ + Inserts the index into the sorted array + at the correct position. 
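The standard library offers essentially the same position search this method implements by hand; a short aside for comparison (bisect is an alternative shown for illustration, not what the container uses):

import bisect

sorted_indexes = [2, 5, 9]
bisect.insort(sorted_indexes, 7)  # insert while keeping the list sorted
assert sorted_indexes == [2, 5, 7, 9]
assert bisect.bisect_left(sorted_indexes, 5) == 1  # leftmost slot for 5, found in O(log n)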
+ + >>> NumberContainer().binary_search_insert([1,2,3], 2) + [1, 2, 2, 3] + >>> NumberContainer().binary_search_insert([0,1,3], 2) + [0, 1, 2, 3] + >>> NumberContainer().binary_search_insert([-5, -3, 0, 0, 11, 103], 51) + [-5, -3, 0, 0, 11, 51, 103] + >>> NumberContainer().binary_search_insert([-5, -3, 0, 0, 11, 100, 103], 101) + [-5, -3, 0, 0, 11, 100, 101, 103] + >>> NumberContainer().binary_search_insert(range(10), 4) + [0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9] + >>> NumberContainer().binary_search_insert("abd", "c") + ['a', 'b', 'c', 'd'] + >>> NumberContainer().binary_search_insert(131, 23) + Traceback (most recent call last): + ... + TypeError: binary_search_insert() only accepts either a list, range or str + """ + if isinstance(array, (range, str)): + array = list(array) + elif not isinstance(array, list): + raise TypeError( + "binary_search_insert() only accepts either a list, range or str" + ) + + low = 0 + high = len(array) - 1 + + while low <= high: + mid = (low + high) // 2 + if array[mid] == index: + # If the item already exists in the array, + # insert it after the existing item + array.insert(mid + 1, index) + return array + elif array[mid] < index: + low = mid + 1 + else: + high = mid - 1 + + # If the item doesn't exist in the array, insert it at the appropriate position + array.insert(low, index) + return array + + def change(self, index: int, number: int) -> None: + """ + Changes (sets) the index as number + + >>> cont = NumberContainer() + >>> cont.change(0, 10) + >>> cont.change(0, 20) + >>> cont.change(-13, 20) + >>> cont.change(-100030, 20032903290) + """ + # Remove previous index + if index in self.indexmap: + n = self.indexmap[index] + if len(self.numbermap[n]) == 1: + del self.numbermap[n] + else: + self.numbermap[n] = self.binary_search_delete(self.numbermap[n], index) + + # Set new index + self.indexmap[index] = number + + # Number not seen before or empty so insert number value + if number not in self.numbermap: + self.numbermap[number] = [index] + + # Here we need to perform a binary search insertion in order to insert + # The item in the correct place + else: + self.numbermap[number] = self.binary_search_insert( + self.numbermap[number], index + ) + + def find(self, number: int) -> int: + """ + Returns the smallest index where the number is. 
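In miniature, these are the two dictionaries that change() and find() cooperate through (the values below are made up purely for illustration):

# indexmap answers "which number sits at this index?"; numbermap keeps, per
# number, an ascending list of indexes so the smallest is always at position 0.
indexmap: dict[int, int] = {0: 10, 3: 10, 7: 20}
numbermap: dict[int, list[int]] = {10: [0, 3], 20: [7]}


def smallest_index(number: int) -> int:
    """Mirror of find(): -1 when the number is stored nowhere."""
    return numbermap.get(number, [-1])[0]


assert smallest_index(10) == 0
assert smallest_index(20) == 7
assert smallest_index(99) == -1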
+ + >>> cont = NumberContainer() + >>> cont.find(10) + -1 + >>> cont.change(0, 10) + >>> cont.find(10) + 0 + >>> cont.change(0, 20) + >>> cont.find(10) + -1 + >>> cont.find(20) + 0 + """ + # Simply return the 0th index (smallest) of the indexes found (or -1) + return self.numbermap.get(number, [-1])[0] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 9c9da8ebf1d35ae40ac5438c05cc273f7c6d4473 Mon Sep 17 00:00:00 2001 From: Jan Wojciechowski <96974442+yanvoi@users.noreply.github.com> Date: Fri, 9 Jun 2023 11:06:37 +0200 Subject: [PATCH 0859/1543] Improve readability of ciphers/mixed_keyword_cypher.py (#8626) * refactored the code * the code will now pass the test * looked more into it and fixed the logic * made the code easier to read, added comments and fixed the logic * got rid of redundant code + plaintext can contain chars that are not in the alphabet * fixed the reduntant conversion of ascii_uppercase to a list * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * keyword and plaintext won't have default values * ran the ruff command * Update linear_discriminant_analysis.py and rsa_cipher.py (#8680) * Update rsa_cipher.py by replacing %s with {} * Update rsa_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update machine_learning/linear_discriminant_analysis.py Co-authored-by: Christian Clauss * Update linear_discriminant_analysis.py * updated --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss * fixed some difficulties * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added comments, made printing mapping optional, added 1 test * shortened the line that was too long * Update ciphers/mixed_keyword_cypher.py Co-authored-by: Tianyi Zheng --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Tianyi Zheng --- ciphers/mixed_keyword_cypher.py | 100 +++++++++++++++++--------------- 1 file changed, 53 insertions(+), 47 deletions(-) diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index 93a0e3acb7b1..b984808fced6 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -1,7 +1,11 @@ -def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: - """ +from string import ascii_uppercase + - For key:hello +def mixed_keyword( + keyword: str, plaintext: str, verbose: bool = False, alphabet: str = ascii_uppercase +) -> str: + """ + For keyword: hello H E L O A B C D @@ -12,58 +16,60 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: Y Z and map vertically - >>> mixed_keyword("college", "UNIVERSITY") # doctest: 
+NORMALIZE_WHITESPACE + >>> mixed_keyword("college", "UNIVERSITY", True) # doctest: +NORMALIZE_WHITESPACE {'A': 'C', 'B': 'A', 'C': 'I', 'D': 'P', 'E': 'U', 'F': 'Z', 'G': 'O', 'H': 'B', 'I': 'J', 'J': 'Q', 'K': 'V', 'L': 'L', 'M': 'D', 'N': 'K', 'O': 'R', 'P': 'W', 'Q': 'E', 'R': 'F', 'S': 'M', 'T': 'S', 'U': 'X', 'V': 'G', 'W': 'H', 'X': 'N', 'Y': 'T', 'Z': 'Y'} 'XKJGUFMJST' + + >>> mixed_keyword("college", "UNIVERSITY", False) # doctest: +NORMALIZE_WHITESPACE + 'XKJGUFMJST' """ - key = key.upper() - pt = pt.upper() - temp = [] - for i in key: - if i not in temp: - temp.append(i) - len_temp = len(temp) - # print(temp) - alpha = [] - modalpha = [] - for j in range(65, 91): - t = chr(j) - alpha.append(t) - if t not in temp: - temp.append(t) - # print(temp) - r = int(26 / 4) - # print(r) - k = 0 - for _ in range(r): - s = [] - for _ in range(len_temp): - s.append(temp[k]) - if k >= 25: - break - k += 1 - modalpha.append(s) - # print(modalpha) - d = {} - j = 0 - k = 0 - for j in range(len_temp): - for m in modalpha: - if not len(m) - 1 >= j: - break - d[alpha[k]] = m[j] - if not k < 25: + keyword = keyword.upper() + plaintext = plaintext.upper() + alphabet_set = set(alphabet) + + # create a list of unique characters in the keyword - their order matters + # it determines how we will map plaintext characters to the ciphertext + unique_chars = [] + for char in keyword: + if char in alphabet_set and char not in unique_chars: + unique_chars.append(char) + # the number of those unique characters will determine the number of rows + num_unique_chars_in_keyword = len(unique_chars) + + # create a shifted version of the alphabet + shifted_alphabet = unique_chars + [ + char for char in alphabet if char not in unique_chars + ] + + # create a modified alphabet by splitting the shifted alphabet into rows + modified_alphabet = [ + shifted_alphabet[k : k + num_unique_chars_in_keyword] + for k in range(0, 26, num_unique_chars_in_keyword) + ] + + # map the alphabet characters to the modified alphabet characters + # going 'vertically' through the modified alphabet - consider columns first + mapping = {} + letter_index = 0 + for column in range(num_unique_chars_in_keyword): + for row in modified_alphabet: + # if current row (the last one) is too short, break out of loop + if len(row) <= column: break - k += 1 - print(d) - cypher = "" - for i in pt: - cypher += d[i] - return cypher + + # map current letter to letter in modified alphabet + mapping[alphabet[letter_index]] = row[column] + letter_index += 1 + + if verbose: + print(mapping) + # create the encrypted text by mapping the plaintext to the modified alphabet + return "".join(mapping[char] if char in mapping else char for char in plaintext) if __name__ == "__main__": + # example use print(mixed_keyword("college", "UNIVERSITY")) From daa0c8f3d340485ce295570e6d76b38891e371bd Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 10 Jun 2023 13:21:49 +0100 Subject: [PATCH 0860/1543] Create count negative numbers in matrix algorithm (#8813) * updating DIRECTORY.md * feat: Count negative numbers in sorted matrix * updating DIRECTORY.md * chore: Fix pre-commit * refactor: Combine functions into iteration * style: Reformat reference * feat: Add timings of each implementation * chore: Fix problems with algorithms-keeper bot * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * test: Remove doctest from benchmark function * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian 
Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * Update matrix/count_negative_numbers_in_sorted_matrix.py Co-authored-by: Christian Clauss * refactor: Use sum instead of large iteration * refactor: Use len not sum * Update count_negative_numbers_in_sorted_matrix.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 2 + ...count_negative_numbers_in_sorted_matrix.py | 151 ++++++++++++++++++ 2 files changed, 153 insertions(+) create mode 100644 matrix/count_negative_numbers_in_sorted_matrix.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 6dac4a9a5783..8511c261a3d2 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -679,6 +679,7 @@ ## Matrix * [Binary Search Matrix](matrix/binary_search_matrix.py) * [Count Islands In Matrix](matrix/count_islands_in_matrix.py) + * [Count Negative Numbers In Sorted Matrix](matrix/count_negative_numbers_in_sorted_matrix.py) * [Count Paths](matrix/count_paths.py) * [Cramers Rule 2X2](matrix/cramers_rule_2x2.py) * [Inverse Of Matrix](matrix/inverse_of_matrix.py) @@ -753,6 +754,7 @@ * [Potential Energy](physics/potential_energy.py) * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) + * [Speed Of Sound](physics/speed_of_sound.py) ## Project Euler * Problem 001 diff --git a/matrix/count_negative_numbers_in_sorted_matrix.py b/matrix/count_negative_numbers_in_sorted_matrix.py new file mode 100644 index 000000000000..2799ff3b45fe --- /dev/null +++ b/matrix/count_negative_numbers_in_sorted_matrix.py @@ -0,0 +1,151 @@ +""" +Given an matrix of numbers in which all rows and all columns are sorted in decreasing +order, return the number of negative numbers in grid. + +Reference: https://leetcode.com/problems/count-negative-numbers-in-a-sorted-matrix +""" + + +def generate_large_matrix() -> list[list[int]]: + """ + >>> generate_large_matrix() # doctest: +ELLIPSIS + [[1000, ..., -999], [999, ..., -1001], ..., [2, ..., -1998]] + """ + return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)] + + +grid = generate_large_matrix() +test_grids = ( + [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], + [[3, 2], [1, 0]], + [[7, 7, 6]], + [[7, 7, 6], [-1, -2, -3]], + grid, +) + + +def validate_grid(grid: list[list[int]]) -> None: + """ + Validate that the rows and columns of the grid is sorted in decreasing order. + >>> for grid in test_grids: + ... 
validate_grid(grid) + """ + assert all(row == sorted(row, reverse=True) for row in grid) + assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid)) + + +def find_negative_index(array: list[int]) -> int: + """ + Find the smallest negative index + + >>> find_negative_index([0,0,0,0]) + 4 + >>> find_negative_index([4,3,2,-1]) + 3 + >>> find_negative_index([1,0,-1,-10]) + 2 + >>> find_negative_index([0,0,0,-1]) + 3 + >>> find_negative_index([11,8,7,-3,-5,-9]) + 3 + >>> find_negative_index([-1,-1,-2,-3]) + 0 + >>> find_negative_index([5,1,0]) + 3 + >>> find_negative_index([-5,-5,-5]) + 0 + >>> find_negative_index([0]) + 1 + >>> find_negative_index([]) + 0 + """ + left = 0 + right = len(array) - 1 + + # Edge cases such as no values or all numbers are negative. + if not array or array[0] < 0: + return 0 + + while right + 1 > left: + mid = (left + right) // 2 + num = array[mid] + + # Num must be negative and the index must be greater than or equal to 0. + if num < 0 and array[mid - 1] >= 0: + return mid + + if num >= 0: + left = mid + 1 + else: + right = mid - 1 + # No negative numbers so return the last index of the array + 1 which is the length. + return len(array) + + +def count_negatives_binary_search(grid: list[list[int]]) -> int: + """ + An O(m logn) solution that uses binary search in order to find the boundary between + positive and negative numbers + + >>> [count_negatives_binary_search(grid) for grid in test_grids] + [8, 0, 0, 3, 1498500] + """ + total = 0 + bound = len(grid[0]) + + for i in range(len(grid)): + bound = find_negative_index(grid[i][:bound]) + total += bound + return (len(grid) * len(grid[0])) - total + + +def count_negatives_brute_force(grid: list[list[int]]) -> int: + """ + This solution is O(n^2) because it iterates through every column and row. + + >>> [count_negatives_brute_force(grid) for grid in test_grids] + [8, 0, 0, 3, 1498500] + """ + return len([number for row in grid for number in row if number < 0]) + + +def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int: + """ + Similar to the brute force solution above but uses break in order to reduce the + number of iterations. 
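A further refinement the file stops short of is the well-known O(m + n) staircase walk over the sorted grid. As a hedged sketch (not part of this patch, function name made up), it tracks the boundary column of the first negative entry while moving down the rows, since that boundary can only move left:

def count_negatives_staircase(grid: list[list[int]]) -> int:
    # Rows and columns are sorted in decreasing order, so the index of the first
    # negative entry never increases from one row to the next.
    count = 0
    boundary = len(grid[0])  # index of the first negative entry in the current row
    for row in grid:
        while boundary > 0 and row[boundary - 1] < 0:
            boundary -= 1
        count += len(row) - boundary
    return count

assert count_negatives_staircase(
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
) == 8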
+ + >>> [count_negatives_brute_force_with_break(grid) for grid in test_grids] + [8, 0, 0, 3, 1498500] + """ + total = 0 + for row in grid: + for i, number in enumerate(row): + if number < 0: + total += len(row) - i + break + return total + + +def benchmark() -> None: + """Benchmark our functions next to each other""" + from timeit import timeit + + print("Running benchmarks") + setup = ( + "from __main__ import count_negatives_binary_search, " + "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" + ) + for func in ( + "count_negatives_binary_search", # took 0.7727 seconds + "count_negatives_brute_force_with_break", # took 4.6505 seconds + "count_negatives_brute_force", # took 12.8160 seconds + ): + time = timeit(f"{func}(grid=grid)", setup=setup, number=500) + print(f"{func}() took {time:0.4f} seconds") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + benchmark() From 46379861257d43bb7140d261094bf17dc414950f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 13 Jun 2023 00:09:33 +0200 Subject: [PATCH 0861/1543] [pre-commit.ci] pre-commit autoupdate (#8817) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.270 → v0.0.272](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.270...v0.0.272) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4c70ae219f74..1d4b73681108 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.270 + rev: v0.0.272 hooks: - id: ruff From e6f89a6b89941ffed911e96362be3611a45420e7 Mon Sep 17 00:00:00 2001 From: Ilkin Mengusoglu <113149540+imengus@users.noreply.github.com> Date: Sun, 18 Jun 2023 17:00:02 +0100 Subject: [PATCH 0862/1543] Simplex algorithm (#8825) * feat: added simplex.py * added docstrings * Update linear_programming/simplex.py Co-authored-by: Caeden Perelli-Harris * Update linear_programming/simplex.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_programming/simplex.py Co-authored-by: Caeden Perelli-Harris * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ruff fix Co-authored by: CaedenPH * removed README to add in separate PR * Update linear_programming/simplex.py Co-authored-by: Tianyi Zheng * Update linear_programming/simplex.py Co-authored-by: Tianyi Zheng * fix class docstring * add comments --------- Co-authored-by: Caeden Perelli-Harris Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- linear_programming/simplex.py | 311 ++++++++++++++++++++++++++++++++++ 1 file changed, 311 insertions(+) create mode 100644 linear_programming/simplex.py diff --git a/linear_programming/simplex.py b/linear_programming/simplex.py new file mode 100644 index 000000000000..ba64add40b5f --- /dev/null +++ b/linear_programming/simplex.py @@ -0,0 +1,311 @@ +""" +Python implementation of the simplex algorithm for solving linear programs in +tabular form with +- `>=`, `<=`, and `=` constraints and +- each variable 
`x1, x2, ...>= 0`. + +See https://gist.github.com/imengus/f9619a568f7da5bc74eaf20169a24d98 for how to +convert linear programs to simplex tableaus, and the steps taken in the simplex +algorithm. + +Resources: +https://en.wikipedia.org/wiki/Simplex_algorithm +https://tinyurl.com/simplex4beginners +""" +from typing import Any + +import numpy as np + + +class Tableau: + """Operate on simplex tableaus + + >>> t = Tableau(np.array([[-1,-1,0,0,-1],[1,3,1,0,4],[3,1,0,1,4.]]), 2) + Traceback (most recent call last): + ... + ValueError: RHS must be > 0 + """ + + def __init__(self, tableau: np.ndarray, n_vars: int) -> None: + # Check if RHS is negative + if np.any(tableau[:, -1], where=tableau[:, -1] < 0): + raise ValueError("RHS must be > 0") + + self.tableau = tableau + self.n_rows, _ = tableau.shape + + # Number of decision variables x1, x2, x3... + self.n_vars = n_vars + + # Number of artificial variables to be minimised + self.n_art_vars = len(np.where(tableau[self.n_vars : -1] == -1)[0]) + + # 2 if there are >= or == constraints (nonstandard), 1 otherwise (std) + self.n_stages = (self.n_art_vars > 0) + 1 + + # Number of slack variables added to make inequalities into equalities + self.n_slack = self.n_rows - self.n_stages + + # Objectives for each stage + self.objectives = ["max"] + + # In two stage simplex, first minimise then maximise + if self.n_art_vars: + self.objectives.append("min") + + self.col_titles = [""] + + # Index of current pivot row and column + self.row_idx = None + self.col_idx = None + + # Does objective row only contain (non)-negative values? + self.stop_iter = False + + @staticmethod + def generate_col_titles(*args: int) -> list[str]: + """Generate column titles for tableau of specific dimensions + + >>> Tableau.generate_col_titles(2, 3, 1) + ['x1', 'x2', 's1', 's2', 's3', 'a1', 'RHS'] + + >>> Tableau.generate_col_titles() + Traceback (most recent call last): + ... + ValueError: Must provide n_vars, n_slack, and n_art_vars + >>> Tableau.generate_col_titles(-2, 3, 1) + Traceback (most recent call last): + ... + ValueError: All arguments must be non-negative integers + """ + if len(args) != 3: + raise ValueError("Must provide n_vars, n_slack, and n_art_vars") + + if not all(x >= 0 and isinstance(x, int) for x in args): + raise ValueError("All arguments must be non-negative integers") + + # decision | slack | artificial + string_starts = ["x", "s", "a"] + titles = [] + for i in range(3): + for j in range(args[i]): + titles.append(string_starts[i] + str(j + 1)) + titles.append("RHS") + return titles + + def find_pivot(self, tableau: np.ndarray) -> tuple[Any, Any]: + """Finds the pivot row and column. 
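As a quick cross-check of the standard example used throughout the doctests below (maximise x1 + x2 subject to x1 + 3*x2 <= 4 and 3*x1 + x2 <= 4), SciPy's linprog reaches the same optimum. This assumes SciPy is installed and is only an illustration, not part of the patch:

from scipy.optimize import linprog

# linprog minimises, so negate the objective in order to maximise x1 + x2
result = linprog(
    c=[-1, -1],
    A_ub=[[1, 3], [3, 1]],
    b_ub=[4, 4],
    bounds=[(0, None), (0, None)],
)
print(result.x)     # roughly [1. 1.]
print(-result.fun)  # roughly 2.0, matching {'P': 2.0, 'x1': 1.0, 'x2': 1.0}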
+ >>> t = Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], [1,2,0,1,7.]]), 2) + >>> t.find_pivot(t.tableau) + (1, 0) + """ + objective = self.objectives[-1] + + # Find entries of highest magnitude in objective rows + sign = (objective == "min") - (objective == "max") + col_idx = np.argmax(sign * tableau[0, : self.n_vars]) + + # Choice is only valid if below 0 for maximise, and above for minimise + if sign * self.tableau[0, col_idx] <= 0: + self.stop_iter = True + return 0, 0 + + # Pivot row is chosen as having the lowest quotient when elements of + # the pivot column divide the right-hand side + + # Slice excluding the objective rows + s = slice(self.n_stages, self.n_rows) + + # RHS + dividend = tableau[s, -1] + + # Elements of pivot column within slice + divisor = tableau[s, col_idx] + + # Array filled with nans + nans = np.full(self.n_rows - self.n_stages, np.nan) + + # If element in pivot column is greater than zeron_stages, return + # quotient or nan otherwise + quotients = np.divide(dividend, divisor, out=nans, where=divisor > 0) + + # Arg of minimum quotient excluding the nan values. n_stages is added + # to compensate for earlier exclusion of objective columns + row_idx = np.nanargmin(quotients) + self.n_stages + return row_idx, col_idx + + def pivot(self, tableau: np.ndarray, row_idx: int, col_idx: int) -> np.ndarray: + """Pivots on value on the intersection of pivot row and column. + + >>> t = Tableau(np.array([[-2,-3,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), 2) + >>> t.pivot(t.tableau, 1, 0).tolist() + ... # doctest: +NORMALIZE_WHITESPACE + [[0.0, 3.0, 2.0, 0.0, 8.0], + [1.0, 3.0, 1.0, 0.0, 4.0], + [0.0, -8.0, -3.0, 1.0, -8.0]] + """ + # Avoid changes to original tableau + piv_row = tableau[row_idx].copy() + + piv_val = piv_row[col_idx] + + # Entry becomes 1 + piv_row *= 1 / piv_val + + # Variable in pivot column becomes basic, ie the only non-zero entry + for idx, coeff in enumerate(tableau[:, col_idx]): + tableau[idx] += -coeff * piv_row + tableau[row_idx] = piv_row + return tableau + + def change_stage(self, tableau: np.ndarray) -> np.ndarray: + """Exits first phase of the two-stage method by deleting artificial + rows and columns, or completes the algorithm if exiting the standard + case. + + >>> t = Tableau(np.array([ + ... [3, 3, -1, -1, 0, 0, 4], + ... [2, 1, 0, 0, 0, 0, 0.], + ... [1, 2, -1, 0, 1, 0, 2], + ... [2, 1, 0, -1, 0, 1, 2] + ... ]), 2) + >>> t.change_stage(t.tableau).tolist() + ... # doctest: +NORMALIZE_WHITESPACE + [[2.0, 1.0, 0.0, 0.0, 0.0, 0.0], + [1.0, 2.0, -1.0, 0.0, 1.0, 2.0], + [2.0, 1.0, 0.0, -1.0, 0.0, 2.0]] + """ + # Objective of original objective row remains + self.objectives.pop() + + if not self.objectives: + return tableau + + # Slice containing ids for artificial columns + s = slice(-self.n_art_vars - 1, -1) + + # Delete the artificial variable columns + tableau = np.delete(tableau, s, axis=1) + + # Delete the objective row of the first stage + tableau = np.delete(tableau, 0, axis=0) + + self.n_stages = 1 + self.n_rows -= 1 + self.n_art_vars = 0 + self.stop_iter = False + return tableau + + def run_simplex(self) -> dict[Any, Any]: + """Operate on tableau until objective function cannot be + improved further. + + # Standard linear program: + Max: x1 + x2 + ST: x1 + 3x2 <= 4 + 3x1 + x2 <= 4 + >>> Tableau(np.array([[-1,-1,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), + ... 2).run_simplex() + {'P': 2.0, 'x1': 1.0, 'x2': 1.0} + + # Optimal tableau input: + >>> Tableau(np.array([ + ... [0, 0, 0.25, 0.25, 2], + ... [0, 1, 0.375, -0.125, 1], + ... 
[1, 0, -0.125, 0.375, 1] + ... ]), 2).run_simplex() + {'P': 2.0, 'x1': 1.0, 'x2': 1.0} + + # Non-standard: >= constraints + Max: 2x1 + 3x2 + x3 + ST: x1 + x2 + x3 <= 40 + 2x1 + x2 - x3 >= 10 + - x2 + x3 >= 10 + >>> Tableau(np.array([ + ... [2, 0, 0, 0, -1, -1, 0, 0, 20], + ... [-2, -3, -1, 0, 0, 0, 0, 0, 0], + ... [1, 1, 1, 1, 0, 0, 0, 0, 40], + ... [2, 1, -1, 0, -1, 0, 1, 0, 10], + ... [0, -1, 1, 0, 0, -1, 0, 1, 10.] + ... ]), 3).run_simplex() + {'P': 70.0, 'x1': 10.0, 'x2': 10.0, 'x3': 20.0} + + # Non standard: minimisation and equalities + Min: x1 + x2 + ST: 2x1 + x2 = 12 + 6x1 + 5x2 = 40 + >>> Tableau(np.array([ + ... [8, 6, 0, -1, 0, -1, 0, 0, 52], + ... [1, 1, 0, 0, 0, 0, 0, 0, 0], + ... [2, 1, 1, 0, 0, 0, 0, 0, 12], + ... [2, 1, 0, -1, 0, 0, 1, 0, 12], + ... [6, 5, 0, 0, 1, 0, 0, 0, 40], + ... [6, 5, 0, 0, 0, -1, 0, 1, 40.] + ... ]), 2).run_simplex() + {'P': 7.0, 'x1': 5.0, 'x2': 2.0} + """ + # Stop simplex algorithm from cycling. + for _ in range(100): + # Completion of each stage removes an objective. If both stages + # are complete, then no objectives are left + if not self.objectives: + self.col_titles = self.generate_col_titles( + self.n_vars, self.n_slack, self.n_art_vars + ) + + # Find the values of each variable at optimal solution + return self.interpret_tableau(self.tableau, self.col_titles) + + row_idx, col_idx = self.find_pivot(self.tableau) + + # If there are no more negative values in objective row + if self.stop_iter: + # Delete artificial variable columns and rows. Update attributes + self.tableau = self.change_stage(self.tableau) + else: + self.tableau = self.pivot(self.tableau, row_idx, col_idx) + return {} + + def interpret_tableau( + self, tableau: np.ndarray, col_titles: list[str] + ) -> dict[str, float]: + """Given the final tableau, add the corresponding values of the basic + decision variables to the `output_dict` + >>> tableau = np.array([ + ... [0,0,0.875,0.375,5], + ... [0,1,0.375,-0.125,1], + ... [1,0,-0.125,0.375,1] + ... 
]) + >>> t = Tableau(tableau, 2) + >>> t.interpret_tableau(tableau, ["x1", "x2", "s1", "s2", "RHS"]) + {'P': 5.0, 'x1': 1.0, 'x2': 1.0} + """ + # P = RHS of final tableau + output_dict = {"P": abs(tableau[0, -1])} + + for i in range(self.n_vars): + # Gives ids of nonzero entries in the ith column + nonzero = np.nonzero(tableau[:, i]) + n_nonzero = len(nonzero[0]) + + # First entry in the nonzero ids + nonzero_rowidx = nonzero[0][0] + nonzero_val = tableau[nonzero_rowidx, i] + + # If there is only one nonzero value in column, which is one + if n_nonzero == nonzero_val == 1: + rhs_val = tableau[nonzero_rowidx, -1] + output_dict[col_titles[i]] = rhs_val + + # Check for basic variables + for title in col_titles: + # Don't add RHS or slack variables to output dict + if title[0] not in "R-s-a": + output_dict.setdefault(title, 0) + return output_dict + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b0f871032e78dd1d2f2214acbaae2fac88fa55b0 Mon Sep 17 00:00:00 2001 From: Frank-1998 <77809242+Frank-1998@users.noreply.github.com> Date: Sun, 18 Jun 2023 10:30:06 -0600 Subject: [PATCH 0863/1543] Fix removing the root node in binary_search_tree.py removes the whole tree (#8752) * fix issue #8715 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/binary_tree/binary_search_tree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index cd88cc10e697..c72195424c7c 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -40,7 +40,7 @@ def __reassign_nodes(self, node: Node, new_children: Node | None) -> None: else: node.parent.left = new_children else: - self.root = None + self.root = new_children def is_right(self, node: Node) -> bool: if node.parent and node.parent.right: From ea6c6056cf2215358834710bf89422310f831178 Mon Sep 17 00:00:00 2001 From: Turro <42980188+smturro2@users.noreply.github.com> Date: Mon, 19 Jun 2023 06:46:29 -0500 Subject: [PATCH 0864/1543] Added apr_interest function to financial (#6025) * Added apr_interest function to financial * Update interest.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update financial/interest.py * float --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- financial/interest.py | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/financial/interest.py b/financial/interest.py index c69c730457d9..33d02e27ccb3 100644 --- a/financial/interest.py +++ b/financial/interest.py @@ -4,7 +4,7 @@ def simple_interest( - principal: float, daily_interest_rate: float, days_between_payments: int + principal: float, daily_interest_rate: float, days_between_payments: float ) -> float: """ >>> simple_interest(18000.0, 0.06, 3) @@ -42,7 +42,7 @@ def simple_interest( def compound_interest( principal: float, nominal_annual_interest_rate_percentage: float, - number_of_compounding_periods: int, + number_of_compounding_periods: float, ) -> float: """ >>> compound_interest(10000.0, 0.05, 3) @@ -77,6 +77,43 @@ def compound_interest( ) +def apr_interest( + principal: float, + nominal_annual_percentage_rate: float, + 
number_of_years: float, +) -> float: + """ + >>> apr_interest(10000.0, 0.05, 3) + 1618.223072263547 + >>> apr_interest(10000.0, 0.05, 1) + 512.6749646744732 + >>> apr_interest(0.5, 0.05, 3) + 0.08091115361317736 + >>> apr_interest(10000.0, 0.06, -4) + Traceback (most recent call last): + ... + ValueError: number_of_years must be > 0 + >>> apr_interest(10000.0, -3.5, 3.0) + Traceback (most recent call last): + ... + ValueError: nominal_annual_percentage_rate must be >= 0 + >>> apr_interest(-5500.0, 0.01, 5) + Traceback (most recent call last): + ... + ValueError: principal must be > 0 + """ + if number_of_years <= 0: + raise ValueError("number_of_years must be > 0") + if nominal_annual_percentage_rate < 0: + raise ValueError("nominal_annual_percentage_rate must be >= 0") + if principal <= 0: + raise ValueError("principal must be > 0") + + return compound_interest( + principal, nominal_annual_percentage_rate / 365, number_of_years * 365 + ) + + if __name__ == "__main__": import doctest From 0dee4a402c85981af0c2d4c53af27a69a7eb91bf Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 15:56:14 +0200 Subject: [PATCH 0865/1543] [pre-commit.ci] pre-commit autoupdate (#8827) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/codespell-project/codespell: v2.2.4 → v2.2.5](https://github.com/codespell-project/codespell/compare/v2.2.4...v2.2.5) - [github.com/tox-dev/pyproject-fmt: 0.11.2 → 0.12.0](https://github.com/tox-dev/pyproject-fmt/compare/0.11.2...0.12.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1d4b73681108..591fd7819a5a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,14 +26,14 @@ repos: - id: black - repo: https://github.com/codespell-project/codespell - rev: v2.2.4 + rev: v2.2.5 hooks: - id: codespell additional_dependencies: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.11.2" + rev: "0.12.0" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 8511c261a3d2..6ec8d5111176 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -486,6 +486,9 @@ * [Test Linear Algebra](linear_algebra/src/test_linear_algebra.py) * [Transformations 2D](linear_algebra/src/transformations_2d.py) +## Linear Programming + * [Simplex](linear_programming/simplex.py) + ## Machine Learning * [Astar](machine_learning/astar.py) * [Data Transformations](machine_learning/data_transformations.py) From 07e68128883b84fb7e342c6bce88863a05fbbf62 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 20 Jun 2023 18:03:16 +0200 Subject: [PATCH 0866/1543] Update .pre-commit-config.yaml (#8828) * Update .pre-commit-config.yaml * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- pyproject.toml | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a526196685f5..1dcce044a313 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,21 
+1,3 @@ -[tool.pytest.ini_options] -markers = [ - "mat_ops: mark a test as utilizing matrix operations.", -] -addopts = [ - "--durations=10", - "--doctest-modules", - "--showlocals", -] - -[tool.coverage.report] -omit = [".env/*"] -sort = "Cover" - -[tool.codespell] -ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar" -skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" - [tool.ruff] ignore = [ # `ruff rule S101` for a description of that rule "ARG001", # Unused function argument `amount` -- FIX ME? @@ -131,3 +113,21 @@ max-args = 10 # default: 5 max-branches = 20 # default: 12 max-returns = 8 # default: 6 max-statements = 88 # default: 50 + +[tool.pytest.ini_options] +markers = [ + "mat_ops: mark a test as utilizing matrix operations.", +] +addopts = [ + "--durations=10", + "--doctest-modules", + "--showlocals", +] + +[tool.coverage.report] +omit = [".env/*"] +sort = "Cover" + +[tool.codespell] +ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar" +skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" From 5b0890bd833eb85c58fae9afc4984d520e7e2ad6 Mon Sep 17 00:00:00 2001 From: "Linus M. Henkel" <86628476+linushenkel@users.noreply.github.com> Date: Thu, 22 Jun 2023 13:49:09 +0200 Subject: [PATCH 0867/1543] Dijkstra algorithm with binary grid (#8802) * Create TestShiva * Delete TestShiva * Implementation of the Dijkstra-Algorithm in a binary grid * Update double_ended_queue.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update least_common_multiple.py * Update sol1.py * Update pyproject.toml * Update pyproject.toml * https://github.com/astral-sh/ruff-pre-commit v0.0.274 --------- Co-authored-by: ShivaDahal99 <130563462+ShivaDahal99@users.noreply.github.com> Co-authored-by: jlhuhn <134317018+jlhuhn@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 +- data_structures/queue/double_ended_queue.py | 4 +- graphs/dijkstra_binary_grid.py | 89 +++++++++++++++++++++ maths/least_common_multiple.py | 6 +- project_euler/problem_054/sol1.py | 18 ++--- pyproject.toml | 1 + 6 files changed, 106 insertions(+), 16 deletions(-) create mode 100644 graphs/dijkstra_binary_grid.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 591fd7819a5a..3d4cc4084ccf 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,8 +15,8 @@ repos: hooks: - id: auto-walrus - - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.272 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.0.274 hooks: - id: ruff diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 637b7f62fd2c..2472371b42fe 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -32,7 +32,7 @@ class Deque: the number of nodes """ - __slots__ = ["_front", "_back", "_len"] + __slots__ = ("_front", "_back", "_len") @dataclass class _Node: @@ -54,7 +54,7 @@ class _Iterator: the current node of the iteration. 
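A brief aside on the `__slots__` changes in this commit, which swap list literals for a tuple and a bare string: both forms are valid in CPython, though the tuple form scales to several attributes and makes the intent explicit. An illustrative sketch, not part of the patch:

class StringSlot:
    __slots__ = "_cur"     # a bare string declares a single slot named "_cur"

class TupleSlot:
    __slots__ = ("_cur",)  # equivalent, and extends naturally to more slots

StringSlot()._cur = 0      # fine: "_cur" is a declared slot
TupleSlot()._cur = 0       # fine as well
try:
    StringSlot().other = 0
except AttributeError:
    pass                   # no __dict__, so only declared slots are assignable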
""" - __slots__ = ["_cur"] + __slots__ = "_cur" def __init__(self, cur: Deque._Node | None) -> None: self._cur = cur diff --git a/graphs/dijkstra_binary_grid.py b/graphs/dijkstra_binary_grid.py new file mode 100644 index 000000000000..c23d8234328a --- /dev/null +++ b/graphs/dijkstra_binary_grid.py @@ -0,0 +1,89 @@ +""" +This script implements the Dijkstra algorithm on a binary grid. +The grid consists of 0s and 1s, where 1 represents +a walkable node and 0 represents an obstacle. +The algorithm finds the shortest path from a start node to a destination node. +Diagonal movement can be allowed or disallowed. +""" + +from heapq import heappop, heappush + +import numpy as np + + +def dijkstra( + grid: np.ndarray, + source: tuple[int, int], + destination: tuple[int, int], + allow_diagonal: bool, +) -> tuple[float | int, list[tuple[int, int]]]: + """ + Implements Dijkstra's algorithm on a binary grid. + + Args: + grid (np.ndarray): A 2D numpy array representing the grid. + 1 represents a walkable node and 0 represents an obstacle. + source (Tuple[int, int]): A tuple representing the start node. + destination (Tuple[int, int]): A tuple representing the + destination node. + allow_diagonal (bool): A boolean determining whether + diagonal movements are allowed. + + Returns: + Tuple[Union[float, int], List[Tuple[int, int]]]: + The shortest distance from the start node to the destination node + and the shortest path as a list of nodes. + + >>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), False) + (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]) + + >>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), True) + (2.0, [(0, 0), (1, 1), (2, 2)]) + + >>> dijkstra(np.array([[1, 1, 1], [0, 0, 1], [0, 1, 1]]), (0, 0), (2, 2), False) + (4.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]) + """ + rows, cols = grid.shape + dx = [-1, 1, 0, 0] + dy = [0, 0, -1, 1] + if allow_diagonal: + dx += [-1, -1, 1, 1] + dy += [-1, 1, -1, 1] + + queue, visited = [(0, source)], set() + matrix = np.full((rows, cols), np.inf) + matrix[source] = 0 + predecessors = np.empty((rows, cols), dtype=object) + predecessors[source] = None + + while queue: + (dist, (x, y)) = heappop(queue) + if (x, y) in visited: + continue + visited.add((x, y)) + + if (x, y) == destination: + path = [] + while (x, y) != source: + path.append((x, y)) + x, y = predecessors[x, y] + path.append(source) # add the source manually + path.reverse() + return matrix[destination], path + + for i in range(len(dx)): + nx, ny = x + dx[i], y + dy[i] + if 0 <= nx < rows and 0 <= ny < cols: + next_node = grid[nx][ny] + if next_node == 1 and matrix[nx, ny] > dist + 1: + heappush(queue, (dist + 1, (nx, ny))) + matrix[nx, ny] = dist + 1 + predecessors[nx, ny] = (x, y) + + return np.inf, [] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/maths/least_common_multiple.py b/maths/least_common_multiple.py index 621d93720c41..10cc63ac7990 100644 --- a/maths/least_common_multiple.py +++ b/maths/least_common_multiple.py @@ -67,7 +67,7 @@ def benchmark(): class TestLeastCommonMultiple(unittest.TestCase): - test_inputs = [ + test_inputs = ( (10, 20), (13, 15), (4, 31), @@ -77,8 +77,8 @@ class TestLeastCommonMultiple(unittest.TestCase): (12, 25), (10, 25), (6, 9), - ] - expected_results = [20, 195, 124, 210, 1462, 60, 300, 50, 18] + ) + expected_results = (20, 195, 124, 210, 1462, 60, 300, 50, 18) def test_lcm_function(self): for i, (first_num, second_num) in enumerate(self.test_inputs): diff --git 
a/project_euler/problem_054/sol1.py b/project_euler/problem_054/sol1.py index 74409f32c712..86dfa5edd2f5 100644 --- a/project_euler/problem_054/sol1.py +++ b/project_euler/problem_054/sol1.py @@ -47,18 +47,18 @@ class PokerHand: """Create an object representing a Poker Hand based on an input of a - string which represents the best 5 card combination from the player's hand + string which represents the best 5-card combination from the player's hand and board cards. Attributes: (read-only) - hand: string representing the hand consisting of five cards + hand: a string representing the hand consisting of five cards Methods: compare_with(opponent): takes in player's hand (self) and opponent's hand (opponent) and compares both hands according to the rules of Texas Hold'em. Returns one of 3 strings (Win, Loss, Tie) based on whether - player's hand is better than opponent's hand. + player's hand is better than the opponent's hand. hand_name(): Returns a string made up of two parts: hand name and high card. @@ -66,11 +66,11 @@ class PokerHand: Supported operators: Rich comparison operators: <, >, <=, >=, ==, != - Supported builtin methods and functions: + Supported built-in methods and functions: list.sort(), sorted() """ - _HAND_NAME = [ + _HAND_NAME = ( "High card", "One pair", "Two pairs", @@ -81,10 +81,10 @@ class PokerHand: "Four of a kind", "Straight flush", "Royal flush", - ] + ) - _CARD_NAME = [ - "", # placeholder as lists are zero indexed + _CARD_NAME = ( + "", # placeholder as tuples are zero-indexed "One", "Two", "Three", @@ -99,7 +99,7 @@ class PokerHand: "Queen", "King", "Ace", - ] + ) def __init__(self, hand: str) -> None: """ diff --git a/pyproject.toml b/pyproject.toml index 1dcce044a313..4f21a95190da 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -103,6 +103,7 @@ max-complexity = 17 # default: 10 "machine_learning/linear_discriminant_analysis.py" = ["ARG005"] "machine_learning/sequential_minimum_optimization.py" = ["SIM115"] "matrix/sherman_morrison.py" = ["SIM103", "SIM114"] +"other/l*u_cache.py" = ["RUF012"] "physics/newtons_second_law_of_motion.py" = ["BLE001"] "project_euler/problem_099/sol1.py" = ["SIM115"] "sorts/external_sort.py" = ["SIM115"] From 5ffe601c86a9b44691a4dce37480c6d904102d49 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 22 Jun 2023 05:24:34 -0700 Subject: [PATCH 0868/1543] Fix `mypy` errors in `maths/sigmoid_linear_unit.py` (#8786) * updating DIRECTORY.md * Fix mypy errors in sigmoid_linear_unit.py * updating DIRECTORY.md * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- maths/sigmoid_linear_unit.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/maths/sigmoid_linear_unit.py b/maths/sigmoid_linear_unit.py index a8ada10dd8ec..0ee09bf82d38 100644 --- a/maths/sigmoid_linear_unit.py +++ b/maths/sigmoid_linear_unit.py @@ -17,7 +17,7 @@ import numpy as np -def sigmoid(vector: np.array) -> np.array: +def sigmoid(vector: np.ndarray) -> np.ndarray: """ Mathematical function sigmoid takes a vector x of K real numbers as input and returns 1/ (1 + e^-x). @@ -29,17 +29,15 @@ def sigmoid(vector: np.array) -> np.array: return 1 / (1 + np.exp(-vector)) -def sigmoid_linear_unit(vector: np.array) -> np.array: +def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray: """ Implements the Sigmoid Linear Unit (SiLU) or swish function Parameters: - vector (np.array): A numpy array consisting of real - values. 
+ vector (np.ndarray): A numpy array consisting of real values Returns: - swish_vec (np.array): The input numpy array, after applying - swish. + swish_vec (np.ndarray): The input numpy array, after applying swish Examples: >>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0])) From f54a9668103e560f20b50559fb54ac38a74d1fe8 Mon Sep 17 00:00:00 2001 From: Jan-Lukas Huhn <134317018+jlhuhn@users.noreply.github.com> Date: Thu, 22 Jun 2023 14:31:48 +0200 Subject: [PATCH 0869/1543] Energy conversions (#8801) * Create TestShiva * Delete TestShiva * Create energy_conversions.py * Update conversions/energy_conversions.py Co-authored-by: Caeden Perelli-Harris --------- Co-authored-by: ShivaDahal99 <130563462+ShivaDahal99@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris --- conversions/energy_conversions.py | 114 ++++++++++++++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100644 conversions/energy_conversions.py diff --git a/conversions/energy_conversions.py b/conversions/energy_conversions.py new file mode 100644 index 000000000000..51de6b313928 --- /dev/null +++ b/conversions/energy_conversions.py @@ -0,0 +1,114 @@ +""" +Conversion of energy units. + +Available units: joule, kilojoule, megajoule, gigajoule,\ + wattsecond, watthour, kilowatthour, newtonmeter, calorie_nutr,\ + kilocalorie_nutr, electronvolt, britishthermalunit_it, footpound + +USAGE : +-> Import this file into their respective project. +-> Use the function energy_conversion() for conversion of energy units. +-> Parameters : + -> from_type : From which type you want to convert + -> to_type : To which type you want to convert + -> value : the value which you want to convert + +REFERENCES : +-> Wikipedia reference: https://en.wikipedia.org/wiki/Units_of_energy +-> Wikipedia reference: https://en.wikipedia.org/wiki/Joule +-> Wikipedia reference: https://en.wikipedia.org/wiki/Kilowatt-hour +-> Wikipedia reference: https://en.wikipedia.org/wiki/Newton-metre +-> Wikipedia reference: https://en.wikipedia.org/wiki/Calorie +-> Wikipedia reference: https://en.wikipedia.org/wiki/Electronvolt +-> Wikipedia reference: https://en.wikipedia.org/wiki/British_thermal_unit +-> Wikipedia reference: https://en.wikipedia.org/wiki/Foot-pound_(energy) +-> Unit converter reference: https://www.unitconverters.net/energy-converter.html +""" + +ENERGY_CONVERSION: dict[str, float] = { + "joule": 1.0, + "kilojoule": 1_000, + "megajoule": 1_000_000, + "gigajoule": 1_000_000_000, + "wattsecond": 1.0, + "watthour": 3_600, + "kilowatthour": 3_600_000, + "newtonmeter": 1.0, + "calorie_nutr": 4_186.8, + "kilocalorie_nutr": 4_186_800.00, + "electronvolt": 1.602_176_634e-19, + "britishthermalunit_it": 1_055.055_85, + "footpound": 1.355_818, +} + + +def energy_conversion(from_type: str, to_type: str, value: float) -> float: + """ + Conversion of energy units. 
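The ENERGY_CONVERSION table above expresses every unit in joules, so a conversion is one multiply and one divide through that pivot unit. A trimmed-down, standalone sketch of the same idea (shortened factor table, made-up names, not part of this file):

factors_in_joules = {"joule": 1.0, "kilowatthour": 3_600_000, "calorie_nutr": 4_186.8}

def convert(value: float, from_type: str, to_type: str) -> float:
    # express the value in joules, then re-express it in the target unit
    return value * factors_in_joules[from_type] / factors_in_joules[to_type]

assert convert(10, "kilowatthour", "joule") == 36_000_000.0
print(convert(1, "joule", "kilowatthour"))  # 2.7777777777777776e-07, as in the doctests below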
+ >>> energy_conversion("joule", "joule", 1) + 1.0 + >>> energy_conversion("joule", "kilojoule", 1) + 0.001 + >>> energy_conversion("joule", "megajoule", 1) + 1e-06 + >>> energy_conversion("joule", "gigajoule", 1) + 1e-09 + >>> energy_conversion("joule", "wattsecond", 1) + 1.0 + >>> energy_conversion("joule", "watthour", 1) + 0.0002777777777777778 + >>> energy_conversion("joule", "kilowatthour", 1) + 2.7777777777777776e-07 + >>> energy_conversion("joule", "newtonmeter", 1) + 1.0 + >>> energy_conversion("joule", "calorie_nutr", 1) + 0.00023884589662749592 + >>> energy_conversion("joule", "kilocalorie_nutr", 1) + 2.388458966274959e-07 + >>> energy_conversion("joule", "electronvolt", 1) + 6.241509074460763e+18 + >>> energy_conversion("joule", "britishthermalunit_it", 1) + 0.0009478171226670134 + >>> energy_conversion("joule", "footpound", 1) + 0.7375621211696556 + >>> energy_conversion("joule", "megajoule", 1000) + 0.001 + >>> energy_conversion("calorie_nutr", "kilocalorie_nutr", 1000) + 1.0 + >>> energy_conversion("kilowatthour", "joule", 10) + 36000000.0 + >>> energy_conversion("britishthermalunit_it", "footpound", 1) + 778.1692306784539 + >>> energy_conversion("watthour", "joule", "a") # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + TypeError: unsupported operand type(s) for /: 'str' and 'float' + >>> energy_conversion("wrongunit", "joule", 1) # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + ValueError: Incorrect 'from_type' or 'to_type' value: 'wrongunit', 'joule' + Valid values are: joule, ... footpound + >>> energy_conversion("joule", "wrongunit", 1) # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + ValueError: Incorrect 'from_type' or 'to_type' value: 'joule', 'wrongunit' + Valid values are: joule, ... footpound + >>> energy_conversion("123", "abc", 1) # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + ValueError: Incorrect 'from_type' or 'to_type' value: '123', 'abc' + Valid values are: joule, ... 
footpound + """ + if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: + msg = ( + f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n" + f"Valid values are: {', '.join(ENERGY_CONVERSION)}" + ) + raise ValueError(msg) + return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 331585f3f866e210e23d11700b09a8770a1c2490 Mon Sep 17 00:00:00 2001 From: Himanshu Tomar Date: Fri, 23 Jun 2023 13:56:05 +0530 Subject: [PATCH 0870/1543] Algorithm: Calculating Product Sum from a Special Array with Nested Structures (#8761) * Added minimum waiting time problem solution using greedy algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ruff --fix * Add type hints * Added two more doc test * Removed unnecessary comments * updated type hints * Updated the code as per the code review * Added recursive algo to calculate product sum from an array * Added recursive algo to calculate product sum from an array * Update doc string * Added doctest for product_sum function * Updated the code and added more doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added more test coverage for product_sum method * Update product_sum.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + data_structures/arrays/product_sum.py | 98 +++++++++++++++++++++++++++ 2 files changed, 99 insertions(+) create mode 100644 data_structures/arrays/product_sum.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 6ec8d5111176..83389dab1f56 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -166,6 +166,7 @@ * Arrays * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) + * [Product Sum Array](data_structures/arrays/product_sum.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) diff --git a/data_structures/arrays/product_sum.py b/data_structures/arrays/product_sum.py new file mode 100644 index 000000000000..4fb906f369ab --- /dev/null +++ b/data_structures/arrays/product_sum.py @@ -0,0 +1,98 @@ +""" +Calculate the Product Sum from a Special Array. +reference: https://dev.to/sfrasica/algorithms-product-sum-from-an-array-dc6 + +Python doctests can be run with the following command: +python -m doctest -v product_sum.py + +Calculate the product sum of a "special" array which can contain integers or nested +arrays. The product sum is obtained by adding all elements and multiplying by their +respective depths. + +For example, in the array [x, y], the product sum is (x + y). In the array [x, [y, z]], +the product sum is x + 2 * (y + z). In the array [x, [y, [z]]], +the product sum is x + 2 * (y + 3z). + +Example Input: +[5, 2, [-7, 1], 3, [6, [-13, 8], 4]] +Output: 12 + +""" + + +def product_sum(arr: list[int | list], depth: int) -> int: + """ + Recursively calculates the product sum of an array. + + The product sum of an array is defined as the sum of its elements multiplied by + their respective depths. If an element is a list, its product sum is calculated + recursively by multiplying the sum of its elements with its depth plus one. + + Args: + arr: The array of integers and nested lists. + depth: The current depth level. 
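For comparison, the same weighting can be computed without recursion by carrying an explicit stack of (element, depth, weight) entries, where the weight of a level-d scalar is d! (1 at the top level, then 2, 6, 24, ...). A hedged sketch, not part of the patch and with a made-up name:

def product_sum_iterative(array: list) -> int | float:
    # weight is the factor applied to scalars at the current nesting level
    total = 0
    stack = [(element, 1, 1) for element in array]
    while stack:
        element, depth, weight = stack.pop()
        if isinstance(element, list):
            stack.extend((child, depth + 1, weight * (depth + 1)) for child in element)
        else:
            total += weight * element
    return total

assert product_sum_iterative([1, [2, 3]]) == 11
assert product_sum_iterative([1, [2, [3, 4]]]) == 47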
+ + Returns: + int: The product sum of the array. + + Examples: + >>> product_sum([1, 2, 3], 1) + 6 + >>> product_sum([-1, 2, [-3, 4]], 2) + 8 + >>> product_sum([1, 2, 3], -1) + -6 + >>> product_sum([1, 2, 3], 0) + 0 + >>> product_sum([1, 2, 3], 7) + 42 + >>> product_sum((1, 2, 3), 7) + 42 + >>> product_sum({1, 2, 3}, 7) + 42 + >>> product_sum([1, -1], 1) + 0 + >>> product_sum([1, -2], 1) + -1 + >>> product_sum([-3.5, [1, [0.5]]], 1) + 1.5 + + """ + total_sum = 0 + for ele in arr: + total_sum += product_sum(ele, depth + 1) if isinstance(ele, list) else ele + return total_sum * depth + + +def product_sum_array(array: list[int | list]) -> int: + """ + Calculates the product sum of an array. + + Args: + array (List[Union[int, List]]): The array of integers and nested lists. + + Returns: + int: The product sum of the array. + + Examples: + >>> product_sum_array([1, 2, 3]) + 6 + >>> product_sum_array([1, [2, 3]]) + 11 + >>> product_sum_array([1, [2, [3, 4]]]) + 47 + >>> product_sum_array([0]) + 0 + >>> product_sum_array([-3.5, [1, [0.5]]]) + 1.5 + >>> product_sum_array([1, -2]) + -1 + + """ + return product_sum(array, 1) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 267a8b72f97762383e7c313ed20df859115e2815 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 23 Jun 2023 06:56:58 -0700 Subject: [PATCH 0871/1543] Clarify how to add issue numbers in PR template and CONTRIBUTING.md (#8833) * updating DIRECTORY.md * Clarify wording in PR template * Clarify CONTRIBUTING.md wording about adding issue numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add suggested change from review to CONTRIBUTING.md Co-authored-by: Christian Clauss * Incorporate review edit to CONTRIBUTING.md Co-authored-by: Christian Clauss --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .github/pull_request_template.md | 2 +- CONTRIBUTING.md | 7 ++++++- DIRECTORY.md | 2 ++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index b3ba8baf9c34..1f9797fae038 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,4 +17,4 @@ * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms include at least one URL that points to Wikipedia or another similar explanation. -* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`. +* [ ] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): "Fixes #ISSUE-NUMBER". diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2bb0c2e39eee..618cca868d83 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -25,7 +25,12 @@ We appreciate any contribution, from fixing a grammar mistake in a comment to im Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. 
After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the GitHub Actions output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. -Please help us keep our issue list small by adding fixes: #{$ISSUE_NO} to the commit message of pull requests that resolve open issues. GitHub will use this tag to auto-close the issue when the PR is merged. +Please help us keep our issue list small by adding `Fixes #{$ISSUE_NUMBER}` to the description of pull requests that resolve open issues. +For example, if your pull request fixes issue #10, then please add the following to its description: +``` +Fixes #10 +``` +GitHub will use this tag to [auto-close the issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) if and when the PR is merged. #### What is an Algorithm? diff --git a/DIRECTORY.md b/DIRECTORY.md index 83389dab1f56..1414aacf95f7 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -146,6 +146,7 @@ * [Decimal To Binary Recursion](conversions/decimal_to_binary_recursion.py) * [Decimal To Hexadecimal](conversions/decimal_to_hexadecimal.py) * [Decimal To Octal](conversions/decimal_to_octal.py) + * [Energy Conversions](conversions/energy_conversions.py) * [Excel Title To Column](conversions/excel_title_to_column.py) * [Hex To Bin](conversions/hex_to_bin.py) * [Hexadecimal To Decimal](conversions/hexadecimal_to_decimal.py) @@ -411,6 +412,7 @@ * [Dijkstra 2](graphs/dijkstra_2.py) * [Dijkstra Algorithm](graphs/dijkstra_algorithm.py) * [Dijkstra Alternate](graphs/dijkstra_alternate.py) + * [Dijkstra Binary Grid](graphs/dijkstra_binary_grid.py) * [Dinic](graphs/dinic.py) * [Directed And Undirected (Weighted) Graph](graphs/directed_and_undirected_(weighted)_graph.py) * [Edmonds Karp Multiple Source And Sink](graphs/edmonds_karp_multiple_source_and_sink.py) From 3bfa89dacf877b1d7a62b14f82d54e8de99a838e Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 25 Jun 2023 18:28:01 +0200 Subject: [PATCH 0872/1543] GitHub Actions build: Add more tests (#8837) * GitHub Actions build: Add more tests Re-enable some tests that were disabled in #6591. 
Fixes #8818 * updating DIRECTORY.md * TODO: Re-enable quantum tests * fails: pytest quantum/bb84.py quantum/q_fourier_transform.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 7 +++---- DIRECTORY.md | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6b9cc890b6af..5229edaf8659 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,11 +22,10 @@ jobs: python -m pip install --upgrade pip setuptools six wheel python -m pip install pytest-cov -r requirements.txt - name: Run tests - # See: #6591 for re-enabling tests on Python v3.11 + # TODO: #8818 Re-enable quantum tests run: pytest - --ignore=computer_vision/cnn_classification.py - --ignore=machine_learning/lstm/lstm_prediction.py - --ignore=quantum/ + --ignore=quantum/bb84.py + --ignore=quantum/q_fourier_transform.py --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered diff --git a/DIRECTORY.md b/DIRECTORY.md index 1414aacf95f7..0c21b9537fc1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -167,7 +167,7 @@ * Arrays * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) - * [Product Sum Array](data_structures/arrays/product_sum.py) + * [Product Sum](data_structures/arrays/product_sum.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) From d764eec655c1c51f5ef3490d27ea72430191a000 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 26 Jun 2023 05:24:50 +0200 Subject: [PATCH 0873/1543] Fix failing pytest quantum/bb84.py (#8838) * Fix failing pytest quantum/bb84.py * Update bb84.py test results to match current qiskit --- .github/workflows/build.yml | 1 - quantum/bb84.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5229edaf8659..fc8cb636979e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,6 @@ jobs: - name: Run tests # TODO: #8818 Re-enable quantum tests run: pytest - --ignore=quantum/bb84.py --ignore=quantum/q_fourier_transform.py --ignore=project_euler/ --ignore=scripts/validate_solutions.py diff --git a/quantum/bb84.py b/quantum/bb84.py index 60d64371fe63..e90a11c2aef3 100644 --- a/quantum/bb84.py +++ b/quantum/bb84.py @@ -64,10 +64,10 @@ def bb84(key_len: int = 8, seed: int | None = None) -> str: key: The key generated using BB84 protocol. >>> bb84(16, seed=0) - '1101101100010000' + '0111110111010010' >>> bb84(8, seed=0) - '01011011' + '10110001' """ # Set up the random number generator. 
rng = np.random.default_rng(seed=seed) From 62dcbea943e8cc4ea4d83eff115c4e6f6a4808af Mon Sep 17 00:00:00 2001 From: duongoku Date: Mon, 26 Jun 2023 14:39:18 +0700 Subject: [PATCH 0874/1543] Add power sum problem (#8832) * Add powersum problem * Add doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add more doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add more doctests * Improve paramater name * Fix line too long * Remove global variables * Apply suggestions from code review * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- backtracking/power_sum.py | 93 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 backtracking/power_sum.py diff --git a/backtracking/power_sum.py b/backtracking/power_sum.py new file mode 100644 index 000000000000..fcf1429f8570 --- /dev/null +++ b/backtracking/power_sum.py @@ -0,0 +1,93 @@ +""" +Problem source: https://www.hackerrank.com/challenges/the-power-sum/problem +Find the number of ways that a given integer X, can be expressed as the sum +of the Nth powers of unique, natural numbers. For example, if X=13 and N=2. +We have to find all combinations of unique squares adding up to 13. +The only solution is 2^2+3^2. Constraints: 1<=X<=1000, 2<=N<=10. +""" + +from math import pow + + +def backtrack( + needed_sum: int, + power: int, + current_number: int, + current_sum: int, + solutions_count: int, +) -> tuple[int, int]: + """ + >>> backtrack(13, 2, 1, 0, 0) + (0, 1) + >>> backtrack(100, 2, 1, 0, 0) + (0, 3) + >>> backtrack(100, 3, 1, 0, 0) + (0, 1) + >>> backtrack(800, 2, 1, 0, 0) + (0, 561) + >>> backtrack(1000, 10, 1, 0, 0) + (0, 0) + >>> backtrack(400, 2, 1, 0, 0) + (0, 55) + >>> backtrack(50, 1, 1, 0, 0) + (0, 3658) + """ + if current_sum == needed_sum: + # If the sum of the powers is equal to needed_sum, then we have a solution. + solutions_count += 1 + return current_sum, solutions_count + + i_to_n = int(pow(current_number, power)) + if current_sum + i_to_n <= needed_sum: + # If the sum of the powers is less than needed_sum, then continue adding powers. + current_sum += i_to_n + current_sum, solutions_count = backtrack( + needed_sum, power, current_number + 1, current_sum, solutions_count + ) + current_sum -= i_to_n + if i_to_n < needed_sum: + # If the power of i is less than needed_sum, then try with the next power. + current_sum, solutions_count = backtrack( + needed_sum, power, current_number + 1, current_sum, solutions_count + ) + return current_sum, solutions_count + + +def solve(needed_sum: int, power: int) -> int: + """ + >>> solve(13, 2) + 1 + >>> solve(100, 2) + 3 + >>> solve(100, 3) + 1 + >>> solve(800, 2) + 561 + >>> solve(1000, 10) + 0 + >>> solve(400, 2) + 55 + >>> solve(50, 1) + Traceback (most recent call last): + ... + ValueError: Invalid input + needed_sum must be between 1 and 1000, power between 2 and 10. + >>> solve(-10, 5) + Traceback (most recent call last): + ... + ValueError: Invalid input + needed_sum must be between 1 and 1000, power between 2 and 10. + """ + if not (1 <= needed_sum <= 1000 and 2 <= power <= 10): + raise ValueError( + "Invalid input\n" + "needed_sum must be between 1 and 1000, power between 2 and 10." 
+ ) + + return backtrack(needed_sum, power, 1, 0, 0)[1] # Return the solutions_count + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 69f20033e55ae62c337e2fb2146aea5fabf3e5a0 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 26 Jun 2023 02:15:31 -0700 Subject: [PATCH 0875/1543] Remove duplicate implementation of Collatz sequence (#8836) * updating DIRECTORY.md * Remove duplicate implementation of Collatz sequence * updating DIRECTORY.md * Add suggestions from PR review --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - maths/3n_plus_1.py | 151 -------------------------------------- maths/collatz_sequence.py | 69 +++++++++++------ 3 files changed, 46 insertions(+), 175 deletions(-) delete mode 100644 maths/3n_plus_1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 0c21b9537fc1..1e0e450bca2b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -522,7 +522,6 @@ * [Xgboost Regressor](machine_learning/xgboost_regressor.py) ## Maths - * [3N Plus 1](maths/3n_plus_1.py) * [Abs](maths/abs.py) * [Add](maths/add.py) * [Addition Without Arithmetic](maths/addition_without_arithmetic.py) diff --git a/maths/3n_plus_1.py b/maths/3n_plus_1.py deleted file mode 100644 index f9f6dfeb9faa..000000000000 --- a/maths/3n_plus_1.py +++ /dev/null @@ -1,151 +0,0 @@ -from __future__ import annotations - - -def n31(a: int) -> tuple[list[int], int]: - """ - Returns the Collatz sequence and its length of any positive integer. - >>> n31(4) - ([4, 2, 1], 3) - """ - - if not isinstance(a, int): - msg = f"Must be int, not {type(a).__name__}" - raise TypeError(msg) - if a < 1: - msg = f"Given integer must be positive, not {a}" - raise ValueError(msg) - - path = [a] - while a != 1: - if a % 2 == 0: - a //= 2 - else: - a = 3 * a + 1 - path.append(a) - return path, len(path) - - -def test_n31(): - """ - >>> test_n31() - """ - assert n31(4) == ([4, 2, 1], 3) - assert n31(11) == ([11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1], 15) - assert n31(31) == ( - [ - 31, - 94, - 47, - 142, - 71, - 214, - 107, - 322, - 161, - 484, - 242, - 121, - 364, - 182, - 91, - 274, - 137, - 412, - 206, - 103, - 310, - 155, - 466, - 233, - 700, - 350, - 175, - 526, - 263, - 790, - 395, - 1186, - 593, - 1780, - 890, - 445, - 1336, - 668, - 334, - 167, - 502, - 251, - 754, - 377, - 1132, - 566, - 283, - 850, - 425, - 1276, - 638, - 319, - 958, - 479, - 1438, - 719, - 2158, - 1079, - 3238, - 1619, - 4858, - 2429, - 7288, - 3644, - 1822, - 911, - 2734, - 1367, - 4102, - 2051, - 6154, - 3077, - 9232, - 4616, - 2308, - 1154, - 577, - 1732, - 866, - 433, - 1300, - 650, - 325, - 976, - 488, - 244, - 122, - 61, - 184, - 92, - 46, - 23, - 70, - 35, - 106, - 53, - 160, - 80, - 40, - 20, - 10, - 5, - 16, - 8, - 4, - 2, - 1, - ], - 107, - ) - - -if __name__ == "__main__": - num = 4 - path, length = n31(num) - print(f"The Collatz sequence of {num} took {length} steps. \nPath: {path}") diff --git a/maths/collatz_sequence.py b/maths/collatz_sequence.py index 7b3636de69f4..4f3aa5582731 100644 --- a/maths/collatz_sequence.py +++ b/maths/collatz_sequence.py @@ -1,43 +1,66 @@ +""" +The Collatz conjecture is a famous unsolved problem in mathematics. Given a starting +positive integer, define the following sequence: +- If the current term n is even, then the next term is n/2. +- If the current term n is odd, then the next term is 3n + 1. +The conjecture claims that this sequence will always reach 1 for any starting number. 
+ +Other names for this problem include the 3n + 1 problem, the Ulam conjecture, Kakutani's +problem, the Thwaites conjecture, Hasse's algorithm, the Syracuse problem, and the +hailstone sequence. + +Reference: https://en.wikipedia.org/wiki/Collatz_conjecture +""" + from __future__ import annotations +from collections.abc import Generator -def collatz_sequence(n: int) -> list[int]: + +def collatz_sequence(n: int) -> Generator[int, None, None]: """ - Collatz conjecture: start with any positive integer n. The next term is - obtained as follows: - If n term is even, the next term is: n / 2 . - If n is odd, the next term is: 3 * n + 1. - - The conjecture states the sequence will always reach 1 for any starting value n. - Example: - >>> collatz_sequence(2.1) + Generate the Collatz sequence starting at n. + >>> tuple(collatz_sequence(2.1)) Traceback (most recent call last): ... - Exception: Sequence only defined for natural numbers - >>> collatz_sequence(0) + Exception: Sequence only defined for positive integers + >>> tuple(collatz_sequence(0)) Traceback (most recent call last): ... - Exception: Sequence only defined for natural numbers - >>> collatz_sequence(43) # doctest: +NORMALIZE_WHITESPACE - [43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7, - 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1] + Exception: Sequence only defined for positive integers + >>> tuple(collatz_sequence(4)) + (4, 2, 1) + >>> tuple(collatz_sequence(11)) + (11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1) + >>> tuple(collatz_sequence(31)) # doctest: +NORMALIZE_WHITESPACE + (31, 94, 47, 142, 71, 214, 107, 322, 161, 484, 242, 121, 364, 182, 91, 274, 137, + 412, 206, 103, 310, 155, 466, 233, 700, 350, 175, 526, 263, 790, 395, 1186, 593, + 1780, 890, 445, 1336, 668, 334, 167, 502, 251, 754, 377, 1132, 566, 283, 850, 425, + 1276, 638, 319, 958, 479, 1438, 719, 2158, 1079, 3238, 1619, 4858, 2429, 7288, 3644, + 1822, 911, 2734, 1367, 4102, 2051, 6154, 3077, 9232, 4616, 2308, 1154, 577, 1732, + 866, 433, 1300, 650, 325, 976, 488, 244, 122, 61, 184, 92, 46, 23, 70, 35, 106, 53, + 160, 80, 40, 20, 10, 5, 16, 8, 4, 2, 1) + >>> tuple(collatz_sequence(43)) # doctest: +NORMALIZE_WHITESPACE + (43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7, 22, 11, 34, 17, 52, 26, + 13, 40, 20, 10, 5, 16, 8, 4, 2, 1) """ - if not isinstance(n, int) or n < 1: - raise Exception("Sequence only defined for natural numbers") + raise Exception("Sequence only defined for positive integers") - sequence = [n] + yield n while n != 1: - n = 3 * n + 1 if n & 1 else n // 2 - sequence.append(n) - return sequence + if n % 2 == 0: + n //= 2 + else: + n = 3 * n + 1 + yield n def main(): n = 43 - sequence = collatz_sequence(n) + sequence = tuple(collatz_sequence(n)) print(sequence) - print(f"collatz sequence from {n} took {len(sequence)} steps.") + print(f"Collatz sequence from {n} took {len(sequence)} steps.") if __name__ == "__main__": From 929d3d9219020d2978d5560e3b931df69a6f2d50 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 27 Jun 2023 07:23:54 +0200 Subject: [PATCH 0876/1543] [pre-commit.ci] pre-commit autoupdate (#8842) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.274 → v0.0.275](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.274...v0.0.275) - [github.com/tox-dev/pyproject-fmt: 0.12.0 → 
0.12.1](https://github.com/tox-dev/pyproject-fmt/compare/0.12.0...0.12.1) - [github.com/pre-commit/mirrors-mypy: v1.3.0 → v1.4.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.3.0...v1.4.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3d4cc4084ccf..1d92d2ff31c1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.274 + rev: v0.0.275 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.12.0" + rev: "0.12.1" hooks: - id: pyproject-fmt @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.3.0 + rev: v1.4.1 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 1e0e450bca2b..d25d665ef28b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -29,6 +29,7 @@ * [Minmax](backtracking/minmax.py) * [N Queens](backtracking/n_queens.py) * [N Queens Math](backtracking/n_queens_math.py) + * [Power Sum](backtracking/power_sum.py) * [Rat In Maze](backtracking/rat_in_maze.py) * [Sudoku](backtracking/sudoku.py) * [Sum Of Subsets](backtracking/sum_of_subsets.py) From c9ee6ed1887fadd25c1c43c31ed55a99b2be5f24 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 00:20:35 +0200 Subject: [PATCH 0877/1543] [pre-commit.ci] pre-commit autoupdate (#8853) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.275 → v0.0.276](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.275...v0.0.276) * Update double_ended_queue.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update double_ended_queue.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- data_structures/queue/double_ended_queue.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1d92d2ff31c1..42ebeed14fa9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.275 + rev: v0.0.276 hooks: - id: ruff diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 2472371b42fe..44dc863b9a4e 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -54,7 +54,7 @@ class _Iterator: the current node of the iteration. 
""" - __slots__ = "_cur" + __slots__ = ("_cur",) def __init__(self, cur: Deque._Node | None) -> None: self._cur = cur From a0eec90466beeb3b6ce0f7afd905f96454e9b14c Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 11 Jul 2023 02:44:12 -0700 Subject: [PATCH 0878/1543] Consolidate duplicate implementations of max subarray (#8849) * Remove max subarray sum duplicate implementations * updating DIRECTORY.md * Rename max_sum_contiguous_subsequence.py * Fix typo in dynamic_programming/max_subarray_sum.py * Remove duplicate divide and conquer max subarray * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 8 +- divide_and_conquer/max_subarray.py | 112 ++++++++++++++++++ divide_and_conquer/max_subarray_sum.py | 78 ------------ dynamic_programming/max_sub_array.py | 93 --------------- dynamic_programming/max_subarray_sum.py | 60 ++++++++++ .../max_sum_contiguous_subsequence.py | 20 ---- maths/kadanes.py | 63 ---------- maths/largest_subarray_sum.py | 21 ---- other/maximum_subarray.py | 32 ----- 9 files changed, 174 insertions(+), 313 deletions(-) create mode 100644 divide_and_conquer/max_subarray.py delete mode 100644 divide_and_conquer/max_subarray_sum.py delete mode 100644 dynamic_programming/max_sub_array.py create mode 100644 dynamic_programming/max_subarray_sum.py delete mode 100644 dynamic_programming/max_sum_contiguous_subsequence.py delete mode 100644 maths/kadanes.py delete mode 100644 maths/largest_subarray_sum.py delete mode 100644 other/maximum_subarray.py diff --git a/DIRECTORY.md b/DIRECTORY.md index d25d665ef28b..77938f45011b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -293,7 +293,7 @@ * [Inversions](divide_and_conquer/inversions.py) * [Kth Order Statistic](divide_and_conquer/kth_order_statistic.py) * [Max Difference Pair](divide_and_conquer/max_difference_pair.py) - * [Max Subarray Sum](divide_and_conquer/max_subarray_sum.py) + * [Max Subarray](divide_and_conquer/max_subarray.py) * [Mergesort](divide_and_conquer/mergesort.py) * [Peak](divide_and_conquer/peak.py) * [Power](divide_and_conquer/power.py) @@ -324,8 +324,7 @@ * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) * [Max Product Subarray](dynamic_programming/max_product_subarray.py) - * [Max Sub Array](dynamic_programming/max_sub_array.py) - * [Max Sum Contiguous Subsequence](dynamic_programming/max_sum_contiguous_subsequence.py) + * [Max Subarray Sum](dynamic_programming/max_subarray_sum.py) * [Min Distance Up Bottom](dynamic_programming/min_distance_up_bottom.py) * [Minimum Coin Change](dynamic_programming/minimum_coin_change.py) * [Minimum Cost Path](dynamic_programming/minimum_cost_path.py) @@ -591,12 +590,10 @@ * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) * [Juggler Sequence](maths/juggler_sequence.py) - * [Kadanes](maths/kadanes.py) * [Karatsuba](maths/karatsuba.py) * [Krishnamurthy Number](maths/krishnamurthy_number.py) * [Kth Lexicographic Permutation](maths/kth_lexicographic_permutation.py) * [Largest Of Very Large Numbers](maths/largest_of_very_large_numbers.py) - * [Largest Subarray Sum](maths/largest_subarray_sum.py) * [Least Common Multiple](maths/least_common_multiple.py) * [Line Length](maths/line_length.py) * [Liouville Lambda](maths/liouville_lambda.py) @@ -733,7 +730,6 @@ * [Linear Congruential Generator](other/linear_congruential_generator.py) * [Lru Cache](other/lru_cache.py) * 
[Magicdiamondpattern](other/magicdiamondpattern.py) - * [Maximum Subarray](other/maximum_subarray.py) * [Maximum Subsequence](other/maximum_subsequence.py) * [Nested Brackets](other/nested_brackets.py) * [Number Container System](other/number_container_system.py) diff --git a/divide_and_conquer/max_subarray.py b/divide_and_conquer/max_subarray.py new file mode 100644 index 000000000000..851ef621a24c --- /dev/null +++ b/divide_and_conquer/max_subarray.py @@ -0,0 +1,112 @@ +""" +The maximum subarray problem is the task of finding the continuous subarray that has the +maximum sum within a given array of numbers. For example, given the array +[-2, 1, -3, 4, -1, 2, 1, -5, 4], the contiguous subarray with the maximum sum is +[4, -1, 2, 1], which has a sum of 6. + +This divide-and-conquer algorithm finds the maximum subarray in O(n log n) time. +""" +from __future__ import annotations + +import time +from collections.abc import Sequence +from random import randint + +from matplotlib import pyplot as plt + + +def max_subarray( + arr: Sequence[float], low: int, high: int +) -> tuple[int | None, int | None, float]: + """ + Solves the maximum subarray problem using divide and conquer. + :param arr: the given array of numbers + :param low: the start index + :param high: the end index + :return: the start index of the maximum subarray, the end index of the + maximum subarray, and the maximum subarray sum + + >>> nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4] + >>> max_subarray(nums, 0, len(nums) - 1) + (3, 6, 6) + >>> nums = [2, 8, 9] + >>> max_subarray(nums, 0, len(nums) - 1) + (0, 2, 19) + >>> nums = [0, 0] + >>> max_subarray(nums, 0, len(nums) - 1) + (0, 0, 0) + >>> nums = [-1.0, 0.0, 1.0] + >>> max_subarray(nums, 0, len(nums) - 1) + (2, 2, 1.0) + >>> nums = [-2, -3, -1, -4, -6] + >>> max_subarray(nums, 0, len(nums) - 1) + (2, 2, -1) + >>> max_subarray([], 0, 0) + (None, None, 0) + """ + if not arr: + return None, None, 0 + if low == high: + return low, high, arr[low] + + mid = (low + high) // 2 + left_low, left_high, left_sum = max_subarray(arr, low, mid) + right_low, right_high, right_sum = max_subarray(arr, mid + 1, high) + cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high) + if left_sum >= right_sum and left_sum >= cross_sum: + return left_low, left_high, left_sum + elif right_sum >= left_sum and right_sum >= cross_sum: + return right_low, right_high, right_sum + return cross_left, cross_right, cross_sum + + +def max_cross_sum( + arr: Sequence[float], low: int, mid: int, high: int +) -> tuple[int, int, float]: + left_sum, max_left = float("-inf"), -1 + right_sum, max_right = float("-inf"), -1 + + summ: int | float = 0 + for i in range(mid, low - 1, -1): + summ += arr[i] + if summ > left_sum: + left_sum = summ + max_left = i + + summ = 0 + for i in range(mid + 1, high + 1): + summ += arr[i] + if summ > right_sum: + right_sum = summ + max_right = i + + return max_left, max_right, (left_sum + right_sum) + + +def time_max_subarray(input_size: int) -> float: + arr = [randint(1, input_size) for _ in range(input_size)] + start = time.time() + max_subarray(arr, 0, input_size - 1) + end = time.time() + return end - start + + +def plot_runtimes() -> None: + input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000] + runtimes = [time_max_subarray(input_size) for input_size in input_sizes] + print("No of Inputs\t\tTime Taken") + for input_size, runtime in zip(input_sizes, runtimes): + print(input_size, "\t\t", runtime) + plt.plot(input_sizes, runtimes) + 
plt.xlabel("Number of Inputs") + plt.ylabel("Time taken in seconds") + plt.show() + + +if __name__ == "__main__": + """ + A random simulation of this algorithm. + """ + from doctest import testmod + + testmod() diff --git a/divide_and_conquer/max_subarray_sum.py b/divide_and_conquer/max_subarray_sum.py deleted file mode 100644 index f23e81719025..000000000000 --- a/divide_and_conquer/max_subarray_sum.py +++ /dev/null @@ -1,78 +0,0 @@ -""" -Given a array of length n, max_subarray_sum() finds -the maximum of sum of contiguous sub-array using divide and conquer method. - -Time complexity : O(n log n) - -Ref : INTRODUCTION TO ALGORITHMS THIRD EDITION -(section : 4, sub-section : 4.1, page : 70) - -""" - - -def max_sum_from_start(array): - """This function finds the maximum contiguous sum of array from 0 index - - Parameters : - array (list[int]) : given array - - Returns : - max_sum (int) : maximum contiguous sum of array from 0 index - - """ - array_sum = 0 - max_sum = float("-inf") - for num in array: - array_sum += num - if array_sum > max_sum: - max_sum = array_sum - return max_sum - - -def max_cross_array_sum(array, left, mid, right): - """This function finds the maximum contiguous sum of left and right arrays - - Parameters : - array, left, mid, right (list[int], int, int, int) - - Returns : - (int) : maximum of sum of contiguous sum of left and right arrays - - """ - - max_sum_of_left = max_sum_from_start(array[left : mid + 1][::-1]) - max_sum_of_right = max_sum_from_start(array[mid + 1 : right + 1]) - return max_sum_of_left + max_sum_of_right - - -def max_subarray_sum(array, left, right): - """Maximum contiguous sub-array sum, using divide and conquer method - - Parameters : - array, left, right (list[int], int, int) : - given array, current left index and current right index - - Returns : - int : maximum of sum of contiguous sub-array - - """ - - # base case: array has only one element - if left == right: - return array[right] - - # Recursion - mid = (left + right) // 2 - left_half_sum = max_subarray_sum(array, left, mid) - right_half_sum = max_subarray_sum(array, mid + 1, right) - cross_sum = max_cross_array_sum(array, left, mid, right) - return max(left_half_sum, right_half_sum, cross_sum) - - -if __name__ == "__main__": - array = [-2, -5, 6, -2, -3, 1, 5, -6] - array_length = len(array) - print( - "Maximum sum of contiguous subarray:", - max_subarray_sum(array, 0, array_length - 1), - ) diff --git a/dynamic_programming/max_sub_array.py b/dynamic_programming/max_sub_array.py deleted file mode 100644 index 07717fba4172..000000000000 --- a/dynamic_programming/max_sub_array.py +++ /dev/null @@ -1,93 +0,0 @@ -""" -author : Mayank Kumar Jha (mk9440) -""" -from __future__ import annotations - - -def find_max_sub_array(a, low, high): - if low == high: - return low, high, a[low] - else: - mid = (low + high) // 2 - left_low, left_high, left_sum = find_max_sub_array(a, low, mid) - right_low, right_high, right_sum = find_max_sub_array(a, mid + 1, high) - cross_left, cross_right, cross_sum = find_max_cross_sum(a, low, mid, high) - if left_sum >= right_sum and left_sum >= cross_sum: - return left_low, left_high, left_sum - elif right_sum >= left_sum and right_sum >= cross_sum: - return right_low, right_high, right_sum - else: - return cross_left, cross_right, cross_sum - - -def find_max_cross_sum(a, low, mid, high): - left_sum, max_left = -999999999, -1 - right_sum, max_right = -999999999, -1 - summ = 0 - for i in range(mid, low - 1, -1): - summ += a[i] - if summ > left_sum: - left_sum = summ - 
max_left = i - summ = 0 - for i in range(mid + 1, high + 1): - summ += a[i] - if summ > right_sum: - right_sum = summ - max_right = i - return max_left, max_right, (left_sum + right_sum) - - -def max_sub_array(nums: list[int]) -> int: - """ - Finds the contiguous subarray which has the largest sum and return its sum. - - >>> max_sub_array([-2, 1, -3, 4, -1, 2, 1, -5, 4]) - 6 - - An empty (sub)array has sum 0. - >>> max_sub_array([]) - 0 - - If all elements are negative, the largest subarray would be the empty array, - having the sum 0. - >>> max_sub_array([-1, -2, -3]) - 0 - >>> max_sub_array([5, -2, -3]) - 5 - >>> max_sub_array([31, -41, 59, 26, -53, 58, 97, -93, -23, 84]) - 187 - """ - best = 0 - current = 0 - for i in nums: - current += i - current = max(current, 0) - best = max(best, current) - return best - - -if __name__ == "__main__": - """ - A random simulation of this algorithm. - """ - import time - from random import randint - - from matplotlib import pyplot as plt - - inputs = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000] - tim = [] - for i in inputs: - li = [randint(1, i) for j in range(i)] - strt = time.time() - (find_max_sub_array(li, 0, len(li) - 1)) - end = time.time() - tim.append(end - strt) - print("No of Inputs Time Taken") - for i in range(len(inputs)): - print(inputs[i], "\t\t", tim[i]) - plt.plot(inputs, tim) - plt.xlabel("Number of Inputs") - plt.ylabel("Time taken in seconds ") - plt.show() diff --git a/dynamic_programming/max_subarray_sum.py b/dynamic_programming/max_subarray_sum.py new file mode 100644 index 000000000000..c76943472b97 --- /dev/null +++ b/dynamic_programming/max_subarray_sum.py @@ -0,0 +1,60 @@ +""" +The maximum subarray sum problem is the task of finding the maximum sum that can be +obtained from a contiguous subarray within a given array of numbers. For example, given +the array [-2, 1, -3, 4, -1, 2, 1, -5, 4], the contiguous subarray with the maximum sum +is [4, -1, 2, 1], so the maximum subarray sum is 6. + +Kadane's algorithm is a simple dynamic programming algorithm that solves the maximum +subarray sum problem in O(n) time and O(1) space. + +Reference: https://en.wikipedia.org/wiki/Maximum_subarray_problem +""" +from collections.abc import Sequence + + +def max_subarray_sum( + arr: Sequence[float], allow_empty_subarrays: bool = False +) -> float: + """ + Solves the maximum subarray sum problem using Kadane's algorithm. 
+ :param arr: the given array of numbers + :param allow_empty_subarrays: if True, then the algorithm considers empty subarrays + + >>> max_subarray_sum([2, 8, 9]) + 19 + >>> max_subarray_sum([0, 0]) + 0 + >>> max_subarray_sum([-1.0, 0.0, 1.0]) + 1.0 + >>> max_subarray_sum([1, 2, 3, 4, -2]) + 10 + >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) + 6 + >>> max_subarray_sum([2, 3, -9, 8, -2]) + 8 + >>> max_subarray_sum([-2, -3, -1, -4, -6]) + -1 + >>> max_subarray_sum([-2, -3, -1, -4, -6], allow_empty_subarrays=True) + 0 + >>> max_subarray_sum([]) + 0 + """ + if not arr: + return 0 + + max_sum = 0 if allow_empty_subarrays else float("-inf") + curr_sum = 0.0 + for num in arr: + curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num) + max_sum = max(max_sum, curr_sum) + + return max_sum + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + + nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4] + print(f"{max_subarray_sum(nums) = }") diff --git a/dynamic_programming/max_sum_contiguous_subsequence.py b/dynamic_programming/max_sum_contiguous_subsequence.py deleted file mode 100644 index bac592370c5d..000000000000 --- a/dynamic_programming/max_sum_contiguous_subsequence.py +++ /dev/null @@ -1,20 +0,0 @@ -def max_subarray_sum(nums: list) -> int: - """ - >>> max_subarray_sum([6 , 9, -1, 3, -7, -5, 10]) - 17 - """ - if not nums: - return 0 - n = len(nums) - - res, s, s_pre = nums[0], nums[0], nums[0] - for i in range(1, n): - s = max(nums[i], s_pre + nums[i]) - s_pre = s - res = max(res, s) - return res - - -if __name__ == "__main__": - nums = [6, 9, -1, 3, -7, -5, 10] - print(max_subarray_sum(nums)) diff --git a/maths/kadanes.py b/maths/kadanes.py deleted file mode 100644 index c2ea53a6cc84..000000000000 --- a/maths/kadanes.py +++ /dev/null @@ -1,63 +0,0 @@ -""" -Kadane's algorithm to get maximum subarray sum -https://medium.com/@rsinghal757/kadanes-algorithm-dynamic-programming-how-and-why-does-it-work-3fd8849ed73d -https://en.wikipedia.org/wiki/Maximum_subarray_problem -""" -test_data: tuple = ([-2, -8, -9], [2, 8, 9], [-1, 0, 1], [0, 0], []) - - -def negative_exist(arr: list) -> int: - """ - >>> negative_exist([-2,-8,-9]) - -2 - >>> [negative_exist(arr) for arr in test_data] - [-2, 0, 0, 0, 0] - """ - arr = arr or [0] - max_number = arr[0] - for i in arr: - if i >= 0: - return 0 - elif max_number <= i: - max_number = i - return max_number - - -def kadanes(arr: list) -> int: - """ - If negative_exist() returns 0 than this function will execute - else it will return the value return by negative_exist function - - For example: arr = [2, 3, -9, 8, -2] - Initially we set value of max_sum to 0 and max_till_element to 0 than when - max_sum is less than max_till particular element it will assign that value to - max_sum and when value of max_till_sum is less than 0 it will assign 0 to i - and after that whole process, return the max_sum - So the output for above arr is 8 - - >>> kadanes([2, 3, -9, 8, -2]) - 8 - >>> [kadanes(arr) for arr in test_data] - [-2, 19, 1, 0, 0] - """ - max_sum = negative_exist(arr) - if max_sum < 0: - return max_sum - - max_sum = 0 - max_till_element = 0 - - for i in arr: - max_till_element += i - max_sum = max(max_sum, max_till_element) - max_till_element = max(max_till_element, 0) - return max_sum - - -if __name__ == "__main__": - try: - print("Enter integer values sepatated by spaces") - arr = [int(x) for x in input().split()] - print(f"Maximum subarray sum of {arr} is {kadanes(arr)}") - except ValueError: - print("Please enter integer values.") diff 
--git a/maths/largest_subarray_sum.py b/maths/largest_subarray_sum.py deleted file mode 100644 index 90f92c7127bf..000000000000 --- a/maths/largest_subarray_sum.py +++ /dev/null @@ -1,21 +0,0 @@ -from sys import maxsize - - -def max_sub_array_sum(a: list, size: int = 0): - """ - >>> max_sub_array_sum([-13, -3, -25, -20, -3, -16, -23, -12, -5, -22, -15, -4, -7]) - -3 - """ - size = size or len(a) - max_so_far = -maxsize - 1 - max_ending_here = 0 - for i in range(0, size): - max_ending_here = max_ending_here + a[i] - max_so_far = max(max_so_far, max_ending_here) - max_ending_here = max(max_ending_here, 0) - return max_so_far - - -if __name__ == "__main__": - a = [-13, -3, -25, -20, 1, -16, -23, -12, -5, -22, -15, -4, -7] - print(("Maximum contiguous sum is", max_sub_array_sum(a, len(a)))) diff --git a/other/maximum_subarray.py b/other/maximum_subarray.py deleted file mode 100644 index 1c8c8cabcd2d..000000000000 --- a/other/maximum_subarray.py +++ /dev/null @@ -1,32 +0,0 @@ -from collections.abc import Sequence - - -def max_subarray_sum(nums: Sequence[int]) -> int: - """Return the maximum possible sum amongst all non - empty subarrays. - - Raises: - ValueError: when nums is empty. - - >>> max_subarray_sum([1,2,3,4,-2]) - 10 - >>> max_subarray_sum([-2,1,-3,4,-1,2,1,-5,4]) - 6 - """ - if not nums: - raise ValueError("Input sequence should not be empty") - - curr_max = ans = nums[0] - nums_len = len(nums) - - for i in range(1, nums_len): - num = nums[i] - curr_max = max(curr_max + num, num) - ans = max(curr_max, ans) - - return ans - - -if __name__ == "__main__": - n = int(input("Enter number of elements : ").strip()) - array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n] - print(max_subarray_sum(array)) From 44b1bcc7c7e0f15385530bf54c59ad4eb86fef0b Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Tue, 11 Jul 2023 10:51:21 +0100 Subject: [PATCH 0879/1543] Fix failing tests from ruff/newton_raphson (ignore S307 "possibly insecure function") (#8862) * chore: Fix failing tests (ignore S307 "possibly insecure function") * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: Move noqa back to right line --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- arithmetic_analysis/newton_raphson.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arithmetic_analysis/newton_raphson.py b/arithmetic_analysis/newton_raphson.py index aee2f07e5743..1b90ad4177f6 100644 --- a/arithmetic_analysis/newton_raphson.py +++ b/arithmetic_analysis/newton_raphson.py @@ -25,9 +25,11 @@ def newton_raphson( """ x = a while True: - x = Decimal(x) - (Decimal(eval(func)) / Decimal(eval(str(diff(func))))) + x = Decimal(x) - ( + Decimal(eval(func)) / Decimal(eval(str(diff(func)))) # noqa: S307 + ) # This number dictates the accuracy of the answer - if abs(eval(func)) < precision: + if abs(eval(func)) < precision: # noqa: S307 return float(x) From f614ed72170011d2d439f7901e1c8daa7deac8c4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 11 Jul 2023 11:55:32 +0200 Subject: [PATCH 0880/1543] [pre-commit.ci] pre-commit autoupdate (#8860) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.276 → v0.0.277](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.276...v0.0.277) - [github.com/tox-dev/pyproject-fmt: 
0.12.1 → 0.13.0](https://github.com/tox-dev/pyproject-fmt/compare/0.12.1...0.13.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 42ebeed14fa9..bf30703bdffc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.276 + rev: v0.0.277 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.12.1" + rev: "0.13.0" hooks: - id: pyproject-fmt From 5aefc00f0f1c692ce772ddbc616d7cd91233236b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 09:58:22 +0530 Subject: [PATCH 0881/1543] [pre-commit.ci] pre-commit autoupdate (#8872) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.277 → v0.0.278](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.277...v0.0.278) - [github.com/psf/black: 23.3.0 → 23.7.0](https://github.com/psf/black/compare/23.3.0...23.7.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bf30703bdffc..13b955dd374f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.277 + rev: v0.0.278 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.3.0 + rev: 23.7.0 hooks: - id: black From 93fb169627ea9fe43436a312fdfa751818808180 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 22 Jul 2023 13:05:10 +0300 Subject: [PATCH 0882/1543] [Upgrade Ruff] Fix all errors raised from ruff (#8879) * chore: Fix tests * chore: Fix failing ruff * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * chore: Fix ruff errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * chore: Fix ruff errors * chore: Fix ruff errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update cellular_automata/game_of_life.py Co-authored-by: Christian Clauss * chore: Update ruff version in pre-commit * chore: Fix ruff errors * Update edmonds_karp_multiple_source_and_sink.py * Update factorial.py * Update primelib.py * Update min_cost_string_conversion.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- cellular_automata/game_of_life.py | 2 +- data_structures/binary_tree/red_black_tree.py | 2 +- data_structures/trie/radix_tree.py | 4 ++-- divide_and_conquer/convex_hull.py | 2 +- ...directed_and_undirected_(weighted)_graph.py | 18 +++++++++--------- .../edmonds_karp_multiple_source_and_sink.py | 2 +- maths/factorial.py | 2 +- maths/primelib.py | 2 +- other/davisb_putnamb_logemannb_loveland.py | 2 +- project_euler/problem_009/sol3.py | 16 
++++++++++------ quantum/ripple_adder_classic.py | 2 +- strings/min_cost_string_conversion.py | 2 +- web_programming/convert_number_to_words.py | 4 +--- 14 files changed, 32 insertions(+), 30 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 13b955dd374f..5adf12cc70c5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.278 + rev: v0.0.280 hooks: - id: ruff diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index 3382af7b5db6..b69afdce03eb 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -98,7 +98,7 @@ def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool: if pt: if alive < 2: state = False - elif alive == 2 or alive == 3: + elif alive in {2, 3}: state = True elif alive > 3: state = False diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index 3ebc8d63939b..4ebe0e927ca0 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -152,7 +152,7 @@ def _insert_repair(self) -> None: self.grandparent.color = 1 self.grandparent._insert_repair() - def remove(self, label: int) -> RedBlackTree: + def remove(self, label: int) -> RedBlackTree: # noqa: PLR0912 """Remove label from this tree.""" if self.label == label: if self.left and self.right: diff --git a/data_structures/trie/radix_tree.py b/data_structures/trie/radix_tree.py index 66890346ec2b..cf2f25c29f13 100644 --- a/data_structures/trie/radix_tree.py +++ b/data_structures/trie/radix_tree.py @@ -156,7 +156,7 @@ def delete(self, word: str) -> bool: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes) == 1 and not self.is_leaf: - merging_node = list(self.nodes.values())[0] + merging_node = next(iter(self.nodes.values())) self.is_leaf = merging_node.is_leaf self.prefix += merging_node.prefix self.nodes = merging_node.nodes @@ -165,7 +165,7 @@ def delete(self, word: str) -> bool: incoming_node.is_leaf = False # If there is 1 edge, we merge it with its child else: - merging_node = list(incoming_node.nodes.values())[0] + merging_node = next(iter(incoming_node.nodes.values())) incoming_node.is_leaf = merging_node.is_leaf incoming_node.prefix += merging_node.prefix incoming_node.nodes = merging_node.nodes diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index 1ad933417da6..1d1bf301def5 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -266,7 +266,7 @@ def convex_hull_bf(points: list[Point]) -> list[Point]: points_left_of_ij = points_right_of_ij = False ij_part_of_convex_hull = True for k in range(n): - if k != i and k != j: + if k not in {i, j}: det_k = _det(points[i], points[j], points[k]) if det_k > 0: diff --git a/graphs/directed_and_undirected_(weighted)_graph.py b/graphs/directed_and_undirected_(weighted)_graph.py index b29485031083..8ca645fdace8 100644 --- a/graphs/directed_and_undirected_(weighted)_graph.py +++ b/graphs/directed_and_undirected_(weighted)_graph.py @@ -39,7 +39,7 @@ def dfs(self, s=-2, d=-1): stack = [] visited = [] if s == -2: - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) ss = s @@ -87,7 +87,7 @@ def bfs(self, s=-2): d = deque() visited = [] if s == -2: - s = list(self.graph)[0] + s = next(iter(self.graph)) d.append(s) 
visited.append(s) while d: @@ -114,7 +114,7 @@ def topological_sort(self, s=-2): stack = [] visited = [] if s == -2: - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) ss = s @@ -146,7 +146,7 @@ def topological_sort(self, s=-2): def cycle_nodes(self): stack = [] visited = [] - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) parent = -2 @@ -199,7 +199,7 @@ def cycle_nodes(self): def has_cycle(self): stack = [] visited = [] - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) parent = -2 @@ -305,7 +305,7 @@ def dfs(self, s=-2, d=-1): stack = [] visited = [] if s == -2: - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) ss = s @@ -353,7 +353,7 @@ def bfs(self, s=-2): d = deque() visited = [] if s == -2: - s = list(self.graph)[0] + s = next(iter(self.graph)) d.append(s) visited.append(s) while d: @@ -371,7 +371,7 @@ def degree(self, u): def cycle_nodes(self): stack = [] visited = [] - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) parent = -2 @@ -424,7 +424,7 @@ def cycle_nodes(self): def has_cycle(self): stack = [] visited = [] - s = list(self.graph)[0] + s = next(iter(self.graph)) stack.append(s) visited.append(s) parent = -2 diff --git a/graphs/edmonds_karp_multiple_source_and_sink.py b/graphs/edmonds_karp_multiple_source_and_sink.py index d0610804109f..5c774f4b812b 100644 --- a/graphs/edmonds_karp_multiple_source_and_sink.py +++ b/graphs/edmonds_karp_multiple_source_and_sink.py @@ -113,7 +113,7 @@ def _algorithm(self): vertices_list = [ i for i in range(self.verticies_count) - if i != self.source_index and i != self.sink_index + if i not in {self.source_index, self.sink_index} ] # move through list diff --git a/maths/factorial.py b/maths/factorial.py index bbf0efc011d8..18cacdef9b1f 100644 --- a/maths/factorial.py +++ b/maths/factorial.py @@ -55,7 +55,7 @@ def factorial_recursive(n: int) -> int: raise ValueError("factorial() only accepts integral values") if n < 0: raise ValueError("factorial() not defined for negative values") - return 1 if n == 0 or n == 1 else n * factorial(n - 1) + return 1 if n in {0, 1} else n * factorial(n - 1) if __name__ == "__main__": diff --git a/maths/primelib.py b/maths/primelib.py index 81d5737063f0..28b5aee9dcc8 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -154,7 +154,7 @@ def prime_factorization(number): quotient = number - if number == 0 or number == 1: + if number in {0, 1}: ans.append(number) # if 'number' not prime then builds the prime factorization of 'number' diff --git a/other/davisb_putnamb_logemannb_loveland.py b/other/davisb_putnamb_logemannb_loveland.py index a1bea5b3992e..f5fb103ba528 100644 --- a/other/davisb_putnamb_logemannb_loveland.py +++ b/other/davisb_putnamb_logemannb_loveland.py @@ -253,7 +253,7 @@ def find_unit_clauses( unit_symbols = [] for clause in clauses: if len(clause) == 1: - unit_symbols.append(list(clause.literals.keys())[0]) + unit_symbols.append(next(iter(clause.literals.keys()))) else: f_count, n_count = 0, 0 for literal, value in clause.literals.items(): diff --git a/project_euler/problem_009/sol3.py b/project_euler/problem_009/sol3.py index d299f821d4f6..37340d3063bb 100644 --- a/project_euler/problem_009/sol3.py +++ b/project_euler/problem_009/sol3.py @@ -28,12 +28,16 @@ def solution() -> int: 31875000 """ - return [ - a * b * (1000 - a - b) - for a in range(1, 999) - for b in range(a, 999) - if (a * a + b * b == (1000 
- a - b) ** 2) - ][0] + return next( + iter( + [ + a * b * (1000 - a - b) + for a in range(1, 999) + for b in range(a, 999) + if (a * a + b * b == (1000 - a - b) ** 2) + ] + ) + ) if __name__ == "__main__": diff --git a/quantum/ripple_adder_classic.py b/quantum/ripple_adder_classic.py index b604395bc583..2284141ccac2 100644 --- a/quantum/ripple_adder_classic.py +++ b/quantum/ripple_adder_classic.py @@ -107,7 +107,7 @@ def ripple_adder( res = qiskit.execute(circuit, backend, shots=1).result() # The result is in binary. Convert it back to int - return int(list(res.get_counts())[0], 2) + return int(next(iter(res.get_counts())), 2) if __name__ == "__main__": diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index 089c2532f900..0fad0b88c370 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -61,7 +61,7 @@ def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: if i == 0 and j == 0: return [] else: - if ops[i][j][0] == "C" or ops[i][j][0] == "R": + if ops[i][j][0] in {"C", "R"}: seq = assemble_transformation(ops, i - 1, j - 1) seq.append(ops[i][j]) return seq diff --git a/web_programming/convert_number_to_words.py b/web_programming/convert_number_to_words.py index 1e293df9660c..dac9e3e38e7c 100644 --- a/web_programming/convert_number_to_words.py +++ b/web_programming/convert_number_to_words.py @@ -90,9 +90,7 @@ def convert(number: int) -> str: else: addition = "" if counter in placevalue: - if current == 0 and ((temp_num % 100) // 10) == 0: - addition = "" - else: + if current != 0 and ((temp_num % 100) // 10) != 0: addition = placevalue[counter] if ((temp_num % 100) // 10) == 1: words = teens[current] + addition + words From f7531d9874e0dd3682bf0ed7ae408927e1fae472 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 22 Jul 2023 03:11:04 -0700 Subject: [PATCH 0883/1543] Add note in `CONTRIBUTING.md` about not asking to be assigned to issues (#8871) * Add note in CONTRIBUTING.md about not asking to be assigned to issues Add a paragraph to CONTRIBUTING.md explicitly asking contributors to not ask to be assigned to issues * Update CONTRIBUTING.md * Update CONTRIBUTING.md --------- Co-authored-by: Christian Clauss --- CONTRIBUTING.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 618cca868d83..4a1bb652738f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -25,6 +25,8 @@ We appreciate any contribution, from fixing a grammar mistake in a comment to im Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the GitHub Actions output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. +If you are interested in resolving an [open issue](https://github.com/TheAlgorithms/Python/issues), simply make a pull request with your proposed fix. __We do not assign issues in this repo__ so please do not ask for permission to work on an issue. + Please help us keep our issue list small by adding `Fixes #{$ISSUE_NUMBER}` to the description of pull requests that resolve open issues. 
For example, if your pull request fixes issue #10, then please add the following to its description: ``` From 9e08c7726dee5b18585a76e54c71922ca96c0b3a Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 22 Jul 2023 13:34:19 +0300 Subject: [PATCH 0884/1543] Small docstring time complexity fix in number_container _system (#8875) * fix: Write time is O(log n) not O(n log n) * chore: Update pre-commit ruff version * revert: Undo previous commit --- other/number_container_system.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/other/number_container_system.py b/other/number_container_system.py index f547bc8a229e..6c95dd0a3544 100644 --- a/other/number_container_system.py +++ b/other/number_container_system.py @@ -1,6 +1,6 @@ """ A number container system that uses binary search to delete and insert values into -arrays with O(n logn) write times and O(1) read times. +arrays with O(log n) write times and O(1) read times. This container system holds integers at indexes. From a03b739d23b59890b59d2d2288ebaa56e3be47ce Mon Sep 17 00:00:00 2001 From: Sangmin Jeon Date: Mon, 24 Jul 2023 18:29:05 +0900 Subject: [PATCH 0885/1543] Fix `radix_tree.py` insertion fail in ["*X", "*XX"] cases (#8870) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix insertion fail in ["*X", "*XX"] cases Consider a word, and a copy of that word, but with the last letter repeating twice. (e.g., ["ABC", "ABCC"]) When adding the second word's last letter, it only compares the previous word's prefix—the last letter of the word already in the Radix Tree: 'C'—and the letter to be added—the last letter of the word we're currently adding: 'C'. So it wrongly passes the "Case 1" check, marks the current node as a leaf node when it already was, then returns when there's still one more letter to add. The issue arises because `prefix` includes the letter of the node itself. (e.g., `nodes: {'C' : RadixNode()}, is_leaf: True, prefix: 'C'`) It can be easily fixed by simply adding the `is_leaf` check, asking if there are more letters to be added. 
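The effect of that check is easy to verify with the `RadixNode` helpers already present in `radix_tree.py` (a minimal doctest-style sketch, assuming `RadixNode` is in scope just as in the module's own doctests; the expected tree matches the fixed output in the test case below):
```
>>> root = RadixNode()
>>> root.insert_many(["A", "AA", "AAA", "AAAA"])  # same words as the test case below
>>> root.print_tree()  # with the is_leaf check, all four words keep their own leaf node
- A (leaf)
-- A (leaf)
--- A (leaf)
---- A (leaf)
```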
- Test Case: `"A AA AAA AAAA"` - Fixed correct output: ``` Words: ['A', 'AA', 'AAA', 'AAAA'] Tree: - A (leaf) -- A (leaf) --- A (leaf) ---- A (leaf) ``` - Current incorrect output: ``` Words: ['A', 'AA', 'AAA', 'AAAA'] Tree: - A (leaf) -- AA (leaf) --- A (leaf) ``` *N.B.* This passed test cases for [Croatian Open Competition in Informatics 2012/2013 Contest #3 Task 5 HERKABE](https://hsin.hr/coci/archive/2012_2013/) * Add a doctest for previous fix * improve doctest readability --- data_structures/trie/radix_tree.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/data_structures/trie/radix_tree.py b/data_structures/trie/radix_tree.py index cf2f25c29f13..fadc50cb49a7 100644 --- a/data_structures/trie/radix_tree.py +++ b/data_structures/trie/radix_tree.py @@ -54,10 +54,17 @@ def insert(self, word: str) -> None: word (str): word to insert >>> RadixNode("myprefix").insert("mystring") + + >>> root = RadixNode() + >>> root.insert_many(['myprefix', 'myprefixA', 'myprefixAA']) + >>> root.print_tree() + - myprefix (leaf) + -- A (leaf) + --- A (leaf) """ # Case 1: If the word is the prefix of the node # Solution: We set the current node as leaf - if self.prefix == word: + if self.prefix == word and not self.is_leaf: self.is_leaf = True # Case 2: The node has no edges that have a prefix to the word From b77e6adf3abba674eb83ab7c0182bd6c89c08891 Mon Sep 17 00:00:00 2001 From: HManiac74 <63391783+HManiac74@users.noreply.github.com> Date: Tue, 25 Jul 2023 22:23:20 +0200 Subject: [PATCH 0886/1543] Add Docker devcontainer configuration files (#8887) * Added Docker container configuration files * Update Dockerfile Copy and install requirements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated Docker devcontainer configuration * Update requierements.txt * Update Dockerfile * Update Dockerfile * Update .devcontainer/devcontainer.json Co-authored-by: Christian Clauss * Update Dockerfile * Update Dockerfile. Add linebreak --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .devcontainer/Dockerfile | 6 +++++ .devcontainer/devcontainer.json | 42 +++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 .devcontainer/Dockerfile create mode 100644 .devcontainer/devcontainer.json diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 000000000000..27b25c09b1c9 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,6 @@ +# https://github.com/microsoft/vscode-dev-containers/blob/main/containers/python-3/README.md +ARG VARIANT=3.11-bookworm +FROM mcr.microsoft.com/vscode/devcontainers/python:${VARIANT} +COPY requirements.txt /tmp/pip-tmp/ +RUN python3 -m pip install --upgrade pip \ + && python3 -m pip install --no-cache-dir install ruff -r /tmp/pip-tmp/requirements.txt diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 000000000000..c5a855b2550c --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,42 @@ +{ + "name": "Python 3", + "build": { + "dockerfile": "Dockerfile", + "context": "..", + "args": { + // Update 'VARIANT' to pick a Python version: 3, 3.10, 3.9, 3.8, 3.7, 3.6 + // Append -bullseye or -buster to pin to an OS version. + // Use -bullseye variants on local on arm64/Apple Silicon. + "VARIANT": "3.11-bookworm", + } + }, + + // Configure tool-specific properties. 
+ "customizations": { + // Configure properties specific to VS Code. + "vscode": { + // Set *default* container specific settings.json values on container create. + "settings": { + "python.defaultInterpreterPath": "/usr/local/bin/python", + "python.linting.enabled": true, + "python.formatting.blackPath": "/usr/local/py-utils/bin/black", + "python.linting.mypyPath": "/usr/local/py-utils/bin/mypy" + }, + + // Add the IDs of extensions you want installed when the container is created. + "extensions": [ + "ms-python.python", + "ms-python.vscode-pylance" + ] + } + }, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + // "postCreateCommand": "pip3 install --user -r requirements.txt", + + // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. + "remoteUser": "vscode" +} From dbaff345724040b270b3097cb02759f36ce0ef46 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 28 Jul 2023 18:53:09 +0200 Subject: [PATCH 0887/1543] Fix ruff rules ISC flake8-implicit-str-concat (#8892) --- ciphers/diffie_hellman.py | 244 ++++++++++++------------- compression/burrows_wheeler.py | 2 +- neural_network/input_data.py | 4 +- pyproject.toml | 2 +- strings/is_srilankan_phone_number.py | 4 +- web_programming/world_covid19_stats.py | 5 +- 6 files changed, 128 insertions(+), 133 deletions(-) diff --git a/ciphers/diffie_hellman.py b/ciphers/diffie_hellman.py index cd40a6b9c3b3..aec7fb3eaf17 100644 --- a/ciphers/diffie_hellman.py +++ b/ciphers/diffie_hellman.py @@ -10,13 +10,13 @@ 5: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" - + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" - + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" - + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" - + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" - + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" - + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" - + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, @@ -25,16 +25,16 @@ 14: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" - + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" - + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" - + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" - + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" - + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" - + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" - + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" - + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" - + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" - + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + 
"E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, @@ -43,21 +43,21 @@ 15: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" - + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" - + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" - + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" - + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" - + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" - + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" - + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" - + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" - + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" - + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" - + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" - + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" - + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" - + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" - + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, @@ -66,27 +66,27 @@ 16: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" - + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" - + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" - + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" - + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" - + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" - + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" - + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" - + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" - + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" - + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" - + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" - + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" - + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" - + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" - + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" - + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" - + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" - + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" - + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" - + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" - + "FFFFFFFFFFFFFFFF", + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + 
"DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, @@ -95,33 +95,33 @@ 17: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" - + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" - + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" - + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" - + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" - + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" - + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" - + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" - + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" - + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" - + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" - + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" - + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" - + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" - + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" - + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" - + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" - + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" - + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" - + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" - + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" - + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" - + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" - + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" - + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" - + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" - + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" - + "6DCC4024FFFFFFFFFFFFFFFF", + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + 
"AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, @@ -130,48 +130,48 @@ 18: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" - + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" - + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" - + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" - + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" - + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" - + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" - + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" - + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" - + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" - + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" - + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" - + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" - + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" - + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" - + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" - + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" - + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" - + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" - + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" - + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" - + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" - + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" - + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" - + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" - + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" - + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" - + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" - + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" - + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" - + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" - + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" - + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" - + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" - + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" - + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" - + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" - + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" - + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" - + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" - + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" - + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" - + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + 
"ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py index 0916b8a654d2..52bb045d9398 100644 --- a/compression/burrows_wheeler.py +++ b/compression/burrows_wheeler.py @@ -150,7 +150,7 @@ def reverse_bwt(bwt_string: str, idx_original_string: int) -> str: raise ValueError("The parameter idx_original_string must not be lower than 0.") if idx_original_string >= len(bwt_string): raise ValueError( - "The parameter idx_original_string must be lower than" " len(bwt_string)." + "The parameter idx_original_string must be lower than len(bwt_string)." 
) ordered_rotations = [""] * len(bwt_string) diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 94c018ece9ba..a58e64907e45 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -263,9 +263,7 @@ def _maybe_download(filename, work_directory, source_url): return filepath -@deprecated( - None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" -) +@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')") def read_data_sets( train_dir, fake_data=False, diff --git a/pyproject.toml b/pyproject.toml index 4f21a95190da..f9091fb8578d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,7 @@ select = [ # https://beta.ruff.rs/docs/rules "ICN", # flake8-import-conventions "INP", # flake8-no-pep420 "INT", # flake8-gettext + "ISC", # flake8-implicit-str-concat "N", # pep8-naming "NPY", # NumPy-specific rules "PGH", # pygrep-hooks @@ -72,7 +73,6 @@ select = [ # https://beta.ruff.rs/docs/rules # "DJ", # flake8-django # "ERA", # eradicate -- DO NOT FIX # "FBT", # flake8-boolean-trap # FIX ME - # "ISC", # flake8-implicit-str-concat # FIX ME # "PD", # pandas-vet # "PT", # flake8-pytest-style # "PTH", # flake8-use-pathlib # FIX ME diff --git a/strings/is_srilankan_phone_number.py b/strings/is_srilankan_phone_number.py index 7bded93f7f1d..6456f85e1a3d 100644 --- a/strings/is_srilankan_phone_number.py +++ b/strings/is_srilankan_phone_number.py @@ -22,9 +22,7 @@ def is_sri_lankan_phone_number(phone: str) -> bool: False """ - pattern = re.compile( - r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" - ) + pattern = re.compile(r"^(?:0|94|\+94|0{2}94)7(0|1|2|4|5|6|7|8)(-| |)\d{7}$") return bool(re.search(pattern, phone)) diff --git a/web_programming/world_covid19_stats.py b/web_programming/world_covid19_stats.py index 1dd1ff6d188e..ca81abdc4ce9 100644 --- a/web_programming/world_covid19_stats.py +++ b/web_programming/world_covid19_stats.py @@ -22,6 +22,5 @@ def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") if __name__ == "__main__": - print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n") - for key, value in world_covid19_stats().items(): - print(f"{key}\n{value}\n") + print("\033[1m COVID-19 Status of the World \033[0m\n") + print("\n".join(f"{key}\n{value}" for key, value in world_covid19_stats().items())) From 46454e204cc587d1ef044e4b1a11050c30aab4f6 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 28 Jul 2023 18:54:45 +0200 Subject: [PATCH 0888/1543] [skip-ci] In .devcontainer/Dockerfile: pipx install pre-commit ruff (#8893) [skip-ci] In .devcontainer/Dockerfile: pipx install pre-commit ruff --- .devcontainer/Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 27b25c09b1c9..b5a5347c66b0 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -3,4 +3,6 @@ ARG VARIANT=3.11-bookworm FROM mcr.microsoft.com/vscode/devcontainers/python:${VARIANT} COPY requirements.txt /tmp/pip-tmp/ RUN python3 -m pip install --upgrade pip \ - && python3 -m pip install --no-cache-dir install ruff -r /tmp/pip-tmp/requirements.txt + && python3 -m pip install --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \ + && pipx install pre-commit ruff \ + && pre-commit install From 4a83e3f0b1b2a3b414134c3498e57c0fea3b9fcf Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Fri, 28 Jul 2023 21:12:31 +0300 Subject: [PATCH 0889/1543] Fix failing build due to 
missing requirement (#8900) * feat(cellular_automata): Create wa-tor algorithm * updating DIRECTORY.md * chore(quality): Implement algo-keeper bot changes * build: Fix broken ci * git rm cellular_automata/wa_tor.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index acfbc823e77f..2702523d542e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,6 +9,7 @@ pandas pillow projectq qiskit +qiskit-aer requests rich scikit-fuzzy From e406801f9e3967ff0533dfe8cb98a3249db48d33 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 28 Jul 2023 11:17:46 -0700 Subject: [PATCH 0890/1543] Reimplement polynomial_regression.py (#8889) * Reimplement polynomial_regression.py Rename machine_learning/polymonial_regression.py to machine_learning/polynomial_regression.py Reimplement machine_learning/polynomial_regression.py using numpy because the old original implementation was just a how-to on doing polynomial regression using sklearn Add detailed function documentation, doctests, and algorithm explanation * updating DIRECTORY.md * Fix matrix formatting in docstrings * Try to fix failing doctest * Debugging failing doctest * Fix failing doctest attempt 2 * Remove unnecessary return value descriptions in docstrings * Readd placeholder doctest for main function * Fix typo in algorithm description --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 +- machine_learning/polymonial_regression.py | 44 ----- machine_learning/polynomial_regression.py | 213 ++++++++++++++++++++++ 3 files changed, 214 insertions(+), 45 deletions(-) delete mode 100644 machine_learning/polymonial_regression.py create mode 100644 machine_learning/polynomial_regression.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 77938f45011b..133a1ab019d8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -511,7 +511,7 @@ * Lstm * [Lstm Prediction](machine_learning/lstm/lstm_prediction.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) - * [Polymonial Regression](machine_learning/polymonial_regression.py) + * [Polynomial Regression](machine_learning/polynomial_regression.py) * [Scoring Functions](machine_learning/scoring_functions.py) * [Self Organizing Map](machine_learning/self_organizing_map.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) diff --git a/machine_learning/polymonial_regression.py b/machine_learning/polymonial_regression.py deleted file mode 100644 index 487fb814526f..000000000000 --- a/machine_learning/polymonial_regression.py +++ /dev/null @@ -1,44 +0,0 @@ -import pandas as pd -from matplotlib import pyplot as plt -from sklearn.linear_model import LinearRegression - -# Splitting the dataset into the Training set and Test set -from sklearn.model_selection import train_test_split - -# Fitting Polynomial Regression to the dataset -from sklearn.preprocessing import PolynomialFeatures - -# Importing the dataset -dataset = pd.read_csv( - "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" - "position_salaries.csv" -) -X = dataset.iloc[:, 1:2].values -y = dataset.iloc[:, 2].values - - -X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) - - -poly_reg = PolynomialFeatures(degree=4) -X_poly = poly_reg.fit_transform(X) -pol_reg = LinearRegression() 
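# The sklearn pipeline deleted here expands X with PolynomialFeatures(degree=4) and
# fits a LinearRegression on the expanded columns; the numpy-based replacement added
# below computes comparable least-squares coefficients directly from the Vandermonde
# matrix via np.linalg.pinv, as described in its module docstring.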
-pol_reg.fit(X_poly, y) - - -# Visualizing the Polymonial Regression results -def viz_polymonial(): - plt.scatter(X, y, color="red") - plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue") - plt.title("Truth or Bluff (Linear Regression)") - plt.xlabel("Position level") - plt.ylabel("Salary") - plt.show() - - -if __name__ == "__main__": - viz_polymonial() - - # Predicting a new result with Polymonial Regression - pol_reg.predict(poly_reg.fit_transform([[5.5]])) - # output should be 132148.43750003 diff --git a/machine_learning/polynomial_regression.py b/machine_learning/polynomial_regression.py new file mode 100644 index 000000000000..5bafea96f41e --- /dev/null +++ b/machine_learning/polynomial_regression.py @@ -0,0 +1,213 @@ +""" +Polynomial regression is a type of regression analysis that models the relationship +between a predictor x and the response y as an mth-degree polynomial: + +y = β₀ + β₁x + β₂x² + ... + βₘxᵐ + ε + +By treating x, x², ..., xᵐ as distinct variables, we see that polynomial regression is a +special case of multiple linear regression. Therefore, we can use ordinary least squares +(OLS) estimation to estimate the vector of model parameters β = (β₀, β₁, β₂, ..., βₘ) +for polynomial regression: + +β = (XᵀX)⁻¹Xᵀy = X⁺y + +where X is the design matrix, y is the response vector, and X⁺ denotes the Moore–Penrose +pseudoinverse of X. In the case of polynomial regression, the design matrix is + + |1 x₁ x₁² ⋯ x₁ᵐ| +X = |1 x₂ x₂² ⋯ x₂ᵐ| + |⋮ ⋮ ⋮ ⋱ ⋮ | + |1 xₙ xₙ² ⋯ xₙᵐ| + +In OLS estimation, inverting XᵀX to compute X⁺ can be very numerically unstable. This +implementation sidesteps this need to invert XᵀX by computing X⁺ using singular value +decomposition (SVD): + +β = VΣ⁺Uᵀy + +where UΣVᵀ is an SVD of X. + +References: + - https://en.wikipedia.org/wiki/Polynomial_regression + - https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse + - https://en.wikipedia.org/wiki/Numerical_methods_for_linear_least_squares + - https://en.wikipedia.org/wiki/Singular_value_decomposition +""" + +import matplotlib.pyplot as plt +import numpy as np + + +class PolynomialRegression: + __slots__ = "degree", "params" + + def __init__(self, degree: int) -> None: + """ + @raises ValueError: if the polynomial degree is negative + """ + if degree < 0: + raise ValueError("Polynomial degree must be non-negative") + + self.degree = degree + self.params = None + + @staticmethod + def _design_matrix(data: np.ndarray, degree: int) -> np.ndarray: + """ + Constructs a polynomial regression design matrix for the given input data. 
For + input data x = (x₁, x₂, ..., xₙ) and polynomial degree m, the design matrix is + the Vandermonde matrix + + |1 x₁ x₁² ⋯ x₁ᵐ| + X = |1 x₂ x₂² ⋯ x₂ᵐ| + |⋮ ⋮ ⋮ ⋱ ⋮ | + |1 xₙ xₙ² ⋯ xₙᵐ| + + Reference: https://en.wikipedia.org/wiki/Vandermonde_matrix + + @param data: the input predictor values x, either for model fitting or for + prediction + @param degree: the polynomial degree m + @returns: the Vandermonde matrix X (see above) + @raises ValueError: if input data is not N x 1 + + >>> x = np.array([0, 1, 2]) + >>> PolynomialRegression._design_matrix(x, degree=0) + array([[1], + [1], + [1]]) + >>> PolynomialRegression._design_matrix(x, degree=1) + array([[1, 0], + [1, 1], + [1, 2]]) + >>> PolynomialRegression._design_matrix(x, degree=2) + array([[1, 0, 0], + [1, 1, 1], + [1, 2, 4]]) + >>> PolynomialRegression._design_matrix(x, degree=3) + array([[1, 0, 0, 0], + [1, 1, 1, 1], + [1, 2, 4, 8]]) + >>> PolynomialRegression._design_matrix(np.array([[0, 0], [0 , 0]]), degree=3) + Traceback (most recent call last): + ... + ValueError: Data must have dimensions N x 1 + """ + rows, *remaining = data.shape + if remaining: + raise ValueError("Data must have dimensions N x 1") + + return np.vander(data, N=degree + 1, increasing=True) + + def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None: + """ + Computes the polynomial regression model parameters using ordinary least squares + (OLS) estimation: + + β = (XᵀX)⁻¹Xᵀy = X⁺y + + where X⁺ denotes the Moore–Penrose pseudoinverse of the design matrix X. This + function computes X⁺ using singular value decomposition (SVD). + + References: + - https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse + - https://en.wikipedia.org/wiki/Singular_value_decomposition + - https://en.wikipedia.org/wiki/Multicollinearity + + @param x_train: the predictor values x for model fitting + @param y_train: the response values y for model fitting + @raises ArithmeticError: if X isn't full rank, then XᵀX is singular and β + doesn't exist + + >>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + >>> y = x**3 - 2 * x**2 + 3 * x - 5 + >>> poly_reg = PolynomialRegression(degree=3) + >>> poly_reg.fit(x, y) + >>> poly_reg.params + array([-5., 3., -2., 1.]) + >>> poly_reg = PolynomialRegression(degree=20) + >>> poly_reg.fit(x, y) + Traceback (most recent call last): + ... + ArithmeticError: Design matrix is not full rank, can't compute coefficients + + Make sure errors don't grow too large: + >>> coefs = np.array([-250, 50, -2, 36, 20, -12, 10, 2, -1, -15, 1]) + >>> y = PolynomialRegression._design_matrix(x, len(coefs) - 1) @ coefs + >>> poly_reg = PolynomialRegression(degree=len(coefs) - 1) + >>> poly_reg.fit(x, y) + >>> np.allclose(poly_reg.params, coefs, atol=10e-3) + True + """ + X = PolynomialRegression._design_matrix(x_train, self.degree) # noqa: N806 + _, cols = X.shape + if np.linalg.matrix_rank(X) < cols: + raise ArithmeticError( + "Design matrix is not full rank, can't compute coefficients" + ) + + # np.linalg.pinv() computes the Moore–Penrose pseudoinverse using SVD + self.params = np.linalg.pinv(X) @ y_train + + def predict(self, data: np.ndarray) -> np.ndarray: + """ + Computes the predicted response values y for the given input data by + constructing the design matrix X and evaluating y = Xβ. 
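        For example, with the fitted parameters β = (-5, 3, -2, 1) from the cubic in
        the doctests, the prediction at x = 2 is -5 + 3*2 - 2*2**2 + 1*2**3 = 1.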
+ + @param data: the predictor values x for prediction + @returns: the predicted response values y = Xβ + @raises ArithmeticError: if this function is called before the model + parameters are fit + + >>> x = np.array([0, 1, 2, 3, 4]) + >>> y = x**3 - 2 * x**2 + 3 * x - 5 + >>> poly_reg = PolynomialRegression(degree=3) + >>> poly_reg.fit(x, y) + >>> poly_reg.predict(np.array([-1])) + array([-11.]) + >>> poly_reg.predict(np.array([-2])) + array([-27.]) + >>> poly_reg.predict(np.array([6])) + array([157.]) + >>> PolynomialRegression(degree=3).predict(x) + Traceback (most recent call last): + ... + ArithmeticError: Predictor hasn't been fit yet + """ + if self.params is None: + raise ArithmeticError("Predictor hasn't been fit yet") + + return PolynomialRegression._design_matrix(data, self.degree) @ self.params + + +def main() -> None: + """ + Fit a polynomial regression model to predict fuel efficiency using seaborn's mpg + dataset + + >>> pass # Placeholder, function is only for demo purposes + """ + import seaborn as sns + + mpg_data = sns.load_dataset("mpg") + + poly_reg = PolynomialRegression(degree=2) + poly_reg.fit(mpg_data.weight, mpg_data.mpg) + + weight_sorted = np.sort(mpg_data.weight) + predictions = poly_reg.predict(weight_sorted) + + plt.scatter(mpg_data.weight, mpg_data.mpg, color="gray", alpha=0.5) + plt.plot(weight_sorted, predictions, color="red", linewidth=3) + plt.title("Predicting Fuel Efficiency Using Polynomial Regression") + plt.xlabel("Weight (lbs)") + plt.ylabel("Fuel Efficiency (mpg)") + plt.show() + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + main() From a0b642cfe58c215b8ead3f2a40655e144e07aacc Mon Sep 17 00:00:00 2001 From: Alex Bernhardt <54606095+FatAnorexic@users.noreply.github.com> Date: Fri, 28 Jul 2023 14:30:05 -0400 Subject: [PATCH 0891/1543] Physics/basic orbital capture (#8857) * Added file basic_orbital_capture * updating DIRECTORY.md * added second source * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed spelling errors * accepted changes * updating DIRECTORY.md * corrected spelling error * Added file basic_orbital_capture * added second source * fixed spelling errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * applied changes * reviewed and checked file * added doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed redundant constnant * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added scipy imports * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added doctests to capture_radii and scipy const * fixed conflicts * finalizing file. 
Added tests * Update physics/basic_orbital_capture.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + physics/basic_orbital_capture.py | 178 +++++++++++++++++++++++++++++++ 2 files changed, 179 insertions(+) create mode 100644 physics/basic_orbital_capture.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 133a1ab019d8..29514579ceb0 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -741,6 +741,7 @@ ## Physics * [Archimedes Principle](physics/archimedes_principle.py) + * [Basic Orbital Capture](physics/basic_orbital_capture.py) * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) * [Grahams Law](physics/grahams_law.py) diff --git a/physics/basic_orbital_capture.py b/physics/basic_orbital_capture.py new file mode 100644 index 000000000000..eeb45e60240c --- /dev/null +++ b/physics/basic_orbital_capture.py @@ -0,0 +1,178 @@ +from math import pow, sqrt + +from scipy.constants import G, c, pi + +""" +These two functions will return the radii of impact for a target object +of mass M and radius R as well as it's effective cross sectional area σ(sigma). +That is to say any projectile with velocity v passing within σ, will impact the +target object with mass M. The derivation of which is given at the bottom +of this file. + +The derivation shows that a projectile does not need to aim directly at the target +body in order to hit it, as R_capture>R_target. Astronomers refer to the effective +cross section for capture as σ=π*R_capture**2. + +This algorithm does not account for an N-body problem. + +""" + + +def capture_radii( + target_body_radius: float, target_body_mass: float, projectile_velocity: float +) -> float: + """ + Input Params: + ------------- + target_body_radius: Radius of the central body SI units: meters | m + target_body_mass: Mass of the central body SI units: kilograms | kg + projectile_velocity: Velocity of object moving toward central body + SI units: meters/second | m/s + Returns: + -------- + >>> capture_radii(6.957e8, 1.99e30, 25000.0) + 17209590691.0 + >>> capture_radii(-6.957e8, 1.99e30, 25000.0) + Traceback (most recent call last): + ... + ValueError: Radius cannot be less than 0 + >>> capture_radii(6.957e8, -1.99e30, 25000.0) + Traceback (most recent call last): + ... + ValueError: Mass cannot be less than 0 + >>> capture_radii(6.957e8, 1.99e30, c+1) + Traceback (most recent call last): + ... + ValueError: Cannot go beyond speed of light + + Returned SI units: + ------------------ + meters | m + """ + + if target_body_mass < 0: + raise ValueError("Mass cannot be less than 0") + if target_body_radius < 0: + raise ValueError("Radius cannot be less than 0") + if projectile_velocity > c: + raise ValueError("Cannot go beyond speed of light") + + escape_velocity_squared = (2 * G * target_body_mass) / target_body_radius + capture_radius = target_body_radius * sqrt( + 1 + escape_velocity_squared / pow(projectile_velocity, 2) + ) + return round(capture_radius, 0) + + +def capture_area(capture_radius: float) -> float: + """ + Input Param: + ------------ + capture_radius: The radius of orbital capture and impact for a central body of + mass M and a projectile moving towards it with velocity v + SI units: meters | m + Returns: + -------- + >>> capture_area(17209590691) + 9.304455331329126e+20 + >>> capture_area(-1) + Traceback (most recent call last): + ... 
+ ValueError: Cannot have a capture radius less than 0 + + Returned SI units: + ------------------ + meters*meters | m**2 + """ + + if capture_radius < 0: + raise ValueError("Cannot have a capture radius less than 0") + sigma = pi * pow(capture_radius, 2) + return round(sigma, 0) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + +""" +Derivation: + +Let: Mt=target mass, Rt=target radius, v=projectile_velocity, + r_0=radius of projectile at instant 0 to CM of target + v_p=v at closest approach, + r_p=radius from projectile to target CM at closest approach, + R_capture= radius of impact for projectile with velocity v + +(1)At time=0 the projectile's energy falling from infinity| E=K+U=0.5*m*(v**2)+0 + + E_initial=0.5*m*(v**2) + +(2)at time=0 the angular momentum of the projectile relative to CM target| + L_initial=m*r_0*v*sin(Θ)->m*r_0*v*(R_capture/r_0)->m*v*R_capture + + L_i=m*v*R_capture + +(3)The energy of the projectile at closest approach will be its kinetic energy + at closest approach plus gravitational potential energy(-(GMm)/R)| + E_p=K_p+U_p->E_p=0.5*m*(v_p**2)-(G*Mt*m)/r_p + + E_p=0.0.5*m*(v_p**2)-(G*Mt*m)/r_p + +(4)The angular momentum of the projectile relative to the target at closest + approach will be L_p=m*r_p*v_p*sin(Θ), however relative to the target Θ=90° + sin(90°)=1| + + L_p=m*r_p*v_p +(5)Using conservation of angular momentum and energy, we can write a quadratic + equation that solves for r_p| + + (a) + Ei=Ep-> 0.5*m*(v**2)=0.5*m*(v_p**2)-(G*Mt*m)/r_p-> v**2=v_p**2-(2*G*Mt)/r_p + + (b) + Li=Lp-> m*v*R_capture=m*r_p*v_p-> v*R_capture=r_p*v_p-> v_p=(v*R_capture)/r_p + + (c) b plugs int a| + v**2=((v*R_capture)/r_p)**2-(2*G*Mt)/r_p-> + + v**2-(v**2)*(R_c**2)/(r_p**2)+(2*G*Mt)/r_p=0-> + + (v**2)*(r_p**2)+2*G*Mt*r_p-(v**2)*(R_c**2)=0 + + (d) Using the quadratic formula, we'll solve for r_p then rearrange to solve to + R_capture + + r_p=(-2*G*Mt ± sqrt(4*G^2*Mt^2+ 4(v^4*R_c^2)))/(2*v^2)-> + + r_p=(-G*Mt ± sqrt(G^2*Mt+v^4*R_c^2))/v^2-> + + r_p<0 is something we can ignore, as it has no physical meaning for our purposes.-> + + r_p=(-G*Mt)/v^2 + sqrt(G^2*Mt^2/v^4 + R_c^2) + + (e)We are trying to solve for R_c. 
We are looking for impact, so we want r_p=Rt + + Rt + G*Mt/v^2 = sqrt(G^2*Mt^2/v^4 + R_c^2)-> + + (Rt + G*Mt/v^2)^2 = G^2*Mt^2/v^4 + R_c^2-> + + Rt^2 + 2*G*Mt*Rt/v^2 + G^2*Mt^2/v^4 = G^2*Mt^2/v^4 + R_c^2-> + + Rt**2 + 2*G*Mt*Rt/v**2 = R_c**2-> + + Rt**2 * (1 + 2*G*Mt/Rt *1/v**2) = R_c**2-> + + escape velocity = sqrt(2GM/R)= v_escape**2=2GM/R-> + + Rt**2 * (1 + v_esc**2/v**2) = R_c**2-> + +(6) + R_capture = Rt * sqrt(1 + v_esc**2/v**2) + +Source: Problem Set 3 #8 c.Fall_2017|Honors Astronomy|Professor Rachel Bezanson + +Source #2: http://www.nssc.ac.cn/wxzygx/weixin/201607/P020160718380095698873.pdf + 8.8 Planetary Rendezvous: Pg.368 +""" From 0ef930697632a1f05dbbd956c4ccab0473025f5b Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 28 Jul 2023 13:08:40 -0700 Subject: [PATCH 0892/1543] Disable quantum/quantum_random.py (attempt 2) (#8902) * Disable quantum/quantum_random.py Temporarily disable quantum/quantum_random.py because it produces an illegal instruction error that causes all builds to fail * updating DIRECTORY.md * Disable quantum/quantum_random.py attempt 2 --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - quantum/{quantum_random.py => quantum_random.py.DISABLED.txt} | 0 2 files changed, 1 deletion(-) rename quantum/{quantum_random.py => quantum_random.py.DISABLED.txt} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 29514579ceb0..af150b12984b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1063,7 +1063,6 @@ * [Q Fourier Transform](quantum/q_fourier_transform.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) - * [Quantum Random](quantum/quantum_random.py) * [Quantum Teleportation](quantum/quantum_teleportation.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) diff --git a/quantum/quantum_random.py b/quantum/quantum_random.py.DISABLED.txt similarity index 100% rename from quantum/quantum_random.py rename to quantum/quantum_random.py.DISABLED.txt From 2cfef0913a36e967d828881386ae78457cf65f33 Mon Sep 17 00:00:00 2001 From: Colin Leroy-Mira Date: Sat, 29 Jul 2023 19:03:43 +0200 Subject: [PATCH 0893/1543] Fix greyscale computation and inverted coords (#8905) * Fix greyscale computation and inverted coords * Fix test * Add test cases * Add reference to the greyscaling formula --------- Co-authored-by: Colin Leroy-Mira --- digital_image_processing/dithering/burkes.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/digital_image_processing/dithering/burkes.py b/digital_image_processing/dithering/burkes.py index 0804104abe58..35aedc16d404 100644 --- a/digital_image_processing/dithering/burkes.py +++ b/digital_image_processing/dithering/burkes.py @@ -39,9 +39,18 @@ def __init__(self, input_img, threshold: int): def get_greyscale(cls, blue: int, green: int, red: int) -> float: """ >>> Burkes.get_greyscale(3, 4, 5) - 3.753 + 4.185 + >>> Burkes.get_greyscale(0, 0, 0) + 0.0 + >>> Burkes.get_greyscale(255, 255, 255) + 255.0 """ - return 0.114 * blue + 0.587 * green + 0.2126 * red + """ + Formula from https://en.wikipedia.org/wiki/HSL_and_HSV + cf Lightness section, and Fig 13c. + We use the first of four possible. 
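    These are the Rec. 601 luma weights (0.299 R + 0.587 G + 0.114 B), which weight
    green most heavily because the eye is most sensitive to it. For example,
    (blue, green, red) = (3, 4, 5) gives 0.114*3 + 0.587*4 + 0.299*5 = 4.185,
    matching the doctest above.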
+ """ + return 0.114 * blue + 0.587 * green + 0.299 * red def process(self) -> None: for y in range(self.height): @@ -49,10 +58,10 @@ def process(self) -> None: greyscale = int(self.get_greyscale(*self.input_img[y][x])) if self.threshold > greyscale + self.error_table[y][x]: self.output_img[y][x] = (0, 0, 0) - current_error = greyscale + self.error_table[x][y] + current_error = greyscale + self.error_table[y][x] else: self.output_img[y][x] = (255, 255, 255) - current_error = greyscale + self.error_table[x][y] - 255 + current_error = greyscale + self.error_table[y][x] - 255 """ Burkes error propagation (`*` is current pixel): From d31750adece86ebf39a09dd3adb2039098f58586 Mon Sep 17 00:00:00 2001 From: Yatharth Mathur <31852880+yatharthmathur@users.noreply.github.com> Date: Sun, 30 Jul 2023 02:27:45 -0700 Subject: [PATCH 0894/1543] Pythonic implementation of LRU Cache (#4630) * Added a more pythonic implementation of LRU_Cache.[#4628] * Added test cases and doctest * Fixed doc tests * Added more tests in doctests and fixed return types fixes [#4628] * better doctests * added doctests to main() * Added dutch_national_flag.py in sorts. fixing [#4636] * Delete dutch_national_flag.py incorrect commit * Update lru_cache_pythonic.py * Remove pontification --------- Co-authored-by: Christian Clauss --- other/lru_cache_pythonic.py | 113 ++++++++++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 other/lru_cache_pythonic.py diff --git a/other/lru_cache_pythonic.py b/other/lru_cache_pythonic.py new file mode 100644 index 000000000000..425691ef18cf --- /dev/null +++ b/other/lru_cache_pythonic.py @@ -0,0 +1,113 @@ +""" +This implementation of LRU Cache uses the in-built Python dictionary (dict) which from +Python 3.6 onward maintain the insertion order of keys and ensures O(1) operations on +insert, delete and access. https://docs.python.org/3/library/stdtypes.html#typesmapping +""" +from typing import Any, Hashable + + +class LRUCache(dict): + def __init__(self, capacity: int) -> None: + """ + Initialize an LRU Cache with given capacity. + capacity : int -> the capacity of the LRU Cache + >>> cache = LRUCache(2) + >>> cache + {} + """ + self.remaining: int = capacity + + def get(self, key: Hashable) -> Any: + """ + This method returns the value associated with the key. + key : A hashable object that is mapped to a value in the LRU cache. + return -> Any object that has been stored as a value in the LRU cache. + + >>> cache = LRUCache(2) + >>> cache.put(1,1) + >>> cache.get(1) + 1 + >>> cache.get(2) + Traceback (most recent call last): + ... + KeyError: '2 not found.' + """ + if key not in self: + raise KeyError(f"{key} not found.") + val = self.pop(key) # Pop the key-value and re-insert to maintain the order + self[key] = val + return val + + def put(self, key: Hashable, value: Any) -> None: + """ + This method puts the value associated with the key provided in the LRU cache. + key : A hashable object that is mapped to a value in the LRU cache. + value: Any object that is to be associated with the key in the LRU cache. 
+ >>> cache = LRUCache(2) + >>> cache.put(3,3) + >>> cache + {3: 3} + >>> cache.put(2,2) + >>> cache + {3: 3, 2: 2} + """ + # To pop the last value inside of the LRU cache + if key in self: + self.pop(key) + self[key] = value + return + + if self.remaining > 0: + self.remaining -= 1 + # To pop the least recently used item from the dictionary + else: + self.pop(next(iter(self))) + self[key] = value + + +def main() -> None: + """Example test case with LRU_Cache of size 2 + >>> main() + 1 + Key=2 not found in cache + Key=1 not found in cache + 3 + 4 + """ + cache = LRUCache(2) # Creates an LRU cache with size 2 + cache.put(1, 1) # cache = {1:1} + cache.put(2, 2) # cache = {1:1, 2:2} + try: + print(cache.get(1)) # Prints 1 + except KeyError: + print("Key not found in cache") + cache.put( + 3, 3 + ) # cache = {1:1, 3:3} key=2 is evicted because it wasn't used recently + try: + print(cache.get(2)) + except KeyError: + print("Key=2 not found in cache") # Prints key not found + cache.put( + 4, 4 + ) # cache = {4:4, 3:3} key=1 is evicted because it wasn't used recently + try: + print(cache.get(1)) + except KeyError: + print("Key=1 not found in cache") # Prints key not found + try: + print(cache.get(3)) # Prints value 3 + except KeyError: + print("Key not found in cache") + + try: + print(cache.get(4)) # Prints value 4 + except KeyError: + print("Key not found in cache") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() From 8b831cb60003443c9967ac8a33df4151dc883484 Mon Sep 17 00:00:00 2001 From: Bazif Rasool <45148731+Bazifrasool@users.noreply.github.com> Date: Sun, 30 Jul 2023 20:30:58 +0530 Subject: [PATCH 0895/1543] Added Altitude Pressure equation (#8909) * Added Altitude Pressure equation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed trailing whitespaces * Removed pylint * Fix lru_cache_pythonic.py * Fixed spellings * Fix again lru_cache_pythonic.py * Update .vscode/settings.json Co-authored-by: Christian Clauss * Third fix lru_cache_pythonic.py * Update .vscode/settings.json Co-authored-by: Christian Clauss * 4th fix lru_cache_pythonic.py * Update physics/altitude_pressure.py Co-authored-by: Christian Clauss * lru_cache_pythonic.py: def get(self, key: Any, /) -> Any | None: * Delete lru_cache_pythonic.py * Added positive and negative pressure test cases * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- other/lru_cache_pythonic.py | 113 ----------------------------------- physics/altitude_pressure.py | 52 ++++++++++++++++ 2 files changed, 52 insertions(+), 113 deletions(-) delete mode 100644 other/lru_cache_pythonic.py create mode 100644 physics/altitude_pressure.py diff --git a/other/lru_cache_pythonic.py b/other/lru_cache_pythonic.py deleted file mode 100644 index 425691ef18cf..000000000000 --- a/other/lru_cache_pythonic.py +++ /dev/null @@ -1,113 +0,0 @@ -""" -This implementation of LRU Cache uses the in-built Python dictionary (dict) which from -Python 3.6 onward maintain the insertion order of keys and ensures O(1) operations on -insert, delete and access. https://docs.python.org/3/library/stdtypes.html#typesmapping -""" -from typing import Any, Hashable - - -class LRUCache(dict): - def __init__(self, capacity: int) -> None: - """ - Initialize an LRU Cache with given capacity. 
- capacity : int -> the capacity of the LRU Cache - >>> cache = LRUCache(2) - >>> cache - {} - """ - self.remaining: int = capacity - - def get(self, key: Hashable) -> Any: - """ - This method returns the value associated with the key. - key : A hashable object that is mapped to a value in the LRU cache. - return -> Any object that has been stored as a value in the LRU cache. - - >>> cache = LRUCache(2) - >>> cache.put(1,1) - >>> cache.get(1) - 1 - >>> cache.get(2) - Traceback (most recent call last): - ... - KeyError: '2 not found.' - """ - if key not in self: - raise KeyError(f"{key} not found.") - val = self.pop(key) # Pop the key-value and re-insert to maintain the order - self[key] = val - return val - - def put(self, key: Hashable, value: Any) -> None: - """ - This method puts the value associated with the key provided in the LRU cache. - key : A hashable object that is mapped to a value in the LRU cache. - value: Any object that is to be associated with the key in the LRU cache. - >>> cache = LRUCache(2) - >>> cache.put(3,3) - >>> cache - {3: 3} - >>> cache.put(2,2) - >>> cache - {3: 3, 2: 2} - """ - # To pop the last value inside of the LRU cache - if key in self: - self.pop(key) - self[key] = value - return - - if self.remaining > 0: - self.remaining -= 1 - # To pop the least recently used item from the dictionary - else: - self.pop(next(iter(self))) - self[key] = value - - -def main() -> None: - """Example test case with LRU_Cache of size 2 - >>> main() - 1 - Key=2 not found in cache - Key=1 not found in cache - 3 - 4 - """ - cache = LRUCache(2) # Creates an LRU cache with size 2 - cache.put(1, 1) # cache = {1:1} - cache.put(2, 2) # cache = {1:1, 2:2} - try: - print(cache.get(1)) # Prints 1 - except KeyError: - print("Key not found in cache") - cache.put( - 3, 3 - ) # cache = {1:1, 3:3} key=2 is evicted because it wasn't used recently - try: - print(cache.get(2)) - except KeyError: - print("Key=2 not found in cache") # Prints key not found - cache.put( - 4, 4 - ) # cache = {4:4, 3:3} key=1 is evicted because it wasn't used recently - try: - print(cache.get(1)) - except KeyError: - print("Key=1 not found in cache") # Prints key not found - try: - print(cache.get(3)) # Prints value 3 - except KeyError: - print("Key not found in cache") - - try: - print(cache.get(4)) # Prints value 4 - except KeyError: - print("Key not found in cache") - - -if __name__ == "__main__": - import doctest - - doctest.testmod() - main() diff --git a/physics/altitude_pressure.py b/physics/altitude_pressure.py new file mode 100644 index 000000000000..65307d223fa7 --- /dev/null +++ b/physics/altitude_pressure.py @@ -0,0 +1,52 @@ +""" +Title : Calculate altitude using Pressure + +Description : + The below algorithm approximates the altitude using Barometric formula + + +""" + + +def get_altitude_at_pressure(pressure: float) -> float: + """ + This method calculates the altitude from Pressure wrt to + Sea level pressure as reference .Pressure is in Pascals + https://en.wikipedia.org/wiki/Pressure_altitude + https://community.bosch-sensortec.com/t5/Question-and-answers/How-to-calculate-the-altitude-from-the-pressure-sensor-data/qaq-p/5702 + + H = 44330 * [1 - (P/p0)^(1/5.255) ] + + Where : + H = altitude (m) + P = measured pressure + p0 = reference pressure at sea level 101325 Pa + + Examples: + >>> get_altitude_at_pressure(pressure=100_000) + 105.47836610778828 + >>> get_altitude_at_pressure(pressure=101_325) + 0.0 + >>> get_altitude_at_pressure(pressure=80_000) + 1855.873388064995 + >>> 
get_altitude_at_pressure(pressure=201_325) + Traceback (most recent call last): + ... + ValueError: Value Higher than Pressure at Sea Level ! + >>> get_altitude_at_pressure(pressure=-80_000) + Traceback (most recent call last): + ... + ValueError: Atmospheric Pressure can not be negative ! + """ + + if pressure > 101325: + raise ValueError("Value Higher than Pressure at Sea Level !") + if pressure < 0: + raise ValueError("Atmospheric Pressure can not be negative !") + return 44_330 * (1 - (pressure / 101_325) ** (1 / 5.5255)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d4f2873e39f041513aa9f5c287ec9b46e2236dad Mon Sep 17 00:00:00 2001 From: AmirSoroush Date: Mon, 31 Jul 2023 03:54:15 +0300 Subject: [PATCH 0896/1543] add reverse_inorder traversal to binary_tree_traversals.py (#8726) * add reverse_inorder traversal to binary_tree_traversals.py * Apply suggestions from code review Co-authored-by: Tianyi Zheng --------- Co-authored-by: Tianyi Zheng --- .../binary_tree/binary_tree_traversals.py | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 71a895e76ce4..2afb7604f9c6 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -58,6 +58,19 @@ def inorder(root: Node | None) -> list[int]: return [*inorder(root.left), root.data, *inorder(root.right)] if root else [] +def reverse_inorder(root: Node | None) -> list[int]: + """ + Reverse in-order traversal visits right subtree, root node, left subtree. + >>> reverse_inorder(make_tree()) + [3, 1, 5, 2, 4] + """ + return ( + [*reverse_inorder(root.right), root.data, *reverse_inorder(root.left)] + if root + else [] + ) + + def height(root: Node | None) -> int: """ Recursive function for calculating the height of the binary tree. @@ -161,15 +174,12 @@ def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]: def main() -> None: # Main function for testing. - """ - Create binary tree. - """ + # Create binary tree. root = make_tree() - """ - All Traversals of the binary are as follows: - """ + # All Traversals of the binary are as follows: print(f"In-order Traversal: {inorder(root)}") + print(f"Reverse In-order Traversal: {reverse_inorder(root)}") print(f"Pre-order Traversal: {preorder(root)}") print(f"Post-order Traversal: {postorder(root)}", "\n") From 4710e51deb2dc07e32884391a36d40e08398e6be Mon Sep 17 00:00:00 2001 From: David Leal Date: Sun, 30 Jul 2023 19:15:30 -0600 Subject: [PATCH 0897/1543] chore: use newest Discord invite link (#8696) * updating DIRECTORY.md * chore: use newest Discord invite link --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index bf6e0ed3cf75..d8eba4e016fa 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Contributions Welcome - + Discord chat @@ -42,7 +42,7 @@ Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribut ## Community Channels -We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms/community)! Community channels are a great way for you to ask questions and get help. Please join us! +We are on [Discord](https://the-algorithms.com/discord) and [Gitter](https://gitter.im/TheAlgorithms/community)! 
Community channels are a great way for you to ask questions and get help. Please join us! ## List of Algorithms From 8cce9cf066396bb220515c03849fbc1a16d800d0 Mon Sep 17 00:00:00 2001 From: Almas Bekbayev <121730304+bekbayev@users.noreply.github.com> Date: Mon, 31 Jul 2023 07:32:05 +0600 Subject: [PATCH 0898/1543] Fix linear_search docstring return value (#8644) --- searches/linear_search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/searches/linear_search.py b/searches/linear_search.py index 777080d14e36..ba6e81d6bae4 100644 --- a/searches/linear_search.py +++ b/searches/linear_search.py @@ -15,7 +15,7 @@ def linear_search(sequence: list, target: int) -> int: :param sequence: a collection with comparable items (as sorted items not required in Linear Search) :param target: item value to search - :return: index of found item or None if item is not found + :return: index of found item or -1 if item is not found Examples: >>> linear_search([0, 5, 7, 10, 15], 0) From 384c407a265ac44d15eecdd339bb154147cda4f8 Mon Sep 17 00:00:00 2001 From: AmirSoroush Date: Mon, 31 Jul 2023 05:07:35 +0300 Subject: [PATCH 0899/1543] Enhance the implementation of Queue using list (#8608) * enhance the implementation of queue using list * enhance readability of queue_on_list.py * rename 'queue_on_list' to 'queue_by_list' to match the class name --- data_structures/queue/queue_by_list.py | 141 +++++++++++++++++++++++++ data_structures/queue/queue_on_list.py | 52 --------- 2 files changed, 141 insertions(+), 52 deletions(-) create mode 100644 data_structures/queue/queue_by_list.py delete mode 100644 data_structures/queue/queue_on_list.py diff --git a/data_structures/queue/queue_by_list.py b/data_structures/queue/queue_by_list.py new file mode 100644 index 000000000000..4b05be9fd08e --- /dev/null +++ b/data_structures/queue/queue_by_list.py @@ -0,0 +1,141 @@ +"""Queue represented by a Python list""" + +from collections.abc import Iterable +from typing import Generic, TypeVar + +_T = TypeVar("_T") + + +class QueueByList(Generic[_T]): + def __init__(self, iterable: Iterable[_T] | None = None) -> None: + """ + >>> QueueByList() + Queue(()) + >>> QueueByList([10, 20, 30]) + Queue((10, 20, 30)) + >>> QueueByList((i**2 for i in range(1, 4))) + Queue((1, 4, 9)) + """ + self.entries: list[_T] = list(iterable or []) + + def __len__(self) -> int: + """ + >>> len(QueueByList()) + 0 + >>> from string import ascii_lowercase + >>> len(QueueByList(ascii_lowercase)) + 26 + >>> queue = QueueByList() + >>> for i in range(1, 11): + ... queue.put(i) + >>> len(queue) + 10 + >>> for i in range(2): + ... queue.get() + 1 + 2 + >>> len(queue) + 8 + """ + + return len(self.entries) + + def __repr__(self) -> str: + """ + >>> queue = QueueByList() + >>> queue + Queue(()) + >>> str(queue) + 'Queue(())' + >>> queue.put(10) + >>> queue + Queue((10,)) + >>> queue.put(20) + >>> queue.put(30) + >>> queue + Queue((10, 20, 30)) + """ + + return f"Queue({tuple(self.entries)})" + + def put(self, item: _T) -> None: + """Put `item` to the Queue + + >>> queue = QueueByList() + >>> queue.put(10) + >>> queue.put(20) + >>> len(queue) + 2 + >>> queue + Queue((10, 20)) + """ + + self.entries.append(item) + + def get(self) -> _T: + """ + Get `item` from the Queue + + >>> queue = QueueByList((10, 20, 30)) + >>> queue.get() + 10 + >>> queue.put(40) + >>> queue.get() + 20 + >>> queue.get() + 30 + >>> len(queue) + 1 + >>> queue.get() + 40 + >>> queue.get() + Traceback (most recent call last): + ... 
+ IndexError: Queue is empty + """ + + if not self.entries: + raise IndexError("Queue is empty") + return self.entries.pop(0) + + def rotate(self, rotation: int) -> None: + """Rotate the items of the Queue `rotation` times + + >>> queue = QueueByList([10, 20, 30, 40]) + >>> queue + Queue((10, 20, 30, 40)) + >>> queue.rotate(1) + >>> queue + Queue((20, 30, 40, 10)) + >>> queue.rotate(2) + >>> queue + Queue((40, 10, 20, 30)) + """ + + put = self.entries.append + get = self.entries.pop + + for _ in range(rotation): + put(get(0)) + + def get_front(self) -> _T: + """Get the front item from the Queue + + >>> queue = QueueByList((10, 20, 30)) + >>> queue.get_front() + 10 + >>> queue + Queue((10, 20, 30)) + >>> queue.get() + 10 + >>> queue.get_front() + 20 + """ + + return self.entries[0] + + +if __name__ == "__main__": + from doctest import testmod + + testmod() diff --git a/data_structures/queue/queue_on_list.py b/data_structures/queue/queue_on_list.py deleted file mode 100644 index 71fca6b2f5f4..000000000000 --- a/data_structures/queue/queue_on_list.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Queue represented by a Python list""" - - -class Queue: - def __init__(self): - self.entries = [] - self.length = 0 - self.front = 0 - - def __str__(self): - printed = "<" + str(self.entries)[1:-1] + ">" - return printed - - """Enqueues {@code item} - @param item - item to enqueue""" - - def put(self, item): - self.entries.append(item) - self.length = self.length + 1 - - """Dequeues {@code item} - @requirement: |self.length| > 0 - @return dequeued - item that was dequeued""" - - def get(self): - self.length = self.length - 1 - dequeued = self.entries[self.front] - # self.front-=1 - # self.entries = self.entries[self.front:] - self.entries = self.entries[1:] - return dequeued - - """Rotates the queue {@code rotation} times - @param rotation - number of times to rotate queue""" - - def rotate(self, rotation): - for _ in range(rotation): - self.put(self.get()) - - """Enqueues {@code item} - @return item at front of self.entries""" - - def get_front(self): - return self.entries[0] - - """Returns the length of this.entries""" - - def size(self): - return self.length From 629eb86ce0d30dd6031fa482f4a477ac3df345ab Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 30 Jul 2023 22:23:23 -0700 Subject: [PATCH 0900/1543] Fix merge conflicts to merge change from #5080 (#8911) * Input for user choose his Collatz sequence Now the user can tell the algorithm what number he wants to run on the Collatz Sequence. 
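  For example, entering 7 walks the sequence through 7, 22, 11, 34, 17, 52, 26, 13,
  40, 20, 10, 5, 16, 8, 4, 2, 1 before stopping at 1.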
* updating DIRECTORY.md --------- Co-authored-by: Hugo Folloni Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + maths/collatz_sequence.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index af150b12984b..aa9bd313b898 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -740,6 +740,7 @@ * [Tower Of Hanoi](other/tower_of_hanoi.py) ## Physics + * [Altitude Pressure](physics/altitude_pressure.py) * [Archimedes Principle](physics/archimedes_principle.py) * [Basic Orbital Capture](physics/basic_orbital_capture.py) * [Casimir Effect](physics/casimir_effect.py) diff --git a/maths/collatz_sequence.py b/maths/collatz_sequence.py index 4f3aa5582731..b47017146a1e 100644 --- a/maths/collatz_sequence.py +++ b/maths/collatz_sequence.py @@ -57,7 +57,7 @@ def collatz_sequence(n: int) -> Generator[int, None, None]: def main(): - n = 43 + n = int(input("Your number: ")) sequence = tuple(collatz_sequence(n)) print(sequence) print(f"Collatz sequence from {n} took {len(sequence)} steps.") From 0b0214c42f563e7af885058c0e3a32d292f7f1da Mon Sep 17 00:00:00 2001 From: roger-sato Date: Tue, 1 Aug 2023 03:46:30 +0900 Subject: [PATCH 0901/1543] Handle empty input case in Segment Tree build process (#8718) --- data_structures/binary_tree/segment_tree.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index b0580386954a..5f822407d8cb 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -7,7 +7,8 @@ def __init__(self, a): self.st = [0] * ( 4 * self.N ) # approximate the overall size of segment tree with array N - self.build(1, 0, self.N - 1) + if self.N: + self.build(1, 0, self.N - 1) def left(self, idx): return idx * 2 From 90a8e6e0d210a5c526c8f485fa825e1649d217e2 Mon Sep 17 00:00:00 2001 From: Dylan Buchi Date: Mon, 31 Jul 2023 15:50:00 -0300 Subject: [PATCH 0902/1543] Update `sorts/bubble_sort.py` (#5802) * Add missing type annotations in bubble_sort.py * Refactor bubble_sort function --- sorts/bubble_sort.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/sorts/bubble_sort.py b/sorts/bubble_sort.py index aef2da272bd0..7da4362a5b97 100644 --- a/sorts/bubble_sort.py +++ b/sorts/bubble_sort.py @@ -1,4 +1,7 @@ -def bubble_sort(collection): +from typing import Any + + +def bubble_sort(collection: list[Any]) -> list[Any]: """Pure implementation of bubble sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous @@ -28,9 +31,9 @@ def bubble_sort(collection): True """ length = len(collection) - for i in range(length - 1): + for i in reversed(range(length)): swapped = False - for j in range(length - 1 - i): + for j in range(i): if collection[j] > collection[j + 1]: swapped = True collection[j], collection[j + 1] = collection[j + 1], collection[j] From 5cf34d901e32b65425103309bbad0068b1851238 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 31 Jul 2023 13:53:26 -0700 Subject: [PATCH 0903/1543] Ruff fixes (#8913) * updating DIRECTORY.md * Fix ruff error in eulerian_path_and_circuit_for_undirected_graph.py * Fix ruff error in newtons_second_law_of_motion.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 +- graphs/eulerian_path_and_circuit_for_undirected_graph.py | 2 +- physics/newtons_second_law_of_motion.py | 2 +- 3 files changed, 3 insertions(+), 
3 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index aa9bd313b898..fdcf0ceedf1f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -236,8 +236,8 @@ * [Double Ended Queue](data_structures/queue/double_ended_queue.py) * [Linked Queue](data_structures/queue/linked_queue.py) * [Priority Queue Using List](data_structures/queue/priority_queue_using_list.py) + * [Queue By List](data_structures/queue/queue_by_list.py) * [Queue By Two Stacks](data_structures/queue/queue_by_two_stacks.py) - * [Queue On List](data_structures/queue/queue_on_list.py) * [Queue On Pseudo Stack](data_structures/queue/queue_on_pseudo_stack.py) * Stacks * [Balanced Parentheses](data_structures/stacks/balanced_parentheses.py) diff --git a/graphs/eulerian_path_and_circuit_for_undirected_graph.py b/graphs/eulerian_path_and_circuit_for_undirected_graph.py index 6c43c5d3e6e3..6b4ea8e21e8b 100644 --- a/graphs/eulerian_path_and_circuit_for_undirected_graph.py +++ b/graphs/eulerian_path_and_circuit_for_undirected_graph.py @@ -20,7 +20,7 @@ def check_circuit_or_path(graph, max_node): odd_degree_nodes = 0 odd_node = -1 for i in range(max_node): - if i not in graph.keys(): + if i not in graph: continue if len(graph[i]) % 2 == 1: odd_degree_nodes += 1 diff --git a/physics/newtons_second_law_of_motion.py b/physics/newtons_second_law_of_motion.py index cb53f8f6571f..53fab6ce78b9 100644 --- a/physics/newtons_second_law_of_motion.py +++ b/physics/newtons_second_law_of_motion.py @@ -60,7 +60,7 @@ def newtons_second_law_of_motion(mass: float, acceleration: float) -> float: >>> newtons_second_law_of_motion(2.0, 1) 2.0 """ - force = float() + force = 0.0 try: force = mass * acceleration except Exception: From f8fe72dc378232107100acc1924fef31b1198124 Mon Sep 17 00:00:00 2001 From: "Minha, Jeong" Date: Tue, 1 Aug 2023 06:24:12 +0900 Subject: [PATCH 0904/1543] Update game_of_life.py (#4921) * Update game_of_life.py docstring error fix delete no reason delete next_gen_canvas code(local variable) * Update cellular_automata/game_of_life.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Tianyi Zheng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- cellular_automata/game_of_life.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index b69afdce03eb..d691a2b73af0 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -10,7 +10,7 @@ - 3.5 Usage: - - $python3 game_o_life + - $python3 game_of_life Game-Of-Life Rules: @@ -52,7 +52,8 @@ def seed(canvas: list[list[bool]]) -> None: def run(canvas: list[list[bool]]) -> list[list[bool]]: - """This function runs the rules of game through all points, and changes their + """ + This function runs the rules of game through all points, and changes their status accordingly.(in the same canvas) @Args: -- @@ -60,7 +61,7 @@ def run(canvas: list[list[bool]]) -> list[list[bool]]: @returns: -- - None + canvas of population after one step """ current_canvas = np.array(canvas) next_gen_canvas = np.array(create_canvas(current_canvas.shape[0])) @@ -70,10 +71,7 @@ def run(canvas: list[list[bool]]) -> list[list[bool]]: pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2] ) - current_canvas = next_gen_canvas - del next_gen_canvas # cleaning memory as we move on. 
- return_canvas: list[list[bool]] = current_canvas.tolist() - return return_canvas + return next_gen_canvas.tolist() def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool: From f7c5e55609afa1e4e7ae2ee3f442bbd5d0b43b8a Mon Sep 17 00:00:00 2001 From: Jan Wojciechowski <96974442+yanvoi@users.noreply.github.com> Date: Tue, 1 Aug 2023 05:02:49 +0200 Subject: [PATCH 0905/1543] Window closing fix (#8625) * The window will now remain open after the fractal is finished being drawn, and will only close upon your click. * Update fractals/sierpinski_triangle.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Tianyi Zheng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- fractals/sierpinski_triangle.py | 1 + 1 file changed, 1 insertion(+) diff --git a/fractals/sierpinski_triangle.py b/fractals/sierpinski_triangle.py index c28ec00b27fe..45f7ab84cfff 100644 --- a/fractals/sierpinski_triangle.py +++ b/fractals/sierpinski_triangle.py @@ -82,3 +82,4 @@ def triangle( vertices = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1])) + turtle.Screen().exitonclick() From c9a7234a954dd280dc8192ae77a564e647d013d4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 1 Aug 2023 09:26:23 +0530 Subject: [PATCH 0906/1543] [pre-commit.ci] pre-commit autoupdate (#8914) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.280 → v0.0.281](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.280...v0.0.281) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5adf12cc70c5..e158bd8d6879 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.280 + rev: v0.0.281 hooks: - id: ruff From ce218c57f1f494cfca69bc01ba660c97385e5330 Mon Sep 17 00:00:00 2001 From: AmirSoroush Date: Tue, 1 Aug 2023 21:23:34 +0300 Subject: [PATCH 0907/1543] =?UTF-8?q?fixes=20#8673;=20Add=20operator's=20a?= =?UTF-8?q?ssociativity=20check=20for=20stacks/infix=5Fto=5Fp=E2=80=A6=20(?= =?UTF-8?q?#8674)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fixes #8673; Add operator's associativity check for stacks/infix_to_postfix_conversion.py * fix ruff N806 in stacks/infix_to_postfix_conversion.py * Update data_structures/stacks/infix_to_postfix_conversion.py Co-authored-by: Tianyi Zheng * Update data_structures/stacks/infix_to_postfix_conversion.py Co-authored-by: Tianyi Zheng --------- Co-authored-by: Tianyi Zheng --- .../stacks/infix_to_postfix_conversion.py | 50 +++++++++++++++++-- 1 file changed, 47 insertions(+), 3 deletions(-) diff --git a/data_structures/stacks/infix_to_postfix_conversion.py b/data_structures/stacks/infix_to_postfix_conversion.py index 9017443091cf..e697061937c9 100644 --- a/data_structures/stacks/infix_to_postfix_conversion.py +++ b/data_structures/stacks/infix_to_postfix_conversion.py @@ -4,9 +4,26 @@ https://en.wikipedia.org/wiki/Shunting-yard_algorithm """ +from typing import Literal + from .balanced_parentheses import 
balanced_parentheses from .stack import Stack +PRECEDENCES: dict[str, int] = { + "+": 1, + "-": 1, + "*": 2, + "/": 2, + "^": 3, +} +ASSOCIATIVITIES: dict[str, Literal["LR", "RL"]] = { + "+": "LR", + "-": "LR", + "*": "LR", + "/": "LR", + "^": "RL", +} + def precedence(char: str) -> int: """ @@ -14,7 +31,15 @@ def precedence(char: str) -> int: order of operation. https://en.wikipedia.org/wiki/Order_of_operations """ - return {"+": 1, "-": 1, "*": 2, "/": 2, "^": 3}.get(char, -1) + return PRECEDENCES.get(char, -1) + + +def associativity(char: str) -> Literal["LR", "RL"]: + """ + Return the associativity of the operator `char`. + https://en.wikipedia.org/wiki/Operator_associativity + """ + return ASSOCIATIVITIES[char] def infix_to_postfix(expression_str: str) -> str: @@ -35,6 +60,8 @@ def infix_to_postfix(expression_str: str) -> str: 'a b c * + d e * f + g * +' >>> infix_to_postfix("x^y/(5*z)+2") 'x y ^ 5 z * / 2 +' + >>> infix_to_postfix("2^3^2") + '2 3 2 ^ ^' """ if not balanced_parentheses(expression_str): raise ValueError("Mismatched parentheses") @@ -50,9 +77,26 @@ def infix_to_postfix(expression_str: str) -> str: postfix.append(stack.pop()) stack.pop() else: - while not stack.is_empty() and precedence(char) <= precedence(stack.peek()): + while True: + if stack.is_empty(): + stack.push(char) + break + + char_precedence = precedence(char) + tos_precedence = precedence(stack.peek()) + + if char_precedence > tos_precedence: + stack.push(char) + break + if char_precedence < tos_precedence: + postfix.append(stack.pop()) + continue + # Precedences are equal + if associativity(char) == "RL": + stack.push(char) + break postfix.append(stack.pop()) - stack.push(char) + while not stack.is_empty(): postfix.append(stack.pop()) return " ".join(postfix) From db6bd4b17f471d4def7aa441f1da43bb6a0f18ae Mon Sep 17 00:00:00 2001 From: Dipankar Mitra <50228537+Mitra-babu@users.noreply.github.com> Date: Mon, 7 Aug 2023 17:17:42 +0530 Subject: [PATCH 0908/1543] IQR function is added (#8851) * tanh function been added * tanh function been added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function is added * tanh function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function added * tanh function added * tanh function is added * Apply suggestions from code review * ELU activation function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * elu activation is added * ELU activation is added * Update maths/elu_activation.py Co-authored-by: Christian Clauss * Exponential_linear_unit activation is added * Exponential_linear_unit activation is added * SiLU activation is added * SiLU activation is added * mish added * mish activation is added * inter_quartile_range function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Mish activation function is added * Mish action is added * mish activation added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * mish activation added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * inter quartile range (IQR) function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * IQR function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for 
more information, see https://pre-commit.ci * code optimized in IQR function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * interquartile_range function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/interquartile_range.py Co-authored-by: Christian Clauss * Changes on interquartile_range * numpy removed from interquartile_range * Fixes from code review * Update interquartile_range.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/interquartile_range.py | 66 ++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 maths/interquartile_range.py diff --git a/maths/interquartile_range.py b/maths/interquartile_range.py new file mode 100644 index 000000000000..d4d72e73ef49 --- /dev/null +++ b/maths/interquartile_range.py @@ -0,0 +1,66 @@ +""" +An implementation of interquartile range (IQR) which is a measure of statistical +dispersion, which is the spread of the data. + +The function takes the list of numeric values as input and returns the IQR. + +Script inspired by this Wikipedia article: +https://en.wikipedia.org/wiki/Interquartile_range +""" +from __future__ import annotations + + +def find_median(nums: list[int | float]) -> float: + """ + This is the implementation of the median. + :param nums: The list of numeric nums + :return: Median of the list + >>> find_median(nums=([1, 2, 2, 3, 4])) + 2 + >>> find_median(nums=([1, 2, 2, 3, 4, 4])) + 2.5 + >>> find_median(nums=([-1, 2, 0, 3, 4, -4])) + 1.5 + >>> find_median(nums=([1.1, 2.2, 2, 3.3, 4.4, 4])) + 2.65 + """ + div, mod = divmod(len(nums), 2) + if mod: + return nums[div] + return (nums[div] + nums[(div) - 1]) / 2 + + +def interquartile_range(nums: list[int | float]) -> float: + """ + Return the interquartile range for a list of numeric values. + :param nums: The list of numeric values. + :return: interquartile range + + >>> interquartile_range(nums=[4, 1, 2, 3, 2]) + 2.0 + >>> interquartile_range(nums = [-2, -7, -10, 9, 8, 4, -67, 45]) + 17.0 + >>> interquartile_range(nums = [-2.1, -7.1, -10.1, 9.1, 8.1, 4.1, -67.1, 45.1]) + 17.2 + >>> interquartile_range(nums = [0, 0, 0, 0, 0]) + 0.0 + >>> interquartile_range(nums=[]) + Traceback (most recent call last): + ... + ValueError: The list is empty. Provide a non-empty list. + """ + if not nums: + raise ValueError("The list is empty. 
Provide a non-empty list.") + nums.sort() + length = len(nums) + div, mod = divmod(length, 2) + q1 = find_median(nums[:div]) + half_length = sum((div, mod)) + q3 = find_median(nums[half_length:length]) + return q3 - q1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From ac62cdb94fe2478fd809d9ec91e3b85304a5ac6d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 7 Aug 2023 19:52:39 -0400 Subject: [PATCH 0909/1543] [pre-commit.ci] pre-commit autoupdate (#8930) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.281 → v0.0.282](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.281...v0.0.282) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e158bd8d6879..da6762123b04 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.281 + rev: v0.0.282 hooks: - id: ruff diff --git a/DIRECTORY.md b/DIRECTORY.md index fdcf0ceedf1f..e6a1ff356143 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -585,6 +585,7 @@ * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) * [Hexagonal Number](maths/hexagonal_number.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) + * [Interquartile Range](maths/interquartile_range.py) * [Is Int Palindrome](maths/is_int_palindrome.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) * [Is Square Free](maths/is_square_free.py) From 842d03fb2ab7d83e4d4081c248d71e89bb520809 Mon Sep 17 00:00:00 2001 From: AmirSoroush Date: Wed, 9 Aug 2023 00:47:09 +0300 Subject: [PATCH 0910/1543] improvements to jump_search.py (#8932) * improvements to jump_search.py * add more tests to jump_search.py --- searches/jump_search.py | 45 +++++++++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/searches/jump_search.py b/searches/jump_search.py index 31a9656c55fe..3bc3c37809a1 100644 --- a/searches/jump_search.py +++ b/searches/jump_search.py @@ -4,14 +4,28 @@ until the element compared is bigger than the one searched. It will then perform a linear search until it matches the wanted number. If not found, it returns -1. + +https://en.wikipedia.org/wiki/Jump_search """ import math +from collections.abc import Sequence +from typing import Any, Protocol, TypeVar + + +class Comparable(Protocol): + def __lt__(self, other: Any, /) -> bool: + ... + +T = TypeVar("T", bound=Comparable) -def jump_search(arr: list, x: int) -> int: + +def jump_search(arr: Sequence[T], item: T) -> int: """ - Pure Python implementation of the jump search algorithm. + Python implementation of the jump search algorithm. + Return the index if the `item` is found, otherwise return -1. 
+ Examples: >>> jump_search([0, 1, 2, 3, 4, 5], 3) 3 @@ -21,31 +35,36 @@ def jump_search(arr: list, x: int) -> int: -1 >>> jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610], 55) 10 + >>> jump_search(["aa", "bb", "cc", "dd", "ee", "ff"], "ee") + 4 """ - n = len(arr) - step = int(math.floor(math.sqrt(n))) + arr_size = len(arr) + block_size = int(math.sqrt(arr_size)) + prev = 0 - while arr[min(step, n) - 1] < x: + step = block_size + while arr[min(step, arr_size) - 1] < item: prev = step - step += int(math.floor(math.sqrt(n))) - if prev >= n: + step += block_size + if prev >= arr_size: return -1 - while arr[prev] < x: - prev = prev + 1 - if prev == min(step, n): + while arr[prev] < item: + prev += 1 + if prev == min(step, arr_size): return -1 - if arr[prev] == x: + if arr[prev] == item: return prev return -1 if __name__ == "__main__": user_input = input("Enter numbers separated by a comma:\n").strip() - arr = [int(item) for item in user_input.split(",")] + array = [int(item) for item in user_input.split(",")] x = int(input("Enter the number to be searched:\n")) - res = jump_search(arr, x) + + res = jump_search(array, x) if res == -1: print("Number not found!") else: From ae0fc85401efd9816193a06e554a66600cc09a97 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Wed, 9 Aug 2023 00:55:30 -0700 Subject: [PATCH 0911/1543] Fix ruff errors (#8936) * Fix ruff errors Renamed neural_network/input_data.py to neural_network/input_data.py_tf because it should be left out of the directory for the following reasons: 1. Its sole purpose is to be used by neural_network/gan.py_tf, which is itself left out of the directory because of issues with TensorFlow. 2. It was taken directly from TensorFlow's codebase and is actually already deprecated. If/when neural_network/gan.py_tf is eventually re-added back to the directory, its implementation should be changed to not use neural_network/input_data.py anyway. 
* updating DIRECTORY.md * Change input_data.py_tf file extension Change input_data.py_tf file extension because algorithms-keeper bot is being picky about it --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - conversions/length_conversion.py | 30 +++++++++----- conversions/pressure_conversions.py | 28 ++++++++----- conversions/volume_conversions.py | 40 +++++++++++-------- .../binary_tree/distribute_coins.py | 10 +++-- electronics/electric_power.py | 22 +++++----- graphs/bi_directional_dijkstra.py | 4 +- maths/area_under_curve.py | 6 +-- maths/decimal_to_fraction.py | 2 +- maths/line_length.py | 6 +-- maths/numerical_integration.py | 6 +-- .../single_indeterminate_operations.py | 4 +- maths/series/geometric_series.py | 10 ++--- maths/series/p_series.py | 2 +- maths/volume.py | 2 +- matrix/matrix_class.py | 4 +- matrix/matrix_operation.py | 6 +-- matrix/searching_in_sorted_matrix.py | 4 +- matrix/sherman_morrison.py | 16 ++++---- ...t_data.py => input_data.py.DEPRECATED.txt} | 0 web_programming/covid_stats_via_xpath.py | 12 ++++-- 21 files changed, 121 insertions(+), 94 deletions(-) rename neural_network/{input_data.py => input_data.py.DEPRECATED.txt} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index e6a1ff356143..5578c1c9a6dd 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -710,7 +710,6 @@ * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) - * [Input Data](neural_network/input_data.py) * [Perceptron](neural_network/perceptron.py) * [Simple Neural Network](neural_network/simple_neural_network.py) diff --git a/conversions/length_conversion.py b/conversions/length_conversion.py index d8f39515255e..07fa93a198c7 100644 --- a/conversions/length_conversion.py +++ b/conversions/length_conversion.py @@ -22,9 +22,13 @@ -> Wikipedia reference: https://en.wikipedia.org/wiki/Millimeter """ -from collections import namedtuple +from typing import NamedTuple + + +class FromTo(NamedTuple): + from_factor: float + to_factor: float -from_to = namedtuple("from_to", "from_ to") TYPE_CONVERSION = { "millimeter": "mm", @@ -40,14 +44,14 @@ } METRIC_CONVERSION = { - "mm": from_to(0.001, 1000), - "cm": from_to(0.01, 100), - "m": from_to(1, 1), - "km": from_to(1000, 0.001), - "in": from_to(0.0254, 39.3701), - "ft": from_to(0.3048, 3.28084), - "yd": from_to(0.9144, 1.09361), - "mi": from_to(1609.34, 0.000621371), + "mm": FromTo(0.001, 1000), + "cm": FromTo(0.01, 100), + "m": FromTo(1, 1), + "km": FromTo(1000, 0.001), + "in": FromTo(0.0254, 39.3701), + "ft": FromTo(0.3048, 3.28084), + "yd": FromTo(0.9144, 1.09361), + "mi": FromTo(1609.34, 0.000621371), } @@ -115,7 +119,11 @@ def length_conversion(value: float, from_type: str, to_type: str) -> float: f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) raise ValueError(msg) - return value * METRIC_CONVERSION[new_from].from_ * METRIC_CONVERSION[new_to].to + return ( + value + * METRIC_CONVERSION[new_from].from_factor + * METRIC_CONVERSION[new_to].to_factor + ) if __name__ == "__main__": diff --git a/conversions/pressure_conversions.py b/conversions/pressure_conversions.py index e0cd18d234ba..fe78b1382677 100644 --- a/conversions/pressure_conversions.py +++ b/conversions/pressure_conversions.py @@ -19,19 +19,23 @@ -> https://www.unitconverters.net/pressure-converter.html """ -from collections import 
namedtuple +from typing import NamedTuple + + +class FromTo(NamedTuple): + from_factor: float + to_factor: float -from_to = namedtuple("from_to", "from_ to") PRESSURE_CONVERSION = { - "atm": from_to(1, 1), - "pascal": from_to(0.0000098, 101325), - "bar": from_to(0.986923, 1.01325), - "kilopascal": from_to(0.00986923, 101.325), - "megapascal": from_to(9.86923, 0.101325), - "psi": from_to(0.068046, 14.6959), - "inHg": from_to(0.0334211, 29.9213), - "torr": from_to(0.00131579, 760), + "atm": FromTo(1, 1), + "pascal": FromTo(0.0000098, 101325), + "bar": FromTo(0.986923, 1.01325), + "kilopascal": FromTo(0.00986923, 101.325), + "megapascal": FromTo(9.86923, 0.101325), + "psi": FromTo(0.068046, 14.6959), + "inHg": FromTo(0.0334211, 29.9213), + "torr": FromTo(0.00131579, 760), } @@ -71,7 +75,9 @@ def pressure_conversion(value: float, from_type: str, to_type: str) -> float: + ", ".join(PRESSURE_CONVERSION) ) return ( - value * PRESSURE_CONVERSION[from_type].from_ * PRESSURE_CONVERSION[to_type].to + value + * PRESSURE_CONVERSION[from_type].from_factor + * PRESSURE_CONVERSION[to_type].to_factor ) diff --git a/conversions/volume_conversions.py b/conversions/volume_conversions.py index 44d29009120c..cb240380534b 100644 --- a/conversions/volume_conversions.py +++ b/conversions/volume_conversions.py @@ -18,35 +18,39 @@ -> Wikipedia reference: https://en.wikipedia.org/wiki/Cup_(unit) """ -from collections import namedtuple +from typing import NamedTuple + + +class FromTo(NamedTuple): + from_factor: float + to_factor: float -from_to = namedtuple("from_to", "from_ to") METRIC_CONVERSION = { - "cubicmeter": from_to(1, 1), - "litre": from_to(0.001, 1000), - "kilolitre": from_to(1, 1), - "gallon": from_to(0.00454, 264.172), - "cubicyard": from_to(0.76455, 1.30795), - "cubicfoot": from_to(0.028, 35.3147), - "cup": from_to(0.000236588, 4226.75), + "cubic meter": FromTo(1, 1), + "litre": FromTo(0.001, 1000), + "kilolitre": FromTo(1, 1), + "gallon": FromTo(0.00454, 264.172), + "cubic yard": FromTo(0.76455, 1.30795), + "cubic foot": FromTo(0.028, 35.3147), + "cup": FromTo(0.000236588, 4226.75), } def volume_conversion(value: float, from_type: str, to_type: str) -> float: """ Conversion between volume units. - >>> volume_conversion(4, "cubicmeter", "litre") + >>> volume_conversion(4, "cubic meter", "litre") 4000 >>> volume_conversion(1, "litre", "gallon") 0.264172 - >>> volume_conversion(1, "kilolitre", "cubicmeter") + >>> volume_conversion(1, "kilolitre", "cubic meter") 1 - >>> volume_conversion(3, "gallon", "cubicyard") + >>> volume_conversion(3, "gallon", "cubic yard") 0.017814279 - >>> volume_conversion(2, "cubicyard", "litre") + >>> volume_conversion(2, "cubic yard", "litre") 1529.1 - >>> volume_conversion(4, "cubicfoot", "cup") + >>> volume_conversion(4, "cubic foot", "cup") 473.396 >>> volume_conversion(1, "cup", "kilolitre") 0.000236588 @@ -54,7 +58,7 @@ def volume_conversion(value: float, from_type: str, to_type: str) -> float: Traceback (most recent call last): ... ValueError: Invalid 'from_type' value: 'wrongUnit' Supported values are: - cubicmeter, litre, kilolitre, gallon, cubicyard, cubicfoot, cup + cubic meter, litre, kilolitre, gallon, cubic yard, cubic foot, cup """ if from_type not in METRIC_CONVERSION: raise ValueError( @@ -66,7 +70,11 @@ def volume_conversion(value: float, from_type: str, to_type: str) -> float: f"Invalid 'to_type' value: {to_type!r}. 
Supported values are:\n" + ", ".join(METRIC_CONVERSION) ) - return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to + return ( + value + * METRIC_CONVERSION[from_type].from_factor + * METRIC_CONVERSION[to_type].to_factor + ) if __name__ == "__main__": diff --git a/data_structures/binary_tree/distribute_coins.py b/data_structures/binary_tree/distribute_coins.py index ea02afc2cea6..5712604cb87c 100644 --- a/data_structures/binary_tree/distribute_coins.py +++ b/data_structures/binary_tree/distribute_coins.py @@ -39,8 +39,8 @@ from __future__ import annotations -from collections import namedtuple from dataclasses import dataclass +from typing import NamedTuple @dataclass @@ -50,7 +50,9 @@ class TreeNode: right: TreeNode | None = None -CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess") +class CoinsDistribResult(NamedTuple): + moves: int + excess: int def distribute_coins(root: TreeNode | None) -> int: @@ -79,7 +81,7 @@ def distribute_coins(root: TreeNode | None) -> int: # Validation def count_nodes(node: TreeNode | None) -> int: """ - >>> count_nodes(None): + >>> count_nodes(None) 0 """ if node is None: @@ -89,7 +91,7 @@ def count_nodes(node: TreeNode | None) -> int: def count_coins(node: TreeNode | None) -> int: """ - >>> count_coins(None): + >>> count_coins(None) 0 """ if node is None: diff --git a/electronics/electric_power.py b/electronics/electric_power.py index e59795601791..8b92e320ace3 100644 --- a/electronics/electric_power.py +++ b/electronics/electric_power.py @@ -1,7 +1,12 @@ # https://en.m.wikipedia.org/wiki/Electric_power from __future__ import annotations -from collections import namedtuple +from typing import NamedTuple + + +class Result(NamedTuple): + name: str + value: float def electric_power(voltage: float, current: float, power: float) -> tuple: @@ -10,11 +15,11 @@ def electric_power(voltage: float, current: float, power: float) -> tuple: fundamental value of electrical system. examples are below: >>> electric_power(voltage=0, current=2, power=5) - result(name='voltage', value=2.5) + Result(name='voltage', value=2.5) >>> electric_power(voltage=2, current=2, power=0) - result(name='power', value=4.0) + Result(name='power', value=4.0) >>> electric_power(voltage=-2, current=3, power=0) - result(name='power', value=6.0) + Result(name='power', value=6.0) >>> electric_power(voltage=2, current=4, power=2) Traceback (most recent call last): ... @@ -28,9 +33,8 @@ def electric_power(voltage: float, current: float, power: float) -> tuple: ... 
ValueError: Power cannot be negative in any electrical/electronics system >>> electric_power(voltage=2.2, current=2.2, power=0) - result(name='power', value=4.84) + Result(name='power', value=4.84) """ - result = namedtuple("result", "name value") if (voltage, current, power).count(0) != 1: raise ValueError("Only one argument must be 0") elif power < 0: @@ -38,11 +42,11 @@ def electric_power(voltage: float, current: float, power: float) -> tuple: "Power cannot be negative in any electrical/electronics system" ) elif voltage == 0: - return result("voltage", power / current) + return Result("voltage", power / current) elif current == 0: - return result("current", power / voltage) + return Result("current", power / voltage) elif power == 0: - return result("power", float(round(abs(voltage * current), 2))) + return Result("power", float(round(abs(voltage * current), 2))) else: raise ValueError("Exactly one argument must be 0") diff --git a/graphs/bi_directional_dijkstra.py b/graphs/bi_directional_dijkstra.py index a4489026be80..529a235db625 100644 --- a/graphs/bi_directional_dijkstra.py +++ b/graphs/bi_directional_dijkstra.py @@ -26,8 +26,8 @@ def pass_and_relaxation( cst_bwd: dict, queue: PriorityQueue, parent: dict, - shortest_distance: float | int, -) -> float | int: + shortest_distance: float, +) -> float: for nxt, d in graph[v]: if nxt in visited_forward: continue diff --git a/maths/area_under_curve.py b/maths/area_under_curve.py index b557b2029657..0da6546b2e36 100644 --- a/maths/area_under_curve.py +++ b/maths/area_under_curve.py @@ -7,9 +7,9 @@ def trapezoidal_area( - fnc: Callable[[int | float], int | float], - x_start: int | float, - x_end: int | float, + fnc: Callable[[float], float], + x_start: float, + x_end: float, steps: int = 100, ) -> float: """ diff --git a/maths/decimal_to_fraction.py b/maths/decimal_to_fraction.py index 9462bafe0171..2aa8e3c3dfd6 100644 --- a/maths/decimal_to_fraction.py +++ b/maths/decimal_to_fraction.py @@ -1,4 +1,4 @@ -def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]: +def decimal_to_fraction(decimal: float | str) -> tuple[int, int]: """ Return a decimal number in its simplest fraction form >>> decimal_to_fraction(2) diff --git a/maths/line_length.py b/maths/line_length.py index b810f2d9ad1f..ed2efc31e96e 100644 --- a/maths/line_length.py +++ b/maths/line_length.py @@ -5,9 +5,9 @@ def line_length( - fnc: Callable[[int | float], int | float], - x_start: int | float, - x_end: int | float, + fnc: Callable[[float], float], + x_start: float, + x_end: float, steps: int = 100, ) -> float: """ diff --git a/maths/numerical_integration.py b/maths/numerical_integration.py index f2d65f89e390..4ac562644a07 100644 --- a/maths/numerical_integration.py +++ b/maths/numerical_integration.py @@ -7,9 +7,9 @@ def trapezoidal_area( - fnc: Callable[[int | float], int | float], - x_start: int | float, - x_end: int | float, + fnc: Callable[[float], float], + x_start: float, + x_end: float, steps: int = 100, ) -> float: """ diff --git a/maths/polynomials/single_indeterminate_operations.py b/maths/polynomials/single_indeterminate_operations.py index 8bafdb591793..e31e6caa3988 100644 --- a/maths/polynomials/single_indeterminate_operations.py +++ b/maths/polynomials/single_indeterminate_operations.py @@ -87,7 +87,7 @@ def __mul__(self, polynomial_2: Polynomial) -> Polynomial: return Polynomial(self.degree + polynomial_2.degree, coefficients) - def evaluate(self, substitution: int | float) -> int | float: + def evaluate(self, substitution: float) -> float: """ 
Evaluates the polynomial at x. >>> p = Polynomial(2, [1, 2, 3]) @@ -144,7 +144,7 @@ def derivative(self) -> Polynomial: coefficients[i] = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1, coefficients) - def integral(self, constant: int | float = 0) -> Polynomial: + def integral(self, constant: float = 0) -> Polynomial: """ Returns the integral of the polynomial. >>> p = Polynomial(2, [1, 2, 3]) diff --git a/maths/series/geometric_series.py b/maths/series/geometric_series.py index 90c9fe77b733..b8d6a86206be 100644 --- a/maths/series/geometric_series.py +++ b/maths/series/geometric_series.py @@ -14,10 +14,10 @@ def geometric_series( - nth_term: float | int, - start_term_a: float | int, - common_ratio_r: float | int, -) -> list[float | int]: + nth_term: float, + start_term_a: float, + common_ratio_r: float, +) -> list[float]: """ Pure Python implementation of Geometric Series algorithm @@ -48,7 +48,7 @@ def geometric_series( """ if not all((nth_term, start_term_a, common_ratio_r)): return [] - series: list[float | int] = [] + series: list[float] = [] power = 1 multiple = common_ratio_r for _ in range(int(nth_term)): diff --git a/maths/series/p_series.py b/maths/series/p_series.py index 34fa3f2399af..a091a6f3fecf 100644 --- a/maths/series/p_series.py +++ b/maths/series/p_series.py @@ -13,7 +13,7 @@ from __future__ import annotations -def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]: +def p_series(nth_term: float | str, power: float | str) -> list[str]: """ Pure Python implementation of P-Series algorithm :return: The P-Series starting from 1 to last (nth) term diff --git a/maths/volume.py b/maths/volume.py index 1da4584c893e..721974e68b66 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -8,7 +8,7 @@ from math import pi, pow -def vol_cube(side_length: int | float) -> float: +def vol_cube(side_length: float) -> float: """ Calculate the Volume of a Cube. 
>>> vol_cube(1) diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py index a73e8b92a286..a5940a38e836 100644 --- a/matrix/matrix_class.py +++ b/matrix/matrix_class.py @@ -141,7 +141,7 @@ def num_columns(self) -> int: @property def order(self) -> tuple[int, int]: - return (self.num_rows, self.num_columns) + return self.num_rows, self.num_columns @property def is_square(self) -> bool: @@ -315,7 +315,7 @@ def __sub__(self, other: Matrix) -> Matrix: ] ) - def __mul__(self, other: Matrix | int | float) -> Matrix: + def __mul__(self, other: Matrix | float) -> Matrix: if isinstance(other, (int, float)): return Matrix( [[int(element * other) for element in row] for row in self.rows] diff --git a/matrix/matrix_operation.py b/matrix/matrix_operation.py index f189f1898d33..d63e758f1838 100644 --- a/matrix/matrix_operation.py +++ b/matrix/matrix_operation.py @@ -47,7 +47,7 @@ def subtract(matrix_a: list[list[int]], matrix_b: list[list[int]]) -> list[list[ raise TypeError("Expected a matrix, got int/list instead") -def scalar_multiply(matrix: list[list[int]], n: int | float) -> list[list[float]]: +def scalar_multiply(matrix: list[list[int]], n: float) -> list[list[float]]: """ >>> scalar_multiply([[1,2],[3,4]],5) [[5, 10], [15, 20]] @@ -189,9 +189,7 @@ def main() -> None: matrix_c = [[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34], [41, 42, 43, 44]] matrix_d = [[3, 0, 2], [2, 0, -2], [0, 1, 1]] print(f"Add Operation, {add(matrix_a, matrix_b) = } \n") - print( - f"Multiply Operation, {multiply(matrix_a, matrix_b) = } \n", - ) + print(f"Multiply Operation, {multiply(matrix_a, matrix_b) = } \n") print(f"Identity: {identity(5)}\n") print(f"Minor of {matrix_c} = {minor(matrix_c, 1, 2)} \n") print(f"Determinant of {matrix_b} = {determinant(matrix_b)} \n") diff --git a/matrix/searching_in_sorted_matrix.py b/matrix/searching_in_sorted_matrix.py index ddca3b1ce781..f55cc71d6f3a 100644 --- a/matrix/searching_in_sorted_matrix.py +++ b/matrix/searching_in_sorted_matrix.py @@ -1,9 +1,7 @@ from __future__ import annotations -def search_in_a_sorted_matrix( - mat: list[list[int]], m: int, n: int, key: int | float -) -> None: +def search_in_a_sorted_matrix(mat: list[list[int]], m: int, n: int, key: float) -> None: """ >>> search_in_a_sorted_matrix( ... [[2, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 5) diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 256271e8a87d..b6e50f70fdcf 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -22,7 +22,7 @@ def __init__(self, row: int, column: int, default_value: float = 0) -> None: """ self.row, self.column = row, column - self.array = [[default_value for c in range(column)] for r in range(row)] + self.array = [[default_value for _ in range(column)] for _ in range(row)] def __str__(self) -> str: """ @@ -54,15 +54,15 @@ def single_line(row_vector: list[float]) -> str: def __repr__(self) -> str: return str(self) - def validate_indicies(self, loc: tuple[int, int]) -> bool: + def validate_indices(self, loc: tuple[int, int]) -> bool: """ Check if given indices are valid to pick element from matrix. 
Example: >>> a = Matrix(2, 6, 0) - >>> a.validate_indicies((2, 7)) + >>> a.validate_indices((2, 7)) False - >>> a.validate_indicies((0, 0)) + >>> a.validate_indices((0, 0)) True """ if not (isinstance(loc, (list, tuple)) and len(loc) == 2): @@ -81,7 +81,7 @@ def __getitem__(self, loc: tuple[int, int]) -> Any: >>> a[1, 0] 7 """ - assert self.validate_indicies(loc) + assert self.validate_indices(loc) return self.array[loc[0]][loc[1]] def __setitem__(self, loc: tuple[int, int], value: float) -> None: @@ -96,7 +96,7 @@ def __setitem__(self, loc: tuple[int, int], value: float) -> None: [ 1, 1, 1] [ 1, 1, 51] """ - assert self.validate_indicies(loc) + assert self.validate_indices(loc) self.array[loc[0]][loc[1]] = value def __add__(self, another: Matrix) -> Matrix: @@ -145,7 +145,7 @@ def __neg__(self) -> Matrix: def __sub__(self, another: Matrix) -> Matrix: return self + (-another) - def __mul__(self, another: int | float | Matrix) -> Matrix: + def __mul__(self, another: float | Matrix) -> Matrix: """ Return self * another. @@ -233,7 +233,7 @@ def sherman_morrison(self, u: Matrix, v: Matrix) -> Any: v_t = v.transpose() numerator_factor = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: - return None # It's not invertable + return None # It's not invertible return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) diff --git a/neural_network/input_data.py b/neural_network/input_data.py.DEPRECATED.txt similarity index 100% rename from neural_network/input_data.py rename to neural_network/input_data.py.DEPRECATED.txt diff --git a/web_programming/covid_stats_via_xpath.py b/web_programming/covid_stats_via_xpath.py index 85ea5d940d85..a95130badad9 100644 --- a/web_programming/covid_stats_via_xpath.py +++ b/web_programming/covid_stats_via_xpath.py @@ -4,17 +4,21 @@ more convenient to use in Python web projects (e.g. 
Django or Flask-based) """ -from collections import namedtuple +from typing import NamedTuple import requests from lxml import html # type: ignore -covid_data = namedtuple("covid_data", "cases deaths recovered") +class CovidData(NamedTuple): + cases: int + deaths: int + recovered: int -def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data: + +def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> CovidData: xpath_str = '//div[@class = "maincounter-number"]/span/text()' - return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str)) + return CovidData(*html.fromstring(requests.get(url).content).xpath(xpath_str)) fmt = """Total COVID-19 cases in the world: {} From c39b7eadbd4d81dda5e7ffe4c169d670483f0113 Mon Sep 17 00:00:00 2001 From: Suman <66205793+Suman2023@users.noreply.github.com> Date: Sun, 13 Aug 2023 03:28:37 +0530 Subject: [PATCH 0912/1543] updated the URL and HTML tags for scrapping yahoo finance (#8942) * updated the url and tags for yahoo finance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated to return the error text --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- web_programming/current_stock_price.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/web_programming/current_stock_price.py b/web_programming/current_stock_price.py index df44da4ef351..0c06354d8998 100644 --- a/web_programming/current_stock_price.py +++ b/web_programming/current_stock_price.py @@ -3,12 +3,18 @@ def stock_price(symbol: str = "AAPL") -> str: - url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}" - soup = BeautifulSoup(requests.get(url).text, "html.parser") - class_ = "My(6px) Pos(r) smartphone_Mt(6px)" - return soup.find("div", class_=class_).find("span").text + url = f"https://finance.yahoo.com/quote/{symbol}?p={symbol}" + yahoo_finance_source = requests.get(url, headers={"USER-AGENT": "Mozilla/5.0"}).text + soup = BeautifulSoup(yahoo_finance_source, "html.parser") + specific_fin_streamer_tag = soup.find("fin-streamer", {"data-test": "qsp-price"}) + if specific_fin_streamer_tag: + text = specific_fin_streamer_tag.get_text() + return text + return "No tag with the specified data-test attribute found." 
+ +# Search for the symbol at https://finance.yahoo.com/lookup if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}") From 4f2a346c277076ce1d69578ef52a9766e5040176 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 13 Aug 2023 13:05:42 +0300 Subject: [PATCH 0913/1543] Reduce the complexity of linear_algebra/src/polynom_for_points.py (#8605) * Reduce the complexity of linear_algebra/src/polynom_for_points.py * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Fix review issues --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- linear_algebra/src/polynom_for_points.py | 57 ++++++++---------------- 1 file changed, 19 insertions(+), 38 deletions(-) diff --git a/linear_algebra/src/polynom_for_points.py b/linear_algebra/src/polynom_for_points.py index f5e3db0cbb13..a9a9a8117c18 100644 --- a/linear_algebra/src/polynom_for_points.py +++ b/linear_algebra/src/polynom_for_points.py @@ -43,62 +43,43 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: x = len(coordinates) - count_of_line = 0 - matrix: list[list[float]] = [] # put the x and x to the power values in a matrix - while count_of_line < x: - count_in_line = 0 - a = coordinates[count_of_line][0] - count_line: list[float] = [] - while count_in_line < x: - count_line.append(a ** (x - (count_in_line + 1))) - count_in_line += 1 - matrix.append(count_line) - count_of_line += 1 + matrix: list[list[float]] = [ + [ + coordinates[count_of_line][0] ** (x - (count_in_line + 1)) + for count_in_line in range(x) + ] + for count_of_line in range(x) + ] - count_of_line = 0 # put the y values into a vector - vector: list[float] = [] - while count_of_line < x: - vector.append(coordinates[count_of_line][1]) - count_of_line += 1 + vector: list[float] = [coordinates[count_of_line][1] for count_of_line in range(x)] - count = 0 - - while count < x: - zahlen = 0 - while zahlen < x: - if count == zahlen: - zahlen += 1 - if zahlen == x: - break - bruch = matrix[zahlen][count] / matrix[count][count] + for count in range(x): + for number in range(x): + if count == number: + continue + fraction = matrix[number][count] / matrix[count][count] for counting_columns, item in enumerate(matrix[count]): # manipulating all the values in the matrix - matrix[zahlen][counting_columns] -= item * bruch + matrix[number][counting_columns] -= item * fraction # manipulating the values in the vector - vector[zahlen] -= vector[count] * bruch - zahlen += 1 - count += 1 + vector[number] -= vector[count] * fraction - count = 0 # make solutions - solution: list[str] = [] - while count < x: - solution.append(str(vector[count] / matrix[count][count])) - count += 1 + solution: list[str] = [ + str(vector[count] / matrix[count][count]) for count in range(x) + ] - count = 0 solved = "f(x)=" - while count < x: + for count in range(x): remove_e: list[str] = solution[count].split("E") if len(remove_e) > 1: solution[count] = f"{remove_e[0]}*10^{remove_e[1]}" solved += f"x^{x - (count + 1)}*{solution[count]}" if count + 1 != x: solved += "+" - count += 1 return solved From 9d86d4edaa754af06e0da9cac4a717f3765db7f4 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Mon, 14 Aug 2023 01:58:17 +0100 Subject: [PATCH 0914/1543] Create wa-tor algorithm (#8899) * feat(cellular_automata): 
Create wa-tor algorithm * updating DIRECTORY.md * chore(quality): Implement algo-keeper bot changes * Update cellular_automata/wa_tor.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor(repr): Return repr as python object * Update cellular_automata/wa_tor.py Co-authored-by: Tianyi Zheng * Update cellular_automata/wa_tor.py Co-authored-by: Tianyi Zheng * Update cellular_automata/wa_tor.py Co-authored-by: Tianyi Zheng * Update cellular_automata/wa_tor.py Co-authored-by: Tianyi Zheng * Update cellular_automata/wa_tor.py Co-authored-by: Tianyi Zheng * Update cellular_automata/wa_tor.py Co-authored-by: Tianyi Zheng * Update cellular_automata/wa_tor.py Co-authored-by: Tianyi Zheng * Update cellular_automata/wa_tor.py Co-authored-by: Tianyi Zheng * Update cellular_automata/wa_tor.py Co-authored-by: Tianyi Zheng * Update cellular_automata/wa_tor.py Co-authored-by: Tianyi Zheng * Update cellular_automata/wa_tor.py Co-authored-by: Tianyi Zheng * refactor(display): Rename to display_visually to visualise * refactor(wa-tor): Use double for loop * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * chore(wa-tor): Implement suggestions from code review --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- DIRECTORY.md | 1 + cellular_automata/wa_tor.py | 550 ++++++++++++++++++++++++++++++++++++ 2 files changed, 551 insertions(+) create mode 100644 cellular_automata/wa_tor.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 5578c1c9a6dd..cdcd1a8ae8cc 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -74,6 +74,7 @@ * [Game Of Life](cellular_automata/game_of_life.py) * [Nagel Schrekenberg](cellular_automata/nagel_schrekenberg.py) * [One Dimensional](cellular_automata/one_dimensional.py) + * [Wa Tor](cellular_automata/wa_tor.py) ## Ciphers * [A1Z26](ciphers/a1z26.py) diff --git a/cellular_automata/wa_tor.py b/cellular_automata/wa_tor.py new file mode 100644 index 000000000000..e423d1595bdb --- /dev/null +++ b/cellular_automata/wa_tor.py @@ -0,0 +1,550 @@ +""" +Wa-Tor algorithm (1984) + +@ https://en.wikipedia.org/wiki/Wa-Tor +@ https://beltoforion.de/en/wator/ +@ https://beltoforion.de/en/wator/images/wator_medium.webm + +This solution aims to completely remove any systematic approach +to the Wa-Tor planet, and utilise fully random methods. + +The constants are a working set that allows the Wa-Tor planet +to result in one of the three possible results. 
+""" + +from collections.abc import Callable +from random import randint, shuffle +from time import sleep +from typing import Literal + +WIDTH = 50 # Width of the Wa-Tor planet +HEIGHT = 50 # Height of the Wa-Tor planet + +PREY_INITIAL_COUNT = 30 # The initial number of prey entities +PREY_REPRODUCTION_TIME = 5 # The chronons before reproducing + +PREDATOR_INITIAL_COUNT = 50 # The initial number of predator entities +# The initial energy value of predator entities +PREDATOR_INITIAL_ENERGY_VALUE = 15 +# The energy value provided when consuming prey +PREDATOR_FOOD_VALUE = 5 +PREDATOR_REPRODUCTION_TIME = 20 # The chronons before reproducing + +MAX_ENTITIES = 500 # The max number of organisms on the board +# The number of entities to delete from the unbalanced side +DELETE_UNBALANCED_ENTITIES = 50 + + +class Entity: + """ + Represents an entity (either prey or predator). + + >>> e = Entity(True, coords=(0, 0)) + >>> e.prey + True + >>> e.coords + (0, 0) + >>> e.alive + True + """ + + def __init__(self, prey: bool, coords: tuple[int, int]) -> None: + self.prey = prey + # The (row, col) pos of the entity + self.coords = coords + + self.remaining_reproduction_time = ( + PREY_REPRODUCTION_TIME if prey else PREDATOR_REPRODUCTION_TIME + ) + self.energy_value = None if prey is True else PREDATOR_INITIAL_ENERGY_VALUE + self.alive = True + + def reset_reproduction_time(self) -> None: + """ + >>> e = Entity(True, coords=(0, 0)) + >>> e.reset_reproduction_time() + >>> e.remaining_reproduction_time == PREY_REPRODUCTION_TIME + True + >>> e = Entity(False, coords=(0, 0)) + >>> e.reset_reproduction_time() + >>> e.remaining_reproduction_time == PREDATOR_REPRODUCTION_TIME + True + """ + self.remaining_reproduction_time = ( + PREY_REPRODUCTION_TIME if self.prey is True else PREDATOR_REPRODUCTION_TIME + ) + + def __repr__(self) -> str: + """ + >>> Entity(prey=True, coords=(1, 1)) + Entity(prey=True, coords=(1, 1), remaining_reproduction_time=5) + >>> Entity(prey=False, coords=(2, 1)) # doctest: +NORMALIZE_WHITESPACE + Entity(prey=False, coords=(2, 1), + remaining_reproduction_time=20, energy_value=15) + """ + repr_ = ( + f"Entity(prey={self.prey}, coords={self.coords}, " + f"remaining_reproduction_time={self.remaining_reproduction_time}" + ) + if self.energy_value is not None: + repr_ += f", energy_value={self.energy_value}" + return f"{repr_})" + + +class WaTor: + """ + Represents the main Wa-Tor algorithm. + + :attr time_passed: A function that is called every time + time passes (a chronon) in order to visually display + the new Wa-Tor planet. The time_passed function can block + using time.sleep to slow the algorithm progression. + + >>> wt = WaTor(10, 15) + >>> wt.width + 10 + >>> wt.height + 15 + >>> len(wt.planet) + 15 + >>> len(wt.planet[0]) + 10 + >>> len(wt.get_entities()) == PREDATOR_INITIAL_COUNT + PREY_INITIAL_COUNT + True + """ + + time_passed: Callable[["WaTor", int], None] | None + + def __init__(self, width: int, height: int) -> None: + self.width = width + self.height = height + self.time_passed = None + + self.planet: list[list[Entity | None]] = [[None] * width for _ in range(height)] + + # Populate planet with predators and prey randomly + for _ in range(PREY_INITIAL_COUNT): + self.add_entity(prey=True) + for _ in range(PREDATOR_INITIAL_COUNT): + self.add_entity(prey=False) + self.set_planet(self.planet) + + def set_planet(self, planet: list[list[Entity | None]]) -> None: + """ + Ease of access for testing + + >>> wt = WaTor(WIDTH, HEIGHT) + >>> planet = [ + ... [None, None, None], + ... 
[None, Entity(True, coords=(1, 1)), None] + ... ] + >>> wt.set_planet(planet) + >>> wt.planet == planet + True + >>> wt.width + 3 + >>> wt.height + 2 + """ + self.planet = planet + self.width = len(planet[0]) + self.height = len(planet) + + def add_entity(self, prey: bool) -> None: + """ + Adds an entity, making sure the entity does + not override another entity + + >>> wt = WaTor(WIDTH, HEIGHT) + >>> wt.set_planet([[None, None], [None, None]]) + >>> wt.add_entity(True) + >>> len(wt.get_entities()) + 1 + >>> wt.add_entity(False) + >>> len(wt.get_entities()) + 2 + """ + while True: + row, col = randint(0, self.height - 1), randint(0, self.width - 1) + if self.planet[row][col] is None: + self.planet[row][col] = Entity(prey=prey, coords=(row, col)) + return + + def get_entities(self) -> list[Entity]: + """ + Returns a list of all the entities within the planet. + + >>> wt = WaTor(WIDTH, HEIGHT) + >>> len(wt.get_entities()) == PREDATOR_INITIAL_COUNT + PREY_INITIAL_COUNT + True + """ + return [entity for column in self.planet for entity in column if entity] + + def balance_predators_and_prey(self) -> None: + """ + Balances predators and preys so that prey + can not dominate the predators, blocking up + space for them to reproduce. + + >>> wt = WaTor(WIDTH, HEIGHT) + >>> for i in range(2000): + ... row, col = i // HEIGHT, i % WIDTH + ... wt.planet[row][col] = Entity(True, coords=(row, col)) + >>> entities = len(wt.get_entities()) + >>> wt.balance_predators_and_prey() + >>> len(wt.get_entities()) == entities + False + """ + entities = self.get_entities() + shuffle(entities) + + if len(entities) >= MAX_ENTITIES - MAX_ENTITIES / 10: + prey = [entity for entity in entities if entity.prey] + predators = [entity for entity in entities if not entity.prey] + + prey_count, predator_count = len(prey), len(predators) + + entities_to_purge = ( + prey[:DELETE_UNBALANCED_ENTITIES] + if prey_count > predator_count + else predators[:DELETE_UNBALANCED_ENTITIES] + ) + for entity in entities_to_purge: + self.planet[entity.coords[0]][entity.coords[1]] = None + + def get_surrounding_prey(self, entity: Entity) -> list[Entity]: + """ + Returns all the prey entities around (N, S, E, W) a predator entity. + + Subtly different to the try_to_move_to_unoccupied square. + + >>> wt = WaTor(WIDTH, HEIGHT) + >>> wt.set_planet([ + ... [None, Entity(True, (0, 1)), None], + ... [None, Entity(False, (1, 1)), None], + ... [None, Entity(True, (2, 1)), None]]) + >>> wt.get_surrounding_prey( + ... Entity(False, (1, 1))) # doctest: +NORMALIZE_WHITESPACE + [Entity(prey=True, coords=(0, 1), remaining_reproduction_time=5), + Entity(prey=True, coords=(2, 1), remaining_reproduction_time=5)] + >>> wt.set_planet([[Entity(False, (0, 0))]]) + >>> wt.get_surrounding_prey(Entity(False, (0, 0))) + [] + >>> wt.set_planet([ + ... [Entity(True, (0, 0)), Entity(False, (1, 0)), Entity(False, (2, 0))], + ... [None, Entity(False, (1, 1)), Entity(True, (2, 1))], + ... 
[None, None, None]]) + >>> wt.get_surrounding_prey(Entity(False, (1, 0))) + [Entity(prey=True, coords=(0, 0), remaining_reproduction_time=5)] + """ + row, col = entity.coords + adjacent: list[tuple[int, int]] = [ + (row - 1, col), # North + (row + 1, col), # South + (row, col - 1), # West + (row, col + 1), # East + ] + + return [ + ent + for r, c in adjacent + if 0 <= r < self.height + and 0 <= c < self.width + and (ent := self.planet[r][c]) is not None + and ent.prey + ] + + def move_and_reproduce( + self, entity: Entity, direction_orders: list[Literal["N", "E", "S", "W"]] + ) -> None: + """ + Attempts to move to an unoccupied neighbouring square + in either of the four directions (North, South, East, West). + If the move was successful and the remaining_reproduction time is + equal to 0, then a new prey or predator can also be created + in the previous square. + + :param direction_orders: Ordered list (like priority queue) depicting + order to attempt to move. Removes any systematic + approach of checking neighbouring squares. + + >>> planet = [ + ... [None, None, None], + ... [None, Entity(True, coords=(1, 1)), None], + ... [None, None, None] + ... ] + >>> wt = WaTor(WIDTH, HEIGHT) + >>> wt.set_planet(planet) + >>> wt.move_and_reproduce(Entity(True, coords=(1, 1)), direction_orders=["N"]) + >>> wt.planet # doctest: +NORMALIZE_WHITESPACE + [[None, Entity(prey=True, coords=(0, 1), remaining_reproduction_time=4), None], + [None, None, None], + [None, None, None]] + >>> wt.planet[0][0] = Entity(True, coords=(0, 0)) + >>> wt.move_and_reproduce(Entity(True, coords=(0, 1)), + ... direction_orders=["N", "W", "E", "S"]) + >>> wt.planet # doctest: +NORMALIZE_WHITESPACE + [[Entity(prey=True, coords=(0, 0), remaining_reproduction_time=5), None, + Entity(prey=True, coords=(0, 2), remaining_reproduction_time=4)], + [None, None, None], + [None, None, None]] + >>> wt.planet[0][1] = wt.planet[0][2] + >>> wt.planet[0][2] = None + >>> wt.move_and_reproduce(Entity(True, coords=(0, 1)), + ... direction_orders=["N", "W", "S", "E"]) + >>> wt.planet # doctest: +NORMALIZE_WHITESPACE + [[Entity(prey=True, coords=(0, 0), remaining_reproduction_time=5), None, None], + [None, Entity(prey=True, coords=(1, 1), remaining_reproduction_time=4), None], + [None, None, None]] + + >>> wt = WaTor(WIDTH, HEIGHT) + >>> reproducable_entity = Entity(False, coords=(0, 1)) + >>> reproducable_entity.remaining_reproduction_time = 0 + >>> wt.planet = [[None, reproducable_entity]] + >>> wt.move_and_reproduce(reproducable_entity, + ... direction_orders=["N", "W", "S", "E"]) + >>> wt.planet # doctest: +NORMALIZE_WHITESPACE + [[Entity(prey=False, coords=(0, 0), + remaining_reproduction_time=20, energy_value=15), + Entity(prey=False, coords=(0, 1), remaining_reproduction_time=20, + energy_value=15)]] + """ + row, col = coords = entity.coords + + adjacent_squares: dict[Literal["N", "E", "S", "W"], tuple[int, int]] = { + "N": (row - 1, col), # North + "S": (row + 1, col), # South + "W": (row, col - 1), # West + "E": (row, col + 1), # East + } + # Weight adjacent locations + adjacent: list[tuple[int, int]] = [] + for order in direction_orders: + adjacent.append(adjacent_squares[order]) + + for r, c in adjacent: + if ( + 0 <= r < self.height + and 0 <= c < self.width + and self.planet[r][c] is None + ): + # Move entity to empty adjacent square + self.planet[r][c] = entity + self.planet[row][col] = None + entity.coords = (r, c) + break + + # (2.) 
See if it possible to reproduce in previous square + if coords != entity.coords and entity.remaining_reproduction_time <= 0: + # Check if the entities on the planet is less than the max limit + if len(self.get_entities()) < MAX_ENTITIES: + # Reproduce in previous square + self.planet[row][col] = Entity(prey=entity.prey, coords=coords) + entity.reset_reproduction_time() + else: + entity.remaining_reproduction_time -= 1 + + def perform_prey_actions( + self, entity: Entity, direction_orders: list[Literal["N", "E", "S", "W"]] + ) -> None: + """ + Performs the actions for a prey entity + + For prey the rules are: + 1. At each chronon, a prey moves randomly to one of the adjacent unoccupied + squares. If there are no free squares, no movement takes place. + 2. Once a prey has survived a certain number of chronons it may reproduce. + This is done as it moves to a neighbouring square, + leaving behind a new prey in its old position. + Its reproduction time is also reset to zero. + + >>> wt = WaTor(WIDTH, HEIGHT) + >>> reproducable_entity = Entity(True, coords=(0, 1)) + >>> reproducable_entity.remaining_reproduction_time = 0 + >>> wt.planet = [[None, reproducable_entity]] + >>> wt.perform_prey_actions(reproducable_entity, + ... direction_orders=["N", "W", "S", "E"]) + >>> wt.planet # doctest: +NORMALIZE_WHITESPACE + [[Entity(prey=True, coords=(0, 0), remaining_reproduction_time=5), + Entity(prey=True, coords=(0, 1), remaining_reproduction_time=5)]] + """ + self.move_and_reproduce(entity, direction_orders) + + def perform_predator_actions( + self, + entity: Entity, + occupied_by_prey_coords: tuple[int, int] | None, + direction_orders: list[Literal["N", "E", "S", "W"]], + ) -> None: + """ + Performs the actions for a predator entity + + :param occupied_by_prey_coords: Move to this location if there is prey there + + For predators the rules are: + 1. At each chronon, a predator moves randomly to an adjacent square occupied + by a prey. If there is none, the predator moves to a random adjacent + unoccupied square. If there are no free squares, no movement takes place. + 2. At each chronon, each predator is deprived of a unit of energy. + 3. Upon reaching zero energy, a predator dies. + 4. If a predator moves to a square occupied by a prey, + it eats the prey and earns a certain amount of energy. + 5. Once a predator has survived a certain number of chronons + it may reproduce in exactly the same way as the prey. + + >>> wt = WaTor(WIDTH, HEIGHT) + >>> wt.set_planet([[Entity(True, coords=(0, 0)), Entity(False, coords=(0, 1))]]) + >>> wt.perform_predator_actions(Entity(False, coords=(0, 1)), (0, 0), []) + >>> wt.planet # doctest: +NORMALIZE_WHITESPACE + [[Entity(prey=False, coords=(0, 0), + remaining_reproduction_time=20, energy_value=19), None]] + """ + assert entity.energy_value is not None # [type checking] + + # (3.) If the entity has 0 energy, it will die + if entity.energy_value == 0: + self.planet[entity.coords[0]][entity.coords[1]] = None + return + + # (1.) Move to entity if possible + if occupied_by_prey_coords is not None: + # Kill the prey + prey = self.planet[occupied_by_prey_coords[0]][occupied_by_prey_coords[1]] + assert prey is not None + prey.alive = False + + # Move onto prey + self.planet[occupied_by_prey_coords[0]][occupied_by_prey_coords[1]] = entity + self.planet[entity.coords[0]][entity.coords[1]] = None + + entity.coords = occupied_by_prey_coords + # (4.) Eats the prey and earns energy + entity.energy_value += PREDATOR_FOOD_VALUE + else: + # (5.) 
If it has survived the certain number of chronons it will also + # reproduce in this function + self.move_and_reproduce(entity, direction_orders) + + # (2.) Each chronon, the predator is deprived of a unit of energy + entity.energy_value -= 1 + + def run(self, *, iteration_count: int) -> None: + """ + Emulate time passing by looping iteration_count times + + >>> wt = WaTor(WIDTH, HEIGHT) + >>> wt.run(iteration_count=PREDATOR_INITIAL_ENERGY_VALUE - 1) + >>> len(list(filter(lambda entity: entity.prey is False, + ... wt.get_entities()))) >= PREDATOR_INITIAL_COUNT + True + """ + for iter_num in range(iteration_count): + # Generate list of all entities in order to randomly + # pop an entity at a time to simulate true randomness + # This removes the systematic approach of iterating + # through each entity width by height + all_entities = self.get_entities() + + for __ in range(len(all_entities)): + entity = all_entities.pop(randint(0, len(all_entities) - 1)) + if entity.alive is False: + continue + + directions: list[Literal["N", "E", "S", "W"]] = ["N", "E", "S", "W"] + shuffle(directions) # Randomly shuffle directions + + if entity.prey: + self.perform_prey_actions(entity, directions) + else: + # Create list of surrounding prey + surrounding_prey = self.get_surrounding_prey(entity) + surrounding_prey_coords = None + + if surrounding_prey: + # Again, randomly shuffle directions + shuffle(surrounding_prey) + surrounding_prey_coords = surrounding_prey[0].coords + + self.perform_predator_actions( + entity, surrounding_prey_coords, directions + ) + + # Balance out the predators and prey + self.balance_predators_and_prey() + + if self.time_passed is not None: + # Call time_passed function for Wa-Tor planet + # visualisation in a terminal or a graph. + self.time_passed(self, iter_num) + + +def visualise(wt: WaTor, iter_number: int, *, colour: bool = True) -> None: + """ + Visually displays the Wa-Tor planet using + an ascii code in terminal to clear and re-print + the Wa-Tor planet at intervals. + + Uses ascii colour codes to colourfully display + the predators and prey. + + (0x60f197) Prey = # + (0xfffff) Predator = x + + >>> wt = WaTor(30, 30) + >>> wt.set_planet([ + ... [Entity(True, coords=(0, 0)), Entity(False, coords=(0, 1)), None], + ... [Entity(False, coords=(1, 0)), None, Entity(False, coords=(1, 2))], + ... [None, Entity(True, coords=(2, 1)), None] + ... ]) + >>> visualise(wt, 0, colour=False) # doctest: +NORMALIZE_WHITESPACE + # x . + x . x + . # . + + Iteration: 0 | Prey count: 2 | Predator count: 3 | + """ + if colour: + __import__("os").system("") + print("\x1b[0;0H\x1b[2J\x1b[?25l") + + reprint = "\x1b[0;0H" if colour else "" + ansi_colour_end = "\x1b[0m " if colour else " " + + planet = wt.planet + output = "" + + # Iterate over every entity in the planet + for row in planet: + for entity in row: + if entity is None: + output += " . 
" + else: + if colour is True: + output += ( + "\x1b[38;2;96;241;151m" + if entity.prey + else "\x1b[38;2;255;255;15m" + ) + output += f" {'#' if entity.prey else 'x'}{ansi_colour_end}" + + output += "\n" + + entities = wt.get_entities() + prey_count = sum(entity.prey for entity in entities) + + print( + f"{output}\n Iteration: {iter_number} | Prey count: {prey_count} | " + f"Predator count: {len(entities) - prey_count} | {reprint}" + ) + # Block the thread to be able to visualise seeing the algorithm + sleep(0.05) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + wt = WaTor(WIDTH, HEIGHT) + wt.time_passed = visualise + wt.run(iteration_count=100_000) From f24ab2c60dabb11c37667c5899c39713e84fc871 Mon Sep 17 00:00:00 2001 From: Amir Hosseini <19665344+itsamirhn@users.noreply.github.com> Date: Mon, 14 Aug 2023 09:07:41 +0330 Subject: [PATCH 0915/1543] Add: Two Regex match algorithm (Recursive & DP) (#6321) * Add recursive solution to regex_match.py * Add dp solution to regex_match.py * Add link to regex_match.py * Minor edit * Minor change * Minor change * Update dynamic_programming/regex_match.py Co-authored-by: Tianyi Zheng * Update dynamic_programming/regex_match.py Co-authored-by: Tianyi Zheng * Fix ruff formatting in if statements * Update dynamic_programming/regex_match.py Co-authored-by: Tianyi Zheng * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Tianyi Zheng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/regex_match.py | 97 ++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 dynamic_programming/regex_match.py diff --git a/dynamic_programming/regex_match.py b/dynamic_programming/regex_match.py new file mode 100644 index 000000000000..200a882831c0 --- /dev/null +++ b/dynamic_programming/regex_match.py @@ -0,0 +1,97 @@ +""" +Regex matching check if a text matches pattern or not. +Pattern: + '.' Matches any single character. + '*' Matches zero or more of the preceding element. +More info: + https://medium.com/trick-the-interviwer/regular-expression-matching-9972eb74c03 +""" + + +def recursive_match(text: str, pattern: str) -> bool: + """ + Recursive matching algorithm. + + Time complexity: O(2 ^ (|text| + |pattern|)) + Space complexity: Recursion depth is O(|text| + |pattern|). + + :param text: Text to match. + :param pattern: Pattern to match. + :return: True if text matches pattern, False otherwise. + + >>> recursive_match('abc', 'a.c') + True + >>> recursive_match('abc', 'af*.c') + True + >>> recursive_match('abc', 'a.c*') + True + >>> recursive_match('abc', 'a.c*d') + False + >>> recursive_match('aa', '.*') + True + """ + if not pattern: + return not text + + if not text: + return pattern[-1] == "*" and recursive_match(text, pattern[:-2]) + + if text[-1] == pattern[-1] or pattern[-1] == ".": + return recursive_match(text[:-1], pattern[:-1]) + + if pattern[-1] == "*": + return recursive_match(text[:-1], pattern) or recursive_match( + text, pattern[:-2] + ) + + return False + + +def dp_match(text: str, pattern: str) -> bool: + """ + Dynamic programming matching algorithm. + + Time complexity: O(|text| * |pattern|) + Space complexity: O(|text| * |pattern|) + + :param text: Text to match. + :param pattern: Pattern to match. + :return: True if text matches pattern, False otherwise. 
+ + >>> dp_match('abc', 'a.c') + True + >>> dp_match('abc', 'af*.c') + True + >>> dp_match('abc', 'a.c*') + True + >>> dp_match('abc', 'a.c*d') + False + >>> dp_match('aa', '.*') + True + """ + m = len(text) + n = len(pattern) + dp = [[False for _ in range(n + 1)] for _ in range(m + 1)] + dp[0][0] = True + + for j in range(1, n + 1): + dp[0][j] = pattern[j - 1] == "*" and dp[0][j - 2] + + for i in range(1, m + 1): + for j in range(1, n + 1): + if pattern[j - 1] in {".", text[i - 1]}: + dp[i][j] = dp[i - 1][j - 1] + elif pattern[j - 1] == "*": + dp[i][j] = dp[i][j - 2] + if pattern[j - 2] in {".", text[i - 1]}: + dp[i][j] |= dp[i - 1][j] + else: + dp[i][j] = False + + return dp[m][n] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 02d89bde679488e97cdb077c511b3dbfb660e2b8 Mon Sep 17 00:00:00 2001 From: Ajinkya Chikhale <86607732+ajinkyac03@users.noreply.github.com> Date: Mon, 14 Aug 2023 12:42:42 +0530 Subject: [PATCH 0916/1543] Added implementation for Tribonacci sequence using dp (#6356) * Added implementation for Tribonacci sequence using dp * Updated parameter name * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- dynamic_programming/tribonacci.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 dynamic_programming/tribonacci.py diff --git a/dynamic_programming/tribonacci.py b/dynamic_programming/tribonacci.py new file mode 100644 index 000000000000..58e15da918e2 --- /dev/null +++ b/dynamic_programming/tribonacci.py @@ -0,0 +1,24 @@ +# Tribonacci sequence using Dynamic Programming + + +def tribonacci(num: int) -> list[int]: + """ + Given a number, return first n Tribonacci Numbers. + >>> tribonacci(5) + [0, 0, 1, 1, 2] + >>> tribonacci(8) + [0, 0, 1, 1, 2, 4, 7, 13] + """ + dp = [0] * num + dp[2] = 1 + + for i in range(3, num): + dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3] + + return dp + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c290dd6a433b43b242336d49d227f5e25bbb76de Mon Sep 17 00:00:00 2001 From: Adithya Awati Date: Mon, 14 Aug 2023 12:46:24 +0530 Subject: [PATCH 0917/1543] Update run.py in machine_learning/forecasting (#8957) * Fixed reading CSV file, added type check for data_safety_checker function * Formatted run.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + machine_learning/forecasting/ex_data.csv | 2 +- machine_learning/forecasting/run.py | 35 ++++++++++++------------ 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index cdcd1a8ae8cc..3a244ca6caaf 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -336,6 +336,7 @@ * [Minimum Tickets Cost](dynamic_programming/minimum_tickets_cost.py) * [Optimal Binary Search Tree](dynamic_programming/optimal_binary_search_tree.py) * [Palindrome Partitioning](dynamic_programming/palindrome_partitioning.py) + * [Regex Match](dynamic_programming/regex_match.py) * [Rod Cutting](dynamic_programming/rod_cutting.py) * [Subset Generation](dynamic_programming/subset_generation.py) * [Sum Of Subset](dynamic_programming/sum_of_subset.py) diff --git a/machine_learning/forecasting/ex_data.csv b/machine_learning/forecasting/ex_data.csv index 1c429e649755..e6e73c4a1ca4 100644 --- a/machine_learning/forecasting/ex_data.csv +++ b/machine_learning/forecasting/ex_data.csv @@ -1,4 +1,4 @@ -total_user,total_events,days +total_users,total_events,days 18231,0.0,1 22621,1.0,2 15675,0.0,3 diff --git 
a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py index 0909b76d8907..88c4a537b302 100644 --- a/machine_learning/forecasting/run.py +++ b/machine_learning/forecasting/run.py @@ -1,6 +1,6 @@ """ this is code for forecasting -but i modified it and used it for safety checker of data +but I modified it and used it for safety checker of data for ex: you have an online shop and for some reason some data are missing (the amount of data that u expected are not supposed to be) then we can use it @@ -102,6 +102,10 @@ def data_safety_checker(list_vote: list, actual_result: float) -> bool: """ safe = 0 not_safe = 0 + + if not isinstance(actual_result, float): + raise TypeError("Actual result should be float. Value passed is a list") + for i in list_vote: if i > actual_result: safe = not_safe + 1 @@ -114,16 +118,11 @@ def data_safety_checker(list_vote: list, actual_result: float) -> bool: if __name__ == "__main__": - # data_input_df = pd.read_csv("ex_data.csv", header=None) - data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]] - data_input_df = pd.DataFrame( - data_input, columns=["total_user", "total_even", "days"] - ) - """ data column = total user in a day, how much online event held in one day, what day is that(sunday-saturday) """ + data_input_df = pd.read_csv("ex_data.csv") # start normalization normalize_df = Normalizer().fit_transform(data_input_df.values) @@ -138,23 +137,23 @@ def data_safety_checker(list_vote: list, actual_result: float) -> bool: x_test = x[len(x) - 1 :] # for linear regression & sarimax - trn_date = total_date[: len(total_date) - 1] - trn_user = total_user[: len(total_user) - 1] - trn_match = total_match[: len(total_match) - 1] + train_date = total_date[: len(total_date) - 1] + train_user = total_user[: len(total_user) - 1] + train_match = total_match[: len(total_match) - 1] - tst_date = total_date[len(total_date) - 1 :] - tst_user = total_user[len(total_user) - 1 :] - tst_match = total_match[len(total_match) - 1 :] + test_date = total_date[len(total_date) - 1 :] + test_user = total_user[len(total_user) - 1 :] + test_match = total_match[len(total_match) - 1 :] # voting system with forecasting res_vote = [ linear_regression_prediction( - trn_date, trn_user, trn_match, tst_date, tst_match + train_date, train_user, train_match, test_date, test_match ), - sarimax_predictor(trn_user, trn_match, tst_match), - support_vector_regressor(x_train, x_test, trn_user), + sarimax_predictor(train_user, train_match, test_match), + support_vector_regressor(x_train, x_test, train_user), ] # check the safety of today's data - not_str = "" if data_safety_checker(res_vote, tst_user) else "not " - print("Today's data is {not_str}safe.") + not_str = "" if data_safety_checker(res_vote, test_user[0]) else "not " + print(f"Today's data is {not_str}safe.") From 4b7ecb6a8134379481dd3d5035cb99a627930462 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Mon, 14 Aug 2023 09:28:52 +0100 Subject: [PATCH 0918/1543] Create is valid email address algorithm (#8907) * feat(strings): Create is valid email address * updating DIRECTORY.md * feat(strings): Create is_valid_email_address algorithm * chore(is_valid_email_address): Implement changes from code review * Update strings/is_valid_email_address.py Co-authored-by: Tianyi Zheng * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * chore(is_valid_email_address): Fix ruff error * Update strings/is_valid_email_address.py Co-authored-by: Tianyi Zheng 
--------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + strings/is_valid_email_address.py | 117 ++++++++++++++++++++++++++++++ 2 files changed, 118 insertions(+) create mode 100644 strings/is_valid_email_address.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 3a244ca6caaf..14152e4abd04 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1171,6 +1171,7 @@ * [Is Pangram](strings/is_pangram.py) * [Is Spain National Id](strings/is_spain_national_id.py) * [Is Srilankan Phone Number](strings/is_srilankan_phone_number.py) + * [Is Valid Email Address](strings/is_valid_email_address.py) * [Jaro Winkler](strings/jaro_winkler.py) * [Join](strings/join.py) * [Knuth Morris Pratt](strings/knuth_morris_pratt.py) diff --git a/strings/is_valid_email_address.py b/strings/is_valid_email_address.py new file mode 100644 index 000000000000..205394f81297 --- /dev/null +++ b/strings/is_valid_email_address.py @@ -0,0 +1,117 @@ +""" +Implements an is valid email address algorithm + +@ https://en.wikipedia.org/wiki/Email_address +""" + +import string + +email_tests: tuple[tuple[str, bool], ...] = ( + ("simple@example.com", True), + ("very.common@example.com", True), + ("disposable.style.email.with+symbol@example.com", True), + ("other-email-with-hyphen@and.subdomains.example.com", True), + ("fully-qualified-domain@example.com", True), + ("user.name+tag+sorting@example.com", True), + ("x@example.com", True), + ("example-indeed@strange-example.com", True), + ("test/test@test.com", True), + ( + "123456789012345678901234567890123456789012345678901234567890123@example.com", + True, + ), + ("admin@mailserver1", True), + ("example@s.example", True), + ("Abc.example.com", False), + ("A@b@c@example.com", False), + ("abc@example..com", False), + ("a(c)d,e:f;gi[j\\k]l@example.com", False), + ( + "12345678901234567890123456789012345678901234567890123456789012345@example.com", + False, + ), + ("i.like.underscores@but_its_not_allowed_in_this_part", False), + ("", False), +) + +# The maximum octets (one character as a standard unicode character is one byte) +# that the local part and the domain part can have +MAX_LOCAL_PART_OCTETS = 64 +MAX_DOMAIN_OCTETS = 255 + + +def is_valid_email_address(email: str) -> bool: + """ + Returns True if the passed email address is valid. + + The local part of the email precedes the singular @ symbol and + is associated with a display-name. For example, "john.smith" + The domain is stricter than the local part and follows the @ symbol. + + Global email checks: + 1. There can only be one @ symbol in the email address. Technically if the + @ symbol is quoted in the local-part, then it is valid, however this + implementation ignores "" for now. + (See https://en.wikipedia.org/wiki/Email_address#:~:text=If%20quoted,) + 2. The local-part and the domain are limited to a certain number of octets. With + unicode storing a single character in one byte, each octet is equivalent to + a character. Hence, we can just check the length of the string. + Checks for the local-part: + 3. The local-part may contain: upper and lowercase latin letters, digits 0 to 9, + and printable characters (!#$%&'*+-/=?^_`{|}~) + 4. The local-part may also contain a "." in any place that is not the first or + last character, and may not have more than one "." consecutively. + + Checks for the domain: + 5. 
The domain may contain: upper and lowercase latin letters and digits 0 to 9 + 6. Hyphen "-", provided that it is not the first or last character + 7. The domain may also contain a "." in any place that is not the first or + last character, and may not have more than one "." consecutively. + + >>> for email, valid in email_tests: + ... assert is_valid_email_address(email) == valid + """ + + # (1.) Make sure that there is only one @ symbol in the email address + if email.count("@") != 1: + return False + + local_part, domain = email.split("@") + # (2.) Check octet length of the local part and domain + if len(local_part) > MAX_LOCAL_PART_OCTETS or len(domain) > MAX_DOMAIN_OCTETS: + return False + + # (3.) Validate the characters in the local-part + if any( + char not in string.ascii_letters + string.digits + ".(!#$%&'*+-/=?^_`{|}~)" + for char in local_part + ): + return False + + # (4.) Validate the placement of "." characters in the local-part + if local_part.startswith(".") or local_part.endswith(".") or ".." in local_part: + return False + + # (5.) Validate the characters in the domain + if any(char not in string.ascii_letters + string.digits + ".-" for char in domain): + return False + + # (6.) Validate the placement of "-" characters + if domain.startswith("-") or domain.endswith("."): + return False + + # (7.) Validate the placement of "." characters + if domain.startswith(".") or domain.endswith(".") or ".." in domain: + return False + return True + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + for email, valid in email_tests: + is_valid = is_valid_email_address(email) + assert is_valid == valid, f"{email} is {is_valid}" + print(f"Email address {email} is {'not ' if not is_valid else ''}valid") From ac68dc1128535b6798af256fcdab67340f6c0fd9 Mon Sep 17 00:00:00 2001 From: Adithya Awati <1ds21ai001@dsce.edu.in> Date: Mon, 14 Aug 2023 14:04:16 +0530 Subject: [PATCH 0919/1543] Fixed Pytest warnings for machine_learning/forecasting (#8958) * updating DIRECTORY.md * Fixed pyTest Warnings --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + machine_learning/forecasting/run.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 14152e4abd04..384ce1b2209d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -340,6 +340,7 @@ * [Rod Cutting](dynamic_programming/rod_cutting.py) * [Subset Generation](dynamic_programming/subset_generation.py) * [Sum Of Subset](dynamic_programming/sum_of_subset.py) + * [Tribonacci](dynamic_programming/tribonacci.py) * [Viterbi](dynamic_programming/viterbi.py) * [Word Break](dynamic_programming/word_break.py) diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py index 88c4a537b302..64e719daacc2 100644 --- a/machine_learning/forecasting/run.py +++ b/machine_learning/forecasting/run.py @@ -11,6 +11,8 @@ u can just adjust it for ur own purpose """ +from warnings import simplefilter + import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer @@ -45,8 +47,10 @@ def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> >>> sarimax_predictor([4,2,6,8], [3,1,2,4], [2]) 6.6666671111109626 """ + # Suppress the User Warning raised by SARIMAX due to insufficient observations + simplefilter("ignore", UserWarning) order = (1, 2, 1) - seasonal_order = (1, 1, 0, 7) + seasonal_order = (1, 1, 1, 7) model = SARIMAX( train_user, exog=train_match, order=order, 
seasonal_order=seasonal_order ) From 2ab3bf2689d21e7375539c79ecee358e9d7c3359 Mon Sep 17 00:00:00 2001 From: robertjcalistri <85811008+robertjcalistri@users.noreply.github.com> Date: Mon, 14 Aug 2023 05:31:53 -0400 Subject: [PATCH 0920/1543] =?UTF-8?q?Added=20functions=20to=20calculate=20?= =?UTF-8?q?temperature=20of=20an=20ideal=20gas=20and=20number=20o=E2=80=A6?= =?UTF-8?q?=20(#8919)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Added functions to calculate temperature of an ideal gas and number of moles of an ideal gas * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update physics/ideal_gas_law.py Renamed function name Co-authored-by: Tianyi Zheng * Update physics/ideal_gas_law.py Updated formatting Co-authored-by: Tianyi Zheng * Update physics/ideal_gas_law.py Removed unnecessary parentheses Co-authored-by: Tianyi Zheng * Update physics/ideal_gas_law.py Removed unnecessary parentheses Co-authored-by: Tianyi Zheng * Update ideal_gas_law.py Updated incorrect function calls moles of gas system doctests * Update physics/ideal_gas_law.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- physics/ideal_gas_law.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/physics/ideal_gas_law.py b/physics/ideal_gas_law.py index 805da47b0079..09b4fb3a9c14 100644 --- a/physics/ideal_gas_law.py +++ b/physics/ideal_gas_law.py @@ -53,6 +53,40 @@ def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float: return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure +def temperature_of_gas_system(moles: float, volume: float, pressure: float) -> float: + """ + >>> temperature_of_gas_system(2, 100, 5) + 30.068090996146232 + >>> temperature_of_gas_system(11, 5009, 1000) + 54767.66101807144 + >>> temperature_of_gas_system(3, -0.46, 23.5) + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter positive value. + """ + if moles < 0 or volume < 0 or pressure < 0: + raise ValueError("Invalid inputs. Enter positive value.") + + return pressure * volume / (moles * UNIVERSAL_GAS_CONSTANT) + + +def moles_of_gas_system(kelvin: float, volume: float, pressure: float) -> float: + """ + >>> moles_of_gas_system(100, 5, 10) + 0.06013618199229246 + >>> moles_of_gas_system(110, 5009, 1000) + 5476.766101807144 + >>> moles_of_gas_system(3, -0.46, 23.5) + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter positive value. + """ + if kelvin < 0 or volume < 0 or pressure < 0: + raise ValueError("Invalid inputs. 
Enter positive value.") + + return pressure * volume / (kelvin * UNIVERSAL_GAS_CONSTANT) + + if __name__ == "__main__": from doctest import testmod From fb1b939a89fb08370297cbb455846f61f66847bc Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Mon, 14 Aug 2023 12:17:27 +0100 Subject: [PATCH 0921/1543] Consolidate find_min and find_min recursive and find_max and find_max_recursive (#8960) * updating DIRECTORY.md * refactor(min-max): Consolidate implementations * updating DIRECTORY.md * refactor(min-max): Append _iterative to func name --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 -- maths/find_max.py | 65 +++++++++++++++++++++++++++++++++---- maths/find_max_recursion.py | 58 --------------------------------- maths/find_min.py | 65 +++++++++++++++++++++++++++++++++---- maths/find_min_recursion.py | 58 --------------------------------- 5 files changed, 118 insertions(+), 130 deletions(-) delete mode 100644 maths/find_max_recursion.py delete mode 100644 maths/find_min_recursion.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 384ce1b2209d..be5fa3584a58 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -573,9 +573,7 @@ * [Fermat Little Theorem](maths/fermat_little_theorem.py) * [Fibonacci](maths/fibonacci.py) * [Find Max](maths/find_max.py) - * [Find Max Recursion](maths/find_max_recursion.py) * [Find Min](maths/find_min.py) - * [Find Min Recursion](maths/find_min_recursion.py) * [Floor](maths/floor.py) * [Gamma](maths/gamma.py) * [Gamma Recursive](maths/gamma_recursive.py) diff --git a/maths/find_max.py b/maths/find_max.py index 684fbe8161e8..729a80ab421c 100644 --- a/maths/find_max.py +++ b/maths/find_max.py @@ -1,23 +1,23 @@ from __future__ import annotations -def find_max(nums: list[int | float]) -> int | float: +def find_max_iterative(nums: list[int | float]) -> int | float: """ >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]): - ... find_max(nums) == max(nums) + ... find_max_iterative(nums) == max(nums) True True True True - >>> find_max([2, 4, 9, 7, 19, 94, 5]) + >>> find_max_iterative([2, 4, 9, 7, 19, 94, 5]) 94 - >>> find_max([]) + >>> find_max_iterative([]) Traceback (most recent call last): ... - ValueError: find_max() arg is an empty sequence + ValueError: find_max_iterative() arg is an empty sequence """ if len(nums) == 0: - raise ValueError("find_max() arg is an empty sequence") + raise ValueError("find_max_iterative() arg is an empty sequence") max_num = nums[0] for x in nums: if x > max_num: @@ -25,6 +25,59 @@ def find_max(nums: list[int | float]) -> int | float: return max_num +# Divide and Conquer algorithm +def find_max_recursive(nums: list[int | float], left: int, right: int) -> int | float: + """ + find max value in list + :param nums: contains elements + :param left: index of first element + :param right: index of last element + :return: max in nums + + >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]): + ... find_max_recursive(nums, 0, len(nums) - 1) == max(nums) + True + True + True + True + >>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10] + >>> find_max_recursive(nums, 0, len(nums) - 1) == max(nums) + True + >>> find_max_recursive([], 0, 0) + Traceback (most recent call last): + ... + ValueError: find_max_recursive() arg is an empty sequence + >>> find_max_recursive(nums, 0, len(nums)) == max(nums) + Traceback (most recent call last): + ... 
+ IndexError: list index out of range + >>> find_max_recursive(nums, -len(nums), -1) == max(nums) + True + >>> find_max_recursive(nums, -len(nums) - 1, -1) == max(nums) + Traceback (most recent call last): + ... + IndexError: list index out of range + """ + if len(nums) == 0: + raise ValueError("find_max_recursive() arg is an empty sequence") + if ( + left >= len(nums) + or left < -len(nums) + or right >= len(nums) + or right < -len(nums) + ): + raise IndexError("list index out of range") + if left == right: + return nums[left] + mid = (left + right) >> 1 # the middle + left_max = find_max_recursive(nums, left, mid) # find max in range[left, mid] + right_max = find_max_recursive( + nums, mid + 1, right + ) # find max in range[mid + 1, right] + + return left_max if left_max >= right_max else right_max + + if __name__ == "__main__": import doctest diff --git a/maths/find_max_recursion.py b/maths/find_max_recursion.py deleted file mode 100644 index 629932e0818f..000000000000 --- a/maths/find_max_recursion.py +++ /dev/null @@ -1,58 +0,0 @@ -from __future__ import annotations - - -# Divide and Conquer algorithm -def find_max(nums: list[int | float], left: int, right: int) -> int | float: - """ - find max value in list - :param nums: contains elements - :param left: index of first element - :param right: index of last element - :return: max in nums - - >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]): - ... find_max(nums, 0, len(nums) - 1) == max(nums) - True - True - True - True - >>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10] - >>> find_max(nums, 0, len(nums) - 1) == max(nums) - True - >>> find_max([], 0, 0) - Traceback (most recent call last): - ... - ValueError: find_max() arg is an empty sequence - >>> find_max(nums, 0, len(nums)) == max(nums) - Traceback (most recent call last): - ... - IndexError: list index out of range - >>> find_max(nums, -len(nums), -1) == max(nums) - True - >>> find_max(nums, -len(nums) - 1, -1) == max(nums) - Traceback (most recent call last): - ... - IndexError: list index out of range - """ - if len(nums) == 0: - raise ValueError("find_max() arg is an empty sequence") - if ( - left >= len(nums) - or left < -len(nums) - or right >= len(nums) - or right < -len(nums) - ): - raise IndexError("list index out of range") - if left == right: - return nums[left] - mid = (left + right) >> 1 # the middle - left_max = find_max(nums, left, mid) # find max in range[left, mid] - right_max = find_max(nums, mid + 1, right) # find max in range[mid + 1, right] - - return left_max if left_max >= right_max else right_max - - -if __name__ == "__main__": - import doctest - - doctest.testmod(verbose=True) diff --git a/maths/find_min.py b/maths/find_min.py index 2eac087c6388..762562e36ef9 100644 --- a/maths/find_min.py +++ b/maths/find_min.py @@ -1,33 +1,86 @@ from __future__ import annotations -def find_min(nums: list[int | float]) -> int | float: +def find_min_iterative(nums: list[int | float]) -> int | float: """ Find Minimum Number in a List :param nums: contains elements :return: min number in list >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]): - ... find_min(nums) == min(nums) + ... find_min_iterative(nums) == min(nums) True True True True - >>> find_min([0, 1, 2, 3, 4, 5, -3, 24, -56]) + >>> find_min_iterative([0, 1, 2, 3, 4, 5, -3, 24, -56]) -56 - >>> find_min([]) + >>> find_min_iterative([]) Traceback (most recent call last): ... 
- ValueError: find_min() arg is an empty sequence + ValueError: find_min_iterative() arg is an empty sequence """ if len(nums) == 0: - raise ValueError("find_min() arg is an empty sequence") + raise ValueError("find_min_iterative() arg is an empty sequence") min_num = nums[0] for num in nums: min_num = min(min_num, num) return min_num +# Divide and Conquer algorithm +def find_min_recursive(nums: list[int | float], left: int, right: int) -> int | float: + """ + find min value in list + :param nums: contains elements + :param left: index of first element + :param right: index of last element + :return: min in nums + + >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]): + ... find_min_recursive(nums, 0, len(nums) - 1) == min(nums) + True + True + True + True + >>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10] + >>> find_min_recursive(nums, 0, len(nums) - 1) == min(nums) + True + >>> find_min_recursive([], 0, 0) + Traceback (most recent call last): + ... + ValueError: find_min_recursive() arg is an empty sequence + >>> find_min_recursive(nums, 0, len(nums)) == min(nums) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> find_min_recursive(nums, -len(nums), -1) == min(nums) + True + >>> find_min_recursive(nums, -len(nums) - 1, -1) == min(nums) + Traceback (most recent call last): + ... + IndexError: list index out of range + """ + if len(nums) == 0: + raise ValueError("find_min_recursive() arg is an empty sequence") + if ( + left >= len(nums) + or left < -len(nums) + or right >= len(nums) + or right < -len(nums) + ): + raise IndexError("list index out of range") + if left == right: + return nums[left] + mid = (left + right) >> 1 # the middle + left_min = find_min_recursive(nums, left, mid) # find min in range[left, mid] + right_min = find_min_recursive( + nums, mid + 1, right + ) # find min in range[mid + 1, right] + + return left_min if left_min <= right_min else right_min + + if __name__ == "__main__": import doctest diff --git a/maths/find_min_recursion.py b/maths/find_min_recursion.py deleted file mode 100644 index 4d11015efcd5..000000000000 --- a/maths/find_min_recursion.py +++ /dev/null @@ -1,58 +0,0 @@ -from __future__ import annotations - - -# Divide and Conquer algorithm -def find_min(nums: list[int | float], left: int, right: int) -> int | float: - """ - find min value in list - :param nums: contains elements - :param left: index of first element - :param right: index of last element - :return: min in nums - - >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]): - ... find_min(nums, 0, len(nums) - 1) == min(nums) - True - True - True - True - >>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10] - >>> find_min(nums, 0, len(nums) - 1) == min(nums) - True - >>> find_min([], 0, 0) - Traceback (most recent call last): - ... - ValueError: find_min() arg is an empty sequence - >>> find_min(nums, 0, len(nums)) == min(nums) - Traceback (most recent call last): - ... - IndexError: list index out of range - >>> find_min(nums, -len(nums), -1) == min(nums) - True - >>> find_min(nums, -len(nums) - 1, -1) == min(nums) - Traceback (most recent call last): - ... 
- IndexError: list index out of range - """ - if len(nums) == 0: - raise ValueError("find_min() arg is an empty sequence") - if ( - left >= len(nums) - or left < -len(nums) - or right >= len(nums) - or right < -len(nums) - ): - raise IndexError("list index out of range") - if left == right: - return nums[left] - mid = (left + right) >> 1 # the middle - left_min = find_min(nums, left, mid) # find min in range[left, mid] - right_min = find_min(nums, mid + 1, right) # find min in range[mid + 1, right] - - return left_min if left_min <= right_min else right_min - - -if __name__ == "__main__": - import doctest - - doctest.testmod(verbose=True) From 7021afda047b034958bfdb67e8479af2e8c7aeb9 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 14 Aug 2023 23:12:11 -0400 Subject: [PATCH 0922/1543] [pre-commit.ci] pre-commit autoupdate (#8963) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.282 → v0.0.284](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.282...v0.0.284) - [github.com/tox-dev/pyproject-fmt: 0.13.0 → 0.13.1](https://github.com/tox-dev/pyproject-fmt/compare/0.13.0...0.13.1) - [github.com/pre-commit/mirrors-mypy: v1.4.1 → v1.5.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.4.1...v1.5.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index da6762123b04..b08139561639 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.282 + rev: v0.0.284 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.13.0" + rev: "0.13.1" hooks: - id: pyproject-fmt @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.4.1 + rev: v1.5.0 hooks: - id: mypy args: From 7618a92fee002475b3bed9227944972d346db440 Mon Sep 17 00:00:00 2001 From: Erfan Alimohammadi Date: Wed, 16 Aug 2023 00:07:49 +0330 Subject: [PATCH 0923/1543] Remove a slash in path to save the file correctly on Linux (#8053) --- computer_vision/flip_augmentation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/computer_vision/flip_augmentation.py b/computer_vision/flip_augmentation.py index 93b4e3f6da79..77a8cbd7b14f 100644 --- a/computer_vision/flip_augmentation.py +++ b/computer_vision/flip_augmentation.py @@ -32,13 +32,13 @@ def main() -> None: letter_code = random_chars(32) file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0] file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}" - cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85]) + cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85]) print(f"Success {index+1}/{len(new_images)} with {file_name}") annos_list = [] for anno in new_annos[index]: obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}" annos_list.append(obj) - with open(f"/{file_root}.txt", "w") as outfile: + with open(f"{file_root}.txt", "w") as outfile: outfile.write("\n".join(line for line in annos_list)) From 490e645ed3b7ae50f0d7e23e047d088ba069ed56 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Tue, 15 Aug 2023 22:27:41 +0100 Subject: [PATCH 
0924/1543] Fix minor typing errors in maths/ (#8959) * updating DIRECTORY.md * types(maths): Fix pylance issues in maths * reset(vsc): Reset settings changes * Update maths/jaccard_similarity.py Co-authored-by: Tianyi Zheng * revert(erosion_operation): Revert erosion_operation * test(jaccard_similarity): Add doctest to test alternative_union * types(newton_raphson): Add typehints to func bodies --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- .../erosion_operation.py | 1 + digital_image_processing/rotation/rotation.py | 4 +- maths/average_median.py | 4 +- maths/euler_modified.py | 2 +- maths/gaussian_error_linear_unit.py | 4 +- maths/jaccard_similarity.py | 45 ++++++++++++------- maths/newton_raphson.py | 33 +++++++++----- maths/qr_decomposition.py | 2 +- maths/sigmoid.py | 2 +- maths/tanh.py | 4 +- 10 files changed, 65 insertions(+), 36 deletions(-) diff --git a/digital_image_processing/morphological_operations/erosion_operation.py b/digital_image_processing/morphological_operations/erosion_operation.py index c2cde2ea6990..c0e1ef847237 100644 --- a/digital_image_processing/morphological_operations/erosion_operation.py +++ b/digital_image_processing/morphological_operations/erosion_operation.py @@ -21,6 +21,7 @@ def rgb2gray(rgb: np.array) -> np.array: def gray2binary(gray: np.array) -> np.array: """ Return binary image from gray image + >>> gray2binary(np.array([[127, 255, 0]])) array([[False, True, False]]) >>> gray2binary(np.array([[0]])) diff --git a/digital_image_processing/rotation/rotation.py b/digital_image_processing/rotation/rotation.py index 958d16fafb91..0f5e36ddd5be 100644 --- a/digital_image_processing/rotation/rotation.py +++ b/digital_image_processing/rotation/rotation.py @@ -10,12 +10,12 @@ def get_rotation( ) -> np.ndarray: """ Get image rotation - :param img: np.array + :param img: np.ndarray :param pt1: 3x2 list :param pt2: 3x2 list :param rows: columns image shape :param cols: rows image shape - :return: np.array + :return: np.ndarray """ matrix = cv2.getAffineTransform(pt1, pt2) return cv2.warpAffine(img, matrix, (rows, cols)) diff --git a/maths/average_median.py b/maths/average_median.py index cd1ec1574893..f24e525736b3 100644 --- a/maths/average_median.py +++ b/maths/average_median.py @@ -19,7 +19,9 @@ def median(nums: list) -> int | float: Returns: Median. """ - sorted_list = sorted(nums) + # The sorted function returns list[SupportsRichComparisonT@sorted] + # which does not support `+` + sorted_list: list[int] = sorted(nums) length = len(sorted_list) mid_index = length >> 1 return ( diff --git a/maths/euler_modified.py b/maths/euler_modified.py index 14bddadf4c53..d02123e1e2fb 100644 --- a/maths/euler_modified.py +++ b/maths/euler_modified.py @@ -5,7 +5,7 @@ def euler_modified( ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float -) -> np.array: +) -> np.ndarray: """ Calculate solution at each step to an ODE using Euler's Modified Method The Euler Method is straightforward to implement, but can't give accurate solutions. diff --git a/maths/gaussian_error_linear_unit.py b/maths/gaussian_error_linear_unit.py index 7b5f875143b9..18384bb6c864 100644 --- a/maths/gaussian_error_linear_unit.py +++ b/maths/gaussian_error_linear_unit.py @@ -13,7 +13,7 @@ import numpy as np -def sigmoid(vector: np.array) -> np.array: +def sigmoid(vector: np.ndarray) -> np.ndarray: """ Mathematical function sigmoid takes a vector x of K real numbers as input and returns 1/ (1 + e^-x). 
@@ -25,7 +25,7 @@ def sigmoid(vector: np.array) -> np.array: return 1 / (1 + np.exp(-vector)) -def gaussian_error_linear_unit(vector: np.array) -> np.array: +def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray: """ Implements the Gaussian Error Linear Unit (GELU) function diff --git a/maths/jaccard_similarity.py b/maths/jaccard_similarity.py index 32054414c0c2..6b6243458fa8 100644 --- a/maths/jaccard_similarity.py +++ b/maths/jaccard_similarity.py @@ -14,7 +14,11 @@ """ -def jaccard_similarity(set_a, set_b, alternative_union=False): +def jaccard_similarity( + set_a: set[str] | list[str] | tuple[str], + set_b: set[str] | list[str] | tuple[str], + alternative_union=False, +): """ Finds the jaccard similarity between two sets. Essentially, its intersection over union. @@ -37,41 +41,52 @@ def jaccard_similarity(set_a, set_b, alternative_union=False): >>> set_b = {'c', 'd', 'e', 'f', 'h', 'i'} >>> jaccard_similarity(set_a, set_b) 0.375 - >>> jaccard_similarity(set_a, set_a) 1.0 - >>> jaccard_similarity(set_a, set_a, True) 0.5 - >>> set_a = ['a', 'b', 'c', 'd', 'e'] >>> set_b = ('c', 'd', 'e', 'f', 'h', 'i') >>> jaccard_similarity(set_a, set_b) 0.375 + >>> set_a = ('c', 'd', 'e', 'f', 'h', 'i') + >>> set_b = ['a', 'b', 'c', 'd', 'e'] + >>> jaccard_similarity(set_a, set_b) + 0.375 + >>> set_a = ('c', 'd', 'e', 'f', 'h', 'i') + >>> set_b = ['a', 'b', 'c', 'd'] + >>> jaccard_similarity(set_a, set_b, True) + 0.2 + >>> set_a = {'a', 'b'} + >>> set_b = ['c', 'd'] + >>> jaccard_similarity(set_a, set_b) + Traceback (most recent call last): + ... + ValueError: Set a and b must either both be sets or be either a list or a tuple. """ if isinstance(set_a, set) and isinstance(set_b, set): - intersection = len(set_a.intersection(set_b)) + intersection_length = len(set_a.intersection(set_b)) if alternative_union: - union = len(set_a) + len(set_b) + union_length = len(set_a) + len(set_b) else: - union = len(set_a.union(set_b)) + union_length = len(set_a.union(set_b)) - return intersection / union + return intersection_length / union_length - if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)): + elif isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)): intersection = [element for element in set_a if element in set_b] if alternative_union: - union = len(set_a) + len(set_b) - return len(intersection) / union + return len(intersection) / (len(set_a) + len(set_b)) else: - union = set_a + [element for element in set_b if element not in set_a] + # Cast set_a to list because tuples cannot be mutated + union = list(set_a) + [element for element in set_b if element not in set_a] return len(intersection) / len(union) - - return len(intersection) / len(union) - return None + raise ValueError( + "Set a and b must either both be sets or be either a list or a tuple." + ) if __name__ == "__main__": diff --git a/maths/newton_raphson.py b/maths/newton_raphson.py index 2c9cd1de95b0..f6b227b5c9c1 100644 --- a/maths/newton_raphson.py +++ b/maths/newton_raphson.py @@ -1,16 +1,20 @@ """ - Author: P Shreyas Shetty - Implementation of Newton-Raphson method for solving equations of kind - f(x) = 0. It is an iterative method where solution is found by the expression - x[n+1] = x[n] + f(x[n])/f'(x[n]) - If no solution exists, then either the solution will not be found when iteration - limit is reached or the gradient f'(x[n]) approaches zero. In both cases, exception - is raised. If iteration limit is reached, try increasing maxiter. 
- """ +Author: P Shreyas Shetty +Implementation of Newton-Raphson method for solving equations of kind +f(x) = 0. It is an iterative method where solution is found by the expression + x[n+1] = x[n] + f(x[n])/f'(x[n]) +If no solution exists, then either the solution will not be found when iteration +limit is reached or the gradient f'(x[n]) approaches zero. In both cases, exception +is raised. If iteration limit is reached, try increasing maxiter. +""" + import math as m +from collections.abc import Callable + +DerivativeFunc = Callable[[float], float] -def calc_derivative(f, a, h=0.001): +def calc_derivative(f: DerivativeFunc, a: float, h: float = 0.001) -> float: """ Calculates derivative at point a for function f using finite difference method @@ -18,7 +22,14 @@ def calc_derivative(f, a, h=0.001): return (f(a + h) - f(a - h)) / (2 * h) -def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=False): +def newton_raphson( + f: DerivativeFunc, + x0: float = 0, + maxiter: int = 100, + step: float = 0.0001, + maxerror: float = 1e-6, + logsteps: bool = False, +) -> tuple[float, float, list[float]]: a = x0 # set the initial guess steps = [a] error = abs(f(a)) @@ -36,7 +47,7 @@ def newton_raphson(f, x0=0, maxiter=100, step=0.0001, maxerror=1e-6, logsteps=Fa if logsteps: # If logstep is true, then log intermediate steps return a, error, steps - return a, error + return a, error, [] if __name__ == "__main__": diff --git a/maths/qr_decomposition.py b/maths/qr_decomposition.py index a8414fbece87..670b49206aa7 100644 --- a/maths/qr_decomposition.py +++ b/maths/qr_decomposition.py @@ -1,7 +1,7 @@ import numpy as np -def qr_householder(a): +def qr_householder(a: np.ndarray): """Return a QR-decomposition of the matrix A using Householder reflection. The QR-decomposition decomposes the matrix A of shape (m, n) into an diff --git a/maths/sigmoid.py b/maths/sigmoid.py index 147588e8871f..cb45bde2702c 100644 --- a/maths/sigmoid.py +++ b/maths/sigmoid.py @@ -11,7 +11,7 @@ import numpy as np -def sigmoid(vector: np.array) -> np.array: +def sigmoid(vector: np.ndarray) -> np.ndarray: """ Implements the sigmoid function diff --git a/maths/tanh.py b/maths/tanh.py index ddab3e1ab717..38a369d9118d 100644 --- a/maths/tanh.py +++ b/maths/tanh.py @@ -12,12 +12,12 @@ import numpy as np -def tangent_hyperbolic(vector: np.array) -> np.array: +def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray: """ Implements the tanh function Parameters: - vector: np.array + vector: np.ndarray Returns: tanh (np.array): The input numpy array after applying tanh. 
From cecf1fdd529782d754e1aa4d6df099e391003c76 Mon Sep 17 00:00:00 2001 From: Juyoung Kim <61103343+JadeKim042386@users.noreply.github.com> Date: Wed, 16 Aug 2023 07:52:51 +0900 Subject: [PATCH 0925/1543] Fix greedy_best_first (#8775) * fix: typo #8770 * refactor: delete unnecessary continue * add test grids * fix: add \_\_eq\_\_ in Node class #8770 * fix: delete unnecessary code - node in self.open_nodes is always better node #8770 * fix: docstring * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: docstring max length * refactor: get the successors using a list comprehension * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- graphs/greedy_best_first.py | 120 ++++++++++++++++++++---------------- 1 file changed, 67 insertions(+), 53 deletions(-) diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py index 35f7ca9feeef..bb3160047e34 100644 --- a/graphs/greedy_best_first.py +++ b/graphs/greedy_best_first.py @@ -6,14 +6,32 @@ Path = list[tuple[int, int]] -grid = [ - [0, 0, 0, 0, 0, 0, 0], - [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 1, 0, 0, 0, 0], - [1, 0, 1, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 1, 0, 0], +# 0's are free path whereas 1's are obstacles +TEST_GRIDS = [ + [ + [0, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 0, 0, 0, 0], + [1, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0, 0], + ], + [ + [0, 0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 1, 0, 1], + [0, 0, 0, 1, 1, 0, 0], + [0, 1, 0, 0, 1, 0, 0], + [1, 0, 0, 1, 1, 0, 1], + [0, 0, 0, 0, 0, 0, 0], + ], + [ + [0, 0, 1, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 1, 0, 1], + [1, 0, 0, 1, 1], + [0, 0, 0, 0, 0], + ], ] delta = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right @@ -65,10 +83,14 @@ def calculate_heuristic(self) -> float: def __lt__(self, other) -> bool: return self.f_cost < other.f_cost + def __eq__(self, other) -> bool: + return self.pos == other.pos + class GreedyBestFirst: """ - >>> gbf = GreedyBestFirst((0, 0), (len(grid) - 1, len(grid[0]) - 1)) + >>> grid = TEST_GRIDS[2] + >>> gbf = GreedyBestFirst(grid, (0, 0), (len(grid) - 1, len(grid[0]) - 1)) >>> [x.pos for x in gbf.get_successors(gbf.start)] [(1, 0), (0, 1)] >>> (gbf.start.pos_y + delta[3][0], gbf.start.pos_x + delta[3][1]) @@ -78,11 +100,14 @@ class GreedyBestFirst: >>> gbf.retrace_path(gbf.start) [(0, 0)] >>> gbf.search() # doctest: +NORMALIZE_WHITESPACE - [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1), (5, 1), (6, 1), - (6, 2), (6, 3), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)] + [(0, 0), (1, 0), (2, 0), (2, 1), (3, 1), (4, 1), (4, 2), (4, 3), + (4, 4)] """ - def __init__(self, start: tuple[int, int], goal: tuple[int, int]): + def __init__( + self, grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int] + ): + self.grid = grid self.start = Node(start[1], start[0], goal[1], goal[0], 0, None) self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None) @@ -114,14 +139,6 @@ def search(self) -> Path | None: if child_node not in self.open_nodes: self.open_nodes.append(child_node) - else: - # retrieve the best current path - better_node = self.open_nodes.pop(self.open_nodes.index(child_node)) - - if child_node.g_cost < better_node.g_cost: - self.open_nodes.append(child_node) - else: - self.open_nodes.append(better_node) if not self.reached: 
return [self.start.pos] @@ -131,28 +148,22 @@ def get_successors(self, parent: Node) -> list[Node]: """ Returns a list of successors (both in the grid and free spaces) """ - successors = [] - for action in delta: - pos_x = parent.pos_x + action[1] - pos_y = parent.pos_y + action[0] - - if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1): - continue - - if grid[pos_y][pos_x] != 0: - continue - - successors.append( - Node( - pos_x, - pos_y, - self.target.pos_y, - self.target.pos_x, - parent.g_cost + 1, - parent, - ) + return [ + Node( + pos_x, + pos_y, + self.target.pos_x, + self.target.pos_y, + parent.g_cost + 1, + parent, + ) + for action in delta + if ( + 0 <= (pos_x := parent.pos_x + action[1]) < len(self.grid[0]) + and 0 <= (pos_y := parent.pos_y + action[0]) < len(self.grid) + and self.grid[pos_y][pos_x] == 0 ) - return successors + ] def retrace_path(self, node: Node | None) -> Path: """ @@ -168,18 +179,21 @@ def retrace_path(self, node: Node | None) -> Path: if __name__ == "__main__": - init = (0, 0) - goal = (len(grid) - 1, len(grid[0]) - 1) - for elem in grid: - print(elem) - - print("------") - - greedy_bf = GreedyBestFirst(init, goal) - path = greedy_bf.search() - if path: - for pos_x, pos_y in path: - grid[pos_x][pos_y] = 2 + for idx, grid in enumerate(TEST_GRIDS): + print(f"==grid-{idx + 1}==") + init = (0, 0) + goal = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) + + print("------") + + greedy_bf = GreedyBestFirst(grid, init, goal) + path = greedy_bf.search() + if path: + for pos_x, pos_y in path: + grid[pos_x][pos_y] = 2 + + for elem in grid: + print(elem) From efaf526737a83815a609a00fd59370f25f6d2e09 Mon Sep 17 00:00:00 2001 From: isidroas Date: Wed, 16 Aug 2023 01:04:53 +0200 Subject: [PATCH 0926/1543] BST and RSA doctest (#8693) * rsa key doctest * move doctest to module docstring * all tests to doctest * moved is_right to property * is right test * fixed rsa doctest import * Test error when deleting non-existing element * fixing ruff EM102 * convert property 'is_right' to one-liner Also use 'is' instead of '==' Co-authored-by: Tianyi Zheng * child instead of children Co-authored-by: Tianyi Zheng * remove type hint * Update data_structures/binary_tree/binary_search_tree.py --------- Co-authored-by: Tianyi Zheng --- ciphers/rsa_key_generator.py | 25 +-- .../binary_tree/binary_search_tree.py | 155 ++++++++++-------- 2 files changed, 98 insertions(+), 82 deletions(-) diff --git a/ciphers/rsa_key_generator.py b/ciphers/rsa_key_generator.py index 2573ed01387b..eedc7336804a 100644 --- a/ciphers/rsa_key_generator.py +++ b/ciphers/rsa_key_generator.py @@ -2,8 +2,7 @@ import random import sys -from . import cryptomath_module as cryptoMath # noqa: N812 -from . import rabin_miller as rabinMiller # noqa: N812 +from . 
import cryptomath_module, rabin_miller def main() -> None: @@ -13,20 +12,26 @@ def main() -> None: def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]: - print("Generating prime p...") - p = rabinMiller.generate_large_prime(key_size) - print("Generating prime q...") - q = rabinMiller.generate_large_prime(key_size) + """ + >>> random.seed(0) # for repeatability + >>> public_key, private_key = generate_key(8) + >>> public_key + (26569, 239) + >>> private_key + (26569, 2855) + """ + p = rabin_miller.generate_large_prime(key_size) + q = rabin_miller.generate_large_prime(key_size) n = p * q - print("Generating e that is relatively prime to (p - 1) * (q - 1)...") + # Generate e that is relatively prime to (p - 1) * (q - 1) while True: e = random.randrange(2 ** (key_size - 1), 2 ** (key_size)) - if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1: + if cryptomath_module.gcd(e, (p - 1) * (q - 1)) == 1: break - print("Calculating d that is mod inverse of e...") - d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1)) + # Calculate d that is mod inverse of e + d = cryptomath_module.find_mod_inverse(e, (p - 1) * (q - 1)) public_key = (n, e) private_key = (n, d) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index c72195424c7c..a706d21e3bb2 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -1,5 +1,62 @@ -""" +r""" A binary search Tree + +Example + 8 + / \ + 3 10 + / \ \ + 1 6 14 + / \ / + 4 7 13 + +>>> t = BinarySearchTree() +>>> t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7) +>>> print(" ".join(repr(i.value) for i in t.traversal_tree())) +8 3 1 6 4 7 10 14 13 +>>> print(" ".join(repr(i.value) for i in t.traversal_tree(postorder))) +1 4 7 6 3 13 14 10 8 +>>> t.remove(20) +Traceback (most recent call last): + ... +ValueError: Value 20 not found +>>> BinarySearchTree().search(6) +Traceback (most recent call last): + ... +IndexError: Warning: Tree is empty! please use another. + +Other example: + +>>> testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7) +>>> t = BinarySearchTree() +>>> for i in testlist: +... t.insert(i) + +Prints all the elements of the list in order traversal +>>> print(t) +{'8': ({'3': (1, {'6': (4, 7)})}, {'10': (None, {'14': (13, None)})})} + +Test existence +>>> t.search(6) is not None +True +>>> t.search(-1) is not None +False + +>>> t.search(6).is_right +True +>>> t.search(1).is_right +False + +>>> t.get_max().value +14 +>>> t.get_min().value +1 +>>> t.empty() +False +>>> for i in testlist: +... 
t.remove(i) +>>> t.empty() +True """ from collections.abc import Iterable @@ -20,6 +77,10 @@ def __repr__(self) -> str: return str(self.value) return pformat({f"{self.value}": (self.left, self.right)}, indent=1) + @property + def is_right(self) -> bool: + return self.parent is not None and self is self.parent.right + class BinarySearchTree: def __init__(self, root: Node | None = None): @@ -35,18 +96,13 @@ def __reassign_nodes(self, node: Node, new_children: Node | None) -> None: if new_children is not None: # reset its kids new_children.parent = node.parent if node.parent is not None: # reset its parent - if self.is_right(node): # If it is the right children + if node.is_right: # If it is the right child node.parent.right = new_children else: node.parent.left = new_children else: self.root = new_children - def is_right(self, node: Node) -> bool: - if node.parent and node.parent.right: - return node == node.parent.right - return False - def empty(self) -> bool: return self.root is None @@ -119,22 +175,26 @@ def get_min(self, node: Node | None = None) -> Node | None: return node def remove(self, value: int) -> None: - node = self.search(value) # Look for the node with that label - if node is not None: - if node.left is None and node.right is None: # If it has no children - self.__reassign_nodes(node, None) - elif node.left is None: # Has only right children - self.__reassign_nodes(node, node.right) - elif node.right is None: # Has only left children - self.__reassign_nodes(node, node.left) - else: - tmp_node = self.get_max( - node.left - ) # Gets the max value of the left branch - self.remove(tmp_node.value) # type: ignore - node.value = ( - tmp_node.value # type: ignore - ) # Assigns the value to the node to delete and keep tree structure + # Look for the node with that label + node = self.search(value) + if node is None: + msg = f"Value {value} not found" + raise ValueError(msg) + + if node.left is None and node.right is None: # If it has no children + self.__reassign_nodes(node, None) + elif node.left is None: # Has only right children + self.__reassign_nodes(node, node.right) + elif node.right is None: # Has only left children + self.__reassign_nodes(node, node.left) + else: + predecessor = self.get_max( + node.left + ) # Gets the max value of the left branch + self.remove(predecessor.value) # type: ignore + node.value = ( + predecessor.value # type: ignore + ) # Assigns the value to the node to delete and keep tree structure def preorder_traverse(self, node: Node | None) -> Iterable: if node is not None: @@ -177,55 +237,6 @@ def postorder(curr_node: Node | None) -> list[Node]: return node_list -def binary_search_tree() -> None: - r""" - Example - 8 - / \ - 3 10 - / \ \ - 1 6 14 - / \ / - 4 7 13 - - >>> t = BinarySearchTree() - >>> t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7) - >>> print(" ".join(repr(i.value) for i in t.traversal_tree())) - 8 3 1 6 4 7 10 14 13 - >>> print(" ".join(repr(i.value) for i in t.traversal_tree(postorder))) - 1 4 7 6 3 13 14 10 8 - >>> BinarySearchTree().search(6) - Traceback (most recent call last): - ... - IndexError: Warning: Tree is empty! please use another. 
- """ - testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7) - t = BinarySearchTree() - for i in testlist: - t.insert(i) - - # Prints all the elements of the list in order traversal - print(t) - - if t.search(6) is not None: - print("The value 6 exists") - else: - print("The value 6 doesn't exist") - - if t.search(-1) is not None: - print("The value -1 exists") - else: - print("The value -1 doesn't exist") - - if not t.empty(): - print("Max Value: ", t.get_max().value) # type: ignore - print("Min Value: ", t.get_min().value) # type: ignore - - for i in testlist: - t.remove(i) - print(t) - - if __name__ == "__main__": import doctest From f66568e981edf5e384fe28a357daee3e13f16de9 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 16 Aug 2023 02:10:22 +0300 Subject: [PATCH 0927/1543] Reduce the complexity of boolean_algebra/quine_mc_cluskey.py (#8604) * Reduce the complexity of boolean_algebra/quine_mc_cluskey.py * updating DIRECTORY.md * Fix * Fix review issues * Fix * Fix review issues --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- boolean_algebra/quine_mc_cluskey.py | 49 ++++++++++++----------------- 1 file changed, 20 insertions(+), 29 deletions(-) diff --git a/boolean_algebra/quine_mc_cluskey.py b/boolean_algebra/quine_mc_cluskey.py index 6788dfb28ba1..8e22e66726d4 100644 --- a/boolean_algebra/quine_mc_cluskey.py +++ b/boolean_algebra/quine_mc_cluskey.py @@ -74,10 +74,7 @@ def is_for_table(string1: str, string2: str, count: int) -> bool: """ list1 = list(string1) list2 = list(string2) - count_n = 0 - for i in range(len(list1)): - if list1[i] != list2[i]: - count_n += 1 + count_n = sum(item1 != item2 for item1, item2 in zip(list1, list2)) return count_n == count @@ -92,40 +89,34 @@ def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]: temp = [] select = [0] * len(chart) for i in range(len(chart[0])): - count = 0 - rem = -1 - for j in range(len(chart)): - if chart[j][i] == 1: - count += 1 - rem = j + count = sum(row[i] == 1 for row in chart) if count == 1: + rem = max(j for j, row in enumerate(chart) if row[i] == 1) select[rem] = 1 - for i in range(len(select)): - if select[i] == 1: - for j in range(len(chart[0])): - if chart[i][j] == 1: - for k in range(len(chart)): - chart[k][j] = 0 - temp.append(prime_implicants[i]) + for i, item in enumerate(select): + if item != 1: + continue + for j in range(len(chart[0])): + if chart[i][j] != 1: + continue + for row in chart: + row[j] = 0 + temp.append(prime_implicants[i]) while True: - max_n = 0 - rem = -1 - count_n = 0 - for i in range(len(chart)): - count_n = chart[i].count(1) - if count_n > max_n: - max_n = count_n - rem = i + counts = [chart[i].count(1) for i in range(len(chart))] + max_n = max(counts) + rem = counts.index(max_n) if max_n == 0: return temp temp.append(prime_implicants[rem]) - for i in range(len(chart[0])): - if chart[rem][i] == 1: - for j in range(len(chart)): - chart[j][i] = 0 + for j in range(len(chart[0])): + if chart[rem][j] != 1: + continue + for i in range(len(chart)): + chart[i][j] = 0 def prime_implicant_chart( From bfed2fb7883fb7c472cd09afea1aad4e3f87d71b Mon Sep 17 00:00:00 2001 From: Saksham1970 <45041294+Saksham1970@users.noreply.github.com> Date: Wed, 16 Aug 2023 12:54:12 +0530 Subject: [PATCH 0928/1543] Added Continued fractions (#6846) * updating DIRECTORY.md * added continued fractions * updating DIRECTORY.md * Update maths/continued_fraction.py Co-authored-by: Caeden Perelli-Harris * Update maths/continued_fraction.py Co-authored-by: Caeden 
Perelli-Harris --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Caeden Perelli-Harris Co-authored-by: Tianyi Zheng --- DIRECTORY.md | 1 + maths/continued_fraction.py | 51 +++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+) create mode 100644 maths/continued_fraction.py diff --git a/DIRECTORY.md b/DIRECTORY.md index be5fa3584a58..8d1567465fbc 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -555,6 +555,7 @@ * [Chudnovsky Algorithm](maths/chudnovsky_algorithm.py) * [Collatz Sequence](maths/collatz_sequence.py) * [Combinations](maths/combinations.py) + * [Continued Fraction](maths/continued_fraction.py) * [Decimal Isolate](maths/decimal_isolate.py) * [Decimal To Fraction](maths/decimal_to_fraction.py) * [Dodecahedron](maths/dodecahedron.py) diff --git a/maths/continued_fraction.py b/maths/continued_fraction.py new file mode 100644 index 000000000000..25ff649db77a --- /dev/null +++ b/maths/continued_fraction.py @@ -0,0 +1,51 @@ +""" +Finding the continuous fraction for a rational number using python + +https://en.wikipedia.org/wiki/Continued_fraction +""" + + +from fractions import Fraction + + +def continued_fraction(num: Fraction) -> list[int]: + """ + :param num: + Fraction of the number whose continued fractions to be found. + Use Fraction(str(number)) for more accurate results due to + float inaccuracies. + + :return: + The continued fraction of rational number. + It is the all commas in the (n + 1)-tuple notation. + + >>> continued_fraction(Fraction(2)) + [2] + >>> continued_fraction(Fraction("3.245")) + [3, 4, 12, 4] + >>> continued_fraction(Fraction("2.25")) + [2, 4] + >>> continued_fraction(1/Fraction("2.25")) + [0, 2, 4] + >>> continued_fraction(Fraction("415/93")) + [4, 2, 6, 7] + """ + numerator, denominator = num.as_integer_ratio() + continued_fraction_list: list[int] = [] + while True: + integer_part = int(numerator / denominator) + continued_fraction_list.append(integer_part) + numerator -= integer_part * denominator + if numerator == 0: + break + numerator, denominator = denominator, numerator + + return continued_fraction_list + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + print("Continued Fraction of 0.84375 is: ", continued_fraction(Fraction("0.84375"))) From 5c276a8377b9f4139dac9cfff83fd47b88511a40 Mon Sep 17 00:00:00 2001 From: homsim <103424895+homsim@users.noreply.github.com> Date: Wed, 16 Aug 2023 10:07:50 +0200 Subject: [PATCH 0929/1543] Quick fix: fig.canvas.set_window_title deprecated (#8961) Co-authored-by: homsim --- physics/n_body_simulation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index 2b701283f166..46330844df61 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -226,7 +226,7 @@ def plot( No doctest provided since this function does not have a return value. 
""" fig = plt.figure() - fig.canvas.set_window_title(title) + fig.canvas.manager.set_window_title(title) ax = plt.axes( xlim=(x_start, x_end), ylim=(y_start, y_end) ) # Set section to be plotted From beb43517c3552b72b9c8fc1710f681b0180418ec Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Wed, 16 Aug 2023 04:36:10 -0700 Subject: [PATCH 0930/1543] Fix `mypy` errors in `maths/gaussian_error_linear_unit.py` (#8610) * updating DIRECTORY.md * Fix mypy errors in gaussian_error_linear_unit.py * updating DIRECTORY.md * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- maths/gaussian_error_linear_unit.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/maths/gaussian_error_linear_unit.py b/maths/gaussian_error_linear_unit.py index 18384bb6c864..b3cbd7810716 100644 --- a/maths/gaussian_error_linear_unit.py +++ b/maths/gaussian_error_linear_unit.py @@ -30,12 +30,10 @@ def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray: Implements the Gaussian Error Linear Unit (GELU) function Parameters: - vector (np.array): A numpy array of shape (1,n) - consisting of real values + vector (np.ndarray): A numpy array of shape (1, n) consisting of real values Returns: - gelu_vec (np.array): The input numpy array, after applying - gelu. + gelu_vec (np.ndarray): The input numpy array, after applying gelu Examples: >>> gaussian_error_linear_unit(np.array([-1.0, 1.0, 2.0])) From fd7cc4cf8e731c16a5dd2cf30c4ddb0dd017d59e Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 17 Aug 2023 02:21:00 +0100 Subject: [PATCH 0931/1543] Rename norgate to nor_gate to keep consistency (#8968) * refactor(boolean-algebra): Rename norgate to nor_gate * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 +- boolean_algebra/{norgate.py => nor_gate.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename boolean_algebra/{norgate.py => nor_gate.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 8d1567465fbc..d4a2bb48511a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -62,7 +62,7 @@ ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) * [Nand Gate](boolean_algebra/nand_gate.py) - * [Norgate](boolean_algebra/norgate.py) + * [Nor Gate](boolean_algebra/nor_gate.py) * [Not Gate](boolean_algebra/not_gate.py) * [Or Gate](boolean_algebra/or_gate.py) * [Quine Mc Cluskey](boolean_algebra/quine_mc_cluskey.py) diff --git a/boolean_algebra/norgate.py b/boolean_algebra/nor_gate.py similarity index 100% rename from boolean_algebra/norgate.py rename to boolean_algebra/nor_gate.py From f6b12420ce2a16ddf55c5226ea6f188936af33ad Mon Sep 17 00:00:00 2001 From: Kausthub Kannan <99611070+kausthub-kannan@users.noreply.github.com> Date: Thu, 17 Aug 2023 06:52:15 +0530 Subject: [PATCH 0932/1543] Added Leaky ReLU Activation Function (#8962) * Added Leaky ReLU activation function * Added Leaky ReLU activation function * Added Leaky ReLU activation function * Formatting and spelling fixes done --- .../leaky_rectified_linear_unit.py | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 neural_network/activation_functions/leaky_rectified_linear_unit.py diff --git a/neural_network/activation_functions/leaky_rectified_linear_unit.py b/neural_network/activation_functions/leaky_rectified_linear_unit.py new file mode 100644 index 000000000000..019086fd9821 --- /dev/null +++ b/neural_network/activation_functions/leaky_rectified_linear_unit.py @@ -0,0 +1,39 
@@ +""" +Leaky Rectified Linear Unit (Leaky ReLU) + +Use Case: Leaky ReLU addresses the problem of the vanishing gradient. +For more detailed information, you can refer to the following link: +https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Leaky_ReLU +""" + +import numpy as np + + +def leaky_rectified_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray: + """ + Implements the LeakyReLU activation function. + + Parameters: + vector (np.ndarray): The input array for LeakyReLU activation. + alpha (float): The slope for negative values. + + Returns: + np.ndarray: The input array after applying the LeakyReLU activation. + + Formula: f(x) = x if x > 0 else f(x) = alpha * x + + Examples: + >>> leaky_rectified_linear_unit(vector=np.array([2.3,0.6,-2,-3.8]), alpha=0.3) + array([ 2.3 , 0.6 , -0.6 , -1.14]) + + >>> leaky_rectified_linear_unit(np.array([-9.2, -0.3, 0.45, -4.56]), alpha=0.067) + array([-0.6164 , -0.0201 , 0.45 , -0.30552]) + + """ + return np.where(vector > 0, vector, alpha * vector) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a207187ddb368edb121153d4f6e190fcfb857427 Mon Sep 17 00:00:00 2001 From: Ilkin Mengusoglu <113149540+imengus@users.noreply.github.com> Date: Thu, 17 Aug 2023 22:34:53 +0100 Subject: [PATCH 0933/1543] Fix simplex.py (#8843) * changes to accommodate special case * changed n_slack calculation method * fix precommit typehints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * n_art_vars inputs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: docstrings and typehints * fix: doctest issues when running code * additional check and doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix ruff * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix whitespace --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- linear_programming/simplex.py | 229 +++++++++++++++++++--------------- 1 file changed, 128 insertions(+), 101 deletions(-) diff --git a/linear_programming/simplex.py b/linear_programming/simplex.py index ba64add40b5f..bbc97d8e22bf 100644 --- a/linear_programming/simplex.py +++ b/linear_programming/simplex.py @@ -20,40 +20,60 @@ class Tableau: """Operate on simplex tableaus - >>> t = Tableau(np.array([[-1,-1,0,0,-1],[1,3,1,0,4],[3,1,0,1,4.]]), 2) + >>> Tableau(np.array([[-1,-1,0,0,1],[1,3,1,0,4],[3,1,0,1,4]]), 2, 2) + Traceback (most recent call last): + ... + TypeError: Tableau must have type float64 + + >>> Tableau(np.array([[-1,-1,0,0,-1],[1,3,1,0,4],[3,1,0,1,4.]]), 2, 2) Traceback (most recent call last): ... ValueError: RHS must be > 0 + + >>> Tableau(np.array([[-1,-1,0,0,1],[1,3,1,0,4],[3,1,0,1,4.]]), -2, 2) + Traceback (most recent call last): + ... 
+ ValueError: number of (artificial) variables must be a natural number """ - def __init__(self, tableau: np.ndarray, n_vars: int) -> None: + # Max iteration number to prevent cycling + maxiter = 100 + + def __init__( + self, tableau: np.ndarray, n_vars: int, n_artificial_vars: int + ) -> None: + if tableau.dtype != "float64": + raise TypeError("Tableau must have type float64") + # Check if RHS is negative - if np.any(tableau[:, -1], where=tableau[:, -1] < 0): + if not (tableau[:, -1] >= 0).all(): raise ValueError("RHS must be > 0") + if n_vars < 2 or n_artificial_vars < 0: + raise ValueError( + "number of (artificial) variables must be a natural number" + ) + self.tableau = tableau - self.n_rows, _ = tableau.shape + self.n_rows, n_cols = tableau.shape # Number of decision variables x1, x2, x3... - self.n_vars = n_vars - - # Number of artificial variables to be minimised - self.n_art_vars = len(np.where(tableau[self.n_vars : -1] == -1)[0]) + self.n_vars, self.n_artificial_vars = n_vars, n_artificial_vars # 2 if there are >= or == constraints (nonstandard), 1 otherwise (std) - self.n_stages = (self.n_art_vars > 0) + 1 + self.n_stages = (self.n_artificial_vars > 0) + 1 # Number of slack variables added to make inequalities into equalities - self.n_slack = self.n_rows - self.n_stages + self.n_slack = n_cols - self.n_vars - self.n_artificial_vars - 1 # Objectives for each stage self.objectives = ["max"] # In two stage simplex, first minimise then maximise - if self.n_art_vars: + if self.n_artificial_vars: self.objectives.append("min") - self.col_titles = [""] + self.col_titles = self.generate_col_titles() # Index of current pivot row and column self.row_idx = None @@ -62,48 +82,39 @@ def __init__(self, tableau: np.ndarray, n_vars: int) -> None: # Does objective row only contain (non)-negative values? self.stop_iter = False - @staticmethod - def generate_col_titles(*args: int) -> list[str]: + def generate_col_titles(self) -> list[str]: """Generate column titles for tableau of specific dimensions - >>> Tableau.generate_col_titles(2, 3, 1) - ['x1', 'x2', 's1', 's2', 's3', 'a1', 'RHS'] - - >>> Tableau.generate_col_titles() - Traceback (most recent call last): - ... - ValueError: Must provide n_vars, n_slack, and n_art_vars - >>> Tableau.generate_col_titles(-2, 3, 1) - Traceback (most recent call last): - ... - ValueError: All arguments must be non-negative integers - """ - if len(args) != 3: - raise ValueError("Must provide n_vars, n_slack, and n_art_vars") + >>> Tableau(np.array([[-1,-1,0,0,1],[1,3,1,0,4],[3,1,0,1,4.]]), + ... 2, 0).generate_col_titles() + ['x1', 'x2', 's1', 's2', 'RHS'] - if not all(x >= 0 and isinstance(x, int) for x in args): - raise ValueError("All arguments must be non-negative integers") + >>> Tableau(np.array([[-1,-1,0,0,1],[1,3,1,0,4],[3,1,0,1,4.]]), + ... 2, 2).generate_col_titles() + ['x1', 'x2', 'RHS'] + """ + args = (self.n_vars, self.n_slack) - # decision | slack | artificial - string_starts = ["x", "s", "a"] + # decision | slack + string_starts = ["x", "s"] titles = [] - for i in range(3): + for i in range(2): for j in range(args[i]): titles.append(string_starts[i] + str(j + 1)) titles.append("RHS") return titles - def find_pivot(self, tableau: np.ndarray) -> tuple[Any, Any]: + def find_pivot(self) -> tuple[Any, Any]: """Finds the pivot row and column. - >>> t = Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], [1,2,0,1,7.]]), 2) - >>> t.find_pivot(t.tableau) + >>> Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], [1,2,0,1,7.]]), + ... 
2, 0).find_pivot() (1, 0) """ objective = self.objectives[-1] # Find entries of highest magnitude in objective rows sign = (objective == "min") - (objective == "max") - col_idx = np.argmax(sign * tableau[0, : self.n_vars]) + col_idx = np.argmax(sign * self.tableau[0, :-1]) # Choice is only valid if below 0 for maximise, and above for minimise if sign * self.tableau[0, col_idx] <= 0: @@ -117,15 +128,15 @@ def find_pivot(self, tableau: np.ndarray) -> tuple[Any, Any]: s = slice(self.n_stages, self.n_rows) # RHS - dividend = tableau[s, -1] + dividend = self.tableau[s, -1] # Elements of pivot column within slice - divisor = tableau[s, col_idx] + divisor = self.tableau[s, col_idx] # Array filled with nans nans = np.full(self.n_rows - self.n_stages, np.nan) - # If element in pivot column is greater than zeron_stages, return + # If element in pivot column is greater than zero, return # quotient or nan otherwise quotients = np.divide(dividend, divisor, out=nans, where=divisor > 0) @@ -134,18 +145,18 @@ def find_pivot(self, tableau: np.ndarray) -> tuple[Any, Any]: row_idx = np.nanargmin(quotients) + self.n_stages return row_idx, col_idx - def pivot(self, tableau: np.ndarray, row_idx: int, col_idx: int) -> np.ndarray: + def pivot(self, row_idx: int, col_idx: int) -> np.ndarray: """Pivots on value on the intersection of pivot row and column. - >>> t = Tableau(np.array([[-2,-3,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), 2) - >>> t.pivot(t.tableau, 1, 0).tolist() + >>> Tableau(np.array([[-2,-3,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), + ... 2, 2).pivot(1, 0).tolist() ... # doctest: +NORMALIZE_WHITESPACE [[0.0, 3.0, 2.0, 0.0, 8.0], [1.0, 3.0, 1.0, 0.0, 4.0], [0.0, -8.0, -3.0, 1.0, -8.0]] """ # Avoid changes to original tableau - piv_row = tableau[row_idx].copy() + piv_row = self.tableau[row_idx].copy() piv_val = piv_row[col_idx] @@ -153,48 +164,47 @@ def pivot(self, tableau: np.ndarray, row_idx: int, col_idx: int) -> np.ndarray: piv_row *= 1 / piv_val # Variable in pivot column becomes basic, ie the only non-zero entry - for idx, coeff in enumerate(tableau[:, col_idx]): - tableau[idx] += -coeff * piv_row - tableau[row_idx] = piv_row - return tableau + for idx, coeff in enumerate(self.tableau[:, col_idx]): + self.tableau[idx] += -coeff * piv_row + self.tableau[row_idx] = piv_row + return self.tableau - def change_stage(self, tableau: np.ndarray) -> np.ndarray: + def change_stage(self) -> np.ndarray: """Exits first phase of the two-stage method by deleting artificial rows and columns, or completes the algorithm if exiting the standard case. - >>> t = Tableau(np.array([ + >>> Tableau(np.array([ ... [3, 3, -1, -1, 0, 0, 4], ... [2, 1, 0, 0, 0, 0, 0.], ... [1, 2, -1, 0, 1, 0, 2], ... [2, 1, 0, -1, 0, 1, 2] - ... ]), 2) - >>> t.change_stage(t.tableau).tolist() + ... ]), 2, 2).change_stage().tolist() ... 
# doctest: +NORMALIZE_WHITESPACE - [[2.0, 1.0, 0.0, 0.0, 0.0, 0.0], - [1.0, 2.0, -1.0, 0.0, 1.0, 2.0], - [2.0, 1.0, 0.0, -1.0, 0.0, 2.0]] + [[2.0, 1.0, 0.0, 0.0, 0.0], + [1.0, 2.0, -1.0, 0.0, 2.0], + [2.0, 1.0, 0.0, -1.0, 2.0]] """ # Objective of original objective row remains self.objectives.pop() if not self.objectives: - return tableau + return self.tableau # Slice containing ids for artificial columns - s = slice(-self.n_art_vars - 1, -1) + s = slice(-self.n_artificial_vars - 1, -1) # Delete the artificial variable columns - tableau = np.delete(tableau, s, axis=1) + self.tableau = np.delete(self.tableau, s, axis=1) # Delete the objective row of the first stage - tableau = np.delete(tableau, 0, axis=0) + self.tableau = np.delete(self.tableau, 0, axis=0) self.n_stages = 1 self.n_rows -= 1 - self.n_art_vars = 0 + self.n_artificial_vars = 0 self.stop_iter = False - return tableau + return self.tableau def run_simplex(self) -> dict[Any, Any]: """Operate on tableau until objective function cannot be @@ -205,15 +215,29 @@ def run_simplex(self) -> dict[Any, Any]: ST: x1 + 3x2 <= 4 3x1 + x2 <= 4 >>> Tableau(np.array([[-1,-1,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), - ... 2).run_simplex() + ... 2, 0).run_simplex() {'P': 2.0, 'x1': 1.0, 'x2': 1.0} + # Standard linear program with 3 variables: + Max: 3x1 + x2 + 3x3 + ST: 2x1 + x2 + x3 ≤ 2 + x1 + 2x2 + 3x3 ≤ 5 + 2x1 + 2x2 + x3 ≤ 6 + >>> Tableau(np.array([ + ... [-3,-1,-3,0,0,0,0], + ... [2,1,1,1,0,0,2], + ... [1,2,3,0,1,0,5], + ... [2,2,1,0,0,1,6.] + ... ]),3,0).run_simplex() # doctest: +ELLIPSIS + {'P': 5.4, 'x1': 0.199..., 'x3': 1.6} + + # Optimal tableau input: >>> Tableau(np.array([ ... [0, 0, 0.25, 0.25, 2], ... [0, 1, 0.375, -0.125, 1], ... [1, 0, -0.125, 0.375, 1] - ... ]), 2).run_simplex() + ... ]), 2, 0).run_simplex() {'P': 2.0, 'x1': 1.0, 'x2': 1.0} # Non-standard: >= constraints @@ -227,7 +251,7 @@ def run_simplex(self) -> dict[Any, Any]: ... [1, 1, 1, 1, 0, 0, 0, 0, 40], ... [2, 1, -1, 0, -1, 0, 1, 0, 10], ... [0, -1, 1, 0, 0, -1, 0, 1, 10.] - ... ]), 3).run_simplex() + ... ]), 3, 2).run_simplex() {'P': 70.0, 'x1': 10.0, 'x2': 10.0, 'x3': 20.0} # Non standard: minimisation and equalities @@ -235,73 +259,76 @@ def run_simplex(self) -> dict[Any, Any]: ST: 2x1 + x2 = 12 6x1 + 5x2 = 40 >>> Tableau(np.array([ - ... [8, 6, 0, -1, 0, -1, 0, 0, 52], - ... [1, 1, 0, 0, 0, 0, 0, 0, 0], - ... [2, 1, 1, 0, 0, 0, 0, 0, 12], - ... [2, 1, 0, -1, 0, 0, 1, 0, 12], - ... [6, 5, 0, 0, 1, 0, 0, 0, 40], - ... [6, 5, 0, 0, 0, -1, 0, 1, 40.] - ... ]), 2).run_simplex() + ... [8, 6, 0, 0, 52], + ... [1, 1, 0, 0, 0], + ... [2, 1, 1, 0, 12], + ... [6, 5, 0, 1, 40.], + ... ]), 2, 2).run_simplex() {'P': 7.0, 'x1': 5.0, 'x2': 2.0} + + + # Pivot on slack variables + Max: 8x1 + 6x2 + ST: x1 + 3x2 <= 33 + 4x1 + 2x2 <= 48 + 2x1 + 4x2 <= 48 + x1 + x2 >= 10 + x1 >= 2 + >>> Tableau(np.array([ + ... [2, 1, 0, 0, 0, -1, -1, 0, 0, 12.0], + ... [-8, -6, 0, 0, 0, 0, 0, 0, 0, 0.0], + ... [1, 3, 1, 0, 0, 0, 0, 0, 0, 33.0], + ... [4, 2, 0, 1, 0, 0, 0, 0, 0, 60.0], + ... [2, 4, 0, 0, 1, 0, 0, 0, 0, 48.0], + ... [1, 1, 0, 0, 0, -1, 0, 1, 0, 10.0], + ... [1, 0, 0, 0, 0, 0, -1, 0, 1, 2.0] + ... ]), 2, 2).run_simplex() # doctest: +ELLIPSIS + {'P': 132.0, 'x1': 12.000... 'x2': 5.999...} """ # Stop simplex algorithm from cycling. - for _ in range(100): + for _ in range(Tableau.maxiter): # Completion of each stage removes an objective. 
If both stages # are complete, then no objectives are left if not self.objectives: - self.col_titles = self.generate_col_titles( - self.n_vars, self.n_slack, self.n_art_vars - ) - # Find the values of each variable at optimal solution - return self.interpret_tableau(self.tableau, self.col_titles) + return self.interpret_tableau() - row_idx, col_idx = self.find_pivot(self.tableau) + row_idx, col_idx = self.find_pivot() # If there are no more negative values in objective row if self.stop_iter: # Delete artificial variable columns and rows. Update attributes - self.tableau = self.change_stage(self.tableau) + self.tableau = self.change_stage() else: - self.tableau = self.pivot(self.tableau, row_idx, col_idx) + self.tableau = self.pivot(row_idx, col_idx) return {} - def interpret_tableau( - self, tableau: np.ndarray, col_titles: list[str] - ) -> dict[str, float]: + def interpret_tableau(self) -> dict[str, float]: """Given the final tableau, add the corresponding values of the basic decision variables to the `output_dict` - >>> tableau = np.array([ + >>> Tableau(np.array([ ... [0,0,0.875,0.375,5], ... [0,1,0.375,-0.125,1], ... [1,0,-0.125,0.375,1] - ... ]) - >>> t = Tableau(tableau, 2) - >>> t.interpret_tableau(tableau, ["x1", "x2", "s1", "s2", "RHS"]) + ... ]),2, 0).interpret_tableau() {'P': 5.0, 'x1': 1.0, 'x2': 1.0} """ # P = RHS of final tableau - output_dict = {"P": abs(tableau[0, -1])} + output_dict = {"P": abs(self.tableau[0, -1])} for i in range(self.n_vars): - # Gives ids of nonzero entries in the ith column - nonzero = np.nonzero(tableau[:, i]) + # Gives indices of nonzero entries in the ith column + nonzero = np.nonzero(self.tableau[:, i]) n_nonzero = len(nonzero[0]) - # First entry in the nonzero ids + # First entry in the nonzero indices nonzero_rowidx = nonzero[0][0] - nonzero_val = tableau[nonzero_rowidx, i] + nonzero_val = self.tableau[nonzero_rowidx, i] # If there is only one nonzero value in column, which is one - if n_nonzero == nonzero_val == 1: - rhs_val = tableau[nonzero_rowidx, -1] - output_dict[col_titles[i]] = rhs_val - - # Check for basic variables - for title in col_titles: - # Don't add RHS or slack variables to output dict - if title[0] not in "R-s-a": - output_dict.setdefault(title, 0) + if n_nonzero == 1 and nonzero_val == 1: + rhs_val = self.tableau[nonzero_rowidx, -1] + output_dict[self.col_titles[i]] = rhs_val return output_dict From 72c7b05caa7e5b109b7b42c796a8af39f99a5100 Mon Sep 17 00:00:00 2001 From: Boris Galochkin Date: Fri, 18 Aug 2023 04:38:19 +0300 Subject: [PATCH 0934/1543] Fix `sorts/bucket_sort.py` implementation (#5786) * Fix sorts/bucket_sort.py * updating DIRECTORY.md * Remove unused var in bucket_sort.py * Fix list index in bucket_sort.py --------- Co-authored-by: Tianyi Zheng Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + sorts/bucket_sort.py | 18 ++++++++++++------ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index d4a2bb48511a..e39a0674743a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -710,6 +710,7 @@ * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) * Activation Functions * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) + * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural 
Network](neural_network/convolution_neural_network.py) * [Perceptron](neural_network/perceptron.py) diff --git a/sorts/bucket_sort.py b/sorts/bucket_sort.py index 7bcbe61a4526..c016e9e26e73 100644 --- a/sorts/bucket_sort.py +++ b/sorts/bucket_sort.py @@ -30,7 +30,7 @@ from __future__ import annotations -def bucket_sort(my_list: list) -> list: +def bucket_sort(my_list: list, bucket_count: int = 10) -> list: """ >>> data = [-1, 2, -5, 0] >>> bucket_sort(data) == sorted(data) @@ -43,21 +43,27 @@ def bucket_sort(my_list: list) -> list: True >>> bucket_sort([]) == sorted([]) True + >>> data = [-1e10, 1e10] + >>> bucket_sort(data) == sorted(data) + True >>> import random >>> collection = random.sample(range(-50, 50), 50) >>> bucket_sort(collection) == sorted(collection) True """ - if len(my_list) == 0: + + if len(my_list) == 0 or bucket_count <= 0: return [] + min_value, max_value = min(my_list), max(my_list) - bucket_count = int(max_value - min_value) + 1 + bucket_size = (max_value - min_value) / bucket_count buckets: list[list] = [[] for _ in range(bucket_count)] - for i in my_list: - buckets[int(i - min_value)].append(i) + for val in my_list: + index = min(int((val - min_value) / bucket_size), bucket_count - 1) + buckets[index].append(val) - return [v for bucket in buckets for v in sorted(bucket)] + return [val for bucket in buckets for val in sorted(bucket)] if __name__ == "__main__": From 5f7819e1cd192ecc89a7b7b929db63e045a47b45 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Fri, 18 Aug 2023 13:13:38 +0100 Subject: [PATCH 0935/1543] Fix get top billionaires BROKEN file (#8970) * updating DIRECTORY.md * fix(get-top-billionaires): Handle timestamp before epoch * updating DIRECTORY.md * revert(pyproject): Re-implement ignore lru_cache * fix(age): Update age to current year * fix(doctest): Make years since dynamic --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + ...es.py.disabled => get_top_billionaires.py} | 27 ++++++++++++++----- 2 files changed, 21 insertions(+), 7 deletions(-) rename web_programming/{get_top_billionaires.py.disabled => get_top_billionaires.py} (72%) diff --git a/DIRECTORY.md b/DIRECTORY.md index e39a0674743a..1ff093d88766 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1221,6 +1221,7 @@ * [Get Amazon Product Data](web_programming/get_amazon_product_data.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) + * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) * [Get User Tweets](web_programming/get_user_tweets.py) * [Giphy](web_programming/giphy.py) diff --git a/web_programming/get_top_billionaires.py.disabled b/web_programming/get_top_billionaires.py similarity index 72% rename from web_programming/get_top_billionaires.py.disabled rename to web_programming/get_top_billionaires.py index 6a8054e26270..6f986acb9181 100644 --- a/web_programming/get_top_billionaires.py.disabled +++ b/web_programming/get_top_billionaires.py @@ -3,7 +3,7 @@ This works for some of us but fails for others. """ -from datetime import datetime +from datetime import UTC, datetime, timedelta import requests from rich import box @@ -20,18 +20,31 @@ ) -def calculate_age(unix_date: int) -> str: +def calculate_age(unix_date: float) -> str: """Calculates age from given unix time format. 
Returns: Age as string - >>> calculate_age(-657244800000) - '73' - >>> calculate_age(46915200000) - '51' + >>> from datetime import datetime, UTC + >>> years_since_create = datetime.now(tz=UTC).year - 2022 + >>> int(calculate_age(-657244800000)) - years_since_create + 73 + >>> int(calculate_age(46915200000)) - years_since_create + 51 """ - birthdate = datetime.fromtimestamp(unix_date / 1000).date() + # Convert date from milliseconds to seconds + unix_date /= 1000 + + if unix_date < 0: + # Handle timestamp before epoch + epoch = datetime.fromtimestamp(0, tz=UTC) + seconds_since_epoch = (datetime.now(tz=UTC) - epoch).seconds + birthdate = ( + epoch - timedelta(seconds=abs(unix_date) - seconds_since_epoch) + ).date() + else: + birthdate = datetime.fromtimestamp(unix_date, tz=UTC).date() return str( TODAY.year - birthdate.year From 945803f65d79d0277c663a0e043228ed10996a92 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Fri, 18 Aug 2023 13:19:25 +0100 Subject: [PATCH 0936/1543] Unmark fetch anime and play as BROKEN and fix type errors (#8988) * updating DIRECTORY.md * type(fetch-anime-and-play): Fix type errors and re-enable * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + ...play.py.BROKEN => fetch_anime_and_play.py} | 71 ++++++++++--------- 2 files changed, 38 insertions(+), 34 deletions(-) rename web_programming/{fetch_anime_and_play.py.BROKEN => fetch_anime_and_play.py} (70%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 1ff093d88766..6af4ead56ebd 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1213,6 +1213,7 @@ * [Daily Horoscope](web_programming/daily_horoscope.py) * [Download Images From Google Query](web_programming/download_images_from_google_query.py) * [Emails From Url](web_programming/emails_from_url.py) + * [Fetch Anime And Play](web_programming/fetch_anime_and_play.py) * [Fetch Bbc News](web_programming/fetch_bbc_news.py) * [Fetch Github Info](web_programming/fetch_github_info.py) * [Fetch Jobs](web_programming/fetch_jobs.py) diff --git a/web_programming/fetch_anime_and_play.py.BROKEN b/web_programming/fetch_anime_and_play.py similarity index 70% rename from web_programming/fetch_anime_and_play.py.BROKEN rename to web_programming/fetch_anime_and_play.py index 3bd4f704dd8d..366807785e85 100644 --- a/web_programming/fetch_anime_and_play.py.BROKEN +++ b/web_programming/fetch_anime_and_play.py @@ -1,7 +1,5 @@ -from xml.dom import NotFoundErr - import requests -from bs4 import BeautifulSoup, NavigableString +from bs4 import BeautifulSoup, NavigableString, Tag from fake_useragent import UserAgent BASE_URL = "https://ww1.gogoanime2.org" @@ -41,25 +39,23 @@ def search_scraper(anime_name: str) -> list: # get list of anime anime_ul = soup.find("ul", {"class": "items"}) + if anime_ul is None or isinstance(anime_ul, NavigableString): + msg = f"Could not find and anime with name {anime_name}" + raise ValueError(msg) anime_li = anime_ul.children # for each anime, insert to list. the name and url. 
anime_list = [] for anime in anime_li: - if not isinstance(anime, NavigableString): - try: - anime_url, anime_title = ( - anime.find("a")["href"], - anime.find("a")["title"], - ) - anime_list.append( - { - "title": anime_title, - "url": anime_url, - } - ) - except (NotFoundErr, KeyError): - pass + if isinstance(anime, Tag): + anime_url = anime.find("a") + if anime_url is None or isinstance(anime_url, NavigableString): + continue + anime_title = anime.find("a") + if anime_title is None or isinstance(anime_title, NavigableString): + continue + + anime_list.append({"title": anime_title["title"], "url": anime_url["href"]}) return anime_list @@ -93,22 +89,24 @@ def search_anime_episode_list(episode_endpoint: str) -> list: # With this id. get the episode list. episode_page_ul = soup.find("ul", {"id": "episode_related"}) + if episode_page_ul is None or isinstance(episode_page_ul, NavigableString): + msg = f"Could not find any anime eposiodes with name {anime_name}" + raise ValueError(msg) episode_page_li = episode_page_ul.children episode_list = [] for episode in episode_page_li: - try: - if not isinstance(episode, NavigableString): - episode_list.append( - { - "title": episode.find("div", {"class": "name"}).text.replace( - " ", "" - ), - "url": episode.find("a")["href"], - } - ) - except (KeyError, NotFoundErr): - pass + if isinstance(episode, Tag): + url = episode.find("a") + if url is None or isinstance(url, NavigableString): + continue + title = episode.find("div", {"class": "name"}) + if title is None or isinstance(title, NavigableString): + continue + + episode_list.append( + {"title": title.text.replace(" ", ""), "url": url["href"]} + ) return episode_list @@ -140,11 +138,16 @@ def get_anime_episode(episode_endpoint: str) -> list: soup = BeautifulSoup(response.text, "html.parser") - try: - episode_url = soup.find("iframe", {"id": "playerframe"})["src"] - download_url = episode_url.replace("/embed/", "/playlist/") + ".m3u8" - except (KeyError, NotFoundErr) as e: - raise e + url = soup.find("iframe", {"id": "playerframe"}) + if url is None or isinstance(url, NavigableString): + msg = f"Could not find url and download url from {episode_endpoint}" + raise RuntimeError(msg) + + episode_url = url["src"] + if not isinstance(episode_url, str): + msg = f"Could not find url and download url from {episode_endpoint}" + raise RuntimeError(msg) + download_url = episode_url.replace("/embed/", "/playlist/") + ".m3u8" return [f"{BASE_URL}{episode_url}", f"{BASE_URL}{download_url}"] From e887c14f1252cd7de3d99ef0553c448c8c9711df Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 18 Aug 2023 13:53:17 -0700 Subject: [PATCH 0937/1543] Fix continued_fraction.py to work for negative numbers (#8985) * Add doctests to continued_fraction.py for 0 and neg nums * Fix continued_fraction.py to work for negative nums Fix continued_fraction.py to work for negative nums by replacing int() call with floor() * Move comment in doctest --- maths/continued_fraction.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/maths/continued_fraction.py b/maths/continued_fraction.py index 25ff649db77a..04ff0b6ff0d2 100644 --- a/maths/continued_fraction.py +++ b/maths/continued_fraction.py @@ -6,6 +6,7 @@ from fractions import Fraction +from math import floor def continued_fraction(num: Fraction) -> list[int]: @@ -29,11 +30,17 @@ def continued_fraction(num: Fraction) -> list[int]: [0, 2, 4] >>> continued_fraction(Fraction("415/93")) [4, 2, 6, 7] + >>> continued_fraction(Fraction(0)) + [0] + >>> 
continued_fraction(Fraction(0.75)) + [0, 1, 3] + >>> continued_fraction(Fraction("-2.25")) # -2.25 = -3 + 0.75 + [-3, 1, 3] """ numerator, denominator = num.as_integer_ratio() continued_fraction_list: list[int] = [] while True: - integer_part = int(numerator / denominator) + integer_part = floor(numerator / denominator) continued_fraction_list.append(integer_part) numerator -= integer_part * denominator if numerator == 0: From 5ecb6baef8bf52f9bb99a1bb7cec4899b6df7ab4 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 20 Aug 2023 05:36:00 -0700 Subject: [PATCH 0938/1543] Move and reimplement `convert_number_to_words.py` (#8998) * Move and reimplement convert_number_to_words.py - Move convert_number_to_words.py from web_programming/ to conversions/ - Reimplement the algorithm from scratch because the logic was very opaque and too heavily nested - Add support for the Western numbering system (both short and long) because the original implementation only supported the Indian numbering system - Add extensive doctests and error handling * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 +- conversions/convert_number_to_words.py | 205 +++++++++++++++++++++ web_programming/convert_number_to_words.py | 109 ----------- 3 files changed, 206 insertions(+), 110 deletions(-) create mode 100644 conversions/convert_number_to_words.py delete mode 100644 web_programming/convert_number_to_words.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 6af4ead56ebd..653c1831d820 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -143,6 +143,7 @@ * [Binary To Decimal](conversions/binary_to_decimal.py) * [Binary To Hexadecimal](conversions/binary_to_hexadecimal.py) * [Binary To Octal](conversions/binary_to_octal.py) + * [Convert Number To Words](conversions/convert_number_to_words.py) * [Decimal To Any](conversions/decimal_to_any.py) * [Decimal To Binary](conversions/decimal_to_binary.py) * [Decimal To Binary Recursion](conversions/decimal_to_binary_recursion.py) @@ -1203,7 +1204,6 @@ ## Web Programming * [Co2 Emission](web_programming/co2_emission.py) - * [Convert Number To Words](web_programming/convert_number_to_words.py) * [Covid Stats Via Xpath](web_programming/covid_stats_via_xpath.py) * [Crawl Google Results](web_programming/crawl_google_results.py) * [Crawl Google Scholar Citation](web_programming/crawl_google_scholar_citation.py) diff --git a/conversions/convert_number_to_words.py b/conversions/convert_number_to_words.py new file mode 100644 index 000000000000..0e4405319f1f --- /dev/null +++ b/conversions/convert_number_to_words.py @@ -0,0 +1,205 @@ +from enum import Enum +from typing import ClassVar, Literal + + +class NumberingSystem(Enum): + SHORT = ( + (15, "quadrillion"), + (12, "trillion"), + (9, "billion"), + (6, "million"), + (3, "thousand"), + (2, "hundred"), + ) + + LONG = ( + (15, "billiard"), + (9, "milliard"), + (6, "million"), + (3, "thousand"), + (2, "hundred"), + ) + + INDIAN = ( + (14, "crore crore"), + (12, "lakh crore"), + (7, "crore"), + (5, "lakh"), + (3, "thousand"), + (2, "hundred"), + ) + + @classmethod + def max_value(cls, system: str) -> int: + """ + Gets the max value supported by the given number system. 
+ + >>> NumberingSystem.max_value("short") == 10**18 - 1 + True + >>> NumberingSystem.max_value("long") == 10**21 - 1 + True + >>> NumberingSystem.max_value("indian") == 10**19 - 1 + True + """ + match (system_enum := cls[system.upper()]): + case cls.SHORT: + max_exp = system_enum.value[0][0] + 3 + case cls.LONG: + max_exp = system_enum.value[0][0] + 6 + case cls.INDIAN: + max_exp = 19 + case _: + raise ValueError("Invalid numbering system") + return 10**max_exp - 1 + + +class NumberWords(Enum): + ONES: ClassVar = { + 0: "", + 1: "one", + 2: "two", + 3: "three", + 4: "four", + 5: "five", + 6: "six", + 7: "seven", + 8: "eight", + 9: "nine", + } + + TEENS: ClassVar = { + 0: "ten", + 1: "eleven", + 2: "twelve", + 3: "thirteen", + 4: "fourteen", + 5: "fifteen", + 6: "sixteen", + 7: "seventeen", + 8: "eighteen", + 9: "nineteen", + } + + TENS: ClassVar = { + 2: "twenty", + 3: "thirty", + 4: "forty", + 5: "fifty", + 6: "sixty", + 7: "seventy", + 8: "eighty", + 9: "ninety", + } + + +def convert_small_number(num: int) -> str: + """ + Converts small, non-negative integers with irregular constructions in English (i.e., + numbers under 100) into words. + + >>> convert_small_number(0) + 'zero' + >>> convert_small_number(5) + 'five' + >>> convert_small_number(10) + 'ten' + >>> convert_small_number(15) + 'fifteen' + >>> convert_small_number(20) + 'twenty' + >>> convert_small_number(25) + 'twenty-five' + >>> convert_small_number(-1) + Traceback (most recent call last): + ... + ValueError: This function only accepts non-negative integers + >>> convert_small_number(123) + Traceback (most recent call last): + ... + ValueError: This function only converts numbers less than 100 + """ + if num < 0: + raise ValueError("This function only accepts non-negative integers") + if num >= 100: + raise ValueError("This function only converts numbers less than 100") + tens, ones = divmod(num, 10) + if tens == 0: + return NumberWords.ONES.value[ones] or "zero" + if tens == 1: + return NumberWords.TEENS.value[ones] + return ( + NumberWords.TENS.value[tens] + + ("-" if NumberWords.ONES.value[ones] else "") + + NumberWords.ONES.value[ones] + ) + + +def convert_number( + num: int, system: Literal["short", "long", "indian"] = "short" +) -> str: + """ + Converts an integer to English words. + + :param num: The integer to be converted + :param system: The numbering system (short, long, or Indian) + + >>> convert_number(0) + 'zero' + >>> convert_number(1) + 'one' + >>> convert_number(100) + 'one hundred' + >>> convert_number(-100) + 'negative one hundred' + >>> convert_number(123_456_789_012_345) # doctest: +NORMALIZE_WHITESPACE + 'one hundred twenty-three trillion four hundred fifty-six billion + seven hundred eighty-nine million twelve thousand three hundred forty-five' + >>> convert_number(123_456_789_012_345, "long") # doctest: +NORMALIZE_WHITESPACE + 'one hundred twenty-three thousand four hundred fifty-six milliard + seven hundred eighty-nine million twelve thousand three hundred forty-five' + >>> convert_number(12_34_56_78_90_12_345, "indian") # doctest: +NORMALIZE_WHITESPACE + 'one crore crore twenty-three lakh crore + forty-five thousand six hundred seventy-eight crore + ninety lakh twelve thousand three hundred forty-five' + >>> convert_number(10**18) + Traceback (most recent call last): + ... + ValueError: Input number is too large + >>> convert_number(10**21, "long") + Traceback (most recent call last): + ... 
+ ValueError: Input number is too large + >>> convert_number(10**19, "indian") + Traceback (most recent call last): + ... + ValueError: Input number is too large + """ + word_groups = [] + + if num < 0: + word_groups.append("negative") + num *= -1 + + if num > NumberingSystem.max_value(system): + raise ValueError("Input number is too large") + + for power, unit in NumberingSystem[system.upper()].value: + digit_group, num = divmod(num, 10**power) + if digit_group > 0: + word_group = ( + convert_number(digit_group, system) + if digit_group >= 100 + else convert_small_number(digit_group) + ) + word_groups.append(f"{word_group} {unit}") + if num > 0 or not word_groups: # word_groups is only empty if input num was 0 + word_groups.append(convert_small_number(num)) + return " ".join(word_groups) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + print(f"{convert_number(123456789) = }") diff --git a/web_programming/convert_number_to_words.py b/web_programming/convert_number_to_words.py deleted file mode 100644 index dac9e3e38e7c..000000000000 --- a/web_programming/convert_number_to_words.py +++ /dev/null @@ -1,109 +0,0 @@ -import math - - -def convert(number: int) -> str: - """ - Given a number return the number in words. - - >>> convert(123) - 'OneHundred,TwentyThree' - """ - if number == 0: - words = "Zero" - return words - else: - digits = math.log10(number) - digits = digits + 1 - singles = {} - singles[0] = "" - singles[1] = "One" - singles[2] = "Two" - singles[3] = "Three" - singles[4] = "Four" - singles[5] = "Five" - singles[6] = "Six" - singles[7] = "Seven" - singles[8] = "Eight" - singles[9] = "Nine" - - doubles = {} - doubles[0] = "" - doubles[2] = "Twenty" - doubles[3] = "Thirty" - doubles[4] = "Forty" - doubles[5] = "Fifty" - doubles[6] = "Sixty" - doubles[7] = "Seventy" - doubles[8] = "Eighty" - doubles[9] = "Ninety" - - teens = {} - teens[0] = "Ten" - teens[1] = "Eleven" - teens[2] = "Twelve" - teens[3] = "Thirteen" - teens[4] = "Fourteen" - teens[5] = "Fifteen" - teens[6] = "Sixteen" - teens[7] = "Seventeen" - teens[8] = "Eighteen" - teens[9] = "Nineteen" - - placevalue = {} - placevalue[2] = "Hundred," - placevalue[3] = "Thousand," - placevalue[5] = "Lakh," - placevalue[7] = "Crore," - - temp_num = number - words = "" - counter = 0 - digits = int(digits) - while counter < digits: - current = temp_num % 10 - if counter % 2 == 0: - addition = "" - if counter in placevalue and current != 0: - addition = placevalue[counter] - if counter == 2: - words = singles[current] + addition + words - elif counter == 0: - if ((temp_num % 100) // 10) == 1: - words = teens[current] + addition + words - temp_num = temp_num // 10 - counter += 1 - else: - words = singles[current] + addition + words - - else: - words = doubles[current] + addition + words - - else: - if counter == 1: - if current == 1: - words = teens[number % 10] + words - else: - addition = "" - if counter in placevalue: - addition = placevalue[counter] - words = doubles[current] + addition + words - else: - addition = "" - if counter in placevalue: - if current != 0 and ((temp_num % 100) // 10) != 0: - addition = placevalue[counter] - if ((temp_num % 100) // 10) == 1: - words = teens[current] + addition + words - temp_num = temp_num // 10 - counter += 1 - else: - words = singles[current] + addition + words - counter += 1 - temp_num = temp_num // 10 - return words - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 062957ef27fcaaf59753e3739052928ec37f220e Mon Sep 17 00:00:00 2001 From: 
Bama Charan Chhandogi Date: Sun, 20 Aug 2023 18:10:23 +0530 Subject: [PATCH 0939/1543] Octal to Binary Convert (#8949) * Octal to Binary Convert * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * mention return type * code scratch * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * mentioned return type * remove comment * added documention and some test cases * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add another test case * fixes documention * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Documention and test cases added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * documention problem solved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * error in exit 1 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review --------- Co-authored-by: BamaCharanChhandogi Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- conversions/octal_to_binary.py | 54 ++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 conversions/octal_to_binary.py diff --git a/conversions/octal_to_binary.py b/conversions/octal_to_binary.py new file mode 100644 index 000000000000..84e1e85f33ca --- /dev/null +++ b/conversions/octal_to_binary.py @@ -0,0 +1,54 @@ +""" +* Author: Bama Charan Chhandogi (https://github.com/BamaCharanChhandogi) +* Description: Convert a Octal number to Binary. + +References for better understanding: +https://en.wikipedia.org/wiki/Binary_number +https://en.wikipedia.org/wiki/Octal +""" + + +def octal_to_binary(octal_number: str) -> str: + """ + Convert an Octal number to Binary. + + >>> octal_to_binary("17") + '001111' + >>> octal_to_binary("7") + '111' + >>> octal_to_binary("Av") + Traceback (most recent call last): + ... + ValueError: Non-octal value was passed to the function + >>> octal_to_binary("@#") + Traceback (most recent call last): + ... + ValueError: Non-octal value was passed to the function + >>> octal_to_binary("") + Traceback (most recent call last): + ... 
+ ValueError: Empty string was passed to the function + """ + if not octal_number: + raise ValueError("Empty string was passed to the function") + + binary_number = "" + octal_digits = "01234567" + for digit in octal_number: + if digit not in octal_digits: + raise ValueError("Non-octal value was passed to the function") + + binary_digit = "" + value = int(digit) + for _ in range(3): + binary_digit = str(value % 2) + binary_digit + value //= 2 + binary_number += binary_digit + + return binary_number + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 672e7bde2e5fad38a3bc4038d11a9c343e3667f7 Mon Sep 17 00:00:00 2001 From: Guduly <133545858+Guduly@users.noreply.github.com> Date: Sun, 20 Aug 2023 18:39:29 -0500 Subject: [PATCH 0940/1543] Update arc_length.py (#8964) * Update arc_length.py Wrote the output of testcase * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update arc_length.py Added the requested changes * Update arc_length.py followed the change request * Update arc_length.py followed suggestions --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/arc_length.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/maths/arc_length.py b/maths/arc_length.py index 9e87ca38cc7d..4c518f321dc7 100644 --- a/maths/arc_length.py +++ b/maths/arc_length.py @@ -7,6 +7,8 @@ def arc_length(angle: int, radius: int) -> float: 3.9269908169872414 >>> arc_length(120, 15) 31.415926535897928 + >>> arc_length(90, 10) + 15.707963267948966 """ return 2 * pi * radius * (angle / 360) From 1984d9717158c89f9acca2b635a373bad7048633 Mon Sep 17 00:00:00 2001 From: Dom <97384583+tosemml@users.noreply.github.com> Date: Sun, 20 Aug 2023 16:43:09 -0700 Subject: [PATCH 0941/1543] Refactorings (#8987) * use np.dot * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * further improvements using array slicing Co-authored-by: Tianyi Zheng --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- arithmetic_analysis/gaussian_elimination.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arithmetic_analysis/gaussian_elimination.py b/arithmetic_analysis/gaussian_elimination.py index f0f20af8e417..13f509a4f117 100644 --- a/arithmetic_analysis/gaussian_elimination.py +++ b/arithmetic_analysis/gaussian_elimination.py @@ -33,10 +33,7 @@ def retroactive_resolution( x: NDArray[float64] = np.zeros((rows, 1), dtype=float) for row in reversed(range(rows)): - total = 0 - for col in range(row + 1, columns): - total += coefficients[row, col] * x[col] - + total = np.dot(coefficients[row, row + 1 :], x[row + 1 :]) x[row, 0] = (vector[row] - total) / coefficients[row, row] return x From 1210559deb60b44cb9f57ce16c9bf6d79c0f443c Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Mon, 21 Aug 2023 14:25:20 +0100 Subject: [PATCH 0942/1543] Consolidate decimal to binary iterative and recursive (#8999) * updating DIRECTORY.md * refactor(decimal-to-binary): Consolidate implementations * updating DIRECTORY.md * refactor(decimal-to-binary): Rename main and helper recursive --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - conversions/decimal_to_binary.py | 67 +++++++++++++++++++--- conversions/decimal_to_binary_recursion.py | 53 ----------------- 3 files changed, 59 insertions(+), 62 deletions(-) delete mode 
100644 conversions/decimal_to_binary_recursion.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 653c1831d820..dd4404edd364 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -146,7 +146,6 @@ * [Convert Number To Words](conversions/convert_number_to_words.py) * [Decimal To Any](conversions/decimal_to_any.py) * [Decimal To Binary](conversions/decimal_to_binary.py) - * [Decimal To Binary Recursion](conversions/decimal_to_binary_recursion.py) * [Decimal To Hexadecimal](conversions/decimal_to_hexadecimal.py) * [Decimal To Octal](conversions/decimal_to_octal.py) * [Energy Conversions](conversions/energy_conversions.py) diff --git a/conversions/decimal_to_binary.py b/conversions/decimal_to_binary.py index 973c47c8af67..cf2b6040ec2a 100644 --- a/conversions/decimal_to_binary.py +++ b/conversions/decimal_to_binary.py @@ -1,27 +1,27 @@ """Convert a Decimal Number to a Binary Number.""" -def decimal_to_binary(num: int) -> str: +def decimal_to_binary_iterative(num: int) -> str: """ Convert an Integer Decimal Number to a Binary Number as str. - >>> decimal_to_binary(0) + >>> decimal_to_binary_iterative(0) '0b0' - >>> decimal_to_binary(2) + >>> decimal_to_binary_iterative(2) '0b10' - >>> decimal_to_binary(7) + >>> decimal_to_binary_iterative(7) '0b111' - >>> decimal_to_binary(35) + >>> decimal_to_binary_iterative(35) '0b100011' >>> # negatives work too - >>> decimal_to_binary(-2) + >>> decimal_to_binary_iterative(-2) '-0b10' >>> # other floats will error - >>> decimal_to_binary(16.16) # doctest: +ELLIPSIS + >>> decimal_to_binary_iterative(16.16) # doctest: +ELLIPSIS Traceback (most recent call last): ... TypeError: 'float' object cannot be interpreted as an integer >>> # strings will error as well - >>> decimal_to_binary('0xfffff') # doctest: +ELLIPSIS + >>> decimal_to_binary_iterative('0xfffff') # doctest: +ELLIPSIS Traceback (most recent call last): ... TypeError: 'str' object cannot be interpreted as an integer @@ -52,7 +52,58 @@ def decimal_to_binary(num: int) -> str: return "0b" + "".join(str(e) for e in binary) +def decimal_to_binary_recursive_helper(decimal: int) -> str: + """ + Take a positive integer value and return its binary equivalent. + >>> decimal_to_binary_recursive_helper(1000) + '1111101000' + >>> decimal_to_binary_recursive_helper("72") + '1001000' + >>> decimal_to_binary_recursive_helper("number") + Traceback (most recent call last): + ... + ValueError: invalid literal for int() with base 10: 'number' + """ + decimal = int(decimal) + if decimal in (0, 1): # Exit cases for the recursion + return str(decimal) + div, mod = divmod(decimal, 2) + return decimal_to_binary_recursive_helper(div) + str(mod) + + +def decimal_to_binary_recursive(number: str) -> str: + """ + Take an integer value and raise ValueError for wrong inputs, + call the function above and return the output with prefix "0b" & "-0b" + for positive and negative integers respectively. + >>> decimal_to_binary_recursive(0) + '0b0' + >>> decimal_to_binary_recursive(40) + '0b101000' + >>> decimal_to_binary_recursive(-40) + '-0b101000' + >>> decimal_to_binary_recursive(40.8) + Traceback (most recent call last): + ... + ValueError: Input value is not an integer + >>> decimal_to_binary_recursive("forty") + Traceback (most recent call last): + ... 
+ ValueError: Input value is not an integer + """ + number = str(number).strip() + if not number: + raise ValueError("No input value was provided") + negative = "-" if number.startswith("-") else "" + number = number.lstrip("-") + if not number.isnumeric(): + raise ValueError("Input value is not an integer") + return f"{negative}0b{decimal_to_binary_recursive_helper(int(number))}" + + if __name__ == "__main__": import doctest doctest.testmod() + + print(decimal_to_binary_recursive(input("Input a decimal number: "))) diff --git a/conversions/decimal_to_binary_recursion.py b/conversions/decimal_to_binary_recursion.py deleted file mode 100644 index 05833ca670c3..000000000000 --- a/conversions/decimal_to_binary_recursion.py +++ /dev/null @@ -1,53 +0,0 @@ -def binary_recursive(decimal: int) -> str: - """ - Take a positive integer value and return its binary equivalent. - >>> binary_recursive(1000) - '1111101000' - >>> binary_recursive("72") - '1001000' - >>> binary_recursive("number") - Traceback (most recent call last): - ... - ValueError: invalid literal for int() with base 10: 'number' - """ - decimal = int(decimal) - if decimal in (0, 1): # Exit cases for the recursion - return str(decimal) - div, mod = divmod(decimal, 2) - return binary_recursive(div) + str(mod) - - -def main(number: str) -> str: - """ - Take an integer value and raise ValueError for wrong inputs, - call the function above and return the output with prefix "0b" & "-0b" - for positive and negative integers respectively. - >>> main(0) - '0b0' - >>> main(40) - '0b101000' - >>> main(-40) - '-0b101000' - >>> main(40.8) - Traceback (most recent call last): - ... - ValueError: Input value is not an integer - >>> main("forty") - Traceback (most recent call last): - ... - ValueError: Input value is not an integer - """ - number = str(number).strip() - if not number: - raise ValueError("No input value was provided") - negative = "-" if number.startswith("-") else "" - number = number.lstrip("-") - if not number.isnumeric(): - raise ValueError("Input value is not an integer") - return f"{negative}0b{binary_recursive(int(number))}" - - -if __name__ == "__main__": - from doctest import testmod - - testmod() From b3dc6ef035f097c9eb91911d8970668049e47d62 Mon Sep 17 00:00:00 2001 From: AmirSoroush Date: Tue, 22 Aug 2023 02:17:02 +0300 Subject: [PATCH 0943/1543] fixes #9002; improve insertion_sort algorithm (#9005) * fixes #9002; improve insertion_sort algorithm * add type hints to sorts/insertion_sort.py --- sorts/insertion_sort.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/sorts/insertion_sort.py b/sorts/insertion_sort.py index 6d5bb2b46013..f11ddac349a0 100644 --- a/sorts/insertion_sort.py +++ b/sorts/insertion_sort.py @@ -13,8 +13,19 @@ python3 insertion_sort.py """ +from collections.abc import MutableSequence +from typing import Any, Protocol, TypeVar -def insertion_sort(collection: list) -> list: + +class Comparable(Protocol): + def __lt__(self, other: Any, /) -> bool: + ... 
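+# A short note on the protocol above (descriptive only, not part of the upstream
+# commit's logic): Comparable is structural ("duck-typed"), so any type that
+# implements __lt__ -- int, float, str, tuple, and so on -- satisfies it, and the
+# sort below can be annotated generically without requiring a common base class.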
+ + +T = TypeVar("T", bound=Comparable) + + +def insertion_sort(collection: MutableSequence[T]) -> MutableSequence[T]: """A pure Python implementation of the insertion sort algorithm :param collection: some mutable ordered collection with heterogeneous @@ -40,13 +51,12 @@ def insertion_sort(collection: list) -> list: True """ - for insert_index, insert_value in enumerate(collection[1:]): - temp_index = insert_index - while insert_index >= 0 and insert_value < collection[insert_index]: - collection[insert_index + 1] = collection[insert_index] + for insert_index in range(1, len(collection)): + insert_value = collection[insert_index] + while insert_index > 0 and insert_value < collection[insert_index - 1]: + collection[insert_index] = collection[insert_index - 1] insert_index -= 1 - if insert_index != temp_index: - collection[insert_index + 1] = insert_value + collection[insert_index] = insert_value return collection From 04fd5c1b5e7880017d874f4305ca3396f868ee37 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Tue, 22 Aug 2023 00:20:51 +0100 Subject: [PATCH 0944/1543] Create langtons ant algorithm (#8967) * updating DIRECTORY.md * feat(cellular_automata): Langonts ant algorithm * updating DIRECTORY.md * Update cellular_automata/langtons_ant.py Co-authored-by: Tianyi Zheng * Apply suggestions from code review Co-authored-by: Tianyi Zheng * fix(langtons-ant): Set funcanimation interval to 1 --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- DIRECTORY.md | 1 + cellular_automata/langtons_ant.py | 106 ++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+) create mode 100644 cellular_automata/langtons_ant.py diff --git a/DIRECTORY.md b/DIRECTORY.md index dd4404edd364..866a3084f67b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -72,6 +72,7 @@ ## Cellular Automata * [Conways Game Of Life](cellular_automata/conways_game_of_life.py) * [Game Of Life](cellular_automata/game_of_life.py) + * [Langtons Ant](cellular_automata/langtons_ant.py) * [Nagel Schrekenberg](cellular_automata/nagel_schrekenberg.py) * [One Dimensional](cellular_automata/one_dimensional.py) * [Wa Tor](cellular_automata/wa_tor.py) diff --git a/cellular_automata/langtons_ant.py b/cellular_automata/langtons_ant.py new file mode 100644 index 000000000000..983c626546ad --- /dev/null +++ b/cellular_automata/langtons_ant.py @@ -0,0 +1,106 @@ +""" +Langton's ant + +@ https://en.wikipedia.org/wiki/Langton%27s_ant +@ https://upload.wikimedia.org/wikipedia/commons/0/09/LangtonsAntAnimated.gif +""" + +from functools import partial + +from matplotlib import pyplot as plt +from matplotlib.animation import FuncAnimation + +WIDTH = 80 +HEIGHT = 80 + + +class LangtonsAnt: + """ + Represents the main LangonsAnt algorithm. + + >>> la = LangtonsAnt(2, 2) + >>> la.board + [[True, True], [True, True]] + >>> la.ant_position + (1, 1) + """ + + def __init__(self, width: int, height: int) -> None: + # Each square is either True or False where True is white and False is black + self.board = [[True] * width for _ in range(height)] + self.ant_position: tuple[int, int] = (width // 2, height // 2) + + # Initially pointing left (similar to the the wikipedia image) + # (0 = 0° | 1 = 90° | 2 = 180 ° | 3 = 270°) + self.ant_direction: int = 3 + + def move_ant(self, axes: plt.Axes | None, display: bool, _frame: int) -> None: + """ + Performs three tasks: + 1. The ant turns either clockwise or anti-clockwise according to the colour + of the square that it is currently on. 
If the square is white, the ant + turns clockwise, and if the square is black the ant turns anti-clockwise + 2. The ant moves one square in the direction that it is currently facing + 3. The square the ant was previously on is inverted (White -> Black and + Black -> White) + + If display is True, the board will also be displayed on the axes + + >>> la = LangtonsAnt(2, 2) + >>> la.move_ant(None, True, 0) + >>> la.board + [[True, True], [True, False]] + >>> la.move_ant(None, True, 0) + >>> la.board + [[True, False], [True, False]] + """ + directions = { + 0: (-1, 0), # 0° + 1: (0, 1), # 90° + 2: (1, 0), # 180° + 3: (0, -1), # 270° + } + x, y = self.ant_position + + # Turn clockwise or anti-clockwise according to colour of square + if self.board[x][y] is True: + # The square is white so turn 90° clockwise + self.ant_direction = (self.ant_direction + 1) % 4 + else: + # The square is black so turn 90° anti-clockwise + self.ant_direction = (self.ant_direction - 1) % 4 + + # Move ant + move_x, move_y = directions[self.ant_direction] + self.ant_position = (x + move_x, y + move_y) + + # Flip colour of square + self.board[x][y] = not self.board[x][y] + + if display and axes: + # Display the board on the axes + axes.get_xaxis().set_ticks([]) + axes.get_yaxis().set_ticks([]) + axes.imshow(self.board, cmap="gray", interpolation="nearest") + + def display(self, frames: int = 100_000) -> None: + """ + Displays the board without delay in a matplotlib plot + to visually understand and track the ant. + + >>> _ = LangtonsAnt(WIDTH, HEIGHT) + """ + fig, ax = plt.subplots() + # Assign animation to a variable to prevent it from getting garbage collected + self.animation = FuncAnimation( + fig, partial(self.move_ant, ax, True), frames=frames, interval=1 + ) + plt.show() + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + LangtonsAnt(WIDTH, HEIGHT).display() From c7aeaa3fd8a114ecf9b1e800dfb8cc3cc7a3cbaa Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 22 Aug 2023 07:42:14 +0200 Subject: [PATCH 0945/1543] [pre-commit.ci] pre-commit autoupdate (#9006) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.284 → v0.0.285](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.284...v0.0.285) - [github.com/abravalheri/validate-pyproject: v0.13 → v0.14](https://github.com/abravalheri/validate-pyproject/compare/v0.13...v0.14) - [github.com/pre-commit/mirrors-mypy: v1.5.0 → v1.5.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.5.0...v1.5.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b08139561639..ad3e0cd87f2e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.284 + rev: v0.0.285 hooks: - id: ruff @@ -46,12 +46,12 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.13 + rev: v0.14 hooks: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.5.0 + rev: v1.5.1 hooks: - 
id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 866a3084f67b..ebb164d0496c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -155,6 +155,7 @@ * [Hexadecimal To Decimal](conversions/hexadecimal_to_decimal.py) * [Length Conversion](conversions/length_conversion.py) * [Molecular Chemistry](conversions/molecular_chemistry.py) + * [Octal To Binary](conversions/octal_to_binary.py) * [Octal To Decimal](conversions/octal_to_decimal.py) * [Prefix Conversions](conversions/prefix_conversions.py) * [Prefix Conversions String](conversions/prefix_conversions_string.py) From fceacf977f0e4567d00f297686527ac9b4e5561f Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Tue, 22 Aug 2023 10:33:47 +0100 Subject: [PATCH 0946/1543] Fix type errors in permutations (#9007) * updating DIRECTORY.md * types(permuations): Rename permute2 * Apply suggestions from code review Co-authored-by: Tianyi Zheng * fix(permutations): Call permute_recursive * fix(permutations): Correct permutations order --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- data_structures/arrays/permutations.py | 28 ++++++++++++-------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/data_structures/arrays/permutations.py b/data_structures/arrays/permutations.py index 4558bd8d468a..0f029187b92b 100644 --- a/data_structures/arrays/permutations.py +++ b/data_structures/arrays/permutations.py @@ -1,17 +1,16 @@ -def permute(nums: list[int]) -> list[list[int]]: +def permute_recursive(nums: list[int]) -> list[list[int]]: """ Return all permutations. - >>> from itertools import permutations - >>> numbers= [1,2,3] - >>> all(list(nums) in permute(numbers) for nums in permutations(numbers)) - True + + >>> permute_recursive([1, 2, 3]) + [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]] """ - result = [] - if len(nums) == 1: - return [nums.copy()] + result: list[list[int]] = [] + if len(nums) == 0: + return [[]] for _ in range(len(nums)): n = nums.pop(0) - permutations = permute(nums) + permutations = permute_recursive(nums) for perm in permutations: perm.append(n) result.extend(permutations) @@ -19,15 +18,15 @@ def permute(nums: list[int]) -> list[list[int]]: return result -def permute2(nums): +def permute_backtrack(nums: list[int]) -> list[list[int]]: """ Return all permutations of the given list. 
- >>> permute2([1, 2, 3]) + >>> permute_backtrack([1, 2, 3]) [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]] """ - def backtrack(start): + def backtrack(start: int) -> None: if start == len(nums) - 1: output.append(nums[:]) else: @@ -36,7 +35,7 @@ def backtrack(start): backtrack(start + 1) nums[start], nums[i] = nums[i], nums[start] # backtrack - output = [] + output: list[list[int]] = [] backtrack(0) return output @@ -44,7 +43,6 @@ def backtrack(start): if __name__ == "__main__": import doctest - # use res to print the data in permute2 function - res = permute2([1, 2, 3]) + res = permute_backtrack([1, 2, 3]) print(res) doctest.testmod() From 0a9438071ee08121f069c77a5cb662206a4d348f Mon Sep 17 00:00:00 2001 From: Arijit De Date: Wed, 23 Aug 2023 18:06:59 +0530 Subject: [PATCH 0947/1543] Updated postfix_evaluation.py to support Unary operators (#8787) * Updated postfix_evaluation.py to support Unary operators and floating point numbers Fixes #8754 and #8724 Also merged evaluate_postfix_notations.py and postfix_evaluation.py into postfix_evaluation.py Signed-off-by: Arijit De * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated postfix_evaluation.py to support Unary operators and floating point numbers. Fixes #8754 and formatted code to pass ruff and black test. Also merged evaluate_postfix_notations.py and postfix_evaluation.py into postfix_evaluation.py which fixes #8724 and made sure it passes doctest Signed-off-by: Arijit De * Fixed return type hinting required by pre commit for evaluate function Signed-off-by: Arijit De * Changed line 186 to return only top of stack instead of calling the get_number function as it was converting float values to int, resulting in data loss. Fixes #8754 and #8724 Signed-off-by: Arijit De * Made the requested changes Also changed the code to make the evaluate function first convert all the numbers and then process the valid expression. * Fixes #8754, #8724 Updated postfix_evaluation.py postfix_evaluation.py now supports Unary operators and floating point numbers. Also merged evaluate_postfix_notations.py and postfix_evaluation.py into postfix_evaluation.py which fixes #8724. Added a doctest example with unary operator. * Fixes #8754, #8724 Updated postfix_evaluation.py postfix_evaluation.py now supports Unary operators and floating point numbers. Also merged evaluate_postfix_notations.py and postfix_evaluation.py into postfix_evaluation.py which fixes #8724. Added a doctest example with unary operator. * Fixes #8754, #8724 Updated the parse_token function of postfix_evaluation.py ostfix_evaluation.py now supports Unary operators and floating point numbers. Also merged evaluate_postfix_notations.py and postfix_evaluation.py into postfix_evaluation.py which fixes #8724. Added a doctest example with unary operator and invalid expression. * Fixes #8754, #8724 Updated postfix_evaluation.py postfix_evaluation.py now supports Unary operators and floating point numbers. Also merged evaluate_postfix_notations.py and postfix_evaluation.py into postfix_evaluation.py which fixes #8724. Added a doctest example with unary operator and invalid expression. 
* Update postfix_evaluation.py * Update postfix_evaluation.py * Update postfix_evaluation.py * Update postfix_evaluation.py * Update postfix_evaluation.py --------- Signed-off-by: Arijit De Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../stacks/evaluate_postfix_notations.py | 52 ----- data_structures/stacks/postfix_evaluation.py | 200 +++++++++++++++--- 2 files changed, 166 insertions(+), 86 deletions(-) delete mode 100644 data_structures/stacks/evaluate_postfix_notations.py diff --git a/data_structures/stacks/evaluate_postfix_notations.py b/data_structures/stacks/evaluate_postfix_notations.py deleted file mode 100644 index 51ea353b17de..000000000000 --- a/data_structures/stacks/evaluate_postfix_notations.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -The Reverse Polish Nation also known as Polish postfix notation -or simply postfix notation. -https://en.wikipedia.org/wiki/Reverse_Polish_notation -Classic examples of simple stack implementations -Valid operators are +, -, *, /. -Each operand may be an integer or another expression. -""" -from __future__ import annotations - -from typing import Any - - -def evaluate_postfix(postfix_notation: list) -> int: - """ - >>> evaluate_postfix(["2", "1", "+", "3", "*"]) - 9 - >>> evaluate_postfix(["4", "13", "5", "/", "+"]) - 6 - >>> evaluate_postfix([]) - 0 - """ - if not postfix_notation: - return 0 - - operations = {"+", "-", "*", "/"} - stack: list[Any] = [] - - for token in postfix_notation: - if token in operations: - b, a = stack.pop(), stack.pop() - if token == "+": - stack.append(a + b) - elif token == "-": - stack.append(a - b) - elif token == "*": - stack.append(a * b) - else: - if a * b < 0 and a % b != 0: - stack.append(a // b + 1) - else: - stack.append(a // b) - else: - stack.append(int(token)) - - return stack.pop() - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/data_structures/stacks/postfix_evaluation.py b/data_structures/stacks/postfix_evaluation.py index 28128f82ec19..03a87b9e0fa3 100644 --- a/data_structures/stacks/postfix_evaluation.py +++ b/data_structures/stacks/postfix_evaluation.py @@ -1,4 +1,11 @@ """ +Reverse Polish Nation is also known as Polish postfix notation or simply postfix +notation. +https://en.wikipedia.org/wiki/Reverse_Polish_notation +Classic examples of simple stack implementations. +Valid operators are +, -, *, /. +Each operand may be an integer or another expression. + Output: Enter a Postfix Equation (space separated) = 5 6 9 * + @@ -17,52 +24,177 @@ Result = 59 """ -import operator as op +# Defining valid unary operator symbols +UNARY_OP_SYMBOLS = ("-", "+") + +# operators & their respective operation +OPERATORS = { + "^": lambda p, q: p**q, + "*": lambda p, q: p * q, + "/": lambda p, q: p / q, + "+": lambda p, q: p + q, + "-": lambda p, q: p - q, +} + + +def parse_token(token: str | float) -> float | str: + """ + Converts the given data to the appropriate number if it is indeed a number, else + returns the data as it is with a False flag. This function also serves as a check + of whether the input is a number or not. + + Parameters + ---------- + token: The data that needs to be converted to the appropriate operator or number. 
+ + Returns + ------- + float or str + Returns a float if `token` is a number or a str if `token` is an operator + """ + if token in OPERATORS: + return token + try: + return float(token) + except ValueError: + msg = f"{token} is neither a number nor a valid operator" + raise ValueError(msg) + + +def evaluate(post_fix: list[str], verbose: bool = False) -> float: + """ + Evaluate postfix expression using a stack. + >>> evaluate(["0"]) + 0.0 + >>> evaluate(["-0"]) + -0.0 + >>> evaluate(["1"]) + 1.0 + >>> evaluate(["-1"]) + -1.0 + >>> evaluate(["-1.1"]) + -1.1 + >>> evaluate(["2", "1", "+", "3", "*"]) + 9.0 + >>> evaluate(["2", "1.9", "+", "3", "*"]) + 11.7 + >>> evaluate(["2", "-1.9", "+", "3", "*"]) + 0.30000000000000027 + >>> evaluate(["4", "13", "5", "/", "+"]) + 6.6 + >>> evaluate(["2", "-", "3", "+"]) + 1.0 + >>> evaluate(["-4", "5", "*", "6", "-"]) + -26.0 + >>> evaluate([]) + 0 + >>> evaluate(["4", "-", "6", "7", "/", "9", "8"]) + Traceback (most recent call last): + ... + ArithmeticError: Input is not a valid postfix expression + + Parameters + ---------- + post_fix: + The postfix expression is tokenized into operators and operands and stored + as a Python list + verbose: + Display stack contents while evaluating the expression if verbose is True -def solve(post_fix): + Returns + ------- + float + The evaluated value + """ + if not post_fix: + return 0 + # Checking the list to find out whether the postfix expression is valid + valid_expression = [parse_token(token) for token in post_fix] + if verbose: + # print table header + print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ") + print("-" * (30 + len(post_fix))) stack = [] - div = lambda x, y: int(x / y) # noqa: E731 integer division operation - opr = { - "^": op.pow, - "*": op.mul, - "/": div, - "+": op.add, - "-": op.sub, - } # operators & their respective operation - - # print table header - print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ") - print("-" * (30 + len(post_fix))) - - for x in post_fix: - if x.isdigit(): # if x in digit + for x in valid_expression: + if x not in OPERATORS: stack.append(x) # append x to stack - # output in tabular format - print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ") - else: + if verbose: + # output in tabular format + print( + f"{x}".rjust(8), + f"push({x})".ljust(12), + stack, + sep=" | ", + ) + continue + # If x is operator + # If only 1 value is inside the stack and + or - is encountered + # then this is unary + or - case + if x in UNARY_OP_SYMBOLS and len(stack) < 2: b = stack.pop() # pop stack + if x == "-": + b *= -1 # negate b + stack.append(b) + if verbose: + # output in tabular format + print( + "".rjust(8), + f"pop({b})".ljust(12), + stack, + sep=" | ", + ) + print( + str(x).rjust(8), + f"push({x}{b})".ljust(12), + stack, + sep=" | ", + ) + continue + b = stack.pop() # pop stack + if verbose: # output in tabular format - print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ") + print( + "".rjust(8), + f"pop({b})".ljust(12), + stack, + sep=" | ", + ) - a = stack.pop() # pop stack + a = stack.pop() # pop stack + if verbose: # output in tabular format - print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ") - - stack.append( - str(opr[x](int(a), int(b))) - ) # evaluate the 2 values popped from stack & push result to stack + print( + "".rjust(8), + f"pop({a})".ljust(12), + stack, + sep=" | ", + ) + # evaluate the 2 values popped from stack & push result to stack + 
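+        # Note on operand order (added comment): b was popped first, so a
+        # (popped second) is the left operand and the value pushed is
+        # a <operator> b; e.g. the tokens "6 3 -" give 6 - 3 = 3.0.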
stack.append(OPERATORS[x](a, b)) # type: ignore[index] + if verbose: # output in tabular format print( - x.rjust(8), - ("push(" + a + x + b + ")").ljust(12), - ",".join(stack), + f"{x}".rjust(8), + f"push({a}{x}{b})".ljust(12), + stack, sep=" | ", ) - - return int(stack[0]) + # If everything is executed correctly, the stack will contain + # only one element which is the result + if len(stack) != 1: + raise ArithmeticError("Input is not a valid postfix expression") + return float(stack[0]) if __name__ == "__main__": - Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ") - print("\n\tResult = ", solve(Postfix)) + # Create a loop so that the user can evaluate postfix expressions multiple times + while True: + expression = input("Enter a Postfix Expression (space separated): ").split(" ") + prompt = "Do you want to see stack contents while evaluating? [y/N]: " + verbose = input(prompt).strip().lower() == "y" + output = evaluate(expression, verbose) + print("Result = ", output) + prompt = "Do you want to enter another expression? [y/N]: " + if input(prompt).strip().lower() != "y": + break From 421ace81edb0d9af3a173f4ca7e66cc900078c1d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Aug 2023 15:18:10 +0200 Subject: [PATCH 0948/1543] [pre-commit.ci] pre-commit autoupdate (#9013) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.285 → v0.0.286](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.285...v0.0.286) - [github.com/tox-dev/pyproject-fmt: 0.13.1 → 1.1.0](https://github.com/tox-dev/pyproject-fmt/compare/0.13.1...1.1.0) * updating DIRECTORY.md * Fis ruff rules PIE808,PLR1714 --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 - arithmetic_analysis/jacobi_iteration_method.py | 4 ++-- arithmetic_analysis/secant_method.py | 2 +- backtracking/hamiltonian_cycle.py | 2 +- backtracking/sudoku.py | 2 +- bit_manipulation/reverse_bits.py | 2 +- ciphers/trafid_cipher.py | 2 +- data_structures/binary_tree/lazy_segment_tree.py | 6 +++--- data_structures/linked_list/circular_linked_list.py | 2 +- data_structures/linked_list/doubly_linked_list.py | 6 +++--- data_structures/linked_list/is_palindrome.py | 2 +- data_structures/linked_list/singly_linked_list.py | 8 ++++---- data_structures/stacks/stock_span_problem.py | 2 +- digital_image_processing/filters/bilateral_filter.py | 4 ++-- digital_image_processing/filters/convolve.py | 4 ++-- .../filters/local_binary_pattern.py | 4 ++-- .../test_digital_image_processing.py | 4 ++-- divide_and_conquer/strassen_matrix_multiplication.py | 4 ++-- dynamic_programming/floyd_warshall.py | 10 +++++----- hashes/chaos_machine.py | 2 +- hashes/hamming_code.py | 4 ++-- hashes/sha1.py | 2 +- hashes/sha256.py | 2 +- machine_learning/gradient_descent.py | 2 +- machine_learning/linear_regression.py | 4 ++-- machine_learning/lstm/lstm_prediction.py | 4 ++-- maths/entropy.py | 2 +- maths/eulers_totient.py | 2 +- maths/greedy_coin_change.py | 2 +- maths/persistence.py | 4 ++-- maths/series/harmonic.py | 2 +- matrix/spiral_print.py | 2 +- other/magicdiamondpattern.py | 6 +++--- project_euler/problem_070/sol1.py | 2 +- 
project_euler/problem_112/sol1.py | 2 +- quantum/q_full_adder.py | 2 +- scheduling/highest_response_ratio_next.py | 6 +++--- sorts/counting_sort.py | 2 +- sorts/cycle_sort.py | 2 +- sorts/double_sort.py | 4 ++-- sorts/odd_even_transposition_parallel.py | 4 ++-- strings/rabin_karp.py | 2 +- 43 files changed, 70 insertions(+), 71 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ad3e0cd87f2e..5c4e8579e116 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.285 + rev: v0.0.286 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.13.1" + rev: "1.1.0" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index ebb164d0496c..43da91cb818e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -245,7 +245,6 @@ * Stacks * [Balanced Parentheses](data_structures/stacks/balanced_parentheses.py) * [Dijkstras Two Stack Algorithm](data_structures/stacks/dijkstras_two_stack_algorithm.py) - * [Evaluate Postfix Notations](data_structures/stacks/evaluate_postfix_notations.py) * [Infix To Postfix Conversion](data_structures/stacks/infix_to_postfix_conversion.py) * [Infix To Prefix Conversion](data_structures/stacks/infix_to_prefix_conversion.py) * [Next Greater Element](data_structures/stacks/next_greater_element.py) diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py index 17edf4bf4b8b..dba8a9ff44d3 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -152,9 +152,9 @@ def strictly_diagonally_dominant(table: NDArray[float64]) -> bool: is_diagonally_dominant = True - for i in range(0, rows): + for i in range(rows): total = 0 - for j in range(0, cols - 1): + for j in range(cols - 1): if i == j: continue else: diff --git a/arithmetic_analysis/secant_method.py b/arithmetic_analysis/secant_method.py index d28a46206d40..d39cb0ff30ef 100644 --- a/arithmetic_analysis/secant_method.py +++ b/arithmetic_analysis/secant_method.py @@ -20,7 +20,7 @@ def secant_method(lower_bound: float, upper_bound: float, repeats: int) -> float """ x0 = lower_bound x1 = upper_bound - for _ in range(0, repeats): + for _ in range(repeats): x0, x1 = x1, x1 - (f(x1) * (x1 - x0)) / (f(x1) - f(x0)) return x1 diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py index 4a4156d70b32..e9916f83f861 100644 --- a/backtracking/hamiltonian_cycle.py +++ b/backtracking/hamiltonian_cycle.py @@ -95,7 +95,7 @@ def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step - for next_ver in range(0, len(graph)): + for next_ver in range(len(graph)): if valid_connection(graph, next_ver, curr_ind, path): # Insert current vertex into path as next transition path[curr_ind] = next_ver diff --git a/backtracking/sudoku.py b/backtracking/sudoku.py index 698dedcc2125..6e4e3e8780f2 100644 --- a/backtracking/sudoku.py +++ b/backtracking/sudoku.py @@ -48,7 +48,7 @@ def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool: is found) else returns True if it is 'safe' """ for i in range(9): - if grid[row][i] == n or grid[i][column] == n: + if n in {grid[row][i], grid[i][column]}: return False for i in range(3): diff --git a/bit_manipulation/reverse_bits.py b/bit_manipulation/reverse_bits.py index a8c77c11bfdd..74b4f2563234 
100644 --- a/bit_manipulation/reverse_bits.py +++ b/bit_manipulation/reverse_bits.py @@ -20,7 +20,7 @@ def get_reverse_bit_string(number: int) -> str: ) raise TypeError(msg) bit_string = "" - for _ in range(0, 32): + for _ in range(32): bit_string += str(number % 2) number = number >> 1 return bit_string diff --git a/ciphers/trafid_cipher.py b/ciphers/trafid_cipher.py index 108ac652f0e4..8aa2263ca5ac 100644 --- a/ciphers/trafid_cipher.py +++ b/ciphers/trafid_cipher.py @@ -119,7 +119,7 @@ def decrypt_message( for i in range(0, len(message) + 1, period): a, b, c = __decrypt_part(message[i : i + period], character_to_number) - for j in range(0, len(a)): + for j in range(len(a)): decrypted_numeric.append(a[j] + b[j] + c[j]) for each in decrypted_numeric: diff --git a/data_structures/binary_tree/lazy_segment_tree.py b/data_structures/binary_tree/lazy_segment_tree.py index 050dfe0a6f2f..c26b0619380c 100644 --- a/data_structures/binary_tree/lazy_segment_tree.py +++ b/data_structures/binary_tree/lazy_segment_tree.py @@ -7,10 +7,10 @@ class SegmentTree: def __init__(self, size: int) -> None: self.size = size # approximate the overall size of segment tree with given value - self.segment_tree = [0 for i in range(0, 4 * size)] + self.segment_tree = [0 for i in range(4 * size)] # create array to store lazy update - self.lazy = [0 for i in range(0, 4 * size)] - self.flag = [0 for i in range(0, 4 * size)] # flag for lazy update + self.lazy = [0 for i in range(4 * size)] + self.flag = [0 for i in range(4 * size)] # flag for lazy update def left(self, idx: int) -> int: """ diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 325d91026137..d9544f4263a6 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -125,7 +125,7 @@ def test_circular_linked_list() -> None: circular_linked_list.insert_tail(6) assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7)) circular_linked_list.insert_head(0) - assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7)) + assert str(circular_linked_list) == "->".join(str(i) for i in range(7)) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 1a6c48191c4e..bd3445f9f6c5 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -98,7 +98,7 @@ def insert_at_nth(self, index: int, data): self.tail = new_node else: temp = self.head - for _ in range(0, index): + for _ in range(index): temp = temp.next temp.previous.next = new_node new_node.previous = temp.previous @@ -149,7 +149,7 @@ def delete_at_nth(self, index: int): self.tail.next = None else: temp = self.head - for _ in range(0, index): + for _ in range(index): temp = temp.next delete_node = temp temp.next.previous = temp.previous @@ -215,7 +215,7 @@ def test_doubly_linked_list() -> None: linked_list.insert_at_head(0) linked_list.insert_at_tail(11) - assert str(linked_list) == "->".join(str(i) for i in range(0, 12)) + assert str(linked_list) == "->".join(str(i) for i in range(12)) assert linked_list.delete_head() == 0 assert linked_list.delete_at_nth(9) == 10 diff --git a/data_structures/linked_list/is_palindrome.py b/data_structures/linked_list/is_palindrome.py index ec19e99f78c0..d540fb69f36b 100644 --- 
a/data_structures/linked_list/is_palindrome.py +++ b/data_structures/linked_list/is_palindrome.py @@ -68,7 +68,7 @@ def is_palindrome_dict(head): middle += 1 else: step = 0 - for i in range(0, len(v)): + for i in range(len(v)): if v[i] + v[len(v) - 1 - step] != checksum: return False step += 1 diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index 890e21c9b404..f4b2ddce12d7 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -370,7 +370,7 @@ def test_singly_linked_list() -> None: linked_list.insert_head(0) linked_list.insert_tail(11) - assert str(linked_list) == "->".join(str(i) for i in range(0, 12)) + assert str(linked_list) == "->".join(str(i) for i in range(12)) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9) == 10 @@ -378,11 +378,11 @@ def test_singly_linked_list() -> None: assert len(linked_list) == 9 assert str(linked_list) == "->".join(str(i) for i in range(1, 10)) - assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True + assert all(linked_list[i] == i + 1 for i in range(9)) is True - for i in range(0, 9): + for i in range(9): linked_list[i] = -i - assert all(linked_list[i] == -i for i in range(0, 9)) is True + assert all(linked_list[i] == -i for i in range(9)) is True linked_list.reverse() assert str(linked_list) == "->".join(str(i) for i in range(-8, 1)) diff --git a/data_structures/stacks/stock_span_problem.py b/data_structures/stacks/stock_span_problem.py index de423c1ebf66..5efe58d25798 100644 --- a/data_structures/stacks/stock_span_problem.py +++ b/data_structures/stacks/stock_span_problem.py @@ -36,7 +36,7 @@ def calculation_span(price, s): # A utility function to print elements of array def print_array(arr, n): - for i in range(0, n): + for i in range(n): print(arr[i], end=" ") diff --git a/digital_image_processing/filters/bilateral_filter.py b/digital_image_processing/filters/bilateral_filter.py index 565da73f6b0e..199ac4d9939a 100644 --- a/digital_image_processing/filters/bilateral_filter.py +++ b/digital_image_processing/filters/bilateral_filter.py @@ -31,8 +31,8 @@ def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray: def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray: # Creates a gaussian kernel of given dimension. 
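    # Descriptive comment: each entry arr[i, j] below holds the Euclidean distance
    # of cell (i, j) from the kernel centre at (kernel_size // 2, kernel_size // 2);
    # the Gaussian weighting based on spatial_variance is assumed to be applied to
    # these distances later in the function, outside this hunk.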
arr = np.zeros((kernel_size, kernel_size)) - for i in range(0, kernel_size): - for j in range(0, kernel_size): + for i in range(kernel_size): + for j in range(kernel_size): arr[i, j] = math.sqrt( abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2 ) diff --git a/digital_image_processing/filters/convolve.py b/digital_image_processing/filters/convolve.py index 299682010da6..004402f29ba9 100644 --- a/digital_image_processing/filters/convolve.py +++ b/digital_image_processing/filters/convolve.py @@ -11,8 +11,8 @@ def im2col(image, block_size): dst_width = rows - block_size[0] + 1 image_array = zeros((dst_height * dst_width, block_size[1] * block_size[0])) row = 0 - for i in range(0, dst_height): - for j in range(0, dst_width): + for i in range(dst_height): + for j in range(dst_width): window = ravel(image[i : i + block_size[0], j : j + block_size[1]]) image_array[row, :] = window row += 1 diff --git a/digital_image_processing/filters/local_binary_pattern.py b/digital_image_processing/filters/local_binary_pattern.py index 907fe2cb0555..861369ba6a32 100644 --- a/digital_image_processing/filters/local_binary_pattern.py +++ b/digital_image_processing/filters/local_binary_pattern.py @@ -71,8 +71,8 @@ def local_binary_value(image: np.ndarray, x_coordinate: int, y_coordinate: int) # Iterating through the image and calculating the # local binary pattern value for each pixel. - for i in range(0, image.shape[0]): - for j in range(0, image.shape[1]): + for i in range(image.shape[0]): + for j in range(image.shape[1]): lbp_image[i][j] = local_binary_value(image, i, j) cv2.imshow("local binary pattern", lbp_image) diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index fee7ab247b55..528b4bc3b74c 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -118,8 +118,8 @@ def test_local_binary_pattern(): # Iterating through the image and calculating the local binary pattern value # for each pixel. 
- for i in range(0, image.shape[0]): - for j in range(0, image.shape[1]): + for i in range(image.shape[0]): + for j in range(image.shape[1]): lbp_image[i][j] = lbp.local_binary_value(image, i, j) assert lbp_image.any() diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py index cbfc7e5655db..1d03950ef9fe 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -131,7 +131,7 @@ def strassen(matrix1: list, matrix2: list) -> list: # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 - for i in range(0, maxim): + for i in range(maxim): if i < dimension1[0]: for _ in range(dimension1[1], maxim): new_matrix1[i].append(0) @@ -146,7 +146,7 @@ def strassen(matrix1: list, matrix2: list) -> list: final_matrix = actual_strassen(new_matrix1, new_matrix2) # Removing the additional zeros - for i in range(0, maxim): + for i in range(maxim): if i < dimension1[0]: for _ in range(dimension2[1], maxim): final_matrix[i].pop() diff --git a/dynamic_programming/floyd_warshall.py b/dynamic_programming/floyd_warshall.py index 614a3c72a992..2331f3e65483 100644 --- a/dynamic_programming/floyd_warshall.py +++ b/dynamic_programming/floyd_warshall.py @@ -5,19 +5,19 @@ class Graph: def __init__(self, n=0): # a graph with Node 0,1,...,N-1 self.n = n self.w = [ - [math.inf for j in range(0, n)] for i in range(0, n) + [math.inf for j in range(n)] for i in range(n) ] # adjacency matrix for weight self.dp = [ - [math.inf for j in range(0, n)] for i in range(0, n) + [math.inf for j in range(n)] for i in range(n) ] # dp[i][j] stores minimum distance from i to j def add_edge(self, u, v, w): self.dp[u][v] = w def floyd_warshall(self): - for k in range(0, self.n): - for i in range(0, self.n): - for j in range(0, self.n): + for k in range(self.n): + for i in range(self.n): + for j in range(self.n): self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j]) def show_min(self, u, v): diff --git a/hashes/chaos_machine.py b/hashes/chaos_machine.py index 238fdb1c0634..d2fde2f5e371 100644 --- a/hashes/chaos_machine.py +++ b/hashes/chaos_machine.py @@ -53,7 +53,7 @@ def xorshift(x, y): key = machine_time % m # Evolution (Time Length) - for _ in range(0, t): + for _ in range(t): # Variables (Position + Parameters) r = params_space[key] value = buffer_space[key] diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index dc93032183e0..8498ca920b36 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -135,7 +135,7 @@ def emitter_converter(size_par, data): # Mount the message cont_bp = 0 # parity bit counter - for x in range(0, size_par + len(data)): + for x in range(size_par + len(data)): if data_ord[x] is None: data_out.append(str(parity[cont_bp])) cont_bp += 1 @@ -228,7 +228,7 @@ def receptor_converter(size_par, data): # Mount the message cont_bp = 0 # Parity bit counter - for x in range(0, size_par + len(data_output)): + for x in range(size_par + len(data_output)): if data_ord[x] is None: data_out.append(str(parity[cont_bp])) cont_bp += 1 diff --git a/hashes/sha1.py b/hashes/sha1.py index b325ce3e43bb..8a03673f3c9f 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -97,7 +97,7 @@ def final_hash(self): for block in self.blocks: expanded_block = self.expand_block(block) a, b, c, d, e = self.h - for i in range(0, 80): + for i in range(80): if 0 <= i < 20: f = (b & c) | ((~b) & d) k = 0x5A827999 diff --git a/hashes/sha256.py 
b/hashes/sha256.py index 98f7c096e3b6..ba9aff8dbf41 100644 --- a/hashes/sha256.py +++ b/hashes/sha256.py @@ -138,7 +138,7 @@ def final_hash(self) -> None: a, b, c, d, e, f, g, h = self.hashes - for index in range(0, 64): + for index in range(64): if index > 15: # modify the zero-ed indexes at the end of the array s0 = ( diff --git a/machine_learning/gradient_descent.py b/machine_learning/gradient_descent.py index 5b74dad082e7..9ffc02bbc284 100644 --- a/machine_learning/gradient_descent.py +++ b/machine_learning/gradient_descent.py @@ -110,7 +110,7 @@ def run_gradient_descent(): while True: j += 1 temp_parameter_vector = [0, 0, 0, 0] - for i in range(0, len(parameter_vector)): + for i in range(len(parameter_vector)): cost_derivative = get_cost_derivative(i - 1) temp_parameter_vector[i] = ( parameter_vector[i] - LEARNING_RATE * cost_derivative diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index 75943ac9f2ad..0847112ad538 100644 --- a/machine_learning/linear_regression.py +++ b/machine_learning/linear_regression.py @@ -78,7 +78,7 @@ def run_linear_regression(data_x, data_y): theta = np.zeros((1, no_features)) - for i in range(0, iterations): + for i in range(iterations): theta = run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta) error = sum_of_square_error(data_x, data_y, len_data, theta) print(f"At Iteration {i + 1} - Error is {error:.5f}") @@ -107,7 +107,7 @@ def main(): theta = run_linear_regression(data_x, data_y) len_result = theta.shape[1] print("Resultant Feature vector : ") - for i in range(0, len_result): + for i in range(len_result): print(f"{theta[0, i]:.5f}") diff --git a/machine_learning/lstm/lstm_prediction.py b/machine_learning/lstm/lstm_prediction.py index 74197c46a0ad..16530e935ea7 100644 --- a/machine_learning/lstm/lstm_prediction.py +++ b/machine_learning/lstm/lstm_prediction.py @@ -32,10 +32,10 @@ train_x, train_y = [], [] test_x, test_y = [], [] - for i in range(0, len(train_data) - forward_days - look_back + 1): + for i in range(len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) - for i in range(0, len(test_data) - forward_days - look_back + 1): + for i in range(len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) x_train = np.array(train_x) diff --git a/maths/entropy.py b/maths/entropy.py index 498c28f31bc4..23753d884484 100644 --- a/maths/entropy.py +++ b/maths/entropy.py @@ -101,7 +101,7 @@ def analyze_text(text: str) -> tuple[dict, dict]: # first case when we have space at start. 
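    # Descriptive comment: e.g. for text "abc" this seeds the two-character count
    # for " a" before the loop below slides over consecutive pairs ("ab", "bc").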
two_char_strings[" " + text[0]] += 1 - for i in range(0, len(text) - 1): + for i in range(len(text) - 1): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings diff --git a/maths/eulers_totient.py b/maths/eulers_totient.py index a156647037b4..00f0254c215a 100644 --- a/maths/eulers_totient.py +++ b/maths/eulers_totient.py @@ -21,7 +21,7 @@ def totient(n: int) -> list: for i in range(2, n + 1): if is_prime[i]: primes.append(i) - for j in range(0, len(primes)): + for j in range(len(primes)): if i * primes[j] >= n: break is_prime[i * primes[j]] = False diff --git a/maths/greedy_coin_change.py b/maths/greedy_coin_change.py index 7cf669bcb8cb..db2c381bc84a 100644 --- a/maths/greedy_coin_change.py +++ b/maths/greedy_coin_change.py @@ -81,7 +81,7 @@ def find_minimum_change(denominations: list[int], value: str) -> list[int]: ): n = int(input("Enter the number of denominations you want to add: ").strip()) - for i in range(0, n): + for i in range(n): denominations.append(int(input(f"Denomination {i}: ").strip())) value = input("Enter the change you want to make in Indian Currency: ").strip() else: diff --git a/maths/persistence.py b/maths/persistence.py index 607641e67200..c61a69a7c27d 100644 --- a/maths/persistence.py +++ b/maths/persistence.py @@ -28,7 +28,7 @@ def multiplicative_persistence(num: int) -> int: numbers = [int(i) for i in num_string] total = 1 - for i in range(0, len(numbers)): + for i in range(len(numbers)): total *= numbers[i] num_string = str(total) @@ -67,7 +67,7 @@ def additive_persistence(num: int) -> int: numbers = [int(i) for i in num_string] total = 0 - for i in range(0, len(numbers)): + for i in range(len(numbers)): total += numbers[i] num_string = str(total) diff --git a/maths/series/harmonic.py b/maths/series/harmonic.py index 50f29c93dd5f..35792d38af9b 100644 --- a/maths/series/harmonic.py +++ b/maths/series/harmonic.py @@ -45,7 +45,7 @@ def is_harmonic_series(series: list) -> bool: return True rec_series = [] series_len = len(series) - for i in range(0, series_len): + for i in range(series_len): if series[i] == 0: raise ValueError("Input series cannot have 0 as an element") rec_series.append(1 / series[i]) diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index 0d0be1527aec..5eef263f7aef 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -54,7 +54,7 @@ def spiral_print_clockwise(a: list[list[int]]) -> None: return # horizotal printing increasing - for i in range(0, mat_col): + for i in range(mat_col): print(a[0][i]) # vertical printing down for i in range(1, mat_row): diff --git a/other/magicdiamondpattern.py b/other/magicdiamondpattern.py index 0fc41d7a25d8..89b973bb41e8 100644 --- a/other/magicdiamondpattern.py +++ b/other/magicdiamondpattern.py @@ -7,10 +7,10 @@ def floyd(n): Parameters: n : size of pattern """ - for i in range(0, n): - for _ in range(0, n - i - 1): # printing spaces + for i in range(n): + for _ in range(n - i - 1): # printing spaces print(" ", end="") - for _ in range(0, i + 1): # printing stars + for _ in range(i + 1): # printing stars print("* ", end="") print() diff --git a/project_euler/problem_070/sol1.py b/project_euler/problem_070/sol1.py index 273f37efc5fc..57a6c1916374 100644 --- a/project_euler/problem_070/sol1.py +++ b/project_euler/problem_070/sol1.py @@ -44,7 +44,7 @@ def get_totients(max_one: int) -> list[int]: """ totients = [0] * max_one - for i in range(0, max_one): + for i in range(max_one): totients[i] = i for i in range(2, max_one): diff 
--git a/project_euler/problem_112/sol1.py b/project_euler/problem_112/sol1.py index b3ea6b35654a..31996d070771 100644 --- a/project_euler/problem_112/sol1.py +++ b/project_euler/problem_112/sol1.py @@ -49,7 +49,7 @@ def check_bouncy(n: int) -> bool: raise ValueError("check_bouncy() accepts only integer arguments") str_n = str(n) sorted_str_n = "".join(sorted(str_n)) - return sorted_str_n != str_n and sorted_str_n[::-1] != str_n + return str_n not in {sorted_str_n, sorted_str_n[::-1]} def solution(percent: float = 99) -> int: diff --git a/quantum/q_full_adder.py b/quantum/q_full_adder.py index 66d93198519e..ec4efa4346a5 100644 --- a/quantum/q_full_adder.py +++ b/quantum/q_full_adder.py @@ -88,7 +88,7 @@ def quantum_full_adder( quantum_circuit = qiskit.QuantumCircuit(qr, cr) - for i in range(0, 3): + for i in range(3): if entry[i] == 2: quantum_circuit.h(i) # for hadamard entries elif entry[i] == 1: diff --git a/scheduling/highest_response_ratio_next.py b/scheduling/highest_response_ratio_next.py index 9c999ec65053..057bd64cc729 100644 --- a/scheduling/highest_response_ratio_next.py +++ b/scheduling/highest_response_ratio_next.py @@ -53,7 +53,7 @@ def calculate_turn_around_time( loc = 0 # Saves the current response ratio. temp = 0 - for i in range(0, no_of_process): + for i in range(no_of_process): if finished_process[i] == 0 and arrival_time[i] <= current_time: temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i @@ -87,7 +87,7 @@ def calculate_waiting_time( """ waiting_time = [0] * no_of_process - for i in range(0, no_of_process): + for i in range(no_of_process): waiting_time[i] = turn_around_time[i] - burst_time[i] return waiting_time @@ -106,7 +106,7 @@ def calculate_waiting_time( ) print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time") - for i in range(0, no_of_process): + for i in range(no_of_process): print( f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t" f"{turn_around_time[i]}\t\t\t{waiting_time[i]}" diff --git a/sorts/counting_sort.py b/sorts/counting_sort.py index 18c4b0323dcb..256952df52d2 100644 --- a/sorts/counting_sort.py +++ b/sorts/counting_sort.py @@ -49,7 +49,7 @@ def counting_sort(collection): # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr - for i in reversed(range(0, coll_len)): + for i in reversed(range(coll_len)): ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i] counting_arr[collection[i] - coll_min] -= 1 diff --git a/sorts/cycle_sort.py b/sorts/cycle_sort.py index 806f40441d79..7177c8ea110d 100644 --- a/sorts/cycle_sort.py +++ b/sorts/cycle_sort.py @@ -19,7 +19,7 @@ def cycle_sort(array: list) -> list: [] """ array_len = len(array) - for cycle_start in range(0, array_len - 1): + for cycle_start in range(array_len - 1): item = array[cycle_start] pos = cycle_start diff --git a/sorts/double_sort.py b/sorts/double_sort.py index 5ca88a6745d5..a19641d94752 100644 --- a/sorts/double_sort.py +++ b/sorts/double_sort.py @@ -16,9 +16,9 @@ def double_sort(lst): """ no_of_elements = len(lst) for _ in range( - 0, int(((no_of_elements - 1) / 2) + 1) + int(((no_of_elements - 1) / 2) + 1) ): # we don't need to traverse to end of list as - for j in range(0, no_of_elements - 1): + for j in range(no_of_elements - 1): if ( lst[j + 1] < lst[j] ): # applying bubble sort algorithm from left to right (or forwards) diff --git a/sorts/odd_even_transposition_parallel.py b/sorts/odd_even_transposition_parallel.py index 
87b0e4d1e20f..9e0d228bdc5b 100644 --- a/sorts/odd_even_transposition_parallel.py +++ b/sorts/odd_even_transposition_parallel.py @@ -33,7 +33,7 @@ def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm - for i in range(0, 10): + for i in range(10): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() @@ -123,7 +123,7 @@ def odd_even_transposition(arr): p.start() # wait for the processes to end and write their values to the list - for p in range(0, len(result_pipe)): + for p in range(len(result_pipe)): arr[p] = result_pipe[p][0].recv() process_array_[p].join() return arr diff --git a/strings/rabin_karp.py b/strings/rabin_karp.py index 81ca611a76b3..532c689f8a97 100644 --- a/strings/rabin_karp.py +++ b/strings/rabin_karp.py @@ -38,7 +38,7 @@ def rabin_karp(pattern: str, text: str) -> bool: continue modulus_power = (modulus_power * alphabet_size) % modulus - for i in range(0, t_len - p_len + 1): + for i in range(t_len - p_len + 1): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: From 5a4ea233cd30723628fb184bc05f969ad463b0af Mon Sep 17 00:00:00 2001 From: Kotmin <70173732+Kotmin@users.noreply.github.com> Date: Mon, 4 Sep 2023 19:38:26 +0200 Subject: [PATCH 0949/1543] Style sigmoid function in harmony with pep guideness (#6677) * Style sigmoid function in harmony with pep guideness * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- neural_network/back_propagation_neural_network.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index 9dd112115f5e..bdd096b3f653 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -21,8 +21,8 @@ from matplotlib import pyplot as plt -def sigmoid(x): - return 1 / (1 + np.exp(-1 * x)) +def sigmoid(x: np.ndarray) -> np.ndarray: + return 1 / (1 + np.exp(-x)) class DenseLayer: From ac73be217863cc78af97bb86a9156ac38c4ae1e5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 08:27:05 +0530 Subject: [PATCH 0950/1543] [pre-commit.ci] pre-commit autoupdate (#9042) --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5c4e8579e116..c046789463cc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.286 + rev: v0.0.287 hooks: - id: ruff From 79b043d35ca266cf5053f5b62b2fe0f7bc6344d9 Mon Sep 17 00:00:00 2001 From: Rafael Zimmer Date: Tue, 5 Sep 2023 01:04:36 -0300 Subject: [PATCH 0951/1543] Texture analysis using Haralick Descriptors for Computer Vision tasks (#8004) * Create haralick_descriptors * Working on creating Unit Testing for Haralick Descriptors module * Type hinting for Haralick descriptors * Fixed docstrings, unit testing and formatting 
choices * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed line size formatting * Added final doctests * Changed main callable * Updated requirements.txt * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update computer_vision/haralick_descriptors.py No! What if the Kernel is empty? Example: >>> kernel = np.zeros((1)) >>> kernel or np.ones((3, 3)) array([[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]) Co-authored-by: Christian Clauss * Undone wrong commit * Update haralick_descriptors.py * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix ruff errors in haralick_descriptors.py * Add type hint to haralick_descriptors.py to fix ruff error * Update haralick_descriptors.py * Update haralick_descriptors.py * Update haralick_descriptors.py * Update haralick_descriptors.py * Try to fix mypy errors in haralick_descriptors.py * Update haralick_descriptors.py * Fix type hint in haralick_descriptors.py --------- Co-authored-by: Rafael Zimmer Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Tianyi Zheng --- computer_vision/haralick_descriptors.py | 431 ++++++++++++++++++++++++ requirements.txt | 1 + 2 files changed, 432 insertions(+) create mode 100644 computer_vision/haralick_descriptors.py diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py new file mode 100644 index 000000000000..1a86d84ea14b --- /dev/null +++ b/computer_vision/haralick_descriptors.py @@ -0,0 +1,431 @@ +""" +https://en.wikipedia.org/wiki/Image_texture +https://en.wikipedia.org/wiki/Co-occurrence_matrix#Application_to_image_analysis +""" +import imageio.v2 as imageio +import numpy as np + + +def root_mean_square_error(original: np.ndarray, reference: np.ndarray) -> float: + """Simple implementation of Root Mean Squared Error + for two N dimensional numpy arrays. + + Examples: + >>> root_mean_square_error(np.array([1, 2, 3]), np.array([1, 2, 3])) + 0.0 + >>> root_mean_square_error(np.array([1, 2, 3]), np.array([2, 2, 2])) + 0.816496580927726 + >>> root_mean_square_error(np.array([1, 2, 3]), np.array([6, 4, 2])) + 3.1622776601683795 + """ + return np.sqrt(((original - reference) ** 2).mean()) + + +def normalize_image( + image: np.ndarray, cap: float = 255.0, data_type: np.dtype = np.uint8 +) -> np.ndarray: + """ + Normalizes image in Numpy 2D array format, between ranges 0-cap, + as to fit uint8 type. + + Args: + image: 2D numpy array representing image as matrix, with values in any range + cap: Maximum cap amount for normalization + data_type: numpy data type to set output variable to + Returns: + return 2D numpy array of type uint8, corresponding to limited range matrix + + Examples: + >>> normalize_image(np.array([[1, 2, 3], [4, 5, 10]]), + ... cap=1.0, data_type=np.float64) + array([[0. , 0.11111111, 0.22222222], + [0.33333333, 0.44444444, 1. ]]) + >>> normalize_image(np.array([[4, 4, 3], [1, 7, 2]])) + array([[127, 127, 85], + [ 0, 255, 42]], dtype=uint8) + """ + normalized = (image - np.min(image)) / (np.max(image) - np.min(image)) * cap + return normalized.astype(data_type) + + +def normalize_array(array: np.ndarray, cap: float = 1) -> np.ndarray: + """Normalizes a 1D array, between ranges 0-cap. + + Args: + array: List containing values to be normalized between cap range. 
+ cap: Maximum cap amount for normalization. + Returns: + return 1D numpy array, corresponding to limited range array + + Examples: + >>> normalize_array(np.array([2, 3, 5, 7])) + array([0. , 0.2, 0.6, 1. ]) + >>> normalize_array(np.array([[5], [7], [11], [13]])) + array([[0. ], + [0.25], + [0.75], + [1. ]]) + """ + diff = np.max(array) - np.min(array) + return (array - np.min(array)) / (1 if diff == 0 else diff) * cap + + +def grayscale(image: np.ndarray) -> np.ndarray: + """ + Uses luminance weights to transform RGB channel to greyscale, by + taking the dot product between the channel and the weights. + + Example: + >>> grayscale(np.array([[[108, 201, 72], [255, 11, 127]], + ... [[56, 56, 56], [128, 255, 107]]])) + array([[158, 97], + [ 56, 200]], dtype=uint8) + """ + return np.dot(image[:, :, 0:3], [0.299, 0.587, 0.114]).astype(np.uint8) + + +def binarize(image: np.ndarray, threshold: float = 127.0) -> np.ndarray: + """ + Binarizes a grayscale image based on a given threshold value, + setting values to 1 or 0 accordingly. + + Examples: + >>> binarize(np.array([[128, 255], [101, 156]])) + array([[1, 1], + [0, 1]]) + >>> binarize(np.array([[0.07, 1], [0.51, 0.3]]), threshold=0.5) + array([[0, 1], + [1, 0]]) + """ + return np.where(image > threshold, 1, 0) + + +def transform(image: np.ndarray, kind: str, kernel: np.ndarray = None) -> np.ndarray: + """ + Simple image transformation using one of two available filter functions: + Erosion and Dilation. + + Args: + image: binarized input image, onto which to apply transformation + kind: Can be either 'erosion', in which case the :func:np.max + function is called, or 'dilation', when :func:np.min is used instead. + kernel: n x n kernel with shape < :attr:image.shape, + to be used when applying convolution to original image + + Returns: + returns a numpy array with same shape as input image, + corresponding to applied binary transformation. + + Examples: + >>> img = np.array([[1, 0.5], [0.2, 0.7]]) + >>> img = binarize(img, threshold=0.5) + >>> transform(img, 'erosion') + array([[1, 1], + [1, 1]], dtype=uint8) + >>> transform(img, 'dilation') + array([[0, 0], + [0, 0]], dtype=uint8) + """ + if kernel is None: + kernel = np.ones((3, 3)) + + if kind == "erosion": + constant = 1 + apply = np.max + else: + constant = 0 + apply = np.min + + center_x, center_y = (x // 2 for x in kernel.shape) + + # Use padded image when applying convolotion + # to not go out of bounds of the original the image + transformed = np.zeros(image.shape, dtype=np.uint8) + padded = np.pad(image, 1, "constant", constant_values=constant) + + for x in range(center_x, padded.shape[0] - center_x): + for y in range(center_y, padded.shape[1] - center_y): + center = padded[ + x - center_x : x + center_x + 1, y - center_y : y + center_y + 1 + ] + # Apply transformation method to the centered section of the image + transformed[x - center_x, y - center_y] = apply(center[kernel == 1]) + + return transformed + + +def opening_filter(image: np.ndarray, kernel: np.ndarray = None) -> np.ndarray: + """ + Opening filter, defined as the sequence of + erosion and then a dilation filter on the same image. 
+ + Examples: + >>> img = np.array([[1, 0.5], [0.2, 0.7]]) + >>> img = binarize(img, threshold=0.5) + >>> opening_filter(img) + array([[1, 1], + [1, 1]], dtype=uint8) + """ + if kernel is None: + np.ones((3, 3)) + + return transform(transform(image, "dilation", kernel), "erosion", kernel) + + +def closing_filter(image: np.ndarray, kernel: np.ndarray = None) -> np.ndarray: + """ + Opening filter, defined as the sequence of + dilation and then erosion filter on the same image. + + Examples: + >>> img = np.array([[1, 0.5], [0.2, 0.7]]) + >>> img = binarize(img, threshold=0.5) + >>> closing_filter(img) + array([[0, 0], + [0, 0]], dtype=uint8) + """ + if kernel is None: + kernel = np.ones((3, 3)) + return transform(transform(image, "erosion", kernel), "dilation", kernel) + + +def binary_mask( + image_gray: np.ndarray, image_map: np.ndarray +) -> tuple[np.ndarray, np.ndarray]: + """ + Apply binary mask, or thresholding based + on bit mask value (mapping mask is binary). + + Returns the mapped true value mask and its complementary false value mask. + + Example: + >>> img = np.array([[[108, 201, 72], [255, 11, 127]], + ... [[56, 56, 56], [128, 255, 107]]]) + >>> gray = grayscale(img) + >>> binary = binarize(gray) + >>> morphological = opening_filter(binary) + >>> binary_mask(gray, morphological) + (array([[1, 1], + [1, 1]], dtype=uint8), array([[158, 97], + [ 56, 200]], dtype=uint8)) + """ + true_mask, false_mask = image_gray.copy(), image_gray.copy() + true_mask[image_map == 1] = 1 + false_mask[image_map == 0] = 0 + + return true_mask, false_mask + + +def matrix_concurrency(image: np.ndarray, coordinate: tuple[int, int]) -> np.ndarray: + """ + Calculate sample co-occurrence matrix based on input image + as well as selected coordinates on image. + + Implementation is made using basic iteration, + as function to be performed (np.max) is non-linear and therefore + not callable on the frequency domain. + + Example: + >>> img = np.array([[[108, 201, 72], [255, 11, 127]], + ... [[56, 56, 56], [128, 255, 107]]]) + >>> gray = grayscale(img) + >>> binary = binarize(gray) + >>> morphological = opening_filter(binary) + >>> mask_1 = binary_mask(gray, morphological)[0] + >>> matrix_concurrency(mask_1, (0, 1)) + array([[0., 0.], + [0., 0.]]) + """ + matrix = np.zeros([np.max(image) + 1, np.max(image) + 1]) + + offset_x, offset_y = coordinate + + for x in range(1, image.shape[0] - 1): + for y in range(1, image.shape[1] - 1): + base_pixel = image[x, y] + offset_pixel = image[x + offset_x, y + offset_y] + + matrix[base_pixel, offset_pixel] += 1 + matrix_sum = np.sum(matrix) + return matrix / (1 if matrix_sum == 0 else matrix_sum) + + +def haralick_descriptors(matrix: np.ndarray) -> list[float]: + """Calculates all 8 Haralick descriptors based on co-occurence input matrix. + All descriptors are as follows: + Maximum probability, Inverse Difference, Homogeneity, Entropy, + Energy, Dissimilarity, Contrast and Correlation + + Args: + matrix: Co-occurence matrix to use as base for calculating descriptors. + + Returns: + Reverse ordered list of resulting descriptors + + Example: + >>> img = np.array([[[108, 201, 72], [255, 11, 127]], + ... 
[[56, 56, 56], [128, 255, 107]]]) + >>> gray = grayscale(img) + >>> binary = binarize(gray) + >>> morphological = opening_filter(binary) + >>> mask_1 = binary_mask(gray, morphological)[0] + >>> concurrency = matrix_concurrency(mask_1, (0, 1)) + >>> haralick_descriptors(concurrency) + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + """ + # Function np.indices could be used for bigger input types, + # but np.ogrid works just fine + i, j = np.ogrid[0 : matrix.shape[0], 0 : matrix.shape[1]] # np.indices() + + # Pre-calculate frequent multiplication and subtraction + prod = np.multiply(i, j) + sub = np.subtract(i, j) + + # Calculate numerical value of Maximum Probability + maximum_prob = np.max(matrix) + # Using the definition for each descriptor individually to calculate its matrix + correlation = prod * matrix + energy = np.power(matrix, 2) + contrast = matrix * np.power(sub, 2) + + dissimilarity = matrix * np.abs(sub) + inverse_difference = matrix / (1 + np.abs(sub)) + homogeneity = matrix / (1 + np.power(sub, 2)) + entropy = -(matrix[matrix > 0] * np.log(matrix[matrix > 0])) + + # Sum values for descriptors ranging from the first one to the last, + # as all are their respective origin matrix and not the resulting value yet. + return [ + maximum_prob, + correlation.sum(), + energy.sum(), + contrast.sum(), + dissimilarity.sum(), + inverse_difference.sum(), + homogeneity.sum(), + entropy.sum(), + ] + + +def get_descriptors( + masks: tuple[np.ndarray, np.ndarray], coordinate: tuple[int, int] +) -> np.ndarray: + """ + Calculate all Haralick descriptors for a sequence of + different co-occurrence matrices, given input masks and coordinates. + + Example: + >>> img = np.array([[[108, 201, 72], [255, 11, 127]], + ... [[56, 56, 56], [128, 255, 107]]]) + >>> gray = grayscale(img) + >>> binary = binarize(gray) + >>> morphological = opening_filter(binary) + >>> get_descriptors(binary_mask(gray, morphological), (0, 1)) + array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]) + """ + descriptors = np.array( + [haralick_descriptors(matrix_concurrency(mask, coordinate)) for mask in masks] + ) + + # Concatenate each individual descriptor into + # one single list containing sequence of descriptors + return np.concatenate(descriptors, axis=None) + + +def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32: + """ + Simple method for calculating the euclidean distance between two points, + with type np.ndarray. + + Example: + >>> a = np.array([1, 0, -2]) + >>> b = np.array([2, -1, 1]) + >>> euclidean(a, b) + 3.3166247903554 + """ + return np.sqrt(np.sum(np.square(point_1 - point_2))) + + +def get_distances(descriptors: np.ndarray, base: int) -> list[tuple[int, float]]: + """ + Calculate all Euclidean distances between a selected base descriptor + and all other Haralick descriptors + The resulting comparison is return in decreasing order, + showing which descriptor is the most similar to the selected base. + + Args: + descriptors: Haralick descriptors to compare with base index + base: Haralick descriptor index to use as base when calculating respective + euclidean distance to other descriptors. + + Returns: + Ordered distances between descriptors + + Example: + >>> index = 1 + >>> img = np.array([[[108, 201, 72], [255, 11, 127]], + ... [[56, 56, 56], [128, 255, 107]]]) + >>> gray = grayscale(img) + >>> binary = binarize(gray) + >>> morphological = opening_filter(binary) + >>> get_distances(get_descriptors( + ... binary_mask(gray, morphological), (0, 1)), + ... 
index) + [(0, 0.0), (1, 0.0), (2, 0.0), (3, 0.0), (4, 0.0), (5, 0.0), \ +(6, 0.0), (7, 0.0), (8, 0.0), (9, 0.0), (10, 0.0), (11, 0.0), (12, 0.0), \ +(13, 0.0), (14, 0.0), (15, 0.0)] + """ + distances = np.array( + [euclidean(descriptor, descriptors[base]) for descriptor in descriptors] + ) + # Normalize distances between range [0, 1] + normalized_distances: list[float] = normalize_array(distances, 1).tolist() + enum_distances = list(enumerate(normalized_distances)) + enum_distances.sort(key=lambda tup: tup[1], reverse=True) + return enum_distances + + +if __name__ == "__main__": + # Index to compare haralick descriptors to + index = int(input()) + q_value_list = [int(value) for value in input().split()] + q_value = (q_value_list[0], q_value_list[1]) + + # Format is the respective filter to apply, + # can be either 1 for the opening filter or else for the closing + parameters = {"format": int(input()), "threshold": int(input())} + + # Number of images to perform methods on + b_number = int(input()) + + files, descriptors = [], [] + + for _ in range(b_number): + file = input().rstrip() + files.append(file) + + # Open given image and calculate morphological filter, + # respective masks and correspondent Harralick Descriptors. + image = imageio.imread(file).astype(np.float32) + gray = grayscale(image) + threshold = binarize(gray, parameters["threshold"]) + + morphological = ( + opening_filter(threshold) + if parameters["format"] == 1 + else closing_filter(threshold) + ) + masks = binary_mask(gray, morphological) + descriptors.append(get_descriptors(masks, q_value)) + + # Transform ordered distances array into a sequence of indexes + # corresponding to original file position + distances = get_distances(np.array(descriptors), index) + indexed_distances = np.array(distances).astype(np.uint8)[:, 0] + + # Finally, print distances considering the Haralick descriptions from the base + # file to all other images using the morphology method of choice. 
+ print(f"Query: {files[index]}") + print("Ranking:") + for idx, file_idx in enumerate(indexed_distances): + print(f"({idx}) {files[file_idx]}", end="\n") diff --git a/requirements.txt b/requirements.txt index 2702523d542e..1128e9d66820 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ beautifulsoup4 fake_useragent +imageio keras lxml matplotlib From 72f600036511c4999fa56bf007bf92ec465e94d7 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Tue, 5 Sep 2023 05:49:00 +0100 Subject: [PATCH 0952/1543] Fix get amazon product data erroring due to whitespace in headers (#9009) * updating DIRECTORY.md * fix(get-amazon-product-data): Remove whitespace in headers * refactor(get-amazon-product-data): Don't print to_csv --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- web_programming/get_amazon_product_data.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/web_programming/get_amazon_product_data.py b/web_programming/get_amazon_product_data.py index c796793f2205..a16175688667 100644 --- a/web_programming/get_amazon_product_data.py +++ b/web_programming/get_amazon_product_data.py @@ -19,11 +19,13 @@ def get_amazon_product_data(product: str = "laptop") -> DataFrame: """ url = f"https://www.amazon.in/laptop/s?k={product}" header = { - "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 - (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""", + "User-Agent": ( + "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36" + "(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36" + ), "Accept-Language": "en-US, en;q=0.5", } - soup = BeautifulSoup(requests.get(url, headers=header).text) + soup = BeautifulSoup(requests.get(url, headers=header).text, features="lxml") # Initialize a Pandas dataframe with the column titles data_frame = DataFrame( columns=[ @@ -74,8 +76,8 @@ def get_amazon_product_data(product: str = "laptop") -> DataFrame: except ValueError: discount = float("nan") except AttributeError: - pass - data_frame.loc[len(data_frame.index)] = [ + continue + data_frame.loc[str(len(data_frame.index))] = [ product_title, product_link, product_price, From 9e4f9962a02ae584b392670a13d54ef8731e8f7f Mon Sep 17 00:00:00 2001 From: David Ekong <66387173+davidekong@users.noreply.github.com> Date: Wed, 6 Sep 2023 15:00:09 +0100 Subject: [PATCH 0953/1543] Created harshad_numbers.py (#9023) * Created harshad_numbers.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update harshad_numbers.py Fixed a few errors * Update harshad_numbers.py Added function type hints * Update harshad_numbers.py Fixed depreciated Tuple and List usage * Update harshad_numbers.py Fixed incompatible types in assignments * Update harshad_numbers.py Fixed incompatible type assignments * Update maths/harshad_numbers.py Co-authored-by: Tianyi Zheng * Update maths/harshad_numbers.py Co-authored-by: Tianyi Zheng * Raised Value Error for negative inputs * Update maths/harshad_numbers.py Co-authored-by: Tianyi Zheng * Update maths/harshad_numbers.py Co-authored-by: Tianyi Zheng * Update maths/harshad_numbers.py Co-authored-by: Tianyi Zheng * Update harshad_numbers.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update harshad_numbers.py * Update harshad_numbers.py * Update harshad_numbers.py * Update harshad_numbers.py Added doc test to int_to_base, fixed nested loop, other minor changes * [pre-commit.ci] auto fixes from 
pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/harshad_numbers.py | 158 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 158 insertions(+) create mode 100644 maths/harshad_numbers.py diff --git a/maths/harshad_numbers.py b/maths/harshad_numbers.py new file mode 100644 index 000000000000..050c69e0bd15 --- /dev/null +++ b/maths/harshad_numbers.py @@ -0,0 +1,158 @@ +""" +A harshad number (or more specifically an n-harshad number) is a number that's +divisible by the sum of its digits in some given base n. +Reference: https://en.wikipedia.org/wiki/Harshad_number +""" + + +def int_to_base(number: int, base: int) -> str: + """ + Convert a given positive decimal integer to base 'base'. + Where 'base' ranges from 2 to 36. + + Examples: + >>> int_to_base(23, 2) + '10111' + >>> int_to_base(58, 5) + '213' + >>> int_to_base(167, 16) + 'A7' + >>> # bases below 2 and beyond 36 will error + >>> int_to_base(98, 1) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + >>> int_to_base(98, 37) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + """ + + if base < 2 or base > 36: + raise ValueError("'base' must be between 2 and 36 inclusive") + + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + result = "" + + if number < 0: + raise ValueError("number must be a positive integer") + + while number > 0: + number, remainder = divmod(number, base) + result = digits[remainder] + result + + if result == "": + result = "0" + + return result + + +def sum_of_digits(num: int, base: int) -> str: + """ + Calculate the sum of digit values in a positive integer + converted to the given 'base'. + Where 'base' ranges from 2 to 36. + + Examples: + >>> sum_of_digits(103, 12) + '13' + >>> sum_of_digits(1275, 4) + '30' + >>> sum_of_digits(6645, 2) + '1001' + >>> # bases below 2 and beyond 36 will error + >>> sum_of_digits(543, 1) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + >>> sum_of_digits(543, 37) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + """ + + if base < 2 or base > 36: + raise ValueError("'base' must be between 2 and 36 inclusive") + + num_str = int_to_base(num, base) + res = sum(int(char, base) for char in num_str) + res_str = int_to_base(res, base) + return res_str + + +def harshad_numbers_in_base(limit: int, base: int) -> list[str]: + """ + Finds all Harshad numbers smaller than num in base 'base'. + Where 'base' ranges from 2 to 36. + + Examples: + >>> harshad_numbers_in_base(15, 2) + ['1', '10', '100', '110', '1000', '1010', '1100'] + >>> harshad_numbers_in_base(12, 34) + ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B'] + >>> harshad_numbers_in_base(12, 4) + ['1', '2', '3', '10', '12', '20', '21'] + >>> # bases below 2 and beyond 36 will error + >>> harshad_numbers_in_base(234, 37) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + >>> harshad_numbers_in_base(234, 1) + Traceback (most recent call last): + ... 
+ ValueError: 'base' must be between 2 and 36 inclusive + """ + + if base < 2 or base > 36: + raise ValueError("'base' must be between 2 and 36 inclusive") + + if limit < 0: + return [] + + numbers = [ + int_to_base(i, base) + for i in range(1, limit) + if i % int(sum_of_digits(i, base), base) == 0 + ] + + return numbers + + +def is_harshad_number_in_base(num: int, base: int) -> bool: + """ + Determines whether n in base 'base' is a harshad number. + Where 'base' ranges from 2 to 36. + + Examples: + >>> is_harshad_number_in_base(18, 10) + True + >>> is_harshad_number_in_base(21, 10) + True + >>> is_harshad_number_in_base(-21, 5) + False + >>> # bases below 2 and beyond 36 will error + >>> is_harshad_number_in_base(45, 37) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + >>> is_harshad_number_in_base(45, 1) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + """ + + if base < 2 or base > 36: + raise ValueError("'base' must be between 2 and 36 inclusive") + + if num < 0: + return False + + n = int_to_base(num, base) + d = sum_of_digits(num, base) + return int(n, base) % int(d, base) == 0 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 153c35eac02b5f043824dfa72e071d2b3f756607 Mon Sep 17 00:00:00 2001 From: Adarsh Acharya <132294330+AdarshAcharya5@users.noreply.github.com> Date: Thu, 7 Sep 2023 00:46:51 +0530 Subject: [PATCH 0954/1543] Added Scaled Exponential Linear Unit Activation Function (#9027) * Added Scaled Exponential Linear Unit Activation Function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update scaled_exponential_linear_unit.py * Update scaled_exponential_linear_unit.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update scaled_exponential_linear_unit.py * Update scaled_exponential_linear_unit.py * Update scaled_exponential_linear_unit.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update scaled_exponential_linear_unit.py * Update scaled_exponential_linear_unit.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../scaled_exponential_linear_unit.py | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 neural_network/activation_functions/scaled_exponential_linear_unit.py diff --git a/neural_network/activation_functions/scaled_exponential_linear_unit.py b/neural_network/activation_functions/scaled_exponential_linear_unit.py new file mode 100644 index 000000000000..f91dc6852136 --- /dev/null +++ b/neural_network/activation_functions/scaled_exponential_linear_unit.py @@ -0,0 +1,44 @@ +""" +Implements the Scaled Exponential Linear Unit or SELU function. +The function takes a vector of K real numbers and two real numbers +alpha(default = 1.6732) & lambda (default = 1.0507) as input and +then applies the SELU function to each element of the vector. +SELU is a self-normalizing activation function. It is a variant +of the ELU. The main advantage of SELU is that we can be sure +that the output will always be standardized due to its +self-normalizing behavior. That means there is no need to +include Batch-Normalization layers. 
+References : +https://iq.opengenus.org/scaled-exponential-linear-unit/ +""" + +import numpy as np + + +def scaled_exponential_linear_unit( + vector: np.ndarray, alpha: float = 1.6732, lambda_: float = 1.0507 +) -> np.ndarray: + """ + Applies the Scaled Exponential Linear Unit function to each element of the vector. + Parameters : + vector : np.ndarray + alpha : float (default = 1.6732) + lambda_ : float (default = 1.0507) + + Returns : np.ndarray + Formula : f(x) = lambda_ * x if x > 0 + lambda_ * alpha * (e**x - 1) if x <= 0 + Examples : + >>> scaled_exponential_linear_unit(vector=np.array([1.3, 3.7, 2.4])) + array([1.36591, 3.88759, 2.52168]) + + >>> scaled_exponential_linear_unit(vector=np.array([1.3, 4.7, 8.2])) + array([1.36591, 4.93829, 8.61574]) + """ + return lambda_ * np.where(vector > 0, vector, alpha * (np.exp(vector) - 1)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0cae02451a214cd70b36f2bf0b7a043c25aea99d Mon Sep 17 00:00:00 2001 From: Rohan Saraogi <62804340+r0sa2@users.noreply.github.com> Date: Thu, 7 Sep 2023 03:52:36 -0400 Subject: [PATCH 0955/1543] Added nth_sgonal_num.py (#8753) * Added nth_sgonal_num.py * Update and rename nth_sgonal_num.py to polygonal_numbers.py --------- Co-authored-by: Tianyi Zheng --- maths/polygonal_numbers.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 maths/polygonal_numbers.py diff --git a/maths/polygonal_numbers.py b/maths/polygonal_numbers.py new file mode 100644 index 000000000000..7a7dc91acb26 --- /dev/null +++ b/maths/polygonal_numbers.py @@ -0,0 +1,32 @@ +def polygonal_num(num: int, sides: int) -> int: + """ + Returns the `num`th `sides`-gonal number. It is assumed that `num` >= 0 and + `sides` >= 3 (see for reference https://en.wikipedia.org/wiki/Polygonal_number). + + >>> polygonal_num(0, 3) + 0 + >>> polygonal_num(3, 3) + 6 + >>> polygonal_num(5, 4) + 25 + >>> polygonal_num(2, 5) + 5 + >>> polygonal_num(-1, 0) + Traceback (most recent call last): + ... + ValueError: Invalid input: num must be >= 0 and sides must be >= 3. + >>> polygonal_num(0, 2) + Traceback (most recent call last): + ... + ValueError: Invalid input: num must be >= 0 and sides must be >= 3. 
+ """ + if num < 0 or sides < 3: + raise ValueError("Invalid input: num must be >= 0 and sides must be >= 3.") + + return ((sides - 2) * num**2 - (sides - 4) * num) // 2 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c9b4b8002f24a33ea49c16dff5ef9cbebbd64b1d Mon Sep 17 00:00:00 2001 From: Saksham Saha Date: Fri, 8 Sep 2023 17:50:28 +0530 Subject: [PATCH 0956/1543] Added an add at position subroutiune to linked list (#9020) * added addAtPosition to simple linked list * added addAtPosition to simple linked list * modified the add function to take an optional position command * fixed type safety errors: * fixed type safety errors: * fixed type safety errors: * fixed type safety errors: * fixed size error * fixed size error * added doctest and updates the else after checking if posiiton argument less than 0 or not * added doctest and updates the else after checking if posiiton argument less than 0 or not * fixed the contributing.md mistake * added doctest for out of bounds position value, both negative and positive --- data_structures/linked_list/__init__.py | 52 ++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/data_structures/linked_list/__init__.py b/data_structures/linked_list/__init__.py index 56b0e51baa93..225113f72cee 100644 --- a/data_structures/linked_list/__init__.py +++ b/data_structures/linked_list/__init__.py @@ -21,8 +21,56 @@ def __init__(self) -> None: self.head: Node | None = None self.size = 0 - def add(self, item: Any) -> None: - self.head = Node(item, self.head) + def add(self, item: Any, position: int = 0) -> None: + """ + Add an item to the LinkedList at the specified position. + Default position is 0 (the head). + + Args: + item (Any): The item to add to the LinkedList. + position (int, optional): The position at which to add the item. + Defaults to 0. + + Raises: + ValueError: If the position is negative or out of bounds. + + >>> linked_list = LinkedList() + >>> linked_list.add(1) + >>> linked_list.add(2) + >>> linked_list.add(3) + >>> linked_list.add(4, 2) + >>> print(linked_list) + 3 --> 2 --> 4 --> 1 + + # Test adding to a negative position + >>> linked_list.add(5, -3) + Traceback (most recent call last): + ... + ValueError: Position must be non-negative + + # Test adding to an out-of-bounds position + >>> linked_list.add(5,7) + Traceback (most recent call last): + ... 
+ ValueError: Out of bounds + >>> linked_list.add(5, 4) + >>> print(linked_list) + 3 --> 2 --> 4 --> 1 --> 5 + """ + if position < 0: + raise ValueError("Position must be non-negative") + + if position == 0 or self.head is None: + new_node = Node(item, self.head) + self.head = new_node + else: + current = self.head + for _ in range(position - 1): + current = current.next + if current is None: + raise ValueError("Out of bounds") + new_node = Node(item, current.next) + current.next = new_node self.size += 1 def remove(self) -> Any: From 5a5ca06944148ad7232dd61dcf7c609c0c74c252 Mon Sep 17 00:00:00 2001 From: Saransh Chopra Date: Sat, 9 Sep 2023 23:28:43 +0530 Subject: [PATCH 0957/1543] Update `actions/checkout` with `fetch-depth: 0` (#9046) * Update `actions/checkout` with `fetch-depth: 0` * Update directory_writer.yml * Create junk.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update directory_writer.yml * Update directory_writer.yml --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/directory_writer.yml | 4 +++- arithmetic_analysis/junk.py | 0 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 arithmetic_analysis/junk.py diff --git a/.github/workflows/directory_writer.yml b/.github/workflows/directory_writer.yml index 331962cef11e..702c15f1e29b 100644 --- a/.github/workflows/directory_writer.yml +++ b/.github/workflows/directory_writer.yml @@ -6,7 +6,9 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v1 # v1, NOT v2 or v3 + - uses: actions/checkout@v4 + with: + fetch-depth: 0 - uses: actions/setup-python@v4 with: python-version: 3.x diff --git a/arithmetic_analysis/junk.py b/arithmetic_analysis/junk.py new file mode 100644 index 000000000000..e69de29bb2d1 From 97e2de0763d75b1875428d87818ef111481d5953 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Mon, 11 Sep 2023 15:11:22 +0500 Subject: [PATCH 0958/1543] Euler 070 partial replacement of numpy loops. (#9055) * Euler 070 partial replacement of numpy loops. 
* Update project_euler/problem_070/sol1.py * project_euler.yml: Upgrade actions/checkout@v4 and add numpy * Update project_euler.yml --------- Co-authored-by: Christian Clauss --- .github/workflows/project_euler.yml | 8 ++++---- project_euler/problem_070/sol1.py | 13 ++++++------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index 460938219c14..7bbccf76e192 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -14,26 +14,26 @@ jobs: project-euler: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: 3.x - name: Install pytest and pytest-cov run: | python -m pip install --upgrade pip - python -m pip install --upgrade pytest pytest-cov + python -m pip install --upgrade numpy pytest pytest-cov - run: pytest --doctest-modules --cov-report=term-missing:skip-covered --cov=project_euler/ project_euler/ validate-solutions: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: 3.x - name: Install pytest and requests run: | python -m pip install --upgrade pip - python -m pip install --upgrade pytest requests + python -m pip install --upgrade numpy pytest requests - run: pytest scripts/validate_solutions.py env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/project_euler/problem_070/sol1.py b/project_euler/problem_070/sol1.py index 57a6c1916374..f1114a280a31 100644 --- a/project_euler/problem_070/sol1.py +++ b/project_euler/problem_070/sol1.py @@ -30,6 +30,8 @@ """ from __future__ import annotations +import numpy as np + def get_totients(max_one: int) -> list[int]: """ @@ -42,17 +44,14 @@ def get_totients(max_one: int) -> list[int]: >>> get_totients(10) [0, 1, 1, 2, 2, 4, 2, 6, 4, 6] """ - totients = [0] * max_one - - for i in range(max_one): - totients[i] = i + totients = np.arange(max_one) for i in range(2, max_one): if totients[i] == i: - for j in range(i, max_one, i): - totients[j] -= totients[j] // i + x = np.arange(i, max_one, i) # array of indexes to select + totients[x] -= totients[x] // i - return totients + return totients.tolist() def has_same_digits(num1: int, num2: int) -> bool: From 4246da387f8b48da5147320344d336886787aea1 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Mon, 11 Sep 2023 16:05:32 +0500 Subject: [PATCH 0959/1543] jacobi_iteration_method.py the use of vector operations, which reduces the calculation time by dozens of times (#8938) * Replaced loops in jacobi_iteration_method function with vector operations. That gives a reduction in the time for calculating the algorithm. * Replaced loops in jacobi_iteration_method function with vector operations. That gives a reduction in the time for calculating the algorithm. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Delete main.py * Update jacobi_iteration_method.py Changed a line that was too long. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update jacobi_iteration_method.py Changed the type of the returned list as required. * Update jacobi_iteration_method.py Replaced init_val with new_val. * Update jacobi_iteration_method.py Fixed bug: init_val: list[int] to list[float]. Since the numbers are fractional: init_val = [0.5, -0.5, -0.5]. 
* Update jacobi_iteration_method.py Changed comments, made variable names more understandable. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update jacobi_iteration_method.py left the old algorithm commented out, as it clearly shows what is being done. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update jacobi_iteration_method.py Edits upon request. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../jacobi_iteration_method.py | 34 +++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py index dba8a9ff44d3..44c52dd44640 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -12,7 +12,7 @@ def jacobi_iteration_method( coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], - init_val: list[int], + init_val: list[float], iterations: int, ) -> list[float]: """ @@ -115,6 +115,7 @@ def jacobi_iteration_method( strictly_diagonally_dominant(table) + """ # Iterates the whole matrix for given number of times for _ in range(iterations): new_val = [] @@ -130,8 +131,37 @@ def jacobi_iteration_method( temp = (temp + val) / denom new_val.append(temp) init_val = new_val + """ + + # denominator - a list of values along the diagonal + denominator = np.diag(coefficient_matrix) + + # val_last - values of the last column of the table array + val_last = table[:, -1] + + # masks - boolean mask of all strings without diagonal + # elements array coefficient_matrix + masks = ~np.eye(coefficient_matrix.shape[0], dtype=bool) + + # no_diagonals - coefficient_matrix array values without diagonal elements + no_diagonals = coefficient_matrix[masks].reshape(-1, rows - 1) + + # Here we get 'i_col' - these are the column numbers, for each row + # without diagonal elements, except for the last column. + i_row, i_col = np.where(masks) + ind = i_col.reshape(-1, rows - 1) + + #'i_col' is converted to a two-dimensional list 'ind', which will be + # used to make selections from 'init_val' ('arr' array see below). 
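# [Illustrative aside, not a line of this patch] For a 3x3 coefficient_matrix,
# masks = ~np.eye(3, dtype=bool) marks the off-diagonal entries, np.where(masks)
# returns i_col = [1, 2, 0, 2, 0, 1], and ind = i_col.reshape(-1, 2) gives
# [[1, 2], [0, 2], [0, 1]]. np.take(init_val, ind) therefore gathers, for each
# equation i, the current estimates of every unknown except x_i - exactly the
# values that the commented-out inner loop above visited one element at a time.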
+ + # Iterates the whole matrix for given number of times + for _ in range(iterations): + arr = np.take(init_val, ind) + sum_product_rows = np.sum((-1) * no_diagonals * arr, axis=1) + new_val = (sum_product_rows + val_last) / denominator + init_val = new_val - return [float(i) for i in new_val] + return new_val.tolist() # Checks if the given matrix is strictly diagonally dominant From 1488cdea708485eb1d81c73126eab13cb9b04a47 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 12 Sep 2023 01:56:50 +0200 Subject: [PATCH 0960/1543] [pre-commit.ci] pre-commit autoupdate (#9056) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.287 → v0.0.288](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.287...v0.0.288) - [github.com/psf/black: 23.7.0 → 23.9.1](https://github.com/psf/black/compare/23.7.0...23.9.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c046789463cc..722b408ee9e9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.287 + rev: v0.0.288 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.7.0 + rev: 23.9.1 hooks: - id: black diff --git a/DIRECTORY.md b/DIRECTORY.md index 43da91cb818e..1b802564f939 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -5,6 +5,7 @@ * [In Static Equilibrium](arithmetic_analysis/in_static_equilibrium.py) * [Intersection](arithmetic_analysis/intersection.py) * [Jacobi Iteration Method](arithmetic_analysis/jacobi_iteration_method.py) + * [Junk](arithmetic_analysis/junk.py) * [Lu Decomposition](arithmetic_analysis/lu_decomposition.py) * [Newton Forward Interpolation](arithmetic_analysis/newton_forward_interpolation.py) * [Newton Method](arithmetic_analysis/newton_method.py) @@ -133,6 +134,7 @@ ## Computer Vision * [Cnn Classification](computer_vision/cnn_classification.py) * [Flip Augmentation](computer_vision/flip_augmentation.py) + * [Haralick Descriptors](computer_vision/haralick_descriptors.py) * [Harris Corner](computer_vision/harris_corner.py) * [Horn Schunck](computer_vision/horn_schunck.py) * [Mean Threshold](computer_vision/mean_threshold.py) @@ -586,6 +588,7 @@ * [Greedy Coin Change](maths/greedy_coin_change.py) * [Hamming Numbers](maths/hamming_numbers.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) + * [Harshad Numbers](maths/harshad_numbers.py) * [Hexagonal Number](maths/hexagonal_number.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) * [Interquartile Range](maths/interquartile_range.py) @@ -626,6 +629,7 @@ * [Pi Monte Carlo Estimation](maths/pi_monte_carlo_estimation.py) * [Points Are Collinear 3D](maths/points_are_collinear_3d.py) * [Pollard Rho](maths/pollard_rho.py) + * [Polygonal Numbers](maths/polygonal_numbers.py) * [Polynomial Evaluation](maths/polynomial_evaluation.py) * Polynomials * [Single Indeterminate Operations](maths/polynomials/single_indeterminate_operations.py) @@ -712,6 +716,7 @@ * Activation Functions * [Exponential Linear 
Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) + * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Perceptron](neural_network/perceptron.py) From fbad85d3ecbbb826a5891807c823149d38bbaed3 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 16 Sep 2023 18:12:31 -0400 Subject: [PATCH 0961/1543] Delete empty junk file (#9062) * updating DIRECTORY.md * updating DIRECTORY.md * Delete empty junk file * updating DIRECTORY.md * Fix ruff errors * Fix more ruff errors --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - arithmetic_analysis/junk.py | 0 computer_vision/haralick_descriptors.py | 8 +++++--- conversions/convert_number_to_words.py | 6 +++--- graphs/tarjans_scc.py | 2 +- 5 files changed, 9 insertions(+), 8 deletions(-) delete mode 100644 arithmetic_analysis/junk.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 1b802564f939..d81e4ec1ee83 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -5,7 +5,6 @@ * [In Static Equilibrium](arithmetic_analysis/in_static_equilibrium.py) * [Intersection](arithmetic_analysis/intersection.py) * [Jacobi Iteration Method](arithmetic_analysis/jacobi_iteration_method.py) - * [Junk](arithmetic_analysis/junk.py) * [Lu Decomposition](arithmetic_analysis/lu_decomposition.py) * [Newton Forward Interpolation](arithmetic_analysis/newton_forward_interpolation.py) * [Newton Method](arithmetic_analysis/newton_method.py) diff --git a/arithmetic_analysis/junk.py b/arithmetic_analysis/junk.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py index 1a86d84ea14b..413cea304f6c 100644 --- a/computer_vision/haralick_descriptors.py +++ b/computer_vision/haralick_descriptors.py @@ -100,7 +100,9 @@ def binarize(image: np.ndarray, threshold: float = 127.0) -> np.ndarray: return np.where(image > threshold, 1, 0) -def transform(image: np.ndarray, kind: str, kernel: np.ndarray = None) -> np.ndarray: +def transform( + image: np.ndarray, kind: str, kernel: np.ndarray | None = None +) -> np.ndarray: """ Simple image transformation using one of two available filter functions: Erosion and Dilation. @@ -154,7 +156,7 @@ def transform(image: np.ndarray, kind: str, kernel: np.ndarray = None) -> np.nda return transformed -def opening_filter(image: np.ndarray, kernel: np.ndarray = None) -> np.ndarray: +def opening_filter(image: np.ndarray, kernel: np.ndarray | None = None) -> np.ndarray: """ Opening filter, defined as the sequence of erosion and then a dilation filter on the same image. @@ -172,7 +174,7 @@ def opening_filter(image: np.ndarray, kernel: np.ndarray = None) -> np.ndarray: return transform(transform(image, "dilation", kernel), "erosion", kernel) -def closing_filter(image: np.ndarray, kernel: np.ndarray = None) -> np.ndarray: +def closing_filter(image: np.ndarray, kernel: np.ndarray | None = None) -> np.ndarray: """ Opening filter, defined as the sequence of dilation and then erosion filter on the same image. 
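Note (illustrative sketch, not part of any patch in this series): the three haralick_descriptors.py hunks above change `kernel: np.ndarray = None` to `kernel: np.ndarray | None = None`, because a default of None is only type-correct when None is part of the annotated type. Below is a minimal, self-contained example of the same pattern, assuming the Python 3.10+ union syntax used in the repository; the function name `blur_demo` and its placeholder arithmetic are invented for illustration and do not come from these commits.

import numpy as np


def blur_demo(image: np.ndarray, kernel: np.ndarray | None = None) -> np.ndarray:
    # Fall back to a 3x3 averaging kernel when the caller passes nothing.
    # Annotating the parameter as `np.ndarray | None` makes the None default
    # explicit to type checkers and linters such as mypy and ruff.
    if kernel is None:
        kernel = np.ones((3, 3)) / 9.0
    # Placeholder operation: scale the image by the kernel's total weight.
    return image * kernel.sum()


if __name__ == "__main__":
    print(blur_demo(np.eye(2)))  # uses the default 3x3 kernel
    print(blur_demo(np.eye(2), np.ones((5, 5))))  # caller-supplied kernel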
diff --git a/conversions/convert_number_to_words.py b/conversions/convert_number_to_words.py index 0e4405319f1f..0c428928b31d 100644 --- a/conversions/convert_number_to_words.py +++ b/conversions/convert_number_to_words.py @@ -54,7 +54,7 @@ def max_value(cls, system: str) -> int: class NumberWords(Enum): - ONES: ClassVar = { + ONES: ClassVar[dict[int, str]] = { 0: "", 1: "one", 2: "two", @@ -67,7 +67,7 @@ class NumberWords(Enum): 9: "nine", } - TEENS: ClassVar = { + TEENS: ClassVar[dict[int, str]] = { 0: "ten", 1: "eleven", 2: "twelve", @@ -80,7 +80,7 @@ class NumberWords(Enum): 9: "nineteen", } - TENS: ClassVar = { + TENS: ClassVar[dict[int, str]] = { 2: "twenty", 3: "thirty", 4: "forty", diff --git a/graphs/tarjans_scc.py b/graphs/tarjans_scc.py index 30f8ca8a204f..dfd2e52704d5 100644 --- a/graphs/tarjans_scc.py +++ b/graphs/tarjans_scc.py @@ -77,7 +77,7 @@ def create_graph(n, edges): n_vertices = 7 source = [0, 0, 1, 2, 3, 3, 4, 4, 6] target = [1, 3, 2, 0, 1, 4, 5, 6, 5] - edges = [(u, v) for u, v in zip(source, target)] + edges = list(zip(source, target)) g = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g) From dc50add8a78ebf34bc7bb050c1a0e61d207b9544 Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Sat, 23 Sep 2023 14:21:36 +0530 Subject: [PATCH 0962/1543] Update xgboost_regressor.py (#9078) * Update xgboost_regressor.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- machine_learning/xgboost_regressor.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/machine_learning/xgboost_regressor.py b/machine_learning/xgboost_regressor.py index 023984fc1f59..a540e3ab03eb 100644 --- a/machine_learning/xgboost_regressor.py +++ b/machine_learning/xgboost_regressor.py @@ -27,7 +27,9 @@ def xgboost( ... 
1.14300000e+03, 2.60958904e+00, 3.67800000e+01, -1.19780000e+02]])) array([[1.1139996]], dtype=float32) """ - xgb = XGBRegressor(verbosity=0, random_state=42) + xgb = XGBRegressor( + verbosity=0, random_state=42, tree_method="exact", base_score=0.5 + ) xgb.fit(features, target) # Predict target for test data predictions = xgb.predict(test_features) From b203150ac481743a6d8c1ef01091712a54dfbf6c Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Sat, 23 Sep 2023 10:53:09 +0200 Subject: [PATCH 0963/1543] Fix typos (#9076) * fix typo * fix typo * fix typos * fix typo --- cellular_automata/langtons_ant.py | 2 +- compression/README.md | 4 ++-- hashes/README.md | 4 ++-- sorts/README.md | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cellular_automata/langtons_ant.py b/cellular_automata/langtons_ant.py index 983c626546ad..9847c50a5c3e 100644 --- a/cellular_automata/langtons_ant.py +++ b/cellular_automata/langtons_ant.py @@ -30,7 +30,7 @@ def __init__(self, width: int, height: int) -> None: self.board = [[True] * width for _ in range(height)] self.ant_position: tuple[int, int] = (width // 2, height // 2) - # Initially pointing left (similar to the the wikipedia image) + # Initially pointing left (similar to the wikipedia image) # (0 = 0° | 1 = 90° | 2 = 180 ° | 3 = 270°) self.ant_direction: int = 3 diff --git a/compression/README.md b/compression/README.md index cf54ea986175..bad7ae1a2f76 100644 --- a/compression/README.md +++ b/compression/README.md @@ -1,9 +1,9 @@ # Compression Data compression is everywhere, you need it to store data without taking too much space. -Either the compression lose some data (then we talk about lossy compression, such as .jpg) or it does not (and then it is lossless compression, such as .png) +Either the compression loses some data (then we talk about lossy compression, such as .jpg) or it does not (and then it is lossless compression, such as .png) -Lossless compression is mainly used for archive purpose as it allow storing data without losing information about the file archived. On the other hand, lossy compression is used for transfer of file where quality isn't necessarily what is required (i.e: images on Twitter). +Lossless compression is mainly used for archive purpose as it allows storing data without losing information about the file archived. On the other hand, lossy compression is used for transfer of file where quality isn't necessarily what is required (i.e: images on Twitter). * * diff --git a/hashes/README.md b/hashes/README.md index 6df9a2fb6360..0237260eaa67 100644 --- a/hashes/README.md +++ b/hashes/README.md @@ -7,11 +7,11 @@ Unlike encryption, which is intended to protect data in transit, hashing is inte This is one of the first algorithms that has gained widespread acceptance. MD5 is hashing algorithm made by Ray Rivest that is known to suffer vulnerabilities. It was created in 1992 as the successor to MD4. Currently MD6 is in the works, but as of 2009 Rivest had removed it from NIST consideration for SHA-3. ### SHA -SHA stands for Security Hashing Algorithm and it’s probably best known as the hashing algorithm used in most SSL/TLS cipher suites. A cipher suite is a collection of ciphers and algorithms that are used for SSL/TLS connections. SHA handles the hashing aspects. SHA-1, as we mentioned earlier, is now deprecated. SHA-2 is now mandatory. SHA-2 is sometimes known has SHA-256, though variants with longer bit lengths are also available. 
+SHA stands for Security Hashing Algorithm and it’s probably best known as the hashing algorithm used in most SSL/TLS cipher suites. A cipher suite is a collection of ciphers and algorithms that are used for SSL/TLS connections. SHA handles the hashing aspects. SHA-1, as we mentioned earlier, is now deprecated. SHA-2 is now mandatory. SHA-2 is sometimes known as SHA-256, though variants with longer bit lengths are also available. ### SHA256 SHA 256 is a member of the SHA 2 algorithm family, under which SHA stands for Secure Hash Algorithm. It was a collaborative effort between both the NSA and NIST to implement a successor to the SHA 1 family, which was beginning to lose potency against brute force attacks. It was published in 2001. The importance of the 256 in the name refers to the final hash digest value, i.e. the hash value will remain 256 bits regardless of the size of the plaintext/cleartext. Other algorithms in the SHA family are similar to SHA 256 in some ways. ### Luhn -The Luhn algorithm, also renowned as the modulus 10 or mod 10 algorithm, is a straightforward checksum formula used to validate a wide range of identification numbers, including credit card numbers, IMEI numbers, and Canadian Social Insurance Numbers. A community of mathematicians developed the LUHN formula in the late 1960s. Companies offering credit cards quickly followed suit. Since the algorithm is in the public interest, anyone can use it. The algorithm is used by most credit cards and many government identification numbers as a simple method of differentiating valid figures from mistyped or otherwise incorrect numbers. It was created to guard against unintentional errors, not malicious attacks. \ No newline at end of file +The Luhn algorithm, also renowned as the modulus 10 or mod 10 algorithm, is a straightforward checksum formula used to validate a wide range of identification numbers, including credit card numbers, IMEI numbers, and Canadian Social Insurance Numbers. A community of mathematicians developed the LUHN formula in the late 1960s. Companies offering credit cards quickly followed suit. Since the algorithm is in the public interest, anyone can use it. The algorithm is used by most credit cards and many government identification numbers as a simple method of differentiating valid figures from mistyped or otherwise incorrect numbers. It was created to guard against unintentional errors, not malicious attacks. diff --git a/sorts/README.md b/sorts/README.md index ceb0207c2be4..f24427d582e7 100644 --- a/sorts/README.md +++ b/sorts/README.md @@ -4,7 +4,7 @@ is specified by the sorting algorithm. The most typical orders are lexical or nu of sorting lies in the fact that, if data is stored in a sorted manner, data searching can be highly optimised. Another use for sorting is to represent data in a more readable manner. -This section contains a lot of important algorithms that helps us to use sorting algorithms in various scenarios. +This section contains a lot of important algorithms that help us to use sorting algorithms in various scenarios. 
## References * * From 53a51b3529ad5f985e6f65b5b3a4e155af1d2d63 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Sun, 24 Sep 2023 19:09:32 +1300 Subject: [PATCH 0964/1543] Rewrite of base32.py algorithm (#9068) * rewrite of base32.py * changed maps to list comprehension * Apply suggestions from code review Co-authored-by: Tianyi Zheng --------- Co-authored-by: Tianyi Zheng --- ciphers/base32.py | 51 +++++++++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/ciphers/base32.py b/ciphers/base32.py index fee53ccaf0c4..1924d1e185d7 100644 --- a/ciphers/base32.py +++ b/ciphers/base32.py @@ -1,42 +1,45 @@ -import base64 +""" +Base32 encoding and decoding +https://en.wikipedia.org/wiki/Base32 +""" +B32_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" -def base32_encode(string: str) -> bytes: + +def base32_encode(data: bytes) -> bytes: """ - Encodes a given string to base32, returning a bytes-like object - >>> base32_encode("Hello World!") + >>> base32_encode(b"Hello World!") b'JBSWY3DPEBLW64TMMQQQ====' - >>> base32_encode("123456") + >>> base32_encode(b"123456") b'GEZDGNBVGY======' - >>> base32_encode("some long complex string") + >>> base32_encode(b"some long complex string") b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=' """ - - # encoded the input (we need a bytes like object) - # then, b32encoded the bytes-like object - return base64.b32encode(string.encode("utf-8")) + binary_data = "".join(bin(ord(d))[2:].zfill(8) for d in data.decode("utf-8")) + binary_data = binary_data.ljust(5 * ((len(binary_data) // 5) + 1), "0") + b32_chunks = map("".join, zip(*[iter(binary_data)] * 5)) + b32_result = "".join(B32_CHARSET[int(chunk, 2)] for chunk in b32_chunks) + return bytes(b32_result.ljust(8 * ((len(b32_result) // 8) + 1), "="), "utf-8") -def base32_decode(encoded_bytes: bytes) -> str: +def base32_decode(data: bytes) -> bytes: """ - Decodes a given bytes-like object to a string, returning a string >>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====') - 'Hello World!' + b'Hello World!' >>> base32_decode(b'GEZDGNBVGY======') - '123456' + b'123456' >>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=') - 'some long complex string' + b'some long complex string' """ - - # decode the bytes from base32 - # then, decode the bytes-like object to return as a string - return base64.b32decode(encoded_bytes).decode("utf-8") + binary_chunks = "".join( + bin(B32_CHARSET.index(_d))[2:].zfill(5) + for _d in data.decode("utf-8").strip("=") + ) + binary_data = list(map("".join, zip(*[iter(binary_chunks)] * 8))) + return bytes("".join([chr(int(_d, 2)) for _d in binary_data]), "utf-8") if __name__ == "__main__": - test = "Hello World!" 
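# [Editor's note: an illustrative cross-check, not part of this patch; it assumes the
# base32_encode/base32_decode defined above are in scope.] The hand-rolled codec can
# be sanity-checked against the standard library's reference implementation for the
# non-empty byte strings used in the doctests:
import base64

for sample in (b"Hello World!", b"123456", b"some long complex string"):
    assert base32_encode(sample) == base64.b32encode(sample)
    assert base32_decode(base64.b32encode(sample)) == sample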
- encoded = base32_encode(test) - print(encoded) + import doctest - decoded = base32_decode(encoded) - print(decoded) + doctest.testmod() From 708d9061413a5c049d63b97b08540aa4867d5523 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sun, 24 Sep 2023 12:04:47 +0530 Subject: [PATCH 0965/1543] [pre-commit.ci] pre-commit autoupdate (#9067) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.288 → v0.0.290](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.288...v0.0.290) * Update .pre-commit-config.yaml --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 722b408ee9e9..809b841d0ea3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.288 + rev: v0.0.291 hooks: - id: ruff From 882fb2f3c972e67303dd65873f05b8f3d58724e1 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Sun, 24 Sep 2023 20:36:06 +1300 Subject: [PATCH 0966/1543] Rewrite of base85.py algorithm (#9069) * rewrite of base85.py * changed maps to list comprehension * Apply suggestions from code review Co-authored-by: Tianyi Zheng --------- Co-authored-by: Tianyi Zheng --- ciphers/base85.py | 57 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 41 insertions(+), 16 deletions(-) diff --git a/ciphers/base85.py b/ciphers/base85.py index afd1aff79d11..f0228e5052dd 100644 --- a/ciphers/base85.py +++ b/ciphers/base85.py @@ -1,30 +1,55 @@ -import base64 +""" +Base85 (Ascii85) encoding and decoding +https://en.wikipedia.org/wiki/Ascii85 +""" -def base85_encode(string: str) -> bytes: + +def _base10_to_85(d: int) -> str: + return "".join(chr(d % 85 + 33)) + _base10_to_85(d // 85) if d > 0 else "" + + +def _base85_to_10(digits: list) -> int: + return sum(char * 85**i for i, char in enumerate(reversed(digits))) + + +def ascii85_encode(data: bytes) -> bytes: """ - >>> base85_encode("") + >>> ascii85_encode(b"") b'' - >>> base85_encode("12345") + >>> ascii85_encode(b"12345") b'0etOA2#' - >>> base85_encode("base 85") + >>> ascii85_encode(b"base 85") b'@UX=h+?24' """ - # encoded the input to a bytes-like object and then a85encode that - return base64.a85encode(string.encode("utf-8")) + binary_data = "".join(bin(ord(d))[2:].zfill(8) for d in data.decode("utf-8")) + null_values = (32 * ((len(binary_data) // 32) + 1) - len(binary_data)) // 8 + binary_data = binary_data.ljust(32 * ((len(binary_data) // 32) + 1), "0") + b85_chunks = [int(_s, 2) for _s in map("".join, zip(*[iter(binary_data)] * 32))] + result = "".join(_base10_to_85(chunk)[::-1] for chunk in b85_chunks) + return bytes(result[:-null_values] if null_values % 4 != 0 else result, "utf-8") -def base85_decode(a85encoded: bytes) -> str: +def ascii85_decode(data: bytes) -> bytes: """ - >>> base85_decode(b"") - '' - >>> base85_decode(b"0etOA2#") - '12345' - >>> base85_decode(b"@UX=h+?24") - 'base 85' + >>> ascii85_decode(b"") + b'' + >>> ascii85_decode(b"0etOA2#") + b'12345' + >>> ascii85_decode(b"@UX=h+?24") + b'base 85' """ - # a85decode the input into bytes and decode that into a human readable 
string - return base64.a85decode(a85encoded).decode("utf-8") + null_values = 5 * ((len(data) // 5) + 1) - len(data) + binary_data = data.decode("utf-8") + "u" * null_values + b85_chunks = map("".join, zip(*[iter(binary_data)] * 5)) + b85_segments = [[ord(_s) - 33 for _s in chunk] for chunk in b85_chunks] + results = [bin(_base85_to_10(chunk))[2::].zfill(32) for chunk in b85_segments] + char_chunks = [ + [chr(int(_s, 2)) for _s in map("".join, zip(*[iter(r)] * 8))] for r in results + ] + result = "".join("".join(char) for char in char_chunks) + offset = int(null_values % 5 == 0) + return bytes(result[: offset - null_values], "utf-8") if __name__ == "__main__": From 211247ef82fd54540e4cb832fbbb612ca5845700 Mon Sep 17 00:00:00 2001 From: Amir Lavasani Date: Mon, 25 Sep 2023 00:38:51 +0330 Subject: [PATCH 0967/1543] Add MFCC Feature Extraction Algorithm (#9057) * Add MFCC feature extraction to machine learning * Add standalone usage in comments * Apply suggestions from code review Co-authored-by: Christian Clauss * Delete empty junk file (#9062) * updating DIRECTORY.md * updating DIRECTORY.md * Delete empty junk file * updating DIRECTORY.md * Fix ruff errors * Fix more ruff errors --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> * [main] Fix typo due to auto review change * Add doctests for all functions * Add MFCC feature extraction to machine learning * Add standalone usage in comments * Apply suggestions from code review Co-authored-by: Christian Clauss * [main] Fix typo due to auto review change * Add doctests for all functions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix some pre-commit issues * Update review issues * Remove types from docstring * Rename dct * Add mfcc docstring * Add typing to several functions * Apply suggestions from code review * Update mfcc.py * get_filter_points() -> tuple[np.ndarray, np.ndarray]: * algorithm --------- Co-authored-by: Christian Clauss Co-authored-by: Tianyi Zheng Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- machine_learning/mfcc.py | 479 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 479 insertions(+) create mode 100644 machine_learning/mfcc.py diff --git a/machine_learning/mfcc.py b/machine_learning/mfcc.py new file mode 100644 index 000000000000..7ce8ceb50ff2 --- /dev/null +++ b/machine_learning/mfcc.py @@ -0,0 +1,479 @@ +""" +Mel Frequency Cepstral Coefficients (MFCC) Calculation + +MFCC is an algorithm widely used in audio and speech processing to represent the +short-term power spectrum of a sound signal in a more compact and +discriminative way. It is particularly popular in speech and audio processing +tasks such as speech recognition and speaker identification. + +How Mel Frequency Cepstral Coefficients are Calculated: +1. Preprocessing: + - Load an audio signal and normalize it to ensure that the values fall + within a specific range (e.g., between -1 and 1). + - Frame the audio signal into overlapping, fixed-length segments, typically + using a technique like windowing to reduce spectral leakage. + +2. Fourier Transform: + - Apply a Fast Fourier Transform (FFT) to each audio frame to convert it + from the time domain to the frequency domain. This results in a + representation of the audio frame as a sequence of frequency components. + +3. 
Power Spectrum: + - Calculate the power spectrum by taking the squared magnitude of each + frequency component obtained from the FFT. This step measures the energy + distribution across different frequency bands. + +4. Mel Filterbank: + - Apply a set of triangular filterbanks spaced in the Mel frequency scale + to the power spectrum. These filters mimic the human auditory system's + frequency response. Each filterbank sums the power spectrum values within + its band. + +5. Logarithmic Compression: + - Take the logarithm (typically base 10) of the filterbank values to + compress the dynamic range. This step mimics the logarithmic response of + the human ear to sound intensity. + +6. Discrete Cosine Transform (DCT): + - Apply the Discrete Cosine Transform to the log filterbank energies to + obtain the MFCC coefficients. This transformation helps decorrelate the + filterbank energies and captures the most important features of the audio + signal. + +7. Feature Extraction: + - Select a subset of the DCT coefficients to form the feature vector. + Often, the first few coefficients (e.g., 12-13) are used for most + applications. + +References: +- Mel-Frequency Cepstral Coefficients (MFCCs): + https://en.wikipedia.org/wiki/Mel-frequency_cepstrum +- Speech and Language Processing by Daniel Jurafsky & James H. Martin: + https://web.stanford.edu/~jurafsky/slp3/ +- Mel Frequency Cepstral Coefficient (MFCC) tutorial + http://practicalcryptography.com/miscellaneous/machine-learning + /guide-mel-frequency-cepstral-coefficients-mfccs/ + +Author: Amir Lavasani +""" + + +import logging + +import numpy as np +import scipy.fftpack as fft +from scipy.signal import get_window + +logging.basicConfig(filename=f"{__file__}.log", level=logging.INFO) + + +def mfcc( + audio: np.ndarray, + sample_rate: int, + ftt_size: int = 1024, + hop_length: int = 20, + mel_filter_num: int = 10, + dct_filter_num: int = 40, +) -> np.ndarray: + """ + Calculate Mel Frequency Cepstral Coefficients (MFCCs) from an audio signal. + + Args: + audio: The input audio signal. + sample_rate: The sample rate of the audio signal (in Hz). + ftt_size: The size of the FFT window (default is 1024). + hop_length: The hop length for frame creation (default is 20ms). + mel_filter_num: The number of Mel filters (default is 10). + dct_filter_num: The number of DCT filters (default is 40). + + Returns: + A matrix of MFCCs for the input audio. + + Raises: + ValueError: If the input audio is empty. + + Example: + >>> sample_rate = 44100 # Sample rate of 44.1 kHz + >>> duration = 2.0 # Duration of 1 second + >>> t = np.linspace(0, duration, int(sample_rate * duration), endpoint=False) + >>> audio = 0.5 * np.sin(2 * np.pi * 440.0 * t) # Generate a 440 Hz sine wave + >>> mfccs = mfcc(audio, sample_rate) + >>> mfccs.shape + (40, 101) + """ + logging.info(f"Sample rate: {sample_rate}Hz") + logging.info(f"Audio duration: {len(audio) / sample_rate}s") + logging.info(f"Audio min: {np.min(audio)}") + logging.info(f"Audio max: {np.max(audio)}") + + # normalize audio + audio_normalized = normalize(audio) + + logging.info(f"Normalized audio min: {np.min(audio_normalized)}") + logging.info(f"Normalized audio max: {np.max(audio_normalized)}") + + # frame audio into + audio_framed = audio_frames( + audio_normalized, sample_rate, ftt_size=ftt_size, hop_length=hop_length + ) + + logging.info(f"Framed audio shape: {audio_framed.shape}") + logging.info(f"First frame: {audio_framed[0]}") + + # convert to frequency domain + # For simplicity we will choose the Hanning window. 
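# [Editor's note] The periodic Hann window returned by get_window("hann", N,
# fftbins=True) below is w[n] = 0.5 - 0.5 * cos(2 * pi * n / N) for n = 0..N-1.
# It tapers each frame smoothly towards zero at its edges, which reduces the
# spectral leakage that abrupt frame boundaries would otherwise add to the FFT.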
+ window = get_window("hann", ftt_size, fftbins=True) + audio_windowed = audio_framed * window + + logging.info(f"Windowed audio shape: {audio_windowed.shape}") + logging.info(f"First frame: {audio_windowed[0]}") + + audio_fft = calculate_fft(audio_windowed, ftt_size) + logging.info(f"fft audio shape: {audio_fft.shape}") + logging.info(f"First frame: {audio_fft[0]}") + + audio_power = calculate_signal_power(audio_fft) + logging.info(f"power audio shape: {audio_power.shape}") + logging.info(f"First frame: {audio_power[0]}") + + filters = mel_spaced_filterbank(sample_rate, mel_filter_num, ftt_size) + logging.info(f"filters shape: {filters.shape}") + + audio_filtered = np.dot(filters, np.transpose(audio_power)) + audio_log = 10.0 * np.log10(audio_filtered) + logging.info(f"audio_log shape: {audio_log.shape}") + + dct_filters = discrete_cosine_transform(dct_filter_num, mel_filter_num) + cepstral_coefficents = np.dot(dct_filters, audio_log) + + logging.info(f"cepstral_coefficents shape: {cepstral_coefficents.shape}") + return cepstral_coefficents + + +def normalize(audio: np.ndarray) -> np.ndarray: + """ + Normalize an audio signal by scaling it to have values between -1 and 1. + + Args: + audio: The input audio signal. + + Returns: + The normalized audio signal. + + Examples: + >>> audio = np.array([1, 2, 3, 4, 5]) + >>> normalized_audio = normalize(audio) + >>> np.max(normalized_audio) + 1.0 + >>> np.min(normalized_audio) + 0.2 + """ + # Divide the entire audio signal by the maximum absolute value + return audio / np.max(np.abs(audio)) + + +def audio_frames( + audio: np.ndarray, + sample_rate: int, + hop_length: int = 20, + ftt_size: int = 1024, +) -> np.ndarray: + """ + Split an audio signal into overlapping frames. + + Args: + audio: The input audio signal. + sample_rate: The sample rate of the audio signal. + hop_length: The length of the hopping (default is 20ms). + ftt_size: The size of the FFT window (default is 1024). + + Returns: + An array of overlapping frames. + + Examples: + >>> audio = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]*1000) + >>> sample_rate = 8000 + >>> frames = audio_frames(audio, sample_rate, hop_length=10, ftt_size=512) + >>> frames.shape + (126, 512) + """ + + hop_size = np.round(sample_rate * hop_length / 1000).astype(int) + + # Pad the audio signal to handle edge cases + audio = np.pad(audio, int(ftt_size / 2), mode="reflect") + + # Calculate the number of frames + frame_count = int((len(audio) - ftt_size) / hop_size) + 1 + + # Initialize an array to store the frames + frames = np.zeros((frame_count, ftt_size)) + + # Split the audio signal into frames + for n in range(frame_count): + frames[n] = audio[n * hop_size : n * hop_size + ftt_size] + + return frames + + +def calculate_fft(audio_windowed: np.ndarray, ftt_size: int = 1024) -> np.ndarray: + """ + Calculate the Fast Fourier Transform (FFT) of windowed audio data. + + Args: + audio_windowed: The windowed audio signal. + ftt_size: The size of the FFT (default is 1024). + + Returns: + The FFT of the audio data. 
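# [Editor's note] Only the first ftt_size // 2 + 1 frequency bins are kept below:
# for a real-valued frame the DFT is conjugate-symmetric (X[N - k] == conj(X[k])),
# so the upper half of the spectrum carries no additional information.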
+ + Examples: + >>> audio_windowed = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) + >>> audio_fft = calculate_fft(audio_windowed, ftt_size=4) + >>> np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j, -1.5-0.8660254j])) + True + """ + # Transpose the audio data to have time in rows and channels in columns + audio_transposed = np.transpose(audio_windowed) + + # Initialize an array to store the FFT results + audio_fft = np.empty( + (int(1 + ftt_size // 2), audio_transposed.shape[1]), + dtype=np.complex64, + order="F", + ) + + # Compute FFT for each channel + for n in range(audio_fft.shape[1]): + audio_fft[:, n] = fft.fft(audio_transposed[:, n], axis=0)[: audio_fft.shape[0]] + + # Transpose the FFT results back to the original shape + return np.transpose(audio_fft) + + +def calculate_signal_power(audio_fft: np.ndarray) -> np.ndarray: + """ + Calculate the power of the audio signal from its FFT. + + Args: + audio_fft: The FFT of the audio signal. + + Returns: + The power of the audio signal. + + Examples: + >>> audio_fft = np.array([1+2j, 2+3j, 3+4j, 4+5j]) + >>> power = calculate_signal_power(audio_fft) + >>> np.allclose(power, np.array([5, 13, 25, 41])) + True + """ + # Calculate the power by squaring the absolute values of the FFT coefficients + return np.square(np.abs(audio_fft)) + + +def freq_to_mel(freq: float) -> float: + """ + Convert a frequency in Hertz to the mel scale. + + Args: + freq: The frequency in Hertz. + + Returns: + The frequency in mel scale. + + Examples: + >>> round(freq_to_mel(1000), 2) + 999.99 + """ + # Use the formula to convert frequency to the mel scale + return 2595.0 * np.log10(1.0 + freq / 700.0) + + +def mel_to_freq(mels: float) -> float: + """ + Convert a frequency in the mel scale to Hertz. + + Args: + mels: The frequency in mel scale. + + Returns: + The frequency in Hertz. + + Examples: + >>> round(mel_to_freq(999.99), 2) + 1000.01 + """ + # Use the formula to convert mel scale to frequency + return 700.0 * (10.0 ** (mels / 2595.0) - 1.0) + + +def mel_spaced_filterbank( + sample_rate: int, mel_filter_num: int = 10, ftt_size: int = 1024 +) -> np.ndarray: + """ + Create a Mel-spaced filter bank for audio processing. + + Args: + sample_rate: The sample rate of the audio. + mel_filter_num: The number of mel filters (default is 10). + ftt_size: The size of the FFT (default is 1024). + + Returns: + Mel-spaced filter bank. + + Examples: + >>> round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10) + 0.0004603981 + """ + freq_min = 0 + freq_high = sample_rate // 2 + + logging.info(f"Minimum frequency: {freq_min}") + logging.info(f"Maximum frequency: {freq_high}") + + # Calculate filter points and mel frequencies + filter_points, mel_freqs = get_filter_points( + sample_rate, + freq_min, + freq_high, + mel_filter_num, + ftt_size, + ) + + filters = get_filters(filter_points, ftt_size) + + # normalize filters + # taken from the librosa library + enorm = 2.0 / (mel_freqs[2 : mel_filter_num + 2] - mel_freqs[:mel_filter_num]) + return filters * enorm[:, np.newaxis] + + +def get_filters(filter_points: np.ndarray, ftt_size: int) -> np.ndarray: + """ + Generate filters for audio processing. + + Args: + filter_points: A list of filter points. + ftt_size: The size of the FFT. + + Returns: + A matrix of filters. 
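# [Editor's note] Each row produced below is a triangular filter: it rises linearly
# from 0 to 1 between filter_points[n] and filter_points[n + 1], then falls back to 0
# at filter_points[n + 2], so neighbouring mel bands deliberately overlap.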
+ + Examples: + >>> get_filters(np.array([0, 20, 51, 95, 161, 256], dtype=int), 512).shape + (4, 257) + """ + num_filters = len(filter_points) - 2 + filters = np.zeros((num_filters, int(ftt_size / 2) + 1)) + + for n in range(num_filters): + start = filter_points[n] + mid = filter_points[n + 1] + end = filter_points[n + 2] + + # Linearly increase values from 0 to 1 + filters[n, start:mid] = np.linspace(0, 1, mid - start) + + # Linearly decrease values from 1 to 0 + filters[n, mid:end] = np.linspace(1, 0, end - mid) + + return filters + + +def get_filter_points( + sample_rate: int, + freq_min: int, + freq_high: int, + mel_filter_num: int = 10, + ftt_size: int = 1024, +) -> tuple[np.ndarray, np.ndarray]: + """ + Calculate the filter points and frequencies for mel frequency filters. + + Args: + sample_rate: The sample rate of the audio. + freq_min: The minimum frequency in Hertz. + freq_high: The maximum frequency in Hertz. + mel_filter_num: The number of mel filters (default is 10). + ftt_size: The size of the FFT (default is 1024). + + Returns: + Filter points and corresponding frequencies. + + Examples: + >>> filter_points = get_filter_points(8000, 0, 4000, mel_filter_num=4, ftt_size=512) + >>> filter_points[0] + array([ 0, 20, 51, 95, 161, 256]) + >>> filter_points[1] + array([ 0. , 324.46707094, 799.33254207, 1494.30973963, + 2511.42581671, 4000. ]) + """ + # Convert minimum and maximum frequencies to mel scale + fmin_mel = freq_to_mel(freq_min) + fmax_mel = freq_to_mel(freq_high) + + logging.info(f"MEL min: {fmin_mel}") + logging.info(f"MEL max: {fmax_mel}") + + # Generate equally spaced mel frequencies + mels = np.linspace(fmin_mel, fmax_mel, num=mel_filter_num + 2) + + # Convert mel frequencies back to Hertz + freqs = mel_to_freq(mels) + + # Calculate filter points as integer values + filter_points = np.floor((ftt_size + 1) / sample_rate * freqs).astype(int) + + return filter_points, freqs + + +def discrete_cosine_transform(dct_filter_num: int, filter_num: int) -> np.ndarray: + """ + Compute the Discrete Cosine Transform (DCT) basis matrix. + + Args: + dct_filter_num: The number of DCT filters to generate. + filter_num: The number of the fbank filters. + + Returns: + The DCT basis matrix. + + Examples: + >>> round(discrete_cosine_transform(3, 5)[0][0], 5) + 0.44721 + """ + basis = np.empty((dct_filter_num, filter_num)) + basis[0, :] = 1.0 / np.sqrt(filter_num) + + samples = np.arange(1, 2 * filter_num, 2) * np.pi / (2.0 * filter_num) + + for i in range(1, dct_filter_num): + basis[i, :] = np.cos(i * samples) * np.sqrt(2.0 / filter_num) + + return basis + + +def example(wav_file_path: str = "./path-to-file/sample.wav") -> np.ndarray: + """ + Example function to calculate Mel Frequency Cepstral Coefficients + (MFCCs) from an audio file. + + Args: + wav_file_path: The path to the WAV audio file. + + Returns: + np.ndarray: The computed MFCCs for the audio. 
+ """ + from scipy.io import wavfile + + # Load the audio from the WAV file + sample_rate, audio = wavfile.read(wav_file_path) + + # Calculate MFCCs + return mfcc(audio, sample_rate) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From eace4cea32b831a1683b4c431379f0cd7b9061db Mon Sep 17 00:00:00 2001 From: gudlu1925 <120262240+gudlu1925@users.noreply.github.com> Date: Wed, 27 Sep 2023 11:14:06 +0530 Subject: [PATCH 0968/1543] Added Coulomb_Law (#8714) * Create coulomb_law.py * Update coulomb_law.py * Update coulomb_law.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename coulomb_law.py to coulombs_law.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update coulombs_law.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update coulombs_law.py * Update coulombs_law.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update coulombs_law.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update coulombs_law.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- physics/coulombs_law.py | 42 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 physics/coulombs_law.py diff --git a/physics/coulombs_law.py b/physics/coulombs_law.py new file mode 100644 index 000000000000..252e8ec0f74e --- /dev/null +++ b/physics/coulombs_law.py @@ -0,0 +1,42 @@ +""" +Coulomb's law states that the magnitude of the electrostatic force of attraction +or repulsion between two point charges is directly proportional to the product +of the magnitudes of charges and inversely proportional to the square of the +distance between them. 
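# [Editor's note: an illustrative sketch, not part of this patch.] The constant
# k = 1/(4*pi*epsilon_0) referenced in the formula below can be recovered from the
# vacuum permittivity (CODATA value assumed here):
import math

vacuum_permittivity = 8.8541878128e-12  # epsilon_0 in farads per metre
coulomb_constant = 1 / (4 * math.pi * vacuum_permittivity)
print(f"{coulomb_constant:.4e}")  # ~8.9876e+09, matching the 8.9875517923e9 used below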
+ +F = k * q1 * q2 / r^2 + +k is Coulomb's constant and equals 1/(4π*ε0) +q1 is charge of first body (C) +q2 is charge of second body (C) +r is distance between two charged bodies (m) + +Reference: https://en.wikipedia.org/wiki/Coulomb%27s_law +""" + + +def coulombs_law(q1: float, q2: float, radius: float) -> float: + """ + Calculate the electrostatic force of attraction or repulsion + between two point charges + + >>> coulombs_law(15.5, 20, 15) + 12382849136.06 + >>> coulombs_law(1, 15, 5) + 5392531075.38 + >>> coulombs_law(20, -50, 15) + -39944674632.44 + >>> coulombs_law(-5, -8, 10) + 3595020716.92 + >>> coulombs_law(50, 100, 50) + 17975103584.6 + """ + if radius <= 0: + raise ValueError("The radius is always a positive non zero integer") + return round(((8.9875517923 * 10**9) * q1 * q2) / (radius**2), 2) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b2e186f4b769ae98d04f7f2408d3ac86da44c06f Mon Sep 17 00:00:00 2001 From: Okza Pradhana Date: Wed, 27 Sep 2023 13:06:19 +0700 Subject: [PATCH 0969/1543] feat(maths): add function to perform calculation (#6602) * feat(maths): add function to perform calculation - Add single function to calculate sum of two positive numbers using bitwise operator * docs: add wikipedia url as explanation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review Co-authored-by: Caeden Perelli-Harris * Update sum_of_two_positive_numbers_bitwise.py * Update sum_of_two_positive_numbers_bitwise.py * Update sum_of_two_positive_numbers_bitwise.py --------- Co-authored-by: Okza Pradhana Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng Co-authored-by: Caeden Perelli-Harris --- maths/sum_of_two_positive_numbers_bitwise.py | 55 ++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 maths/sum_of_two_positive_numbers_bitwise.py diff --git a/maths/sum_of_two_positive_numbers_bitwise.py b/maths/sum_of_two_positive_numbers_bitwise.py new file mode 100644 index 000000000000..70eaf6887b64 --- /dev/null +++ b/maths/sum_of_two_positive_numbers_bitwise.py @@ -0,0 +1,55 @@ +""" +Calculates the sum of two non-negative integers using bitwise operators +Wikipedia explanation: https://en.wikipedia.org/wiki/Binary_number +""" + + +def bitwise_addition_recursive(number: int, other_number: int) -> int: + """ + >>> bitwise_addition_recursive(4, 5) + 9 + >>> bitwise_addition_recursive(8, 9) + 17 + >>> bitwise_addition_recursive(0, 4) + 4 + >>> bitwise_addition_recursive(4.5, 9) + Traceback (most recent call last): + ... + TypeError: Both arguments MUST be integers! + >>> bitwise_addition_recursive('4', 9) + Traceback (most recent call last): + ... + TypeError: Both arguments MUST be integers! + >>> bitwise_addition_recursive('4.5', 9) + Traceback (most recent call last): + ... + TypeError: Both arguments MUST be integers! + >>> bitwise_addition_recursive(-1, 9) + Traceback (most recent call last): + ... + ValueError: Both arguments MUST be non-negative! + >>> bitwise_addition_recursive(1, -9) + Traceback (most recent call last): + ... + ValueError: Both arguments MUST be non-negative! 
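# [Editor's note] A worked trace of the XOR/AND-carry scheme implemented below,
# for bitwise_addition_recursive(5, 3):
#   5 ^ 3 = 0b110 (partial sum),  (5 & 3) << 1 = 0b010 (carry)
#   6 ^ 2 = 0b100,                (6 & 2) << 1 = 0b100
#   4 ^ 4 = 0b000,                (4 & 4) << 1 = 0b1000
#   0 ^ 8 = 0b1000, carry is 0, so 8 is returned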
+ """ + + if not isinstance(number, int) or not isinstance(other_number, int): + raise TypeError("Both arguments MUST be integers!") + + if number < 0 or other_number < 0: + raise ValueError("Both arguments MUST be non-negative!") + + bitwise_sum = number ^ other_number + carry = number & other_number + + if carry == 0: + return bitwise_sum + + return bitwise_addition_recursive(bitwise_sum, carry << 1) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 84ec9414e45380a5e946d4f73b921b274ecd4be7 Mon Sep 17 00:00:00 2001 From: thor-harsh <105957576+thor-harsh@users.noreply.github.com> Date: Wed, 27 Sep 2023 12:01:42 +0530 Subject: [PATCH 0970/1543] Update k_means_clust.py (#8996) * Update k_means_clust.py * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- machine_learning/k_means_clust.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 7c8142aab878..d93c5addf2ee 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -11,10 +11,10 @@ - initial_centroids , initial centroid values generated by utility function(mentioned in usage). - maxiter , maximum number of iterations to process. - - heterogeneity , empty list that will be filled with hetrogeneity values if passed + - heterogeneity , empty list that will be filled with heterogeneity values if passed to kmeans func. Usage: - 1. define 'k' value, 'X' features array and 'hetrogeneity' empty list + 1. define 'k' value, 'X' features array and 'heterogeneity' empty list 2. create initial_centroids, initial_centroids = get_initial_centroids( X, @@ -31,8 +31,8 @@ record_heterogeneity=heterogeneity, verbose=True # whether to print logs in console or not.(default=False) ) - 4. Plot the loss function, hetrogeneity values for every iteration saved in - hetrogeneity list. + 4. Plot the loss function and heterogeneity values for every iteration saved in + heterogeneity list. plot_heterogeneity( heterogeneity, k @@ -198,13 +198,10 @@ def report_generator( df: pd.DataFrame, clustering_variables: np.ndarray, fill_missing_report=None ) -> pd.DataFrame: """ - Function generates easy-erading clustering report. It takes 2 arguments as an input: - DataFrame - dataframe with predicted cluester column; - FillMissingReport - dictionary of rules how we are going to fill missing - values of for final report generate (not included in modeling); - in order to run the function following libraries must be imported: - import pandas as pd - import numpy as np + Generates a clustering report. 
This function takes 2 arguments as input: + df - dataframe with predicted cluster column + fill_missing_report - dictionary of rules on how we are going to fill in missing + values for final generated report (not included in modelling); >>> data = pd.DataFrame() >>> data['numbers'] = [1, 2, 3] >>> data['col1'] = [0.5, 2.5, 4.5] @@ -306,10 +303,10 @@ def report_generator( a.columns = report.columns # rename columns to match report report = report.drop( report[report.Type == "count"].index - ) # drop count values except cluster size + ) # drop count values except for cluster size report = pd.concat( [report, a, clustersize, clusterproportion], axis=0 - ) # concat report with clustert size and nan values + ) # concat report with cluster size and nan values report["Mark"] = report["Features"].isin(clustering_variables) cols = report.columns.tolist() cols = cols[0:2] + cols[-1:] + cols[2:-1] From 5830b29e7ecf5437ce46bcdefda88eedea693043 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Wed, 27 Sep 2023 08:00:34 -0400 Subject: [PATCH 0971/1543] Fix `mypy` errors in `erosion_operation.py` (#8603) * updating DIRECTORY.md * Fix mypy errors in erosion_operation.py * Rename functions to use snake case * updating DIRECTORY.md * updating DIRECTORY.md * Replace raw file string with pathlib Path * Fix function name in erosion_operation.py doctest --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .../erosion_operation.py | 39 +++++++++++-------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/digital_image_processing/morphological_operations/erosion_operation.py b/digital_image_processing/morphological_operations/erosion_operation.py index c0e1ef847237..53001da83468 100644 --- a/digital_image_processing/morphological_operations/erosion_operation.py +++ b/digital_image_processing/morphological_operations/erosion_operation.py @@ -1,34 +1,37 @@ +from pathlib import Path + import numpy as np from PIL import Image -def rgb2gray(rgb: np.array) -> np.array: +def rgb_to_gray(rgb: np.ndarray) -> np.ndarray: """ Return gray image from rgb image - >>> rgb2gray(np.array([[[127, 255, 0]]])) + + >>> rgb_to_gray(np.array([[[127, 255, 0]]])) array([[187.6453]]) - >>> rgb2gray(np.array([[[0, 0, 0]]])) + >>> rgb_to_gray(np.array([[[0, 0, 0]]])) array([[0.]]) - >>> rgb2gray(np.array([[[2, 4, 1]]])) + >>> rgb_to_gray(np.array([[[2, 4, 1]]])) array([[3.0598]]) - >>> rgb2gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]])) + >>> rgb_to_gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]])) array([[159.0524, 90.0635, 117.6989]]) """ r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b -def gray2binary(gray: np.array) -> np.array: +def gray_to_binary(gray: np.ndarray) -> np.ndarray: """ Return binary image from gray image - >>> gray2binary(np.array([[127, 255, 0]])) + >>> gray_to_binary(np.array([[127, 255, 0]])) array([[False, True, False]]) - >>> gray2binary(np.array([[0]])) + >>> gray_to_binary(np.array([[0]])) array([[False]]) - >>> gray2binary(np.array([[26.2409, 4.9315, 1.4729]])) + >>> gray_to_binary(np.array([[26.2409, 4.9315, 1.4729]])) array([[False, False, False]]) - >>> gray2binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]])) + >>> gray_to_binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]])) array([[False, True, False], [False, True, False], [False, True, False]]) @@ -36,9 +39,10 @@ def gray2binary(gray: np.array) -> np.array: return (gray > 127) & (gray <= 255) -def erosion(image: np.array, 
kernel: np.array) -> np.array: +def erosion(image: np.ndarray, kernel: np.ndarray) -> np.ndarray: """ Return eroded image + >>> erosion(np.array([[True, True, False]]), np.array([[0, 1, 0]])) array([[False, False, False]]) >>> erosion(np.array([[True, False, False]]), np.array([[1, 1, 0]])) @@ -62,14 +66,17 @@ def erosion(image: np.array, kernel: np.array) -> np.array: return output -# kernel to be applied -structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) - if __name__ == "__main__": # read original image - image = np.array(Image.open(r"..\image_data\lena.jpg")) + lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg" + lena = np.array(Image.open(lena_path)) + + # kernel to be applied + structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + # Apply erosion operation to a binary image - output = erosion(gray2binary(rgb2gray(image)), structuring_element) + output = erosion(gray_to_binary(rgb_to_gray(lena)), structuring_element) + # Save the output image pil_img = Image.fromarray(output).convert("RGB") pil_img.save("result_erosion.png") From 76767d2f09d15aeff0a54cfc44652207eda2314e Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Wed, 27 Sep 2023 08:01:18 -0400 Subject: [PATCH 0972/1543] Consolidate the two existing kNN implementations (#8903) * Add type hints to k_nearest_neighbours.py * Refactor k_nearest_neighbours.py into class * Add documentation to k_nearest_neighbours.py * Use heap-based priority queue for k_nearest_neighbours.py * Delete knn_sklearn.py * updating DIRECTORY.md * Use optional args in k_nearest_neighbours.py for demo purposes * Fix wrong function arg in k_nearest_neighbours.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - machine_learning/k_nearest_neighbours.py | 128 ++++++++++++++--------- machine_learning/knn_sklearn.py | 31 ------ 3 files changed, 79 insertions(+), 81 deletions(-) delete mode 100644 machine_learning/knn_sklearn.py diff --git a/DIRECTORY.md b/DIRECTORY.md index d81e4ec1ee83..902999460fe5 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -507,7 +507,6 @@ * [Gradient Descent](machine_learning/gradient_descent.py) * [K Means Clust](machine_learning/k_means_clust.py) * [K Nearest Neighbours](machine_learning/k_nearest_neighbours.py) - * [Knn Sklearn](machine_learning/knn_sklearn.py) * [Linear Discriminant Analysis](machine_learning/linear_discriminant_analysis.py) * [Linear Regression](machine_learning/linear_regression.py) * Local Weighted Learning diff --git a/machine_learning/k_nearest_neighbours.py b/machine_learning/k_nearest_neighbours.py index 2a90cfe5987a..a43757c5c20e 100644 --- a/machine_learning/k_nearest_neighbours.py +++ b/machine_learning/k_nearest_neighbours.py @@ -1,58 +1,88 @@ +""" +k-Nearest Neighbours (kNN) is a simple non-parametric supervised learning +algorithm used for classification. Given some labelled training data, a given +point is classified using its k nearest neighbours according to some distance +metric. The most commonly occurring label among the neighbours becomes the label +of the given point. In effect, the label of the given point is decided by a +majority vote. + +This implementation uses the commonly used Euclidean distance metric, but other +distance metrics can also be used. 
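# [Editor's note: an illustrative sketch, not part of this patch.] For example, a
# Manhattan (L1) metric could be swapped in wherever the Euclidean distance is used
# below; the helper name manhattan_distance is ours, not the module's:
import numpy as np

def manhattan_distance(a: np.ndarray, b: np.ndarray) -> float:
    # Sum of absolute coordinate-wise differences between the two points.
    return float(np.sum(np.abs(a - b)))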
+ +Reference: https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm +""" + from collections import Counter +from heapq import nsmallest import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split -data = datasets.load_iris() - -X = np.array(data["data"]) -y = np.array(data["target"]) -classes = data["target_names"] - -X_train, X_test, y_train, y_test = train_test_split(X, y) - - -def euclidean_distance(a, b): - """ - Gives the euclidean distance between two points - >>> euclidean_distance([0, 0], [3, 4]) - 5.0 - >>> euclidean_distance([1, 2, 3], [1, 8, 11]) - 10.0 - """ - return np.linalg.norm(np.array(a) - np.array(b)) - - -def classifier(train_data, train_target, classes, point, k=5): - """ - Classifies the point using the KNN algorithm - k closest points are found (ranked in ascending order of euclidean distance) - Params: - :train_data: Set of points that are classified into two or more classes - :train_target: List of classes in the order of train_data points - :classes: Labels of the classes - :point: The data point that needs to be classified - - >>> X_train = [[0, 0], [1, 0], [0, 1], [0.5, 0.5], [3, 3], [2, 3], [3, 2]] - >>> y_train = [0, 0, 0, 0, 1, 1, 1] - >>> classes = ['A','B']; point = [1.2,1.2] - >>> classifier(X_train, y_train, classes,point) - 'A' - """ - data = zip(train_data, train_target) - # List of distances of all points from the point to be classified - distances = [] - for data_point in data: - distance = euclidean_distance(data_point[0], point) - distances.append((distance, data_point[1])) - # Choosing 'k' points with the least distances. - votes = [i[1] for i in sorted(distances)[:k]] - # Most commonly occurring class among them - # is the class into which the point is classified - result = Counter(votes).most_common(1)[0][0] - return classes[result] + +class KNN: + def __init__( + self, + train_data: np.ndarray[float], + train_target: np.ndarray[int], + class_labels: list[str], + ) -> None: + """ + Create a kNN classifier using the given training data and class labels + """ + self.data = zip(train_data, train_target) + self.labels = class_labels + + @staticmethod + def _euclidean_distance(a: np.ndarray[float], b: np.ndarray[float]) -> float: + """ + Calculate the Euclidean distance between two points + >>> KNN._euclidean_distance(np.array([0, 0]), np.array([3, 4])) + 5.0 + >>> KNN._euclidean_distance(np.array([1, 2, 3]), np.array([1, 8, 11])) + 10.0 + """ + return np.linalg.norm(a - b) + + def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str: + """ + Classify a given point using the kNN algorithm + >>> train_X = np.array( + ... [[0, 0], [1, 0], [0, 1], [0.5, 0.5], [3, 3], [2, 3], [3, 2]] + ... 
) + >>> train_y = np.array([0, 0, 0, 0, 1, 1, 1]) + >>> classes = ['A', 'B'] + >>> knn = KNN(train_X, train_y, classes) + >>> point = np.array([1.2, 1.2]) + >>> knn.classify(point) + 'A' + """ + # Distances of all points from the point to be classified + distances = ( + (self._euclidean_distance(data_point[0], pred_point), data_point[1]) + for data_point in self.data + ) + + # Choosing k points with the shortest distances + votes = (i[1] for i in nsmallest(k, distances)) + + # Most commonly occurring class is the one into which the point is classified + result = Counter(votes).most_common(1)[0][0] + return self.labels[result] if __name__ == "__main__": - print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) + import doctest + + doctest.testmod() + + iris = datasets.load_iris() + + X = np.array(iris["data"]) + y = np.array(iris["target"]) + iris_classes = iris["target_names"] + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + iris_point = np.array([4.4, 3.1, 1.3, 1.4]) + classifier = KNN(X_train, y_train, iris_classes) + print(classifier.classify(iris_point, k=3)) diff --git a/machine_learning/knn_sklearn.py b/machine_learning/knn_sklearn.py deleted file mode 100644 index 4a621a4244b6..000000000000 --- a/machine_learning/knn_sklearn.py +++ /dev/null @@ -1,31 +0,0 @@ -from sklearn.datasets import load_iris -from sklearn.model_selection import train_test_split -from sklearn.neighbors import KNeighborsClassifier - -# Load iris file -iris = load_iris() -iris.keys() - - -print(f"Target names: \n {iris.target_names} ") -print(f"\n Features: \n {iris.feature_names}") - -# Train set e Test set -X_train, X_test, y_train, y_test = train_test_split( - iris["data"], iris["target"], random_state=4 -) - -# KNN - -knn = KNeighborsClassifier(n_neighbors=1) -knn.fit(X_train, y_train) - -# new array to test -X_new = [[1, 2, 1, 4], [2, 3, 4, 5]] - -prediction = knn.predict(X_new) - -print( - f"\nNew array: \n {X_new}\n\nTarget Names Prediction: \n" - f" {iris['target_names'][prediction]}" -) From f9b8759ba82cd7ca4e4a99b9bc9b661ace5a93cc Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Wed, 27 Sep 2023 09:54:40 -0400 Subject: [PATCH 0973/1543] Move bitwise add (#9097) * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Move and rename maths/sum_of_two_positive_numbers_bitwise.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 +++ .../bitwise_addition_recursive.py | 0 2 files changed, 3 insertions(+) rename maths/sum_of_two_positive_numbers_bitwise.py => bit_manipulation/bitwise_addition_recursive.py (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 902999460fe5..e596d96e5e83 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -43,6 +43,7 @@ * [Binary Shifts](bit_manipulation/binary_shifts.py) * [Binary Twos Complement](bit_manipulation/binary_twos_complement.py) * [Binary Xor Operator](bit_manipulation/binary_xor_operator.py) + * [Bitwise Addition Recursive](bit_manipulation/bitwise_addition_recursive.py) * [Count 1S Brian Kernighan Method](bit_manipulation/count_1s_brian_kernighan_method.py) * [Count Number Of One Bits](bit_manipulation/count_number_of_one_bits.py) * [Gray Code Sequence](bit_manipulation/gray_code_sequence.py) @@ -514,6 +515,7 @@ * [Logistic Regression](machine_learning/logistic_regression.py) * Lstm * [Lstm Prediction](machine_learning/lstm/lstm_prediction.py) + * [Mfcc](machine_learning/mfcc.py) * [Multilayer Perceptron 
Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polynomial Regression](machine_learning/polynomial_regression.py) * [Scoring Functions](machine_learning/scoring_functions.py) @@ -752,6 +754,7 @@ * [Basic Orbital Capture](physics/basic_orbital_capture.py) * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) + * [Coulombs Law](physics/coulombs_law.py) * [Grahams Law](physics/grahams_law.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Hubble Parameter](physics/hubble_parameter.py) diff --git a/maths/sum_of_two_positive_numbers_bitwise.py b/bit_manipulation/bitwise_addition_recursive.py similarity index 100% rename from maths/sum_of_two_positive_numbers_bitwise.py rename to bit_manipulation/bitwise_addition_recursive.py From 38c2b839819549d1ab8566675fab09db449875cc Mon Sep 17 00:00:00 2001 From: aryan1165 <111041731+aryan1165@users.noreply.github.com> Date: Wed, 27 Sep 2023 19:26:01 +0530 Subject: [PATCH 0974/1543] Deleted euclidean_gcd.py. Fixes#8063 (#9108) --- maths/euclidean_gcd.py | 47 ------------------------------------------ 1 file changed, 47 deletions(-) delete mode 100644 maths/euclidean_gcd.py diff --git a/maths/euclidean_gcd.py b/maths/euclidean_gcd.py deleted file mode 100644 index de4b250243db..000000000000 --- a/maths/euclidean_gcd.py +++ /dev/null @@ -1,47 +0,0 @@ -""" https://en.wikipedia.org/wiki/Euclidean_algorithm """ - - -def euclidean_gcd(a: int, b: int) -> int: - """ - Examples: - >>> euclidean_gcd(3, 5) - 1 - - >>> euclidean_gcd(6, 3) - 3 - """ - while b: - a, b = b, a % b - return a - - -def euclidean_gcd_recursive(a: int, b: int) -> int: - """ - Recursive method for euclicedan gcd algorithm - - Examples: - >>> euclidean_gcd_recursive(3, 5) - 1 - - >>> euclidean_gcd_recursive(6, 3) - 3 - """ - return a if b == 0 else euclidean_gcd_recursive(b, a % b) - - -def main(): - print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}") - print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}") - print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}") - print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}") - print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}") - - print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}") - print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}") - print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}") - print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}") - print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}") - - -if __name__ == "__main__": - main() From 35dd529c85fc433e0780cdaff586c684208aa1b7 Mon Sep 17 00:00:00 2001 From: Hetarth Jain Date: Thu, 28 Sep 2023 23:54:46 +0530 Subject: [PATCH 0975/1543] Returning Index instead of boolean in knuth_morris_pratt (kmp) function, making it compatible with str.find(). 
(#9083) * Update knuth_morris_pratt.py - changed Boolean to Index * Update knuth_morris_pratt.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update knuth_morris_pratt.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update knuth_morris_pratt.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update back_propagation_neural_network.py * Update back_propagation_neural_network.py * Update strings/knuth_morris_pratt.py * Update knuth_morris_pratt.py * Update knuth_morris_pratt.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- strings/knuth_morris_pratt.py | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/strings/knuth_morris_pratt.py b/strings/knuth_morris_pratt.py index a488c171a93b..8a04eb2532c0 100644 --- a/strings/knuth_morris_pratt.py +++ b/strings/knuth_morris_pratt.py @@ -1,7 +1,7 @@ from __future__ import annotations -def kmp(pattern: str, text: str) -> bool: +def knuth_morris_pratt(text: str, pattern: str) -> int: """ The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text with complexity O(n + m) @@ -14,6 +14,12 @@ def kmp(pattern: str, text: str) -> bool: 2) Step through the text one character at a time and compare it to a character in the pattern updating our location within the pattern if necessary + >>> kmp = "knuth_morris_pratt" + >>> all( + ... knuth_morris_pratt(kmp, s) == kmp.find(s) + ... for s in ("kn", "h_m", "rr", "tt", "not there") + ... ) + True """ # 1) Construct the failure array @@ -24,7 +30,7 @@ def kmp(pattern: str, text: str) -> bool: while i < len(text): if pattern[j] == text[i]: if j == (len(pattern) - 1): - return True + return i - j j += 1 # if this is a prefix in our pattern @@ -33,7 +39,7 @@ def kmp(pattern: str, text: str) -> bool: j = failure[j - 1] continue i += 1 - return False + return -1 def get_failure_array(pattern: str) -> list[int]: @@ -57,27 +63,38 @@ def get_failure_array(pattern: str) -> list[int]: if __name__ == "__main__": + import doctest + + doctest.testmod() + # Test 1) pattern = "abc1abc12" text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc" text2 = "alskfjaldsk23adsfabcabc" - assert kmp(pattern, text1) and not kmp(pattern, text2) + assert knuth_morris_pratt(text1, pattern) and knuth_morris_pratt(text2, pattern) # Test 2) pattern = "ABABX" text = "ABABZABABYABABX" - assert kmp(pattern, text) + assert knuth_morris_pratt(text, pattern) # Test 3) pattern = "AAAB" text = "ABAAAAAB" - assert kmp(pattern, text) + assert knuth_morris_pratt(text, pattern) # Test 4) pattern = "abcdabcy" text = "abcxabcdabxabcdabcdabcy" - assert kmp(pattern, text) + assert knuth_morris_pratt(text, pattern) + + # Test 5) -> Doctests + kmp = "knuth_morris_pratt" + assert all( + knuth_morris_pratt(kmp, s) == kmp.find(s) + for s in ("kn", "h_m", "rr", "tt", "not there") + ) - # Test 5) + # Test 6) pattern = "aabaabaaa" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2] From 467903aa33ad746262bd46d803231d0930131197 Mon Sep 17 00:00:00 2001 From: Belhadj Ahmed Walid <80895522+BAW2501@users.noreply.github.com> Date: Sat, 30 Sep 2023 05:33:13 +0100 Subject: [PATCH 0976/1543] added smith waterman algorithm (#9001) * added smith waterman algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see 
https://pre-commit.ci * descriptive names for the parameters a and b * doctesting lowercase upcase empty string cases * updated block quot,fixed traceback and doctests * shorter block quote Co-authored-by: Tianyi Zheng * global vars to func params,more doctests * updated doctests * user access to SW params * formating --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- dynamic_programming/smith_waterman.py | 193 ++++++++++++++++++++++++++ 1 file changed, 193 insertions(+) create mode 100644 dynamic_programming/smith_waterman.py diff --git a/dynamic_programming/smith_waterman.py b/dynamic_programming/smith_waterman.py new file mode 100644 index 000000000000..4c5d58379f07 --- /dev/null +++ b/dynamic_programming/smith_waterman.py @@ -0,0 +1,193 @@ +""" +https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm +The Smith-Waterman algorithm is a dynamic programming algorithm used for sequence +alignment. It is particularly useful for finding similarities between two sequences, +such as DNA or protein sequences. In this implementation, gaps are penalized +linearly, meaning that the score is reduced by a fixed amount for each gap introduced +in the alignment. However, it's important to note that the Smith-Waterman algorithm +supports other gap penalty methods as well. +""" + + +def score_function( + source_char: str, + target_char: str, + match: int = 1, + mismatch: int = -1, + gap: int = -2, +) -> int: + """ + Calculate the score for a character pair based on whether they match or mismatch. + Returns 1 if the characters match, -1 if they mismatch, and -2 if either of the + characters is a gap. + >>> score_function('A', 'A') + 1 + >>> score_function('A', 'C') + -1 + >>> score_function('-', 'A') + -2 + >>> score_function('A', '-') + -2 + >>> score_function('-', '-') + -2 + """ + if "-" in (source_char, target_char): + return gap + return match if source_char == target_char else mismatch + + +def smith_waterman( + query: str, + subject: str, + match: int = 1, + mismatch: int = -1, + gap: int = -2, +) -> list[list[int]]: + """ + Perform the Smith-Waterman local sequence alignment algorithm. + Returns a 2D list representing the score matrix. Each value in the matrix + corresponds to the score of the best local alignment ending at that point. 
+ >>> smith_waterman('ACAC', 'CA') + [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]] + >>> smith_waterman('acac', 'ca') + [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]] + >>> smith_waterman('ACAC', 'ca') + [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]] + >>> smith_waterman('acac', 'CA') + [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]] + >>> smith_waterman('ACAC', '') + [[0], [0], [0], [0], [0]] + >>> smith_waterman('', 'CA') + [[0, 0, 0]] + >>> smith_waterman('ACAC', 'CA') + [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]] + + >>> smith_waterman('acac', 'ca') + [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]] + + >>> smith_waterman('ACAC', 'ca') + [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]] + + >>> smith_waterman('acac', 'CA') + [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]] + + >>> smith_waterman('ACAC', '') + [[0], [0], [0], [0], [0]] + + >>> smith_waterman('', 'CA') + [[0, 0, 0]] + + >>> smith_waterman('AGT', 'AGT') + [[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 0], [0, 0, 0, 3]] + + >>> smith_waterman('AGT', 'GTA') + [[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 2, 0]] + + >>> smith_waterman('AGT', 'GTC') + [[0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 0]] + + >>> smith_waterman('AGT', 'G') + [[0, 0], [0, 0], [0, 1], [0, 0]] + + >>> smith_waterman('G', 'AGT') + [[0, 0, 0, 0], [0, 0, 1, 0]] + + >>> smith_waterman('AGT', 'AGTCT') + [[0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 2, 0, 0, 0], [0, 0, 0, 3, 1, 1]] + + >>> smith_waterman('AGTCT', 'AGT') + [[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 0], [0, 0, 0, 3], [0, 0, 0, 1], [0, 0, 0, 1]] + + >>> smith_waterman('AGTCT', 'GTC') + [[0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 0], [0, 0, 0, 3], [0, 0, 1, 1]] + """ + # make both query and subject uppercase + query = query.upper() + subject = subject.upper() + + # Initialize score matrix + m = len(query) + n = len(subject) + score = [[0] * (n + 1) for _ in range(m + 1)] + kwargs = {"match": match, "mismatch": mismatch, "gap": gap} + + for i in range(1, m + 1): + for j in range(1, n + 1): + # Calculate scores for each cell + match = score[i - 1][j - 1] + score_function( + query[i - 1], subject[j - 1], **kwargs + ) + delete = score[i - 1][j] + gap + insert = score[i][j - 1] + gap + + # Take maximum score + score[i][j] = max(0, match, delete, insert) + + return score + + +def traceback(score: list[list[int]], query: str, subject: str) -> str: + r""" + Perform traceback to find the optimal local alignment. + Starts from the highest scoring cell in the matrix and traces back recursively + until a 0 score is found. Returns the alignment strings. 
+ >>> traceback([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]], 'ACAC', 'CA') + 'CA\nCA' + >>> traceback([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]], 'acac', 'ca') + 'CA\nCA' + >>> traceback([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]], 'ACAC', 'ca') + 'CA\nCA' + >>> traceback([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]], 'acac', 'CA') + 'CA\nCA' + >>> traceback([[0, 0, 0]], 'ACAC', '') + '' + """ + # make both query and subject uppercase + query = query.upper() + subject = subject.upper() + # find the indices of the maximum value in the score matrix + max_value = float("-inf") + i_max = j_max = 0 + for i, row in enumerate(score): + for j, value in enumerate(row): + if value > max_value: + max_value = value + i_max, j_max = i, j + # Traceback logic to find optimal alignment + i = i_max + j = j_max + align1 = "" + align2 = "" + gap = score_function("-", "-") + # guard against empty query or subject + if i == 0 or j == 0: + return "" + while i > 0 and j > 0: + if score[i][j] == score[i - 1][j - 1] + score_function( + query[i - 1], subject[j - 1] + ): + # optimal path is a diagonal take both letters + align1 = query[i - 1] + align1 + align2 = subject[j - 1] + align2 + i -= 1 + j -= 1 + elif score[i][j] == score[i - 1][j] + gap: + # optimal path is a vertical + align1 = query[i - 1] + align1 + align2 = f"-{align2}" + i -= 1 + else: + # optimal path is a horizontal + align1 = f"-{align1}" + align2 = subject[j - 1] + align2 + j -= 1 + + return f"{align1}\n{align2}" + + +if __name__ == "__main__": + query = "HEAGAWGHEE" + subject = "PAWHEAE" + + score = smith_waterman(query, subject, match=1, mismatch=-1, gap=-2) + print(traceback(score, query, subject)) From dec96438be1a165eaa300a4d6df33e338b4e44c6 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sat, 30 Sep 2023 05:57:56 +0100 Subject: [PATCH 0977/1543] Create word search algorithm (#8906) * feat(other): Create word_search algorithm * updating DIRECTORY.md * doc(word_search): Link to wikipedia article * Apply suggestions from code review Co-authored-by: Tianyi Zheng * Update word_search.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- DIRECTORY.md | 1 + other/word_search.py | 396 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 397 insertions(+) create mode 100644 other/word_search.py diff --git a/DIRECTORY.md b/DIRECTORY.md index e596d96e5e83..aabbf27512ce 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -747,6 +747,7 @@ * [Scoring Algorithm](other/scoring_algorithm.py) * [Sdes](other/sdes.py) * [Tower Of Hanoi](other/tower_of_hanoi.py) + * [Word Search](other/word_search.py) ## Physics * [Altitude Pressure](physics/altitude_pressure.py) diff --git a/other/word_search.py b/other/word_search.py new file mode 100644 index 000000000000..a4796e220c7c --- /dev/null +++ b/other/word_search.py @@ -0,0 +1,396 @@ +""" +Creates a random wordsearch with eight different directions +that are best described as compass locations. 
+ +@ https://en.wikipedia.org/wiki/Word_search +""" + + +from random import choice, randint, shuffle + +# The words to display on the word search - +# can be made dynamic by randonly selecting a certain number of +# words from a predefined word file, while ensuring the character +# count fits within the matrix size (n x m) +WORDS = ["cat", "dog", "snake", "fish"] + +WIDTH = 10 +HEIGHT = 10 + + +class WordSearch: + """ + >>> ws = WordSearch(WORDS, WIDTH, HEIGHT) + >>> ws.board # doctest: +ELLIPSIS + [[None, ..., None], ..., [None, ..., None]] + >>> ws.generate_board() + """ + + def __init__(self, words: list[str], width: int, height: int) -> None: + self.words = words + self.width = width + self.height = height + + # Board matrix holding each letter + self.board: list[list[str | None]] = [[None] * width for _ in range(height)] + + def insert_north(self, word: str, rows: list[int], cols: list[int]) -> None: + """ + >>> ws = WordSearch(WORDS, 3, 3) + >>> ws.insert_north("cat", [2], [2]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [[None, None, 't'], + [None, None, 'a'], + [None, None, 'c']] + >>> ws.insert_north("at", [0, 1, 2], [2, 1]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [[None, 't', 't'], + [None, 'a', 'a'], + [None, None, 'c']] + """ + word_length = len(word) + # Attempt to insert the word into each row and when successful, exit + for row in rows: + # Check if there is space above the row to fit in the word + if word_length > row + 1: + continue + + # Attempt to insert the word into each column + for col in cols: + # Only check to be made here is if there are existing letters + # above the column that will be overwritten + letters_above = [self.board[row - i][col] for i in range(word_length)] + if all(letter is None for letter in letters_above): + # Successful, insert the word north + for i in range(word_length): + self.board[row - i][col] = word[i] + return + + def insert_northeast(self, word: str, rows: list[int], cols: list[int]) -> None: + """ + >>> ws = WordSearch(WORDS, 3, 3) + >>> ws.insert_northeast("cat", [2], [0]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [[None, None, 't'], + [None, 'a', None], + ['c', None, None]] + >>> ws.insert_northeast("at", [0, 1], [2, 1, 0]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [[None, 't', 't'], + ['a', 'a', None], + ['c', None, None]] + """ + word_length = len(word) + # Attempt to insert the word into each row and when successful, exit + for row in rows: + # Check if there is space for the word above the row + if word_length > row + 1: + continue + + # Attempt to insert the word into each column + for col in cols: + # Check if there is space to the right of the word as well as above + if word_length + col > self.width: + continue + + # Check if there are existing letters + # to the right of the column that will be overwritten + letters_diagonal_left = [ + self.board[row - i][col + i] for i in range(word_length) + ] + if all(letter is None for letter in letters_diagonal_left): + # Successful, insert the word northeast + for i in range(word_length): + self.board[row - i][col + i] = word[i] + return + + def insert_east(self, word: str, rows: list[int], cols: list[int]) -> None: + """ + >>> ws = WordSearch(WORDS, 3, 3) + >>> ws.insert_east("cat", [1], [0]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [[None, None, None], + ['c', 'a', 't'], + [None, None, None]] + >>> ws.insert_east("at", [1, 0], [2, 1, 0]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [[None, 'a', 't'], + ['c', 'a', 't'], + [None, None, 
None]] + """ + word_length = len(word) + # Attempt to insert the word into each row and when successful, exit + for row in rows: + # Attempt to insert the word into each column + for col in cols: + # Check if there is space to the right of the word + if word_length + col > self.width: + continue + + # Check if there are existing letters + # to the right of the column that will be overwritten + letters_left = [self.board[row][col + i] for i in range(word_length)] + if all(letter is None for letter in letters_left): + # Successful, insert the word east + for i in range(word_length): + self.board[row][col + i] = word[i] + return + + def insert_southeast(self, word: str, rows: list[int], cols: list[int]) -> None: + """ + >>> ws = WordSearch(WORDS, 3, 3) + >>> ws.insert_southeast("cat", [0], [0]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [['c', None, None], + [None, 'a', None], + [None, None, 't']] + >>> ws.insert_southeast("at", [1, 0], [2, 1, 0]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [['c', None, None], + ['a', 'a', None], + [None, 't', 't']] + """ + word_length = len(word) + # Attempt to insert the word into each row and when successful, exit + for row in rows: + # Check if there is space for the word below the row + if word_length + row > self.height: + continue + + # Attempt to insert the word into each column + for col in cols: + # Check if there is space to the right of the word as well as below + if word_length + col > self.width: + continue + + # Check if there are existing letters + # to the right of the column that will be overwritten + letters_diagonal_left = [ + self.board[row + i][col + i] for i in range(word_length) + ] + if all(letter is None for letter in letters_diagonal_left): + # Successful, insert the word southeast + for i in range(word_length): + self.board[row + i][col + i] = word[i] + return + + def insert_south(self, word: str, rows: list[int], cols: list[int]) -> None: + """ + >>> ws = WordSearch(WORDS, 3, 3) + >>> ws.insert_south("cat", [0], [0]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [['c', None, None], + ['a', None, None], + ['t', None, None]] + >>> ws.insert_south("at", [2, 1, 0], [0, 1, 2]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [['c', None, None], + ['a', 'a', None], + ['t', 't', None]] + """ + word_length = len(word) + # Attempt to insert the word into each row and when successful, exit + for row in rows: + # Check if there is space below the row to fit in the word + if word_length + row > self.height: + continue + + # Attempt to insert the word into each column + for col in cols: + # Only check to be made here is if there are existing letters + # below the column that will be overwritten + letters_below = [self.board[row + i][col] for i in range(word_length)] + if all(letter is None for letter in letters_below): + # Successful, insert the word south + for i in range(word_length): + self.board[row + i][col] = word[i] + return + + def insert_southwest(self, word: str, rows: list[int], cols: list[int]) -> None: + """ + >>> ws = WordSearch(WORDS, 3, 3) + >>> ws.insert_southwest("cat", [0], [2]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [[None, None, 'c'], + [None, 'a', None], + ['t', None, None]] + >>> ws.insert_southwest("at", [1, 2], [2, 1, 0]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [[None, None, 'c'], + [None, 'a', 'a'], + ['t', 't', None]] + """ + word_length = len(word) + # Attempt to insert the word into each row and when successful, exit + for row in rows: + # Check if there is space for the word 
below the row + if word_length + row > self.height: + continue + + # Attempt to insert the word into each column + for col in cols: + # Check if there is space to the left of the word as well as below + if word_length > col + 1: + continue + + # Check if there are existing letters + # to the right of the column that will be overwritten + letters_diagonal_left = [ + self.board[row + i][col - i] for i in range(word_length) + ] + if all(letter is None for letter in letters_diagonal_left): + # Successful, insert the word southwest + for i in range(word_length): + self.board[row + i][col - i] = word[i] + return + + def insert_west(self, word: str, rows: list[int], cols: list[int]) -> None: + """ + >>> ws = WordSearch(WORDS, 3, 3) + >>> ws.insert_west("cat", [1], [2]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [[None, None, None], + ['t', 'a', 'c'], + [None, None, None]] + >>> ws.insert_west("at", [1, 0], [1, 2, 0]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [['t', 'a', None], + ['t', 'a', 'c'], + [None, None, None]] + """ + word_length = len(word) + # Attempt to insert the word into each row and when successful, exit + for row in rows: + # Attempt to insert the word into each column + for col in cols: + # Check if there is space to the left of the word + if word_length > col + 1: + continue + + # Check if there are existing letters + # to the left of the column that will be overwritten + letters_left = [self.board[row][col - i] for i in range(word_length)] + if all(letter is None for letter in letters_left): + # Successful, insert the word west + for i in range(word_length): + self.board[row][col - i] = word[i] + return + + def insert_northwest(self, word: str, rows: list[int], cols: list[int]) -> None: + """ + >>> ws = WordSearch(WORDS, 3, 3) + >>> ws.insert_northwest("cat", [2], [2]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [['t', None, None], + [None, 'a', None], + [None, None, 'c']] + >>> ws.insert_northwest("at", [1, 2], [0, 1]) + >>> ws.board # doctest: +NORMALIZE_WHITESPACE + [['t', None, None], + ['t', 'a', None], + [None, 'a', 'c']] + """ + word_length = len(word) + # Attempt to insert the word into each row and when successful, exit + for row in rows: + # Check if there is space for the word above the row + if word_length > row + 1: + continue + + # Attempt to insert the word into each column + for col in cols: + # Check if there is space to the left of the word as well as above + if word_length > col + 1: + continue + + # Check if there are existing letters + # to the right of the column that will be overwritten + letters_diagonal_left = [ + self.board[row - i][col - i] for i in range(word_length) + ] + if all(letter is None for letter in letters_diagonal_left): + # Successful, insert the word northwest + for i in range(word_length): + self.board[row - i][col - i] = word[i] + return + + def generate_board(self) -> None: + """ + Generates a board with a random direction for each word. + + >>> wt = WordSearch(WORDS, WIDTH, HEIGHT) + >>> wt.generate_board() + >>> len(list(filter(lambda word: word is not None, sum(wt.board, start=[]))) + ... 
) == sum(map(lambda word: len(word), WORDS)) + True + """ + directions = ( + self.insert_north, + self.insert_northeast, + self.insert_east, + self.insert_southeast, + self.insert_south, + self.insert_southwest, + self.insert_west, + self.insert_northwest, + ) + for word in self.words: + # Shuffle the row order and column order that is used when brute forcing + # the insertion of the word + rows, cols = list(range(self.height)), list(range(self.width)) + shuffle(rows) + shuffle(cols) + + # Insert the word via the direction + choice(directions)(word, rows, cols) + + +def visualise_word_search( + board: list[list[str | None]] | None = None, *, add_fake_chars: bool = True +) -> None: + """ + Graphically displays the word search in the terminal. + + >>> ws = WordSearch(WORDS, 5, 5) + >>> ws.insert_north("cat", [4], [4]) + >>> visualise_word_search( + ... ws.board, add_fake_chars=False) # doctest: +NORMALIZE_WHITESPACE + # # # # # + # # # # # + # # # # t + # # # # a + # # # # c + >>> ws.insert_northeast("snake", [4], [4, 3, 2, 1, 0]) + >>> visualise_word_search( + ... ws.board, add_fake_chars=False) # doctest: +NORMALIZE_WHITESPACE + # # # # e + # # # k # + # # a # t + # n # # a + s # # # c + """ + if board is None: + word_search = WordSearch(WORDS, WIDTH, HEIGHT) + word_search.generate_board() + board = word_search.board + + result = "" + for row in range(len(board)): + for col in range(len(board[0])): + character = "#" + if (letter := board[row][col]) is not None: + character = letter + # Empty char, so add a fake char + elif add_fake_chars: + character = chr(randint(97, 122)) + result += f"{character} " + result += "\n" + print(result, end="") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + visualise_word_search() From aaf7195465ddfe743cda707cac0feacf70287ecd Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 30 Sep 2023 23:10:33 -0400 Subject: [PATCH 0978/1543] Fix mypy error in web_programming/reddit.py (#9162) * updating DIRECTORY.md * updating DIRECTORY.md * Fix mypy error in web_programming/reddit.py web_programming/reddit.py:36: error: Missing named argument "response" for "HTTPError" [call-arg] --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 +- web_programming/reddit.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index aabbf27512ce..001da2c15b99 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -341,6 +341,7 @@ * [Palindrome Partitioning](dynamic_programming/palindrome_partitioning.py) * [Regex Match](dynamic_programming/regex_match.py) * [Rod Cutting](dynamic_programming/rod_cutting.py) + * [Smith Waterman](dynamic_programming/smith_waterman.py) * [Subset Generation](dynamic_programming/subset_generation.py) * [Sum Of Subset](dynamic_programming/sum_of_subset.py) * [Tribonacci](dynamic_programming/tribonacci.py) @@ -567,7 +568,6 @@ * [Dual Number Automatic Differentiation](maths/dual_number_automatic_differentiation.py) * [Entropy](maths/entropy.py) * [Euclidean Distance](maths/euclidean_distance.py) - * [Euclidean Gcd](maths/euclidean_gcd.py) * [Euler Method](maths/euler_method.py) * [Euler Modified](maths/euler_modified.py) * [Eulers Totient](maths/eulers_totient.py) diff --git a/web_programming/reddit.py b/web_programming/reddit.py index 5ca5f828c0fb..1c165ecc49ec 100644 --- a/web_programming/reddit.py +++ b/web_programming/reddit.py @@ -33,7 +33,7 @@ def get_subreddit_data( headers={"User-agent": "A random string"}, ) if response.status_code 
== 429: - raise requests.HTTPError + raise requests.HTTPError(response=response) data = response.json() if not wanted_data: From 5f8d1cb5c99cccf6e5ce62fbca9c3dcd60a75292 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 30 Sep 2023 23:31:35 -0400 Subject: [PATCH 0979/1543] Fix DeprecationWarning in local_weighted_learning.py (#9165) Fix DeprecationWarning that occurs during build due to converting an np.ndarray to a scalar implicitly --- .../local_weighted_learning/local_weighted_learning.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py index 8dd0e55d41df..ada6f7cd2520 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.py +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -122,7 +122,7 @@ def local_weight_regression( """ y_pred = np.zeros(len(x_train)) # Initialize array of predictions for i, item in enumerate(x_train): - y_pred[i] = item @ local_weight(item, x_train, y_train, tau) + y_pred[i] = np.dot(item, local_weight(item, x_train, y_train, tau)) return y_pred From 320d895b86133b4b5c489df39ab245fa6be4bce8 Mon Sep 17 00:00:00 2001 From: aryan1165 <111041731+aryan1165@users.noreply.github.com> Date: Sun, 1 Oct 2023 09:36:15 +0530 Subject: [PATCH 0980/1543] Fixed permute_recursive() by passing nums.copy(). Fixes #9014 (#9161) * Fixes #9014 * Fixed permute_recursive() by passing nums.copy() --- data_structures/arrays/permutations.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/data_structures/arrays/permutations.py b/data_structures/arrays/permutations.py index 0f029187b92b..4906dd5c2ae1 100644 --- a/data_structures/arrays/permutations.py +++ b/data_structures/arrays/permutations.py @@ -10,7 +10,7 @@ def permute_recursive(nums: list[int]) -> list[list[int]]: return [[]] for _ in range(len(nums)): n = nums.pop(0) - permutations = permute_recursive(nums) + permutations = permute_recursive(nums.copy()) for perm in permutations: perm.append(n) result.extend(permutations) @@ -43,6 +43,6 @@ def backtrack(start: int) -> None: if __name__ == "__main__": import doctest - res = permute_backtrack([1, 2, 3]) - print(res) + result = permute_backtrack([1, 2, 3]) + print(result) doctest.testmod() From 280dfc1a22adb08aa71984ee4b22e4df220a8e68 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 1 Oct 2023 00:07:25 -0400 Subject: [PATCH 0981/1543] Fix DeprecationWarning in local_weighted_learning.py (Attempt 2) (#9170) * Fix DeprecationWarning in local_weighted_learning.py Fix DeprecationWarning that occurs during build due to converting an np.ndarray to a scalar implicitly * DeprecationWarning fix attempt 2 --- .../local_weighted_learning/local_weighted_learning.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py index ada6f7cd2520..f3056da40e24 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.py +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -122,7 +122,7 @@ def local_weight_regression( """ y_pred = np.zeros(len(x_train)) # Initialize array of predictions for i, item in enumerate(x_train): - y_pred[i] = np.dot(item, local_weight(item, x_train, y_train, tau)) + y_pred[i] = np.dot(item, local_weight(item, x_train, y_train, tau)).item() return y_pred From 
832610ab1d05c8cea2814adcc8db5597e7e5ede7 Mon Sep 17 00:00:00 2001 From: aryan1165 <111041731+aryan1165@users.noreply.github.com> Date: Sun, 1 Oct 2023 10:10:53 +0530 Subject: [PATCH 0982/1543] Deleted sorts/random_pivot_quick_sort.py (#9178) --- sorts/random_pivot_quick_sort.py | 44 -------------------------------- 1 file changed, 44 deletions(-) delete mode 100644 sorts/random_pivot_quick_sort.py diff --git a/sorts/random_pivot_quick_sort.py b/sorts/random_pivot_quick_sort.py deleted file mode 100644 index 748b6741047e..000000000000 --- a/sorts/random_pivot_quick_sort.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Picks the random index as the pivot -""" -import random - - -def partition(a, left_index, right_index): - pivot = a[left_index] - i = left_index + 1 - for j in range(left_index + 1, right_index): - if a[j] < pivot: - a[j], a[i] = a[i], a[j] - i += 1 - a[left_index], a[i - 1] = a[i - 1], a[left_index] - return i - 1 - - -def quick_sort_random(a, left, right): - if left < right: - pivot = random.randint(left, right - 1) - a[pivot], a[left] = ( - a[left], - a[pivot], - ) # switches the pivot with the left most bound - pivot_index = partition(a, left, right) - quick_sort_random( - a, left, pivot_index - ) # recursive quicksort to the left of the pivot point - quick_sort_random( - a, pivot_index + 1, right - ) # recursive quicksort to the right of the pivot point - - -def main(): - user_input = input("Enter numbers separated by a comma:\n").strip() - arr = [int(item) for item in user_input.split(",")] - - quick_sort_random(arr, 0, len(arr)) - - print(arr) - - -if __name__ == "__main__": - main() From 3dbafd3f0db55e040a7fd277134d86ec3accfb57 Mon Sep 17 00:00:00 2001 From: aryan1165 <111041731+aryan1165@users.noreply.github.com> Date: Sun, 1 Oct 2023 10:51:46 +0530 Subject: [PATCH 0983/1543] Deleted random_normal_distribution_quicksort.py. 
Fixes #9124 (#9182) --- sorts/random_normal_distribution_quicksort.py | 62 ------------------- 1 file changed, 62 deletions(-) delete mode 100644 sorts/random_normal_distribution_quicksort.py diff --git a/sorts/random_normal_distribution_quicksort.py b/sorts/random_normal_distribution_quicksort.py deleted file mode 100644 index f7f60903c546..000000000000 --- a/sorts/random_normal_distribution_quicksort.py +++ /dev/null @@ -1,62 +0,0 @@ -from random import randint -from tempfile import TemporaryFile - -import numpy as np - - -def _in_place_quick_sort(a, start, end): - count = 0 - if start < end: - pivot = randint(start, end) - temp = a[end] - a[end] = a[pivot] - a[pivot] = temp - - p, count = _in_place_partition(a, start, end) - count += _in_place_quick_sort(a, start, p - 1) - count += _in_place_quick_sort(a, p + 1, end) - return count - - -def _in_place_partition(a, start, end): - count = 0 - pivot = randint(start, end) - temp = a[end] - a[end] = a[pivot] - a[pivot] = temp - new_pivot_index = start - 1 - for index in range(start, end): - count += 1 - if a[index] < a[end]: # check if current val is less than pivot value - new_pivot_index = new_pivot_index + 1 - temp = a[new_pivot_index] - a[new_pivot_index] = a[index] - a[index] = temp - - temp = a[new_pivot_index + 1] - a[new_pivot_index + 1] = a[end] - a[end] = temp - return new_pivot_index + 1, count - - -outfile = TemporaryFile() -p = 100 # 1000 elements are to be sorted - - -mu, sigma = 0, 1 # mean and standard deviation -X = np.random.normal(mu, sigma, p) -np.save(outfile, X) -print("The array is") -print(X) - - -outfile.seek(0) # using the same array -M = np.load(outfile) -r = len(M) - 1 -z = _in_place_quick_sort(M, 0, r) - -print( - "No of Comparisons for 100 elements selected from a standard normal distribution" - "is :" -) -print(z) From fbbbd5db05987e735ec35fc658136001d3e9e663 Mon Sep 17 00:00:00 2001 From: aryan1165 <111041731+aryan1165@users.noreply.github.com> Date: Sun, 1 Oct 2023 11:04:03 +0530 Subject: [PATCH 0984/1543] Deleted add.py. As stated in #6216 (#9180) --- maths/add.py | 19 ------------------- 1 file changed, 19 deletions(-) delete mode 100644 maths/add.py diff --git a/maths/add.py b/maths/add.py deleted file mode 100644 index c89252c645ea..000000000000 --- a/maths/add.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -Just to check -""" - - -def add(a: float, b: float) -> float: - """ - >>> add(2, 2) - 4 - >>> add(2, -2) - 0 - """ - return a + b - - -if __name__ == "__main__": - a = 5 - b = 6 - print(f"The sum of {a} + {b} is {add(a, b)}") From eaa87bd791cdc18d210d775f3258767751f9d3fe Mon Sep 17 00:00:00 2001 From: aryan1165 <111041731+aryan1165@users.noreply.github.com> Date: Sun, 1 Oct 2023 14:13:48 +0530 Subject: [PATCH 0985/1543] Made binary tree memory-friendly using generators based travels. 
Fixes (#9208) #8725 --- .../binary_tree/binary_tree_traversals.py | 56 +++++++++++-------- 1 file changed, 34 insertions(+), 22 deletions(-) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 2afb7604f9c6..5dbbbe623906 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -1,12 +1,12 @@ -# https://en.wikipedia.org/wiki/Tree_traversal from __future__ import annotations from collections import deque -from collections.abc import Sequence +from collections.abc import Generator, Sequence from dataclasses import dataclass from typing import Any +# https://en.wikipedia.org/wiki/Tree_traversal @dataclass class Node: data: int @@ -31,44 +31,56 @@ def make_tree() -> Node | None: return tree -def preorder(root: Node | None) -> list[int]: +def preorder(root: Node | None) -> Generator[int, None, None]: """ Pre-order traversal visits root node, left subtree, right subtree. - >>> preorder(make_tree()) + >>> list(preorder(make_tree())) [1, 2, 4, 5, 3] """ - return [root.data, *preorder(root.left), *preorder(root.right)] if root else [] + if not root: + return + yield root.data + yield from preorder(root.left) + yield from preorder(root.right) -def postorder(root: Node | None) -> list[int]: +def postorder(root: Node | None) -> Generator[int, None, None]: """ Post-order traversal visits left subtree, right subtree, root node. - >>> postorder(make_tree()) + >>> list(postorder(make_tree())) [4, 5, 2, 3, 1] """ - return postorder(root.left) + postorder(root.right) + [root.data] if root else [] + if not root: + return + yield from postorder(root.left) + yield from postorder(root.right) + yield root.data -def inorder(root: Node | None) -> list[int]: +def inorder(root: Node | None) -> Generator[int, None, None]: """ In-order traversal visits left subtree, root node, right subtree. - >>> inorder(make_tree()) + >>> list(inorder(make_tree())) [4, 2, 5, 1, 3] """ - return [*inorder(root.left), root.data, *inorder(root.right)] if root else [] + if not root: + return + yield from inorder(root.left) + yield root.data + yield from inorder(root.right) -def reverse_inorder(root: Node | None) -> list[int]: +def reverse_inorder(root: Node | None) -> Generator[int, None, None]: """ Reverse in-order traversal visits right subtree, root node, left subtree. - >>> reverse_inorder(make_tree()) + >>> list(reverse_inorder(make_tree())) [3, 1, 5, 2, 4] """ - return ( - [*reverse_inorder(root.right), root.data, *reverse_inorder(root.left)] - if root - else [] - ) + if not root: + return + yield from reverse_inorder(root.right) + yield root.data + yield from reverse_inorder(root.left) def height(root: Node | None) -> int: @@ -178,10 +190,10 @@ def main() -> None: # Main function for testing. 
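# The hunk below wraps every traversal in list() because, after the changes
# above, the traversal functions yield their values lazily instead of
# returning ready-made lists. A caller that only needs a prefix can therefore
# stop early without materialising the whole traversal -- a rough sketch,
# assuming the patched module is importable from the repository root:
from itertools import islice

from data_structures.binary_tree.binary_tree_traversals import make_tree, preorder

assert list(islice(preorder(make_tree()), 2)) == [1, 2]  # visits only nodes 1 and 2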
root = make_tree() # All Traversals of the binary are as follows: - print(f"In-order Traversal: {inorder(root)}") - print(f"Reverse In-order Traversal: {reverse_inorder(root)}") - print(f"Pre-order Traversal: {preorder(root)}") - print(f"Post-order Traversal: {postorder(root)}", "\n") + print(f"In-order Traversal: {list(inorder(root))}") + print(f"Reverse In-order Traversal: {list(reverse_inorder(root))}") + print(f"Pre-order Traversal: {list(preorder(root))}") + print(f"Post-order Traversal: {list(postorder(root))}", "\n") print(f"Height of Tree: {height(root)}", "\n") From cfabd91a8ba83bbe23d2790494e2450118044fcc Mon Sep 17 00:00:00 2001 From: Shreya Bhalgat <85868386+shreyabhalgat@users.noreply.github.com> Date: Sun, 1 Oct 2023 16:58:20 +0530 Subject: [PATCH 0986/1543] Add missing number algorithm (#9203) * Added missing_number algorithm using bit manipulation * Update bit_manipulation/missing_number.py --------- Co-authored-by: Christian Clauss --- bit_manipulation/missing_number.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 bit_manipulation/missing_number.py diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py new file mode 100644 index 000000000000..92502a778ace --- /dev/null +++ b/bit_manipulation/missing_number.py @@ -0,0 +1,21 @@ +def find_missing_number(nums: list[int]) -> int: + """ + Finds the missing number in a list of consecutive integers. + + Args: + nums: A list of integers. + + Returns: + The missing number. + + Example: + >>> find_missing_number([0, 1, 3, 4]) + 2 + """ + n = len(nums) + missing_number = n + + for i in range(n): + missing_number ^= i ^ nums[i] + + return missing_number From 596d93423862da8c8e419e9b74c1321b7d26b7a1 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 1 Oct 2023 13:58:30 +0200 Subject: [PATCH 0987/1543] Fix ruff warning (#9272) --- .github/workflows/ruff.yml | 2 +- DIRECTORY.md | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index ca2d5be47327..e71ac8a4e933 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -13,4 +13,4 @@ jobs: steps: - uses: actions/checkout@v3 - run: pip install --user ruff - - run: ruff --format=github . + - run: ruff --output-format=github . 
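The find_missing_number() patch above relies on two XOR identities, x ^ x == 0 and x ^ 0 == x: folding every expected value 0..n together with every value actually present cancels all of the pairs and leaves only the absent number. A minimal standalone sketch of that cancellation (illustrative only; the helper name is hypothetical):

from functools import reduce
from operator import xor


def missing_via_xor(nums: list[int]) -> int:
    # XOR of the expected values 0..n folded with the XOR of the actual values:
    # every number that is present cancels out, the missing one remains.
    return reduce(xor, range(len(nums) + 1), 0) ^ reduce(xor, nums, 0)


assert missing_via_xor([0, 1, 3, 4]) == 2  # same answer as the doctest above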
diff --git a/DIRECTORY.md b/DIRECTORY.md index 001da2c15b99..4ae1c69f7099 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -530,7 +530,6 @@ ## Maths * [Abs](maths/abs.py) - * [Add](maths/add.py) * [Addition Without Arithmetic](maths/addition_without_arithmetic.py) * [Aliquot Sum](maths/aliquot_sum.py) * [Allocation Number](maths/allocation_number.py) @@ -1141,8 +1140,6 @@ * [Quick Sort](sorts/quick_sort.py) * [Quick Sort 3 Partition](sorts/quick_sort_3_partition.py) * [Radix Sort](sorts/radix_sort.py) - * [Random Normal Distribution Quicksort](sorts/random_normal_distribution_quicksort.py) - * [Random Pivot Quick Sort](sorts/random_pivot_quick_sort.py) * [Recursive Bubble Sort](sorts/recursive_bubble_sort.py) * [Recursive Insertion Sort](sorts/recursive_insertion_sort.py) * [Recursive Mergesort Array](sorts/recursive_mergesort_array.py) From 43c3f4ea4070bfbe1f41f4b861c7ff3f89953715 Mon Sep 17 00:00:00 2001 From: Bama Charan Chhandogi Date: Sun, 1 Oct 2023 20:16:12 +0530 Subject: [PATCH 0988/1543] add Three sum (#9177) * add Three sum * add Three sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * update * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add documention --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/three_sum.py | 47 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 maths/three_sum.py diff --git a/maths/three_sum.py b/maths/three_sum.py new file mode 100644 index 000000000000..09956f8415a0 --- /dev/null +++ b/maths/three_sum.py @@ -0,0 +1,47 @@ +""" +https://en.wikipedia.org/wiki/3SUM +""" + + +def three_sum(nums: list[int]) -> list[list[int]]: + """ + Find all unique triplets in a sorted array of integers that sum up to zero. + + Args: + nums: A sorted list of integers. + + Returns: + A list of lists containing unique triplets that sum up to zero. + + >>> three_sum([-1, 0, 1, 2, -1, -4]) + [[-1, -1, 2], [-1, 0, 1]] + >>> three_sum([1, 2, 3, 4]) + [] + """ + nums.sort() + ans = [] + for i in range(len(nums) - 2): + if i == 0 or (nums[i] != nums[i - 1]): + low, high, c = i + 1, len(nums) - 1, 0 - nums[i] + while low < high: + if nums[low] + nums[high] == c: + ans.append([nums[i], nums[low], nums[high]]) + + while low < high and nums[low] == nums[low + 1]: + low += 1 + while low < high and nums[high] == nums[high - 1]: + high -= 1 + + low += 1 + high -= 1 + elif nums[low] + nums[high] < c: + low += 1 + else: + high -= 1 + return ans + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From bacad12a1f64d92a793ccc2ec88535c9a4092fb6 Mon Sep 17 00:00:00 2001 From: Muhammad Umer Farooq <115654418+Muhammadummerr@users.noreply.github.com> Date: Sun, 1 Oct 2023 21:11:16 +0500 Subject: [PATCH 0989/1543] [NEW ALGORITHM] Rotate linked list by K. (#9278) * Rotate linked list by k. * Rotate linked list by k. * updated variable name. 
* Update data_structures/linked_list/rotate_linked_list_by_k.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update data_structures/linked_list/rotate_linked_list_by_k.py Co-authored-by: Christian Clauss * Update data_structures/linked_list/rotate_linked_list_by_k.py * Make Node a dataclass --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../linked_list/rotate_to_the_right.py | 156 ++++++++++++++++++ 1 file changed, 156 insertions(+) create mode 100644 data_structures/linked_list/rotate_to_the_right.py diff --git a/data_structures/linked_list/rotate_to_the_right.py b/data_structures/linked_list/rotate_to_the_right.py new file mode 100644 index 000000000000..51b10481c0ce --- /dev/null +++ b/data_structures/linked_list/rotate_to_the_right.py @@ -0,0 +1,156 @@ +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass +class Node: + data: int + next_node: Node | None = None + + +def print_linked_list(head: Node | None) -> None: + """ + Print the entire linked list iteratively. + + This function prints the elements of a linked list separated by '->'. + + Parameters: + head (Node | None): The head of the linked list to be printed, + or None if the linked list is empty. + + >>> head = insert_node(None, 0) + >>> head = insert_node(head, 2) + >>> head = insert_node(head, 1) + >>> print_linked_list(head) + 0->2->1 + >>> head = insert_node(head, 4) + >>> head = insert_node(head, 5) + >>> print_linked_list(head) + 0->2->1->4->5 + """ + if head is None: + return + while head.next_node is not None: + print(head.data, end="->") + head = head.next_node + print(head.data) + + +def insert_node(head: Node | None, data: int) -> Node: + """ + Insert a new node at the end of a linked list and return the new head. + + Parameters: + head (Node | None): The head of the linked list. + data (int): The data to be inserted into the new node. + + Returns: + Node: The new head of the linked list. + + >>> head = insert_node(None, 10) + >>> head = insert_node(head, 9) + >>> head = insert_node(head, 8) + >>> print_linked_list(head) + 10->9->8 + """ + new_node = Node(data) + # If the linked list is empty, the new_node becomes the head + if head is None: + return new_node + + temp_node = head + while temp_node.next_node: + temp_node = temp_node.next_node + + temp_node.next_node = new_node # type: ignore + return head + + +def rotate_to_the_right(head: Node, places: int) -> Node: + """ + Rotate a linked list to the right by places times. + + Parameters: + head: The head of the linked list. + places: The number of places to rotate. + + Returns: + Node: The head of the rotated linked list. + + >>> rotate_to_the_right(None, places=1) + Traceback (most recent call last): + ... + ValueError: The linked list is empty. 
+ >>> head = insert_node(None, 1) + >>> rotate_to_the_right(head, places=1) == head + True + >>> head = insert_node(None, 1) + >>> head = insert_node(head, 2) + >>> head = insert_node(head, 3) + >>> head = insert_node(head, 4) + >>> head = insert_node(head, 5) + >>> new_head = rotate_to_the_right(head, places=2) + >>> print_linked_list(new_head) + 4->5->1->2->3 + """ + # Check if the list is empty or has only one element + if not head: + raise ValueError("The linked list is empty.") + + if head.next_node is None: + return head + + # Calculate the length of the linked list + length = 1 + temp_node = head + while temp_node.next_node is not None: + length += 1 + temp_node = temp_node.next_node + + # Adjust the value of places to avoid places longer than the list. + places %= length + + if places == 0: + return head # As no rotation is needed. + + # Find the new head position after rotation. + new_head_index = length - places + + # Traverse to the new head position + temp_node = head + for _ in range(new_head_index - 1): + assert temp_node.next_node + temp_node = temp_node.next_node + + # Update pointers to perform rotation + assert temp_node.next_node + new_head = temp_node.next_node + temp_node.next_node = None + temp_node = new_head + while temp_node.next_node: + temp_node = temp_node.next_node + temp_node.next_node = head + + assert new_head + return new_head + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + head = insert_node(None, 5) + head = insert_node(head, 1) + head = insert_node(head, 2) + head = insert_node(head, 4) + head = insert_node(head, 3) + + print("Original list: ", end="") + print_linked_list(head) + + places = 3 + new_head = rotate_to_the_right(head, places) + + print(f"After {places} iterations: ", end="") + print_linked_list(new_head) From 18cdbc416504391bc9246f1874bd752ea730c710 Mon Sep 17 00:00:00 2001 From: aryan1165 <111041731+aryan1165@users.noreply.github.com> Date: Sun, 1 Oct 2023 22:24:05 +0530 Subject: [PATCH 0990/1543] binary_search_traversals.py made memory-friendly using generators. Fixes #8725 completely. (#9237) * Made binary tree memory-friendly using generators based travels. Fixes #8725 * Made binary tree memory-friendly using generators based travels. Fixes #8725 * Fixed pre-commit errors --- .../binary_tree/binary_tree_traversals.py | 57 ++++++++----------- 1 file changed, 23 insertions(+), 34 deletions(-) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 5dbbbe623906..2b33cdca4fed 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -1,9 +1,8 @@ from __future__ import annotations from collections import deque -from collections.abc import Generator, Sequence +from collections.abc import Generator from dataclasses import dataclass -from typing import Any # https://en.wikipedia.org/wiki/Tree_traversal @@ -94,96 +93,86 @@ def height(root: Node | None) -> int: return (max(height(root.left), height(root.right)) + 1) if root else 0 -def level_order(root: Node | None) -> Sequence[Node | None]: +def level_order(root: Node | None) -> Generator[int, None, None]: """ Returns a list of nodes value from a whole binary tree in Level Order Traverse. Level Order traverse: Visit nodes of the tree level-by-level. 
""" - output: list[Any] = [] if root is None: - return output + return process_queue = deque([root]) while process_queue: node = process_queue.popleft() - output.append(node.data) + yield node.data if node.left: process_queue.append(node.left) if node.right: process_queue.append(node.right) - return output def get_nodes_from_left_to_right( root: Node | None, level: int -) -> Sequence[Node | None]: +) -> Generator[int, None, None]: """ Returns a list of nodes value from a particular level: Left to right direction of the binary tree. """ - output: list[Any] = [] - def populate_output(root: Node | None, level: int) -> None: + def populate_output(root: Node | None, level: int) -> Generator[int, None, None]: if not root: return if level == 1: - output.append(root.data) + yield root.data elif level > 1: - populate_output(root.left, level - 1) - populate_output(root.right, level - 1) + yield from populate_output(root.left, level - 1) + yield from populate_output(root.right, level - 1) - populate_output(root, level) - return output + yield from populate_output(root, level) def get_nodes_from_right_to_left( root: Node | None, level: int -) -> Sequence[Node | None]: +) -> Generator[int, None, None]: """ Returns a list of nodes value from a particular level: Right to left direction of the binary tree. """ - output: list[Any] = [] - def populate_output(root: Node | None, level: int) -> None: + def populate_output(root: Node | None, level: int) -> Generator[int, None, None]: if root is None: return if level == 1: - output.append(root.data) + yield root.data elif level > 1: - populate_output(root.right, level - 1) - populate_output(root.left, level - 1) + yield from populate_output(root.right, level - 1) + yield from populate_output(root.left, level - 1) - populate_output(root, level) - return output + yield from populate_output(root, level) -def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]: +def zigzag(root: Node | None) -> Generator[int, None, None]: """ ZigZag traverse: Returns a list of nodes value from left to right and right to left, alternatively. """ if root is None: - return [] - - output: list[Sequence[Node | None]] = [] + return flag = 0 height_tree = height(root) for h in range(1, height_tree + 1): if not flag: - output.append(get_nodes_from_left_to_right(root, h)) + yield from get_nodes_from_left_to_right(root, h) flag = 1 else: - output.append(get_nodes_from_right_to_left(root, h)) + yield from get_nodes_from_right_to_left(root, h) flag = 0 - return output - def main() -> None: # Main function for testing. # Create binary tree. @@ -198,15 +187,15 @@ def main() -> None: # Main function for testing. print(f"Height of Tree: {height(root)}", "\n") print("Complete Level Order Traversal: ") - print(level_order(root), "\n") + print(f"{list(level_order(root))} \n") print("Level-wise order Traversal: ") for level in range(1, height(root) + 1): - print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level)) + print(f"Level {level}:", list(get_nodes_from_left_to_right(root, level=level))) print("\nZigZag order Traversal: ") - print(zigzag(root)) + print(f"{list(zigzag(root))}") if __name__ == "__main__": From 8d94f7745f81c8f7c33bdd3d0c0740861b9c98e7 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Sun, 1 Oct 2023 23:14:58 +0500 Subject: [PATCH 0991/1543] Euler072 - application of vector operations to reduce calculation time and refactoring numpy (#9229) * Replacing the generator with numpy vector operations from lu_decomposition. 
* Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Application of vector operations to reduce calculation time and refactoring numpy. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- project_euler/problem_072/sol1.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/project_euler/problem_072/sol1.py b/project_euler/problem_072/sol1.py index a2a0eeeb31c5..5a28be564556 100644 --- a/project_euler/problem_072/sol1.py +++ b/project_euler/problem_072/sol1.py @@ -21,6 +21,8 @@ Time: 1 sec """ +import numpy as np + def solution(limit: int = 1_000_000) -> int: """ @@ -33,14 +35,15 @@ def solution(limit: int = 1_000_000) -> int: 304191 """ - phi = [i - 1 for i in range(limit + 1)] + # generating an array from -1 to limit + phi = np.arange(-1, limit) for i in range(2, limit + 1): if phi[i] == i - 1: - for j in range(2 * i, limit + 1, i): - phi[j] -= phi[j] // i + ind = np.arange(2 * i, limit + 1, i) # indexes for selection + phi[ind] -= phi[ind] // i - return sum(phi[2 : limit + 1]) + return np.sum(phi[2 : limit + 1]) if __name__ == "__main__": From 24e7edbe5bc771023335544a7a9cf7895140c1fe Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Mon, 2 Oct 2023 02:48:16 +0530 Subject: [PATCH 0992/1543] Remove myself from CODEOWNERS (#9325) --- .github/CODEOWNERS | 2 +- DIRECTORY.md | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index abf99ab227be..05cd709a8f62 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -69,7 +69,7 @@ # /other/ @cclauss # TODO: Uncomment this line after Hacktoberfest -/project_euler/ @dhruvmanila +# /project_euler/ # /quantum/ diff --git a/DIRECTORY.md b/DIRECTORY.md index 4ae1c69f7099..7d3ceee144be 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -51,6 +51,7 @@ * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) * [Is Even](bit_manipulation/is_even.py) * [Is Power Of Two](bit_manipulation/is_power_of_two.py) + * [Missing Number](bit_manipulation/missing_number.py) * [Numbers Different Signs](bit_manipulation/numbers_different_signs.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) @@ -232,6 +233,7 @@ * [Merge Two Lists](data_structures/linked_list/merge_two_lists.py) * [Middle Element Of Linked List](data_structures/linked_list/middle_element_of_linked_list.py) * [Print Reverse](data_structures/linked_list/print_reverse.py) + * [Rotate To The Right](data_structures/linked_list/rotate_to_the_right.py) * [Singly Linked List](data_structures/linked_list/singly_linked_list.py) * [Skip List](data_structures/linked_list/skip_list.py) * [Swap Nodes](data_structures/linked_list/swap_nodes.py) @@ -676,6 +678,7 @@ * [Sylvester Sequence](maths/sylvester_sequence.py) * [Tanh](maths/tanh.py) * [Test Prime Check](maths/test_prime_check.py) + * [Three Sum](maths/three_sum.py) * [Trapezoidal Rule](maths/trapezoidal_rule.py) * [Triplet Sum](maths/triplet_sum.py) * [Twin Prime](maths/twin_prime.py) From e798e5acdee69416d61c8ab65cea4da8a5c16355 Mon Sep 17 00:00:00 2001 From: Bama Charan Chhandogi Date: Mon, 2 Oct 2023 05:49:39 +0530 Subject: [PATCH 0993/1543] add reverse k group linkedlist (#9323) * add reverse k group 
linkedlist * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * Update reverse_k_group.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update reverse_k_group.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update reverse_k_group.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../linked_list/reverse_k_group.py | 118 ++++++++++++++++++ 1 file changed, 118 insertions(+) create mode 100644 data_structures/linked_list/reverse_k_group.py diff --git a/data_structures/linked_list/reverse_k_group.py b/data_structures/linked_list/reverse_k_group.py new file mode 100644 index 000000000000..5fc45491a540 --- /dev/null +++ b/data_structures/linked_list/reverse_k_group.py @@ -0,0 +1,118 @@ +from __future__ import annotations + +from collections.abc import Iterable, Iterator +from dataclasses import dataclass + + +@dataclass +class Node: + data: int + next_node: Node | None = None + + +class LinkedList: + def __init__(self, ints: Iterable[int]) -> None: + self.head: Node | None = None + for i in ints: + self.append(i) + + def __iter__(self) -> Iterator[int]: + """ + >>> ints = [] + >>> list(LinkedList(ints)) == ints + True + >>> ints = tuple(range(5)) + >>> tuple(LinkedList(ints)) == ints + True + """ + node = self.head + while node: + yield node.data + node = node.next_node + + def __len__(self) -> int: + """ + >>> for i in range(3): + ... 
len(LinkedList(range(i))) == i + True + True + True + >>> len(LinkedList("abcdefgh")) + 8 + """ + return sum(1 for _ in self) + + def __str__(self) -> str: + """ + >>> str(LinkedList([])) + '' + >>> str(LinkedList(range(5))) + '0 -> 1 -> 2 -> 3 -> 4' + """ + return " -> ".join([str(node) for node in self]) + + def append(self, data: int) -> None: + """ + >>> ll = LinkedList([1, 2]) + >>> tuple(ll) + (1, 2) + >>> ll.append(3) + >>> tuple(ll) + (1, 2, 3) + >>> ll.append(4) + >>> tuple(ll) + (1, 2, 3, 4) + >>> len(ll) + 4 + """ + if not self.head: + self.head = Node(data) + return + node = self.head + while node.next_node: + node = node.next_node + node.next_node = Node(data) + + def reverse_k_nodes(self, group_size: int) -> None: + """ + reverse nodes within groups of size k + >>> ll = LinkedList([1, 2, 3, 4, 5]) + >>> ll.reverse_k_nodes(2) + >>> tuple(ll) + (2, 1, 4, 3, 5) + >>> str(ll) + '2 -> 1 -> 4 -> 3 -> 5' + """ + if self.head is None or self.head.next_node is None: + return + + length = len(self) + dummy_head = Node(0) + dummy_head.next_node = self.head + previous_node = dummy_head + + while length >= group_size: + current_node = previous_node.next_node + assert current_node + next_node = current_node.next_node + for _ in range(1, group_size): + assert next_node, current_node + current_node.next_node = next_node.next_node + assert previous_node + next_node.next_node = previous_node.next_node + previous_node.next_node = next_node + next_node = current_node.next_node + previous_node = current_node + length -= group_size + self.head = dummy_head.next_node + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + ll = LinkedList([1, 2, 3, 4, 5]) + print(f"Original Linked List: {ll}") + k = 2 + ll.reverse_k_nodes(k) + print(f"After reversing groups of size {k}: {ll}") From 9640a4041a7b331e506daab1b31dd30fb47b228d Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Mon, 2 Oct 2023 19:58:36 +0530 Subject: [PATCH 0994/1543] Add typing to binary_exponentiation_2.py (#9475) --- maths/binary_exponentiation_2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maths/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py index 51ec4baf2598..af8f776dd266 100644 --- a/maths/binary_exponentiation_2.py +++ b/maths/binary_exponentiation_2.py @@ -11,7 +11,7 @@ """ -def b_expo(a, b): +def b_expo(a: int, b: int) -> int: res = 0 while b > 0: if b & 1: @@ -23,7 +23,7 @@ def b_expo(a, b): return res -def b_expo_mod(a, b, c): +def b_expo_mod(a: int, b: int, c: int) -> int: res = 0 while b > 0: if b & 1: From 89a65a861724d2eb8c6a60a9e1655d7af9cdc836 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Mon, 2 Oct 2023 19:59:06 +0530 Subject: [PATCH 0995/1543] Add typing to binary_exponentiation.py (#9471) * Add typing to binary_exponentiation.py * Update binary_exponentiation.py * float to int division change as per review --- maths/binary_exponentiation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maths/binary_exponentiation.py b/maths/binary_exponentiation.py index 147b4285ffa1..05de939d1bde 100644 --- a/maths/binary_exponentiation.py +++ b/maths/binary_exponentiation.py @@ -4,7 +4,7 @@ # Time Complexity : O(logn) -def binary_exponentiation(a, n): +def binary_exponentiation(a: int, n: int) -> int: if n == 0: return 1 @@ -12,7 +12,7 @@ def binary_exponentiation(a, n): return binary_exponentiation(a, n - 1) * a else: - b = 
binary_exponentiation(a, n / 2) + b = binary_exponentiation(a, n // 2) return b * b From 97154cfa351e35ddf0727691a92998cfd7be4e5b Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Mon, 2 Oct 2023 20:00:34 +0530 Subject: [PATCH 0996/1543] Add typing to binary_exp_mod.py (#9469) * Add typing to binary_exp_mod.py * Update binary_exp_mod.py * review changes --- maths/binary_exp_mod.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maths/binary_exp_mod.py b/maths/binary_exp_mod.py index df688892d690..8893182a3496 100644 --- a/maths/binary_exp_mod.py +++ b/maths/binary_exp_mod.py @@ -1,4 +1,4 @@ -def bin_exp_mod(a, n, b): +def bin_exp_mod(a: int, n: int, b: int) -> int: """ >>> bin_exp_mod(3, 4, 5) 1 @@ -13,7 +13,7 @@ def bin_exp_mod(a, n, b): if n % 2 == 1: return (bin_exp_mod(a, n - 1, b) * a) % b - r = bin_exp_mod(a, n / 2, b) + r = bin_exp_mod(a, n // 2, b) return (r * r) % b From 73118b9f67f49fae14eb9a39e47ec9127ef1f155 Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Mon, 2 Oct 2023 20:11:34 +0530 Subject: [PATCH 0997/1543] Add typing to binary_exponentiation_3.py (#9477) --- maths/binary_exponentiation_3.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maths/binary_exponentiation_3.py b/maths/binary_exponentiation_3.py index dd4e70e74129..9cd143e09207 100644 --- a/maths/binary_exponentiation_3.py +++ b/maths/binary_exponentiation_3.py @@ -11,7 +11,7 @@ """ -def b_expo(a, b): +def b_expo(a: int, b: int) -> int: res = 1 while b > 0: if b & 1: @@ -23,7 +23,7 @@ def b_expo(a, b): return res -def b_expo_mod(a, b, c): +def b_expo_mod(a: int, b: int, c: int) -> int: res = 1 while b > 0: if b & 1: From 95345f6f5b0e6ae10f54a33850298634e05766ee Mon Sep 17 00:00:00 2001 From: Saksham Chawla <51916697+saksham-chawla@users.noreply.github.com> Date: Mon, 2 Oct 2023 20:51:45 +0530 Subject: [PATCH 0998/1543] Add typng to binomial_coefficient.py (#9480) --- maths/binomial_coefficient.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/binomial_coefficient.py b/maths/binomial_coefficient.py index 0d4b3d1a8d9a..6d5b46cb5861 100644 --- a/maths/binomial_coefficient.py +++ b/maths/binomial_coefficient.py @@ -1,4 +1,4 @@ -def binomial_coefficient(n, r): +def binomial_coefficient(n: int, r: int) -> int: """ Find binomial coefficient using pascals triangle. From 8c7bd1c48d1e4029aa115d50fb3034e199bef7f9 Mon Sep 17 00:00:00 2001 From: Varshaa Shetty Date: Tue, 3 Oct 2023 03:17:10 +0530 Subject: [PATCH 0999/1543] Deleted minmax.py (#9482) --- backtracking/minmax.py | 69 ------------------------------------------ 1 file changed, 69 deletions(-) delete mode 100644 backtracking/minmax.py diff --git a/backtracking/minmax.py b/backtracking/minmax.py deleted file mode 100644 index 9b87183cfdb7..000000000000 --- a/backtracking/minmax.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -Minimax helps to achieve maximum score in a game by checking all possible moves. - -""" -from __future__ import annotations - -import math - - -def minimax( - depth: int, node_index: int, is_max: bool, scores: list[int], height: float -) -> int: - """ - depth is current depth in game tree. - node_index is index of current node in scores[]. - scores[] contains the leaves of game tree. - height is maximum height of game tree. 
- - >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423] - >>> height = math.log(len(scores), 2) - >>> minimax(0, 0, True, scores, height) - 65 - >>> minimax(-1, 0, True, scores, height) - Traceback (most recent call last): - ... - ValueError: Depth cannot be less than 0 - >>> minimax(0, 0, True, [], 2) - Traceback (most recent call last): - ... - ValueError: Scores cannot be empty - >>> scores = [3, 5, 2, 9, 12, 5, 23, 23] - >>> height = math.log(len(scores), 2) - >>> minimax(0, 0, True, scores, height) - 12 - """ - - if depth < 0: - raise ValueError("Depth cannot be less than 0") - - if not scores: - raise ValueError("Scores cannot be empty") - - if depth == height: - return scores[node_index] - - return ( - max( - minimax(depth + 1, node_index * 2, False, scores, height), - minimax(depth + 1, node_index * 2 + 1, False, scores, height), - ) - if is_max - else min( - minimax(depth + 1, node_index * 2, True, scores, height), - minimax(depth + 1, node_index * 2 + 1, True, scores, height), - ) - ) - - -def main() -> None: - scores = [90, 23, 6, 33, 21, 65, 123, 34423] - height = math.log(len(scores), 2) - print(f"Optimal value : {minimax(0, 0, True, scores, height)}") - - -if __name__ == "__main__": - import doctest - - doctest.testmod() - main() From f8fe8fe41f74c8ecc5c8555ca43d65bd12b4f073 Mon Sep 17 00:00:00 2001 From: aryan1165 <111041731+aryan1165@users.noreply.github.com> Date: Tue, 3 Oct 2023 03:27:00 +0530 Subject: [PATCH 1000/1543] Removed maths/miller_rabin.py , Double implementation. #8098 (#9228) * Removed ciphers/rabin_miller.py as it is already there maths/miller_rabin.py * Renamed miller_rabin.py to rabain_miller.py * Restore ciphers/rabin_miller.py and removed maths/rabin_miller.py --- maths/miller_rabin.py | 51 ------------------------------------------- 1 file changed, 51 deletions(-) delete mode 100644 maths/miller_rabin.py diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py deleted file mode 100644 index 9f2668dbab14..000000000000 --- a/maths/miller_rabin.py +++ /dev/null @@ -1,51 +0,0 @@ -import random - -from .binary_exp_mod import bin_exp_mod - - -# This is a probabilistic check to test primality, useful for big numbers! 
-# if it's a prime, it will return true -# if it's not a prime, the chance of it returning true is at most 1/4**prec -def is_prime_big(n, prec=1000): - """ - >>> from maths.prime_check import is_prime - >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s - >>> all(is_prime_big(i) == is_prime(i) for i in range(256)) - True - """ - if n < 2: - return False - - if n % 2 == 0: - return n == 2 - - # this means n is odd - d = n - 1 - exp = 0 - while d % 2 == 0: - d /= 2 - exp += 1 - - # n - 1=d*(2**exp) - count = 0 - while count < prec: - a = random.randint(2, n - 1) - b = bin_exp_mod(a, d, n) - if b != 1: - flag = True - for _ in range(exp): - if b == n - 1: - flag = False - break - b = b * b - b %= n - if flag: - return False - count += 1 - return True - - -if __name__ == "__main__": - n = abs(int(input("Enter bound : ").strip())) - print("Here's the list of primes:") - print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i))) From f964dcbf2ff7c70e4aca20532a38dfb02ce8a4c0 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 3 Oct 2023 05:05:43 +0200 Subject: [PATCH 1001/1543] pre-commit autoupdate && pre-commit run --all-files (#9516) * pre-commit autoupdate && pre-commit run --all-files * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 809b841d0ea3..dbf7ff341243 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.291 + rev: v0.0.292 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.1.0" + rev: "1.2.0" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 7d3ceee144be..24c68171c9bc 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -233,6 +233,7 @@ * [Merge Two Lists](data_structures/linked_list/merge_two_lists.py) * [Middle Element Of Linked List](data_structures/linked_list/middle_element_of_linked_list.py) * [Print Reverse](data_structures/linked_list/print_reverse.py) + * [Reverse K Group](data_structures/linked_list/reverse_k_group.py) * [Rotate To The Right](data_structures/linked_list/rotate_to_the_right.py) * [Singly Linked List](data_structures/linked_list/singly_linked_list.py) * [Skip List](data_structures/linked_list/skip_list.py) From 0f4e51245f33175b4fb311f633d3821210741bdd Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 3 Oct 2023 11:17:10 +0200 Subject: [PATCH 1002/1543] Upgrade to Python 3.12 (#9576) * DRAFT: GitHub Actions: Test on Python 3.12 Repeats #8777 * #8777 Some of our dependencies will not be ready yet. 
* Python 3.12: Disable qiskit and tensorflow algorithms * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .github/workflows/build.yml | 5 +++-- .github/workflows/ruff.yml | 2 +- CONTRIBUTING.md | 2 +- DIRECTORY.md | 19 ------------------- backtracking/combination_sum.py | 2 +- ....py => cnn_classification.py.DISABLED.txt} | 0 ...ans_clustering_tensorflow.py.DISABLED.txt} | 0 ...ns.py => fuzzy_operations.py.DISABLED.txt} | 0 ...ion.py => lstm_prediction.py.DISABLED.txt} | 0 maths/maclaurin_series.py | 8 ++++---- quantum/{bb84.py => bb84.py.DISABLED.txt} | 0 ...jozsa.py => deutsch_jozsa.py.DISABLED.txt} | 1 + ...lf_adder.py => half_adder.py.DISABLED.txt} | 1 + .../{not_gate.py => not_gate.py.DISABLED.txt} | 0 ..._adder.py => q_full_adder.py.DISABLED.txt} | 0 ...y => quantum_entanglement.py.DISABLED.txt} | 0 ... => quantum_teleportation.py.DISABLED.txt} | 0 ...y => ripple_adder_classic.py.DISABLED.txt} | 0 ...y => single_qubit_measure.py.DISABLED.txt} | 0 ...g.py => superdense_coding.py.DISABLED.txt} | 0 requirements.txt | 6 +++--- 21 files changed, 15 insertions(+), 31 deletions(-) rename computer_vision/{cnn_classification.py => cnn_classification.py.DISABLED.txt} (100%) rename dynamic_programming/{k_means_clustering_tensorflow.py => k_means_clustering_tensorflow.py.DISABLED.txt} (100%) rename fuzzy_logic/{fuzzy_operations.py => fuzzy_operations.py.DISABLED.txt} (100%) rename machine_learning/lstm/{lstm_prediction.py => lstm_prediction.py.DISABLED.txt} (100%) rename quantum/{bb84.py => bb84.py.DISABLED.txt} (100%) rename quantum/{deutsch_jozsa.py => deutsch_jozsa.py.DISABLED.txt} (99%) mode change 100755 => 100644 rename quantum/{half_adder.py => half_adder.py.DISABLED.txt} (99%) mode change 100755 => 100644 rename quantum/{not_gate.py => not_gate.py.DISABLED.txt} (100%) rename quantum/{q_full_adder.py => q_full_adder.py.DISABLED.txt} (100%) rename quantum/{quantum_entanglement.py => quantum_entanglement.py.DISABLED.txt} (100%) rename quantum/{quantum_teleportation.py => quantum_teleportation.py.DISABLED.txt} (100%) rename quantum/{ripple_adder_classic.py => ripple_adder_classic.py.DISABLED.txt} (100%) rename quantum/{single_qubit_measure.py => single_qubit_measure.py.DISABLED.txt} (100%) rename quantum/{superdense_coding.py => superdense_coding.py.DISABLED.txt} (100%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fc8cb636979e..60c1d6d119d0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -9,10 +9,11 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: - python-version: 3.11 + python-version: 3.12 + allow-prereleases: true - uses: actions/cache@v3 with: path: ~/.cache/pip diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index e71ac8a4e933..496f1460e074 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -11,6 +11,6 @@ jobs: ruff: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - run: pip install --user ruff - run: ruff --output-format=github . diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4a1bb652738f..7a67ce33cd62 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -73,7 +73,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.11+. 
For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. +- Please write in Python 3.12+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. diff --git a/DIRECTORY.md b/DIRECTORY.md index 24c68171c9bc..9a913aa786e1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -26,7 +26,6 @@ * [Hamiltonian Cycle](backtracking/hamiltonian_cycle.py) * [Knight Tour](backtracking/knight_tour.py) * [Minimax](backtracking/minimax.py) - * [Minmax](backtracking/minmax.py) * [N Queens](backtracking/n_queens.py) * [N Queens Math](backtracking/n_queens_math.py) * [Power Sum](backtracking/power_sum.py) @@ -133,7 +132,6 @@ * [Run Length Encoding](compression/run_length_encoding.py) ## Computer Vision - * [Cnn Classification](computer_vision/cnn_classification.py) * [Flip Augmentation](computer_vision/flip_augmentation.py) * [Haralick Descriptors](computer_vision/haralick_descriptors.py) * [Harris Corner](computer_vision/harris_corner.py) @@ -321,7 +319,6 @@ * [Floyd Warshall](dynamic_programming/floyd_warshall.py) * [Integer Partition](dynamic_programming/integer_partition.py) * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) - * [K Means Clustering Tensorflow](dynamic_programming/k_means_clustering_tensorflow.py) * [Knapsack](dynamic_programming/knapsack.py) * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) * [Longest Common Substring](dynamic_programming/longest_common_substring.py) @@ -384,9 +381,6 @@ * [Mandelbrot](fractals/mandelbrot.py) * [Sierpinski Triangle](fractals/sierpinski_triangle.py) -## Fuzzy Logic - * [Fuzzy Operations](fuzzy_logic/fuzzy_operations.py) - ## Genetic Algorithm * [Basic String](genetic_algorithm/basic_string.py) @@ -517,8 +511,6 @@ * Local Weighted Learning * [Local Weighted Learning](machine_learning/local_weighted_learning/local_weighted_learning.py) * [Logistic Regression](machine_learning/logistic_regression.py) - * Lstm - * [Lstm Prediction](machine_learning/lstm/lstm_prediction.py) * [Mfcc](machine_learning/mfcc.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polynomial Regression](machine_learning/polynomial_regression.py) @@ -613,7 +605,6 @@ * [Matrix Exponentiation](maths/matrix_exponentiation.py) * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) * [Median Of Two Arrays](maths/median_of_two_arrays.py) - * [Miller Rabin](maths/miller_rabin.py) * [Mobius Function](maths/mobius_function.py) * [Modular Exponential](maths/modular_exponential.py) * [Monte Carlo](maths/monte_carlo.py) @@ -1071,17 +1062,7 @@ * [Sol1](project_euler/problem_800/sol1.py) ## Quantum - * [Bb84](quantum/bb84.py) - * [Deutsch Jozsa](quantum/deutsch_jozsa.py) - * [Half Adder](quantum/half_adder.py) - * [Not Gate](quantum/not_gate.py) * [Q Fourier Transform](quantum/q_fourier_transform.py) - * [Q Full Adder](quantum/q_full_adder.py) - * [Quantum Entanglement](quantum/quantum_entanglement.py) - * [Quantum Teleportation](quantum/quantum_teleportation.py) - * [Ripple Adder 
Classic](quantum/ripple_adder_classic.py) - * [Single Qubit Measure](quantum/single_qubit_measure.py) - * [Superdense Coding](quantum/superdense_coding.py) ## Scheduling * [First Come First Served](scheduling/first_come_first_served.py) diff --git a/backtracking/combination_sum.py b/backtracking/combination_sum.py index f555adb751d0..3c6ed81f44f0 100644 --- a/backtracking/combination_sum.py +++ b/backtracking/combination_sum.py @@ -47,7 +47,7 @@ def combination_sum(candidates: list, target: int) -> list: >>> combination_sum([-8, 2.3, 0], 1) Traceback (most recent call last): ... - RecursionError: maximum recursion depth exceeded in comparison + RecursionError: maximum recursion depth exceeded """ path = [] # type: list[int] answer = [] # type: list[int] diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py.DISABLED.txt similarity index 100% rename from computer_vision/cnn_classification.py rename to computer_vision/cnn_classification.py.DISABLED.txt diff --git a/dynamic_programming/k_means_clustering_tensorflow.py b/dynamic_programming/k_means_clustering_tensorflow.py.DISABLED.txt similarity index 100% rename from dynamic_programming/k_means_clustering_tensorflow.py rename to dynamic_programming/k_means_clustering_tensorflow.py.DISABLED.txt diff --git a/fuzzy_logic/fuzzy_operations.py b/fuzzy_logic/fuzzy_operations.py.DISABLED.txt similarity index 100% rename from fuzzy_logic/fuzzy_operations.py rename to fuzzy_logic/fuzzy_operations.py.DISABLED.txt diff --git a/machine_learning/lstm/lstm_prediction.py b/machine_learning/lstm/lstm_prediction.py.DISABLED.txt similarity index 100% rename from machine_learning/lstm/lstm_prediction.py rename to machine_learning/lstm/lstm_prediction.py.DISABLED.txt diff --git a/maths/maclaurin_series.py b/maths/maclaurin_series.py index e55839bc15ba..806e5f9b0788 100644 --- a/maths/maclaurin_series.py +++ b/maths/maclaurin_series.py @@ -17,9 +17,9 @@ def maclaurin_sin(theta: float, accuracy: int = 30) -> float: >>> all(isclose(maclaurin_sin(x, 50), sin(x)) for x in range(-25, 25)) True >>> maclaurin_sin(10) - -0.544021110889369 + -0.5440211108893691 >>> maclaurin_sin(-10) - 0.5440211108893703 + 0.5440211108893704 >>> maclaurin_sin(10, 15) -0.5440211108893689 >>> maclaurin_sin(-10, 15) @@ -69,9 +69,9 @@ def maclaurin_cos(theta: float, accuracy: int = 30) -> float: >>> all(isclose(maclaurin_cos(x, 50), cos(x)) for x in range(-25, 25)) True >>> maclaurin_cos(5) - 0.28366218546322675 + 0.2836621854632268 >>> maclaurin_cos(-5) - 0.2836621854632266 + 0.2836621854632265 >>> maclaurin_cos(10, 15) -0.8390715290764525 >>> maclaurin_cos(-10, 15) diff --git a/quantum/bb84.py b/quantum/bb84.py.DISABLED.txt similarity index 100% rename from quantum/bb84.py rename to quantum/bb84.py.DISABLED.txt diff --git a/quantum/deutsch_jozsa.py b/quantum/deutsch_jozsa.py.DISABLED.txt old mode 100755 new mode 100644 similarity index 99% rename from quantum/deutsch_jozsa.py rename to quantum/deutsch_jozsa.py.DISABLED.txt index 95c3e65b5edf..5c8a379debfc --- a/quantum/deutsch_jozsa.py +++ b/quantum/deutsch_jozsa.py.DISABLED.txt @@ -1,3 +1,4 @@ +# DISABLED!! 
#!/usr/bin/env python3 """ Deutsch-Jozsa Algorithm is one of the first examples of a quantum diff --git a/quantum/half_adder.py b/quantum/half_adder.py.DISABLED.txt old mode 100755 new mode 100644 similarity index 99% rename from quantum/half_adder.py rename to quantum/half_adder.py.DISABLED.txt index 21a57ddcf2dd..800d563ec76f --- a/quantum/half_adder.py +++ b/quantum/half_adder.py.DISABLED.txt @@ -1,3 +1,4 @@ +# DISABLED!! #!/usr/bin/env python3 """ Build a half-adder quantum circuit that takes two bits as input, diff --git a/quantum/not_gate.py b/quantum/not_gate.py.DISABLED.txt similarity index 100% rename from quantum/not_gate.py rename to quantum/not_gate.py.DISABLED.txt diff --git a/quantum/q_full_adder.py b/quantum/q_full_adder.py.DISABLED.txt similarity index 100% rename from quantum/q_full_adder.py rename to quantum/q_full_adder.py.DISABLED.txt diff --git a/quantum/quantum_entanglement.py b/quantum/quantum_entanglement.py.DISABLED.txt similarity index 100% rename from quantum/quantum_entanglement.py rename to quantum/quantum_entanglement.py.DISABLED.txt diff --git a/quantum/quantum_teleportation.py b/quantum/quantum_teleportation.py.DISABLED.txt similarity index 100% rename from quantum/quantum_teleportation.py rename to quantum/quantum_teleportation.py.DISABLED.txt diff --git a/quantum/ripple_adder_classic.py b/quantum/ripple_adder_classic.py.DISABLED.txt similarity index 100% rename from quantum/ripple_adder_classic.py rename to quantum/ripple_adder_classic.py.DISABLED.txt diff --git a/quantum/single_qubit_measure.py b/quantum/single_qubit_measure.py.DISABLED.txt similarity index 100% rename from quantum/single_qubit_measure.py rename to quantum/single_qubit_measure.py.DISABLED.txt diff --git a/quantum/superdense_coding.py b/quantum/superdense_coding.py.DISABLED.txt similarity index 100% rename from quantum/superdense_coding.py rename to quantum/superdense_coding.py.DISABLED.txt diff --git a/requirements.txt b/requirements.txt index 1128e9d66820..25dba6f5a250 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,15 +9,15 @@ opencv-python pandas pillow projectq -qiskit -qiskit-aer +qiskit ; python_version < '3.12' +qiskit-aer ; python_version < '3.12' requests rich scikit-fuzzy scikit-learn statsmodels sympy -tensorflow +tensorflow ; python_version < '3.12' texttable tweepy xgboost From da03c14d39ec8c6a3c253951541b902172bb92fc Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 3 Oct 2023 11:48:58 +0200 Subject: [PATCH 1003/1543] Fix accuracy in maclaurin_series on Python 3.12 (#9581) --- maths/maclaurin_series.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/maths/maclaurin_series.py b/maths/maclaurin_series.py index 806e5f9b0788..d5c3c3ab958b 100644 --- a/maths/maclaurin_series.py +++ b/maths/maclaurin_series.py @@ -21,9 +21,9 @@ def maclaurin_sin(theta: float, accuracy: int = 30) -> float: >>> maclaurin_sin(-10) 0.5440211108893704 >>> maclaurin_sin(10, 15) - -0.5440211108893689 + -0.544021110889369 >>> maclaurin_sin(-10, 15) - 0.5440211108893703 + 0.5440211108893704 >>> maclaurin_sin("10") Traceback (most recent call last): ... 
@@ -73,7 +73,7 @@ def maclaurin_cos(theta: float, accuracy: int = 30) -> float: >>> maclaurin_cos(-5) 0.2836621854632265 >>> maclaurin_cos(10, 15) - -0.8390715290764525 + -0.8390715290764524 >>> maclaurin_cos(-10, 15) -0.8390715290764521 >>> maclaurin_cos("10") From e60779c202880275e786f0f857f4261b90a41d51 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 3 Oct 2023 12:04:59 +0200 Subject: [PATCH 1004/1543] Upgrade our Devcontainer to Python 3.12 on Debian bookworm (#9580) --- .devcontainer/Dockerfile | 2 +- .devcontainer/README.md | 1 + .devcontainer/devcontainer.json | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 .devcontainer/README.md diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index b5a5347c66b0..6aa0073bf95b 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,5 +1,5 @@ # https://github.com/microsoft/vscode-dev-containers/blob/main/containers/python-3/README.md -ARG VARIANT=3.11-bookworm +ARG VARIANT=3.12-bookworm FROM mcr.microsoft.com/vscode/devcontainers/python:${VARIANT} COPY requirements.txt /tmp/pip-tmp/ RUN python3 -m pip install --upgrade pip \ diff --git a/.devcontainer/README.md b/.devcontainer/README.md new file mode 100644 index 000000000000..ec3cdb61de7a --- /dev/null +++ b/.devcontainer/README.md @@ -0,0 +1 @@ +https://code.visualstudio.com/docs/devcontainers/tutorial diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index c5a855b2550c..ae1d4fb7494d 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -4,10 +4,10 @@ "dockerfile": "Dockerfile", "context": "..", "args": { - // Update 'VARIANT' to pick a Python version: 3, 3.10, 3.9, 3.8, 3.7, 3.6 + // Update 'VARIANT' to pick a Python version: 3, 3.11, 3.10, 3.9, 3.8 // Append -bullseye or -buster to pin to an OS version. // Use -bullseye variants on local on arm64/Apple Silicon. - "VARIANT": "3.11-bookworm", + "VARIANT": "3.12-bookworm", } }, From b60a94b5b305487ca5f5755ab6de2bf0adeb3d78 Mon Sep 17 00:00:00 2001 From: dekomori_sanae09 Date: Tue, 3 Oct 2023 19:23:27 +0530 Subject: [PATCH 1005/1543] merge double_factorial (#9431) * merge double_factorial * fix ruff error * fix merge issues * change test case * fix import error --- maths/double_factorial.py | 60 +++++++++++++++++++++++++++++ maths/double_factorial_iterative.py | 33 ---------------- maths/double_factorial_recursive.py | 31 --------------- 3 files changed, 60 insertions(+), 64 deletions(-) create mode 100644 maths/double_factorial.py delete mode 100644 maths/double_factorial_iterative.py delete mode 100644 maths/double_factorial_recursive.py diff --git a/maths/double_factorial.py b/maths/double_factorial.py new file mode 100644 index 000000000000..3c3a28304e95 --- /dev/null +++ b/maths/double_factorial.py @@ -0,0 +1,60 @@ +def double_factorial_recursive(n: int) -> int: + """ + Compute double factorial using recursive method. + Recursion can be costly for large numbers. + + To learn about the theory behind this algorithm: + https://en.wikipedia.org/wiki/Double_factorial + + >>> from math import prod + >>> all(double_factorial_recursive(i) == prod(range(i, 0, -2)) for i in range(20)) + True + >>> double_factorial_recursive(0.1) + Traceback (most recent call last): + ... + ValueError: double_factorial_recursive() only accepts integral values + >>> double_factorial_recursive(-1) + Traceback (most recent call last): + ... 
+ ValueError: double_factorial_recursive() not defined for negative values + """ + if not isinstance(n, int): + raise ValueError("double_factorial_recursive() only accepts integral values") + if n < 0: + raise ValueError("double_factorial_recursive() not defined for negative values") + return 1 if n <= 1 else n * double_factorial_recursive(n - 2) + + +def double_factorial_iterative(num: int) -> int: + """ + Compute double factorial using iterative method. + + To learn about the theory behind this algorithm: + https://en.wikipedia.org/wiki/Double_factorial + + >>> from math import prod + >>> all(double_factorial_iterative(i) == prod(range(i, 0, -2)) for i in range(20)) + True + >>> double_factorial_iterative(0.1) + Traceback (most recent call last): + ... + ValueError: double_factorial_iterative() only accepts integral values + >>> double_factorial_iterative(-1) + Traceback (most recent call last): + ... + ValueError: double_factorial_iterative() not defined for negative values + """ + if not isinstance(num, int): + raise ValueError("double_factorial_iterative() only accepts integral values") + if num < 0: + raise ValueError("double_factorial_iterative() not defined for negative values") + value = 1 + for i in range(num, 0, -2): + value *= i + return value + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/maths/double_factorial_iterative.py b/maths/double_factorial_iterative.py deleted file mode 100644 index b2b58aa04c28..000000000000 --- a/maths/double_factorial_iterative.py +++ /dev/null @@ -1,33 +0,0 @@ -def double_factorial(num: int) -> int: - """ - Compute double factorial using iterative method. - - To learn about the theory behind this algorithm: - https://en.wikipedia.org/wiki/Double_factorial - - >>> import math - >>> all(double_factorial(i) == math.prod(range(i, 0, -2)) for i in range(20)) - True - >>> double_factorial(0.1) - Traceback (most recent call last): - ... - ValueError: double_factorial() only accepts integral values - >>> double_factorial(-1) - Traceback (most recent call last): - ... - ValueError: double_factorial() not defined for negative values - """ - if not isinstance(num, int): - raise ValueError("double_factorial() only accepts integral values") - if num < 0: - raise ValueError("double_factorial() not defined for negative values") - value = 1 - for i in range(num, 0, -2): - value *= i - return value - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/maths/double_factorial_recursive.py b/maths/double_factorial_recursive.py deleted file mode 100644 index 05c9b29680a7..000000000000 --- a/maths/double_factorial_recursive.py +++ /dev/null @@ -1,31 +0,0 @@ -def double_factorial(n: int) -> int: - """ - Compute double factorial using recursive method. - Recursion can be costly for large numbers. - - To learn about the theory behind this algorithm: - https://en.wikipedia.org/wiki/Double_factorial - - >>> import math - >>> all(double_factorial(i) == math.prod(range(i, 0, -2)) for i in range(20)) - True - >>> double_factorial(0.1) - Traceback (most recent call last): - ... - ValueError: double_factorial() only accepts integral values - >>> double_factorial(-1) - Traceback (most recent call last): - ... 
- ValueError: double_factorial() not defined for negative values - """ - if not isinstance(n, int): - raise ValueError("double_factorial() only accepts integral values") - if n < 0: - raise ValueError("double_factorial() not defined for negative values") - return 1 if n <= 1 else n * double_factorial(n - 2) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 0a84b8f842c4c72f400d96313d992b608d621d07 Mon Sep 17 00:00:00 2001 From: Aasheesh <126905285+AasheeshLikePanner@users.noreply.github.com> Date: Tue, 3 Oct 2023 21:10:11 +0530 Subject: [PATCH 1006/1543] Changing Name of file and adding doctests in file. (#9513) * Adding doctests and changing file name * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update binary_multiplication.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update binary_multiplication.py * Changing comment and changing name function * Changing comment and changing name function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update binary_multiplication.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update binary_multiplication.py * Update binary_multiplication.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/binary_exponentiation_2.py | 50 --------------- maths/binary_multiplication.py | 101 +++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+), 50 deletions(-) delete mode 100644 maths/binary_exponentiation_2.py create mode 100644 maths/binary_multiplication.py diff --git a/maths/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py deleted file mode 100644 index af8f776dd266..000000000000 --- a/maths/binary_exponentiation_2.py +++ /dev/null @@ -1,50 +0,0 @@ -""" -* Binary Exponentiation with Multiplication -* This is a method to find a*b in a time complexity of O(log b) -* This is one of the most commonly used methods of finding result of multiplication. -* Also useful in cases where solution to (a*b)%c is required, -* where a,b,c can be numbers over the computers calculation limits. -* Done using iteration, can also be done using recursion - -* @author chinmoy159 -* @version 1.0 dated 10/08/2017 -""" - - -def b_expo(a: int, b: int) -> int: - res = 0 - while b > 0: - if b & 1: - res += a - - a += a - b >>= 1 - - return res - - -def b_expo_mod(a: int, b: int, c: int) -> int: - res = 0 - while b > 0: - if b & 1: - res = ((res % c) + (a % c)) % c - - a += a - b >>= 1 - - return res - - -""" -* Wondering how this method works ! -* It's pretty simple. -* Let's say you need to calculate a ^ b -* RULE 1 : a * b = (a+a) * (b/2) ---- example : 4 * 4 = (4+4) * (4/2) = 8 * 2 -* RULE 2 : IF b is ODD, then ---- a * b = a + (a * (b - 1)) :: where (b - 1) is even. -* Once b is even, repeat the process to get a * b -* Repeat the process till b = 1 OR b = 0, because a*1 = a AND a*0 = 0 -* -* As far as the modulo is concerned, -* the fact : (a+b) % c = ((a%c) + (b%c)) % c -* Now apply RULE 1 OR 2, whichever is required. 
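# A short worked trace of the two rules described above (illustrative sketch
# only; these lines are not part of the original file), assuming non-negative
# integers:
#   7 * 13 = 7 + 7 * 12       (RULE 2, since 13 is odd)
#          = 7 + 14 * 6       (RULE 1: double 7, halve 12)
#          = 7 + 28 * 3       (RULE 1)
#          = 7 + 28 + 28 * 2  (RULE 2)
#          = 7 + 28 + 56 * 1  (RULE 1)
#          = 7 + 28 + 56      = 91
assert 7 + 28 + 56 == 7 * 13 == 91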
-""" diff --git a/maths/binary_multiplication.py b/maths/binary_multiplication.py new file mode 100644 index 000000000000..0cc5a575f445 --- /dev/null +++ b/maths/binary_multiplication.py @@ -0,0 +1,101 @@ +""" +Binary Multiplication +This is a method to find a*b in a time complexity of O(log b) +This is one of the most commonly used methods of finding result of multiplication. +Also useful in cases where solution to (a*b)%c is required, +where a,b,c can be numbers over the computers calculation limits. +Done using iteration, can also be done using recursion + +Let's say you need to calculate a * b +RULE 1 : a * b = (a+a) * (b/2) ---- example : 4 * 4 = (4+4) * (4/2) = 8 * 2 +RULE 2 : IF b is odd, then ---- a * b = a + (a * (b - 1)), where (b - 1) is even. +Once b is even, repeat the process to get a * b +Repeat the process until b = 1 or b = 0, because a*1 = a and a*0 = 0 + +As far as the modulo is concerned, +the fact : (a+b) % c = ((a%c) + (b%c)) % c +Now apply RULE 1 or 2, whichever is required. + +@author chinmoy159 +""" + + +def binary_multiply(a: int, b: int) -> int: + """ + Multiply 'a' and 'b' using bitwise multiplication. + + Parameters: + a (int): The first number. + b (int): The second number. + + Returns: + int: a * b + + Examples: + >>> binary_multiply(2, 3) + 6 + >>> binary_multiply(5, 0) + 0 + >>> binary_multiply(3, 4) + 12 + >>> binary_multiply(10, 5) + 50 + >>> binary_multiply(0, 5) + 0 + >>> binary_multiply(2, 1) + 2 + >>> binary_multiply(1, 10) + 10 + """ + res = 0 + while b > 0: + if b & 1: + res += a + + a += a + b >>= 1 + + return res + + +def binary_mod_multiply(a: int, b: int, modulus: int) -> int: + """ + Calculate (a * b) % c using binary multiplication and modular arithmetic. + + Parameters: + a (int): The first number. + b (int): The second number. + modulus (int): The modulus. + + Returns: + int: (a * b) % modulus. + + Examples: + >>> binary_mod_multiply(2, 3, 5) + 1 + >>> binary_mod_multiply(5, 0, 7) + 0 + >>> binary_mod_multiply(3, 4, 6) + 0 + >>> binary_mod_multiply(10, 5, 13) + 11 + >>> binary_mod_multiply(2, 1, 5) + 2 + >>> binary_mod_multiply(1, 10, 3) + 1 + """ + res = 0 + while b > 0: + if b & 1: + res = ((res % modulus) + (a % modulus)) % modulus + + a += a + b >>= 1 + + return res + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 81661bd2d0c34363de7d3e1e802fe2f75b9a1fa4 Mon Sep 17 00:00:00 2001 From: Ayush Yadav <115359450+ayush-yadavv@users.noreply.github.com> Date: Wed, 4 Oct 2023 05:17:26 +0530 Subject: [PATCH 1007/1543] Update newtons_law_of_gravitation.py : Typo(Space Removed) (#9351) --- physics/newtons_law_of_gravitation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/physics/newtons_law_of_gravitation.py b/physics/newtons_law_of_gravitation.py index 4bbeddd61d5b..ae9da2f1e949 100644 --- a/physics/newtons_law_of_gravitation.py +++ b/physics/newtons_law_of_gravitation.py @@ -3,7 +3,7 @@ provided that the other three parameters are given. Description : Newton's Law of Universal Gravitation explains the presence of force of -attraction between bodies having a definite mass situated at a distance. It is usually +attraction between bodies having a definite mass situated at a distance. It is usually stated as that, every particle attracts every other particle in the universe with a force that is directly proportional to the product of their masses and inversely proportional to the square of the distance between their centers. 
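# Illustrative sketch of the relation just stated (not code from this module
# or patch; the constant and helper names below are hypothetical), assuming
# SI units and the CODATA value of G: F = G * m1 * m2 / d**2.
GRAVITATIONAL_CONSTANT_EXAMPLE = 6.6743e-11  # N * m^2 / kg^2


def gravitational_force_example(mass_1: float, mass_2: float, distance: float) -> float:
    # Attractive force in newtons between two point masses `distance` metres apart.
    return GRAVITATIONAL_CONSTANT_EXAMPLE * mass_1 * mass_2 / distance**2


# Rough Earth-Moon sanity check: the force should come out close to 2e20 N.
assert 1.9e20 < gravitational_force_example(5.972e24, 7.348e22, 3.844e8) < 2.0e20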
The publication of the From 12431389e32c290aae8c046ce9d8504d698d5f41 Mon Sep 17 00:00:00 2001 From: "Tan Kai Qun, Jeremy" Date: Wed, 4 Oct 2023 10:47:03 +0900 Subject: [PATCH 1008/1543] Add typing to topological_sort.py (#9650) * Add typing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Jeremy Tan Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- sorts/topological_sort.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/sorts/topological_sort.py b/sorts/topological_sort.py index 59a0c8571b53..efce8165fcac 100644 --- a/sorts/topological_sort.py +++ b/sorts/topological_sort.py @@ -5,11 +5,17 @@ # b c # / \ # d e -edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} -vertices = ["a", "b", "c", "d", "e"] +edges: dict[str, list[str]] = { + "a": ["c", "b"], + "b": ["d", "e"], + "c": [], + "d": [], + "e": [], +} +vertices: list[str] = ["a", "b", "c", "d", "e"] -def topological_sort(start, visited, sort): +def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]: """Perform topological sort on a directed acyclic graph.""" current = start # add current to visited From 28f1e68f005f99eb628efd1af899bdfe1c1bc99e Mon Sep 17 00:00:00 2001 From: "Tan Kai Qun, Jeremy" Date: Wed, 4 Oct 2023 11:05:47 +0900 Subject: [PATCH 1009/1543] Add typing (#9651) Co-authored-by: Jeremy Tan --- sorts/stooge_sort.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sorts/stooge_sort.py b/sorts/stooge_sort.py index 9a5bedeae21b..767c6a05924f 100644 --- a/sorts/stooge_sort.py +++ b/sorts/stooge_sort.py @@ -1,4 +1,4 @@ -def stooge_sort(arr): +def stooge_sort(arr: list[int]) -> list[int]: """ Examples: >>> stooge_sort([18.1, 0, -7.1, -1, 2, 2]) @@ -11,7 +11,7 @@ def stooge_sort(arr): return arr -def stooge(arr, i, h): +def stooge(arr: list[int], i: int, h: int) -> None: if i >= h: return From a7133eca13d312fa729e2872048c7d9a662f6c8c Mon Sep 17 00:00:00 2001 From: "Tan Kai Qun, Jeremy" Date: Wed, 4 Oct 2023 11:06:52 +0900 Subject: [PATCH 1010/1543] Add typing (#9652) Co-authored-by: Jeremy Tan --- sorts/shell_sort.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sorts/shell_sort.py b/sorts/shell_sort.py index 10ae9ba407ec..b65609c974b7 100644 --- a/sorts/shell_sort.py +++ b/sorts/shell_sort.py @@ -3,7 +3,7 @@ """ -def shell_sort(collection): +def shell_sort(collection: list[int]) -> list[int]: """Pure implementation of shell sort algorithm in Python :param collection: Some mutable ordered collection with heterogeneous comparable items inside From 8c23cc5117b338ea907045260274ac40301a4e0e Mon Sep 17 00:00:00 2001 From: "Tan Kai Qun, Jeremy" Date: Wed, 4 Oct 2023 11:07:25 +0900 Subject: [PATCH 1011/1543] Add typing (#9654) Co-authored-by: Jeremy Tan --- sorts/selection_sort.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sorts/selection_sort.py b/sorts/selection_sort.py index f3beb31b7070..28971a5e1aad 100644 --- a/sorts/selection_sort.py +++ b/sorts/selection_sort.py @@ -11,7 +11,7 @@ """ -def selection_sort(collection): +def selection_sort(collection: list[int]) -> list[int]: """Pure implementation of the selection sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside From 700df39ad446da895d413c0383632871459f0e9f Mon Sep 17 00:00:00 2001 From: aryan1165 <111041731+aryan1165@users.noreply.github.com> Date: 
Wed, 4 Oct 2023 09:04:55 +0530
Subject: [PATCH 1012/1543] Fixed file name in transposition_cipher_encrypt_decrypt_file.py. Fixing bug file not found. (#9426)
* Fixed file name in trnasposition_cipher_encrypt_decrypt_file.py
* Removed Output.txt
* Removed Output.txt
* Fixed build errors
---
 ciphers/prehistoric_men.txt | 1196 ++++++++---------
 ...ansposition_cipher_encrypt_decrypt_file.py | 4 +-
 2 files changed, 600 insertions(+), 600 deletions(-)
diff --git a/ciphers/prehistoric_men.txt b/ciphers/prehistoric_men.txt
index a58e533a8405..8d1b2bd8c8d1 100644
--- a/ciphers/prehistoric_men.txt
+++ b/ciphers/prehistoric_men.txt
@@ -40,8 +40,8 @@ Transcriber's note:
 version referred to above. One example of this might occur in
 the second paragraph under "Choppers and Adze-like Tools", page
 46, which contains the phrase
- an adze cutting edge is ? shaped. The symbol before
- shaped looks like a sharply-italicized sans-serif L.
+ “an adze cutting edge is ? shaped”. The symbol before
+ “shaped” looks like a sharply-italicized sans-serif “L”.
 Devices that cannot display that symbol may substitute a
 question mark, a square, or other symbol.
@@ -98,7 +98,7 @@ forced or pedantic; at least I have done my very best to tell the
 story simply and clearly.
 Many friends have aided in the preparation of the book. The whimsical
-charm of Miss Susan Richerts illustrations add enormously to the
+charm of Miss Susan Richert’s illustrations add enormously to the
 spirit I wanted. She gave freely of her own time on the drawings and
 in planning the book with me. My colleagues at the University of
 Chicago, especially Professor Wilton M. Krogman (now of the University
@@ -108,7 +108,7 @@ the Department of Anthropology, gave me counsel in matters bearing on
 their special fields, and the Department of Anthropology bore some of
 the expense of the illustrations. From Mrs. Irma Hunter and Mr. Arnold
 Maremont, who are not archeologists at all and have only an intelligent
-laymans notion of archeology, I had sound advice on how best to tell
+layman’s notion of archeology, I had sound advice on how best to tell
 the story. I am deeply indebted to all these friends.
 While I was preparing the second edition, I had the great fortune
@@ -117,13 +117,13 @@ Washburn, now of the Department of Anthropology of the University of
 California, and the fourth, fifth, and sixth chapters with Professor
 Hallum L. Movius, Jr., of the Peabody Museum, Harvard University. The
 book has gained greatly in accuracy thereby. In matters of dating,
-Professor Movius and the indications of Professor W. F. Libbys Carbon
+Professor Movius and the indications of Professor W. F. Libby’s Carbon
 14 chronology project have both encouraged me to choose the lowest
 dates now current for the events of the Pleistocene Ice Age. There is
 still no certain way of fixing a direct chronology for most of the
-Pleistocene, but Professor Libbys method appears very promising for
+Pleistocene, but Professor Libby’s method appears very promising for
 its end range and for proto-historic dates. In any case, this book
-names periods, and new dates may be written in against mine, if new
+names “periods,” and new dates may be written in against mine, if new
 and better dating systems appear.
 I wish to thank Dr. Clifford C. Gregg, Director of Chicago Natural
Susan Richert Allens original drawings appear, but a few +All of Mrs. Susan Richert Allen�s original drawings appear, but a few necessary corrections have been made in some of the charts and some new drawings have been added by Mr. John Pfiffner, Staff Artist, Chicago Natural History Museum. @@ -200,7 +200,7 @@ HOW WE LEARN about Prehistoric Men Prehistory means the time before written history began. Actually, more -than 99 per cent of mans story is prehistory. Man is at least half a +than 99 per cent of man�s story is prehistory. Man is at least half a million years old, but he did not begin to write history (or to write anything) until about 5,000 years ago. @@ -216,7 +216,7 @@ The scientists who study the bones and teeth and any other parts they find of the bodies of prehistoric men, are called _physical anthropologists_. Physical anthropologists are trained, much like doctors, to know all about the human body. They study living people, -too; they know more about the biological facts of human races than +too; they know more about the biological facts of human �races� than anybody else. If the police find a badly decayed body in a trunk, they ask a physical anthropologist to tell them what the person originally looked like. The physical anthropologists who specialize in @@ -228,14 +228,14 @@ ARCHEOLOGISTS There is a kind of scientist who studies the things that prehistoric men made and did. Such a scientist is called an _archeologist_. It is -the archeologists business to look for the stone and metal tools, the +the archeologist�s business to look for the stone and metal tools, the pottery, the graves, and the caves or huts of the men who lived before history began. But there is more to archeology than just looking for things. In -Professor V. Gordon Childes words, archeology furnishes a sort of +Professor V. Gordon Childe�s words, archeology �furnishes a sort of history of human activity, provided always that the actions have -produced concrete results and left recognizable material traces. You +produced concrete results and left recognizable material traces.� You will see that there are at least three points in what Childe says: 1. The archeologists have to find the traces of things left behind by @@ -245,7 +245,7 @@ will see that there are at least three points in what Childe says: too soft or too breakable to last through the years. However, 3. The archeologist must use whatever he can find to tell a story--to - make a sort of history--from the objects and living-places and + make a �sort of history�--from the objects and living-places and graves that have escaped destruction. What I mean is this: Let us say you are walking through a dump yard, @@ -253,8 +253,8 @@ and you find a rusty old spark plug. If you want to think about what the spark plug means, you quickly remember that it is a part of an automobile motor. This tells you something about the man who threw the spark plug on the dump. He either had an automobile, or he knew -or lived near someone who did. He cant have lived so very long ago, -youll remember, because spark plugs and automobiles are only about +or lived near someone who did. He can�t have lived so very long ago, +you�ll remember, because spark plugs and automobiles are only about sixty years old. When you think about the old spark plug in this way you have @@ -264,8 +264,8 @@ It is the same way with the man-made things we archeologists find and put in museums. 
Usually, only a few of these objects are pretty to look at; but each of them has some sort of story to tell. Making the interpretation of his finds is the most important part of the -archeologists job. It is the way he gets at the sort of history of -human activity which is expected of archeology. +archeologist�s job. It is the way he gets at the �sort of history of +human activity� which is expected of archeology. SOME OTHER SCIENTISTS @@ -274,7 +274,7 @@ There are many other scientists who help the archeologist and the physical anthropologist find out about prehistoric men. The geologists help us tell the age of the rocks or caves or gravel beds in which human bones or man-made objects are found. There are other scientists -with names which all begin with paleo (the Greek word for old). The +with names which all begin with �paleo� (the Greek word for �old�). The _paleontologists_ study fossil animals. There are also, for example, such scientists as _paleobotanists_ and _paleoclimatologists_, who study ancient plants and climates. These scientists help us to know @@ -306,20 +306,20 @@ systems. The rate of disappearance of radioactivity as time passes.[1]] [1] It is important that the limitations of the radioactive carbon - dating system be held in mind. As the statistics involved in + �dating� system be held in mind. As the statistics involved in the system are used, there are two chances in three that the - date of the sample falls within the range given as plus or - minus an added number of years. For example, the date for the - Jarmo village (see chart), given as 6750 200 B.C., really + �date� of the sample falls within the range given as plus or + minus an added number of years. For example, the �date� for the + Jarmo village (see chart), given as 6750 � 200 B.C., really means that there are only two chances in three that the real date of the charcoal sampled fell between 6950 and 6550 B.C. We have also begun to suspect that there are ways in which the - samples themselves may have become contaminated, either on + samples themselves may have become �contaminated,� either on the early or on the late side. We now tend to be suspicious of single radioactive carbon determinations, or of determinations from one site alone. But as a fabric of consistent determinations for several or more sites of one archeological - period, we gain confidence in the dates. + period, we gain confidence in the dates. HOW THE SCIENTISTS FIND OUT @@ -330,9 +330,9 @@ about prehistoric men. We also need a word about _how_ they find out. All our finds came by accident until about a hundred years ago. Men digging wells, or digging in caves for fertilizer, often turned up ancient swords or pots or stone arrowheads. People also found some odd -pieces of stone that didnt look like natural forms, but they also -didnt look like any known tool. As a result, the people who found them -gave them queer names; for example, thunderbolts. The people thought +pieces of stone that didn�t look like natural forms, but they also +didn�t look like any known tool. As a result, the people who found them +gave them queer names; for example, �thunderbolts.� The people thought the strange stones came to earth as bolts of lightning. We know now that these strange stones were prehistoric stone tools. @@ -349,7 +349,7 @@ story of cave men on Mount Carmel, in Palestine, began to be known. Planned archeological digging is only about a century old. 
Even before this, however, a few men realized the significance of objects they dug from the ground; one of these early archeologists was our own Thomas -Jefferson. The first real mound-digger was a German grocers clerk, +Jefferson. The first real mound-digger was a German grocer�s clerk, Heinrich Schliemann. Schliemann made a fortune as a merchant, first in Europe and then in the California gold-rush of 1849. He became an American citizen. Then he retired and had both money and time to test @@ -389,16 +389,16 @@ used had been a soft, unbaked mud-brick, and most of the debris consisted of fallen or rain-melted mud from these mud-bricks. This idea of _stratification_, like the cake layers, was already a -familiar one to the geologists by Schliemanns time. They could show +familiar one to the geologists by Schliemann�s time. They could show that their lowest layer of rock was oldest or earliest, and that the -overlying layers became more recent as one moved upward. Schliemanns +overlying layers became more recent as one moved upward. Schliemann�s digging proved the same thing at Troy. His first (lowest and earliest) city had at least nine layers above it; he thought that the second -layer contained the remains of Homers Troy. We now know that Homeric +layer contained the remains of Homer�s Troy. We now know that Homeric Troy was layer VIIa from the bottom; also, we count eleven layers or sub-layers in total. -Schliemanns work marks the beginnings of modern archeology. Scholars +Schliemann�s work marks the beginnings of modern archeology. Scholars soon set out to dig on ancient sites, from Egypt to Central America. @@ -410,21 +410,21 @@ Archeologists began to get ideas as to the kinds of objects that belonged together. If you compared a mail-order catalogue of 1890 with one of today, you would see a lot of differences. If you really studied the two catalogues hard, you would also begin to see that certain -objects go together. Horseshoes and metal buggy tires and pieces of +objects �go together.� Horseshoes and metal buggy tires and pieces of harness would begin to fit into a picture with certain kinds of coal stoves and furniture and china dishes and kerosene lamps. Our friend the spark plug, and radios and electric refrigerators and light bulbs would fit into a picture with different kinds of furniture and dishes -and tools. You wont be old enough to remember the kind of hats that -women wore in 1890, but youve probably seen pictures of them, and you -know very well they couldnt be worn with the fashions of today. +and tools. You won�t be old enough to remember the kind of hats that +women wore in 1890, but you�ve probably seen pictures of them, and you +know very well they couldn�t be worn with the fashions of today. This is one of the ways that archeologists study their materials. The various tools and weapons and jewelry, the pottery, the kinds of houses, and even the ways of burying the dead tend to fit into pictures. Some archeologists call all of the things that go together to make such a picture an _assemblage_. The assemblage of the first layer -of Schliemanns Troy was as different from that of the seventh layer as +of Schliemann�s Troy was as different from that of the seventh layer as our 1900 mail-order catalogue is from the one of today. The archeologists who came after Schliemann began to notice other @@ -433,23 +433,23 @@ idea that people will buy better mousetraps goes back into very ancient times. 
Today, if we make good automobiles or radios, we can sell some of them in Turkey or even in Timbuktu. This means that a few present-day types of American automobiles and radios form part -of present-day assemblages in both Turkey and Timbuktu. The total -present-day assemblage of Turkey is quite different from that of +of present-day �assemblages� in both Turkey and Timbuktu. The total +present-day �assemblage� of Turkey is quite different from that of Timbuktu or that of America, but they have at least some automobiles and some radios in common. Now these automobiles and radios will eventually wear out. Let us suppose we could go to some remote part of Turkey or to Timbuktu in a -dream. We dont know what the date is, in our dream, but we see all +dream. We don�t know what the date is, in our dream, but we see all sorts of strange things and ways of living in both places. Nobody tells us what the date is. But suddenly we see a 1936 Ford; so we know that in our dream it has to be at least the year 1936, and only as many years after that as we could reasonably expect a Ford to keep -in running order. The Ford would probably break down in twenty years -time, so the Turkish or Timbuktu assemblage were seeing in our dream +in running order. The Ford would probably break down in twenty years� +time, so the Turkish or Timbuktu �assemblage� we�re seeing in our dream has to date at about A.D. 1936-56. -Archeologists not only date their ancient materials in this way; they +Archeologists not only �date� their ancient materials in this way; they also see over what distances and between which peoples trading was done. It turns out that there was a good deal of trading in ancient times, probably all on a barter and exchange basis. @@ -480,13 +480,13 @@ site. They find the remains of everything that would last through time, in several different layers. They know that the assemblage in the bottom layer was laid down earlier than the assemblage in the next layer above, and so on up to the topmost layer, which is the latest. -They look at the results of other digs and find that some other +They look at the results of other �digs� and find that some other archeologist 900 miles away has found ax-heads in his lowest layer, exactly like the ax-heads of their fifth layer. This means that their fifth layer must have been lived in at about the same time as was the first layer in the site 200 miles away. It also may mean that the people who lived in the two layers knew and traded with each other. Or -it could mean that they didnt necessarily know each other, but simply +it could mean that they didn�t necessarily know each other, but simply that both traded with a third group at about the same time. You can see that the more we dig and find, the more clearly the main @@ -501,8 +501,8 @@ those of domesticated animals, for instance, sheep or cattle, and therefore the people must have kept herds. More important than anything else--as our structure grows more -complicated and our materials increase--is the fact that a sort -of history of human activity does begin to appear. The habits or +complicated and our materials increase--is the fact that �a sort +of history of human activity� does begin to appear. The habits or traditions that men formed in the making of their tools and in the ways they did things, begin to stand out for us. How characteristic were these habits and traditions? What areas did they spread over? 
@@ -519,7 +519,7 @@ method--chemical tests of the bones--that will enable them to discover what the blood-type may have been. One thing is sure. We have never found a group of skeletons so absolutely similar among themselves--so cast from a single mould, so to speak--that we could claim to have a -pure race. I am sure we never shall. +�pure� race. I am sure we never shall. We become particularly interested in any signs of change--when new materials and tool types and ways of doing things replace old ones. We @@ -527,7 +527,7 @@ watch for signs of social change and progress in one way or another. We must do all this without one word of written history to aid us. Everything we are concerned with goes back to the time _before_ men -learned to write. That is the prehistorians job--to find out what +learned to write. That is the prehistorian�s job--to find out what happened before history began. @@ -538,9 +538,9 @@ THE CHANGING WORLD in which Prehistoric Men Lived [Illustration] -Mankind, well say, is at least a half million years old. It is very +Mankind, we�ll say, is at least a half million years old. It is very hard to understand how long a time half a million years really is. -If we were to compare this whole length of time to one day, wed get +If we were to compare this whole length of time to one day, we�d get something like this: The present time is midnight, and Jesus was born just five minutes and thirty-six seconds ago. Earliest history began less than fifteen minutes ago. Everything before 11:45 was in @@ -569,7 +569,7 @@ book; it would mainly affect the dates earlier than 25,000 years ago. CHANGES IN ENVIRONMENT -The earth probably hasnt changed much in the last 5,000 years (250 +The earth probably hasn�t changed much in the last 5,000 years (250 generations). Men have built things on its surface and dug into it and drawn boundaries on maps of it, but the places where rivers, lakes, seas, and mountains now stand have changed very little. @@ -605,7 +605,7 @@ the glaciers covered most of Canada and the northern United States and reached down to southern England and France in Europe. Smaller ice sheets sat like caps on the Rockies, the Alps, and the Himalayas. The continental glaciation only happened north of the equator, however, so -remember that Ice Age is only half true. +remember that �Ice Age� is only half true. As you know, the amount of water on and about the earth does not vary. These large glaciers contained millions of tons of water frozen into @@ -677,9 +677,9 @@ their dead. At about the time when the last great glacier was finally melting away, men in the Near East made the first basic change in human economy. They began to plant grain, and they learned to raise and herd certain -animals. This meant that they could store food in granaries and on the -hoof against the bad times of the year. This first really basic change -in mans way of living has been called the food-producing revolution. +animals. This meant that they could store food in granaries and �on the +hoof� against the bad times of the year. This first really basic change +in man�s way of living has been called the �food-producing revolution.� By the time it happened, a modern kind of climate was beginning. Men had already grown to look as they do now. Know-how in ways of living had developed and progressed, slowly but surely, up to a point. It was @@ -698,25 +698,25 @@ Prehistoric Men THEMSELVES DO WE KNOW WHERE MAN ORIGINATED? 
-For a long time some scientists thought the cradle of mankind was in +For a long time some scientists thought the �cradle of mankind� was in central Asia. Other scientists insisted it was in Africa, and still -others said it might have been in Europe. Actually, we dont know -where it was. We dont even know that there was only _one_ cradle. -If we had to choose a cradle at this moment, we would probably say +others said it might have been in Europe. Actually, we don�t know +where it was. We don�t even know that there was only _one_ �cradle.� +If we had to choose a �cradle� at this moment, we would probably say Africa. But the southern portions of Asia and Europe may also have been included in the general area. The scene of the early development of -mankind was certainly the Old World. It is pretty certain men didnt +mankind was certainly the Old World. It is pretty certain men didn�t reach North or South America until almost the end of the Ice Age--had they done so earlier we would certainly have found some trace of them by now. The earliest tools we have yet found come from central and south -Africa. By the dating system Im using, these tools must be over +Africa. By the dating system I�m using, these tools must be over 500,000 years old. There are now reports that a few such early tools have been found--at the Sterkfontein cave in South Africa--along with -the bones of small fossil men called australopithecines. +the bones of small fossil men called �australopithecines.� -Not all scientists would agree that the australopithecines were men, +Not all scientists would agree that the australopithecines were �men,� or would agree that the tools were made by the australopithecines themselves. For these sticklers, the earliest bones of men come from the island of Java. The date would be about 450,000 years ago. So far, @@ -727,12 +727,12 @@ Let me say it another way. How old are the earliest traces of men we now have? Over half a million years. This was a time when the first alpine glaciation was happening in the north. What has been found so far? The tools which the men of those times made, in different parts -of Africa. It is now fairly generally agreed that the men who made -the tools were the australopithecines. There is also a more man-like +of Africa. It is now fairly generally agreed that the �men� who made +the tools were the australopithecines. There is also a more �man-like� jawbone at Kanam in Kenya, but its find-spot has been questioned. The next earliest bones we have were found in Java, and they may be almost a hundred thousand years younger than the earliest African finds. We -havent yet found the tools of these early Javanese. Our knowledge of +haven�t yet found the tools of these early Javanese. Our knowledge of tool-using in Africa spreads quickly as time goes on: soon after the appearance of tools in the south we shall have them from as far north as Algeria. @@ -758,30 +758,30 @@ prove it. MEN AND APES Many people used to get extremely upset at the ill-formed notion -that man descended from the apes. Such words were much more likely -to start fights or monkey trials than the correct notion that all +that �man descended from the apes.� Such words were much more likely +to start fights or �monkey trials� than the correct notion that all living animals, including man, ascended or evolved from a single-celled organism which lived in the primeval seas hundreds of millions of years -ago. Men are mammals, of the order called Primates, and mans living -relatives are the great apes. 
Men didnt descend from the apes or
+ago. Men are mammals, of the order called Primates, and man's living
+relatives are the great apes. Men didn't "descend" from the apes or
apes from men, and mankind must have had much closer relatives who
have since become extinct.

Men stand erect. They also walk and run on their two feet. Apes are
happiest in trees, swinging with their arms from branch to branch.
Few branches of trees will hold the mighty gorilla, although he still
-manages to sleep in trees. Apes cant stand really erect in our sense,
+manages to sleep in trees. Apes can't stand really erect in our sense,
and when they have to run on the ground, they use the knuckles of their
hands as well as their feet.

A key group of fossil bones here are the south African
australopithecines. These are called the _Australopithecinae_ or
-man-apes or sometimes even ape-men. We do not _know_ that they were
+"man-apes" or sometimes even "ape-men." We do not _know_ that they were
directly ancestral to men but they can hardly have been so to apes.
-Presently Ill describe them a bit more. The reason I mention them
+Presently I'll describe them a bit more. The reason I mention them
here is that while they had brains no larger than those of apes, their
hipbones were enough like ours so that they must have stood erect.
-There is no good reason to think they couldnt have walked as we do.
+There is no good reason to think they couldn't have walked as we do.


BRAINS, HANDS, AND TOOLS

@@ -801,12 +801,12 @@ Nobody knows which of these three is most important, or which came
first. Most probably the growth of all three things was very much
blended together. If you think about each of the things, you will see
what I mean. Unless your hand is more flexible than a paw, and your
-thumb will work against (or oppose) your fingers, you cant hold a tool
-very well. But you wouldnt get the idea of using a tool unless you had
+thumb will work against (or oppose) your fingers, you can't hold a tool
+very well. But you wouldn't get the idea of using a tool unless you had
enough brain to help you see cause and effect. And it is rather hard to
see how your hand and brain would develop unless they had something to
-practice on--like using tools. In Professor Krogmans words, the hand
-must become the obedient servant of the eye and the brain. It is the
+practice on--like using tools. In Professor Krogman's words, "the hand
+must become the obedient servant of the eye and the brain." It is the
_co-ordination_ of these things that counts.

Many other things must have been happening to the bodies of the
@@ -820,17 +820,17 @@ little by little, all together. Men became men very slowly.

WHEN SHALL WE CALL MEN MEN?

-What do I mean when I say men? People who looked pretty much as we
+What do I mean when I say "men"? People who looked pretty much as we
do, and who used different tools to do different things, are men to me.
-Well probably never know whether the earliest ones talked or not. They
+We'll probably never know whether the earliest ones talked or not. They
probably had vocal cords, so they could make sounds, but did they know
how to make sounds work as symbols to carry meanings? But if the fossil
-bones look like our skeletons, and if we find tools which well agree
-couldnt have been made by nature or by animals, then Id say we had
+bones look like our skeletons, and if we find tools which we'll agree
+couldn't have been made by nature or by animals, then I'd say we had
traces of _men_.

The australopithecine finds of the Transvaal and Bechuanaland, in
-south Africa, are bound to come into the discussion here. Ive already
+south Africa, are bound to come into the discussion here. I've already
told you that the australopithecines could have stood upright and
walked on their two hind legs. They come from the very base of the
Pleistocene or Ice Age, and a few coarse stone tools have been found
@@ -848,17 +848,17 @@ bones.
The doubt as to whether the australopithecines used the tools
themselves goes like this--just suppose some man-like creature (whose
bones we have not yet found) made the tools and used them to kill and
butcher australopithecines. Hence a few experts tend to let
-australopithecines still hang in limbo as man-apes.
+australopithecines still hang in limbo as "man-apes."


THE EARLIEST MEN WE KNOW

-Ill postpone talking about the tools of early men until the next
+I'll postpone talking about the tools of early men until the next
chapter. The men whose bones were the earliest of the Java lot have
been given the name _Meganthropus_. The bones are very fragmentary. We
would not understand them very well unless we had the somewhat later
-Javanese lot--the more commonly known _Pithecanthropus_ or Java
-man--against which to refer them for study. One of the less well-known
+Javanese lot--the more commonly known _Pithecanthropus_ or "Java
+man"--against which to refer them for study. One of the less well-known
and earliest fragments, a piece of lower jaw and some teeth, rather
strongly resembles the lower jaws and teeth of the australopithecine
type. Was _Meganthropus_ a sort of half-way point between the
@@ -872,7 +872,7 @@ finds of Java man were made in 1891-92 by Dr. Eugene Dubois, a Dutch
doctor in the colonial service. Finds have continued to be made. There
are now bones enough to account for four skulls. There are also four
jaws and some odd teeth and thigh bones. Java man, generally speaking,
-was about five feet six inches tall, and didnt hold his head very
+was about five feet six inches tall, and didn't hold his head very
erect. His skull was very thick and heavy and had room for little more
than two-thirds as large a brain as we have. He had big teeth and a big
jaw and enormous eyebrow ridges.
@@ -885,22 +885,22 @@ belonged to his near descendants.

Remember that there are several varieties of men in the whole early
Java lot, at least two of which are earlier than the _Pithecanthropus_,
-Java man. Some of the earlier ones seem to have gone in for
+"Java man." Some of the earlier ones seem to have gone in for
bigness, in tooth-size at least. _Meganthropus_ is one of these earlier
varieties. As we said, he _may_ turn out to be a link to the
australopithecines, who _may_ or _may not_ be ancestral to men.
_Meganthropus_ is best understandable in terms of _Pithecanthropus_,
who appeared later in the same general area. _Pithecanthropus_ is
pretty well understandable from the bones he left us, and also because
-of his strong resemblance to the fully tool-using cave-dwelling Peking
-man, _Sinanthropus_, about whom we shall talk next. But you can see
+of his strong resemblance to the fully tool-using cave-dwelling "Peking
+man," _Sinanthropus_, about whom we shall talk next. But you can see
that the physical anthropologists and prehistoric archeologists still
have a lot of work to do on the problem of earliest men.


PEKING MEN AND SOME EARLY WESTERNERS -The earliest known Chinese are called _Sinanthropus_, or Peking man, +The earliest known Chinese are called _Sinanthropus_, or �Peking man,� because the finds were made near that city. In World War II, the United States Marine guard at our Embassy in Peking tried to help get the bones out of the city before the Japanese attack. Nobody knows where @@ -913,9 +913,9 @@ casts of the bones. Peking man lived in a cave in a limestone hill, made tools, cracked animal bones to get the marrow out, and used fire. Incidentally, the bones of Peking man were found because Chinese dig for what they call -dragon bones and dragon teeth. Uneducated Chinese buy these things +�dragon bones� and �dragon teeth.� Uneducated Chinese buy these things in their drug stores and grind them into powder for medicine. The -dragon teeth and bones are really fossils of ancient animals, and +�dragon teeth� and �bones� are really fossils of ancient animals, and sometimes of men. The people who supply the drug stores have learned where to dig for strange bones and teeth. Paleontologists who get to China go to the drug stores to buy fossils. In a roundabout way, this @@ -924,7 +924,7 @@ is how the fallen-in cave of Peking man at Choukoutien was discovered. Peking man was not quite as tall as Java man but he probably stood straighter. His skull looked very much like that of the Java skull except that it had room for a slightly larger brain. His face was less -brutish than was Java mans face, but this isnt saying much. +brutish than was Java man�s face, but this isn�t saying much. Peking man dates from early in the interglacial period following the second alpine glaciation. He probably lived close to 350,000 years @@ -946,9 +946,9 @@ big ridges over the eyes. The more fragmentary skull from Swanscombe in England (p. 11) has been much more carefully studied. Only the top and back of that skull have been found. Since the skull rounds up nicely, it has been assumed that the face and forehead must have been quite -modern. Careful comparison with Steinheim shows that this was not +�modern.� Careful comparison with Steinheim shows that this was not necessarily so. This is important because it bears on the question of -how early truly modern man appeared. +how early truly �modern� man appeared. Recently two fragmentary jaws were found at Ternafine in Algeria, northwest Africa. They look like the jaws of Peking man. Tools were @@ -971,22 +971,22 @@ modern Australian natives. During parts of the Ice Age there was a land bridge all the way from Java to Australia. -TWO ENGLISHMEN WHO WERENT OLD +TWO ENGLISHMEN WHO WEREN�T OLD The older textbooks contain descriptions of two English finds which were thought to be very old. These were called Piltdown (_Eoanthropus dawsoni_) and Galley Hill. The skulls were very modern in appearance. In 1948-49, British scientists began making chemical tests which proved that neither of these finds is very old. It is now known that both -Piltdown man and the tools which were said to have been found with +�Piltdown man� and the tools which were said to have been found with him were part of an elaborate fake! -TYPICAL CAVE MEN +TYPICAL �CAVE MEN� The next men we have to talk about are all members of a related group. -These are the Neanderthal group. Neanderthal man himself was found in -the Neander Valley, near Dsseldorf, Germany, in 1856. He was the first +These are the Neanderthal group. 
�Neanderthal man� himself was found in +the Neander Valley, near D�sseldorf, Germany, in 1856. He was the first human fossil to be recognized as such. [Illustration: PRINCIPAL KNOWN TYPES OF FOSSIL MEN @@ -999,7 +999,7 @@ human fossil to be recognized as such. PITHECANTHROPUS] Some of us think that the neanderthaloids proper are only those people -of western Europe who didnt get out before the beginning of the last +of western Europe who didn�t get out before the beginning of the last great glaciation, and who found themselves hemmed in by the glaciers in the Alps and northern Europe. Being hemmed in, they intermarried a bit too much and developed into a special type. Professor F. Clark @@ -1010,7 +1010,7 @@ pre-neanderthaloids. There are traces of these pre-neanderthaloids pretty much throughout Europe during the third interglacial period--say 100,000 years ago. The pre-neanderthaloids are represented by such finds as the ones at Ehringsdorf in Germany and Saccopastore in Italy. -I wont describe them for you, since they are simply less extreme than +I won�t describe them for you, since they are simply less extreme than the neanderthaloids proper--about half way between Steinheim and the classic Neanderthal people. @@ -1019,24 +1019,24 @@ get caught in the pocket of the southwest corner of Europe at the onset of the last great glaciation became the classic Neanderthalers. Out in the Near East, Howell thinks, it is possible to see traces of people evolving from the pre-neanderthaloid type toward that of fully modern -man. Certainly, we dont see such extreme cases of neanderthaloidism +man. Certainly, we don�t see such extreme cases of �neanderthaloidism� outside of western Europe. There are at least a dozen good examples in the main or classic Neanderthal group in Europe. They date to just before and in the earlier part of the last great glaciation (85,000 to 40,000 years ago). -Many of the finds have been made in caves. The cave men the movies +Many of the finds have been made in caves. The �cave men� the movies and the cartoonists show you are probably meant to be Neanderthalers. -Im not at all sure they dragged their women by the hair; the women +I�m not at all sure they dragged their women by the hair; the women were probably pretty tough, too! Neanderthal men had large bony heads, but plenty of room for brains. Some had brain cases even larger than the average for modern man. Their faces were heavy, and they had eyebrow ridges of bone, but the ridges were not as big as those of Java man. Their foreheads were very low, -and they didnt have much chin. They were about five feet three inches -tall, but were heavy and barrel-chested. But the Neanderthalers didnt -slouch as much as theyve been blamed for, either. +and they didn�t have much chin. They were about five feet three inches +tall, but were heavy and barrel-chested. But the Neanderthalers didn�t +slouch as much as they�ve been blamed for, either. One important thing about the Neanderthal group is that there is a fair number of them to study. Just as important is the fact that we know @@ -1059,10 +1059,10 @@ different-looking people. EARLY MODERN MEN -How early is modern man (_Homo sapiens_), the wise man? Some people +How early is modern man (_Homo sapiens_), the �wise man�? Some people have thought that he was very early, a few still think so. 
Piltdown and
Galley Hill, which were quite modern in anatomical appearance and
-_supposedly_ very early in date, were the best evidence for very
+_supposedly_ very early in date, were the best "evidence" for very
early modern men. Now that Piltdown has been liquidated and Galley
Hill is known to be very late, what is left of the idea?

@@ -1073,13 +1073,13 @@
the Ternafine jaws, you might come to the conclusion that the crown
of the Swanscombe head was that of a modern-like man.

Two more skulls, again without faces, are available from a French
-cave site, Fontchevade. They come from the time of the last great
+cave site, Fontéchevade. They come from the time of the last great
interglacial, as did the pre-neanderthaloids. The crowns of the
-Fontchevade skulls also look quite modern. There is a bit of the
+Fontéchevade skulls also look quite modern. There is a bit of the
forehead preserved on one of these skulls and the brow-ridge is not
heavy. Nevertheless, there is a suggestion that the bones belonged to
an immature individual. In this case, his (or even more so, if _her_)
-brow-ridges would have been weak anyway. The case for the Fontchevade
+brow-ridges would have been weak anyway. The case for the Fontéchevade
fossils, as modern type men, is little stronger than that for
Swanscombe, although Professor Vallois believes it a good case.

@@ -1101,8 +1101,8 @@ of the onset of colder weather, when the last glaciation was beginning
in the north--say 75,000 years ago.

The 70 per cent modern group came from only one cave, Mugharet es-Skhul
-(cave of the kids). The other group, from several caves, had bones of
-men of the type weve been calling pre-neanderthaloid which we noted
+("cave of the kids"). The other group, from several caves, had bones of
+men of the type we've been calling pre-neanderthaloid which we noted
were widespread in Europe and beyond. The tools which came with each
of these finds were generally similar, and McCown and Keith, and other
scholars since their study, have tended to assume that both the Skhul
@@ -1131,26 +1131,26 @@ important fossil men of later Europe are shown in the chart on page

DIFFERENCES IN THE EARLY MODERNS

The main early European moderns have been divided into two groups, the
-Cro-Magnon group and the Combe Capelle-Brnn group. Cro-Magnon people
+Cro-Magnon group and the Combe Capelle-Brünn group. Cro-Magnon people
were tall and big-boned, with large, long, and rugged heads. They
must have been built like many present-day Scandinavians. The Combe
-Capelle-Brnn people were shorter; they had narrow heads and faces, and
-big eyebrow-ridges. Of course we dont find the skin or hair of these
-people. But there is little doubt they were Caucasoids (Whites).
+Capelle-Brünn people were shorter; they had narrow heads and faces, and
+big eyebrow-ridges. Of course we don't find the skin or hair of these
+people. But there is little doubt they were Caucasoids ("Whites").

Another important find came in the Italian Riviera, near Monte Carlo.
Here, in a cave near Grimaldi, there was a grave containing a woman
and a young boy, buried together. The two skeletons were first called
-Negroid because some features of their bones were thought to resemble
+"Negroid" because some features of their bones were thought to resemble
certain features of modern African Negro bones. But more recently,
Professor E. A. Hooton and other experts questioned the use of the word
-Negroid in describing the Grimaldi skeletons. It is true that nothing
+"Negroid" in describing the Grimaldi skeletons. 
It is true that nothing is known of the skin color, hair form, or any other fleshy feature of -the Grimaldi people, so that the word Negroid in its usual meaning is +the Grimaldi people, so that the word �Negroid� in its usual meaning is not proper here. It is also not clear whether the features of the bones -claimed to be Negroid are really so at all. +claimed to be �Negroid� are really so at all. -From a place called Wadjak, in Java, we have proto-Australoid skulls +From a place called Wadjak, in Java, we have �proto-Australoid� skulls which closely resemble those of modern Australian natives. Some of the skulls found in South Africa, especially the Boskop skull, look like those of modern Bushmen, but are much bigger. The ancestors of @@ -1159,12 +1159,12 @@ Desert. True African Negroes were forest people who apparently expanded out of the west central African area only in the last several thousand years. Although dark in skin color, neither the Australians nor the Bushmen are Negroes; neither the Wadjak nor the Boskop skulls are -Negroid. +�Negroid.� -As weve already mentioned, Professor Weidenreich believed that Peking +As we�ve already mentioned, Professor Weidenreich believed that Peking man was already on the way to becoming a Mongoloid. Anyway, the -Mongoloids would seem to have been present by the time of the Upper -Cave at Choukoutien, the _Sinanthropus_ find-spot. +Mongoloids would seem to have been present by the time of the �Upper +Cave� at Choukoutien, the _Sinanthropus_ find-spot. WHAT THE DIFFERENCES MEAN @@ -1175,14 +1175,14 @@ From area to area, men tended to look somewhat different, just as they do today. This is all quite natural. People _tended_ to mate near home; in the anthropological jargon, they made up geographically localized breeding populations. The simple continental division of -stocks--black = Africa, yellow = Asia, white = Europe--is too simple +�stocks�--black = Africa, yellow = Asia, white = Europe--is too simple a picture to fit the facts. People became accustomed to life in some -particular area within a continent (we might call it a natural area). +particular area within a continent (we might call it a �natural area�). As they went on living there, they evolved towards some particular physical variety. It would, of course, have been difficult to draw a clear boundary between two adjacent areas. There must always have been some mating across the boundaries in every case. One thing human -beings dont do, and never have done, is to mate for purity. It is +beings don�t do, and never have done, is to mate for �purity.� It is self-righteous nonsense when we try to kid ourselves into thinking that they do. @@ -1195,28 +1195,28 @@ and they must do the writing about races. I shall, however, give two modern definitions of race, and then make one comment. Dr. William G. Boyd, professor of Immunochemistry, School of - Medicine, Boston University: We may define a human race as a + Medicine, Boston University: �We may define a human race as a population which differs significantly from other human populations in regard to the frequency of one or more of the genes it - possesses. + possesses.� Professor Sherwood L. Washburn, professor of Physical Anthropology, - Department of Anthropology, the University of California: A race + Department of Anthropology, the University of California: �A �race� is a group of genetically similar populations, and races intergrade - because there are always intermediate populations. 
+ because there are always intermediate populations.� My comment is that the ideas involved here are all biological: they concern groups, _not_ individuals. Boyd and Washburn may differ a bit -on what they want to consider a population, but a population is a +on what they want to consider a �population,� but a population is a group nevertheless, and genetics is biology to the hilt. Now a lot of people still think of race in terms of how people dress or fix their food or of other habits or customs they have. The next step is to talk -about racial purity. None of this has anything whatever to do with +about racial �purity.� None of this has anything whatever to do with race proper, which is a matter of the biology of groups. -Incidentally, Im told that if man very carefully _controls_ +Incidentally, I�m told that if man very carefully _controls_ the breeding of certain animals over generations--dogs, cattle, -chickens--he might achieve a pure race of animals. But he doesnt do +chickens--he might achieve a �pure� race of animals. But he doesn�t do it. Some unfortunate genetic trait soon turns up, so this has just as carefully to be bred out again, and so on. @@ -1240,20 +1240,20 @@ date to the second great interglacial period, about 350,000 years ago. Piltdown and Galley Hill are out, and with them, much of the starch in the old idea that there were two distinct lines of development -in human evolution: (1) a line of paleoanthropic development from +in human evolution: (1) a line of �paleoanthropic� development from Heidelberg to the Neanderthalers where it became extinct, and (2) a -very early modern line, through Piltdown, Galley Hill, Swanscombe, to +very early �modern� line, through Piltdown, Galley Hill, Swanscombe, to us. Swanscombe, Steinheim, and Ternafine are just as easily cases of very early pre-neanderthaloids. The pre-neanderthaloids were very widespread during the third interglacial: Ehringsdorf, Saccopastore, some of the Mount Carmel -people, and probably Fontchevade are cases in point. A variety of +people, and probably Font�chevade are cases in point. A variety of their descendants can be seen, from Java (Solo), Africa (Rhodesian man), and about the Mediterranean and in western Europe. As the acute cold of the last glaciation set in, the western Europeans found themselves surrounded by water, ice, or bitter cold tundra. To vastly -over-simplify it, they bred in and became classic neanderthaloids. +over-simplify it, they �bred in� and became classic neanderthaloids. But on Mount Carmel, the Skhul cave-find with its 70 per cent modern features shows what could happen elsewhere at the same time. @@ -1263,12 +1263,12 @@ modern skeletons of men. The modern skeletons differ from place to place, just as different groups of men living in different places still look different. -What became of the Neanderthalers? Nobody can tell me for sure. Ive a -hunch they were simply bred out again when the cold weather was over. +What became of the Neanderthalers? Nobody can tell me for sure. I�ve a +hunch they were simply �bred out� again when the cold weather was over. Many Americans, as the years go by, are no longer ashamed to claim they -have Indian blood in their veins. Give us a few more generations +have �Indian blood in their veins.� Give us a few more generations and there will not be very many other Americans left to whom we can -brag about it. It certainly isnt inconceivable to me to imagine a +brag about it. 
It certainly isn�t inconceivable to me to imagine a little Cro-Magnon boy bragging to his friends about his tough, strong, Neanderthaler great-great-great-great-grandfather! @@ -1281,15 +1281,15 @@ Cultural BEGINNINGS Men, unlike the lower animals, are made up of much more than flesh and -blood and bones; for men have culture. +blood and bones; for men have �culture.� WHAT IS CULTURE? -Culture is a word with many meanings. The doctors speak of making a -culture of a certain kind of bacteria, and ants are said to have a -culture. Then there is the Emily Post kind of culture--you say a -person is cultured, or that he isnt, depending on such things as +�Culture� is a word with many meanings. The doctors speak of making a +�culture� of a certain kind of bacteria, and ants are said to have a +�culture.� Then there is the Emily Post kind of �culture�--you say a +person is �cultured,� or that he isn�t, depending on such things as whether or not he eats peas with his knife. The anthropologists use the word too, and argue heatedly over its finer @@ -1300,7 +1300,7 @@ men from another. In this sense, a CULTURE means the way the members of a group of people think and believe and live, the tools they make, and the way they do things. Professor Robert Redfield says a culture is an organized or formalized body of conventional understandings. -Conventional understandings means the whole set of rules, beliefs, +�Conventional understandings� means the whole set of rules, beliefs, and standards which a group of people lives by. These understandings show themselves in art, and in the other things a people may make and do. The understandings continue to last, through tradition, from one @@ -1325,12 +1325,12 @@ Egyptians. I mean their beliefs as to why grain grew, as well as their ability to make tools with which to reap the grain. I mean their beliefs about life after death. What I am thinking about as culture is a thing which lasted in time. If any one Egyptian, even the Pharaoh, -died, it didnt affect the Egyptian culture of that particular moment. +died, it didn�t affect the Egyptian culture of that particular moment. PREHISTORIC CULTURES -For that long period of mans history that is all prehistory, we have +For that long period of man�s history that is all prehistory, we have no written descriptions of cultures. We find only the tools men made, the places where they lived, the graves in which they buried their dead. Fortunately for us, these tools and living places and graves all @@ -1345,15 +1345,15 @@ of the classic European Neanderthal group of men, we have found few cave-dwelling places of very early prehistoric men. First, there is the fallen-in cave where Peking man was found, near Peking. Then there are two or three other _early_, but not _very early_, possibilities. The -finds at the base of the French cave of Fontchevade, those in one of +finds at the base of the French cave of Font�chevade, those in one of the Makapan caves in South Africa, and several open sites such as Dr. -L. S. B. Leakeys Olorgesailie in Kenya doubtless all lie earlier than +L. S. B. Leakey�s Olorgesailie in Kenya doubtless all lie earlier than the time of the main European Neanderthal group, but none are so early as the Peking finds. You can see that we know very little about the home life of earlier prehistoric men. We find different kinds of early stone tools, but we -cant even be really sure which tools may have been used together. +can�t even be really sure which tools may have been used together. 
WHY LITTLE HAS LASTED FROM EARLY TIMES @@ -1380,11 +1380,11 @@ there first! The front of this enormous sheet of ice moved down over the country, crushing and breaking and plowing up everything, like a gigantic bulldozer. You can see what happened to our camp site. -Everything the glacier couldnt break, it pushed along in front of it +Everything the glacier couldn�t break, it pushed along in front of it or plowed beneath it. Rocks were ground to gravel, and soil was caught into the ice, which afterwards melted and ran off as muddy water. Hard -tools of flint sometimes remained whole. Human bones werent so hard; -its a wonder _any_ of them lasted. Gushing streams of melt water +tools of flint sometimes remained whole. Human bones weren�t so hard; +it�s a wonder _any_ of them lasted. Gushing streams of melt water flushed out the debris from underneath the glacier, and water flowed off the surface and through great crevasses. The hard materials these waters carried were even more rolled and ground up. Finally, such @@ -1407,26 +1407,26 @@ all up, and so we cannot say which particular sets of tools belonged together in the first place. -EOLITHS +�EOLITHS� But what sort of tools do we find earliest? For almost a century, people have been picking up odd bits of flint and other stone in the oldest Ice Age gravels in England and France. It is now thought these -odd bits of stone werent actually worked by prehistoric men. The -stones were given a name, _eoliths_, or dawn stones. You can see them +odd bits of stone weren�t actually worked by prehistoric men. The +stones were given a name, _eoliths_, or �dawn stones.� You can see them in many museums; but you can be pretty sure that very few of them were actually fashioned by men. -It is impossible to pick out eoliths that seem to be made in any -one _tradition_. By tradition I mean a set of habits for making one -kind of tool for some particular job. No two eoliths look very much +It is impossible to pick out �eoliths� that seem to be made in any +one _tradition_. By �tradition� I mean a set of habits for making one +kind of tool for some particular job. No two �eoliths� look very much alike: tools made as part of some one tradition all look much alike. -Now its easy to suppose that the very earliest prehistoric men picked -up and used almost any sort of stone. This wouldnt be surprising; you -and I do it when we go camping. In other words, some of these eoliths +Now it�s easy to suppose that the very earliest prehistoric men picked +up and used almost any sort of stone. This wouldn�t be surprising; you +and I do it when we go camping. In other words, some of these �eoliths� may actually have been used by prehistoric men. They must have used anything that might be handy when they needed it. We could have figured -that out without the eoliths. +that out without the �eoliths.� THE ROAD TO STANDARDIZATION @@ -1434,7 +1434,7 @@ THE ROAD TO STANDARDIZATION Reasoning from what we know or can easily imagine, there should have been three major steps in the prehistory of tool-making. The first step would have been simple _utilization_ of what was at hand. This is the -step into which the eoliths would fall. The second step would have +step into which the �eoliths� would fall. The second step would have been _fashioning_--the haphazard preparation of a tool when there was a need for it. Probably many of the earlier pebble tools, which I shall describe next, fall into this group. The third step would have been @@ -1447,7 +1447,7 @@ tradition appears. 

PEBBLE TOOLS

-At the beginning of the last chapter, youll remember that I said there
+At the beginning of the last chapter, you'll remember that I said there
were tools from very early geological beds. The earliest bones of men
have not yet been found in such early beds although the Sterkfontein
australopithecine cave approaches this early date. The earliest tools
@@ -1467,7 +1467,7 @@ Old World besides Africa; in fact, some prehistorians already claim
to have identified a few. Since the forms and the distinct ways of
making the earlier pebble tools had not yet sufficiently jelled into
a set tradition, they are difficult for us to recognize. It is not
-so difficult, however, if there are great numbers of possibles
+so difficult, however, if there are great numbers of "possibles"
available. A little later in time the tradition becomes more clearly
set, and pebble tools are easier to recognize. So far, really large
collections of pebble tools have only been found and examined in Africa.
@@ -1475,9 +1475,9 @@ collections of pebble tools have only been found and examined in Africa.


CORE-BIFACE TOOLS

-The next tradition well look at is the _core_ or biface one. The tools
+The next tradition we'll look at is the _core_ or biface one. The tools
are large pear-shaped pieces of stone trimmed flat on the two opposite
-sides or faces. Hence biface has been used to describe these tools.
+sides or "faces." Hence "biface" has been used to describe these tools.
The front view is like that of a pear with a rather pointed top, and
the back view looks almost exactly the same. Look at them side on, and
you can see that the front and back faces are the same and have been
@@ -1488,7 +1488,7 @@ illustration.

[Illustration: ABBEVILLIAN BIFACE]

We have very little idea of the way in which these core-bifaces were
-used. They have been called hand axes, but this probably gives the
+used. They have been called "hand axes," but this probably gives the
wrong idea, for an ax, to us, is not a pointed tool. All of these early
tools must have been used for a number of jobs--chopping, scraping,
cutting, hitting, picking, and prying. Since the core-bifaces tend to
@@ -1505,7 +1505,7 @@ a big block of stone. You had to break off the flake in such a way that
it was broad and thin, and also had a good sharp cutting edge. Once you
really got on to the trick of doing it, this was probably a simpler way
to make a good cutting tool than preparing a biface. You have to know
-how, though; Ive tried it and have mashed my fingers more than once.
+how, though; I've tried it and have mashed my fingers more than once.

The flake tools look as if they were meant mainly for chopping,
scraping, and cutting jobs. When one made a flake tool, the idea seems
@@ -1535,9 +1535,9 @@ tradition. It probably has its earliest roots in the pebble tool
tradition of African type. There are several kinds of tools in this
tradition, but all differ from the western core-bifaces and flakes.
There are broad, heavy scrapers or cleavers, and tools with an
-adze-like cutting edge. These last-named tools are called hand adzes,
-just as the core-bifaces of the west have often been called hand
-axes. The section of an adze cutting edge is ? shaped; the section of
+adze-like cutting edge. These last-named tools are called "hand adzes,"
+just as the core-bifaces of the west have often been called "hand
+axes." The section of an adze cutting edge is ? shaped; the section of
an ax is < shaped.

[Illustration: ANYATHIAN ADZE-LIKE TOOL] @@ -1581,17 +1581,17 @@ stratification.[3] Soan (India) Flake: - Typical Mousterian + �Typical Mousterian� Levalloiso-Mousterian Levalloisian Tayacian Clactonian (localized in England) Core-biface: - Some blended elements in Mousterian + Some blended elements in �Mousterian� Micoquian (= Acheulean 6 and 7) Acheulean - Abbevillian (once called Chellean) + Abbevillian (once called �Chellean�) Pebble tool: Oldowan @@ -1608,8 +1608,8 @@ out of glacial gravels the easiest thing to do first is to isolate individual types of tools into groups. First you put a bushel-basketful of tools on a table and begin matching up types. Then you give names to the groups of each type. The groups and the types are really matters of -the archeologists choice; in real life, they were probably less exact -than the archeologists lists of them. We now know pretty well in which +the archeologists� choice; in real life, they were probably less exact +than the archeologists� lists of them. We now know pretty well in which of the early traditions the various early groups belong. @@ -1635,9 +1635,9 @@ production must have been passed on from one generation to another. I could even guess that the notions of the ideal type of one or the other of these tools stood out in the minds of men of those times -somewhat like a symbol of perfect tool for good job. If this were -so--remember its only a wild guess of mine--then men were already -symbol users. Now lets go on a further step to the fact that the words +somewhat like a symbol of �perfect tool for good job.� If this were +so--remember it�s only a wild guess of mine--then men were already +symbol users. Now let�s go on a further step to the fact that the words men speak are simply sounds, each different sound being a symbol for a different meaning. If standardized tool-making suggests symbol-making, is it also possible that crude word-symbols were also being made? I @@ -1650,7 +1650,7 @@ of our second step is more suggestive, although we may not yet feel sure that many of the earlier pebble tools were man-made products. But with the step to standardization and the appearance of the traditions, I believe we must surely be dealing with the traces of culture-bearing -_men_. The conventional understandings which Professor Redfields +_men_. The �conventional understandings� which Professor Redfield�s definition of culture suggests are now evidenced for us in the persistent habits for the preparation of stone tools. Were we able to see the other things these prehistoric men must have made--in materials @@ -1666,19 +1666,19 @@ In the last chapter, I told you that many of the older archeologists and human paleontologists used to think that modern man was very old. The supposed ages of Piltdown and Galley Hill were given as evidence of the great age of anatomically modern man, and some interpretations -of the Swanscombe and Fontchevade fossils were taken to support +of the Swanscombe and Font�chevade fossils were taken to support this view. The conclusion was that there were two parallel lines or -phyla of men already present well back in the Pleistocene. The -first of these, the more primitive or paleoanthropic line, was +�phyla� of men already present well back in the Pleistocene. The +first of these, the more primitive or �paleoanthropic� line, was said to include Heidelberg, the proto-neanderthaloids and classic -Neanderthal. The more anatomically modern or neanthropic line was +Neanderthal. 
The more anatomically modern or "neanthropic" line was
thought to consist of Piltdown and the others mentioned above. The
Neanderthaler or paleoanthropic line was thought to have become
extinct after the first phase of the last great glaciation. Of course, the
modern or neanthropic line was believed to have persisted into the
-present, as the basis for the worlds population today. But with
+present, as the basis for the world's population today. But with
Piltdown liquidated, Galley Hill known to be very late, and Swanscombe
-and Fontchevade otherwise interpreted, there is little left of the
+and Fontéchevade otherwise interpreted, there is little left of the
so-called parallel phyla theory.

While the theory was in vogue, however, and as long as the European
@@ -1695,9 +1695,9 @@ where they had actually been dropped by the men who made and used
them. The tools came, rather, from the secondary hodge-podge of the
glacial gravels. I tried to give you a picture of the bulldozing action
of glaciers (p. 40) and of the erosion and weathering that were
-side-effects of a glacially conditioned climate on the earths surface.
+side-effects of a glacially conditioned climate on the earth's surface.
As we said above, if one simply plucks tools out of the redeposited
-gravels, his natural tendency is to type the tools by groups, and to
+gravels, his natural tendency is to "type" the tools by groups, and to
think that the groups stand for something _on their own_.

In 1906, M. Victor Commont actually made a rare find of what seems
to have been a kind of workshop site, on a terrace above the Somme
river in France. Here, Commont realized, flake tools appeared clearly
in direct association with core-biface tools. Few prehistorians paid
attention to Commont or his site, however. It was easier to believe
-that flake tools represented a distinct culture and that this
-culture was that of the Neanderthaler or paleoanthropic line, and
-that the core-bifaces stood for another culture which was that of the
+that flake tools represented a distinct "culture" and that this
+"culture" was that of the Neanderthaler or paleoanthropic line, and
+that the core-bifaces stood for another "culture" which was that of the
supposed early modern or neanthropic line. Of course, I am obviously
skipping many details here. Some later sites with Neanderthal fossils
do seem to have only flake tools, but other such sites have both types
of tools. The flake tools which appeared _with_ the core-bifaces in
the Swanscombe gravels were never made much of, although it
-was embarrassing for the parallel phyla people that Fontchevade
+was embarrassing for the parallel phyla people that Fontéchevade
ran heavily to flake tools. All in all, the parallel phyla theory
flourished because it seemed so neat and easy to understand.


TRADITIONS ARE TOOL-MAKING HABITS, NOT CULTURES

In case you think I simply enjoy beating a dead horse, look in any
standard book on prehistory written twenty (or even ten) years ago, or
-in most encyclopedias. Youll find that each of the individual tool
-types, of the West, at least, was supposed to represent a culture.
-The cultures were believed to correspond to parallel lines of human
+in most encyclopedias. You'll find that each of the individual tool
+types, of the West, at least, was supposed to represent a "culture."
+The "cultures" were believed to correspond to parallel lines of human
evolution. In 1937, Mr. 
Harper Kelley strongly re-emphasized the importance -of Commonts workshop site and the presence of flake tools with -core-bifaces. Next followed Dr. Movius clear delineation of the +of Commont�s workshop site and the presence of flake tools with +core-bifaces. Next followed Dr. Movius� clear delineation of the chopper-chopping tool tradition of the Far East. This spoiled the nice symmetry of the flake-tool = paleoanthropic, core-biface = neanthropic equations. Then came increasing understanding of the importance of the pebble tools in Africa, and the location of several more workshop sites there, especially at Olorgesailie in Kenya. Finally came the -liquidation of Piltdown and the deflation of Galley Hills date. So it +liquidation of Piltdown and the deflation of Galley Hill�s date. So it is at last possible to picture an individual prehistoric man making a flake tool to do one job and a core-biface tool to do another. Commont showed us this picture in 1906, but few believed him. @@ -1751,7 +1751,7 @@ that of the cave on Mount Carmel in Palestine, where the blended pre-neanderthaloid, 70 per cent modern-type skulls were found. Here, in the same level with the skulls, were 9,784 flint tools. Of these, only three--doubtless strays--were core-bifaces; all the rest were flake -tools or flake chips. We noted above how the Fontchevade cave ran to +tools or flake chips. We noted above how the Font�chevade cave ran to flake tools. The only conclusion I would draw from this is that times and circumstances did exist in which prehistoric men needed only flake tools. So they only made flake tools for those particular times and @@ -1773,13 +1773,13 @@ piece of bone. From the gravels which yield the Clactonian flakes of England comes the fire-hardened point of a wooden spear. There are also the chance finds of the fossil human bones themselves, of which we spoke in the last chapter. Aside from the cave of Peking man, none -of the earliest tools have been found in caves. Open air or workshop +of the earliest tools have been found in caves. Open air or �workshop� sites which do not seem to have been disturbed later by some geological agency are very rare. The chart on page 65 shows graphically what the situation in west-central Europe seems to have been. It is not yet certain whether -there were pebble tools there or not. The Fontchevade cave comes +there were pebble tools there or not. The Font�chevade cave comes into the picture about 100,000 years ago or more. But for the earlier hundreds of thousands of years--below the red-dotted line on the chart--the tools we find come almost entirely from the haphazard @@ -1790,13 +1790,13 @@ kinds of all-purpose tools. Almost any one of them could be used for hacking, chopping, cutting, and scraping; so the men who used them must have been living in a rough and ready sort of way. They found or hunted their food wherever they could. In the anthropological jargon, they -were food-gatherers, pure and simple. +were �food-gatherers,� pure and simple. Because of the mixture in the gravels and in the materials they -carried, we cant be sure which animals these men hunted. Bones of +carried, we can�t be sure which animals these men hunted. Bones of the larger animals turn up in the gravels, but they could just as well belong to the animals who hunted the men, rather than the other -way about. We dont know. This is why camp sites like Commonts and +way about. We don�t know. 
This is why camp sites like Commont's and
Olorgesailie in Kenya are so important when we do find them. The animal
bones at Olorgesailie belonged to various mammals of extremely large
size. Probably they were taken in pit-traps, but there are a number of
@@ -1809,18 +1809,18 @@ animal.

Professor F. Clark Howell recently returned from excavating another
important open air site at Isimila in Tanganyika. The site yielded the
bones of many fossil animals and also thousands of core-bifaces,
-flakes, and choppers. But Howells reconstruction of the food-getting
-habits of the Isimila people certainly suggests that the word hunting
-is too dignified for what they did; scavenging would be much nearer
+flakes, and choppers. But Howell's reconstruction of the food-getting
+habits of the Isimila people certainly suggests that the word "hunting"
+is too dignified for what they did; "scavenging" would be much nearer
the mark.

During a great part of this time the climate was warm and pleasant. The
second interglacial period (the time between the second and third great
alpine glaciations) lasted a long time, and during much of this time
-the climate may have been even better than ours is now. We dont know
+the climate may have been even better than ours is now. We don't know
that earlier prehistoric men in Europe or Africa lived in caves. They
may not have needed to; much of the weather may have been so nice that
-they lived in the open. Perhaps they didnt wear clothes, either.
+they lived in the open. Perhaps they didn't wear clothes, either.


WHAT THE PEKING CAVE-FINDS TELL US

@@ -1832,7 +1832,7 @@ were bones of dangerous animals, members of the wolf, bear, and cat
families. Some of the cat bones belonged to beasts larger than tigers.
There were also bones of other wild animals: buffalo, camel, deer,
elephants, horses, sheep, and even ostriches. Seventy per cent of the
-animals Peking man killed were fallow deer. Its much too cold and dry
+animals Peking man killed were fallow deer. It's much too cold and dry
in north China for all these animals to live there today. So this list
helps us know that the weather was reasonably warm, and that there was
enough rain to grow grass for the grazing animals. The list also helps
@@ -1840,7 +1840,7 @@ the paleontologists to date the find.

Peking man also seems to have eaten plant food, for there are hackberry
seeds in the debris of the cave. His tools were made of sandstone and
-quartz and sometimes of a rather bad flint. As weve already seen, they
+quartz and sometimes of a rather bad flint. As we've already seen, they
belong in the chopper-tool tradition. It seems fairly clear that some
of the edges were chipped by right-handed people. There are also many
split pieces of heavy bone. Peking man probably split them so he could
@@ -1850,10 +1850,10 @@ Many of these split bones were the bones of Peking men. Each one of the
skulls had already had the base broken out of it. In no case were any
of the bones resting together in their natural relation to one another.
There is nothing like a burial; all of the bones are scattered. Now
-its true that animals could have scattered bodies that were not cared
+it's true that animals could have scattered bodies that were not cared
for or buried. But splitting bones lengthwise and carefully removing
the base of a skull call for both the tools and the people to use them.
-Its pretty clear who the people were. Peking man was a cannibal.
+It's pretty clear who the people were. Peking man was a cannibal.
* * * * * @@ -1862,8 +1862,8 @@ prehistoric men. In those days life was rough. You evidently had to watch out not only for dangerous animals but also for your fellow men. You ate whatever you could catch or find growing. But you had sense enough to build fires, and you had already formed certain habits for -making the kinds of stone tools you needed. Thats about all we know. -But I think well have to admit that cultural beginnings had been made, +making the kinds of stone tools you needed. That�s about all we know. +But I think we�ll have to admit that cultural beginnings had been made, and that these early people were really _men_. @@ -1876,16 +1876,16 @@ MORE EVIDENCE of Culture While the dating is not yet sure, the material that we get from caves in Europe must go back to about 100,000 years ago; the time of the -classic Neanderthal group followed soon afterwards. We dont know why +classic Neanderthal group followed soon afterwards. We don�t know why there is no earlier material in the caves; apparently they were not used before the last interglacial phase (the period just before the last great glaciation). We know that men of the classic Neanderthal group were living in caves from about 75,000 to 45,000 years ago. New radioactive carbon dates even suggest that some of the traces of -culture well describe in this chapter may have lasted to about 35,000 +culture we�ll describe in this chapter may have lasted to about 35,000 years ago. Probably some of the pre-neanderthaloid types of men had also lived in caves. But we have so far found their bones in caves only -in Palestine and at Fontchevade. +in Palestine and at Font�chevade. THE CAVE LAYERS @@ -1893,7 +1893,7 @@ THE CAVE LAYERS In parts of France, some peasants still live in caves. In prehistoric time, many generations of people lived in them. As a result, many caves have deep layers of debris. The first people moved in and lived -on the rock floor. They threw on the floor whatever they didnt want, +on the rock floor. They threw on the floor whatever they didn�t want, and they tracked in mud; nobody bothered to clean house in those days. Their debris--junk and mud and garbage and what not--became packed into a layer. As time went on, and generations passed, the layer grew @@ -1910,20 +1910,20 @@ earliest to latest. This is the _stratification_ we talked about (p. [Illustration: SECTION OF SHELTER ON LOWER TERRACE, LE MOUSTIER] -While we may find a mix-up in caves, its not nearly as bad as the +While we may find a mix-up in caves, it�s not nearly as bad as the mixing up that was done by glaciers. The animal bones and shells, the fireplaces, the bones of men, and the tools the men made all belong -together, if they come from one layer. Thats the reason why the cave +together, if they come from one layer. That�s the reason why the cave of Peking man is so important. It is also the reason why the caves in Europe and the Near East are so important. We can get an idea of which things belong together and which lot came earliest and which latest. In most cases, prehistoric men lived only in the mouths of caves. -They didnt like the dark inner chambers as places to live in. They +They didn�t like the dark inner chambers as places to live in. They preferred rock-shelters, at the bases of overhanging cliffs, if there was enough overhang to give shelter. When the weather was good, they no -doubt lived in the open air as well. 
Ill go on using the term cave -since its more familiar, but remember that I really mean rock-shelter, +doubt lived in the open air as well. I�ll go on using the term �cave� +since it�s more familiar, but remember that I really mean rock-shelter, as a place in which people actually lived. The most important European cave sites are in Spain, France, and @@ -1933,29 +1933,29 @@ found when the out-of-the-way parts of Europe, Africa, and Asia are studied. -AN INDUSTRY DEFINED +AN �INDUSTRY� DEFINED We have already seen that the earliest European cave materials are -those from the cave of Fontchevade. Movius feels certain that the +those from the cave of Font�chevade. Movius feels certain that the lowest materials here date back well into the third interglacial stage, -that which lay between the Riss (next to the last) and the Wrm I +that which lay between the Riss (next to the last) and the W�rm I (first stage of the last) alpine glaciations. This material consists of an _industry_ of stone tools, apparently all made in the flake -tradition. This is the first time we have used the word industry. +tradition. This is the first time we have used the word �industry.� It is useful to call all of the different tools found together in one layer and made of _one kind of material_ an industry; that is, the tools must be found together as men left them. Tools taken from the glacial gravels (or from windswept desert surfaces or river gravels -or any geological deposit) are not together in this sense. We might -say the latter have only geological, not archeological context. +or any geological deposit) are not �together� in this sense. We might +say the latter have only �geological,� not �archeological� context. Archeological context means finding things just as men left them. We -can tell what tools go together in an industrial sense only if we +can tell what tools go together in an �industrial� sense only if we have archeological context. -Up to now, the only things we could have called industries were the +Up to now, the only things we could have called �industries� were the worked stone industry and perhaps the worked (?) bone industry of the Peking cave. We could add some of the very clear cases of open air -sites, like Olorgesailie. We couldnt use the term for the stone tools +sites, like Olorgesailie. We couldn�t use the term for the stone tools from the glacial gravels, because we do not know which tools belonged together. But when the cave materials begin to appear in Europe, we can begin to speak of industries. Most of the European caves of this time @@ -1964,16 +1964,16 @@ contain industries of flint tools alone. THE EARLIEST EUROPEAN CAVE LAYERS -Weve just mentioned the industry from what is said to be the oldest +We�ve just mentioned the industry from what is said to be the oldest inhabited cave in Europe; that is, the industry from the deepest layer -of the site at Fontchevade. Apparently it doesnt amount to much. The +of the site at Font�chevade. Apparently it doesn�t amount to much. The tools are made of stone, in the flake tradition, and are very poorly worked. This industry is called _Tayacian_. Its type tool seems to be a smallish flake tool, but there are also larger flakes which seem to have been fashioned for hacking. In fact, the type tool seems to be simply a smaller edition of the Clactonian tool (pictured on p. 45). -None of the Fontchevade tools are really good. There are scrapers, +None of the Font�chevade tools are really good. 
There are scrapers, and more or less pointed tools, and tools that may have been used for hacking and chopping. Many of the tools from the earlier glacial gravels are better made than those of this first industry we see in @@ -2005,7 +2005,7 @@ core-biface and the flake traditions. The core-biface tools usually make up less than half of all the tools in the industry. However, the name of the biface type of tool is generally given to the whole industry. It is called the _Acheulean_, actually a late form of it, as -Acheulean is also used for earlier core-biface tools taken from the +�Acheulean� is also used for earlier core-biface tools taken from the glacial gravels. In western Europe, the name used is _Upper Acheulean_ or _Micoquian_. The same terms have been borrowed to name layers E and F in the Tabun cave, on Mount Carmel in Palestine. @@ -2029,7 +2029,7 @@ those used for at least one of the flake industries we shall mention presently. There is very little else in these early cave layers. We do not have -a proper industry of bone tools. There are traces of fire, and of +a proper �industry� of bone tools. There are traces of fire, and of animal bones, and a few shells. In Palestine, there are many more bones of deer than of gazelle in these layers; the deer lives in a wetter climate than does the gazelle. In the European cave layers, the @@ -2043,18 +2043,18 @@ bones of fossil men definitely in place with this industry. FLAKE INDUSTRIES FROM THE CAVES Two more stone industries--the _Levalloisian_ and the -_Mousterian_--turn up at approximately the same time in the European +�_Mousterian_�--turn up at approximately the same time in the European cave layers. Their tools seem to be mainly in the flake tradition, but according to some of the authorities their preparation also shows some combination with the habits by which the core-biface tools were prepared. -Now notice that I dont tell you the Levalloisian and the Mousterian +Now notice that I don�t tell you the Levalloisian and the �Mousterian� layers are both above the late Acheulean layers. Look at the cave -section (p. 57) and youll find that some Mousterian of Acheulean -tradition appears above some typical Mousterian. This means that +section (p. 57) and you�ll find that some �Mousterian of Acheulean +tradition� appears above some �typical Mousterian.� This means that there may be some kinds of Acheulean industries that are later than -some kinds of Mousterian. The same is true of the Levalloisian. +some kinds of �Mousterian.� The same is true of the Levalloisian. There were now several different kinds of habits that men used in making stone tools. These habits were based on either one or the other @@ -2072,7 +2072,7 @@ were no patent laws in those days. The extremely complicated interrelationships of the different habits used by the tool-makers of this range of time are at last being -systematically studied. M. Franois Bordes has developed a statistical +systematically studied. M. Fran�ois Bordes has developed a statistical method of great importance for understanding these tool preparation habits. @@ -2081,22 +2081,22 @@ THE LEVALLOISIAN AND MOUSTERIAN The easiest Levalloisian tool to spot is a big flake tool. The trick in making it was to fashion carefully a big chunk of stone (called -the Levalloisian tortoise core, because it resembles the shape of +the Levalloisian �tortoise core,� because it resembles the shape of a turtle-shell) and then to whack this in such a way that a large flake flew off. 
This large thin flake, with sharp cutting edges, is the finished Levalloisian tool. There were various other tools in a Levalloisian industry, but this is the characteristic _Levalloisian_ tool. -There are several typical Mousterian stone tools. Different from -the tools of the Levalloisian type, these were made from disc-like -cores. There are medium-sized flake side scrapers. There are also -some small pointed tools and some small hand axes. The last of these +There are several �typical Mousterian� stone tools. Different from +the tools of the Levalloisian type, these were made from �disc-like +cores.� There are medium-sized flake �side scrapers.� There are also +some small pointed tools and some small �hand axes.� The last of these tool types is often a flake worked on both of the flat sides (that is, bifacially). There are also pieces of flint worked into the form of crude balls. The pointed tools may have been fixed on shafts to make short jabbing spears; the round flint balls may have been used as -bolas. Actually, we dont _know_ what either tool was used for. The +bolas. Actually, we don�t _know_ what either tool was used for. The points and side scrapers are illustrated (pp. 64 and 66). [Illustration: LEVALLOIS FLAKE] @@ -2108,9 +2108,9 @@ Nowadays the archeologists are less and less sure of the importance of any one specific tool type and name. Twenty years ago, they used to speak simply of Acheulean or Levalloisian or Mousterian tools. Now, more and more, _all_ of the tools from some one layer in a -cave are called an industry, which is given a mixed name. Thus we -have Levalloiso-Mousterian, and Acheuleo-Levalloisian, and even -Acheuleo-Mousterian (or Mousterian of Acheulean tradition). Bordes +cave are called an �industry,� which is given a mixed name. Thus we +have �Levalloiso-Mousterian,� and �Acheuleo-Levalloisian,� and even +�Acheuleo-Mousterian� (or �Mousterian of Acheulean tradition�). Bordes� systematic work is beginning to clear up some of our confusion. The time of these late Acheuleo-Levalloiso-Mousterioid industries @@ -2120,16 +2120,16 @@ phase of the last great glaciation. It was also the time that the classic group of Neanderthal men was living in Europe. A number of the Neanderthal fossil finds come from these cave layers. Before the different habits of tool preparation were understood it used to be -popular to say Neanderthal man was Mousterian man. I think this is -wrong. What used to be called Mousterian is now known to be a variety +popular to say Neanderthal man was �Mousterian man.� I think this is +wrong. What used to be called �Mousterian� is now known to be a variety of industries with tools of both core-biface and flake habits, and -so mixed that the word Mousterian used alone really doesnt mean +so mixed that the word �Mousterian� used alone really doesn�t mean anything. The Neanderthalers doubtless understood the tool preparation habits by means of which Acheulean, Levalloisian and Mousterian type tools were produced. We also have the more modern-like Mount Carmel people, found in a cave layer of Palestine with tools almost entirely -in the flake tradition, called Levalloiso-Mousterian, and the -Fontchevade-Tayacian (p. 59). +in the flake tradition, called �Levalloiso-Mousterian,� and the +Font�chevade-Tayacian (p. 59). [Illustration: MOUSTERIAN POINT] @@ -2165,7 +2165,7 @@ which seem to have served as anvils or chopping blocks, are fairly common. Bits of mineral, used as coloring matter, have also been found. 
We -dont know what the color was used for. +don�t know what the color was used for. [Illustration: MOUSTERIAN SIDE SCRAPER] @@ -2230,7 +2230,7 @@ might suggest some notion of hoarding up the spirits or the strength of bears killed in the hunt. Probably the people lived in small groups, as hunting and food-gathering seldom provide enough food for large groups of people. These groups probably had some kind of leader or -chief. Very likely the rude beginnings of rules for community life +�chief.� Very likely the rude beginnings of rules for community life and politics, and even law, were being made. But what these were, we do not know. We can only guess about such things, as we can only guess about many others; for example, how the idea of a family must have been @@ -2246,8 +2246,8 @@ small. The mixtures and blendings of the habits used in making stone tools must mean that there were also mixtures and blends in many of the other ideas and beliefs of these small groups. And what this probably means is that there was no one _culture_ of the time. It is -certainly unlikely that there were simply three cultures, Acheulean, -Levalloisian, and Mousterian, as has been thought in the past. +certainly unlikely that there were simply three cultures, �Acheulean,� +�Levalloisian,� and �Mousterian,� as has been thought in the past. Rather there must have been a great variety of loosely related cultures at about the same stage of advancement. We could say, too, that here we really begin to see, for the first time, that remarkable ability @@ -2272,7 +2272,7 @@ related habits for the making of tools. But the men who made them must have looked much like the men of the West. Their tools were different, but just as useful. -As to what the men of the West looked like, Ive already hinted at all +As to what the men of the West looked like, I�ve already hinted at all we know so far (pp. 29 ff.). The Neanderthalers were present at the time. Some more modern-like men must have been about, too, since fossils of them have turned up at Mount Carmel in Palestine, and at @@ -2306,7 +2306,7 @@ A NEW TRADITION APPEARS Something new was probably beginning to happen in the European-Mediterranean area about 40,000 years ago, though all the rest of the Old World seems to have been going on as it had been. I -cant be sure of this because the information we are using as a basis +can�t be sure of this because the information we are using as a basis for dates is very inaccurate for the areas outside of Europe and the Mediterranean. @@ -2325,7 +2325,7 @@ drawing shows. It has sharp cutting edges, and makes a very useful knife. The real trick is to be able to make one. It is almost impossible to make a blade out of any stone but flint or a natural volcanic glass called obsidian. And even if you have flint or obsidian, -you first have to work up a special cone-shaped blade-core, from +you first have to work up a special cone-shaped �blade-core,� from which to whack off blades. [Illustration: PLAIN BLADE] @@ -2351,8 +2351,8 @@ found in equally early cave levels in Syria; their popularity there seems to fluctuate a bit. Some more or less parallel-sided flakes are known in the Levalloisian industry in France, but they are probably no earlier than Tabun E. The Tabun blades are part of a local late -Acheulean industry, which is characterized by core-biface hand -axes, but which has many flake tools as well. Professor F. E. +�Acheulean� industry, which is characterized by core-biface �hand +axes,� but which has many flake tools as well. 
Professor F. E. Zeuner believes that this industry may be more than 120,000 years old; actually its date has not yet been fixed, but it is very old--older than the fossil finds of modern-like men in the same caves. @@ -2371,7 +2371,7 @@ We are not sure just where the earliest _persisting_ habits for the production of blade tools developed. Impressed by the very early momentary appearance of blades at Tabun on Mount Carmel, Professor Dorothy A. Garrod first favored the Near East as a center of origin. -She spoke of some as yet unidentified Asiatic centre, which she +She spoke of �some as yet unidentified Asiatic centre,� which she thought might be in the highlands of Iran or just beyond. But more recent work has been done in this area, especially by Professor Coon, and the blade tools do not seem to have an early appearance there. When @@ -2395,21 +2395,21 @@ core (and the striking of the Levalloisian flake from it) might have followed through to the conical core and punch technique for the production of blades. Professor Garrod is much impressed with the speed of change during the later phases of the last glaciation, and its -probable consequences. She speaks of the greater number of industries +probable consequences. She speaks of �the greater number of industries having enough individual character to be classified as distinct ... -since evolution now starts to outstrip diffusion. Her evolution here +since evolution now starts to outstrip diffusion.� Her �evolution� here is of course an industrial evolution rather than a biological one. Certainly the people of Europe had begun to make blade tools during the warm spell after the first phase of the last glaciation. By about 40,000 years ago blades were well established. The bones of the blade -tool makers weve found so far indicate that anatomically modern men +tool makers we�ve found so far indicate that anatomically modern men had now certainly appeared. Unfortunately, only a few fossil men have so far been found from the very beginning of the blade tool range in Europe (or elsewhere). What I certainly shall _not_ tell you is that conquering bands of fine, strong, anatomically modern men, armed with superior blade tools, came sweeping out of the East to exterminate the -lowly Neanderthalers. Even if we dont know exactly what happened, Id -lay a good bet it wasnt that simple. +lowly Neanderthalers. Even if we don�t know exactly what happened, I�d +lay a good bet it wasn�t that simple. We do know a good deal about different blade industries in Europe. Almost all of them come from cave layers. There is a great deal of @@ -2418,7 +2418,7 @@ this complication; in fact, it doubtless simplifies it too much. But it may suggest all the complication of industries which is going on at this time. You will note that the upper portion of my much simpler chart (p. 65) covers the same material (in the section -marked Various Blade-Tool Industries). That chart is certainly too +marked �Various Blade-Tool Industries�). That chart is certainly too simplified. You will realize that all this complication comes not only from @@ -2429,7 +2429,7 @@ a good deal of climatic change at this time. The plants and animals that men used for food were changing, too. The great variety of tools and industries we now find reflect these changes and the ability of men to keep up with the times. Now, for example, is the first time we are -sure that there are tools to _make_ other tools. They also show mens +sure that there are tools to _make_ other tools. 
They also show men�s increasing ability to adapt themselves. @@ -2437,15 +2437,15 @@ SPECIAL TYPES OF BLADE TOOLS The most useful tools that appear at this time were made from blades. - 1. The backed blade. This is a knife made of a flint blade, with - one edge purposely blunted, probably to save the users fingers + 1. The �backed� blade. This is a knife made of a flint blade, with + one edge purposely blunted, probably to save the user�s fingers from being cut. There are several shapes of backed blades (p. 73). [Illustration: TWO BURINS] - 2. The _burin_ or graver. The burin was the original chisel. Its - cutting edge is _transverse_, like a chisels. Some burins are + 2. The _burin_ or �graver.� The burin was the original chisel. Its + cutting edge is _transverse_, like a chisel�s. Some burins are made like a screw-driver, save that burins are sharp. Others have edges more like the blade of a chisel or a push plane, with only one bevel. Burins were probably used to make slots in wood @@ -2456,29 +2456,29 @@ The most useful tools that appear at this time were made from blades. [Illustration: TANGED POINT] - 3. The tanged point. These stone points were used to tip arrows or + 3. The �tanged� point. These stone points were used to tip arrows or light spears. They were made from blades, and they had a long tang at the bottom where they were fixed to the shaft. At the place where the tang met the main body of the stone point, there was - a marked shoulder, the beginnings of a barb. Such points had + a marked �shoulder,� the beginnings of a barb. Such points had either one or two shoulders. [Illustration: NOTCHED BLADE] - 4. The notched or strangulated blade. Along with the points for + 4. The �notched� or �strangulated� blade. Along with the points for arrows or light spears must go a tool to prepare the arrow or - spear shaft. Today, such a tool would be called a draw-knife or - a spoke-shave, and this is what the notched blades probably are. + spear shaft. Today, such a tool would be called a �draw-knife� or + a �spoke-shave,� and this is what the notched blades probably are. Our spoke-shaves have sharp straight cutting blades and really - shave. Notched blades of flint probably scraped rather than cut. + �shave.� Notched blades of flint probably scraped rather than cut. - 5. The awl, drill, or borer. These blade tools are worked out + 5. The �awl,� �drill,� or �borer.� These blade tools are worked out to a spike-like point. They must have been used for making holes in wood, bone, shell, skin, or other things. [Illustration: DRILL OR AWL] - 6. The end-scraper on a blade is a tool with one or both ends + 6. The �end-scraper on a blade� is a tool with one or both ends worked so as to give a good scraping edge. It could have been used to hollow out wood or bone, scrape hides, remove bark from trees, and a number of other things (p. 78). @@ -2489,11 +2489,11 @@ usually made of blades, but the best examples are so carefully worked on both sides (bifacially) that it is impossible to see the original blade. This tool is - 7. The laurel leaf point. Some of these tools were long and + 7. The �laurel leaf� point. Some of these tools were long and dagger-like, and must have been used as knives or daggers. Others - were small, called willow leaf, and must have been mounted on + were small, called �willow leaf,� and must have been mounted on spear or arrow shafts. Another typical Solutrean tool is the - shouldered point. Both the laurel leaf and shouldered point + �shouldered� point. 
Both the �laurel leaf� and �shouldered� point types are illustrated (see above and p. 79). [Illustration: END-SCRAPER ON A BLADE] @@ -2507,17 +2507,17 @@ second is a core tool. [Illustration: SHOULDERED POINT] - 8. The keel-shaped round scraper is usually small and quite round, + 8. The �keel-shaped round scraper� is usually small and quite round, and has had chips removed up to a peak in the center. It is called - keel-shaped because it is supposed to look (when upside down) + �keel-shaped� because it is supposed to look (when upside down) like a section through a boat. Actually, it looks more like a tent or an umbrella. Its outer edges are sharp all the way around, and it was probably a general purpose scraping tool (see illustration, p. 81). - 9. The keel-shaped nosed scraper is a much larger and heavier tool + 9. The �keel-shaped nosed scraper� is a much larger and heavier tool than the round scraper. It was made on a core with a flat bottom, - and has one nicely worked end or nose. Such tools are usually + and has one nicely worked end or �nose.� Such tools are usually large enough to be easily grasped, and probably were used like push planes (see illustration, p. 81). @@ -2530,7 +2530,7 @@ the most easily recognized blade tools, although they show differences in detail at different times. There are also many other kinds. Not all of these tools appear in any one industry at one time. Thus the different industries shown in the chart (p. 72) each have only some -of the blade tools weve just listed, and also a few flake tools. Some +of the blade tools we�ve just listed, and also a few flake tools. Some industries even have a few core tools. The particular types of blade tools appearing in one cave layer or another, and the frequency of appearance of the different types, tell which industry we have in each @@ -2545,15 +2545,15 @@ to appear. There are knives, pins, needles with eyes, and little double-pointed straight bars of bone that were probably fish-hooks. The fish-line would have been fastened in the center of the bar; when the fish swallowed the bait, the bar would have caught cross-wise in the -fishs mouth. +fish�s mouth. One quite special kind of bone tool is a long flat point for a light spear. It has a deep notch cut up into the breadth of its base, and is -called a split-based bone point (p. 82). We know examples of bone +called a �split-based bone point� (p. 82). We know examples of bone beads from these times, and of bone handles for flint tools. Pierced teeth of some animals were worn as beads or pendants, but I am not sure -that elks teeth were worn this early. There are even spool-shaped -buttons or toggles. +that elks� teeth were worn this early. There are even spool-shaped +�buttons� or toggles. [Illustration: SPLIT-BASED BONE POINT] @@ -2595,12 +2595,12 @@ almost to have served as sketch blocks. The surfaces of these various objects may show animals, or rather abstract floral designs, or geometric designs. -[Illustration: VENUS FIGURINE FROM WILLENDORF] +[Illustration: �VENUS� FIGURINE FROM WILLENDORF] Some of the movable art is not done on tools. The most remarkable examples of this class are little figures of women. These women seem to be pregnant, and their most female characteristics are much emphasized. -It is thought that these Venus or Mother-goddess figurines may be +It is thought that these �Venus� or �Mother-goddess� figurines may be meant to show the great forces of nature--fertility and the birth of life. 
@@ -2616,21 +2616,21 @@ are different styles in the cave art. The really great cave art is pretty well restricted to southern France and Cantabrian (northwestern) Spain. -There are several interesting things about the Franco-Cantabrian cave +There are several interesting things about the �Franco-Cantabrian� cave art. It was done deep down in the darkest and most dangerous parts of the caves, although the men lived only in the openings of caves. If you think what they must have had for lights--crude lamps of hollowed stone have been found, which must have burned some kind of oil or grease, with a matted hair or fiber wick--and of the animals that may have -lurked in the caves, youll understand the part about danger. Then, -too, were sure the pictures these people painted were not simply to be +lurked in the caves, you�ll understand the part about danger. Then, +too, we�re sure the pictures these people painted were not simply to be looked at and admired, for they painted one picture right over other pictures which had been done earlier. Clearly, it was the _act_ of _painting_ that counted. The painter had to go way down into the most mysterious depths of the earth and create an animal in paint. Possibly he believed that by doing this he gained some sort of magic power over the same kind of animal when he hunted it in the open air. It certainly -doesnt look as if he cared very much about the picture he painted--as +doesn�t look as if he cared very much about the picture he painted--as a finished product to be admired--for he or somebody else soon went down and painted another animal right over the one he had done. @@ -2683,10 +2683,10 @@ it. Their art is another example of the direction the human mind was taking. And when I say human, I mean it in the fullest sense, for this is the time in which fully modern man has appeared. On page 34, we -spoke of the Cro-Magnon group and of the Combe Capelle-Brnn group of -Caucasoids and of the Grimaldi Negroids, who are no longer believed +spoke of the Cro-Magnon group and of the Combe Capelle-Br�nn group of +Caucasoids and of the Grimaldi �Negroids,� who are no longer believed to be Negroid. I doubt that any one of these groups produced most of -the achievements of the times. Its not yet absolutely sure which +the achievements of the times. It�s not yet absolutely sure which particular group produced the great cave art. The artists were almost certainly a blend of several (no doubt already mixed) groups. The pair of Grimaldians were buried in a grave with a sprinkling of red ochre, @@ -2705,9 +2705,9 @@ also found about the shore of the Mediterranean basin, and it moved into northern Europe as the last glaciation pulled northward. People began making blade tools of very small size. They learned how to chip very slender and tiny blades from a prepared core. Then they made these -little blades into tiny triangles, half-moons (lunates), trapezoids, +little blades into tiny triangles, half-moons (�lunates�), trapezoids, and several other geometric forms. These little tools are called -microliths. They are so small that most of them must have been fixed +�microliths.� They are so small that most of them must have been fixed in handles or shafts. [Illustration: MICROLITHS @@ -2726,7 +2726,7 @@ One corner of each little triangle stuck out, and the whole thing made a fine barbed harpoon. In historic times in Egypt, geometric trapezoidal microliths were still in use as arrowheads. They were fastened--broad end out--on the end of an arrow shaft. 
It seems queer -to give an arrow a point shaped like a T. Actually, the little points +to give an arrow a point shaped like a �T.� Actually, the little points were very sharp, and must have pierced the hides of animals very easily. We also think that the broader cutting edge of the point may have caused more bleeding than a pointed arrowhead would. In hunting @@ -2739,7 +2739,7 @@ is some evidence that they appear early in the Near East. Their use was very common in northwest Africa but this came later. The microlith makers who reached south Russia and central Europe possibly moved up out of the Near East. Or it may have been the other way around; we -simply dont yet know. +simply don�t yet know. Remember that the microliths we are talking about here were made from carefully prepared little blades, and are often geometric in outline. @@ -2749,7 +2749,7 @@ even some flake scrapers, in most microlithic industries. I emphasize this bladelet and the geometric character of the microlithic industries of the western Old World, since there has sometimes been confusion in the matter. Sometimes small flake chips, utilized as minute pointed -tools, have been called microliths. They may be _microlithic_ in size +tools, have been called �microliths.� They may be _microlithic_ in size in terms of the general meaning of the word, but they do not seem to belong to the sub-tradition of the blade tool preparation habits which we have been discussing here. @@ -2763,10 +2763,10 @@ in western Asia too, and early, although Professor Garrod is no longer sure that the whole tradition originated in the Near East. If you look again at my chart (p. 72) you will note that in western Asia I list some of the names of the western European industries, but with the -qualification -like (for example, Gravettian-like). The western +qualification �-like� (for example, �Gravettian-like�). The western Asiatic blade-tool industries do vaguely recall some aspects of those of western Europe, but we would probably be better off if we used -completely local names for them. The Emiran of my chart is such an +completely local names for them. The �Emiran� of my chart is such an example; its industry includes a long spike-like blade point which has no western European counterpart. @@ -2774,13 +2774,13 @@ When we last spoke of Africa (p. 66), I told you that stone tools there were continuing in the Levalloisian flake tradition, and were becoming smaller. At some time during this process, two new tool types appeared in northern Africa: one was the Aterian point with -a tang (p. 67), and the other was a sort of laurel leaf point, -called the Sbaikian. These two tool types were both produced from +a tang (p. 67), and the other was a sort of �laurel leaf� point, +called the �Sbaikian.� These two tool types were both produced from flakes. The Sbaikian points, especially, are roughly similar to some of the Solutrean points of Europe. It has been suggested that both the Sbaikian and Aterian points may be seen on their way to France through their appearance in the Spanish cave deposits of Parpallo, but there is -also a rival pre-Solutrean in central Europe. We still do not know +also a rival �pre-Solutrean� in central Europe. We still do not know whether there was any contact between the makers of these north African tools and the Solutrean tool-makers. What does seem clear is that the blade-tool tradition itself arrived late in northern Africa. @@ -2788,11 +2788,11 @@ blade-tool tradition itself arrived late in northern Africa. 
NETHER AFRICA -Blade tools and laurel leaf points and some other probably late +Blade tools and �laurel leaf� points and some other probably late stone tool types also appear in central and southern Africa. There are geometric microliths on bladelets and even some coarse pottery in east Africa. There is as yet no good way of telling just where these -items belong in time; in broad geological terms they are late. +items belong in time; in broad geological terms they are �late.� Some people have guessed that they are as early as similar European and Near Eastern examples, but I doubt it. The makers of small-sized Levalloisian flake tools occupied much of Africa until very late in @@ -2823,18 +2823,18 @@ ancestors of the American Indians came from Asia. The stone-tool traditions of Europe, Africa, the Near and Middle East, and central Siberia, did _not_ move into the New World. With only a very few special or late exceptions, there are _no_ core-bifaces, -flakes, or blade tools of the Old World. Such things just havent been +flakes, or blade tools of the Old World. Such things just haven�t been found here. -This is why I say its a shame we dont know more of the end of the +This is why I say it�s a shame we don�t know more of the end of the chopper-tool tradition in the Far East. According to Weidenreich, the Mongoloids were in the Far East long before the end of the last glaciation. If the genetics of the blood group types do demand a non-Mongoloid ancestry for the American Indians, who else may have been in the Far East 25,000 years ago? We know a little about the habits for making stone tools which these first people brought with them, -and these habits dont conform with those of the western Old World. -Wed better keep our eyes open for whatever happened to the end of +and these habits don�t conform with those of the western Old World. +We�d better keep our eyes open for whatever happened to the end of the chopper-tool tradition in northern China; already there are hints that it lasted late there. Also we should watch future excavations in eastern Siberia. Perhaps we shall find the chopper-tool tradition @@ -2846,13 +2846,13 @@ THE NEW ERA Perhaps it comes in part from the way I read the evidence and perhaps in part it is only intuition, but I feel that the materials of this chapter suggest a new era in the ways of life. Before about 40,000 -years ago, people simply gathered their food, wandering over large +years ago, people simply �gathered� their food, wandering over large areas to scavenge or to hunt in a simple sort of way. But here we -have seen them settling-in more, perhaps restricting themselves in +have seen them �settling-in� more, perhaps restricting themselves in their wanderings and adapting themselves to a given locality in more intensive ways. This intensification might be suggested by the word -collecting. The ways of life we described in the earlier chapters -were food-gathering ways, but now an era of food-collecting has +�collecting.� The ways of life we described in the earlier chapters +were �food-gathering� ways, but now an era of �food-collecting� has begun. We shall see further intensifications of it in the next chapter. @@ -2883,8 +2883,8 @@ The last great glaciation of the Ice Age was a two-part affair, with a sub-phase at the end of the second part. In Europe the last sub-phase of this glaciation commenced somewhere around 15,000 years ago. Then the glaciers began to melt back, for the last time. Remember that -Professor Antevs (p. 
19) isnt sure the Ice Age is over yet! This -melting sometimes went by fits and starts, and the weather wasnt +Professor Antevs (p. 19) isn�t sure the Ice Age is over yet! This +melting sometimes went by fits and starts, and the weather wasn�t always changing for the better; but there was at least one time when European weather was even better than it is now. @@ -2927,16 +2927,16 @@ Sweden. Much of this north European material comes from bogs and swamps where it had become water-logged and has kept very well. Thus we have much more complete _assemblages_[4] than for any time earlier. - [4] Assemblage is a useful word when there are different kinds of + [4] �Assemblage� is a useful word when there are different kinds of archeological materials belonging together, from one area and of - one time. An assemblage is made up of a number of industries + one time. An assemblage is made up of a number of �industries� (that is, all the tools in chipped stone, all the tools in bone, all the tools in wood, the traces of houses, etc.) and everything else that manages to survive, such as the art, the burials, the bones of the animals used as food, and the traces of plant foods; in fact, everything that has been left to us and can be used to help reconstruct the lives of the people to - whom it once belonged. Our own present-day assemblage would be + whom it once belonged. Our own present-day �assemblage� would be the sum total of all the objects in our mail-order catalogues, department stores and supply houses of every sort, our churches, our art galleries and other buildings, together with our roads, @@ -2976,7 +2976,7 @@ found. It seems likely that the Maglemosian bog finds are remains of summer camps, and that in winter the people moved to higher and drier regions. -Childe calls them the Forest folk; they probably lived much the +Childe calls them the �Forest folk�; they probably lived much the same sort of life as did our pre-agricultural Indians of the north central states. They hunted small game or deer; they did a great deal of fishing; they collected what plant food they could find. In fact, @@ -3010,7 +3010,7 @@ South of the north European belt the hunting-food-collecting peoples were living on as best they could during this time. One interesting group, which seems to have kept to the regions of sandy soil and scrub forest, made great quantities of geometric microliths. These are the -materials called _Tardenoisian_. The materials of the Forest folk of +materials called _Tardenoisian_. The materials of the �Forest folk� of France and central Europe generally are called _Azilian_; Dr. Movius believes the term might best be restricted to the area south of the Loire River. @@ -3032,24 +3032,24 @@ to it than this. Professor Mathiassen of Copenhagen, who knows the archeological remains of this time very well, poses a question. He speaks of the material -as being neither rich nor progressive, in fact rather stagnant, but -he goes on to add that the people had a certain receptiveness and +as being neither rich nor progressive, in fact �rather stagnant,� but +he goes on to add that the people had a certain �receptiveness� and were able to adapt themselves quickly when the next change did come. -My own understanding of the situation is that the Forest folk made +My own understanding of the situation is that the �Forest folk� made nothing as spectacular as had the producers of the earlier Magdalenian assemblage and the Franco-Cantabrian art. 
On the other hand, they _seem_ to have been making many more different kinds of tools for many more different kinds of tasks than had their Ice Age forerunners. I -emphasize seem because the preservation in the Maglemosian bogs +emphasize �seem� because the preservation in the Maglemosian bogs is very complete; certainly we cannot list anywhere near as many different things for earlier times as we did for the Maglemosians (p. 94). I believe this experimentation with all kinds of new tools and gadgets, this intensification of adaptiveness (p. 91), this -receptiveness, even if it is still only pointed toward hunting, +�receptiveness,� even if it is still only pointed toward hunting, fishing, and food-collecting, is an important thing. Remember that the only marker we have handy for the _beginning_ of -this tendency toward receptiveness and experimentation is the +this tendency toward �receptiveness� and experimentation is the little microlithic blade tools of various geometric forms. These, we saw, began before the last ice had melted away, and they lasted on in use for a very long time. I wish there were a better marker than @@ -3063,7 +3063,7 @@ CHANGES IN OTHER AREAS? All this last section was about Europe. How about the rest of the world when the last glaciers were melting away? -We simply dont know much about this particular time in other parts +We simply don�t know much about this particular time in other parts of the world except in Europe, the Mediterranean basin and the Middle East. People were certainly continuing to move into the New World by way of Siberia and the Bering Strait about this time. But for the @@ -3075,10 +3075,10 @@ clear information. REAL CHANGE AND PRELUDE IN THE NEAR EAST The appearance of the microliths and the developments made by the -Forest folk of northwestern Europe also mark an end. They show us +�Forest folk� of northwestern Europe also mark an end. They show us the terminal phase of the old food-collecting way of life. It grows increasingly clear that at about the same time that the Maglemosian and -other Forest folk were adapting themselves to hunting, fishing, and +other �Forest folk� were adapting themselves to hunting, fishing, and collecting in new ways to fit the post-glacial environment, something completely new was being made ready in western Asia. @@ -3098,7 +3098,7 @@ simply gathering or collecting it. When their food-production became reasonably effective, people could and did settle down in village-farming communities. With the appearance of the little farming villages, a new way of life was actually under way. Professor Childe -has good reason to speak of the food-producing revolution, for it was +has good reason to speak of the �food-producing revolution,� for it was indeed a revolution. @@ -3117,8 +3117,8 @@ before the _how_ and _why_ answers begin to appear. Anthropologically trained archeologists are fascinated with the cultures of men in times of great change. About ten or twelve thousand years ago, the general level of culture in many parts of the world seems to have been ready -for change. In northwestern Europe, we saw that cultures changed -just enough so that they would not have to change. We linked this to +for change. In northwestern Europe, we saw that cultures �changed +just enough so that they would not have to change.� We linked this to environmental changes with the coming of post-glacial times. In western Asia, we archeologists can prove that the food-producing @@ -3155,7 +3155,7 @@ living as the Maglemosians did? 
These are the questions we still have to face. -CULTURAL RECEPTIVENESS AND PROMISING ENVIRONMENTS +CULTURAL �RECEPTIVENESS� AND PROMISING ENVIRONMENTS Until the archeologists and the natural scientists--botanists, geologists, zoologists, and general ecologists--have spent many more @@ -3163,15 +3163,15 @@ years on the problem, we shall not have full _how_ and _why_ answers. I do think, however, that we are beginning to understand what to look for. We shall have to learn much more of what makes the cultures of men -receptive and experimental. Did change in the environment alone -force it? Was it simply a case of Professor Toynbees challenge and -response? I cannot believe the answer is quite that simple. Were it -so simple, we should want to know why the change hadnt come earlier, +�receptive� and experimental. Did change in the environment alone +force it? Was it simply a case of Professor Toynbee�s �challenge and +response?� I cannot believe the answer is quite that simple. Were it +so simple, we should want to know why the change hadn�t come earlier, along with earlier environmental changes. We shall not know the answer, however, until we have excavated the traces of many more cultures of the time in question. We shall doubtless also have to learn more about, and think imaginatively about, the simpler cultures still left today. -The mechanics of culture in general will be bound to interest us. +The �mechanics� of culture in general will be bound to interest us. It will also be necessary to learn much more of the environments of 10,000 to 12,000 years ago. In which regions of the world were the @@ -3228,7 +3228,7 @@ THE OLD THEORY TOO SIMPLE FOR THE FACTS This theory was set up before we really knew anything in detail about the later prehistory of the Near and Middle East. We now know that -the facts which have been found dont fit the old theory at all well. +the facts which have been found don�t fit the old theory at all well. Also, I have yet to find an American meteorologist who feels that we know enough about the changes in the weather pattern to say that it can have been so simple and direct. And, of course, the glacial ice which @@ -3238,7 +3238,7 @@ of great alpine glaciers, and long periods of warm weather in between. If the rain belt moved north as the glaciers melted for the last time, it must have moved in the same direction in earlier times. Thus, the forced neighborliness of men, plants, and animals in river valleys and -oases must also have happened earlier. Why didnt domestication happen +oases must also have happened earlier. Why didn�t domestication happen earlier, then? Furthermore, it does not seem to be in the oases and river valleys @@ -3275,20 +3275,20 @@ archeologists, probably through habit, favor an old scheme of Grecized names for the subdivisions: paleolithic, mesolithic, neolithic. I refuse to use these words myself. They have meant too many different things to too many different people and have tended to hide some pretty -fuzzy thinking. Probably you havent even noticed my own scheme of -subdivision up to now, but Id better tell you in general what it is. +fuzzy thinking. Probably you haven�t even noticed my own scheme of +subdivision up to now, but I�d better tell you in general what it is. I think of the earliest great group of archeological materials, from which we can deduce only a food-gathering way of culture, as the -_food-gathering stage_. I say stage rather than age, because it +_food-gathering stage_. 
I say �stage� rather than �age,� because it is not quite over yet; there are still a few primitive people in out-of-the-way parts of the world who remain in the _food-gathering stage_. In fact, Professor Julian Steward would probably prefer to call it a food-gathering _level_ of existence, rather than a stage. This would be perfectly acceptable to me. I also tend to find myself using _collecting_, rather than _gathering_, for the more recent aspects or -era of the stage, as the word collecting appears to have more sense -of purposefulness and specialization than does gathering (see p. +era of the stage, as the word �collecting� appears to have more sense +of purposefulness and specialization than does �gathering� (see p. 91). Now, while I think we could make several possible subdivisions of the @@ -3297,22 +3297,22 @@ believe the only one which means much to us here is the last or _terminal sub-era of food-collecting_ of the whole food-gathering stage. The microliths seem to mark its approach in the northwestern part of the Old World. It is really shown best in the Old World by -the materials of the Forest folk, the cultural adaptation to the +the materials of the �Forest folk,� the cultural adaptation to the post-glacial environment in northwestern Europe. We talked about -the Forest folk at the beginning of this chapter, and I used the +the �Forest folk� at the beginning of this chapter, and I used the Maglemosian assemblage of Denmark as an example. [5] It is difficult to find words which have a sequence or gradation of meaning with respect to both development and a range of time in the past, or with a range of time from somewhere in the past which is perhaps not yet ended. One standard Webster definition - of _stage_ is: One of the steps into which the material - development of man ... is divided. I cannot find any dictionary + of _stage_ is: �One of the steps into which the material + development of man ... is divided.� I cannot find any dictionary definition that suggests which of the words, _stage_ or _era_, has the meaning of a longer span of time. Therefore, I have chosen to let my eras be shorter, and to subdivide my stages - into eras. Webster gives _era_ as: A signal stage of history, - an epoch. When I want to subdivide my eras, I find myself using + into eras. Webster gives _era_ as: �A signal stage of history, + an epoch.� When I want to subdivide my eras, I find myself using _sub-eras_. Thus I speak of the _eras_ within a _stage_ and of the _sub-eras_ within an _era_; that is, I do so when I feel that I really have to, and when the evidence is clear enough to @@ -3328,9 +3328,9 @@ realms of culture. It is rather that for most of prehistoric time the materials left to the archeologists tend to limit our deductions to technology and economics. -Im so soon out of my competence, as conventional ancient history +I�m so soon out of my competence, as conventional ancient history begins, that I shall only suggest the earlier eras of the -food-producing stage to you. This book is about prehistory, and Im not +food-producing stage to you. This book is about prehistory, and I�m not a universal historian. @@ -3339,28 +3339,28 @@ THE TWO EARLIEST ERAS OF THE FOOD-PRODUCING STAGE The food-producing stage seems to appear in western Asia with really revolutionary suddenness. It is seen by the relative speed with which the traces of new crafts appear in the earliest village-farming -community sites weve dug. It is seen by the spread and multiplication +community sites we�ve dug. 
It is seen by the spread and multiplication of these sites themselves, and the remarkable growth in human -population we deduce from this increase in sites. Well look at some +population we deduce from this increase in sites. We�ll look at some of these sites and the archeological traces they yield in the next chapter. When such village sites begin to appear, I believe we are in the _era of the primary village-farming community_. I also believe this is the second era of the food-producing stage. The first era of the food-producing stage, I believe, was an _era of -incipient cultivation and animal domestication_. I keep saying I -believe because the actual evidence for this earlier era is so slight +incipient cultivation and animal domestication_. I keep saying �I +believe� because the actual evidence for this earlier era is so slight that one has to set it up mainly by playing a hunch for it. The reason for playing the hunch goes about as follows. One thing we seem to be able to see, in the food-collecting era in general, is a tendency for people to begin to settle down. This settling down seemed to become further intensified in the terminal -era. How this is connected with Professor Mathiassens receptiveness +era. How this is connected with Professor Mathiassen�s �receptiveness� and the tendency to be experimental, we do not exactly know. The evidence from the New World comes into play here as well as that from the Old World. With this settling down in one place, the people of the -terminal era--especially the Forest folk whom we know best--began +terminal era--especially the �Forest folk� whom we know best--began making a great variety of new things. I remarked about this earlier in the chapter. Dr. Robert M. Adams is of the opinion that this atmosphere of experimentation with new tools--with new ways of collecting food--is @@ -3368,9 +3368,9 @@ the kind of atmosphere in which one might expect trials at planting and at animal domestication to have been made. We first begin to find traces of more permanent life in outdoor camp sites, although caves were still inhabited at the beginning of the terminal era. It is not -surprising at all that the Forest folk had already domesticated the +surprising at all that the �Forest folk� had already domesticated the dog. In this sense, the whole era of food-collecting was becoming ready -and almost incipient for cultivation and animal domestication. +and almost �incipient� for cultivation and animal domestication. Northwestern Europe was not the place for really effective beginnings in agriculture and animal domestication. These would have had to take @@ -3425,13 +3425,13 @@ zone which surrounds the drainage basin of the Tigris and Euphrates Rivers at elevations of from approximately 2,000 to 5,000 feet. The lower alluvial land of the Tigris-Euphrates basin itself has very little rainfall. Some years ago Professor James Henry Breasted called -the alluvial lands of the Tigris-Euphrates a part of the fertile -crescent. These alluvial lands are very fertile if irrigated. Breasted +the alluvial lands of the Tigris-Euphrates a part of the �fertile +crescent.� These alluvial lands are very fertile if irrigated. Breasted was most interested in the oriental civilizations of conventional ancient history, and irrigation had been discovered before they appeared. 
-The country of hilly flanks above Breasteds crescent receives from +The country of hilly flanks above Breasted�s crescent receives from 10 to 20 or more inches of winter rainfall each year, which is about what Kansas has. Above the hilly-flanks zone tower the peaks and ridges of the Lebanon-Amanus chain bordering the coast-line from Palestine @@ -3440,7 +3440,7 @@ range of the Iraq-Iran borderland. This rugged mountain frame for our hilly-flanks zone rises to some magnificent alpine scenery, with peaks of from ten to fifteen thousand feet in elevation. There are several gaps in the Mediterranean coastal portion of the frame, through which -the winters rain-bearing winds from the sea may break so as to carry +the winter�s rain-bearing winds from the sea may break so as to carry rain to the foothills of the Taurus and the Zagros. The picture I hope you will have from this description is that of an @@ -3482,7 +3482,7 @@ hilly-flanks zone in their wild state. With a single exception--that of the dog--the earliest positive evidence of domestication includes the two forms of wheat, the barley, and the goat. The evidence comes from within the hilly-flanks zone. -However, it comes from a settled village proper, Jarmo (which Ill +However, it comes from a settled village proper, Jarmo (which I�ll describe in the next chapter), and is thus from the era of the primary village-farming community. We are still without positive evidence of domesticated grain and animals in the first era of the food-producing @@ -3534,9 +3534,9 @@ and the spread of ideas of people who had passed on into one of the more developed eras. In many cases, the terminal era of food-collecting was ended by the incoming of the food-producing peoples themselves. For example, the practices of food-production were carried into Europe -by the actual movement of some numbers of peoples (we dont know how +by the actual movement of some numbers of peoples (we don�t know how many) who had reached at least the level of the primary village-farming -community. The Forest folk learned food-production from them. There +community. The �Forest folk� learned food-production from them. There was never an era of incipient cultivation and domestication proper in Europe, if my hunch is right. @@ -3547,16 +3547,16 @@ The way I see it, two things were required in order that an era of incipient cultivation and domestication could begin. First, there had to be the natural environment of a nuclear area, with its whole group of plants and animals capable of domestication. This is the aspect of -the matter which weve said is directly given by nature. But it is +the matter which we�ve said is directly given by nature. But it is quite possible that such an environment with such a group of plants and animals in it may have existed well before ten thousand years ago in the Near East. It is also quite possible that the same promising condition may have existed in regions which never developed into nuclear areas proper. Here, again, we come back to the cultural factor. -I think it was that atmosphere of experimentation weve talked about -once or twice before. I cant define it for you, other than to say that +I think it was that �atmosphere of experimentation� we�ve talked about +once or twice before. I can�t define it for you, other than to say that by the end of the Ice Age, the general level of many cultures was ready -for change. Ask me how and why this was so, and Ill tell you we dont +for change. 
Ask me how and why this was so, and I�ll tell you we don�t know yet, and that if we did understand this kind of question, there would be no need for me to go on being a prehistorian! @@ -3590,7 +3590,7 @@ such collections for the modern wild forms of animals and plants from some of our nuclear areas. In the nuclear area in the Near East, some of the wild animals, at least, have already become extinct. There are no longer wild cattle or wild horses in western Asia. We know they were -there from the finds weve made in caves of late Ice Age times, and +there from the finds we�ve made in caves of late Ice Age times, and from some slightly later sites. @@ -3601,7 +3601,7 @@ incipient era of cultivation and animal domestication. I am closing this chapter with descriptions of two of the best Near Eastern examples I know of. You may not be satisfied that what I am able to describe makes a full-bodied era of development at all. Remember, however, that -Ive told you Im largely playing a kind of a hunch, and also that the +I�ve told you I�m largely playing a kind of a hunch, and also that the archeological materials of this era will always be extremely difficult to interpret. At the beginning of any new way of life, there will be a great tendency for people to make-do, at first, with tools and habits @@ -3613,7 +3613,7 @@ THE NATUFIAN, AN ASSEMBLAGE OF THE INCIPIENT ERA The assemblage called the Natufian comes from the upper layers of a number of caves in Palestine. Traces of its flint industry have also -turned up in Syria and Lebanon. We dont know just how old it is. I +turned up in Syria and Lebanon. We don�t know just how old it is. I guess that it probably falls within five hundred years either way of about 5000 B.C. @@ -3662,7 +3662,7 @@ pendants. There were also beads and pendants of pierced teeth and shell. A number of Natufian burials have been found in the caves; some burials were grouped together in one grave. The people who were buried within the Mount Carmel cave were laid on their backs in an extended position, -while those on the terrace seem to have been flexed (placed in their +while those on the terrace seem to have been �flexed� (placed in their graves in a curled-up position). This may mean no more than that it was easier to dig a long hole in cave dirt than in the hard-packed dirt of the terrace. The people often had some kind of object buried with them, @@ -3679,7 +3679,7 @@ beads. GROUND STONE BONE] -The animal bones of the Natufian layers show beasts of a modern type, +The animal bones of the Natufian layers show beasts of a �modern� type, but with some differences from those of present-day Palestine. The bones of the gazelle far outnumber those of the deer; since gazelles like a much drier climate than deer, Palestine must then have had much @@ -3692,9 +3692,9 @@ Maglemosian of northern Europe. More recently, it has been reported that a domesticated goat is also part of the Natufian finds. The study of the human bones from the Natufian burials is not yet -complete. Until Professor McCowns study becomes available, we may note -Professor Coons assessment that these people were of a basically -Mediterranean type. +complete. Until Professor McCown�s study becomes available, we may note +Professor Coon�s assessment that these people were of a �basically +Mediterranean type.� THE KARIM SHAHIR ASSEMBLAGE @@ -3704,11 +3704,11 @@ of a temporary open site or encampment. It lies on the top of a bluff in the Kurdish hill-country of northeastern Iraq. It was dug by Dr. 
Bruce Howe of the expedition I directed in 1950-51 for the Oriental Institute and the American Schools of Oriental Research. In 1954-55, -our expedition located another site, Mlefaat, with general resemblance +our expedition located another site, M�lefaat, with general resemblance to Karim Shahir, but about a hundred miles north of it. In 1956, Dr. Ralph Solecki located still another Karim Shahir type of site called Zawi Chemi Shanidar. The Zawi Chemi site has a radiocarbon date of 8900 - 300 B.C. +� 300 B.C. Karim Shahir has evidence of only one very shallow level of occupation. It was probably not lived on very long, although the people who lived @@ -3717,7 +3717,7 @@ layer yielded great numbers of fist-sized cracked pieces of limestone, which had been carried up from the bed of a stream at the bottom of the bluff. We think these cracked stones had something to do with a kind of architecture, but we were unable to find positive traces of hut plans. -At Mlefaat and Zawi Chemi, there were traces of rounded hut plans. +At M�lefaat and Zawi Chemi, there were traces of rounded hut plans. As in the Natufian, the great bulk of small objects of the Karim Shahir assemblage was in chipped flint. A large proportion of the flint tools @@ -3737,7 +3737,7 @@ clay figurines which seemed to be of animal form. UNBAKED CLAY SHELL BONE - ARCHITECTURE] + �ARCHITECTURE�] Karim Shahir did not yield direct evidence of the kind of vegetable food its people ate. The animal bones showed a considerable @@ -3746,7 +3746,7 @@ domestication--sheep, goat, cattle, horse, dog--as compared with animal bones from the earlier cave sites of the area, which have a high proportion of bones of wild forms like deer and gazelle. But we do not know that any of the Karim Shahir animals were actually domesticated. -Some of them may have been, in an incipient way, but we have no means +Some of them may have been, in an �incipient� way, but we have no means at the moment that will tell us from the bones alone. @@ -3761,7 +3761,7 @@ goat, and the general animal situation at Karim Shahir to hint at an incipient approach to food-production. At Karim Shahir, there was the tendency to settle down out in the open; this is echoed by the new reports of open air Natufian sites. The large number of cracked stones -certainly indicates that it was worth the peoples while to have some +certainly indicates that it was worth the peoples� while to have some kind of structure, even if the site as a whole was short-lived. It is a part of my hunch that these things all point toward @@ -3771,13 +3771,13 @@ which we shall look at next, are fully food-producing, the Natufian and Karim Shahir folk had not yet arrived. I think they were part of a general build-up to full scale food-production. They were possibly controlling a few animals of several kinds and perhaps one or two -plants, without realizing the full possibilities of this control as a +plants, without realizing the full possibilities of this �control� as a new way of life. This is why I think of the Karim Shahir and Natufian folk as being at a level, or in an era, of incipient cultivation and domestication. But we shall have to do a great deal more excavation in this range of time -before well get the kind of positive information we need. +before we�ll get the kind of positive information we need. SUMMARY @@ -3798,7 +3798,7 @@ history. We know the earliest village-farming communities appeared in western Asia, in a nuclear area. 
We do not yet know why the Near Eastern -experiment came first, or why it didnt happen earlier in some other +experiment came first, or why it didn�t happen earlier in some other nuclear area. Apparently, the level of culture and the promise of the natural environment were ready first in western Asia. The next sites we look at will show a simple but effective food-production already @@ -3835,7 +3835,7 @@ contrast between food-collecting and food-producing as ways of life. THE DIFFERENCE BETWEEN FOOD-COLLECTORS AND FOOD-PRODUCERS -Childe used the word revolution because of the radical change that +Childe used the word �revolution� because of the radical change that took place in the habits and customs of man. Food-collectors--that is, hunters, fishers, berry- and nut-gatherers--had to live in small groups or bands, for they had to be ready to move wherever their food supply @@ -3851,7 +3851,7 @@ for clothing beyond the tools that were probably used to dress the skins of animals; no time to think of much of anything but food and protection and disposal of the dead when death did come: an existence which takes nature as it finds it, which does little or nothing to -modify nature--all in all, a savages existence, and a very tough one. +modify nature--all in all, a savage�s existence, and a very tough one. A man who spends his whole life following animals just to kill them to eat, or moving from one berry patch to another, is really living just like an animal himself. @@ -3859,10 +3859,10 @@ like an animal himself. THE FOOD-PRODUCING ECONOMY -Against this picture let me try to draw another--that of mans life -after food-production had begun. His meat was stored on the hoof, +Against this picture let me try to draw another--that of man�s life +after food-production had begun. His meat was stored �on the hoof,� his grain in silos or great pottery jars. He lived in a house: it was -worth his while to build one, because he couldnt move far from his +worth his while to build one, because he couldn�t move far from his fields and flocks. In his neighborhood enough food could be grown and enough animals bred so that many people were kept busy. They all lived close to their flocks and fields, in a village. The village was @@ -3872,7 +3872,7 @@ Children and old men could shepherd the animals by day or help with the lighter work in the fields. After the crops had been harvested the younger men might go hunting and some of them would fish, but the food they brought in was only an addition to the food in the village; the -villagers wouldnt starve, even if the hunters and fishermen came home +villagers wouldn�t starve, even if the hunters and fishermen came home empty-handed. There was more time to do different things, too. They began to modify @@ -3885,23 +3885,23 @@ people in the village who were becoming full-time craftsmen. Other things were changing, too. The villagers must have had to agree on new rules for living together. The head man of the village had problems different from those of the chief of the small -food-collectors band. If somebodys flock of sheep spoiled a wheat +food-collectors� band. If somebody�s flock of sheep spoiled a wheat field, the owner wanted payment for the grain he lost. The chief of the hunters was never bothered with such questions. Even the gods had changed. The spirits and the magic that had been used by hunters -werent of any use to the villagers. They needed gods who would watch +weren�t of any use to the villagers. 
They needed gods who would watch over the fields and the flocks, and they eventually began to erect buildings where their gods might dwell, and where the men who knew most about the gods might live. -WAS FOOD-PRODUCTION A REVOLUTION? +WAS FOOD-PRODUCTION A �REVOLUTION�? If you can see the difference between these two pictures--between life in the food-collecting stage and life after food-production -had begun--youll see why Professor Childe speaks of a revolution. -By revolution, he doesnt mean that it happened over night or that -it happened only once. We dont know exactly how long it took. Some +had begun--you�ll see why Professor Childe speaks of a revolution. +By revolution, he doesn�t mean that it happened over night or that +it happened only once. We don�t know exactly how long it took. Some people think that all these changes may have occurred in less than 500 years, but I doubt that. The incipient era was probably an affair of some duration. Once the level of the village-farming community had @@ -3915,7 +3915,7 @@ been achieved with truly revolutionary suddenness. GAPS IN OUR KNOWLEDGE OF THE NEAR EAST -If youll look again at the chart (p. 111) youll see that I have +If you�ll look again at the chart (p. 111) you�ll see that I have very few sites and assemblages to name in the incipient era of cultivation and domestication, and not many in the earlier part of the primary village-farming level either. Thanks in no small part @@ -3926,20 +3926,20 @@ yard-stick here. But I am far from being able to show you a series of Sears Roebuck catalogues, even century by century, for any part of the nuclear area. There is still a great deal of earth to move, and a great mass of material to recover and interpret before we even begin to -understand how and why. +understand �how� and �why.� Perhaps here, because this kind of archeology is really my specialty, -youll excuse it if I become personal for a moment. I very much look +you�ll excuse it if I become personal for a moment. I very much look forward to having further part in closing some of the gaps in knowledge -of the Near East. This is not, as Ive told you, the spectacular +of the Near East. This is not, as I�ve told you, the spectacular range of Near Eastern archeology. There are no royal tombs, no gold, no great buildings or sculpture, no writing, in fact nothing to excite the normal museum at all. Nevertheless it is a range which, idea-wise, gives the archeologist tremendous satisfaction. The country of the hilly flanks is an exciting combination of green grasslands and mountainous ridges. The Kurds, who inhabit the part of the area -in which Ive worked most recently, are an extremely interesting and -hospitable people. Archeologists dont become rich, but Ill forego +in which I�ve worked most recently, are an extremely interesting and +hospitable people. Archeologists don�t become rich, but I�ll forego the Cadillac for any bright spring morning in the Kurdish hills, on a good site with a happy crew of workmen and an interested and efficient staff. It is probably impossible to convey the full feeling which life @@ -3965,15 +3965,15 @@ like the use of pottery borrowed from the more developed era of the same time in the nuclear area. 
The same general explanation doubtless holds true for certain materials in Egypt, along the upper Nile and in the Kharga oasis: these materials, called Sebilian III, the Khartoum
-neolithic, and the Khargan microlithic, are from surface sites,
+“neolithic,” and the Khargan microlithic, are from surface sites,
 not from caves. The chart (p. 111) shows where I would place these
 materials in era and time.
 
 [Illustration: THE HILLY FLANKS OF THE CRESCENT AND EARLY SITES OF THE
 NEAR EAST]
 
-Both Mlefaat and Dr. Soleckis Zawi Chemi Shanidar site appear to have
-been slightly more settled in than was Karim Shahir itself. But I do
+Both M’lefaat and Dr. Solecki’s Zawi Chemi Shanidar site appear to have
+been slightly more “settled in” than was Karim Shahir itself. But I do
 not think they belong to the era of farming-villages proper. The first
 site of this era, in the hills of Iraqi Kurdistan, is Jarmo, on which
 we have spent three seasons of work. Following Jarmo comes a variety of
@@ -3989,9 +3989,9 @@ times when their various cultures flourished, there must have been many
 little villages which shared the same general assemblage. We are only
 now beginning to locate them again. Thus, if I speak of Jarmo, or
 Jericho, or Sialk as single examples of their particular kinds of
-assemblages, I dont mean that they were unique at all. I think I could
+assemblages, I don’t mean that they were unique at all. I think I could
 take you to the sites of at least three more Jarmos, within twenty
-miles of the original one. They are there, but they simply havent yet
+miles of the original one. They are there, but they simply haven’t yet
 been excavated. In 1956, a Danish expedition discovered material of
 Jarmo type at Shimshara, only two dozen miles northeast of Jarmo, and
 below an assemblage of Hassunan type (which I shall describe presently).
@@ -4000,15 +4000,15 @@ below an assemblage of Hassunan type (which I shall describe presently).
 
 THE GAP BETWEEN KARIM SHAHIR AND JARMO
 
 As we see the matter now, there is probably still a gap in the
-available archeological record between the Karim Shahir-Mlefaat-Zawi
+available archeological record between the Karim Shahir-M’lefaat-Zawi
 Chemi group (of the incipient era) and that of Jarmo (of the
 village-farming era). Although some items of the Jarmo type materials
 do reflect the beginnings of traditions set in the Karim Shahir group
 (see p. 120), there is not a clear continuity. Moreover--to the
 degree that we may trust a few radiocarbon dates--there would appear
 to be around two thousand years of difference in time. The single
-available Zawi Chemi date is 8900 300 B.C.; the most reasonable
-group of dates from Jarmo average to about 6750 200 B.C. I am
+available Zawi Chemi “date” is 8900 ± 300 B.C.; the most reasonable
+group of “dates” from Jarmo average to about 6750 ± 200 B.C. I am
 uncertain about this two thousand years--I do not think it can have
 been so long.
 
 
@@ -4021,7 +4021,7 @@ JARMO, IN THE KURDISH HILLS, IRAQ
 
 The site of Jarmo has a depth of deposit of about twenty-seven feet,
 and approximately a dozen layers of architectural renovation and
-change. Nevertheless it is a one period site: its assemblage remains
+change. Nevertheless it is a “one period” site: its assemblage remains
 essentially the same throughout, although one or two new items are
 added in later levels. It covers about four acres of the top of a
 bluff, below which runs a small stream. 
Jarmo lies in the hill country
@@ -4078,7 +4078,7 @@ human beings in clay; one type of human figurine they favored was that
 of a markedly pregnant woman, probably the expression of some sort of
 fertility spirit. They provided their house floors with baked-in-place
 depressions, either as basins or hearths, and later with domed ovens of
-clay. As weve noted, the houses themselves were of clay or mud; one
+clay. As we’ve noted, the houses themselves were of clay or mud; one
 could almost say they were built up like a house-sized pot. Then,
 finally, the idea of making portable pottery itself appeared, although
 I very much doubt that the people of the Jarmo village discovered the
@@ -4095,11 +4095,11 @@ over three hundred miles to the north. Already a bulk carrying trade
 had been established--the forerunner of commerce--and the routes were
 set by which, in later times, the metal trade was to move.
 
-There are now twelve radioactive carbon dates from Jarmo. The most
-reasonable cluster of determinations averages to about 6750 200
-B.C., although there is a completely unreasonable range of dates
+There are now twelve radioactive carbon “dates” from Jarmo. The most
+reasonable cluster of determinations averages to about 6750 ± 200
+B.C., although there is a completely unreasonable range of “dates”
 running from 3250 to 9250 B.C.! _If_ I am right in what I take to be
-reasonable, the first flush of the food-producing revolution had been
+“reasonable,” the first flush of the food-producing revolution had been
 achieved almost nine thousand years ago.
 
 
@@ -4117,7 +4117,7 @@ it, but the Hassunan sites seem to cluster at slightly lower elevations
 than those we have been talking about so far.
 
 The catalogue of the Hassuna assemblage is of course more full and
-elaborate than that of Jarmo. The Iraqi governments archeologists
+elaborate than that of Jarmo. The Iraqi government’s archeologists
 who dug Hassuna itself, exposed evidence of increasing architectural
 know-how. The walls of houses were still formed of puddled mud;
 sun-dried bricks appear only in later periods. There were now several
@@ -4130,16 +4130,16 @@ largely disappeared by Hassunan times. The flint work of the Hassunan
 catalogue is, by and large, a wretched affair. We might guess that the
 kinaesthetic concentration of the Hassuna craftsmen now went into other
 categories; that is, they suddenly discovered they might have more fun
-working with the newer materials. Its a shame, for example, that none
+working with the newer materials. It’s a shame, for example, that none
 of their weaving is preserved for us.
 
 The two available radiocarbon determinations from Hassunan contexts
-stand at about 5100 and 5600 B.C. 250 years.
+stand at about 5100 and 5600 B.C. ± 250 years.
 
 
 OTHER EARLY VILLAGE SITES IN THE NUCLEAR AREA
 
-Ill now name and very briefly describe a few of the other early
+I’ll now name and very briefly describe a few of the other early
 village assemblages either in or adjacent to the hilly flanks of the
 crescent. Unfortunately, we do not have radioactive carbon dates for
 many of these materials. We may guess that some particular assemblage,
@@ -4177,7 +4177,7 @@ ecological niche, some seven hundred feet below sea level; it is
 geographically within the hilly-flanks zone but environmentally not
 part of it.
 
-Several radiocarbon dates for Jericho fall within the range of those
+Several radiocarbon “dates” for Jericho fall within the range of those
 I find reasonable for Jarmo, and their internal statistical consistency
 is far better than that for the Jarmo determinations. It is not yet
 clear exactly what this means.
@@ -4226,7 +4226,7 @@ how things were made are different; the Sialk assemblage represents
 still another cultural pattern. I suspect it appeared a bit later in
 time than did that of Hassuna. There is an important new item in the
 Sialk catalogue. The Sialk people made small drills or pins of
-hammered copper. Thus the metallurgists specialized craft had made its
+hammered copper. Thus the metallurgist’s specialized craft had made its
 appearance.
 
 There is at least one very early Iranian site on the inward slopes
@@ -4246,7 +4246,7 @@ shore of the Fayum lake. The Fayum materials come mainly from grain
 bins or silos. Another site, Merimde, in the western part of the Nile
 delta, shows the remains of a true village, but it may be slightly
 later than the settlement of the Fayum. There are radioactive carbon
-dates for the Fayum materials at about 4275 B.C. 320 years, which
+“dates” for the Fayum materials at about 4275 B.C. ± 320 years, which
 is almost fifteen hundred years later than the determinations suggested
 for the Hassunan or Syro-Cilician assemblages. I suspect that this is
 a somewhat over-extended indication of the time it took for the
@@ -4260,13 +4260,13 @@ the mound called Shaheinab. The Shaheinab catalogue roughly corresponds
 to that of the Fayum; the distance between the two places, as the Nile
 flows, is roughly 1,500 miles. Thus it took almost a thousand years for
 the new way of life to be carried as far south into Africa as Khartoum;
-the two Shaheinab dates average about 3300 B.C. 400 years.
+the two Shaheinab “dates” average about 3300 B.C. ± 400 years.
 
 If the movement was up the Nile (southward), as these dates suggest,
 then I suspect that the earliest available village material of middle
 Egypt, the so-called Tasian, is also later than that of the Fayum. The
 Tasian materials come from a few graves near a village called Deir
-Tasa, and I have an uncomfortable feeling that the Tasian assemblage
+Tasa, and I have an uncomfortable feeling that the Tasian “assemblage”
 may be mainly an artificial selection of poor examples of objects which
 belong in the following range of time.
 
@@ -4280,7 +4280,7 @@ spread outward in space from the nuclear area, as time went on. There
 is good archeological evidence that both these processes took place.
 For the hill country of northeastern Iraq, in the nuclear area, we have
 already noticed how the succession (still with gaps) from Karim
-Shahir, through Mlefaat and Jarmo, to Hassuna can be charted (see
+Shahir, through M’lefaat and Jarmo, to Hassuna can be charted (see
 chart, p. 111). In the next chapter, we shall continue this charting
 and description of what happened in Iraq upward through time. We also
 watched traces of the new way of life move through space up the Nile
@@ -4299,7 +4299,7 @@ appearance of the village-farming community there--is still an open
 one. In the last chapter, we noted the probability of an independent
 nuclear area in southeastern Asia. Professor Carl Sauer strongly
 champions the great importance of this area as _the_ original center
-of agricultural pursuits, as a kind of cradle of all incipient eras
+of agricultural pursuits, as a kind of “cradle” of all incipient eras
 of the Old World at least. 
While there is certainly not the slightest archeological evidence to allow us to go that far, we may easily expect that an early southeast Asian development would have been felt in @@ -4311,13 +4311,13 @@ way of life moved well beyond Khartoum in Africa. THE SPREAD OF THE VILLAGE-FARMING COMMUNITY WAY OF LIFE INTO EUROPE -How about Europe? I wont give you many details. You can easily imagine +How about Europe? I won�t give you many details. You can easily imagine that the late prehistoric prelude to European history is a complicated affair. We all know very well how complicated an area Europe is now, with its welter of different languages and cultures. Remember, however, that a great deal of archeology has been done on the late prehistory of Europe, and very little on that of further Asia and Africa. If we knew -as much about these areas as we do of Europe, I expect wed find them +as much about these areas as we do of Europe, I expect we�d find them just as complicated. This much is clear for Europe, as far as the spread of the @@ -4329,21 +4329,21 @@ in western Asia. I do not, of course, mean that there were traveling salesmen who carried these ideas and things to Europe with a commercial gleam in their eyes. The process took time, and the ideas and things must have been passed on from one group of people to the next. There -was also some actual movement of peoples, but we dont know the size of +was also some actual movement of peoples, but we don�t know the size of the groups that moved. -The story of the colonization of Europe by the first farmers is +The story of the �colonization� of Europe by the first farmers is thus one of (1) the movement from the eastern Mediterranean lands of some people who were farmers; (2) the spread of ideas and things beyond the Near East itself and beyond the paths along which the -colonists moved; and (3) the adaptations of the ideas and things -by the indigenous Forest folk, about whose receptiveness Professor +�colonists� moved; and (3) the adaptations of the ideas and things +by the indigenous �Forest folk�, about whose �receptiveness� Professor Mathiassen speaks (p. 97). It is important to note that the resulting cultures in the new European environment were European, not Near -Eastern. The late Professor Childe remarked that the peoples of the +Eastern. The late Professor Childe remarked that �the peoples of the West were not slavish imitators; they adapted the gifts from the East ... into a new and organic whole capable of developing on its own -original lines. +original lines.� THE WAYS TO EUROPE @@ -4389,19 +4389,19 @@ Hill, the earliest known trace of village-farming communities in England, is about 2500 B.C. I would expect about 5500 B.C. to be a safe date to give for the well-developed early village communities of Syro-Cilicia. We suspect that the spread throughout Europe did not -proceed at an even rate. Professor Piggott writes that at a date +proceed at an even rate. Professor Piggott writes that �at a date probably about 2600 B.C., simple agricultural communities were being established in Spain and southern France, and from the latter region a spread northwards can be traced ... from points on the French seaboard of the [English] Channel ... there were emigrations of a certain number of these tribes by boat, across to the chalk lands of Wessex and Sussex [in England], probably not more than three or four generations later -than the formation of the south French colonies. 
+than the formation of the south French colonies.� New radiocarbon determinations are becoming available all the time--already several suggest that the food-producing way of life had reached the lower Rhine and Holland by 4000 B.C. But not all -prehistorians accept these dates, so I do not show them on my map +prehistorians accept these �dates,� so I do not show them on my map (p. 139). @@ -4427,7 +4427,7 @@ concentric sets of banks and ditches. Traces of oblong timber houses have been found, but not within the enclosures. The second type of structure is mine-shafts, dug down into the chalk beds where good flint for the making of axes or hoes could be found. The third type -of structure is long simple mounds or unchambered barrows, in one +of structure is long simple mounds or �unchambered barrows,� in one end of which burials were made. It has been commonly believed that the Windmill Hill assemblage belonged entirely to the cultural tradition which moved up through France to the Channel. Professor Piggott is now @@ -4443,12 +4443,12 @@ consists mainly of tombs and the contents of tombs, with only very rare settlement sites. The tombs were of some size and received the bodies of many people. The tombs themselves were built of stone, heaped over with earth; the stones enclosed a passage to a central chamber -(passage graves), or to a simple long gallery, along the sides of -which the bodies were laid (gallery graves). The general type of -construction is called megalithic (= great stone), and the whole +(�passage graves�), or to a simple long gallery, along the sides of +which the bodies were laid (�gallery graves�). The general type of +construction is called �megalithic� (= great stone), and the whole earth-mounded structure is often called a _barrow_. Since many have -proper chambers, in one sense or another, we used the term unchambered -barrow above to distinguish those of the Windmill Hill type from these +proper chambers, in one sense or another, we used the term �unchambered +barrow� above to distinguish those of the Windmill Hill type from these megalithic structures. There is some evidence for sacrifice, libations, and ceremonial fires, and it is clear that some form of community ritual was focused on the megalithic tombs. @@ -4466,7 +4466,7 @@ The third early British group of antiquities of this general time It is not so certain that the people who made this assemblage, called Peterborough, were actually farmers. While they may on occasion have practiced a simple agriculture, many items of their assemblage link -them closely with that of the Forest folk of earlier times in +them closely with that of the �Forest folk� of earlier times in England and in the Baltic countries. Their pottery is decorated with impressions of cords and is quite different from that of Windmill Hill and the megalithic builders. In addition, the distribution of their @@ -4479,7 +4479,7 @@ to acquire the raw material for stone axes. A probably slightly later culture, whose traces are best known from Skara Brae on Orkney, also had its roots in those cultures of the -Baltic area which fused out of the meeting of the Forest folk and +Baltic area which fused out of the meeting of the �Forest folk� and the peoples who took the eastern way into Europe. Skara Brae is very well preserved, having been built of thin stone slabs about which dune-sand drifted after the village died. The individual houses, the @@ -4498,14 +4498,14 @@ details which I have omitted in order to shorten the story. 
I believe some of the difficulty we have in understanding the establishment of the first farming communities in Europe is with -the word colonization. We have a natural tendency to think of -colonization as it has happened within the last few centuries. In the +the word �colonization.� We have a natural tendency to think of +�colonization� as it has happened within the last few centuries. In the case of the colonization of the Americas, for example, the colonists came relatively quickly, and in increasingly vast numbers. They had vastly superior technical, political, and war-making skills, compared with those of the Indians. There was not much mixing with the Indians. The case in Europe five or six thousand years ago must have been very -different. I wonder if it is even proper to call people colonists +different. I wonder if it is even proper to call people �colonists� who move some miles to a new region, settle down and farm it for some years, then move on again, generation after generation? The ideas and the things which these new people carried were only _potentially_ @@ -4521,12 +4521,12 @@ migrants were moving by boat, long distances may have been covered in a short time. Remember, however, we seem to have about three thousand years between the early Syro-Cilician villages and Windmill Hill. -Let me repeat Professor Childe again. The peoples of the West were +Let me repeat Professor Childe again. �The peoples of the West were not slavish imitators: they adapted the gifts from the East ... into a new and organic whole capable of developing on its own original -lines. Childe is of course completely conscious of the fact that his -peoples of the West were in part the descendants of migrants who came -originally from the East, bringing their gifts with them. This +lines.� Childe is of course completely conscious of the fact that his +�peoples of the West� were in part the descendants of migrants who came +originally from the �East,� bringing their �gifts� with them. This was the late prehistoric achievement of Europe--to take new ideas and things and some migrant peoples and, by mixing them with the old in its own environments, to forge a new and unique series of cultures. @@ -4553,14 +4553,14 @@ things first happened there and also because I know it best. There is another interesting thing, too. We have seen that the first experiment in village-farming took place in the Near East. So did -the first experiment in civilization. Both experiments took. The +the first experiment in civilization. Both experiments �took.� The traditions we live by today are based, ultimately, on those ancient beginnings in food-production and civilization in the Near East. -WHAT CIVILIZATION MEANS +WHAT �CIVILIZATION� MEANS -I shall not try to define civilization for you; rather, I shall +I shall not try to define �civilization� for you; rather, I shall tell you what the word brings to my mind. To me civilization means urbanization: the fact that there are cities. It means a formal political set-up--that there are kings or governing bodies that the @@ -4606,7 +4606,7 @@ of Mexico, the Mayas of Yucatan and Guatemala, and the Incas of the Andes were civilized. -WHY DIDNT CIVILIZATION COME TO ALL FOOD-PRODUCERS? +WHY DIDN�T CIVILIZATION COME TO ALL FOOD-PRODUCERS? Once you have food-production, even at the well-advanced level of the village-farming community, what else has to happen before you @@ -4625,13 +4625,13 @@ early civilization, is still an open and very interesting question. 
WHERE CIVILIZATION FIRST APPEARED IN THE NEAR EAST You remember that our earliest village-farming communities lay along -the hilly flanks of a great crescent. (See map on p. 125.) -Professor Breasteds fertile crescent emphasized the rich river +the hilly flanks of a great �crescent.� (See map on p. 125.) +Professor Breasted�s �fertile crescent� emphasized the rich river valleys of the Nile and the Tigris-Euphrates Rivers. Our hilly-flanks area of the crescent zone arches up from Egypt through Palestine and Syria, along southern Turkey into northern Iraq, and down along the southwestern fringe of Iran. The earliest food-producing villages we -know already existed in this area by about 6750 B.C. ( 200 years). +know already existed in this area by about 6750 B.C. (� 200 years). Now notice that this hilly-flanks zone does not include southern Mesopotamia, the alluvial land of the lower Tigris and Euphrates in @@ -4639,7 +4639,7 @@ Iraq, or the Nile Valley proper. The earliest known villages of classic Mesopotamia and Egypt seem to appear fifteen hundred or more years after those of the hilly-flanks zone. For example, the early Fayum village which lies near a lake west of the Nile Valley proper (see p. -135) has a radiocarbon date of 4275 B.C. 320 years. It was in the +135) has a radiocarbon date of 4275 B.C. � 320 years. It was in the river lands, however, that the immediate beginnings of civilization were made. @@ -4657,8 +4657,8 @@ THE HILLY-FLANKS ZONE VERSUS THE RIVER LANDS Why did these two civilizations spring up in these two river lands which apparently were not even part of the area where the -village-farming community began? Why didnt we have the first -civilizations in Palestine, Syria, north Iraq, or Iran, where were +village-farming community began? Why didn�t we have the first +civilizations in Palestine, Syria, north Iraq, or Iran, where we�re sure food-production had had a long time to develop? I think the probable answer gives a clue to the ways in which civilization began in Egypt and Mesopotamia. @@ -4669,7 +4669,7 @@ and Syria. There are pleasant mountain slopes, streams running out to the sea, and rain, at least in the winter months. The rain belt and the foothills of the Turkish mountains also extend to northern Iraq and on to the Iranian plateau. The Iranian plateau has its mountain valleys, -streams, and some rain. These hilly flanks of the crescent, through +streams, and some rain. These hilly flanks of the �crescent,� through most of its arc, are almost made-to-order for beginning farmers. The grassy slopes of the higher hills would be pasture for their herds and flocks. As soon as the earliest experiments with agriculture and @@ -4720,10 +4720,10 @@ Obviously, we can no longer find the first dikes or reservoirs of the Nile Valley, or the first canals or ditches of Mesopotamia. The same land has been lived on far too long for any traces of the first attempts to be left; or, especially in Egypt, it has been covered by -the yearly deposits of silt, dropped by the river floods. But were +the yearly deposits of silt, dropped by the river floods. But we�re pretty sure the first food-producers of Egypt and southern Mesopotamia must have made such dikes, canals, and ditches. In the first place, -there cant have been enough rain for them to grow things otherwise. +there can�t have been enough rain for them to grow things otherwise. In the second place, the patterns for such projects seem to have been pretty well set by historic times. 
@@ -4733,10 +4733,10 @@ CONTROL OF THE RIVERS THE BUSINESS OF EVERYONE Here, then, is a _part_ of the reason why civilization grew in Egypt and Mesopotamia first--not in Palestine, Syria, or Iran. In the latter areas, people could manage to produce their food as individuals. It -wasnt too hard; there were rain and some streams, and good pasturage +wasn�t too hard; there were rain and some streams, and good pasturage for the animals even if a crop or two went wrong. In Egypt and Mesopotamia, people had to put in a much greater amount of work, and -this work couldnt be individual work. Whole villages or groups of +this work couldn�t be individual work. Whole villages or groups of people had to turn out to fix dikes or dig ditches. The dikes had to be repaired and the ditches carefully cleared of silt each year, or they would become useless. @@ -4745,7 +4745,7 @@ There also had to be hard and fast rules. The person who lived nearest the ditch or the reservoir must not be allowed to take all the water and leave none for his neighbors. It was not only a business of learning to control the rivers and of making their waters do the -farmers work. It also meant controlling men. But once these men had +farmer�s work. It also meant controlling men. But once these men had managed both kinds of controls, what a wonderful yield they had! The soil was already fertile, and the silt which came in the floods and ditches kept adding fertile soil. @@ -4756,7 +4756,7 @@ THE GERM OF CIVILIZATION IN EGYPT AND MESOPOTAMIA This learning to work together for the common good was the real germ of the Egyptian and the Mesopotamian civilizations. The bare elements of civilization were already there: the need for a governing hand and for -laws to see that the communities work was done and that the water was +laws to see that the communities� work was done and that the water was justly shared. You may object that there is a sort of chicken and egg paradox in this idea. How could the people set up the rules until they had managed to get a way to live, and how could they manage to get a @@ -4781,12 +4781,12 @@ My explanation has been pointed particularly at Egypt and Mesopotamia. I have already told you that the irrigation and water-control part of it does not apply to the development of the Aztecs or the Mayas, or perhaps anybody else. But I think that a fair part of the story of -Egypt and Mesopotamia must be as Ive just told you. +Egypt and Mesopotamia must be as I�ve just told you. I am particularly anxious that you do _not_ understand me to mean that irrigation _caused_ civilization. I am sure it was not that simple at all. For, in fact, a complex and highly engineered irrigation system -proper did not come until later times. Lets say rather that the simple +proper did not come until later times. Let�s say rather that the simple beginnings of irrigation allowed and in fact encouraged a great number of things in the technological, political, social, and moral realms of culture. We do not yet understand what all these things were or how @@ -4842,7 +4842,7 @@ the mound which later became the holy Sumerian city of Eridu, Iraqi archeologists uncovered a handsome painted pottery. Pottery of the same type had been noticed earlier by German archeologists on the surface of a small mound, awash in the spring floods, near the remains of the -Biblical city of Erich (Sumerian = Uruk; Arabic = Warka). This Eridu +Biblical city of Erich (Sumerian = Uruk; Arabic = Warka). 
This �Eridu� pottery, which is about all we have of the assemblage of the people who once produced it, may be seen as a blend of the Samarran and Halafian painted pottery styles. This may over-simplify the case, but as yet we @@ -4864,7 +4864,7 @@ seems to move into place before the Halaf manifestation is finished, and to blend with it. The Ubaidian assemblage in the south is by far the more spectacular. The development of the temple has been traced at Eridu from a simple little structure to a monumental building some -62 feet long, with a pilaster-decorated faade and an altar in its +62 feet long, with a pilaster-decorated fa�ade and an altar in its central chamber. There is painted Ubaidian pottery, but the style is hurried and somewhat careless and gives the _impression_ of having been a cheap mass-production means of decoration when compared with the @@ -4879,7 +4879,7 @@ turtle-like faces are another item in the southern Ubaidian assemblage. There is a large Ubaid cemetery at Eridu, much of it still awaiting excavation. The few skeletons so far tentatively studied reveal a -completely modern type of Mediterraneanoid; the individuals whom the +completely modern type of �Mediterraneanoid�; the individuals whom the skeletons represent would undoubtedly blend perfectly into the modern population of southern Iraq. What the Ubaidian assemblage says to us is that these people had already adapted themselves and their culture to @@ -4925,7 +4925,7 @@ woven stuffs must have been the mediums of exchange. Over what area did the trading net-work of Ubaid extend? We start with the idea that the Ubaidian assemblage is most richly developed in the south. We assume, I think, correctly, that it represents a cultural flowering of the south. -On the basis of the pottery of the still elusive Eridu immigrants +On the basis of the pottery of the still elusive �Eridu� immigrants who had first followed the rivers into alluvial Mesopotamia, we get the notion that the characteristic painted pottery style of Ubaid was developed in the southland. If this reconstruction is correct @@ -4935,7 +4935,7 @@ assemblage of (and from the southern point of view, _fairly_ pure) Ubaidian material in northern Iraq. The pottery appears all along the Iranian flanks, even well east of the head of the Persian Gulf, and ends in a later and spectacular flourish in an extremely handsome -painted style called the Susa style. Ubaidian pottery has been noted +painted style called the �Susa� style. Ubaidian pottery has been noted up the valleys of both of the great rivers, well north of the Iraqi and Syrian borders on the southern flanks of the Anatolian plateau. It reaches the Mediterranean Sea and the valley of the Orontes in @@ -4965,10 +4965,10 @@ Mesopotamia. Next, much to our annoyance, we have what is almost a temporary black-out. According to the system of terminology I favor, our next -assemblage after that of Ubaid is called the _Warka_ phase, from +�assemblage� after that of Ubaid is called the _Warka_ phase, from the Arabic name for the site of Uruk or Erich. We know it only from six or seven levels in a narrow test-pit at Warka, and from an even -smaller hole at another site. This assemblage, so far, is known only +smaller hole at another site. This �assemblage,� so far, is known only by its pottery, some of which still bears Ubaidian style painting. The characteristic Warkan pottery is unpainted, with smoothed red or gray surfaces and peculiar shapes. 
Unquestionably, there must be a great @@ -4979,7 +4979,7 @@ have to excavate it! THE DAWN OF CIVILIZATION After our exasperation with the almost unknown Warka interlude, -following the brilliant false dawn of Ubaid, we move next to an +following the brilliant �false dawn� of Ubaid, we move next to an assemblage which yields traces of a preponderance of those elements which we noted (p. 144) as meaning civilization. This assemblage is that called _Proto-Literate_; it already contains writing. On @@ -4988,8 +4988,8 @@ history--and no longer prehistory--the assemblage is named for the historical implications of its content, and no longer after the name of the site where it was first found. Since some of the older books used site-names for this assemblage, I will tell you that the Proto-Literate -includes the latter half of what used to be called the Uruk period -_plus_ all of what used to be called the Jemdet Nasr period. It shows +includes the latter half of what used to be called the �Uruk period� +_plus_ all of what used to be called the �Jemdet Nasr period.� It shows a consistent development from beginning to end. I shall, in fact, leave much of the description and the historic @@ -5033,18 +5033,18 @@ mental block seems to have been removed. Clay tablets bearing pictographic signs are the Proto-Literate forerunners of cuneiform writing. The earliest examples are not well -understood but they seem to be devices for making accounts and -for remembering accounts. Different from the later case in Egypt, +understood but they seem to be �devices for making accounts and +for remembering accounts.� Different from the later case in Egypt, where writing appears fully formed in the earliest examples, the development from simple pictographic signs to proper cuneiform writing may be traced, step by step, in Mesopotamia. It is most probable that the development of writing was connected with the temple and -the need for keeping account of the temples possessions. Professor +the need for keeping account of the temple�s possessions. Professor Jacobsen sees writing as a means for overcoming space, time, and the -increasing complications of human affairs: Literacy, which began +increasing complications of human affairs: �Literacy, which began with ... civilization, enhanced mightily those very tendencies in its development which characterize it as a civilization and mark it off as -such from other types of culture. +such from other types of culture.� [Illustration: RELIEF ON A PROTO-LITERATE STONE VASE, WARKA @@ -5098,7 +5098,7 @@ civilized way of life. I suppose you could say that the difference in the approach is that as a prehistorian I have been looking forward or upward in time, while the -historians look backward to glimpse what Ive been describing here. My +historians look backward to glimpse what I�ve been describing here. My base-line was half a million years ago with a being who had little more than the capacity to make tools and fire to distinguish him from the animals about him. Thus my point of view and that of the conventional @@ -5114,17 +5114,17 @@ End of PREHISTORY [Illustration] -Youll doubtless easily recall your general course in ancient history: +You�ll doubtless easily recall your general course in ancient history: how the Sumerian dynasties of Mesopotamia were supplanted by those of Babylonia, how the Hittite kingdom appeared in Anatolian Turkey, and about the three great phases of Egyptian history. The literate kingdom of Crete arose, and by 1500 B.C. 
there were splendid fortified Mycenean towns on the mainland of Greece. This was the time--about the whole eastern end of the Mediterranean--of what Professor Breasted called the -first great internationalism, with flourishing trade, international +�first great internationalism,� with flourishing trade, international treaties, and royal marriages between Egyptians, Babylonians, and -Hittites. By 1200 B.C., the whole thing had fragmented: the peoples of -the sea were restless in their isles, and the great ancient centers in +Hittites. By 1200 B.C., the whole thing had fragmented: �the peoples of +the sea were restless in their isles,� and the great ancient centers in Egypt, Mesopotamia, and Anatolia were eclipsed. Numerous smaller states arose--Assyria, Phoenicia, Israel--and the Trojan war was fought. Finally Assyria became the paramount power of all the Near East, @@ -5135,7 +5135,7 @@ but casting them with its own tradition into a new mould, arose in mainland Greece. I once shocked my Classical colleagues to the core by referring to -Greece as a second degree derived civilization, but there is much +Greece as �a second degree derived civilization,� but there is much truth in this. The principles of bronze- and then of iron-working, of the alphabet, and of many other elements in Greek culture were borrowed from western Asia. Our debt to the Greeks is too well known for me even @@ -5146,7 +5146,7 @@ Greece fell in its turn to Rome, and in 55 B.C. Caesar invaded Britain. I last spoke of Britain on page 142; I had chosen it as my single example for telling you something of how the earliest farming communities were established in Europe. Now I will continue with -Britains later prehistory, so you may sense something of the end of +Britain�s later prehistory, so you may sense something of the end of prehistory itself. Remember that Britain is simply a single example we select; the same thing could be done for all the other countries of Europe, and will be possible also, some day, for further Asia and @@ -5186,20 +5186,20 @@ few Battle-axe folk elements, including, in fact, stone battle-axes, reached England with the earliest Beaker folk,[6] coming from the Rhineland. - [6] The British authors use the term Beaker folk to mean both + [6] The British authors use the term �Beaker folk� to mean both archeological assemblage and human physical type. They speak - of a ... tall, heavy-boned, rugged, and round-headed strain + of a �... tall, heavy-boned, rugged, and round-headed� strain which they take to have developed, apparently in the Rhineland, by a mixture of the original (Spanish?) beaker-makers and the northeast European battle-axe makers. However, since the science of physical anthropology is very much in flux at the moment, and since I am not able to assess the evidence for these - physical types, I _do not_ use the term folk in this book with + physical types, I _do not_ use the term �folk� in this book with its usual meaning of standardized physical type. When I use - folk here, I mean simply _the makers of a given archeological + �folk� here, I mean simply _the makers of a given archeological assemblage_. The difficulty only comes when assemblages are named for some item in them; it is too clumsy to make an - adjective of the item and refer to a beakerian assemblage. + adjective of the item and refer to a �beakerian� assemblage. The Beaker folk settled earliest in the agriculturally fertile south and east. 
There seem to have been several phases of Beaker folk @@ -5211,7 +5211,7 @@ folk are known. They buried their dead singly, sometimes in conspicuous individual barrows with the dead warrior in his full trappings. The spectacular element in the assemblage of the Beaker folk is a group of large circular monuments with ditches and with uprights of wood or -stone. These henges became truly monumental several hundred years +stone. These �henges� became truly monumental several hundred years later; while they were occasionally dedicated with a burial, they were not primarily tombs. The effect of the invasion of the Beaker folk seems to cut across the whole fabric of life in Britain. @@ -5221,7 +5221,7 @@ seems to cut across the whole fabric of life in Britain. There was, however, a second major element in British life at this time. It shows itself in the less well understood traces of a group again called after one of the items in their catalogue, the Food-vessel -folk. There are many burials in these food-vessel pots in northern +folk. There are many burials in these �food-vessel� pots in northern England, Scotland, and Ireland, and the pottery itself seems to link back to that of the Peterborough assemblage. Like the earlier Peterborough people in the highland zone before them, the makers of @@ -5238,8 +5238,8 @@ MORE INVASIONS About 1500 B.C., the situation became further complicated by the arrival of new people in the region of southern England anciently called Wessex. The traces suggest the Brittany coast of France as a -source, and the people seem at first to have been a small but heroic -group of aristocrats. Their heroes are buried with wealth and +source, and the people seem at first to have been a small but �heroic� +group of aristocrats. Their �heroes� are buried with wealth and ceremony, surrounded by their axes and daggers of bronze, their gold ornaments, and amber and jet beads. These rich finds show that the trade-linkage these warriors patronized spread from the Baltic sources @@ -5265,10 +5265,10 @@ which must have been necessary before such a great monument could have been built. -THIS ENGLAND +�THIS ENGLAND� The range from 1900 to about 1400 B.C. includes the time of development -of the archeological features usually called the Early Bronze Age +of the archeological features usually called the �Early Bronze Age� in Britain. In fact, traces of the Wessex warriors persisted down to about 1200 B.C. The main regions of the island were populated, and the adjustments to the highland and lowland zones were distinct and well @@ -5279,7 +5279,7 @@ trading role, separated from the European continent but conveniently adjacent to it. The tin of Cornwall--so important in the production of good bronze--as well as the copper of the west and of Ireland, taken with the gold of Ireland and the general excellence of Irish -metal work, assured Britain a traders place in the then known world. +metal work, assured Britain a trader�s place in the then known world. Contacts with the eastern Mediterranean may have been by sea, with Cornish tin as the attraction, or may have been made by the Food-vessel middlemen on their trips to the Baltic coast. There they would have @@ -5292,9 +5292,9 @@ relative isolation gave some peace and also gave time for a leveling and further fusion of culture. The separate cultural traditions began to have more in common. 
The growing of barley, the herding of sheep and cattle, and the production of woolen garments were already features -common to all Britains inhabitants save a few in the remote highlands, +common to all Britain�s inhabitants save a few in the remote highlands, the far north, and the distant islands not yet fully touched by -food-production. The personality of Britain was being formed. +food-production. The �personality of Britain� was being formed. CREMATION BURIALS BEGIN @@ -5325,9 +5325,9 @@ which we shall mention below. The British cremation-burial-in-urns folk survived a long time in the highland zone. In the general British scheme, they make up what is -called the Middle Bronze Age, but in the highland zone they last +called the �Middle Bronze Age,� but in the highland zone they last until after 900 B.C. and are considered to be a specialized highland -Late Bronze Age. In the highland zone, these later cremation-burial +�Late Bronze Age.� In the highland zone, these later cremation-burial folk seem to have continued the older Food-vessel tradition of being middlemen in the metal market. @@ -5379,12 +5379,12 @@ to get a picture of estate or tribal boundaries which included village communities; we find a variety of tools in bronze, and even whetstones which show that iron has been honed on them (although the scarce iron has not been found). Let me give you the picture in Professor S. -Piggotts words: The ... Late Bronze Age of southern England was but +Piggott�s words: �The ... Late Bronze Age of southern England was but the forerunner of the earliest Iron Age in the same region, not only in the techniques of agriculture, but almost certainly in terms of ethnic kinship ... we can with some assurance talk of the Celts ... the great early Celtic expansion of the Continent is recognized to be that of the -Urnfield people. +Urnfield people.� Thus, certainly by 500 B.C., there were people in Britain, some of whose descendants we may recognize today in name or language in remote @@ -5399,11 +5399,11 @@ efficient set of tools than does bronze. Iron tools seem first to have been made in quantity in Hittite Anatolia about 1500 B.C. In continental Europe, the earliest, so-called Hallstatt, iron-using cultures appeared in Germany soon after 750 B.C. Somewhat later, -Greek and especially Etruscan exports of _objets dart_--which moved +Greek and especially Etruscan exports of _objets d�art_--which moved with a flourishing trans-Alpine wine trade--influenced the Hallstatt iron-working tradition. Still later new classical motifs, together with older Hallstatt, oriental, and northern nomad motifs, gave rise to a -new style in metal decoration which characterizes the so-called La Tne +new style in metal decoration which characterizes the so-called La T�ne phase. A few iron users reached Britain a little before 400 B.C. Not long @@ -5422,7 +5422,7 @@ HILL-FORTS AND FARMS The earliest iron-users seem to have entrenched themselves temporarily within hill-top forts, mainly in the south. Gradually, they moved inland, establishing _individual_ farm sites with extensive systems -of rectangular fields. We recognize these fields by the lynchets or +of rectangular fields. We recognize these fields by the �lynchets� or lines of soil-creep which plowing left on the slopes of hills. New crops appeared; there were now bread wheat, oats, and rye, as well as barley. @@ -5434,7 +5434,7 @@ various outbuildings and pits for the storage of grain. 
Weaving was done on the farm, but not blacksmithing, which must have been a specialized trade. Save for the lack of firearms, the place might almost be taken for a farmstead on the American frontier in the early -1800s. +1800�s. Toward 250 B.C. there seems to have been a hasty attempt to repair the hill-forts and to build new ones, evidently in response to signs of @@ -5446,9 +5446,9 @@ THE SECOND PHASE Perhaps the hill-forts were not entirely effective or perhaps a compromise was reached. In any case, the newcomers from the Marne district did establish themselves, first in the southeast and then to -the north and west. They brought iron with decoration of the La Tne +the north and west. They brought iron with decoration of the La T�ne type and also the two-wheeled chariot. Like the Wessex warriors of -over a thousand years earlier, they made heroes graves, with their +over a thousand years earlier, they made �heroes�� graves, with their warriors buried in the war-chariots and dressed in full trappings. [Illustration: CELTIC BUCKLE] @@ -5457,7 +5457,7 @@ The metal work of these Marnian newcomers is excellent. The peculiar Celtic art style, based originally on the classic tendril motif, is colorful and virile, and fits with Greek and Roman descriptions of Celtic love of color in dress. There is a strong trace of these -newcomers northward in Yorkshire, linked by Ptolemys description to +newcomers northward in Yorkshire, linked by Ptolemy�s description to the Parisii, doubtless part of the Celtic tribe which originally gave its name to Paris on the Seine. Near Glastonbury, in Somerset, two villages in swamps have been excavated. They seem to date toward the @@ -5469,7 +5469,7 @@ villagers. In Scotland, which yields its first iron tools at a date of about 100 B.C., and in northern Ireland even slightly earlier, the effects of the -two phases of newcomers tend especially to blend. Hill-forts, brochs +two phases of newcomers tend especially to blend. Hill-forts, �brochs� (stone-built round towers) and a variety of other strange structures seem to appear as the new ideas develop in the comparative isolation of northern Britain. @@ -5493,27 +5493,27 @@ at last, we can even begin to speak of dynasties and individuals. Some time before 55 B.C., the Catuvellauni, originally from the Marne district in France, had possessed themselves of a large part of southeastern England. They evidently sailed up the Thames and built a -town of over a hundred acres in area. Here ruled Cassivellaunus, the -first man in England whose name we know, and whose town Caesar sacked. +town of over a hundred acres in area. Here ruled Cassivellaunus, �the +first man in England whose name we know,� and whose town Caesar sacked. The town sprang up elsewhere again, however. THE END OF PREHISTORY Prehistory, strictly speaking, is now over in southern Britain. -Claudius effective invasion took place in 43 A.D.; by 83 A.D., a raid +Claudius� effective invasion took place in 43 A.D.; by 83 A.D., a raid had been made as far north as Aberdeen in Scotland. But by 127 A.D., Hadrian had completed his wall from the Solway to the Tyne, and the Romans settled behind it. In Scotland, Romanization can have affected -the countryside very little. Professor Piggott adds that ... it is +the countryside very little. Professor Piggott adds that �... 
it is when the pressure of Romanization is relaxed by the break-up of the Dark Ages that we see again the Celtic metal-smiths handling their material with the same consummate skill as they had before the Roman Conquest, and with traditional styles that had not even then forgotten -their Marnian and Belgic heritage. +their Marnian and Belgic heritage.� In fact, many centuries go by, in Britain as well as in the rest of -Europe, before the archeologists task is complete and the historian on +Europe, before the archeologist�s task is complete and the historian on his own is able to describe the ways of men in the past. @@ -5524,7 +5524,7 @@ you will have noticed how often I had to refer to the European continent itself. Britain, beyond the English Channel for all of her later prehistory, had a much simpler course of events than did most of the rest of Europe in later prehistoric times. This holds, in spite -of all the invasions and reverberations from the continent. Most +of all the �invasions� and �reverberations� from the continent. Most of Europe was the scene of an even more complicated ebb and flow of cultural change, save in some of its more remote mountain valleys and peninsulas. @@ -5536,7 +5536,7 @@ accounts and some good general accounts of part of the range from about 3000 B.C. to A.D. 1. I suspect that the difficulty of making a good book that covers all of its later prehistory is another aspect of what makes Europe so very complicated a continent today. The prehistoric -foundations for Europes very complicated set of civilizations, +foundations for Europe�s very complicated set of civilizations, cultures, and sub-cultures--which begin to appear as history proceeds--were in themselves very complicated. @@ -5552,8 +5552,8 @@ of their journeys. But by the same token, they had had time en route to take on their characteristic European aspects. Some time ago, Sir Cyril Fox wrote a famous book called _The -Personality of Britain_, sub-titled Its Influence on Inhabitant and -Invader in Prehistoric and Early Historic Times. We have not gone +Personality of Britain_, sub-titled �Its Influence on Inhabitant and +Invader in Prehistoric and Early Historic Times.� We have not gone into the post-Roman early historic period here; there are still the Anglo-Saxons and Normans to account for as well as the effects of the Romans. But what I have tried to do was to begin the story of @@ -5570,7 +5570,7 @@ Summary In the pages you have read so far, you have been brought through the -earliest 99 per cent of the story of mans life on this planet. I have +earliest 99 per cent of the story of man�s life on this planet. I have left only 1 per cent of the story for the historians to tell. @@ -5601,7 +5601,7 @@ But I think there may have been a few. Certainly the pace of the first act accelerated with the swing from simple gathering to more intensified collecting. The great cave art of France and Spain was probably an expression of a climax. Even the ideas of burying the dead -and of the Venus figurines must also point to levels of human thought +and of the �Venus� figurines must also point to levels of human thought and activity that were over and above pure food-getting. @@ -5629,7 +5629,7 @@ five thousand years after the second act began. But it could never have happened in the first act at all. There is another curious thing about the first act. 
Many of the players -didnt know it was over and they kept on with their roles long after +didn�t know it was over and they kept on with their roles long after the second act had begun. On the edges of the stage there are today some players who are still going on with the first act. The Eskimos, and the native Australians, and certain tribes in the Amazon jungle are @@ -5680,20 +5680,20 @@ act may have lessons for us and give depth to our thinking. I know there are at least _some_ lessons, even in the present incomplete state of our knowledge. The players who began the second act--that of food-production--separately, in different parts of the world, were not -all of one pure race nor did they have pure cultural traditions. +all of one �pure race� nor did they have �pure� cultural traditions. Some apparently quite mixed Mediterraneans got off to the first start on the second act and brought it to its first two climaxes as well. Peoples of quite different physical type achieved the first climaxes in China and in the New World. In our British example of how the late prehistory of Europe worked, we -listed a continuous series of invasions and reverberations. After +listed a continuous series of �invasions� and �reverberations.� After each of these came fusion. Even though the Channel protected Britain from some of the extreme complications of the mixture and fusion of continental Europe, you can see how silly it would be to refer to a -pure British race or a pure British culture. We speak of the United -States as a melting pot. But this is nothing new. Actually, Britain -and all the rest of the world have been melting pots at one time or +�pure� British race or a �pure� British culture. We speak of the United +States as a �melting pot.� But this is nothing new. Actually, Britain +and all the rest of the world have been �melting pots� at one time or another. By the time the written records of Mesopotamia and Egypt begin to turn @@ -5703,12 +5703,12 @@ itself, we are thrown back on prehistoric archeology. And this is as true for China, India, Middle America, and the Andes, as it is for the Near East. -There are lessons to be learned from all of mans past, not simply +There are lessons to be learned from all of man�s past, not simply lessons of how to fight battles or win peace conferences, but of how human society evolves from one stage to another. Many of these lessons can only be looked for in the prehistoric past. So far, we have only made a beginning. There is much still to do, and many gaps in the story -are yet to be filled. The prehistorians job is to find the evidence, +are yet to be filled. The prehistorian�s job is to find the evidence, to fill the gaps, and to discover the lessons men have learned in the past. As I see it, this is not only an exciting but a very practical goal for which to strive. @@ -5745,7 +5745,7 @@ paperbound books.) GEOCHRONOLOGY AND THE ICE AGE -(Two general books. Some Pleistocene geologists disagree with Zeuners +(Two general books. Some Pleistocene geologists disagree with Zeuner�s interpretation of the dating evidence, but their points of view appear in professional journals, in articles too cumbersome to list here.) @@ -5815,7 +5815,7 @@ GENERAL PREHISTORY Press. Movius, Hallam L., Jr. - Old World Prehistory: Paleolithic in _Anthropology Today_. + �Old World Prehistory: Paleolithic� in _Anthropology Today_. Kroeber, A. L., ed. 1953. University of Chicago Press. Oakley, Kenneth P. @@ -5826,7 +5826,7 @@ GENERAL PREHISTORY _British Prehistory._ 1949. 
Oxford University Press. Pittioni, Richard - _Die Urgeschichtlichen Grundlagen der Europischen Kultur._ + _Die Urgeschichtlichen Grundlagen der Europ�ischen Kultur._ 1949. Deuticke. (A single book which does attempt to cover the whole range of European prehistory to ca. 1 A.D.) @@ -5834,7 +5834,7 @@ GENERAL PREHISTORY THE NEAR EAST Adams, Robert M. - Developmental Stages in Ancient Mesopotamia, _in_ Steward, + �Developmental Stages in Ancient Mesopotamia,� _in_ Steward, Julian, _et al_, _Irrigation Civilizations: A Comparative Study_. 1955. Pan American Union. @@ -6000,7 +6000,7 @@ Index Bolas, 54 - Bordes, Franois, 62 + Bordes, Fran�ois, 62 Borer, 77 @@ -6028,7 +6028,7 @@ Index killed by stampede, 86 Burials, 66, 86; - in henges, 164; + in �henges,� 164; in urns, 168 Burins, 75 @@ -6085,7 +6085,7 @@ Index Combe Capelle, 30 - Combe Capelle-Brnn group, 34 + Combe Capelle-Br�nn group, 34 Commont, Victor, 51 @@ -6097,7 +6097,7 @@ Index Corrals for cattle, 140 - Cradle of mankind, 136 + �Cradle of mankind,� 136 Cremation, 167 @@ -6123,7 +6123,7 @@ Index Domestication, of animals, 100, 105, 107; of plants, 100 - Dragon teeth fossils in China, 28 + �Dragon teeth� fossils in China, 28 Drill, 77 @@ -6176,9 +6176,9 @@ Index Fayum, 135; radiocarbon date, 146 - Fertile Crescent, 107, 146 + �Fertile Crescent,� 107, 146 - Figurines, Venus, 84; + Figurines, �Venus,� 84; at Jarmo, 128; at Ubaid, 153 @@ -6197,7 +6197,7 @@ Index Flint industry, 127 - Fontchevade, 32, 56, 58 + Font�chevade, 32, 56, 58 Food-collecting, 104, 121; end of, 104 @@ -6223,7 +6223,7 @@ Index Food-vessel folk, 164 - Forest folk, 97, 98, 104, 110 + �Forest folk,� 97, 98, 104, 110 Fox, Sir Cyril, 174 @@ -6379,7 +6379,7 @@ Index Land bridges in Mediterranean, 19 - La Tne phase, 170 + La T�ne phase, 170 Laurel leaf point, 78, 89 @@ -6404,7 +6404,7 @@ Index Mammoth, 93; in cave art, 85 - Man-apes, 26 + �Man-apes,� 26 Mango, 107 @@ -6435,7 +6435,7 @@ Index Microliths, 87; at Jarmo, 130; - lunates, 87; + �lunates,� 87; trapezoids, 87; triangles, 87 @@ -6443,7 +6443,7 @@ Index Mine-shafts, 140 - Mlefaat, 126, 127 + M�lefaat, 126, 127 Mongoloids, 29, 90 @@ -6453,9 +6453,9 @@ Index Mount Carmel, 11, 33, 52, 59, 64, 69, 113, 114 - Mousterian man, 64 + �Mousterian man,� 64 - Mousterian tools, 61, 62; + �Mousterian� tools, 61, 62; of Acheulean tradition, 62 Movius, H. 
L., 47
@@ -6471,7 +6471,7 @@ Index
 Near East, beginnings of civilization in, 20, 144;
     cave sites, 58;
     climate in Ice Age, 99;
-    Fertile Crescent, 107, 146;
+    “Fertile Crescent,” 107, 146;
     food-production in, 99;
     Natufian assemblage in, 113-115;
     stone tools, 114
@@ -6539,7 +6539,7 @@ Index
 Pig, wild, 108

-Piltdown man, 29
+“Piltdown man,” 29

 Pins, 80
@@ -6578,7 +6578,7 @@ Index
 Race, 35;
     biological, 36;
-    pure, 16
+    “pure,” 16

 Radioactivity, 9, 10
@@ -6795,7 +6795,7 @@ Index
 Writing, 158;
     cuneiform, 158

-Wrm I glaciation, 58
+Würm I glaciation, 58

 Zebu cattle, domestication of, 107
@@ -6810,7 +6810,7 @@ Index

-Transcribers note:
+Transcriber’s note:

 Punctuation, hyphenation, and spelling were made consistent when a
 predominant preference was found in this book; otherwise they were not
diff --git a/ciphers/transposition_cipher_encrypt_decrypt_file.py b/ciphers/transposition_cipher_encrypt_decrypt_file.py
index 6296b1e6d709..b9630243d7f3 100644
--- a/ciphers/transposition_cipher_encrypt_decrypt_file.py
+++ b/ciphers/transposition_cipher_encrypt_decrypt_file.py
@@ -6,8 +6,8 @@

 def main() -> None:
-    input_file = "Prehistoric Men.txt"
-    output_file = "Output.txt"
+    input_file = "./prehistoric_men.txt"
+    output_file = "./Output.txt"
     key = int(input("Enter key: "))
     mode = input("Encrypt/Decrypt [e/d]: ")

From 24dbdd0b88bdfd4ddb940cf0b681075c66842cc3 Mon Sep 17 00:00:00 2001
From: Raghav <83136390+Raghav-Bell@users.noreply.github.com>
Date: Wed, 4 Oct 2023 11:38:13 +0530
Subject: [PATCH 1013/1543] Update coulombs_law.py docs (#9667)

* Update coulombs_law.py
distance is positive non zero real number (float type) hence corrected docs which says only integer.
* Update physics/coulombs_law.py

---------

Co-authored-by: Tianyi Zheng
---
 physics/coulombs_law.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/physics/coulombs_law.py b/physics/coulombs_law.py
index 252e8ec0f74e..fe2d358f653e 100644
--- a/physics/coulombs_law.py
+++ b/physics/coulombs_law.py
@@ -32,7 +32,7 @@ def coulombs_law(q1: float, q2: float, radius: float) -> float:
     17975103584.6
     """
     if radius <= 0:
-        raise ValueError("The radius is always a positive non zero integer")
+        raise ValueError("The radius is always a positive number")
     return round(((8.9875517923 * 10**9) * q1 * q2) / (radius**2), 2)

From 3fd3497f15982a7286326b520b5e7b52767da1f3 Mon Sep 17 00:00:00 2001
From: Siddhant Totade
Date: Wed, 4 Oct 2023 14:55:26 +0530
Subject: [PATCH 1014/1543] Add Comments (#9668)

* docs : add comment in circular_linked_list.py and swap_nodes.py
* docs : improve comments
* docs : improved docs and tested on pre-commit
* docs : add comment in circular_linked_list.py and swap_nodes.py
* docs : improve comments
* docs : improved docs and tested on pre-commit
* docs : modified comments
* Update circular_linked_list.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* docs : improved
* Update data_structures/linked_list/circular_linked_list.py
Co-authored-by: Christian Clauss
* Update data_structures/linked_list/circular_linked_list.py
Co-authored-by: Christian Clauss
* Update data_structures/linked_list/swap_nodes.py
Co-authored-by: Christian Clauss
* Update data_structures/linked_list/swap_nodes.py
Co-authored-by: Christian Clauss
* Update data_structures/linked_list/swap_nodes.py
Co-authored-by: Christian Clauss
* Update data_structures/linked_list/swap_nodes.py
Co-authored-by: Christian Clauss
* Update requirements.txt
Co-authored-by: Christian Clauss
* Update
data_structures/linked_list/circular_linked_list.py Co-authored-by: Christian Clauss * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update circular_linked_list.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../linked_list/circular_linked_list.py | 87 ++++++++++++++++--- data_structures/linked_list/swap_nodes.py | 47 ++++++++-- 2 files changed, 113 insertions(+), 21 deletions(-) diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index d9544f4263a6..72212f46be15 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -6,16 +6,29 @@ class Node: def __init__(self, data: Any): + """ + Initialize a new Node with the given data. + Args: + data: The data to be stored in the node. + """ self.data: Any = data - self.next: Node | None = None + self.next: Node | None = None # Reference to the next node class CircularLinkedList: - def __init__(self): - self.head = None - self.tail = None + def __init__(self) -> None: + """ + Initialize an empty Circular Linked List. + """ + self.head = None # Reference to the head (first node) + self.tail = None # Reference to the tail (last node) def __iter__(self) -> Iterator[Any]: + """ + Iterate through all nodes in the Circular Linked List yielding their data. + Yields: + The data of each node in the linked list. + """ node = self.head while self.head: yield node.data @@ -24,25 +37,48 @@ def __iter__(self) -> Iterator[Any]: break def __len__(self) -> int: + """ + Get the length (number of nodes) in the Circular Linked List. + """ return sum(1 for _ in self) - def __repr__(self): + def __repr__(self) -> str: + """ + Generate a string representation of the Circular Linked List. + Returns: + A string of the format "1->2->....->N". + """ return "->".join(str(item) for item in iter(self)) def insert_tail(self, data: Any) -> None: + """ + Insert a node with the given data at the end of the Circular Linked List. + """ self.insert_nth(len(self), data) def insert_head(self, data: Any) -> None: + """ + Insert a node with the given data at the beginning of the Circular Linked List. + """ self.insert_nth(0, data) def insert_nth(self, index: int, data: Any) -> None: + """ + Insert the data of the node at the nth pos in the Circular Linked List. + Args: + index: The index at which the data should be inserted. + data: The data to be inserted. + + Raises: + IndexError: If the index is out of range. + """ if index < 0 or index > len(self): raise IndexError("list index out of range.") new_node = Node(data) if self.head is None: - new_node.next = new_node # first node points itself + new_node.next = new_node # First node points to itself self.tail = self.head = new_node - elif index == 0: # insert at head + elif index == 0: # Insert at the head new_node.next = self.head self.head = self.tail.next = new_node else: @@ -51,22 +87,43 @@ def insert_nth(self, index: int, data: Any) -> None: temp = temp.next new_node.next = temp.next temp.next = new_node - if index == len(self) - 1: # insert at tail + if index == len(self) - 1: # Insert at the tail self.tail = new_node - def delete_front(self): + def delete_front(self) -> Any: + """ + Delete and return the data of the node at the front of the Circular Linked List. + Raises: + IndexError: If the list is empty. 
+ """ return self.delete_nth(0) def delete_tail(self) -> Any: + """ + Delete and return the data of the node at the end of the Circular Linked List. + Returns: + Any: The data of the deleted node. + Raises: + IndexError: If the index is out of range. + """ return self.delete_nth(len(self) - 1) def delete_nth(self, index: int = 0) -> Any: + """ + Delete and return the data of the node at the nth pos in Circular Linked List. + Args: + index (int): The index of the node to be deleted. Defaults to 0. + Returns: + Any: The data of the deleted node. + Raises: + IndexError: If the index is out of range. + """ if not 0 <= index < len(self): raise IndexError("list index out of range.") delete_node = self.head - if self.head == self.tail: # just one node + if self.head == self.tail: # Just one node self.head = self.tail = None - elif index == 0: # delete head node + elif index == 0: # Delete head node self.tail.next = self.tail.next.next self.head = self.head.next else: @@ -75,16 +132,22 @@ def delete_nth(self, index: int = 0) -> Any: temp = temp.next delete_node = temp.next temp.next = temp.next.next - if index == len(self) - 1: # delete at tail + if index == len(self) - 1: # Delete at tail self.tail = temp return delete_node.data def is_empty(self) -> bool: + """ + Check if the Circular Linked List is empty. + Returns: + bool: True if the list is empty, False otherwise. + """ return len(self) == 0 def test_circular_linked_list() -> None: """ + Test cases for the CircularLinkedList class. >>> test_circular_linked_list() """ circular_linked_list = CircularLinkedList() diff --git a/data_structures/linked_list/swap_nodes.py b/data_structures/linked_list/swap_nodes.py index 3f825756b3d2..da6aa07a79fd 100644 --- a/data_structures/linked_list/swap_nodes.py +++ b/data_structures/linked_list/swap_nodes.py @@ -2,30 +2,56 @@ class Node: - def __init__(self, data: Any): + def __init__(self, data: Any) -> None: + """ + Initialize a new Node with the given data. + + Args: + data: The data to be stored in the node. + + """ self.data = data - self.next = None + self.next = None # Reference to the next node class LinkedList: - def __init__(self): - self.head = None + def __init__(self) -> None: + """ + Initialize an empty Linked List. + """ + self.head = None # Reference to the head (first node) def print_list(self): + """ + Print the elements of the Linked List in order. + """ temp = self.head while temp is not None: print(temp.data, end=" ") temp = temp.next print() - # adding nodes - def push(self, new_data: Any): + def push(self, new_data: Any) -> None: + """ + Add a new node with the given data to the beginning of the Linked List. + Args: + new_data (Any): The data to be added to the new node. + """ new_node = Node(new_data) new_node.next = self.head self.head = new_node - # swapping nodes - def swap_nodes(self, node_data_1, node_data_2): + def swap_nodes(self, node_data_1, node_data_2) -> None: + """ + Swap the positions of two nodes in the Linked List based on their data values. + Args: + node_data_1: Data value of the first node to be swapped. + node_data_2: Data value of the second node to be swapped. + + + Note: + If either of the specified data values isn't found then, no swapping occurs. 
+ """ if node_data_1 == node_data_2: return else: @@ -40,6 +66,7 @@ def swap_nodes(self, node_data_1, node_data_2): if node_1 is None or node_2 is None: return + # Swap the data values of the two nodes node_1.data, node_2.data = node_2.data, node_1.data @@ -48,8 +75,10 @@ def swap_nodes(self, node_data_1, node_data_2): for i in range(5, 0, -1): ll.push(i) + print("Original Linked List:") ll.print_list() ll.swap_nodes(1, 4) - print("After swapping") + print("After swapping the nodes whose data is 1 and 4:") + ll.print_list() From dfdd78135df938d948ba3044aca628aca08886e7 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Wed, 4 Oct 2023 12:05:00 -0400 Subject: [PATCH 1015/1543] Fix mypy errors in circular_linked_list.py and swap_nodes.py (#9707) * updating DIRECTORY.md * Fix mypy errors in circular_linked_list.py * Fix mypy errors in swap_nodes.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 5 ++--- .../linked_list/circular_linked_list.py | 22 +++++++++++++------ data_structures/linked_list/swap_nodes.py | 4 ++-- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 9a913aa786e1..4f4cc423d678 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -541,8 +541,8 @@ * [Basic Maths](maths/basic_maths.py) * [Binary Exp Mod](maths/binary_exp_mod.py) * [Binary Exponentiation](maths/binary_exponentiation.py) - * [Binary Exponentiation 2](maths/binary_exponentiation_2.py) * [Binary Exponentiation 3](maths/binary_exponentiation_3.py) + * [Binary Multiplication](maths/binary_multiplication.py) * [Binomial Coefficient](maths/binomial_coefficient.py) * [Binomial Distribution](maths/binomial_distribution.py) * [Bisection](maths/bisection.py) @@ -557,8 +557,7 @@ * [Decimal Isolate](maths/decimal_isolate.py) * [Decimal To Fraction](maths/decimal_to_fraction.py) * [Dodecahedron](maths/dodecahedron.py) - * [Double Factorial Iterative](maths/double_factorial_iterative.py) - * [Double Factorial Recursive](maths/double_factorial_recursive.py) + * [Double Factorial](maths/double_factorial.py) * [Dual Number Automatic Differentiation](maths/dual_number_automatic_differentiation.py) * [Entropy](maths/entropy.py) * [Euclidean Distance](maths/euclidean_distance.py) diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 72212f46be15..ef6658733a95 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -20,8 +20,8 @@ def __init__(self) -> None: """ Initialize an empty Circular Linked List. """ - self.head = None # Reference to the head (first node) - self.tail = None # Reference to the tail (last node) + self.head: Node | None = None # Reference to the head (first node) + self.tail: Node | None = None # Reference to the tail (last node) def __iter__(self) -> Iterator[Any]: """ @@ -30,7 +30,7 @@ def __iter__(self) -> Iterator[Any]: The data of each node in the linked list. 
""" node = self.head - while self.head: + while node: yield node.data node = node.next if node == self.head: @@ -74,17 +74,20 @@ def insert_nth(self, index: int, data: Any) -> None: """ if index < 0 or index > len(self): raise IndexError("list index out of range.") - new_node = Node(data) + new_node: Node = Node(data) if self.head is None: new_node.next = new_node # First node points to itself self.tail = self.head = new_node elif index == 0: # Insert at the head new_node.next = self.head + assert self.tail is not None # List is not empty, tail exists self.head = self.tail.next = new_node else: - temp = self.head + temp: Node | None = self.head for _ in range(index - 1): + assert temp is not None temp = temp.next + assert temp is not None new_node.next = temp.next temp.next = new_node if index == len(self) - 1: # Insert at the tail @@ -120,16 +123,21 @@ def delete_nth(self, index: int = 0) -> Any: """ if not 0 <= index < len(self): raise IndexError("list index out of range.") - delete_node = self.head + + assert self.head is not None and self.tail is not None + delete_node: Node = self.head if self.head == self.tail: # Just one node self.head = self.tail = None elif index == 0: # Delete head node + assert self.tail.next is not None self.tail.next = self.tail.next.next self.head = self.head.next else: - temp = self.head + temp: Node | None = self.head for _ in range(index - 1): + assert temp is not None temp = temp.next + assert temp is not None and temp.next is not None delete_node = temp.next temp.next = temp.next.next if index == len(self) - 1: # Delete at tail diff --git a/data_structures/linked_list/swap_nodes.py b/data_structures/linked_list/swap_nodes.py index da6aa07a79fd..31dcb02bfa9a 100644 --- a/data_structures/linked_list/swap_nodes.py +++ b/data_structures/linked_list/swap_nodes.py @@ -11,7 +11,7 @@ def __init__(self, data: Any) -> None: """ self.data = data - self.next = None # Reference to the next node + self.next: Node | None = None # Reference to the next node class LinkedList: @@ -19,7 +19,7 @@ def __init__(self) -> None: """ Initialize an empty Linked List. 
""" - self.head = None # Reference to the head (first node) + self.head: Node | None = None # Reference to the head (first node) def print_list(self): """ From d74349793b613b0948608409a572426a9800c3a1 Mon Sep 17 00:00:00 2001 From: halfhearted <99018821+Arunsiva003@users.noreply.github.com> Date: Wed, 4 Oct 2023 22:09:28 +0530 Subject: [PATCH 1016/1543] Arunsiva003 patch 1 flatten tree (#9695) * infix to prefix missing feature added * infix to prefix missing feature added * infix to prefix missing feature added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * infix to prefix missing feature added (comments) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * infix to prefix missing feature added (comments) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * newly updated infix_to_prefix * newly updated infix_to_prefix_2 * newly updated infix_to_prefix_3 * from the beginning * Created flatten_binarytree_to_linkedlist.py * Update flatten_binarytree_to_linkedlist.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update flatten_binarytree_to_linkedlist.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update flatten_binarytree_to_linkedlist.py * Update flatten_binarytree_to_linkedlist.py * Update flatten_binarytree_to_linkedlist.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update flatten_binarytree_to_linkedlist.py (space added) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update flatten_binarytree_to_linkedlist.py space added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update flatten_binarytree_to_linkedlist.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * flatten binary tree to linked list - 1 * flatten binary tree to linked list final * flatten binary tree to linked list final * review updated * Update flatten_binarytree_to_linkedlist.py * Update .pre-commit-config.yaml * Update flatten_binarytree_to_linkedlist.py * Update flatten_binarytree_to_linkedlist.py --------- Co-authored-by: ArunSiva Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../flatten_binarytree_to_linkedlist.py | 138 ++++++++++++++++++ 1 file changed, 138 insertions(+) create mode 100644 data_structures/binary_tree/flatten_binarytree_to_linkedlist.py diff --git a/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py b/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py new file mode 100644 index 000000000000..8820a509ecba --- /dev/null +++ b/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py @@ -0,0 +1,138 @@ +""" +Binary Tree Flattening Algorithm + +This code defines an algorithm to flatten a binary tree into a linked list +represented using the right pointers of the tree nodes. It uses in-place +flattening and demonstrates the flattening process along with a display +function to visualize the flattened linked list. 
+https://www.geeksforgeeks.org/flatten-a-binary-tree-into-linked-list + +Author: Arunkumar A +Date: 04/09/2023 +""" +from __future__ import annotations + + +class TreeNode: + """ + A TreeNode has data variable and pointers to TreeNode objects + for its left and right children. + """ + + def __init__(self, data: int) -> None: + self.data = data + self.left: TreeNode | None = None + self.right: TreeNode | None = None + + +def build_tree() -> TreeNode: + """ + Build and return a sample binary tree. + + Returns: + TreeNode: The root of the binary tree. + + Examples: + >>> root = build_tree() + >>> root.data + 1 + >>> root.left.data + 2 + >>> root.right.data + 5 + >>> root.left.left.data + 3 + >>> root.left.right.data + 4 + >>> root.right.right.data + 6 + """ + root = TreeNode(1) + root.left = TreeNode(2) + root.right = TreeNode(5) + root.left.left = TreeNode(3) + root.left.right = TreeNode(4) + root.right.right = TreeNode(6) + return root + + +def flatten(root: TreeNode | None) -> None: + """ + Flatten a binary tree into a linked list in-place, where the linked list is + represented using the right pointers of the tree nodes. + + Args: + root (TreeNode): The root of the binary tree to be flattened. + + Examples: + >>> root = TreeNode(1) + >>> root.left = TreeNode(2) + >>> root.right = TreeNode(5) + >>> root.left.left = TreeNode(3) + >>> root.left.right = TreeNode(4) + >>> root.right.right = TreeNode(6) + >>> flatten(root) + >>> root.data + 1 + >>> root.right.right is None + False + >>> root.right.right = TreeNode(3) + >>> root.right.right.right is None + True + """ + if not root: + return + + # Flatten the left subtree + flatten(root.left) + + # Save the right subtree + right_subtree = root.right + + # Make the left subtree the new right subtree + root.right = root.left + root.left = None + + # Find the end of the new right subtree + current = root + while current.right: + current = current.right + + # Append the original right subtree to the end + current.right = right_subtree + + # Flatten the updated right subtree + flatten(right_subtree) + + +def display_linked_list(root: TreeNode | None) -> None: + """ + Display the flattened linked list. + + Args: + root (TreeNode | None): The root of the flattened linked list. 
+ + Examples: + >>> root = TreeNode(1) + >>> root.right = TreeNode(2) + >>> root.right.right = TreeNode(3) + >>> display_linked_list(root) + 1 2 3 + >>> root = None + >>> display_linked_list(root) + + """ + current = root + while current: + if current.right is None: + print(current.data, end="") + break + print(current.data, end=" ") + current = current.right + + +if __name__ == "__main__": + print("Flattened Linked List:") + root = build_tree() + flatten(root) + display_linked_list(root) From 922d6a88b3be2ff0dd69dd47d90e40aa95afd105 Mon Sep 17 00:00:00 2001 From: Bama Charan Chhandogi Date: Wed, 4 Oct 2023 22:51:46 +0530 Subject: [PATCH 1017/1543] add median of matrix (#9363) * add median of matrix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix formating * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- matrix/median_matrix.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 matrix/median_matrix.py diff --git a/matrix/median_matrix.py b/matrix/median_matrix.py new file mode 100644 index 000000000000..116e609a587c --- /dev/null +++ b/matrix/median_matrix.py @@ -0,0 +1,38 @@ +""" +https://en.wikipedia.org/wiki/Median +""" + + +def median(matrix: list[list[int]]) -> int: + """ + Calculate the median of a sorted matrix. + + Args: + matrix: A 2D matrix of integers. + + Returns: + The median value of the matrix. + + Examples: + >>> matrix = [[1, 3, 5], [2, 6, 9], [3, 6, 9]] + >>> median(matrix) + 5 + + >>> matrix = [[1, 2, 3], [4, 5, 6]] + >>> median(matrix) + 3 + """ + # Flatten the matrix into a sorted 1D list + linear = sorted(num for row in matrix for num in row) + + # Calculate the middle index + mid = (len(linear) - 1) // 2 + + # Return the median + return linear[mid] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d5806258d4f9eb0e5652e1edfac0613aacb71fb6 Mon Sep 17 00:00:00 2001 From: Bama Charan Chhandogi Date: Wed, 4 Oct 2023 23:48:59 +0530 Subject: [PATCH 1018/1543] add median of two sorted array (#9386) * add median of two sorted array * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix syntax * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix syntax * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * improve code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add documentation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/arrays/median_two_array.py | 61 ++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 data_structures/arrays/median_two_array.py diff --git a/data_structures/arrays/median_two_array.py b/data_structures/arrays/median_two_array.py new file mode 100644 index 
000000000000..972b0ee44201 --- /dev/null +++ b/data_structures/arrays/median_two_array.py @@ -0,0 +1,61 @@ +""" +https://www.enjoyalgorithms.com/blog/median-of-two-sorted-arrays +""" + + +def find_median_sorted_arrays(nums1: list[int], nums2: list[int]) -> float: + """ + Find the median of two arrays. + + Args: + nums1: The first array. + nums2: The second array. + + Returns: + The median of the two arrays. + + Examples: + >>> find_median_sorted_arrays([1, 3], [2]) + 2.0 + + >>> find_median_sorted_arrays([1, 2], [3, 4]) + 2.5 + + >>> find_median_sorted_arrays([0, 0], [0, 0]) + 0.0 + + >>> find_median_sorted_arrays([], []) + Traceback (most recent call last): + ... + ValueError: Both input arrays are empty. + + >>> find_median_sorted_arrays([], [1]) + 1.0 + + >>> find_median_sorted_arrays([-1000], [1000]) + 0.0 + + >>> find_median_sorted_arrays([-1.1, -2.2], [-3.3, -4.4]) + -2.75 + """ + if not nums1 and not nums2: + raise ValueError("Both input arrays are empty.") + + # Merge the arrays into a single sorted array. + merged = sorted(nums1 + nums2) + total = len(merged) + + if total % 2 == 1: # If the total number of elements is odd + return float(merged[total // 2]) # then return the middle element + + # If the total number of elements is even, calculate + # the average of the two middle elements as the median. + middle1 = merged[total // 2 - 1] + middle2 = merged[total // 2] + return (float(middle1) + float(middle2)) / 2.0 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c16d2f8865c8ce28ae6d4d815d3f6c3008e94f74 Mon Sep 17 00:00:00 2001 From: Muhammad Umer Farooq <115654418+Muhammadummerr@users.noreply.github.com> Date: Wed, 4 Oct 2023 23:43:17 +0500 Subject: [PATCH 1019/1543] UPDATED rat_in_maze.py (#9148) * UPDATED rat_in_maze.py * Update reddit.py in Webprogramming b/c it was causing error in pre-commit tests while raising PR. * UPDATED rat_in_maze.py * fixed return type to only maze,otherwise raise valueError. * fixed whitespaces error,improved matrix visual. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated. * Try * updated * updated * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- backtracking/rat_in_maze.py | 181 ++++++++++++++++++++++++++---------- 1 file changed, 130 insertions(+), 51 deletions(-) diff --git a/backtracking/rat_in_maze.py b/backtracking/rat_in_maze.py index 7bde886dd558..626c83cb4a15 100644 --- a/backtracking/rat_in_maze.py +++ b/backtracking/rat_in_maze.py @@ -1,91 +1,164 @@ from __future__ import annotations -def solve_maze(maze: list[list[int]]) -> bool: +def solve_maze( + maze: list[list[int]], + source_row: int, + source_column: int, + destination_row: int, + destination_column: int, +) -> list[list[int]]: """ This method solves the "rat in maze" problem. - In this problem we have some n by n matrix, a start point and an end point. - We want to go from the start to the end. In this matrix zeroes represent walls - and ones paths we can use. Parameters : - maze(2D matrix) : maze + - maze: A two dimensional matrix of zeros and ones. + - source_row: The row index of the starting point. + - source_column: The column index of the starting point. + - destination_row: The row index of the destination point. + - destination_column: The column index of the destination point. Returns: - Return: True if the maze has a solution or False if it does not. 
+ - solution: A 2D matrix representing the solution path if it exists. + Raises: + - ValueError: If no solution exists or if the source or + destination coordinates are invalid. + Description: + This method navigates through a maze represented as an n by n matrix, + starting from a specified source cell and + aiming to reach a destination cell. + The maze consists of walls (1s) and open paths (0s). + By providing custom row and column values, the source and destination + cells can be adjusted. >>> maze = [[0, 1, 0, 1, 1], ... [0, 0, 0, 0, 0], ... [1, 0, 1, 0, 1], ... [0, 0, 1, 0, 0], ... [1, 0, 0, 1, 0]] - >>> solve_maze(maze) - [1, 0, 0, 0, 0] - [1, 1, 1, 1, 0] - [0, 0, 0, 1, 0] - [0, 0, 0, 1, 1] - [0, 0, 0, 0, 1] - True + >>> solve_maze(maze,0,0,len(maze)-1,len(maze)-1) # doctest: +NORMALIZE_WHITESPACE + [[0, 1, 1, 1, 1], + [0, 0, 0, 0, 1], + [1, 1, 1, 0, 1], + [1, 1, 1, 0, 0], + [1, 1, 1, 1, 0]] + + Note: + In the output maze, the zeros (0s) represent one of the possible + paths from the source to the destination. >>> maze = [[0, 1, 0, 1, 1], ... [0, 0, 0, 0, 0], ... [0, 0, 0, 0, 1], ... [0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0]] - >>> solve_maze(maze) - [1, 0, 0, 0, 0] - [1, 0, 0, 0, 0] - [1, 0, 0, 0, 0] - [1, 0, 0, 0, 0] - [1, 1, 1, 1, 1] - True + >>> solve_maze(maze,0,0,len(maze)-1,len(maze)-1) # doctest: +NORMALIZE_WHITESPACE + [[0, 1, 1, 1, 1], + [0, 1, 1, 1, 1], + [0, 1, 1, 1, 1], + [0, 1, 1, 1, 1], + [0, 0, 0, 0, 0]] >>> maze = [[0, 0, 0], ... [0, 1, 0], ... [1, 0, 0]] - >>> solve_maze(maze) - [1, 1, 1] - [0, 0, 1] - [0, 0, 1] - True + >>> solve_maze(maze,0,0,len(maze)-1,len(maze)-1) # doctest: +NORMALIZE_WHITESPACE + [[0, 0, 0], + [1, 1, 0], + [1, 1, 0]] - >>> maze = [[0, 1, 0], + >>> maze = [[1, 0, 0], ... [0, 1, 0], ... [1, 0, 0]] - >>> solve_maze(maze) - No solution exists! - False + >>> solve_maze(maze,0,1,len(maze)-1,len(maze)-1) # doctest: +NORMALIZE_WHITESPACE + [[1, 0, 0], + [1, 1, 0], + [1, 1, 0]] + + >>> maze = [[1, 1, 0, 0, 1, 0, 0, 1], + ... [1, 0, 1, 0, 0, 1, 1, 1], + ... [0, 1, 0, 1, 0, 0, 1, 0], + ... [1, 1, 1, 0, 0, 1, 0, 1], + ... [0, 1, 0, 0, 1, 0, 1, 1], + ... [0, 0, 0, 1, 1, 1, 0, 1], + ... [0, 1, 0, 1, 0, 1, 1, 1], + ... [1, 1, 0, 0, 0, 0, 0, 1]] + >>> solve_maze(maze,0,2,len(maze)-1,2) # doctest: +NORMALIZE_WHITESPACE + [[1, 1, 0, 0, 1, 1, 1, 1], + [1, 1, 1, 0, 0, 1, 1, 1], + [1, 1, 1, 1, 0, 1, 1, 1], + [1, 1, 1, 0, 0, 1, 1, 1], + [1, 1, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 1, 1, 1, 1, 1], + [1, 1, 0, 1, 1, 1, 1, 1], + [1, 1, 0, 1, 1, 1, 1, 1]] + >>> maze = [[1, 0, 0], + ... [0, 1, 1], + ... [1, 0, 1]] + >>> solve_maze(maze,0,1,len(maze)-1,len(maze)-1) + Traceback (most recent call last): + ... + ValueError: No solution exists! + + >>> maze = [[0, 0], + ... [1, 1]] + >>> solve_maze(maze,0,0,len(maze)-1,len(maze)-1) + Traceback (most recent call last): + ... + ValueError: No solution exists! >>> maze = [[0, 1], ... [1, 0]] - >>> solve_maze(maze) - No solution exists! - False + >>> solve_maze(maze,2,0,len(maze)-1,len(maze)-1) + Traceback (most recent call last): + ... + ValueError: Invalid source or destination coordinates + + >>> maze = [[1, 0, 0], + ... [0, 1, 0], + ... [1, 0, 0]] + >>> solve_maze(maze,0,1,len(maze),len(maze)-1) + Traceback (most recent call last): + ... + ValueError: Invalid source or destination coordinates """ size = len(maze) + # Check if source and destination coordinates are Invalid. 
+ if not (0 <= source_row <= size - 1 and 0 <= source_column <= size - 1) or ( + not (0 <= destination_row <= size - 1 and 0 <= destination_column <= size - 1) + ): + raise ValueError("Invalid source or destination coordinates") # We need to create solution object to save path. - solutions = [[0 for _ in range(size)] for _ in range(size)] - solved = run_maze(maze, 0, 0, solutions) + solutions = [[1 for _ in range(size)] for _ in range(size)] + solved = run_maze( + maze, source_row, source_column, destination_row, destination_column, solutions + ) if solved: - print("\n".join(str(row) for row in solutions)) + return solutions else: - print("No solution exists!") - return solved + raise ValueError("No solution exists!") -def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool: +def run_maze( + maze: list[list[int]], + i: int, + j: int, + destination_row: int, + destination_column: int, + solutions: list[list[int]], +) -> bool: """ This method is recursive starting from (i, j) and going in one of four directions: up, down, left, right. If a path is found to destination it returns True otherwise it returns False. - Parameters: - maze(2D matrix) : maze + Parameters + maze: A two dimensional matrix of zeros and ones. i, j : coordinates of matrix - solutions(2D matrix) : solutions + solutions: A two dimensional matrix of solutions. Returns: Boolean if path is found True, Otherwise False. """ size = len(maze) # Final check point. - if i == j == (size - 1): - solutions[i][j] = 1 + if i == destination_row and j == destination_column and maze[i][j] == 0: + solutions[i][j] = 0 return True lower_flag = (not i < 0) and (not j < 0) # Check lower bounds @@ -93,21 +166,27 @@ def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) if lower_flag and upper_flag: # check for already visited and block points. 
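# Editor's note (illustrative annotation, not part of the original patch): the hunk
# below is the core backtracking step of run_maze. In the updated convention,
# `solutions` starts as all 1s; a cell is tentatively marked 0 when it joins the path,
# the four neighbours are tried recursively, and the cell is reset to 1 if none of
# those recursive calls reaches the destination. The 0-cells left at the end therefore
# trace one valid path from the source to the destination.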
- block_flag = (not solutions[i][j]) and (not maze[i][j]) + block_flag = (solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited - solutions[i][j] = 1 + solutions[i][j] = 0 # check for directions if ( - run_maze(maze, i + 1, j, solutions) - or run_maze(maze, i, j + 1, solutions) - or run_maze(maze, i - 1, j, solutions) - or run_maze(maze, i, j - 1, solutions) + run_maze(maze, i + 1, j, destination_row, destination_column, solutions) + or run_maze( + maze, i, j + 1, destination_row, destination_column, solutions + ) + or run_maze( + maze, i - 1, j, destination_row, destination_column, solutions + ) + or run_maze( + maze, i, j - 1, destination_row, destination_column, solutions + ) ): return True - solutions[i][j] = 0 + solutions[i][j] = 1 return False return False @@ -115,4 +194,4 @@ def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) if __name__ == "__main__": import doctest - doctest.testmod() + doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE) From 26d650ec2820e265e69c88608959a3e18f28c5d5 Mon Sep 17 00:00:00 2001 From: piyush-poddar <143445461+piyush-poddar@users.noreply.github.com> Date: Thu, 5 Oct 2023 01:58:19 +0530 Subject: [PATCH 1020/1543] Moved relu.py from maths/ to neural_network/activation_functions (#9753) * Moved file relu.py from maths/ to neural_network/activation_functions * Renamed relu.py to rectified_linear_unit.py * Renamed relu.py to rectified_linear_unit.py in DIRECTORY.md --- DIRECTORY.md | 2 +- .../activation_functions/rectified_linear_unit.py | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename maths/relu.py => neural_network/activation_functions/rectified_linear_unit.py (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 4f4cc423d678..696a059bb4c8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -639,7 +639,6 @@ * [Quadratic Equations Complex Numbers](maths/quadratic_equations_complex_numbers.py) * [Radians](maths/radians.py) * [Radix2 Fft](maths/radix2_fft.py) - * [Relu](maths/relu.py) * [Remove Digit](maths/remove_digit.py) * [Runge Kutta](maths/runge_kutta.py) * [Segmented Sieve](maths/segmented_sieve.py) @@ -710,6 +709,7 @@ * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) + * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Perceptron](neural_network/perceptron.py) diff --git a/maths/relu.py b/neural_network/activation_functions/rectified_linear_unit.py similarity index 100% rename from maths/relu.py rename to neural_network/activation_functions/rectified_linear_unit.py From 6a391d113d8f0efdd69e69c8da7b44766594449a Mon Sep 17 00:00:00 2001 From: Raghav <83136390+Raghav-Bell@users.noreply.github.com> Date: Thu, 5 Oct 2023 04:46:19 +0530 Subject: [PATCH 1021/1543] Added Photoelectric effect equation (#9666) * Added Photoelectric effect equation Photoelectric effect is one of the demonstration of quanta of energy. 
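[Editor's note - illustrative sketch, not part of the patch series.] The patch below adds the
photoelectric relation K_max = h*f - W. A minimal hedged example of that arithmetic, assuming
an arbitrary frequency and work function (these inputs are not taken from the patch), and using
the same eV-scale Planck constant the new module defines:

    # Minimal sketch of K_max = h*f - W in electron-volts; the inputs are assumed values.
    PLANCK_CONSTANT_EVS = 4.1357e-15   # Planck constant in eV*s, matching the patch below
    frequency_hz = 3.0e15              # assumed ultraviolet-range frequency
    work_function_ev = 2.0             # assumed work function of the surface
    k_max = max(PLANCK_CONSTANT_EVS * frequency_hz - work_function_ev, 0)
    print(f"{k_max:.4f} eV")           # ~10.41 eV for these assumed inputs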
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed doctest Co-authored-by: Rohan Anand <96521078+rohan472000@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Rohan Anand <96521078+rohan472000@users.noreply.github.com> --- physics/photoelectric_effect.py | 67 +++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 physics/photoelectric_effect.py diff --git a/physics/photoelectric_effect.py b/physics/photoelectric_effect.py new file mode 100644 index 000000000000..3a0138ffe045 --- /dev/null +++ b/physics/photoelectric_effect.py @@ -0,0 +1,67 @@ +""" +The photoelectric effect is the emission of electrons when electromagnetic radiation , +such as light, hits a material. Electrons emitted in this manner are called +photoelectrons. + +In 1905, Einstein proposed a theory of the photoelectric effect using a concept that +light consists of tiny packets of energy known as photons or light quanta. Each packet +carries energy hv that is proportional to the frequency v of the corresponding +electromagnetic wave. The proportionality constant h has become known as the +Planck constant. In the range of kinetic energies of the electrons that are removed from +their varying atomic bindings by the absorption of a photon of energy hv, the highest +kinetic energy K_max is : + +K_max = hv-W + +Here, W is the minimum energy required to remove an electron from the surface of the +material. It is called the work function of the surface + +Reference: https://en.wikipedia.org/wiki/Photoelectric_effect + +""" + +PLANCK_CONSTANT_JS = 6.6261 * pow(10, -34) # in SI (Js) +PLANCK_CONSTANT_EVS = 4.1357 * pow(10, -15) # in eVs + + +def maximum_kinetic_energy( + frequency: float, work_function: float, in_ev: bool = False +) -> float: + """ + Calculates the maximum kinetic energy of emitted electron from the surface. + if the maximum kinetic energy is zero then no electron will be emitted + or given electromagnetic wave frequency is small. + + frequency (float): Frequency of electromagnetic wave. + work_function (float): Work function of the surface. + in_ev (optional)(bool): Pass True if values are in eV. + + Usage example: + >>> maximum_kinetic_energy(1000000,2) + 0 + >>> maximum_kinetic_energy(1000000,2,True) + 0 + >>> maximum_kinetic_energy(10000000000000000,2,True) + 39.357000000000006 + >>> maximum_kinetic_energy(-9,20) + Traceback (most recent call last): + ... + ValueError: Frequency can't be negative. + + >>> maximum_kinetic_energy(1000,"a") + Traceback (most recent call last): + ... 
+ TypeError: unsupported operand type(s) for -: 'float' and 'str' + + """ + if frequency < 0: + raise ValueError("Frequency can't be negative.") + if in_ev: + return max(PLANCK_CONSTANT_EVS * frequency - work_function, 0) + return max(PLANCK_CONSTANT_JS * frequency - work_function, 0) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 2fd43c0f7ff1d7f72fa65a528ddabccf90c89a0d Mon Sep 17 00:00:00 2001 From: Tauseef Hilal Tantary Date: Thu, 5 Oct 2023 05:03:12 +0530 Subject: [PATCH 1022/1543] [New Algorithm] - Bell Numbers (#9324) * Add Bell Numbers * Use descriptive variable names * Add type hints * Fix failing tests --- maths/bell_numbers.py | 78 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 maths/bell_numbers.py diff --git a/maths/bell_numbers.py b/maths/bell_numbers.py new file mode 100644 index 000000000000..660ec6e6aa09 --- /dev/null +++ b/maths/bell_numbers.py @@ -0,0 +1,78 @@ +""" +Bell numbers represent the number of ways to partition a set into non-empty +subsets. This module provides functions to calculate Bell numbers for sets of +integers. In other words, the first (n + 1) Bell numbers. + +For more information about Bell numbers, refer to: +https://en.wikipedia.org/wiki/Bell_number +""" + + +def bell_numbers(max_set_length: int) -> list[int]: + """ + Calculate Bell numbers for the sets of lengths from 0 to max_set_length. + In other words, calculate first (max_set_length + 1) Bell numbers. + + Args: + max_set_length (int): The maximum length of the sets for which + Bell numbers are calculated. + + Returns: + list: A list of Bell numbers for sets of lengths from 0 to max_set_length. + + Examples: + >>> bell_numbers(0) + [1] + >>> bell_numbers(1) + [1, 1] + >>> bell_numbers(5) + [1, 1, 2, 5, 15, 52] + """ + if max_set_length < 0: + raise ValueError("max_set_length must be non-negative") + + bell = [0] * (max_set_length + 1) + bell[0] = 1 + + for i in range(1, max_set_length + 1): + for j in range(i): + bell[i] += _binomial_coefficient(i - 1, j) * bell[j] + + return bell + + +def _binomial_coefficient(total_elements: int, elements_to_choose: int) -> int: + """ + Calculate the binomial coefficient C(total_elements, elements_to_choose) + + Args: + total_elements (int): The total number of elements. + elements_to_choose (int): The number of elements to choose. + + Returns: + int: The binomial coefficient C(total_elements, elements_to_choose). 
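    (Editor's note, not part of the original patch: this helper feeds the Bell-number
    recurrence used above, B(n) = sum over k of C(n - 1, k) * B(k). For instance,
    B(3) = C(2, 0)*B(0) + C(2, 1)*B(1) + C(2, 2)*B(2) = 1*1 + 2*1 + 1*2 = 5, which matches
    the bell_numbers(5) doctest output above.)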
+ + Examples: + >>> _binomial_coefficient(5, 2) + 10 + >>> _binomial_coefficient(6, 3) + 20 + """ + if elements_to_choose in {0, total_elements}: + return 1 + + if elements_to_choose > total_elements - elements_to_choose: + elements_to_choose = total_elements - elements_to_choose + + coefficient = 1 + for i in range(elements_to_choose): + coefficient *= total_elements - i + coefficient //= i + 1 + + return coefficient + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 1fda96b7044d9fa08c84f09f54a345ebf052b2eb Mon Sep 17 00:00:00 2001 From: Sanket Kittad <86976526+sanketkittad@users.noreply.github.com> Date: Thu, 5 Oct 2023 05:10:14 +0530 Subject: [PATCH 1023/1543] Palindromic (#9288) * added longest palindromic subsequence * removed * added longest palindromic subsequence * added longest palindromic subsequence link * added comments --- .../longest_palindromic_subsequence.py | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 dynamic_programming/longest_palindromic_subsequence.py diff --git a/dynamic_programming/longest_palindromic_subsequence.py b/dynamic_programming/longest_palindromic_subsequence.py new file mode 100644 index 000000000000..a60d95e460e6 --- /dev/null +++ b/dynamic_programming/longest_palindromic_subsequence.py @@ -0,0 +1,44 @@ +""" +author: Sanket Kittad +Given a string s, find the longest palindromic subsequence's length in s. +Input: s = "bbbab" +Output: 4 +Explanation: One possible longest palindromic subsequence is "bbbb". +Leetcode link: https://leetcode.com/problems/longest-palindromic-subsequence/description/ +""" + + +def longest_palindromic_subsequence(input_string: str) -> int: + """ + This function returns the longest palindromic subsequence in a string + >>> longest_palindromic_subsequence("bbbab") + 4 + >>> longest_palindromic_subsequence("bbabcbcab") + 7 + """ + n = len(input_string) + rev = input_string[::-1] + m = len(rev) + dp = [[-1] * (m + 1) for i in range(n + 1)] + for i in range(n + 1): + dp[i][0] = 0 + for i in range(m + 1): + dp[0][i] = 0 + + # create and initialise dp array + for i in range(1, n + 1): + for j in range(1, m + 1): + # If characters at i and j are the same + # include them in the palindromic subsequence + if input_string[i - 1] == rev[j - 1]: + dp[i][j] = 1 + dp[i - 1][j - 1] + else: + dp[i][j] = max(dp[i - 1][j], dp[i][j - 1]) + + return dp[n][m] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 935d1d3225ede4c0650165d5dfd8f5eb35b54f5e Mon Sep 17 00:00:00 2001 From: Vipin Karthic <143083087+vipinkarthic@users.noreply.github.com> Date: Thu, 5 Oct 2023 11:27:55 +0530 Subject: [PATCH 1024/1543] Added Mirror Formulae Equation (#9717) * Python mirror_formulae.py is added to the repository * Changes done after reading readme.md * Changes for running doctest on all platforms * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change 2 for Doctests * Changes for doctest 2 * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 9 ++- physics/mirror_formulae.py | 127 +++++++++++++++++++++++++++++++++++++ 2 files changed, 135 insertions(+), 1 deletion(-) create mode 100644 physics/mirror_formulae.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 696a059bb4c8..5f23cbd6c922 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -170,6 +170,7 @@ ## Data Structures * 
Arrays + * [Median Two Array](data_structures/arrays/median_two_array.py) * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product Sum](data_structures/arrays/product_sum.py) @@ -185,6 +186,7 @@ * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py) * [Distribute Coins](data_structures/binary_tree/distribute_coins.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) + * [Flatten Binarytree To Linkedlist](data_structures/binary_tree/flatten_binarytree_to_linkedlist.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) * [Is Bst](data_structures/binary_tree/is_bst.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) @@ -324,6 +326,7 @@ * [Longest Common Substring](dynamic_programming/longest_common_substring.py) * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py) + * [Longest Palindromic Subsequence](dynamic_programming/longest_palindromic_subsequence.py) * [Longest Sub Array](dynamic_programming/longest_sub_array.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) @@ -539,6 +542,7 @@ * [Average Mode](maths/average_mode.py) * [Bailey Borwein Plouffe](maths/bailey_borwein_plouffe.py) * [Basic Maths](maths/basic_maths.py) + * [Bell Numbers](maths/bell_numbers.py) * [Binary Exp Mod](maths/binary_exp_mod.py) * [Binary Exponentiation](maths/binary_exponentiation.py) * [Binary Exponentiation 3](maths/binary_exponentiation_3.py) @@ -690,6 +694,7 @@ * [Matrix Class](matrix/matrix_class.py) * [Matrix Operation](matrix/matrix_operation.py) * [Max Area Of Island](matrix/max_area_of_island.py) + * [Median Matrix](matrix/median_matrix.py) * [Nth Fibonacci Using Matrix Exponentiation](matrix/nth_fibonacci_using_matrix_exponentiation.py) * [Pascal Triangle](matrix/pascal_triangle.py) * [Rotate Matrix](matrix/rotate_matrix.py) @@ -708,8 +713,8 @@ * Activation Functions * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) - * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) + * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Perceptron](neural_network/perceptron.py) @@ -756,9 +761,11 @@ * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [Malus Law](physics/malus_law.py) + * [Mirror Formulae](physics/mirror_formulae.py) * [N Body Simulation](physics/n_body_simulation.py) * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) + * [Photoelectric Effect](physics/photoelectric_effect.py) * [Potential Energy](physics/potential_energy.py) * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) diff --git 
a/physics/mirror_formulae.py b/physics/mirror_formulae.py new file mode 100644 index 000000000000..f1b4ac2c7baf --- /dev/null +++ b/physics/mirror_formulae.py @@ -0,0 +1,127 @@ +""" +This module contains the functions to calculate the focal length, object distance +and image distance of a mirror. + +The mirror formula is an equation that relates the object distance (u), +image distance (v), and focal length (f) of a spherical mirror. +It is commonly used in optics to determine the position and characteristics +of an image formed by a mirror. It is expressed using the formulae : + +------------------- +| 1/f = 1/v + 1/u | +------------------- + +Where, +f = Focal length of the spherical mirror (metre) +v = Image distance from the mirror (metre) +u = Object distance from the mirror (metre) + + +The signs of the distances are taken with respect to the sign convention. +The sign convention is as follows: + 1) Object is always placed to the left of mirror + 2) Distances measured in the direction of the incident ray are positive + and the distances measured in the direction opposite to that of the incident + rays are negative. + 3) All distances are measured from the pole of the mirror. + + +There are a few assumptions that are made while using the mirror formulae. +They are as follows: + 1) Thin Mirror: The mirror is assumed to be thin, meaning its thickness is + negligible compared to its radius of curvature. This assumption allows + us to treat the mirror as a two-dimensional surface. + 2) Spherical Mirror: The mirror is assumed to have a spherical shape. While this + assumption may not hold exactly for all mirrors, it is a reasonable approximation + for most practical purposes. + 3) Small Angles: The angles involved in the derivation are assumed to be small. + This assumption allows us to use the small-angle approximation, where the tangent + of a small angle is approximately equal to the angle itself. It simplifies the + calculations and makes the derivation more manageable. + 4) Paraxial Rays: The mirror formula is derived using paraxial rays, which are + rays that are close to the principal axis and make small angles with it. This + assumption ensures that the rays are close enough to the principal axis, making the + calculations more accurate. + 5) Reflection and Refraction Laws: The derivation assumes that the laws of + reflection and refraction hold. + These laws state that the angle of incidence is equal to the angle of reflection + for reflection, and the incident and refracted rays lie in the same plane and + obey Snell's law for refraction. + +(Description and Assumptions adapted from +https://www.collegesearch.in/articles/mirror-formula-derivation) + +(Sign Convention adapted from +https://www.toppr.com/ask/content/concept/sign-convention-for-mirrors-210189/) + + +""" + + +def focal_length(distance_of_object: float, distance_of_image: float) -> float: + """ + >>> from math import isclose + >>> isclose(focal_length(10, 20), 6.66666666666666) + True + >>> from math import isclose + >>> isclose(focal_length(9.5, 6.7), 3.929012346) + True + >>> focal_length(0, 20) + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter non zero values with respect + to the sign convention. + """ + + if distance_of_object == 0 or distance_of_image == 0: + raise ValueError( + "Invalid inputs. Enter non zero values with respect to the sign convention." 
+ ) + focal_length = 1 / ((1 / distance_of_object) + (1 / distance_of_image)) + return focal_length + + +def object_distance(focal_length: float, distance_of_image: float) -> float: + """ + >>> from math import isclose + >>> isclose(object_distance(30, 20), -60.0) + True + >>> from math import isclose + >>> isclose(object_distance(10.5, 11.7), 102.375) + True + >>> object_distance(90, 0) + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter non zero values with respect + to the sign convention. + """ + + if distance_of_image == 0 or focal_length == 0: + raise ValueError( + "Invalid inputs. Enter non zero values with respect to the sign convention." + ) + object_distance = 1 / ((1 / focal_length) - (1 / distance_of_image)) + return object_distance + + +def image_distance(focal_length: float, distance_of_object: float) -> float: + """ + >>> from math import isclose + >>> isclose(image_distance(10, 40), 13.33333333) + True + >>> from math import isclose + >>> isclose(image_distance(1.5, 6.7), 1.932692308) + True + >>> image_distance(0, 0) + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter non zero values with respect + to the sign convention. + """ + + if distance_of_object == 0 or focal_length == 0: + raise ValueError( + "Invalid inputs. Enter non zero values with respect to the sign convention." + ) + image_distance = 1 / ((1 / focal_length) - (1 / distance_of_object)) + return image_distance From 4b6301d4ce91638d39689f7be7db797f99623964 Mon Sep 17 00:00:00 2001 From: rtang09 <49603415+rtang09@users.noreply.github.com> Date: Wed, 4 Oct 2023 23:12:08 -0700 Subject: [PATCH 1025/1543] Fletcher 16 (#9775) * Add files via upload * Update fletcher16.py * Update fletcher16.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fletcher16.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fletcher16.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fletcher16.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- hashes/fletcher16.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 hashes/fletcher16.py diff --git a/hashes/fletcher16.py b/hashes/fletcher16.py new file mode 100644 index 000000000000..7c23c98d72c5 --- /dev/null +++ b/hashes/fletcher16.py @@ -0,0 +1,36 @@ +""" +The Fletcher checksum is an algorithm for computing a position-dependent +checksum devised by John G. Fletcher (1934–2012) at Lawrence Livermore Labs +in the late 1970s.[1] The objective of the Fletcher checksum was to +provide error-detection properties approaching those of a cyclic +redundancy check but with the lower computational effort associated +with summation techniques. + +Source: https://en.wikipedia.org/wiki/Fletcher%27s_checksum +""" + + +def fletcher16(text: str) -> int: + """ + Loop through every character in the data and add to two sums. 
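    Editor's note (illustrative worked trace, not part of the original patch): sum1 is a
    running byte sum mod 255 and sum2 is the running sum of the sum1 values mod 255; the
    result packs them as (sum2 << 8) | sum1. For the two bytes of "ab" (97, 98), sum1 goes
    97 -> 195 and sum2 goes 97 -> 37, so the checksum would be (37 << 8) | 195 = 9667.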
+ + >>> fletcher16('hello world') + 6752 + >>> fletcher16('onethousandfourhundredthirtyfour') + 28347 + >>> fletcher16('The quick brown fox jumps over the lazy dog.') + 5655 + """ + data = bytes(text, "ascii") + sum1 = 0 + sum2 = 0 + for character in data: + sum1 = (sum1 + character) % 255 + sum2 = (sum1 + sum2) % 255 + return (sum2 << 8) | sum1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0d324de7ab9c354d958fd93f6046d0111014d95a Mon Sep 17 00:00:00 2001 From: Vipin Karthic <143083087+vipinkarthic@users.noreply.github.com> Date: Thu, 5 Oct 2023 13:18:15 +0530 Subject: [PATCH 1026/1543] Doctest Error Correction of mirror_formulae.py (#9782) * Python mirror_formulae.py is added to the repository * Changes done after reading readme.md * Changes for running doctest on all platforms * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change 2 for Doctests * Changes for doctest 2 * updating DIRECTORY.md * Doctest whitespace error rectification to mirror_formulae.py * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + physics/mirror_formulae.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 5f23cbd6c922..b0ba3c3852da 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -469,6 +469,7 @@ * [Djb2](hashes/djb2.py) * [Elf](hashes/elf.py) * [Enigma Machine](hashes/enigma_machine.py) + * [Fletcher16](hashes/fletcher16.py) * [Hamming Code](hashes/hamming_code.py) * [Luhn](hashes/luhn.py) * [Md5](hashes/md5.py) diff --git a/physics/mirror_formulae.py b/physics/mirror_formulae.py index f1b4ac2c7baf..7efc52438140 100644 --- a/physics/mirror_formulae.py +++ b/physics/mirror_formulae.py @@ -66,7 +66,7 @@ def focal_length(distance_of_object: float, distance_of_image: float) -> float: >>> from math import isclose >>> isclose(focal_length(9.5, 6.7), 3.929012346) True - >>> focal_length(0, 20) + >>> focal_length(0, 20) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Invalid inputs. Enter non zero values with respect @@ -89,7 +89,7 @@ def object_distance(focal_length: float, distance_of_image: float) -> float: >>> from math import isclose >>> isclose(object_distance(10.5, 11.7), 102.375) True - >>> object_distance(90, 0) + >>> object_distance(90, 0) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Invalid inputs. Enter non zero values with respect @@ -112,7 +112,7 @@ def image_distance(focal_length: float, distance_of_object: float) -> float: >>> from math import isclose >>> isclose(image_distance(1.5, 6.7), 1.932692308) True - >>> image_distance(0, 0) + >>> image_distance(0, 0) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Invalid inputs. 
Enter non zero values with respect From f3be0ae9e60a0ed2185e55c0758ddf401e604f8c Mon Sep 17 00:00:00 2001 From: Naman <37952726+namansharma18899@users.noreply.github.com> Date: Thu, 5 Oct 2023 14:07:23 +0530 Subject: [PATCH 1027/1543] Added largest pow of 2 le num (#9374) --- bit_manipulation/largest_pow_of_two_le_num.py | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 bit_manipulation/largest_pow_of_two_le_num.py diff --git a/bit_manipulation/largest_pow_of_two_le_num.py b/bit_manipulation/largest_pow_of_two_le_num.py new file mode 100644 index 000000000000..6ef827312199 --- /dev/null +++ b/bit_manipulation/largest_pow_of_two_le_num.py @@ -0,0 +1,60 @@ +""" +Author : Naman Sharma +Date : October 2, 2023 + +Task: +To Find the largest power of 2 less than or equal to a given number. + +Implementation notes: Use bit manipulation. +We start from 1 & left shift the set bit to check if (res<<1)<=number. +Each left bit shift represents a pow of 2. + +For example: +number: 15 +res: 1 0b1 + 2 0b10 + 4 0b100 + 8 0b1000 + 16 0b10000 (Exit) +""" + + +def largest_pow_of_two_le_num(number: int) -> int: + """ + Return the largest power of two less than or equal to a number. + + >>> largest_pow_of_two_le_num(0) + 0 + >>> largest_pow_of_two_le_num(1) + 1 + >>> largest_pow_of_two_le_num(-1) + 0 + >>> largest_pow_of_two_le_num(3) + 2 + >>> largest_pow_of_two_le_num(15) + 8 + >>> largest_pow_of_two_le_num(99) + 64 + >>> largest_pow_of_two_le_num(178) + 128 + >>> largest_pow_of_two_le_num(999999) + 524288 + >>> largest_pow_of_two_le_num(99.9) + Traceback (most recent call last): + ... + TypeError: Input value must be a 'int' type + """ + if isinstance(number, float): + raise TypeError("Input value must be a 'int' type") + if number <= 0: + return 0 + res = 1 + while (res << 1) <= number: + res <<= 1 + return res + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From e29024d14ade8ff4cdb43d1da6a7738f44685e5e Mon Sep 17 00:00:00 2001 From: Rohan Sardar <77870108+RohanSardar@users.noreply.github.com> Date: Thu, 5 Oct 2023 14:22:40 +0530 Subject: [PATCH 1028/1543] Program to convert a given string to Pig Latin (#9712) * Program to convert a given string to Pig Latin This is a program to convert a user given string to its respective Pig Latin form As per wikipedia (link: https://en.wikipedia.org/wiki/Pig_Latin#Rules) For words that begin with consonant sounds, all letters before the initial vowel are placed at the end of the word sequence. Then, "ay" is added, as in the following examples: "pig" = "igpay" "latin" = "atinlay" "banana" = "ananabay" When words begin with consonant clusters (multiple consonants that form one sound), the whole sound is added to the end when speaking or writing. "friends" = "iendsfray" "smile" = "ilesmay" "string" = "ingstray" For words that begin with vowel sounds, one just adds "hay", "way" or "yay" to the end. 
Examples are: "eat" = "eatway" "omelet" = "omeletway" "are" = "areway" * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pig_latin.py Added f-string * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pig_latin.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pig_latin.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pig_latin.py * Update pig_latin.py * Update pig_latin.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- strings/pig_latin.py | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 strings/pig_latin.py diff --git a/strings/pig_latin.py b/strings/pig_latin.py new file mode 100644 index 000000000000..457dbb5a6cf6 --- /dev/null +++ b/strings/pig_latin.py @@ -0,0 +1,44 @@ +def pig_latin(word: str) -> str: + """Compute the piglatin of a given string. + + https://en.wikipedia.org/wiki/Pig_Latin + + Usage examples: + >>> pig_latin("pig") + 'igpay' + >>> pig_latin("latin") + 'atinlay' + >>> pig_latin("banana") + 'ananabay' + >>> pig_latin("friends") + 'iendsfray' + >>> pig_latin("smile") + 'ilesmay' + >>> pig_latin("string") + 'ingstray' + >>> pig_latin("eat") + 'eatway' + >>> pig_latin("omelet") + 'omeletway' + >>> pig_latin("are") + 'areway' + >>> pig_latin(" ") + '' + >>> pig_latin(None) + '' + """ + if not (word or "").strip(): + return "" + word = word.lower() + if word[0] in "aeiou": + return f"{word}way" + for i, char in enumerate(word): # noqa: B007 + if char in "aeiou": + break + return f"{word[i:]}{word[:i]}ay" + + +if __name__ == "__main__": + print(f"{pig_latin('friends') = }") + word = input("Enter a word: ") + print(f"{pig_latin(word) = }") From dffbe458c07d492b9c599376233f9f6295527339 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Fri, 6 Oct 2023 00:26:33 +1300 Subject: [PATCH 1029/1543] Update contributing guidelines to say not to open new issues for algorithms (#9760) * updated CONTRIBUTING.md with markdown anchors and issues * removed testing header from previous PR --- CONTRIBUTING.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7a67ce33cd62..bf3420185c1a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -25,8 +25,12 @@ We appreciate any contribution, from fixing a grammar mistake in a comment to im Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the GitHub Actions output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. +#### Issues + If you are interested in resolving an [open issue](https://github.com/TheAlgorithms/Python/issues), simply make a pull request with your proposed fix. __We do not assign issues in this repo__ so please do not ask for permission to work on an issue. +__Do not__ create an issue to contribute an algorithm. Please submit a pull request instead. 
+ Please help us keep our issue list small by adding `Fixes #{$ISSUE_NUMBER}` to the description of pull requests that resolve open issues. For example, if your pull request fixes issue #10, then please add the following to its description: ``` From 0e3ea3fbab0297f38ed48b9e2f694cc43f8af567 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Thu, 5 Oct 2023 16:30:39 +0500 Subject: [PATCH 1030/1543] Fermat_little_theorem type annotation (#9794) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Added type annotation. * Update fermat_little_theorem.py Used other syntax. * Update fermat_little_theorem.py * Update maths/fermat_little_theorem.py --------- Co-authored-by: Tianyi Zheng --- maths/fermat_little_theorem.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/fermat_little_theorem.py b/maths/fermat_little_theorem.py index eea03be245cb..4a3ecd05ce91 100644 --- a/maths/fermat_little_theorem.py +++ b/maths/fermat_little_theorem.py @@ -5,7 +5,7 @@ # Wikipedia reference: https://en.wikipedia.org/wiki/Fermat%27s_little_theorem -def binary_exponentiation(a, n, mod): +def binary_exponentiation(a: int, n: float, mod: int) -> int: if n == 0: return 1 From 1b6c5cc2713743b8a74fd9c92e0a1b6442d63a7f Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Thu, 5 Oct 2023 17:30:43 +0500 Subject: [PATCH 1031/1543] Karatsuba type annotation (#9800) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Added type annotation. 
--- maths/karatsuba.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/karatsuba.py b/maths/karatsuba.py index 4bf4aecdc068..3d29e31d2107 100644 --- a/maths/karatsuba.py +++ b/maths/karatsuba.py @@ -1,7 +1,7 @@ """ Multiply two numbers using Karatsuba algorithm """ -def karatsuba(a, b): +def karatsuba(a: int, b: int) -> int: """ >>> karatsuba(15463, 23489) == 15463 * 23489 True From f159a3350650843e0b3e856e612cda56eabb4237 Mon Sep 17 00:00:00 2001 From: Abul Hasan <33129246+haxkd@users.noreply.github.com> Date: Thu, 5 Oct 2023 18:09:14 +0530 Subject: [PATCH 1032/1543] convert to the base minus 2 of a number (#9748) * Fix: Issue 9588 * Fix: Issue 9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: Issue 9588 * Fix: Issue #9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: Issue #9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: Issue #9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: issue #9793 * fix: issue #9793 * fix: issue #9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/base_neg2_conversion.py | 37 +++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 maths/base_neg2_conversion.py diff --git a/maths/base_neg2_conversion.py b/maths/base_neg2_conversion.py new file mode 100644 index 000000000000..81d40d37e79d --- /dev/null +++ b/maths/base_neg2_conversion.py @@ -0,0 +1,37 @@ +def decimal_to_negative_base_2(num: int) -> int: + """ + This function returns the number negative base 2 + of the decimal number of the input data. + + Args: + int: The decimal number to convert. + + Returns: + int: The negative base 2 number. + + Examples: + >>> decimal_to_negative_base_2(0) + 0 + >>> decimal_to_negative_base_2(-19) + 111101 + >>> decimal_to_negative_base_2(4) + 100 + >>> decimal_to_negative_base_2(7) + 11011 + """ + if num == 0: + return 0 + ans = "" + while num != 0: + num, rem = divmod(num, -2) + if rem < 0: + rem += 2 + num += 1 + ans = str(rem) + ans + return int(ans) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 9bfc314e878e36a5f5d8974ec188ad7f0db8c5a1 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Thu, 5 Oct 2023 17:39:29 +0500 Subject: [PATCH 1033/1543] hardy_ramanujanalgo type annotation (#9799) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Added type annotation. 
--- maths/hardy_ramanujanalgo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/hardy_ramanujanalgo.py b/maths/hardy_ramanujanalgo.py index 6929533fc389..31ec76fbe10b 100644 --- a/maths/hardy_ramanujanalgo.py +++ b/maths/hardy_ramanujanalgo.py @@ -4,7 +4,7 @@ import math -def exact_prime_factor_count(n): +def exact_prime_factor_count(n: int) -> int: """ >>> exact_prime_factor_count(51242183) 3 From 6643c955376174c307c982b1d5cc39778c40bea1 Mon Sep 17 00:00:00 2001 From: Adebisi Ahmed Date: Thu, 5 Oct 2023 14:18:54 +0100 Subject: [PATCH 1034/1543] add gas station (#9446) * feat: add gas station * make code more readable make code more readable * update test * Update gas_station.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tuple[GasStation, ...] * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- greedy_methods/gas_station.py | 97 +++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 greedy_methods/gas_station.py diff --git a/greedy_methods/gas_station.py b/greedy_methods/gas_station.py new file mode 100644 index 000000000000..2427375d2664 --- /dev/null +++ b/greedy_methods/gas_station.py @@ -0,0 +1,97 @@ +""" +Task: +There are n gas stations along a circular route, where the amount of gas +at the ith station is gas_quantities[i]. + +You have a car with an unlimited gas tank and it costs costs[i] of gas +to travel from the ith station to its next (i + 1)th station. +You begin the journey with an empty tank at one of the gas stations. + +Given two integer arrays gas_quantities and costs, return the starting +gas station's index if you can travel around the circuit once +in the clockwise direction otherwise, return -1. +If there exists a solution, it is guaranteed to be unique + +Reference: https://leetcode.com/problems/gas-station/description + +Implementation notes: +First, check whether the total gas is enough to complete the journey. If not, return -1. +However, if there is enough gas, it is guaranteed that there is a valid +starting index to reach the end of the journey. +Greedily calculate the net gain (gas_quantity - cost) at each station. +If the net gain ever goes below 0 while iterating through the stations, +start checking from the next station. + +""" +from dataclasses import dataclass + + +@dataclass +class GasStation: + gas_quantity: int + cost: int + + +def get_gas_stations( + gas_quantities: list[int], costs: list[int] +) -> tuple[GasStation, ...]: + """ + This function returns a tuple of gas stations. + + Args: + gas_quantities: Amount of gas available at each station + costs: The cost of gas required to move from one station to the next + + Returns: + A tuple of gas stations + + >>> gas_stations = get_gas_stations([1, 2, 3, 4, 5], [3, 4, 5, 1, 2]) + >>> len(gas_stations) + 5 + >>> gas_stations[0] + GasStation(gas_quantity=1, cost=3) + >>> gas_stations[-1] + GasStation(gas_quantity=5, cost=2) + """ + return tuple( + GasStation(quantity, cost) for quantity, cost in zip(gas_quantities, costs) + ) + + +def can_complete_journey(gas_stations: tuple[GasStation, ...]) -> int: + """ + This function returns the index from which to start the journey + in order to reach the end. 
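+    For example, with gas [1, 2, 3, 4, 5] and costs [3, 4, 5, 1, 2] the net
+    gains per station are [-2, -2, -2, 3, 3]; the running net dips below zero
+    at indices 0-2, so the candidate start keeps advancing and settles on
+    index 3, where the net stays non-negative for the rest of the scan.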
+ + Args: + gas_quantities [list]: Amount of gas available at each station + cost [list]: The cost of gas required to move from one station to the next + + Returns: + start [int]: start index needed to complete the journey + + Examples: + >>> can_complete_journey(get_gas_stations([1, 2, 3, 4, 5], [3, 4, 5, 1, 2])) + 3 + >>> can_complete_journey(get_gas_stations([2, 3, 4], [3, 4, 3])) + -1 + """ + total_gas = sum(gas_station.gas_quantity for gas_station in gas_stations) + total_cost = sum(gas_station.cost for gas_station in gas_stations) + if total_gas < total_cost: + return -1 + + start = 0 + net = 0 + for i, gas_station in enumerate(gas_stations): + net += gas_station.gas_quantity - gas_station.cost + if net < 0: + start = i + 1 + net = 0 + return start + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 55ee273419ae76ddeda250374921644615b88393 Mon Sep 17 00:00:00 2001 From: Wei Jiang <42140605+Jiang15@users.noreply.github.com> Date: Thu, 5 Oct 2023 16:00:48 +0200 Subject: [PATCH 1035/1543] [bug fixing] Edge case of the double ended queue (#9823) * fix the edge case of the double ended queue pop the last element * refactoring doc --------- Co-authored-by: Jiang15 --- data_structures/queue/double_ended_queue.py | 62 +++++++++++++++------ 1 file changed, 45 insertions(+), 17 deletions(-) diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 44dc863b9a4e..17a23038d288 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -242,12 +242,20 @@ def pop(self) -> Any: Removes the last element of the deque and returns it. Time complexity: O(1) @returns topop.val: the value of the node to pop. - >>> our_deque = Deque([1, 2, 3, 15182]) - >>> our_popped = our_deque.pop() - >>> our_popped + >>> our_deque1 = Deque([1]) + >>> our_popped1 = our_deque1.pop() + >>> our_popped1 + 1 + >>> our_deque1 + [] + + >>> our_deque2 = Deque([1, 2, 3, 15182]) + >>> our_popped2 = our_deque2.pop() + >>> our_popped2 15182 - >>> our_deque + >>> our_deque2 [1, 2, 3] + >>> from collections import deque >>> deque_collections = deque([1, 2, 3, 15182]) >>> collections_popped = deque_collections.pop() @@ -255,18 +263,24 @@ def pop(self) -> Any: 15182 >>> deque_collections deque([1, 2, 3]) - >>> list(our_deque) == list(deque_collections) + >>> list(our_deque2) == list(deque_collections) True - >>> our_popped == collections_popped + >>> our_popped2 == collections_popped True """ # make sure the deque has elements to pop assert not self.is_empty(), "Deque is empty." topop = self._back - self._back = self._back.prev_node # set new back - # drop the last node - python will deallocate memory automatically - self._back.next_node = None + # if only one element in the queue: point the front and back to None + # else remove one element from back + if self._front == self._back: + self._front = None + self._back = None + else: + self._back = self._back.prev_node # set new back + # drop the last node, python will deallocate memory automatically + self._back.next_node = None self._len -= 1 @@ -277,11 +291,17 @@ def popleft(self) -> Any: Removes the first element of the deque and returns it. Time complexity: O(1) @returns topop.val: the value of the node to pop. 
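+        When the deque holds a single node, front and back point to the same
+        node, so both are reset to None rather than following a neighbour
+        that does not exist.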
- >>> our_deque = Deque([15182, 1, 2, 3]) - >>> our_popped = our_deque.popleft() - >>> our_popped + >>> our_deque1 = Deque([1]) + >>> our_popped1 = our_deque1.pop() + >>> our_popped1 + 1 + >>> our_deque1 + [] + >>> our_deque2 = Deque([15182, 1, 2, 3]) + >>> our_popped2 = our_deque2.popleft() + >>> our_popped2 15182 - >>> our_deque + >>> our_deque2 [1, 2, 3] >>> from collections import deque >>> deque_collections = deque([15182, 1, 2, 3]) @@ -290,17 +310,23 @@ def popleft(self) -> Any: 15182 >>> deque_collections deque([1, 2, 3]) - >>> list(our_deque) == list(deque_collections) + >>> list(our_deque2) == list(deque_collections) True - >>> our_popped == collections_popped + >>> our_popped2 == collections_popped True """ # make sure the deque has elements to pop assert not self.is_empty(), "Deque is empty." topop = self._front - self._front = self._front.next_node # set new front and drop the first node - self._front.prev_node = None + # if only one element in the queue: point the front and back to None + # else remove one element from front + if self._front == self._back: + self._front = None + self._back = None + else: + self._front = self._front.next_node # set new front and drop the first node + self._front.prev_node = None self._len -= 1 @@ -432,3 +458,5 @@ def __repr__(self) -> str: import doctest doctest.testmod() + dq = Deque([3]) + dq.pop() From deb0480b3a07e50b93f88d4351d1fce000574d05 Mon Sep 17 00:00:00 2001 From: Aasheesh <126905285+AasheeshLikePanner@users.noreply.github.com> Date: Thu, 5 Oct 2023 19:37:44 +0530 Subject: [PATCH 1036/1543] Changing the directory of sigmoid_linear_unit.py (#9824) * Changing the directory of sigmoid_linear_unit.py * Delete neural_network/activation_functions/__init__.py --------- Co-authored-by: Tianyi Zheng --- .../activation_functions}/sigmoid_linear_unit.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {maths => neural_network/activation_functions}/sigmoid_linear_unit.py (100%) diff --git a/maths/sigmoid_linear_unit.py b/neural_network/activation_functions/sigmoid_linear_unit.py similarity index 100% rename from maths/sigmoid_linear_unit.py rename to neural_network/activation_functions/sigmoid_linear_unit.py From 87494f1fa1022368d154477bdc035fd01f9e4382 Mon Sep 17 00:00:00 2001 From: Parth <100679824+pa-kh039@users.noreply.github.com> Date: Thu, 5 Oct 2023 21:51:28 +0530 Subject: [PATCH 1037/1543] largest divisible subset (#9825) * largest divisible subset * minor tweaks * adding more test cases Co-authored-by: Christian Clauss * improving code for better readability Co-authored-by: Christian Clauss * update Co-authored-by: Christian Clauss * update Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * suggested changes done, and further modfications * final update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update largest_divisible_subset.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update largest_divisible_subset.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../largest_divisible_subset.py | 74 +++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 dynamic_programming/largest_divisible_subset.py diff --git a/dynamic_programming/largest_divisible_subset.py b/dynamic_programming/largest_divisible_subset.py new file mode 
100644 index 000000000000..db38636e29db --- /dev/null +++ b/dynamic_programming/largest_divisible_subset.py @@ -0,0 +1,74 @@ +from __future__ import annotations + + +def largest_divisible_subset(items: list[int]) -> list[int]: + """ + Algorithm to find the biggest subset in the given array such that for any 2 elements + x and y in the subset, either x divides y or y divides x. + >>> largest_divisible_subset([1, 16, 7, 8, 4]) + [16, 8, 4, 1] + >>> largest_divisible_subset([1, 2, 3]) + [2, 1] + >>> largest_divisible_subset([-1, -2, -3]) + [-3] + >>> largest_divisible_subset([1, 2, 4, 8]) + [8, 4, 2, 1] + >>> largest_divisible_subset((1, 2, 4, 8)) + [8, 4, 2, 1] + >>> largest_divisible_subset([1, 1, 1]) + [1, 1, 1] + >>> largest_divisible_subset([0, 0, 0]) + [0, 0, 0] + >>> largest_divisible_subset([-1, -1, -1]) + [-1, -1, -1] + >>> largest_divisible_subset([]) + [] + """ + # Sort the array in ascending order as the sequence does not matter we only have to + # pick up a subset. + items = sorted(items) + + number_of_items = len(items) + + # Initialize memo with 1s and hash with increasing numbers + memo = [1] * number_of_items + hash_array = list(range(number_of_items)) + + # Iterate through the array + for i, item in enumerate(items): + for prev_index in range(i): + if ((items[prev_index] != 0 and item % items[prev_index]) == 0) and ( + (1 + memo[prev_index]) > memo[i] + ): + memo[i] = 1 + memo[prev_index] + hash_array[i] = prev_index + + ans = -1 + last_index = -1 + + # Find the maximum length and its corresponding index + for i, memo_item in enumerate(memo): + if memo_item > ans: + ans = memo_item + last_index = i + + # Reconstruct the divisible subset + if last_index == -1: + return [] + result = [items[last_index]] + while hash_array[last_index] != last_index: + last_index = hash_array[last_index] + result.append(items[last_index]) + + return result + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + + items = [1, 16, 7, 8, 4] + print( + f"The longest divisible subset of {items} is {largest_divisible_subset(items)}." 
+ ) From b76115e8d184fbad1d6c400fcdd964e821f09e9b Mon Sep 17 00:00:00 2001 From: Pronay Debnath Date: Thu, 5 Oct 2023 23:03:05 +0530 Subject: [PATCH 1038/1543] Updated check_bipartite_graph_dfs.py (#9525) * Create dijkstra_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dijkstra_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dijkstra_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dijkstra_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Delete greedy_methods/dijkstra_algorithm.py * Update check_bipartite_graph_dfs.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update check_bipartite_graph_dfs.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update graphs/check_bipartite_graph_dfs.py Co-authored-by: Christian Clauss * Update graphs/check_bipartite_graph_dfs.py Co-authored-by: Christian Clauss * Update check_bipartite_graph_dfs.py * Update check_bipartite_graph_dfs.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update check_bipartite_graph_dfs.py * Update check_bipartite_graph_dfs.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update check_bipartite_graph_dfs.py * Update check_bipartite_graph_dfs.py * Update check_bipartite_graph_dfs.py * Let's use self-documenting variable names This is complex code so let's use self-documenting function and variable names to help readers to understand. We should not shorten names to simplify the code formatting but use understandable name and leave to code formatting to psf/black. I am not sure if `nbor` was supposed to be `neighbour`. ;-) * Update check_bipartite_graph_dfs.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- graphs/check_bipartite_graph_dfs.py | 73 +++++++++++++++++++---------- 1 file changed, 47 insertions(+), 26 deletions(-) diff --git a/graphs/check_bipartite_graph_dfs.py b/graphs/check_bipartite_graph_dfs.py index fd644230449c..b13a9eb95afb 100644 --- a/graphs/check_bipartite_graph_dfs.py +++ b/graphs/check_bipartite_graph_dfs.py @@ -1,34 +1,55 @@ -# Check whether Graph is Bipartite or Not using DFS +from collections import defaultdict -# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, -# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex -# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, -# or u belongs to V and v to U. We can also say that there is no edge that connects -# vertices of same set. -def check_bipartite_dfs(graph): - visited = [False] * len(graph) - color = [-1] * len(graph) +def is_bipartite(graph: defaultdict[int, list[int]]) -> bool: + """ + Check whether a graph is Bipartite or not using Depth-First Search (DFS). 
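+    The implementation below 2-colours the graph while traversing: every
+    visited vertex gets colour 0 or 1, and if a neighbour already carries the
+    same colour as the current vertex the graph cannot be bipartite.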
- def dfs(v, c): - visited[v] = True - color[v] = c - for u in graph[v]: - if not visited[u]: - dfs(u, 1 - c) + A Bipartite Graph is a graph whose vertices can be divided into two independent + sets, U and V such that every edge (u, v) either connects a vertex from + U to V or a vertex from V to U. In other words, for every edge (u, v), + either u belongs to U and v to V, or u belongs to V and v to U. There is + no edge that connects vertices of the same set. - for i in range(len(graph)): - if not visited[i]: - dfs(i, 0) + Args: + graph: An adjacency list representing the graph. - for i in range(len(graph)): - for j in graph[i]: - if color[i] == color[j]: - return False + Returns: + True if there's no edge that connects vertices of the same set, False otherwise. - return True + Examples: + >>> is_bipartite( + ... defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4], 3: [1], 4: [2]}) + ... ) + False + >>> is_bipartite(defaultdict(list, {0: [1, 2], 1: [0, 2], 2: [0, 1]})) + True + """ + def depth_first_search(node: int, color: int) -> bool: + visited[node] = color + return any( + visited[neighbour] == color + or ( + visited[neighbour] == -1 + and not depth_first_search(neighbour, 1 - color) + ) + for neighbour in graph[node] + ) -# Adjacency list of graph -graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} -print(check_bipartite_dfs(graph)) + visited: defaultdict[int, int] = defaultdict(lambda: -1) + + return all( + not (visited[node] == -1 and not depth_first_search(node, 0)) for node in graph + ) + + +if __name__ == "__main__": + import doctest + + result = doctest.testmod() + + if result.failed: + print(f"{result.failed} test(s) failed.") + else: + print("All tests passed!") From cffdf99c55dcda89a5ce0fb2bf3cb685d168d136 Mon Sep 17 00:00:00 2001 From: Muhammad Umer Farooq <115654418+Muhammadummerr@users.noreply.github.com> Date: Thu, 5 Oct 2023 23:44:55 +0500 Subject: [PATCH 1039/1543] Updated prime_numbers.py testcases. (#9851) * Updated prime_numbers.py testcases. * revert __main__ code. 
--- maths/prime_numbers.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/maths/prime_numbers.py b/maths/prime_numbers.py index c5297ed9264c..38cc6670385d 100644 --- a/maths/prime_numbers.py +++ b/maths/prime_numbers.py @@ -17,8 +17,8 @@ def slow_primes(max_n: int) -> Generator[int, None, None]: [2, 3, 5, 7, 11] >>> list(slow_primes(33)) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31] - >>> list(slow_primes(10000))[-1] - 9973 + >>> list(slow_primes(1000))[-1] + 997 """ numbers: Generator = (i for i in range(1, (max_n + 1))) for i in (n for n in numbers if n > 1): @@ -44,8 +44,8 @@ def primes(max_n: int) -> Generator[int, None, None]: [2, 3, 5, 7, 11] >>> list(primes(33)) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31] - >>> list(primes(10000))[-1] - 9973 + >>> list(primes(1000))[-1] + 997 """ numbers: Generator = (i for i in range(1, (max_n + 1))) for i in (n for n in numbers if n > 1): @@ -73,8 +73,8 @@ def fast_primes(max_n: int) -> Generator[int, None, None]: [2, 3, 5, 7, 11] >>> list(fast_primes(33)) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31] - >>> list(fast_primes(10000))[-1] - 9973 + >>> list(fast_primes(1000))[-1] + 997 """ numbers: Generator = (i for i in range(1, (max_n + 1), 2)) # It's useless to test even numbers as they will not be prime From 5869fda74245b55a3bda4ccc5ac62a84ab40766f Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 5 Oct 2023 23:55:13 +0200 Subject: [PATCH 1040/1543] print reverse: A LinkedList with a tail pointer (#9875) * print reverse: A LinkedList with a tail pointer * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 7 +- data_structures/linked_list/print_reverse.py | 134 +++++++++++++------ 2 files changed, 101 insertions(+), 40 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index b0ba3c3852da..c199a4329202 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -50,6 +50,7 @@ * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) * [Is Even](bit_manipulation/is_even.py) * [Is Power Of Two](bit_manipulation/is_power_of_two.py) + * [Largest Pow Of Two Le Num](bit_manipulation/largest_pow_of_two_le_num.py) * [Missing Number](bit_manipulation/missing_number.py) * [Numbers Different Signs](bit_manipulation/numbers_different_signs.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) @@ -322,6 +323,7 @@ * [Integer Partition](dynamic_programming/integer_partition.py) * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) * [Knapsack](dynamic_programming/knapsack.py) + * [Largest Divisible Subset](dynamic_programming/largest_divisible_subset.py) * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) * [Longest Common Substring](dynamic_programming/longest_common_substring.py) * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) @@ -460,6 +462,7 @@ ## Greedy Methods * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) + * [Gas Station](greedy_methods/gas_station.py) * [Minimum Waiting Time](greedy_methods/minimum_waiting_time.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) @@ -542,6 +545,7 @@ * [Average Median](maths/average_median.py) * [Average Mode](maths/average_mode.py) * [Bailey Borwein Plouffe](maths/bailey_borwein_plouffe.py) + * [Base Neg2 Conversion](maths/base_neg2_conversion.py) * [Basic Maths](maths/basic_maths.py) * [Bell 
Numbers](maths/bell_numbers.py) * [Binary Exp Mod](maths/binary_exp_mod.py) @@ -657,7 +661,6 @@ * [P Series](maths/series/p_series.py) * [Sieve Of Eratosthenes](maths/sieve_of_eratosthenes.py) * [Sigmoid](maths/sigmoid.py) - * [Sigmoid Linear Unit](maths/sigmoid_linear_unit.py) * [Signum](maths/signum.py) * [Simpson Rule](maths/simpson_rule.py) * [Simultaneous Linear Equation Solver](maths/simultaneous_linear_equation_solver.py) @@ -716,6 +719,7 @@ * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) + * [Sigmoid Linear Unit](neural_network/activation_functions/sigmoid_linear_unit.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Perceptron](neural_network/perceptron.py) @@ -1180,6 +1184,7 @@ * [Naive String Search](strings/naive_string_search.py) * [Ngram](strings/ngram.py) * [Palindrome](strings/palindrome.py) + * [Pig Latin](strings/pig_latin.py) * [Prefix Function](strings/prefix_function.py) * [Rabin Karp](strings/rabin_karp.py) * [Remove Duplicate](strings/remove_duplicate.py) diff --git a/data_structures/linked_list/print_reverse.py b/data_structures/linked_list/print_reverse.py index f83d5607ffdd..a023745dee69 100644 --- a/data_structures/linked_list/print_reverse.py +++ b/data_structures/linked_list/print_reverse.py @@ -1,22 +1,91 @@ from __future__ import annotations +from collections.abc import Iterable, Iterator +from dataclasses import dataclass + +@dataclass class Node: - def __init__(self, data=None): - self.data = data - self.next = None + data: int + next_node: Node | None = None + + +class LinkedList: + """A class to represent a Linked List. + Use a tail pointer to speed up the append() operation. + """ + + def __init__(self) -> None: + """Initialize a LinkedList with the head node set to None. + >>> linked_list = LinkedList() + >>> (linked_list.head, linked_list.tail) + (None, None) + """ + self.head: Node | None = None + self.tail: Node | None = None # Speeds up the append() operation + + def __iter__(self) -> Iterator[int]: + """Iterate the LinkedList yielding each Node's data. + >>> linked_list = LinkedList() + >>> items = (1, 2, 3, 4, 5) + >>> linked_list.extend(items) + >>> tuple(linked_list) == items + True + """ + node = self.head + while node: + yield node.data + node = node.next_node + + def __repr__(self) -> str: + """Returns a string representation of the LinkedList. + >>> linked_list = LinkedList() + >>> str(linked_list) + '' + >>> linked_list.append(1) + >>> str(linked_list) + '1' + >>> linked_list.extend([2, 3, 4, 5]) + >>> str(linked_list) + '1 -> 2 -> 3 -> 4 -> 5' + """ + return " -> ".join([str(data) for data in self]) - def __repr__(self): - """Returns a visual representation of the node and all its following nodes.""" - string_rep = [] - temp = self - while temp: - string_rep.append(f"{temp.data}") - temp = temp.next - return "->".join(string_rep) + def append(self, data: int) -> None: + """Appends a new node with the given data to the end of the LinkedList. 
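+        Thanks to the tail pointer this runs in O(1); without it, append
+        would have to walk the whole list from the head on every call.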
+ >>> linked_list = LinkedList() + >>> str(linked_list) + '' + >>> linked_list.append(1) + >>> str(linked_list) + '1' + >>> linked_list.append(2) + >>> str(linked_list) + '1 -> 2' + """ + if self.tail: + self.tail.next_node = self.tail = Node(data) + else: + self.head = self.tail = Node(data) + def extend(self, items: Iterable[int]) -> None: + """Appends each item to the end of the LinkedList. + >>> linked_list = LinkedList() + >>> linked_list.extend([]) + >>> str(linked_list) + '' + >>> linked_list.extend([1, 2]) + >>> str(linked_list) + '1 -> 2' + >>> linked_list.extend([3,4]) + >>> str(linked_list) + '1 -> 2 -> 3 -> 4' + """ + for item in items: + self.append(item) -def make_linked_list(elements_list: list): + +def make_linked_list(elements_list: Iterable[int]) -> LinkedList: """Creates a Linked List from the elements of the given sequence (list/tuple) and returns the head of the Linked List. >>> make_linked_list([]) @@ -28,43 +97,30 @@ def make_linked_list(elements_list: list): >>> make_linked_list(['abc']) abc >>> make_linked_list([7, 25]) - 7->25 + 7 -> 25 """ if not elements_list: raise Exception("The Elements List is empty") - current = head = Node(elements_list[0]) - for i in range(1, len(elements_list)): - current.next = Node(elements_list[i]) - current = current.next - return head + linked_list = LinkedList() + linked_list.extend(elements_list) + return linked_list -def print_reverse(head_node: Node) -> None: +def in_reverse(linked_list: LinkedList) -> str: """Prints the elements of the given Linked List in reverse order - >>> print_reverse([]) - >>> linked_list = make_linked_list([69, 88, 73]) - >>> print_reverse(linked_list) - 73 - 88 - 69 + >>> in_reverse(LinkedList()) + '' + >>> in_reverse(make_linked_list([69, 88, 73])) + '73 <- 88 <- 69' """ - if head_node is not None and isinstance(head_node, Node): - print_reverse(head_node.next) - print(head_node.data) + return " <- ".join(str(line) for line in reversed(tuple(linked_list))) -def main(): +if __name__ == "__main__": from doctest import testmod testmod() - - linked_list = make_linked_list([14, 52, 14, 12, 43]) - print("Linked List:") - print(linked_list) - print("Elements in Reverse:") - print_reverse(linked_list) - - -if __name__ == "__main__": - main() + linked_list = make_linked_list((14, 52, 14, 12, 43)) + print(f"Linked List: {linked_list}") + print(f"Reverse List: {in_reverse(linked_list)}") From 7f94a73eec45edfd215e8f07148c9c657b4e4b89 Mon Sep 17 00:00:00 2001 From: Marek Mazij <112333347+Mrk-Mzj@users.noreply.github.com> Date: Fri, 6 Oct 2023 00:05:23 +0200 Subject: [PATCH 1041/1543] camelCase to snake_case conversion - Fixes #9726 (#9727) * First commit camel case to snake case conversion algorithm, including numbers * code modified to not use regex --- strings/camel_case_to_snake_case.py | 60 +++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 strings/camel_case_to_snake_case.py diff --git a/strings/camel_case_to_snake_case.py b/strings/camel_case_to_snake_case.py new file mode 100644 index 000000000000..582907be2edb --- /dev/null +++ b/strings/camel_case_to_snake_case.py @@ -0,0 +1,60 @@ +def camel_to_snake_case(input_str: str) -> str: + """ + Transforms a camelCase (or PascalCase) string to snake_case + + >>> camel_to_snake_case("someRandomString") + 'some_random_string' + + >>> camel_to_snake_case("SomeRandomStr#ng") + 'some_random_str_ng' + + >>> camel_to_snake_case("123someRandom123String123") + '123_some_random_123_string_123' + + >>> 
camel_to_snake_case("123SomeRandom123String123") + '123_some_random_123_string_123' + + >>> camel_to_snake_case(123) + Traceback (most recent call last): + ... + ValueError: Expected string as input, found + + """ + + # check for invalid input type + if not isinstance(input_str, str): + msg = f"Expected string as input, found {type(input_str)}" + raise ValueError(msg) + + snake_str = "" + + for index, char in enumerate(input_str): + if char.isupper(): + snake_str += "_" + char.lower() + + # if char is lowercase but proceeded by a digit: + elif input_str[index - 1].isdigit() and char.islower(): + snake_str += "_" + char + + # if char is a digit proceeded by a letter: + elif input_str[index - 1].isalpha() and char.isnumeric(): + snake_str += "_" + char.lower() + + # if char is not alphanumeric: + elif not char.isalnum(): + snake_str += "_" + + else: + snake_str += char + + # remove leading underscore + if snake_str[0] == "_": + snake_str = snake_str[1:] + + return snake_str + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 13317e4f7f260f59e6e53595f802c9d12ec0db4a Mon Sep 17 00:00:00 2001 From: Akshay B Shetty <107768228+NinjaSoulPirate@users.noreply.github.com> Date: Fri, 6 Oct 2023 03:57:13 +0530 Subject: [PATCH 1042/1543] feat: :sparkles: calculating the resitance of resistor using color codes (#9874) --- electronics/resistor_color_code.py | 373 +++++++++++++++++++++++++++++ 1 file changed, 373 insertions(+) create mode 100644 electronics/resistor_color_code.py diff --git a/electronics/resistor_color_code.py b/electronics/resistor_color_code.py new file mode 100644 index 000000000000..b0534b813def --- /dev/null +++ b/electronics/resistor_color_code.py @@ -0,0 +1,373 @@ +""" +Title : Calculating the resistance of a n band resistor using the color codes + +Description : + Resistors resist the flow of electrical current.Each one has a value that tells how + strongly it resists current flow.This value's unit is the ohm, often noted with the + Greek letter omega: Ω. + + The colored bands on a resistor can tell you everything you need to know about its + value and tolerance, as long as you understand how to read them. The order in which + the colors are arranged is very important, and each value of resistor has its own + unique combination. + + The color coding for resistors is an international standard that is defined in IEC + 60062. + + The number of bands present in a resistor varies from three to six. These represent + significant figures, multiplier, tolerance, reliability, and temperature coefficient + Each color used for a type of band has a value assigned to it. It is read from left + to right. + All resistors will have significant figures and multiplier bands. In a three band + resistor first two bands from the left represent significant figures and the third + represents the multiplier band. + + Significant figures - The number of significant figures band in a resistor can vary + from two to three. + Colors and values associated with significant figure bands - + (Black = 0, Brown = 1, Red = 2, Orange = 3, Yellow = 4, Green = 5, Blue = 6, + Violet = 7, Grey = 8, White = 9) + + Multiplier - There will be one multiplier band in a resistor. It is multiplied with + the significant figures obtained from previous bands. 
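+    For example, significant bands Orange and Green give 35, and a Blue
+    multiplier (10^6) turns that into 35000000 Ω, as in the four band
+    doctest further below.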
+ Colors and values associated with multiplier band - + (Black = 100, Brown = 10^1, Red = 10^2, Orange = 10^3, Yellow = 10^4, Green = 10^5, + Blue = 10^6, Violet = 10^7, Grey = 10^8, White = 10^9, Gold = 10^-1, Silver = 10^-2) + Note that multiplier bands use Gold and Silver which are not used for significant + figure bands. + + Tolerance - The tolerance band is not always present. It can be seen in four band + resistors and above. This is a percentage by which the resistor value can vary. + Colors and values associated with tolerance band - + (Brown = 1%, Red = 2%, Orange = 0.05%, Yellow = 0.02%, Green = 0.5%,Blue = 0.25%, + Violet = 0.1%, Grey = 0.01%, Gold = 5%, Silver = 10%) + If no color is mentioned then by default tolerance is 20% + Note that tolerance band does not use Black and White colors. + + Temperature Coeffecient - Indicates the change in resistance of the component as + a function of ambient temperature in terms of ppm/K. + It is present in six band resistors. + Colors and values associated with Temperature coeffecient - + (Black = 250 ppm/K, Brown = 100 ppm/K, Red = 50 ppm/K, Orange = 15 ppm/K, + Yellow = 25 ppm/K, Green = 20 ppm/K, Blue = 10 ppm/K, Violet = 5 ppm/K, + Grey = 1 ppm/K) + Note that temperature coeffecient band does not use White, Gold, Silver colors. + +Sources : + https://www.calculator.net/resistor-calculator.html + https://learn.parallax.com/support/reference/resistor-color-codes + https://byjus.com/physics/resistor-colour-codes/ +""" +valid_colors: list = [ + "Black", + "Brown", + "Red", + "Orange", + "Yellow", + "Green", + "Blue", + "Violet", + "Grey", + "White", + "Gold", + "Silver", +] + +significant_figures_color_values: dict[str, int] = { + "Black": 0, + "Brown": 1, + "Red": 2, + "Orange": 3, + "Yellow": 4, + "Green": 5, + "Blue": 6, + "Violet": 7, + "Grey": 8, + "White": 9, +} + +multiplier_color_values: dict[str, float] = { + "Black": 10**0, + "Brown": 10**1, + "Red": 10**2, + "Orange": 10**3, + "Yellow": 10**4, + "Green": 10**5, + "Blue": 10**6, + "Violet": 10**7, + "Grey": 10**8, + "White": 10**9, + "Gold": 10**-1, + "Silver": 10**-2, +} + +tolerance_color_values: dict[str, float] = { + "Brown": 1, + "Red": 2, + "Orange": 0.05, + "Yellow": 0.02, + "Green": 0.5, + "Blue": 0.25, + "Violet": 0.1, + "Grey": 0.01, + "Gold": 5, + "Silver": 10, +} + +temperature_coeffecient_color_values: dict[str, int] = { + "Black": 250, + "Brown": 100, + "Red": 50, + "Orange": 15, + "Yellow": 25, + "Green": 20, + "Blue": 10, + "Violet": 5, + "Grey": 1, +} + +band_types: dict[int, dict[str, int]] = { + 3: {"significant": 2, "multiplier": 1}, + 4: {"significant": 2, "multiplier": 1, "tolerance": 1}, + 5: {"significant": 3, "multiplier": 1, "tolerance": 1}, + 6: {"significant": 3, "multiplier": 1, "tolerance": 1, "temp_coeffecient": 1}, +} + + +def get_significant_digits(colors: list) -> str: + """ + Function returns the digit associated with the color. Function takes a + list containing colors as input and returns digits as string + + >>> get_significant_digits(['Black','Blue']) + '06' + + >>> get_significant_digits(['Aqua','Blue']) + Traceback (most recent call last): + ... 
+ ValueError: Aqua is not a valid color for significant figure bands + + """ + digit = "" + for color in colors: + if color not in significant_figures_color_values: + msg = f"{color} is not a valid color for significant figure bands" + raise ValueError(msg) + digit = digit + str(significant_figures_color_values[color]) + return str(digit) + + +def get_multiplier(color: str) -> float: + """ + Function returns the multiplier value associated with the color. + Function takes color as input and returns multiplier value + + >>> get_multiplier('Gold') + 0.1 + + >>> get_multiplier('Ivory') + Traceback (most recent call last): + ... + ValueError: Ivory is not a valid color for multiplier band + + """ + if color not in multiplier_color_values: + msg = f"{color} is not a valid color for multiplier band" + raise ValueError(msg) + return multiplier_color_values[color] + + +def get_tolerance(color: str) -> float: + """ + Function returns the tolerance value associated with the color. + Function takes color as input and returns tolerance value. + + >>> get_tolerance('Green') + 0.5 + + >>> get_tolerance('Indigo') + Traceback (most recent call last): + ... + ValueError: Indigo is not a valid color for tolerance band + + """ + if color not in tolerance_color_values: + msg = f"{color} is not a valid color for tolerance band" + raise ValueError(msg) + return tolerance_color_values[color] + + +def get_temperature_coeffecient(color: str) -> int: + """ + Function returns the temperature coeffecient value associated with the color. + Function takes color as input and returns temperature coeffecient value. + + >>> get_temperature_coeffecient('Yellow') + 25 + + >>> get_temperature_coeffecient('Cyan') + Traceback (most recent call last): + ... + ValueError: Cyan is not a valid color for temperature coeffecient band + + """ + if color not in temperature_coeffecient_color_values: + msg = f"{color} is not a valid color for temperature coeffecient band" + raise ValueError(msg) + return temperature_coeffecient_color_values[color] + + +def get_band_type_count(total_number_of_bands: int, type_of_band: str) -> int: + """ + Function returns the number of bands of a given type in a resistor with n bands + Function takes total_number_of_bands and type_of_band as input and returns + number of bands belonging to that type in the given resistor + + >>> get_band_type_count(3,'significant') + 2 + + >>> get_band_type_count(2,'significant') + Traceback (most recent call last): + ... + ValueError: 2 is not a valid number of bands + + >>> get_band_type_count(3,'sign') + Traceback (most recent call last): + ... + ValueError: sign is not valid for a 3 band resistor + + >>> get_band_type_count(3,'tolerance') + Traceback (most recent call last): + ... + ValueError: tolerance is not valid for a 3 band resistor + + >>> get_band_type_count(5,'temp_coeffecient') + Traceback (most recent call last): + ... + ValueError: temp_coeffecient is not valid for a 5 band resistor + + """ + if total_number_of_bands not in band_types: + msg = f"{total_number_of_bands} is not a valid number of bands" + raise ValueError(msg) + if type_of_band not in band_types[total_number_of_bands]: + msg = f"{type_of_band} is not valid for a {total_number_of_bands} band resistor" + raise ValueError(msg) + return band_types[total_number_of_bands][type_of_band] + + +def check_validity(number_of_bands: int, colors: list) -> bool: + """ + Function checks if the input provided is valid or not. 
+ Function takes number_of_bands and colors as input and returns + True if it is valid + + >>> check_validity(3, ["Black","Blue","Orange"]) + True + + >>> check_validity(4, ["Black","Blue","Orange"]) + Traceback (most recent call last): + ... + ValueError: Expecting 4 colors, provided 3 colors + + >>> check_validity(3, ["Cyan","Red","Yellow"]) + Traceback (most recent call last): + ... + ValueError: Cyan is not a valid color + + """ + if number_of_bands >= 3 and number_of_bands <= 6: + if number_of_bands == len(colors): + for color in colors: + if color not in valid_colors: + msg = f"{color} is not a valid color" + raise ValueError(msg) + return True + else: + msg = f"Expecting {number_of_bands} colors, provided {len(colors)} colors" + raise ValueError(msg) + else: + msg = "Invalid number of bands. Resistor bands must be 3 to 6" + raise ValueError(msg) + + +def calculate_resistance(number_of_bands: int, color_code_list: list) -> dict: + """ + Function calculates the total resistance of the resistor using the color codes. + Function takes number_of_bands, color_code_list as input and returns + resistance + + >>> calculate_resistance(3, ["Black","Blue","Orange"]) + {'resistance': '6000Ω ±20% '} + + >>> calculate_resistance(4, ["Orange","Green","Blue","Gold"]) + {'resistance': '35000000Ω ±5% '} + + >>> calculate_resistance(5, ["Violet","Brown","Grey","Silver","Green"]) + {'resistance': '7.18Ω ±0.5% '} + + >>> calculate_resistance(6, ["Red","Green","Blue","Yellow","Orange","Grey"]) + {'resistance': '2560000Ω ±0.05% 1 ppm/K'} + + >>> calculate_resistance(0, ["Violet","Brown","Grey","Silver","Green"]) + Traceback (most recent call last): + ... + ValueError: Invalid number of bands. Resistor bands must be 3 to 6 + + >>> calculate_resistance(4, ["Violet","Brown","Grey","Silver","Green"]) + Traceback (most recent call last): + ... + ValueError: Expecting 4 colors, provided 5 colors + + >>> calculate_resistance(4, ["Violet","Silver","Brown","Grey"]) + Traceback (most recent call last): + ... + ValueError: Silver is not a valid color for significant figure bands + + >>> calculate_resistance(4, ["Violet","Blue","Lime","Grey"]) + Traceback (most recent call last): + ... 
+ ValueError: Lime is not a valid color + + """ + is_valid = check_validity(number_of_bands, color_code_list) + if is_valid: + number_of_significant_bands = get_band_type_count( + number_of_bands, "significant" + ) + significant_colors = color_code_list[:number_of_significant_bands] + significant_digits = int(get_significant_digits(significant_colors)) + multiplier_color = color_code_list[number_of_significant_bands] + multiplier = get_multiplier(multiplier_color) + if number_of_bands == 3: + tolerance_color = None + else: + tolerance_color = color_code_list[number_of_significant_bands + 1] + tolerance = ( + 20 if tolerance_color is None else get_tolerance(str(tolerance_color)) + ) + if number_of_bands != 6: + temperature_coeffecient_color = None + else: + temperature_coeffecient_color = color_code_list[ + number_of_significant_bands + 2 + ] + temperature_coeffecient = ( + 0 + if temperature_coeffecient_color is None + else get_temperature_coeffecient(str(temperature_coeffecient_color)) + ) + resisitance = significant_digits * multiplier + if temperature_coeffecient == 0: + answer = f"{resisitance}Ω ±{tolerance}% " + else: + answer = f"{resisitance}Ω ±{tolerance}% {temperature_coeffecient} ppm/K" + return {"resistance": answer} + else: + raise ValueError("Input is invalid") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b316a9612826905b963a465f0f02febaed761ccc Mon Sep 17 00:00:00 2001 From: Abul Hasan <33129246+haxkd@users.noreply.github.com> Date: Fri, 6 Oct 2023 04:15:10 +0530 Subject: [PATCH 1043/1543] Match a pattern and String using backtracking (#9861) * Fix: Issue 9588 * Fix: Issue 9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: Issue 9588 * Fix: Issue #9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: Issue #9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: Issue #9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: issue #9793 * fix: issue #9793 * fix: issue #9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: issue #9844 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: issue #9844 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: issue #9844 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: issue #9844 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- backtracking/match_word_pattern.py | 61 ++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 backtracking/match_word_pattern.py diff --git a/backtracking/match_word_pattern.py b/backtracking/match_word_pattern.py new file mode 100644 index 000000000000..bfa9b1354d51 --- /dev/null +++ b/backtracking/match_word_pattern.py @@ -0,0 +1,61 @@ +def match_word_pattern(pattern: str, input_string: str) -> bool: + """ + Determine if a given pattern matches a string using backtracking. + + pattern: The pattern to match. + input_string: The string to match against the pattern. 
+ return: True if the pattern matches the string, False otherwise. + + >>> match_word_pattern("aba", "GraphTreesGraph") + True + + >>> match_word_pattern("xyx", "PythonRubyPython") + True + + >>> match_word_pattern("GG", "PythonJavaPython") + False + """ + + def backtrack(pattern_index: int, str_index: int) -> bool: + """ + >>> backtrack(0, 0) + True + + >>> backtrack(0, 1) + True + + >>> backtrack(0, 4) + False + """ + if pattern_index == len(pattern) and str_index == len(input_string): + return True + if pattern_index == len(pattern) or str_index == len(input_string): + return False + char = pattern[pattern_index] + if char in pattern_map: + mapped_str = pattern_map[char] + if input_string.startswith(mapped_str, str_index): + return backtrack(pattern_index + 1, str_index + len(mapped_str)) + else: + return False + for end in range(str_index + 1, len(input_string) + 1): + substr = input_string[str_index:end] + if substr in str_map: + continue + pattern_map[char] = substr + str_map[substr] = char + if backtrack(pattern_index + 1, end): + return True + del pattern_map[char] + del str_map[substr] + return False + + pattern_map: dict[str, str] = {} + str_map: dict[str, str] = {} + return backtrack(0, 0) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From cd684fd94762c4df5529d19d1ede6fc927428815 Mon Sep 17 00:00:00 2001 From: Dean Bring Date: Thu, 5 Oct 2023 15:45:40 -0700 Subject: [PATCH 1044/1543] Added algorithm to deeply clone a graph (#9765) * Added algorithm to deeply clone a graph * Fixed file name and removed a function call * Removed nested function and fixed class parameter types * Fixed doctests * bug fix * Added class decorator * Updated doctests and fixed precommit errors * Cleaned up code * Simplified doctest * Added doctests * Code simplification --- graphs/deep_clone_graph.py | 77 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 graphs/deep_clone_graph.py diff --git a/graphs/deep_clone_graph.py b/graphs/deep_clone_graph.py new file mode 100644 index 000000000000..55678b4c01ec --- /dev/null +++ b/graphs/deep_clone_graph.py @@ -0,0 +1,77 @@ +""" +LeetCode 133. Clone Graph +https://leetcode.com/problems/clone-graph/ + +Given a reference of a node in a connected undirected graph. + +Return a deep copy (clone) of the graph. + +Each node in the graph contains a value (int) and a list (List[Node]) of its +neighbors. +""" +from dataclasses import dataclass + + +@dataclass +class Node: + value: int = 0 + neighbors: list["Node"] | None = None + + def __post_init__(self) -> None: + """ + >>> Node(3).neighbors + [] + """ + self.neighbors = self.neighbors or [] + + def __hash__(self) -> int: + """ + >>> hash(Node(3)) != 0 + True + """ + return id(self) + + +def clone_graph(node: Node | None) -> Node | None: + """ + This function returns a clone of a connected undirected graph. 
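+    The copy is built iteratively: a stack drives a depth-first walk that
+    creates one new Node per original node (tracked in a map from originals
+    to clones), and a second pass wires up the cloned neighbor lists.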
+ >>> clone_graph(Node(1)) + Node(value=1, neighbors=[]) + >>> clone_graph(Node(1, [Node(2)])) + Node(value=1, neighbors=[Node(value=2, neighbors=[])]) + >>> clone_graph(None) is None + True + """ + if not node: + return None + + originals_to_clones = {} # map nodes to clones + + stack = [node] + + while stack: + original = stack.pop() + + if original in originals_to_clones: + continue + + originals_to_clones[original] = Node(original.value) + + stack.extend(original.neighbors or []) + + for original, clone in originals_to_clones.items(): + for neighbor in original.neighbors or []: + cloned_neighbor = originals_to_clones[neighbor] + + if not clone.neighbors: + clone.neighbors = [] + + clone.neighbors.append(cloned_neighbor) + + return originals_to_clones[node] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 9200c64464492117bff792f1f43b19050070af4a Mon Sep 17 00:00:00 2001 From: Aroson <74296409+Aroson1@users.noreply.github.com> Date: Fri, 6 Oct 2023 04:46:51 +0530 Subject: [PATCH 1045/1543] Added Wheatstone Bridge Algorithm (#9872) * Add files via upload * Update wheatstone_bridge.py * Update wheatstone_bridge.py --- electronics/wheatstone_bridge.py | 41 ++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 electronics/wheatstone_bridge.py diff --git a/electronics/wheatstone_bridge.py b/electronics/wheatstone_bridge.py new file mode 100644 index 000000000000..3529a09339c4 --- /dev/null +++ b/electronics/wheatstone_bridge.py @@ -0,0 +1,41 @@ +# https://en.wikipedia.org/wiki/Wheatstone_bridge +from __future__ import annotations + + +def wheatstone_solver( + resistance_1: float, resistance_2: float, resistance_3: float +) -> float: + """ + This function can calculate the unknown resistance in an wheatstone network, + given that the three other resistances in the network are known. + The formula to calculate the same is: + + --------------- + |Rx=(R2/R1)*R3| + --------------- + + Usage examples: + >>> wheatstone_solver(resistance_1=2, resistance_2=4, resistance_3=5) + 10.0 + >>> wheatstone_solver(resistance_1=356, resistance_2=234, resistance_3=976) + 641.5280898876405 + >>> wheatstone_solver(resistance_1=2, resistance_2=-1, resistance_3=2) + Traceback (most recent call last): + ... + ValueError: All resistance values must be positive + >>> wheatstone_solver(resistance_1=0, resistance_2=0, resistance_3=2) + Traceback (most recent call last): + ... 
+ ValueError: All resistance values must be positive + """ + + if resistance_1 <= 0 or resistance_2 <= 0 or resistance_3 <= 0: + raise ValueError("All resistance values must be positive") + else: + return float((resistance_2 / resistance_1) * resistance_3) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 19fc788197474f75c56cc3755582cc583be9e52f Mon Sep 17 00:00:00 2001 From: ojas wani <52542740+ojas-wani@users.noreply.github.com> Date: Thu, 5 Oct 2023 16:43:45 -0700 Subject: [PATCH 1046/1543] added laplacian_filter file (#9783) * added laplacian_filter file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * required changes to laplacian file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian_filter.py * update laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * changed laplacian_filter.py * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update laplacian_filter.py * Add a test --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../filters/laplacian_filter.py | 81 +++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 digital_image_processing/filters/laplacian_filter.py diff --git a/digital_image_processing/filters/laplacian_filter.py b/digital_image_processing/filters/laplacian_filter.py new file mode 100644 index 000000000000..69b9616e4d30 --- /dev/null +++ b/digital_image_processing/filters/laplacian_filter.py @@ -0,0 +1,81 @@ +# @Author : ojas-wani +# @File : laplacian_filter.py +# @Date : 10/04/2023 + +import numpy as np +from cv2 import ( + BORDER_DEFAULT, + COLOR_BGR2GRAY, + CV_64F, + cvtColor, + filter2D, + imread, + imshow, + waitKey, +) + +from digital_image_processing.filters.gaussian_filter 
import gaussian_filter + + +def my_laplacian(src: np.ndarray, ksize: int) -> np.ndarray: + """ + :param src: the source image, which should be a grayscale or color image. + :param ksize: the size of the kernel used to compute the Laplacian filter, + which can be 1, 3, 5, or 7. + + >>> my_laplacian(src=np.array([]), ksize=0) + Traceback (most recent call last): + ... + ValueError: ksize must be in (1, 3, 5, 7) + """ + kernels = { + 1: np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]]), + 3: np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]), + 5: np.array( + [ + [0, 0, -1, 0, 0], + [0, -1, -2, -1, 0], + [-1, -2, 16, -2, -1], + [0, -1, -2, -1, 0], + [0, 0, -1, 0, 0], + ] + ), + 7: np.array( + [ + [0, 0, 0, -1, 0, 0, 0], + [0, 0, -2, -3, -2, 0, 0], + [0, -2, -7, -10, -7, -2, 0], + [-1, -3, -10, 68, -10, -3, -1], + [0, -2, -7, -10, -7, -2, 0], + [0, 0, -2, -3, -2, 0, 0], + [0, 0, 0, -1, 0, 0, 0], + ] + ), + } + if ksize not in kernels: + msg = f"ksize must be in {tuple(kernels)}" + raise ValueError(msg) + + # Apply the Laplacian kernel using convolution + return filter2D( + src, CV_64F, kernels[ksize], 0, borderType=BORDER_DEFAULT, anchor=(0, 0) + ) + + +if __name__ == "__main__": + # read original image + img = imread(r"../image_data/lena.jpg") + + # turn image in gray scale value + gray = cvtColor(img, COLOR_BGR2GRAY) + + # Applying gaussian filter + blur_image = gaussian_filter(gray, 3, sigma=1) + + # Apply multiple Kernel to detect edges + laplacian_image = my_laplacian(ksize=3, src=blur_image) + + imshow("Original image", img) + imshow("Detected edges using laplacian filter", laplacian_image) + + waitKey(0) From 17af6444497a64dbe803904e2ef27d0e2a280f8c Mon Sep 17 00:00:00 2001 From: JeevaRamanathan <64531160+JeevaRamanathan@users.noreply.github.com> Date: Fri, 6 Oct 2023 05:30:58 +0530 Subject: [PATCH 1047/1543] Symmetric tree (#9871) * symmectric tree * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed trailing spaces * escape sequence fix * added return type * added class * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * wordings fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added static method * added type * added static method * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * wordings fix * testcase added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * testcase added for mirror function * testcase added for mirror function * made the requested changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * made the requested changes * doc test added for symmetric, asymmetric * Update symmetric_tree.py --------- Co-authored-by: jeevaramanthan.m Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/binary_tree/symmetric_tree.py | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 data_structures/binary_tree/symmetric_tree.py diff --git a/data_structures/binary_tree/symmetric_tree.py b/data_structures/binary_tree/symmetric_tree.py new file mode 100644 index 000000000000..331a25849c1c --- /dev/null +++ b/data_structures/binary_tree/symmetric_tree.py @@ -0,0 +1,101 @@ +""" +Given the root of a binary tree, check whether it is a mirror of itself +(i.e., 
symmetric around its center). + +Leetcode reference: https://leetcode.com/problems/symmetric-tree/ +""" +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass +class Node: + """ + A Node has data variable and pointers to Nodes to its left and right. + """ + + data: int + left: Node | None = None + right: Node | None = None + + +def make_symmetric_tree() -> Node: + r""" + Create a symmetric tree for testing. + The tree looks like this: + 1 + / \ + 2 2 + / \ / \ + 3 4 4 3 + """ + root = Node(1) + root.left = Node(2) + root.right = Node(2) + root.left.left = Node(3) + root.left.right = Node(4) + root.right.left = Node(4) + root.right.right = Node(3) + return root + + +def make_asymmetric_tree() -> Node: + r""" + Create a asymmetric tree for testing. + The tree looks like this: + 1 + / \ + 2 2 + / \ / \ + 3 4 3 4 + """ + root = Node(1) + root.left = Node(2) + root.right = Node(2) + root.left.left = Node(3) + root.left.right = Node(4) + root.right.left = Node(3) + root.right.right = Node(4) + return root + + +def is_symmetric_tree(tree: Node) -> bool: + """ + Test cases for is_symmetric_tree function + >>> is_symmetric_tree(make_symmetric_tree()) + True + >>> is_symmetric_tree(make_asymmetric_tree()) + False + """ + if tree: + return is_mirror(tree.left, tree.right) + return True # An empty tree is considered symmetric. + + +def is_mirror(left: Node | None, right: Node | None) -> bool: + """ + >>> tree1 = make_symmetric_tree() + >>> tree1.right.right = Node(3) + >>> is_mirror(tree1.left, tree1.right) + True + >>> tree2 = make_asymmetric_tree() + >>> is_mirror(tree2.left, tree2.right) + False + """ + if left is None and right is None: + # Both sides are empty, which is symmetric. + return True + if left is None or right is None: + # One side is empty while the other is not, which is not symmetric. 
+ return False + if left.data == right.data: + # The values match, so check the subtree + return is_mirror(left.left, right.right) and is_mirror(left.right, right.left) + return False + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From d0c54acd75cedf14cff353869482a0487fea1697 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 6 Oct 2023 04:31:11 +0200 Subject: [PATCH 1048/1543] Use dataclasses in singly_linked_list.py (#9886) --- DIRECTORY.md | 7 + .../linked_list/singly_linked_list.py | 151 ++++++++++-------- 2 files changed, 93 insertions(+), 65 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index c199a4329202..a975b9264be0 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -25,6 +25,7 @@ * [Combination Sum](backtracking/combination_sum.py) * [Hamiltonian Cycle](backtracking/hamiltonian_cycle.py) * [Knight Tour](backtracking/knight_tour.py) + * [Match Word Pattern](backtracking/match_word_pattern.py) * [Minimax](backtracking/minimax.py) * [N Queens](backtracking/n_queens.py) * [N Queens Math](backtracking/n_queens_math.py) @@ -199,6 +200,7 @@ * [Red Black Tree](data_structures/binary_tree/red_black_tree.py) * [Segment Tree](data_structures/binary_tree/segment_tree.py) * [Segment Tree Other](data_structures/binary_tree/segment_tree_other.py) + * [Symmetric Tree](data_structures/binary_tree/symmetric_tree.py) * [Treap](data_structures/binary_tree/treap.py) * [Wavelet Tree](data_structures/binary_tree/wavelet_tree.py) * Disjoint Set @@ -277,6 +279,7 @@ * [Convolve](digital_image_processing/filters/convolve.py) * [Gabor Filter](digital_image_processing/filters/gabor_filter.py) * [Gaussian Filter](digital_image_processing/filters/gaussian_filter.py) + * [Laplacian Filter](digital_image_processing/filters/laplacian_filter.py) * [Local Binary Pattern](digital_image_processing/filters/local_binary_pattern.py) * [Median Filter](digital_image_processing/filters/median_filter.py) * [Sobel Filter](digital_image_processing/filters/sobel_filter.py) @@ -365,8 +368,10 @@ * [Ind Reactance](electronics/ind_reactance.py) * [Ohms Law](electronics/ohms_law.py) * [Real And Reactive Power](electronics/real_and_reactive_power.py) + * [Resistor Color Code](electronics/resistor_color_code.py) * [Resistor Equivalence](electronics/resistor_equivalence.py) * [Resonant Frequency](electronics/resonant_frequency.py) + * [Wheatstone Bridge](electronics/wheatstone_bridge.py) ## File Transfer * [Receive File](file_transfer/receive_file.py) @@ -415,6 +420,7 @@ * [Check Bipartite Graph Dfs](graphs/check_bipartite_graph_dfs.py) * [Check Cycle](graphs/check_cycle.py) * [Connected Components](graphs/connected_components.py) + * [Deep Clone Graph](graphs/deep_clone_graph.py) * [Depth First Search](graphs/depth_first_search.py) * [Depth First Search 2](graphs/depth_first_search_2.py) * [Dijkstra](graphs/dijkstra.py) @@ -1159,6 +1165,7 @@ * [Autocomplete Using Trie](strings/autocomplete_using_trie.py) * [Barcode Validator](strings/barcode_validator.py) * [Boyer Moore Search](strings/boyer_moore_search.py) + * [Camel Case To Snake Case](strings/camel_case_to_snake_case.py) * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py) * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index f4b2ddce12d7..2c6713a47ad9 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ 
b/data_structures/linked_list/singly_linked_list.py @@ -1,27 +1,38 @@ +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass from typing import Any +@dataclass class Node: - def __init__(self, data: Any): - """ - Create and initialize Node class instance. - >>> Node(20) - Node(20) - >>> Node("Hello, world!") - Node(Hello, world!) - >>> Node(None) - Node(None) - >>> Node(True) - Node(True) - """ - self.data = data - self.next = None + """ + Create and initialize Node class instance. + >>> Node(20) + Node(20) + >>> Node("Hello, world!") + Node(Hello, world!) + >>> Node(None) + Node(None) + >>> Node(True) + Node(True) + """ + + data: Any + next_node: Node | None = None def __repr__(self) -> str: """ Get the string representation of this node. >>> Node(10).__repr__() 'Node(10)' + >>> repr(Node(10)) + 'Node(10)' + >>> str(Node(10)) + 'Node(10)' + >>> Node(10) + Node(10) """ return f"Node({self.data})" @@ -31,10 +42,12 @@ def __init__(self): """ Create and initialize LinkedList class instance. >>> linked_list = LinkedList() + >>> linked_list.head is None + True """ self.head = None - def __iter__(self) -> Any: + def __iter__(self) -> Iterator[Any]: """ This function is intended for iterators to access and iterate through data inside linked list. @@ -51,7 +64,7 @@ def __iter__(self) -> Any: node = self.head while node: yield node.data - node = node.next + node = node.next_node def __len__(self) -> int: """ @@ -81,9 +94,16 @@ def __repr__(self) -> str: >>> linked_list.insert_tail(1) >>> linked_list.insert_tail(3) >>> linked_list.__repr__() - '1->3' + '1 -> 3' + >>> repr(linked_list) + '1 -> 3' + >>> str(linked_list) + '1 -> 3' + >>> linked_list.insert_tail(5) + >>> f"{linked_list}" + '1 -> 3 -> 5' """ - return "->".join([str(item) for item in self]) + return " -> ".join([str(item) for item in self]) def __getitem__(self, index: int) -> Any: """ @@ -134,7 +154,7 @@ def __setitem__(self, index: int, data: Any) -> None: raise ValueError("list index out of range.") current = self.head for _ in range(index): - current = current.next + current = current.next_node current.data = data def insert_tail(self, data: Any) -> None: @@ -146,10 +166,10 @@ def insert_tail(self, data: Any) -> None: tail >>> linked_list.insert_tail("tail_2") >>> linked_list - tail->tail_2 + tail -> tail_2 >>> linked_list.insert_tail("tail_3") >>> linked_list - tail->tail_2->tail_3 + tail -> tail_2 -> tail_3 """ self.insert_nth(len(self), data) @@ -162,10 +182,10 @@ def insert_head(self, data: Any) -> None: head >>> linked_list.insert_head("head_2") >>> linked_list - head_2->head + head_2 -> head >>> linked_list.insert_head("head_3") >>> linked_list - head_3->head_2->head + head_3 -> head_2 -> head """ self.insert_nth(0, data) @@ -177,13 +197,13 @@ def insert_nth(self, index: int, data: Any) -> None: >>> linked_list.insert_tail("second") >>> linked_list.insert_tail("third") >>> linked_list - first->second->third + first -> second -> third >>> linked_list.insert_nth(1, "fourth") >>> linked_list - first->fourth->second->third + first -> fourth -> second -> third >>> linked_list.insert_nth(3, "fifth") >>> linked_list - first->fourth->second->fifth->third + first -> fourth -> second -> fifth -> third """ if not 0 <= index <= len(self): raise IndexError("list index out of range") @@ -191,14 +211,14 @@ def insert_nth(self, index: int, data: Any) -> None: if self.head is None: self.head = new_node elif index == 0: - new_node.next = self.head # link new_node to head + new_node.next_node 
= self.head # link new_node to head self.head = new_node else: temp = self.head for _ in range(index - 1): - temp = temp.next - new_node.next = temp.next - temp.next = new_node + temp = temp.next_node + new_node.next_node = temp.next_node + temp.next_node = new_node def print_list(self) -> None: # print every node data """ @@ -208,7 +228,7 @@ def print_list(self) -> None: # print every node data >>> linked_list.insert_tail("second") >>> linked_list.insert_tail("third") >>> linked_list - first->second->third + first -> second -> third """ print(self) @@ -221,11 +241,11 @@ def delete_head(self) -> Any: >>> linked_list.insert_tail("second") >>> linked_list.insert_tail("third") >>> linked_list - first->second->third + first -> second -> third >>> linked_list.delete_head() 'first' >>> linked_list - second->third + second -> third >>> linked_list.delete_head() 'second' >>> linked_list @@ -248,11 +268,11 @@ def delete_tail(self) -> Any: # delete from tail >>> linked_list.insert_tail("second") >>> linked_list.insert_tail("third") >>> linked_list - first->second->third + first -> second -> third >>> linked_list.delete_tail() 'third' >>> linked_list - first->second + first -> second >>> linked_list.delete_tail() 'second' >>> linked_list @@ -275,11 +295,11 @@ def delete_nth(self, index: int = 0) -> Any: >>> linked_list.insert_tail("second") >>> linked_list.insert_tail("third") >>> linked_list - first->second->third + first -> second -> third >>> linked_list.delete_nth(1) # delete middle 'second' >>> linked_list - first->third + first -> third >>> linked_list.delete_nth(5) # this raises error Traceback (most recent call last): ... @@ -293,13 +313,13 @@ def delete_nth(self, index: int = 0) -> Any: raise IndexError("List index out of range.") delete_node = self.head # default first node if index == 0: - self.head = self.head.next + self.head = self.head.next_node else: temp = self.head for _ in range(index - 1): - temp = temp.next - delete_node = temp.next - temp.next = temp.next.next + temp = temp.next_node + delete_node = temp.next_node + temp.next_node = temp.next_node.next_node return delete_node.data def is_empty(self) -> bool: @@ -322,22 +342,22 @@ def reverse(self) -> None: >>> linked_list.insert_tail("second") >>> linked_list.insert_tail("third") >>> linked_list - first->second->third + first -> second -> third >>> linked_list.reverse() >>> linked_list - third->second->first + third -> second -> first """ prev = None current = self.head while current: # Store the current node's next node. 
- next_node = current.next - # Make the current node's next point backwards - current.next = prev + next_node = current.next_node + # Make the current node's next_node point backwards + current.next_node = prev # Make the previous node be the current node prev = current - # Make the current node the next node (to progress iteration) + # Make the current node the next_node node (to progress iteration) current = next_node # Return prev in order to put the head at the end self.head = prev @@ -366,17 +386,17 @@ def test_singly_linked_list() -> None: for i in range(10): assert len(linked_list) == i linked_list.insert_nth(i, i + 1) - assert str(linked_list) == "->".join(str(i) for i in range(1, 11)) + assert str(linked_list) == " -> ".join(str(i) for i in range(1, 11)) linked_list.insert_head(0) linked_list.insert_tail(11) - assert str(linked_list) == "->".join(str(i) for i in range(12)) + assert str(linked_list) == " -> ".join(str(i) for i in range(12)) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9) == 10 assert linked_list.delete_tail() == 11 assert len(linked_list) == 9 - assert str(linked_list) == "->".join(str(i) for i in range(1, 10)) + assert str(linked_list) == " -> ".join(str(i) for i in range(1, 10)) assert all(linked_list[i] == i + 1 for i in range(9)) is True @@ -385,7 +405,7 @@ def test_singly_linked_list() -> None: assert all(linked_list[i] == -i for i in range(9)) is True linked_list.reverse() - assert str(linked_list) == "->".join(str(i) for i in range(-8, 1)) + assert str(linked_list) == " -> ".join(str(i) for i in range(-8, 1)) def test_singly_linked_list_2() -> None: @@ -417,56 +437,57 @@ def test_singly_linked_list_2() -> None: # Check if it's empty or not assert linked_list.is_empty() is False assert ( - str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" - "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" + str(linked_list) + == "-9 -> 100 -> Node(77345112) -> dlrow olleH -> 7 -> 5555 -> " + "0 -> -192.55555 -> Hello, world! -> 77.9 -> Node(10) -> None -> None -> 12.2" ) # Delete the head result = linked_list.delete_head() assert result == -9 assert ( - str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" - "Hello, world!->77.9->Node(10)->None->None->12.2" + str(linked_list) == "100 -> Node(77345112) -> dlrow olleH -> 7 -> 5555 -> 0 -> " + "-192.55555 -> Hello, world! -> 77.9 -> Node(10) -> None -> None -> 12.2" ) # Delete the tail result = linked_list.delete_tail() assert result == 12.2 assert ( - str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" - "Hello, world!->77.9->Node(10)->None->None" + str(linked_list) == "100 -> Node(77345112) -> dlrow olleH -> 7 -> 5555 -> 0 -> " + "-192.55555 -> Hello, world! -> 77.9 -> Node(10) -> None -> None" ) # Delete a node in specific location in linked list result = linked_list.delete_nth(10) assert result is None assert ( - str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" - "Hello, world!->77.9->Node(10)->None" + str(linked_list) == "100 -> Node(77345112) -> dlrow olleH -> 7 -> 5555 -> 0 -> " + "-192.55555 -> Hello, world! -> 77.9 -> Node(10) -> None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!")) assert ( str(linked_list) - == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" - "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" + == "Node(Hello again, world!) 
-> 100 -> Node(77345112) -> dlrow olleH -> " + "7 -> 5555 -> 0 -> -192.55555 -> Hello, world! -> 77.9 -> Node(10) -> None" ) # Add None to its tail linked_list.insert_tail(None) assert ( str(linked_list) - == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" - "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" + == "Node(Hello again, world!) -> 100 -> Node(77345112) -> dlrow olleH -> 7 -> " + "5555 -> 0 -> -192.55555 -> Hello, world! -> 77.9 -> Node(10) -> None -> None" ) # Reverse the linked list linked_list.reverse() assert ( str(linked_list) - == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" - "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" + == "None -> None -> Node(10) -> 77.9 -> Hello, world! -> -192.55555 -> 0 -> " + "5555 -> 7 -> dlrow olleH -> Node(77345112) -> 100 -> Node(Hello again, world!)" ) From 795e97e87f6760a693769097613ace56a6addc8d Mon Sep 17 00:00:00 2001 From: Sarvjeet Singh <63469455+aazad20@users.noreply.github.com> Date: Fri, 6 Oct 2023 19:19:34 +0530 Subject: [PATCH 1049/1543] Added Majority Voting Algorithm (#9866) * Create MajorityVoteAlgorithm.py * Update and rename MajorityVoteAlgorithm.py to majorityvotealgorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename majorityvotealgorithm.py to majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update majority_vote_algorithm.py * Update majority_vote_algorithm.py * Update other/majority_vote_algorithm.py Co-authored-by: Christian Clauss * renaming variables majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update majority_vote_algorithm.py * Update majority_vote_algorithm.py * Update majority_vote_algorithm.py * Update majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update other/majority_vote_algorithm.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update other/majority_vote_algorithm.py Co-authored-by: Christian Clauss * adding more testcases majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update majority_vote_algorithm.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- other/majority_vote_algorithm.py | 37 ++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 other/majority_vote_algorithm.py diff --git a/other/majority_vote_algorithm.py b/other/majority_vote_algorithm.py new file mode 100644 index 000000000000..ab8b386dd2e5 --- /dev/null +++ b/other/majority_vote_algorithm.py @@ -0,0 +1,37 @@ +""" +This is Booyer-Moore Majority Vote Algorithm. 
The problem statement goes like this: +Given an integer array of size n, find all elements that appear more than ⌊ n/k ⌋ times. +We have to solve in O(n) time and O(1) Space. +URL : https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_majority_vote_algorithm +""" +from collections import Counter + + +def majority_vote(votes: list[int], votes_needed_to_win: int) -> list[int]: + """ + >>> majority_vote([1, 2, 2, 3, 1, 3, 2], 3) + [2] + >>> majority_vote([1, 2, 2, 3, 1, 3, 2], 2) + [] + >>> majority_vote([1, 2, 2, 3, 1, 3, 2], 4) + [1, 2, 3] + """ + majority_candidate_counter: Counter[int] = Counter() + for vote in votes: + majority_candidate_counter[vote] += 1 + if len(majority_candidate_counter) == votes_needed_to_win: + majority_candidate_counter -= Counter(set(majority_candidate_counter)) + majority_candidate_counter = Counter( + vote for vote in votes if vote in majority_candidate_counter + ) + return [ + vote + for vote in majority_candidate_counter + if majority_candidate_counter[vote] > len(votes) / votes_needed_to_win + ] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 995c5533c645250c120b11f0eddc53909fc3d012 Mon Sep 17 00:00:00 2001 From: fxdup <47389903+fxdup@users.noreply.github.com> Date: Fri, 6 Oct 2023 14:46:58 -0400 Subject: [PATCH 1050/1543] Consolidate gamma (#9769) * refactor(gamma): Append _iterative to func name * refactor(gamma): Consolidate implementations * refactor(gamma): Redundant test function removal * Update maths/gamma.py --------- Co-authored-by: Tianyi Zheng --- maths/gamma.py | 91 ++++++++++++++++++++++++++++++++++------ maths/gamma_recursive.py | 77 ---------------------------------- 2 files changed, 79 insertions(+), 89 deletions(-) delete mode 100644 maths/gamma_recursive.py diff --git a/maths/gamma.py b/maths/gamma.py index d5debc58764b..822bbc74456f 100644 --- a/maths/gamma.py +++ b/maths/gamma.py @@ -1,35 +1,43 @@ +""" +Gamma function is a very useful tool in math and physics. +It helps calculating complex integral in a convenient way. +for more info: https://en.wikipedia.org/wiki/Gamma_function +In mathematics, the gamma function is one commonly +used extension of the factorial function to complex numbers. +The gamma function is defined for all complex numbers except +the non-positive integers +Python's Standard Library math.gamma() function overflows around gamma(171.624). +""" import math from numpy import inf from scipy.integrate import quad -def gamma(num: float) -> float: +def gamma_iterative(num: float) -> float: """ - https://en.wikipedia.org/wiki/Gamma_function - In mathematics, the gamma function is one commonly - used extension of the factorial function to complex numbers. - The gamma function is defined for all complex numbers except the non-positive - integers - >>> gamma(-1) + Calculates the value of Gamma function of num + where num is either an integer (1, 2, 3..) or a half-integer (0.5, 1.5, 2.5 ...). + + >>> gamma_iterative(-1) Traceback (most recent call last): ... ValueError: math domain error - >>> gamma(0) + >>> gamma_iterative(0) Traceback (most recent call last): ... ValueError: math domain error - >>> gamma(9) + >>> gamma_iterative(9) 40320.0 >>> from math import gamma as math_gamma - >>> all(.99999999 < gamma(i) / math_gamma(i) <= 1.000000001 + >>> all(.99999999 < gamma_iterative(i) / math_gamma(i) <= 1.000000001 ... for i in range(1, 50)) True - >>> gamma(-1)/math_gamma(-1) <= 1.000000001 + >>> gamma_iterative(-1)/math_gamma(-1) <= 1.000000001 Traceback (most recent call last): ... 
ValueError: math domain error - >>> gamma(3.3) - math_gamma(3.3) <= 0.00000001 + >>> gamma_iterative(3.3) - math_gamma(3.3) <= 0.00000001 True """ if num <= 0: @@ -42,7 +50,66 @@ def integrand(x: float, z: float) -> float: return math.pow(x, z - 1) * math.exp(-x) +def gamma_recursive(num: float) -> float: + """ + Calculates the value of Gamma function of num + where num is either an integer (1, 2, 3..) or a half-integer (0.5, 1.5, 2.5 ...). + Implemented using recursion + Examples: + >>> from math import isclose, gamma as math_gamma + >>> gamma_recursive(0.5) + 1.7724538509055159 + >>> gamma_recursive(1) + 1.0 + >>> gamma_recursive(2) + 1.0 + >>> gamma_recursive(3.5) + 3.3233509704478426 + >>> gamma_recursive(171.5) + 9.483367566824795e+307 + >>> all(isclose(gamma_recursive(num), math_gamma(num)) + ... for num in (0.5, 2, 3.5, 171.5)) + True + >>> gamma_recursive(0) + Traceback (most recent call last): + ... + ValueError: math domain error + >>> gamma_recursive(-1.1) + Traceback (most recent call last): + ... + ValueError: math domain error + >>> gamma_recursive(-4) + Traceback (most recent call last): + ... + ValueError: math domain error + >>> gamma_recursive(172) + Traceback (most recent call last): + ... + OverflowError: math range error + >>> gamma_recursive(1.1) + Traceback (most recent call last): + ... + NotImplementedError: num must be an integer or a half-integer + """ + if num <= 0: + raise ValueError("math domain error") + if num > 171.5: + raise OverflowError("math range error") + elif num - int(num) not in (0, 0.5): + raise NotImplementedError("num must be an integer or a half-integer") + elif num == 0.5: + return math.sqrt(math.pi) + else: + return 1.0 if num == 1 else (num - 1) * gamma_recursive(num - 1) + + if __name__ == "__main__": from doctest import testmod testmod() + num = 1.0 + while num: + num = float(input("Gamma of: ")) + print(f"gamma_iterative({num}) = {gamma_iterative(num)}") + print(f"gamma_recursive({num}) = {gamma_recursive(num)}") + print("\nEnter 0 to exit...") diff --git a/maths/gamma_recursive.py b/maths/gamma_recursive.py deleted file mode 100644 index 3d6b8c5e8138..000000000000 --- a/maths/gamma_recursive.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -Gamma function is a very useful tool in math and physics. -It helps calculating complex integral in a convenient way. -for more info: https://en.wikipedia.org/wiki/Gamma_function -Python's Standard Library math.gamma() function overflows around gamma(171.624). -""" -from math import pi, sqrt - - -def gamma(num: float) -> float: - """ - Calculates the value of Gamma function of num - where num is either an integer (1, 2, 3..) or a half-integer (0.5, 1.5, 2.5 ...). - Implemented using recursion - Examples: - >>> from math import isclose, gamma as math_gamma - >>> gamma(0.5) - 1.7724538509055159 - >>> gamma(2) - 1.0 - >>> gamma(3.5) - 3.3233509704478426 - >>> gamma(171.5) - 9.483367566824795e+307 - >>> all(isclose(gamma(num), math_gamma(num)) for num in (0.5, 2, 3.5, 171.5)) - True - >>> gamma(0) - Traceback (most recent call last): - ... - ValueError: math domain error - >>> gamma(-1.1) - Traceback (most recent call last): - ... - ValueError: math domain error - >>> gamma(-4) - Traceback (most recent call last): - ... - ValueError: math domain error - >>> gamma(172) - Traceback (most recent call last): - ... - OverflowError: math range error - >>> gamma(1.1) - Traceback (most recent call last): - ... 
- NotImplementedError: num must be an integer or a half-integer - """ - if num <= 0: - raise ValueError("math domain error") - if num > 171.5: - raise OverflowError("math range error") - elif num - int(num) not in (0, 0.5): - raise NotImplementedError("num must be an integer or a half-integer") - elif num == 0.5: - return sqrt(pi) - else: - return 1.0 if num == 1 else (num - 1) * gamma(num - 1) - - -def test_gamma() -> None: - """ - >>> test_gamma() - """ - assert gamma(0.5) == sqrt(pi) - assert gamma(1) == 1.0 - assert gamma(2) == 1.0 - - -if __name__ == "__main__": - from doctest import testmod - - testmod() - num = 1.0 - while num: - num = float(input("Gamma of: ")) - print(f"gamma({num}) = {gamma(num)}") - print("\nEnter 0 to exit...") From c6ec99d57140cbf8b54077d379dfffeb6c7ad280 Mon Sep 17 00:00:00 2001 From: Kausthub Kannan Date: Sat, 7 Oct 2023 00:53:05 +0530 Subject: [PATCH 1051/1543] Added Mish Activation Function (#9942) * Added Mish Activation Function * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- neural_network/activation_functions/mish.py | 39 +++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 neural_network/activation_functions/mish.py diff --git a/neural_network/activation_functions/mish.py b/neural_network/activation_functions/mish.py new file mode 100644 index 000000000000..e4f98307f2ba --- /dev/null +++ b/neural_network/activation_functions/mish.py @@ -0,0 +1,39 @@ +""" +Mish Activation Function + +Use Case: Improved version of the ReLU activation function used in Computer Vision. +For more detailed information, you can refer to the following link: +https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Mish +""" + +import numpy as np + + +def mish(vector: np.ndarray) -> np.ndarray: + """ + Implements the Mish activation function. + + Parameters: + vector (np.ndarray): The input array for Mish activation. + + Returns: + np.ndarray: The input array after applying the Mish activation. + + Formula: + f(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x)) + + Examples: + >>> mish(vector=np.array([2.3,0.6,-2,-3.8])) + array([ 2.26211893, 0.46613649, -0.25250148, -0.08405831]) + + >>> mish(np.array([-9.2, -0.3, 0.45, -4.56])) + array([-0.00092952, -0.15113318, 0.33152014, -0.04745745]) + + """ + return vector * np.tanh(np.log(1 + np.exp(vector))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 80a2087e0aa349b81fb6bbc5d73dae920f560e75 Mon Sep 17 00:00:00 2001 From: Kausthub Kannan Date: Sat, 7 Oct 2023 01:56:09 +0530 Subject: [PATCH 1052/1543] Added Softplus activation function (#9944) --- .../activation_functions/softplus.py | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 neural_network/activation_functions/softplus.py diff --git a/neural_network/activation_functions/softplus.py b/neural_network/activation_functions/softplus.py new file mode 100644 index 000000000000..35fdf41afc96 --- /dev/null +++ b/neural_network/activation_functions/softplus.py @@ -0,0 +1,37 @@ +""" +Softplus Activation Function + +Use Case: The Softplus function is a smooth approximation of the ReLU function. +For more detailed information, you can refer to the following link: +https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Softplus +""" + +import numpy as np + + +def softplus(vector: np.ndarray) -> np.ndarray: + """ + Implements the Softplus activation function. + + Parameters: + vector (np.ndarray): The input array for the Softplus activation. 
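Since the Mish patch above states mish(x) = x * tanh(softplus(x)) and this patch provides softplus, the two activations can be cross-checked with a small standalone sketch; the only assumption is that both modules are importable from the repository root as committed:

    import numpy as np
    from neural_network.activation_functions.mish import mish
    from neural_network.activation_functions.softplus import softplus

    x = np.array([2.3, 0.6, -2, -3.8])
    # mish composed from softplus should agree elementwise
    assert np.allclose(mish(x), x * np.tanh(softplus(x)))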
+ + Returns: + np.ndarray: The input array after applying the Softplus activation. + + Formula: f(x) = ln(1 + e^x) + + Examples: + >>> softplus(np.array([2.3, 0.6, -2, -3.8])) + array([2.39554546, 1.03748795, 0.12692801, 0.02212422]) + + >>> softplus(np.array([-9.2, -0.3, 0.45, -4.56])) + array([1.01034298e-04, 5.54355244e-01, 9.43248946e-01, 1.04077103e-02]) + """ + return np.log(1 + np.exp(vector)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 2122474e41f2b85500e1f9347d98c9efc15aba4e Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Sat, 7 Oct 2023 14:09:39 +0500 Subject: [PATCH 1053/1543] Segmented sieve - doctests (#9945) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Added doctests. * Update segmented_sieve.py Removed unnecessary check. * Update segmented_sieve.py Added checks for 0 and negative numbers. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update segmented_sieve.py * Update segmented_sieve.py Added float number check. * Update segmented_sieve.py * Update segmented_sieve.py simplified verification * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update segmented_sieve.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update segmented_sieve.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ValueError: Number 22.2 must instead be a positive integer --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/segmented_sieve.py | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/maths/segmented_sieve.py b/maths/segmented_sieve.py index e950a83b752a..125390edc588 100644 --- a/maths/segmented_sieve.py +++ b/maths/segmented_sieve.py @@ -4,7 +4,36 @@ def sieve(n: int) -> list[int]: - """Segmented Sieve.""" + """ + Segmented Sieve. + + Examples: + >>> sieve(8) + [2, 3, 5, 7] + + >>> sieve(27) + [2, 3, 5, 7, 11, 13, 17, 19, 23] + + >>> sieve(0) + Traceback (most recent call last): + ... + ValueError: Number 0 must instead be a positive integer + + >>> sieve(-1) + Traceback (most recent call last): + ... + ValueError: Number -1 must instead be a positive integer + + >>> sieve(22.2) + Traceback (most recent call last): + ... 
+ ValueError: Number 22.2 must instead be a positive integer + """ + + if n <= 0 or isinstance(n, float): + msg = f"Number {n} must instead be a positive integer" + raise ValueError(msg) + in_prime = [] start = 2 end = int(math.sqrt(n)) # Size of every segment @@ -42,4 +71,9 @@ def sieve(n: int) -> list[int]: return prime -print(sieve(10**6)) +if __name__ == "__main__": + import doctest + + doctest.testmod() + + print(f"{sieve(10**6) = }") From 678e0aa8cfdaae1d17536fdcf489bebe1e12cfc6 Mon Sep 17 00:00:00 2001 From: Saahil Mahato <115351000+saahil-mahato@users.noreply.github.com> Date: Sat, 7 Oct 2023 15:20:23 +0545 Subject: [PATCH 1054/1543] Mention square matrices in strassen docs and make it more clear (#9839) * refactor: fix strassen matrix multiplication docs * refactor: make docs more clear --- divide_and_conquer/strassen_matrix_multiplication.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py index 1d03950ef9fe..f529a255d2ef 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -74,7 +74,7 @@ def print_matrix(matrix: list) -> None: def actual_strassen(matrix_a: list, matrix_b: list) -> list: """ Recursive function to calculate the product of two matrices, using the Strassen - Algorithm. It only supports even length matrices. + Algorithm. It only supports square matrices of any size that is a power of 2. """ if matrix_dimensions(matrix_a) == (2, 2): return default_matrix_multiplication(matrix_a, matrix_b) @@ -129,8 +129,8 @@ def strassen(matrix1: list, matrix2: list) -> list: new_matrix1 = matrix1 new_matrix2 = matrix2 - # Adding zeros to the matrices so that the arrays dimensions are the same and also - # power of 2 + # Adding zeros to the matrices to convert them both into square matrices of equal + # dimensions that are a power of 2 for i in range(maxim): if i < dimension1[0]: for _ in range(dimension1[1], maxim): From 78af0c43c623332029c9ad1d240d81577aac5d72 Mon Sep 17 00:00:00 2001 From: Pronay Debnath Date: Sat, 7 Oct 2023 21:21:30 +0530 Subject: [PATCH 1055/1543] Create fractional_cover_problem.py (#9973) * Create fractional_cover_problem.py * Update fractional_cover_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fractional_cover_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fractional_cover_problem.py * Update fractional_cover_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fractional_cover_problem.py * Update fractional_cover_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fractional_cover_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Lose __eq__() * Update fractional_cover_problem.py * Define Item property ratio --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- greedy_methods/fractional_cover_problem.py | 102 +++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 greedy_methods/fractional_cover_problem.py diff --git a/greedy_methods/fractional_cover_problem.py 
b/greedy_methods/fractional_cover_problem.py new file mode 100644 index 000000000000..e37c363f1db9 --- /dev/null +++ b/greedy_methods/fractional_cover_problem.py @@ -0,0 +1,102 @@ +# https://en.wikipedia.org/wiki/Set_cover_problem + +from dataclasses import dataclass +from operator import attrgetter + + +@dataclass +class Item: + weight: int + value: int + + @property + def ratio(self) -> float: + """ + Return the value-to-weight ratio for the item. + + Returns: + float: The value-to-weight ratio for the item. + + Examples: + >>> Item(10, 65).ratio + 6.5 + + >>> Item(20, 100).ratio + 5.0 + + >>> Item(30, 120).ratio + 4.0 + """ + return self.value / self.weight + + +def fractional_cover(items: list[Item], capacity: int) -> float: + """ + Solve the Fractional Cover Problem. + + Args: + items: A list of items, where each item has weight and value attributes. + capacity: The maximum weight capacity of the knapsack. + + Returns: + The maximum value that can be obtained by selecting fractions of items to cover + the knapsack's capacity. + + Raises: + ValueError: If capacity is negative. + + Examples: + >>> fractional_cover((Item(10, 60), Item(20, 100), Item(30, 120)), capacity=50) + 240.0 + + >>> fractional_cover([Item(20, 100), Item(30, 120), Item(10, 60)], capacity=25) + 135.0 + + >>> fractional_cover([Item(10, 60), Item(20, 100), Item(30, 120)], capacity=60) + 280.0 + + >>> fractional_cover(items=[Item(5, 30), Item(10, 60), Item(15, 90)], capacity=30) + 180.0 + + >>> fractional_cover(items=[], capacity=50) + 0.0 + + >>> fractional_cover(items=[Item(10, 60)], capacity=5) + 30.0 + + >>> fractional_cover(items=[Item(10, 60)], capacity=1) + 6.0 + + >>> fractional_cover(items=[Item(10, 60)], capacity=0) + 0.0 + + >>> fractional_cover(items=[Item(10, 60)], capacity=-1) + Traceback (most recent call last): + ... + ValueError: Capacity cannot be negative + """ + if capacity < 0: + raise ValueError("Capacity cannot be negative") + + total_value = 0.0 + remaining_capacity = capacity + + # Sort the items by their value-to-weight ratio in descending order + for item in sorted(items, key=attrgetter("ratio"), reverse=True): + if remaining_capacity == 0: + break + + weight_taken = min(item.weight, remaining_capacity) + total_value += weight_taken * item.ratio + remaining_capacity -= weight_taken + + return total_value + + +if __name__ == "__main__": + import doctest + + if result := doctest.testmod().failed: + print(f"{result} test(s) failed") + else: + print("All tests passed") From 112daddc4de91d60bbdd3201fc9a6a4afc60f57a Mon Sep 17 00:00:00 2001 From: dhruvtrigotra <72982592+dhruvtrigotra@users.noreply.github.com> Date: Sun, 8 Oct 2023 00:34:24 +0530 Subject: [PATCH 1056/1543] charging_capacitor (#10016) * charging_capacitor * charging_capacitor * Final edits --------- Co-authored-by: Christian Clauss --- electronics/charging_capacitor.py | 71 +++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 electronics/charging_capacitor.py diff --git a/electronics/charging_capacitor.py b/electronics/charging_capacitor.py new file mode 100644 index 000000000000..4029b0ecf267 --- /dev/null +++ b/electronics/charging_capacitor.py @@ -0,0 +1,71 @@ +# source - The ARRL Handbook for Radio Communications +# https://en.wikipedia.org/wiki/RC_time_constant + +""" +Description +----------- +When a capacitor is connected with a potential source (AC or DC). 
It starts to charge +at a general speed but when a resistor is connected in the circuit with in series to +a capacitor then the capacitor charges slowly means it will take more time than usual. +while the capacitor is being charged, the voltage is in exponential function with time. + +'resistance(ohms) * capacitance(farads)' is called RC-timeconstant which may also be +represented as τ (tau). By using this RC-timeconstant we can find the voltage at any +time 't' from the initiation of charging a capacitor with the help of the exponential +function containing RC. Both at charging and discharging of a capacitor. +""" +from math import exp # value of exp = 2.718281828459… + + +def charging_capacitor( + source_voltage: float, # voltage in volts. + resistance: float, # resistance in ohms. + capacitance: float, # capacitance in farads. + time_sec: float, # time in seconds after charging initiation of capacitor. +) -> float: + """ + Find capacitor voltage at any nth second after initiating its charging. + + Examples + -------- + >>> charging_capacitor(source_voltage=.2,resistance=.9,capacitance=8.4,time_sec=.5) + 0.013 + + >>> charging_capacitor(source_voltage=2.2,resistance=3.5,capacitance=2.4,time_sec=9) + 1.446 + + >>> charging_capacitor(source_voltage=15,resistance=200,capacitance=20,time_sec=2) + 0.007 + + >>> charging_capacitor(20, 2000, 30*pow(10,-5), 4) + 19.975 + + >>> charging_capacitor(source_voltage=0,resistance=10.0,capacitance=.30,time_sec=3) + Traceback (most recent call last): + ... + ValueError: Source voltage must be positive. + + >>> charging_capacitor(source_voltage=20,resistance=-2000,capacitance=30,time_sec=4) + Traceback (most recent call last): + ... + ValueError: Resistance must be positive. + + >>> charging_capacitor(source_voltage=30,resistance=1500,capacitance=0,time_sec=4) + Traceback (most recent call last): + ... + ValueError: Capacitance must be positive. 
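A quick hand check of the charging formula against the fourth doctest above, as a standalone sketch separate from the patched file and using only the standard library: the time constant is tau = R*C = 2000 * 30e-5 = 0.6 s, so after 4 s the capacitor voltage is

    from math import exp
    tau = 2000 * 30e-5                          # R*C in seconds
    print(round(20 * (1 - exp(-4 / tau)), 3))   # 19.975, matching charging_capacitor(20, 2000, 30*pow(10,-5), 4)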
+ """ + + if source_voltage <= 0: + raise ValueError("Source voltage must be positive.") + if resistance <= 0: + raise ValueError("Resistance must be positive.") + if capacitance <= 0: + raise ValueError("Capacitance must be positive.") + return round(source_voltage * (1 - exp(-time_sec / (resistance * capacitance))), 3) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 60291738d2552999545c414bb8a8e90f86c69678 Mon Sep 17 00:00:00 2001 From: Kosuri L Indu <118645569+kosuri-indu@users.noreply.github.com> Date: Sun, 8 Oct 2023 00:38:38 +0530 Subject: [PATCH 1057/1543] add : trapped water program under dynamic programming (#10027) * to add the trapped water program * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to make changes for error : B006 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to make changes for error : B006 * to make changes for error : B006 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to make changes in doctest * to make changes in doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/trapped_water.py Co-authored-by: Christian Clauss * Update dynamic_programming/trapped_water.py Co-authored-by: Christian Clauss * to make changes in parameters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to make changes in parameters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/trapped_water.py Co-authored-by: Christian Clauss * to make changes in parameters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * for negative heights * Update dynamic_programming/trapped_water.py Co-authored-by: Christian Clauss * to remove falsy * Final edits * tuple[int, ...] --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- dynamic_programming/trapped_water.py | 60 ++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 dynamic_programming/trapped_water.py diff --git a/dynamic_programming/trapped_water.py b/dynamic_programming/trapped_water.py new file mode 100644 index 000000000000..8bec9fac5fef --- /dev/null +++ b/dynamic_programming/trapped_water.py @@ -0,0 +1,60 @@ +""" +Given an array of non-negative integers representing an elevation map where the width +of each bar is 1, this program calculates how much rainwater can be trapped. + +Example - height = (0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1) +Output: 6 +This problem can be solved using the concept of "DYNAMIC PROGRAMMING". + +We calculate the maximum height of bars on the left and right of every bar in array. +Then iterate over the width of structure and at each index. +The amount of water that will be stored is equal to minimum of maximum height of bars +on both sides minus height of bar at current position. +""" + + +def trapped_rainwater(heights: tuple[int, ...]) -> int: + """ + The trapped_rainwater function calculates the total amount of rainwater that can be + trapped given an array of bar heights. + It uses a dynamic programming approach, determining the maximum height of bars on + both sides for each bar, and then computing the trapped water above each bar. 
+ The function returns the total trapped water. + + >>> trapped_rainwater((0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1)) + 6 + >>> trapped_rainwater((7, 1, 5, 3, 6, 4)) + 9 + >>> trapped_rainwater((7, 1, 5, 3, 6, -1)) + Traceback (most recent call last): + ... + ValueError: No height can be negative + """ + if not heights: + return 0 + if any(h < 0 for h in heights): + raise ValueError("No height can be negative") + length = len(heights) + + left_max = [0] * length + left_max[0] = heights[0] + for i, height in enumerate(heights[1:], start=1): + left_max[i] = max(height, left_max[i - 1]) + + right_max = [0] * length + right_max[-1] = heights[-1] + for i in range(length - 2, -1, -1): + right_max[i] = max(heights[i], right_max[i + 1]) + + return sum( + min(left, right) - height + for left, right, height in zip(left_max, right_max, heights) + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(f"{trapped_rainwater((0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1)) = }") + print(f"{trapped_rainwater((7, 1, 5, 3, 6, 4)) = }") From 895dffb412d80f29c65a062bf6d91fd2a70d8818 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 7 Oct 2023 21:32:28 +0200 Subject: [PATCH 1058/1543] [pre-commit.ci] pre-commit autoupdate (#9543) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.291 → v0.0.292](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.291...v0.0.292) - [github.com/codespell-project/codespell: v2.2.5 → v2.2.6](https://github.com/codespell-project/codespell/compare/v2.2.5...v2.2.6) - [github.com/tox-dev/pyproject-fmt: 1.1.0 → 1.2.0](https://github.com/tox-dev/pyproject-fmt/compare/1.1.0...1.2.0) * updating DIRECTORY.md * Fix typos in test_min_spanning_tree_prim.py * Fix typos * codespell --ignore-words-list=manuel --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- .../cnn_classification.py.DISABLED.txt | 4 +-- computer_vision/mosaic_augmentation.py | 2 +- dynamic_programming/min_distance_up_bottom.py | 11 +++--- graphs/tests/test_min_spanning_tree_prim.py | 8 ++--- hashes/sha1.py | 36 ++++++++++--------- maths/pi_generator.py | 31 +++++++--------- maths/radians.py | 4 +-- maths/square_root.py | 7 ++-- neural_network/convolution_neural_network.py | 8 ++--- neural_network/gan.py_tf | 2 +- other/graham_scan.py | 8 ++--- other/linear_congruential_generator.py | 4 +-- other/password.py | 12 +++---- physics/speed_of_sound.py | 30 +++++++--------- project_euler/problem_035/sol1.py | 12 +++---- project_euler/problem_135/sol1.py | 30 +++++++--------- project_euler/problem_493/sol1.py | 2 +- pyproject.toml | 2 +- 19 files changed, 97 insertions(+), 118 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dbf7ff341243..8a88dcc07622 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,7 +26,7 @@ repos: - id: black - repo: https://github.com/codespell-project/codespell - rev: v2.2.5 + rev: v2.2.6 hooks: - id: codespell additional_dependencies: diff --git a/computer_vision/cnn_classification.py.DISABLED.txt b/computer_vision/cnn_classification.py.DISABLED.txt index 9b5f8c95eebf..b813b71033f3 100644 --- 
a/computer_vision/cnn_classification.py.DISABLED.txt +++ b/computer_vision/cnn_classification.py.DISABLED.txt @@ -11,10 +11,10 @@ Download dataset from : https://lhncbc.nlm.nih.gov/LHC-publications/pubs/TuberculosisChestXrayImageDataSets.html 1. Download the dataset folder and create two folder training set and test set -in the parent dataste folder +in the parent dataset folder 2. Move 30-40 image from both TB positive and TB Negative folder in the test set folder -3. The labels of the iamges will be extracted from the folder name +3. The labels of the images will be extracted from the folder name the image is present in. """ diff --git a/computer_vision/mosaic_augmentation.py b/computer_vision/mosaic_augmentation.py index c150126d6bfb..cd923dfe095f 100644 --- a/computer_vision/mosaic_augmentation.py +++ b/computer_vision/mosaic_augmentation.py @@ -8,7 +8,7 @@ import cv2 import numpy as np -# Parrameters +# Parameters OUTPUT_SIZE = (720, 1280) # Height, Width SCALE_RANGE = (0.4, 0.6) # if height or width lower than this scale, drop it. FILTER_TINY_SCALE = 1 / 100 diff --git a/dynamic_programming/min_distance_up_bottom.py b/dynamic_programming/min_distance_up_bottom.py index 4870c7ef4499..6b38a41a1c0a 100644 --- a/dynamic_programming/min_distance_up_bottom.py +++ b/dynamic_programming/min_distance_up_bottom.py @@ -1,11 +1,8 @@ """ Author : Alexander Pantyukhin Date : October 14, 2022 -This is implementation Dynamic Programming up bottom approach -to find edit distance. -The aim is to demonstate up bottom approach for solving the task. -The implementation was tested on the -leetcode: https://leetcode.com/problems/edit-distance/ +This is an implementation of the up-bottom approach to find edit distance. +The implementation was tested on Leetcode: https://leetcode.com/problems/edit-distance/ Levinstein distance Dynamic Programming: up -> down. @@ -30,10 +27,10 @@ def min_distance_up_bottom(word1: str, word2: str) -> int: @functools.cache def min_distance(index1: int, index2: int) -> int: - # if first word index is overflow - delete all from the second word + # if first word index overflows - delete all from the second word if index1 >= len_word1: return len_word2 - index2 - # if second word index is overflow - delete all from the first word + # if second word index overflows - delete all from the first word if index2 >= len_word2: return len_word1 - index1 diff = int(word1[index1] != word2[index2]) # current letters not identical diff --git a/graphs/tests/test_min_spanning_tree_prim.py b/graphs/tests/test_min_spanning_tree_prim.py index 91feab28fc81..66e5706dadb1 100644 --- a/graphs/tests/test_min_spanning_tree_prim.py +++ b/graphs/tests/test_min_spanning_tree_prim.py @@ -22,12 +22,12 @@ def test_prim_successful_result(): [1, 7, 11], ] - adjancency = defaultdict(list) + adjacency = defaultdict(list) for node1, node2, cost in edges: - adjancency[node1].append([node2, cost]) - adjancency[node2].append([node1, cost]) + adjacency[node1].append([node2, cost]) + adjacency[node2].append([node1, cost]) - result = mst(adjancency) + result = mst(adjacency) expected = [ [7, 6, 1], diff --git a/hashes/sha1.py b/hashes/sha1.py index 8a03673f3c9f..a0fa688f863e 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -1,26 +1,28 @@ """ -Demonstrates implementation of SHA1 Hash function in a Python class and gives utilities -to find hash of string or hash of text from a file. +Implementation of the SHA1 hash function and gives utilities to find hash of string or +hash of text from a file. 
Also contains a Test class to verify that the generated hash +matches what is returned by the hashlib library + Usage: python sha1.py --string "Hello World!!" python sha1.py --file "hello_world.txt" When run without any arguments, it prints the hash of the string "Hello World!! Welcome to Cryptography" -Also contains a Test class to verify that the generated Hash is same as that -returned by the hashlib library -SHA1 hash or SHA1 sum of a string is a cryptographic function which means it is easy +SHA1 hash or SHA1 sum of a string is a cryptographic function, which means it is easy to calculate forwards but extremely difficult to calculate backwards. What this means -is, you can easily calculate the hash of a string, but it is extremely difficult to -know the original string if you have its hash. This property is useful to communicate -securely, send encrypted messages and is very useful in payment systems, blockchain -and cryptocurrency etc. -The Algorithm as described in the reference: +is you can easily calculate the hash of a string, but it is extremely difficult to know +the original string if you have its hash. This property is useful for communicating +securely, send encrypted messages and is very useful in payment systems, blockchain and +cryptocurrency etc. + +The algorithm as described in the reference: First we start with a message. The message is padded and the length of the message is added to the end. It is then split into blocks of 512 bits or 64 bytes. The blocks are then processed one at a time. Each block must be expanded and compressed. -The value after each compression is added to a 160bit buffer called the current hash -state. After the last block is processed the current hash state is returned as +The value after each compression is added to a 160-bit buffer called the current hash +state. After the last block is processed, the current hash state is returned as the final hash. + Reference: https://deadhacker.com/2006/02/21/sha-1-illustrated/ """ import argparse @@ -30,18 +32,18 @@ class SHA1Hash: """ - Class to contain the entire pipeline for SHA1 Hashing Algorithm + Class to contain the entire pipeline for SHA1 hashing algorithm >>> SHA1Hash(bytes('Allan', 'utf-8')).final_hash() '872af2d8ac3d8695387e7c804bf0e02c18df9e6e' """ def __init__(self, data): """ - Inititates the variables data and h. h is a list of 5 8-digit Hexadecimal + Initiates the variables data and h. h is a list of 5 8-digit hexadecimal numbers corresponding to (1732584193, 4023233417, 2562383102, 271733878, 3285377520) respectively. We will start with this as a message digest. 0x is how you write - Hexadecimal numbers in Python + hexadecimal numbers in Python """ self.data = data self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0] @@ -90,7 +92,7 @@ def final_hash(self): For each block, the variable h that was initialized is copied to a,b,c,d,e and these 5 variables a,b,c,d,e undergo several changes. After all the blocks are processed, these 5 variables are pairwise added to h ie a to h[0], b to h[1] - and so on. This h becomes our final hash which is returned. + and so on. This h becomes our final hash which is returned. """ self.padded_data = self.padding() self.blocks = self.split_blocks() @@ -135,7 +137,7 @@ def test_sha1_hash(): def main(): """ Provides option 'string' or 'file' to take input and prints the calculated SHA1 - hash. unittest.main() has been commented because we probably don't want to run + hash. 
unittest.main() has been commented out because we probably don't want to run the test each time. """ # unittest.main() diff --git a/maths/pi_generator.py b/maths/pi_generator.py index dcd218aae309..addd921747ba 100644 --- a/maths/pi_generator.py +++ b/maths/pi_generator.py @@ -3,60 +3,53 @@ def calculate_pi(limit: int) -> str: https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80 Leibniz Formula for Pi - The Leibniz formula is the special case arctan 1 = 1/4 Pi . + The Leibniz formula is the special case arctan(1) = pi / 4. Leibniz's formula converges extremely slowly: it exhibits sublinear convergence. Convergence (https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80#Convergence) We cannot try to prove against an interrupted, uncompleted generation. https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80#Unusual_behaviour - The errors can in fact be predicted; - but those calculations also approach infinity for accuracy. + The errors can in fact be predicted, but those calculations also approach infinity + for accuracy. - Our output will always be a string since we can defintely store all digits in there. - For simplicity' sake, let's just compare against known values and since our outpit - is a string, we need to convert to float. + Our output will be a string so that we can definitely store all digits. >>> import math >>> float(calculate_pi(15)) == math.pi True - Since we cannot predict errors or interrupt any infinite alternating - series generation since they approach infinity, - or interrupt any alternating series, we are going to need math.isclose() + Since we cannot predict errors or interrupt any infinite alternating series + generation since they approach infinity, or interrupt any alternating series, we'll + need math.isclose() >>> math.isclose(float(calculate_pi(50)), math.pi) True - >>> math.isclose(float(calculate_pi(100)), math.pi) True - Since math.pi-constant contains only 16 digits, here some test with preknown values: + Since math.pi contains only 16 digits, here are some tests with known values: >>> calculate_pi(50) '3.14159265358979323846264338327950288419716939937510' >>> calculate_pi(80) '3.14159265358979323846264338327950288419716939937510582097494459230781640628620899' - - To apply the Leibniz formula for calculating pi, - the variables q, r, t, k, n, and l are used for the iteration process. """ + # Variables used for the iteration process q = 1 r = 0 t = 1 k = 1 n = 3 l = 3 + decimal = limit counter = 0 result = "" - """ - We will avoid using yield since we otherwise get a Generator-Object, - which we can't just compare against anything. 
We would have to make a list out of it - after the generation, so we will just stick to plain return logic: - """ + # We can't compare against anything if we make a generator, + # so we'll stick with plain return logic while counter != decimal + 1: if 4 * q + r - t < n * t: result += str(n) diff --git a/maths/radians.py b/maths/radians.py index 465467a3ba08..b8ac61cb135c 100644 --- a/maths/radians.py +++ b/maths/radians.py @@ -3,7 +3,7 @@ def radians(degree: float) -> float: """ - Coverts the given angle from degrees to radians + Converts the given angle from degrees to radians https://en.wikipedia.org/wiki/Radian >>> radians(180) @@ -16,7 +16,7 @@ def radians(degree: float) -> float: 1.9167205845401725 >>> from math import radians as math_radians - >>> all(abs(radians(i)-math_radians(i)) <= 0.00000001 for i in range(-2, 361)) + >>> all(abs(radians(i) - math_radians(i)) <= 1e-8 for i in range(-2, 361)) True """ diff --git a/maths/square_root.py b/maths/square_root.py index 2cbf14beae18..4462ccb75261 100644 --- a/maths/square_root.py +++ b/maths/square_root.py @@ -19,14 +19,13 @@ def get_initial_point(a: float) -> float: def square_root_iterative( - a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001 + a: float, max_iter: int = 9999, tolerance: float = 1e-14 ) -> float: """ - Square root is aproximated using Newtons method. + Square root approximated using Newton's method. https://en.wikipedia.org/wiki/Newton%27s_method - >>> all(abs(square_root_iterative(i)-math.sqrt(i)) <= .00000000000001 - ... for i in range(500)) + >>> all(abs(square_root_iterative(i) - math.sqrt(i)) <= 1e-14 for i in range(500)) True >>> square_root_iterative(-1) diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index f5ec156f3593..f2e88fe7bd88 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -2,7 +2,7 @@ - - - - - -- - - - - - - - - - - - - - - - - - - - - - - Name - - CNN - Convolution Neural Network For Photo Recognizing Goal - - Recognize Handing Writing Word Photo - Detail:Total 5 layers neural network + Detail: Total 5 layers neural network * Convolution layer * Pooling layer * Input layer layer of BP @@ -24,7 +24,7 @@ def __init__( self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2 ): """ - :param conv1_get: [a,c,d],size, number, step of convolution kernel + :param conv1_get: [a,c,d], size, number, step of convolution kernel :param size_p1: pooling size :param bp_num1: units number of flatten layer :param bp_num2: units number of hidden layer @@ -71,7 +71,7 @@ def save_model(self, save_path): with open(save_path, "wb") as f: pickle.dump(model_dic, f) - print(f"Model saved: {save_path}") + print(f"Model saved: {save_path}") @classmethod def read_model(cls, model_path): @@ -210,7 +210,7 @@ def _calculate_gradient_from_pool( def train( self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool ): - # model traning + # model training print("----------------------Start Training-------------------------") print((" - - Shape: Train_Data ", np.shape(datas_train))) print((" - - Shape: Teach_Data ", np.shape(datas_teach))) diff --git a/neural_network/gan.py_tf b/neural_network/gan.py_tf index deb062c48dc7..9c6e1c05b8b4 100644 --- a/neural_network/gan.py_tf +++ b/neural_network/gan.py_tf @@ -158,7 +158,7 @@ if __name__ == "__main__": # G_b2 = np.random.normal(size=(784),scale=(1. / np.sqrt(784 / 2.))) *0.002 G_b7 = np.zeros(784) - # 3. 
For Adam Optimzier + # 3. For Adam Optimizer v1, m1 = 0, 0 v2, m2 = 0, 0 v3, m3 = 0, 0 diff --git a/other/graham_scan.py b/other/graham_scan.py index 2eadb4e56668..3f11d40f141c 100644 --- a/other/graham_scan.py +++ b/other/graham_scan.py @@ -1,5 +1,5 @@ """ -This is a pure Python implementation of the merge-insertion sort algorithm +This is a pure Python implementation of the Graham scan algorithm Source: https://en.wikipedia.org/wiki/Graham_scan For doctests run following command: @@ -142,8 +142,8 @@ def graham_scan(points: list[tuple[int, int]]) -> list[tuple[int, int]]: stack.append(sorted_points[0]) stack.append(sorted_points[1]) stack.append(sorted_points[2]) - # In any ways, the first 3 points line are towards left. - # Because we sort them the angle from minx, miny. + # The first 3 points lines are towards the left because we sort them by their angle + # from minx, miny. current_direction = Direction.left for i in range(3, len(sorted_points)): @@ -164,7 +164,7 @@ def graham_scan(points: list[tuple[int, int]]) -> list[tuple[int, int]]: break elif current_direction == Direction.right: # If the straight line is towards right, - # every previous points on those straigh line is not convex hull. + # every previous points on that straight line is not convex hull. stack.pop() if next_direction == Direction.right: stack.pop() diff --git a/other/linear_congruential_generator.py b/other/linear_congruential_generator.py index c016310f9cfa..c7de15b94bbd 100644 --- a/other/linear_congruential_generator.py +++ b/other/linear_congruential_generator.py @@ -8,9 +8,9 @@ class LinearCongruentialGenerator: A pseudorandom number generator. """ - # The default value for **seed** is the result of a function call which is not + # The default value for **seed** is the result of a function call, which is not # normally recommended and causes ruff to raise a B008 error. However, in this case, - # it is accptable because `LinearCongruentialGenerator.__init__()` will only be + # it is acceptable because `LinearCongruentialGenerator.__init__()` will only be # called once per instance and it ensures that each instance will generate a unique # sequence of numbers. diff --git a/other/password.py b/other/password.py index 9a6161af87d7..1ce0d52316e6 100644 --- a/other/password.py +++ b/other/password.py @@ -63,11 +63,12 @@ def random_characters(chars_incl, i): pass # Put your code here... -# This Will Check Whether A Given Password Is Strong Or Not -# It Follows The Rule that Length Of Password Should Be At Least 8 Characters -# And At Least 1 Lower, 1 Upper, 1 Number And 1 Special Character def is_strong_password(password: str, min_length: int = 8) -> bool: """ + This will check whether a given password is strong or not. The password must be at + least as long as the provided minimum length, and it must contain at least 1 + lowercase letter, 1 uppercase letter, 1 number and 1 special character. 
+ >>> is_strong_password('Hwea7$2!') True >>> is_strong_password('Sh0r1') @@ -81,7 +82,6 @@ def is_strong_password(password: str, min_length: int = 8) -> bool: """ if len(password) < min_length: - # Your Password must be at least 8 characters long return False upper = any(char in ascii_uppercase for char in password) @@ -90,8 +90,6 @@ def is_strong_password(password: str, min_length: int = 8) -> bool: spec_char = any(char in punctuation for char in password) return upper and lower and num and spec_char - # Passwords should contain UPPERCASE, lowerase - # numbers, and special characters def main(): @@ -104,7 +102,7 @@ def main(): "Alternative Password generated:", alternative_password_generator(chars_incl, length), ) - print("[If you are thinking of using this passsword, You better save it.]") + print("[If you are thinking of using this password, You better save it.]") if __name__ == "__main__": diff --git a/physics/speed_of_sound.py b/physics/speed_of_sound.py index a4658366a36c..3fa952cdb411 100644 --- a/physics/speed_of_sound.py +++ b/physics/speed_of_sound.py @@ -2,39 +2,35 @@ Title : Calculating the speed of sound Description : - The speed of sound (c) is the speed that a sound wave travels - per unit time (m/s). During propagation, the sound wave propagates - through an elastic medium. Its SI unit is meter per second (m/s). + The speed of sound (c) is the speed that a sound wave travels per unit time (m/s). + During propagation, the sound wave propagates through an elastic medium. - Only longitudinal waves can propagate in liquids and gas other then - solid where they also travel in transverse wave. The following Algo- - rithem calculates the speed of sound in fluid depanding on the bulk - module and the density of the fluid. + Sound propagates as longitudinal waves in liquids and gases and as transverse waves + in solids. This file calculates the speed of sound in a fluid based on its bulk + module and density. - Equation for calculating speed od sound in fluid: - c_fluid = (K_s*p)**0.5 + Equation for the speed of sound in a fluid: + c_fluid = sqrt(K_s / p) c_fluid: speed of sound in fluid K_s: isentropic bulk modulus p: density of fluid - - Source : https://en.wikipedia.org/wiki/Speed_of_sound """ def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float: """ - This method calculates the speed of sound in fluid - - This is calculated from the other two provided values + Calculates the speed of sound in a fluid from its density and bulk modulus + Examples: - Example 1 --> Water 20°C: bulk_moduls= 2.15MPa, density=998kg/m³ - Example 2 --> Murcery 20°: bulk_moduls= 28.5MPa, density=13600kg/m³ + Example 1 --> Water 20°C: bulk_modulus= 2.15MPa, density=998kg/m³ + Example 2 --> Mercury 20°C: bulk_modulus= 28.5MPa, density=13600kg/m³ - >>> speed_of_sound_in_a_fluid(bulk_modulus=2.15*10**9, density=998) + >>> speed_of_sound_in_a_fluid(bulk_modulus=2.15e9, density=998) 1467.7563207952705 - >>> speed_of_sound_in_a_fluid(bulk_modulus=28.5*10**9, density=13600) + >>> speed_of_sound_in_a_fluid(bulk_modulus=28.5e9, density=13600) 1447.614670861731 """ diff --git a/project_euler/problem_035/sol1.py b/project_euler/problem_035/sol1.py index 17a4e9088ae2..644c992ed8a5 100644 --- a/project_euler/problem_035/sol1.py +++ b/project_euler/problem_035/sol1.py @@ -11,18 +11,18 @@ How many circular primes are there below one million? To solve this problem in an efficient manner, we will first mark all the primes -below 1 million using the Seive of Eratosthenes. 
Then, out of all these primes, -we will rule out the numbers which contain an even digit. After this we will +below 1 million using the Sieve of Eratosthenes. Then, out of all these primes, +we will rule out the numbers which contain an even digit. After this we will generate each circular combination of the number and check if all are prime. """ from __future__ import annotations -seive = [True] * 1000001 +sieve = [True] * 1000001 i = 2 while i * i <= 1000000: - if seive[i]: + if sieve[i]: for j in range(i * i, 1000001, i): - seive[j] = False + sieve[j] = False i += 1 @@ -36,7 +36,7 @@ def is_prime(n: int) -> bool: >>> is_prime(25363) False """ - return seive[n] + return sieve[n] def contains_an_even_digit(n: int) -> bool: diff --git a/project_euler/problem_135/sol1.py b/project_euler/problem_135/sol1.py index d71a0439c7e9..ac91fa4e2b9d 100644 --- a/project_euler/problem_135/sol1.py +++ b/project_euler/problem_135/sol1.py @@ -1,28 +1,22 @@ """ Project Euler Problem 135: https://projecteuler.net/problem=135 -Given the positive integers, x, y, and z, -are consecutive terms of an arithmetic progression, -the least value of the positive integer, n, -for which the equation, +Given the positive integers, x, y, and z, are consecutive terms of an arithmetic +progression, the least value of the positive integer, n, for which the equation, x2 − y2 − z2 = n, has exactly two solutions is n = 27: 342 − 272 − 202 = 122 − 92 − 62 = 27 -It turns out that n = 1155 is the least value -which has exactly ten solutions. +It turns out that n = 1155 is the least value which has exactly ten solutions. -How many values of n less than one million -have exactly ten distinct solutions? +How many values of n less than one million have exactly ten distinct solutions? -Taking x,y,z of the form a+d,a,a-d respectively, -the given equation reduces to a*(4d-a)=n. -Calculating no of solutions for every n till 1 million by fixing a -,and n must be multiple of a. -Total no of steps=n*(1/1+1/2+1/3+1/4..+1/n) -,so roughly O(nlogn) time complexity. - +Taking x, y, z of the form a + d, a, a - d respectively, the given equation reduces to +a * (4d - a) = n. +Calculating no of solutions for every n till 1 million by fixing a, and n must be a +multiple of a. Total no of steps = n * (1/1 + 1/2 + 1/3 + 1/4 + ... + 1/n), so roughly +O(nlogn) time complexity. """ @@ -42,15 +36,15 @@ def solution(limit: int = 1000000) -> int: for first_term in range(1, limit): for n in range(first_term, limit, first_term): common_difference = first_term + n / first_term - if common_difference % 4: # d must be divisble by 4 + if common_difference % 4: # d must be divisible by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference - ): # since x,y,z are positive integers - frequency[n] += 1 # so z>0 and a>d ,also 4d 0, a > d and 4d < a count = sum(1 for x in frequency[1:limit] if x == 10) diff --git a/project_euler/problem_493/sol1.py b/project_euler/problem_493/sol1.py index c9879a528230..4d96c6c3207e 100644 --- a/project_euler/problem_493/sol1.py +++ b/project_euler/problem_493/sol1.py @@ -9,7 +9,7 @@ This combinatorial problem can be solved by decomposing the problem into the following steps: -1. Calculate the total number of possible picking cominations +1. Calculate the total number of possible picking combinations [combinations := binom_coeff(70, 20)] 2. 
Calculate the number of combinations with one colour missing [missing := binom_coeff(60, 20)] diff --git a/pyproject.toml b/pyproject.toml index f9091fb8578d..75da7a04513e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -130,5 +130,5 @@ omit = [".env/*"] sort = "Cover" [tool.codespell] -ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar" +ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,zar" skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" From fa077e6703758afcae4f19347a4388b9230d568f Mon Sep 17 00:00:00 2001 From: hollowcrust <72879387+hollowcrust@users.noreply.github.com> Date: Sun, 8 Oct 2023 16:58:48 +0800 Subject: [PATCH 1059/1543] Add doctests, type hints; fix bug for dynamic_programming/minimum_partition.py (#10012) * Add doctests, type hints; fix bug * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/minimum_partition.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py index 3daa9767fde4..e6188cb33b3a 100644 --- a/dynamic_programming/minimum_partition.py +++ b/dynamic_programming/minimum_partition.py @@ -3,13 +3,25 @@ """ -def find_min(arr): +def find_min(arr: list[int]) -> int: + """ + >>> find_min([1, 2, 3, 4, 5]) + 1 + >>> find_min([5, 5, 5, 5, 5]) + 5 + >>> find_min([5, 5, 5, 5]) + 0 + >>> find_min([3]) + 3 + >>> find_min([]) + 0 + """ n = len(arr) s = sum(arr) dp = [[False for x in range(s + 1)] for y in range(n + 1)] - for i in range(1, n + 1): + for i in range(n + 1): dp[i][0] = True for i in range(1, s + 1): @@ -17,7 +29,7 @@ def find_min(arr): for i in range(1, n + 1): for j in range(1, s + 1): - dp[i][j] = dp[i][j - 1] + dp[i][j] = dp[i - 1][j] if arr[i - 1] <= j: dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]] @@ -28,3 +40,9 @@ def find_min(arr): break return diff + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 937ce83b150f0a217c7fa63c75a095534ae8bfeb Mon Sep 17 00:00:00 2001 From: Om Alve Date: Sun, 8 Oct 2023 16:35:01 +0530 Subject: [PATCH 1060/1543] Added fractionated_morse_cipher (#9442) * Added fractionated_morse_cipher * Added return type hint for main function * Added doctest for main * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replaced main function * changed the references section Co-authored-by: Christian Clauss * removed repetitive datatype hint in the docstring Co-authored-by: Christian Clauss * changed dictionary comprehension variable names to something more compact Co-authored-by: Christian Clauss * Update fractionated_morse_cipher.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ciphers/fractionated_morse_cipher.py | 167 +++++++++++++++++++++++++++ 1 file changed, 167 insertions(+) create mode 100644 ciphers/fractionated_morse_cipher.py diff --git a/ciphers/fractionated_morse_cipher.py b/ciphers/fractionated_morse_cipher.py new file mode 100644 index 000000000000..c1d5dc6d50aa --- /dev/null +++ b/ciphers/fractionated_morse_cipher.py @@ -0,0 +1,167 @@ +""" +Python program for 
the Fractionated Morse Cipher. + +The Fractionated Morse cipher first converts the plaintext to Morse code, +then enciphers fixed-size blocks of Morse code back to letters. +This procedure means plaintext letters are mixed into the ciphertext letters, +making it more secure than substitution ciphers. + +http://practicalcryptography.com/ciphers/fractionated-morse-cipher/ +""" +import string + +MORSE_CODE_DICT = { + "A": ".-", + "B": "-...", + "C": "-.-.", + "D": "-..", + "E": ".", + "F": "..-.", + "G": "--.", + "H": "....", + "I": "..", + "J": ".---", + "K": "-.-", + "L": ".-..", + "M": "--", + "N": "-.", + "O": "---", + "P": ".--.", + "Q": "--.-", + "R": ".-.", + "S": "...", + "T": "-", + "U": "..-", + "V": "...-", + "W": ".--", + "X": "-..-", + "Y": "-.--", + "Z": "--..", + " ": "", +} + +# Define possible trigrams of Morse code +MORSE_COMBINATIONS = [ + "...", + "..-", + "..x", + ".-.", + ".--", + ".-x", + ".x.", + ".x-", + ".xx", + "-..", + "-.-", + "-.x", + "--.", + "---", + "--x", + "-x.", + "-x-", + "-xx", + "x..", + "x.-", + "x.x", + "x-.", + "x--", + "x-x", + "xx.", + "xx-", + "xxx", +] + +# Create a reverse dictionary for Morse code +REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()} + + +def encode_to_morse(plaintext: str) -> str: + """Encode a plaintext message into Morse code. + + Args: + plaintext: The plaintext message to encode. + + Returns: + The Morse code representation of the plaintext message. + + Example: + >>> encode_to_morse("defend the east") + '-..x.x..-.x.x-.x-..xx-x....x.xx.x.-x...x-' + """ + return "x".join([MORSE_CODE_DICT.get(letter.upper(), "") for letter in plaintext]) + + +def encrypt_fractionated_morse(plaintext: str, key: str) -> str: + """Encrypt a plaintext message using Fractionated Morse Cipher. + + Args: + plaintext: The plaintext message to encrypt. + key: The encryption key. + + Returns: + The encrypted ciphertext. + + Example: + >>> encrypt_fractionated_morse("defend the east","Roundtable") + 'ESOAVVLJRSSTRX' + + """ + morse_code = encode_to_morse(plaintext) + key = key.upper() + string.ascii_uppercase + key = "".join(sorted(set(key), key=key.find)) + + # Ensure morse_code length is a multiple of 3 + padding_length = 3 - (len(morse_code) % 3) + morse_code += "x" * padding_length + + fractionated_morse_dict = {v: k for k, v in zip(key, MORSE_COMBINATIONS)} + fractionated_morse_dict["xxx"] = "" + encrypted_text = "".join( + [ + fractionated_morse_dict[morse_code[i : i + 3]] + for i in range(0, len(morse_code), 3) + ] + ) + return encrypted_text + + +def decrypt_fractionated_morse(ciphertext: str, key: str) -> str: + """Decrypt a ciphertext message encrypted with Fractionated Morse Cipher. + + Args: + ciphertext: The ciphertext message to decrypt. + key: The decryption key. + + Returns: + The decrypted plaintext message. + + Example: + >>> decrypt_fractionated_morse("ESOAVVLJRSSTRX","Roundtable") + 'DEFEND THE EAST' + """ + key = key.upper() + string.ascii_uppercase + key = "".join(sorted(set(key), key=key.find)) + + inverse_fractionated_morse_dict = dict(zip(key, MORSE_COMBINATIONS)) + morse_code = "".join( + [inverse_fractionated_morse_dict.get(letter, "") for letter in ciphertext] + ) + decrypted_text = "".join( + [REVERSE_DICT[code] for code in morse_code.split("x")] + ).strip() + return decrypted_text + + +if __name__ == "__main__": + """ + Example usage of Fractionated Morse Cipher. 
+ """ + plaintext = "defend the east" + print("Plain Text:", plaintext) + key = "ROUNDTABLE" + + ciphertext = encrypt_fractionated_morse(plaintext, key) + print("Encrypted:", ciphertext) + + decrypted_text = decrypt_fractionated_morse(ciphertext, key) + print("Decrypted:", decrypted_text) From 08d394126c9d46fc9d227a0dc1e343ad1fa70679 Mon Sep 17 00:00:00 2001 From: Kausthub Kannan Date: Sun, 8 Oct 2023 21:18:22 +0530 Subject: [PATCH 1061/1543] Changed Mish Activation Function to use Softplus (#10111) --- neural_network/activation_functions/mish.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/neural_network/activation_functions/mish.py b/neural_network/activation_functions/mish.py index e4f98307f2ba..e51655df8a3f 100644 --- a/neural_network/activation_functions/mish.py +++ b/neural_network/activation_functions/mish.py @@ -7,6 +7,7 @@ """ import numpy as np +from softplus import softplus def mish(vector: np.ndarray) -> np.ndarray: @@ -30,7 +31,7 @@ def mish(vector: np.ndarray) -> np.ndarray: array([-0.00092952, -0.15113318, 0.33152014, -0.04745745]) """ - return vector * np.tanh(np.log(1 + np.exp(vector))) + return vector * np.tanh(softplus(vector)) if __name__ == "__main__": From 6860daea60a512b202481bd5dd00d6534e162b77 Mon Sep 17 00:00:00 2001 From: Aarya Balwadkar <142713127+AaryaBalwadkar@users.noreply.github.com> Date: Sun, 8 Oct 2023 21:23:38 +0530 Subject: [PATCH 1062/1543] Made Changes shifted CRT, modular division to maths directory (#10084) --- {blockchain => maths}/chinese_remainder_theorem.py | 0 {blockchain => maths}/modular_division.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename {blockchain => maths}/chinese_remainder_theorem.py (100%) rename {blockchain => maths}/modular_division.py (100%) diff --git a/blockchain/chinese_remainder_theorem.py b/maths/chinese_remainder_theorem.py similarity index 100% rename from blockchain/chinese_remainder_theorem.py rename to maths/chinese_remainder_theorem.py diff --git a/blockchain/modular_division.py b/maths/modular_division.py similarity index 100% rename from blockchain/modular_division.py rename to maths/modular_division.py From 81b29066d206217cb689fe2c9c8d530a1aa66cbe Mon Sep 17 00:00:00 2001 From: Arnav Kohli <95236897+THEGAMECHANGER416@users.noreply.github.com> Date: Sun, 8 Oct 2023 21:34:43 +0530 Subject: [PATCH 1063/1543] Created folder for losses in Machine_Learning (#9969) * Created folder for losses in Machine_Learning * Update binary_cross_entropy.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update mean_squared_error.py * Update binary_cross_entropy.py * Update mean_squared_error.py * Update binary_cross_entropy.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update mean_squared_error.py * Update binary_cross_entropy.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update mean_squared_error.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update binary_cross_entropy.py * Update mean_squared_error.py * Update binary_cross_entropy.py * Update mean_squared_error.py * Update machine_learning/losses/binary_cross_entropy.py Co-authored-by: Christian Clauss * Update machine_learning/losses/mean_squared_error.py Co-authored-by: Christian Clauss * Update machine_learning/losses/binary_cross_entropy.py Co-authored-by: Christian Clauss * Update 
mean_squared_error.py * Update machine_learning/losses/mean_squared_error.py Co-authored-by: Tianyi Zheng * Update binary_cross_entropy.py * Update mean_squared_error.py * Update binary_cross_entropy.py * Update mean_squared_error.py * Update mean_squared_error.py * Update binary_cross_entropy.py * renamed: losses -> loss_functions * updated 2 files * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update mean_squared_error.py * Update mean_squared_error.py * Update binary_cross_entropy.py * Update mean_squared_error.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Tianyi Zheng --- .../loss_functions/binary_cross_entropy.py | 59 +++++++++++++++++++ .../loss_functions/mean_squared_error.py | 51 ++++++++++++++++ 2 files changed, 110 insertions(+) create mode 100644 machine_learning/loss_functions/binary_cross_entropy.py create mode 100644 machine_learning/loss_functions/mean_squared_error.py diff --git a/machine_learning/loss_functions/binary_cross_entropy.py b/machine_learning/loss_functions/binary_cross_entropy.py new file mode 100644 index 000000000000..4ebca7f21757 --- /dev/null +++ b/machine_learning/loss_functions/binary_cross_entropy.py @@ -0,0 +1,59 @@ +""" +Binary Cross-Entropy (BCE) Loss Function + +Description: +Quantifies dissimilarity between true labels (0 or 1) and predicted probabilities. +It's widely used in binary classification tasks. + +Formula: +BCE = -Σ(y_true * log(y_pred) + (1 - y_true) * log(1 - y_pred)) + +Source: +[Wikipedia - Cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) +""" + +import numpy as np + + +def binary_cross_entropy( + y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 +) -> float: + """ + Calculate the BCE Loss between true labels and predicted probabilities. + + Parameters: + - y_true: True binary labels (0 or 1). + - y_pred: Predicted probabilities for class 1. + - epsilon: Small constant to avoid numerical instability. + + Returns: + - bce_loss: Binary Cross-Entropy Loss. + + Example Usage: + >>> true_labels = np.array([0, 1, 1, 0, 1]) + >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) + >>> binary_cross_entropy(true_labels, predicted_probs) + 0.2529995012327421 + >>> true_labels = np.array([0, 1, 1, 0, 1]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> binary_cross_entropy(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + # Clip predicted probabilities to avoid log(0) and log(1) + y_pred = np.clip(y_pred, epsilon, 1 - epsilon) + + # Calculate binary cross-entropy loss + bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)) + + # Take the mean over all samples + return np.mean(bce_loss) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/machine_learning/loss_functions/mean_squared_error.py b/machine_learning/loss_functions/mean_squared_error.py new file mode 100644 index 000000000000..d2b0e1e158ba --- /dev/null +++ b/machine_learning/loss_functions/mean_squared_error.py @@ -0,0 +1,51 @@ +""" +Mean Squared Error (MSE) Loss Function + +Description: +MSE measures the mean squared difference between true values and predicted values. +It serves as a measure of the model's accuracy in regression tasks. 
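+A lower MSE indicates predictions that are closer to the true values; squaring the
+errors penalizes large deviations more heavily than small ones.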
+ +Formula: +MSE = (1/n) * Σ(y_true - y_pred)^2 + +Source: +[Wikipedia - Mean squared error](https://en.wikipedia.org/wiki/Mean_squared_error) +""" + +import numpy as np + + +def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the Mean Squared Error (MSE) between two arrays. + + Parameters: + - y_true: The true values (ground truth). + - y_pred: The predicted values. + + Returns: + - mse: The Mean Squared Error between y_true and y_pred. + + Example usage: + >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> mean_squared_error(true_values, predicted_values) + 0.028000000000000032 + >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> mean_squared_error(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + squared_errors = (y_true - y_pred) ** 2 + return np.mean(squared_errors) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a12b07f352d51af1cb86c14f865cf2b18aba3ea1 Mon Sep 17 00:00:00 2001 From: Kausthub Kannan Date: Sun, 8 Oct 2023 21:38:37 +0530 Subject: [PATCH 1064/1543] Added Squareplus Activation Function (#9977) * Added Squareplus Activation Function * Added parameter beta to the function * Fixed Squareplus Function * Update neural_network/activation_functions/squareplus.py --------- Co-authored-by: Tianyi Zheng --- .../activation_functions/squareplus.py | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 neural_network/activation_functions/squareplus.py diff --git a/neural_network/activation_functions/squareplus.py b/neural_network/activation_functions/squareplus.py new file mode 100644 index 000000000000..40fa800d6b4a --- /dev/null +++ b/neural_network/activation_functions/squareplus.py @@ -0,0 +1,38 @@ +""" +Squareplus Activation Function + +Use Case: Squareplus designed to enhance positive values and suppress negative values. +For more detailed information, you can refer to the following link: +https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Squareplus +""" + +import numpy as np + + +def squareplus(vector: np.ndarray, beta: float) -> np.ndarray: + """ + Implements the SquarePlus activation function. + + Parameters: + vector (np.ndarray): The input array for the SquarePlus activation. + beta (float): size of the curved region + + Returns: + np.ndarray: The input array after applying the SquarePlus activation. 
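+
+    For large positive inputs the output approaches the input itself, for large
+    negative inputs it approaches zero, and at zero it equals sqrt(beta) / 2.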
+ + Formula: f(x) = ( x + sqrt(x^2 + b) ) / 2 + + Examples: + >>> squareplus(np.array([2.3, 0.6, -2, -3.8]), beta=2) + array([2.5 , 1.06811457, 0.22474487, 0.12731349]) + + >>> squareplus(np.array([-9.2, -0.3, 0.45, -4.56]), beta=3) + array([0.0808119 , 0.72891979, 1.11977651, 0.15893419]) + """ + return (vector + np.sqrt(vector**2 + beta)) / 2 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From e89ae55d8e157cb7c6c3f855188a0fde29083c35 Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Sun, 8 Oct 2023 21:40:41 +0530 Subject: [PATCH 1065/1543] Create strip.py (#10011) * Create strip.py * Update strip.py --- strings/strip.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 strings/strip.py diff --git a/strings/strip.py b/strings/strip.py new file mode 100644 index 000000000000..d4f901f0c7ea --- /dev/null +++ b/strings/strip.py @@ -0,0 +1,33 @@ +def strip(user_string: str, characters: str = " \t\n\r") -> str: + """ + Remove leading and trailing characters (whitespace by default) from a string. + + Args: + user_string (str): The input string to be stripped. + characters (str, optional): Optional characters to be removed + (default is whitespace). + + Returns: + str: The stripped string. + + Examples: + >>> strip(" hello ") + 'hello' + >>> strip("...world...", ".") + 'world' + >>> strip("123hello123", "123") + 'hello' + >>> strip("") + '' + """ + + start = 0 + end = len(user_string) + + while start < end and user_string[start] in characters: + start += 1 + + while end > start and user_string[end - 1] in characters: + end -= 1 + + return user_string[start:end] From 982bc2735872592d036c20389859071f36b13469 Mon Sep 17 00:00:00 2001 From: Kosuri L Indu <118645569+kosuri-indu@users.noreply.github.com> Date: Sun, 8 Oct 2023 22:37:02 +0530 Subject: [PATCH 1066/1543] add : Best time to buy and sell stock program under GREEDY methods (#10114) * to add best_time_stock program * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update best_time_to_buy_and_sell_stock.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../best_time_to_buy_and_sell_stock.py | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 greedy_methods/best_time_to_buy_and_sell_stock.py diff --git a/greedy_methods/best_time_to_buy_and_sell_stock.py b/greedy_methods/best_time_to_buy_and_sell_stock.py new file mode 100644 index 000000000000..4aea19172ece --- /dev/null +++ b/greedy_methods/best_time_to_buy_and_sell_stock.py @@ -0,0 +1,42 @@ +""" +Given a list of stock prices calculate the maximum profit that can be made from a +single buy and sell of one share of stock. We only allowed to complete one buy +transaction and one sell transaction but must buy before we sell. + +Example : prices = [7, 1, 5, 3, 6, 4] +max_profit will return 5 - which is by buying at price 1 and selling at price 6. + +This problem can be solved using the concept of "GREEDY ALGORITHM". + +We iterate over the price array once, keeping track of the lowest price point +(buy) and the maximum profit we can get at each point. The greedy choice at each point +is to either buy at the current price if it's less than our current buying price, or +sell at the current price if the profit is more than our current maximum profit. 
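+Walking through the example above, the minimum price seen so far evolves as
+7, 1, 1, 1, 1, 1 and the best profit so far as 0, 0, 4, 4, 5, 5, giving 5.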
+""" + + +def max_profit(prices: list[int]) -> int: + """ + >>> max_profit([7, 1, 5, 3, 6, 4]) + 5 + >>> max_profit([7, 6, 4, 3, 1]) + 0 + """ + if not prices: + return 0 + + min_price = prices[0] + max_profit: int = 0 + + for price in prices: + min_price = min(price, min_price) + max_profit = max(price - min_price, max_profit) + + return max_profit + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(max_profit([7, 1, 5, 3, 6, 4])) From e7a59bfff5b182fb01530aa4b1a29b804efb1425 Mon Sep 17 00:00:00 2001 From: SubhranShu2332 <124662904+SubhranShu2332@users.noreply.github.com> Date: Mon, 9 Oct 2023 00:47:02 +0530 Subject: [PATCH 1067/1543] In place of calculating the factorial several times we can run a loop k times to calculate the combination (#10051) * In place of calculating the factorial several times we can run a loop k times to calculate the combination for example: 5 C 3 = 5! / (3! * (5-3)! ) = (5*4*3*2*1)/[(3*2*1)*(2*1)] =(5*4*3)/(3*2*1) so running a loop k times will reduce the time complexity to O(k) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/combinations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/combinations.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/maths/combinations.py b/maths/combinations.py index a2324012c01f..6e9e1a807067 100644 --- a/maths/combinations.py +++ b/maths/combinations.py @@ -1,7 +1,6 @@ """ https://en.wikipedia.org/wiki/Combination """ -from math import factorial def combinations(n: int, k: int) -> int: @@ -35,7 +34,11 @@ def combinations(n: int, k: int) -> int: # to calculate a factorial of a negative number, which is not possible if n < k or k < 0: raise ValueError("Please enter positive integers for n and k where n >= k") - return factorial(n) // (factorial(k) * factorial(n - k)) + res = 1 + for i in range(k): + res *= n - i + res //= i + 1 + return res if __name__ == "__main__": From c8f6f79f8038ef090a396725c80fa77d9186fb4b Mon Sep 17 00:00:00 2001 From: Siddharth Warrier <117698635+siddwarr@users.noreply.github.com> Date: Mon, 9 Oct 2023 01:10:14 +0530 Subject: [PATCH 1068/1543] Power of 4 (#9505) * added power_of_4 * updated power_of_4 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated power_of_4 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated power_of_4 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated power_of_4 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated power_of_4 * added type check * added tescase --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- bit_manipulation/power_of_4.py | 67 ++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 bit_manipulation/power_of_4.py diff --git a/bit_manipulation/power_of_4.py b/bit_manipulation/power_of_4.py new file mode 100644 index 000000000000..09e6e28621df --- /dev/null +++ b/bit_manipulation/power_of_4.py @@ -0,0 +1,67 @@ +""" + +Task: +Given a positive int number. Return True if this number is power of 4 +or False otherwise. 
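+For example, 4, 16 and 64 are powers of 4, while 2, 8 and 32 are not.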
+ +Implementation notes: Use bit manipulation. +For example if the number is the power of 2 it's bits representation: +n = 0..100..00 +n - 1 = 0..011..11 + +n & (n - 1) - no intersections = 0 +If the number is a power of 4 then it should be a power of 2 +and the set bit should be at an odd position. +""" + + +def power_of_4(number: int) -> bool: + """ + Return True if this number is power of 4 or False otherwise. + + >>> power_of_4(0) + Traceback (most recent call last): + ... + ValueError: number must be positive + >>> power_of_4(1) + True + >>> power_of_4(2) + False + >>> power_of_4(4) + True + >>> power_of_4(6) + False + >>> power_of_4(8) + False + >>> power_of_4(17) + False + >>> power_of_4(64) + True + >>> power_of_4(-1) + Traceback (most recent call last): + ... + ValueError: number must be positive + >>> power_of_4(1.2) + Traceback (most recent call last): + ... + TypeError: number must be an integer + + """ + if not isinstance(number, int): + raise TypeError("number must be an integer") + if number <= 0: + raise ValueError("number must be positive") + if number & (number - 1) == 0: + c = 0 + while number: + c += 1 + number >>= 1 + return c % 2 == 1 + else: + return False + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 17de908d1ad5a3eb7eb0c850e64394b62e4674c3 Mon Sep 17 00:00:00 2001 From: Achal Jain Date: Mon, 9 Oct 2023 01:11:30 +0530 Subject: [PATCH 1069/1543] Added Median of Medians Algorithm (#9864) * Added Median of Medians Algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update median_of_medians.py as per requested changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- searches/median_of_medians.py | 107 ++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 searches/median_of_medians.py diff --git a/searches/median_of_medians.py b/searches/median_of_medians.py new file mode 100644 index 000000000000..a8011a34af76 --- /dev/null +++ b/searches/median_of_medians.py @@ -0,0 +1,107 @@ +""" +A Python implementation of the Median of Medians algorithm +to select pivots for quick_select, which is efficient for +calculating the value that would appear in the index of a +list if it would be sorted, even if it is not already +sorted. 
Search in time complexity O(n) at any rank +deterministically +https://en.wikipedia.org/wiki/Median_of_medians +""" + + +def median_of_five(arr: list) -> int: + """ + Return the median of the input list + :param arr: Array to find median of + :return: median of arr + + >>> median_of_five([2, 4, 5, 7, 899]) + 5 + >>> median_of_five([5, 7, 899, 54, 32]) + 32 + >>> median_of_five([5, 4, 3, 2]) + 4 + >>> median_of_five([3, 5, 7, 10, 2]) + 5 + """ + arr = sorted(arr) + return arr[len(arr) // 2] + + +def median_of_medians(arr: list) -> int: + """ + Return a pivot to partition data on by calculating + Median of medians of input data + :param arr: The data to be checked (a list) + :return: median of medians of input array + + >>> median_of_medians([2, 4, 5, 7, 899, 54, 32]) + 54 + >>> median_of_medians([5, 7, 899, 54, 32]) + 32 + >>> median_of_medians([5, 4, 3, 2]) + 4 + >>> median_of_medians([3, 5, 7, 10, 2, 12]) + 12 + """ + + if len(arr) <= 5: + return median_of_five(arr) + medians = [] + i = 0 + while i < len(arr): + if (i + 4) <= len(arr): + medians.append(median_of_five(arr[i:].copy())) + else: + medians.append(median_of_five(arr[i : i + 5].copy())) + i += 5 + return median_of_medians(medians) + + +def quick_select(arr: list, target: int) -> int: + """ + Two way partition the data into smaller and greater lists, + in relationship to the pivot + :param arr: The data to be searched (a list) + :param target: The rank to be searched + :return: element at rank target + + >>> quick_select([2, 4, 5, 7, 899, 54, 32], 5) + 32 + >>> quick_select([2, 4, 5, 7, 899, 54, 32], 1) + 2 + >>> quick_select([5, 4, 3, 2], 2) + 3 + >>> quick_select([3, 5, 7, 10, 2, 12], 3) + 5 + """ + + # Invalid Input + if target > len(arr): + return -1 + + # x is the estimated pivot by median of medians algorithm + x = median_of_medians(arr) + left = [] + right = [] + check = False + for i in range(len(arr)): + if arr[i] < x: + left.append(arr[i]) + elif arr[i] > x: + right.append(arr[i]) + elif arr[i] == x and not check: + check = True + else: + right.append(arr[i]) + rank_x = len(left) + 1 + if rank_x == target: + answer = x + elif rank_x > target: + answer = quick_select(left, target) + elif rank_x < target: + answer = quick_select(right, target - rank_x) + return answer + + +print(median_of_five([5, 4, 3, 2])) From 8e108ed92ab9a50d5a3e6f647fa33238270e21d1 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 8 Oct 2023 15:43:07 -0400 Subject: [PATCH 1070/1543] Rename maths/binary_exponentiation_3.py (#9656) * updating DIRECTORY.md * Rename binary_exponentiation_3.py Rename binary_exponentiation_3.py to binary_exponentiation_2.py because the original binary_exponentiation_2.py was renamed to binary_multiplication.py in PR #9513 * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 5 +++-- ...binary_exponentiation_3.py => binary_exponentiation_2.py} | 0 2 files changed, 3 insertions(+), 2 deletions(-) rename maths/{binary_exponentiation_3.py => binary_exponentiation_2.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index a975b9264be0..55b270624438 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -556,7 +556,7 @@ * [Bell Numbers](maths/bell_numbers.py) * [Binary Exp Mod](maths/binary_exp_mod.py) * [Binary Exponentiation](maths/binary_exponentiation.py) - * [Binary Exponentiation 3](maths/binary_exponentiation_3.py) + * [Binary Exponentiation 2](maths/binary_exponentiation_2.py) * 
[Binary Multiplication](maths/binary_multiplication.py) * [Binomial Coefficient](maths/binomial_coefficient.py) * [Binomial Distribution](maths/binomial_distribution.py) @@ -588,7 +588,6 @@ * [Find Min](maths/find_min.py) * [Floor](maths/floor.py) * [Gamma](maths/gamma.py) - * [Gamma Recursive](maths/gamma_recursive.py) * [Gaussian](maths/gaussian.py) * [Gaussian Error Linear Unit](maths/gaussian_error_linear_unit.py) * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) @@ -723,6 +722,7 @@ * Activation Functions * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) + * [Mish](neural_network/activation_functions/mish.py) * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) * [Sigmoid Linear Unit](neural_network/activation_functions/sigmoid_linear_unit.py) @@ -748,6 +748,7 @@ * [Linear Congruential Generator](other/linear_congruential_generator.py) * [Lru Cache](other/lru_cache.py) * [Magicdiamondpattern](other/magicdiamondpattern.py) + * [Majority Vote Algorithm](other/majority_vote_algorithm.py) * [Maximum Subsequence](other/maximum_subsequence.py) * [Nested Brackets](other/nested_brackets.py) * [Number Container System](other/number_container_system.py) diff --git a/maths/binary_exponentiation_3.py b/maths/binary_exponentiation_2.py similarity index 100% rename from maths/binary_exponentiation_3.py rename to maths/binary_exponentiation_2.py From 2d02500332533bb314f91675a3c30ea05bd52b5a Mon Sep 17 00:00:00 2001 From: halfhearted <99018821+Arunsiva003@users.noreply.github.com> Date: Mon, 9 Oct 2023 01:14:49 +0530 Subject: [PATCH 1071/1543] equilibrium index in an array (#9856) * equilibrium index in an array * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * equilibrium index in an array * equilibrium index in an array * equilibrium index in an array removed type in docstring --------- Co-authored-by: ArunSiva Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../arrays/equilibrium_index_in_array.py | 59 +++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 data_structures/arrays/equilibrium_index_in_array.py diff --git a/data_structures/arrays/equilibrium_index_in_array.py b/data_structures/arrays/equilibrium_index_in_array.py new file mode 100644 index 000000000000..4099896d226d --- /dev/null +++ b/data_structures/arrays/equilibrium_index_in_array.py @@ -0,0 +1,59 @@ +""" +Find the Equilibrium Index of an Array. +Reference: https://www.geeksforgeeks.org/equilibrium-index-of-an-array/ + +Python doctests can be run with the following command: +python -m doctest -v equilibrium_index.py + +Given a sequence arr[] of size n, this function returns +an equilibrium index (if any) or -1 if no equilibrium index exists. + +The equilibrium index of an array is an index such that the sum of +elements at lower indexes is equal to the sum of elements at higher indexes. + + + +Example Input: +arr = [-7, 1, 5, 2, -4, 3, 0] +Output: 3 + +""" + + +def equilibrium_index(arr: list[int], size: int) -> int: + """ + Find the equilibrium index of an array. + + Args: + arr : The input array of integers. + size : The size of the array. + + Returns: + int: The equilibrium index or -1 if no equilibrium index exists. 
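+
+    The implementation keeps a running sum of the elements to the left and obtains
+    the sum of the elements to the right by subtracting each element from the total,
+    so no extra arrays are needed.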
+ + Examples: + >>> equilibrium_index([-7, 1, 5, 2, -4, 3, 0], 7) + 3 + >>> equilibrium_index([1, 2, 3, 4, 5], 5) + -1 + >>> equilibrium_index([1, 1, 1, 1, 1], 5) + 2 + >>> equilibrium_index([2, 4, 6, 8, 10, 3], 6) + -1 + """ + total_sum = sum(arr) + left_sum = 0 + + for i in range(size): + total_sum -= arr[i] + if left_sum == total_sum: + return i + left_sum += arr[i] + + return -1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 66e4ea6a621cccabd6116f1543432899a4411daa Mon Sep 17 00:00:00 2001 From: Anshu Sharma <142900182+AnshuSharma111@users.noreply.github.com> Date: Mon, 9 Oct 2023 01:47:22 +0530 Subject: [PATCH 1072/1543] Consolidated two scripts reverse_letters.py and reverse_long_words.py into one (#10140) * Conolidated two scripts reverse_letters.py and reverse_long_words.py into one because of similar functionality * Added a new line to accomodate characters without going over 88 char limit * fixed grammar to pass pre-commit * Changed faulty test case entirely to pass pre commit * fixed a test case which was wrong --------- Co-authored-by: Keyboard-1 <142900182+Keyboard-1@users.noreply.github.com> --- DIRECTORY.md | 1 - strings/reverse_letters.py | 27 ++++++++++++++++----------- strings/reverse_long_words.py | 21 --------------------- 3 files changed, 16 insertions(+), 33 deletions(-) delete mode 100644 strings/reverse_long_words.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 55b270624438..b1a23a239b01 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1197,7 +1197,6 @@ * [Rabin Karp](strings/rabin_karp.py) * [Remove Duplicate](strings/remove_duplicate.py) * [Reverse Letters](strings/reverse_letters.py) - * [Reverse Long Words](strings/reverse_long_words.py) * [Reverse Words](strings/reverse_words.py) * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) diff --git a/strings/reverse_letters.py b/strings/reverse_letters.py index 10b8a6d72a0f..4f73f816b382 100644 --- a/strings/reverse_letters.py +++ b/strings/reverse_letters.py @@ -1,19 +1,24 @@ -def reverse_letters(input_str: str) -> str: +def reverse_letters(sentence: str, length: int = 0) -> str: """ - Reverses letters in a given string without adjusting the position of the words - >>> reverse_letters('The cat in the hat') - 'ehT tac ni eht tah' - >>> reverse_letters('The quick brown fox jumped over the lazy dog.') - 'ehT kciuq nworb xof depmuj revo eht yzal .god' - >>> reverse_letters('Is this true?') - 'sI siht ?eurt' - >>> reverse_letters("I love Python") - 'I evol nohtyP' + Reverse all words that are longer than the given length of characters in a sentence. 
+ If unspecified, length is taken as 0 + + >>> reverse_letters("Hey wollef sroirraw", 3) + 'Hey fellow warriors' + >>> reverse_letters("nohtyP is nohtyP", 2) + 'Python is Python' + >>> reverse_letters("1 12 123 1234 54321 654321", 0) + '1 21 321 4321 12345 123456' + >>> reverse_letters("racecar") + 'racecar' """ - return " ".join([word[::-1] for word in input_str.split()]) + return " ".join( + "".join(word[::-1]) if len(word) > length else word for word in sentence.split() + ) if __name__ == "__main__": import doctest doctest.testmod() + print(reverse_letters("Hey wollef sroirraw")) diff --git a/strings/reverse_long_words.py b/strings/reverse_long_words.py deleted file mode 100644 index 39ef11513f40..000000000000 --- a/strings/reverse_long_words.py +++ /dev/null @@ -1,21 +0,0 @@ -def reverse_long_words(sentence: str) -> str: - """ - Reverse all words that are longer than 4 characters in a sentence. - - >>> reverse_long_words("Hey wollef sroirraw") - 'Hey fellow warriors' - >>> reverse_long_words("nohtyP is nohtyP") - 'Python is Python' - >>> reverse_long_words("1 12 123 1234 54321 654321") - '1 12 123 1234 12345 123456' - """ - return " ".join( - "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split() - ) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() - print(reverse_long_words("Hey wollef sroirraw")) From a2b695dabd6e9c6dd82784bd534c2e7570939be2 Mon Sep 17 00:00:00 2001 From: Megan Payne Date: Sun, 8 Oct 2023 23:33:50 +0200 Subject: [PATCH 1073/1543] Added Germain primes algorithm to the maths folder (#10120) * Added algorithm for Germain Primes to maths folder * Fixed test errors Germain primes. * Formatting Germain primes after pre-commit * Fixed path to maths * Update maths/germain_primes.py Co-authored-by: Tianyi Zheng * Update maths/germain_primes.py Co-authored-by: Tianyi Zheng * Added function for safe primes * Update maths/germain_primes.py Co-authored-by: Tianyi Zheng * Apply suggestions from code review --------- Co-authored-by: Megan Payne Co-authored-by: Tianyi Zheng --- maths/germain_primes.py | 72 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 maths/germain_primes.py diff --git a/maths/germain_primes.py b/maths/germain_primes.py new file mode 100644 index 000000000000..078d1967f41a --- /dev/null +++ b/maths/germain_primes.py @@ -0,0 +1,72 @@ +""" +A Sophie Germain prime is any prime p, where 2p + 1 is also prime. +The second number, 2p + 1 is called a safe prime. + +Examples of Germain primes include: 2, 3, 5, 11, 23 + +Their corresponding safe primes: 5, 7, 11, 23, 47 +https://en.wikipedia.org/wiki/Safe_and_Sophie_Germain_primes +""" + +from maths.prime_check import is_prime + + +def is_germain_prime(number: int) -> bool: + """Checks if input number and 2*number + 1 are prime. + + >>> is_germain_prime(3) + True + >>> is_germain_prime(11) + True + >>> is_germain_prime(4) + False + >>> is_germain_prime(23) + True + >>> is_germain_prime(13) + False + >>> is_germain_prime(20) + False + >>> is_germain_prime('abc') + Traceback (most recent call last): + ... + TypeError: Input value must be a positive integer. Input value: abc + """ + if not isinstance(number, int) or number < 1: + msg = f"Input value must be a positive integer. Input value: {number}" + raise TypeError(msg) + + return is_prime(number) and is_prime(2 * number + 1) + + +def is_safe_prime(number: int) -> bool: + """Checks if input number and (number - 1)/2 are prime. + The smallest safe prime is 5, with the Germain prime is 2. 
+ + >>> is_safe_prime(5) + True + >>> is_safe_prime(11) + True + >>> is_safe_prime(1) + False + >>> is_safe_prime(2) + False + >>> is_safe_prime(3) + False + >>> is_safe_prime(47) + True + >>> is_safe_prime('abc') + Traceback (most recent call last): + ... + TypeError: Input value must be a positive integer. Input value: abc + """ + if not isinstance(number, int) or number < 1: + msg = f"Input value must be a positive integer. Input value: {number}" + raise TypeError(msg) + + return (number - 1) % 2 == 0 and is_prime(number) and is_prime((number - 1) // 2) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 2260961a803ebd037f471ef18fa5032a547d42da Mon Sep 17 00:00:00 2001 From: Saahil Mahato <115351000+saahil-mahato@users.noreply.github.com> Date: Mon, 9 Oct 2023 05:04:28 +0545 Subject: [PATCH 1074/1543] Add Soboleva Modified Hyberbolic Tangent function (#10043) * Add Sobovela Modified Hyberbolic Tangent function * fix: typo * Update and rename sobovela_modified_hyperbolic_tangent.py to soboleva_modified_hyperbolic_tangent.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: typo * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- .../soboleva_modified_hyperbolic_tangent.py | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py diff --git a/neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py b/neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py new file mode 100644 index 000000000000..603ac0b7e120 --- /dev/null +++ b/neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py @@ -0,0 +1,49 @@ +""" +This script implements the Soboleva Modified Hyperbolic Tangent function. + +The function applies the Soboleva Modified Hyperbolic Tangent function +to each element of the vector. 
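+
+For parameters a, b, c and d the function is defined element-wise as
+
+    smht(x) = (exp(a * x) - exp(-b * x)) / (exp(c * x) + exp(-d * x))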
+ +More details about the activation function can be found on: +https://en.wikipedia.org/wiki/Soboleva_modified_hyperbolic_tangent +""" + + +import numpy as np + + +def soboleva_modified_hyperbolic_tangent( + vector: np.ndarray, a_value: float, b_value: float, c_value: float, d_value: float +) -> np.ndarray: + """ + Implements the Soboleva Modified Hyperbolic Tangent function + + Parameters: + vector (ndarray): A vector that consists of numeric values + a_value (float): parameter a of the equation + b_value (float): parameter b of the equation + c_value (float): parameter c of the equation + d_value (float): parameter d of the equation + + Returns: + vector (ndarray): Input array after applying SMHT function + + >>> vector = np.array([5.4, -2.4, 6.3, -5.23, 3.27, 0.56]) + >>> soboleva_modified_hyperbolic_tangent(vector, 0.2, 0.4, 0.6, 0.8) + array([ 0.11075085, -0.28236685, 0.07861169, -0.1180085 , 0.22999056, + 0.1566043 ]) + """ + + # Separate the numerator and denominator for simplicity + # Calculate the numerator and denominator element-wise + numerator = np.exp(a_value * vector) - np.exp(-b_value * vector) + denominator = np.exp(c_value * vector) + np.exp(-d_value * vector) + + # Calculate and return the final result element-wise + return numerator / denominator + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From ed19b1cf0c3d8284027e17cc025d65b3f924acc0 Mon Sep 17 00:00:00 2001 From: Saahil Mahato <115351000+saahil-mahato@users.noreply.github.com> Date: Mon, 9 Oct 2023 05:19:50 +0545 Subject: [PATCH 1075/1543] Add binary step activation function (#10030) * Add binary step activation function * fix: ruff line too long error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor: add link to directory * revert: add link to directory * fix: algorithm bug and docs * Update neural_network/activation_functions/binary_step.py * fix: ruff line too long error --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- .../activation_functions/binary_step.py | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 neural_network/activation_functions/binary_step.py diff --git a/neural_network/activation_functions/binary_step.py b/neural_network/activation_functions/binary_step.py new file mode 100644 index 000000000000..8f8f4d405fd2 --- /dev/null +++ b/neural_network/activation_functions/binary_step.py @@ -0,0 +1,36 @@ +""" +This script demonstrates the implementation of the Binary Step function. 
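+
+It maps every element x of the input vector to 1 if x >= 0 and to 0 otherwise.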
+ +It's an activation function in which the neuron is activated if the input is positive +or 0, else it is deactivated + +It's a simple activation function which is mentioned in this wikipedia article: +https://en.wikipedia.org/wiki/Activation_function +""" + + +import numpy as np + + +def binary_step(vector: np.ndarray) -> np.ndarray: + """ + Implements the binary step function + + Parameters: + vector (ndarray): A vector that consists of numeric values + + Returns: + vector (ndarray): Input vector after applying binary step function + + >>> vector = np.array([-1.2, 0, 2, 1.45, -3.7, 0.3]) + >>> binary_step(vector) + array([0, 1, 1, 1, 0, 1]) + """ + + return np.where(vector >= 0, 1, 0) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 12e8e9ca876ed3ae7f1effa1de407ca29a06cb36 Mon Sep 17 00:00:00 2001 From: Sai Harsha Kottapalli Date: Mon, 9 Oct 2023 17:36:16 +0530 Subject: [PATCH 1076/1543] Add DocTests to is_palindrome.py (#10081) * add doctest ut * test complete * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * format * ruff update * cover line 154 * Update data_structures/linked_list/is_palindrome.py Co-authored-by: Christian Clauss * use dataclass * pre-commit fix * Fix mypy errors * use future annotations --------- Co-authored-by: Harsha Kottapalli Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/linked_list/is_palindrome.py | 176 +++++++++++++++---- 1 file changed, 142 insertions(+), 34 deletions(-) diff --git a/data_structures/linked_list/is_palindrome.py b/data_structures/linked_list/is_palindrome.py index d540fb69f36b..7d89f085c67f 100644 --- a/data_structures/linked_list/is_palindrome.py +++ b/data_structures/linked_list/is_palindrome.py @@ -1,65 +1,167 @@ -def is_palindrome(head): +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass +class ListNode: + val: int = 0 + next_node: ListNode | None = None + + +def is_palindrome(head: ListNode | None) -> bool: + """ + Check if a linked list is a palindrome. + + Args: + head: The head of the linked list. + + Returns: + bool: True if the linked list is a palindrome, False otherwise. + + Examples: + >>> is_palindrome(None) + True + + >>> is_palindrome(ListNode(1)) + True + + >>> is_palindrome(ListNode(1, ListNode(2))) + False + + >>> is_palindrome(ListNode(1, ListNode(2, ListNode(1)))) + True + + >>> is_palindrome(ListNode(1, ListNode(2, ListNode(2, ListNode(1))))) + True + """ if not head: return True # split the list to two parts - fast, slow = head.next, head - while fast and fast.next: - fast = fast.next.next - slow = slow.next - second = slow.next - slow.next = None # Don't forget here! But forget still works! + fast: ListNode | None = head.next_node + slow: ListNode | None = head + while fast and fast.next_node: + fast = fast.next_node.next_node + slow = slow.next_node if slow else None + if slow: + # slow will always be defined, + # adding this check to resolve mypy static check + second = slow.next_node + slow.next_node = None # Don't forget here! But forget still works! 
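+        # `second` now heads the detached second half of the list, while
+        # the first half ends at `slow`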
# reverse the second part - node = None + node: ListNode | None = None while second: - nxt = second.next - second.next = node + nxt = second.next_node + second.next_node = node node = second second = nxt # compare two parts # second part has the same or one less node - while node: + while node and head: if node.val != head.val: return False - node = node.next - head = head.next + node = node.next_node + head = head.next_node return True -def is_palindrome_stack(head): - if not head or not head.next: +def is_palindrome_stack(head: ListNode | None) -> bool: + """ + Check if a linked list is a palindrome using a stack. + + Args: + head (ListNode): The head of the linked list. + + Returns: + bool: True if the linked list is a palindrome, False otherwise. + + Examples: + >>> is_palindrome_stack(None) + True + + >>> is_palindrome_stack(ListNode(1)) + True + + >>> is_palindrome_stack(ListNode(1, ListNode(2))) + False + + >>> is_palindrome_stack(ListNode(1, ListNode(2, ListNode(1)))) + True + + >>> is_palindrome_stack(ListNode(1, ListNode(2, ListNode(2, ListNode(1))))) + True + """ + if not head or not head.next_node: return True # 1. Get the midpoint (slow) - slow = fast = cur = head - while fast and fast.next: - fast, slow = fast.next.next, slow.next - - # 2. Push the second half into the stack - stack = [slow.val] - while slow.next: - slow = slow.next - stack.append(slow.val) - - # 3. Comparison - while stack: - if stack.pop() != cur.val: - return False - cur = cur.next + slow: ListNode | None = head + fast: ListNode | None = head + while fast and fast.next_node: + fast = fast.next_node.next_node + slow = slow.next_node if slow else None + + # slow will always be defined, + # adding this check to resolve mypy static check + if slow: + stack = [slow.val] + + # 2. Push the second half into the stack + while slow.next_node: + slow = slow.next_node + stack.append(slow.val) + + # 3. Comparison + cur: ListNode | None = head + while stack and cur: + if stack.pop() != cur.val: + return False + cur = cur.next_node return True -def is_palindrome_dict(head): - if not head or not head.next: +def is_palindrome_dict(head: ListNode | None) -> bool: + """ + Check if a linked list is a palindrome using a dictionary. + + Args: + head (ListNode): The head of the linked list. + + Returns: + bool: True if the linked list is a palindrome, False otherwise. 
+ + Examples: + >>> is_palindrome_dict(None) + True + + >>> is_palindrome_dict(ListNode(1)) + True + + >>> is_palindrome_dict(ListNode(1, ListNode(2))) + False + + >>> is_palindrome_dict(ListNode(1, ListNode(2, ListNode(1)))) + True + + >>> is_palindrome_dict(ListNode(1, ListNode(2, ListNode(2, ListNode(1))))) + True + + >>> is_palindrome_dict(\ + ListNode(\ + 1, ListNode(2, ListNode(1, ListNode(3, ListNode(2, ListNode(1))))))) + False + """ + if not head or not head.next_node: return True - d = {} + d: dict[int, list[int]] = {} pos = 0 while head: if head.val in d: d[head.val].append(pos) else: d[head.val] = [pos] - head = head.next + head = head.next_node pos += 1 checksum = pos - 1 middle = 0 @@ -75,3 +177,9 @@ def is_palindrome_dict(head): if middle > 1: return False return True + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 876087be998d5b366d68cbb9394b6b92b7f619f6 Mon Sep 17 00:00:00 2001 From: Sai Harsha Kottapalli Date: Mon, 9 Oct 2023 17:46:43 +0530 Subject: [PATCH 1077/1543] Add DocTests to magicdiamondpattern.py (#10135) * magicdiamondpattern doctest * remove start part --------- Co-authored-by: Harsha Kottapalli --- other/magicdiamondpattern.py | 76 ++++++++++++++++++++++++------------ 1 file changed, 50 insertions(+), 26 deletions(-) diff --git a/other/magicdiamondpattern.py b/other/magicdiamondpattern.py index 89b973bb41e8..58889280ab17 100644 --- a/other/magicdiamondpattern.py +++ b/other/magicdiamondpattern.py @@ -4,52 +4,76 @@ # Function to print upper half of diamond (pyramid) def floyd(n): """ - Parameters: - n : size of pattern + Print the upper half of a diamond pattern with '*' characters. + + Args: + n (int): Size of the pattern. + + Examples: + >>> floyd(3) + ' * \\n * * \\n* * * \\n' + + >>> floyd(5) + ' * \\n * * \\n * * * \\n * * * * \\n* * * * * \\n' """ + result = "" for i in range(n): for _ in range(n - i - 1): # printing spaces - print(" ", end="") + result += " " for _ in range(i + 1): # printing stars - print("* ", end="") - print() + result += "* " + result += "\n" + return result # Function to print lower half of diamond (pyramid) def reverse_floyd(n): """ - Parameters: - n : size of pattern + Print the lower half of a diamond pattern with '*' characters. + + Args: + n (int): Size of the pattern. + + Examples: + >>> reverse_floyd(3) + '* * * \\n * * \\n * \\n ' + + >>> reverse_floyd(5) + '* * * * * \\n * * * * \\n * * * \\n * * \\n * \\n ' """ + result = "" for i in range(n, 0, -1): for _ in range(i, 0, -1): # printing stars - print("* ", end="") - print() + result += "* " + result += "\n" for _ in range(n - i + 1, 0, -1): # printing spaces - print(" ", end="") + result += " " + return result # Function to print complete diamond pattern of "*" def pretty_print(n): """ - Parameters: - n : size of pattern + Print a complete diamond pattern with '*' characters. + + Args: + n (int): Size of the pattern. + + Examples: + >>> pretty_print(0) + ' ... .... nothing printing :(' + + >>> pretty_print(3) + ' * \\n * * \\n* * * \\n* * * \\n * * \\n * \\n ' """ if n <= 0: - print(" ... .... nothing printing :(") - return - floyd(n) # upper half - reverse_floyd(n) # lower half + return " ... .... 
nothing printing :(" + upper_half = floyd(n) # upper half + lower_half = reverse_floyd(n) # lower half + return upper_half + lower_half if __name__ == "__main__": - print(r"| /\ | |- | |- |--| |\ /| |-") - print(r"|/ \| |- |_ |_ |__| | \/ | |_") - K = 1 - while K: - user_number = int(input("enter the number and , and see the magic : ")) - print() - pretty_print(user_number) - K = int(input("press 0 to exit... and 1 to continue...")) - - print("Good Bye...") + import doctest + + doctest.testmod() From 583a614fefaa9c932e6d650abfea2eaa75a93b05 Mon Sep 17 00:00:00 2001 From: Siddik Patel <70135775+Siddikpatel@users.noreply.github.com> Date: Mon, 9 Oct 2023 17:49:12 +0530 Subject: [PATCH 1078/1543] Removed redundant greatest_common_divisor code (#9358) * Deleted greatest_common_divisor def from many files and instead imported the method from Maths folder * Deleted greatest_common_divisor def from many files and instead imported the method from Maths folder, also fixed comments * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Deleted greatest_common_divisor def from many files and instead imported the method from Maths folder, also fixed comments * Imports organized * recursive gcd function implementation rolledback * more gcd duplicates removed * more gcd duplicates removed * Update maths/carmichael_number.py * updated files * moved a file to another location --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- blockchain/diophantine_equation.py | 32 +++--------------------- ciphers/affine_cipher.py | 6 +++-- ciphers/cryptomath_module.py | 7 ++---- ciphers/hill_cipher.py | 14 +---------- ciphers/rsa_key_generator.py | 4 ++- maths/carmichael_number.py | 11 ++------ maths/least_common_multiple.py | 22 ++-------------- maths/primelib.py | 40 +++--------------------------- project_euler/problem_005/sol2.py | 19 ++------------ 9 files changed, 24 insertions(+), 131 deletions(-) diff --git a/blockchain/diophantine_equation.py b/blockchain/diophantine_equation.py index 22b0cad75c63..7110d90230c9 100644 --- a/blockchain/diophantine_equation.py +++ b/blockchain/diophantine_equation.py @@ -1,11 +1,13 @@ from __future__ import annotations +from maths.greatest_common_divisor import greatest_common_divisor + def diophantine(a: int, b: int, c: int) -> tuple[float, float]: """ Diophantine Equation : Given integers a,b,c ( at least one of a and b != 0), the diophantine equation a*x + b*y = c has a solution (where x and y are integers) - iff gcd(a,b) divides c. + iff greatest_common_divisor(a,b) divides c. 
GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor ) @@ -22,7 +24,7 @@ def diophantine(a: int, b: int, c: int) -> tuple[float, float]: assert ( c % greatest_common_divisor(a, b) == 0 - ) # greatest_common_divisor(a,b) function implemented below + ) # greatest_common_divisor(a,b) is in maths directory (d, x, y) = extended_gcd(a, b) # extended_gcd(a,b) function implemented below r = c / d return (r * x, r * y) @@ -69,32 +71,6 @@ def diophantine_all_soln(a: int, b: int, c: int, n: int = 2) -> None: print(x, y) -def greatest_common_divisor(a: int, b: int) -> int: - """ - Euclid's Lemma : d divides a and b, if and only if d divides a-b and b - - Euclid's Algorithm - - >>> greatest_common_divisor(7,5) - 1 - - Note : In number theory, two integers a and b are said to be relatively prime, - mutually prime, or co-prime if the only positive integer (factor) that - divides both of them is 1 i.e., gcd(a,b) = 1. - - >>> greatest_common_divisor(121, 11) - 11 - - """ - if a < b: - a, b = b, a - - while a % b != 0: - a, b = b, a % b - - return b - - def extended_gcd(a: int, b: int) -> tuple[int, int, int]: """ Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers diff --git a/ciphers/affine_cipher.py b/ciphers/affine_cipher.py index cd1e33b88425..10d16367cced 100644 --- a/ciphers/affine_cipher.py +++ b/ciphers/affine_cipher.py @@ -1,6 +1,8 @@ import random import sys +from maths.greatest_common_divisor import gcd_by_iterative + from . import cryptomath_module as cryptomath SYMBOLS = ( @@ -26,7 +28,7 @@ def check_keys(key_a: int, key_b: int, mode: str) -> None: "Key A must be greater than 0 and key B must " f"be between 0 and {len(SYMBOLS) - 1}." ) - if cryptomath.gcd(key_a, len(SYMBOLS)) != 1: + if gcd_by_iterative(key_a, len(SYMBOLS)) != 1: sys.exit( f"Key A {key_a} and the symbol set size {len(SYMBOLS)} " "are not relatively prime. Choose a different key." 
@@ -76,7 +78,7 @@ def get_random_key() -> int: while True: key_b = random.randint(2, len(SYMBOLS)) key_b = random.randint(2, len(SYMBOLS)) - if cryptomath.gcd(key_b, len(SYMBOLS)) == 1 and key_b % len(SYMBOLS) != 0: + if gcd_by_iterative(key_b, len(SYMBOLS)) == 1 and key_b % len(SYMBOLS) != 0: return key_b * len(SYMBOLS) + key_b diff --git a/ciphers/cryptomath_module.py b/ciphers/cryptomath_module.py index 6f15f7b733e6..02e94e4b9e92 100644 --- a/ciphers/cryptomath_module.py +++ b/ciphers/cryptomath_module.py @@ -1,11 +1,8 @@ -def gcd(a: int, b: int) -> int: - while a != 0: - a, b = b % a, a - return b +from maths.greatest_common_divisor import gcd_by_iterative def find_mod_inverse(a: int, m: int) -> int: - if gcd(a, m) != 1: + if gcd_by_iterative(a, m) != 1: msg = f"mod inverse of {a!r} and {m!r} does not exist" raise ValueError(msg) u1, u2, u3 = 1, 0, a diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index b4424e82298e..1201fda901e5 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -39,19 +39,7 @@ import numpy - -def greatest_common_divisor(a: int, b: int) -> int: - """ - >>> greatest_common_divisor(4, 8) - 4 - >>> greatest_common_divisor(8, 4) - 4 - >>> greatest_common_divisor(4, 7) - 1 - >>> greatest_common_divisor(0, 10) - 10 - """ - return b if a == 0 else greatest_common_divisor(b % a, a) +from maths.greatest_common_divisor import greatest_common_divisor class HillCipher: diff --git a/ciphers/rsa_key_generator.py b/ciphers/rsa_key_generator.py index eedc7336804a..44970e8cbc15 100644 --- a/ciphers/rsa_key_generator.py +++ b/ciphers/rsa_key_generator.py @@ -2,6 +2,8 @@ import random import sys +from maths.greatest_common_divisor import gcd_by_iterative + from . import cryptomath_module, rabin_miller @@ -27,7 +29,7 @@ def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]: # Generate e that is relatively prime to (p - 1) * (q - 1) while True: e = random.randrange(2 ** (key_size - 1), 2 ** (key_size)) - if cryptomath_module.gcd(e, (p - 1) * (q - 1)) == 1: + if gcd_by_iterative(e, (p - 1) * (q - 1)) == 1: break # Calculate d that is mod inverse of e diff --git a/maths/carmichael_number.py b/maths/carmichael_number.py index c9c144759246..81712520ffc7 100644 --- a/maths/carmichael_number.py +++ b/maths/carmichael_number.py @@ -10,14 +10,7 @@ Examples of Carmichael Numbers: 561, 1105, ... https://en.wikipedia.org/wiki/Carmichael_number """ - - -def gcd(a: int, b: int) -> int: - if a < b: - return gcd(b, a) - if a % b == 0: - return b - return gcd(b, a % b) +from maths.greatest_common_divisor import greatest_common_divisor def power(x: int, y: int, mod: int) -> int: @@ -33,7 +26,7 @@ def power(x: int, y: int, mod: int) -> int: def is_carmichael_number(n: int) -> bool: b = 2 while b < n: - if gcd(b, n) == 1 and power(b, n - 1, n) != 1: + if greatest_common_divisor(b, n) == 1 and power(b, n - 1, n) != 1: return False b += 1 return True diff --git a/maths/least_common_multiple.py b/maths/least_common_multiple.py index 10cc63ac7990..4f28da8ab2a7 100644 --- a/maths/least_common_multiple.py +++ b/maths/least_common_multiple.py @@ -1,6 +1,8 @@ import unittest from timeit import timeit +from maths.greatest_common_divisor import greatest_common_divisor + def least_common_multiple_slow(first_num: int, second_num: int) -> int: """ @@ -20,26 +22,6 @@ def least_common_multiple_slow(first_num: int, second_num: int) -> int: return common_mult -def greatest_common_divisor(a: int, b: int) -> int: - """ - Calculate Greatest Common Divisor (GCD). 
- see greatest_common_divisor.py - >>> greatest_common_divisor(24, 40) - 8 - >>> greatest_common_divisor(1, 1) - 1 - >>> greatest_common_divisor(1, 800) - 1 - >>> greatest_common_divisor(11, 37) - 1 - >>> greatest_common_divisor(3, 5) - 1 - >>> greatest_common_divisor(16, 4) - 4 - """ - return b if a == 0 else greatest_common_divisor(b % a, a) - - def least_common_multiple_fast(first_num: int, second_num: int) -> int: """ Find the least common multiple of two numbers. diff --git a/maths/primelib.py b/maths/primelib.py index 28b5aee9dcc8..cf01750cf912 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -21,7 +21,6 @@ is_even(number) is_odd(number) -gcd(number1, number2) // greatest common divisor kg_v(number1, number2) // least common multiple get_divisors(number) // all divisors of 'number' inclusive 1, number is_perfect_number(number) @@ -40,6 +39,8 @@ from math import sqrt +from maths.greatest_common_divisor import gcd_by_iterative + def is_prime(number: int) -> bool: """ @@ -317,39 +318,6 @@ def goldbach(number): # ---------------------------------------------- -def gcd(number1, number2): - """ - Greatest common divisor - input: two positive integer 'number1' and 'number2' - returns the greatest common divisor of 'number1' and 'number2' - """ - - # precondition - assert ( - isinstance(number1, int) - and isinstance(number2, int) - and (number1 >= 0) - and (number2 >= 0) - ), "'number1' and 'number2' must been positive integer." - - rest = 0 - - while number2 != 0: - rest = number1 % number2 - number1 = number2 - number2 = rest - - # precondition - assert isinstance(number1, int) and ( - number1 >= 0 - ), "'number' must been from type int and positive" - - return number1 - - -# ---------------------------------------------------- - - def kg_v(number1, number2): """ Least common multiple @@ -567,14 +535,14 @@ def simplify_fraction(numerator, denominator): ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. - gcd_of_fraction = gcd(abs(numerator), abs(denominator)) + gcd_of_fraction = gcd_by_iterative(abs(numerator), abs(denominator)) # precondition assert ( isinstance(gcd_of_fraction, int) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) - ), "Error in function gcd(...,...)" + ), "Error in function gcd_by_iterative(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) diff --git a/project_euler/problem_005/sol2.py b/project_euler/problem_005/sol2.py index 1b3e5e130f03..4558e21fd0f0 100644 --- a/project_euler/problem_005/sol2.py +++ b/project_euler/problem_005/sol2.py @@ -1,3 +1,5 @@ +from maths.greatest_common_divisor import greatest_common_divisor + """ Project Euler Problem 5: https://projecteuler.net/problem=5 @@ -16,23 +18,6 @@ """ -def greatest_common_divisor(x: int, y: int) -> int: - """ - Euclidean Greatest Common Divisor algorithm - - >>> greatest_common_divisor(0, 0) - 0 - >>> greatest_common_divisor(23, 42) - 1 - >>> greatest_common_divisor(15, 33) - 3 - >>> greatest_common_divisor(12345, 67890) - 15 - """ - - return x if y == 0 else greatest_common_divisor(y, x % y) - - def lcm(x: int, y: int) -> int: """ Least Common Multiple. 
From 53d78b9cc09021c8f65fae41f8b345304a88aedd Mon Sep 17 00:00:00 2001 From: Kausthub Kannan Date: Mon, 9 Oct 2023 20:03:47 +0530 Subject: [PATCH 1079/1543] Added Huber Loss Function (#10141) --- machine_learning/loss_functions/huber_loss.py | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 machine_learning/loss_functions/huber_loss.py diff --git a/machine_learning/loss_functions/huber_loss.py b/machine_learning/loss_functions/huber_loss.py new file mode 100644 index 000000000000..202e013f2928 --- /dev/null +++ b/machine_learning/loss_functions/huber_loss.py @@ -0,0 +1,52 @@ +""" +Huber Loss Function + +Description: +Huber loss function describes the penalty incurred by an estimation procedure. +It serves as a measure of the model's accuracy in regression tasks. + +Formula: +Huber Loss = if |y_true - y_pred| <= delta then 0.5 * (y_true - y_pred)^2 + else delta * |y_true - y_pred| - 0.5 * delta^2 + +Source: +[Wikipedia - Huber Loss](https://en.wikipedia.org/wiki/Huber_loss) +""" + +import numpy as np + + +def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float: + """ + Calculate the mean of Huber Loss. + + Parameters: + - y_true: The true values (ground truth). + - y_pred: The predicted values. + + Returns: + - huber_loss: The mean of Huber Loss between y_true and y_pred. + + Example usage: + >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102) + True + >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0]) + >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) + >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164) + True + """ + + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + huber_mse = 0.5 * (y_true - y_pred) ** 2 + huber_mae = delta * (np.abs(y_true - y_pred) - 0.5 * delta) + return np.where(np.abs(y_true - y_pred) <= delta, huber_mse, huber_mae).mean() + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c0da015b7d49f9f6e99fffd279f65c5605a0ebe1 Mon Sep 17 00:00:00 2001 From: Sai Harsha Kottapalli Date: Mon, 9 Oct 2023 20:49:05 +0530 Subject: [PATCH 1080/1543] Add DocTests to diffie.py (#10156) * diffie doctest * fix ut * update doctest --------- Co-authored-by: Harsha Kottapalli --- ciphers/diffie.py | 45 +++++++++++++++++++++++++++++++++------------ 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/ciphers/diffie.py b/ciphers/diffie.py index 4ff90be009c1..1e1e868999b6 100644 --- a/ciphers/diffie.py +++ b/ciphers/diffie.py @@ -1,11 +1,28 @@ from __future__ import annotations -def find_primitive(n: int) -> int | None: - for r in range(1, n): +def find_primitive(modulus: int) -> int | None: + """ + Find a primitive root modulo modulus, if one exists. + + Args: + modulus : The modulus for which to find a primitive root. + + Returns: + The primitive root if one exists, or None if there is none. 
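+
+    Here, r is treated as a primitive root of modulus when the powers
+    r^0, r^1, ..., r^(modulus - 2) are all distinct modulo modulus.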
+ + Examples: + >>> find_primitive(7) # Modulo 7 has primitive root 3 + 3 + >>> find_primitive(11) # Modulo 11 has primitive root 2 + 2 + >>> find_primitive(8) == None # Modulo 8 has no primitive root + True + """ + for r in range(1, modulus): li = [] - for x in range(n - 1): - val = pow(r, x, n) + for x in range(modulus - 1): + val = pow(r, x, modulus) if val in li: break li.append(val) @@ -15,18 +32,22 @@ def find_primitive(n: int) -> int | None: if __name__ == "__main__": - q = int(input("Enter a prime number q: ")) - a = find_primitive(q) - if a is None: - print(f"Cannot find the primitive for the value: {a!r}") + import doctest + + doctest.testmod() + + prime = int(input("Enter a prime number q: ")) + primitive_root = find_primitive(prime) + if primitive_root is None: + print(f"Cannot find the primitive for the value: {primitive_root!r}") else: a_private = int(input("Enter private key of A: ")) - a_public = pow(a, a_private, q) + a_public = pow(primitive_root, a_private, prime) b_private = int(input("Enter private key of B: ")) - b_public = pow(a, b_private, q) + b_public = pow(primitive_root, b_private, prime) - a_secret = pow(b_public, a_private, q) - b_secret = pow(a_public, b_private, q) + a_secret = pow(b_public, a_private, prime) + b_secret = pow(a_public, b_private, prime) print("The key value generated by A is: ", a_secret) print("The key value generated by B is: ", b_secret) From ba828fe621d1f5623fffcf0014b243da3a6122fc Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Mon, 9 Oct 2023 20:46:38 +0500 Subject: [PATCH 1081/1543] test_digital_image_processing -> test_local_binary_pattern replacing a large image with a smaller one (#10161) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Replaced lena.jpg with lena_small.jpg to make tests faster. * Update digital_image_processing/test_digital_image_processing.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update test_digital_image_processing.py tests fail, I'll try an empty commit * Apply suggestions from code review * Update test_digital_image_processing.py added clarifications * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update test_digital_image_processing.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- .../test_digital_image_processing.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index 528b4bc3b74c..2e5630458c8e 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -96,9 +96,16 @@ def test_nearest_neighbour( def test_local_binary_pattern(): - file_path = "digital_image_processing/image_data/lena.jpg" + # pull request 10161 before: + # "digital_image_processing/image_data/lena.jpg" + # after: "digital_image_processing/image_data/lena_small.jpg" - # Reading the image and converting it to grayscale. 
+ from os import getenv # Speed up our Continuous Integration tests + + file_name = "lena_small.jpg" if getenv("CI") else "lena.jpg" + file_path = f"digital_image_processing/image_data/{file_name}" + + # Reading the image and converting it to grayscale image = imread(file_path, 0) # Test for get_neighbors_pixel function() return not None From 844270c6e91387940e062a1522f58bde1026bb08 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 9 Oct 2023 19:42:07 +0200 Subject: [PATCH 1082/1543] Remove backslashes from is_palindrome.py (#10169) @SaiHarshaK Fixes https://github.com/TheAlgorithms/Python/pull/10081#discussion_r1349651289 --- data_structures/linked_list/is_palindrome.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/data_structures/linked_list/is_palindrome.py b/data_structures/linked_list/is_palindrome.py index 7d89f085c67f..f949d9a2f201 100644 --- a/data_structures/linked_list/is_palindrome.py +++ b/data_structures/linked_list/is_palindrome.py @@ -147,9 +147,11 @@ def is_palindrome_dict(head: ListNode | None) -> bool: >>> is_palindrome_dict(ListNode(1, ListNode(2, ListNode(2, ListNode(1))))) True - >>> is_palindrome_dict(\ - ListNode(\ - 1, ListNode(2, ListNode(1, ListNode(3, ListNode(2, ListNode(1))))))) + >>> is_palindrome_dict( + ... ListNode( + ... 1, ListNode(2, ListNode(1, ListNode(3, ListNode(2, ListNode(1))))) + ... ) + ... ) False """ if not head or not head.next_node: From 5d0a46814e5b69f79d623187912c0f81ab5ab7a7 Mon Sep 17 00:00:00 2001 From: aryan1165 <111041731+aryan1165@users.noreply.github.com> Date: Tue, 10 Oct 2023 01:08:04 +0530 Subject: [PATCH 1083/1543] Added ciphers/permutation_cipher.py. (#9163) * Added permutation_cipher.py * Added type hints for parameters * Added doctest in functions * Update ciphers/permutation_cipher.py Ya i felt the same but held back because there is a implementation of transposition_cipher.py. But that's is different from the one i have implemented here. Co-authored-by: Tianyi Zheng * Update ciphers/permutation_cipher.py Co-authored-by: Tianyi Zheng * Update ciphers/permutation_cipher.py Co-authored-by: Tianyi Zheng * Update ciphers/permutation_cipher.py Co-authored-by: Tianyi Zheng * Update ciphers/permutation_cipher.py Co-authored-by: Tianyi Zheng * Update ciphers/permutation_cipher.py Co-authored-by: Tianyi Zheng * Update ciphers/permutation_cipher.py Co-authored-by: Tianyi Zheng * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes --------- Co-authored-by: Tianyi Zheng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ciphers/permutation_cipher.py | 142 ++++++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 ciphers/permutation_cipher.py diff --git a/ciphers/permutation_cipher.py b/ciphers/permutation_cipher.py new file mode 100644 index 000000000000..c3f3fd1f7f94 --- /dev/null +++ b/ciphers/permutation_cipher.py @@ -0,0 +1,142 @@ +""" +The permutation cipher, also called the transposition cipher, is a simple encryption +technique that rearranges the characters in a message based on a secret key. It +divides the message into blocks and applies a permutation to the characters within +each block according to the key. The key is a sequence of unique integers that +determine the order of character rearrangement. 
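+
+For example, with block size 3 and key [2, 0, 1], the block "ABC" is
+rearranged to "CAB".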
+ +For more info: https://www.nku.edu/~christensen/1402%20permutation%20ciphers.pdf +""" +import random + + +def generate_valid_block_size(message_length: int) -> int: + """ + Generate a valid block size that is a factor of the message length. + + Args: + message_length (int): The length of the message. + + Returns: + int: A valid block size. + + Example: + >>> random.seed(1) + >>> generate_valid_block_size(12) + 3 + """ + block_sizes = [ + block_size + for block_size in range(2, message_length + 1) + if message_length % block_size == 0 + ] + return random.choice(block_sizes) + + +def generate_permutation_key(block_size: int) -> list[int]: + """ + Generate a random permutation key of a specified block size. + + Args: + block_size (int): The size of each permutation block. + + Returns: + list[int]: A list containing a random permutation of digits. + + Example: + >>> random.seed(0) + >>> generate_permutation_key(4) + [2, 0, 1, 3] + """ + digits = list(range(block_size)) + random.shuffle(digits) + return digits + + +def encrypt( + message: str, key: list[int] | None = None, block_size: int | None = None +) -> tuple[str, list[int]]: + """ + Encrypt a message using a permutation cipher with block rearrangement using a key. + + Args: + message (str): The plaintext message to be encrypted. + key (list[int]): The permutation key for decryption. + block_size (int): The size of each permutation block. + + Returns: + tuple: A tuple containing the encrypted message and the encryption key. + + Example: + >>> encrypted_message, key = encrypt("HELLO WORLD") + >>> decrypted_message = decrypt(encrypted_message, key) + >>> decrypted_message + 'HELLO WORLD' + """ + message = message.upper() + message_length = len(message) + + if key is None or block_size is None: + block_size = generate_valid_block_size(message_length) + key = generate_permutation_key(block_size) + + encrypted_message = "" + + for i in range(0, message_length, block_size): + block = message[i : i + block_size] + rearranged_block = [block[digit] for digit in key] + encrypted_message += "".join(rearranged_block) + + return encrypted_message, key + + +def decrypt(encrypted_message: str, key: list[int]) -> str: + """ + Decrypt an encrypted message using a permutation cipher with block rearrangement. + + Args: + encrypted_message (str): The encrypted message. + key (list[int]): The permutation key for decryption. + + Returns: + str: The decrypted plaintext message. + + Example: + >>> encrypted_message, key = encrypt("HELLO WORLD") + >>> decrypted_message = decrypt(encrypted_message, key) + >>> decrypted_message + 'HELLO WORLD' + """ + key_length = len(key) + decrypted_message = "" + + for i in range(0, len(encrypted_message), key_length): + block = encrypted_message[i : i + key_length] + original_block = [""] * key_length + for j, digit in enumerate(key): + original_block[digit] = block[j] + decrypted_message += "".join(original_block) + + return decrypted_message + + +def main() -> None: + """ + Driver function to pass message to get encrypted, then decrypted. 
+ + Example: + >>> main() + Decrypted message: HELLO WORLD + """ + message = "HELLO WORLD" + encrypted_message, key = encrypt(message) + + decrypted_message = decrypt(encrypted_message, key) + print(f"Decrypted message: {decrypted_message}") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() From b0aa35c7b360f1d141705b97c89d51603a3461a6 Mon Sep 17 00:00:00 2001 From: Dean Bring Date: Mon, 9 Oct 2023 14:21:46 -0700 Subject: [PATCH 1084/1543] Added the Chebyshev distance function (#10144) * Added the Chebyshev distance function * Remove float cast and made error handling more precise --- maths/chebyshev_distance.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 maths/chebyshev_distance.py diff --git a/maths/chebyshev_distance.py b/maths/chebyshev_distance.py new file mode 100644 index 000000000000..4801d391621f --- /dev/null +++ b/maths/chebyshev_distance.py @@ -0,0 +1,20 @@ +def chebyshev_distance(point_a: list[float], point_b: list[float]) -> float: + """ + This function calculates the Chebyshev distance (also known as the + Chessboard distance) between two n-dimensional points represented as lists. + + https://en.wikipedia.org/wiki/Chebyshev_distance + + >>> chebyshev_distance([1.0, 1.0], [2.0, 2.0]) + 1.0 + >>> chebyshev_distance([1.0, 1.0, 9.0], [2.0, 2.0, -5.2]) + 14.2 + >>> chebyshev_distance([1.0], [2.0, 2.0]) + Traceback (most recent call last): + ... + ValueError: Both points must have the same dimension. + """ + if len(point_a) != len(point_b): + raise ValueError("Both points must have the same dimension.") + + return max(abs(a - b) for a, b in zip(point_a, point_b)) From 53638fcec4ff990ced9afb569c18b927df652596 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 23:38:32 +0200 Subject: [PATCH 1085/1543] [pre-commit.ci] pre-commit autoupdate (#10197) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.4.0 → v4.5.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.4.0...v4.5.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8a88dcc07622..7340a0fd08ee 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: check-executables-have-shebangs - id: check-toml From b9a797f3d4f1a66da1e213bd92e08fa9cf6c3643 Mon Sep 17 00:00:00 2001 From: Dean Bring Date: Mon, 9 Oct 2023 16:00:37 -0700 Subject: [PATCH 1086/1543] Added the Minkowski distance function (#10143) * Added the Minkowski distance function * Made error handling more precise * Added note about floating point errors and corresponding doctest --- maths/minkowski_distance.py | 45 +++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 maths/minkowski_distance.py diff --git a/maths/minkowski_distance.py b/maths/minkowski_distance.py new file mode 100644 index 000000000000..3237124e8d36 --- /dev/null +++ b/maths/minkowski_distance.py @@ -0,0 +1,45 @@ +def minkowski_distance( + point_a: list[float], + point_b: list[float], + order: int, +) -> float: + """ + This function calculates the Minkowski distance for a given order between + two n-dimensional points 
represented as lists. For the case of order = 1, + the Minkowski distance degenerates to the Manhattan distance. For + order = 2, the usual Euclidean distance is obtained. + + https://en.wikipedia.org/wiki/Minkowski_distance + + Note: due to floating point calculation errors the output of this + function may be inaccurate. + + >>> minkowski_distance([1.0, 1.0], [2.0, 2.0], 1) + 2.0 + >>> minkowski_distance([1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], 2) + 8.0 + >>> import numpy as np + >>> np.isclose(5.0, minkowski_distance([5.0], [0.0], 3)) + True + >>> minkowski_distance([1.0], [2.0], -1) + Traceback (most recent call last): + ... + ValueError: The order must be greater than or equal to 1. + >>> minkowski_distance([1.0], [1.0, 2.0], 1) + Traceback (most recent call last): + ... + ValueError: Both points must have the same dimension. + """ + if order < 1: + raise ValueError("The order must be greater than or equal to 1.") + + if len(point_a) != len(point_b): + raise ValueError("Both points must have the same dimension.") + + return sum(abs(a - b) ** order for a, b in zip(point_a, point_b)) ** (1 / order) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 7b996e2c221aa88b5688ea08f2bb3a391b5be2c6 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Tue, 10 Oct 2023 09:16:02 +0500 Subject: [PATCH 1087/1543] backtracking -> word_search - replacing the example in doctest (#10188) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Replacing the example in doctest with a less resource-intensive example. --- backtracking/word_search.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/backtracking/word_search.py b/backtracking/word_search.py index c9d52012b42b..8a9b2f1b5359 100644 --- a/backtracking/word_search.py +++ b/backtracking/word_search.py @@ -98,13 +98,7 @@ def word_exists(board: list[list[str]], word: str) -> bool: False >>> word_exists([["A"]], "A") True - >>> word_exists([["A","A","A","A","A","A"], - ... ["A","A","A","A","A","A"], - ... ["A","A","A","A","A","A"], - ... ["A","A","A","A","A","A"], - ... ["A","A","A","A","A","B"], - ... ["A","A","A","A","B","A"]], - ... "AAAAAAAAAAAAABB") + >>> word_exists([["B", "A", "A"], ["A", "A", "A"], ["A", "B", "A"]], "ABB") False >>> word_exists([["A"]], 123) Traceback (most recent call last): From 4f8fa3c44a29cafaed64a73588a309e88d1f3ded Mon Sep 17 00:00:00 2001 From: Md Mahiuddin <68785084+mahiuddin-dev@users.noreply.github.com> Date: Tue, 10 Oct 2023 10:19:40 +0600 Subject: [PATCH 1088/1543] TypeError for non-integer input (#9250) * type error check * remove str input * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/number_of_digits.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/maths/number_of_digits.py b/maths/number_of_digits.py index 86bc67f72490..bb9c0d248fd1 100644 --- a/maths/number_of_digits.py +++ b/maths/number_of_digits.py @@ -16,7 +16,15 @@ def num_digits(n: int) -> int: 1 >>> num_digits(-123456) 6 + >>> num_digits('123') # Raises a TypeError for non-integer input + Traceback (most recent call last): + ... 
+ TypeError: Input must be an integer """ + + if not isinstance(n, int): + raise TypeError("Input must be an integer") + digits = 0 n = abs(n) while True: @@ -42,7 +50,15 @@ def num_digits_fast(n: int) -> int: 1 >>> num_digits_fast(-123456) 6 + >>> num_digits('123') # Raises a TypeError for non-integer input + Traceback (most recent call last): + ... + TypeError: Input must be an integer """ + + if not isinstance(n, int): + raise TypeError("Input must be an integer") + return 1 if n == 0 else math.floor(math.log(abs(n), 10) + 1) @@ -61,7 +77,15 @@ def num_digits_faster(n: int) -> int: 1 >>> num_digits_faster(-123456) 6 + >>> num_digits('123') # Raises a TypeError for non-integer input + Traceback (most recent call last): + ... + TypeError: Input must be an integer """ + + if not isinstance(n, int): + raise TypeError("Input must be an integer") + return len(str(abs(n))) From 1b4c4e7db216305e059cc087c3f09bc6d3e17575 Mon Sep 17 00:00:00 2001 From: dimonalik <114773527+dimonalik@users.noreply.github.com> Date: Tue, 10 Oct 2023 07:34:36 +0300 Subject: [PATCH 1089/1543] Made problem explanation more clear (#9841) * Update minimum_steps_to_one.py Made the problem explanation more clear and readable * updating DIRECTORY.md * Apply suggestions from code review --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- dynamic_programming/minimum_steps_to_one.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dynamic_programming/minimum_steps_to_one.py b/dynamic_programming/minimum_steps_to_one.py index 8785027fbff3..68eaf56e21a7 100644 --- a/dynamic_programming/minimum_steps_to_one.py +++ b/dynamic_programming/minimum_steps_to_one.py @@ -1,7 +1,7 @@ """ YouTube Explanation: https://www.youtube.com/watch?v=f2xi3c1S95M -Given an integer n, return the minimum steps to 1 +Given an integer n, return the minimum steps from n to 1 AVAILABLE STEPS: * Decrement by 1 From 9c02f1220e571f2922855e245c5a92d4f2220f8a Mon Sep 17 00:00:00 2001 From: AkhilYadavPadala <142014008+AkhilYadavPadala@users.noreply.github.com> Date: Tue, 10 Oct 2023 10:43:32 +0530 Subject: [PATCH 1090/1543] seperation between description and docstrings (#9687) * seperation between description and docstrings * Update maths/factorial.py --------- Co-authored-by: sarayu sree Co-authored-by: Tianyi Zheng --- maths/factorial.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/maths/factorial.py b/maths/factorial.py index 18cacdef9b1f..aaf90f384bb9 100644 --- a/maths/factorial.py +++ b/maths/factorial.py @@ -1,4 +1,5 @@ -"""Factorial of a positive integer -- https://en.wikipedia.org/wiki/Factorial +""" +Factorial of a positive integer -- https://en.wikipedia.org/wiki/Factorial """ From f3acb52cadade9e7d012bf7f50cad32669b67b75 Mon Sep 17 00:00:00 2001 From: Paarth Goyal <138299656+pluto-tofu@users.noreply.github.com> Date: Tue, 10 Oct 2023 10:54:04 +0530 Subject: [PATCH 1091/1543] Added the algorithm to compute Reynolds number in the physics section (#9913) * added the algorithm to compute Reynolds number * fixed file name issue * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- physics/reynolds_number.py | 63 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 physics/reynolds_number.py diff --git a/physics/reynolds_number.py b/physics/reynolds_number.py new file mode 100644 index 000000000000..dffe690f8822 --- /dev/null +++ b/physics/reynolds_number.py @@ -0,0 +1,63 @@ +""" +Title : 
computing the Reynolds number to find + out the type of flow (laminar or turbulent) + +Reynolds number is a dimensionless quantity that is used to determine +the type of flow pattern as laminar or turbulent while flowing through a +pipe. Reynolds number is defined by the ratio of inertial forces to that of +viscous forces. + +R = Inertial Forces / Viscous Forces +R = (ρ * V * D)/μ + +where : +ρ = Density of fluid (in Kg/m^3) +D = Diameter of pipe through which fluid flows (in m) +V = Velocity of flow of the fluid (in m/s) +μ = Viscosity of the fluid (in Ns/m^2) + +If the Reynolds number calculated is high (greater than 2000), then the +flow through the pipe is said to be turbulent. If Reynolds number is low +(less than 2000), the flow is said to be laminar. Numerically, these are +acceptable values, although in general the laminar and turbulent flows +are classified according to a range. Laminar flow falls below Reynolds +number of 1100 and turbulent falls in a range greater than 2200. +Laminar flow is the type of flow in which the fluid travels smoothly in +regular paths. Conversely, turbulent flow isn't smooth and follows an +irregular path with lots of mixing. + +Reference : https://byjus.com/physics/reynolds-number/ +""" + + +def reynolds_number( + density: float, velocity: float, diameter: float, viscosity: float +) -> float: + """ + >>> reynolds_number(900, 2.5, 0.05, 0.4) + 281.25 + >>> reynolds_number(450, 3.86, 0.078, 0.23) + 589.0695652173912 + >>> reynolds_number(234, -4.5, 0.3, 0.44) + 717.9545454545454 + >>> reynolds_number(-90, 2, 0.045, 1) + Traceback (most recent call last): + ... + ValueError: please ensure that density, diameter and viscosity are positive + >>> reynolds_number(0, 2, -0.4, -2) + Traceback (most recent call last): + ... 
+ ValueError: please ensure that density, diameter and viscosity are positive + """ + + if density <= 0 or diameter <= 0 or viscosity <= 0: + raise ValueError( + "please ensure that density, diameter and viscosity are positive" + ) + return (density * abs(velocity) * diameter) / viscosity + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 6d136036672072a2c4870da7741d4ad3026a7357 Mon Sep 17 00:00:00 2001 From: Vipin Karthic <143083087+vipinkarthic@users.noreply.github.com> Date: Tue, 10 Oct 2023 11:22:37 +0530 Subject: [PATCH 1092/1543] Fixes #9943 Added Doctests to binary_exponentiation_3.py (#10121) * Python mirror_formulae.py is added to the repository * Changes done after reading readme.md * Changes for running doctest on all platforms * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change 2 for Doctests * Changes for doctest 2 * updating DIRECTORY.md * Doctest whitespace error rectification to mirror_formulae.py * updating DIRECTORY.md * Adding Thermodynamic Work Done Formulae * Work done on/by body in a thermodynamic setting * updating DIRECTORY.md * updating DIRECTORY.md * Doctest adiition to binary_exponentiation_3.py * Change 1 * updating DIRECTORY.md * Rename binary_exponentiation_3.py to binary_exponentiation_2.py * updating DIRECTORY.md * updating DIRECTORY.md * Formatting --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Tianyi Zheng --- DIRECTORY.md | 26 ++++++++++++-- maths/binary_exponentiation_2.py | 59 +++++++++++++++++++------------- 2 files changed, 59 insertions(+), 26 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index b1a23a239b01..015efb3c796d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -54,13 +54,12 @@ * [Largest Pow Of Two Le Num](bit_manipulation/largest_pow_of_two_le_num.py) * [Missing Number](bit_manipulation/missing_number.py) * [Numbers Different Signs](bit_manipulation/numbers_different_signs.py) + * [Power Of 4](bit_manipulation/power_of_4.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) ## Blockchain - * [Chinese Remainder Theorem](blockchain/chinese_remainder_theorem.py) * [Diophantine Equation](blockchain/diophantine_equation.py) - * [Modular Division](blockchain/modular_division.py) ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) @@ -101,11 +100,13 @@ * [Diffie Hellman](ciphers/diffie_hellman.py) * [Elgamal Key Generator](ciphers/elgamal_key_generator.py) * [Enigma Machine2](ciphers/enigma_machine2.py) + * [Fractionated Morse Cipher](ciphers/fractionated_morse_cipher.py) * [Hill Cipher](ciphers/hill_cipher.py) * [Mixed Keyword Cypher](ciphers/mixed_keyword_cypher.py) * [Mono Alphabetic Ciphers](ciphers/mono_alphabetic_ciphers.py) * [Morse Code](ciphers/morse_code.py) * [Onepad Cipher](ciphers/onepad_cipher.py) + * [Permutation Cipher](ciphers/permutation_cipher.py) * [Playfair Cipher](ciphers/playfair_cipher.py) * [Polybius](ciphers/polybius.py) * [Porta Cipher](ciphers/porta_cipher.py) @@ -172,6 +173,7 @@ ## Data Structures * Arrays + * [Equilibrium Index In Array](data_structures/arrays/equilibrium_index_in_array.py) * [Median Two Array](data_structures/arrays/median_two_array.py) * [Permutations](data_structures/arrays/permutations.py) * [Prefix 
Sum](data_structures/arrays/prefix_sum.py) @@ -352,6 +354,7 @@ * [Smith Waterman](dynamic_programming/smith_waterman.py) * [Subset Generation](dynamic_programming/subset_generation.py) * [Sum Of Subset](dynamic_programming/sum_of_subset.py) + * [Trapped Water](dynamic_programming/trapped_water.py) * [Tribonacci](dynamic_programming/tribonacci.py) * [Viterbi](dynamic_programming/viterbi.py) * [Word Break](dynamic_programming/word_break.py) @@ -360,6 +363,7 @@ * [Apparent Power](electronics/apparent_power.py) * [Builtin Voltage](electronics/builtin_voltage.py) * [Carrier Concentration](electronics/carrier_concentration.py) + * [Charging Capacitor](electronics/charging_capacitor.py) * [Circular Convolution](electronics/circular_convolution.py) * [Coulombs Law](electronics/coulombs_law.py) * [Electric Conductivity](electronics/electric_conductivity.py) @@ -466,6 +470,8 @@ * [Test Min Spanning Tree Prim](graphs/tests/test_min_spanning_tree_prim.py) ## Greedy Methods + * [Best Time To Buy And Sell Stock](greedy_methods/best_time_to_buy_and_sell_stock.py) + * [Fractional Cover Problem](greedy_methods/fractional_cover_problem.py) * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) * [Gas Station](greedy_methods/gas_station.py) @@ -524,6 +530,10 @@ * Local Weighted Learning * [Local Weighted Learning](machine_learning/local_weighted_learning/local_weighted_learning.py) * [Logistic Regression](machine_learning/logistic_regression.py) + * Loss Functions + * [Binary Cross Entropy](machine_learning/loss_functions/binary_cross_entropy.py) + * [Huber Loss](machine_learning/loss_functions/huber_loss.py) + * [Mean Squared Error](machine_learning/loss_functions/mean_squared_error.py) * [Mfcc](machine_learning/mfcc.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polynomial Regression](machine_learning/polynomial_regression.py) @@ -564,7 +574,9 @@ * [Carmichael Number](maths/carmichael_number.py) * [Catalan Number](maths/catalan_number.py) * [Ceil](maths/ceil.py) + * [Chebyshev Distance](maths/chebyshev_distance.py) * [Check Polygon](maths/check_polygon.py) + * [Chinese Remainder Theorem](maths/chinese_remainder_theorem.py) * [Chudnovsky Algorithm](maths/chudnovsky_algorithm.py) * [Collatz Sequence](maths/collatz_sequence.py) * [Combinations](maths/combinations.py) @@ -591,6 +603,7 @@ * [Gaussian](maths/gaussian.py) * [Gaussian Error Linear Unit](maths/gaussian_error_linear_unit.py) * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) + * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) * [Greedy Coin Change](maths/greedy_coin_change.py) * [Hamming Numbers](maths/hamming_numbers.py) @@ -618,7 +631,9 @@ * [Matrix Exponentiation](maths/matrix_exponentiation.py) * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) * [Median Of Two Arrays](maths/median_of_two_arrays.py) + * [Minkowski Distance](maths/minkowski_distance.py) * [Mobius Function](maths/mobius_function.py) + * [Modular Division](maths/modular_division.py) * [Modular Exponential](maths/modular_exponential.py) * [Monte Carlo](maths/monte_carlo.py) * [Monte Carlo Dice](maths/monte_carlo_dice.py) @@ -720,12 +735,16 @@ ## Neural Network * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) * Activation Functions + * [Binary Step](neural_network/activation_functions/binary_step.py) * [Exponential Linear 
Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) * [Mish](neural_network/activation_functions/mish.py) * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) * [Sigmoid Linear Unit](neural_network/activation_functions/sigmoid_linear_unit.py) + * [Soboleva Modified Hyperbolic Tangent](neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py) + * [Softplus](neural_network/activation_functions/softplus.py) + * [Squareplus](neural_network/activation_functions/squareplus.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Perceptron](neural_network/perceptron.py) @@ -779,6 +798,7 @@ * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) * [Photoelectric Effect](physics/photoelectric_effect.py) * [Potential Energy](physics/potential_energy.py) + * [Reynolds Number](physics/reynolds_number.py) * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) * [Speed Of Sound](physics/speed_of_sound.py) @@ -1101,6 +1121,7 @@ * [Interpolation Search](searches/interpolation_search.py) * [Jump Search](searches/jump_search.py) * [Linear Search](searches/linear_search.py) + * [Median Of Medians](searches/median_of_medians.py) * [Quick Select](searches/quick_select.py) * [Sentinel Linear Search](searches/sentinel_linear_search.py) * [Simple Binary Search](searches/simple_binary_search.py) @@ -1201,6 +1222,7 @@ * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) * [String Switch Case](strings/string_switch_case.py) + * [Strip](strings/strip.py) * [Text Justification](strings/text_justification.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) diff --git a/maths/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py index 9cd143e09207..edb6b66b2594 100644 --- a/maths/binary_exponentiation_2.py +++ b/maths/binary_exponentiation_2.py @@ -1,17 +1,33 @@ """ -* Binary Exponentiation for Powers -* This is a method to find a^b in a time complexity of O(log b) -* This is one of the most commonly used methods of finding powers. -* Also useful in cases where solution to (a^b)%c is required, -* where a,b,c can be numbers over the computers calculation limits. 
-* Done using iteration, can also be done using recursion - -* @author chinmoy159 -* @version 1.0 dated 10/08/2017 +Binary Exponentiation +This is a method to find a^b in O(log b) time complexity +This is one of the most commonly used methods of exponentiation +It's also useful when the solution to (a^b) % c is required because a, b, c may be +over the computer's calculation limits + +Let's say you need to calculate a ^ b +- RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2 +- RULE 2 : IF b is odd, then a ^ b = a * (a ^ (b - 1)), where b - 1 is even +Once b is even, repeat the process until b = 1 or b = 0, because a^1 = a and a^0 = 1 + +For modular exponentiation, we use the fact that (a*b) % c = ((a%c) * (b%c)) % c +Now apply RULE 1 or 2 as required + +@author chinmoy159 """ def b_expo(a: int, b: int) -> int: + """ + >>> b_expo(2, 10) + 1024 + >>> b_expo(9, 0) + 1 + >>> b_expo(0, 12) + 0 + >>> b_expo(4, 12) + 16777216 + """ res = 1 while b > 0: if b & 1: @@ -24,6 +40,16 @@ def b_expo(a: int, b: int) -> int: def b_expo_mod(a: int, b: int, c: int) -> int: + """ + >>> b_expo_mod(2, 10, 1000000007) + 1024 + >>> b_expo_mod(11, 13, 19) + 11 + >>> b_expo_mod(0, 19, 20) + 0 + >>> b_expo_mod(15, 5, 4) + 3 + """ res = 1 while b > 0: if b & 1: @@ -33,18 +59,3 @@ def b_expo_mod(a: int, b: int, c: int) -> int: b >>= 1 return res - - -""" -* Wondering how this method works ! -* It's pretty simple. -* Let's say you need to calculate a ^ b -* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2 -* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even. -* Once b is even, repeat the process to get a ^ b -* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1 -* -* As far as the modulo is concerned, -* the fact : (a*b) % c = ((a%c) * (b%c)) % c -* Now apply RULE 1 OR 2 whichever is required. -""" From 59fc0cefefce77718044eb797e2c33cf8a7e1f9a Mon Sep 17 00:00:00 2001 From: Arnav Kohli <95236897+THEGAMECHANGER416@users.noreply.github.com> Date: Tue, 10 Oct 2023 18:50:49 +0530 Subject: [PATCH 1093/1543] Added categorical_crossentropy loss function (#10152) --- .../categorical_cross_entropy.py | 85 +++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 machine_learning/loss_functions/categorical_cross_entropy.py diff --git a/machine_learning/loss_functions/categorical_cross_entropy.py b/machine_learning/loss_functions/categorical_cross_entropy.py new file mode 100644 index 000000000000..68f98902b473 --- /dev/null +++ b/machine_learning/loss_functions/categorical_cross_entropy.py @@ -0,0 +1,85 @@ +""" +Categorical Cross-Entropy Loss + +This function calculates the Categorical Cross-Entropy Loss between true class +labels and predicted class probabilities. + +Formula: +Categorical Cross-Entropy Loss = -Σ(y_true * ln(y_pred)) + +Resources: +- [Wikipedia - Cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) +""" + +import numpy as np + + +def categorical_cross_entropy( + y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 +) -> float: + """ + Calculate Categorical Cross-Entropy Loss between true class labels and + predicted class probabilities. + + Parameters: + - y_true: True class labels (one-hot encoded) as a NumPy array. + - y_pred: Predicted class probabilities as a NumPy array. + - epsilon: Small constant to avoid numerical instability. + + Returns: + - ce_loss: Categorical Cross-Entropy Loss as a floating-point number. 
+ + Example: + >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + 0.567395975254385 + + >>> y_true = np.array([[1, 0], [0, 1]]) + >>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same shape. + + >>> y_true = np.array([[2, 0, 1], [1, 0, 0]]) + >>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: y_true must be one-hot encoded. + + >>> y_true = np.array([[1, 0, 1], [1, 0, 0]]) + >>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: y_true must be one-hot encoded. + + >>> y_true = np.array([[1, 0, 0], [0, 1, 0]]) + >>> y_pred = np.array([[0.9, 0.1, 0.1], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: Predicted probabilities must sum to approximately 1. + """ + if y_true.shape != y_pred.shape: + raise ValueError("Input arrays must have the same shape.") + + if np.any((y_true != 0) & (y_true != 1)) or np.any(y_true.sum(axis=1) != 1): + raise ValueError("y_true must be one-hot encoded.") + + if not np.all(np.isclose(np.sum(y_pred, axis=1), 1, rtol=epsilon, atol=epsilon)): + raise ValueError("Predicted probabilities must sum to approximately 1.") + + # Clip predicted probabilities to avoid log(0) + y_pred = np.clip(y_pred, epsilon, 1) + + # Calculate categorical cross-entropy loss + return -np.sum(y_true * np.log(y_pred)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0b440285e813c54cda188eac278bda6fa4b1169f Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Tue, 10 Oct 2023 19:24:51 +0500 Subject: [PATCH 1094/1543] Gaussian_elemination - change to remove warning (#10221) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Removes the warning: Conversion of an array with ndim > 0 to a scalar is deprecated, and will error in future. 
Ensure you extract a single element from your array before performing this operation --- arithmetic_analysis/gaussian_elimination.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arithmetic_analysis/gaussian_elimination.py b/arithmetic_analysis/gaussian_elimination.py index 13f509a4f117..a1a35131b157 100644 --- a/arithmetic_analysis/gaussian_elimination.py +++ b/arithmetic_analysis/gaussian_elimination.py @@ -34,7 +34,7 @@ def retroactive_resolution( x: NDArray[float64] = np.zeros((rows, 1), dtype=float) for row in reversed(range(rows)): total = np.dot(coefficients[row, row + 1 :], x[row + 1 :]) - x[row, 0] = (vector[row] - total) / coefficients[row, row] + x[row, 0] = (vector[row][0] - total[0]) / coefficients[row, row] return x From 5be5d21bed4bb546c81b5771bebca336978111e7 Mon Sep 17 00:00:00 2001 From: hollowcrust <72879387+hollowcrust@users.noreply.github.com> Date: Wed, 11 Oct 2023 00:52:53 +0800 Subject: [PATCH 1095/1543] Add tests for infix_2_postfix() in infix_to_prefix_conversion.py (#10095) * Add doctests, exceptions, type hints and fix bug for infix_to_prefix_conversion.py Add doctests Add exceptions for expressions with invalid bracket positions Add type hints for functions Fix a bug on line 53 (57 in PR) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change type hints in infix_to_prefix_conversion.py * Remove printing trailing whitespace in the output table * Fix type hint errors * Fix doctests * Adjust table convention in output and doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add doctests for infix_2_postfix() * Update print_width * Update print_width * Fix the doctests --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../stacks/infix_to_prefix_conversion.py | 73 +++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/data_structures/stacks/infix_to_prefix_conversion.py b/data_structures/stacks/infix_to_prefix_conversion.py index 6f6d5d57e2cb..1127211d59f9 100644 --- a/data_structures/stacks/infix_to_prefix_conversion.py +++ b/data_structures/stacks/infix_to_prefix_conversion.py @@ -16,6 +16,39 @@ def infix_2_postfix(infix): + """ + >>> infix_2_postfix("a+b^c") # doctest: +NORMALIZE_WHITESPACE + Symbol | Stack | Postfix + ---------------------------- + a | | a + + | + | a + b | + | ab + ^ | +^ | ab + c | +^ | abc + | + | abc^ + | | abc^+ + 'abc^+' + >>> infix_2_postfix("1*((-a)*2+b)") + Traceback (most recent call last): + ... + KeyError: '(' + >>> infix_2_postfix("") + Symbol | Stack | Postfix + ---------------------------- + '' + >>> infix_2_postfix("(()") # doctest: +NORMALIZE_WHITESPACE + Symbol | Stack | Postfix + ---------------------------- + ( | ( | + ( | (( | + ) | ( | + | | ( + '(' + >>> infix_2_postfix("())") + Traceback (most recent call last): + ... + IndexError: list index out of range + """ stack = [] post_fix = [] priority = { @@ -74,6 +107,42 @@ def infix_2_postfix(infix): def infix_2_prefix(infix): + """ + >>> infix_2_prefix("a+b^c") # doctest: +NORMALIZE_WHITESPACE + Symbol | Stack | Postfix + ---------------------------- + c | | c + ^ | ^ | c + b | ^ | cb + + | + | cb^ + a | + | cb^a + | | cb^a+ + '+a^bc' + + >>> infix_2_prefix("1*((-a)*2+b)") + Traceback (most recent call last): + ... 
+ KeyError: '(' + + >>> infix_2_prefix('') + Symbol | Stack | Postfix + ---------------------------- + '' + + >>> infix_2_prefix('(()') + Traceback (most recent call last): + ... + IndexError: list index out of range + + >>> infix_2_prefix('())') # doctest: +NORMALIZE_WHITESPACE + Symbol | Stack | Postfix + ---------------------------- + ( | ( | + ( | (( | + ) | ( | + | | ( + '(' + """ infix = list(infix[::-1]) # reverse the infix equation for i in range(len(infix)): @@ -88,6 +157,10 @@ def infix_2_prefix(infix): if __name__ == "__main__": + from doctest import testmod + + testmod() + Infix = input("\nEnter an Infix Equation = ") # Input an Infix equation Infix = "".join(Infix.split()) # Remove spaces from the input print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)") From 9a5a6c663cefb8cbc63329c27188f64462072a4c Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Wed, 11 Oct 2023 01:14:13 +0500 Subject: [PATCH 1096/1543] carmichael_number - add doctests (#10038) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Added doctests * Update carmichael_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update carmichael_number.py I make an empty commit to reset: tests are failing. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update carmichael_number.py Made changes taking into account the addition: from maths.greatest_common_divisor import greatest_common_divisor. Now instead of gcd it is used: greatest_common_divisor. * Update carmichael_number.py * Update carmichael_number.py * Update carmichael_number.py I added a check for 0 and negative numbers in the tests and the code itself. Simplified obtaining the final result. * Update carmichael_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/carmichael_number.py Co-authored-by: Tianyi Zheng * Update carmichael_number.py * Update carmichael_number.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/carmichael_number.py | 55 +++++++++++++++++++++++++++++++++----- 1 file changed, 49 insertions(+), 6 deletions(-) diff --git a/maths/carmichael_number.py b/maths/carmichael_number.py index 81712520ffc7..08b5c70e8fe7 100644 --- a/maths/carmichael_number.py +++ b/maths/carmichael_number.py @@ -10,10 +10,21 @@ Examples of Carmichael Numbers: 561, 1105, ... https://en.wikipedia.org/wiki/Carmichael_number """ + from maths.greatest_common_divisor import greatest_common_divisor def power(x: int, y: int, mod: int) -> int: + """ + + Examples: + >>> power(2, 15, 3) + 2 + + >>> power(5, 1, 30) + 5 + """ + if y == 0: return 1 temp = power(x, y // 2, mod) % mod @@ -24,15 +35,47 @@ def power(x: int, y: int, mod: int) -> int: def is_carmichael_number(n: int) -> bool: - b = 2 - while b < n: - if greatest_common_divisor(b, n) == 1 and power(b, n - 1, n) != 1: - return False - b += 1 - return True + """ + + Examples: + >>> is_carmichael_number(562) + False + + >>> is_carmichael_number(561) + True + + >>> is_carmichael_number(5.1) + Traceback (most recent call last): + ... 
+ ValueError: Number 5.1 must instead be a positive integer + + >>> is_carmichael_number(-7) + Traceback (most recent call last): + ... + ValueError: Number -7 must instead be a positive integer + + >>> is_carmichael_number(0) + Traceback (most recent call last): + ... + ValueError: Number 0 must instead be a positive integer + """ + + if n <= 0 or not isinstance(n, int): + msg = f"Number {n} must instead be a positive integer" + raise ValueError(msg) + + return all( + power(b, n - 1, n) == 1 + for b in range(2, n) + if greatest_common_divisor(b, n) == 1 + ) if __name__ == "__main__": + import doctest + + doctest.testmod() + number = int(input("Enter number: ").strip()) if is_carmichael_number(number): print(f"{number} is a Carmichael Number.") From 00707392332b90cc9babf7258b1de3e0efa0a580 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Wed, 11 Oct 2023 01:18:31 +0500 Subject: [PATCH 1097/1543] k_means_clust - change to remove warning (#10244) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * the change removes the warning: /home/runner/work/Python/Python/machine_learning/k_means_clust.py:236: FutureWarning: The provided callable is currently using SeriesGroupBy.sum. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string "sum" instead. .agg( And /home/runner/work/Python/Python/machine_learning/k_means_clust.py:236: FutureWarning: The provided callable is currently using SeriesGroupBy.mean. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string "mean" instead. 
.agg( --- machine_learning/k_means_clust.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index d93c5addf2ee..3fe151442e2e 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -235,7 +235,7 @@ def report_generator( ] # group by cluster number .agg( [ - ("sum", np.sum), + ("sum", "sum"), ("mean_with_zeros", lambda x: np.mean(np.nan_to_num(x))), ("mean_without_zeros", lambda x: x.replace(0, np.NaN).mean()), ( @@ -248,7 +248,7 @@ def report_generator( ) ), ), - ("mean_with_na", np.mean), + ("mean_with_na", "mean"), ("min", lambda x: x.min()), ("5%", lambda x: x.quantile(0.05)), ("25%", lambda x: x.quantile(0.25)), From c850227bee5efd9383d1cb8150500eb304c809fc Mon Sep 17 00:00:00 2001 From: cornbread-eater <146371786+cornbread-eater@users.noreply.github.com> Date: Tue, 10 Oct 2023 14:07:07 -0700 Subject: [PATCH 1098/1543] Add doctests to primelib.py (#10242) * Update primelib.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/primelib.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/maths/primelib.py b/maths/primelib.py index cf01750cf912..d5c124255e56 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -574,6 +574,11 @@ def fib(n): """ input: positive integer 'n' returns the n-th fibonacci term , indexing by 0 + + >>> fib(5) + 8 + >>> fib(99) + 354224848179261915075 """ # precondition @@ -589,3 +594,9 @@ def fib(n): fib1 = tmp return ans + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 672fda913087ab64f9eb7b3a5600cbf83680fb8e Mon Sep 17 00:00:00 2001 From: hollowcrust <72879387+hollowcrust@users.noreply.github.com> Date: Wed, 11 Oct 2023 17:00:49 +0800 Subject: [PATCH 1099/1543] Fix bug and edit doctests for infix_to_prefix_conversion (#10259) * Fix bug and edit doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add type hints, raiseError and other minor adjustments * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleaning code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../stacks/infix_to_prefix_conversion.py | 101 +++++++++++------- 1 file changed, 64 insertions(+), 37 deletions(-) diff --git a/data_structures/stacks/infix_to_prefix_conversion.py b/data_structures/stacks/infix_to_prefix_conversion.py index 1127211d59f9..beff421c0cfa 100644 --- a/data_structures/stacks/infix_to_prefix_conversion.py +++ b/data_structures/stacks/infix_to_prefix_conversion.py @@ -15,7 +15,7 @@ """ -def infix_2_postfix(infix): +def infix_2_postfix(infix: str) -> str: """ >>> infix_2_postfix("a+b^c") # doctest: +NORMALIZE_WHITESPACE Symbol | Stack | Postfix @@ -28,22 +28,35 @@ def infix_2_postfix(infix): | + | abc^ | | abc^+ 'abc^+' - >>> infix_2_postfix("1*((-a)*2+b)") - Traceback (most recent call last): - ... 
- KeyError: '(' + + >>> infix_2_postfix("1*((-a)*2+b)") # doctest: +NORMALIZE_WHITESPACE + Symbol | Stack | Postfix + ------------------------------------------- + 1 | | 1 + * | * | 1 + ( | *( | 1 + ( | *(( | 1 + - | *((- | 1 + a | *((- | 1a + ) | *( | 1a- + * | *(* | 1a- + 2 | *(* | 1a-2 + + | *(+ | 1a-2* + b | *(+ | 1a-2*b + ) | * | 1a-2*b+ + | | 1a-2*b+* + '1a-2*b+*' + >>> infix_2_postfix("") Symbol | Stack | Postfix ---------------------------- '' - >>> infix_2_postfix("(()") # doctest: +NORMALIZE_WHITESPACE - Symbol | Stack | Postfix - ---------------------------- - ( | ( | - ( | (( | - ) | ( | - | | ( - '(' + + >>> infix_2_postfix("(()") + Traceback (most recent call last): + ... + ValueError: invalid expression + >>> infix_2_postfix("())") Traceback (most recent call last): ... @@ -59,7 +72,7 @@ def infix_2_postfix(infix): "+": 1, "-": 1, } # Priority of each operator - print_width = len(infix) if (len(infix) > 7) else 7 + print_width = max(len(infix), 7) # Print table header for output print( @@ -76,6 +89,9 @@ def infix_2_postfix(infix): elif x == "(": stack.append(x) # if x is "(" push to Stack elif x == ")": # if x is ")" pop stack until "(" is encountered + if len(stack) == 0: # close bracket without open bracket + raise IndexError("list index out of range") + while stack[-1] != "(": post_fix.append(stack.pop()) # Pop stack & add the content to Postfix stack.pop() @@ -83,7 +99,7 @@ def infix_2_postfix(infix): if len(stack) == 0: stack.append(x) # If stack is empty, push x to stack else: # while priority of x is not > priority of element in the stack - while len(stack) > 0 and priority[x] <= priority[stack[-1]]: + while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]: post_fix.append(stack.pop()) # pop stack & add to Postfix stack.append(x) # push x to stack @@ -95,6 +111,9 @@ def infix_2_postfix(infix): ) # Output in tabular format while len(stack) > 0: # while stack is not empty + if stack[-1] == "(": # open bracket with no close bracket + raise ValueError("invalid expression") + post_fix.append(stack.pop()) # pop stack & add to Postfix print( " ".center(8), @@ -106,7 +125,7 @@ def infix_2_postfix(infix): return "".join(post_fix) # return Postfix as str -def infix_2_prefix(infix): +def infix_2_prefix(infix: str) -> str: """ >>> infix_2_prefix("a+b^c") # doctest: +NORMALIZE_WHITESPACE Symbol | Stack | Postfix @@ -119,10 +138,23 @@ def infix_2_prefix(infix): | | cb^a+ '+a^bc' - >>> infix_2_prefix("1*((-a)*2+b)") - Traceback (most recent call last): - ... - KeyError: '(' + >>> infix_2_prefix("1*((-a)*2+b)") # doctest: +NORMALIZE_WHITESPACE + Symbol | Stack | Postfix + ------------------------------------------- + ( | ( | + b | ( | b + + | (+ | b + 2 | (+ | b2 + * | (+* | b2 + ( | (+*( | b2 + a | (+*( | b2a + - | (+*(- | b2a + ) | (+* | b2a- + ) | | b2a-*+ + * | * | b2a-*+ + 1 | * | b2a-*+1 + | | b2a-*+1* + '*1+*-a2b' >>> infix_2_prefix('') Symbol | Stack | Postfix @@ -134,26 +166,21 @@ def infix_2_prefix(infix): ... IndexError: list index out of range - >>> infix_2_prefix('())') # doctest: +NORMALIZE_WHITESPACE - Symbol | Stack | Postfix - ---------------------------- - ( | ( | - ( | (( | - ) | ( | - | | ( - '(' + >>> infix_2_prefix('())') + Traceback (most recent call last): + ... 
+ ValueError: invalid expression """ - infix = list(infix[::-1]) # reverse the infix equation + reversed_infix = list(infix[::-1]) # reverse the infix equation - for i in range(len(infix)): - if infix[i] == "(": - infix[i] = ")" # change "(" to ")" - elif infix[i] == ")": - infix[i] = "(" # change ")" to "(" + for i in range(len(reversed_infix)): + if reversed_infix[i] == "(": + reversed_infix[i] = ")" # change "(" to ")" + elif reversed_infix[i] == ")": + reversed_infix[i] = "(" # change ")" to "(" - return (infix_2_postfix("".join(infix)))[ - ::-1 - ] # call infix_2_postfix on Infix, return reverse of Postfix + # call infix_2_postfix on Infix, return reverse of Postfix + return (infix_2_postfix("".join(reversed_infix)))[::-1] if __name__ == "__main__": From 5fb6496d1bcd076018e6c829c312f486ed7bb2ee Mon Sep 17 00:00:00 2001 From: Ricardo Martinez Peinado <43684906+rmp2000@users.noreply.github.com> Date: Wed, 11 Oct 2023 12:11:05 +0200 Subject: [PATCH 1100/1543] Improve primelib.py test coverage #9943 (#10251) * Update the doctest of primelib.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Correct errors for the doctest of primelib.py * last error for the doctest of primelib.py * last error for the doctest of primelib.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/primelib.py | 243 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 238 insertions(+), 5 deletions(-) diff --git a/maths/primelib.py b/maths/primelib.py index d5c124255e56..7e33844be12b 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -46,6 +46,19 @@ def is_prime(number: int) -> bool: """ input: positive integer 'number' returns true if 'number' is prime otherwise false. + + >>> is_prime(3) + True + >>> is_prime(10) + False + >>> is_prime(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and positive + >>> is_prime("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and positive """ # precondition @@ -83,6 +96,16 @@ def sieve_er(n): This function implements the algorithm called sieve of erathostenes. + >>> sieve_er(8) + [2, 3, 5, 7] + >>> sieve_er(-1) + Traceback (most recent call last): + ... + AssertionError: 'N' must been an int and > 2 + >>> sieve_er("test") + Traceback (most recent call last): + ... + AssertionError: 'N' must been an int and > 2 """ # precondition @@ -116,6 +139,17 @@ def get_prime_numbers(n): input: positive integer 'N' > 2 returns a list of prime numbers from 2 up to N (inclusive) This function is more efficient as function 'sieveEr(...)' + + >>> get_prime_numbers(8) + [2, 3, 5, 7] + >>> get_prime_numbers(-1) + Traceback (most recent call last): + ... + AssertionError: 'N' must been an int and > 2 + >>> get_prime_numbers("test") + Traceback (most recent call last): + ... + AssertionError: 'N' must been an int and > 2 """ # precondition @@ -142,6 +176,21 @@ def prime_factorization(number): """ input: positive integer 'number' returns a list of the prime number factors of 'number' + + >>> prime_factorization(0) + [0] + >>> prime_factorization(8) + [2, 2, 2] + >>> prime_factorization(287) + [7, 41] + >>> prime_factorization(-1) + Traceback (most recent call last): + ... 
+ AssertionError: 'number' must been an int and >= 0 + >>> prime_factorization("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 0 """ # precondition @@ -183,12 +232,27 @@ def greatest_prime_factor(number): """ input: positive integer 'number' >= 0 returns the greatest prime number factor of 'number' + + >>> greatest_prime_factor(0) + 0 + >>> greatest_prime_factor(8) + 2 + >>> greatest_prime_factor(287) + 41 + >>> greatest_prime_factor(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 0 + >>> greatest_prime_factor("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 0 """ # precondition assert isinstance(number, int) and ( number >= 0 - ), "'number' bust been an int and >= 0" + ), "'number' must been an int and >= 0" ans = 0 @@ -210,12 +274,27 @@ def smallest_prime_factor(number): """ input: integer 'number' >= 0 returns the smallest prime number factor of 'number' + + >>> smallest_prime_factor(0) + 0 + >>> smallest_prime_factor(8) + 2 + >>> smallest_prime_factor(287) + 7 + >>> smallest_prime_factor(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 0 + >>> smallest_prime_factor("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 0 """ # precondition assert isinstance(number, int) and ( number >= 0 - ), "'number' bust been an int and >= 0" + ), "'number' must been an int and >= 0" ans = 0 @@ -237,11 +316,24 @@ def is_even(number): """ input: integer 'number' returns true if 'number' is even, otherwise false. + + >>> is_even(0) + True + >>> is_even(8) + True + >>> is_even(287) + False + >>> is_even(-1) + False + >>> is_even("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int """ # precondition assert isinstance(number, int), "'number' must been an int" - assert isinstance(number % 2 == 0, bool), "compare bust been from type bool" + assert isinstance(number % 2 == 0, bool), "compare must been from type bool" return number % 2 == 0 @@ -253,11 +345,24 @@ def is_odd(number): """ input: integer 'number' returns true if 'number' is odd, otherwise false. + + >>> is_odd(0) + False + >>> is_odd(8) + False + >>> is_odd(287) + True + >>> is_odd(-1) + True + >>> is_odd("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int """ # precondition assert isinstance(number, int), "'number' must been an int" - assert isinstance(number % 2 != 0, bool), "compare bust been from type bool" + assert isinstance(number % 2 != 0, bool), "compare must been from type bool" return number % 2 != 0 @@ -270,6 +375,23 @@ def goldbach(number): Goldbach's assumption input: a even positive integer 'number' > 2 returns a list of two prime numbers whose sum is equal to 'number' + + >>> goldbach(8) + [3, 5] + >>> goldbach(824) + [3, 821] + >>> goldbach(0) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int, even and > 2 + >>> goldbach(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int, even and > 2 + >>> goldbach("test") + Traceback (most recent call last): + ... 
+ AssertionError: 'number' must been an int, even and > 2 """ # precondition @@ -323,6 +445,23 @@ def kg_v(number1, number2): Least common multiple input: two positive integer 'number1' and 'number2' returns the least common multiple of 'number1' and 'number2' + + >>> kg_v(8,10) + 40 + >>> kg_v(824,67) + 55208 + >>> kg_v(0) + Traceback (most recent call last): + ... + TypeError: kg_v() missing 1 required positional argument: 'number2' + >>> kg_v(10,-1) + Traceback (most recent call last): + ... + AssertionError: 'number1' and 'number2' must been positive integer. + >>> kg_v("test","test2") + Traceback (most recent call last): + ... + AssertionError: 'number1' and 'number2' must been positive integer. """ # precondition @@ -395,6 +534,21 @@ def get_prime(n): Gets the n-th prime number. input: positive integer 'n' >= 0 returns the n-th prime number, beginning at index 0 + + >>> get_prime(0) + 2 + >>> get_prime(8) + 23 + >>> get_prime(824) + 6337 + >>> get_prime(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been a positive int + >>> get_prime("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been a positive int """ # precondition @@ -430,6 +584,25 @@ def get_primes_between(p_number_1, p_number_2): pNumber1 < pNumber2 returns a list of all prime numbers between 'pNumber1' (exclusive) and 'pNumber2' (exclusive) + + >>> get_primes_between(3, 67) + [5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61] + >>> get_primes_between(0) + Traceback (most recent call last): + ... + TypeError: get_primes_between() missing 1 required positional argument: 'p_number_2' + >>> get_primes_between(0, 1) + Traceback (most recent call last): + ... + AssertionError: The arguments must been prime numbers and 'pNumber1' < 'pNumber2' + >>> get_primes_between(-1, 3) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and positive + >>> get_primes_between("test","test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and positive """ # precondition @@ -473,6 +646,19 @@ def get_divisors(n): """ input: positive integer 'n' >= 1 returns all divisors of n (inclusive 1 and 'n') + + >>> get_divisors(8) + [1, 2, 4, 8] + >>> get_divisors(824) + [1, 2, 4, 8, 103, 206, 412, 824] + >>> get_divisors(-1) + Traceback (most recent call last): + ... + AssertionError: 'n' must been int and >= 1 + >>> get_divisors("test") + Traceback (most recent call last): + ... + AssertionError: 'n' must been int and >= 1 """ # precondition @@ -497,6 +683,19 @@ def is_perfect_number(number): """ input: positive integer 'number' > 1 returns true if 'number' is a perfect number otherwise false. + + >>> is_perfect_number(28) + True + >>> is_perfect_number(824) + False + >>> is_perfect_number(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 1 + >>> is_perfect_number("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 1 """ # precondition @@ -525,6 +724,15 @@ def simplify_fraction(numerator, denominator): input: two integer 'numerator' and 'denominator' assumes: 'denominator' != 0 returns: a tuple with simplify numerator and denominator. + + >>> simplify_fraction(10, 20) + (1, 2) + >>> simplify_fraction(10, -1) + (10, -1) + >>> simplify_fraction("test","test") + Traceback (most recent call last): + ... 
+ AssertionError: The arguments must been from type int and 'denominator' != 0 """ # precondition @@ -554,6 +762,19 @@ def factorial(n): """ input: positive integer 'n' returns the factorial of 'n' (n!) + + >>> factorial(0) + 1 + >>> factorial(20) + 2432902008176640000 + >>> factorial(-1) + Traceback (most recent call last): + ... + AssertionError: 'n' must been a int and >= 0 + >>> factorial("test") + Traceback (most recent call last): + ... + AssertionError: 'n' must been a int and >= 0 """ # precondition @@ -570,15 +791,27 @@ def factorial(n): # ------------------------------------------------------------------- -def fib(n): +def fib(n: int) -> int: """ input: positive integer 'n' returns the n-th fibonacci term , indexing by 0 + >>> fib(0) + 1 >>> fib(5) 8 + >>> fib(20) + 10946 >>> fib(99) 354224848179261915075 + >>> fib(-1) + Traceback (most recent call last): + ... + AssertionError: 'n' must been an int and >= 0 + >>> fib("test") + Traceback (most recent call last): + ... + AssertionError: 'n' must been an int and >= 0 """ # precondition From d5323dbaee21a9ae209efa17852b02c3101a0220 Mon Sep 17 00:00:00 2001 From: Aasheesh <126905285+AasheeshLikePanner@users.noreply.github.com> Date: Wed, 11 Oct 2023 23:50:18 +0530 Subject: [PATCH 1101/1543] Adding doctests in simpson_rule.py (#10269) * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/simpson_rule.py Co-authored-by: Christian Clauss * Update maths/simpson_rule.py Co-authored-by: Christian Clauss * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update simpson_rule.py * Adding doctests in simpson_rule.py * Adding doctests in simpson_rule.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/simpson_rule.py | 43 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 4 deletions(-) diff --git a/maths/simpson_rule.py b/maths/simpson_rule.py index d66dc39a7171..e75fb557a2f5 100644 --- a/maths/simpson_rule.py +++ b/maths/simpson_rule.py @@ -1,7 +1,7 @@ """ Numerical integration or quadrature for a smooth function f with known values at x_i -This method is the classical approach of suming 'Equally Spaced Abscissas' +This method is the classical approach of summing 'Equally Spaced Abscissas' method 2: "Simpson Rule" @@ -9,9 +9,41 @@ """ -def method_2(boundary, steps): +def method_2(boundary: list[int], steps: int) -> float: # "Simpson Rule" # int(f) = delta_x/2 * (b-a)/3*(f1 + 4f2 + 2f_3 + ... + fn) + """ + Calculate the definite integral of a function using Simpson's Rule. + :param boundary: A list containing the lower and upper bounds of integration. + :param steps: The number of steps or resolution for the integration. + :return: The approximate integral value. 
+ + >>> round(method_2([0, 2, 4], 10), 10) + 2.6666666667 + >>> round(method_2([2, 0], 10), 10) + -0.2666666667 + >>> round(method_2([-2, -1], 10), 10) + 2.172 + >>> round(method_2([0, 1], 10), 10) + 0.3333333333 + >>> round(method_2([0, 2], 10), 10) + 2.6666666667 + >>> round(method_2([0, 2], 100), 10) + 2.5621226667 + >>> round(method_2([0, 1], 1000), 10) + 0.3320026653 + >>> round(method_2([0, 2], 0), 10) + Traceback (most recent call last): + ... + ZeroDivisionError: Number of steps must be greater than zero + >>> round(method_2([0, 2], -10), 10) + Traceback (most recent call last): + ... + ZeroDivisionError: Number of steps must be greater than zero + """ + if steps <= 0: + raise ZeroDivisionError("Number of steps must be greater than zero") + h = (boundary[1] - boundary[0]) / steps a = boundary[0] b = boundary[1] @@ -41,11 +73,14 @@ def f(x): # enter your function here def main(): a = 0.0 # Lower bound of integration b = 1.0 # Upper bound of integration - steps = 10.0 # define number of steps or resolution - boundary = [a, b] # define boundary of integration + steps = 10.0 # number of steps or resolution + boundary = [a, b] # boundary of integration y = method_2(boundary, steps) print(f"y = {y}") if __name__ == "__main__": + import doctest + + doctest.testmod() main() From 3f094fe49d14e64d2c8f0e2c14d339ab6d0ee735 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 11 Oct 2023 20:30:02 +0200 Subject: [PATCH 1102/1543] Ruff pandas vet (#10281) * Python linting: Add ruff rules for Pandas-vet and Pytest-style * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 1 + blockchain/diophantine_equation.py | 6 +- ciphers/xor_cipher.py | 18 +++-- conversions/decimal_to_hexadecimal.py | 3 +- .../binary_search_tree_recursive.py | 28 ++++---- .../hashing/tests/test_hash_map.py | 4 +- .../linked_list/circular_linked_list.py | 6 +- .../test_digital_image_processing.py | 3 +- graphs/graph_adjacency_list.py | 60 ++++++++-------- graphs/graph_adjacency_matrix.py | 60 ++++++++-------- hashes/sha256.py | 2 +- knapsack/tests/test_greedy_knapsack.py | 16 ++--- knapsack/tests/test_knapsack.py | 8 +-- linear_algebra/src/lib.py | 11 ++- linear_algebra/src/schur_complement.py | 7 +- linear_algebra/src/test_linear_algebra.py | 69 ++++++++++--------- machine_learning/dimensionality_reduction.py | 4 +- machine_learning/k_means_clust.py | 69 +++++++++---------- maths/least_common_multiple.py | 4 +- maths/modular_division.py | 10 ++- maths/prime_check.py | 48 ++++++------- matrix/sherman_morrison.py | 6 +- matrix/tests/test_matrix_operation.py | 28 ++++---- project_euler/problem_054/test_poker_hand.py | 14 ++-- pyproject.toml | 8 ++- strings/knuth_morris_pratt.py | 3 +- strings/rabin_karp.py | 3 +- 28 files changed, 260 insertions(+), 241 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7340a0fd08ee..84f4a7770d00 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.5.1 + rev: v1.6.0 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 015efb3c796d..2c6000c94ed4 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -532,6 +532,7 @@ * [Logistic Regression](machine_learning/logistic_regression.py) * Loss Functions * [Binary Cross Entropy](machine_learning/loss_functions/binary_cross_entropy.py) + * [Categorical Cross 
Entropy](machine_learning/loss_functions/categorical_cross_entropy.py) * [Huber Loss](machine_learning/loss_functions/huber_loss.py) * [Mean Squared Error](machine_learning/loss_functions/mean_squared_error.py) * [Mfcc](machine_learning/mfcc.py) diff --git a/blockchain/diophantine_equation.py b/blockchain/diophantine_equation.py index 7110d90230c9..ae6a145d2922 100644 --- a/blockchain/diophantine_equation.py +++ b/blockchain/diophantine_equation.py @@ -83,7 +83,8 @@ def extended_gcd(a: int, b: int) -> tuple[int, int, int]: (1, -2, 3) """ - assert a >= 0 and b >= 0 + assert a >= 0 + assert b >= 0 if b == 0: d, x, y = a, 1, 0 @@ -92,7 +93,8 @@ def extended_gcd(a: int, b: int) -> tuple[int, int, int]: x = q y = p - q * (a // b) - assert a % d == 0 and b % d == 0 + assert a % d == 0 + assert b % d == 0 assert d == a * x + b * y return (d, x, y) diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py index 0f369e38f85f..559036d305c5 100644 --- a/ciphers/xor_cipher.py +++ b/ciphers/xor_cipher.py @@ -38,7 +38,8 @@ def encrypt(self, content: str, key: int) -> list[str]: """ # precondition - assert isinstance(key, int) and isinstance(content, str) + assert isinstance(key, int) + assert isinstance(content, str) key = key or self.__key or 1 @@ -56,7 +57,8 @@ def decrypt(self, content: str, key: int) -> list[str]: """ # precondition - assert isinstance(key, int) and isinstance(content, list) + assert isinstance(key, int) + assert isinstance(content, list) key = key or self.__key or 1 @@ -74,7 +76,8 @@ def encrypt_string(self, content: str, key: int = 0) -> str: """ # precondition - assert isinstance(key, int) and isinstance(content, str) + assert isinstance(key, int) + assert isinstance(content, str) key = key or self.__key or 1 @@ -99,7 +102,8 @@ def decrypt_string(self, content: str, key: int = 0) -> str: """ # precondition - assert isinstance(key, int) and isinstance(content, str) + assert isinstance(key, int) + assert isinstance(content, str) key = key or self.__key or 1 @@ -125,7 +129,8 @@ def encrypt_file(self, file: str, key: int = 0) -> bool: """ # precondition - assert isinstance(file, str) and isinstance(key, int) + assert isinstance(file, str) + assert isinstance(key, int) try: with open(file) as fin, open("encrypt.out", "w+") as fout: @@ -148,7 +153,8 @@ def decrypt_file(self, file: str, key: int) -> bool: """ # precondition - assert isinstance(file, str) and isinstance(key, int) + assert isinstance(file, str) + assert isinstance(key, int) try: with open(file) as fin, open("decrypt.out", "w+") as fout: diff --git a/conversions/decimal_to_hexadecimal.py b/conversions/decimal_to_hexadecimal.py index 5ea48401f488..b1fb4f082242 100644 --- a/conversions/decimal_to_hexadecimal.py +++ b/conversions/decimal_to_hexadecimal.py @@ -57,7 +57,8 @@ def decimal_to_hexadecimal(decimal: float) -> str: >>> decimal_to_hexadecimal(-256) == hex(-256) True """ - assert type(decimal) in (int, float) and decimal == int(decimal) + assert isinstance(decimal, (int, float)) + assert decimal == int(decimal) decimal = int(decimal) hexadecimal = "" negative = False diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index b5b983b9ba4c..13b9b392175c 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -12,6 +12,8 @@ import unittest from collections.abc import Iterator +import pytest + class Node: def __init__(self, label: int, parent: Node | None) 
-> None: @@ -78,7 +80,7 @@ def _put(self, node: Node | None, label: int, parent: Node | None = None) -> Nod node.right = self._put(node.right, label, node) else: msg = f"Node with label {label} already exists" - raise Exception(msg) + raise ValueError(msg) return node @@ -95,14 +97,14 @@ def search(self, label: int) -> Node: >>> node = t.search(3) Traceback (most recent call last): ... - Exception: Node with label 3 does not exist + ValueError: Node with label 3 does not exist """ return self._search(self.root, label) def _search(self, node: Node | None, label: int) -> Node: if node is None: msg = f"Node with label {label} does not exist" - raise Exception(msg) + raise ValueError(msg) else: if label < node.label: node = self._search(node.left, label) @@ -124,7 +126,7 @@ def remove(self, label: int) -> None: >>> t.remove(3) Traceback (most recent call last): ... - Exception: Node with label 3 does not exist + ValueError: Node with label 3 does not exist """ node = self.search(label) if node.right and node.left: @@ -179,7 +181,7 @@ def exists(self, label: int) -> bool: try: self.search(label) return True - except Exception: + except ValueError: return False def get_max_label(self) -> int: @@ -190,7 +192,7 @@ def get_max_label(self) -> int: >>> t.get_max_label() Traceback (most recent call last): ... - Exception: Binary search tree is empty + ValueError: Binary search tree is empty >>> t.put(8) >>> t.put(10) @@ -198,7 +200,7 @@ def get_max_label(self) -> int: 10 """ if self.root is None: - raise Exception("Binary search tree is empty") + raise ValueError("Binary search tree is empty") node = self.root while node.right is not None: @@ -214,7 +216,7 @@ def get_min_label(self) -> int: >>> t.get_min_label() Traceback (most recent call last): ... - Exception: Binary search tree is empty + ValueError: Binary search tree is empty >>> t.put(8) >>> t.put(10) @@ -222,7 +224,7 @@ def get_min_label(self) -> int: 8 """ if self.root is None: - raise Exception("Binary search tree is empty") + raise ValueError("Binary search tree is empty") node = self.root while node.left is not None: @@ -359,7 +361,7 @@ def test_put(self) -> None: assert t.root.left.left.parent == t.root.left assert t.root.left.left.label == 1 - with self.assertRaises(Exception): # noqa: B017 + with pytest.raises(ValueError): t.put(1) def test_search(self) -> None: @@ -371,7 +373,7 @@ def test_search(self) -> None: node = t.search(13) assert node.label == 13 - with self.assertRaises(Exception): # noqa: B017 + with pytest.raises(ValueError): t.search(2) def test_remove(self) -> None: @@ -517,7 +519,7 @@ def test_get_max_label(self) -> None: assert t.get_max_label() == 14 t.empty() - with self.assertRaises(Exception): # noqa: B017 + with pytest.raises(ValueError): t.get_max_label() def test_get_min_label(self) -> None: @@ -526,7 +528,7 @@ def test_get_min_label(self) -> None: assert t.get_min_label() == 1 t.empty() - with self.assertRaises(Exception): # noqa: B017 + with pytest.raises(ValueError): t.get_min_label() def test_inorder_traversal(self) -> None: diff --git a/data_structures/hashing/tests/test_hash_map.py b/data_structures/hashing/tests/test_hash_map.py index 929e67311996..4292c0178b7b 100644 --- a/data_structures/hashing/tests/test_hash_map.py +++ b/data_structures/hashing/tests/test_hash_map.py @@ -65,14 +65,14 @@ def _run_operation(obj, fun, *args): @pytest.mark.parametrize( "operations", - ( + [ pytest.param(_add_items, id="add items"), pytest.param(_overwrite_items, id="overwrite items"), pytest.param(_delete_items, id="delete 
items"), pytest.param(_access_absent_items, id="access absent items"), pytest.param(_add_with_resize_up, id="add with resize up"), pytest.param(_add_with_resize_down, id="add with resize down"), - ), + ], ) def test_hash_map_is_the_same_as_dict(operations): my = HashMap(initial_block_size=4) diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index ef6658733a95..54343c80a30f 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -124,7 +124,8 @@ def delete_nth(self, index: int = 0) -> Any: if not 0 <= index < len(self): raise IndexError("list index out of range.") - assert self.head is not None and self.tail is not None + assert self.head is not None + assert self.tail is not None delete_node: Node = self.head if self.head == self.tail: # Just one node self.head = self.tail = None @@ -137,7 +138,8 @@ def delete_nth(self, index: int = 0) -> Any: for _ in range(index - 1): assert temp is not None temp = temp.next - assert temp is not None and temp.next is not None + assert temp is not None + assert temp.next is not None delete_node = temp.next temp.next = temp.next.next if index == len(self) - 1: # Delete at tail diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index 2e5630458c8e..7993110d6bdd 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -73,7 +73,8 @@ def test_median_filter(): def test_sobel_filter(): grad, theta = sob.sobel_filter(gray) - assert grad.any() and theta.any() + assert grad.any() + assert theta.any() def test_sepia(): diff --git a/graphs/graph_adjacency_list.py b/graphs/graph_adjacency_list.py index 76f34f845860..d0b94f03e9b4 100644 --- a/graphs/graph_adjacency_list.py +++ b/graphs/graph_adjacency_list.py @@ -22,6 +22,8 @@ from pprint import pformat from typing import Generic, TypeVar +import pytest + T = TypeVar("T") @@ -185,9 +187,9 @@ def __assert_graph_edge_exists_check( directed_graph: GraphAdjacencyList, edge: list[int], ) -> None: - self.assertTrue(undirected_graph.contains_edge(edge[0], edge[1])) - self.assertTrue(undirected_graph.contains_edge(edge[1], edge[0])) - self.assertTrue(directed_graph.contains_edge(edge[0], edge[1])) + assert undirected_graph.contains_edge(edge[0], edge[1]) + assert undirected_graph.contains_edge(edge[1], edge[0]) + assert directed_graph.contains_edge(edge[0], edge[1]) def __assert_graph_edge_does_not_exist_check( self, @@ -195,9 +197,9 @@ def __assert_graph_edge_does_not_exist_check( directed_graph: GraphAdjacencyList, edge: list[int], ) -> None: - self.assertFalse(undirected_graph.contains_edge(edge[0], edge[1])) - self.assertFalse(undirected_graph.contains_edge(edge[1], edge[0])) - self.assertFalse(directed_graph.contains_edge(edge[0], edge[1])) + assert not undirected_graph.contains_edge(edge[0], edge[1]) + assert not undirected_graph.contains_edge(edge[1], edge[0]) + assert not directed_graph.contains_edge(edge[0], edge[1]) def __assert_graph_vertex_exists_check( self, @@ -205,8 +207,8 @@ def __assert_graph_vertex_exists_check( directed_graph: GraphAdjacencyList, vertex: int, ) -> None: - self.assertTrue(undirected_graph.contains_vertex(vertex)) - self.assertTrue(directed_graph.contains_vertex(vertex)) + assert undirected_graph.contains_vertex(vertex) + assert directed_graph.contains_vertex(vertex) def 
__assert_graph_vertex_does_not_exist_check( self, @@ -214,13 +216,13 @@ def __assert_graph_vertex_does_not_exist_check( directed_graph: GraphAdjacencyList, vertex: int, ) -> None: - self.assertFalse(undirected_graph.contains_vertex(vertex)) - self.assertFalse(directed_graph.contains_vertex(vertex)) + assert not undirected_graph.contains_vertex(vertex) + assert not directed_graph.contains_vertex(vertex) def __generate_random_edges( self, vertices: list[int], edge_pick_count: int ) -> list[list[int]]: - self.assertTrue(edge_pick_count <= len(vertices)) + assert edge_pick_count <= len(vertices) random_source_vertices: list[int] = random.sample( vertices[0 : int(len(vertices) / 2)], edge_pick_count @@ -281,8 +283,8 @@ def test_init_check(self) -> None: self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, edge ) - self.assertFalse(undirected_graph.directed) - self.assertTrue(directed_graph.directed) + assert not undirected_graph.directed + assert directed_graph.directed def test_contains_vertex(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) @@ -297,12 +299,8 @@ def test_contains_vertex(self) -> None: # Test contains_vertex for num in range(101): - self.assertEqual( - num in random_vertices, undirected_graph.contains_vertex(num) - ) - self.assertEqual( - num in random_vertices, directed_graph.contains_vertex(num) - ) + assert (num in random_vertices) == undirected_graph.contains_vertex(num) + assert (num in random_vertices) == directed_graph.contains_vertex(num) def test_add_vertices(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) @@ -507,9 +505,9 @@ def test_add_vertex_exception_check(self) -> None: ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.add_vertex(vertex) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.add_vertex(vertex) def test_remove_vertex_exception_check(self) -> None: @@ -522,9 +520,9 @@ def test_remove_vertex_exception_check(self) -> None: for i in range(101): if i not in random_vertices: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.remove_vertex(i) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.remove_vertex(i) def test_add_edge_exception_check(self) -> None: @@ -536,9 +534,9 @@ def test_add_edge_exception_check(self) -> None: ) = self.__generate_graphs(20, 0, 100, 4) for edge in random_edges: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.add_edge(edge[0], edge[1]) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.add_edge(edge[0], edge[1]) def test_remove_edge_exception_check(self) -> None: @@ -560,9 +558,9 @@ def test_remove_edge_exception_check(self) -> None: more_random_edges.append(edge) for edge in more_random_edges: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.remove_edge(edge[0], edge[1]) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.remove_edge(edge[0], edge[1]) def test_contains_edge_exception_check(self) -> None: @@ -574,14 +572,14 @@ def test_contains_edge_exception_check(self) -> None: ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.contains_edge(vertex, 102) - with 
self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.contains_edge(vertex, 102) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.contains_edge(103, 102) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.contains_edge(103, 102) diff --git a/graphs/graph_adjacency_matrix.py b/graphs/graph_adjacency_matrix.py index 4d2e02f737f9..cdef388d9098 100644 --- a/graphs/graph_adjacency_matrix.py +++ b/graphs/graph_adjacency_matrix.py @@ -22,6 +22,8 @@ from pprint import pformat from typing import Generic, TypeVar +import pytest + T = TypeVar("T") @@ -203,9 +205,9 @@ def __assert_graph_edge_exists_check( directed_graph: GraphAdjacencyMatrix, edge: list[int], ) -> None: - self.assertTrue(undirected_graph.contains_edge(edge[0], edge[1])) - self.assertTrue(undirected_graph.contains_edge(edge[1], edge[0])) - self.assertTrue(directed_graph.contains_edge(edge[0], edge[1])) + assert undirected_graph.contains_edge(edge[0], edge[1]) + assert undirected_graph.contains_edge(edge[1], edge[0]) + assert directed_graph.contains_edge(edge[0], edge[1]) def __assert_graph_edge_does_not_exist_check( self, @@ -213,9 +215,9 @@ def __assert_graph_edge_does_not_exist_check( directed_graph: GraphAdjacencyMatrix, edge: list[int], ) -> None: - self.assertFalse(undirected_graph.contains_edge(edge[0], edge[1])) - self.assertFalse(undirected_graph.contains_edge(edge[1], edge[0])) - self.assertFalse(directed_graph.contains_edge(edge[0], edge[1])) + assert not undirected_graph.contains_edge(edge[0], edge[1]) + assert not undirected_graph.contains_edge(edge[1], edge[0]) + assert not directed_graph.contains_edge(edge[0], edge[1]) def __assert_graph_vertex_exists_check( self, @@ -223,8 +225,8 @@ def __assert_graph_vertex_exists_check( directed_graph: GraphAdjacencyMatrix, vertex: int, ) -> None: - self.assertTrue(undirected_graph.contains_vertex(vertex)) - self.assertTrue(directed_graph.contains_vertex(vertex)) + assert undirected_graph.contains_vertex(vertex) + assert directed_graph.contains_vertex(vertex) def __assert_graph_vertex_does_not_exist_check( self, @@ -232,13 +234,13 @@ def __assert_graph_vertex_does_not_exist_check( directed_graph: GraphAdjacencyMatrix, vertex: int, ) -> None: - self.assertFalse(undirected_graph.contains_vertex(vertex)) - self.assertFalse(directed_graph.contains_vertex(vertex)) + assert not undirected_graph.contains_vertex(vertex) + assert not directed_graph.contains_vertex(vertex) def __generate_random_edges( self, vertices: list[int], edge_pick_count: int ) -> list[list[int]]: - self.assertTrue(edge_pick_count <= len(vertices)) + assert edge_pick_count <= len(vertices) random_source_vertices: list[int] = random.sample( vertices[0 : int(len(vertices) / 2)], edge_pick_count @@ -300,8 +302,8 @@ def test_init_check(self) -> None: undirected_graph, directed_graph, edge ) - self.assertFalse(undirected_graph.directed) - self.assertTrue(directed_graph.directed) + assert not undirected_graph.directed + assert directed_graph.directed def test_contains_vertex(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) @@ -316,12 +318,8 @@ def test_contains_vertex(self) -> None: # Test contains_vertex for num in range(101): - self.assertEqual( - num in random_vertices, undirected_graph.contains_vertex(num) - ) - self.assertEqual( - num in random_vertices, directed_graph.contains_vertex(num) - ) + assert (num in random_vertices) == undirected_graph.contains_vertex(num) + assert (num in 
random_vertices) == directed_graph.contains_vertex(num) def test_add_vertices(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) @@ -526,9 +524,9 @@ def test_add_vertex_exception_check(self) -> None: ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.add_vertex(vertex) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.add_vertex(vertex) def test_remove_vertex_exception_check(self) -> None: @@ -541,9 +539,9 @@ def test_remove_vertex_exception_check(self) -> None: for i in range(101): if i not in random_vertices: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.remove_vertex(i) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.remove_vertex(i) def test_add_edge_exception_check(self) -> None: @@ -555,9 +553,9 @@ def test_add_edge_exception_check(self) -> None: ) = self.__generate_graphs(20, 0, 100, 4) for edge in random_edges: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.add_edge(edge[0], edge[1]) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.add_edge(edge[0], edge[1]) def test_remove_edge_exception_check(self) -> None: @@ -579,9 +577,9 @@ def test_remove_edge_exception_check(self) -> None: more_random_edges.append(edge) for edge in more_random_edges: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.remove_edge(edge[0], edge[1]) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.remove_edge(edge[0], edge[1]) def test_contains_edge_exception_check(self) -> None: @@ -593,14 +591,14 @@ def test_contains_edge_exception_check(self) -> None: ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.contains_edge(vertex, 102) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.contains_edge(vertex, 102) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.contains_edge(103, 102) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.contains_edge(103, 102) diff --git a/hashes/sha256.py b/hashes/sha256.py index ba9aff8dbf41..bcc83edca480 100644 --- a/hashes/sha256.py +++ b/hashes/sha256.py @@ -203,7 +203,7 @@ def test_match_hashes(self) -> None: import hashlib msg = bytes("Test String", "utf-8") - self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest()) + assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest() def main() -> None: diff --git a/knapsack/tests/test_greedy_knapsack.py b/knapsack/tests/test_greedy_knapsack.py index b7b62d5d80b4..e6a40084109e 100644 --- a/knapsack/tests/test_greedy_knapsack.py +++ b/knapsack/tests/test_greedy_knapsack.py @@ -1,5 +1,7 @@ import unittest +import pytest + from knapsack import greedy_knapsack as kp @@ -16,7 +18,7 @@ def test_sorted(self): profit = [10, 20, 30, 40, 50, 60] weight = [2, 4, 6, 8, 10, 12] max_weight = 100 - self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210) + assert kp.calc_profit(profit, weight, max_weight) == 210 def test_negative_max_weight(self): """ @@ -26,7 +28,7 @@ def test_negative_max_weight(self): # profit = [10, 20, 30, 40, 50, 60] # weight = [2, 4, 6, 8, 10, 12] # max_weight = -15 - 
self.assertRaisesRegex(ValueError, "max_weight must greater than zero.") + pytest.raises(ValueError, match="max_weight must greater than zero.") def test_negative_profit_value(self): """ @@ -36,7 +38,7 @@ def test_negative_profit_value(self): # profit = [10, -20, 30, 40, 50, 60] # weight = [2, 4, 6, 8, 10, 12] # max_weight = 15 - self.assertRaisesRegex(ValueError, "Weight can not be negative.") + pytest.raises(ValueError, match="Weight can not be negative.") def test_negative_weight_value(self): """ @@ -46,7 +48,7 @@ def test_negative_weight_value(self): # profit = [10, 20, 30, 40, 50, 60] # weight = [2, -4, 6, -8, 10, 12] # max_weight = 15 - self.assertRaisesRegex(ValueError, "Profit can not be negative.") + pytest.raises(ValueError, match="Profit can not be negative.") def test_null_max_weight(self): """ @@ -56,7 +58,7 @@ def test_null_max_weight(self): # profit = [10, 20, 30, 40, 50, 60] # weight = [2, 4, 6, 8, 10, 12] # max_weight = null - self.assertRaisesRegex(ValueError, "max_weight must greater than zero.") + pytest.raises(ValueError, match="max_weight must greater than zero.") def test_unequal_list_length(self): """ @@ -66,9 +68,7 @@ def test_unequal_list_length(self): # profit = [10, 20, 30, 40, 50] # weight = [2, 4, 6, 8, 10, 12] # max_weight = 100 - self.assertRaisesRegex( - IndexError, "The length of profit and weight must be same." - ) + pytest.raises(IndexError, match="The length of profit and weight must be same.") if __name__ == "__main__": diff --git a/knapsack/tests/test_knapsack.py b/knapsack/tests/test_knapsack.py index 248855fbce53..6932bbb3536b 100644 --- a/knapsack/tests/test_knapsack.py +++ b/knapsack/tests/test_knapsack.py @@ -20,12 +20,12 @@ def test_base_case(self): val = [0] w = [0] c = len(val) - self.assertEqual(k.knapsack(cap, w, val, c), 0) + assert k.knapsack(cap, w, val, c) == 0 val = [60] w = [10] c = len(val) - self.assertEqual(k.knapsack(cap, w, val, c), 0) + assert k.knapsack(cap, w, val, c) == 0 def test_easy_case(self): """ @@ -35,7 +35,7 @@ def test_easy_case(self): val = [1, 2, 3] w = [3, 2, 1] c = len(val) - self.assertEqual(k.knapsack(cap, w, val, c), 5) + assert k.knapsack(cap, w, val, c) == 5 def test_knapsack(self): """ @@ -45,7 +45,7 @@ def test_knapsack(self): val = [60, 100, 120] w = [10, 20, 30] c = len(val) - self.assertEqual(k.knapsack(cap, w, val, c), 220) + assert k.knapsack(cap, w, val, c) == 220 if __name__ == "__main__": diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index e3556e74c3f3..5074faf31d1d 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -200,7 +200,8 @@ def unit_basis_vector(dimension: int, pos: int) -> Vector: at index 'pos' (indexing at 0) """ # precondition - assert isinstance(dimension, int) and (isinstance(pos, int)) + assert isinstance(dimension, int) + assert isinstance(pos, int) ans = [0] * dimension ans[pos] = 1 return Vector(ans) @@ -213,11 +214,9 @@ def axpy(scalar: float, x: Vector, y: Vector) -> Vector: computes the axpy operation """ # precondition - assert ( - isinstance(x, Vector) - and isinstance(y, Vector) - and (isinstance(scalar, (int, float))) - ) + assert isinstance(x, Vector) + assert isinstance(y, Vector) + assert isinstance(scalar, (int, float)) return x * scalar + y diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py index 750f4de5e397..1cc084043856 100644 --- a/linear_algebra/src/schur_complement.py +++ b/linear_algebra/src/schur_complement.py @@ -1,6 +1,7 @@ import unittest import numpy as np +import pytest 
 def schur_complement( @@ -70,14 +71,14 @@ def test_schur_complement(self) -> None: det_a = np.linalg.det(a) det_s = np.linalg.det(s) - self.assertAlmostEqual(det_x, det_a * det_s) + assert np.isclose(det_x, det_a * det_s) def test_improper_a_b_dimensions(self) -> None: a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]]) b = np.array([[0, 3], [3, 0], [2, 3]]) c = np.array([[2, 1], [6, 3]]) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): schur_complement(a, b, c) def test_improper_b_c_dimensions(self) -> None: @@ -85,7 +86,7 @@ def test_improper_b_c_dimensions(self) -> None: b = np.array([[0, 3], [3, 0], [2, 3]]) c = np.array([[2, 1, 3], [6, 3, 5]]) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): schur_complement(a, b, c) diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index 50d079572e0f..95ab408b3d86 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -8,6 +8,8 @@ """ import unittest +import pytest + from .lib import ( Matrix, Vector, @@ -24,8 +26,8 @@ def test_component(self) -> None: test for method component() """ x = Vector([1, 2, 3]) - self.assertEqual(x.component(0), 1) - self.assertEqual(x.component(2), 3) + assert x.component(0) == 1 + assert x.component(2) == 3 _ = Vector() def test_str(self) -> None: @@ -33,14 +35,14 @@ def test_str(self) -> None: test for method toString() """ x = Vector([0, 0, 0, 0, 0, 1]) - self.assertEqual(str(x), "(0,0,0,0,0,1)") + assert str(x) == "(0,0,0,0,0,1)" def test_size(self) -> None: """ test for method size() """ x = Vector([1, 2, 3, 4]) - self.assertEqual(len(x), 4) + assert len(x) == 4 def test_euclidean_length(self) -> None: """ @@ -50,10 +52,10 @@ def test_euclidean_length(self) -> None: y = Vector([1, 2, 3, 4, 5]) z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) w = Vector([1, -1, 1, -1, 2, -3, 4, -5]) - self.assertAlmostEqual(x.euclidean_length(), 2.236, 3) - self.assertAlmostEqual(y.euclidean_length(), 7.416, 3) - self.assertEqual(z.euclidean_length(), 0) - self.assertAlmostEqual(w.euclidean_length(), 7.616, 3) + assert x.euclidean_length() == pytest.approx(2.236, abs=1e-3) + assert y.euclidean_length() == pytest.approx(7.416, abs=1e-3) + assert z.euclidean_length() == 0 + assert w.euclidean_length() == pytest.approx(7.616, abs=1e-3) def test_add(self) -> None: """ @@ -61,9 +63,9 @@ def test_add(self) -> None: """ x = Vector([1, 2, 3]) y = Vector([1, 1, 1]) - self.assertEqual((x + y).component(0), 2) - self.assertEqual((x + y).component(1), 3) - self.assertEqual((x + y).component(2), 4) + assert (x + y).component(0) == 2 + assert (x + y).component(1) == 3 + assert (x + y).component(2) == 4 def test_sub(self) -> None: """ @@ -71,9 +73,9 @@ def test_sub(self) -> None: """ x = Vector([1, 2, 3]) y = Vector([1, 1, 1]) - self.assertEqual((x - y).component(0), 0) - self.assertEqual((x - y).component(1), 1) - self.assertEqual((x - y).component(2), 2) + assert (x - y).component(0) == 0 + assert (x - y).component(1) == 1 + assert (x - y).component(2) == 2 def test_mul(self) -> None: """ @@ -82,20 +84,20 @@ def test_mul(self) -> None: x = Vector([1, 2, 3]) a = Vector([2, -1, 4]) # for test of dot product b = Vector([1, -2, -1]) - self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)") - self.assertEqual((a * b), 0) + assert str(x * 3.0) == "(3.0,6.0,9.0)" + assert a * b == 0 def test_zero_vector(self) -> None: """ test for global function zero_vector() """ - self.assertEqual(str(zero_vector(10)).count("0"), 10) + assert 
str(zero_vector(10)).count("0") == 10 def test_unit_basis_vector(self) -> None: """ test for global function unit_basis_vector() """ - self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)") + assert str(unit_basis_vector(3, 1)) == "(0,1,0)" def test_axpy(self) -> None: """ @@ -103,7 +105,7 @@ def test_axpy(self) -> None: """ x = Vector([1, 2, 3]) y = Vector([1, 0, 1]) - self.assertEqual(str(axpy(2, x, y)), "(3,4,7)") + assert str(axpy(2, x, y)) == "(3,4,7)" def test_copy(self) -> None: """ @@ -111,7 +113,7 @@ def test_copy(self) -> None: """ x = Vector([1, 0, 0, 0, 0, 0]) y = x.copy() - self.assertEqual(str(x), str(y)) + assert str(x) == str(y) def test_change_component(self) -> None: """ @@ -120,14 +122,14 @@ def test_change_component(self) -> None: x = Vector([1, 0, 0]) x.change_component(0, 0) x.change_component(1, 1) - self.assertEqual(str(x), "(0,1,0)") + assert str(x) == "(0,1,0)" def test_str_matrix(self) -> None: """ test for Matrix method str() """ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a)) + assert str(a) == "|1,2,3|\n|2,4,5|\n|6,7,8|\n" def test_minor(self) -> None: """ @@ -137,7 +139,7 @@ def test_minor(self) -> None: minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height()): for y in range(a.width()): - self.assertEqual(minors[x][y], a.minor(x, y)) + assert minors[x][y] == a.minor(x, y) def test_cofactor(self) -> None: """ @@ -147,14 +149,14 @@ def test_cofactor(self) -> None: cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height()): for y in range(a.width()): - self.assertEqual(cofactors[x][y], a.cofactor(x, y)) + assert cofactors[x][y] == a.cofactor(x, y) def test_determinant(self) -> None: """ test for Matrix method determinant() """ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - self.assertEqual(-5, a.determinant()) + assert a.determinant() == -5 def test__mul__matrix(self) -> None: """ @@ -162,8 +164,8 @@ def test__mul__matrix(self) -> None: """ a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3) x = Vector([1, 2, 3]) - self.assertEqual("(14,32,50)", str(a * x)) - self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2)) + assert str(a * x) == "(14,32,50)" + assert str(a * 2) == "|2,4,6|\n|8,10,12|\n|14,16,18|\n" def test_change_component_matrix(self) -> None: """ @@ -171,14 +173,14 @@ def test_change_component_matrix(self) -> None: """ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) a.change_component(0, 2, 5) - self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a)) + assert str(a) == "|1,2,5|\n|2,4,5|\n|6,7,8|\n" def test_component_matrix(self) -> None: """ test for Matrix method component() """ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - self.assertEqual(7, a.component(2, 1), 0.01) + assert a.component(2, 1) == 7, 0.01 def test__add__matrix(self) -> None: """ @@ -186,7 +188,7 @@ def test__add__matrix(self) -> None: """ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) - self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b)) + assert str(a + b) == "|2,4,10|\n|4,8,10|\n|12,14,18|\n" def test__sub__matrix(self) -> None: """ @@ -194,15 +196,14 @@ def test__sub__matrix(self) -> None: """ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3) - self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b)) + assert str(a - b) == "|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" def test_square_zero_matrix(self) -> None: """ test for 
global function square_zero_matrix() """ - self.assertEqual( - "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", - str(square_zero_matrix(5)), + assert str(square_zero_matrix(5)) == ( + "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" ) diff --git a/machine_learning/dimensionality_reduction.py b/machine_learning/dimensionality_reduction.py index d2046f81af04..50d442ecc3de 100644 --- a/machine_learning/dimensionality_reduction.py +++ b/machine_learning/dimensionality_reduction.py @@ -169,7 +169,7 @@ def test_linear_discriminant_analysis() -> None: dimensions = 2 # Assert that the function raises an AssertionError if dimensions > classes - with pytest.raises(AssertionError) as error_info: + with pytest.raises(AssertionError) as error_info: # noqa: PT012 projected_data = linear_discriminant_analysis( features, labels, classes, dimensions ) @@ -185,7 +185,7 @@ def test_principal_component_analysis() -> None: dimensions = 2 expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]]) - with pytest.raises(AssertionError) as error_info: + with pytest.raises(AssertionError) as error_info: # noqa: PT012 output = principal_component_analysis(features, dimensions) if not np.allclose(expected_output, output): raise AssertionError diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 3fe151442e2e..ebad66ac8e8f 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -128,7 +128,7 @@ def plot_heterogeneity(heterogeneity, k): def kmeans( data, k, initial_centroids, maxiter=500, record_heterogeneity=None, verbose=False ): - """This function runs k-means on given data and initial set of centroids. + """Runs k-means on given data and initial set of centroids. maxiter: maximum number of iterations to run.(default=500) record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations @@ -195,20 +195,20 @@ def kmeans( def report_generator( - df: pd.DataFrame, clustering_variables: np.ndarray, fill_missing_report=None + predicted: pd.DataFrame, clustering_variables: np.ndarray, fill_missing_report=None ) -> pd.DataFrame: """ - Generates a clustering report. 
This function takes 2 arguments as input: - df - dataframe with predicted cluster column + Generate a clustering report given these two arguments: + predicted - dataframe with predicted cluster column fill_missing_report - dictionary of rules on how we are going to fill in missing values for final generated report (not included in modelling); - >>> data = pd.DataFrame() - >>> data['numbers'] = [1, 2, 3] - >>> data['col1'] = [0.5, 2.5, 4.5] - >>> data['col2'] = [100, 200, 300] - >>> data['col3'] = [10, 20, 30] - >>> data['Cluster'] = [1, 1, 2] - >>> report_generator(data, ['col1', 'col2'], 0) + >>> predicted = pd.DataFrame() + >>> predicted['numbers'] = [1, 2, 3] + >>> predicted['col1'] = [0.5, 2.5, 4.5] + >>> predicted['col2'] = [100, 200, 300] + >>> predicted['col3'] = [10, 20, 30] + >>> predicted['Cluster'] = [1, 1, 2] + >>> report_generator(predicted, ['col1', 'col2'], 0) Features Type Mark 1 2 0 # of Customers ClusterSize False 2.000000 1.000000 1 % of Customers ClusterProportion False 0.666667 0.333333 @@ -226,11 +226,11 @@ def report_generator( """ # Fill missing values with given rules if fill_missing_report: - df = df.fillna(value=fill_missing_report) - df["dummy"] = 1 - numeric_cols = df.select_dtypes(np.number).columns + predicted = predicted.fillna(value=fill_missing_report) + predicted["dummy"] = 1 + numeric_cols = predicted.select_dtypes(np.number).columns report = ( - df.groupby(["Cluster"])[ # construct report dataframe + predicted.groupby(["Cluster"])[ # construct report dataframe numeric_cols ] # group by cluster number .agg( @@ -267,46 +267,43 @@ def report_generator( .rename(index=str, columns={"level_0": "Features", "level_1": "Type"}) ) # rename columns # calculate the size of cluster(count of clientID's) + # avoid SettingWithCopyWarning clustersize = report[ (report["Features"] == "dummy") & (report["Type"] == "count") - ].copy() # avoid SettingWithCopyWarning - clustersize.Type = ( - "ClusterSize" # rename created cluster df to match report column names - ) + ].copy() + # rename created predicted cluster to match report column names + clustersize.Type = "ClusterSize" clustersize.Features = "# of Customers" + # calculating the proportion of cluster clusterproportion = pd.DataFrame( - clustersize.iloc[:, 2:].values - / clustersize.iloc[:, 2:].values.sum() # calculating the proportion of cluster + clustersize.iloc[:, 2:].to_numpy() / clustersize.iloc[:, 2:].to_numpy().sum() ) - clusterproportion[ - "Type" - ] = "% of Customers" # rename created cluster df to match report column names + # rename created predicted cluster to match report column names + clusterproportion["Type"] = "% of Customers" clusterproportion["Features"] = "ClusterProportion" cols = clusterproportion.columns.tolist() cols = cols[-2:] + cols[:-2] clusterproportion = clusterproportion[cols] # rearrange columns to match report clusterproportion.columns = report.columns + # generating dataframe with count of nan values a = pd.DataFrame( abs( - report[report["Type"] == "count"].iloc[:, 2:].values - - clustersize.iloc[:, 2:].values + report[report["Type"] == "count"].iloc[:, 2:].to_numpy() + - clustersize.iloc[:, 2:].to_numpy() ) - ) # generating df with count of nan values + ) a["Features"] = 0 a["Type"] = "# of nan" - a.Features = report[ - report["Type"] == "count" - ].Features.tolist() # filling values in order to match report + # filling values in order to match report + a.Features = report[report["Type"] == "count"].Features.tolist() cols = a.columns.tolist() cols = cols[-2:] + cols[:-2] a = a[cols] 
# rearrange columns to match report a.columns = report.columns # rename columns to match report - report = report.drop( - report[report.Type == "count"].index - ) # drop count values except for cluster size - report = pd.concat( - [report, a, clustersize, clusterproportion], axis=0 - ) # concat report with cluster size and nan values + # drop count values except for cluster size + report = report.drop(report[report.Type == "count"].index) + # concat report with cluster size and nan values + report = pd.concat([report, a, clustersize, clusterproportion], axis=0) report["Mark"] = report["Features"].isin(clustering_variables) cols = report.columns.tolist() cols = cols[0:2] + cols[-1:] + cols[2:-1] diff --git a/maths/least_common_multiple.py b/maths/least_common_multiple.py index 4f28da8ab2a7..a5c4bf8e3625 100644 --- a/maths/least_common_multiple.py +++ b/maths/least_common_multiple.py @@ -67,8 +67,8 @@ def test_lcm_function(self): slow_result = least_common_multiple_slow(first_num, second_num) fast_result = least_common_multiple_fast(first_num, second_num) with self.subTest(i=i): - self.assertEqual(slow_result, self.expected_results[i]) - self.assertEqual(fast_result, self.expected_results[i]) + assert slow_result == self.expected_results[i] + assert fast_result == self.expected_results[i] if __name__ == "__main__": diff --git a/maths/modular_division.py b/maths/modular_division.py index a9d0f65c5b27..260d5683705d 100644 --- a/maths/modular_division.py +++ b/maths/modular_division.py @@ -28,7 +28,9 @@ def modular_division(a: int, b: int, n: int) -> int: 4 """ - assert n > 1 and a > 0 and greatest_common_divisor(a, n) == 1 + assert n > 1 + assert a > 0 + assert greatest_common_divisor(a, n) == 1 (d, t, s) = extended_gcd(n, a) # Implemented below x = (b * s) % n return x @@ -86,7 +88,8 @@ def extended_gcd(a: int, b: int) -> tuple[int, int, int]: ** extended_gcd function is used when d = gcd(a,b) is required in output """ - assert a >= 0 and b >= 0 + assert a >= 0 + assert b >= 0 if b == 0: d, x, y = a, 1, 0 @@ -95,7 +98,8 @@ def extended_gcd(a: int, b: int) -> tuple[int, int, int]: x = q y = p - q * (a // b) - assert a % d == 0 and b % d == 0 + assert a % d == 0 + assert b % d == 0 assert d == a * x + b * y return (d, x, y) diff --git a/maths/prime_check.py b/maths/prime_check.py index 80ab8bc5d2cd..c17877a57705 100644 --- a/maths/prime_check.py +++ b/maths/prime_check.py @@ -3,6 +3,8 @@ import math import unittest +import pytest + def is_prime(number: int) -> bool: """Checks to see if a number is a prime in O(sqrt(n)). 
@@ -50,33 +52,31 @@ def is_prime(number: int) -> bool: class Test(unittest.TestCase): def test_primes(self): - self.assertTrue(is_prime(2)) - self.assertTrue(is_prime(3)) - self.assertTrue(is_prime(5)) - self.assertTrue(is_prime(7)) - self.assertTrue(is_prime(11)) - self.assertTrue(is_prime(13)) - self.assertTrue(is_prime(17)) - self.assertTrue(is_prime(19)) - self.assertTrue(is_prime(23)) - self.assertTrue(is_prime(29)) + assert is_prime(2) + assert is_prime(3) + assert is_prime(5) + assert is_prime(7) + assert is_prime(11) + assert is_prime(13) + assert is_prime(17) + assert is_prime(19) + assert is_prime(23) + assert is_prime(29) def test_not_primes(self): - with self.assertRaises(AssertionError): + with pytest.raises(AssertionError): is_prime(-19) - self.assertFalse( - is_prime(0), - "Zero doesn't have any positive factors, primes must have exactly two.", - ) - self.assertFalse( - is_prime(1), - "One only has 1 positive factor, primes must have exactly two.", - ) - self.assertFalse(is_prime(2 * 2)) - self.assertFalse(is_prime(2 * 3)) - self.assertFalse(is_prime(3 * 3)) - self.assertFalse(is_prime(3 * 5)) - self.assertFalse(is_prime(3 * 5 * 7)) + assert not is_prime( + 0 + ), "Zero doesn't have any positive factors, primes must have exactly two." + assert not is_prime( + 1 + ), "One only has 1 positive factor, primes must have exactly two." + assert not is_prime(2 * 2) + assert not is_prime(2 * 3) + assert not is_prime(3 * 3) + assert not is_prime(3 * 5) + assert not is_prime(3 * 5 * 7) if __name__ == "__main__": diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index b6e50f70fdcf..7f10ae706e85 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -114,7 +114,8 @@ def __add__(self, another: Matrix) -> Matrix: # Validation assert isinstance(another, Matrix) - assert self.row == another.row and self.column == another.column + assert self.row == another.row + assert self.column == another.column # Add result = Matrix(self.row, self.column) @@ -225,7 +226,8 @@ def sherman_morrison(self, u: Matrix, v: Matrix) -> Any: """ # Size validation - assert isinstance(u, Matrix) and isinstance(v, Matrix) + assert isinstance(u, Matrix) + assert isinstance(v, Matrix) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector diff --git a/matrix/tests/test_matrix_operation.py b/matrix/tests/test_matrix_operation.py index 65b35fd7e78b..638f97daa2ed 100644 --- a/matrix/tests/test_matrix_operation.py +++ b/matrix/tests/test_matrix_operation.py @@ -31,14 +31,14 @@ logger.addHandler(stream_handler) -@pytest.mark.mat_ops +@pytest.mark.mat_ops() @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) def test_addition(mat1, mat2): if (np.array(mat1)).shape < (2, 2) or (np.array(mat2)).shape < (2, 2): + logger.info(f"\n\t{test_addition.__name__} returned integer") with pytest.raises(TypeError): - logger.info(f"\n\t{test_addition.__name__} returned integer") matop.add(mat1, mat2) elif (np.array(mat1)).shape == (np.array(mat2)).shape: logger.info(f"\n\t{test_addition.__name__} with same matrix dims") @@ -46,19 +46,19 @@ def test_addition(mat1, mat2): theo = matop.add(mat1, mat2) assert theo == act else: + logger.info(f"\n\t{test_addition.__name__} with different matrix dims") with pytest.raises(ValueError): - logger.info(f"\n\t{test_addition.__name__} with different matrix dims") matop.add(mat1, mat2) -@pytest.mark.mat_ops 
+@pytest.mark.mat_ops() @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) def test_subtraction(mat1, mat2): if (np.array(mat1)).shape < (2, 2) or (np.array(mat2)).shape < (2, 2): + logger.info(f"\n\t{test_subtraction.__name__} returned integer") with pytest.raises(TypeError): - logger.info(f"\n\t{test_subtraction.__name__} returned integer") matop.subtract(mat1, mat2) elif (np.array(mat1)).shape == (np.array(mat2)).shape: logger.info(f"\n\t{test_subtraction.__name__} with same matrix dims") @@ -66,12 +66,12 @@ def test_subtraction(mat1, mat2): theo = matop.subtract(mat1, mat2) assert theo == act else: + logger.info(f"\n\t{test_subtraction.__name__} with different matrix dims") with pytest.raises(ValueError): - logger.info(f"\n\t{test_subtraction.__name__} with different matrix dims") assert matop.subtract(mat1, mat2) -@pytest.mark.mat_ops +@pytest.mark.mat_ops() @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) @@ -86,33 +86,33 @@ def test_multiplication(mat1, mat2): theo = matop.multiply(mat1, mat2) assert theo == act else: + logger.info( + f"\n\t{test_multiplication.__name__} does not meet dim requirements" + ) with pytest.raises(ValueError): - logger.info( - f"\n\t{test_multiplication.__name__} does not meet dim requirements" - ) assert matop.subtract(mat1, mat2) -@pytest.mark.mat_ops +@pytest.mark.mat_ops() def test_scalar_multiply(): act = (3.5 * np.array(mat_a)).tolist() theo = matop.scalar_multiply(mat_a, 3.5) assert theo == act -@pytest.mark.mat_ops +@pytest.mark.mat_ops() def test_identity(): act = (np.identity(5)).tolist() theo = matop.identity(5) assert theo == act -@pytest.mark.mat_ops +@pytest.mark.mat_ops() @pytest.mark.parametrize("mat", [mat_a, mat_b, mat_c, mat_d, mat_e, mat_f]) def test_transpose(mat): if (np.array(mat)).shape < (2, 2): + logger.info(f"\n\t{test_transpose.__name__} returned integer") with pytest.raises(TypeError): - logger.info(f"\n\t{test_transpose.__name__} returned integer") matop.transpose(mat) else: act = (np.transpose(mat)).tolist() diff --git a/project_euler/problem_054/test_poker_hand.py b/project_euler/problem_054/test_poker_hand.py index 5735bfc37947..ba5e0c8a2643 100644 --- a/project_euler/problem_054/test_poker_hand.py +++ b/project_euler/problem_054/test_poker_hand.py @@ -147,39 +147,39 @@ def generate_random_hands(number_of_hands: int = 100): return (generate_random_hand() for _ in range(number_of_hands)) -@pytest.mark.parametrize("hand, expected", TEST_FLUSH) +@pytest.mark.parametrize(("hand", "expected"), TEST_FLUSH) def test_hand_is_flush(hand, expected): assert PokerHand(hand)._is_flush() == expected -@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT) +@pytest.mark.parametrize(("hand", "expected"), TEST_STRAIGHT) def test_hand_is_straight(hand, expected): assert PokerHand(hand)._is_straight() == expected -@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT) +@pytest.mark.parametrize(("hand", "expected", "card_values"), TEST_FIVE_HIGH_STRAIGHT) def test_hand_is_five_high_straight(hand, expected, card_values): player = PokerHand(hand) assert player._is_five_high_straight() == expected assert player._card_values == card_values -@pytest.mark.parametrize("hand, expected", TEST_KIND) +@pytest.mark.parametrize(("hand", "expected"), TEST_KIND) def test_hand_is_same_kind(hand, expected): assert PokerHand(hand)._is_same_kind() == expected -@pytest.mark.parametrize("hand, expected", TEST_TYPES) 
+@pytest.mark.parametrize(("hand", "expected"), TEST_TYPES) def test_hand_values(hand, expected): assert PokerHand(hand)._hand_type == expected -@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE) +@pytest.mark.parametrize(("hand", "other", "expected"), TEST_COMPARE) def test_compare_simple(hand, other, expected): assert PokerHand(hand).compare_with(PokerHand(other)) == expected -@pytest.mark.parametrize("hand, other, expected", generate_random_hands()) +@pytest.mark.parametrize(("hand", "other", "expected"), generate_random_hands()) def test_compare_random(hand, other, expected): assert PokerHand(hand).compare_with(PokerHand(other)) == expected diff --git a/pyproject.toml b/pyproject.toml index 75da7a04513e..fe5f2f09c4ec 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,6 +19,8 @@ ignore = [ # `ruff rule S101` for a description of that rule "PLW0120", # `else` clause on loop without a `break` statement -- FIX ME "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception + "PT018", # Assertion should be broken down into multiple parts "RUF00", # Ambiguous unicode character and other rules "RUF100", # Unused `noqa` directive -- FIX ME "S101", # Use of `assert` detected -- DO NOT FIX @@ -37,6 +39,7 @@ select = [ # https://beta.ruff.rs/docs/rules "BLE", # flake8-blind-except "C4", # flake8-comprehensions "C90", # McCabe cyclomatic complexity + "DJ", # flake8-django "DTZ", # flake8-datetimez "E", # pycodestyle "EM", # flake8-errmsg @@ -52,9 +55,11 @@ select = [ # https://beta.ruff.rs/docs/rules "ISC", # flake8-implicit-str-concat "N", # pep8-naming "NPY", # NumPy-specific rules + "PD", # pandas-vet "PGH", # pygrep-hooks "PIE", # flake8-pie "PL", # Pylint + "PT", # flake8-pytest-style "PYI", # flake8-pyi "RSE", # flake8-raise "RUF", # Ruff-specific rules @@ -70,11 +75,8 @@ select = [ # https://beta.ruff.rs/docs/rules # "ANN", # flake8-annotations # FIX ME? # "COM", # flake8-commas # "D", # pydocstyle -- FIX ME? - # "DJ", # flake8-django # "ERA", # eradicate -- DO NOT FIX # "FBT", # flake8-boolean-trap # FIX ME - # "PD", # pandas-vet - # "PT", # flake8-pytest-style # "PTH", # flake8-use-pathlib # FIX ME # "Q", # flake8-quotes # "RET", # flake8-return # FIX ME? 
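The newly selected PT (flake8-pytest-style) rules above drive the mechanical rewrite applied throughout this commit: unittest-style assertion methods become plain asserts, and self.assertRaises becomes pytest.raises. A minimal sketch of that pattern, assuming a toy add() helper that is not part of this patch:

    import pytest

    def add(a: int, b: int) -> int:
        # Toy helper used only to illustrate the rewrite; rejects non-integers.
        if not isinstance(a, int) or not isinstance(b, int):
            raise ValueError("add() only accepts integers")
        return a + b

    # Before: self.assertEqual(add(2, 3), 5)
    assert add(2, 3) == 5

    # Before: with self.assertRaises(ValueError): add("a", 3)
    with pytest.raises(ValueError):
        add("a", 3)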
diff --git a/strings/knuth_morris_pratt.py b/strings/knuth_morris_pratt.py index 8a04eb2532c0..5120779c571e 100644 --- a/strings/knuth_morris_pratt.py +++ b/strings/knuth_morris_pratt.py @@ -71,7 +71,8 @@ def get_failure_array(pattern: str) -> list[int]: pattern = "abc1abc12" text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc" text2 = "alskfjaldsk23adsfabcabc" - assert knuth_morris_pratt(text1, pattern) and knuth_morris_pratt(text2, pattern) + assert knuth_morris_pratt(text1, pattern) + assert knuth_morris_pratt(text2, pattern) # Test 2) pattern = "ABABX" diff --git a/strings/rabin_karp.py b/strings/rabin_karp.py index 532c689f8a97..9c0d0fe5c739 100644 --- a/strings/rabin_karp.py +++ b/strings/rabin_karp.py @@ -60,7 +60,8 @@ def test_rabin_karp() -> None: pattern = "abc1abc12" text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc" text2 = "alskfjaldsk23adsfabcabc" - assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2) + assert rabin_karp(pattern, text1) + assert not rabin_karp(pattern, text2) # Test 2) pattern = "ABABX" From 92fbe60082b782d8b85e9667bd6d7832b5383fa3 Mon Sep 17 00:00:00 2001 From: Vipin Karthic <143083087+vipinkarthic@users.noreply.github.com> Date: Thu, 12 Oct 2023 00:35:24 +0530 Subject: [PATCH 1103/1543] Added doctests to carmichael_number.py (#10210) Co-authored-by: Tianyi Zheng --- maths/carmichael_number.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/maths/carmichael_number.py b/maths/carmichael_number.py index 08b5c70e8fe7..c73908545702 100644 --- a/maths/carmichael_number.py +++ b/maths/carmichael_number.py @@ -16,11 +16,9 @@ def power(x: int, y: int, mod: int) -> int: """ - Examples: >>> power(2, 15, 3) 2 - >>> power(5, 1, 30) 5 """ @@ -36,14 +34,19 @@ def power(x: int, y: int, mod: int) -> int: def is_carmichael_number(n: int) -> bool: """ - Examples: - >>> is_carmichael_number(562) + >>> is_carmichael_number(4) False - >>> is_carmichael_number(561) True - + >>> is_carmichael_number(562) + False + >>> is_carmichael_number(900) + False + >>> is_carmichael_number(1105) + True + >>> is_carmichael_number(8911) + True >>> is_carmichael_number(5.1) Traceback (most recent call last): ... 
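For context on the doctests above: a Carmichael number such as 561 is composite yet satisfies a^(n-1) ≡ 1 (mod n) for every base a coprime to n, so it fools the plain Fermat primality test. A short, self-contained check of that property (not part of the patch; it uses only the standard library):

    from math import gcd

    n = 561  # 3 * 11 * 17, the smallest Carmichael number
    # Every base coprime to n passes Fermat's test even though n is composite.
    assert all(pow(a, n - 1, n) == 1 for a in range(2, n) if gcd(a, n) == 1)
    print(f"{n} passes Fermat's test for all bases coprime to it")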
From 09ce6b23d7529aa0e02a6b5cfef1a9b831a3c9ad Mon Sep 17 00:00:00 2001 From: Siddharth Warrier <117698635+siddwarr@users.noreply.github.com> Date: Thu, 12 Oct 2023 14:38:55 +0530 Subject: [PATCH 1104/1543] Count pairs with given sum (#10282) * added power_of_4 * deleted power_of_4 * added pairs_with_given_sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated the comment * updated return hint * updated type hints * updated the variable * updated annotation * updated code * updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added the problem link and used defaultdict * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * corrected import formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pairs_with_given_sum.py * Update data_structures/arrays/pairs_with_given_sum.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../arrays/pairs_with_given_sum.py | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 data_structures/arrays/pairs_with_given_sum.py diff --git a/data_structures/arrays/pairs_with_given_sum.py b/data_structures/arrays/pairs_with_given_sum.py new file mode 100644 index 000000000000..c4a5ceeae456 --- /dev/null +++ b/data_structures/arrays/pairs_with_given_sum.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 + +""" +Given an array of integers and an integer req_sum, find the number of pairs of array +elements whose sum is equal to req_sum. + +https://practice.geeksforgeeks.org/problems/count-pairs-with-given-sum5022/0 +""" +from itertools import combinations + + +def pairs_with_sum(arr: list, req_sum: int) -> int: + """ + Return the no. of pairs with sum "sum" + >>> pairs_with_sum([1, 5, 7, 1], 6) + 2 + >>> pairs_with_sum([1, 1, 1, 1, 1, 1, 1, 1], 2) + 28 + >>> pairs_with_sum([1, 7, 6, 2, 5, 4, 3, 1, 9, 8], 7) + 4 + """ + return len([1 for a, b in combinations(arr, 2) if a + b == req_sum]) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 7ea812996c8ee1fa2eb9fbc72b7caaae8eb8ff0e Mon Sep 17 00:00:00 2001 From: Poojan Smart <44301271+PoojanSmart@users.noreply.github.com> Date: Thu, 12 Oct 2023 19:24:07 +0530 Subject: [PATCH 1105/1543] Adds exponential moving average algorithm (#10273) * Adds exponential moving average algorithm * code clean up * spell correction * Modifies I/O types of function * Replaces generator function * Resolved mypy type error * readibility of code and documentation * Update exponential_moving_average.py --------- Co-authored-by: Christian Clauss --- financial/exponential_moving_average.py | 73 +++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 financial/exponential_moving_average.py diff --git a/financial/exponential_moving_average.py b/financial/exponential_moving_average.py new file mode 100644 index 000000000000..0b6cea3b4c91 --- /dev/null +++ b/financial/exponential_moving_average.py @@ -0,0 +1,73 @@ +""" + Calculate the exponential moving average (EMA) on the series of stock prices. + Wikipedia Reference: https://en.wikipedia.org/wiki/Exponential_smoothing + https://www.investopedia.com/terms/e/ema.asp#toc-what-is-an-exponential + -moving-average-ema + + Exponential moving average is used in finance to analyze changes stock prices. 
+ EMA is used in conjunction with Simple moving average (SMA), EMA reacts to the + changes in the value quicker than SMA, which is one of the advantages of using EMA. +""" + +from collections.abc import Iterator + + +def exponential_moving_average( + stock_prices: Iterator[float], window_size: int +) -> Iterator[float]: + """ + Yields exponential moving averages of the given stock prices. + >>> tuple(exponential_moving_average(iter([2, 5, 3, 8.2, 6, 9, 10]), 3)) + (2, 3.5, 3.25, 5.725, 5.8625, 7.43125, 8.715625) + + :param stock_prices: A stream of stock prices + :param window_size: The number of stock prices that will trigger a new calculation + of the exponential average (window_size > 0) + :return: Yields a sequence of exponential moving averages + + Formula: + + st = alpha * xt + (1 - alpha) * st_prev + + Where, + st : Exponential moving average at timestamp t + xt : stock price in from the stock prices at timestamp t + st_prev : Exponential moving average at timestamp t-1 + alpha : 2/(1 + window_size) - smoothing factor + + Exponential moving average (EMA) is a rule of thumb technique for + smoothing time series data using an exponential window function. + """ + + if window_size <= 0: + raise ValueError("window_size must be > 0") + + # Calculating smoothing factor + alpha = 2 / (1 + window_size) + + # Exponential average at timestamp t + moving_average = 0.0 + + for i, stock_price in enumerate(stock_prices): + if i <= window_size: + # Assigning simple moving average till the window_size for the first time + # is reached + moving_average = (moving_average + stock_price) * 0.5 if i else stock_price + else: + # Calculating exponential moving average based on current timestamp data + # point and previous exponential average value + moving_average = (alpha * stock_price) + ((1 - alpha) * moving_average) + yield moving_average + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + stock_prices = [2.0, 5, 3, 8.2, 6, 9, 10] + window_size = 3 + result = tuple(exponential_moving_average(iter(stock_prices), window_size)) + print(f"{stock_prices = }") + print(f"{window_size = }") + print(f"{result = }") From ecf21bfc87c1d1cd4730e628279b609151bc6c57 Mon Sep 17 00:00:00 2001 From: Daniela Large <133594563+dannylarge144@users.noreply.github.com> Date: Thu, 12 Oct 2023 16:51:06 +0100 Subject: [PATCH 1106/1543] Added imply gate to boolean algebra (#9849) * Add files via upload * Update imply_gate.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update boolean_algebra/imply_gate.py Co-authored-by: Tianyi Zheng * Update imply_gate.py Made changes requested * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update imply_gate.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- boolean_algebra/imply_gate.py | 40 +++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 boolean_algebra/imply_gate.py diff --git a/boolean_algebra/imply_gate.py b/boolean_algebra/imply_gate.py new file mode 100644 index 000000000000..151a7ad6439a --- /dev/null +++ b/boolean_algebra/imply_gate.py @@ -0,0 +1,40 @@ +""" +An IMPLY Gate is a logic gate in boolean algebra which results to 1 if +either input 1 is 0, or if input 1 is 1, then the output is 1 only if input 2 is 1. +It is true if input 1 implies input 2. 
+ +Following is the truth table of an IMPLY Gate: + ------------------------------ + | Input 1 | Input 2 | Output | + ------------------------------ + | 0 | 0 | 1 | + | 0 | 1 | 1 | + | 1 | 0 | 0 | + | 1 | 1 | 1 | + ------------------------------ + +Refer - https://en.wikipedia.org/wiki/IMPLY_gate +""" + + +def imply_gate(input_1: int, input_2: int) -> int: + """ + Calculate IMPLY of the input values + + >>> imply_gate(0, 0) + 1 + >>> imply_gate(0, 1) + 1 + >>> imply_gate(1, 0) + 0 + >>> imply_gate(1, 1) + 1 + """ + return int(input_1 == 0 or input_2 == 1) + + +if __name__ == "__main__": + print(imply_gate(0, 0)) + print(imply_gate(0, 1)) + print(imply_gate(1, 0)) + print(imply_gate(1, 1)) From b94cdbab1a7f3793e63526cd29a8f415ff0b55ac Mon Sep 17 00:00:00 2001 From: Pranavkumar Mallela <87595299+pranav-mallela@users.noreply.github.com> Date: Fri, 13 Oct 2023 01:21:53 +0530 Subject: [PATCH 1107/1543] add find triplets with 0 sum (3sum) (#10040) * add find triplets with 0 sum (3sum) * Update find_triplets_with_0_sum.py * Update find_triplets_with_0_sum.py --------- Co-authored-by: Christian Clauss --- .../arrays/find_triplets_with_0_sum.py | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 data_structures/arrays/find_triplets_with_0_sum.py diff --git a/data_structures/arrays/find_triplets_with_0_sum.py b/data_structures/arrays/find_triplets_with_0_sum.py new file mode 100644 index 000000000000..8217ff857e3d --- /dev/null +++ b/data_structures/arrays/find_triplets_with_0_sum.py @@ -0,0 +1,24 @@ +from itertools import combinations + + +def find_triplets_with_0_sum(nums: list[int]) -> list[list[int]]: + """ + Given a list of integers, return elements a, b, c such that a + b + c = 0. + Args: + nums: list of integers + Returns: + list of lists of integers where sum(each_list) == 0 + Examples: + >>> find_triplets_with_0_sum([-1, 0, 1, 2, -1, -4]) + [[-1, -1, 2], [-1, 0, 1]] + >>> find_triplets_with_0_sum([]) + [] + >>> find_triplets_with_0_sum([0, 0, 0]) + [[0, 0, 0]] + >>> find_triplets_with_0_sum([1, 2, 3, 0, -1, -2, -3]) + [[-3, 0, 3], [-3, 1, 2], [-2, -1, 3], [-2, 0, 2], [-1, 0, 1]] + """ + return [ + list(x) + for x in sorted({abc for abc in combinations(sorted(nums), 3) if not sum(abc)}) + ] From 24f6f8c137a6ba9784c06da3694a1d36781b7a88 Mon Sep 17 00:00:00 2001 From: Daniela Large <133594563+dannylarge144@users.noreply.github.com> Date: Fri, 13 Oct 2023 05:29:39 +0100 Subject: [PATCH 1108/1543] Added nimply gate to boolean_algebra (#10344) * Add files via upload * Update imply_gate.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update boolean_algebra/imply_gate.py Co-authored-by: Tianyi Zheng * Update imply_gate.py Made changes requested * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update imply_gate.py * Added nimply gate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- boolean_algebra/nimply_gate.py | 40 ++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 boolean_algebra/nimply_gate.py diff --git a/boolean_algebra/nimply_gate.py b/boolean_algebra/nimply_gate.py new file mode 100644 index 000000000000..6e34332d9112 --- /dev/null +++ b/boolean_algebra/nimply_gate.py @@ -0,0 +1,40 @@ +""" +An NIMPLY Gate is a logic gate in boolean 
algebra which results to 0 if +either input 1 is 0, or if input 1 is 1, then it is 0 only if input 2 is 1. +It is false if input 1 implies input 2. It is the negated form of imply + +Following is the truth table of an NIMPLY Gate: + ------------------------------ + | Input 1 | Input 2 | Output | + ------------------------------ + | 0 | 0 | 0 | + | 0 | 1 | 0 | + | 1 | 0 | 1 | + | 1 | 1 | 0 | + ------------------------------ + +Refer - https://en.wikipedia.org/wiki/NIMPLY_gate +""" + + +def nimply_gate(input_1: int, input_2: int) -> int: + """ + Calculate NIMPLY of the input values + + >>> nimply_gate(0, 0) + 0 + >>> nimply_gate(0, 1) + 0 + >>> nimply_gate(1, 0) + 1 + >>> nimply_gate(1, 1) + 0 + """ + return int(input_1 == 1 and input_2 == 0) + + +if __name__ == "__main__": + print(nimply_gate(0, 0)) + print(nimply_gate(0, 1)) + print(nimply_gate(1, 0)) + print(nimply_gate(1, 1)) From ebe66935d2842a0e0cbea58dcc647428f357f15e Mon Sep 17 00:00:00 2001 From: Saahil Mahato <115351000+saahil-mahato@users.noreply.github.com> Date: Fri, 13 Oct 2023 11:49:48 +0545 Subject: [PATCH 1109/1543] Add Solovay-Strassen Primality test (#10335) * Add Solovay-Strassen Primality test * fix: resolve comments * refactor: docs change --- maths/solovay_strassen_primality_test.py | 107 +++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 maths/solovay_strassen_primality_test.py diff --git a/maths/solovay_strassen_primality_test.py b/maths/solovay_strassen_primality_test.py new file mode 100644 index 000000000000..1d11d458369a --- /dev/null +++ b/maths/solovay_strassen_primality_test.py @@ -0,0 +1,107 @@ +""" +This script implements the Solovay-Strassen Primality test. + +This probabilistic primality test is based on Euler's criterion. It is similar +to the Fermat test but uses quadratic residues. It can quickly identify +composite numbers but may occasionally classify composite numbers as prime. + +More details and concepts about this can be found on: +https://en.wikipedia.org/wiki/Solovay%E2%80%93Strassen_primality_test +""" + + +import random + + +def jacobi_symbol(random_a: int, number: int) -> int: + """ + Calculate the Jacobi symbol. The Jacobi symbol is a generalization + of the Legendre symbol, which can be used to simplify computations involving + quadratic residues. The Jacobi symbol is used in primality tests, like the + Solovay-Strassen test, because it helps determine if an integer is a + quadratic residue modulo a given modulus, providing valuable information + about the number's potential primality or compositeness. + + Parameters: + random_a: A randomly chosen integer from 2 to n-2 (inclusive) + number: The number that is tested for primality + + Returns: + jacobi_symbol: The Jacobi symbol is a mathematical function + used to determine whether an integer is a quadratic residue modulo + another integer (usually prime) or not. 
+ + >>> jacobi_symbol(2, 13) + -1 + >>> jacobi_symbol(5, 19) + 1 + >>> jacobi_symbol(7, 14) + 0 + """ + + if random_a in (0, 1): + return random_a + + random_a %= number + t = 1 + + while random_a != 0: + while random_a % 2 == 0: + random_a //= 2 + r = number % 8 + if r in (3, 5): + t = -t + + random_a, number = number, random_a + + if random_a % 4 == number % 4 == 3: + t = -t + + random_a %= number + + return t if number == 1 else 0 + + +def solovay_strassen(number: int, iterations: int) -> bool: + """ + Check whether the input number is prime or not using + the Solovay-Strassen Primality test + + Parameters: + number: The number that is tested for primality + iterations: The number of times that the test is run + which effects the accuracy + + Returns: + result: True if number is probably prime and false + if not + + >>> random.seed(10) + >>> solovay_strassen(13, 5) + True + >>> solovay_strassen(9, 10) + False + >>> solovay_strassen(17, 15) + True + """ + + if number <= 1: + return False + if number <= 3: + return True + + for _ in range(iterations): + a = random.randint(2, number - 2) + x = jacobi_symbol(a, number) + y = pow(a, (number - 1) // 2, number) + + if x == 0 or y != x % number: + return False + + return True + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c85506262d8fc6fcf154651ce8affdfb96b57ece Mon Sep 17 00:00:00 2001 From: Saahil Mahato <115351000+saahil-mahato@users.noreply.github.com> Date: Fri, 13 Oct 2023 19:03:52 +0545 Subject: [PATCH 1110/1543] Add Damerau-Levenshtein distance algorithm (#10159) * Add Damerau-Levenshtein distance algorithm * fix: precommit check * fix: doc correction * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor: use variable for length and doc correction * Update damerau_levenshtein_distance.py * Update damerau_levenshtein_distance.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- strings/damerau_levenshtein_distance.py | 71 +++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 strings/damerau_levenshtein_distance.py diff --git a/strings/damerau_levenshtein_distance.py b/strings/damerau_levenshtein_distance.py new file mode 100644 index 000000000000..72de019499e2 --- /dev/null +++ b/strings/damerau_levenshtein_distance.py @@ -0,0 +1,71 @@ +""" +This script is a implementation of the Damerau-Levenshtein distance algorithm. + +It's an algorithm that measures the edit distance between two string sequences + +More information about this algorithm can be found in this wikipedia article: +https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance +""" + + +def damerau_levenshtein_distance(first_string: str, second_string: str) -> int: + """ + Implements the Damerau-Levenshtein distance algorithm that measures + the edit distance between two strings. 
+ + Parameters: + first_string: The first string to compare + second_string: The second string to compare + + Returns: + distance: The edit distance between the first and second strings + + >>> damerau_levenshtein_distance("cat", "cut") + 1 + >>> damerau_levenshtein_distance("kitten", "sitting") + 3 + >>> damerau_levenshtein_distance("hello", "world") + 4 + >>> damerau_levenshtein_distance("book", "back") + 2 + >>> damerau_levenshtein_distance("container", "containment") + 3 + >>> damerau_levenshtein_distance("container", "containment") + 3 + """ + # Create a dynamic programming matrix to store the distances + dp_matrix = [[0] * (len(second_string) + 1) for _ in range(len(first_string) + 1)] + + # Initialize the matrix + for i in range(len(first_string) + 1): + dp_matrix[i][0] = i + for j in range(len(second_string) + 1): + dp_matrix[0][j] = j + + # Fill the matrix + for i, first_char in enumerate(first_string, start=1): + for j, second_char in enumerate(second_string, start=1): + cost = int(first_char != second_char) + + dp_matrix[i][j] = min( + dp_matrix[i - 1][j] + 1, # Deletion + dp_matrix[i][j - 1] + 1, # Insertion + dp_matrix[i - 1][j - 1] + cost, # Substitution + ) + + if ( + i > 1 + and j > 1 + and first_string[i - 1] == second_string[j - 2] + and first_string[i - 2] == second_string[j - 1] + ): + # Transposition + dp_matrix[i][j] = min(dp_matrix[i][j], dp_matrix[i - 2][j - 2] + cost) + + return dp_matrix[-1][-1] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 1117a50665b053ef7716cf1e80b29e11d30886c7 Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Fri, 13 Oct 2023 21:25:32 +0530 Subject: [PATCH 1111/1543] Modified comments on lower.py (#10369) --- strings/lower.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/strings/lower.py b/strings/lower.py index 9ae419123ceb..49256b0169ef 100644 --- a/strings/lower.py +++ b/strings/lower.py @@ -14,9 +14,9 @@ def lower(word: str) -> str: 'what' """ - # converting to ascii value int value and checking to see if char is a capital - # letter if it is a capital letter it is getting shift by 32 which makes it a lower - # case letter + # Converting to ASCII value, obtaining the integer representation + # and checking to see if the character is a capital letter. + # If it is a capital letter, it is shifted by 32, making it a lowercase letter. 
return "".join(chr(ord(char) + 32) if "A" <= char <= "Z" else char for char in word) From d96029e13d181229c692b8e4cafe2661cdae919e Mon Sep 17 00:00:00 2001 From: SalmanSi <114280969+SalmanSi@users.noreply.github.com> Date: Fri, 13 Oct 2023 22:48:31 +0500 Subject: [PATCH 1112/1543] added doctests for dynamicprogramming/minimum_partition (#10033) * added doctests * added doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add doctests to integer_partition.py * Update minimum_partition.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- dynamic_programming/integer_partition.py | 24 +++++++++++++++ dynamic_programming/minimum_partition.py | 38 ++++++++++++++++++++---- 2 files changed, 57 insertions(+), 5 deletions(-) diff --git a/dynamic_programming/integer_partition.py b/dynamic_programming/integer_partition.py index 8ed2e51bd4bd..145bc29d0fca 100644 --- a/dynamic_programming/integer_partition.py +++ b/dynamic_programming/integer_partition.py @@ -3,10 +3,34 @@ partitions into exactly k parts plus the number of partitions into at least k-1 parts. Subtracting 1 from each part of a partition of n into k parts gives a partition of n-k into k parts. These two facts together are used for this algorithm. +* https://en.wikipedia.org/wiki/Partition_(number_theory) +* https://en.wikipedia.org/wiki/Partition_function_(number_theory) """ def partition(m: int) -> int: + """ + >>> partition(5) + 7 + >>> partition(7) + 15 + >>> partition(100) + 190569292 + >>> partition(1_000) + 24061467864032622473692149727991 + >>> partition(-7) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> partition(0) + Traceback (most recent call last): + ... + IndexError: list assignment index out of range + >>> partition(7.8) + Traceback (most recent call last): + ... + TypeError: 'float' object cannot be interpreted as an integer + """ memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)] for i in range(m + 1): memo[i][0] = 1 diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py index e6188cb33b3a..748c0599efb0 100644 --- a/dynamic_programming/minimum_partition.py +++ b/dynamic_programming/minimum_partition.py @@ -3,7 +3,7 @@ """ -def find_min(arr: list[int]) -> int: +def find_min(numbers: list[int]) -> int: """ >>> find_min([1, 2, 3, 4, 5]) 1 @@ -15,9 +15,37 @@ def find_min(arr: list[int]) -> int: 3 >>> find_min([]) 0 + >>> find_min([1, 2, 3, 4]) + 0 + >>> find_min([0, 0, 0, 0]) + 0 + >>> find_min([-1, -5, 5, 1]) + 0 + >>> find_min([-1, -5, 5, 1]) + 0 + >>> find_min([9, 9, 9, 9, 9]) + 9 + >>> find_min([1, 5, 10, 3]) + 1 + >>> find_min([-1, 0, 1]) + 0 + >>> find_min(range(10, 0, -1)) + 1 + >>> find_min([-1]) + Traceback (most recent call last): + -- + IndexError: list assignment index out of range + >>> find_min([0, 0, 0, 1, 2, -4]) + Traceback (most recent call last): + ... + IndexError: list assignment index out of range + >>> find_min([-1, -5, -10, -3]) + Traceback (most recent call last): + ... 
+ IndexError: list assignment index out of range """ - n = len(arr) - s = sum(arr) + n = len(numbers) + s = sum(numbers) dp = [[False for x in range(s + 1)] for y in range(n + 1)] @@ -31,8 +59,8 @@ def find_min(arr: list[int]) -> int: for j in range(1, s + 1): dp[i][j] = dp[i - 1][j] - if arr[i - 1] <= j: - dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]] + if numbers[i - 1] <= j: + dp[i][j] = dp[i][j] or dp[i - 1][j - numbers[i - 1]] for j in range(int(s / 2), -1, -1): if dp[n][j] is True: From 9fb0cd271efec0fc651a5143aedda42f3dc93ea8 Mon Sep 17 00:00:00 2001 From: Dale Dai <145884899+CouldNot@users.noreply.github.com> Date: Fri, 13 Oct 2023 23:47:08 -0700 Subject: [PATCH 1113/1543] Expand euler phi function doctest (#10401) --- maths/basic_maths.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/maths/basic_maths.py b/maths/basic_maths.py index 26c52c54983e..c9e3d00fa23b 100644 --- a/maths/basic_maths.py +++ b/maths/basic_maths.py @@ -98,7 +98,17 @@ def euler_phi(n: int) -> int: """Calculate Euler's Phi Function. >>> euler_phi(100) 40 + >>> euler_phi(0) + Traceback (most recent call last): + ... + ValueError: Only positive numbers are accepted + >>> euler_phi(-10) + Traceback (most recent call last): + ... + ValueError: Only positive numbers are accepted """ + if n <= 0: + raise ValueError("Only positive numbers are accepted") s = n for x in set(prime_factors(n)): s *= (x - 1) / x From 0b2c9fb6f164468b51baa4866c1b8c4f01ec8b64 Mon Sep 17 00:00:00 2001 From: Baron105 <76466796+Baron105@users.noreply.github.com> Date: Sat, 14 Oct 2023 12:31:23 +0530 Subject: [PATCH 1114/1543] Adding avg and mps speed formulae for ideal gases (#10229) * avg and mps speed formulae added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * avg and mps speed formulae added * fixed_spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ws * added amicable numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added amicable numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed * changed name of file and added code improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * issues fixed due to pi * requested changes added --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- physics/speeds_of_gas_molecules.py | 111 +++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 physics/speeds_of_gas_molecules.py diff --git a/physics/speeds_of_gas_molecules.py b/physics/speeds_of_gas_molecules.py new file mode 100644 index 000000000000..a50d1c0f6d76 --- /dev/null +++ b/physics/speeds_of_gas_molecules.py @@ -0,0 +1,111 @@ +""" +The root-mean-square, average and most probable speeds of gas molecules are +derived from the Maxwell-Boltzmann distribution. The Maxwell-Boltzmann +distribution is a probability distribution that describes the distribution of +speeds of particles in an ideal gas. 
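+
+All three characteristic speeds described below scale as √(RT/M) and differ
+only in the constant under the square root, so for any gas and temperature
+vmp < vavg < vrms (since 2 < 8/π < 3).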
+ +The distribution is given by the following equation: + + ------------------------------------------------- + | f(v) = (M/2πRT)^(3/2) * 4πv^2 * e^(-Mv^2/2RT) | + ------------------------------------------------- + +where: + f(v) is the fraction of molecules with a speed v + M is the molar mass of the gas in kg/mol + R is the gas constant + T is the absolute temperature + +More information about the Maxwell-Boltzmann distribution can be found here: +https://en.wikipedia.org/wiki/Maxwell%E2%80%93Boltzmann_distribution + +The average speed can be calculated by integrating the Maxwell-Boltzmann distribution +from 0 to infinity and dividing by the total number of molecules. The result is: + + --------------------- + | vavg = √(8RT/πM) | + --------------------- + +The most probable speed is the speed at which the Maxwell-Boltzmann distribution +is at its maximum. This can be found by differentiating the Maxwell-Boltzmann +distribution with respect to v and setting the result equal to zero. The result is: + + --------------------- + | vmp = √(2RT/M) | + --------------------- + +The root-mean-square speed is another measure of the average speed +of the molecules in a gas. It is calculated by taking the square root +of the average of the squares of the speeds of the molecules. The result is: + + --------------------- + | vrms = √(3RT/M) | + --------------------- + +Here we have defined functions to calculate the average and +most probable speeds of molecules in a gas given the +temperature and molar mass of the gas. +""" + +# import the constants R and pi from the scipy.constants library +from scipy.constants import R, pi + + +def avg_speed_of_molecule(temperature: float, molar_mass: float) -> float: + """ + Takes the temperature (in K) and molar mass (in kg/mol) of a gas + and returns the average speed of a molecule in the gas (in m/s). + + Examples: + >>> avg_speed_of_molecule(273, 0.028) # nitrogen at 273 K + 454.3488755020387 + >>> avg_speed_of_molecule(300, 0.032) # oxygen at 300 K + 445.52572733919885 + >>> avg_speed_of_molecule(-273, 0.028) # invalid temperature + Traceback (most recent call last): + ... + Exception: Absolute temperature cannot be less than 0 K + >>> avg_speed_of_molecule(273, 0) # invalid molar mass + Traceback (most recent call last): + ... + Exception: Molar mass should be greater than 0 kg/mol + """ + + if temperature < 0: + raise Exception("Absolute temperature cannot be less than 0 K") + if molar_mass <= 0: + raise Exception("Molar mass should be greater than 0 kg/mol") + return (8 * R * temperature / (pi * molar_mass)) ** 0.5 + + +def mps_speed_of_molecule(temperature: float, molar_mass: float) -> float: + """ + Takes the temperature (in K) and molar mass (in kg/mol) of a gas + and returns the most probable speed of a molecule in the gas (in m/s). + + Examples: + >>> mps_speed_of_molecule(273, 0.028) # nitrogen at 273 K + 402.65620701908966 + >>> mps_speed_of_molecule(300, 0.032) # oxygen at 300 K + 394.836895549922 + >>> mps_speed_of_molecule(-273, 0.028) # invalid temperature + Traceback (most recent call last): + ... + Exception: Absolute temperature cannot be less than 0 K + >>> mps_speed_of_molecule(273, 0) # invalid molar mass + Traceback (most recent call last): + ... 
+ Exception: Molar mass should be greater than 0 kg/mol + """ + + if temperature < 0: + raise Exception("Absolute temperature cannot be less than 0 K") + if molar_mass <= 0: + raise Exception("Molar mass should be greater than 0 kg/mol") + return (2 * R * temperature / molar_mass) ** 0.5 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 37cae3f56169348e97262b1b8f7671785be77a5b Mon Sep 17 00:00:00 2001 From: Muhammad Umer Farooq <115654418+Muhammadummerr@users.noreply.github.com> Date: Sat, 14 Oct 2023 13:31:43 +0500 Subject: [PATCH 1115/1543] Updated test cases of power_sum.py (#9978) * Updated test cases of power_sum.py * updated * updated. * remove extra comment and used ** instead of pow * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update backtracking/power_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- backtracking/power_sum.py | 42 +++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/backtracking/power_sum.py b/backtracking/power_sum.py index fcf1429f8570..ee2eac426ec7 100644 --- a/backtracking/power_sum.py +++ b/backtracking/power_sum.py @@ -6,8 +6,6 @@ The only solution is 2^2+3^2. Constraints: 1<=X<=1000, 2<=N<=10. """ -from math import pow - def backtrack( needed_sum: int, @@ -19,25 +17,25 @@ def backtrack( """ >>> backtrack(13, 2, 1, 0, 0) (0, 1) - >>> backtrack(100, 2, 1, 0, 0) - (0, 3) - >>> backtrack(100, 3, 1, 0, 0) + >>> backtrack(10, 2, 1, 0, 0) + (0, 1) + >>> backtrack(10, 3, 1, 0, 0) + (0, 0) + >>> backtrack(20, 2, 1, 0, 0) (0, 1) - >>> backtrack(800, 2, 1, 0, 0) - (0, 561) - >>> backtrack(1000, 10, 1, 0, 0) + >>> backtrack(15, 10, 1, 0, 0) (0, 0) - >>> backtrack(400, 2, 1, 0, 0) - (0, 55) - >>> backtrack(50, 1, 1, 0, 0) - (0, 3658) + >>> backtrack(16, 2, 1, 0, 0) + (0, 1) + >>> backtrack(20, 1, 1, 0, 0) + (0, 64) """ if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count - i_to_n = int(pow(current_number, power)) + i_to_n = current_number**power if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n @@ -57,17 +55,17 @@ def solve(needed_sum: int, power: int) -> int: """ >>> solve(13, 2) 1 - >>> solve(100, 2) - 3 - >>> solve(100, 3) + >>> solve(10, 2) 1 - >>> solve(800, 2) - 561 - >>> solve(1000, 10) + >>> solve(10, 3) 0 - >>> solve(400, 2) - 55 - >>> solve(50, 1) + >>> solve(20, 2) + 1 + >>> solve(15, 10) + 0 + >>> solve(16, 2) + 1 + >>> solve(20, 1) Traceback (most recent call last): ... 
ValueError: Invalid input From 71b372f5e2fd313268018df237d401efd7795464 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 14 Oct 2023 09:34:05 -0400 Subject: [PATCH 1116/1543] Remove doctest in `xgboost_regressor.py` main function (#10422) * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Update xgboost_regressor.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- machine_learning/xgboost_regressor.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/machine_learning/xgboost_regressor.py b/machine_learning/xgboost_regressor.py index a540e3ab03eb..52e041c55ea2 100644 --- a/machine_learning/xgboost_regressor.py +++ b/machine_learning/xgboost_regressor.py @@ -39,13 +39,13 @@ def xgboost( def main() -> None: """ - >>> main() - Mean Absolute Error : 0.30957163379906033 - Mean Square Error : 0.22611560196662744 - The URL for this algorithm https://xgboost.readthedocs.io/en/stable/ California house price dataset is used to demonstrate the algorithm. + + Expected error values: + Mean Absolute Error: 0.30957163379906033 + Mean Square Error: 0.22611560196662744 """ # Load California house price dataset california = fetch_california_housing() @@ -55,8 +55,8 @@ def main() -> None: ) predictions = xgboost(x_train, y_train, x_test) # Error printing - print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}") - print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}") + print(f"Mean Absolute Error: {mean_absolute_error(y_test, predictions)}") + print(f"Mean Square Error: {mean_squared_error(y_test, predictions)}") if __name__ == "__main__": From 212cdfe36c3599804027c79c26ee814e53a12703 Mon Sep 17 00:00:00 2001 From: Dean Bring Date: Sat, 14 Oct 2023 08:35:12 -0700 Subject: [PATCH 1117/1543] Added validate sudoku board function (#9881) * Added algorithm to deeply clone a graph * Fixed file name and removed a function call * Removed nested function and fixed class parameter types * Fixed doctests * bug fix * Added class decorator * Updated doctests and fixed precommit errors * Cleaned up code * Simplified doctest * Added doctests * Code simplification * Created function which validates sudoku boards * Update matrix/validate_sudoku_board.py * Fixed precommit errors * Removed file accidentally included * Improved readability and simplicity * Add timeit benchmarks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update validate_sudoku_board.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- matrix/validate_sudoku_board.py | 107 ++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 matrix/validate_sudoku_board.py diff --git a/matrix/validate_sudoku_board.py b/matrix/validate_sudoku_board.py new file mode 100644 index 000000000000..0ee7b3df0b83 --- /dev/null +++ b/matrix/validate_sudoku_board.py @@ -0,0 +1,107 @@ +""" +LeetCode 36. Valid Sudoku +https://leetcode.com/problems/valid-sudoku/ +https://en.wikipedia.org/wiki/Sudoku + +Determine if a 9 x 9 Sudoku board is valid. Only the filled cells need to be +validated according to the following rules: + +- Each row must contain the digits 1-9 without repetition. +- Each column must contain the digits 1-9 without repetition. +- Each of the nine 3 x 3 sub-boxes of the grid must contain the digits 1-9 + without repetition. 
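+
+The function below enforces all three rules in a single pass over the grid,
+keeping one set of already-seen digits per row, per column and per 3 x 3 box.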
+ +Note: + +A Sudoku board (partially filled) could be valid but is not necessarily +solvable. + +Only the filled cells need to be validated according to the mentioned rules. +""" + +from collections import defaultdict + +NUM_SQUARES = 9 +EMPTY_CELL = "." + + +def is_valid_sudoku_board(sudoku_board: list[list[str]]) -> bool: + """ + This function validates (but does not solve) a sudoku board. + The board may be valid but unsolvable. + + >>> is_valid_sudoku_board([ + ... ["5","3",".",".","7",".",".",".","."] + ... ,["6",".",".","1","9","5",".",".","."] + ... ,[".","9","8",".",".",".",".","6","."] + ... ,["8",".",".",".","6",".",".",".","3"] + ... ,["4",".",".","8",".","3",".",".","1"] + ... ,["7",".",".",".","2",".",".",".","6"] + ... ,[".","6",".",".",".",".","2","8","."] + ... ,[".",".",".","4","1","9",".",".","5"] + ... ,[".",".",".",".","8",".",".","7","9"] + ... ]) + True + >>> is_valid_sudoku_board([ + ... ["8","3",".",".","7",".",".",".","."] + ... ,["6",".",".","1","9","5",".",".","."] + ... ,[".","9","8",".",".",".",".","6","."] + ... ,["8",".",".",".","6",".",".",".","3"] + ... ,["4",".",".","8",".","3",".",".","1"] + ... ,["7",".",".",".","2",".",".",".","6"] + ... ,[".","6",".",".",".",".","2","8","."] + ... ,[".",".",".","4","1","9",".",".","5"] + ... ,[".",".",".",".","8",".",".","7","9"] + ... ]) + False + >>> is_valid_sudoku_board([["1", "2", "3", "4", "5", "6", "7", "8", "9"]]) + Traceback (most recent call last): + ... + ValueError: Sudoku boards must be 9x9 squares. + >>> is_valid_sudoku_board( + ... [["1"], ["2"], ["3"], ["4"], ["5"], ["6"], ["7"], ["8"], ["9"]] + ... ) + Traceback (most recent call last): + ... + ValueError: Sudoku boards must be 9x9 squares. + """ + if len(sudoku_board) != NUM_SQUARES or ( + any(len(row) != NUM_SQUARES for row in sudoku_board) + ): + error_message = f"Sudoku boards must be {NUM_SQUARES}x{NUM_SQUARES} squares." 
+ raise ValueError(error_message) + + row_values: defaultdict[int, set[str]] = defaultdict(set) + col_values: defaultdict[int, set[str]] = defaultdict(set) + box_values: defaultdict[tuple[int, int], set[str]] = defaultdict(set) + + for row in range(NUM_SQUARES): + for col in range(NUM_SQUARES): + value = sudoku_board[row][col] + + if value == EMPTY_CELL: + continue + + box = (row // 3, col // 3) + + if ( + value in row_values[row] + or value in col_values[col] + or value in box_values[box] + ): + return False + + row_values[row].add(value) + col_values[col].add(value) + box_values[box].add(value) + + return True + + +if __name__ == "__main__": + from doctest import testmod + from timeit import timeit + + testmod() + print(timeit("is_valid_sudoku_board(valid_board)", globals=globals())) + print(timeit("is_valid_sudoku_board(invalid_board)", globals=globals())) From 3ba23384794bc5ce61a300b96d2b721d9d58eccd Mon Sep 17 00:00:00 2001 From: Aakash Giri Date: Sat, 14 Oct 2023 21:47:11 +0530 Subject: [PATCH 1118/1543] Add Title Case Conversion (#10439) [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci added more test case and type hint [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci updated naming convention --- strings/title.py | 57 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 strings/title.py diff --git a/strings/title.py b/strings/title.py new file mode 100644 index 000000000000..1ec2df548e2d --- /dev/null +++ b/strings/title.py @@ -0,0 +1,57 @@ +def to_title_case(word: str) -> str: + """ + Converts a string to capitalized case, preserving the input as is + + >>> to_title_case("Aakash") + 'Aakash' + + >>> to_title_case("aakash") + 'Aakash' + + >>> to_title_case("AAKASH") + 'Aakash' + + >>> to_title_case("aAkAsH") + 'Aakash' + """ + + """ + Convert the first character to uppercase if it's lowercase + """ + if "a" <= word[0] <= "z": + word = chr(ord(word[0]) - 32) + word[1:] + + """ + Convert the remaining characters to lowercase if they are uppercase + """ + for i in range(1, len(word)): + if "A" <= word[i] <= "Z": + word = word[:i] + chr(ord(word[i]) + 32) + word[i + 1 :] + + return word + + +def sentence_to_title_case(input_str: str) -> str: + """ + Converts a string to title case, preserving the input as is + + >>> sentence_to_title_case("Aakash Giri") + 'Aakash Giri' + + >>> sentence_to_title_case("aakash giri") + 'Aakash Giri' + + >>> sentence_to_title_case("AAKASH GIRI") + 'Aakash Giri' + + >>> sentence_to_title_case("aAkAsH gIrI") + 'Aakash Giri' + """ + + return " ".join(to_title_case(word) for word in input_str.split()) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 1969259868451684ab05663cc208f06af20d483f Mon Sep 17 00:00:00 2001 From: Manpreet Singh <63737630+ManpreetSingh2004@users.noreply.github.com> Date: Sat, 14 Oct 2023 23:05:01 +0530 Subject: [PATCH 1119/1543] Performance: 80% faster Project Euler 145 (#10445) * Performance: 80% faster Project Euler145 * Added timeit benchmark * >>> slow_solution() doctest --- project_euler/problem_145/sol1.py | 70 +++++++++++++++++++++++++++---- 1 file changed, 63 insertions(+), 7 deletions(-) diff --git a/project_euler/problem_145/sol1.py b/project_euler/problem_145/sol1.py index e9fc1a199161..71b851178fdb 100644 --- a/project_euler/problem_145/sol1.py +++ b/project_euler/problem_145/sol1.py @@ -17,17 +17,17 @@ ODD_DIGITS = [1, 3, 5, 7, 9] -def 
reversible_numbers( +def slow_reversible_numbers( remaining_length: int, remainder: int, digits: list[int], length: int ) -> int: """ Count the number of reversible numbers of given length. Iterate over possible digits considering parity of current sum remainder. - >>> reversible_numbers(1, 0, [0], 1) + >>> slow_reversible_numbers(1, 0, [0], 1) 0 - >>> reversible_numbers(2, 0, [0] * 2, 2) + >>> slow_reversible_numbers(2, 0, [0] * 2, 2) 20 - >>> reversible_numbers(3, 0, [0] * 3, 3) + >>> slow_reversible_numbers(3, 0, [0] * 3, 3) 100 """ if remaining_length == 0: @@ -51,7 +51,7 @@ def reversible_numbers( result = 0 for digit in range(10): digits[length // 2] = digit - result += reversible_numbers( + result += slow_reversible_numbers( 0, (remainder + 2 * digit) // 10, digits, length ) return result @@ -67,7 +67,7 @@ def reversible_numbers( for digit2 in other_parity_digits: digits[(length - remaining_length) // 2] = digit2 - result += reversible_numbers( + result += slow_reversible_numbers( remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, @@ -76,6 +76,42 @@ def reversible_numbers( return result +def slow_solution(max_power: int = 9) -> int: + """ + To evaluate the solution, use solution() + >>> slow_solution(3) + 120 + >>> slow_solution(6) + 18720 + >>> slow_solution(7) + 68720 + """ + result = 0 + for length in range(1, max_power + 1): + result += slow_reversible_numbers(length, 0, [0] * length, length) + return result + + +def reversible_numbers( + remaining_length: int, remainder: int, digits: list[int], length: int +) -> int: + """ + Count the number of reversible numbers of given length. + Iterate over possible digits considering parity of current sum remainder. + >>> reversible_numbers(1, 0, [0], 1) + 0 + >>> reversible_numbers(2, 0, [0] * 2, 2) + 20 + >>> reversible_numbers(3, 0, [0] * 3, 3) + 100 + """ + # There exist no reversible 1, 5, 9, 13 (ie. 4k+1) digit numbers + if (length - 1) % 4 == 0: + return 0 + + return slow_reversible_numbers(length, 0, [0] * length, length) + + def solution(max_power: int = 9) -> int: """ To evaluate the solution, use solution() @@ -92,5 +128,25 @@ def solution(max_power: int = 9) -> int: return result +def benchmark() -> None: + """ + Benchmarks + """ + # Running performance benchmarks... + # slow_solution : 292.9300301000003 + # solution : 54.90970860000016 + + from timeit import timeit + + print("Running performance benchmarks...") + + print(f"slow_solution : {timeit('slow_solution()', globals=globals(), number=10)}") + print(f"solution : {timeit('solution()', globals=globals(), number=10)}") + + if __name__ == "__main__": - print(f"{solution() = }") + print(f"Solution : {solution()}") + benchmark() + + # for i in range(1, 15): + # print(f"{i}. 
{reversible_numbers(i, 0, [0]*i, i)}") From f968dda5e9b81bd7dd3c5e9b7a69a9a08ed3ead7 Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Sun, 15 Oct 2023 00:32:37 +0530 Subject: [PATCH 1120/1543] Updated Comments on upper.py (#10442) * Updated Comments on upper.py * Update upper.py * Update upper.py --------- Co-authored-by: Christian Clauss --- strings/upper.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/strings/upper.py b/strings/upper.py index 5edd40b79808..0f68a27b99c6 100644 --- a/strings/upper.py +++ b/strings/upper.py @@ -1,6 +1,8 @@ def upper(word: str) -> str: """ - Will convert the entire string to uppercase letters + Convert an entire string to ASCII uppercase letters by looking for lowercase ASCII + letters and subtracting 32 from their integer representation to get the uppercase + letter. >>> upper("wow") 'WOW' @@ -11,10 +13,6 @@ def upper(word: str) -> str: >>> upper("wh[]32") 'WH[]32' """ - - # Converting to ascii value int value and checking to see if char is a lower letter - # if it is a lowercase letter it is getting shift by 32 which makes it an uppercase - # case letter return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word) From c9ba5e1b6f319e34815660542d8ca0c777c8008a Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 14 Oct 2023 16:08:52 -0400 Subject: [PATCH 1121/1543] Disable unused dependencies (#10467) Comment out dependencies in requirements.txt that are only used by currently-disabled files --- requirements.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index 25dba6f5a250..1e64818bbb6a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,19 +1,19 @@ beautifulsoup4 fake_useragent imageio -keras +keras ; python_version < '3.12' lxml matplotlib numpy opencv-python pandas pillow -projectq +# projectq # uncomment once quantum/quantum_random.py is fixed qiskit ; python_version < '3.12' qiskit-aer ; python_version < '3.12' requests rich -scikit-fuzzy +# scikit-fuzzy # uncomment once fuzzy_logic/fuzzy_operations.py is fixed scikit-learn statsmodels sympy @@ -21,4 +21,4 @@ tensorflow ; python_version < '3.12' texttable tweepy xgboost -yulewalker +# yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed From 3ecad36f92d26676cc73276553cd99763b025b33 Mon Sep 17 00:00:00 2001 From: Manpreet Singh <63737630+ManpreetSingh2004@users.noreply.github.com> Date: Sun, 15 Oct 2023 10:15:44 +0530 Subject: [PATCH 1122/1543] fix: incorrect range detection in find_missing_number (#10361) * Fix incorrect range detection in find_missing_number * Support consecutive decreasing numbers Added support for consecutive decreasing numbers in the find_missing_number function. 
* Support unordered numbers --- bit_manipulation/missing_number.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py index 92502a778ace..32b949daa717 100644 --- a/bit_manipulation/missing_number.py +++ b/bit_manipulation/missing_number.py @@ -11,11 +11,18 @@ def find_missing_number(nums: list[int]) -> int: Example: >>> find_missing_number([0, 1, 3, 4]) 2 + >>> find_missing_number([1, 3, 4, 5, 6]) + 2 + >>> find_missing_number([6, 5, 4, 2, 1]) + 3 + >>> find_missing_number([6, 1, 5, 3, 4]) + 2 """ - n = len(nums) - missing_number = n + low = min(nums) + high = max(nums) + missing_number = high - for i in range(n): - missing_number ^= i ^ nums[i] + for i in range(low, high): + missing_number ^= i ^ nums[i - low] return missing_number From 7dbc30181826aa26600f8d24c92b1587b31677c6 Mon Sep 17 00:00:00 2001 From: Ravi Kumar <119737193+ravi-ivar-7@users.noreply.github.com> Date: Sun, 15 Oct 2023 14:37:29 +0530 Subject: [PATCH 1123/1543] added rkf45 method (#10438) * added rkf45 method * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated rkf45.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated rkf45.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update rkf45.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update rkf45.py with suggestions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Improved Code Quality rkf45.py * Added more test cases and exception rkf45.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update rkf45.py * corrected some spellings. rkf45.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update rkf45.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update rkf45.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/rkf45.py | 112 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 maths/rkf45.py diff --git a/maths/rkf45.py b/maths/rkf45.py new file mode 100644 index 000000000000..29fd447b61b8 --- /dev/null +++ b/maths/rkf45.py @@ -0,0 +1,112 @@ +""" +Use the Runge-Kutta-Fehlberg method to solve Ordinary Differential Equations. +""" + +from collections.abc import Callable + +import numpy as np + + +def runge_futta_fehlberg_45( + func: Callable, + x_initial: float, + y_initial: float, + step_size: float, + x_final: float, +) -> np.ndarray: + """ + Solve an Ordinary Differential Equations using Runge-Kutta-Fehlberg Method (rkf45) + of order 5. + + https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta%E2%80%93Fehlberg_method + + args: + func: An ordinary differential equation (ODE) as function of x and y. + x_initial: The initial value of x. + y_initial: The initial value of y. + step_size: The increment value of x. + x_final: The final value of x. + + Returns: + Solution of y at each nodal point + + # exact value of y[1] is tan(0.2) = 0.2027100937470787 + >>> def f(x, y): + ... 
return 1 + y**2 + >>> y = runge_futta_fehlberg_45(f, 0, 0, 0.2, 1) + >>> y[1] + 0.2027100937470787 + >>> def f(x,y): + ... return x + >>> y = runge_futta_fehlberg_45(f, -1, 0, 0.2, 0) + >>> y[1] + -0.18000000000000002 + >>> y = runge_futta_fehlberg_45(5, 0, 0, 0.1, 1) + Traceback (most recent call last): + ... + TypeError: 'int' object is not callable + >>> def f(x, y): + ... return x + y + >>> y = runge_futta_fehlberg_45(f, 0, 0, 0.2, -1) + Traceback (most recent call last): + ... + ValueError: The final value x must be greater than initial value of x. + >>> def f(x, y): + ... return x + >>> y = runge_futta_fehlberg_45(f, -1, 0, -0.2, 0) + Traceback (most recent call last): + ... + ValueError: Step size must be positive. + """ + if x_initial >= x_final: + raise ValueError("The final value x must be greater than initial value of x.") + + if step_size <= 0: + raise ValueError("Step size must be positive.") + + n = int((x_final - x_initial) / step_size) + y = np.zeros( + (n + 1), + ) + x = np.zeros(n + 1) + y[0] = y_initial + x[0] = x_initial + for i in range(n): + k1 = step_size * func(x[i], y[i]) + k2 = step_size * func(x[i] + step_size / 4, y[i] + k1 / 4) + k3 = step_size * func( + x[i] + (3 / 8) * step_size, y[i] + (3 / 32) * k1 + (9 / 32) * k2 + ) + k4 = step_size * func( + x[i] + (12 / 13) * step_size, + y[i] + (1932 / 2197) * k1 - (7200 / 2197) * k2 + (7296 / 2197) * k3, + ) + k5 = step_size * func( + x[i] + step_size, + y[i] + (439 / 216) * k1 - 8 * k2 + (3680 / 513) * k3 - (845 / 4104) * k4, + ) + k6 = step_size * func( + x[i] + step_size / 2, + y[i] + - (8 / 27) * k1 + + 2 * k2 + - (3544 / 2565) * k3 + + (1859 / 4104) * k4 + - (11 / 40) * k5, + ) + y[i + 1] = ( + y[i] + + (16 / 135) * k1 + + (6656 / 12825) * k3 + + (28561 / 56430) * k4 + - (9 / 50) * k5 + + (2 / 55) * k6 + ) + x[i + 1] = step_size + x[i] + return y + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 1ebae5d43e2ce23ef98a0804bf1fa077d2fa5daf Mon Sep 17 00:00:00 2001 From: Manpreet Singh <63737630+ManpreetSingh2004@users.noreply.github.com> Date: Sun, 15 Oct 2023 14:47:22 +0530 Subject: [PATCH 1124/1543] Performance: 75% faster Project Euler 187 (#10503) * Add comments and wikipedia link in calculate_prime_numbers * Add improved calculate_prime_numbers * Separate slow_solution and new_solution * Use for loops in solution * Separate while_solution and new solution * Add performance benchmark * Add doctest for calculate_prime_numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed white space --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- project_euler/problem_187/sol1.py | 118 ++++++++++++++++++++++++++++-- 1 file changed, 111 insertions(+), 7 deletions(-) diff --git a/project_euler/problem_187/sol1.py b/project_euler/problem_187/sol1.py index 12f03e2a7023..8944776fef50 100644 --- a/project_euler/problem_187/sol1.py +++ b/project_euler/problem_187/sol1.py @@ -14,29 +14,89 @@ from math import isqrt -def calculate_prime_numbers(max_number: int) -> list[int]: +def slow_calculate_prime_numbers(max_number: int) -> list[int]: """ - Returns prime numbers below max_number + Returns prime numbers below max_number. 
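+    (This original version keeps one boolean per integer below max_number;
+    calculate_prime_numbers below halves that by tracking odd numbers only.)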
+ See: https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes - >>> calculate_prime_numbers(10) + >>> slow_calculate_prime_numbers(10) [2, 3, 5, 7] + + >>> slow_calculate_prime_numbers(2) + [] """ + # List containing a bool value for every number below max_number/2 is_prime = [True] * max_number + for i in range(2, isqrt(max_number - 1) + 1): if is_prime[i]: + # Mark all multiple of i as not prime for j in range(i**2, max_number, i): is_prime[j] = False return [i for i in range(2, max_number) if is_prime[i]] -def solution(max_number: int = 10**8) -> int: +def calculate_prime_numbers(max_number: int) -> list[int]: + """ + Returns prime numbers below max_number. + See: https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes + + >>> calculate_prime_numbers(10) + [2, 3, 5, 7] + + >>> calculate_prime_numbers(2) + [] + """ + + if max_number <= 2: + return [] + + # List containing a bool value for every odd number below max_number/2 + is_prime = [True] * (max_number // 2) + + for i in range(3, isqrt(max_number - 1) + 1, 2): + if is_prime[i // 2]: + # Mark all multiple of i as not prime using list slicing + is_prime[i**2 // 2 :: i] = [False] * ( + # Same as: (max_number - (i**2)) // (2 * i) + 1 + # but faster than len(is_prime[i**2 // 2 :: i]) + len(range(i**2 // 2, max_number // 2, i)) + ) + + return [2] + [2 * i + 1 for i in range(1, max_number // 2) if is_prime[i]] + + +def slow_solution(max_number: int = 10**8) -> int: """ Returns the number of composite integers below max_number have precisely two, - not necessarily distinct, prime factors + not necessarily distinct, prime factors. - >>> solution(30) + >>> slow_solution(30) + 10 + """ + + prime_numbers = slow_calculate_prime_numbers(max_number // 2) + + semiprimes_count = 0 + left = 0 + right = len(prime_numbers) - 1 + while left <= right: + while prime_numbers[left] * prime_numbers[right] >= max_number: + right -= 1 + semiprimes_count += right - left + 1 + left += 1 + + return semiprimes_count + + +def while_solution(max_number: int = 10**8) -> int: + """ + Returns the number of composite integers below max_number have precisely two, + not necessarily distinct, prime factors. + + >>> while_solution(30) 10 """ @@ -54,5 +114,49 @@ def solution(max_number: int = 10**8) -> int: return semiprimes_count +def solution(max_number: int = 10**8) -> int: + """ + Returns the number of composite integers below max_number have precisely two, + not necessarily distinct, prime factors. + + >>> solution(30) + 10 + """ + + prime_numbers = calculate_prime_numbers(max_number // 2) + + semiprimes_count = 0 + right = len(prime_numbers) - 1 + for left in range(len(prime_numbers)): + if left > right: + break + for r in range(right, left - 2, -1): + if prime_numbers[left] * prime_numbers[r] < max_number: + break + right = r + semiprimes_count += right - left + 1 + + return semiprimes_count + + +def benchmark() -> None: + """ + Benchmarks + """ + # Running performance benchmarks... 
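+    # (example timings in seconds from a single run; absolute values will
+    # differ from machine to machine)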
+ # slow_solution : 108.50874730000032 + # while_sol : 28.09581200000048 + # solution : 25.063097400000515 + + from timeit import timeit + + print("Running performance benchmarks...") + + print(f"slow_solution : {timeit('slow_solution()', globals=globals(), number=10)}") + print(f"while_sol : {timeit('while_solution()', globals=globals(), number=10)}") + print(f"solution : {timeit('solution()', globals=globals(), number=10)}") + + if __name__ == "__main__": - print(f"{solution() = }") + print(f"Solution: {solution()}") + benchmark() From 85cdb93a0d7a306633faa03a134d0d39da7076a8 Mon Sep 17 00:00:00 2001 From: Kosuri L Indu <118645569+kosuri-indu@users.noreply.github.com> Date: Sun, 15 Oct 2023 15:48:28 +0530 Subject: [PATCH 1125/1543] [Add] : Job Sequence program under GREEDY methods (#10482) * to add job seq program * to add job seq program * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to add definitions in parameters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to add definitions in parameters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to add definitions in parameters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changes as recommended * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type hint error resolved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed lambda * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * import stmts order * Update and rename job_sequence.py to job_sequence_with_deadline.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- scheduling/job_sequence_with_deadline.py | 62 ++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 scheduling/job_sequence_with_deadline.py diff --git a/scheduling/job_sequence_with_deadline.py b/scheduling/job_sequence_with_deadline.py new file mode 100644 index 000000000000..fccb49cd88e8 --- /dev/null +++ b/scheduling/job_sequence_with_deadline.py @@ -0,0 +1,62 @@ +""" +Given a list of tasks, each with a deadline and reward, calculate which tasks can be +completed to yield the maximum reward. Each task takes one unit of time to complete, +and we can only work on one task at a time. Once a task has passed its deadline, it +can no longer be scheduled. + +Example : +tasks_info = [(4, 20), (1, 10), (1, 40), (1, 30)] +max_tasks will return (2, [2, 0]) - +Scheduling these tasks would result in a reward of 40 + 20 + +This problem can be solved using the concept of "GREEDY ALGORITHM". +Time Complexity - O(n log n) +https://medium.com/@nihardudhat2000/job-sequencing-with-deadline-17ddbb5890b5 +""" +from dataclasses import dataclass +from operator import attrgetter + + +@dataclass +class Task: + task_id: int + deadline: int + reward: int + + +def max_tasks(tasks_info: list[tuple[int, int]]) -> list[int]: + """ + Create a list of Task objects that are sorted so the highest rewards come first. + Return a list of those task ids that can be completed before i becomes too high. 
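+    Here i is a task's 1-based position in that reward-sorted order, so a task
+    is kept only if its deadline is at least its position.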
+ >>> max_tasks([(4, 20), (1, 10), (1, 40), (1, 30)]) + [2, 0] + >>> max_tasks([(1, 10), (2, 20), (3, 30), (2, 40)]) + [3, 2] + >>> max_tasks([(9, 10)]) + [0] + >>> max_tasks([(-9, 10)]) + [] + >>> max_tasks([]) + [] + >>> max_tasks([(0, 10), (0, 20), (0, 30), (0, 40)]) + [] + >>> max_tasks([(-1, 10), (-2, 20), (-3, 30), (-4, 40)]) + [] + """ + tasks = sorted( + ( + Task(task_id, deadline, reward) + for task_id, (deadline, reward) in enumerate(tasks_info) + ), + key=attrgetter("reward"), + reverse=True, + ) + return [task.task_id for i, task in enumerate(tasks, start=1) if task.deadline >= i] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(f"{max_tasks([(4, 20), (1, 10), (1, 40), (1, 30)]) = }") + print(f"{max_tasks([(1, 10), (2, 20), (3, 30), (2, 40)]) = }") From 777eca813a8030e7a674072c79da144e92dde07a Mon Sep 17 00:00:00 2001 From: Ravi Kumar <119737193+ravi-ivar-7@users.noreply.github.com> Date: Sun, 15 Oct 2023 16:25:56 +0530 Subject: [PATCH 1126/1543] Corrected typo in function name and doctests. rkf45.py (#10518) * Corrected typo in function name and doctests. rkf45.py There was a mistake in name of function (runge_futta_fehlberg instead of runge_kutta_fehlberg) . I have corrected this in function name and also doctest. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename rkf45.py to runge_kutta_fehlberg_45.py * Update runge_kutta_fehlberg_45.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/{rkf45.py => runge_kutta_fehlberg_45.py} | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) rename maths/{rkf45.py => runge_kutta_fehlberg_45.py} (84%) diff --git a/maths/rkf45.py b/maths/runge_kutta_fehlberg_45.py similarity index 84% rename from maths/rkf45.py rename to maths/runge_kutta_fehlberg_45.py index 29fd447b61b8..8181fe3015fc 100644 --- a/maths/rkf45.py +++ b/maths/runge_kutta_fehlberg_45.py @@ -7,7 +7,7 @@ import numpy as np -def runge_futta_fehlberg_45( +def runge_kutta_fehlberg_45( func: Callable, x_initial: float, y_initial: float, @@ -33,33 +33,35 @@ def runge_futta_fehlberg_45( # exact value of y[1] is tan(0.2) = 0.2027100937470787 >>> def f(x, y): ... return 1 + y**2 - >>> y = runge_futta_fehlberg_45(f, 0, 0, 0.2, 1) + >>> y = runge_kutta_fehlberg_45(f, 0, 0, 0.2, 1) >>> y[1] 0.2027100937470787 >>> def f(x,y): ... return x - >>> y = runge_futta_fehlberg_45(f, -1, 0, 0.2, 0) + >>> y = runge_kutta_fehlberg_45(f, -1, 0, 0.2, 0) >>> y[1] -0.18000000000000002 - >>> y = runge_futta_fehlberg_45(5, 0, 0, 0.1, 1) + >>> y = runge_kutta_fehlberg_45(5, 0, 0, 0.1, 1) Traceback (most recent call last): ... TypeError: 'int' object is not callable >>> def f(x, y): ... return x + y - >>> y = runge_futta_fehlberg_45(f, 0, 0, 0.2, -1) + >>> y = runge_kutta_fehlberg_45(f, 0, 0, 0.2, -1) Traceback (most recent call last): ... - ValueError: The final value x must be greater than initial value of x. + ValueError: The final value of x must be greater than initial value of x. >>> def f(x, y): ... return x - >>> y = runge_futta_fehlberg_45(f, -1, 0, -0.2, 0) + >>> y = runge_kutta_fehlberg_45(f, -1, 0, -0.2, 0) Traceback (most recent call last): ... ValueError: Step size must be positive. """ if x_initial >= x_final: - raise ValueError("The final value x must be greater than initial value of x.") + raise ValueError( + "The final value of x must be greater than initial value of x." 
+ ) if step_size <= 0: raise ValueError("Step size must be positive.") From 79a91cca956b99acf5e4bd785ff0640c9e591b89 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 15 Oct 2023 16:57:08 +0200 Subject: [PATCH 1127/1543] Fix typo in filename: ciphers/trifid_cipher.py (#10516) * Update and rename trafid_cipher.py to trifid_cipher.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 14 +++++++++++++- ciphers/{trafid_cipher.py => trifid_cipher.py} | 0 2 files changed, 13 insertions(+), 1 deletion(-) rename ciphers/{trafid_cipher.py => trifid_cipher.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 2c6000c94ed4..ceee9972dd97 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -63,7 +63,9 @@ ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) + * [Imply Gate](boolean_algebra/imply_gate.py) * [Nand Gate](boolean_algebra/nand_gate.py) + * [Nimply Gate](boolean_algebra/nimply_gate.py) * [Nor Gate](boolean_algebra/nor_gate.py) * [Not Gate](boolean_algebra/not_gate.py) * [Or Gate](boolean_algebra/or_gate.py) @@ -119,9 +121,9 @@ * [Shuffled Shift Cipher](ciphers/shuffled_shift_cipher.py) * [Simple Keyword Cypher](ciphers/simple_keyword_cypher.py) * [Simple Substitution Cipher](ciphers/simple_substitution_cipher.py) - * [Trafid Cipher](ciphers/trafid_cipher.py) * [Transposition Cipher](ciphers/transposition_cipher.py) * [Transposition Cipher Encrypt Decrypt File](ciphers/transposition_cipher_encrypt_decrypt_file.py) + * [Trifid Cipher](ciphers/trifid_cipher.py) * [Vigenere Cipher](ciphers/vigenere_cipher.py) * [Xor Cipher](ciphers/xor_cipher.py) @@ -174,7 +176,9 @@ ## Data Structures * Arrays * [Equilibrium Index In Array](data_structures/arrays/equilibrium_index_in_array.py) + * [Find Triplets With 0 Sum](data_structures/arrays/find_triplets_with_0_sum.py) * [Median Two Array](data_structures/arrays/median_two_array.py) + * [Pairs With Given Sum](data_structures/arrays/pairs_with_given_sum.py) * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product Sum](data_structures/arrays/product_sum.py) @@ -385,6 +389,7 @@ ## Financial * [Equated Monthly Installments](financial/equated_monthly_installments.py) + * [Exponential Moving Average](financial/exponential_moving_average.py) * [Interest](financial/interest.py) * [Present Value](financial/present_value.py) * [Price Plus Tax](financial/price_plus_tax.py) @@ -670,6 +675,7 @@ * [Radians](maths/radians.py) * [Radix2 Fft](maths/radix2_fft.py) * [Remove Digit](maths/remove_digit.py) + * [Rkf45](maths/rkf45.py) * [Runge Kutta](maths/runge_kutta.py) * [Segmented Sieve](maths/segmented_sieve.py) * Series @@ -688,6 +694,7 @@ * [Sin](maths/sin.py) * [Sock Merchant](maths/sock_merchant.py) * [Softmax](maths/softmax.py) + * [Solovay Strassen Primality Test](maths/solovay_strassen_primality_test.py) * [Square Root](maths/square_root.py) * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) * [Sum Of Digits](maths/sum_of_digits.py) @@ -728,6 +735,7 @@ * [Spiral Print](matrix/spiral_print.py) * Tests * [Test Matrix Operation](matrix/tests/test_matrix_operation.py) + * [Validate Sudoku Board](matrix/validate_sudoku_board.py) ## Networking Flow * [Ford Fulkerson](networking_flow/ford_fulkerson.py) @@ -803,6 +811,7 @@ * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) * [Speed Of Sound](physics/speed_of_sound.py) + * [Speeds Of Gas 
Molecules](physics/speeds_of_gas_molecules.py) ## Project Euler * Problem 001 @@ -1106,6 +1115,7 @@ ## Scheduling * [First Come First Served](scheduling/first_come_first_served.py) * [Highest Response Ratio Next](scheduling/highest_response_ratio_next.py) + * [Job Sequence With Deadline](scheduling/job_sequence_with_deadline.py) * [Job Sequencing With Deadline](scheduling/job_sequencing_with_deadline.py) * [Multi Level Feedback Queue](scheduling/multi_level_feedback_queue.py) * [Non Preemptive Shortest Job First](scheduling/non_preemptive_shortest_job_first.py) @@ -1193,6 +1203,7 @@ * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) * [Credit Card Validator](strings/credit_card_validator.py) + * [Damerau Levenshtein Distance](strings/damerau_levenshtein_distance.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) * [Dna](strings/dna.py) * [Frequency Finder](strings/frequency_finder.py) @@ -1225,6 +1236,7 @@ * [String Switch Case](strings/string_switch_case.py) * [Strip](strings/strip.py) * [Text Justification](strings/text_justification.py) + * [Title](strings/title.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) diff --git a/ciphers/trafid_cipher.py b/ciphers/trifid_cipher.py similarity index 100% rename from ciphers/trafid_cipher.py rename to ciphers/trifid_cipher.py From b5474ab68a0e1eea6bbfba445feca39db471c62f Mon Sep 17 00:00:00 2001 From: Rahul Jangra <106389897+leonado10000@users.noreply.github.com> Date: Sun, 15 Oct 2023 20:33:03 +0530 Subject: [PATCH 1128/1543] [ADD] : maths joint probabilty distribution (#10508) * Create joint_probability_distribution.py * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maclaurin_series.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci 
* Revert changes to maclaurin_series.py * Revert changes to maclaurin_series.py * Update joint_probability_distribution.py * Update joint_probability_distribution.py * Update joint_probability_distribution.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/joint_probability_distribution.py | 124 ++++++++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 maths/joint_probability_distribution.py diff --git a/maths/joint_probability_distribution.py b/maths/joint_probability_distribution.py new file mode 100644 index 000000000000..6fbcea40c358 --- /dev/null +++ b/maths/joint_probability_distribution.py @@ -0,0 +1,124 @@ +""" +Calculate joint probability distribution +https://en.wikipedia.org/wiki/Joint_probability_distribution +""" + + +def joint_probability_distribution( + x_values: list[int], + y_values: list[int], + x_probabilities: list[float], + y_probabilities: list[float], +) -> dict: + """ + >>> joint_distribution = joint_probability_distribution( + ... [1, 2], [-2, 5, 8], [0.7, 0.3], [0.3, 0.5, 0.2] + ... ) + >>> from math import isclose + >>> isclose(joint_distribution.pop((1, 8)), 0.14) + True + >>> joint_distribution + {(1, -2): 0.21, (1, 5): 0.35, (2, -2): 0.09, (2, 5): 0.15, (2, 8): 0.06} + """ + return { + (x, y): x_prob * y_prob + for x, x_prob in zip(x_values, x_probabilities) + for y, y_prob in zip(y_values, y_probabilities) + } + + +# Function to calculate the expectation (mean) +def expectation(values: list, probabilities: list) -> float: + """ + >>> from math import isclose + >>> isclose(expectation([1, 2], [0.7, 0.3]), 1.3) + True + """ + return sum(x * p for x, p in zip(values, probabilities)) + + +# Function to calculate the variance +def variance(values: list[int], probabilities: list[float]) -> float: + """ + >>> from math import isclose + >>> isclose(variance([1,2],[0.7,0.3]), 0.21) + True + """ + mean = expectation(values, probabilities) + return sum((x - mean) ** 2 * p for x, p in zip(values, probabilities)) + + +# Function to calculate the covariance +def covariance( + x_values: list[int], + y_values: list[int], + x_probabilities: list[float], + y_probabilities: list[float], +) -> float: + """ + >>> covariance([1, 2], [-2, 5, 8], [0.7, 0.3], [0.3, 0.5, 0.2]) + -2.7755575615628914e-17 + """ + mean_x = expectation(x_values, x_probabilities) + mean_y = expectation(y_values, y_probabilities) + return sum( + (x - mean_x) * (y - mean_y) * px * py + for x, px in zip(x_values, x_probabilities) + for y, py in zip(y_values, y_probabilities) + ) + + +# Function to calculate the standard deviation +def standard_deviation(variance: float) -> float: + """ + >>> standard_deviation(0.21) + 0.458257569495584 + """ + return variance**0.5 + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + # Input values for X and Y + x_vals = input("Enter values of X separated by spaces: ").split() + y_vals = input("Enter values of Y separated by spaces: ").split() + + # Convert input values to integers + x_values = [int(x) for x in x_vals] + y_values = [int(y) for y in y_vals] + + # Input probabilities for X and Y + x_probs = input("Enter probabilities for X separated by spaces: ").split() + y_probs = input("Enter probabilities for Y separated by spaces: ").split() + assert len(x_values) == len(x_probs) + assert len(y_values) == len(y_probs) + + # Convert input probabilities to floats + x_probabilities = [float(p) for p in x_probs] + y_probabilities = [float(p) 
for p in y_probs] + + # Calculate the joint probability distribution + jpd = joint_probability_distribution( + x_values, y_values, x_probabilities, y_probabilities + ) + + # Print the joint probability distribution + print( + "\n".join( + f"P(X={x}, Y={y}) = {probability}" for (x, y), probability in jpd.items() + ) + ) + mean_xy = expectation( + [x * y for x in x_values for y in y_values], + [px * py for px in x_probabilities for py in y_probabilities], + ) + print(f"x mean: {expectation(x_values, x_probabilities) = }") + print(f"y mean: {expectation(y_values, y_probabilities) = }") + print(f"xy mean: {mean_xy}") + print(f"x: {variance(x_values, x_probabilities) = }") + print(f"y: {variance(y_values, y_probabilities) = }") + print(f"{covariance(x_values, y_values, x_probabilities, y_probabilities) = }") + print(f"x: {standard_deviation(variance(x_values, x_probabilities)) = }") + print(f"y: {standard_deviation(variance(y_values, y_probabilities)) = }") From 755659a62f2c976e1e359a4c0af576b2aa8843a8 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 15 Oct 2023 11:16:56 -0400 Subject: [PATCH 1129/1543] Omit `project_euler/` from coverage reports (#10469) * Omit project_euler/ and scripts/ from coverage reports * Add scripts/ back into coverage reports --- pyproject.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index fe5f2f09c4ec..9c9262d77748 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -128,7 +128,10 @@ addopts = [ ] [tool.coverage.report] -omit = [".env/*"] +omit = [ + ".env/*", + "project_euler/*" +] sort = "Cover" [tool.codespell] From 52040a7bf1795e32cbf3863729c010aa55020063 Mon Sep 17 00:00:00 2001 From: Aroson <74296409+Aroson1@users.noreply.github.com> Date: Sun, 15 Oct 2023 21:05:02 +0530 Subject: [PATCH 1130/1543] Added 555 timer duty cycle and freqency in astable mode. (#10456) * Add files via upload * Update wheatstone_bridge.py * Update wheatstone_bridge.py * Create IC_555_Timer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update IC_555_Timer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update IC_555_Timer.py * Update and rename IC_555_Timer.py to ic_555_timer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ic_555_timer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ic_555_timer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ic_555_timer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup ic_555_timer.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- electronics/ic_555_timer.py | 75 +++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 electronics/ic_555_timer.py diff --git a/electronics/ic_555_timer.py b/electronics/ic_555_timer.py new file mode 100644 index 000000000000..e187e1928dca --- /dev/null +++ b/electronics/ic_555_timer.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +""" + Calculate the frequency and/or duty cycle of an astable 555 timer. 
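+    (In this mode C1 charges through R1 and R2 in series but discharges through
+    R2 alone, so the duty cycle given below is always above 50 %.)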
+ * https://en.wikipedia.org/wiki/555_timer_IC#Astable + + These functions take in the value of the external resistances (in ohms) + and capacitance (in Microfarad), and calculates the following: + + ------------------------------------- + | Freq = 1.44 /[( R1+ 2 x R2) x C1] | ... in Hz + ------------------------------------- + where Freq is the frequency, + R1 is the first resistance in ohms, + R2 is the second resistance in ohms, + C1 is the capacitance in Microfarads. + + ------------------------------------------------ + | Duty Cycle = (R1 + R2) / (R1 + 2 x R2) x 100 | ... in % + ------------------------------------------------ + where R1 is the first resistance in ohms, + R2 is the second resistance in ohms. +""" + + +def astable_frequency( + resistance_1: float, resistance_2: float, capacitance: float +) -> float: + """ + Usage examples: + >>> astable_frequency(resistance_1=45, resistance_2=45, capacitance=7) + 1523.8095238095239 + >>> astable_frequency(resistance_1=356, resistance_2=234, capacitance=976) + 1.7905459175553078 + >>> astable_frequency(resistance_1=2, resistance_2=-1, capacitance=2) + Traceback (most recent call last): + ... + ValueError: All values must be positive + >>> astable_frequency(resistance_1=45, resistance_2=45, capacitance=0) + Traceback (most recent call last): + ... + ValueError: All values must be positive + """ + + if resistance_1 <= 0 or resistance_2 <= 0 or capacitance <= 0: + raise ValueError("All values must be positive") + return (1.44 / ((resistance_1 + 2 * resistance_2) * capacitance)) * 10**6 + + +def astable_duty_cycle(resistance_1: float, resistance_2: float) -> float: + """ + Usage examples: + >>> astable_duty_cycle(resistance_1=45, resistance_2=45) + 66.66666666666666 + >>> astable_duty_cycle(resistance_1=356, resistance_2=234) + 71.60194174757282 + >>> astable_duty_cycle(resistance_1=2, resistance_2=-1) + Traceback (most recent call last): + ... + ValueError: All values must be positive + >>> astable_duty_cycle(resistance_1=0, resistance_2=0) + Traceback (most recent call last): + ... 
+ ValueError: All values must be positive + """ + + if resistance_1 <= 0 or resistance_2 <= 0: + raise ValueError("All values must be positive") + return (resistance_1 + resistance_2) / (resistance_1 + 2 * resistance_2) * 100 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b2636d90b3fe697ff64a62b928edfbeccf216e8a Mon Sep 17 00:00:00 2001 From: K Anamithra Date: Sun, 15 Oct 2023 22:11:29 +0530 Subject: [PATCH 1131/1543] added implementing stack using two queues (#10076) * added implementing stack using two queues * Update Stack using two queues * Update stack_using_two_queues.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update stack_using_two_queues.py * Update stack_using_two_queues.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update stack_using_two_queues.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update stack_using_two_queues.py * Update stack_using_two_queues.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../stacks/stack_using_two_queues.py | 85 +++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 data_structures/stacks/stack_using_two_queues.py diff --git a/data_structures/stacks/stack_using_two_queues.py b/data_structures/stacks/stack_using_two_queues.py new file mode 100644 index 000000000000..4b73246a045c --- /dev/null +++ b/data_structures/stacks/stack_using_two_queues.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from collections import deque +from dataclasses import dataclass, field + + +@dataclass +class StackWithQueues: + """ + https://www.geeksforgeeks.org/implement-stack-using-queue/ + + >>> stack = StackWithQueues() + >>> stack.push(1) + >>> stack.push(2) + >>> stack.push(3) + >>> stack.peek() + 3 + >>> stack.pop() + 3 + >>> stack.peek() + 2 + >>> stack.pop() + 2 + >>> stack.pop() + 1 + >>> stack.peek() is None + True + >>> stack.pop() + Traceback (most recent call last): + ... + IndexError: pop from an empty deque + """ + + main_queue: deque[int] = field(default_factory=deque) + temp_queue: deque[int] = field(default_factory=deque) + + def push(self, item: int) -> None: + self.temp_queue.append(item) + while self.main_queue: + self.temp_queue.append(self.main_queue.popleft()) + self.main_queue, self.temp_queue = self.temp_queue, self.main_queue + + def pop(self) -> int: + return self.main_queue.popleft() + + def peek(self) -> int | None: + return self.main_queue[0] if self.main_queue else None + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + stack: StackWithQueues | None = StackWithQueues() + while stack: + print("\nChoose operation:") + print("1. Push") + print("2. Pop") + print("3. Peek") + print("4. Quit") + + choice = input("Enter choice (1/2/3/4): ") + + if choice == "1": + element = int(input("Enter an integer to push: ").strip()) + stack.push(element) + print(f"{element} pushed onto the stack.") + elif choice == "2": + popped_element = stack.pop() + if popped_element is not None: + print(f"Popped element: {popped_element}") + else: + print("Stack is empty.") + elif choice == "3": + peeked_element = stack.peek() + if peeked_element is not None: + print(f"Top element: {peeked_element}") + else: + print("Stack is empty.") + elif choice == "4": + del stack + stack = None + else: + print("Invalid choice. 
Please try again.") From 68e6d5ad7e9af8929a22a889b1182706abbfcb50 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 15 Oct 2023 19:11:05 +0200 Subject: [PATCH 1132/1543] validate_solutions.py: os.getenv('GITHUB_TOKEN', '') (#10546) * validate_solutions.py: os.getenv('GITHUB_TOKEN', '') @tianyizheng02 * updating DIRECTORY.md * f this --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 4 +++- scripts/validate_solutions.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index ceee9972dd97..6213f26b6d93 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -373,6 +373,7 @@ * [Electric Conductivity](electronics/electric_conductivity.py) * [Electric Power](electronics/electric_power.py) * [Electrical Impedance](electronics/electrical_impedance.py) + * [Ic 555 Timer](electronics/ic_555_timer.py) * [Ind Reactance](electronics/ind_reactance.py) * [Ohms Law](electronics/ohms_law.py) * [Real And Reactive Power](electronics/real_and_reactive_power.py) @@ -622,6 +623,7 @@ * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) + * [Joint Probability Distribution](maths/joint_probability_distribution.py) * [Juggler Sequence](maths/juggler_sequence.py) * [Karatsuba](maths/karatsuba.py) * [Krishnamurthy Number](maths/krishnamurthy_number.py) @@ -675,8 +677,8 @@ * [Radians](maths/radians.py) * [Radix2 Fft](maths/radix2_fft.py) * [Remove Digit](maths/remove_digit.py) - * [Rkf45](maths/rkf45.py) * [Runge Kutta](maths/runge_kutta.py) + * [Runge Kutta Fehlberg 45](maths/runge_kutta_fehlberg_45.py) * [Segmented Sieve](maths/segmented_sieve.py) * Series * [Arithmetic](maths/series/arithmetic.py) diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index ca4af5261a8f..f27ec9ca60aa 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -55,7 +55,7 @@ def added_solution_file_path() -> list[pathlib.Path]: solution_file_paths = [] headers = { "Accept": "application/vnd.github.v3+json", - "Authorization": "token " + os.environ["GITHUB_TOKEN"], + "Authorization": f"token {os.getenv('GITHUB_TOKEN', '')}", } files = requests.get(get_files_url(), headers=headers).json() for file in files: From 7bdd1cd2beadf494685d1da63fb410343290de98 Mon Sep 17 00:00:00 2001 From: Barun Parua <76466796+Baron105@users.noreply.github.com> Date: Sun, 15 Oct 2023 22:43:40 +0530 Subject: [PATCH 1133/1543] updated physics/archimedes_principle.py (#10479) * avg and mps speed formulae added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * avg and mps speed formulae added * fixed_spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ws * added amicable numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added amicable numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed * changed name of file and added code improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * issues fixed due 
to pi * requested changes added * added some doctests for exception handling, imported g from scipy and allowed zero gravity * removed_scipy_import * Update and rename archimedes_principle.py to archimedes_principle_of_buoyant_force.py * Update archimedes_principle_of_buoyant_force.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- physics/archimedes_principle.py | 49 --------------- .../archimedes_principle_of_buoyant_force.py | 63 +++++++++++++++++++ 2 files changed, 63 insertions(+), 49 deletions(-) delete mode 100644 physics/archimedes_principle.py create mode 100644 physics/archimedes_principle_of_buoyant_force.py diff --git a/physics/archimedes_principle.py b/physics/archimedes_principle.py deleted file mode 100644 index 6ecfc65e7461..000000000000 --- a/physics/archimedes_principle.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Calculates buoyant force on object submerged within static fluid. -Discovered by greek mathematician, Archimedes. The principle is named after him. - -Equation for calculating buoyant force: -Fb = ρ * V * g - -Source: -- https://en.wikipedia.org/wiki/Archimedes%27_principle -""" - - -# Acceleration Constant on Earth (unit m/s^2) -g = 9.80665 - - -def archimedes_principle( - fluid_density: float, volume: float, gravity: float = g -) -> float: - """ - Args: - fluid_density: density of fluid (kg/m^3) - volume: volume of object / liquid being displaced by object - gravity: Acceleration from gravity. Gravitational force on system, - Default is Earth Gravity - returns: - buoyant force on object in Newtons - - >>> archimedes_principle(fluid_density=997, volume=0.5, gravity=9.8) - 4885.3 - >>> archimedes_principle(fluid_density=997, volume=0.7) - 6844.061035 - """ - - if fluid_density <= 0: - raise ValueError("Impossible fluid density") - if volume < 0: - raise ValueError("Impossible Object volume") - if gravity <= 0: - raise ValueError("Impossible Gravity") - - return fluid_density * gravity * volume - - -if __name__ == "__main__": - import doctest - - # run doctest - doctest.testmod() diff --git a/physics/archimedes_principle_of_buoyant_force.py b/physics/archimedes_principle_of_buoyant_force.py new file mode 100644 index 000000000000..5f569837220f --- /dev/null +++ b/physics/archimedes_principle_of_buoyant_force.py @@ -0,0 +1,63 @@ +""" +Calculate the buoyant force of any body completely or partially submerged in a static +fluid. This principle was discovered by the Greek mathematician Archimedes. + +Equation for calculating buoyant force: +Fb = ρ * V * g + +https://en.wikipedia.org/wiki/Archimedes%27_principle +""" + + +# Acceleration Constant on Earth (unit m/s^2) +g = 9.80665 # Also available in scipy.constants.g + + +def archimedes_principle( + fluid_density: float, volume: float, gravity: float = g +) -> float: + """ + Args: + fluid_density: density of fluid (kg/m^3) + volume: volume of object/liquid being displaced by the object (m^3) + gravity: Acceleration from gravity. Gravitational force on the system, + The default is Earth Gravity + returns: + the buoyant force on an object in Newtons + + >>> archimedes_principle(fluid_density=500, volume=4, gravity=9.8) + 19600.0 + >>> archimedes_principle(fluid_density=997, volume=0.5, gravity=9.8) + 4885.3 + >>> archimedes_principle(fluid_density=997, volume=0.7) + 6844.061035 + >>> archimedes_principle(fluid_density=997, volume=-0.7) + Traceback (most recent call last): + ... 
+ ValueError: Impossible object volume + >>> archimedes_principle(fluid_density=0, volume=0.7) + Traceback (most recent call last): + ... + ValueError: Impossible fluid density + >>> archimedes_principle(fluid_density=997, volume=0.7, gravity=0) + 0.0 + >>> archimedes_principle(fluid_density=997, volume=0.7, gravity=-9.8) + Traceback (most recent call last): + ... + ValueError: Impossible gravity + """ + + if fluid_density <= 0: + raise ValueError("Impossible fluid density") + if volume <= 0: + raise ValueError("Impossible object volume") + if gravity < 0: + raise ValueError("Impossible gravity") + + return fluid_density * gravity * volume + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 89d12dfe99d51f7df983ddbc6b0c93e1130fc47b Mon Sep 17 00:00:00 2001 From: Kosuri L Indu <118645569+kosuri-indu@users.noreply.github.com> Date: Mon, 16 Oct 2023 00:57:47 +0530 Subject: [PATCH 1134/1543] [Add] : Wildcard Matching program under DYNAMIC PROGRAMMING (#10403) * To add wildcard_matching.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changes for doctest errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * code changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/wildcard_matching.py | 62 ++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 dynamic_programming/wildcard_matching.py diff --git a/dynamic_programming/wildcard_matching.py b/dynamic_programming/wildcard_matching.py new file mode 100644 index 000000000000..4ffc4b5d46aa --- /dev/null +++ b/dynamic_programming/wildcard_matching.py @@ -0,0 +1,62 @@ +""" +Given two strings, an input string and a pattern, +this program checks if the input string matches the pattern. + +Example : +input_string = "baaabab" +pattern = "*****ba*****ab" +Output: True + +This problem can be solved using the concept of "DYNAMIC PROGRAMMING". + +We create a 2D boolean matrix, where each entry match_matrix[i][j] is True +if the first i characters in input_string match the first j characters +of pattern. We initialize the first row and first column based on specific +rules, then fill up the rest of the matrix using a bottom-up dynamic +programming approach. + +The amount of match that will be determined is equal to match_matrix[n][m] +where n and m are lengths of the input_string and pattern respectively. 
+ +""" + + +def is_pattern_match(input_string: str, pattern: str) -> bool: + """ + >>> is_pattern_match('baaabab','*****ba*****ba') + False + >>> is_pattern_match('baaabab','*****ba*****ab') + True + >>> is_pattern_match('aa','*') + True + """ + + input_length = len(input_string) + pattern_length = len(pattern) + + match_matrix = [[False] * (pattern_length + 1) for _ in range(input_length + 1)] + + match_matrix[0][0] = True + + for j in range(1, pattern_length + 1): + if pattern[j - 1] == "*": + match_matrix[0][j] = match_matrix[0][j - 1] + + for i in range(1, input_length + 1): + for j in range(1, pattern_length + 1): + if pattern[j - 1] in ("?", input_string[i - 1]): + match_matrix[i][j] = match_matrix[i - 1][j - 1] + elif pattern[j - 1] == "*": + match_matrix[i][j] = match_matrix[i - 1][j] or match_matrix[i][j - 1] + else: + match_matrix[i][j] = False + + return match_matrix[input_length][pattern_length] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + print(f"{is_pattern_match('baaabab','*****ba*****ab')}") From 4004b862d583a32cb1a809c4ea54d87635a273eb Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 15 Oct 2023 21:40:13 +0200 Subject: [PATCH 1135/1543] Revert "validate_solutions.py: os.getenv('GITHUB_TOKEN', '')" (#10552) * Revert "validate_solutions.py: os.getenv('GITHUB_TOKEN', '') (#10546)" This reverts commit 68e6d5ad7e9af8929a22a889b1182706abbfcb50. * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 4 +++- scripts/validate_solutions.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 6213f26b6d93..5c63e6316547 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -265,6 +265,7 @@ * [Postfix Evaluation](data_structures/stacks/postfix_evaluation.py) * [Prefix Evaluation](data_structures/stacks/prefix_evaluation.py) * [Stack](data_structures/stacks/stack.py) + * [Stack Using Two Queues](data_structures/stacks/stack_using_two_queues.py) * [Stack With Doubly Linked List](data_structures/stacks/stack_with_doubly_linked_list.py) * [Stack With Singly Linked List](data_structures/stacks/stack_with_singly_linked_list.py) * [Stock Span Problem](data_structures/stacks/stock_span_problem.py) @@ -361,6 +362,7 @@ * [Trapped Water](dynamic_programming/trapped_water.py) * [Tribonacci](dynamic_programming/tribonacci.py) * [Viterbi](dynamic_programming/viterbi.py) + * [Wildcard Matching](dynamic_programming/wildcard_matching.py) * [Word Break](dynamic_programming/word_break.py) ## Electronics @@ -791,7 +793,7 @@ ## Physics * [Altitude Pressure](physics/altitude_pressure.py) - * [Archimedes Principle](physics/archimedes_principle.py) + * [Archimedes Principle Of Buoyant Force](physics/archimedes_principle_of_buoyant_force.py) * [Basic Orbital Capture](physics/basic_orbital_capture.py) * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index f27ec9ca60aa..ca4af5261a8f 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -55,7 +55,7 @@ def added_solution_file_path() -> list[pathlib.Path]: solution_file_paths = [] headers = { "Accept": "application/vnd.github.v3+json", - "Authorization": f"token {os.getenv('GITHUB_TOKEN', '')}", + "Authorization": "token " + os.environ["GITHUB_TOKEN"], } files = requests.get(get_files_url(), headers=headers).json() for file in files: From 
902278f656b38ed68e148cf8c9ac2cbd10fcfb7e Mon Sep 17 00:00:00 2001 From: Aasheesh <126905285+AasheeshLikePanner@users.noreply.github.com> Date: Mon, 16 Oct 2023 01:26:02 +0530 Subject: [PATCH 1136/1543] Changes the code To return the list in dynamic_programming/subset_generation.py (#10191) * Changing the code to return tuple * Changing the code to return tuple * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/subset_generation.py Co-authored-by: Christian Clauss * Adding doctests in subset_generation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update subset_generation.py * Update subset_generation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update subset_generation.py * Update subset_generation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/subset_generation.py Co-authored-by: Christian Clauss * Update stock_span_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update subset_generation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update subset_generation.py * Update subset_generation.py * Update subset_generation.py * Update subset_generation.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- dynamic_programming/subset_generation.py | 90 ++++++++++++++---------- 1 file changed, 53 insertions(+), 37 deletions(-) diff --git a/dynamic_programming/subset_generation.py b/dynamic_programming/subset_generation.py index 819fd8106def..1be412b9374d 100644 --- a/dynamic_programming/subset_generation.py +++ b/dynamic_programming/subset_generation.py @@ -1,44 +1,60 @@ -# Print all subset combinations of n element in given set of r element. - - -def combination_util(arr, n, r, index, data, i): +def subset_combinations(elements: list[int], n: int) -> list: """ - Current combination is ready to be printed, print it - arr[] ---> Input Array - data[] ---> Temporary array to store current combination - start & end ---> Staring and Ending indexes in arr[] - index ---> Current index in data[] - r ---> Size of a combination to be printed + Compute n-element combinations from a given list using dynamic programming. + Args: + elements: The list of elements from which combinations will be generated. + n: The number of elements in each combination. + Returns: + A list of tuples, each representing a combination of n elements. 
+ >>> subset_combinations(elements=[10, 20, 30, 40], n=2) + [(10, 20), (10, 30), (10, 40), (20, 30), (20, 40), (30, 40)] + >>> subset_combinations(elements=[1, 2, 3], n=1) + [(1,), (2,), (3,)] + >>> subset_combinations(elements=[1, 2, 3], n=3) + [(1, 2, 3)] + >>> subset_combinations(elements=[42], n=1) + [(42,)] + >>> subset_combinations(elements=[6, 7, 8, 9], n=4) + [(6, 7, 8, 9)] + >>> subset_combinations(elements=[10, 20, 30, 40, 50], n=0) + [()] + >>> subset_combinations(elements=[1, 2, 3, 4], n=2) + [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + >>> subset_combinations(elements=[1, 'apple', 3.14], n=2) + [(1, 'apple'), (1, 3.14), ('apple', 3.14)] + >>> subset_combinations(elements=['single'], n=0) + [()] + >>> subset_combinations(elements=[], n=9) + [] + >>> from itertools import combinations + >>> all(subset_combinations(items, n) == list(combinations(items, n)) + ... for items, n in ( + ... ([10, 20, 30, 40], 2), ([1, 2, 3], 1), ([1, 2, 3], 3), ([42], 1), + ... ([6, 7, 8, 9], 4), ([10, 20, 30, 40, 50], 1), ([1, 2, 3, 4], 2), + ... ([1, 'apple', 3.14], 2), (['single'], 0), ([], 9))) + True """ - if index == r: - for j in range(r): - print(data[j], end=" ") - print(" ") - return - # When no more elements are there to put in data[] - if i >= n: - return - # current is included, put next at next location - data[index] = arr[i] - combination_util(arr, n, r, index + 1, data, i + 1) - # current is excluded, replace it with - # next (Note that i+1 is passed, but - # index is not changed) - combination_util(arr, n, r, index, data, i + 1) - # The main function that prints all combinations - # of size r in arr[] of size n. This function - # mainly uses combinationUtil() + r = len(elements) + if n > r: + return [] + + dp: list[list[tuple]] = [[] for _ in range(r + 1)] + dp[0].append(()) -def print_combination(arr, n, r): - # A temporary array to store all combination one by one - data = [0] * r - # Print all combination using temporary array 'data[]' - combination_util(arr, n, r, 0, data, 0) + for i in range(1, r + 1): + for j in range(i, 0, -1): + for prev_combination in dp[j - 1]: + dp[j].append(tuple(prev_combination) + (elements[i - 1],)) + + try: + return sorted(dp[n]) + except TypeError: + return dp[n] if __name__ == "__main__": - # Driver code to check the function above - arr = [10, 20, 30, 40, 50] - print_combination(arr, len(arr), 3) - # This code is contributed by Ambuj sahu + from doctest import testmod + + testmod() + print(f"{subset_combinations(elements=[10, 20, 30, 40], n=2) = }") From 3d6f3c41881da75653b804d7a5964ea90df9d2ad Mon Sep 17 00:00:00 2001 From: hollowcrust <72879387+hollowcrust@users.noreply.github.com> Date: Mon, 16 Oct 2023 04:13:27 +0800 Subject: [PATCH 1137/1543] Added data_structures/arrays/sparse_table.py (#10437) * Create sparse_table.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Descriptive names for variables * Fix ruff check error * Update sparse_table.py * Add comments, change variable names * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix typo * Update sparse_table.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/arrays/sparse_table.py | 94 ++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 data_structures/arrays/sparse_table.py diff --git a/data_structures/arrays/sparse_table.py 
b/data_structures/arrays/sparse_table.py new file mode 100644 index 000000000000..a15d5649e712 --- /dev/null +++ b/data_structures/arrays/sparse_table.py @@ -0,0 +1,94 @@ +""" + Sparse table is a data structure that allows answering range queries on + a static number list, i.e. the elements do not change throughout all the queries. + + The implementation below will solve the problem of Range Minimum Query: + Finding the minimum value of a subset [L..R] of a static number list. + + Overall time complexity: O(nlogn) + Overall space complexity: O(nlogn) + + Wikipedia link: https://en.wikipedia.org/wiki/Range_minimum_query +""" +from math import log2 + + +def build_sparse_table(number_list: list[int]) -> list[list[int]]: + """ + Precompute range minimum queries with power of two length and store the precomputed + values in a table. + + >>> build_sparse_table([8, 1, 0, 3, 4, 9, 3]) + [[8, 1, 0, 3, 4, 9, 3], [1, 0, 0, 3, 4, 3, 0], [0, 0, 0, 3, 0, 0, 0]] + >>> build_sparse_table([3, 1, 9]) + [[3, 1, 9], [1, 1, 0]] + >>> build_sparse_table([]) + Traceback (most recent call last): + ... + ValueError: empty number list not allowed + """ + if not number_list: + raise ValueError("empty number list not allowed") + + length = len(number_list) + # Initialise sparse_table -- sparse_table[j][i] represents the minimum value of the + # subset of length (2 ** j) of number_list, starting from index i. + + # smallest power of 2 subset length that fully covers number_list + row = int(log2(length)) + 1 + sparse_table = [[0 for i in range(length)] for j in range(row)] + + # minimum of subset of length 1 is that value itself + for i, value in enumerate(number_list): + sparse_table[0][i] = value + j = 1 + + # compute the minimum value for all intervals with size (2 ** j) + while (1 << j) <= length: + i = 0 + # while subset starting from i still have at least (2 ** j) elements + while (i + (1 << j) - 1) < length: + # split range [i, i + 2 ** j] and find minimum of 2 halves + sparse_table[j][i] = min( + sparse_table[j - 1][i + (1 << (j - 1))], sparse_table[j - 1][i] + ) + i += 1 + j += 1 + return sparse_table + + +def query(sparse_table: list[list[int]], left_bound: int, right_bound: int) -> int: + """ + >>> query(build_sparse_table([8, 1, 0, 3, 4, 9, 3]), 0, 4) + 0 + >>> query(build_sparse_table([8, 1, 0, 3, 4, 9, 3]), 4, 6) + 3 + >>> query(build_sparse_table([3, 1, 9]), 2, 2) + 9 + >>> query(build_sparse_table([3, 1, 9]), 0, 1) + 1 + >>> query(build_sparse_table([8, 1, 0, 3, 4, 9, 3]), 0, 11) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> query(build_sparse_table([]), 0, 0) + Traceback (most recent call last): + ... 
+    ValueError: empty number list not allowed
+    """
+    if left_bound < 0 or right_bound >= len(sparse_table[0]):
+        raise IndexError("list index out of range")
+
+    # highest subset length of power of 2 that is within range [left_bound, right_bound]
+    j = int(log2(right_bound - left_bound + 1))
+
+    # minimum of 2 overlapping smaller subsets:
+    # [left_bound, left_bound + 2 ** j - 1] and [right_bound - 2 ** j + 1, right_bound]
+    return min(sparse_table[j][right_bound - (1 << j) + 1], sparse_table[j][left_bound])
+
+
+if __name__ == "__main__":
+    from doctest import testmod
+
+    testmod()
+    print(f"{query(build_sparse_table([3, 1, 9]), 2, 2) = }")

From ec952927baea776bcb0f35d282448d32f3721047 Mon Sep 17 00:00:00 2001
From: dhruvtrigotra <72982592+dhruvtrigotra@users.noreply.github.com>
Date: Mon, 16 Oct 2023 02:11:39 +0530
Subject: [PATCH 1138/1543] charging_inductor (#10427)

* charging_capacitor

* charging_capacitor

* Final edits

* charging_inductor

---------

Co-authored-by: Christian Clauss
---
 electronics/charging_inductor.py | 96 ++++++++++++++++++++++++++++++++
 1 file changed, 96 insertions(+)
 create mode 100644 electronics/charging_inductor.py

diff --git a/electronics/charging_inductor.py b/electronics/charging_inductor.py
new file mode 100644
index 000000000000..e5c0126c248a
--- /dev/null
+++ b/electronics/charging_inductor.py
@@ -0,0 +1,96 @@
+# source - The ARRL Handbook for Radio Communications
+# https://en.wikipedia.org/wiki/RL_circuit
+
+"""
+Description
+-----------
+An inductor is a passive electronic device which stores energy, but unlike a capacitor
+it stores energy in its 'magnetic field' or 'magnetostatic field'.
+
+When an inductor is connected to a 'DC' current source nothing much happens: it just
+works like a wire, because its real effect cannot be seen while 'DC' flows, and it does
+not store energy. An inductor stores energy only when it is working on 'AC' current.
+
+Connecting an inductor in series with a resistor (even when R = 0) to an 'AC' potential
+source that rises from zero to a finite value causes a sudden voltage to be induced in
+the inductor, which opposes the current. This results in an initially slow current
+rise; it would cease if there were no further changes in current. With zero resistance
+the current would never stop rising.
+
+'Inductance(henrys) / Resistance(ohms)' is known as the RL time constant, also
+represented as τ (tau). The charging of an inductor through a resistor follows an
+exponential function.
+
+When an inductor is connected across an 'AC' potential source, it starts to store
+energy in its 'magnetic field'. With the help of the RL time constant we can find the
+current in the inductor at any time while it is charging.
+"""
+from math import exp  # value of e = 2.718281828459…
+
+
+def charging_inductor(
+    source_voltage: float,  # source_voltage should be in volts.
+    resistance: float,  # resistance should be in ohms.
+    inductance: float,  # inductance should be in henrys.
+    time: float,  # time should be in seconds.
+) -> float:
+    """
+    Find the inductor current at the nth second after initiating its charging.
+
+    Examples
+    --------
+    >>> charging_inductor(source_voltage=5.8,resistance=1.5,inductance=2.3,time=2)
+    2.817
+
+    >>> charging_inductor(source_voltage=8,resistance=5,inductance=3,time=2)
+    1.543
+
+    >>> charging_inductor(source_voltage=8,resistance=5*pow(10,2),inductance=3,time=2)
+    0.016
+
+    >>> charging_inductor(source_voltage=-8,resistance=100,inductance=15,time=12)
+    Traceback (most recent call last):
+    ...
+    ValueError: Source voltage must be positive.
+ + >>> charging_inductor(source_voltage=80,resistance=-15,inductance=100,time=5) + Traceback (most recent call last): + ... + ValueError: Resistance must be positive. + + >>> charging_inductor(source_voltage=12,resistance=200,inductance=-20,time=5) + Traceback (most recent call last): + ... + ValueError: Inductance must be positive. + + >>> charging_inductor(source_voltage=0,resistance=200,inductance=20,time=5) + Traceback (most recent call last): + ... + ValueError: Source voltage must be positive. + + >>> charging_inductor(source_voltage=10,resistance=0,inductance=20,time=5) + Traceback (most recent call last): + ... + ValueError: Resistance must be positive. + + >>> charging_inductor(source_voltage=15, resistance=25, inductance=0, time=5) + Traceback (most recent call last): + ... + ValueError: Inductance must be positive. + """ + + if source_voltage <= 0: + raise ValueError("Source voltage must be positive.") + if resistance <= 0: + raise ValueError("Resistance must be positive.") + if inductance <= 0: + raise ValueError("Inductance must be positive.") + return round( + source_voltage / resistance * (1 - exp((-time * resistance) / inductance)), 3 + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From bcda3bf64ea20db11cb4b1b81536e2f05ee584fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ab=C3=ADlio=20Azevedo?= Date: Sun, 15 Oct 2023 18:31:11 -0300 Subject: [PATCH 1139/1543] test: adding more tests to a star algorithm (#10397) * test: adding more tests to a star algorithm * Apply suggestions from code review * Update a_star.py --------- Co-authored-by: Tianyi Zheng --- graphs/a_star.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/graphs/a_star.py b/graphs/a_star.py index e8735179eab9..06da3b5cd863 100644 --- a/graphs/a_star.py +++ b/graphs/a_star.py @@ -16,6 +16,31 @@ def search( cost: int, heuristic: list[list[int]], ) -> tuple[list[list[int]], list[list[int]]]: + """ + Search for a path on a grid avoiding obstacles. + >>> grid = [[0, 1, 0, 0, 0, 0], + ... [0, 1, 0, 0, 0, 0], + ... [0, 1, 0, 0, 0, 0], + ... [0, 1, 0, 0, 1, 0], + ... [0, 0, 0, 0, 1, 0]] + >>> init = [0, 0] + >>> goal = [len(grid) - 1, len(grid[0]) - 1] + >>> cost = 1 + >>> heuristic = [[0] * len(grid[0]) for _ in range(len(grid))] + >>> heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] + >>> for i in range(len(grid)): + ... for j in range(len(grid[0])): + ... heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1]) + ... if grid[i][j] == 1: + ... 
heuristic[i][j] = 99 + >>> path, action = search(grid, init, goal, cost, heuristic) + >>> path # doctest: +NORMALIZE_WHITESPACE + [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [4, 1], [4, 2], [4, 3], [3, 3], + [2, 3], [2, 4], [2, 5], [3, 5], [4, 5]] + >>> action # doctest: +NORMALIZE_WHITESPACE + [[0, 0, 0, 0, 0, 0], [2, 0, 0, 0, 0, 0], [2, 0, 0, 0, 3, 3], + [2, 0, 0, 0, 0, 2], [2, 3, 3, 3, 0, 2]] + """ closed = [ [0 for col in range(len(grid[0]))] for row in range(len(grid)) ] # the reference grid From d00888de7629b093bcf750ae046318be1e9a1fa3 Mon Sep 17 00:00:00 2001 From: Jeel Gajera <83470656+JeelGajera@users.noreply.github.com> Date: Mon, 16 Oct 2023 03:19:53 +0530 Subject: [PATCH 1140/1543] feat: adding Apriori Algorithm (#10491) * feat: adding Apriori Algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: doctest, typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: type error, code refactore * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: refactore code * fix: doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: E501, B007 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: err * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: arg typ err * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: typo * fix: typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replace generate_candidates() with itertools.combinations() * mypy * Update apriori_algorithm.py --------- Co-authored-by: Jeel Gajera Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + machine_learning/apriori_algorithm.py | 112 ++++++++++++++++++++++++++ 2 files changed, 113 insertions(+) create mode 100644 machine_learning/apriori_algorithm.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 5c63e6316547..55781df03b91 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -554,6 +554,7 @@ * [Word Frequency Functions](machine_learning/word_frequency_functions.py) * [Xgboost Classifier](machine_learning/xgboost_classifier.py) * [Xgboost Regressor](machine_learning/xgboost_regressor.py) + * [Apriori Algorithm](machine_learning/apriori_algorithm.py) ## Maths * [Abs](maths/abs.py) diff --git a/machine_learning/apriori_algorithm.py b/machine_learning/apriori_algorithm.py new file mode 100644 index 000000000000..d9fd1f82ea3c --- /dev/null +++ b/machine_learning/apriori_algorithm.py @@ -0,0 +1,112 @@ +""" +Apriori Algorithm is a Association rule mining technique, also known as market basket +analysis, aims to discover interesting relationships or associations among a set of +items in a transactional or relational database. + +For example, Apriori Algorithm states: "If a customer buys item A and item B, then they +are likely to buy item C." This rule suggests a relationship between items A, B, and C, +indicating that customers who purchased A and B are more likely to also purchase item C. 
+ +WIKI: https://en.wikipedia.org/wiki/Apriori_algorithm +Examples: https://www.kaggle.com/code/earthian/apriori-association-rules-mining +""" +from itertools import combinations + + +def load_data() -> list[list[str]]: + """ + Returns a sample transaction dataset. + + >>> load_data() + [['milk'], ['milk', 'butter'], ['milk', 'bread'], ['milk', 'bread', 'chips']] + """ + return [["milk"], ["milk", "butter"], ["milk", "bread"], ["milk", "bread", "chips"]] + + +def prune(itemset: list, candidates: list, length: int) -> list: + """ + Prune candidate itemsets that are not frequent. + The goal of pruning is to filter out candidate itemsets that are not frequent. This + is done by checking if all the (k-1) subsets of a candidate itemset are present in + the frequent itemsets of the previous iteration (valid subsequences of the frequent + itemsets from the previous iteration). + + Prunes candidate itemsets that are not frequent. + + >>> itemset = ['X', 'Y', 'Z'] + >>> candidates = [['X', 'Y'], ['X', 'Z'], ['Y', 'Z']] + >>> prune(itemset, candidates, 2) + [['X', 'Y'], ['X', 'Z'], ['Y', 'Z']] + + >>> itemset = ['1', '2', '3', '4'] + >>> candidates = ['1', '2', '4'] + >>> prune(itemset, candidates, 3) + [] + """ + pruned = [] + for candidate in candidates: + is_subsequence = True + for item in candidate: + if item not in itemset or itemset.count(item) < length - 1: + is_subsequence = False + break + if is_subsequence: + pruned.append(candidate) + return pruned + + +def apriori(data: list[list[str]], min_support: int) -> list[tuple[list[str], int]]: + """ + Returns a list of frequent itemsets and their support counts. + + >>> data = [['A', 'B', 'C'], ['A', 'B'], ['A', 'C'], ['A', 'D'], ['B', 'C']] + >>> apriori(data, 2) + [(['A', 'B'], 1), (['A', 'C'], 2), (['B', 'C'], 2)] + + >>> data = [['1', '2', '3'], ['1', '2'], ['1', '3'], ['1', '4'], ['2', '3']] + >>> apriori(data, 3) + [] + """ + itemset = [list(transaction) for transaction in data] + frequent_itemsets = [] + length = 1 + + while itemset: + # Count itemset support + counts = [0] * len(itemset) + for transaction in data: + for j, candidate in enumerate(itemset): + if all(item in transaction for item in candidate): + counts[j] += 1 + + # Prune infrequent itemsets + itemset = [item for i, item in enumerate(itemset) if counts[i] >= min_support] + + # Append frequent itemsets (as a list to maintain order) + for i, item in enumerate(itemset): + frequent_itemsets.append((sorted(item), counts[i])) + + length += 1 + itemset = prune(itemset, list(combinations(itemset, length)), length) + + return frequent_itemsets + + +if __name__ == "__main__": + """ + Apriori algorithm for finding frequent itemsets. + + Args: + data: A list of transactions, where each transaction is a list of items. + min_support: The minimum support threshold for frequent itemsets. + + Returns: + A list of frequent itemsets along with their support counts. 
+ """ + import doctest + + doctest.testmod() + + # user-defined threshold or minimum support level + frequent_itemsets = apriori(data=load_data(), min_support=2) + print("\n".join(f"{itemset}: {support}" for itemset, support in frequent_itemsets)) From e6aae1cf66b7e962b886255703b5802d58f27fd3 Mon Sep 17 00:00:00 2001 From: Pooja Sharma <75516191+Shailaputri@users.noreply.github.com> Date: Mon, 16 Oct 2023 05:02:45 +0530 Subject: [PATCH 1141/1543] Dynamic programming/matrix chain multiplication (#10562) * updating DIRECTORY.md * spell changes * updating DIRECTORY.md * real world applications * updating DIRECTORY.md * Update matrix_chain_multiplication.py Add a non-dp solution with benchmarks. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update matrix_chain_multiplication.py * Update matrix_chain_multiplication.py * Update matrix_chain_multiplication.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Pooja Sharma Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 5 +- .../matrix_chain_multiplication.py | 143 ++++++++++++++++++ 2 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 dynamic_programming/matrix_chain_multiplication.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 55781df03b91..cef1e06b78aa 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -182,6 +182,7 @@ * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product Sum](data_structures/arrays/product_sum.py) + * [Sparse Table](data_structures/arrays/sparse_table.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) @@ -340,6 +341,7 @@ * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py) * [Longest Palindromic Subsequence](dynamic_programming/longest_palindromic_subsequence.py) * [Longest Sub Array](dynamic_programming/longest_sub_array.py) + * [Matrix Chain Multiplication](dynamic_programming/matrix_chain_multiplication.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) * [Max Product Subarray](dynamic_programming/max_product_subarray.py) @@ -370,6 +372,7 @@ * [Builtin Voltage](electronics/builtin_voltage.py) * [Carrier Concentration](electronics/carrier_concentration.py) * [Charging Capacitor](electronics/charging_capacitor.py) + * [Charging Inductor](electronics/charging_inductor.py) * [Circular Convolution](electronics/circular_convolution.py) * [Coulombs Law](electronics/coulombs_law.py) * [Electric Conductivity](electronics/electric_conductivity.py) @@ -524,6 +527,7 @@ * [Simplex](linear_programming/simplex.py) ## Machine Learning + * [Apriori Algorithm](machine_learning/apriori_algorithm.py) * [Astar](machine_learning/astar.py) * [Data Transformations](machine_learning/data_transformations.py) * [Decision Tree](machine_learning/decision_tree.py) @@ -554,7 +558,6 @@ * [Word Frequency Functions](machine_learning/word_frequency_functions.py) * [Xgboost Classifier](machine_learning/xgboost_classifier.py) * [Xgboost Regressor](machine_learning/xgboost_regressor.py) - * [Apriori Algorithm](machine_learning/apriori_algorithm.py) ## Maths * [Abs](maths/abs.py) diff --git a/dynamic_programming/matrix_chain_multiplication.py 
b/dynamic_programming/matrix_chain_multiplication.py
new file mode 100644
index 000000000000..084254a61f6c
--- /dev/null
+++ b/dynamic_programming/matrix_chain_multiplication.py
@@ -0,0 +1,143 @@
+"""
+Find the minimum number of multiplications needed to multiply a chain of matrices.
+Reference: https://www.geeksforgeeks.org/matrix-chain-multiplication-dp-8/
+
+The algorithm has interesting real-world applications. Example:
+1. Image transformations in Computer Graphics, as images are composed of matrices.
+2. Solve complex polynomial equations in the field of algebra using the least
+   processing power.
+3. Calculate the overall impact of macroeconomic decisions, as economic equations
+   involve a number of variables.
+4. Self-driving car navigation can be made more accurate, as matrix multiplication can
+   accurately determine the position and orientation of obstacles in a short time.
+
+Python doctests can be run with the following command:
+python -m doctest -v matrix_chain_multiplication.py
+
+Given a sequence arr[] that represents a chain of 2D matrices such that the dimension
+of the ith matrix is arr[i-1]*arr[i].
+So suppose arr = [40, 20, 30, 10, 30] means we have 4 matrices of dimensions
+40*20, 20*30, 30*10 and 10*30.
+
+matrix_chain_multiply() returns an integer denoting the minimum number of
+multiplications needed to multiply the chain.
+
+We do not need to perform the actual multiplication here.
+We only need to decide the order in which to perform the multiplication.
+
+Hints:
+1. Number of multiplications (ie cost) to multiply 2 matrices
+of size m*p and p*n is m*p*n.
+2. Matrix multiplication is associative, but its cost is not, ie in general
+cost((M1*M2)*M3) != cost(M1*(M2*M3)).
+3. Matrix multiplication is not commutative. So, M1*M2 does not mean M2*M1 can be done.
+4. To determine the required order, we can try different combinations.
+So, this problem has overlapping sub-problems and can be solved using recursion.
+We use Dynamic Programming for optimal time complexity.
+
+Example input:
+arr = [40, 20, 30, 10, 30]
+output: 26000
+"""
+from collections.abc import Iterator
+from contextlib import contextmanager
+from functools import cache
+from sys import maxsize
+
+
+def matrix_chain_multiply(arr: list[int]) -> int:
+    """
+    Find the minimum number of multiplications required to multiply the chain of matrices
+
+    Args:
+        arr: The input array of integers.
+
+    Returns:
+        Minimum number of multiplications needed to multiply the chain
+
+    Examples:
+        >>> matrix_chain_multiply([1, 2, 3, 4, 3])
+        30
+        >>> matrix_chain_multiply([10])
+        0
+        >>> matrix_chain_multiply([10, 20])
+        0
+        >>> matrix_chain_multiply([19, 2, 19])
+        722
+        >>> matrix_chain_multiply(list(range(1, 100)))
+        323398
+
+        # >>> matrix_chain_multiply(list(range(1, 251)))
+        # 2626798
+    """
+    if len(arr) < 2:
+        return 0
+    # initialising 2D dp matrix
+    n = len(arr)
+    dp = [[maxsize for j in range(n)] for i in range(n)]
+    # we want minimum cost of multiplication of matrices
+    # of dimension (i*k) and (k*j). This cost is arr[i-1]*arr[k]*arr[j].
+    for i in range(n - 1, 0, -1):
+        for j in range(i, n):
+            if i == j:
+                dp[i][j] = 0
+                continue
+            for k in range(i, j):
+                dp[i][j] = min(
+                    dp[i][j], dp[i][k] + dp[k + 1][j] + arr[i - 1] * arr[k] * arr[j]
+                )
+
+    return dp[1][n - 1]
+
+
+def matrix_chain_order(dims: list[int]) -> int:
+    """
+    Source: https://en.wikipedia.org/wiki/Matrix_chain_multiplication
+    The dynamic programming solution is faster than the cached recursive solution and
+    can handle larger inputs.
+ >>> matrix_chain_order([1, 2, 3, 4, 3]) + 30 + >>> matrix_chain_order([10]) + 0 + >>> matrix_chain_order([10, 20]) + 0 + >>> matrix_chain_order([19, 2, 19]) + 722 + >>> matrix_chain_order(list(range(1, 100))) + 323398 + + # >>> matrix_chain_order(list(range(1, 251))) # Max before RecursionError is raised + # 2626798 + """ + + @cache + def a(i: int, j: int) -> int: + return min( + (a(i, k) + dims[i] * dims[k] * dims[j] + a(k, j) for k in range(i + 1, j)), + default=0, + ) + + return a(0, len(dims) - 1) + + +@contextmanager +def elapsed_time(msg: str) -> Iterator: + # print(f"Starting: {msg}") + from time import perf_counter_ns + + start = perf_counter_ns() + yield + print(f"Finished: {msg} in {(perf_counter_ns() - start) / 10 ** 9} seconds.") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + with elapsed_time("matrix_chain_order"): + print(f"{matrix_chain_order(list(range(1, 251))) = }") + with elapsed_time("matrix_chain_multiply"): + print(f"{matrix_chain_multiply(list(range(1, 251))) = }") + with elapsed_time("matrix_chain_order"): + print(f"{matrix_chain_order(list(range(1, 251))) = }") + with elapsed_time("matrix_chain_multiply"): + print(f"{matrix_chain_multiply(list(range(1, 251))) = }") From b6b45eb1cee564e3c563966244f124051c28b8e7 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 15 Oct 2023 19:41:45 -0400 Subject: [PATCH 1142/1543] Fix numpy deprecation warning in `2_hidden_layers_neural_network.py` (#10424) * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Fix deprecation warning in 2_hidden_layers_neural_network.py Fix numpy deprecation warning: DeprecationWarning: Conversion of an array with ndim > 0 to a scalar is deprecated, and will error in future. Ensure you extract a single element from your array before performing this operation. (Deprecated NumPy 1.25.) 
--------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- neural_network/2_hidden_layers_neural_network.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neural_network/2_hidden_layers_neural_network.py b/neural_network/2_hidden_layers_neural_network.py index 9c5772326165..7b374a93d039 100644 --- a/neural_network/2_hidden_layers_neural_network.py +++ b/neural_network/2_hidden_layers_neural_network.py @@ -196,7 +196,7 @@ def predict(self, input_arr: numpy.ndarray) -> int: >>> output_val = numpy.array(([0], [1], [1]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> nn.train(output_val, 1000, False) - >>> nn.predict([0,1,0]) in (0, 1) + >>> nn.predict([0, 1, 0]) in (0, 1) True """ @@ -221,7 +221,7 @@ def predict(self, input_arr: numpy.ndarray) -> int: ) ) - return int(self.layer_between_second_hidden_layer_and_output > 0.6) + return int((self.layer_between_second_hidden_layer_and_output > 0.6)[0]) def sigmoid(value: numpy.ndarray) -> numpy.ndarray: From 73ebf7bdb12f4bced39f25766ac4d2cd9b6ab525 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 15 Oct 2023 19:42:55 -0400 Subject: [PATCH 1143/1543] Move and rename `maths/greedy_coin_change.py` (#10418) * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Move greedy_coin_change.py to greedy_methods/ and rename file --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .../minimum_coin_change.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename maths/greedy_coin_change.py => greedy_methods/minimum_coin_change.py (100%) diff --git a/maths/greedy_coin_change.py b/greedy_methods/minimum_coin_change.py similarity index 100% rename from maths/greedy_coin_change.py rename to greedy_methods/minimum_coin_change.py From c2f14e8a78c1700a4101746a1a6e3d70be50aa07 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:44:06 +1300 Subject: [PATCH 1144/1543] Add note to feature_request.yml about not opening issues for new algorithms (#10142) --- .github/ISSUE_TEMPLATE/feature_request.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 09a159b2193e..20823bd58ab1 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -6,6 +6,7 @@ body: attributes: value: > Before requesting please search [existing issues](https://github.com/TheAlgorithms/Python/labels/enhancement). + Do not create issues to implement new algorithms as these will be closed. Usage questions such as "How do I...?" belong on the [Discord](https://discord.gg/c7MnfGFGa6) and will be closed. @@ -13,7 +14,6 @@ body: attributes: label: "Feature description" description: > - This could be new algorithms, data structures or improving any existing - implementations. + This could include new topics or improving any existing implementations. 
validations: required: true From bb8f194957c4308cbb0bf16a4e07acbe34d2087e Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 15 Oct 2023 20:01:01 -0400 Subject: [PATCH 1145/1543] Delete `texttable` from dependencies (#10565) * Disable unused dependencies Comment out dependencies in requirements.txt that are only used by currently-disabled files * Delete unused dependency --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1e64818bbb6a..05d9f1e8c545 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,7 +18,6 @@ scikit-learn statsmodels sympy tensorflow ; python_version < '3.12' -texttable tweepy xgboost # yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed From 1a26d76c60422030cf0c57c62623866d3f3229f2 Mon Sep 17 00:00:00 2001 From: "Gabrielly de S. Pinto Dantas" Date: Sun, 15 Oct 2023 21:44:10 -0300 Subject: [PATCH 1146/1543] add tests for tree_sort (#10015) * add tests for tree_sort * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update tree_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/tree_sort.py | 99 +++++++++++++++++++++++++++------------------- 1 file changed, 58 insertions(+), 41 deletions(-) diff --git a/sorts/tree_sort.py b/sorts/tree_sort.py index 78c3e893e0ce..e63a3253ba19 100644 --- a/sorts/tree_sort.py +++ b/sorts/tree_sort.py @@ -1,53 +1,70 @@ """ Tree_sort algorithm. -Build a BST and in order traverse. +Build a Binary Search Tree and then iterate thru it to get a sorted list. """ +from __future__ import annotations +from collections.abc import Iterator +from dataclasses import dataclass + +@dataclass class Node: - # BST data structure - def __init__(self, val): - self.val = val - self.left = None - self.right = None - - def insert(self, val): - if self.val: - if val < self.val: - if self.left is None: - self.left = Node(val) - else: - self.left.insert(val) - elif val > self.val: - if self.right is None: - self.right = Node(val) - else: - self.right.insert(val) - else: - self.val = val - - -def inorder(root, res): - # Recursive traversal - if root: - inorder(root.left, res) - res.append(root.val) - inorder(root.right, res) - - -def tree_sort(arr): - # Build BST + val: int + left: Node | None = None + right: Node | None = None + + def __iter__(self) -> Iterator[int]: + if self.left: + yield from self.left + yield self.val + if self.right: + yield from self.right + + def __len__(self) -> int: + return sum(1 for _ in self) + + def insert(self, val: int) -> None: + if val < self.val: + if self.left is None: + self.left = Node(val) + else: + self.left.insert(val) + elif val > self.val: + if self.right is None: + self.right = Node(val) + else: + self.right.insert(val) + + +def tree_sort(arr: list[int]) -> tuple[int, ...]: + """ + >>> tree_sort([]) + () + >>> tree_sort((1,)) + (1,) + >>> tree_sort((1, 2)) + (1, 2) + >>> tree_sort([5, 2, 7]) + (2, 5, 7) + >>> tree_sort((5, -4, 9, 2, 7)) + (-4, 2, 5, 7, 9) + >>> tree_sort([5, 6, 1, -1, 4, 37, 2, 7]) + (-1, 1, 2, 4, 5, 6, 7, 37) + >>> tree_sort(range(10, -10, -1)) == tuple(sorted(range(10, -10, -1))) + True + """ if len(arr) == 0: - return arr + return tuple(arr) root = Node(arr[0]) - for i in range(1, len(arr)): - root.insert(arr[i]) - # Traverse BST in order. 
- res = [] - inorder(root, res) - return res + for item in arr[1:]: + root.insert(item) + return tuple(root) if __name__ == "__main__": - print(tree_sort([10, 1, 3, 2, 9, 14, 13])) + import doctest + + doctest.testmod() + print(f"{tree_sort([5, 6, 1, -1, 4, 37, -3, 7]) = }") From cc0405d05cb4c5009e8bf826e3f641c427ba70d5 Mon Sep 17 00:00:00 2001 From: Yousha Mahamuni <40205524+yousha806@users.noreply.github.com> Date: Mon, 16 Oct 2023 08:17:27 +0530 Subject: [PATCH 1147/1543] Update volume.py with volume of Icosahedron (#9628) * Update volume.py with volume of Icosahedron Added function to find volume of a regular Icosahedron * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Update volume.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/volume.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/maths/volume.py b/maths/volume.py index 721974e68b66..b4df4e475783 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -469,6 +469,35 @@ def vol_torus(torus_radius: float, tube_radius: float) -> float: return 2 * pow(pi, 2) * torus_radius * pow(tube_radius, 2) +def vol_icosahedron(tri_side: float) -> float: + """Calculate the Volume of an Icosahedron. + Wikipedia reference: https://en.wikipedia.org/wiki/Regular_icosahedron + + >>> from math import isclose + >>> isclose(vol_icosahedron(2.5), 34.088984228514256) + True + >>> isclose(vol_icosahedron(10), 2181.694990624912374) + True + >>> isclose(vol_icosahedron(5), 272.711873828114047) + True + >>> isclose(vol_icosahedron(3.49), 92.740688412033628) + True + >>> vol_icosahedron(0) + 0.0 + >>> vol_icosahedron(-1) + Traceback (most recent call last): + ... + ValueError: vol_icosahedron() only accepts non-negative values + >>> vol_icosahedron(-0.2) + Traceback (most recent call last): + ... 
+ ValueError: vol_icosahedron() only accepts non-negative values + """ + if tri_side < 0: + raise ValueError("vol_icosahedron() only accepts non-negative values") + return tri_side**3 * (3 + 5**0.5) * 5 / 12 + + def main(): """Print the Results of Various Volume Calculations.""" print("Volumes:") @@ -489,6 +518,7 @@ def main(): print( f"Hollow Circular Cylinder: {vol_hollow_circular_cylinder(1, 2, 3) = }" ) # ~= 28.3 + print(f"Icosahedron: {vol_icosahedron(2.5) = }") # ~=34.09 if __name__ == "__main__": From f4ff73b1bdaa4349315beaf44e093c59f6c87fd3 Mon Sep 17 00:00:00 2001 From: Akshar Goyal Date: Mon, 16 Oct 2023 03:21:43 -0400 Subject: [PATCH 1148/1543] Converted tests into doctests (#10572) * Converted tests into doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed commented code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- boolean_algebra/and_gate.py | 18 +++--------------- boolean_algebra/imply_gate.py | 7 +++---- boolean_algebra/nand_gate.py | 17 +++-------------- boolean_algebra/nimply_gate.py | 7 +++---- boolean_algebra/not_gate.py | 13 +++---------- boolean_algebra/or_gate.py | 17 +++-------------- boolean_algebra/xnor_gate.py | 17 +++-------------- boolean_algebra/xor_gate.py | 15 +++------------ 8 files changed, 24 insertions(+), 87 deletions(-) diff --git a/boolean_algebra/and_gate.py b/boolean_algebra/and_gate.py index 834116772ee7..f0fd45c9f81e 100644 --- a/boolean_algebra/and_gate.py +++ b/boolean_algebra/and_gate.py @@ -32,19 +32,7 @@ def and_gate(input_1: int, input_2: int) -> int: return int((input_1, input_2).count(0) == 0) -def test_and_gate() -> None: - """ - Tests the and_gate function - """ - assert and_gate(0, 0) == 0 - assert and_gate(0, 1) == 0 - assert and_gate(1, 0) == 0 - assert and_gate(1, 1) == 1 - - if __name__ == "__main__": - test_and_gate() - print(and_gate(1, 0)) - print(and_gate(0, 0)) - print(and_gate(0, 1)) - print(and_gate(1, 1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/imply_gate.py b/boolean_algebra/imply_gate.py index 151a7ad6439a..b64ebaceb306 100644 --- a/boolean_algebra/imply_gate.py +++ b/boolean_algebra/imply_gate.py @@ -34,7 +34,6 @@ def imply_gate(input_1: int, input_2: int) -> int: if __name__ == "__main__": - print(imply_gate(0, 0)) - print(imply_gate(0, 1)) - print(imply_gate(1, 0)) - print(imply_gate(1, 1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/nand_gate.py b/boolean_algebra/nand_gate.py index ea3303d16b25..80f9d12db89a 100644 --- a/boolean_algebra/nand_gate.py +++ b/boolean_algebra/nand_gate.py @@ -30,18 +30,7 @@ def nand_gate(input_1: int, input_2: int) -> int: return int((input_1, input_2).count(0) != 0) -def test_nand_gate() -> None: - """ - Tests the nand_gate function - """ - assert nand_gate(0, 0) == 1 - assert nand_gate(0, 1) == 1 - assert nand_gate(1, 0) == 1 - assert nand_gate(1, 1) == 0 - - if __name__ == "__main__": - print(nand_gate(0, 0)) - print(nand_gate(0, 1)) - print(nand_gate(1, 0)) - print(nand_gate(1, 1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/nimply_gate.py b/boolean_algebra/nimply_gate.py index 6e34332d9112..68e82c8db8d9 100644 --- a/boolean_algebra/nimply_gate.py +++ b/boolean_algebra/nimply_gate.py @@ -34,7 +34,6 @@ def nimply_gate(input_1: int, input_2: int) -> int: if __name__ == "__main__": - 
print(nimply_gate(0, 0)) - print(nimply_gate(0, 1)) - print(nimply_gate(1, 0)) - print(nimply_gate(1, 1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/not_gate.py b/boolean_algebra/not_gate.py index eb85e9e44cd3..cfa74cf42204 100644 --- a/boolean_algebra/not_gate.py +++ b/boolean_algebra/not_gate.py @@ -24,14 +24,7 @@ def not_gate(input_1: int) -> int: return 1 if input_1 == 0 else 0 -def test_not_gate() -> None: - """ - Tests the not_gate function - """ - assert not_gate(0) == 1 - assert not_gate(1) == 0 - - if __name__ == "__main__": - print(not_gate(0)) - print(not_gate(1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/or_gate.py b/boolean_algebra/or_gate.py index aa7e6645e33f..0fd4e5a5dc18 100644 --- a/boolean_algebra/or_gate.py +++ b/boolean_algebra/or_gate.py @@ -29,18 +29,7 @@ def or_gate(input_1: int, input_2: int) -> int: return int((input_1, input_2).count(1) != 0) -def test_or_gate() -> None: - """ - Tests the or_gate function - """ - assert or_gate(0, 0) == 0 - assert or_gate(0, 1) == 1 - assert or_gate(1, 0) == 1 - assert or_gate(1, 1) == 1 - - if __name__ == "__main__": - print(or_gate(0, 1)) - print(or_gate(1, 0)) - print(or_gate(0, 0)) - print(or_gate(1, 1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/xnor_gate.py b/boolean_algebra/xnor_gate.py index 45ab2700ec35..05b756da2960 100644 --- a/boolean_algebra/xnor_gate.py +++ b/boolean_algebra/xnor_gate.py @@ -31,18 +31,7 @@ def xnor_gate(input_1: int, input_2: int) -> int: return 1 if input_1 == input_2 else 0 -def test_xnor_gate() -> None: - """ - Tests the xnor_gate function - """ - assert xnor_gate(0, 0) == 1 - assert xnor_gate(0, 1) == 0 - assert xnor_gate(1, 0) == 0 - assert xnor_gate(1, 1) == 1 - - if __name__ == "__main__": - print(xnor_gate(0, 0)) - print(xnor_gate(0, 1)) - print(xnor_gate(1, 0)) - print(xnor_gate(1, 1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/xor_gate.py b/boolean_algebra/xor_gate.py index db4f5b45c3c6..f3922e426e3d 100644 --- a/boolean_algebra/xor_gate.py +++ b/boolean_algebra/xor_gate.py @@ -31,16 +31,7 @@ def xor_gate(input_1: int, input_2: int) -> int: return (input_1, input_2).count(0) % 2 -def test_xor_gate() -> None: - """ - Tests the xor_gate function - """ - assert xor_gate(0, 0) == 0 - assert xor_gate(0, 1) == 1 - assert xor_gate(1, 0) == 1 - assert xor_gate(1, 1) == 0 - - if __name__ == "__main__": - print(xor_gate(0, 0)) - print(xor_gate(0, 1)) + import doctest + + doctest.testmod() From 3c14e6ae3aa6506ca8e5baa73321f3a04caf83d0 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:29:46 +0500 Subject: [PATCH 1149/1543] Refactoring and optimization of the lu_decomposition algorithm (#9231) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Replacing the generator with numpy vector operations from lu_decomposition. 
--- arithmetic_analysis/lu_decomposition.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index eaabce5449c5..094b20abfecc 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -88,15 +88,19 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray lower = np.zeros((rows, columns)) upper = np.zeros((rows, columns)) + + # in 'total', the necessary data is extracted through slices + # and the sum of the products is obtained. + for i in range(columns): for j in range(i): - total = sum(lower[i][k] * upper[k][j] for k in range(j)) + total = np.sum(lower[i, :i] * upper[:i, j]) if upper[j][j] == 0: raise ArithmeticError("No LU decomposition exists") lower[i][j] = (table[i][j] - total) / upper[j][j] lower[i][i] = 1 for j in range(i, columns): - total = sum(lower[i][k] * upper[k][j] for k in range(j)) + total = np.sum(lower[i, :i] * upper[:i, j]) upper[i][j] = table[i][j] - total return lower, upper From e9b3f20cec28b492b2e22e68ea61ec75ce3b9df8 Mon Sep 17 00:00:00 2001 From: hollowcrust <72879387+hollowcrust@users.noreply.github.com> Date: Mon, 16 Oct 2023 16:03:16 +0800 Subject: [PATCH 1150/1543] Delete dynamic_programming/longest_sub_array.py (#10073) --- dynamic_programming/longest_sub_array.py | 33 ------------------------ 1 file changed, 33 deletions(-) delete mode 100644 dynamic_programming/longest_sub_array.py diff --git a/dynamic_programming/longest_sub_array.py b/dynamic_programming/longest_sub_array.py deleted file mode 100644 index b477acf61e66..000000000000 --- a/dynamic_programming/longest_sub_array.py +++ /dev/null @@ -1,33 +0,0 @@ -""" -Author : Yvonne - -This is a pure Python implementation of Dynamic Programming solution to the - longest_sub_array problem. - -The problem is : -Given an array, to find the longest and continuous sub array and get the max sum of the - sub array in the given array. 
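# For reference (not part of this patch): the behaviour the deleted module
# provided, the maximum sum of a contiguous sub-array, in the compact
# Kadane-style form; the function name and test values are illustrative.
def max_subarray_sum(numbers: list[int]) -> int:
    # best_ending_here: best sum of a sub-array ending at the current element
    # best_so_far: best sum seen over any sub-array so far
    best_ending_here = best_so_far = numbers[0]
    for value in numbers[1:]:
        best_ending_here = max(value, best_ending_here + value)
        best_so_far = max(best_so_far, best_ending_here)
    return best_so_far

assert max_subarray_sum([1, -2, 3, 4, -1]) == 7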
-""" - - -class SubArray: - def __init__(self, arr): - # we need a list not a string, so do something to change the type - self.array = arr.split(",") - - def solve_sub_array(self): - rear = [int(self.array[0])] * len(self.array) - sum_value = [int(self.array[0])] * len(self.array) - for i in range(1, len(self.array)): - sum_value[i] = max( - int(self.array[i]) + sum_value[i - 1], int(self.array[i]) - ) - rear[i] = max(sum_value[i], rear[i - 1]) - return rear[len(self.array) - 1] - - -if __name__ == "__main__": - whole_array = input("please input some numbers:") - array = SubArray(whole_array) - re = array.solve_sub_array() - print(("the results is:", re)) From 96f81770d7e047f24c3203e913bf346754936330 Mon Sep 17 00:00:00 2001 From: Praful Katare <47990928+Kpraful@users.noreply.github.com> Date: Mon, 16 Oct 2023 13:43:34 +0530 Subject: [PATCH 1151/1543] Adds Doc test in depth_first_search_2.py (#10094) * Adds Doc test in depth_first_search_2.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes depth_first_search_2.py formatting * Cleanup --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- graphs/depth_first_search_2.py | 80 ++++++++++++++++++++++++++++++---- 1 file changed, 71 insertions(+), 9 deletions(-) diff --git a/graphs/depth_first_search_2.py b/graphs/depth_first_search_2.py index 3072d527c1c7..5ff13af33168 100644 --- a/graphs/depth_first_search_2.py +++ b/graphs/depth_first_search_2.py @@ -9,12 +9,44 @@ def __init__(self): # for printing the Graph vertices def print_graph(self) -> None: + """ + Print the graph vertices. + + Example: + >>> g = Graph() + >>> g.add_edge(0, 1) + >>> g.add_edge(0, 2) + >>> g.add_edge(1, 2) + >>> g.add_edge(2, 0) + >>> g.add_edge(2, 3) + >>> g.add_edge(3, 3) + >>> g.print_graph() + {0: [1, 2], 1: [2], 2: [0, 3], 3: [3]} + 0 -> 1 -> 2 + 1 -> 2 + 2 -> 0 -> 3 + 3 -> 3 + """ print(self.vertex) for i in self.vertex: print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]])) # for adding the edge between two vertices def add_edge(self, from_vertex: int, to_vertex: int) -> None: + """ + Add an edge between two vertices. + + :param from_vertex: The source vertex. + :param to_vertex: The destination vertex. + + Example: + >>> g = Graph() + >>> g.add_edge(0, 1) + >>> g.add_edge(0, 2) + >>> g.print_graph() + {0: [1, 2]} + 0 -> 1 -> 2 + """ # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(to_vertex) @@ -23,6 +55,21 @@ def add_edge(self, from_vertex: int, to_vertex: int) -> None: self.vertex[from_vertex] = [to_vertex] def dfs(self) -> None: + """ + Perform depth-first search (DFS) traversal on the graph + and print the visited vertices. + + Example: + >>> g = Graph() + >>> g.add_edge(0, 1) + >>> g.add_edge(0, 2) + >>> g.add_edge(1, 2) + >>> g.add_edge(2, 0) + >>> g.add_edge(2, 3) + >>> g.add_edge(3, 3) + >>> g.dfs() + 0 1 2 3 + """ # visited array for storing already visited nodes visited = [False] * len(self.vertex) @@ -32,18 +79,41 @@ def dfs(self) -> None: self.dfs_recursive(i, visited) def dfs_recursive(self, start_vertex: int, visited: list) -> None: + """ + Perform a recursive depth-first search (DFS) traversal on the graph. + + :param start_vertex: The starting vertex for the traversal. + :param visited: A list to track visited vertices. 
+ + Example: + >>> g = Graph() + >>> g.add_edge(0, 1) + >>> g.add_edge(0, 2) + >>> g.add_edge(1, 2) + >>> g.add_edge(2, 0) + >>> g.add_edge(2, 3) + >>> g.add_edge(3, 3) + >>> visited = [False] * len(g.vertex) + >>> g.dfs_recursive(0, visited) + 0 1 2 3 + """ # mark start vertex as visited visited[start_vertex] = True - print(start_vertex, end=" ") + print(start_vertex, end="") # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: + print(" ", end="") self.dfs_recursive(i, visited) if __name__ == "__main__": + import doctest + + doctest.testmod() + g = Graph() g.add_edge(0, 1) g.add_edge(0, 2) @@ -55,11 +125,3 @@ def dfs_recursive(self, start_vertex: int, visited: list) -> None: g.print_graph() print("DFS:") g.dfs() - - # OUTPUT: - # 0 -> 1 -> 2 - # 1 -> 2 - # 2 -> 0 -> 3 - # 3 -> 3 - # DFS: - # 0 1 2 3 From 69707bf6939d63a93b0d4b278cc367c42a976c6d Mon Sep 17 00:00:00 2001 From: Dwarkadhish Kamthane <72198604+dwarka-9504@users.noreply.github.com> Date: Mon, 16 Oct 2023 13:51:03 +0530 Subject: [PATCH 1152/1543] Minimization of while loop in Armstrong Numbers (#9976) * Minimization of while loop in Armstrong Numbers The while loop is removed and simple length calculation is used so the task of minimization of while loop is achieved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/armstrong_numbers.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py index 26709b428b78..e1c25d4676c3 100644 --- a/maths/armstrong_numbers.py +++ b/maths/armstrong_numbers.py @@ -29,9 +29,7 @@ def armstrong_number(n: int) -> bool: number_of_digits = 0 temp = n # Calculation of digits of the number - while temp > 0: - number_of_digits += 1 - temp //= 10 + number_of_digits = len(str(n)) # Dividing number into separate digits and find Armstrong number temp = n while temp > 0: From 7acf4bf73b5a43bdb375f7a34da227bf6deeaf35 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 16 Oct 2023 16:16:09 +0200 Subject: [PATCH 1153/1543] Rename binary_tree_traversals.md to README.md (#10599) --- DIRECTORY.md | 3 +-- .../binary_tree/{binary_tree_traversals.md => README.md} | 0 2 files changed, 1 insertion(+), 2 deletions(-) rename data_structures/binary_tree/{binary_tree_traversals.md => README.md} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index cef1e06b78aa..65628be59a92 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -340,7 +340,6 @@ * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py) * [Longest Palindromic Subsequence](dynamic_programming/longest_palindromic_subsequence.py) - * [Longest Sub Array](dynamic_programming/longest_sub_array.py) * [Matrix Chain Multiplication](dynamic_programming/matrix_chain_multiplication.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) @@ -486,6 +485,7 @@ * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) * [Gas Station](greedy_methods/gas_station.py) + * [Minimum Coin Change](greedy_methods/minimum_coin_change.py) * [Minimum Waiting Time](greedy_methods/minimum_waiting_time.py) * [Optimal Merge 
Pattern](greedy_methods/optimal_merge_pattern.py) @@ -618,7 +618,6 @@ * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) - * [Greedy Coin Change](maths/greedy_coin_change.py) * [Hamming Numbers](maths/hamming_numbers.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) * [Harshad Numbers](maths/harshad_numbers.py) diff --git a/data_structures/binary_tree/binary_tree_traversals.md b/data_structures/binary_tree/README.md similarity index 100% rename from data_structures/binary_tree/binary_tree_traversals.md rename to data_structures/binary_tree/README.md From 3923e590d77979de31fabd4df34e69e8933e690d Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 16 Oct 2023 16:17:48 +0200 Subject: [PATCH 1154/1543] Tree_sort.py: Disable slow doctest (#10584) --- sorts/tree_sort.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sorts/tree_sort.py b/sorts/tree_sort.py index e63a3253ba19..dc95856f44c8 100644 --- a/sorts/tree_sort.py +++ b/sorts/tree_sort.py @@ -52,8 +52,9 @@ def tree_sort(arr: list[int]) -> tuple[int, ...]: (-4, 2, 5, 7, 9) >>> tree_sort([5, 6, 1, -1, 4, 37, 2, 7]) (-1, 1, 2, 4, 5, 6, 7, 37) - >>> tree_sort(range(10, -10, -1)) == tuple(sorted(range(10, -10, -1))) - True + + # >>> tree_sort(range(10, -10, -1)) == tuple(sorted(range(10, -10, -1))) + # True """ if len(arr) == 0: return tuple(arr) From c15dda405a26bd9cb1554a43598c4c85a6320d4c Mon Sep 17 00:00:00 2001 From: Saswat Susmoy <72549122+Saswatsusmoy@users.noreply.github.com> Date: Mon, 16 Oct 2023 20:13:53 +0530 Subject: [PATCH 1155/1543] Update basic_binary_tree.py (#10388) * Update basic_binary_tree.py * Update basic_binary_tree.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../binary_tree/basic_binary_tree.py | 197 +++++++++--------- 1 file changed, 103 insertions(+), 94 deletions(-) diff --git a/data_structures/binary_tree/basic_binary_tree.py b/data_structures/binary_tree/basic_binary_tree.py index 65dccf247b51..0439413d95b5 100644 --- a/data_structures/binary_tree/basic_binary_tree.py +++ b/data_structures/binary_tree/basic_binary_tree.py @@ -1,101 +1,110 @@ from __future__ import annotations +from collections.abc import Iterator +from dataclasses import dataclass + +@dataclass class Node: - """ - A Node has data variable and pointers to Nodes to its left and right. - """ - - def __init__(self, data: int) -> None: - self.data = data - self.left: Node | None = None - self.right: Node | None = None - - -def display(tree: Node | None) -> None: # In Order traversal of the tree - """ - >>> root = Node(1) - >>> root.left = Node(0) - >>> root.right = Node(2) - >>> display(root) - 0 - 1 - 2 - >>> display(root.right) - 2 - """ - if tree: - display(tree.left) - print(tree.data) - display(tree.right) - - -def depth_of_tree(tree: Node | None) -> int: - """ - Recursive function that returns the depth of a binary tree. 
- - >>> root = Node(0) - >>> depth_of_tree(root) - 1 - >>> root.left = Node(0) - >>> depth_of_tree(root) - 2 - >>> root.right = Node(0) - >>> depth_of_tree(root) - 2 - >>> root.left.right = Node(0) - >>> depth_of_tree(root) - 3 - >>> depth_of_tree(root.left) - 2 - """ - return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0 - - -def is_full_binary_tree(tree: Node) -> bool: - """ - Returns True if this is a full binary tree - - >>> root = Node(0) - >>> is_full_binary_tree(root) - True - >>> root.left = Node(0) - >>> is_full_binary_tree(root) - False - >>> root.right = Node(0) - >>> is_full_binary_tree(root) - True - >>> root.left.left = Node(0) - >>> is_full_binary_tree(root) - False - >>> root.right.right = Node(0) - >>> is_full_binary_tree(root) - False - """ - if not tree: - return True - if tree.left and tree.right: - return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right) - else: - return not tree.left and not tree.right - - -def main() -> None: # Main function for testing. - tree = Node(1) - tree.left = Node(2) - tree.right = Node(3) - tree.left.left = Node(4) - tree.left.right = Node(5) - tree.left.right.left = Node(6) - tree.right.left = Node(7) - tree.right.left.left = Node(8) - tree.right.left.left.right = Node(9) - - print(is_full_binary_tree(tree)) - print(depth_of_tree(tree)) - print("Tree is: ") - display(tree) + data: int + left: Node | None = None + right: Node | None = None + + def __iter__(self) -> Iterator[int]: + if self.left: + yield from self.left + yield self.data + if self.right: + yield from self.right + + def __len__(self) -> int: + return sum(1 for _ in self) + + def is_full(self) -> bool: + if not self or (not self.left and not self.right): + return True + if self.left and self.right: + return self.left.is_full() and self.right.is_full() + return False + + +@dataclass +class BinaryTree: + root: Node + + def __iter__(self) -> Iterator[int]: + return iter(self.root) + + def __len__(self) -> int: + return len(self.root) + + @classmethod + def small_tree(cls) -> BinaryTree: + """ + Return a small binary tree with 3 nodes. + >>> binary_tree = BinaryTree.small_tree() + >>> len(binary_tree) + 3 + >>> list(binary_tree) + [1, 2, 3] + """ + binary_tree = BinaryTree(Node(2)) + binary_tree.root.left = Node(1) + binary_tree.root.right = Node(3) + return binary_tree + + @classmethod + def medium_tree(cls) -> BinaryTree: + """ + Return a medium binary tree with 3 nodes. 
+ >>> binary_tree = BinaryTree.medium_tree() + >>> len(binary_tree) + 7 + >>> list(binary_tree) + [1, 2, 3, 4, 5, 6, 7] + """ + binary_tree = BinaryTree(Node(4)) + binary_tree.root.left = two = Node(2) + two.left = Node(1) + two.right = Node(3) + binary_tree.root.right = five = Node(5) + five.right = six = Node(6) + six.right = Node(7) + return binary_tree + + def depth(self) -> int: + """ + Returns the depth of the tree + + >>> BinaryTree(Node(1)).depth() + 1 + >>> BinaryTree.small_tree().depth() + 2 + >>> BinaryTree.medium_tree().depth() + 4 + """ + return self._depth(self.root) + + def _depth(self, node: Node | None) -> int: # noqa: UP007 + if not node: + return 0 + return 1 + max(self._depth(node.left), self._depth(node.right)) + + def is_full(self) -> bool: + """ + Returns True if the tree is full + + >>> BinaryTree(Node(1)).is_full() + True + >>> BinaryTree.small_tree().is_full() + True + >>> BinaryTree.medium_tree().is_full() + False + """ + return self.root.is_full() if __name__ == "__main__": - main() + import doctest + + doctest.testmod() From 5a1305b6fe98808bf534c54e12ac64c1e4e4ce0f Mon Sep 17 00:00:00 2001 From: ivan53 Date: Mon, 16 Oct 2023 07:48:26 -0700 Subject: [PATCH 1156/1543] Fix benchmark to test with the provided number instead on 25 (#10587) --- bit_manipulation/count_number_of_one_bits.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bit_manipulation/count_number_of_one_bits.py b/bit_manipulation/count_number_of_one_bits.py index a1687503a383..f0c9f927620a 100644 --- a/bit_manipulation/count_number_of_one_bits.py +++ b/bit_manipulation/count_number_of_one_bits.py @@ -70,11 +70,13 @@ def do_benchmark(number: int) -> None: setup = "import __main__ as z" print(f"Benchmark when {number = }:") print(f"{get_set_bits_count_using_modulo_operator(number) = }") - timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup) + timing = timeit( + f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup + ) print(f"timeit() runs in {timing} seconds") print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }") timing = timeit( - "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", + f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup, ) print(f"timeit() runs in {timing} seconds") From 778e2010d6ae89c61a93672e49b86041b6ca1108 Mon Sep 17 00:00:00 2001 From: Vinayak Upadhyay Date: Mon, 16 Oct 2023 22:16:44 +0530 Subject: [PATCH 1157/1543] Added functionality to calculate the diameter of given binary tree (#10526) * Added code to find diameter of given binary tree * Modified diameter_of_binary_tree file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update diameter_of_binary_tree.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update diameter_of_binary_tree.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/diameter_of_binary_tree.py | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 data_structures/binary_tree/diameter_of_binary_tree.py diff --git a/data_structures/binary_tree/diameter_of_binary_tree.py b/data_structures/binary_tree/diameter_of_binary_tree.py new file mode 100644 index 000000000000..bbe70b028d24 --- /dev/null +++ b/data_structures/binary_tree/diameter_of_binary_tree.py @@ -0,0 +1,72 @@ +""" +The diameter/width of a 
tree is defined as the number of nodes on the longest path +between two end nodes. +""" +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass +class Node: + data: int + left: Node | None = None + right: Node | None = None + + def depth(self) -> int: + """ + >>> root = Node(1) + >>> root.depth() + 1 + >>> root.left = Node(2) + >>> root.depth() + 2 + >>> root.left.depth() + 1 + >>> root.right = Node(3) + >>> root.depth() + 2 + """ + left_depth = self.left.depth() if self.left else 0 + right_depth = self.right.depth() if self.right else 0 + return max(left_depth, right_depth) + 1 + + def diameter(self) -> int: + """ + >>> root = Node(1) + >>> root.diameter() + 1 + >>> root.left = Node(2) + >>> root.diameter() + 2 + >>> root.left.diameter() + 1 + >>> root.right = Node(3) + >>> root.diameter() + 3 + """ + left_depth = self.left.depth() if self.left else 0 + right_depth = self.right.depth() if self.right else 0 + return left_depth + right_depth + 1 + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + root = Node(1) + root.left = Node(2) + root.right = Node(3) + root.left.left = Node(4) + root.left.right = Node(5) + r""" + Constructed binary tree is + 1 + / \ + 2 3 + / \ + 4 5 + """ + print(f"{root.diameter() = }") # 4 + print(f"{root.left.diameter() = }") # 3 + print(f"{root.right.diameter() = }") # 1 From 1e468c1028e407ea38bd7e9511dc0f3d0d45a6e0 Mon Sep 17 00:00:00 2001 From: halfhearted <99018821+Arunsiva003@users.noreply.github.com> Date: Mon, 16 Oct 2023 22:42:33 +0530 Subject: [PATCH 1158/1543] Floor and ceil in Binary search tree added (#10432) * earliest deadline first scheduling algo added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * earliest deadline first scheduling algo added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ceil and floor and bst * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ceil and floor and bst 2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ceil and floor and bst 3 * Update and rename floor_ceil_in_bst.py to floor_and_ceiling.py * Delete scheduling/shortest_deadline_first.py --------- Co-authored-by: ArunSiva Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/floor_and_ceiling.py | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 data_structures/binary_tree/floor_and_ceiling.py diff --git a/data_structures/binary_tree/floor_and_ceiling.py b/data_structures/binary_tree/floor_and_ceiling.py new file mode 100644 index 000000000000..f8a1adbd967b --- /dev/null +++ b/data_structures/binary_tree/floor_and_ceiling.py @@ -0,0 +1,87 @@ +""" +In a binary search tree (BST): +* The floor of key 'k' is the maximum value that is smaller than or equal to 'k'. +* The ceiling of key 'k' is the minimum value that is greater than or equal to 'k'. 
+ +Reference: +https://bit.ly/46uB0a2 + +Author : Arunkumar +Date : 14th October 2023 +""" +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass + + +@dataclass +class Node: + key: int + left: Node | None = None + right: Node | None = None + + def __iter__(self) -> Iterator[int]: + if self.left: + yield from self.left + yield self.key + if self.right: + yield from self.right + + def __len__(self) -> int: + return sum(1 for _ in self) + + +def floor_ceiling(root: Node | None, key: int) -> tuple[int | None, int | None]: + """ + Find the floor and ceiling values for a given key in a Binary Search Tree (BST). + + Args: + root: The root of the binary search tree. + key: The key for which to find the floor and ceiling. + + Returns: + A tuple containing the floor and ceiling values, respectively. + + Examples: + >>> root = Node(10) + >>> root.left = Node(5) + >>> root.right = Node(20) + >>> root.left.left = Node(3) + >>> root.left.right = Node(7) + >>> root.right.left = Node(15) + >>> root.right.right = Node(25) + >>> tuple(root) + (3, 5, 7, 10, 15, 20, 25) + >>> floor_ceiling(root, 8) + (7, 10) + >>> floor_ceiling(root, 14) + (10, 15) + >>> floor_ceiling(root, -1) + (None, 3) + >>> floor_ceiling(root, 30) + (25, None) + """ + floor_val = None + ceiling_val = None + + while root: + if root.key == key: + floor_val = root.key + ceiling_val = root.key + break + + if key < root.key: + ceiling_val = root.key + root = root.left + else: + floor_val = root.key + root = root.right + + return floor_val, ceiling_val + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 922bbee80ce292ca27eee33d38e82ecf73e33dcd Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 20:23:33 +0200 Subject: [PATCH 1159/1543] [pre-commit.ci] pre-commit autoupdate (#10613) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/abravalheri/validate-pyproject: v0.14 → v0.15](https://github.com/abravalheri/validate-pyproject/compare/v0.14...v0.15) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 84f4a7770d00..b3def463ded2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -46,7 +46,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.14 + rev: v0.15 hooks: - id: validate-pyproject diff --git a/DIRECTORY.md b/DIRECTORY.md index 65628be59a92..d878f1c79a2d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -192,10 +192,12 @@ * [Binary Tree Node Sum](data_structures/binary_tree/binary_tree_node_sum.py) * [Binary Tree Path Sum](data_structures/binary_tree/binary_tree_path_sum.py) * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) + * [Diameter Of Binary Tree](data_structures/binary_tree/diameter_of_binary_tree.py) * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py) * [Distribute Coins](data_structures/binary_tree/distribute_coins.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) * [Flatten Binarytree To 
Linkedlist](data_structures/binary_tree/flatten_binarytree_to_linkedlist.py) + * [Floor And Ceiling](data_structures/binary_tree/floor_and_ceiling.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) * [Is Bst](data_structures/binary_tree/is_bst.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) From fcea18c9f0b68e2ba35c8f91bf0702d7c727c4df Mon Sep 17 00:00:00 2001 From: Adarsh Sidnal <97141741+Adarshsidnal@users.noreply.github.com> Date: Tue, 17 Oct 2023 04:26:14 +0530 Subject: [PATCH 1160/1543] Added an algorithm transfrom bst to greater sum tree (#9777) * Added an algorithm transfrom bst to greater sum tree * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename transform_bst_sum_tree.py to is_sum_tree.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/binary_tree/is_sum_tree.py | 161 +++++++++++++++++++++ 1 file changed, 161 insertions(+) create mode 100644 data_structures/binary_tree/is_sum_tree.py diff --git a/data_structures/binary_tree/is_sum_tree.py b/data_structures/binary_tree/is_sum_tree.py new file mode 100644 index 000000000000..3f9cf1d560a6 --- /dev/null +++ b/data_structures/binary_tree/is_sum_tree.py @@ -0,0 +1,161 @@ +""" +Is a binary tree a sum tree where the value of every non-leaf node is equal to the sum +of the values of its left and right subtrees? +https://www.geeksforgeeks.org/check-if-a-given-binary-tree-is-sumtree +""" +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass + + +@dataclass +class Node: + data: int + left: Node | None = None + right: Node | None = None + + def __iter__(self) -> Iterator[int]: + """ + >>> root = Node(2) + >>> list(root) + [2] + >>> root.left = Node(1) + >>> tuple(root) + (1, 2) + """ + if self.left: + yield from self.left + yield self.data + if self.right: + yield from self.right + + def __len__(self) -> int: + """ + >>> root = Node(2) + >>> len(root) + 1 + >>> root.left = Node(1) + >>> len(root) + 2 + """ + return sum(1 for _ in self) + + @property + def is_sum_node(self) -> bool: + """ + >>> root = Node(3) + >>> root.is_sum_node + True + >>> root.left = Node(1) + >>> root.is_sum_node + False + >>> root.right = Node(2) + >>> root.is_sum_node + True + """ + if not self.left and not self.right: + return True # leaf nodes are considered sum nodes + left_sum = sum(self.left) if self.left else 0 + right_sum = sum(self.right) if self.right else 0 + return all( + ( + self.data == left_sum + right_sum, + self.left.is_sum_node if self.left else True, + self.right.is_sum_node if self.right else True, + ) + ) + + +@dataclass +class BinaryTree: + root: Node + + def __iter__(self) -> Iterator[int]: + """ + >>> list(BinaryTree.build_a_tree()) + [1, 2, 7, 11, 15, 29, 35, 40] + """ + return iter(self.root) + + def __len__(self) -> int: + """ + >>> len(BinaryTree.build_a_tree()) + 8 + """ + return len(self.root) + + def __str__(self) -> str: + """ + Returns a string representation of the inorder traversal of the binary tree. 
+ + >>> str(list(BinaryTree.build_a_tree())) + '[1, 2, 7, 11, 15, 29, 35, 40]' + """ + return str(list(self)) + + @property + def is_sum_tree(self) -> bool: + """ + >>> BinaryTree.build_a_tree().is_sum_tree + False + >>> BinaryTree.build_a_sum_tree().is_sum_tree + True + """ + return self.root.is_sum_node + + @classmethod + def build_a_tree(cls) -> BinaryTree: + r""" + Create a binary tree with the specified structure: + 11 + / \ + 2 29 + / \ / \ + 1 7 15 40 + \ + 35 + >>> list(BinaryTree.build_a_tree()) + [1, 2, 7, 11, 15, 29, 35, 40] + """ + tree = BinaryTree(Node(11)) + root = tree.root + root.left = Node(2) + root.right = Node(29) + root.left.left = Node(1) + root.left.right = Node(7) + root.right.left = Node(15) + root.right.right = Node(40) + root.right.right.left = Node(35) + return tree + + @classmethod + def build_a_sum_tree(cls) -> BinaryTree: + r""" + Create a binary tree with the specified structure: + 26 + / \ + 10 3 + / \ \ + 4 6 3 + >>> list(BinaryTree.build_a_sum_tree()) + [4, 10, 6, 26, 3, 3] + """ + tree = BinaryTree(Node(26)) + root = tree.root + root.left = Node(10) + root.right = Node(3) + root.left.left = Node(4) + root.left.right = Node(6) + root.right.right = Node(3) + return tree + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + tree = BinaryTree.build_a_tree() + print(f"{tree} has {len(tree)} nodes and {tree.is_sum_tree = }.") + tree = BinaryTree.build_a_sum_tree() + print(f"{tree} has {len(tree)} nodes and {tree.is_sum_tree = }.") From 5f629b60499cfb3ac27f6520bf947764b5b45c28 Mon Sep 17 00:00:00 2001 From: Sandeepa Dilshan Alagiyawanna <108791571+SandeepaDilshanAlagiyawanna@users.noreply.github.com> Date: Tue, 17 Oct 2023 04:47:49 +0530 Subject: [PATCH 1161/1543] Optimize and_gate and nand_gate (#10591) * Added more optimized sudoku solver algorithm * Added more optimized sudoku solver algorithm and File Renamed * and_gate is Optimized * and_gate is Optimized * and_gate is Optimized * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- boolean_algebra/and_gate.py | 2 +- boolean_algebra/nand_gate.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/boolean_algebra/and_gate.py b/boolean_algebra/and_gate.py index f0fd45c9f81e..6ae66b5b0a77 100644 --- a/boolean_algebra/and_gate.py +++ b/boolean_algebra/and_gate.py @@ -29,7 +29,7 @@ def and_gate(input_1: int, input_2: int) -> int: >>> and_gate(1, 1) 1 """ - return int((input_1, input_2).count(0) == 0) + return int(input_1 and input_2) if __name__ == "__main__": diff --git a/boolean_algebra/nand_gate.py b/boolean_algebra/nand_gate.py index 80f9d12db89a..ea7a6815dcc9 100644 --- a/boolean_algebra/nand_gate.py +++ b/boolean_algebra/nand_gate.py @@ -27,7 +27,7 @@ def nand_gate(input_1: int, input_2: int) -> int: >>> nand_gate(1, 1) 0 """ - return int((input_1, input_2).count(0) != 0) + return int(not (input_1 and input_2)) if __name__ == "__main__": From b5786c87d820cc4d68707731df0812507063bf8b Mon Sep 17 00:00:00 2001 From: aryandgandhi <44215148+aryandgandhi@users.noreply.github.com> Date: Mon, 16 Oct 2023 20:25:07 -0500 Subject: [PATCH 1162/1543] update segmenttree docstrings Fixes #9943 (#9975) * update docstrings * update docstrings * update docstrings --- data_structures/binary_tree/segment_tree.py | 41 ++++++++++++++++++++- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git 
a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index 5f822407d8cb..3b0b32946f6e 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -3,7 +3,8 @@ class SegmentTree: def __init__(self, a): - self.N = len(a) + self.A = a + self.N = len(self.A) self.st = [0] * ( 4 * self.N ) # approximate the overall size of segment tree with array N @@ -11,14 +12,32 @@ def __init__(self, a): self.build(1, 0, self.N - 1) def left(self, idx): + """ + Returns the left child index for a given index in a binary tree. + + >>> s = SegmentTree([1, 2, 3]) + >>> s.left(1) + 2 + >>> s.left(2) + 4 + """ return idx * 2 def right(self, idx): + """ + Returns the right child index for a given index in a binary tree. + + >>> s = SegmentTree([1, 2, 3]) + >>> s.right(1) + 3 + >>> s.right(2) + 5 + """ return idx * 2 + 1 def build(self, idx, l, r): # noqa: E741 if l == r: - self.st[idx] = A[l] + self.st[idx] = self.A[l] else: mid = (l + r) // 2 self.build(self.left(idx), l, mid) @@ -26,6 +45,15 @@ def build(self, idx, l, r): # noqa: E741 self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)]) def update(self, a, b, val): + """ + Update the values in the segment tree in the range [a,b] with the given value. + + >>> s = SegmentTree([1, 2, 3, 4, 5]) + >>> s.update(2, 4, 10) + True + >>> s.query(1, 5) + 10 + """ return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val) def update_recursive(self, idx, l, r, a, b, val): # noqa: E741 @@ -44,6 +72,15 @@ def update_recursive(self, idx, l, r, a, b, val): # noqa: E741 return True def query(self, a, b): + """ + Query the maximum value in the range [a,b]. + + >>> s = SegmentTree([1, 2, 3, 4, 5]) + >>> s.query(1, 3) + 3 + >>> s.query(1, 5) + 5 + """ return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1) def query_recursive(self, idx, l, r, a, b): # noqa: E741 From 00165a5fb2d125c7e6ab33e424bdcac8dec2b5b6 Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Tue, 17 Oct 2023 12:06:12 +0530 Subject: [PATCH 1163/1543] Added test cases to join.py (#10629) * Added test cases to join.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- strings/join.py | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/strings/join.py b/strings/join.py index 739856c1aa93..5c02f65a20ce 100644 --- a/strings/join.py +++ b/strings/join.py @@ -1,10 +1,21 @@ """ -Program to join a list of strings with a given separator +Program to join a list of strings with a separator """ def join(separator: str, separated: list[str]) -> str: """ + Joins a list of strings using a separator + and returns the result. + + :param separator: Separator to be used + for joining the strings. + :param separated: List of strings to be joined. + + :return: Joined string with the specified separator. + + Examples: + >>> join("", ["a", "b", "c", "d"]) 'abcd' >>> join("#", ["a", "b", "c", "d"]) @@ -13,16 +24,27 @@ def join(separator: str, separated: list[str]) -> str: 'a' >>> join(" ", ["You", "are", "amazing!"]) 'You are amazing!' + + This example should raise an + exception for non-string elements: >>> join("#", ["a", "b", "c", 1]) Traceback (most recent call last): ... 
- Exception: join() accepts only strings to be joined + Exception: join() accepts only strings + + Additional test case with a different separator: + >>> join("-", ["apple", "banana", "cherry"]) + 'apple-banana-cherry' """ + joined = "" for word_or_phrase in separated: if not isinstance(word_or_phrase, str): - raise Exception("join() accepts only strings to be joined") + raise Exception("join() accepts only strings") joined += word_or_phrase + separator + + # Remove the trailing separator + # by stripping it from the result return joined.strip(separator) From c6c3bd339947eb6f10f77754f34a49915799c82f Mon Sep 17 00:00:00 2001 From: Kushagra Agarwal <94402194+developer-kush@users.noreply.github.com> Date: Tue, 17 Oct 2023 12:40:24 +0530 Subject: [PATCH 1164/1543] Hacktoberfest: Added Octal Number to Hexadecimal Number Conversion Algorithm (#10533) * Added Octal to Hexadecimal Conversion program under 'conversions' directory * Update conversions/octal_to_hexadecimal.py fix: minor improvement to directly return hexadecimal value Co-authored-by: Tianyi Zheng * Update conversions/octal_to_hexadecimal.py fix: improvement updates to octal to hexadecimal Co-authored-by: Tianyi Zheng * Update conversions/octal_to_hexadecimal.py fix: Readablility improvements to octal to hexadecimal convertor Co-authored-by: Tianyi Zheng * Update conversions/octal_to_hexadecimal.py fix: readability improvements in octal_to_hexadecimal.py Co-authored-by: Tianyi Zheng * Update conversions/octal_to_hexadecimal.py fix: readability improvements in octal_to_hexadecimal.py Co-authored-by: Tianyi Zheng * fix: Fixed all the errors in octal_to_hexadecimal.py after commiting suggested changes * fix: modified the prefix of hex numbers to the '0x' standard in octal_to_hexadecimal.py --------- Co-authored-by: Tianyi Zheng --- conversions/octal_to_hexadecimal.py | 65 +++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 conversions/octal_to_hexadecimal.py diff --git a/conversions/octal_to_hexadecimal.py b/conversions/octal_to_hexadecimal.py new file mode 100644 index 000000000000..0615d79b5c53 --- /dev/null +++ b/conversions/octal_to_hexadecimal.py @@ -0,0 +1,65 @@ +def octal_to_hex(octal: str) -> str: + """ + Convert an Octal number to Hexadecimal number. + For more information: https://en.wikipedia.org/wiki/Octal + + >>> octal_to_hex("100") + '0x40' + >>> octal_to_hex("235") + '0x9D' + >>> octal_to_hex(17) + Traceback (most recent call last): + ... + TypeError: Expected a string as input + >>> octal_to_hex("Av") + Traceback (most recent call last): + ... + ValueError: Not a Valid Octal Number + >>> octal_to_hex("") + Traceback (most recent call last): + ... 
+ ValueError: Empty string was passed to the function + """ + + if not isinstance(octal, str): + raise TypeError("Expected a string as input") + if octal.startswith("0o"): + octal = octal[2:] + if octal == "": + raise ValueError("Empty string was passed to the function") + if any(char not in "01234567" for char in octal): + raise ValueError("Not a Valid Octal Number") + + decimal = 0 + for char in octal: + decimal <<= 3 + decimal |= int(char) + + hex_char = "0123456789ABCDEF" + + revhex = "" + while decimal: + revhex += hex_char[decimal & 15] + decimal >>= 4 + + return "0x" + revhex[::-1] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + nums = ["030", "100", "247", "235", "007"] + + ## Main Tests + + for num in nums: + hexadecimal = octal_to_hex(num) + expected = "0x" + hex(int(num, 8))[2:].upper() + + assert hexadecimal == expected + + print(f"Hex of '0o{num}' is: {hexadecimal}") + print(f"Expected was: {expected}") + print("---") From ac3bd1032c02ff5c2f6eb16f2bf5a1b24d106d1c Mon Sep 17 00:00:00 2001 From: ojas wani <52542740+ojas-wani@users.noreply.github.com> Date: Tue, 17 Oct 2023 02:25:25 -0700 Subject: [PATCH 1165/1543] Add matrix_multiplication (#10045) * added laplacian_filter file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * required changes to laplacian file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian_filter.py * update laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * changed laplacian_filter.py * changed laplacian_filter.py * add matrix_multiplication.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update matrix_multiplication * update matrix_multiplication * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * make changes * [pre-commit.ci] auto fixes from 
pre-commit.com hooks for more information, see https://pre-commit.ci * update * update * updates * resolve conflict * add doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * make changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian.py * add doctests * more doctest added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * try to resolve ruff error * try to reslve ruff error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update doctest * attemp - resolve ruff error * resolve build error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolve build issue * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update build * doctest update * update doctest * update doctest * update doctest * fix ruff error * file location changed * Delete digital_image_processing/filters/laplacian_filter.py * Create laplacian_filter.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- matrix/matrix_multiplication_recursion.py | 180 ++++++++++++++++++++++ 1 file changed, 180 insertions(+) create mode 100644 matrix/matrix_multiplication_recursion.py diff --git a/matrix/matrix_multiplication_recursion.py b/matrix/matrix_multiplication_recursion.py new file mode 100644 index 000000000000..287142480ce7 --- /dev/null +++ b/matrix/matrix_multiplication_recursion.py @@ -0,0 +1,180 @@ +# @Author : ojas-wani +# @File : matrix_multiplication_recursion.py +# @Date : 10/06/2023 + + +""" +Perform matrix multiplication using a recursive algorithm. +https://en.wikipedia.org/wiki/Matrix_multiplication +""" +# type Matrix = list[list[int]] # psf/black currenttly fails on this line +Matrix = list[list[int]] + +matrix_1_to_4 = [ + [1, 2], + [3, 4], +] + +matrix_5_to_8 = [ + [5, 6], + [7, 8], +] + +matrix_5_to_9_high = [ + [5, 6], + [7, 8], + [9], +] + +matrix_5_to_9_wide = [ + [5, 6], + [7, 8, 9], +] + +matrix_count_up = [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], +] + +matrix_unordered = [ + [5, 8, 1, 2], + [6, 7, 3, 0], + [4, 5, 9, 1], + [2, 6, 10, 14], +] +matrices = ( + matrix_1_to_4, + matrix_5_to_8, + matrix_5_to_9_high, + matrix_5_to_9_wide, + matrix_count_up, + matrix_unordered, +) + + +def is_square(matrix: Matrix) -> bool: + """ + >>> is_square([]) + True + >>> is_square(matrix_1_to_4) + True + >>> is_square(matrix_5_to_9_high) + False + """ + len_matrix = len(matrix) + return all(len(row) == len_matrix for row in matrix) + + +def matrix_multiply(matrix_a: Matrix, matrix_b: Matrix) -> Matrix: + """ + >>> matrix_multiply(matrix_1_to_4, matrix_5_to_8) + [[19, 22], [43, 50]] + """ + return [ + [sum(a * b for a, b in zip(row, col)) for col in zip(*matrix_b)] + for row in matrix_a + ] + + +def matrix_multiply_recursive(matrix_a: Matrix, matrix_b: Matrix) -> Matrix: + """ + :param matrix_a: A square Matrix. + :param matrix_b: Another square Matrix with the same dimensions as matrix_a. + :return: Result of matrix_a * matrix_b. 
+ :raises ValueError: If the matrices cannot be multiplied. + + >>> matrix_multiply_recursive([], []) + [] + >>> matrix_multiply_recursive(matrix_1_to_4, matrix_5_to_8) + [[19, 22], [43, 50]] + >>> matrix_multiply_recursive(matrix_count_up, matrix_unordered) + [[37, 61, 74, 61], [105, 165, 166, 129], [173, 269, 258, 197], [241, 373, 350, 265]] + >>> matrix_multiply_recursive(matrix_1_to_4, matrix_5_to_9_wide) + Traceback (most recent call last): + ... + ValueError: Invalid matrix dimensions + >>> matrix_multiply_recursive(matrix_1_to_4, matrix_5_to_9_high) + Traceback (most recent call last): + ... + ValueError: Invalid matrix dimensions + >>> matrix_multiply_recursive(matrix_1_to_4, matrix_count_up) + Traceback (most recent call last): + ... + ValueError: Invalid matrix dimensions + """ + if not matrix_a or not matrix_b: + return [] + if not all( + (len(matrix_a) == len(matrix_b), is_square(matrix_a), is_square(matrix_b)) + ): + raise ValueError("Invalid matrix dimensions") + + # Initialize the result matrix with zeros + result = [[0] * len(matrix_b[0]) for _ in range(len(matrix_a))] + + # Recursive multiplication of matrices + def multiply( + i_loop: int, + j_loop: int, + k_loop: int, + matrix_a: Matrix, + matrix_b: Matrix, + result: Matrix, + ) -> None: + """ + :param matrix_a: A square Matrix. + :param matrix_b: Another square Matrix with the same dimensions as matrix_a. + :param result: Result matrix + :param i: Index used for iteration during multiplication. + :param j: Index used for iteration during multiplication. + :param k: Index used for iteration during multiplication. + >>> 0 > 1 # Doctests in inner functions are never run + True + """ + if i_loop >= len(matrix_a): + return + if j_loop >= len(matrix_b[0]): + return multiply(i_loop + 1, 0, 0, matrix_a, matrix_b, result) + if k_loop >= len(matrix_b): + return multiply(i_loop, j_loop + 1, 0, matrix_a, matrix_b, result) + result[i_loop][j_loop] += matrix_a[i_loop][k_loop] * matrix_b[k_loop][j_loop] + return multiply(i_loop, j_loop, k_loop + 1, matrix_a, matrix_b, result) + + # Perform the recursive matrix multiplication + multiply(0, 0, 0, matrix_a, matrix_b, result) + return result + + +if __name__ == "__main__": + from doctest import testmod + + failure_count, test_count = testmod() + if not failure_count: + matrix_a = matrices[0] + for matrix_b in matrices[1:]: + print("Multiplying:") + for row in matrix_a: + print(row) + print("By:") + for row in matrix_b: + print(row) + print("Result:") + try: + result = matrix_multiply_recursive(matrix_a, matrix_b) + for row in result: + print(row) + assert result == matrix_multiply(matrix_a, matrix_b) + except ValueError as e: + print(f"{e!r}") + print() + matrix_a = matrix_b + + print("Benchmark:") + from functools import partial + from timeit import timeit + + mytimeit = partial(timeit, globals=globals(), number=100_000) + for func in ("matrix_multiply", "matrix_multiply_recursive"): + print(f"{func:>25}(): {mytimeit(f'{func}(matrix_count_up, matrix_unordered)')}") From 72bd653e04a944f51ae6c047204b62d8a07db9d4 Mon Sep 17 00:00:00 2001 From: RaymondDashWu <33266041+RaymondDashWu@users.noreply.github.com> Date: Tue, 17 Oct 2023 07:57:33 -0700 Subject: [PATCH 1166/1543] Test cases for all_combinations (#10633) * [ADD] Test cases for all_combinations * [DEL] documentation reverted b/c redundant * Update all_combinations.py --------- Co-authored-by: Christian Clauss --- backtracking/all_combinations.py | 47 ++++++++++++++++++++++++++------ 1 file changed, 38 insertions(+), 9 deletions(-) diff 
--git a/backtracking/all_combinations.py b/backtracking/all_combinations.py index bde60f0328ba..ecbcc5882ec1 100644 --- a/backtracking/all_combinations.py +++ b/backtracking/all_combinations.py @@ -1,15 +1,40 @@ """ In this problem, we want to determine all possible combinations of k numbers out of 1 ... n. We use backtracking to solve this problem. - Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!))) + + Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!))), """ from __future__ import annotations +from itertools import combinations + + +def combination_lists(n: int, k: int) -> list[list[int]]: + """ + >>> combination_lists(n=4, k=2) + [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] + """ + return [list(x) for x in combinations(range(1, n + 1), k)] + def generate_all_combinations(n: int, k: int) -> list[list[int]]: """ >>> generate_all_combinations(n=4, k=2) [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] + >>> generate_all_combinations(n=0, k=0) + [[]] + >>> generate_all_combinations(n=10, k=-1) + Traceback (most recent call last): + ... + RecursionError: maximum recursion depth exceeded + >>> generate_all_combinations(n=-1, k=10) + [] + >>> generate_all_combinations(n=5, k=4) + [[1, 2, 3, 4], [1, 2, 3, 5], [1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]] + >>> from itertools import combinations + >>> all(generate_all_combinations(n, k) == combination_lists(n, k) + ... for n in range(1, 6) for k in range(1, 6)) + True """ result: list[list[int]] = [] @@ -34,13 +59,17 @@ def create_all_state( current_list.pop() -def print_all_state(total_list: list[list[int]]) -> None: - for i in total_list: - print(*i) +if __name__ == "__main__": + from doctest import testmod + testmod() + print(generate_all_combinations(n=4, k=2)) + tests = ((n, k) for n in range(1, 5) for k in range(1, 5)) + for n, k in tests: + print(n, k, generate_all_combinations(n, k) == combination_lists(n, k)) -if __name__ == "__main__": - n = 4 - k = 2 - total_list = generate_all_combinations(n, k) - print_all_state(total_list) + print("Benchmark:") + from timeit import timeit + + for func in ("combination_lists", "generate_all_combinations"): + print(f"{func:>25}(): {timeit(f'{func}(n=4, k = 2)', globals=globals())}") From 09c2b2d006e3ca217f2ef082d62a0c35560667ef Mon Sep 17 00:00:00 2001 From: Anubhavpandey27 <61093307+Anubhavpandey27@users.noreply.github.com> Date: Tue, 17 Oct 2023 22:37:40 +0530 Subject: [PATCH 1167/1543] Add arrays/sudoku_solver.py (#10623) * Create Sudoku_Solver Each of the digits 1-9 must occur exactly once in each row. Each of the digits 1-9 must occur exactly once in each column. Each of the digits 1-9 must occur exactly once in each of the 9 3x3 sub-boxes of the grid. The '.' character indicates empty cells. 
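The module added in the diff below exposes parse_grid(), solve() and display(); a minimal usage sketch, assuming the data_structures.arrays package is importable from the repository root:

from data_structures.arrays.sudoku_solver import display, grid1, solve

solution = solve(grid1)  # dict mapping squares such as "A1" to digits, or False
if solution:
    display(solution)  # prints the completed 9x9 grid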
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename Sudoku_Solver to sudoku_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sudoku_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/arrays/sudoku_solver.py | 220 ++++++++++++++++++++++++ 1 file changed, 220 insertions(+) create mode 100644 data_structures/arrays/sudoku_solver.py diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py new file mode 100644 index 000000000000..8d38bd7295ea --- /dev/null +++ b/data_structures/arrays/sudoku_solver.py @@ -0,0 +1,220 @@ +""" +Please do not modify this file! It is published at https://norvig.com/sudoku.html with +only minimal changes to work with modern versions of Python. If you have improvements, +please make them in a separate file. +""" +import random +import time + + +def cross(items_a, items_b): + "Cross product of elements in A and elements in B." + return [a + b for a in items_a for b in items_b] + + +digits = "123456789" +rows = "ABCDEFGHI" +cols = digits +squares = cross(rows, cols) +unitlist = ( + [cross(rows, c) for c in cols] + + [cross(r, cols) for r in rows] + + [cross(rs, cs) for rs in ("ABC", "DEF", "GHI") for cs in ("123", "456", "789")] +) +units = {s: [u for u in unitlist if s in u] for s in squares} +peers = {s: set(sum(units[s], [])) - {s} for s in squares} + + +def test(): + "A set of unit tests." + assert len(squares) == 81 + assert len(unitlist) == 27 + assert all(len(units[s]) == 3 for s in squares) + assert all(len(peers[s]) == 20 for s in squares) + assert units["C2"] == [ + ["A2", "B2", "C2", "D2", "E2", "F2", "G2", "H2", "I2"], + ["C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"], + ["A1", "A2", "A3", "B1", "B2", "B3", "C1", "C2", "C3"], + ] + # fmt: off + assert peers["C2"] == { + "A2", "B2", "D2", "E2", "F2", "G2", "H2", "I2", "C1", "C3", + "C4", "C5", "C6", "C7", "C8", "C9", "A1", "A3", "B1", "B3" + } + # fmt: on + print("All tests pass.") + + +def parse_grid(grid): + """Convert grid to a dict of possible values, {square: digits}, or + return False if a contradiction is detected.""" + ## To start, every square can be any digit; then assign values from the grid. + values = {s: digits for s in squares} + for s, d in grid_values(grid).items(): + if d in digits and not assign(values, s, d): + return False ## (Fail if we can't assign d to square s.) + return values + + +def grid_values(grid): + "Convert grid into a dict of {square: char} with '0' or '.' for empties." + chars = [c for c in grid if c in digits or c in "0."] + assert len(chars) == 81 + return dict(zip(squares, chars)) + + +def assign(values, s, d): + """Eliminate all the other values (except d) from values[s] and propagate. + Return values, except return False if a contradiction is detected.""" + other_values = values[s].replace(d, "") + if all(eliminate(values, s, d2) for d2 in other_values): + return values + else: + return False + + +def eliminate(values, s, d): + """Eliminate d from values[s]; propagate when values or places <= 2. 
+ Return values, except return False if a contradiction is detected.""" + if d not in values[s]: + return values ## Already eliminated + values[s] = values[s].replace(d, "") + ## (1) If a square s is reduced to one value d2, then eliminate d2 from the peers. + if len(values[s]) == 0: + return False ## Contradiction: removed last value + elif len(values[s]) == 1: + d2 = values[s] + if not all(eliminate(values, s2, d2) for s2 in peers[s]): + return False + ## (2) If a unit u is reduced to only one place for a value d, then put it there. + for u in units[s]: + dplaces = [s for s in u if d in values[s]] + if len(dplaces) == 0: + return False ## Contradiction: no place for this value + elif len(dplaces) == 1: + # d can only be in one place in unit; assign it there + if not assign(values, dplaces[0], d): + return False + return values + + +def display(values): + "Display these values as a 2-D grid." + width = 1 + max(len(values[s]) for s in squares) + line = "+".join(["-" * (width * 3)] * 3) + for r in rows: + print( + "".join( + values[r + c].center(width) + ("|" if c in "36" else "") for c in cols + ) + ) + if r in "CF": + print(line) + print() + + +def solve(grid): + return search(parse_grid(grid)) + + +def some(seq): + "Return some element of seq that is true." + for e in seq: + if e: + return e + return False + + +def search(values): + "Using depth-first search and propagation, try all possible values." + if values is False: + return False ## Failed earlier + if all(len(values[s]) == 1 for s in squares): + return values ## Solved! + ## Chose the unfilled square s with the fewest possibilities + n, s = min((len(values[s]), s) for s in squares if len(values[s]) > 1) + return some(search(assign(values.copy(), s, d)) for d in values[s]) + + +def solve_all(grids, name="", showif=0.0): + """Attempt to solve a sequence of grids. Report results. + When showif is a number of seconds, display puzzles that take longer. + When showif is None, don't display any puzzles.""" + + def time_solve(grid): + start = time.monotonic() + values = solve(grid) + t = time.monotonic() - start + ## Display puzzles that take long enough + if showif is not None and t > showif: + display(grid_values(grid)) + if values: + display(values) + print("(%.5f seconds)\n" % t) + return (t, solved(values)) + + times, results = zip(*[time_solve(grid) for grid in grids]) + if (n := len(grids)) > 1: + print( + "Solved %d of %d %s puzzles (avg %.2f secs (%d Hz), max %.2f secs)." + % (sum(results), n, name, sum(times) / n, n / sum(times), max(times)) + ) + + +def solved(values): + "A puzzle is solved if each unit is a permutation of the digits 1 to 9." + + def unitsolved(unit): + return {values[s] for s in unit} == set(digits) + + return values is not False and all(unitsolved(unit) for unit in unitlist) + + +def from_file(filename, sep="\n"): + "Parse a file into a list of strings, separated by sep." + return open(filename).read().strip().split(sep) # noqa: SIM115 + + +def random_puzzle(assignments=17): + """Make a random puzzle with N or more assignments. Restart on contradictions. + Note the resulting puzzle is not guaranteed to be solvable, but empirically + about 99.8% of them are solvable. Some have multiple solutions.""" + values = {s: digits for s in squares} + for s in shuffled(squares): + if not assign(values, s, random.choice(values[s])): + break + ds = [values[s] for s in squares if len(values[s]) == 1] + if len(ds) >= assignments and len(set(ds)) >= 8: + return "".join(values[s] if len(values[s]) == 1 else "." 
for s in squares) + return random_puzzle(assignments) ## Give up and make a new puzzle + + +def shuffled(seq): + "Return a randomly shuffled copy of the input sequence." + seq = list(seq) + random.shuffle(seq) + return seq + + +grid1 = ( + "003020600900305001001806400008102900700000008006708200002609500800203009005010300" +) +grid2 = ( + "4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......" +) +hard1 = ( + ".....6....59.....82....8....45........3........6..3.54...325..6.................." +) + +if __name__ == "__main__": + test() + # solve_all(from_file("easy50.txt", '========'), "easy", None) + # solve_all(from_file("top95.txt"), "hard", None) + # solve_all(from_file("hardest.txt"), "hardest", None) + solve_all([random_puzzle() for _ in range(99)], "random", 100.0) + for puzzle in (grid1, grid2): # , hard1): # Takes 22 sec to solve on my M1 Mac. + display(parse_grid(puzzle)) + start = time.monotonic() + solve(puzzle) + t = time.monotonic() - start + print("Solved: %.5f sec" % t) From 9de1c49fe13f009e08dcf5009a798bef43f2230b Mon Sep 17 00:00:00 2001 From: Marek Mazij <112333347+Mrk-Mzj@users.noreply.github.com> Date: Tue, 17 Oct 2023 20:24:16 +0200 Subject: [PATCH 1168/1543] feat: Polish ID (PESEL) checker added (#10618) * feat: Polish ID (PESEL) checker added * refactor: 'sum' variable renamed to 'subtotal' * style: typos * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- strings/is_polish_national_id.py | 92 ++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 strings/is_polish_national_id.py diff --git a/strings/is_polish_national_id.py b/strings/is_polish_national_id.py new file mode 100644 index 000000000000..8b463a24532a --- /dev/null +++ b/strings/is_polish_national_id.py @@ -0,0 +1,92 @@ +def is_polish_national_id(input_str: str) -> bool: + """ + Verification of the correctness of the PESEL number. + www-gov-pl.translate.goog/web/gov/czym-jest-numer-pesel?_x_tr_sl=auto&_x_tr_tl=en + + PESEL can start with 0, that's why we take str as input, + but convert it to int for some calculations. + + + >>> is_polish_national_id(123) + Traceback (most recent call last): + ... + ValueError: Expected str as input, found + + >>> is_polish_national_id("abc") + Traceback (most recent call last): + ... 
+ ValueError: Expected number as input + + >>> is_polish_national_id("02070803628") # correct PESEL + True + + >>> is_polish_national_id("02150803629") # wrong month + False + + >>> is_polish_national_id("02075503622") # wrong day + False + + >>> is_polish_national_id("-99012212349") # wrong range + False + + >>> is_polish_national_id("990122123499999") # wrong range + False + + >>> is_polish_national_id("02070803621") # wrong checksum + False + """ + + # check for invalid input type + if not isinstance(input_str, str): + msg = f"Expected str as input, found {type(input_str)}" + raise ValueError(msg) + + # check if input can be converted to int + try: + input_int = int(input_str) + except ValueError: + msg = "Expected number as input" + raise ValueError(msg) + + # check number range + if not 10100000 <= input_int <= 99923199999: + return False + + # check month correctness + month = int(input_str[2:4]) + + if ( + month not in range(1, 13) # year 1900-1999 + and month not in range(21, 33) # 2000-2099 + and month not in range(41, 53) # 2100-2199 + and month not in range(61, 73) # 2200-2299 + and month not in range(81, 93) # 1800-1899 + ): + return False + + # check day correctness + day = int(input_str[4:6]) + + if day not in range(1, 32): + return False + + # check the checksum + multipliers = [1, 3, 7, 9, 1, 3, 7, 9, 1, 3] + subtotal = 0 + + digits_to_check = str(input_str)[:-1] # cut off the checksum + + for index, digit in enumerate(digits_to_check): + # Multiply corresponding digits and multipliers. + # In case of a double-digit result, add only the last digit. + subtotal += (int(digit) * multipliers[index]) % 10 + + checksum = 10 - subtotal % 10 + + return checksum == input_int % 10 + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 9da6f48b46f41c6361416c259dcfec531fb39a01 Mon Sep 17 00:00:00 2001 From: Manmita Das <34617961+manmita@users.noreply.github.com> Date: Wed, 18 Oct 2023 04:07:57 +0530 Subject: [PATCH 1169/1543] Add binary_coded_decimal.py (#10656) * added decimal to bcd sequence * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated with fixes * Update and rename bcd_sequence.py to binary_coded_decimal.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update binary_coded_decimal.py * Update binary_coded_decimal.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- bit_manipulation/binary_coded_decimal.py | 29 ++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 bit_manipulation/binary_coded_decimal.py diff --git a/bit_manipulation/binary_coded_decimal.py b/bit_manipulation/binary_coded_decimal.py new file mode 100644 index 000000000000..676fd6d54fc5 --- /dev/null +++ b/bit_manipulation/binary_coded_decimal.py @@ -0,0 +1,29 @@ +def binary_coded_decimal(number: int) -> str: + """ + Find binary coded decimal (bcd) of integer base 10. + Each digit of the number is represented by a 4-bit binary. 
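+    For instance, 987 is encoded digit by digit as 9 -> 1001, 8 -> 1000,
+    7 -> 0111, which concatenates to '0b100110000111'; negative inputs are
+    clamped to zero via max(0, number), so -2 and -1 both yield '0b0000'.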
+ Example: + >>> binary_coded_decimal(-2) + '0b0000' + >>> binary_coded_decimal(-1) + '0b0000' + >>> binary_coded_decimal(0) + '0b0000' + >>> binary_coded_decimal(3) + '0b0011' + >>> binary_coded_decimal(2) + '0b0010' + >>> binary_coded_decimal(12) + '0b00010010' + >>> binary_coded_decimal(987) + '0b100110000111' + """ + return "0b" + "".join( + str(bin(int(digit)))[2:].zfill(4) for digit in str(max(0, number)) + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 788e4ede9bf4eb180e4b784268d98d657efbd9da Mon Sep 17 00:00:00 2001 From: Jai Vignesh J <108923524+Jaivignesh-afk@users.noreply.github.com> Date: Wed, 18 Oct 2023 04:20:57 +0530 Subject: [PATCH 1170/1543] Fix doctest power recursion (#10659) * Added doctests to power_using_recursion.py * Added doctest to power_using_recursion.py * Update power_using_recursion.py * Update power_using_recursion.py --------- Co-authored-by: Christian Clauss --- maths/power_using_recursion.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/maths/power_using_recursion.py b/maths/power_using_recursion.py index f82097f6d8ec..e82635ba0005 100644 --- a/maths/power_using_recursion.py +++ b/maths/power_using_recursion.py @@ -15,18 +15,35 @@ def power(base: int, exponent: int) -> float: """ - power(3, 4) + >>> power(3, 4) 81 >>> power(2, 0) 1 >>> all(power(base, exponent) == pow(base, exponent) ... for base in range(-10, 10) for exponent in range(10)) True + >>> power('a', 1) + 'a' + >>> power('a', 2) + Traceback (most recent call last): + ... + TypeError: can't multiply sequence by non-int of type 'str' + >>> power('a', 'b') + Traceback (most recent call last): + ... + TypeError: unsupported operand type(s) for -: 'str' and 'int' + >>> power(2, -1) + Traceback (most recent call last): + ... + RecursionError: maximum recursion depth exceeded """ return base * power(base, (exponent - 1)) if exponent else 1 if __name__ == "__main__": + from doctests import testmod + + testmod() print("Raise base to the power of exponent using recursion...") base = int(input("Enter the base: ").strip()) exponent = int(input("Enter the exponent: ").strip()) From 361f64c21d7d2528828e20e2eedd59b8d69e5c18 Mon Sep 17 00:00:00 2001 From: Poojan Smart <44301271+PoojanSmart@users.noreply.github.com> Date: Wed, 18 Oct 2023 19:39:13 +0530 Subject: [PATCH 1171/1543] Adds hinge loss function algorithm (#10628) * Adds exponential moving average algorithm * code clean up * spell correction * Modifies I/O types of function * Replaces generator function * Resolved mypy type error * readibility of code and documentation * Update exponential_moving_average.py * Adds hinge loss function * suggested doc and refactoring changes * refactoring --------- Co-authored-by: Christian Clauss --- machine_learning/loss_functions/hinge_loss.py | 64 +++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 machine_learning/loss_functions/hinge_loss.py diff --git a/machine_learning/loss_functions/hinge_loss.py b/machine_learning/loss_functions/hinge_loss.py new file mode 100644 index 000000000000..5480a8cd62ee --- /dev/null +++ b/machine_learning/loss_functions/hinge_loss.py @@ -0,0 +1,64 @@ +""" +Hinge Loss + +Description: +Compute the Hinge loss used for training SVM (Support Vector Machine). 
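+A sample that is classified correctly with margin y_true * y_pred >= 1 adds no
+loss; samples inside the margin or on the wrong side are penalised linearly.
+For y_true = [-1, 1, 1, -1, 1] and y_pred = [-4, -0.3, 0.7, 5, 10] the
+per-sample losses are roughly 0, 1.3, 0.3, 6 and 0, whose mean is the 1.52
+shown in the doctest below.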
+ +Formula: +loss = max(0, 1 - true * pred) + +Reference: https://en.wikipedia.org/wiki/Hinge_loss + +Author: Poojan Smart +Email: smrtpoojan@gmail.com +""" + +import numpy as np + + +def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the mean hinge loss for y_true and y_pred for binary classification. + + Args: + y_true: Array of actual values (ground truth) encoded as -1 and 1. + y_pred: Array of predicted values. + + Returns: + The hinge loss between y_true and y_pred. + + Examples: + >>> y_true = np.array([-1, 1, 1, -1, 1]) + >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) + >>> hinge_loss(y_true, pred) + 1.52 + >>> y_true = np.array([-1, 1, 1, -1, 1, 1]) + >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) + >>> hinge_loss(y_true, pred) + Traceback (most recent call last): + ... + ValueError: Length of predicted and actual array must be same. + >>> y_true = np.array([-1, 1, 10, -1, 1]) + >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) + >>> hinge_loss(y_true, pred) + Traceback (most recent call last): + ... + ValueError: y_true can have values -1 or 1 only. + """ + + if len(y_true) != len(y_pred): + raise ValueError("Length of predicted and actual array must be same.") + + # Raise value error when y_true (encoded labels) have any other values + # than -1 and 1 + if np.any((y_true != -1) & (y_true != 1)): + raise ValueError("y_true can have values -1 or 1 only.") + + hinge_losses = np.maximum(0, 1.0 - (y_true * y_pred)) + return np.mean(hinge_losses) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 572de4f15e266057e806a751006156a212a3812e Mon Sep 17 00:00:00 2001 From: Shivansh Bhatnagar Date: Wed, 18 Oct 2023 20:20:18 +0530 Subject: [PATCH 1172/1543] Added A General Swish Activation Function inNeural Networks (#10415) * Added A General Swish Activation Function inNeural Networks * Added the general swish function in the SiLU function and renamed it as swish.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Shivansh Bhatnagar Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../{sigmoid_linear_unit.py => swish.py} | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) rename neural_network/activation_functions/{sigmoid_linear_unit.py => swish.py} (72%) diff --git a/neural_network/activation_functions/sigmoid_linear_unit.py b/neural_network/activation_functions/swish.py similarity index 72% rename from neural_network/activation_functions/sigmoid_linear_unit.py rename to neural_network/activation_functions/swish.py index 0ee09bf82d38..ab3d8fa1203b 100644 --- a/neural_network/activation_functions/sigmoid_linear_unit.py +++ b/neural_network/activation_functions/swish.py @@ -12,6 +12,7 @@ This script is inspired by a corresponding research paper. 
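+The general form added here is swish(x) = x * sigmoid(b * x), where b is the
+trainable_parameter; sigmoid_linear_unit (SiLU) is the special case b = 1.
+For example, with b = 2 the value at x = -1 is -1 * sigmoid(-2), roughly
+-0.119, matching the doctest below.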
* https://arxiv.org/abs/1710.05941 +* https://blog.paperspace.com/swish-activation-function/ """ import numpy as np @@ -49,6 +50,25 @@ def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray: return vector * sigmoid(vector) +def swish(vector: np.ndarray, trainable_parameter: int) -> np.ndarray: + """ + Parameters: + vector (np.ndarray): A numpy array consisting of real values + trainable_parameter: Use to implement various Swish Activation Functions + + Returns: + swish_vec (np.ndarray): The input numpy array, after applying swish + + Examples: + >>> swish(np.array([-1.0, 1.0, 2.0]), 2) + array([-0.11920292, 0.88079708, 1.96402758]) + + >>> swish(np.array([-2]), 1) + array([-0.23840584]) + """ + return vector * sigmoid(trainable_parameter * vector) + + if __name__ == "__main__": import doctest From 9adb7ced16725e3f6cf24cf93ac81a8dcd351665 Mon Sep 17 00:00:00 2001 From: rtang09 <49603415+rtang09@users.noreply.github.com> Date: Thu, 19 Oct 2023 05:02:04 -0700 Subject: [PATCH 1173/1543] Update primelib.py (#10209) * Update primelib.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/primelib.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/maths/primelib.py b/maths/primelib.py index 7e33844be12b..e2d432e1846a 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -51,6 +51,10 @@ def is_prime(number: int) -> bool: True >>> is_prime(10) False + >>> is_prime(97) + True + >>> is_prime(9991) + False >>> is_prime(-1) Traceback (most recent call last): ... From 30c8d5573a8b052210238487167a3ec2d7241d06 Mon Sep 17 00:00:00 2001 From: rtang09 <49603415+rtang09@users.noreply.github.com> Date: Thu, 19 Oct 2023 05:15:23 -0700 Subject: [PATCH 1174/1543] Update binary_exponentiation.py (#10253) * Update binary_exponentiation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/binary_exponentiation.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/maths/binary_exponentiation.py b/maths/binary_exponentiation.py index 05de939d1bde..7eeca89262a9 100644 --- a/maths/binary_exponentiation.py +++ b/maths/binary_exponentiation.py @@ -5,6 +5,12 @@ def binary_exponentiation(a: int, n: int) -> int: + """ + >>> binary_exponentiation(3, 5) + 243 + >>> binary_exponentiation(10, 3) + 1000 + """ if n == 0: return 1 @@ -17,6 +23,10 @@ def binary_exponentiation(a: int, n: int) -> int: if __name__ == "__main__": + import doctest + + doctest.testmod() + try: BASE = int(input("Enter Base : ").strip()) POWER = int(input("Enter Power : ").strip()) From b301e589e2c68f583bf3a09f6d4ca224175383b9 Mon Sep 17 00:00:00 2001 From: Iyiola Aloko <48067557+ialoko@users.noreply.github.com> Date: Thu, 19 Oct 2023 08:21:48 -0400 Subject: [PATCH 1175/1543] Update binary_exponentiation.py (#10342) Co-authored-by: Tianyi Zheng --- maths/binary_exponentiation.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/maths/binary_exponentiation.py b/maths/binary_exponentiation.py index 7eeca89262a9..f613767f547e 100644 --- a/maths/binary_exponentiation.py +++ b/maths/binary_exponentiation.py @@ -6,10 +6,21 @@ def binary_exponentiation(a: int, n: int) -> int: """ + Compute a number raised by some quantity + >>> binary_exponentiation(-1, 3) + -1 + 
>>> binary_exponentiation(-1, 4) + 1 + >>> binary_exponentiation(2, 2) + 4 >>> binary_exponentiation(3, 5) 243 >>> binary_exponentiation(10, 3) 1000 + >>> binary_exponentiation(5e3, 1) + 5000.0 + >>> binary_exponentiation(-5e3, 1) + -5000.0 """ if n == 0: return 1 @@ -28,7 +39,7 @@ def binary_exponentiation(a: int, n: int) -> int: doctest.testmod() try: - BASE = int(input("Enter Base : ").strip()) + BASE = int(float(input("Enter Base : ").strip())) POWER = int(input("Enter Power : ").strip()) except ValueError: print("Invalid literal for integer") From 33888646af9d74e46da0175df75b3e5892a72fc7 Mon Sep 17 00:00:00 2001 From: anshul-2010 <96651393+anshul-2010@users.noreply.github.com> Date: Thu, 19 Oct 2023 18:08:02 +0530 Subject: [PATCH 1176/1543] Edit Distance Algorithm for String Matching (#10571) * Edit Distance Algorithm for String Matching * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update edit_distance.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- strings/edit_distance.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 strings/edit_distance.py diff --git a/strings/edit_distance.py b/strings/edit_distance.py new file mode 100644 index 000000000000..e842c8555c8e --- /dev/null +++ b/strings/edit_distance.py @@ -0,0 +1,32 @@ +def edit_distance(source: str, target: str) -> int: + """ + Edit distance algorithm is a string metric, i.e., it is a way of quantifying how + dissimilar two strings are to one another. It is measured by counting the minimum + number of operations required to transform one string into another. + + This implementation assumes that the cost of operations (insertion, deletion and + substitution) is always 1 + + Args: + source: the initial string with respect to which we are calculating the edit + distance for the target + target: the target string, formed after performing n operations on the source string + + >>> edit_distance("GATTIC", "GALTIC") + 1 + """ + if len(source) == 0: + return len(target) + elif len(target) == 0: + return len(source) + + delta = int(source[-1] != target[-1]) # Substitution + return min( + edit_distance(source[:-1], target[:-1]) + delta, + edit_distance(source, target[:-1]) + 1, + edit_distance(source[:-1], target) + 1, + ) + + +if __name__ == "__main__": + print(edit_distance("ATCGCTG", "TAGCTAA")) # Answer is 4 From 289a4dd6d35a3dd402c98db04d2f39cfc08ea1be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ab=C3=ADlio=20Azevedo?= Date: Thu, 19 Oct 2023 11:35:41 -0300 Subject: [PATCH 1177/1543] docs: add test scenarios to pull request template (#10396) * docs: add test scenarios to pull request template * Update .github/pull_request_template.md --------- Co-authored-by: Christian Clauss --- .github/pull_request_template.md | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 1f9797fae038..e2ae0966cda5 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -4,6 +4,7 @@ * [ ] Add an algorithm? * [ ] Fix a bug or typo in an existing algorithm? +* [ ] Add or change doctests? 
-- Note: Please avoid changing both code and tests in a single pull request. * [ ] Documentation change? ### Checklist: From bd3072b84512b33a6fd7d788812340daa8ac3465 Mon Sep 17 00:00:00 2001 From: Ankit Avinash <128812932+Void426@users.noreply.github.com> Date: Thu, 19 Oct 2023 20:16:39 +0530 Subject: [PATCH 1178/1543] Added Mean Squared Logarithmic Error (MSLE) Loss Function (#10637) * Added Mean Squared Logarithmic Error (MSLE) * Added Mean Squared Logarithmic Error (MSLE) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../mean_squared_logarithmic_error.py | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 machine_learning/loss_functions/mean_squared_logarithmic_error.py diff --git a/machine_learning/loss_functions/mean_squared_logarithmic_error.py b/machine_learning/loss_functions/mean_squared_logarithmic_error.py new file mode 100644 index 000000000000..935ebff37a51 --- /dev/null +++ b/machine_learning/loss_functions/mean_squared_logarithmic_error.py @@ -0,0 +1,55 @@ +""" +Mean Squared Logarithmic Error (MSLE) Loss Function + +Description: +MSLE measures the mean squared logarithmic difference between +true values and predicted values, particularly useful when +dealing with regression problems involving skewed or large-value +targets. It is often used when the relative differences between +predicted and true values are more important than absolute +differences. + +Formula: +MSLE = (1/n) * Σ(log(1 + y_true) - log(1 + y_pred))^2 + +Source: +(https://insideaiml.com/blog/MeanSquared-Logarithmic-Error-Loss-1035) +""" + +import numpy as np + + +def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the Mean Squared Logarithmic Error (MSLE) between two arrays. + + Parameters: + - y_true: The true values (ground truth). + - y_pred: The predicted values. + + Returns: + - msle: The Mean Squared Logarithmic Error between y_true and y_pred. + + Example usage: + >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> mean_squared_logarithmic_error(true_values, predicted_values) + 0.0030860877925181344 + >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> mean_squared_logarithmic_error(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. 
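+
+    Note: np.log1p(x) computes log(1 + x), so targets or predictions equal to
+    zero are handled without a domain error, and because differences are taken
+    on a log scale, under-predicting a large target is penalised more heavily
+    than over-predicting it by the same absolute amount.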
+ """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + squared_logarithmic_errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2 + return np.mean(squared_logarithmic_errors) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 69876140673881efefcb177e3ba2575b0c221438 Mon Sep 17 00:00:00 2001 From: ketan96-m <40893179+ketan96-m@users.noreply.github.com> Date: Thu, 19 Oct 2023 09:48:53 -0500 Subject: [PATCH 1179/1543] *added docstring and doctest for find_isolated_nodes (#10684) *added docstring and doctest for edglist *added docstring and doctest for adjm Co-authored-by: Ketan --- graphs/basic_graphs.py | 81 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 75 insertions(+), 6 deletions(-) diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index 065b6185c123..25c8045b3d2b 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -185,10 +185,29 @@ def topo(g, ind=None, q=None): def adjm(): - n = input().strip() + r""" + Reading an Adjacency matrix + + Parameters: + None + + Returns: + tuple: A tuple containing a list of edges and number of edges + + Example: + >>> # Simulate user input for 3 nodes + >>> input_data = "4\n0 1 0 1\n1 0 1 0\n0 1 0 1\n1 0 1 0\n" + >>> import sys,io + >>> original_input = sys.stdin + >>> sys.stdin = io.StringIO(input_data) # Redirect stdin for testing + >>> adjm() + ([(0, 1, 0, 1), (1, 0, 1, 0), (0, 1, 0, 1), (1, 0, 1, 0)], 4) + >>> sys.stdin = original_input # Restore original stdin + """ + n = int(input().strip()) a = [] for _ in range(n): - a.append(map(int, input().strip().split())) + a.append(tuple(map(int, input().strip().split()))) return a, n @@ -260,10 +279,29 @@ def prim(g, s): def edglist(): - n, m = map(int, input().split(" ")) + r""" + Get the edges and number of edges from the user + + Parameters: + None + + Returns: + tuple: A tuple containing a list of edges and number of edges + + Example: + >>> # Simulate user input for 3 edges and 4 vertices: (1, 2), (2, 3), (3, 4) + >>> input_data = "4 3\n1 2\n2 3\n3 4\n" + >>> import sys,io + >>> original_input = sys.stdin + >>> sys.stdin = io.StringIO(input_data) # Redirect stdin for testing + >>> edglist() + ([(1, 2), (2, 3), (3, 4)], 4) + >>> sys.stdin = original_input # Restore original stdin + """ + n, m = tuple(map(int, input().split(" "))) edges = [] for _ in range(m): - edges.append(map(int, input().split(" "))) + edges.append(tuple(map(int, input().split(" ")))) return edges, n @@ -278,7 +316,9 @@ def edglist(): def krusk(e_and_n): - # Sort edges on the basis of distance + """ + Sort edges on the basis of distance + """ (e, n) = e_and_n e.sort(reverse=True, key=lambda x: x[2]) s = [{i} for i in range(1, n + 1)] @@ -299,8 +339,37 @@ def krusk(e_and_n): break -# find the isolated node in the graph def find_isolated_nodes(graph): + """ + Find the isolated node in the graph + + Parameters: + graph (dict): A dictionary representing a graph. + + Returns: + list: A list of isolated nodes. 
+ + Examples: + >>> graph1 = {1: [2, 3], 2: [1, 3], 3: [1, 2], 4: []} + >>> find_isolated_nodes(graph1) + [4] + + >>> graph2 = {'A': ['B', 'C'], 'B': ['A'], 'C': ['A'], 'D': []} + >>> find_isolated_nodes(graph2) + ['D'] + + >>> graph3 = {'X': [], 'Y': [], 'Z': []} + >>> find_isolated_nodes(graph3) + ['X', 'Y', 'Z'] + + >>> graph4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]} + >>> find_isolated_nodes(graph4) + [] + + >>> graph5 = {} + >>> find_isolated_nodes(graph5) + [] + """ isolated = [] for node in graph: if not graph[node]: From 26ffad9d17232668d0630edb70167e5123a7f35c Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 19 Oct 2023 19:31:51 +0200 Subject: [PATCH 1180/1543] Simplify is_bst.py (#10627) * Simplify is_bst.py * updating DIRECTORY.md * Update is_bst.py * Rename is_bst.py to is_sorted.py * updating DIRECTORY.md * Update data_structures/binary_tree/is_sorted.py Co-authored-by: Tianyi Zheng --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- DIRECTORY.md | 3 +- data_structures/binary_tree/is_bst.py | 131 ----------------------- data_structures/binary_tree/is_sorted.py | 97 +++++++++++++++++ 3 files changed, 99 insertions(+), 132 deletions(-) delete mode 100644 data_structures/binary_tree/is_bst.py create mode 100644 data_structures/binary_tree/is_sorted.py diff --git a/DIRECTORY.md b/DIRECTORY.md index d878f1c79a2d..0999d2e8687a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -199,7 +199,8 @@ * [Flatten Binarytree To Linkedlist](data_structures/binary_tree/flatten_binarytree_to_linkedlist.py) * [Floor And Ceiling](data_structures/binary_tree/floor_and_ceiling.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) - * [Is Bst](data_structures/binary_tree/is_bst.py) + * [Is Sorted](data_structures/binary_tree/is_sorted.py) + * [Is Sum Tree](data_structures/binary_tree/is_sum_tree.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) diff --git a/data_structures/binary_tree/is_bst.py b/data_structures/binary_tree/is_bst.py deleted file mode 100644 index 0b2ef8c9ffde..000000000000 --- a/data_structures/binary_tree/is_bst.py +++ /dev/null @@ -1,131 +0,0 @@ -""" -Author : Alexander Pantyukhin -Date : November 2, 2022 - -Task: -Given the root of a binary tree, determine if it is a valid binary search -tree (BST). - -A valid binary search tree is defined as follows: - -- The left subtree of a node contains only nodes with keys less than the node's key. -- The right subtree of a node contains only nodes with keys greater than the node's key. -- Both the left and right subtrees must also be binary search trees. - -Implementation notes: -Depth-first search approach. - -leetcode: https://leetcode.com/problems/validate-binary-search-tree/ - -Let n is the number of nodes in tree -Runtime: O(n) -Space: O(1) -""" - -from __future__ import annotations - -from dataclasses import dataclass - - -@dataclass -class TreeNode: - data: float - left: TreeNode | None = None - right: TreeNode | None = None - - -def is_binary_search_tree(root: TreeNode | None) -> bool: - """ - >>> is_binary_search_tree(TreeNode(data=2, - ... left=TreeNode(data=1), - ... right=TreeNode(data=3)) - ... ) - True - - >>> is_binary_search_tree(TreeNode(data=0, - ... left=TreeNode(data=-11), - ... right=TreeNode(data=3)) - ... 
) - True - - >>> is_binary_search_tree(TreeNode(data=5, - ... left=TreeNode(data=1), - ... right=TreeNode(data=4, left=TreeNode(data=3))) - ... ) - False - - >>> is_binary_search_tree(TreeNode(data='a', - ... left=TreeNode(data=1), - ... right=TreeNode(data=4, left=TreeNode(data=3))) - ... ) - Traceback (most recent call last): - ... - ValueError: Each node should be type of TreeNode and data should be float. - - >>> is_binary_search_tree(TreeNode(data=2, - ... left=TreeNode([]), - ... right=TreeNode(data=4, left=TreeNode(data=3))) - ... ) - Traceback (most recent call last): - ... - ValueError: Each node should be type of TreeNode and data should be float. - """ - - # Validation - def is_valid_tree(node: TreeNode | None) -> bool: - """ - >>> is_valid_tree(None) - True - >>> is_valid_tree('abc') - False - >>> is_valid_tree(TreeNode(data='not a float')) - False - >>> is_valid_tree(TreeNode(data=1, left=TreeNode('123'))) - False - """ - if node is None: - return True - - if not isinstance(node, TreeNode): - return False - - try: - float(node.data) - except (TypeError, ValueError): - return False - - return is_valid_tree(node.left) and is_valid_tree(node.right) - - if not is_valid_tree(root): - raise ValueError( - "Each node should be type of TreeNode and data should be float." - ) - - def is_binary_search_tree_recursive_check( - node: TreeNode | None, left_bound: float, right_bound: float - ) -> bool: - """ - >>> is_binary_search_tree_recursive_check(None) - True - >>> is_binary_search_tree_recursive_check(TreeNode(data=1), 10, 20) - False - """ - - if node is None: - return True - - return ( - left_bound < node.data < right_bound - and is_binary_search_tree_recursive_check(node.left, left_bound, node.data) - and is_binary_search_tree_recursive_check( - node.right, node.data, right_bound - ) - ) - - return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf")) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/data_structures/binary_tree/is_sorted.py b/data_structures/binary_tree/is_sorted.py new file mode 100644 index 000000000000..5876c5a9c96a --- /dev/null +++ b/data_structures/binary_tree/is_sorted.py @@ -0,0 +1,97 @@ +""" +Given the root of a binary tree, determine if it is a valid binary search tree (BST). + +A valid binary search tree is defined as follows: +- The left subtree of a node contains only nodes with keys less than the node's key. +- The right subtree of a node contains only nodes with keys greater than the node's key. +- Both the left and right subtrees must also be binary search trees. + +In effect, a binary tree is a valid BST if its nodes are sorted in ascending order. +leetcode: https://leetcode.com/problems/validate-binary-search-tree/ + +If n is the number of nodes in the tree then: +Runtime: O(n) +Space: O(1) +""" +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass + + +@dataclass +class Node: + data: float + left: Node | None = None + right: Node | None = None + + def __iter__(self) -> Iterator[float]: + """ + >>> root = Node(data=2.1) + >>> list(root) + [2.1] + >>> root.left=Node(data=2.0) + >>> list(root) + [2.0, 2.1] + >>> root.right=Node(data=2.2) + >>> list(root) + [2.0, 2.1, 2.2] + """ + if self.left: + yield from self.left + yield self.data + if self.right: + yield from self.right + + @property + def is_sorted(self) -> bool: + """ + >>> Node(data='abc').is_sorted + True + >>> Node(data=2, + ... left=Node(data=1.999), + ... 
right=Node(data=3)).is_sorted + True + >>> Node(data=0, + ... left=Node(data=0), + ... right=Node(data=0)).is_sorted + True + >>> Node(data=0, + ... left=Node(data=-11), + ... right=Node(data=3)).is_sorted + True + >>> Node(data=5, + ... left=Node(data=1), + ... right=Node(data=4, left=Node(data=3))).is_sorted + False + >>> Node(data='a', + ... left=Node(data=1), + ... right=Node(data=4, left=Node(data=3))).is_sorted + Traceback (most recent call last): + ... + TypeError: '<' not supported between instances of 'str' and 'int' + >>> Node(data=2, + ... left=Node([]), + ... right=Node(data=4, left=Node(data=3))).is_sorted + Traceback (most recent call last): + ... + TypeError: '<' not supported between instances of 'int' and 'list' + """ + if self.left and (self.data < self.left.data or not self.left.is_sorted): + return False + if self.right and (self.data > self.right.data or not self.right.is_sorted): + return False + return True + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + tree = Node(data=2.1, left=Node(data=2.0), right=Node(data=2.2)) + print(f"Tree {list(tree)} is sorted: {tree.is_sorted = }.") + assert tree.right + tree.right.data = 2.0 + print(f"Tree {list(tree)} is sorted: {tree.is_sorted = }.") + tree.right.data = 2.1 + print(f"Tree {list(tree)} is sorted: {tree.is_sorted = }.") From be94690decde9f0e1df78b41d2a22e7e69bc176d Mon Sep 17 00:00:00 2001 From: NikhithaBandari <91549688+NikhithaBandari@users.noreply.github.com> Date: Thu, 19 Oct 2023 23:19:47 +0530 Subject: [PATCH 1181/1543] Create swap_all_odd_and_even_bits.py (#10692) * Create swap_all_odd_and_even_bits.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update swap_all_odd_and_even_bits.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * 6: 00000110 --> 9: 00001001 * Update swap_all_odd_and_even_bits.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../swap_all_odd_and_even_bits.py | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 bit_manipulation/swap_all_odd_and_even_bits.py diff --git a/bit_manipulation/swap_all_odd_and_even_bits.py b/bit_manipulation/swap_all_odd_and_even_bits.py new file mode 100644 index 000000000000..5ec84417bea6 --- /dev/null +++ b/bit_manipulation/swap_all_odd_and_even_bits.py @@ -0,0 +1,58 @@ +def show_bits(before: int, after: int) -> str: + """ + >>> print(show_bits(0, 0xFFFF)) + 0: 00000000 + 65535: 1111111111111111 + """ + return f"{before:>5}: {before:08b}\n{after:>5}: {after:08b}" + + +def swap_odd_even_bits(num: int) -> int: + """ + 1. We use bitwise AND operations to separate the even bits (0, 2, 4, 6, etc.) and + odd bits (1, 3, 5, 7, etc.) in the input number. + 2. We then right-shift the even bits by 1 position and left-shift the odd bits by + 1 position to swap them. + 3. Finally, we combine the swapped even and odd bits using a bitwise OR operation + to obtain the final result. 
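+
+    As an illustration, 23 is 00010111: the mask 0xAAAAAAAA keeps 00000010,
+    the mask 0x55555555 keeps 00010101, and shifting the first right by one,
+    the second left by one and OR-ing them gives 00101011, i.e. 43.
+
+    >>> bin(23 & 0xAAAAAAAA), bin(23 & 0x55555555)
+    ('0b10', '0b10101')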
+ >>> print(show_bits(0, swap_odd_even_bits(0))) + 0: 00000000 + 0: 00000000 + >>> print(show_bits(1, swap_odd_even_bits(1))) + 1: 00000001 + 2: 00000010 + >>> print(show_bits(2, swap_odd_even_bits(2))) + 2: 00000010 + 1: 00000001 + >>> print(show_bits(3, swap_odd_even_bits(3))) + 3: 00000011 + 3: 00000011 + >>> print(show_bits(4, swap_odd_even_bits(4))) + 4: 00000100 + 8: 00001000 + >>> print(show_bits(5, swap_odd_even_bits(5))) + 5: 00000101 + 10: 00001010 + >>> print(show_bits(6, swap_odd_even_bits(6))) + 6: 00000110 + 9: 00001001 + >>> print(show_bits(23, swap_odd_even_bits(23))) + 23: 00010111 + 43: 00101011 + """ + # Get all even bits - 0xAAAAAAAA is a 32-bit number with all even bits set to 1 + even_bits = num & 0xAAAAAAAA + + # Get all odd bits - 0x55555555 is a 32-bit number with all odd bits set to 1 + odd_bits = num & 0x55555555 + + # Right shift even bits and left shift odd bits and swap them + return even_bits >> 1 | odd_bits << 1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + for i in (-1, 0, 1, 2, 3, 4, 23, 24): + print(show_bits(i, swap_odd_even_bits(i)), "\n") From 34f48b684bce39cb24667e5181b268c9f3bf9980 Mon Sep 17 00:00:00 2001 From: Anupamaraie <91787285+Anupamaraie@users.noreply.github.com> Date: Fri, 20 Oct 2023 01:50:16 +0545 Subject: [PATCH 1182/1543] Create vernam_cipher.py (#10702) * Create vernam_cipher.py added vernam cipher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py added return type * Update vernam_cipher.py added type hint for plaintext and key * Update vernam_cipher.py added tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py Added tests * Update vernam_cipher.py * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * Update vernam_cipher.py * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * Update vernam_cipher.py * 
[pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * Update ciphers/vernam_cipher.py * Update vernam_cipher.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ciphers/vernam_cipher.py | 42 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 ciphers/vernam_cipher.py diff --git a/ciphers/vernam_cipher.py b/ciphers/vernam_cipher.py new file mode 100644 index 000000000000..197f28635a1c --- /dev/null +++ b/ciphers/vernam_cipher.py @@ -0,0 +1,42 @@ +def vernam_encrypt(plaintext: str, key: str) -> str: + """ + >>> vernam_encrypt("HELLO","KEY") + 'RIJVS' + """ + ciphertext = "" + for i in range(len(plaintext)): + ct = ord(key[i % len(key)]) - 65 + ord(plaintext[i]) - 65 + while ct > 25: + ct = ct - 26 + ciphertext += chr(65 + ct) + return ciphertext + + +def vernam_decrypt(ciphertext: str, key: str) -> str: + """ + >>> vernam_decrypt("RIJVS","KEY") + 'HELLO' + """ + decrypted_text = "" + for i in range(len(ciphertext)): + ct = ord(ciphertext[i]) - ord(key[i % len(key)]) + while ct < 0: + ct = 26 + ct + decrypted_text += chr(65 + ct) + return decrypted_text + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + + # Example usage + plaintext = "HELLO" + key = "KEY" + encrypted_text = vernam_encrypt(plaintext, key) + decrypted_text = vernam_decrypt(encrypted_text, key) + print("\n\n") + print("Plaintext:", plaintext) + print("Encrypted:", encrypted_text) + print("Decrypted:", decrypted_text) From 9875f374f4762d6219067b2e7909a762f25b68ba Mon Sep 17 00:00:00 2001 From: Adam Ross <14985050+R055A@users.noreply.github.com> Date: Thu, 19 Oct 2023 22:45:51 +0200 Subject: [PATCH 1183/1543] Consolidate bubble sort iterative and recursive (#10651) * Consolidate bubble sort iterative and recursive * Update sorts/bubble_sort.py Co-authored-by: Christian Clauss * Refactor bubble sort func signature, doctest, timer * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bubble_sort.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- sorts/bubble_sort.py | 109 ++++++++++++++++++++++++++++----- sorts/recursive_bubble_sort.py | 42 ------------- 2 files changed, 92 insertions(+), 59 deletions(-) delete mode 100644 sorts/recursive_bubble_sort.py diff --git a/sorts/bubble_sort.py b/sorts/bubble_sort.py index 7da4362a5b97..bdf85c70dd35 100644 --- a/sorts/bubble_sort.py +++ b/sorts/bubble_sort.py @@ -1,7 +1,7 @@ from typing import Any -def bubble_sort(collection: list[Any]) -> list[Any]: +def bubble_sort_iterative(collection: list[Any]) -> list[Any]: """Pure implementation of bubble sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous @@ -9,25 +9,37 @@ def bubble_sort(collection: list[Any]) -> list[Any]: :return: the same collection ordered by ascending Examples: - >>> bubble_sort([0, 5, 2, 3, 2]) + >>> bubble_sort_iterative([0, 5, 2, 3, 2]) [0, 2, 2, 3, 5] - >>> bubble_sort([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2]) + >>> bubble_sort_iterative([]) + [] + >>> 
bubble_sort_iterative([-2, -45, -5]) + [-45, -5, -2] + >>> bubble_sort_iterative([-23, 0, 6, -4, 34]) + [-23, -4, 0, 6, 34] + >>> bubble_sort_iterative([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2]) True - >>> bubble_sort([]) == sorted([]) + >>> bubble_sort_iterative([]) == sorted([]) True - >>> bubble_sort([-2, -45, -5]) == sorted([-2, -45, -5]) + >>> bubble_sort_iterative([-2, -45, -5]) == sorted([-2, -45, -5]) True - >>> bubble_sort([-23, 0, 6, -4, 34]) == sorted([-23, 0, 6, -4, 34]) + >>> bubble_sort_iterative([-23, 0, 6, -4, 34]) == sorted([-23, 0, 6, -4, 34]) True - >>> bubble_sort(['d', 'a', 'b', 'e', 'c']) == sorted(['d', 'a', 'b', 'e', 'c']) + >>> bubble_sort_iterative(['d', 'a', 'b', 'e']) == sorted(['d', 'a', 'b', 'e']) True + >>> bubble_sort_iterative(['z', 'a', 'y', 'b', 'x', 'c']) + ['a', 'b', 'c', 'x', 'y', 'z'] + >>> bubble_sort_iterative([1.1, 3.3, 5.5, 7.7, 2.2, 4.4, 6.6]) + [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7] + >>> bubble_sort_iterative([1, 3.3, 5, 7.7, 2, 4.4, 6]) + [1, 2, 3.3, 4.4, 5, 6, 7.7] >>> import random - >>> collection = random.sample(range(-50, 50), 100) - >>> bubble_sort(collection) == sorted(collection) + >>> collection_arg = random.sample(range(-50, 50), 100) + >>> bubble_sort_iterative(collection_arg) == sorted(collection_arg) True >>> import string - >>> collection = random.choices(string.ascii_letters + string.digits, k=100) - >>> bubble_sort(collection) == sorted(collection) + >>> collection_arg = random.choices(string.ascii_letters + string.digits, k=100) + >>> bubble_sort_iterative(collection_arg) == sorted(collection_arg) True """ length = len(collection) @@ -42,14 +54,77 @@ def bubble_sort(collection: list[Any]) -> list[Any]: return collection +def bubble_sort_recursive(collection: list[Any]) -> list[Any]: + """It is similar iterative bubble sort but recursive. 
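+
+    Each call performs one full pass, swapping adjacent out-of-order pairs; if
+    any swap occurred, the function recurses on the partially sorted list, so
+    the recursion stops as soon as a pass completes without swaps.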
+ + :param collection: mutable ordered sequence of elements + :return: the same list in ascending order + + Examples: + >>> bubble_sort_recursive([0, 5, 2, 3, 2]) + [0, 2, 2, 3, 5] + >>> bubble_sort_iterative([]) + [] + >>> bubble_sort_recursive([-2, -45, -5]) + [-45, -5, -2] + >>> bubble_sort_recursive([-23, 0, 6, -4, 34]) + [-23, -4, 0, 6, 34] + >>> bubble_sort_recursive([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2]) + True + >>> bubble_sort_recursive([]) == sorted([]) + True + >>> bubble_sort_recursive([-2, -45, -5]) == sorted([-2, -45, -5]) + True + >>> bubble_sort_recursive([-23, 0, 6, -4, 34]) == sorted([-23, 0, 6, -4, 34]) + True + >>> bubble_sort_recursive(['d', 'a', 'b', 'e']) == sorted(['d', 'a', 'b', 'e']) + True + >>> bubble_sort_recursive(['z', 'a', 'y', 'b', 'x', 'c']) + ['a', 'b', 'c', 'x', 'y', 'z'] + >>> bubble_sort_recursive([1.1, 3.3, 5.5, 7.7, 2.2, 4.4, 6.6]) + [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7] + >>> bubble_sort_recursive([1, 3.3, 5, 7.7, 2, 4.4, 6]) + [1, 2, 3.3, 4.4, 5, 6, 7.7] + >>> import random + >>> collection_arg = random.sample(range(-50, 50), 100) + >>> bubble_sort_recursive(collection_arg) == sorted(collection_arg) + True + >>> import string + >>> collection_arg = random.choices(string.ascii_letters + string.digits, k=100) + >>> bubble_sort_recursive(collection_arg) == sorted(collection_arg) + True + """ + length = len(collection) + swapped = False + for i in range(length - 1): + if collection[i] > collection[i + 1]: + collection[i], collection[i + 1] = collection[i + 1], collection[i] + swapped = True + + return collection if not swapped else bubble_sort_recursive(collection) + + if __name__ == "__main__": import doctest - import time + from random import sample + from timeit import timeit doctest.testmod() - user_input = input("Enter numbers separated by a comma:").strip() - unsorted = [int(item) for item in user_input.split(",")] - start = time.process_time() - print(*bubble_sort(unsorted), sep=",") - print(f"Processing time: {(time.process_time() - start)%1e9 + 7}") + # Benchmark: Iterative seems slightly faster than recursive. + num_runs = 10_000 + unsorted = sample(range(-50, 50), 100) + timer_iterative = timeit( + "bubble_sort_iterative(unsorted[:])", globals=globals(), number=num_runs + ) + print("\nIterative bubble sort:") + print(*bubble_sort_iterative(unsorted), sep=",") + print(f"Processing time (iterative): {timer_iterative:.5f}s for {num_runs:,} runs") + + unsorted = sample(range(-50, 50), 100) + timer_recursive = timeit( + "bubble_sort_recursive(unsorted[:])", globals=globals(), number=num_runs + ) + print("\nRecursive bubble sort:") + print(*bubble_sort_recursive(unsorted), sep=",") + print(f"Processing time (recursive): {timer_recursive:.5f}s for {num_runs:,} runs") diff --git a/sorts/recursive_bubble_sort.py b/sorts/recursive_bubble_sort.py deleted file mode 100644 index 82af89593e5b..000000000000 --- a/sorts/recursive_bubble_sort.py +++ /dev/null @@ -1,42 +0,0 @@ -def bubble_sort(list_data: list, length: int = 0) -> list: - """ - It is similar is bubble sort but recursive. 
- :param list_data: mutable ordered sequence of elements - :param length: length of list data - :return: the same list in ascending order - - >>> bubble_sort([0, 5, 2, 3, 2], 5) - [0, 2, 2, 3, 5] - - >>> bubble_sort([], 0) - [] - - >>> bubble_sort([-2, -45, -5], 3) - [-45, -5, -2] - - >>> bubble_sort([-23, 0, 6, -4, 34], 5) - [-23, -4, 0, 6, 34] - - >>> bubble_sort([-23, 0, 6, -4, 34], 5) == sorted([-23, 0, 6, -4, 34]) - True - - >>> bubble_sort(['z','a','y','b','x','c'], 6) - ['a', 'b', 'c', 'x', 'y', 'z'] - - >>> bubble_sort([1.1, 3.3, 5.5, 7.7, 2.2, 4.4, 6.6]) - [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7] - """ - length = length or len(list_data) - swapped = False - for i in range(length - 1): - if list_data[i] > list_data[i + 1]: - list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i] - swapped = True - - return list_data if not swapped else bubble_sort(list_data, length - 1) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 51805338afbbf76c3d1371b60ba301eaaf094359 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 19 Oct 2023 23:35:38 -0400 Subject: [PATCH 1184/1543] Fix ruff error in `machine_learning/sequential_minimum_optimization.py` (#10717) * updating DIRECTORY.md * Try to fix ruff error in sequential_minimum_optimization.py * Update sequential_minimum_optimization.py * Update sequential_minimum_optimization.py * Update sequential_minimum_optimization.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 13 +++++++++++-- machine_learning/sequential_minimum_optimization.py | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 0999d2e8687a..1aaabf782fe3 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -37,6 +37,7 @@ ## Bit Manipulation * [Binary And Operator](bit_manipulation/binary_and_operator.py) + * [Binary Coded Decimal](bit_manipulation/binary_coded_decimal.py) * [Binary Count Setbits](bit_manipulation/binary_count_setbits.py) * [Binary Count Trailing Zeros](bit_manipulation/binary_count_trailing_zeros.py) * [Binary Or Operator](bit_manipulation/binary_or_operator.py) @@ -57,6 +58,7 @@ * [Power Of 4](bit_manipulation/power_of_4.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) + * [Swap All Odd And Even Bits](bit_manipulation/swap_all_odd_and_even_bits.py) ## Blockchain * [Diophantine Equation](blockchain/diophantine_equation.py) @@ -124,6 +126,7 @@ * [Transposition Cipher](ciphers/transposition_cipher.py) * [Transposition Cipher Encrypt Decrypt File](ciphers/transposition_cipher_encrypt_decrypt_file.py) * [Trifid Cipher](ciphers/trifid_cipher.py) + * [Vernam Cipher](ciphers/vernam_cipher.py) * [Vigenere Cipher](ciphers/vigenere_cipher.py) * [Xor Cipher](ciphers/xor_cipher.py) @@ -163,6 +166,7 @@ * [Molecular Chemistry](conversions/molecular_chemistry.py) * [Octal To Binary](conversions/octal_to_binary.py) * [Octal To Decimal](conversions/octal_to_decimal.py) + * [Octal To Hexadecimal](conversions/octal_to_hexadecimal.py) * [Prefix Conversions](conversions/prefix_conversions.py) * [Prefix Conversions String](conversions/prefix_conversions_string.py) * [Pressure Conversions](conversions/pressure_conversions.py) @@ -183,6 +187,7 @@ * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product Sum](data_structures/arrays/product_sum.py) * [Sparse Table](data_structures/arrays/sparse_table.py) + * [Sudoku 
Solver](data_structures/arrays/sudoku_solver.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) @@ -548,8 +553,10 @@ * Loss Functions * [Binary Cross Entropy](machine_learning/loss_functions/binary_cross_entropy.py) * [Categorical Cross Entropy](machine_learning/loss_functions/categorical_cross_entropy.py) + * [Hinge Loss](machine_learning/loss_functions/hinge_loss.py) * [Huber Loss](machine_learning/loss_functions/huber_loss.py) * [Mean Squared Error](machine_learning/loss_functions/mean_squared_error.py) + * [Mean Squared Logarithmic Error](machine_learning/loss_functions/mean_squared_logarithmic_error.py) * [Mfcc](machine_learning/mfcc.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polynomial Regression](machine_learning/polynomial_regression.py) @@ -734,6 +741,7 @@ * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Largest Square Area In Matrix](matrix/largest_square_area_in_matrix.py) * [Matrix Class](matrix/matrix_class.py) + * [Matrix Multiplication Recursion](matrix/matrix_multiplication_recursion.py) * [Matrix Operation](matrix/matrix_operation.py) * [Max Area Of Island](matrix/max_area_of_island.py) * [Median Matrix](matrix/median_matrix.py) @@ -760,10 +768,10 @@ * [Mish](neural_network/activation_functions/mish.py) * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) - * [Sigmoid Linear Unit](neural_network/activation_functions/sigmoid_linear_unit.py) * [Soboleva Modified Hyperbolic Tangent](neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py) * [Softplus](neural_network/activation_functions/softplus.py) * [Squareplus](neural_network/activation_functions/squareplus.py) + * [Swish](neural_network/activation_functions/swish.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Perceptron](neural_network/perceptron.py) @@ -1185,7 +1193,6 @@ * [Quick Sort](sorts/quick_sort.py) * [Quick Sort 3 Partition](sorts/quick_sort_3_partition.py) * [Radix Sort](sorts/radix_sort.py) - * [Recursive Bubble Sort](sorts/recursive_bubble_sort.py) * [Recursive Insertion Sort](sorts/recursive_insertion_sort.py) * [Recursive Mergesort Array](sorts/recursive_mergesort_array.py) * [Recursive Quick Sort](sorts/recursive_quick_sort.py) @@ -1216,12 +1223,14 @@ * [Damerau Levenshtein Distance](strings/damerau_levenshtein_distance.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) * [Dna](strings/dna.py) + * [Edit Distance](strings/edit_distance.py) * [Frequency Finder](strings/frequency_finder.py) * [Hamming Distance](strings/hamming_distance.py) * [Indian Phone Validator](strings/indian_phone_validator.py) * [Is Contains Unique Chars](strings/is_contains_unique_chars.py) * [Is Isogram](strings/is_isogram.py) * [Is Pangram](strings/is_pangram.py) + * [Is Polish National Id](strings/is_polish_national_id.py) * [Is Spain National Id](strings/is_spain_national_id.py) * [Is Srilankan Phone Number](strings/is_srilankan_phone_number.py) * [Is Valid Email Address](strings/is_valid_email_address.py) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index b24f5669e2e8..9e2304859f8d 100644 
--- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -454,7 +454,7 @@ def test_cancel_data(): print("Hello!\nStart test svm by smo algorithm!") # 0: download dataset and load into pandas' dataframe if not os.path.exists(r"cancel_data.csv"): - request = urllib.request.Request( + request = urllib.request.Request( # noqa: S310 CANCER_DATASET_URL, headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}, ) From 4154428351cd60db504eb232e3b7900987a2fa19 Mon Sep 17 00:00:00 2001 From: Saptadeep Banerjee <69459134+imSanko@users.noreply.github.com> Date: Fri, 20 Oct 2023 09:59:24 +0530 Subject: [PATCH 1185/1543] [ADD]: Improved tests in power recursion! (#10664) * Added new tests! * [ADD]: Inproved Tests * fixed * Removed spaces * Changed the file name * Added Changes * changed the code and kept the test cases * changed the code and kept the test cases * missed the line * removed spaces * Update power_using_recursion.py --------- Co-authored-by: Christian Clauss --- maths/power_using_recursion.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/maths/power_using_recursion.py b/maths/power_using_recursion.py index e82635ba0005..462fc45bff64 100644 --- a/maths/power_using_recursion.py +++ b/maths/power_using_recursion.py @@ -15,6 +15,8 @@ def power(base: int, exponent: int) -> float: """ + Calculate the power of a base raised to an exponent. + >>> power(3, 4) 81 >>> power(2, 0) From 82fc24ce96036b6e1180de06c513bbaacda6a550 Mon Sep 17 00:00:00 2001 From: RaymondDashWu <33266041+RaymondDashWu@users.noreply.github.com> Date: Thu, 19 Oct 2023 21:42:20 -0700 Subject: [PATCH 1186/1543] Test cases for check_bipartite_graph_bfs (#10688) * [ADD] tests for check_bipartite_graph_bfs * linter fix? * linter fix * [ADD] more test cases check_bipartite_graph_bfs * doctest fixes. Forgot to add 'Traceback...' * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * missed a Traceback * Update check_bipartite_graph_bfs.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- graphs/check_bipartite_graph_bfs.py | 45 +++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py index 7fc57cbc78bd..6c385d54e0b6 100644 --- a/graphs/check_bipartite_graph_bfs.py +++ b/graphs/check_bipartite_graph_bfs.py @@ -10,6 +10,48 @@ def check_bipartite(graph): + """ + >>> check_bipartite({}) + True + >>> check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) + True + >>> check_bipartite({0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 3], 3: [0, 2]}) + False + >>> check_bipartite({0: [4], 1: [], 2: [4], 3: [4], 4: [0, 2, 3]}) + True + >>> check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) + False + >>> check_bipartite({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) + Traceback (most recent call last): + ... + KeyError: 0 + >>> check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) + Traceback (most recent call last): + ... + KeyError: 4 + >>> check_bipartite({0: [-1, 3], 1: [0, -2]}) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> check_bipartite({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]}) + True + >>> check_bipartite({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) + Traceback (most recent call last): + ... 
+ KeyError: 0 + >>> check_bipartite({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) + Traceback (most recent call last): + ... + TypeError: list indices must be integers or slices, not float + >>> check_bipartite({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) + Traceback (most recent call last): + ... + KeyError: 0 + >>> check_bipartite({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]}) + Traceback (most recent call last): + ... + TypeError: list indices must be integers or slices, not str + """ queue = Queue() visited = [False] * len(graph) color = [-1] * len(graph) @@ -45,3 +87,6 @@ def bfs(): if __name__ == "__main__": # Adjacency List of graph print(check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})) + import doctest + + doctest.testmod() From 197604898b85e84cfbaee0a0dd06095db8d1c7b6 Mon Sep 17 00:00:00 2001 From: shivaparihar6119 <122152343+shivaparihar6119@users.noreply.github.com> Date: Fri, 20 Oct 2023 11:39:58 +0530 Subject: [PATCH 1187/1543] Concatenates both check bipatrite graphs(bfs&dfs) (#10708) * sync * fixes#8098 * deleted: graphs/check_bipartite_graph_all.py new file: graphs/check_bipatrite,py * renamed: graphs/check_bipatrite,py -> graphs/check_bipatrite.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add the new tests --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 10 -- graphs/check_bipartite_graph_bfs.py | 92 -------------- graphs/check_bipartite_graph_dfs.py | 55 --------- graphs/check_bipatrite.py | 179 ++++++++++++++++++++++++++++ 4 files changed, 179 insertions(+), 157 deletions(-) delete mode 100644 graphs/check_bipartite_graph_bfs.py delete mode 100644 graphs/check_bipartite_graph_dfs.py create mode 100644 graphs/check_bipatrite.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 1aaabf782fe3..1320c70ef629 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -65,9 +65,7 @@ ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) - * [Imply Gate](boolean_algebra/imply_gate.py) * [Nand Gate](boolean_algebra/nand_gate.py) - * [Nimply Gate](boolean_algebra/nimply_gate.py) * [Nor Gate](boolean_algebra/nor_gate.py) * [Not Gate](boolean_algebra/not_gate.py) * [Or Gate](boolean_algebra/or_gate.py) @@ -180,9 +178,7 @@ ## Data Structures * Arrays * [Equilibrium Index In Array](data_structures/arrays/equilibrium_index_in_array.py) - * [Find Triplets With 0 Sum](data_structures/arrays/find_triplets_with_0_sum.py) * [Median Two Array](data_structures/arrays/median_two_array.py) - * [Pairs With Given Sum](data_structures/arrays/pairs_with_given_sum.py) * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product Sum](data_structures/arrays/product_sum.py) @@ -402,7 +398,6 @@ ## Financial * [Equated Monthly Installments](financial/equated_monthly_installments.py) - * [Exponential Moving Average](financial/exponential_moving_average.py) * [Interest](financial/interest.py) * [Present Value](financial/present_value.py) * [Price Plus Tax](financial/price_plus_tax.py) @@ -711,7 +706,6 @@ * [Sin](maths/sin.py) * [Sock Merchant](maths/sock_merchant.py) * [Softmax](maths/softmax.py) - * [Solovay Strassen Primality Test](maths/solovay_strassen_primality_test.py) * [Square Root](maths/square_root.py) * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) * [Sum Of Digits](maths/sum_of_digits.py) @@ -753,7 +747,6 @@ * 
[Spiral Print](matrix/spiral_print.py) * Tests * [Test Matrix Operation](matrix/tests/test_matrix_operation.py) - * [Validate Sudoku Board](matrix/validate_sudoku_board.py) ## Networking Flow * [Ford Fulkerson](networking_flow/ford_fulkerson.py) @@ -829,7 +822,6 @@ * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) * [Speed Of Sound](physics/speed_of_sound.py) - * [Speeds Of Gas Molecules](physics/speeds_of_gas_molecules.py) ## Project Euler * Problem 001 @@ -1220,7 +1212,6 @@ * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) * [Credit Card Validator](strings/credit_card_validator.py) - * [Damerau Levenshtein Distance](strings/damerau_levenshtein_distance.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) * [Dna](strings/dna.py) * [Edit Distance](strings/edit_distance.py) @@ -1255,7 +1246,6 @@ * [String Switch Case](strings/string_switch_case.py) * [Strip](strings/strip.py) * [Text Justification](strings/text_justification.py) - * [Title](strings/title.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) diff --git a/graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py deleted file mode 100644 index 6c385d54e0b6..000000000000 --- a/graphs/check_bipartite_graph_bfs.py +++ /dev/null @@ -1,92 +0,0 @@ -# Check whether Graph is Bipartite or Not using BFS - - -# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, -# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex -# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, -# or u belongs to V and v to U. We can also say that there is no edge that connects -# vertices of same set. -from queue import Queue - - -def check_bipartite(graph): - """ - >>> check_bipartite({}) - True - >>> check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) - True - >>> check_bipartite({0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 3], 3: [0, 2]}) - False - >>> check_bipartite({0: [4], 1: [], 2: [4], 3: [4], 4: [0, 2, 3]}) - True - >>> check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) - False - >>> check_bipartite({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) - Traceback (most recent call last): - ... - KeyError: 0 - >>> check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) - Traceback (most recent call last): - ... - KeyError: 4 - >>> check_bipartite({0: [-1, 3], 1: [0, -2]}) - Traceback (most recent call last): - ... - IndexError: list index out of range - >>> check_bipartite({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]}) - True - >>> check_bipartite({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) - Traceback (most recent call last): - ... - KeyError: 0 - >>> check_bipartite({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) - Traceback (most recent call last): - ... - TypeError: list indices must be integers or slices, not float - >>> check_bipartite({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) - Traceback (most recent call last): - ... - KeyError: 0 - >>> check_bipartite({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]}) - Traceback (most recent call last): - ... 
- TypeError: list indices must be integers or slices, not str - """ - queue = Queue() - visited = [False] * len(graph) - color = [-1] * len(graph) - - def bfs(): - while not queue.empty(): - u = queue.get() - visited[u] = True - - for neighbour in graph[u]: - if neighbour == u: - return False - - if color[neighbour] == -1: - color[neighbour] = 1 - color[u] - queue.put(neighbour) - - elif color[neighbour] == color[u]: - return False - - return True - - for i in range(len(graph)): - if not visited[i]: - queue.put(i) - color[i] = 0 - if bfs() is False: - return False - - return True - - -if __name__ == "__main__": - # Adjacency List of graph - print(check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})) - import doctest - - doctest.testmod() diff --git a/graphs/check_bipartite_graph_dfs.py b/graphs/check_bipartite_graph_dfs.py deleted file mode 100644 index b13a9eb95afb..000000000000 --- a/graphs/check_bipartite_graph_dfs.py +++ /dev/null @@ -1,55 +0,0 @@ -from collections import defaultdict - - -def is_bipartite(graph: defaultdict[int, list[int]]) -> bool: - """ - Check whether a graph is Bipartite or not using Depth-First Search (DFS). - - A Bipartite Graph is a graph whose vertices can be divided into two independent - sets, U and V such that every edge (u, v) either connects a vertex from - U to V or a vertex from V to U. In other words, for every edge (u, v), - either u belongs to U and v to V, or u belongs to V and v to U. There is - no edge that connects vertices of the same set. - - Args: - graph: An adjacency list representing the graph. - - Returns: - True if there's no edge that connects vertices of the same set, False otherwise. - - Examples: - >>> is_bipartite( - ... defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4], 3: [1], 4: [2]}) - ... ) - False - >>> is_bipartite(defaultdict(list, {0: [1, 2], 1: [0, 2], 2: [0, 1]})) - True - """ - - def depth_first_search(node: int, color: int) -> bool: - visited[node] = color - return any( - visited[neighbour] == color - or ( - visited[neighbour] == -1 - and not depth_first_search(neighbour, 1 - color) - ) - for neighbour in graph[node] - ) - - visited: defaultdict[int, int] = defaultdict(lambda: -1) - - return all( - not (visited[node] == -1 and not depth_first_search(node, 0)) for node in graph - ) - - -if __name__ == "__main__": - import doctest - - result = doctest.testmod() - - if result.failed: - print(f"{result.failed} test(s) failed.") - else: - print("All tests passed!") diff --git a/graphs/check_bipatrite.py b/graphs/check_bipatrite.py new file mode 100644 index 000000000000..10b9cc965251 --- /dev/null +++ b/graphs/check_bipatrite.py @@ -0,0 +1,179 @@ +from collections import defaultdict, deque + + +def is_bipartite_dfs(graph: defaultdict[int, list[int]]) -> bool: + """ + Check if a graph is bipartite using depth-first search (DFS). + + Args: + graph: Adjacency list representing the graph. + + Returns: + True if bipartite, False otherwise. + + Checks if the graph can be divided into two sets of vertices, such that no two + vertices within the same set are connected by an edge. + + Examples: + # FIXME: This test should pass. + >>> is_bipartite_dfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4]})) + Traceback (most recent call last): + ... 
+ RuntimeError: dictionary changed size during iteration + >>> is_bipartite_dfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 1]})) + False + >>> is_bipartite_dfs({}) + True + >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) + True + >>> is_bipartite_dfs({0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 3], 3: [0, 2]}) + False + >>> is_bipartite_dfs({0: [4], 1: [], 2: [4], 3: [4], 4: [0, 2, 3]}) + True + >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) + False + >>> is_bipartite_dfs({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) + Traceback (most recent call last): + ... + KeyError: 0 + + # FIXME: This test should fails with KeyError: 4. + >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) + False + >>> is_bipartite_dfs({0: [-1, 3], 1: [0, -2]}) + Traceback (most recent call last): + ... + KeyError: -1 + >>> is_bipartite_dfs({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]}) + True + >>> is_bipartite_dfs({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) + Traceback (most recent call last): + ... + KeyError: 0 + + # FIXME: This test should fails with TypeError: list indices must be integers or... + >>> is_bipartite_dfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) + True + >>> is_bipartite_dfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) + Traceback (most recent call last): + ... + KeyError: 1 + >>> is_bipartite_dfs({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]}) + Traceback (most recent call last): + ... + KeyError: 'b' + """ + + def depth_first_search(node: int, color: int) -> bool: + """ + Perform Depth-First Search (DFS) on the graph starting from a node. + + Args: + node: The current node being visited. + color: The color assigned to the current node. + + Returns: + True if the graph is bipartite starting from the current node, + False otherwise. + """ + if visited[node] == -1: + visited[node] = color + for neighbor in graph[node]: + if not depth_first_search(neighbor, 1 - color): + return False + return visited[node] == color + + visited: defaultdict[int, int] = defaultdict(lambda: -1) + for node in graph: + if visited[node] == -1 and not depth_first_search(node, 0): + return False + return True + + +def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: + """ + Check if a graph is bipartite using a breadth-first search (BFS). + + Args: + graph: Adjacency list representing the graph. + + Returns: + True if bipartite, False otherwise. + + Check if the graph can be divided into two sets of vertices, such that no two + vertices within the same set are connected by an edge. + + Examples: + # FIXME: This test should pass. + >>> is_bipartite_bfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4]})) + Traceback (most recent call last): + ... + RuntimeError: dictionary changed size during iteration + >>> is_bipartite_bfs(defaultdict(list, {0: [1, 2], 1: [0, 2], 2: [0, 1]})) + False + >>> is_bipartite_bfs({}) + True + >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) + True + >>> is_bipartite_bfs({0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 3], 3: [0, 2]}) + False + >>> is_bipartite_bfs({0: [4], 1: [], 2: [4], 3: [4], 4: [0, 2, 3]}) + True + >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) + False + >>> is_bipartite_bfs({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) + Traceback (most recent call last): + ... + KeyError: 0 + + # FIXME: This test should fails with KeyError: 4. 
+ >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) + False + >>> is_bipartite_bfs({0: [-1, 3], 1: [0, -2]}) + Traceback (most recent call last): + ... + KeyError: -1 + >>> is_bipartite_bfs({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]}) + True + >>> is_bipartite_bfs({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) + Traceback (most recent call last): + ... + KeyError: 0 + + # FIXME: This test should fails with TypeError: list indices must be integers or... + >>> is_bipartite_bfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) + True + >>> is_bipartite_bfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) + Traceback (most recent call last): + ... + KeyError: 1 + >>> is_bipartite_bfs({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]}) + Traceback (most recent call last): + ... + KeyError: 'b' + """ + visited: defaultdict[int, int] = defaultdict(lambda: -1) + for node in graph: + if visited[node] == -1: + queue: deque[int] = deque() + queue.append(node) + visited[node] = 0 + while queue: + curr_node = queue.popleft() + for neighbor in graph[curr_node]: + if visited[neighbor] == -1: + visited[neighbor] = 1 - visited[curr_node] + queue.append(neighbor) + elif visited[neighbor] == visited[curr_node]: + return False + return True + + +if __name__ == "__main__": + import doctest + + result = doctest.testmod() + if result.failed: + print(f"{result.failed} test(s) failed.") + else: + print("All tests passed!") From 6f2d6f72d56f832dcfaaf226688c1dab4cdb9d0e Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 20 Oct 2023 02:17:31 -0400 Subject: [PATCH 1188/1543] Move files for special numbers to own directory (#10714) --- .../armstrong_numbers.py | 196 +++++------ .../automorphic_number.py | 0 maths/{ => special_numbers}/bell_numbers.py | 0 .../carmichael_number.py | 0 maths/{ => special_numbers}/catalan_number.py | 0 .../{ => special_numbers}/hamming_numbers.py | 0 .../{ => special_numbers}/harshad_numbers.py | 316 +++++++++--------- .../{ => special_numbers}/hexagonal_number.py | 0 .../krishnamurthy_number.py | 0 maths/{ => special_numbers}/perfect_number.py | 0 .../polygonal_numbers.py | 0 maths/{ => special_numbers}/pronic_number.py | 0 maths/{ => special_numbers}/proth_number.py | 0 maths/{ => special_numbers}/ugly_numbers.py | 108 +++--- maths/{ => special_numbers}/weird_number.py | 0 15 files changed, 310 insertions(+), 310 deletions(-) rename maths/{ => special_numbers}/armstrong_numbers.py (96%) rename maths/{ => special_numbers}/automorphic_number.py (100%) rename maths/{ => special_numbers}/bell_numbers.py (100%) rename maths/{ => special_numbers}/carmichael_number.py (100%) rename maths/{ => special_numbers}/catalan_number.py (100%) rename maths/{ => special_numbers}/hamming_numbers.py (100%) rename maths/{ => special_numbers}/harshad_numbers.py (96%) rename maths/{ => special_numbers}/hexagonal_number.py (100%) rename maths/{ => special_numbers}/krishnamurthy_number.py (100%) rename maths/{ => special_numbers}/perfect_number.py (100%) rename maths/{ => special_numbers}/polygonal_numbers.py (100%) rename maths/{ => special_numbers}/pronic_number.py (100%) rename maths/{ => special_numbers}/proth_number.py (100%) rename maths/{ => special_numbers}/ugly_numbers.py (96%) rename maths/{ => special_numbers}/weird_number.py (100%) diff --git a/maths/armstrong_numbers.py b/maths/special_numbers/armstrong_numbers.py similarity index 96% rename from maths/armstrong_numbers.py rename to maths/special_numbers/armstrong_numbers.py index
e1c25d4676c3..b037aacb16c3 100644 --- a/maths/armstrong_numbers.py +++ b/maths/special_numbers/armstrong_numbers.py @@ -1,98 +1,98 @@ -""" -An Armstrong number is equal to the sum of its own digits each raised to the -power of the number of digits. - -For example, 370 is an Armstrong number because 3*3*3 + 7*7*7 + 0*0*0 = 370. - -Armstrong numbers are also called Narcissistic numbers and Pluperfect numbers. - -On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188 -""" -PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401) -FAILING: tuple = (-153, -1, 0, 1.2, 200, "A", [], {}, None) - - -def armstrong_number(n: int) -> bool: - """ - Return True if n is an Armstrong number or False if it is not. - - >>> all(armstrong_number(n) for n in PASSING) - True - >>> any(armstrong_number(n) for n in FAILING) - False - """ - if not isinstance(n, int) or n < 1: - return False - - # Initialization of sum and number of digits. - total = 0 - number_of_digits = 0 - temp = n - # Calculation of digits of the number - number_of_digits = len(str(n)) - # Dividing number into separate digits and find Armstrong number - temp = n - while temp > 0: - rem = temp % 10 - total += rem**number_of_digits - temp //= 10 - return n == total - - -def pluperfect_number(n: int) -> bool: - """Return True if n is a pluperfect number or False if it is not - - >>> all(armstrong_number(n) for n in PASSING) - True - >>> any(armstrong_number(n) for n in FAILING) - False - """ - if not isinstance(n, int) or n < 1: - return False - - # Init a "histogram" of the digits - digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - digit_total = 0 - total = 0 - temp = n - while temp > 0: - temp, rem = divmod(temp, 10) - digit_histogram[rem] += 1 - digit_total += 1 - - for cnt, i in zip(digit_histogram, range(len(digit_histogram))): - total += cnt * i**digit_total - - return n == total - - -def narcissistic_number(n: int) -> bool: - """Return True if n is a narcissistic number or False if it is not. - - >>> all(armstrong_number(n) for n in PASSING) - True - >>> any(armstrong_number(n) for n in FAILING) - False - """ - if not isinstance(n, int) or n < 1: - return False - expo = len(str(n)) # the power that all digits will be raised to - # check if sum of each digit multiplied expo times is equal to number - return n == sum(int(i) ** expo for i in str(n)) - - -def main(): - """ - Request that user input an integer and tell them if it is Armstrong number. - """ - num = int(input("Enter an integer to see if it is an Armstrong number: ").strip()) - print(f"{num} is {'' if armstrong_number(num) else 'not '}an Armstrong number.") - print(f"{num} is {'' if narcissistic_number(num) else 'not '}an Armstrong number.") - print(f"{num} is {'' if pluperfect_number(num) else 'not '}an Armstrong number.") - - -if __name__ == "__main__": - import doctest - - doctest.testmod() - main() +""" +An Armstrong number is equal to the sum of its own digits each raised to the +power of the number of digits. + +For example, 370 is an Armstrong number because 3*3*3 + 7*7*7 + 0*0*0 = 370. + +Armstrong numbers are also called Narcissistic numbers and Pluperfect numbers. + +On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188 +""" +PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401) +FAILING: tuple = (-153, -1, 0, 1.2, 200, "A", [], {}, None) + + +def armstrong_number(n: int) -> bool: + """ + Return True if n is an Armstrong number or False if it is not. 
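A quick stand-alone check of that definition, using 153 from the PASSING constants above; it mirrors the compact digit-string formulation that narcissistic_number uses later in this file:

    n = 153
    assert sum(int(digit) ** len(str(n)) for digit in str(n)) == n  # 1 + 125 + 27 == 153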
+ + >>> all(armstrong_number(n) for n in PASSING) + True + >>> any(armstrong_number(n) for n in FAILING) + False + """ + if not isinstance(n, int) or n < 1: + return False + + # Initialization of sum and number of digits. + total = 0 + number_of_digits = 0 + temp = n + # Calculation of digits of the number + number_of_digits = len(str(n)) + # Dividing number into separate digits and find Armstrong number + temp = n + while temp > 0: + rem = temp % 10 + total += rem**number_of_digits + temp //= 10 + return n == total + + +def pluperfect_number(n: int) -> bool: + """Return True if n is a pluperfect number or False if it is not + + >>> all(armstrong_number(n) for n in PASSING) + True + >>> any(armstrong_number(n) for n in FAILING) + False + """ + if not isinstance(n, int) or n < 1: + return False + + # Init a "histogram" of the digits + digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + digit_total = 0 + total = 0 + temp = n + while temp > 0: + temp, rem = divmod(temp, 10) + digit_histogram[rem] += 1 + digit_total += 1 + + for cnt, i in zip(digit_histogram, range(len(digit_histogram))): + total += cnt * i**digit_total + + return n == total + + +def narcissistic_number(n: int) -> bool: + """Return True if n is a narcissistic number or False if it is not. + + >>> all(armstrong_number(n) for n in PASSING) + True + >>> any(armstrong_number(n) for n in FAILING) + False + """ + if not isinstance(n, int) or n < 1: + return False + expo = len(str(n)) # the power that all digits will be raised to + # check if sum of each digit multiplied expo times is equal to number + return n == sum(int(i) ** expo for i in str(n)) + + +def main(): + """ + Request that user input an integer and tell them if it is Armstrong number. + """ + num = int(input("Enter an integer to see if it is an Armstrong number: ").strip()) + print(f"{num} is {'' if armstrong_number(num) else 'not '}an Armstrong number.") + print(f"{num} is {'' if narcissistic_number(num) else 'not '}an Armstrong number.") + print(f"{num} is {'' if pluperfect_number(num) else 'not '}an Armstrong number.") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() diff --git a/maths/automorphic_number.py b/maths/special_numbers/automorphic_number.py similarity index 100% rename from maths/automorphic_number.py rename to maths/special_numbers/automorphic_number.py diff --git a/maths/bell_numbers.py b/maths/special_numbers/bell_numbers.py similarity index 100% rename from maths/bell_numbers.py rename to maths/special_numbers/bell_numbers.py diff --git a/maths/carmichael_number.py b/maths/special_numbers/carmichael_number.py similarity index 100% rename from maths/carmichael_number.py rename to maths/special_numbers/carmichael_number.py diff --git a/maths/catalan_number.py b/maths/special_numbers/catalan_number.py similarity index 100% rename from maths/catalan_number.py rename to maths/special_numbers/catalan_number.py diff --git a/maths/hamming_numbers.py b/maths/special_numbers/hamming_numbers.py similarity index 100% rename from maths/hamming_numbers.py rename to maths/special_numbers/hamming_numbers.py diff --git a/maths/harshad_numbers.py b/maths/special_numbers/harshad_numbers.py similarity index 96% rename from maths/harshad_numbers.py rename to maths/special_numbers/harshad_numbers.py index 050c69e0bd15..61667adfa127 100644 --- a/maths/harshad_numbers.py +++ b/maths/special_numbers/harshad_numbers.py @@ -1,158 +1,158 @@ -""" -A harshad number (or more specifically an n-harshad number) is a number that's -divisible by the sum of 
its digits in some given base n. -Reference: https://en.wikipedia.org/wiki/Harshad_number -""" - - -def int_to_base(number: int, base: int) -> str: - """ - Convert a given positive decimal integer to base 'base'. - Where 'base' ranges from 2 to 36. - - Examples: - >>> int_to_base(23, 2) - '10111' - >>> int_to_base(58, 5) - '213' - >>> int_to_base(167, 16) - 'A7' - >>> # bases below 2 and beyond 36 will error - >>> int_to_base(98, 1) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - >>> int_to_base(98, 37) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - """ - - if base < 2 or base > 36: - raise ValueError("'base' must be between 2 and 36 inclusive") - - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" - result = "" - - if number < 0: - raise ValueError("number must be a positive integer") - - while number > 0: - number, remainder = divmod(number, base) - result = digits[remainder] + result - - if result == "": - result = "0" - - return result - - -def sum_of_digits(num: int, base: int) -> str: - """ - Calculate the sum of digit values in a positive integer - converted to the given 'base'. - Where 'base' ranges from 2 to 36. - - Examples: - >>> sum_of_digits(103, 12) - '13' - >>> sum_of_digits(1275, 4) - '30' - >>> sum_of_digits(6645, 2) - '1001' - >>> # bases below 2 and beyond 36 will error - >>> sum_of_digits(543, 1) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - >>> sum_of_digits(543, 37) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - """ - - if base < 2 or base > 36: - raise ValueError("'base' must be between 2 and 36 inclusive") - - num_str = int_to_base(num, base) - res = sum(int(char, base) for char in num_str) - res_str = int_to_base(res, base) - return res_str - - -def harshad_numbers_in_base(limit: int, base: int) -> list[str]: - """ - Finds all Harshad numbers smaller than num in base 'base'. - Where 'base' ranges from 2 to 36. - - Examples: - >>> harshad_numbers_in_base(15, 2) - ['1', '10', '100', '110', '1000', '1010', '1100'] - >>> harshad_numbers_in_base(12, 34) - ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B'] - >>> harshad_numbers_in_base(12, 4) - ['1', '2', '3', '10', '12', '20', '21'] - >>> # bases below 2 and beyond 36 will error - >>> harshad_numbers_in_base(234, 37) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - >>> harshad_numbers_in_base(234, 1) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - """ - - if base < 2 or base > 36: - raise ValueError("'base' must be between 2 and 36 inclusive") - - if limit < 0: - return [] - - numbers = [ - int_to_base(i, base) - for i in range(1, limit) - if i % int(sum_of_digits(i, base), base) == 0 - ] - - return numbers - - -def is_harshad_number_in_base(num: int, base: int) -> bool: - """ - Determines whether n in base 'base' is a harshad number. - Where 'base' ranges from 2 to 36. - - Examples: - >>> is_harshad_number_in_base(18, 10) - True - >>> is_harshad_number_in_base(21, 10) - True - >>> is_harshad_number_in_base(-21, 5) - False - >>> # bases below 2 and beyond 36 will error - >>> is_harshad_number_in_base(45, 37) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - >>> is_harshad_number_in_base(45, 1) - Traceback (most recent call last): - ... 
- ValueError: 'base' must be between 2 and 36 inclusive - """ - - if base < 2 or base > 36: - raise ValueError("'base' must be between 2 and 36 inclusive") - - if num < 0: - return False - - n = int_to_base(num, base) - d = sum_of_digits(num, base) - return int(n, base) % int(d, base) == 0 - - -if __name__ == "__main__": - import doctest - - doctest.testmod() +""" +A harshad number (or more specifically an n-harshad number) is a number that's +divisible by the sum of its digits in some given base n. +Reference: https://en.wikipedia.org/wiki/Harshad_number +""" + + +def int_to_base(number: int, base: int) -> str: + """ + Convert a given positive decimal integer to base 'base'. + Where 'base' ranges from 2 to 36. + + Examples: + >>> int_to_base(23, 2) + '10111' + >>> int_to_base(58, 5) + '213' + >>> int_to_base(167, 16) + 'A7' + >>> # bases below 2 and beyond 36 will error + >>> int_to_base(98, 1) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + >>> int_to_base(98, 37) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + """ + + if base < 2 or base > 36: + raise ValueError("'base' must be between 2 and 36 inclusive") + + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + result = "" + + if number < 0: + raise ValueError("number must be a positive integer") + + while number > 0: + number, remainder = divmod(number, base) + result = digits[remainder] + result + + if result == "": + result = "0" + + return result + + +def sum_of_digits(num: int, base: int) -> str: + """ + Calculate the sum of digit values in a positive integer + converted to the given 'base'. + Where 'base' ranges from 2 to 36. + + Examples: + >>> sum_of_digits(103, 12) + '13' + >>> sum_of_digits(1275, 4) + '30' + >>> sum_of_digits(6645, 2) + '1001' + >>> # bases below 2 and beyond 36 will error + >>> sum_of_digits(543, 1) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + >>> sum_of_digits(543, 37) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + """ + + if base < 2 or base > 36: + raise ValueError("'base' must be between 2 and 36 inclusive") + + num_str = int_to_base(num, base) + res = sum(int(char, base) for char in num_str) + res_str = int_to_base(res, base) + return res_str + + +def harshad_numbers_in_base(limit: int, base: int) -> list[str]: + """ + Finds all Harshad numbers smaller than num in base 'base'. + Where 'base' ranges from 2 to 36. + + Examples: + >>> harshad_numbers_in_base(15, 2) + ['1', '10', '100', '110', '1000', '1010', '1100'] + >>> harshad_numbers_in_base(12, 34) + ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B'] + >>> harshad_numbers_in_base(12, 4) + ['1', '2', '3', '10', '12', '20', '21'] + >>> # bases below 2 and beyond 36 will error + >>> harshad_numbers_in_base(234, 37) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + >>> harshad_numbers_in_base(234, 1) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + """ + + if base < 2 or base > 36: + raise ValueError("'base' must be between 2 and 36 inclusive") + + if limit < 0: + return [] + + numbers = [ + int_to_base(i, base) + for i in range(1, limit) + if i % int(sum_of_digits(i, base), base) == 0 + ] + + return numbers + + +def is_harshad_number_in_base(num: int, base: int) -> bool: + """ + Determines whether n in base 'base' is a harshad number. 
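In base 10 this reduces to the familiar digit-sum test: 18 has digit sum 1 + 8 == 9 and 18 % 9 == 0, while 19 fails because 19 % (1 + 9) != 0. A minimal base-10 sketch of the check:

    assert 18 % sum(int(digit) for digit in str(18)) == 0   # 18 is a harshad number
    assert 19 % sum(int(digit) for digit in str(19)) != 0   # 19 is not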
+ Where 'base' ranges from 2 to 36. + + Examples: + >>> is_harshad_number_in_base(18, 10) + True + >>> is_harshad_number_in_base(21, 10) + True + >>> is_harshad_number_in_base(-21, 5) + False + >>> # bases below 2 and beyond 36 will error + >>> is_harshad_number_in_base(45, 37) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + >>> is_harshad_number_in_base(45, 1) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + """ + + if base < 2 or base > 36: + raise ValueError("'base' must be between 2 and 36 inclusive") + + if num < 0: + return False + + n = int_to_base(num, base) + d = sum_of_digits(num, base) + return int(n, base) % int(d, base) == 0 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/maths/hexagonal_number.py b/maths/special_numbers/hexagonal_number.py similarity index 100% rename from maths/hexagonal_number.py rename to maths/special_numbers/hexagonal_number.py diff --git a/maths/krishnamurthy_number.py b/maths/special_numbers/krishnamurthy_number.py similarity index 100% rename from maths/krishnamurthy_number.py rename to maths/special_numbers/krishnamurthy_number.py diff --git a/maths/perfect_number.py b/maths/special_numbers/perfect_number.py similarity index 100% rename from maths/perfect_number.py rename to maths/special_numbers/perfect_number.py diff --git a/maths/polygonal_numbers.py b/maths/special_numbers/polygonal_numbers.py similarity index 100% rename from maths/polygonal_numbers.py rename to maths/special_numbers/polygonal_numbers.py diff --git a/maths/pronic_number.py b/maths/special_numbers/pronic_number.py similarity index 100% rename from maths/pronic_number.py rename to maths/special_numbers/pronic_number.py diff --git a/maths/proth_number.py b/maths/special_numbers/proth_number.py similarity index 100% rename from maths/proth_number.py rename to maths/special_numbers/proth_number.py diff --git a/maths/ugly_numbers.py b/maths/special_numbers/ugly_numbers.py similarity index 96% rename from maths/ugly_numbers.py rename to maths/special_numbers/ugly_numbers.py index 81bd928c6b3d..c6ceb784622a 100644 --- a/maths/ugly_numbers.py +++ b/maths/special_numbers/ugly_numbers.py @@ -1,54 +1,54 @@ -""" -Ugly numbers are numbers whose only prime factors are 2, 3 or 5. The sequence -1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, … shows the first 11 ugly numbers. By convention, -1 is included. -Given an integer n, we have to find the nth ugly number. - -For more details, refer this article -https://www.geeksforgeeks.org/ugly-numbers/ -""" - - -def ugly_numbers(n: int) -> int: - """ - Returns the nth ugly number. - >>> ugly_numbers(100) - 1536 - >>> ugly_numbers(0) - 1 - >>> ugly_numbers(20) - 36 - >>> ugly_numbers(-5) - 1 - >>> ugly_numbers(-5.5) - Traceback (most recent call last): - ... 
- TypeError: 'float' object cannot be interpreted as an integer - """ - ugly_nums = [1] - - i2, i3, i5 = 0, 0, 0 - next_2 = ugly_nums[i2] * 2 - next_3 = ugly_nums[i3] * 3 - next_5 = ugly_nums[i5] * 5 - - for _ in range(1, n): - next_num = min(next_2, next_3, next_5) - ugly_nums.append(next_num) - if next_num == next_2: - i2 += 1 - next_2 = ugly_nums[i2] * 2 - if next_num == next_3: - i3 += 1 - next_3 = ugly_nums[i3] * 3 - if next_num == next_5: - i5 += 1 - next_5 = ugly_nums[i5] * 5 - return ugly_nums[-1] - - -if __name__ == "__main__": - from doctest import testmod - - testmod(verbose=True) - print(f"{ugly_numbers(200) = }") +""" +Ugly numbers are numbers whose only prime factors are 2, 3 or 5. The sequence +1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, … shows the first 11 ugly numbers. By convention, +1 is included. +Given an integer n, we have to find the nth ugly number. + +For more details, refer this article +https://www.geeksforgeeks.org/ugly-numbers/ +""" + + +def ugly_numbers(n: int) -> int: + """ + Returns the nth ugly number. + >>> ugly_numbers(100) + 1536 + >>> ugly_numbers(0) + 1 + >>> ugly_numbers(20) + 36 + >>> ugly_numbers(-5) + 1 + >>> ugly_numbers(-5.5) + Traceback (most recent call last): + ... + TypeError: 'float' object cannot be interpreted as an integer + """ + ugly_nums = [1] + + i2, i3, i5 = 0, 0, 0 + next_2 = ugly_nums[i2] * 2 + next_3 = ugly_nums[i3] * 3 + next_5 = ugly_nums[i5] * 5 + + for _ in range(1, n): + next_num = min(next_2, next_3, next_5) + ugly_nums.append(next_num) + if next_num == next_2: + i2 += 1 + next_2 = ugly_nums[i2] * 2 + if next_num == next_3: + i3 += 1 + next_3 = ugly_nums[i3] * 3 + if next_num == next_5: + i5 += 1 + next_5 = ugly_nums[i5] * 5 + return ugly_nums[-1] + + +if __name__ == "__main__": + from doctest import testmod + + testmod(verbose=True) + print(f"{ugly_numbers(200) = }") diff --git a/maths/weird_number.py b/maths/special_numbers/weird_number.py similarity index 100% rename from maths/weird_number.py rename to maths/special_numbers/weird_number.py From ce0ede6476fb14ba18ef03246b169a7e5615bdec Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 20 Oct 2023 03:08:23 -0400 Subject: [PATCH 1189/1543] Fix typo in DPLL file name (#10723) * Fix DPLL file name * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 46 +++++++++++-------- ...d.py => davis_putnam_logemann_loveland.py} | 0 2 files changed, 28 insertions(+), 18 deletions(-) rename other/{davisb_putnamb_logemannb_loveland.py => davis_putnam_logemann_loveland.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 1320c70ef629..5b7ca856ea15 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -65,7 +65,9 @@ ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) + * [Imply Gate](boolean_algebra/imply_gate.py) * [Nand Gate](boolean_algebra/nand_gate.py) + * [Nimply Gate](boolean_algebra/nimply_gate.py) * [Nor Gate](boolean_algebra/nor_gate.py) * [Not Gate](boolean_algebra/not_gate.py) * [Or Gate](boolean_algebra/or_gate.py) @@ -178,7 +180,9 @@ ## Data Structures * Arrays * [Equilibrium Index In Array](data_structures/arrays/equilibrium_index_in_array.py) + * [Find Triplets With 0 Sum](data_structures/arrays/find_triplets_with_0_sum.py) * [Median Two Array](data_structures/arrays/median_two_array.py) + * [Pairs With Given Sum](data_structures/arrays/pairs_with_given_sum.py) * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product 
Sum](data_structures/arrays/product_sum.py) @@ -398,6 +402,7 @@ ## Financial * [Equated Monthly Installments](financial/equated_monthly_installments.py) + * [Exponential Moving Average](financial/exponential_moving_average.py) * [Interest](financial/interest.py) * [Present Value](financial/present_value.py) * [Price Plus Tax](financial/price_plus_tax.py) @@ -433,8 +438,7 @@ * [Breadth First Search Shortest Path](graphs/breadth_first_search_shortest_path.py) * [Breadth First Search Shortest Path 2](graphs/breadth_first_search_shortest_path_2.py) * [Breadth First Search Zero One Shortest Path](graphs/breadth_first_search_zero_one_shortest_path.py) - * [Check Bipartite Graph Bfs](graphs/check_bipartite_graph_bfs.py) - * [Check Bipartite Graph Dfs](graphs/check_bipartite_graph_dfs.py) + * [Check Bipatrite](graphs/check_bipatrite.py) * [Check Cycle](graphs/check_cycle.py) * [Connected Components](graphs/connected_components.py) * [Deep Clone Graph](graphs/deep_clone_graph.py) @@ -572,8 +576,6 @@ * [Arc Length](maths/arc_length.py) * [Area](maths/area.py) * [Area Under Curve](maths/area_under_curve.py) - * [Armstrong Numbers](maths/armstrong_numbers.py) - * [Automorphic Number](maths/automorphic_number.py) * [Average Absolute Deviation](maths/average_absolute_deviation.py) * [Average Mean](maths/average_mean.py) * [Average Median](maths/average_median.py) @@ -581,7 +583,6 @@ * [Bailey Borwein Plouffe](maths/bailey_borwein_plouffe.py) * [Base Neg2 Conversion](maths/base_neg2_conversion.py) * [Basic Maths](maths/basic_maths.py) - * [Bell Numbers](maths/bell_numbers.py) * [Binary Exp Mod](maths/binary_exp_mod.py) * [Binary Exponentiation](maths/binary_exponentiation.py) * [Binary Exponentiation 2](maths/binary_exponentiation_2.py) @@ -589,8 +590,6 @@ * [Binomial Coefficient](maths/binomial_coefficient.py) * [Binomial Distribution](maths/binomial_distribution.py) * [Bisection](maths/bisection.py) - * [Carmichael Number](maths/carmichael_number.py) - * [Catalan Number](maths/catalan_number.py) * [Ceil](maths/ceil.py) * [Chebyshev Distance](maths/chebyshev_distance.py) * [Check Polygon](maths/check_polygon.py) @@ -623,10 +622,7 @@ * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) - * [Hamming Numbers](maths/hamming_numbers.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) - * [Harshad Numbers](maths/harshad_numbers.py) - * [Hexagonal Number](maths/hexagonal_number.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) * [Interquartile Range](maths/interquartile_range.py) * [Is Int Palindrome](maths/is_int_palindrome.py) @@ -636,7 +632,6 @@ * [Joint Probability Distribution](maths/joint_probability_distribution.py) * [Juggler Sequence](maths/juggler_sequence.py) * [Karatsuba](maths/karatsuba.py) - * [Krishnamurthy Number](maths/krishnamurthy_number.py) * [Kth Lexicographic Permutation](maths/kth_lexicographic_permutation.py) * [Largest Of Very Large Numbers](maths/largest_of_very_large_numbers.py) * [Least Common Multiple](maths/least_common_multiple.py) @@ -661,14 +656,12 @@ * [Numerical Integration](maths/numerical_integration.py) * [Odd Sieve](maths/odd_sieve.py) * [Perfect Cube](maths/perfect_cube.py) - * [Perfect Number](maths/perfect_number.py) * [Perfect Square](maths/perfect_square.py) * [Persistence](maths/persistence.py) * [Pi Generator](maths/pi_generator.py) * [Pi Monte Carlo Estimation](maths/pi_monte_carlo_estimation.py) * [Points Are Collinear 
3D](maths/points_are_collinear_3d.py) * [Pollard Rho](maths/pollard_rho.py) - * [Polygonal Numbers](maths/polygonal_numbers.py) * [Polynomial Evaluation](maths/polynomial_evaluation.py) * Polynomials * [Single Indeterminate Operations](maths/polynomials/single_indeterminate_operations.py) @@ -679,8 +672,6 @@ * [Prime Sieve Eratosthenes](maths/prime_sieve_eratosthenes.py) * [Primelib](maths/primelib.py) * [Print Multiplication Table](maths/print_multiplication_table.py) - * [Pronic Number](maths/pronic_number.py) - * [Proth Number](maths/proth_number.py) * [Pythagoras](maths/pythagoras.py) * [Qr Decomposition](maths/qr_decomposition.py) * [Quadratic Equations Complex Numbers](maths/quadratic_equations_complex_numbers.py) @@ -706,6 +697,23 @@ * [Sin](maths/sin.py) * [Sock Merchant](maths/sock_merchant.py) * [Softmax](maths/softmax.py) + * [Solovay Strassen Primality Test](maths/solovay_strassen_primality_test.py) + * Special Numbers + * [Armstrong Numbers](maths/special_numbers/armstrong_numbers.py) + * [Automorphic Number](maths/special_numbers/automorphic_number.py) + * [Bell Numbers](maths/special_numbers/bell_numbers.py) + * [Carmichael Number](maths/special_numbers/carmichael_number.py) + * [Catalan Number](maths/special_numbers/catalan_number.py) + * [Hamming Numbers](maths/special_numbers/hamming_numbers.py) + * [Harshad Numbers](maths/special_numbers/harshad_numbers.py) + * [Hexagonal Number](maths/special_numbers/hexagonal_number.py) + * [Krishnamurthy Number](maths/special_numbers/krishnamurthy_number.py) + * [Perfect Number](maths/special_numbers/perfect_number.py) + * [Polygonal Numbers](maths/special_numbers/polygonal_numbers.py) + * [Pronic Number](maths/special_numbers/pronic_number.py) + * [Proth Number](maths/special_numbers/proth_number.py) + * [Ugly Numbers](maths/special_numbers/ugly_numbers.py) + * [Weird Number](maths/special_numbers/weird_number.py) * [Square Root](maths/square_root.py) * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) * [Sum Of Digits](maths/sum_of_digits.py) @@ -721,9 +729,7 @@ * [Twin Prime](maths/twin_prime.py) * [Two Pointer](maths/two_pointer.py) * [Two Sum](maths/two_sum.py) - * [Ugly Numbers](maths/ugly_numbers.py) * [Volume](maths/volume.py) - * [Weird Number](maths/weird_number.py) * [Zellers Congruence](maths/zellers_congruence.py) ## Matrix @@ -747,6 +753,7 @@ * [Spiral Print](matrix/spiral_print.py) * Tests * [Test Matrix Operation](matrix/tests/test_matrix_operation.py) + * [Validate Sudoku Board](matrix/validate_sudoku_board.py) ## Networking Flow * [Ford Fulkerson](networking_flow/ford_fulkerson.py) @@ -773,7 +780,7 @@ ## Other * [Activity Selection](other/activity_selection.py) * [Alternative List Arrange](other/alternative_list_arrange.py) - * [Davisb Putnamb Logemannb Loveland](other/davisb_putnamb_logemannb_loveland.py) + * [Davis Putnam Logemann Loveland](other/davis_putnam_logemann_loveland.py) * [Dijkstra Bankers Algorithm](other/dijkstra_bankers_algorithm.py) * [Doomsday](other/doomsday.py) * [Fischer Yates Shuffle](other/fischer_yates_shuffle.py) @@ -822,6 +829,7 @@ * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) * [Speed Of Sound](physics/speed_of_sound.py) + * [Speeds Of Gas Molecules](physics/speeds_of_gas_molecules.py) ## Project Euler * Problem 001 @@ -1212,6 +1220,7 @@ * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) * [Credit Card Validator](strings/credit_card_validator.py) + * [Damerau Levenshtein 
Distance](strings/damerau_levenshtein_distance.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) * [Dna](strings/dna.py) * [Edit Distance](strings/edit_distance.py) @@ -1246,6 +1255,7 @@ * [String Switch Case](strings/string_switch_case.py) * [Strip](strings/strip.py) * [Text Justification](strings/text_justification.py) + * [Title](strings/title.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) diff --git a/other/davisb_putnamb_logemannb_loveland.py b/other/davis_putnam_logemann_loveland.py similarity index 100% rename from other/davisb_putnamb_logemannb_loveland.py rename to other/davis_putnam_logemann_loveland.py From 579937613a6dc7e099b710e3d57767a2fab115ad Mon Sep 17 00:00:00 2001 From: Saptadeep Banerjee <69459134+imSanko@users.noreply.github.com> Date: Fri, 20 Oct 2023 16:32:30 +0530 Subject: [PATCH 1190/1543] Added New Tests in Signum (#10724) * Added new tests! * [ADD]: Inproved Tests * fixed * Removed spaces * Changed the file name * Added Changes * changed the code and kept the test cases * changed the code and kept the test cases * missed the line * removed spaces * Update power_using_recursion.py * Added new tests in Signum * Few things added * Removed few stuff and added few changes * Fixed few things * Reverted the function * Update maths/signum.py Co-authored-by: Christian Clauss * Added few things * Update maths/signum.py Co-authored-by: Christian Clauss * Added the type hint back * Update signum.py --------- Co-authored-by: Christian Clauss --- maths/signum.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/maths/signum.py b/maths/signum.py index 148f931767c1..c89753e76637 100644 --- a/maths/signum.py +++ b/maths/signum.py @@ -7,12 +7,29 @@ def signum(num: float) -> int: """ Applies signum function on the number + Custom test cases: >>> signum(-10) -1 >>> signum(10) 1 >>> signum(0) 0 + >>> signum(-20.5) + -1 + >>> signum(20.5) + 1 + >>> signum(-1e-6) + -1 + >>> signum(1e-6) + 1 + >>> signum("Hello") + Traceback (most recent call last): + ... + TypeError: '<' not supported between instances of 'str' and 'int' + >>> signum([]) + Traceback (most recent call last): + ... 
+ TypeError: '<' not supported between instances of 'list' and 'int' """ if num < 0: return -1 @@ -22,10 +39,17 @@ def signum(num: float) -> int: def test_signum() -> None: """ Tests the signum function + >>> test_signum() """ assert signum(5) == 1 assert signum(-5) == -1 assert signum(0) == 0 + assert signum(10.5) == 1 + assert signum(-10.5) == -1 + assert signum(1e-6) == 1 + assert signum(-1e-6) == -1 + assert signum(123456789) == 1 + assert signum(-123456789) == -1 if __name__ == "__main__": From 52a987ea2f299c8215c1107b8dd793919c962f10 Mon Sep 17 00:00:00 2001 From: Ope Oluwaferanmi <111365699+FEROS01@users.noreply.github.com> Date: Fri, 20 Oct 2023 22:28:21 +0100 Subject: [PATCH 1191/1543] Add docstrings and doctests and fix a bug ciphers/trifid_cipher.py (#10716) * Added docstrings,doctests and fixed a bug * Added docstrings,doctests and fixed a bug * Added docstrings,doctests and fixed a bug * Added docstrings and doctests with a bug fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added docstrings and doctests with a bug fix * Update ciphers/trifid_cipher.py Co-authored-by: Christian Clauss * Update ciphers/trifid_cipher.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Docstrings edit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update trifid_cipher.py * Update pyproject.toml --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ciphers/trifid_cipher.py | 191 +++++++++++++++++++++++++++------------ pyproject.toml | 2 +- 2 files changed, 134 insertions(+), 59 deletions(-) diff --git a/ciphers/trifid_cipher.py b/ciphers/trifid_cipher.py index 8aa2263ca5ac..16b9faf67688 100644 --- a/ciphers/trifid_cipher.py +++ b/ciphers/trifid_cipher.py @@ -1,15 +1,35 @@ -# https://en.wikipedia.org/wiki/Trifid_cipher +""" +The trifid cipher uses a table to fractionate each plaintext letter into a trigram, +mixes the constituents of the trigrams, and then applies the table in reverse to turn +these mixed trigrams into ciphertext letters. + +https://en.wikipedia.org/wiki/Trifid_cipher +""" + from __future__ import annotations +# fmt: off +TEST_CHARACTER_TO_NUMBER = { + "A": "111", "B": "112", "C": "113", "D": "121", "E": "122", "F": "123", "G": "131", + "H": "132", "I": "133", "J": "211", "K": "212", "L": "213", "M": "221", "N": "222", + "O": "223", "P": "231", "Q": "232", "R": "233", "S": "311", "T": "312", "U": "313", + "V": "321", "W": "322", "X": "323", "Y": "331", "Z": "332", "+": "333", +} +# fmt: off -def __encrypt_part(message_part: str, character_to_number: dict[str, str]) -> str: - one, two, three = "", "", "" - tmp = [] +TEST_NUMBER_TO_CHARACTER = {val: key for key, val in TEST_CHARACTER_TO_NUMBER.items()} - for character in message_part: - tmp.append(character_to_number[character]) - for each in tmp: +def __encrypt_part(message_part: str, character_to_number: dict[str, str]) -> str: + """ + Arrange the triagram value of each letter of 'message_part' vertically and join + them horizontally. 
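To make the column-wise reading concrete, here is a tiny stand-alone sketch of the same fractionation for 'ASK', whose trigrams under TEST_CHARACTER_TO_NUMBER are 111, 311 and 212; the doctest that follows returns the same string:

    trigrams = ["111", "311", "212"]          # 'A', 'S', 'K'
    one = "".join(t[0] for t in trigrams)     # '132'
    two = "".join(t[1] for t in trigrams)     # '111'
    three = "".join(t[2] for t in trigrams)   # '112'
    assert one + two + three == "132111112"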
+ + >>> __encrypt_part('ASK', TEST_CHARACTER_TO_NUMBER) + '132111112' + """ + one, two, three = "", "", "" + for each in (character_to_number[character] for character in message_part): one += each[0] two += each[1] three += each[2] @@ -20,12 +40,16 @@ def __encrypt_part(message_part: str, character_to_number: dict[str, str]) -> st def __decrypt_part( message_part: str, character_to_number: dict[str, str] ) -> tuple[str, str, str]: - tmp, this_part = "", "" + """ + Convert each letter of the input string into their respective trigram values, join + them and split them into three equal groups of strings which are returned. + + >>> __decrypt_part('ABCDE', TEST_CHARACTER_TO_NUMBER) + ('11111', '21131', '21122') + """ + this_part = "".join(character_to_number[character] for character in message_part) result = [] - - for character in message_part: - this_part += character_to_number[character] - + tmp = "" for digit in this_part: tmp += digit if len(tmp) == len(message_part): @@ -38,6 +62,42 @@ def __decrypt_part( def __prepare( message: str, alphabet: str ) -> tuple[str, str, dict[str, str], dict[str, str]]: + """ + A helper function that generates the triagrams and assigns each letter of the + alphabet to its corresponding triagram and stores this in a dictionary + ("character_to_number" and "number_to_character") after confirming if the + alphabet's length is 27. + + >>> test = __prepare('I aM a BOy','abCdeFghijkLmnopqrStuVwxYZ+') + >>> expected = ('IAMABOY','ABCDEFGHIJKLMNOPQRSTUVWXYZ+', + ... TEST_CHARACTER_TO_NUMBER, TEST_NUMBER_TO_CHARACTER) + >>> test == expected + True + + Testing with incomplete alphabet + >>> __prepare('I aM a BOy','abCdeFghijkLmnopqrStuVw') + Traceback (most recent call last): + ... + KeyError: 'Length of alphabet has to be 27.' + + Testing with extra long alphabets + >>> __prepare('I aM a BOy','abCdeFghijkLmnopqrStuVwxyzzwwtyyujjgfd') + Traceback (most recent call last): + ... + KeyError: 'Length of alphabet has to be 27.' + + Testing with punctuations that are not in the given alphabet + >>> __prepare('am i a boy?','abCdeFghijkLmnopqrStuVwxYZ+') + Traceback (most recent call last): + ... + ValueError: Each message character has to be included in alphabet! + + Testing with numbers + >>> __prepare(500,'abCdeFghijkLmnopqrStuVwxYZ+') + Traceback (most recent call last): + ... 
+ AttributeError: 'int' object has no attribute 'replace' + """ # Validate message and alphabet, set to upper and remove spaces alphabet = alphabet.replace(" ", "").upper() message = message.replace(" ", "").upper() @@ -45,45 +105,14 @@ def __prepare( # Check length and characters if len(alphabet) != 27: raise KeyError("Length of alphabet has to be 27.") - for each in message: - if each not in alphabet: - raise ValueError("Each message character has to be included in alphabet!") + if any(char not in alphabet for char in message): + raise ValueError("Each message character has to be included in alphabet!") # Generate dictionares - numbers = ( - "111", - "112", - "113", - "121", - "122", - "123", - "131", - "132", - "133", - "211", - "212", - "213", - "221", - "222", - "223", - "231", - "232", - "233", - "311", - "312", - "313", - "321", - "322", - "323", - "331", - "332", - "333", - ) - character_to_number = {} - number_to_character = {} - for letter, number in zip(alphabet, numbers): - character_to_number[letter] = number - number_to_character[number] = letter + character_to_number = dict(zip(alphabet, TEST_CHARACTER_TO_NUMBER.values())) + number_to_character = { + number: letter for letter, number in character_to_number.items() + } return message, alphabet, character_to_number, number_to_character @@ -91,44 +120,90 @@ def __prepare( def encrypt_message( message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5 ) -> str: + """ + encrypt_message + =============== + + Encrypts a message using the trifid_cipher. Any punctuatuions that + would be used should be added to the alphabet. + + PARAMETERS + ---------- + + * message: The message you want to encrypt. + * alphabet (optional): The characters to be used for the cipher . + * period (optional): The number of characters you want in a group whilst + encrypting. + + >>> encrypt_message('I am a boy') + 'BCDGBQY' + + >>> encrypt_message(' ') + '' + + >>> encrypt_message(' aide toi le c iel ta id era ', + ... 'FELIXMARDSTBCGHJKNOPQUVWYZ+',5) + 'FMJFVOISSUFTFPUFEQQC' + + """ message, alphabet, character_to_number, number_to_character = __prepare( message, alphabet ) - encrypted, encrypted_numeric = "", "" + encrypted_numeric = "" for i in range(0, len(message) + 1, period): encrypted_numeric += __encrypt_part( message[i : i + period], character_to_number ) + encrypted = "" for i in range(0, len(encrypted_numeric), 3): encrypted += number_to_character[encrypted_numeric[i : i + 3]] - return encrypted def decrypt_message( message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5 ) -> str: + """ + decrypt_message + =============== + + Decrypts a trifid_cipher encrypted message . + + PARAMETERS + ---------- + + * message: The message you want to decrypt . + * alphabet (optional): The characters used for the cipher. + * period (optional): The number of characters used in grouping when it + was encrypted. 
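Because decryption simply reverses the fractionation, encrypting and then decrypting with the same alphabet and period returns the prepared (upper-cased, space-stripped) plaintext; the encrypt_message and decrypt_message doctests in this file show the matching 'BCDGBQY' / 'IAMABOY' pair. A one-line sanity check built from those doctests:

    assert decrypt_message(encrypt_message("I am a boy")) == "IAMABOY"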
+ + >>> decrypt_message('BCDGBQY') + 'IAMABOY' + + Decrypting with your own alphabet and period + >>> decrypt_message('FMJFVOISSUFTFPUFEQQC','FELIXMARDSTBCGHJKNOPQUVWYZ+',5) + 'AIDETOILECIELTAIDERA' + """ message, alphabet, character_to_number, number_to_character = __prepare( message, alphabet ) - decrypted_numeric = [] - decrypted = "" - for i in range(0, len(message) + 1, period): + decrypted_numeric = [] + for i in range(0, len(message), period): a, b, c = __decrypt_part(message[i : i + period], character_to_number) for j in range(len(a)): decrypted_numeric.append(a[j] + b[j] + c[j]) - for each in decrypted_numeric: - decrypted += number_to_character[each] - - return decrypted + return "".join(number_to_character[each] for each in decrypted_numeric) if __name__ == "__main__": + import doctest + + doctest.testmod() msg = "DEFEND THE EAST WALL OF THE CASTLE." encrypted = encrypt_message(msg, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") decrypted = decrypt_message(encrypted, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") diff --git a/pyproject.toml b/pyproject.toml index 9c9262d77748..790a328b3564 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -135,5 +135,5 @@ omit = [ sort = "Cover" [tool.codespell] -ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,zar" +ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" From 5645084dcd5cf398caefa40641ac99144a40e572 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 20 Oct 2023 17:29:42 -0400 Subject: [PATCH 1192/1543] Consolidate loss functions into a single file (#10737) * Consolidate loss functions into single file * updating DIRECTORY.md * Fix typo --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 8 +- machine_learning/loss_functions.py | 252 ++++++++++++++++++ .../loss_functions/binary_cross_entropy.py | 59 ---- .../categorical_cross_entropy.py | 85 ------ machine_learning/loss_functions/hinge_loss.py | 64 ----- machine_learning/loss_functions/huber_loss.py | 52 ---- .../loss_functions/mean_squared_error.py | 51 ---- .../mean_squared_logarithmic_error.py | 55 ---- 8 files changed, 253 insertions(+), 373 deletions(-) create mode 100644 machine_learning/loss_functions.py delete mode 100644 machine_learning/loss_functions/binary_cross_entropy.py delete mode 100644 machine_learning/loss_functions/categorical_cross_entropy.py delete mode 100644 machine_learning/loss_functions/hinge_loss.py delete mode 100644 machine_learning/loss_functions/huber_loss.py delete mode 100644 machine_learning/loss_functions/mean_squared_error.py delete mode 100644 machine_learning/loss_functions/mean_squared_logarithmic_error.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 5b7ca856ea15..b92f8f877e97 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -549,13 +549,7 @@ * Local Weighted Learning * [Local Weighted Learning](machine_learning/local_weighted_learning/local_weighted_learning.py) * [Logistic Regression](machine_learning/logistic_regression.py) - * Loss Functions - * [Binary Cross Entropy](machine_learning/loss_functions/binary_cross_entropy.py) - * [Categorical Cross Entropy](machine_learning/loss_functions/categorical_cross_entropy.py) - * [Hinge Loss](machine_learning/loss_functions/hinge_loss.py) - * [Huber Loss](machine_learning/loss_functions/huber_loss.py) - * [Mean Squared 
Error](machine_learning/loss_functions/mean_squared_error.py) - * [Mean Squared Logarithmic Error](machine_learning/loss_functions/mean_squared_logarithmic_error.py) + * [Loss Functions](machine_learning/loss_functions.py) * [Mfcc](machine_learning/mfcc.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polynomial Regression](machine_learning/polynomial_regression.py) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py new file mode 100644 index 000000000000..0fa0956ed572 --- /dev/null +++ b/machine_learning/loss_functions.py @@ -0,0 +1,252 @@ +import numpy as np + + +def binary_cross_entropy( + y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 +) -> float: + """ + Calculate the mean binary cross-entropy (BCE) loss between true labels and predicted + probabilities. + + BCE loss quantifies dissimilarity between true labels (0 or 1) and predicted + probabilities. It's widely used in binary classification tasks. + + BCE = -Σ(y_true * ln(y_pred) + (1 - y_true) * ln(1 - y_pred)) + + Reference: https://en.wikipedia.org/wiki/Cross_entropy + + Parameters: + - y_true: True binary labels (0 or 1) + - y_pred: Predicted probabilities for class 1 + - epsilon: Small constant to avoid numerical instability + + >>> true_labels = np.array([0, 1, 1, 0, 1]) + >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) + >>> binary_cross_entropy(true_labels, predicted_probs) + 0.2529995012327421 + >>> true_labels = np.array([0, 1, 1, 0, 1]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> binary_cross_entropy(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + y_pred = np.clip(y_pred, epsilon, 1 - epsilon) # Clip predictions to avoid log(0) + bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)) + return np.mean(bce_loss) + + +def categorical_cross_entropy( + y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 +) -> float: + """ + Calculate categorical cross-entropy (CCE) loss between true class labels and + predicted class probabilities. + + CCE = -Σ(y_true * ln(y_pred)) + + Reference: https://en.wikipedia.org/wiki/Cross_entropy + + Parameters: + - y_true: True class labels (one-hot encoded) + - y_pred: Predicted class probabilities + - epsilon: Small constant to avoid numerical instability + + >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + 0.567395975254385 + >>> true_labels = np.array([[1, 0], [0, 1]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same shape. + >>> true_labels = np.array([[2, 0, 1], [1, 0, 0]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: y_true must be one-hot encoded. + >>> true_labels = np.array([[1, 0, 1], [1, 0, 0]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: y_true must be one-hot encoded. 
+ >>> true_labels = np.array([[1, 0, 0], [0, 1, 0]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.1], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: Predicted probabilities must sum to approximately 1. + """ + if y_true.shape != y_pred.shape: + raise ValueError("Input arrays must have the same shape.") + + if np.any((y_true != 0) & (y_true != 1)) or np.any(y_true.sum(axis=1) != 1): + raise ValueError("y_true must be one-hot encoded.") + + if not np.all(np.isclose(np.sum(y_pred, axis=1), 1, rtol=epsilon, atol=epsilon)): + raise ValueError("Predicted probabilities must sum to approximately 1.") + + y_pred = np.clip(y_pred, epsilon, 1) # Clip predictions to avoid log(0) + return -np.sum(y_true * np.log(y_pred)) + + +def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the mean hinge loss for between true labels and predicted probabilities + for training support vector machines (SVMs). + + Hinge loss = max(0, 1 - true * pred) + + Reference: https://en.wikipedia.org/wiki/Hinge_loss + + Args: + - y_true: actual values (ground truth) encoded as -1 or 1 + - y_pred: predicted values + + >>> true_labels = np.array([-1, 1, 1, -1, 1]) + >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) + >>> hinge_loss(true_labels, pred) + 1.52 + >>> true_labels = np.array([-1, 1, 1, -1, 1, 1]) + >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) + >>> hinge_loss(true_labels, pred) + Traceback (most recent call last): + ... + ValueError: Length of predicted and actual array must be same. + >>> true_labels = np.array([-1, 1, 10, -1, 1]) + >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) + >>> hinge_loss(true_labels, pred) + Traceback (most recent call last): + ... + ValueError: y_true can have values -1 or 1 only. + """ + if len(y_true) != len(y_pred): + raise ValueError("Length of predicted and actual array must be same.") + + if np.any((y_true != -1) & (y_true != 1)): + raise ValueError("y_true can have values -1 or 1 only.") + + hinge_losses = np.maximum(0, 1.0 - (y_true * y_pred)) + return np.mean(hinge_losses) + + +def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float: + """ + Calculate the mean Huber loss between the given ground truth and predicted values. + + The Huber loss describes the penalty incurred by an estimation procedure, and it + serves as a measure of accuracy for regression models. + + Huber loss = + 0.5 * (y_true - y_pred)^2 if |y_true - y_pred| <= delta + delta * |y_true - y_pred| - 0.5 * delta^2 otherwise + + Reference: https://en.wikipedia.org/wiki/Huber_loss + + Parameters: + - y_true: The true values (ground truth) + - y_pred: The predicted values + + >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102) + True + >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0]) + >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) + >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164) + True + >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0]) + >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) + >>> huber_loss(true_labels, predicted_probs, 1.0) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. 
+ """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + huber_mse = 0.5 * (y_true - y_pred) ** 2 + huber_mae = delta * (np.abs(y_true - y_pred) - 0.5 * delta) + return np.where(np.abs(y_true - y_pred) <= delta, huber_mse, huber_mae).mean() + + +def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the mean squared error (MSE) between ground truth and predicted values. + + MSE measures the squared difference between true values and predicted values, and it + serves as a measure of accuracy for regression models. + + MSE = (1/n) * Σ(y_true - y_pred)^2 + + Reference: https://en.wikipedia.org/wiki/Mean_squared_error + + Parameters: + - y_true: The true values (ground truth) + - y_pred: The predicted values + + >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028) + True + >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> mean_squared_error(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + squared_errors = (y_true - y_pred) ** 2 + return np.mean(squared_errors) + + +def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the mean squared logarithmic error (MSLE) between ground truth and + predicted values. + + MSLE measures the squared logarithmic difference between true values and predicted + values for regression models. It's particularly useful for dealing with skewed or + large-value data, and it's often used when the relative differences between + predicted and true values are more important than absolute differences. + + MSLE = (1/n) * Σ(log(1 + y_true) - log(1 + y_pred))^2 + + Reference: https://insideaiml.com/blog/MeanSquared-Logarithmic-Error-Loss-1035 + + Parameters: + - y_true: The true values (ground truth) + - y_pred: The predicted values + + >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> mean_squared_logarithmic_error(true_values, predicted_values) + 0.0030860877925181344 + >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> mean_squared_logarithmic_error(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + squared_logarithmic_errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2 + return np.mean(squared_logarithmic_errors) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/machine_learning/loss_functions/binary_cross_entropy.py b/machine_learning/loss_functions/binary_cross_entropy.py deleted file mode 100644 index 4ebca7f21757..000000000000 --- a/machine_learning/loss_functions/binary_cross_entropy.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -Binary Cross-Entropy (BCE) Loss Function - -Description: -Quantifies dissimilarity between true labels (0 or 1) and predicted probabilities. -It's widely used in binary classification tasks. 
- -Formula: -BCE = -Σ(y_true * log(y_pred) + (1 - y_true) * log(1 - y_pred)) - -Source: -[Wikipedia - Cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) -""" - -import numpy as np - - -def binary_cross_entropy( - y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 -) -> float: - """ - Calculate the BCE Loss between true labels and predicted probabilities. - - Parameters: - - y_true: True binary labels (0 or 1). - - y_pred: Predicted probabilities for class 1. - - epsilon: Small constant to avoid numerical instability. - - Returns: - - bce_loss: Binary Cross-Entropy Loss. - - Example Usage: - >>> true_labels = np.array([0, 1, 1, 0, 1]) - >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) - >>> binary_cross_entropy(true_labels, predicted_probs) - 0.2529995012327421 - >>> true_labels = np.array([0, 1, 1, 0, 1]) - >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) - >>> binary_cross_entropy(true_labels, predicted_probs) - Traceback (most recent call last): - ... - ValueError: Input arrays must have the same length. - """ - if len(y_true) != len(y_pred): - raise ValueError("Input arrays must have the same length.") - # Clip predicted probabilities to avoid log(0) and log(1) - y_pred = np.clip(y_pred, epsilon, 1 - epsilon) - - # Calculate binary cross-entropy loss - bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)) - - # Take the mean over all samples - return np.mean(bce_loss) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/machine_learning/loss_functions/categorical_cross_entropy.py b/machine_learning/loss_functions/categorical_cross_entropy.py deleted file mode 100644 index 68f98902b473..000000000000 --- a/machine_learning/loss_functions/categorical_cross_entropy.py +++ /dev/null @@ -1,85 +0,0 @@ -""" -Categorical Cross-Entropy Loss - -This function calculates the Categorical Cross-Entropy Loss between true class -labels and predicted class probabilities. - -Formula: -Categorical Cross-Entropy Loss = -Σ(y_true * ln(y_pred)) - -Resources: -- [Wikipedia - Cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) -""" - -import numpy as np - - -def categorical_cross_entropy( - y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 -) -> float: - """ - Calculate Categorical Cross-Entropy Loss between true class labels and - predicted class probabilities. - - Parameters: - - y_true: True class labels (one-hot encoded) as a NumPy array. - - y_pred: Predicted class probabilities as a NumPy array. - - epsilon: Small constant to avoid numerical instability. - - Returns: - - ce_loss: Categorical Cross-Entropy Loss as a floating-point number. - - Example: - >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) - >>> categorical_cross_entropy(true_labels, pred_probs) - 0.567395975254385 - - >>> y_true = np.array([[1, 0], [0, 1]]) - >>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) - >>> categorical_cross_entropy(y_true, y_pred) - Traceback (most recent call last): - ... - ValueError: Input arrays must have the same shape. - - >>> y_true = np.array([[2, 0, 1], [1, 0, 0]]) - >>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) - >>> categorical_cross_entropy(y_true, y_pred) - Traceback (most recent call last): - ... - ValueError: y_true must be one-hot encoded. 
- - >>> y_true = np.array([[1, 0, 1], [1, 0, 0]]) - >>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) - >>> categorical_cross_entropy(y_true, y_pred) - Traceback (most recent call last): - ... - ValueError: y_true must be one-hot encoded. - - >>> y_true = np.array([[1, 0, 0], [0, 1, 0]]) - >>> y_pred = np.array([[0.9, 0.1, 0.1], [0.2, 0.7, 0.1]]) - >>> categorical_cross_entropy(y_true, y_pred) - Traceback (most recent call last): - ... - ValueError: Predicted probabilities must sum to approximately 1. - """ - if y_true.shape != y_pred.shape: - raise ValueError("Input arrays must have the same shape.") - - if np.any((y_true != 0) & (y_true != 1)) or np.any(y_true.sum(axis=1) != 1): - raise ValueError("y_true must be one-hot encoded.") - - if not np.all(np.isclose(np.sum(y_pred, axis=1), 1, rtol=epsilon, atol=epsilon)): - raise ValueError("Predicted probabilities must sum to approximately 1.") - - # Clip predicted probabilities to avoid log(0) - y_pred = np.clip(y_pred, epsilon, 1) - - # Calculate categorical cross-entropy loss - return -np.sum(y_true * np.log(y_pred)) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/machine_learning/loss_functions/hinge_loss.py b/machine_learning/loss_functions/hinge_loss.py deleted file mode 100644 index 5480a8cd62ee..000000000000 --- a/machine_learning/loss_functions/hinge_loss.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Hinge Loss - -Description: -Compute the Hinge loss used for training SVM (Support Vector Machine). - -Formula: -loss = max(0, 1 - true * pred) - -Reference: https://en.wikipedia.org/wiki/Hinge_loss - -Author: Poojan Smart -Email: smrtpoojan@gmail.com -""" - -import numpy as np - - -def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float: - """ - Calculate the mean hinge loss for y_true and y_pred for binary classification. - - Args: - y_true: Array of actual values (ground truth) encoded as -1 and 1. - y_pred: Array of predicted values. - - Returns: - The hinge loss between y_true and y_pred. - - Examples: - >>> y_true = np.array([-1, 1, 1, -1, 1]) - >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) - >>> hinge_loss(y_true, pred) - 1.52 - >>> y_true = np.array([-1, 1, 1, -1, 1, 1]) - >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) - >>> hinge_loss(y_true, pred) - Traceback (most recent call last): - ... - ValueError: Length of predicted and actual array must be same. - >>> y_true = np.array([-1, 1, 10, -1, 1]) - >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) - >>> hinge_loss(y_true, pred) - Traceback (most recent call last): - ... - ValueError: y_true can have values -1 or 1 only. - """ - - if len(y_true) != len(y_pred): - raise ValueError("Length of predicted and actual array must be same.") - - # Raise value error when y_true (encoded labels) have any other values - # than -1 and 1 - if np.any((y_true != -1) & (y_true != 1)): - raise ValueError("y_true can have values -1 or 1 only.") - - hinge_losses = np.maximum(0, 1.0 - (y_true * y_pred)) - return np.mean(hinge_losses) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/machine_learning/loss_functions/huber_loss.py b/machine_learning/loss_functions/huber_loss.py deleted file mode 100644 index 202e013f2928..000000000000 --- a/machine_learning/loss_functions/huber_loss.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Huber Loss Function - -Description: -Huber loss function describes the penalty incurred by an estimation procedure. -It serves as a measure of the model's accuracy in regression tasks. 
- -Formula: -Huber Loss = if |y_true - y_pred| <= delta then 0.5 * (y_true - y_pred)^2 - else delta * |y_true - y_pred| - 0.5 * delta^2 - -Source: -[Wikipedia - Huber Loss](https://en.wikipedia.org/wiki/Huber_loss) -""" - -import numpy as np - - -def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float: - """ - Calculate the mean of Huber Loss. - - Parameters: - - y_true: The true values (ground truth). - - y_pred: The predicted values. - - Returns: - - huber_loss: The mean of Huber Loss between y_true and y_pred. - - Example usage: - >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2]) - >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102) - True - >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0]) - >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) - >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164) - True - """ - - if len(y_true) != len(y_pred): - raise ValueError("Input arrays must have the same length.") - - huber_mse = 0.5 * (y_true - y_pred) ** 2 - huber_mae = delta * (np.abs(y_true - y_pred) - 0.5 * delta) - return np.where(np.abs(y_true - y_pred) <= delta, huber_mse, huber_mae).mean() - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/machine_learning/loss_functions/mean_squared_error.py b/machine_learning/loss_functions/mean_squared_error.py deleted file mode 100644 index d2b0e1e158ba..000000000000 --- a/machine_learning/loss_functions/mean_squared_error.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -Mean Squared Error (MSE) Loss Function - -Description: -MSE measures the mean squared difference between true values and predicted values. -It serves as a measure of the model's accuracy in regression tasks. - -Formula: -MSE = (1/n) * Σ(y_true - y_pred)^2 - -Source: -[Wikipedia - Mean squared error](https://en.wikipedia.org/wiki/Mean_squared_error) -""" - -import numpy as np - - -def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: - """ - Calculate the Mean Squared Error (MSE) between two arrays. - - Parameters: - - y_true: The true values (ground truth). - - y_pred: The predicted values. - - Returns: - - mse: The Mean Squared Error between y_true and y_pred. - - Example usage: - >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) - >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> mean_squared_error(true_values, predicted_values) - 0.028000000000000032 - >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) - >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) - >>> mean_squared_error(true_labels, predicted_probs) - Traceback (most recent call last): - ... - ValueError: Input arrays must have the same length. 
- """ - if len(y_true) != len(y_pred): - raise ValueError("Input arrays must have the same length.") - - squared_errors = (y_true - y_pred) ** 2 - return np.mean(squared_errors) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/machine_learning/loss_functions/mean_squared_logarithmic_error.py b/machine_learning/loss_functions/mean_squared_logarithmic_error.py deleted file mode 100644 index 935ebff37a51..000000000000 --- a/machine_learning/loss_functions/mean_squared_logarithmic_error.py +++ /dev/null @@ -1,55 +0,0 @@ -""" -Mean Squared Logarithmic Error (MSLE) Loss Function - -Description: -MSLE measures the mean squared logarithmic difference between -true values and predicted values, particularly useful when -dealing with regression problems involving skewed or large-value -targets. It is often used when the relative differences between -predicted and true values are more important than absolute -differences. - -Formula: -MSLE = (1/n) * Σ(log(1 + y_true) - log(1 + y_pred))^2 - -Source: -(https://insideaiml.com/blog/MeanSquared-Logarithmic-Error-Loss-1035) -""" - -import numpy as np - - -def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: - """ - Calculate the Mean Squared Logarithmic Error (MSLE) between two arrays. - - Parameters: - - y_true: The true values (ground truth). - - y_pred: The predicted values. - - Returns: - - msle: The Mean Squared Logarithmic Error between y_true and y_pred. - - Example usage: - >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) - >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> mean_squared_logarithmic_error(true_values, predicted_values) - 0.0030860877925181344 - >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) - >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) - >>> mean_squared_logarithmic_error(true_labels, predicted_probs) - Traceback (most recent call last): - ... - ValueError: Input arrays must have the same length. - """ - if len(y_true) != len(y_pred): - raise ValueError("Input arrays must have the same length.") - - squared_logarithmic_errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2 - return np.mean(squared_logarithmic_errors) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 47c19d9b2da6a56f47b520e6c5ca6b654a5eff47 Mon Sep 17 00:00:00 2001 From: Jeel Gajera <83470656+JeelGajera@users.noreply.github.com> Date: Sat, 21 Oct 2023 20:21:29 +0530 Subject: [PATCH 1193/1543] Add: FP Growth Algorithm (#10746) * Add: FP Growth Algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changes names * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Revert "changes names" This reverts commit c0470094d01391294617df6a92734b78b470b127. 
* refactore code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update frequent_pattern_growth.py --------- Co-authored-by: Jeel Gajera Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + machine_learning/frequent_pattern_growth.py | 349 ++++++++++++++++++++ 2 files changed, 350 insertions(+) create mode 100644 machine_learning/frequent_pattern_growth.py diff --git a/DIRECTORY.md b/DIRECTORY.md index b92f8f877e97..916d993c563a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -541,6 +541,7 @@ * [Dimensionality Reduction](machine_learning/dimensionality_reduction.py) * Forecasting * [Run](machine_learning/forecasting/run.py) + * [Frequent Pattern Growth Algorithm](machine_learning/frequent_pattern_growth.py) * [Gradient Descent](machine_learning/gradient_descent.py) * [K Means Clust](machine_learning/k_means_clust.py) * [K Nearest Neighbours](machine_learning/k_nearest_neighbours.py) diff --git a/machine_learning/frequent_pattern_growth.py b/machine_learning/frequent_pattern_growth.py new file mode 100644 index 000000000000..205d598464a1 --- /dev/null +++ b/machine_learning/frequent_pattern_growth.py @@ -0,0 +1,349 @@ +""" +The Frequent Pattern Growth algorithm (FP-Growth) is a widely used data mining +technique for discovering frequent itemsets in large transaction databases. + +It overcomes some of the limitations of traditional methods such as Apriori by +efficiently constructing the FP-Tree + +WIKI: https://athena.ecs.csus.edu/~mei/associationcw/FpGrowth.html + +Examples: https://www.javatpoint.com/fp-growth-algorithm-in-data-mining +""" +from __future__ import annotations + +from dataclasses import dataclass, field + + +@dataclass +class TreeNode: + """ + A node in a Frequent Pattern tree. + + Args: + name: The name of this node. + num_occur: The number of occurrences of the node. + parent_node: The parent node. + + Example: + >>> parent = TreeNode("Parent", 1, None) + >>> child = TreeNode("Child", 2, parent) + >>> child.name + 'Child' + >>> child.count + 2 + """ + + name: str + count: int + parent: TreeNode | None = None + children: dict[str, TreeNode] = field(default_factory=dict) + node_link: TreeNode | None = None + + def __repr__(self) -> str: + return f"TreeNode({self.name!r}, {self.count!r}, {self.parent!r})" + + def inc(self, num_occur: int) -> None: + self.count += num_occur + + def disp(self, ind: int = 1) -> None: + print(f"{' ' * ind} {self.name} {self.count}") + for child in self.children.values(): + child.disp(ind + 1) + + +def create_tree(data_set: list, min_sup: int = 1) -> tuple[TreeNode, dict]: + """ + Create Frequent Pattern tree + + Args: + data_set: A list of transactions, where each transaction is a list of items. + min_sup: The minimum support threshold. + Items with support less than this will be pruned. Default is 1. + + Returns: + The root of the FP-Tree. + header_table: The header table dictionary with item information. + + Example: + >>> data_set = [ + ... ['A', 'B', 'C'], + ... ['A', 'C'], + ... ['A', 'B', 'E'], + ... ['A', 'B', 'C', 'E'], + ... ['B', 'E'] + ... 
] + >>> min_sup = 2 + >>> fp_tree, header_table = create_tree(data_set, min_sup) + >>> fp_tree + TreeNode('Null Set', 1, None) + >>> len(header_table) + 4 + >>> header_table["A"] + [[4, None], TreeNode('A', 4, TreeNode('Null Set', 1, None))] + >>> header_table["E"][1] # doctest: +NORMALIZE_WHITESPACE + TreeNode('E', 1, TreeNode('B', 3, TreeNode('A', 4, TreeNode('Null Set', 1, None)))) + >>> sorted(header_table) + ['A', 'B', 'C', 'E'] + >>> fp_tree.name + 'Null Set' + >>> sorted(fp_tree.children) + ['A', 'B'] + >>> fp_tree.children['A'].name + 'A' + >>> sorted(fp_tree.children['A'].children) + ['B', 'C'] + """ + header_table: dict = {} + for trans in data_set: + for item in trans: + header_table[item] = header_table.get(item, [0, None]) + header_table[item][0] += 1 + + for k in list(header_table): + if header_table[k][0] < min_sup: + del header_table[k] + + if not (freq_item_set := set(header_table)): + return TreeNode("Null Set", 1, None), {} + + for k in header_table: + header_table[k] = [header_table[k], None] + + fp_tree = TreeNode("Null Set", 1, None) # Parent is None for the root node + for tran_set in data_set: + local_d = { + item: header_table[item][0] for item in tran_set if item in freq_item_set + } + if local_d: + sorted_items = sorted( + local_d.items(), key=lambda item_info: item_info[1], reverse=True + ) + ordered_items = [item[0] for item in sorted_items] + update_tree(ordered_items, fp_tree, header_table, 1) + + return fp_tree, header_table + + +def update_tree(items: list, in_tree: TreeNode, header_table: dict, count: int) -> None: + """ + Update the FP-Tree with a transaction. + + Args: + items: List of items in the transaction. + in_tree: The current node in the FP-Tree. + header_table: The header table dictionary with item information. + count: The count of the transaction. + + Example: + >>> data_set = [ + ... ['A', 'B', 'C'], + ... ['A', 'C'], + ... ['A', 'B', 'E'], + ... ['A', 'B', 'C', 'E'], + ... ['B', 'E'] + ... ] + >>> min_sup = 2 + >>> fp_tree, header_table = create_tree(data_set, min_sup) + >>> fp_tree + TreeNode('Null Set', 1, None) + >>> transaction = ['A', 'B', 'E'] + >>> update_tree(transaction, fp_tree, header_table, 1) + >>> fp_tree + TreeNode('Null Set', 1, None) + >>> fp_tree.children['A'].children['B'].children['E'].children + {} + >>> fp_tree.children['A'].children['B'].children['E'].count + 2 + >>> header_table['E'][1].name + 'E' + """ + if items[0] in in_tree.children: + in_tree.children[items[0]].inc(count) + else: + in_tree.children[items[0]] = TreeNode(items[0], count, in_tree) + if header_table[items[0]][1] is None: + header_table[items[0]][1] = in_tree.children[items[0]] + else: + update_header(header_table[items[0]][1], in_tree.children[items[0]]) + if len(items) > 1: + update_tree(items[1:], in_tree.children[items[0]], header_table, count) + + +def update_header(node_to_test: TreeNode, target_node: TreeNode) -> TreeNode: + """ + Update the header table with a node link. + + Args: + node_to_test: The node to be updated in the header table. + target_node: The node to link to. + + Example: + >>> data_set = [ + ... ['A', 'B', 'C'], + ... ['A', 'C'], + ... ['A', 'B', 'E'], + ... ['A', 'B', 'C', 'E'], + ... ['B', 'E'] + ... 
] + >>> min_sup = 2 + >>> fp_tree, header_table = create_tree(data_set, min_sup) + >>> fp_tree + TreeNode('Null Set', 1, None) + >>> node1 = TreeNode("A", 3, None) + >>> node2 = TreeNode("B", 4, None) + >>> node1 + TreeNode('A', 3, None) + >>> node1 = update_header(node1, node2) + >>> node1 + TreeNode('A', 3, None) + >>> node1.node_link + TreeNode('B', 4, None) + >>> node2.node_link is None + True + """ + while node_to_test.node_link is not None: + node_to_test = node_to_test.node_link + if node_to_test.node_link is None: + node_to_test.node_link = target_node + # Return the updated node + return node_to_test + + +def ascend_tree(leaf_node: TreeNode, prefix_path: list[str]) -> None: + """ + Ascend the FP-Tree from a leaf node to its root, adding item names to the prefix + path. + + Args: + leaf_node: The leaf node to start ascending from. + prefix_path: A list to store the item as they are ascended. + + Example: + >>> data_set = [ + ... ['A', 'B', 'C'], + ... ['A', 'C'], + ... ['A', 'B', 'E'], + ... ['A', 'B', 'C', 'E'], + ... ['B', 'E'] + ... ] + >>> min_sup = 2 + >>> fp_tree, header_table = create_tree(data_set, min_sup) + + >>> path = [] + >>> ascend_tree(fp_tree.children['A'], path) + >>> path # ascending from a leaf node 'A' + ['A'] + """ + if leaf_node.parent is not None: + prefix_path.append(leaf_node.name) + ascend_tree(leaf_node.parent, prefix_path) + + +def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict: + """ + Find the conditional pattern base for a given base pattern. + + Args: + base_pat: The base pattern for which to find the conditional pattern base. + tree_node: The node in the FP-Tree. + + Example: + >>> data_set = [ + ... ['A', 'B', 'C'], + ... ['A', 'C'], + ... ['A', 'B', 'E'], + ... ['A', 'B', 'C', 'E'], + ... ['B', 'E'] + ... ] + >>> min_sup = 2 + >>> fp_tree, header_table = create_tree(data_set, min_sup) + >>> fp_tree + TreeNode('Null Set', 1, None) + >>> len(header_table) + 4 + >>> base_pattern = frozenset(['A']) + >>> sorted(find_prefix_path(base_pattern, fp_tree.children['A'])) + [] + """ + cond_pats: dict = {} + while tree_node is not None: + prefix_path: list = [] + ascend_tree(tree_node, prefix_path) + if len(prefix_path) > 1: + cond_pats[frozenset(prefix_path[1:])] = tree_node.count + tree_node = tree_node.node_link + return cond_pats + + +def mine_tree( + in_tree: TreeNode, + header_table: dict, + min_sup: int, + pre_fix: set, + freq_item_list: list, +) -> None: + """ + Mine the FP-Tree recursively to discover frequent itemsets. + + Args: + in_tree: The FP-Tree to mine. + header_table: The header table dictionary with item information. + min_sup: The minimum support threshold. + pre_fix: A set of items as a prefix for the itemsets being mined. + freq_item_list: A list to store the frequent itemsets. + + Example: + >>> data_set = [ + ... ['A', 'B', 'C'], + ... ['A', 'C'], + ... ['A', 'B', 'E'], + ... ['A', 'B', 'C', 'E'], + ... ['B', 'E'] + ... 
] + >>> min_sup = 2 + >>> fp_tree, header_table = create_tree(data_set, min_sup) + >>> fp_tree + TreeNode('Null Set', 1, None) + >>> frequent_itemsets = [] + >>> mine_tree(fp_tree, header_table, min_sup, set([]), frequent_itemsets) + >>> expe_itm = [{'C'}, {'C', 'A'}, {'E'}, {'A', 'E'}, {'E', 'B'}, {'A'}, {'B'}] + >>> all(expected in frequent_itemsets for expected in expe_itm) + True + """ + sorted_items = sorted(header_table.items(), key=lambda item_info: item_info[1][0]) + big_l = [item[0] for item in sorted_items] + for base_pat in big_l: + new_freq_set = pre_fix.copy() + new_freq_set.add(base_pat) + freq_item_list.append(new_freq_set) + cond_patt_bases = find_prefix_path(base_pat, header_table[base_pat][1]) + my_cond_tree, my_head = create_tree(list(cond_patt_bases), min_sup) + if my_head is not None: + # Pass header_table[base_pat][1] as node_to_test to update_header + header_table[base_pat][1] = update_header( + header_table[base_pat][1], my_cond_tree + ) + mine_tree(my_cond_tree, my_head, min_sup, new_freq_set, freq_item_list) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + data_set: list[frozenset] = [ + frozenset(["bread", "milk", "cheese"]), + frozenset(["bread", "milk"]), + frozenset(["bread", "diapers"]), + frozenset(["bread", "milk", "diapers"]), + frozenset(["milk", "diapers"]), + frozenset(["milk", "cheese"]), + frozenset(["diapers", "cheese"]), + frozenset(["bread", "milk", "cheese", "diapers"]), + ] + print(f"{len(data_set) = }") + fp_tree, header_table = create_tree(data_set, min_sup=3) + print(f"{fp_tree = }") + print(f"{len(header_table) = }") + freq_items: list = [] + mine_tree(fp_tree, header_table, 3, set(), freq_items) + print(f"{freq_items = }") From 06edc0eea0220f29491f75351cde1af9716aca8d Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 21 Oct 2023 13:27:36 -0400 Subject: [PATCH 1194/1543] Consolidate binary exponentiation files (#10742) * Consolidate binary exponentiation files * updating DIRECTORY.md * Fix typos in doctests * Add suggestions from code review * Fix timeit benchmarks --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 - maths/binary_exp_mod.py | 28 ---- maths/binary_exponentiation.py | 214 ++++++++++++++++++++++++++----- maths/binary_exponentiation_2.py | 61 --------- 4 files changed, 181 insertions(+), 124 deletions(-) delete mode 100644 maths/binary_exp_mod.py delete mode 100644 maths/binary_exponentiation_2.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 916d993c563a..9e0166ad80c5 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -578,9 +578,7 @@ * [Bailey Borwein Plouffe](maths/bailey_borwein_plouffe.py) * [Base Neg2 Conversion](maths/base_neg2_conversion.py) * [Basic Maths](maths/basic_maths.py) - * [Binary Exp Mod](maths/binary_exp_mod.py) * [Binary Exponentiation](maths/binary_exponentiation.py) - * [Binary Exponentiation 2](maths/binary_exponentiation_2.py) * [Binary Multiplication](maths/binary_multiplication.py) * [Binomial Coefficient](maths/binomial_coefficient.py) * [Binomial Distribution](maths/binomial_distribution.py) diff --git a/maths/binary_exp_mod.py b/maths/binary_exp_mod.py deleted file mode 100644 index 8893182a3496..000000000000 --- a/maths/binary_exp_mod.py +++ /dev/null @@ -1,28 +0,0 @@ -def bin_exp_mod(a: int, n: int, b: int) -> int: - """ - >>> bin_exp_mod(3, 4, 5) - 1 - >>> bin_exp_mod(7, 13, 10) - 7 - """ - # mod b - assert b != 0, "This cannot accept modulo that is == 0" - if n == 0: - return 1 - - if n % 2 == 1: - return 
(bin_exp_mod(a, n - 1, b) * a) % b - - r = bin_exp_mod(a, n // 2, b) - return (r * r) % b - - -if __name__ == "__main__": - try: - BASE = int(input("Enter Base : ").strip()) - POWER = int(input("Enter Power : ").strip()) - MODULO = int(input("Enter Modulo : ").strip()) - except ValueError: - print("Invalid literal for integer") - - print(bin_exp_mod(BASE, POWER, MODULO)) diff --git a/maths/binary_exponentiation.py b/maths/binary_exponentiation.py index f613767f547e..51ce86d26c41 100644 --- a/maths/binary_exponentiation.py +++ b/maths/binary_exponentiation.py @@ -1,48 +1,196 @@ -"""Binary Exponentiation.""" +""" +Binary Exponentiation -# Author : Junth Basnet -# Time Complexity : O(logn) +This is a method to find a^b in O(log b) time complexity and is one of the most commonly +used methods of exponentiation. The method is also useful for modular exponentiation, +when the solution to (a^b) % c is required. +To calculate a^b: +- If b is even, then a^b = (a * a)^(b / 2) +- If b is odd, then a^b = a * a^(b - 1) +Repeat until b = 1 or b = 0 -def binary_exponentiation(a: int, n: int) -> int: +For modular exponentiation, we use the fact that (a * b) % c = ((a % c) * (b % c)) % c +""" + + +def binary_exp_recursive(base: float, exponent: int) -> float: """ - Compute a number raised by some quantity - >>> binary_exponentiation(-1, 3) + Computes a^b recursively, where a is the base and b is the exponent + + >>> binary_exp_recursive(3, 5) + 243 + >>> binary_exp_recursive(11, 13) + 34522712143931 + >>> binary_exp_recursive(-1, 3) -1 - >>> binary_exponentiation(-1, 4) + >>> binary_exp_recursive(0, 5) + 0 + >>> binary_exp_recursive(3, 1) + 3 + >>> binary_exp_recursive(3, 0) 1 - >>> binary_exponentiation(2, 2) - 4 - >>> binary_exponentiation(3, 5) + >>> binary_exp_recursive(1.5, 4) + 5.0625 + >>> binary_exp_recursive(3, -1) + Traceback (most recent call last): + ... + ValueError: Exponent must be a non-negative integer + """ + if exponent < 0: + raise ValueError("Exponent must be a non-negative integer") + + if exponent == 0: + return 1 + + if exponent % 2 == 1: + return binary_exp_recursive(base, exponent - 1) * base + + b = binary_exp_recursive(base, exponent // 2) + return b * b + + +def binary_exp_iterative(base: float, exponent: int) -> float: + """ + Computes a^b iteratively, where a is the base and b is the exponent + + >>> binary_exp_iterative(3, 5) 243 - >>> binary_exponentiation(10, 3) - 1000 - >>> binary_exponentiation(5e3, 1) - 5000.0 - >>> binary_exponentiation(-5e3, 1) - -5000.0 - """ - if n == 0: + >>> binary_exp_iterative(11, 13) + 34522712143931 + >>> binary_exp_iterative(-1, 3) + -1 + >>> binary_exp_iterative(0, 5) + 0 + >>> binary_exp_iterative(3, 1) + 3 + >>> binary_exp_iterative(3, 0) + 1 + >>> binary_exp_iterative(1.5, 4) + 5.0625 + >>> binary_exp_iterative(3, -1) + Traceback (most recent call last): + ... + ValueError: Exponent must be a non-negative integer + """ + if exponent < 0: + raise ValueError("Exponent must be a non-negative integer") + + res: int | float = 1 + while exponent > 0: + if exponent & 1: + res *= base + + base *= base + exponent >>= 1 + + return res + + +def binary_exp_mod_recursive(base: float, exponent: int, modulus: int) -> float: + """ + Computes a^b % c recursively, where a is the base, b is the exponent, and c is the + modulus + + >>> binary_exp_mod_recursive(3, 4, 5) + 1 + >>> binary_exp_mod_recursive(11, 13, 7) + 4 + >>> binary_exp_mod_recursive(1.5, 4, 3) + 2.0625 + >>> binary_exp_mod_recursive(7, -1, 10) + Traceback (most recent call last): + ... 
+ ValueError: Exponent must be a non-negative integer + >>> binary_exp_mod_recursive(7, 13, 0) + Traceback (most recent call last): + ... + ValueError: Modulus must be a positive integer + """ + if exponent < 0: + raise ValueError("Exponent must be a non-negative integer") + if modulus <= 0: + raise ValueError("Modulus must be a positive integer") + + if exponent == 0: return 1 - elif n % 2 == 1: - return binary_exponentiation(a, n - 1) * a + if exponent % 2 == 1: + return (binary_exp_mod_recursive(base, exponent - 1, modulus) * base) % modulus - else: - b = binary_exponentiation(a, n // 2) - return b * b + r = binary_exp_mod_recursive(base, exponent // 2, modulus) + return (r * r) % modulus -if __name__ == "__main__": - import doctest +def binary_exp_mod_iterative(base: float, exponent: int, modulus: int) -> float: + """ + Computes a^b % c iteratively, where a is the base, b is the exponent, and c is the + modulus - doctest.testmod() + >>> binary_exp_mod_iterative(3, 4, 5) + 1 + >>> binary_exp_mod_iterative(11, 13, 7) + 4 + >>> binary_exp_mod_iterative(1.5, 4, 3) + 2.0625 + >>> binary_exp_mod_iterative(7, -1, 10) + Traceback (most recent call last): + ... + ValueError: Exponent must be a non-negative integer + >>> binary_exp_mod_iterative(7, 13, 0) + Traceback (most recent call last): + ... + ValueError: Modulus must be a positive integer + """ + if exponent < 0: + raise ValueError("Exponent must be a non-negative integer") + if modulus <= 0: + raise ValueError("Modulus must be a positive integer") + + res: int | float = 1 + while exponent > 0: + if exponent & 1: + res = ((res % modulus) * (base % modulus)) % modulus + + base *= base + exponent >>= 1 + + return res + + +if __name__ == "__main__": + from timeit import timeit - try: - BASE = int(float(input("Enter Base : ").strip())) - POWER = int(input("Enter Power : ").strip()) - except ValueError: - print("Invalid literal for integer") + a = 1269380576 + b = 374 + c = 34 - RESULT = binary_exponentiation(BASE, POWER) - print(f"{BASE}^({POWER}) : {RESULT}") + runs = 100_000 + print( + timeit( + f"binary_exp_recursive({a}, {b})", + setup="from __main__ import binary_exp_recursive", + number=runs, + ) + ) + print( + timeit( + f"binary_exp_iterative({a}, {b})", + setup="from __main__ import binary_exp_iterative", + number=runs, + ) + ) + print( + timeit( + f"binary_exp_mod_recursive({a}, {b}, {c})", + setup="from __main__ import binary_exp_mod_recursive", + number=runs, + ) + ) + print( + timeit( + f"binary_exp_mod_iterative({a}, {b}, {c})", + setup="from __main__ import binary_exp_mod_iterative", + number=runs, + ) + ) diff --git a/maths/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py deleted file mode 100644 index edb6b66b2594..000000000000 --- a/maths/binary_exponentiation_2.py +++ /dev/null @@ -1,61 +0,0 @@ -""" -Binary Exponentiation -This is a method to find a^b in O(log b) time complexity -This is one of the most commonly used methods of exponentiation -It's also useful when the solution to (a^b) % c is required because a, b, c may be -over the computer's calculation limits - -Let's say you need to calculate a ^ b -- RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2 -- RULE 2 : IF b is odd, then a ^ b = a * (a ^ (b - 1)), where b - 1 is even -Once b is even, repeat the process until b = 1 or b = 0, because a^1 = a and a^0 = 1 - -For modular exponentiation, we use the fact that (a*b) % c = ((a%c) * (b%c)) % c -Now apply RULE 1 or 2 as required - -@author chinmoy159 -""" - - -def b_expo(a: int, b: 
int) -> int: - """ - >>> b_expo(2, 10) - 1024 - >>> b_expo(9, 0) - 1 - >>> b_expo(0, 12) - 0 - >>> b_expo(4, 12) - 16777216 - """ - res = 1 - while b > 0: - if b & 1: - res *= a - - a *= a - b >>= 1 - - return res - - -def b_expo_mod(a: int, b: int, c: int) -> int: - """ - >>> b_expo_mod(2, 10, 1000000007) - 1024 - >>> b_expo_mod(11, 13, 19) - 11 - >>> b_expo_mod(0, 19, 20) - 0 - >>> b_expo_mod(15, 5, 4) - 3 - """ - res = 1 - while b > 0: - if b & 1: - res = ((res % c) * (a % c)) % c - - a *= a - b >>= 1 - - return res From b814cf3781a97c273a779823b8b8ab388417b7b4 Mon Sep 17 00:00:00 2001 From: Kiarash Hajian <133909368+kiarash8112@users.noreply.github.com> Date: Sat, 21 Oct 2023 14:53:34 -0400 Subject: [PATCH 1195/1543] add exponential search algorithm (#10732) * add exponential_search algorithm * replace binary_search with binary_search_recursion * convert left type to int to be useable in binary_search_recursion * add docs and tests for exponential_search algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * move exponential_search to binary_search.py to pass github auto build tests delete exponential_search.py file * Update searches/binary_search.py Co-authored-by: Christian Clauss * remove additional space searches/binary_search.py Co-authored-by: Christian Clauss * return single data type in exponential_search searches/binary_search.py Co-authored-by: Christian Clauss * add doctest mod searches/binary_search.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * use // instread of int() convert searches/binary_search.py Co-authored-by: Christian Clauss * change test according to new code searches/binary_search.py Co-authored-by: Christian Clauss * fix binary_search_recursion multiple type return error * add a timeit benchmark for exponential_search * sort input of binary search to be equal in performance test with exponential_search * raise value error instead of sorting input in binary and exonential search to fix bugs * Update binary_search.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: user --- searches/binary_search.py | 149 +++++++++++++++++++++++++------------- 1 file changed, 100 insertions(+), 49 deletions(-) diff --git a/searches/binary_search.py b/searches/binary_search.py index 05dadd4fe965..586be39c9a0d 100644 --- a/searches/binary_search.py +++ b/searches/binary_search.py @@ -1,9 +1,9 @@ #!/usr/bin/env python3 """ -This is pure Python implementation of binary search algorithms +Pure Python implementations of binary search algorithms -For doctests run following command: +For doctests run the following command: python3 -m doctest -v binary_search.py For manual testing run: @@ -34,16 +34,12 @@ def bisect_left( Examples: >>> bisect_left([0, 5, 7, 10, 15], 0) 0 - >>> bisect_left([0, 5, 7, 10, 15], 6) 2 - >>> bisect_left([0, 5, 7, 10, 15], 20) 5 - >>> bisect_left([0, 5, 7, 10, 15], 15, 1, 3) 3 - >>> bisect_left([0, 5, 7, 10, 15], 6, 2) 2 """ @@ -79,16 +75,12 @@ def bisect_right( Examples: >>> bisect_right([0, 5, 7, 10, 15], 0) 1 - >>> bisect_right([0, 5, 7, 10, 15], 15) 5 - >>> bisect_right([0, 5, 7, 10, 15], 6) 2 - >>> bisect_right([0, 5, 7, 10, 15], 15, 1, 3) 3 - >>> bisect_right([0, 5, 7, 10, 15], 6, 2) 2 """ @@ -124,7 +116,6 @@ def insort_left( >>> insort_left(sorted_collection, 6) >>> sorted_collection [0, 5, 6, 7, 10, 15] - 
>>> sorted_collection = [(0, 0), (5, 5), (7, 7), (10, 10), (15, 15)] >>> item = (5, 5) >>> insort_left(sorted_collection, item) @@ -134,12 +125,10 @@ def insort_left( True >>> item is sorted_collection[2] False - >>> sorted_collection = [0, 5, 7, 10, 15] >>> insort_left(sorted_collection, 20) >>> sorted_collection [0, 5, 7, 10, 15, 20] - >>> sorted_collection = [0, 5, 7, 10, 15] >>> insort_left(sorted_collection, 15, 1, 3) >>> sorted_collection @@ -167,7 +156,6 @@ def insort_right( >>> insort_right(sorted_collection, 6) >>> sorted_collection [0, 5, 6, 7, 10, 15] - >>> sorted_collection = [(0, 0), (5, 5), (7, 7), (10, 10), (15, 15)] >>> item = (5, 5) >>> insort_right(sorted_collection, item) @@ -177,12 +165,10 @@ def insort_right( False >>> item is sorted_collection[2] True - >>> sorted_collection = [0, 5, 7, 10, 15] >>> insort_right(sorted_collection, 20) >>> sorted_collection [0, 5, 7, 10, 15, 20] - >>> sorted_collection = [0, 5, 7, 10, 15] >>> insort_right(sorted_collection, 15, 1, 3) >>> sorted_collection @@ -191,29 +177,28 @@ def insort_right( sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item) -def binary_search(sorted_collection: list[int], item: int) -> int | None: - """Pure implementation of binary search algorithm in Python +def binary_search(sorted_collection: list[int], item: int) -> int: + """Pure implementation of a binary search algorithm in Python - Be careful collection must be ascending sorted, otherwise result will be + Be careful collection must be ascending sorted otherwise, the result will be unpredictable :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search - :return: index of found item or None if item is not found + :return: index of the found item or -1 if the item is not found Examples: >>> binary_search([0, 5, 7, 10, 15], 0) 0 - >>> binary_search([0, 5, 7, 10, 15], 15) 4 - >>> binary_search([0, 5, 7, 10, 15], 5) 1 - >>> binary_search([0, 5, 7, 10, 15], 6) - + -1 """ + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") left = 0 right = len(sorted_collection) - 1 @@ -226,66 +211,66 @@ def binary_search(sorted_collection: list[int], item: int) -> int | None: right = midpoint - 1 else: left = midpoint + 1 - return None + return -1 -def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None: - """Pure implementation of binary search algorithm in Python using stdlib +def binary_search_std_lib(sorted_collection: list[int], item: int) -> int: + """Pure implementation of a binary search algorithm in Python using stdlib - Be careful collection must be ascending sorted, otherwise result will be + Be careful collection must be ascending sorted otherwise, the result will be unpredictable :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search - :return: index of found item or None if item is not found + :return: index of the found item or -1 if the item is not found Examples: >>> binary_search_std_lib([0, 5, 7, 10, 15], 0) 0 - >>> binary_search_std_lib([0, 5, 7, 10, 15], 15) 4 - >>> binary_search_std_lib([0, 5, 7, 10, 15], 5) 1 - >>> binary_search_std_lib([0, 5, 7, 10, 15], 6) - + -1 """ + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") index = bisect.bisect_left(sorted_collection, item) if index != len(sorted_collection) and sorted_collection[index] 
== item: return index - return None + return -1 def binary_search_by_recursion( - sorted_collection: list[int], item: int, left: int, right: int -) -> int | None: - """Pure implementation of binary search algorithm in Python by recursion + sorted_collection: list[int], item: int, left: int = 0, right: int = -1 +) -> int: + """Pure implementation of a binary search algorithm in Python by recursion - Be careful collection must be ascending sorted, otherwise result will be + Be careful collection must be ascending sorted otherwise, the result will be unpredictable First recursion should be started with left=0 and right=(len(sorted_collection)-1) :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search - :return: index of found item or None if item is not found + :return: index of the found item or -1 if the item is not found Examples: >>> binary_search_by_recursion([0, 5, 7, 10, 15], 0, 0, 4) 0 - >>> binary_search_by_recursion([0, 5, 7, 10, 15], 15, 0, 4) 4 - >>> binary_search_by_recursion([0, 5, 7, 10, 15], 5, 0, 4) 1 - >>> binary_search_by_recursion([0, 5, 7, 10, 15], 6, 0, 4) - + -1 """ + if right < 0: + right = len(sorted_collection) - 1 + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") if right < left: - return None + return -1 midpoint = left + (right - left) // 2 @@ -297,12 +282,78 @@ def binary_search_by_recursion( return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right) +def exponential_search(sorted_collection: list[int], item: int) -> int: + """Pure implementation of an exponential search algorithm in Python + Resources used: + https://en.wikipedia.org/wiki/Exponential_search + + Be careful collection must be ascending sorted otherwise, result will be + unpredictable + + :param sorted_collection: some ascending sorted collection with comparable items + :param item: item value to search + :return: index of the found item or -1 if the item is not found + + the order of this algorithm is O(lg I) where I is index position of item if exist + + Examples: + >>> exponential_search([0, 5, 7, 10, 15], 0) + 0 + >>> exponential_search([0, 5, 7, 10, 15], 15) + 4 + >>> exponential_search([0, 5, 7, 10, 15], 5) + 1 + >>> exponential_search([0, 5, 7, 10, 15], 6) + -1 + """ + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") + bound = 1 + while bound < len(sorted_collection) and sorted_collection[bound] < item: + bound *= 2 + left = bound // 2 + right = min(bound, len(sorted_collection) - 1) + last_result = binary_search_by_recursion( + sorted_collection=sorted_collection, item=item, left=left, right=right + ) + if last_result is None: + return -1 + return last_result + + +searches = ( # Fastest to slowest... 
+ binary_search_std_lib, + binary_search, + exponential_search, + binary_search_by_recursion, +) + + if __name__ == "__main__": - user_input = input("Enter numbers separated by comma:\n").strip() + import doctest + import timeit + + doctest.testmod() + for search in searches: + name = f"{search.__name__:>26}" + print(f"{name}: {search([0, 5, 7, 10, 15], 10) = }") # type: ignore[operator] + + print("\nBenchmarks...") + setup = "collection = range(1000)" + for search in searches: + name = search.__name__ + print( + f"{name:>26}:", + timeit.timeit( + f"{name}(collection, 500)", setup=setup, number=5_000, globals=globals() + ), + ) + + user_input = input("\nEnter numbers separated by comma: ").strip() collection = sorted(int(item) for item in user_input.split(",")) - target = int(input("Enter a single number to be found in the list:\n")) - result = binary_search(collection, target) - if result is None: + target = int(input("Enter a single number to be found in the list: ")) + result = binary_search(sorted_collection=collection, item=target) + if result == -1: print(f"{target} was not found in {collection}.") else: - print(f"{target} was found at position {result} in {collection}.") + print(f"{target} was found at position {result} of {collection}.") From 4707fdb0f27bdc1e7442ce5940da335d58885104 Mon Sep 17 00:00:00 2001 From: Saptadeep Banerjee <69459134+imSanko@users.noreply.github.com> Date: Sun, 22 Oct 2023 03:35:37 +0530 Subject: [PATCH 1196/1543] Add tests for Perfect_Number (#10745) * Added new tests! * [ADD]: Inproved Tests * fixed * Removed spaces * Changed the file name * Added Changes * changed the code and kept the test cases * changed the code and kept the test cases * missed the line * removed spaces * Update power_using_recursion.py * Added new tests in Signum * Few things added * Removed few stuff and added few changes * Fixed few things * Reverted the function * Update maths/signum.py Co-authored-by: Christian Clauss * Added few things * Update maths/signum.py Co-authored-by: Christian Clauss * Added the type hint back * Update signum.py * Added NEW tests for Perfect_Number * Update maths/special_numbers/perfect_number.py Co-authored-by: Christian Clauss * Added the line back * Update maths/special_numbers/perfect_number.py Co-authored-by: Christian Clauss * Fixed a space * Updated * Reverted changes * Added the old code and FIXED few LINES * Fixed few things * Changed Test CASES * Update perfect_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/perfect_number.py | 77 +++++++++++++++++++++++++ maths/special_numbers/perfect_number.py | 29 ++++++++-- 2 files changed, 102 insertions(+), 4 deletions(-) create mode 100644 maths/perfect_number.py diff --git a/maths/perfect_number.py b/maths/perfect_number.py new file mode 100644 index 000000000000..df6b6e3d91d8 --- /dev/null +++ b/maths/perfect_number.py @@ -0,0 +1,77 @@ +""" +== Perfect Number == +In number theory, a perfect number is a positive integer that is equal to the sum of +its positive divisors, excluding the number itself. +For example: 6 ==> divisors[1, 2, 3, 6] + Excluding 6, the sum(divisors) is 1 + 2 + 3 = 6 + So, 6 is a Perfect Number + +Other examples of Perfect Numbers: 28, 486, ... 
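A quick numeric check of that list is straightforward; note that the third perfect number is 496, not 486. A minimal sketch, assuming only the standard library and using an illustrative helper name that is not defined in the patched modules:

def proper_divisor_sum(n: int) -> int:
    # Every proper divisor of n is at most n // 2, so the search can stop there.
    return sum(i for i in range(1, n // 2 + 1) if n % i == 0)

# The first four perfect numbers are 6, 28, 496 and 8128.
assert all(proper_divisor_sum(n) == n for n in (6, 28, 496, 8128))
assert proper_divisor_sum(486) != 486  # 486 is not perfect; its proper divisors sum to 606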
+ +https://en.wikipedia.org/wiki/Perfect_number +""" + + +def perfect(number: int) -> bool: + """ + Check if a number is a perfect number. + + A perfect number is a positive integer that is equal to the sum of its proper + divisors (excluding itself). + + Args: + number: The number to be checked. + + Returns: + True if the number is a perfect number otherwise, False. + Start from 1 because dividing by 0 will raise ZeroDivisionError. + A number at most can be divisible by the half of the number except the number + itself. For example, 6 is at most can be divisible by 3 except by 6 itself. + Examples: + >>> perfect(27) + False + >>> perfect(28) + True + >>> perfect(29) + False + >>> perfect(6) + True + >>> perfect(12) + False + >>> perfect(496) + True + >>> perfect(8128) + True + >>> perfect(0) + False + >>> perfect(-1) + False + >>> perfect(12.34) + Traceback (most recent call last): + ... + ValueError: number must an integer + >>> perfect("Hello") + Traceback (most recent call last): + ... + ValueError: number must an integer + """ + if not isinstance(number, int): + raise ValueError("number must an integer") + if number <= 0: + return False + return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + print("Program to check whether a number is a Perfect number or not...") + try: + number = int(input("Enter a positive integer: ").strip()) + except ValueError: + msg = "number must an integer" + print(msg) + raise ValueError(msg) + + print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.") diff --git a/maths/special_numbers/perfect_number.py b/maths/special_numbers/perfect_number.py index 148e988fb4c5..160ab2d967ad 100644 --- a/maths/special_numbers/perfect_number.py +++ b/maths/special_numbers/perfect_number.py @@ -14,16 +14,37 @@ def perfect(number: int) -> bool: """ + Check if a number is a perfect number. + + A perfect number is a positive integer that is equal to the sum of its proper + divisors (excluding itself). + + Args: + number: The number to be checked. + + Returns: + True if the number is a perfect number, False otherwise. + + Examples: >>> perfect(27) False >>> perfect(28) True >>> perfect(29) False - - Start from 1 because dividing by 0 will raise ZeroDivisionError. - A number at most can be divisible by the half of the number except the number - itself. For example, 6 is at most can be divisible by 3 except by 6 itself. 
+ >>> perfect(6) + True + >>> perfect(12) + False + >>> perfect(496) + True + >>> perfect(8128) + True + >>> perfect(0) + >>> perfect(-3) + >>> perfect(12.34) + >>> perfect("day") + >>> perfect(["call"]) """ return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number From d9562033f6b15c17e0b48181c087731751abd7a6 Mon Sep 17 00:00:00 2001 From: Barun Parua <76466796+Baron105@users.noreply.github.com> Date: Sun, 22 Oct 2023 04:03:50 +0530 Subject: [PATCH 1197/1543] added a function to calculate perceived frequency by observer using Doppler Effect (#10776) * avg and mps speed formulae added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * avg and mps speed formulae added * fixed_spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ws * added amicable numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added amicable numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed * changed name of file and added code improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * issues fixed due to pi * requested changes added * Created doppler_effect_of_sound.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated doppler_effect_of_sound.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added desc names * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * renamed doppler_effect_of_sound.py to doppler_frequency.py * used expection handling rather than print statements * fixed spacing for ruff * Update doppler_frequency.py This is super slick! Well done. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/special_numbers/perfect_number.py | 32 +++++- .../{perceptron.py => perceptron.py.DISABLED} | 0 physics/doppler_frequency.py | 104 ++++++++++++++++++ 3 files changed, 132 insertions(+), 4 deletions(-) rename neural_network/{perceptron.py => perceptron.py.DISABLED} (100%) create mode 100644 physics/doppler_frequency.py diff --git a/maths/special_numbers/perfect_number.py b/maths/special_numbers/perfect_number.py index 160ab2d967ad..a022dc677638 100644 --- a/maths/special_numbers/perfect_number.py +++ b/maths/special_numbers/perfect_number.py @@ -25,6 +25,10 @@ def perfect(number: int) -> bool: Returns: True if the number is a perfect number, False otherwise. + Start from 1 because dividing by 0 will raise ZeroDivisionError. + A number at most can be divisible by the half of the number except the number + itself. For example, 6 is at most can be divisible by 3 except by 6 itself. 
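(The n // 2 bound used here holds in general: any proper divisor d of n pairs with a cofactor n / d that is at least 2, so d can never exceed n / 2.)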
+ Examples: >>> perfect(27) False @@ -41,15 +45,35 @@ def perfect(number: int) -> bool: >>> perfect(8128) True >>> perfect(0) - >>> perfect(-3) + False + >>> perfect(-1) + False >>> perfect(12.34) - >>> perfect("day") - >>> perfect(["call"]) + Traceback (most recent call last): + ... + ValueError: number must be an integer + >>> perfect("Hello") + Traceback (most recent call last): + ... + ValueError: number must be an integer """ + if not isinstance(number, int): + raise ValueError("number must be an integer") + if number <= 0: + return False return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number if __name__ == "__main__": + from doctest import testmod + + testmod() print("Program to check whether a number is a Perfect number or not...") - number = int(input("Enter number: ").strip()) + try: + number = int(input("Enter a positive integer: ").strip()) + except ValueError: + msg = "number must be an integer" + print(msg) + raise ValueError(msg) + print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.") diff --git a/neural_network/perceptron.py b/neural_network/perceptron.py.DISABLED similarity index 100% rename from neural_network/perceptron.py rename to neural_network/perceptron.py.DISABLED diff --git a/physics/doppler_frequency.py b/physics/doppler_frequency.py new file mode 100644 index 000000000000..2a761c72d9b8 --- /dev/null +++ b/physics/doppler_frequency.py @@ -0,0 +1,104 @@ +""" +Doppler's effect + +The Doppler effect (also Doppler shift) is the change in the frequency of a wave in +relation to an observer who is moving relative to the source of the wave. The Doppler +effect is named after the physicist Christian Doppler. A common example of Doppler +shift is the change of pitch heard when a vehicle sounding a horn approaches and +recedes from an observer. + +The reason for the Doppler effect is that when the source of the waves is moving +towards the observer, each successive wave crest is emitted from a position closer to +the observer than the crest of the previous wave. Therefore, each wave takes slightly +less time to reach the observer than the previous wave. Hence, the time between the +arrivals of successive wave crests at the observer is reduced, causing an increase in +the frequency. Similarly, if the source of waves is moving away from the observer, +each wave is emitted from a position farther from the observer than the previous wave, +so the arrival time between successive waves is increased, reducing the frequency. + +If the source of waves is stationary but the observer is moving with respect to the +source, the transmission velocity of the waves changes (ie the rate at which the +observer receives waves) even if the wavelength and frequency emitted from the source +remain constant. + +These results are all summarized by the Doppler formula: + + f = (f0 * (v + v0)) / (v - vs) + +where: + f: frequency of the wave + f0: frequency of the wave when the source is stationary + v: velocity of the wave in the medium + v0: velocity of the observer, positive if the observer is moving towards the source + vs: velocity of the source, positive if the source is moving towards the observer + +Doppler's effect has many applications in physics and engineering, such as radar, +astronomy, medical imaging, and seismology. 
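As a quick worked instance of the formula above, using the same numbers as the doctests further down: for a stationary 100 Hz source in air (v = 330 m/s) and an observer approaching it at v0 = 10 m/s,

    f = 100 * (330 + 10) / (330 - 0) = 34000 / 330 ≈ 103.03 Hz

so the approaching observer hears a slightly higher pitch, as expected.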
+ +References: +https://en.wikipedia.org/wiki/Doppler_effect + +Now, we will implement a function that calculates the frequency of a wave as a function +of the frequency of the wave when the source is stationary, the velocity of the wave +in the medium, the velocity of the observer and the velocity of the source. +""" + + +def doppler_effect( + org_freq: float, wave_vel: float, obs_vel: float, src_vel: float +) -> float: + """ + Input Parameters: + ----------------- + org_freq: frequency of the wave when the source is stationary + wave_vel: velocity of the wave in the medium + obs_vel: velocity of the observer, +ve if the observer is moving towards the source + src_vel: velocity of the source, +ve if the source is moving towards the observer + + Returns: + -------- + f: frequency of the wave as perceived by the observer + + Docstring Tests: + >>> doppler_effect(100, 330, 10, 0) # observer moving towards the source + 103.03030303030303 + >>> doppler_effect(100, 330, -10, 0) # observer moving away from the source + 96.96969696969697 + >>> doppler_effect(100, 330, 0, 10) # source moving towards the observer + 103.125 + >>> doppler_effect(100, 330, 0, -10) # source moving away from the observer + 97.05882352941177 + >>> doppler_effect(100, 330, 10, 10) # source & observer moving towards each other + 106.25 + >>> doppler_effect(100, 330, -10, -10) # source and observer moving away + 94.11764705882354 + >>> doppler_effect(100, 330, 10, 330) # source moving at same speed as the wave + Traceback (most recent call last): + ... + ZeroDivisionError: Division by zero implies vs=v and observer in front of the source + >>> doppler_effect(100, 330, 10, 340) # source moving faster than the wave + Traceback (most recent call last): + ... + ValueError: Non-positive frequency implies vs>v or v0>v (in the opposite direction) + >>> doppler_effect(100, 330, -340, 10) # observer moving faster than the wave + Traceback (most recent call last): + ... + ValueError: Non-positive frequency implies vs>v or v0>v (in the opposite direction) + """ + + if wave_vel == src_vel: + raise ZeroDivisionError( + "Division by zero implies vs=v and observer in front of the source" + ) + doppler_freq = (org_freq * (wave_vel + obs_vel)) / (wave_vel - src_vel) + if doppler_freq <= 0: + raise ValueError( + "Non-positive frequency implies vs>v or v0>v (in the opposite direction)" + ) + return doppler_freq + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c92e86bd7950b443fe39ccb19b587df44feaa068 Mon Sep 17 00:00:00 2001 From: "Precious C. 
Jacob" <72174492+PreciousJac0b@users.noreply.github.com> Date: Sun, 22 Oct 2023 00:33:49 +0100 Subject: [PATCH 1198/1543] Add tests to data_structures/linked_list/swap_nodes.py (#10751) * Added doctests to the swap_nodes file under linkedlist data structure * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added doctests to the swap_nodes file under linkedlist data structure * Added doctests to the swap_nodes file under linkedlist data structure * Added doctests to the swap_nodes file under linkedlist data structure * Update swap_nodes.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/linked_list/swap_nodes.py | 152 +++++++++++++++------- 1 file changed, 108 insertions(+), 44 deletions(-) diff --git a/data_structures/linked_list/swap_nodes.py b/data_structures/linked_list/swap_nodes.py index 31dcb02bfa9a..d66512087d2d 100644 --- a/data_structures/linked_list/swap_nodes.py +++ b/data_structures/linked_list/swap_nodes.py @@ -1,49 +1,73 @@ +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass from typing import Any +@dataclass class Node: - def __init__(self, data: Any) -> None: - """ - Initialize a new Node with the given data. - - Args: - data: The data to be stored in the node. - - """ - self.data = data - self.next: Node | None = None # Reference to the next node + data: Any + next_node: Node | None = None +@dataclass class LinkedList: - def __init__(self) -> None: + head: Node | None = None + + def __iter__(self) -> Iterator: """ - Initialize an empty Linked List. + >>> linked_list = LinkedList() + >>> list(linked_list) + [] + >>> linked_list.push(0) + >>> tuple(linked_list) + (0,) """ - self.head: Node | None = None # Reference to the head (first node) + node = self.head + while node: + yield node.data + node = node.next_node - def print_list(self): + def __len__(self) -> int: """ - Print the elements of the Linked List in order. + >>> linked_list = LinkedList() + >>> len(linked_list) + 0 + >>> linked_list.push(0) + >>> len(linked_list) + 1 """ - temp = self.head - while temp is not None: - print(temp.data, end=" ") - temp = temp.next - print() + return sum(1 for _ in self) def push(self, new_data: Any) -> None: """ Add a new node with the given data to the beginning of the Linked List. + Args: new_data (Any): The data to be added to the new node. + + Returns: + None + + Examples: + >>> linked_list = LinkedList() + >>> linked_list.push(5) + >>> linked_list.push(4) + >>> linked_list.push(3) + >>> linked_list.push(2) + >>> linked_list.push(1) + >>> list(linked_list) + [1, 2, 3, 4, 5] """ new_node = Node(new_data) - new_node.next = self.head + new_node.next_node = self.head self.head = new_node - def swap_nodes(self, node_data_1, node_data_2) -> None: + def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None: """ Swap the positions of two nodes in the Linked List based on their data values. + Args: node_data_1: Data value of the first node to be swapped. node_data_2: Data value of the second node to be swapped. @@ -51,34 +75,74 @@ def swap_nodes(self, node_data_1, node_data_2) -> None: Note: If either of the specified data values isn't found then, no swapping occurs. + + Examples: + When both values are present in a linked list. 
+ >>> linked_list = LinkedList() + >>> linked_list.push(5) + >>> linked_list.push(4) + >>> linked_list.push(3) + >>> linked_list.push(2) + >>> linked_list.push(1) + >>> list(linked_list) + [1, 2, 3, 4, 5] + >>> linked_list.swap_nodes(1, 5) + >>> tuple(linked_list) + (5, 2, 3, 4, 1) + + When one value is present and the other isn't in the linked list. + >>> second_list = LinkedList() + >>> second_list.push(6) + >>> second_list.push(7) + >>> second_list.push(8) + >>> second_list.push(9) + >>> second_list.swap_nodes(1, 6) is None + True + + When both values are absent in the linked list. + >>> second_list = LinkedList() + >>> second_list.push(10) + >>> second_list.push(9) + >>> second_list.push(8) + >>> second_list.push(7) + >>> second_list.swap_nodes(1, 3) is None + True + + When linkedlist is empty. + >>> second_list = LinkedList() + >>> second_list.swap_nodes(1, 3) is None + True + + Returns: + None """ if node_data_1 == node_data_2: return - else: - node_1 = self.head - while node_1 is not None and node_1.data != node_data_1: - node_1 = node_1.next - - node_2 = self.head - while node_2 is not None and node_2.data != node_data_2: - node_2 = node_2.next - - if node_1 is None or node_2 is None: - return - # Swap the data values of the two nodes - node_1.data, node_2.data = node_2.data, node_1.data + node_1 = self.head + while node_1 and node_1.data != node_data_1: + node_1 = node_1.next_node + node_2 = self.head + while node_2 and node_2.data != node_data_2: + node_2 = node_2.next_node + if node_1 is None or node_2 is None: + return + # Swap the data values of the two nodes + node_1.data, node_2.data = node_2.data, node_1.data if __name__ == "__main__": - ll = LinkedList() - for i in range(5, 0, -1): - ll.push(i) + """ + Python script that outputs the swap of nodes in a linked list. + """ + from doctest import testmod - print("Original Linked List:") - ll.print_list() - - ll.swap_nodes(1, 4) - print("After swapping the nodes whose data is 1 and 4:") + testmod() + linked_list = LinkedList() + for i in range(5, 0, -1): + linked_list.push(i) - ll.print_list() + print(f"Original Linked List: {list(linked_list)}") + linked_list.swap_nodes(1, 4) + print(f"Modified Linked List: {list(linked_list)}") + print("After swapping the nodes whose data is 1 and 4.") From d73a4c2ee035698de437086230985574766f195b Mon Sep 17 00:00:00 2001 From: santiditomas <72716997+santiditomas@users.noreply.github.com> Date: Sat, 21 Oct 2023 20:59:41 -0300 Subject: [PATCH 1199/1543] adding new physics algorithm: center of mass (#10743) * adding new physics algorithm: center of mass * Add changes requested by the reviewer * Add changes requested by the reviewer * Update center_of_mass.py * Update center_of_mass.py --------- Co-authored-by: Christian Clauss --- physics/center_of_mass.py | 109 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 physics/center_of_mass.py diff --git a/physics/center_of_mass.py b/physics/center_of_mass.py new file mode 100644 index 000000000000..bd9ba2480584 --- /dev/null +++ b/physics/center_of_mass.py @@ -0,0 +1,109 @@ +""" +Calculating the center of mass for a discrete system of particles, given their +positions and masses. + +Description: + +In physics, the center of mass of a distribution of mass in space (sometimes referred +to as the barycenter or balance point) is the unique point at any given time where the +weighted relative position of the distributed mass sums to zero. 
This is the point to +which a force may be applied to cause a linear acceleration without an angular +acceleration. + +Calculations in mechanics are often simplified when formulated with respect to the +center of mass. It is a hypothetical point where the entire mass of an object may be +assumed to be concentrated to visualize its motion. In other words, the center of mass +is the particle equivalent of a given object for the application of Newton's laws of +motion. + +In the case of a system of particles P_i, i = 1, ..., n , each with mass m_i that are +located in space with coordinates r_i, i = 1, ..., n , the coordinates R of the center +of mass corresponds to: + +R = (Σ(mi * ri) / Σ(mi)) + +Reference: https://en.wikipedia.org/wiki/Center_of_mass +""" +from collections import namedtuple + +Particle = namedtuple("Particle", "x y z mass") # noqa: PYI024 +Coord3D = namedtuple("Coord3D", "x y z") # noqa: PYI024 + + +def center_of_mass(particles: list[Particle]) -> Coord3D: + """ + Input Parameters + ---------------- + particles: list(Particle): + A list of particles where each particle is a tuple with it´s (x, y, z) position and + it´s mass. + + Returns + ------- + Coord3D: + A tuple with the coordinates of the center of mass (Xcm, Ycm, Zcm) rounded to two + decimal places. + + Examples + -------- + >>> center_of_mass([ + ... Particle(1.5, 4, 3.4, 4), + ... Particle(5, 6.8, 7, 8.1), + ... Particle(9.4, 10.1, 11.6, 12) + ... ]) + Coord3D(x=6.61, y=7.98, z=8.69) + + >>> center_of_mass([ + ... Particle(1, 2, 3, 4), + ... Particle(5, 6, 7, 8), + ... Particle(9, 10, 11, 12) + ... ]) + Coord3D(x=6.33, y=7.33, z=8.33) + + >>> center_of_mass([ + ... Particle(1, 2, 3, -4), + ... Particle(5, 6, 7, 8), + ... Particle(9, 10, 11, 12) + ... ]) + Traceback (most recent call last): + ... + ValueError: Mass of all particles must be greater than 0 + + >>> center_of_mass([ + ... Particle(1, 2, 3, 0), + ... Particle(5, 6, 7, 8), + ... Particle(9, 10, 11, 12) + ... ]) + Traceback (most recent call last): + ... + ValueError: Mass of all particles must be greater than 0 + + >>> center_of_mass([]) + Traceback (most recent call last): + ... 
+ ValueError: No particles provided + """ + if not particles: + raise ValueError("No particles provided") + + if any(particle.mass <= 0 for particle in particles): + raise ValueError("Mass of all particles must be greater than 0") + + total_mass = sum(particle.mass for particle in particles) + + center_of_mass_x = round( + sum(particle.x * particle.mass for particle in particles) / total_mass, 2 + ) + center_of_mass_y = round( + sum(particle.y * particle.mass for particle in particles) / total_mass, 2 + ) + center_of_mass_z = round( + sum(particle.z * particle.mass for particle in particles) / total_mass, 2 + ) + return Coord3D(center_of_mass_x, center_of_mass_y, center_of_mass_z) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0601b56173021fe96fb070d20085962b036e85c3 Mon Sep 17 00:00:00 2001 From: gio-puter <103840942+gio-puter@users.noreply.github.com> Date: Sat, 21 Oct 2023 22:42:26 -0700 Subject: [PATCH 1200/1543] Add tests without modifying code (#10740) * Contributes to #9943 Added doctest to largest_of_very_large_numbers.py Added doctest to word_patterns.py Added doctest to onepad_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Contributes to #9943 Added doctest to maths/largest_of_very_large_numbers.py Added doctest to strings/word_patterns.py Added doctest to ciphers/onepad_cipher.py * Add tests without modifying code #10740 Added test to maths/largest_of_very_large_numbers Added test to strings/word_patterns.py Added test to ciphers/onepad_cipher.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ciphers/onepad_cipher.py | 37 ++++++++++++++++++++++++-- maths/largest_of_very_large_numbers.py | 13 +++++++++ strings/word_patterns.py | 21 +++++++++++++++ 3 files changed, 69 insertions(+), 2 deletions(-) diff --git a/ciphers/onepad_cipher.py b/ciphers/onepad_cipher.py index 4bfe35b7180a..c4fb22e14a06 100644 --- a/ciphers/onepad_cipher.py +++ b/ciphers/onepad_cipher.py @@ -4,7 +4,27 @@ class Onepad: @staticmethod def encrypt(text: str) -> tuple[list[int], list[int]]: - """Function to encrypt text using pseudo-random numbers""" + """ + Function to encrypt text using pseudo-random numbers + >>> Onepad().encrypt("") + ([], []) + >>> Onepad().encrypt([]) + ([], []) + >>> random.seed(1) + >>> Onepad().encrypt(" ") + ([6969], [69]) + >>> random.seed(1) + >>> Onepad().encrypt("Hello") + ([9729, 114756, 4653, 31309, 10492], [69, 292, 33, 131, 61]) + >>> Onepad().encrypt(1) + Traceback (most recent call last): + ... + TypeError: 'int' object is not iterable + >>> Onepad().encrypt(1.1) + Traceback (most recent call last): + ... + TypeError: 'float' object is not iterable + """ plain = [ord(i) for i in text] key = [] cipher = [] @@ -17,7 +37,20 @@ def encrypt(text: str) -> tuple[list[int], list[int]]: @staticmethod def decrypt(cipher: list[int], key: list[int]) -> str: - """Function to decrypt text using pseudo-random numbers.""" + """ + Function to decrypt text using pseudo-random numbers. + >>> Onepad().decrypt([], []) + '' + >>> Onepad().decrypt([35], []) + '' + >>> Onepad().decrypt([], [35]) + Traceback (most recent call last): + ... 
+ IndexError: list index out of range + >>> random.seed(1) + >>> Onepad().decrypt([9729, 114756, 4653, 31309, 10492], [69, 292, 33, 131, 61]) + 'Hello' + """ plain = [] for i in range(len(key)): p = int((cipher[i] - (key[i]) ** 2) / key[i]) diff --git a/maths/largest_of_very_large_numbers.py b/maths/largest_of_very_large_numbers.py index 7e7fea004958..eb5c121fd262 100644 --- a/maths/largest_of_very_large_numbers.py +++ b/maths/largest_of_very_large_numbers.py @@ -4,6 +4,19 @@ def res(x, y): + """ + Reduces large number to a more manageable number + >>> res(5, 7) + 4.892790030352132 + >>> res(0, 5) + 0 + >>> res(3, 0) + 1 + >>> res(-1, 5) + Traceback (most recent call last): + ... + ValueError: math domain error + """ if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.log10(x) diff --git a/strings/word_patterns.py b/strings/word_patterns.py index d12d267e7b35..ed603e9fefeb 100644 --- a/strings/word_patterns.py +++ b/strings/word_patterns.py @@ -1,11 +1,32 @@ def get_word_pattern(word: str) -> str: """ + Returns numerical pattern of character appearances in given word + >>> get_word_pattern("") + '' + >>> get_word_pattern(" ") + '0' >>> get_word_pattern("pattern") '0.1.2.2.3.4.5' >>> get_word_pattern("word pattern") '0.1.2.3.4.5.6.7.7.8.2.9' >>> get_word_pattern("get word pattern") '0.1.2.3.4.5.6.7.3.8.9.2.2.1.6.10' + >>> get_word_pattern() + Traceback (most recent call last): + ... + TypeError: get_word_pattern() missing 1 required positional argument: 'word' + >>> get_word_pattern(1) + Traceback (most recent call last): + ... + AttributeError: 'int' object has no attribute 'upper' + >>> get_word_pattern(1.1) + Traceback (most recent call last): + ... + AttributeError: 'float' object has no attribute 'upper' + >>> get_word_pattern([]) + Traceback (most recent call last): + ... 
+ AttributeError: 'list' object has no attribute 'upper' """ word = word.upper() next_num = 0 From 7d0f6e012acb42271652f9a398675305b7e270d2 Mon Sep 17 00:00:00 2001 From: Kento <75509362+nkstonks@users.noreply.github.com> Date: Sun, 22 Oct 2023 20:08:08 +1100 Subject: [PATCH 1201/1543] Updated doctests for nor_gate (#10791) * added other possible cases * added test for correct output of truth table * updating DIRECTORY.md * Update nor_gate.py --------- Co-authored-by: = <=> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 6 ++-- boolean_algebra/nor_gate.py | 55 +++++++++++++++++++++++++------------ 2 files changed, 41 insertions(+), 20 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 9e0166ad80c5..c37c4f99ba88 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -541,7 +541,7 @@ * [Dimensionality Reduction](machine_learning/dimensionality_reduction.py) * Forecasting * [Run](machine_learning/forecasting/run.py) - * [Frequent Pattern Growth Algorithm](machine_learning/frequent_pattern_growth.py) + * [Frequent Pattern Growth](machine_learning/frequent_pattern_growth.py) * [Gradient Descent](machine_learning/gradient_descent.py) * [K Means Clust](machine_learning/k_means_clust.py) * [K Nearest Neighbours](machine_learning/k_nearest_neighbours.py) @@ -649,6 +649,7 @@ * [Numerical Integration](maths/numerical_integration.py) * [Odd Sieve](maths/odd_sieve.py) * [Perfect Cube](maths/perfect_cube.py) + * [Perfect Number](maths/perfect_number.py) * [Perfect Square](maths/perfect_square.py) * [Persistence](maths/persistence.py) * [Pi Generator](maths/pi_generator.py) @@ -767,7 +768,6 @@ * [Swish](neural_network/activation_functions/swish.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) - * [Perceptron](neural_network/perceptron.py) * [Simple Neural Network](neural_network/simple_neural_network.py) ## Other @@ -803,8 +803,10 @@ * [Archimedes Principle Of Buoyant Force](physics/archimedes_principle_of_buoyant_force.py) * [Basic Orbital Capture](physics/basic_orbital_capture.py) * [Casimir Effect](physics/casimir_effect.py) + * [Center Of Mass](physics/center_of_mass.py) * [Centripetal Force](physics/centripetal_force.py) * [Coulombs Law](physics/coulombs_law.py) + * [Doppler Frequency](physics/doppler_frequency.py) * [Grahams Law](physics/grahams_law.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Hubble Parameter](physics/hubble_parameter.py) diff --git a/boolean_algebra/nor_gate.py b/boolean_algebra/nor_gate.py index 2c27b80afdbe..0c8ab1c0af61 100644 --- a/boolean_algebra/nor_gate.py +++ b/boolean_algebra/nor_gate.py @@ -1,15 +1,18 @@ """ -A NOR Gate is a logic gate in boolean algebra which results to false(0) -if any of the input is 1, and True(1) if both the inputs are 0. +A NOR Gate is a logic gate in boolean algebra which results in false(0) if any of the +inputs is 1, and True(1) if all inputs are 0. 
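Equivalently, for inputs a and b the gate computes not (a or b), which is why the implementation below can simply return int(input_1 == input_2 == 0); for example, not (0 or 0) is True while not (1 or 0) is False.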
Following is the truth table of a NOR Gate: - | Input 1 | Input 2 | Output | - | 0 | 0 | 1 | - | 0 | 1 | 0 | - | 1 | 0 | 0 | - | 1 | 1 | 0 | + Truth Table of NOR Gate: + | Input 1 | Input 2 | Output | + | 0 | 0 | 1 | + | 0 | 1 | 0 | + | 1 | 0 | 0 | + | 1 | 1 | 0 | -Following is the code implementation of the NOR Gate + Code provided by Akshaj Vishwanathan +https://www.geeksforgeeks.org/logic-gates-in-python """ +from collections.abc import Callable def nor_gate(input_1: int, input_2: int) -> int: @@ -30,19 +33,35 @@ def nor_gate(input_1: int, input_2: int) -> int: return int(input_1 == input_2 == 0) -def main() -> None: - print("Truth Table of NOR Gate:") - print("| Input 1 | Input 2 | Output |") - print(f"| 0 | 0 | {nor_gate(0, 0)} |") - print(f"| 0 | 1 | {nor_gate(0, 1)} |") - print(f"| 1 | 0 | {nor_gate(1, 0)} |") - print(f"| 1 | 1 | {nor_gate(1, 1)} |") +def truth_table(func: Callable) -> str: + """ + >>> print(truth_table(nor_gate)) + Truth Table of NOR Gate: + | Input 1 | Input 2 | Output | + | 0 | 0 | 1 | + | 0 | 1 | 0 | + | 1 | 0 | 0 | + | 1 | 1 | 0 | + """ + + def make_table_row(items: list | tuple) -> str: + """ + >>> make_table_row(("One", "Two", "Three")) + '| One | Two | Three |' + """ + return f"| {' | '.join(f'{item:^8}' for item in items)} |" + + return "\n".join( + ( + "Truth Table of NOR Gate:", + make_table_row(("Input 1", "Input 2", "Output")), + *[make_table_row((i, j, func(i, j))) for i in (0, 1) for j in (0, 1)], + ) + ) if __name__ == "__main__": import doctest doctest.testmod() - main() -"""Code provided by Akshaj Vishwanathan""" -"""Reference: https://www.geeksforgeeks.org/logic-gates-in-python/""" + print(truth_table(nor_gate)) From 6c8743f1e62c785e58a45f785b380f27693aadf9 Mon Sep 17 00:00:00 2001 From: Jeel Gajera <83470656+JeelGajera@users.noreply.github.com> Date: Sun, 22 Oct 2023 19:21:30 +0530 Subject: [PATCH 1202/1543] Add: Time Conversion Function (#10749) * Add: Time Conversion Function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update conversions/time_conversions.py Co-authored-by: Christian Clauss * fix: required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: err * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update time_conversions.py --------- Co-authored-by: Jeel Gajera Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + conversions/time_conversions.py | 86 +++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) create mode 100644 conversions/time_conversions.py diff --git a/DIRECTORY.md b/DIRECTORY.md index c37c4f99ba88..f45102ae1ce1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -174,6 +174,7 @@ * [Roman Numerals](conversions/roman_numerals.py) * [Speed Conversions](conversions/speed_conversions.py) * [Temperature Conversions](conversions/temperature_conversions.py) + * [Time Conversions](conversions/time_conversions.py) * [Volume Conversions](conversions/volume_conversions.py) * [Weight Conversion](conversions/weight_conversion.py) diff --git a/conversions/time_conversions.py b/conversions/time_conversions.py new file mode 100644 index 000000000000..8c30f5bc4a45 --- /dev/null +++ b/conversions/time_conversions.py @@ -0,0 +1,86 @@ +""" +A unit of time is any particular time interval, used as a standard way of measuring or +expressing duration. 
The base unit of time in the International System of Units (SI), +and by extension most of the Western world, is the second, defined as about 9 billion +oscillations of the caesium atom. + +https://en.wikipedia.org/wiki/Unit_of_time +""" + +time_chart: dict[str, float] = { + "seconds": 1.0, + "minutes": 60.0, # 1 minute = 60 sec + "hours": 3600.0, # 1 hour = 60 minutes = 3600 seconds + "days": 86400.0, # 1 day = 24 hours = 1440 min = 86400 sec + "weeks": 604800.0, # 1 week=7d=168hr=10080min = 604800 sec + "months": 2629800.0, # Approximate value for a month in seconds + "years": 31557600.0, # Approximate value for a year in seconds +} + +time_chart_inverse: dict[str, float] = { + key: 1 / value for key, value in time_chart.items() +} + + +def convert_time(time_value: float, unit_from: str, unit_to: str) -> float: + """ + Convert time from one unit to another using the time_chart above. + + >>> convert_time(3600, "seconds", "hours") + 1.0 + >>> convert_time(3500, "Seconds", "Hours") + 0.972 + >>> convert_time(1, "DaYs", "hours") + 24.0 + >>> convert_time(120, "minutes", "SeCoNdS") + 7200.0 + >>> convert_time(2, "WEEKS", "days") + 14.0 + >>> convert_time(0.5, "hours", "MINUTES") + 30.0 + >>> convert_time(-3600, "seconds", "hours") + Traceback (most recent call last): + ... + ValueError: 'time_value' must be a non-negative number. + >>> convert_time("Hello", "hours", "minutes") + Traceback (most recent call last): + ... + ValueError: 'time_value' must be a non-negative number. + >>> convert_time([0, 1, 2], "weeks", "days") + Traceback (most recent call last): + ... + ValueError: 'time_value' must be a non-negative number. + >>> convert_time(1, "cool", "century") # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + ValueError: Invalid unit cool is not in seconds, minutes, hours, days, weeks, ... + >>> convert_time(1, "seconds", "hot") # doctest: +ELLIPSIS + Traceback (most recent call last): + ... + ValueError: Invalid unit hot is not in seconds, minutes, hours, days, weeks, ... + """ + if not isinstance(time_value, (int, float)) or time_value < 0: + msg = "'time_value' must be a non-negative number." + raise ValueError(msg) + + unit_from = unit_from.lower() + unit_to = unit_to.lower() + if unit_from not in time_chart or unit_to not in time_chart: + invalid_unit = unit_from if unit_from not in time_chart else unit_to + msg = f"Invalid unit {invalid_unit} is not in {', '.join(time_chart)}." 
+ raise ValueError(msg) + + return round( + time_value * time_chart[unit_from] * time_chart_inverse[unit_to], + 3, + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(f"{convert_time(3600,'seconds', 'hours') = :,}") + print(f"{convert_time(360, 'days', 'months') = :,}") + print(f"{convert_time(360, 'months', 'years') = :,}") + print(f"{convert_time(1, 'years', 'seconds') = :,}") From a8b94abc8b9131e260a5281f4c95a0d4f2d03325 Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Mon, 23 Oct 2023 00:21:56 +0530 Subject: [PATCH 1203/1543] Enhance readability of N Queens (#9265) * Enhance readability of N Queens * Simplify is_safe code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- backtracking/n_queens.py | 53 ++++++++++++++++++++-------------------- 1 file changed, 26 insertions(+), 27 deletions(-) diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index bbf0ce44f91c..0f237d95e7c8 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -17,40 +17,39 @@ def is_safe(board: list[list[int]], row: int, column: int) -> bool: This function returns a boolean value True if it is safe to place a queen there considering the current state of the board. - Parameters : - board(2D matrix) : board - row ,column : coordinates of the cell on a board + Parameters: + board (2D matrix): The chessboard + row, column: Coordinates of the cell on the board - Returns : + Returns: Boolean Value """ - for i in range(len(board)): - if board[row][i] == 1: - return False - for i in range(len(board)): - if board[i][column] == 1: - return False - for i, j in zip(range(row, -1, -1), range(column, -1, -1)): - if board[i][j] == 1: - return False - for i, j in zip(range(row, -1, -1), range(column, len(board))): - if board[i][j] == 1: - return False - return True + + n = len(board) # Size of the board + + # Check if there is any queen in the same row, column, + # left upper diagonal, and right upper diagonal + return ( + all(board[i][j] != 1 for i, j in zip(range(row, -1, -1), range(column, n))) + and all( + board[i][j] != 1 for i, j in zip(range(row, -1, -1), range(column, -1, -1)) + ) + and all(board[i][j] != 1 for i, j in zip(range(row, n), range(column, n))) + and all(board[i][j] != 1 for i, j in zip(range(row, n), range(column, -1, -1))) + ) def solve(board: list[list[int]], row: int) -> bool: """ - It creates a state space tree and calls the safe function until it receives a - False Boolean and terminates that branch and backtracks to the next + This function creates a state space tree and calls the safe function until it + receives a False Boolean and terminates that branch and backtracks to the next possible solution branch. """ if row >= len(board): """ - If the row number exceeds N we have board with a successful combination + If the row number exceeds N, we have a board with a successful combination and that combination is appended to the solution list and the board is printed. 
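One detail worth pausing on in the refactored is_safe above: the four zip generators walk the two upper and the two lower diagonals, but none of them holds the column fixed, so a queen already placed higher up in the same column is not detected by this check. A minimal sketch of an extra guard that would restore the original column test (the name column_is_free is illustrative, not part of this patch):

    # scan the rows above the current one for a queen in the same column
    column_is_free = all(board[i][column] != 1 for i in range(row))

which could then be and-ed together with the four diagonal scans.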
- """ solution.append(board) printboard(board) @@ -58,9 +57,9 @@ def solve(board: list[list[int]], row: int) -> bool: return True for i in range(len(board)): """ - For every row it iterates through each column to check if it is feasible to + For every row, it iterates through each column to check if it is feasible to place a queen there. - If all the combinations for that particular branch are successful the board is + If all the combinations for that particular branch are successful, the board is reinitialized for the next possible combination. """ if is_safe(board, row, i): @@ -77,14 +76,14 @@ def printboard(board: list[list[int]]) -> None: for i in range(len(board)): for j in range(len(board)): if board[i][j] == 1: - print("Q", end=" ") + print("Q", end=" ") # Queen is present else: - print(".", end=" ") + print(".", end=" ") # Empty cell print() -# n=int(input("The no. of queens")) +# Number of queens (e.g., n=8 for an 8x8 board) n = 8 board = [[0 for i in range(n)] for j in range(n)] solve(board, 0) -print("The total no. of solutions are :", len(solution)) +print("The total number of solutions are:", len(solution)) From fdb0635c71318da758fafcda80154d03dbbd5c5a Mon Sep 17 00:00:00 2001 From: Anshu Sharma <142900182+AnshuSharma111@users.noreply.github.com> Date: Mon, 23 Oct 2023 03:09:31 +0530 Subject: [PATCH 1204/1543] added doctest to playfair_cipher.py (#10823) * added doctest to playfair_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added newline to EOF andremoved trailing whitespace * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review --------- Co-authored-by: Keyboard-1 <142900182+Keyboard-1@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- ciphers/playfair_cipher.py | 59 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 2 deletions(-) diff --git a/ciphers/playfair_cipher.py b/ciphers/playfair_cipher.py index 7279fb23ecb2..86b45bc4fb6a 100644 --- a/ciphers/playfair_cipher.py +++ b/ciphers/playfair_cipher.py @@ -1,3 +1,24 @@ +""" +https://en.wikipedia.org/wiki/Playfair_cipher#Description + +The Playfair cipher was developed by Charles Wheatstone in 1854 +It's use was heavily promotedby Lord Playfair, hence its name + +Some features of the Playfair cipher are: + +1) It was the first literal diagram substitution cipher +2) It is a manual symmetric encryption technique +3) It is a multiple letter encryption cipher + +The implementation in the code below encodes alphabets only. +It removes spaces, special characters and numbers from the +code. + +Playfair is no longer used by military forces because of known +insecurities and of the advent of automated encryption devices. +This cipher is regarded as insecure since before World War I. +""" + import itertools import string from collections.abc import Generator, Iterable @@ -60,11 +81,26 @@ def generate_table(key: str) -> list[str]: def encode(plaintext: str, key: str) -> str: + """ + Encode the given plaintext using the Playfair cipher. + Takes the plaintext and the key as input and returns the encoded string. 
+ + >>> encode("Hello", "MONARCHY") + 'CFSUPM' + >>> encode("attack on the left flank", "EMERGENCY") + 'DQZSBYFSDZFMFNLOHFDRSG' + >>> encode("Sorry!", "SPECIAL") + 'AVXETX' + >>> encode("Number 1", "NUMBER") + 'UMBENF' + >>> encode("Photosynthesis!", "THE SUN") + 'OEMHQHVCHESUKE' + """ + table = generate_table(key) plaintext = prepare_input(plaintext) ciphertext = "" - # https://en.wikipedia.org/wiki/Playfair_cipher#Description for char1, char2 in chunker(plaintext, 2): row1, col1 = divmod(table.index(char1), 5) row2, col2 = divmod(table.index(char2), 5) @@ -83,10 +119,20 @@ def encode(plaintext: str, key: str) -> str: def decode(ciphertext: str, key: str) -> str: + """ + Decode the input string using the provided key. + + >>> decode("BMZFAZRZDH", "HAZARD") + 'FIREHAZARD' + >>> decode("HNBWBPQT", "AUTOMOBILE") + 'DRIVINGX' + >>> decode("SLYSSAQS", "CASTLE") + 'ATXTACKX' + """ + table = generate_table(key) plaintext = "" - # https://en.wikipedia.org/wiki/Playfair_cipher#Description for char1, char2 in chunker(ciphertext, 2): row1, col1 = divmod(table.index(char1), 5) row2, col2 = divmod(table.index(char2), 5) @@ -102,3 +148,12 @@ def decode(ciphertext: str, key: str) -> str: plaintext += table[row2 * 5 + col1] return plaintext + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + print("Encoded:", encode("BYE AND THANKS", "GREETING")) + print("Decoded:", decode("CXRBANRLBALQ", "GREETING")) From abd6bca074e8a846d5e306311845b46f7581012e Mon Sep 17 00:00:00 2001 From: Ankit Avinash <128812932+Void426@users.noreply.github.com> Date: Mon, 23 Oct 2023 10:54:51 +0530 Subject: [PATCH 1205/1543] Added Binary Focal Cross Entropy (#10674) * Added Binary Focal Cross Entropy * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed Issue * Fixed Issue * Added BFCE loss to loss_functions.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update machine_learning/loss_functions.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- machine_learning/loss_functions.py | 51 ++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index 0fa0956ed572..ef34296360e2 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -39,6 +39,57 @@ def binary_cross_entropy( return np.mean(bce_loss) +def binary_focal_cross_entropy( + y_true: np.ndarray, + y_pred: np.ndarray, + gamma: float = 2.0, + alpha: float = 0.25, + epsilon: float = 1e-15, +) -> float: + """ + Calculate the mean binary focal cross-entropy (BFCE) loss between true labels + and predicted probabilities. + + BFCE loss quantifies dissimilarity between true labels (0 or 1) and predicted + probabilities. It's a variation of binary cross-entropy that addresses class + imbalance by focusing on hard examples. + + BCFE = -Σ(alpha * (1 - y_pred)**gamma * y_true * log(y_pred) + + (1 - alpha) * y_pred**gamma * (1 - y_true) * log(1 - y_pred)) + + Reference: [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf) + + Parameters: + - y_true: True binary labels (0 or 1). + - y_pred: Predicted probabilities for class 1. + - gamma: Focusing parameter for modulating the loss (default: 2.0). + - alpha: Weighting factor for class 1 (default: 0.25). + - epsilon: Small constant to avoid numerical instability. 
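To see the focusing behaviour on a single sample with the defaults gamma=2.0 and alpha=0.25 (log is the natural logarithm, as in np.log): a confident correct prediction (y_true=1, y_pred=0.9) contributes -(0.25 * (1 - 0.9)**2 * log(0.9)) ≈ 2.6e-4, while a less certain one (y_true=1, y_pred=0.7) contributes -(0.25 * (1 - 0.7)**2 * log(0.7)) ≈ 8.0e-3, roughly thirty times as much, so the loss concentrates on the harder examples.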
+ + >>> true_labels = np.array([0, 1, 1, 0, 1]) + >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) + >>> binary_focal_cross_entropy(true_labels, predicted_probs) + 0.008257977659239775 + >>> true_labels = np.array([0, 1, 1, 0, 1]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> binary_focal_cross_entropy(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + # Clip predicted probabilities to avoid log(0) + y_pred = np.clip(y_pred, epsilon, 1 - epsilon) + + bcfe_loss = -( + alpha * (1 - y_pred) ** gamma * y_true * np.log(y_pred) + + (1 - alpha) * y_pred**gamma * (1 - y_true) * np.log(1 - y_pred) + ) + + return np.mean(bcfe_loss) + + def categorical_cross_entropy( y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 ) -> float: From dc4e89805a642d1c6e3fe031276edbfde3c1f40c Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Mon, 23 Oct 2023 10:57:33 +0530 Subject: [PATCH 1206/1543] Added docstring/documentation for sigmoid_function (#10756) * Added doctest for sigmoid_function * Added doctest for sigmoid_function * Added doctest for sigmoid_function --- machine_learning/logistic_regression.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py index 87bc8f6681cc..f9da0104ab4b 100644 --- a/machine_learning/logistic_regression.py +++ b/machine_learning/logistic_regression.py @@ -28,6 +28,21 @@ def sigmoid_function(z): + """ + Also known as Logistic Function. + + 1 + f(x) = ------- + 1 + e⁻ˣ + + The sigmoid function approaches a value of 1 as its input 'x' becomes + increasing positive. Opposite for negative values. 
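A few spot values make the shape concrete: f(0) = 0.5, f(4) = 1 / (1 + e^-4) ≈ 0.982 and f(-4) ≈ 0.018, so the output can be read as a probability that saturates towards 1 for large positive inputs and towards 0 for large negative ones.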
+ + Reference: https://en.wikipedia.org/wiki/Sigmoid_function + + @param z: input to the function + @returns: returns value in the range 0 to 1 + """ return 1 / (1 + np.exp(-z)) From 68faebe711899bf6072ceedb16ccf1fbdc7d2434 Mon Sep 17 00:00:00 2001 From: Pratik Tripathy <117454569+SilverDragonOfR@users.noreply.github.com> Date: Mon, 23 Oct 2023 11:05:10 +0530 Subject: [PATCH 1207/1543] feat: Add mass energy equivalence in physics and doctests (#10202) * updating DIRECTORY.md * feat: Add mass energy equivalence in physics * updating DIRECTORY.md * updating DIRECTORY.md * Apply suggestions from code review * Update physics/mass_energy_equivalence.py * Update mass_energy_equivalence.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- DIRECTORY.md | 1 + physics/mass_energy_equivalence.py | 77 ++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+) create mode 100644 physics/mass_energy_equivalence.py diff --git a/DIRECTORY.md b/DIRECTORY.md index f45102ae1ce1..c07e1550d1eb 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -815,6 +815,7 @@ * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [Malus Law](physics/malus_law.py) + * [Mass Energy Equivalence](physics/mass_energy_equivalence.py) * [Mirror Formulae](physics/mirror_formulae.py) * [N Body Simulation](physics/n_body_simulation.py) * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) diff --git a/physics/mass_energy_equivalence.py b/physics/mass_energy_equivalence.py new file mode 100644 index 000000000000..4a4c7890f4e0 --- /dev/null +++ b/physics/mass_energy_equivalence.py @@ -0,0 +1,77 @@ +""" +Title: +Finding the energy equivalence of mass and mass equivalence of energy +by Einstein's equation. + +Description: +Einstein's mass-energy equivalence is a pivotal concept in theoretical physics. +It asserts that energy (E) and mass (m) are directly related by the speed of +light in vacuum (c) squared, as described in the equation E = mc². This means that +mass and energy are interchangeable; a mass increase corresponds to an energy increase, +and vice versa. This principle has profound implications in nuclear reactions, +explaining the release of immense energy from minuscule changes in atomic nuclei. + +Equations: +E = mc² and m = E/c², where m is mass, E is Energy, c is speed of light in vacuum. + +Reference: +https://en.wikipedia.org/wiki/Mass%E2%80%93energy_equivalence +""" + +from scipy.constants import c # speed of light in vacuum (299792458 m/s) + + +def energy_from_mass(mass: float) -> float: + """ + Calculates the Energy equivalence of the Mass using E = mc² + in SI units J from Mass in kg. + + mass (float): Mass of body. + + Usage example: + >>> energy_from_mass(124.56) + 1.11948945063458e+19 + >>> energy_from_mass(320) + 2.8760165719578165e+19 + >>> energy_from_mass(0) + 0.0 + >>> energy_from_mass(-967.9) + Traceback (most recent call last): + ... + ValueError: Mass can't be negative. + + """ + if mass < 0: + raise ValueError("Mass can't be negative.") + return mass * c**2 + + +def mass_from_energy(energy: float) -> float: + """ + Calculates the Mass equivalence of the Energy using m = E/c² + in SI units kg from Energy in J. + + energy (float): Mass of body. 
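(The argument here is an energy in joules, mirroring energy_from_mass above; since c**2 ≈ 8.99e16 m**2/s**2, roughly 9e16 J of energy is equivalent to about 1 kg of mass.)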
+ + Usage example: + >>> mass_from_energy(124.56) + 1.3859169098203872e-15 + >>> mass_from_energy(320) + 3.560480179371579e-15 + >>> mass_from_energy(0) + 0.0 + >>> mass_from_energy(-967.9) + Traceback (most recent call last): + ... + ValueError: Energy can't be negative. + + """ + if energy < 0: + raise ValueError("Energy can't be negative.") + return energy / c**2 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From abc390967d5479ec74bfd384a86cefa5ddbf6d40 Mon Sep 17 00:00:00 2001 From: Paarth Goyal <138299656+pluto-tofu@users.noreply.github.com> Date: Mon, 23 Oct 2023 11:13:30 +0530 Subject: [PATCH 1208/1543] =?UTF-8?q?Added=20the=20algorithm=20to=20comput?= =?UTF-8?q?e=20the=20terminal=20velocity=20of=20an=20object=20fal=E2=80=A6?= =?UTF-8?q?=20(#10237)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * added the algorithm to compute the terminal velocity of an object falling in a fluid * fixed spelling mistake * fixed issues in topic description * imported the value of g from scipy and changed the doctests accordingly * fixed formatting * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- physics/terminal_velocity.py | 60 ++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 physics/terminal_velocity.py diff --git a/physics/terminal_velocity.py b/physics/terminal_velocity.py new file mode 100644 index 000000000000..cec54162e2b4 --- /dev/null +++ b/physics/terminal_velocity.py @@ -0,0 +1,60 @@ +""" +Title : Computing the terminal velocity of an object falling + through a fluid. + +Terminal velocity is defined as the highest velocity attained by an +object falling through a fluid. It is observed when the sum of drag force +and buoyancy is equal to the downward gravity force acting on the +object. The acceleration of the object is zero as the net force acting on +the object is zero. + +Vt = ((2 * m * g)/(ρ * A * Cd))^0.5 + +where : +Vt = Terminal velocity (in m/s) +m = Mass of the falling object (in Kg) +g = Acceleration due to gravity (value taken : imported from scipy) +ρ = Density of the fluid through which the object is falling (in Kg/m^3) +A = Projected area of the object (in m^2) +Cd = Drag coefficient (dimensionless) + +Reference : https://byjus.com/physics/derivation-of-terminal-velocity/ +""" + +from scipy.constants import g + + +def terminal_velocity( + mass: float, density: float, area: float, drag_coefficient: float +) -> float: + """ + >>> terminal_velocity(1, 25, 0.6, 0.77) + 1.3031197996044768 + >>> terminal_velocity(2, 100, 0.45, 0.23) + 1.9467947148674276 + >>> terminal_velocity(5, 50, 0.2, 0.5) + 4.428690551393267 + >>> terminal_velocity(-5, 50, -0.2, -2) + Traceback (most recent call last): + ... + ValueError: mass, density, area and the drag coefficient all need to be positive + >>> terminal_velocity(3, -20, -1, 2) + Traceback (most recent call last): + ... + ValueError: mass, density, area and the drag coefficient all need to be positive + >>> terminal_velocity(-2, -1, -0.44, -1) + Traceback (most recent call last): + ... 
+ ValueError: mass, density, area and the drag coefficient all need to be positive + """ + if mass <= 0 or density <= 0 or area <= 0 or drag_coefficient <= 0: + raise ValueError( + "mass, density, area and the drag coefficient all need to be positive" + ) + return ((2 * mass * g) / (density * area * drag_coefficient)) ** 0.5 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a9cee1d933606092eb966a601eb1d9efd6e054af Mon Sep 17 00:00:00 2001 From: Dale Dai <145884899+CouldNot@users.noreply.github.com> Date: Sun, 22 Oct 2023 22:56:59 -0700 Subject: [PATCH 1209/1543] Add perfect cube binary search (#10477) * Add perfect cube binary search algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add support for testing negative perfect cubes * Add TypeError check for invalid inputs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/perfect_cube.py | 43 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/maths/perfect_cube.py b/maths/perfect_cube.py index 9ad287e41e75..a732b7cce6c8 100644 --- a/maths/perfect_cube.py +++ b/maths/perfect_cube.py @@ -11,6 +11,45 @@ def perfect_cube(n: int) -> bool: return (val * val * val) == n +def perfect_cube_binary_search(n: int) -> bool: + """ + Check if a number is a perfect cube or not using binary search. + Time complexity : O(Log(n)) + Space complexity: O(1) + + >>> perfect_cube_binary_search(27) + True + >>> perfect_cube_binary_search(64) + True + >>> perfect_cube_binary_search(4) + False + >>> perfect_cube_binary_search("a") + Traceback (most recent call last): + ... + TypeError: perfect_cube_binary_search() only accepts integers + >>> perfect_cube_binary_search(0.1) + Traceback (most recent call last): + ... + TypeError: perfect_cube_binary_search() only accepts integers + """ + if not isinstance(n, int): + raise TypeError("perfect_cube_binary_search() only accepts integers") + if n < 0: + n = -n + left = 0 + right = n + while left <= right: + mid = left + (right - left) // 2 + if mid * mid * mid == n: + return True + elif mid * mid * mid < n: + left = mid + 1 + else: + right = mid - 1 + return False + + if __name__ == "__main__": - print(perfect_cube(27)) - print(perfect_cube(4)) + import doctest + + doctest.testmod() From a8b6bda993484b3be9fd541a10dd9ac9c4111dda Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 23 Oct 2023 03:31:30 -0400 Subject: [PATCH 1210/1543] Delete `arithmetic_analysis/` directory and relocate its contents (#10824) * Remove eval from arithmetic_analysis/newton_raphson.py * Relocate contents of arithmetic_analysis/ Delete the arithmetic_analysis/ directory and relocate its files because the purpose of the directory was always ill-defined. "Arithmetic analysis" isn't a field of math, and the directory's files contained algorithms for linear algebra, numerical analysis, and physics. Relocated the directory's linear algebra algorithms to linear_algebra/, its numerical analysis algorithms to a new subdirectory called maths/numerical_analysis/, and its single physics algorithm to physics/. 
* updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 43 +- arithmetic_analysis/README.md | 7 - arithmetic_analysis/image_data/__init__.py | 0 .../gaussian_elimination.py | 0 .../jacobi_iteration_method.py | 406 +++++++++--------- .../lu_decomposition.py | 0 .../numerical_analysis}/bisection.py | 0 .../bisection_2.py} | 0 .../integration_by_simpson_approx.py | 0 .../numerical_analysis}/intersection.py | 0 .../nevilles_method.py | 0 .../newton_forward_interpolation.py | 0 .../numerical_analysis}/newton_method.py | 0 .../numerical_analysis}/newton_raphson.py | 35 +- .../newton_raphson_2.py} | 0 .../numerical_analysis}/newton_raphson_new.py | 0 .../numerical_integration.py | 0 maths/{ => numerical_analysis}/runge_kutta.py | 0 .../runge_kutta_fehlberg_45.py | 0 .../numerical_analysis}/secant_method.py | 0 .../{ => numerical_analysis}/simpson_rule.py | 0 maths/{ => numerical_analysis}/square_root.py | 0 .../image_data/2D_problems.jpg | Bin .../image_data/2D_problems_1.jpg | Bin .../image_data}/__init__.py | 0 .../in_static_equilibrium.py | 188 ++++---- 26 files changed, 335 insertions(+), 344 deletions(-) delete mode 100644 arithmetic_analysis/README.md delete mode 100644 arithmetic_analysis/image_data/__init__.py rename {arithmetic_analysis => linear_algebra}/gaussian_elimination.py (100%) rename {arithmetic_analysis => linear_algebra}/jacobi_iteration_method.py (96%) rename {arithmetic_analysis => linear_algebra}/lu_decomposition.py (100%) rename {arithmetic_analysis => maths/numerical_analysis}/bisection.py (100%) rename maths/{bisection.py => numerical_analysis/bisection_2.py} (100%) rename maths/{ => numerical_analysis}/integration_by_simpson_approx.py (100%) rename {arithmetic_analysis => maths/numerical_analysis}/intersection.py (100%) rename maths/{ => numerical_analysis}/nevilles_method.py (100%) rename {arithmetic_analysis => maths/numerical_analysis}/newton_forward_interpolation.py (100%) rename {arithmetic_analysis => maths/numerical_analysis}/newton_method.py (100%) rename {arithmetic_analysis => maths/numerical_analysis}/newton_raphson.py (61%) rename maths/{newton_raphson.py => numerical_analysis/newton_raphson_2.py} (100%) rename {arithmetic_analysis => maths/numerical_analysis}/newton_raphson_new.py (100%) rename maths/{ => numerical_analysis}/numerical_integration.py (100%) rename maths/{ => numerical_analysis}/runge_kutta.py (100%) rename maths/{ => numerical_analysis}/runge_kutta_fehlberg_45.py (100%) rename {arithmetic_analysis => maths/numerical_analysis}/secant_method.py (100%) rename maths/{ => numerical_analysis}/simpson_rule.py (100%) rename maths/{ => numerical_analysis}/square_root.py (100%) rename {arithmetic_analysis => physics}/image_data/2D_problems.jpg (100%) rename {arithmetic_analysis => physics}/image_data/2D_problems_1.jpg (100%) rename {arithmetic_analysis => physics/image_data}/__init__.py (100%) rename {arithmetic_analysis => physics}/in_static_equilibrium.py (96%) diff --git a/DIRECTORY.md b/DIRECTORY.md index c07e1550d1eb..1e3711fe8dda 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1,17 +1,4 @@ -## Arithmetic Analysis - * [Bisection](arithmetic_analysis/bisection.py) - * [Gaussian Elimination](arithmetic_analysis/gaussian_elimination.py) - * [In Static Equilibrium](arithmetic_analysis/in_static_equilibrium.py) - * [Intersection](arithmetic_analysis/intersection.py) - * [Jacobi Iteration Method](arithmetic_analysis/jacobi_iteration_method.py) - * [Lu 
Decomposition](arithmetic_analysis/lu_decomposition.py) - * [Newton Forward Interpolation](arithmetic_analysis/newton_forward_interpolation.py) - * [Newton Method](arithmetic_analysis/newton_method.py) - * [Newton Raphson](arithmetic_analysis/newton_raphson.py) - * [Newton Raphson New](arithmetic_analysis/newton_raphson_new.py) - * [Secant Method](arithmetic_analysis/secant_method.py) - ## Audio Filters * [Butterworth Filter](audio_filters/butterworth_filter.py) * [Iir Filter](audio_filters/iir_filter.py) @@ -520,6 +507,9 @@ * [Test Knapsack](knapsack/tests/test_knapsack.py) ## Linear Algebra + * [Gaussian Elimination](linear_algebra/gaussian_elimination.py) + * [Jacobi Iteration Method](linear_algebra/jacobi_iteration_method.py) + * [Lu Decomposition](linear_algebra/lu_decomposition.py) * Src * [Conjugate Gradient](linear_algebra/src/conjugate_gradient.py) * [Lib](linear_algebra/src/lib.py) @@ -583,7 +573,6 @@ * [Binary Multiplication](maths/binary_multiplication.py) * [Binomial Coefficient](maths/binomial_coefficient.py) * [Binomial Distribution](maths/binomial_distribution.py) - * [Bisection](maths/bisection.py) * [Ceil](maths/ceil.py) * [Chebyshev Distance](maths/chebyshev_distance.py) * [Check Polygon](maths/check_polygon.py) @@ -617,7 +606,6 @@ * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) - * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) * [Interquartile Range](maths/interquartile_range.py) * [Is Int Palindrome](maths/is_int_palindrome.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) @@ -644,10 +632,24 @@ * [Modular Exponential](maths/modular_exponential.py) * [Monte Carlo](maths/monte_carlo.py) * [Monte Carlo Dice](maths/monte_carlo_dice.py) - * [Nevilles Method](maths/nevilles_method.py) - * [Newton Raphson](maths/newton_raphson.py) * [Number Of Digits](maths/number_of_digits.py) - * [Numerical Integration](maths/numerical_integration.py) + * Numerical Analysis + * [Bisection](maths/numerical_analysis/bisection.py) + * [Bisection 2](maths/numerical_analysis/bisection_2.py) + * [Integration By Simpson Approx](maths/numerical_analysis/integration_by_simpson_approx.py) + * [Intersection](maths/numerical_analysis/intersection.py) + * [Nevilles Method](maths/numerical_analysis/nevilles_method.py) + * [Newton Forward Interpolation](maths/numerical_analysis/newton_forward_interpolation.py) + * [Newton Method](maths/numerical_analysis/newton_method.py) + * [Newton Raphson](maths/numerical_analysis/newton_raphson.py) + * [Newton Raphson 2](maths/numerical_analysis/newton_raphson_2.py) + * [Newton Raphson New](maths/numerical_analysis/newton_raphson_new.py) + * [Numerical Integration](maths/numerical_analysis/numerical_integration.py) + * [Runge Kutta](maths/numerical_analysis/runge_kutta.py) + * [Runge Kutta Fehlberg 45](maths/numerical_analysis/runge_kutta_fehlberg_45.py) + * [Secant Method](maths/numerical_analysis/secant_method.py) + * [Simpson Rule](maths/numerical_analysis/simpson_rule.py) + * [Square Root](maths/numerical_analysis/square_root.py) * [Odd Sieve](maths/odd_sieve.py) * [Perfect Cube](maths/perfect_cube.py) * [Perfect Number](maths/perfect_number.py) @@ -673,8 +675,6 @@ * [Radians](maths/radians.py) * [Radix2 Fft](maths/radix2_fft.py) * [Remove Digit](maths/remove_digit.py) - * [Runge Kutta](maths/runge_kutta.py) - * [Runge Kutta Fehlberg 45](maths/runge_kutta_fehlberg_45.py) * [Segmented 
Sieve](maths/segmented_sieve.py) * Series * [Arithmetic](maths/series/arithmetic.py) @@ -687,7 +687,6 @@ * [Sieve Of Eratosthenes](maths/sieve_of_eratosthenes.py) * [Sigmoid](maths/sigmoid.py) * [Signum](maths/signum.py) - * [Simpson Rule](maths/simpson_rule.py) * [Simultaneous Linear Equation Solver](maths/simultaneous_linear_equation_solver.py) * [Sin](maths/sin.py) * [Sock Merchant](maths/sock_merchant.py) @@ -709,7 +708,6 @@ * [Proth Number](maths/special_numbers/proth_number.py) * [Ugly Numbers](maths/special_numbers/ugly_numbers.py) * [Weird Number](maths/special_numbers/weird_number.py) - * [Square Root](maths/square_root.py) * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) * [Sum Of Digits](maths/sum_of_digits.py) * [Sum Of Geometric Progression](maths/sum_of_geometric_progression.py) @@ -812,6 +810,7 @@ * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Hubble Parameter](physics/hubble_parameter.py) * [Ideal Gas Law](physics/ideal_gas_law.py) + * [In Static Equilibrium](physics/in_static_equilibrium.py) * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [Malus Law](physics/malus_law.py) diff --git a/arithmetic_analysis/README.md b/arithmetic_analysis/README.md deleted file mode 100644 index 45cf321eb6ad..000000000000 --- a/arithmetic_analysis/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Arithmetic analysis - -Arithmetic analysis is a branch of mathematics that deals with solving linear equations. - -* -* -* diff --git a/arithmetic_analysis/image_data/__init__.py b/arithmetic_analysis/image_data/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/arithmetic_analysis/gaussian_elimination.py b/linear_algebra/gaussian_elimination.py similarity index 100% rename from arithmetic_analysis/gaussian_elimination.py rename to linear_algebra/gaussian_elimination.py diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/linear_algebra/jacobi_iteration_method.py similarity index 96% rename from arithmetic_analysis/jacobi_iteration_method.py rename to linear_algebra/jacobi_iteration_method.py index 44c52dd44640..8c91a19ef1b0 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/linear_algebra/jacobi_iteration_method.py @@ -1,203 +1,203 @@ -""" -Jacobi Iteration Method - https://en.wikipedia.org/wiki/Jacobi_method -""" -from __future__ import annotations - -import numpy as np -from numpy import float64 -from numpy.typing import NDArray - - -# Method to find solution of system of linear equations -def jacobi_iteration_method( - coefficient_matrix: NDArray[float64], - constant_matrix: NDArray[float64], - init_val: list[float], - iterations: int, -) -> list[float]: - """ - Jacobi Iteration Method: - An iterative algorithm to determine the solutions of strictly diagonally dominant - system of linear equations - - 4x1 + x2 + x3 = 2 - x1 + 5x2 + 2x3 = -6 - x1 + 2x2 + 4x3 = -4 - - x_init = [0.5, -0.5 , -0.5] - - Examples: - - >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) - >>> constant = np.array([[2], [-6], [-4]]) - >>> init_val = [0.5, -0.5, -0.5] - >>> iterations = 3 - >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) - [0.909375, -1.14375, -0.7484375] - - - >>> coefficient = np.array([[4, 1, 1], [1, 5, 2]]) - >>> constant = np.array([[2], [-6], [-4]]) - >>> init_val = [0.5, -0.5, -0.5] - >>> iterations = 3 - >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) - Traceback 
(most recent call last): - ... - ValueError: Coefficient matrix dimensions must be nxn but received 2x3 - - >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) - >>> constant = np.array([[2], [-6]]) - >>> init_val = [0.5, -0.5, -0.5] - >>> iterations = 3 - >>> jacobi_iteration_method( - ... coefficient, constant, init_val, iterations - ... ) # doctest: +NORMALIZE_WHITESPACE - Traceback (most recent call last): - ... - ValueError: Coefficient and constant matrices dimensions must be nxn and nx1 but - received 3x3 and 2x1 - - >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) - >>> constant = np.array([[2], [-6], [-4]]) - >>> init_val = [0.5, -0.5] - >>> iterations = 3 - >>> jacobi_iteration_method( - ... coefficient, constant, init_val, iterations - ... ) # doctest: +NORMALIZE_WHITESPACE - Traceback (most recent call last): - ... - ValueError: Number of initial values must be equal to number of rows in coefficient - matrix but received 2 and 3 - - >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) - >>> constant = np.array([[2], [-6], [-4]]) - >>> init_val = [0.5, -0.5, -0.5] - >>> iterations = 0 - >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) - Traceback (most recent call last): - ... - ValueError: Iterations must be at least 1 - """ - - rows1, cols1 = coefficient_matrix.shape - rows2, cols2 = constant_matrix.shape - - if rows1 != cols1: - msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}" - raise ValueError(msg) - - if cols2 != 1: - msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}" - raise ValueError(msg) - - if rows1 != rows2: - msg = ( - "Coefficient and constant matrices dimensions must be nxn and nx1 but " - f"received {rows1}x{cols1} and {rows2}x{cols2}" - ) - raise ValueError(msg) - - if len(init_val) != rows1: - msg = ( - "Number of initial values must be equal to number of rows in coefficient " - f"matrix but received {len(init_val)} and {rows1}" - ) - raise ValueError(msg) - - if iterations <= 0: - raise ValueError("Iterations must be at least 1") - - table: NDArray[float64] = np.concatenate( - (coefficient_matrix, constant_matrix), axis=1 - ) - - rows, cols = table.shape - - strictly_diagonally_dominant(table) - - """ - # Iterates the whole matrix for given number of times - for _ in range(iterations): - new_val = [] - for row in range(rows): - temp = 0 - for col in range(cols): - if col == row: - denom = table[row][col] - elif col == cols - 1: - val = table[row][col] - else: - temp += (-1) * table[row][col] * init_val[col] - temp = (temp + val) / denom - new_val.append(temp) - init_val = new_val - """ - - # denominator - a list of values along the diagonal - denominator = np.diag(coefficient_matrix) - - # val_last - values of the last column of the table array - val_last = table[:, -1] - - # masks - boolean mask of all strings without diagonal - # elements array coefficient_matrix - masks = ~np.eye(coefficient_matrix.shape[0], dtype=bool) - - # no_diagonals - coefficient_matrix array values without diagonal elements - no_diagonals = coefficient_matrix[masks].reshape(-1, rows - 1) - - # Here we get 'i_col' - these are the column numbers, for each row - # without diagonal elements, except for the last column. - i_row, i_col = np.where(masks) - ind = i_col.reshape(-1, rows - 1) - - #'i_col' is converted to a two-dimensional list 'ind', which will be - # used to make selections from 'init_val' ('arr' array see below). 
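For illustration only (separate from the diff itself), the index bookkeeping described in the comments above amounts to the standard vectorised Jacobi update x_new = (b - R x) / d, where d is the diagonal of the coefficient matrix and R holds its off-diagonal entries. A minimal sketch; jacobi_step is an illustrative name that does not exist in the repository:

    import numpy as np

    def jacobi_step(a: np.ndarray, b: np.ndarray, x: np.ndarray) -> np.ndarray:
        # x_new[i] = (b[i] - sum over j != i of a[i, j] * x[j]) / a[i, i]
        diagonal = np.diag(a)
        off_diagonal = a - np.diag(diagonal)
        return (b - off_diagonal @ x) / diagonal

    # Applying it three times to the doctest system a = [[4, 1, 1], [1, 5, 2], [1, 2, 4]],
    # b = [2, -6, -4], x = [0.5, -0.5, -0.5] reproduces the expected values
    # [0.909375, -1.14375, -0.7484375].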
- - # Iterates the whole matrix for given number of times - for _ in range(iterations): - arr = np.take(init_val, ind) - sum_product_rows = np.sum((-1) * no_diagonals * arr, axis=1) - new_val = (sum_product_rows + val_last) / denominator - init_val = new_val - - return new_val.tolist() - - -# Checks if the given matrix is strictly diagonally dominant -def strictly_diagonally_dominant(table: NDArray[float64]) -> bool: - """ - >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 4, -4]]) - >>> strictly_diagonally_dominant(table) - True - - >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 3, -4]]) - >>> strictly_diagonally_dominant(table) - Traceback (most recent call last): - ... - ValueError: Coefficient matrix is not strictly diagonally dominant - """ - - rows, cols = table.shape - - is_diagonally_dominant = True - - for i in range(rows): - total = 0 - for j in range(cols - 1): - if i == j: - continue - else: - total += table[i][j] - - if table[i][i] <= total: - raise ValueError("Coefficient matrix is not strictly diagonally dominant") - - return is_diagonally_dominant - - -# Test Cases -if __name__ == "__main__": - import doctest - - doctest.testmod() +""" +Jacobi Iteration Method - https://en.wikipedia.org/wiki/Jacobi_method +""" +from __future__ import annotations + +import numpy as np +from numpy import float64 +from numpy.typing import NDArray + + +# Method to find solution of system of linear equations +def jacobi_iteration_method( + coefficient_matrix: NDArray[float64], + constant_matrix: NDArray[float64], + init_val: list[float], + iterations: int, +) -> list[float]: + """ + Jacobi Iteration Method: + An iterative algorithm to determine the solutions of strictly diagonally dominant + system of linear equations + + 4x1 + x2 + x3 = 2 + x1 + 5x2 + 2x3 = -6 + x1 + 2x2 + 4x3 = -4 + + x_init = [0.5, -0.5 , -0.5] + + Examples: + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) + >>> constant = np.array([[2], [-6], [-4]]) + >>> init_val = [0.5, -0.5, -0.5] + >>> iterations = 3 + >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + [0.909375, -1.14375, -0.7484375] + + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2]]) + >>> constant = np.array([[2], [-6], [-4]]) + >>> init_val = [0.5, -0.5, -0.5] + >>> iterations = 3 + >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + Traceback (most recent call last): + ... + ValueError: Coefficient matrix dimensions must be nxn but received 2x3 + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) + >>> constant = np.array([[2], [-6]]) + >>> init_val = [0.5, -0.5, -0.5] + >>> iterations = 3 + >>> jacobi_iteration_method( + ... coefficient, constant, init_val, iterations + ... ) # doctest: +NORMALIZE_WHITESPACE + Traceback (most recent call last): + ... + ValueError: Coefficient and constant matrices dimensions must be nxn and nx1 but + received 3x3 and 2x1 + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) + >>> constant = np.array([[2], [-6], [-4]]) + >>> init_val = [0.5, -0.5] + >>> iterations = 3 + >>> jacobi_iteration_method( + ... coefficient, constant, init_val, iterations + ... ) # doctest: +NORMALIZE_WHITESPACE + Traceback (most recent call last): + ... 
+ ValueError: Number of initial values must be equal to number of rows in coefficient + matrix but received 2 and 3 + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) + >>> constant = np.array([[2], [-6], [-4]]) + >>> init_val = [0.5, -0.5, -0.5] + >>> iterations = 0 + >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + Traceback (most recent call last): + ... + ValueError: Iterations must be at least 1 + """ + + rows1, cols1 = coefficient_matrix.shape + rows2, cols2 = constant_matrix.shape + + if rows1 != cols1: + msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}" + raise ValueError(msg) + + if cols2 != 1: + msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}" + raise ValueError(msg) + + if rows1 != rows2: + msg = ( + "Coefficient and constant matrices dimensions must be nxn and nx1 but " + f"received {rows1}x{cols1} and {rows2}x{cols2}" + ) + raise ValueError(msg) + + if len(init_val) != rows1: + msg = ( + "Number of initial values must be equal to number of rows in coefficient " + f"matrix but received {len(init_val)} and {rows1}" + ) + raise ValueError(msg) + + if iterations <= 0: + raise ValueError("Iterations must be at least 1") + + table: NDArray[float64] = np.concatenate( + (coefficient_matrix, constant_matrix), axis=1 + ) + + rows, cols = table.shape + + strictly_diagonally_dominant(table) + + """ + # Iterates the whole matrix for given number of times + for _ in range(iterations): + new_val = [] + for row in range(rows): + temp = 0 + for col in range(cols): + if col == row: + denom = table[row][col] + elif col == cols - 1: + val = table[row][col] + else: + temp += (-1) * table[row][col] * init_val[col] + temp = (temp + val) / denom + new_val.append(temp) + init_val = new_val + """ + + # denominator - a list of values along the diagonal + denominator = np.diag(coefficient_matrix) + + # val_last - values of the last column of the table array + val_last = table[:, -1] + + # masks - boolean mask of all strings without diagonal + # elements array coefficient_matrix + masks = ~np.eye(coefficient_matrix.shape[0], dtype=bool) + + # no_diagonals - coefficient_matrix array values without diagonal elements + no_diagonals = coefficient_matrix[masks].reshape(-1, rows - 1) + + # Here we get 'i_col' - these are the column numbers, for each row + # without diagonal elements, except for the last column. + i_row, i_col = np.where(masks) + ind = i_col.reshape(-1, rows - 1) + + #'i_col' is converted to a two-dimensional list 'ind', which will be + # used to make selections from 'init_val' ('arr' array see below). + + # Iterates the whole matrix for given number of times + for _ in range(iterations): + arr = np.take(init_val, ind) + sum_product_rows = np.sum((-1) * no_diagonals * arr, axis=1) + new_val = (sum_product_rows + val_last) / denominator + init_val = new_val + + return new_val.tolist() + + +# Checks if the given matrix is strictly diagonally dominant +def strictly_diagonally_dominant(table: NDArray[float64]) -> bool: + """ + >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 4, -4]]) + >>> strictly_diagonally_dominant(table) + True + + >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 3, -4]]) + >>> strictly_diagonally_dominant(table) + Traceback (most recent call last): + ... 
+ ValueError: Coefficient matrix is not strictly diagonally dominant + """ + + rows, cols = table.shape + + is_diagonally_dominant = True + + for i in range(rows): + total = 0 + for j in range(cols - 1): + if i == j: + continue + else: + total += table[i][j] + + if table[i][i] <= total: + raise ValueError("Coefficient matrix is not strictly diagonally dominant") + + return is_diagonally_dominant + + +# Test Cases +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/arithmetic_analysis/lu_decomposition.py b/linear_algebra/lu_decomposition.py similarity index 100% rename from arithmetic_analysis/lu_decomposition.py rename to linear_algebra/lu_decomposition.py diff --git a/arithmetic_analysis/bisection.py b/maths/numerical_analysis/bisection.py similarity index 100% rename from arithmetic_analysis/bisection.py rename to maths/numerical_analysis/bisection.py diff --git a/maths/bisection.py b/maths/numerical_analysis/bisection_2.py similarity index 100% rename from maths/bisection.py rename to maths/numerical_analysis/bisection_2.py diff --git a/maths/integration_by_simpson_approx.py b/maths/numerical_analysis/integration_by_simpson_approx.py similarity index 100% rename from maths/integration_by_simpson_approx.py rename to maths/numerical_analysis/integration_by_simpson_approx.py diff --git a/arithmetic_analysis/intersection.py b/maths/numerical_analysis/intersection.py similarity index 100% rename from arithmetic_analysis/intersection.py rename to maths/numerical_analysis/intersection.py diff --git a/maths/nevilles_method.py b/maths/numerical_analysis/nevilles_method.py similarity index 100% rename from maths/nevilles_method.py rename to maths/numerical_analysis/nevilles_method.py diff --git a/arithmetic_analysis/newton_forward_interpolation.py b/maths/numerical_analysis/newton_forward_interpolation.py similarity index 100% rename from arithmetic_analysis/newton_forward_interpolation.py rename to maths/numerical_analysis/newton_forward_interpolation.py diff --git a/arithmetic_analysis/newton_method.py b/maths/numerical_analysis/newton_method.py similarity index 100% rename from arithmetic_analysis/newton_method.py rename to maths/numerical_analysis/newton_method.py diff --git a/arithmetic_analysis/newton_raphson.py b/maths/numerical_analysis/newton_raphson.py similarity index 61% rename from arithmetic_analysis/newton_raphson.py rename to maths/numerical_analysis/newton_raphson.py index 1b90ad4177f6..8491ca8003bc 100644 --- a/arithmetic_analysis/newton_raphson.py +++ b/maths/numerical_analysis/newton_raphson.py @@ -5,42 +5,41 @@ from __future__ import annotations from decimal import Decimal -from math import * # noqa: F403 -from sympy import diff +from sympy import diff, lambdify, symbols -def newton_raphson( - func: str, a: float | Decimal, precision: float = 10**-10 -) -> float: +def newton_raphson(func: str, a: float | Decimal, precision: float = 1e-10) -> float: """Finds root from the point 'a' onwards by Newton-Raphson method >>> newton_raphson("sin(x)", 2) 3.1415926536808043 - >>> newton_raphson("x**2 - 5*x +2", 0.4) + >>> newton_raphson("x**2 - 5*x + 2", 0.4) 0.4384471871911695 >>> newton_raphson("x**2 - 5", 0.1) 2.23606797749979 - >>> newton_raphson("log(x)- 1", 2) + >>> newton_raphson("log(x) - 1", 2) 2.718281828458938 """ - x = a + x = symbols("x") + f = lambdify(x, func, "math") + f_derivative = lambdify(x, diff(func), "math") + x_curr = a while True: - x = Decimal(x) - ( - Decimal(eval(func)) / Decimal(eval(str(diff(func)))) # noqa: S307 - ) - # This 
number dictates the accuracy of the answer - if abs(eval(func)) < precision: # noqa: S307 - return float(x) + x_curr = Decimal(x_curr) - Decimal(f(x_curr)) / Decimal(f_derivative(x_curr)) + if abs(f(x_curr)) < precision: + return float(x_curr) -# Let's Execute if __name__ == "__main__": - # Find root of trigonometric function + import doctest + + doctest.testmod() + # Find value of pi print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") # Find root of polynomial print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}") - # Find Square Root of 5 + # Find value of e print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}") - # Exponential Roots + # Find root of exponential function print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}") diff --git a/maths/newton_raphson.py b/maths/numerical_analysis/newton_raphson_2.py similarity index 100% rename from maths/newton_raphson.py rename to maths/numerical_analysis/newton_raphson_2.py diff --git a/arithmetic_analysis/newton_raphson_new.py b/maths/numerical_analysis/newton_raphson_new.py similarity index 100% rename from arithmetic_analysis/newton_raphson_new.py rename to maths/numerical_analysis/newton_raphson_new.py diff --git a/maths/numerical_integration.py b/maths/numerical_analysis/numerical_integration.py similarity index 100% rename from maths/numerical_integration.py rename to maths/numerical_analysis/numerical_integration.py diff --git a/maths/runge_kutta.py b/maths/numerical_analysis/runge_kutta.py similarity index 100% rename from maths/runge_kutta.py rename to maths/numerical_analysis/runge_kutta.py diff --git a/maths/runge_kutta_fehlberg_45.py b/maths/numerical_analysis/runge_kutta_fehlberg_45.py similarity index 100% rename from maths/runge_kutta_fehlberg_45.py rename to maths/numerical_analysis/runge_kutta_fehlberg_45.py diff --git a/arithmetic_analysis/secant_method.py b/maths/numerical_analysis/secant_method.py similarity index 100% rename from arithmetic_analysis/secant_method.py rename to maths/numerical_analysis/secant_method.py diff --git a/maths/simpson_rule.py b/maths/numerical_analysis/simpson_rule.py similarity index 100% rename from maths/simpson_rule.py rename to maths/numerical_analysis/simpson_rule.py diff --git a/maths/square_root.py b/maths/numerical_analysis/square_root.py similarity index 100% rename from maths/square_root.py rename to maths/numerical_analysis/square_root.py diff --git a/arithmetic_analysis/image_data/2D_problems.jpg b/physics/image_data/2D_problems.jpg similarity index 100% rename from arithmetic_analysis/image_data/2D_problems.jpg rename to physics/image_data/2D_problems.jpg diff --git a/arithmetic_analysis/image_data/2D_problems_1.jpg b/physics/image_data/2D_problems_1.jpg similarity index 100% rename from arithmetic_analysis/image_data/2D_problems_1.jpg rename to physics/image_data/2D_problems_1.jpg diff --git a/arithmetic_analysis/__init__.py b/physics/image_data/__init__.py similarity index 100% rename from arithmetic_analysis/__init__.py rename to physics/image_data/__init__.py diff --git a/arithmetic_analysis/in_static_equilibrium.py b/physics/in_static_equilibrium.py similarity index 96% rename from arithmetic_analysis/in_static_equilibrium.py rename to physics/in_static_equilibrium.py index 7aaecf174a5e..d56299f60858 100644 --- a/arithmetic_analysis/in_static_equilibrium.py +++ b/physics/in_static_equilibrium.py @@ -1,94 +1,94 @@ -""" -Checks if a system of forces is in static equilibrium. 
-""" -from __future__ import annotations - -from numpy import array, cos, cross, float64, radians, sin -from numpy.typing import NDArray - - -def polar_force( - magnitude: float, angle: float, radian_mode: bool = False -) -> list[float]: - """ - Resolves force along rectangular components. - (force, angle) => (force_x, force_y) - >>> import math - >>> force = polar_force(10, 45) - >>> math.isclose(force[0], 7.071067811865477) - True - >>> math.isclose(force[1], 7.0710678118654755) - True - >>> force = polar_force(10, 3.14, radian_mode=True) - >>> math.isclose(force[0], -9.999987317275396) - True - >>> math.isclose(force[1], 0.01592652916486828) - True - """ - if radian_mode: - return [magnitude * cos(angle), magnitude * sin(angle)] - return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))] - - -def in_static_equilibrium( - forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1 -) -> bool: - """ - Check if a system is in equilibrium. - It takes two numpy.array objects. - forces ==> [ - [force1_x, force1_y], - [force2_x, force2_y], - ....] - location ==> [ - [x1, y1], - [x2, y2], - ....] - >>> force = array([[1, 1], [-1, 2]]) - >>> location = array([[1, 0], [10, 0]]) - >>> in_static_equilibrium(force, location) - False - """ - # summation of moments is zero - moments: NDArray[float64] = cross(location, forces) - sum_moments: float = sum(moments) - return abs(sum_moments) < eps - - -if __name__ == "__main__": - # Test to check if it works - forces = array( - [ - polar_force(718.4, 180 - 30), - polar_force(879.54, 45), - polar_force(100, -90), - ] - ) - - location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]]) - - assert in_static_equilibrium(forces, location) - - # Problem 1 in image_data/2D_problems.jpg - forces = array( - [ - polar_force(30 * 9.81, 15), - polar_force(215, 180 - 45), - polar_force(264, 90 - 30), - ] - ) - - location = array([[0, 0], [0, 0], [0, 0]]) - - assert in_static_equilibrium(forces, location) - - # Problem in image_data/2D_problems_1.jpg - forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) - - location = array([[0, 0], [6, 0], [10, 0], [12, 0]]) - - assert in_static_equilibrium(forces, location) - - import doctest - - doctest.testmod() +""" +Checks if a system of forces is in static equilibrium. +""" +from __future__ import annotations + +from numpy import array, cos, cross, float64, radians, sin +from numpy.typing import NDArray + + +def polar_force( + magnitude: float, angle: float, radian_mode: bool = False +) -> list[float]: + """ + Resolves force along rectangular components. + (force, angle) => (force_x, force_y) + >>> import math + >>> force = polar_force(10, 45) + >>> math.isclose(force[0], 7.071067811865477) + True + >>> math.isclose(force[1], 7.0710678118654755) + True + >>> force = polar_force(10, 3.14, radian_mode=True) + >>> math.isclose(force[0], -9.999987317275396) + True + >>> math.isclose(force[1], 0.01592652916486828) + True + """ + if radian_mode: + return [magnitude * cos(angle), magnitude * sin(angle)] + return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))] + + +def in_static_equilibrium( + forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1 +) -> bool: + """ + Check if a system is in equilibrium. + It takes two numpy.array objects. + forces ==> [ + [force1_x, force1_y], + [force2_x, force2_y], + ....] + location ==> [ + [x1, y1], + [x2, y2], + ....] 
+ >>> force = array([[1, 1], [-1, 2]]) + >>> location = array([[1, 0], [10, 0]]) + >>> in_static_equilibrium(force, location) + False + """ + # summation of moments is zero + moments: NDArray[float64] = cross(location, forces) + sum_moments: float = sum(moments) + return abs(sum_moments) < eps + + +if __name__ == "__main__": + # Test to check if it works + forces = array( + [ + polar_force(718.4, 180 - 30), + polar_force(879.54, 45), + polar_force(100, -90), + ] + ) + + location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]]) + + assert in_static_equilibrium(forces, location) + + # Problem 1 in image_data/2D_problems.jpg + forces = array( + [ + polar_force(30 * 9.81, 15), + polar_force(215, 180 - 45), + polar_force(264, 90 - 30), + ] + ) + + location = array([[0, 0], [0, 0], [0, 0]]) + + assert in_static_equilibrium(forces, location) + + # Problem in image_data/2D_problems_1.jpg + forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) + + location = array([[0, 0], [6, 0], [10, 0], [12, 0]]) + + assert in_static_equilibrium(forces, location) + + import doctest + + doctest.testmod() From 417b7edfc3fdfe9534a56e3e7d0a368f76b3edb4 Mon Sep 17 00:00:00 2001 From: Krishna-singhal <65902764+Krishna-Singhal@users.noreply.github.com> Date: Mon, 23 Oct 2023 13:21:27 +0530 Subject: [PATCH 1211/1543] code enhancement in `sort.double_sort` (#10798) * don't need to return list because list is mutable * Don't need to return list as list is mutable * use advantage of python in swapping * filter blank inputs from input list * minor changes * minor mistake * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * more readable * Update double_sort.py * last fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/double_sort.py | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/sorts/double_sort.py b/sorts/double_sort.py index a19641d94752..bd5fdca1e63c 100644 --- a/sorts/double_sort.py +++ b/sorts/double_sort.py @@ -1,4 +1,7 @@ -def double_sort(lst): +from typing import Any + + +def double_sort(collection: list[Any]) -> list[Any]: """This sorting algorithm sorts an array using the principle of bubble sort, but does it both from left to right and right to left. 
Hence, it's called "Double sort" @@ -14,29 +17,28 @@ def double_sort(lst): >>> double_sort([-3, 10, 16, -42, 29]) == sorted([-3, 10, 16, -42, 29]) True """ - no_of_elements = len(lst) + no_of_elements = len(collection) for _ in range( int(((no_of_elements - 1) / 2) + 1) ): # we don't need to traverse to end of list as for j in range(no_of_elements - 1): - if ( - lst[j + 1] < lst[j] - ): # applying bubble sort algorithm from left to right (or forwards) - temp = lst[j + 1] - lst[j + 1] = lst[j] - lst[j] = temp - if ( - lst[no_of_elements - 1 - j] < lst[no_of_elements - 2 - j] - ): # applying bubble sort algorithm from right to left (or backwards) - temp = lst[no_of_elements - 1 - j] - lst[no_of_elements - 1 - j] = lst[no_of_elements - 2 - j] - lst[no_of_elements - 2 - j] = temp - return lst + # apply the bubble sort algorithm from left to right (or forwards) + if collection[j + 1] < collection[j]: + collection[j], collection[j + 1] = collection[j + 1], collection[j] + # apply the bubble sort algorithm from right to left (or backwards) + if collection[no_of_elements - 1 - j] < collection[no_of_elements - 2 - j]: + ( + collection[no_of_elements - 1 - j], + collection[no_of_elements - 2 - j], + ) = ( + collection[no_of_elements - 2 - j], + collection[no_of_elements - 1 - j], + ) + return collection if __name__ == "__main__": - print("enter the list to be sorted") - lst = [int(x) for x in input().split()] # inputing elements of the list in one line - sorted_lst = double_sort(lst) + # allow the user to input the elements of the list on one line + unsorted = [int(x) for x in input("Enter the list to be sorted: ").split() if x] print("the sorted list is") - print(sorted_lst) + print(f"{double_sort(unsorted) = }") From d051db1f14cbb0edd2b0db1e4edef76cce6c7823 Mon Sep 17 00:00:00 2001 From: Berat Osman Demiralay Date: Mon, 23 Oct 2023 16:25:07 +0300 Subject: [PATCH 1212/1543] Add Simple Moving Average (SMA) Calculation (#9300) * Add Simple Moving Average (SMA) Calculation This commit adds a Python script for calculating the Simple Moving Average (SMA) of a time series data. The script also includes a doctest that verifies the correctness of the SMA calculations for a sample dataset. Usage: - Run the script with your own time series data and specify the window size for SMA calculations. * Update financial/simple_moving_average.py Co-authored-by: Tianyi Zheng * Update financial/simple_moving_average.py Co-authored-by: Tianyi Zheng * Update financial/simple_moving_average.py Co-authored-by: Tianyi Zheng * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update simple_moving_average.py * Update financial/simple_moving_average.py * Update simple_moving_average.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Tianyi Zheng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- financial/simple_moving_average.py | 68 ++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 financial/simple_moving_average.py diff --git a/financial/simple_moving_average.py b/financial/simple_moving_average.py new file mode 100644 index 000000000000..d5d68ffd3dab --- /dev/null +++ b/financial/simple_moving_average.py @@ -0,0 +1,68 @@ +""" +The Simple Moving Average (SMA) is a statistical calculation used to analyze data points +by creating a constantly updated average price over a specific time period. 
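For illustration only (separate from the patch below, which re-sums a slice for every position), the same constantly updated average can be maintained with a running window total in O(n). The name sliding_sma is illustrative and not part of the repository:

    from collections import deque

    def sliding_sma(data: list[float], window_size: int) -> list[float | None]:
        # Add the newest point to the running total and drop the oldest one
        # once the window is full; None is reported until the window fills.
        if window_size < 1:
            raise ValueError("Window size must be a positive integer")
        window: deque[float] = deque()
        running_total = 0.0
        averages: list[float | None] = []
        for value in data:
            window.append(value)
            running_total += value
            if len(window) > window_size:
                running_total -= window.popleft()
            if len(window) == window_size:
                averages.append(running_total / window_size)
            else:
                averages.append(None)
        return averages

    # sliding_sma([10, 12, 15, 13], 3) -> [None, None, 12.33..., 13.33...]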
+In finance, SMA is often used in time series analysis to smooth out price data +and identify trends. + +Reference: https://en.wikipedia.org/wiki/Moving_average +""" +from collections.abc import Sequence + + +def simple_moving_average( + data: Sequence[float], window_size: int +) -> list[float | None]: + """ + Calculate the simple moving average (SMA) for some given time series data. + + :param data: A list of numerical data points. + :param window_size: An integer representing the size of the SMA window. + :return: A list of SMA values with the same length as the input data. + + Examples: + >>> sma = simple_moving_average([10, 12, 15, 13, 14, 16, 18, 17, 19, 21], 3) + >>> [round(value, 2) if value is not None else None for value in sma] + [None, None, 12.33, 13.33, 14.0, 14.33, 16.0, 17.0, 18.0, 19.0] + >>> simple_moving_average([10, 12, 15], 5) + [None, None, None] + >>> simple_moving_average([10, 12, 15, 13, 14, 16, 18, 17, 19, 21], 0) + Traceback (most recent call last): + ... + ValueError: Window size must be a positive integer + """ + if window_size < 1: + raise ValueError("Window size must be a positive integer") + + sma: list[float | None] = [] + + for i in range(len(data)): + if i < window_size - 1: + sma.append(None) # SMA not available for early data points + else: + window = data[i - window_size + 1 : i + 1] + sma_value = sum(window) / window_size + sma.append(sma_value) + return sma + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Example data (replace with your own time series data) + data = [10, 12, 15, 13, 14, 16, 18, 17, 19, 21] + + # Specify the window size for the SMA + window_size = 3 + + # Calculate the Simple Moving Average + sma_values = simple_moving_average(data, window_size) + + # Print the SMA values + print("Simple Moving Average (SMA) Values:") + for i, value in enumerate(sma_values): + if value is not None: + print(f"Day {i + 1}: {value:.2f}") + else: + print(f"Day {i + 1}: Not enough data for SMA") From 4cbefadbd7adee486e33a6b66014a2474e81f82e Mon Sep 17 00:00:00 2001 From: Tauseef Hilal Tantary Date: Mon, 23 Oct 2023 19:21:09 +0530 Subject: [PATCH 1213/1543] [New Algorithm] - Triangular Numbers (#10663) * Add New Algorithm: Triangular Numbers * Calculate nth triangular number instead of generating a list * Handle 0th position and update function name and docstring --- maths/special_numbers/triangular_numbers.py | 43 +++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 maths/special_numbers/triangular_numbers.py diff --git a/maths/special_numbers/triangular_numbers.py b/maths/special_numbers/triangular_numbers.py new file mode 100644 index 000000000000..5be89e6108b2 --- /dev/null +++ b/maths/special_numbers/triangular_numbers.py @@ -0,0 +1,43 @@ +""" +A triangular number or triangle number counts objects arranged in an +equilateral triangle. This module provides a function to generate n'th +triangular number. + +For more information about triangular numbers, refer to: +https://en.wikipedia.org/wiki/Triangular_number +""" + + +def triangular_number(position: int) -> int: + """ + Generate the triangular number at the specified position. + + Args: + position (int): The position of the triangular number to generate. + + Returns: + int: The triangular number at the specified position. + + Raises: + ValueError: If `position` is negative. + + Examples: + >>> triangular_number(1) + 1 + >>> triangular_number(3) + 6 + >>> triangular_number(-1) + Traceback (most recent call last): + ... 
+ ValueError: param `position` must be non-negative + """ + if position < 0: + raise ValueError("param `position` must be non-negative") + + return position * (position + 1) // 2 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 54e2aa67e8f74435b15e2a2864a7fb00981979af Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Mon, 23 Oct 2023 23:12:02 +0530 Subject: [PATCH 1214/1543] Enhance readability of Minimax (#10838) * Enhance readability of Minimax * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Reduce line overflow * Update backtracking/minimax.py Co-authored-by: Tianyi Zheng * Update backtracking/minimax.py Co-authored-by: Tianyi Zheng * Update backtracking/minimax.py Co-authored-by: Tianyi Zheng * Remove line overflow --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- backtracking/minimax.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/backtracking/minimax.py b/backtracking/minimax.py index 6e310131e069..6dece2990a1c 100644 --- a/backtracking/minimax.py +++ b/backtracking/minimax.py @@ -16,6 +16,22 @@ def minimax( depth: int, node_index: int, is_max: bool, scores: list[int], height: float ) -> int: """ + This function implements the minimax algorithm, which helps achieve the optimal + score for a player in a two-player game by checking all possible moves. + If the player is the maximizer, then the score is maximized. + If the player is the minimizer, then the score is minimized. + + Parameters: + - depth: Current depth in the game tree. + - node_index: Index of the current node in the scores list. + - is_max: A boolean indicating whether the current move + is for the maximizer (True) or minimizer (False). + - scores: A list containing the scores of the leaves of the game tree. + - height: The maximum height of the game tree. + + Returns: + - An integer representing the optimal score for the current player. + >>> import math >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423] >>> height = math.log(len(scores), 2) @@ -37,19 +53,24 @@ def minimax( if depth < 0: raise ValueError("Depth cannot be less than 0") - if len(scores) == 0: raise ValueError("Scores cannot be empty") + # Base case: If the current depth equals the height of the tree, + # return the score of the current node. if depth == height: return scores[node_index] + # If it's the maximizer's turn, choose the maximum score + # between the two possible moves. if is_max: return max( minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), ) + # If it's the minimizer's turn, choose the minimum score + # between the two possible moves. 
return min( minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), @@ -57,8 +78,11 @@ def minimax( def main() -> None: + # Sample scores and height calculation scores = [90, 23, 6, 33, 21, 65, 123, 34423] height = math.log(len(scores), 2) + + # Calculate and print the optimal value using the minimax algorithm print("Optimal value : ", end="") print(minimax(0, 0, True, scores, height)) From 30122062b93cdeba8bacb0a4a3c783bc8069b7a0 Mon Sep 17 00:00:00 2001 From: Aqib Javid Bhat Date: Mon, 23 Oct 2023 23:26:43 +0530 Subject: [PATCH 1215/1543] Add Floyd's Cycle Detection Algorithm (#10833) * Add Floyd's Cycle Detection Algorithm * Add tests for add_node function * Apply suggestions from code review * Update floyds_cycle_detection.py --------- Co-authored-by: Tianyi Zheng --- .../linked_list/floyds_cycle_detection.py | 150 ++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 data_structures/linked_list/floyds_cycle_detection.py diff --git a/data_structures/linked_list/floyds_cycle_detection.py b/data_structures/linked_list/floyds_cycle_detection.py new file mode 100644 index 000000000000..6c3f13760260 --- /dev/null +++ b/data_structures/linked_list/floyds_cycle_detection.py @@ -0,0 +1,150 @@ +""" +Floyd's cycle detection algorithm is a popular algorithm used to detect cycles +in a linked list. It uses two pointers, a slow pointer and a fast pointer, +to traverse the linked list. The slow pointer moves one node at a time while the fast +pointer moves two nodes at a time. If there is a cycle in the linked list, +the fast pointer will eventually catch up to the slow pointer and they will +meet at the same node. If there is no cycle, the fast pointer will reach the end of +the linked list and the algorithm will terminate. + +For more information: https://en.wikipedia.org/wiki/Cycle_detection#Floyd's_tortoise_and_hare +""" + +from collections.abc import Iterator +from dataclasses import dataclass +from typing import Any, Self + + +@dataclass +class Node: + """ + A class representing a node in a singly linked list. + """ + + data: Any + next_node: Self | None = None + + +@dataclass +class LinkedList: + """ + A class representing a singly linked list. + """ + + head: Node | None = None + + def __iter__(self) -> Iterator: + """ + Iterates through the linked list. + + Returns: + Iterator: An iterator over the linked list. + + Examples: + >>> linked_list = LinkedList() + >>> list(linked_list) + [] + >>> linked_list.add_node(1) + >>> tuple(linked_list) + (1,) + """ + visited = [] + node = self.head + while node: + # Avoid infinite loop in there's a cycle + if node in visited: + return + visited.append(node) + yield node.data + node = node.next_node + + def add_node(self, data: Any) -> None: + """ + Adds a new node to the end of the linked list. + + Args: + data (Any): The data to be stored in the new node. + + Examples: + >>> linked_list = LinkedList() + >>> linked_list.add_node(1) + >>> linked_list.add_node(2) + >>> linked_list.add_node(3) + >>> linked_list.add_node(4) + >>> tuple(linked_list) + (1, 2, 3, 4) + """ + new_node = Node(data) + + if self.head is None: + self.head = new_node + return + + current_node = self.head + while current_node.next_node is not None: + current_node = current_node.next_node + + current_node.next_node = new_node + + def detect_cycle(self) -> bool: + """ + Detects if there is a cycle in the linked list using + Floyd's cycle detection algorithm. 
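For illustration only, the two-pointer idea described in the module docstring above is not tied to this particular linked list class; it works for any successor function. The names has_cycle and next_node are illustrative, not part of the patch:

    from collections.abc import Callable
    from typing import Any

    def has_cycle(next_node: Callable[[Any], Any], start: Any) -> bool:
        # next_node(x) returns the successor of x, or None at the end of the chain.
        slow = fast = start
        while fast is not None and next_node(fast) is not None:
            slow = next_node(slow)             # tortoise: one step
            fast = next_node(next_node(fast))  # hare: two steps
            if slow == fast:                   # the pointers meet only inside a cycle
                return True
        return False

    # links = {1: 2, 2: 3, 3: 2}              # 1 -> 2 -> 3 -> 2 -> ... (cycle)
    # has_cycle(lambda x: links.get(x), 1)    # -> True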
+ + Returns: + bool: True if there is a cycle, False otherwise. + + Examples: + >>> linked_list = LinkedList() + >>> linked_list.add_node(1) + >>> linked_list.add_node(2) + >>> linked_list.add_node(3) + >>> linked_list.add_node(4) + + >>> linked_list.detect_cycle() + False + + # Create a cycle in the linked list + >>> linked_list.head.next_node.next_node.next_node = linked_list.head.next_node + + >>> linked_list.detect_cycle() + True + """ + if self.head is None: + return False + + slow_pointer: Node | None = self.head + fast_pointer: Node | None = self.head + + while fast_pointer is not None and fast_pointer.next_node is not None: + slow_pointer = slow_pointer.next_node if slow_pointer else None + fast_pointer = fast_pointer.next_node.next_node + if slow_pointer == fast_pointer: + return True + + return False + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + linked_list = LinkedList() + linked_list.add_node(1) + linked_list.add_node(2) + linked_list.add_node(3) + linked_list.add_node(4) + + # Create a cycle in the linked list + # It first checks if the head, next_node, and next_node.next_node attributes of the + # linked list are not None to avoid any potential type errors. + if ( + linked_list.head + and linked_list.head.next_node + and linked_list.head.next_node.next_node + ): + linked_list.head.next_node.next_node.next_node = linked_list.head.next_node + + has_cycle = linked_list.detect_cycle() + print(has_cycle) # Output: True From ffd3a56c35f5ec274c819e8f2596ab5134cf9c36 Mon Sep 17 00:00:00 2001 From: SEIKH NABAB UDDIN <93948993+nababuddin@users.noreply.github.com> Date: Mon, 23 Oct 2023 23:42:28 +0530 Subject: [PATCH 1216/1543] Updated Selection Sort (#10855) * Update selection_sort.py * Update selection_sort.py --- sorts/selection_sort.py | 34 +++++++++++----------------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/sorts/selection_sort.py b/sorts/selection_sort.py index 28971a5e1aad..506836b53e44 100644 --- a/sorts/selection_sort.py +++ b/sorts/selection_sort.py @@ -1,22 +1,9 @@ -""" -This is a pure Python implementation of the selection sort algorithm - -For doctests run following command: -python -m doctest -v selection_sort.py -or -python3 -m doctest -v selection_sort.py - -For manual testing run: -python selection_sort.py -""" - - def selection_sort(collection: list[int]) -> list[int]: - """Pure implementation of the selection sort algorithm in Python - :param collection: some mutable ordered collection with heterogeneous - comparable items inside - :return: the same collection ordered by ascending + """ + Sorts a list in ascending order using the selection sort algorithm. + :param collection: A list of integers to be sorted. + :return: The sorted list. 
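For illustration only (a behaviourally equivalent sketch, not part of the patch), the inner scan for the smallest remaining element can also be written with the built-in min over an index range. selection_sort_min_key is an illustrative name:

    def selection_sort_min_key(collection: list[int]) -> list[int]:
        # Invariant: after pass i, collection[: i + 1] holds the i + 1 smallest items.
        for i in range(len(collection) - 1):
            min_index = min(range(i, len(collection)), key=collection.__getitem__)
            if min_index != i:
                collection[i], collection[min_index] = collection[min_index], collection[i]
        return collection

    # selection_sort_min_key([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]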
Examples: >>> selection_sort([0, 5, 3, 2, 2]) @@ -31,16 +18,17 @@ def selection_sort(collection: list[int]) -> list[int]: length = len(collection) for i in range(length - 1): - least = i + min_index = i for k in range(i + 1, length): - if collection[k] < collection[least]: - least = k - if least != i: - collection[least], collection[i] = (collection[i], collection[least]) + if collection[k] < collection[min_index]: + min_index = k + if min_index != i: + collection[i], collection[min_index] = collection[min_index], collection[i] return collection if __name__ == "__main__": user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] - print(selection_sort(unsorted)) + sorted_list = selection_sort(unsorted) + print("Sorted List:", sorted_list) From e5d6969f38ecf03f3e3a1e35fcfd3ae2484b6e08 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 22:29:16 +0200 Subject: [PATCH 1217/1543] [pre-commit.ci] pre-commit autoupdate (#10856) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.292 → v0.1.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.292...v0.1.1) - [github.com/psf/black: 23.9.1 → 23.10.0](https://github.com/psf/black/compare/23.9.1...23.10.0) - [github.com/pre-commit/mirrors-mypy: v1.6.0 → v1.6.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.6.0...v1.6.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 4 ++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b3def463ded2..e0b9922fae7e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.292 + rev: v0.1.1 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.9.1 + rev: 23.10.0 hooks: - id: black @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.6.0 + rev: v1.6.1 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 1e3711fe8dda..dfd1a2c0c809 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -233,6 +233,7 @@ * [Deque Doubly](data_structures/linked_list/deque_doubly.py) * [Doubly Linked List](data_structures/linked_list/doubly_linked_list.py) * [Doubly Linked List Two](data_structures/linked_list/doubly_linked_list_two.py) + * [Floyds Cycle Detection](data_structures/linked_list/floyds_cycle_detection.py) * [From Sequence](data_structures/linked_list/from_sequence.py) * [Has Loop](data_structures/linked_list/has_loop.py) * [Is Palindrome](data_structures/linked_list/is_palindrome.py) @@ -394,6 +395,7 @@ * [Interest](financial/interest.py) * [Present Value](financial/present_value.py) * [Price Plus Tax](financial/price_plus_tax.py) + * [Simple Moving Average](financial/simple_moving_average.py) ## Fractals * [Julia Sets](fractals/julia_sets.py) @@ -706,6 +708,7 @@ * [Polygonal Numbers](maths/special_numbers/polygonal_numbers.py) * [Pronic Number](maths/special_numbers/pronic_number.py) * [Proth Number](maths/special_numbers/proth_number.py) + * [Triangular 
Numbers](maths/special_numbers/triangular_numbers.py) * [Ugly Numbers](maths/special_numbers/ugly_numbers.py) * [Weird Number](maths/special_numbers/weird_number.py) * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) @@ -826,6 +829,7 @@ * [Shear Stress](physics/shear_stress.py) * [Speed Of Sound](physics/speed_of_sound.py) * [Speeds Of Gas Molecules](physics/speeds_of_gas_molecules.py) + * [Terminal Velocity](physics/terminal_velocity.py) ## Project Euler * Problem 001 From b98312ca9f2df491017e189b353e6b382b323eed Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 23 Oct 2023 16:37:17 -0400 Subject: [PATCH 1218/1543] Consolidate Newton-Raphson implementations (#10859) * updating DIRECTORY.md * updating DIRECTORY.md * Consolidate Newton-Raphson duplicates * Rename consolidated Newton-Raphson file * updating DIRECTORY.md * updating DIRECTORY.md * Fix doctest precision * Fix doctest precision again --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 - maths/numerical_analysis/newton_method.py | 54 ------- maths/numerical_analysis/newton_raphson.py | 142 +++++++++++++----- maths/numerical_analysis/newton_raphson_2.py | 64 -------- .../numerical_analysis/newton_raphson_new.py | 83 ---------- 5 files changed, 105 insertions(+), 241 deletions(-) delete mode 100644 maths/numerical_analysis/newton_method.py delete mode 100644 maths/numerical_analysis/newton_raphson_2.py delete mode 100644 maths/numerical_analysis/newton_raphson_new.py diff --git a/DIRECTORY.md b/DIRECTORY.md index dfd1a2c0c809..f0b1f7c13c2b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -642,10 +642,7 @@ * [Intersection](maths/numerical_analysis/intersection.py) * [Nevilles Method](maths/numerical_analysis/nevilles_method.py) * [Newton Forward Interpolation](maths/numerical_analysis/newton_forward_interpolation.py) - * [Newton Method](maths/numerical_analysis/newton_method.py) * [Newton Raphson](maths/numerical_analysis/newton_raphson.py) - * [Newton Raphson 2](maths/numerical_analysis/newton_raphson_2.py) - * [Newton Raphson New](maths/numerical_analysis/newton_raphson_new.py) * [Numerical Integration](maths/numerical_analysis/numerical_integration.py) * [Runge Kutta](maths/numerical_analysis/runge_kutta.py) * [Runge Kutta Fehlberg 45](maths/numerical_analysis/runge_kutta_fehlberg_45.py) diff --git a/maths/numerical_analysis/newton_method.py b/maths/numerical_analysis/newton_method.py deleted file mode 100644 index 5127bfcafd9a..000000000000 --- a/maths/numerical_analysis/newton_method.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Newton's Method.""" - -# Newton's Method - https://en.wikipedia.org/wiki/Newton%27s_method -from collections.abc import Callable - -RealFunc = Callable[[float], float] # type alias for a real -> real function - - -# function is the f(x) and derivative is the f'(x) -def newton( - function: RealFunc, - derivative: RealFunc, - starting_int: int, -) -> float: - """ - >>> newton(lambda x: x ** 3 - 2 * x - 5, lambda x: 3 * x ** 2 - 2, 3) - 2.0945514815423474 - >>> newton(lambda x: x ** 3 - 1, lambda x: 3 * x ** 2, -2) - 1.0 - >>> newton(lambda x: x ** 3 - 1, lambda x: 3 * x ** 2, -4) - 1.0000000000000102 - >>> import math - >>> newton(math.sin, math.cos, 1) - 0.0 - >>> newton(math.sin, math.cos, 2) - 3.141592653589793 - >>> newton(math.cos, lambda x: -math.sin(x), 2) - 1.5707963267948966 - >>> newton(math.cos, lambda x: -math.sin(x), 0) - Traceback (most recent call last): - ... 
- ZeroDivisionError: Could not find root - """ - prev_guess = float(starting_int) - while True: - try: - next_guess = prev_guess - function(prev_guess) / derivative(prev_guess) - except ZeroDivisionError: - raise ZeroDivisionError("Could not find root") from None - if abs(prev_guess - next_guess) < 10**-5: - return next_guess - prev_guess = next_guess - - -def f(x: float) -> float: - return (x**3) - (2 * x) - 5 - - -def f1(x: float) -> float: - return 3 * (x**2) - 2 - - -if __name__ == "__main__": - print(newton(f, f1, 3)) diff --git a/maths/numerical_analysis/newton_raphson.py b/maths/numerical_analysis/newton_raphson.py index 8491ca8003bc..feee38f905dd 100644 --- a/maths/numerical_analysis/newton_raphson.py +++ b/maths/numerical_analysis/newton_raphson.py @@ -1,45 +1,113 @@ -# Implementing Newton Raphson method in Python -# Author: Syed Haseeb Shah (github.com/QuantumNovice) -# The Newton-Raphson method (also known as Newton's method) is a way to -# quickly find a good approximation for the root of a real-valued function -from __future__ import annotations - -from decimal import Decimal - -from sympy import diff, lambdify, symbols - - -def newton_raphson(func: str, a: float | Decimal, precision: float = 1e-10) -> float: - """Finds root from the point 'a' onwards by Newton-Raphson method - >>> newton_raphson("sin(x)", 2) - 3.1415926536808043 - >>> newton_raphson("x**2 - 5*x + 2", 0.4) - 0.4384471871911695 - >>> newton_raphson("x**2 - 5", 0.1) - 2.23606797749979 - >>> newton_raphson("log(x) - 1", 2) - 2.718281828458938 +""" +The Newton-Raphson method (aka the Newton method) is a root-finding algorithm that +approximates a root of a given real-valued function f(x). It is an iterative method +given by the formula + +x_{n + 1} = x_n + f(x_n) / f'(x_n) + +with the precision of the approximation increasing as the number of iterations increase. + +Reference: https://en.wikipedia.org/wiki/Newton%27s_method +""" +from collections.abc import Callable + +RealFunc = Callable[[float], float] + + +def calc_derivative(f: RealFunc, x: float, delta_x: float = 1e-3) -> float: + """ + Approximate the derivative of a function f(x) at a point x using the finite + difference method + + >>> import math + >>> tolerance = 1e-5 + >>> derivative = calc_derivative(lambda x: x**2, 2) + >>> math.isclose(derivative, 4, abs_tol=tolerance) + True + >>> derivative = calc_derivative(math.sin, 0) + >>> math.isclose(derivative, 1, abs_tol=tolerance) + True + """ + return (f(x + delta_x / 2) - f(x - delta_x / 2)) / delta_x + + +def newton_raphson( + f: RealFunc, + x0: float = 0, + max_iter: int = 100, + step: float = 1e-6, + max_error: float = 1e-6, + log_steps: bool = False, +) -> tuple[float, float, list[float]]: """ - x = symbols("x") - f = lambdify(x, func, "math") - f_derivative = lambdify(x, diff(func), "math") - x_curr = a - while True: - x_curr = Decimal(x_curr) - Decimal(f(x_curr)) / Decimal(f_derivative(x_curr)) - if abs(f(x_curr)) < precision: - return float(x_curr) + Find a root of the given function f using the Newton-Raphson method. + + :param f: A real-valued single-variable function + :param x0: Initial guess + :param max_iter: Maximum number of iterations + :param step: Step size of x, used to approximate f'(x) + :param max_error: Maximum approximation error + :param log_steps: bool denoting whether to log intermediate steps + + :return: A tuple containing the approximation, the error, and the intermediate + steps. If log_steps is False, then an empty list is returned for the third + element of the tuple. 
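For a concrete picture of the iteration (separate from the patch), the classical update x_{n+1} = x_n - f(x_n) / f'(x_n) applied to f(x) = x**2 - 2 converges to sqrt(2) in a handful of steps. newton_sqrt2 is an illustrative name, not part of the repository:

    def newton_sqrt2(x: float = 1.0, iterations: int = 5) -> float:
        # f(x) = x**2 - 2 and f'(x) = 2 * x, so each step is
        # x <- x - (x**2 - 2) / (2 * x)
        for _ in range(iterations):
            x -= (x * x - 2) / (2 * x)
        return x

    # Successive estimates from x = 1.0: 1.5, 1.41666..., 1.41421..., approaching 2 ** 0.5.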
+ + :raises ZeroDivisionError: The derivative approaches 0. + :raises ArithmeticError: No solution exists, or the solution isn't found before the + iteration limit is reached. + + >>> import math + >>> tolerance = 1e-15 + >>> root, *_ = newton_raphson(lambda x: x**2 - 5*x + 2, 0.4, max_error=tolerance) + >>> math.isclose(root, (5 - math.sqrt(17)) / 2, abs_tol=tolerance) + True + >>> root, *_ = newton_raphson(lambda x: math.log(x) - 1, 2, max_error=tolerance) + >>> math.isclose(root, math.e, abs_tol=tolerance) + True + >>> root, *_ = newton_raphson(math.sin, 1, max_error=tolerance) + >>> math.isclose(root, 0, abs_tol=tolerance) + True + >>> newton_raphson(math.cos, 0) + Traceback (most recent call last): + ... + ZeroDivisionError: No converging solution found, zero derivative + >>> newton_raphson(lambda x: x**2 + 1, 2) + Traceback (most recent call last): + ... + ArithmeticError: No converging solution found, iteration limit reached + """ + + def f_derivative(x: float) -> float: + return calc_derivative(f, x, step) + + a = x0 # Set initial guess + steps = [] + for _ in range(max_iter): + if log_steps: # Log intermediate steps + steps.append(a) + + error = abs(f(a)) + if error < max_error: + return a, error, steps + + if f_derivative(a) == 0: + raise ZeroDivisionError("No converging solution found, zero derivative") + a -= f(a) / f_derivative(a) # Calculate next estimate + raise ArithmeticError("No converging solution found, iteration limit reached") if __name__ == "__main__": import doctest + from math import exp, tanh doctest.testmod() - # Find value of pi - print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") - # Find root of polynomial - print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}") - # Find value of e - print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}") - # Find root of exponential function - print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}") + def func(x: float) -> float: + return tanh(x) ** 2 - exp(3 * x) + + solution, err, steps = newton_raphson( + func, x0=10, max_iter=100, step=1e-6, log_steps=True + ) + print(f"{solution=}, {err=}") + print("\n".join(str(x) for x in steps)) diff --git a/maths/numerical_analysis/newton_raphson_2.py b/maths/numerical_analysis/newton_raphson_2.py deleted file mode 100644 index f6b227b5c9c1..000000000000 --- a/maths/numerical_analysis/newton_raphson_2.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Author: P Shreyas Shetty -Implementation of Newton-Raphson method for solving equations of kind -f(x) = 0. It is an iterative method where solution is found by the expression - x[n+1] = x[n] + f(x[n])/f'(x[n]) -If no solution exists, then either the solution will not be found when iteration -limit is reached or the gradient f'(x[n]) approaches zero. In both cases, exception -is raised. If iteration limit is reached, try increasing maxiter. 
-""" - -import math as m -from collections.abc import Callable - -DerivativeFunc = Callable[[float], float] - - -def calc_derivative(f: DerivativeFunc, a: float, h: float = 0.001) -> float: - """ - Calculates derivative at point a for function f using finite difference - method - """ - return (f(a + h) - f(a - h)) / (2 * h) - - -def newton_raphson( - f: DerivativeFunc, - x0: float = 0, - maxiter: int = 100, - step: float = 0.0001, - maxerror: float = 1e-6, - logsteps: bool = False, -) -> tuple[float, float, list[float]]: - a = x0 # set the initial guess - steps = [a] - error = abs(f(a)) - f1 = lambda x: calc_derivative(f, x, h=step) # noqa: E731 Derivative of f(x) - for _ in range(maxiter): - if f1(a) == 0: - raise ValueError("No converging solution found") - a = a - f(a) / f1(a) # Calculate the next estimate - if logsteps: - steps.append(a) - if error < maxerror: - break - else: - raise ValueError("Iteration limit reached, no converging solution found") - if logsteps: - # If logstep is true, then log intermediate steps - return a, error, steps - return a, error, [] - - -if __name__ == "__main__": - from matplotlib import pyplot as plt - - f = lambda x: m.tanh(x) ** 2 - m.exp(3 * x) # noqa: E731 - solution, error, steps = newton_raphson( - f, x0=10, maxiter=1000, step=1e-6, logsteps=True - ) - plt.plot([abs(f(x)) for x in steps]) - plt.xlabel("step") - plt.ylabel("error") - plt.show() - print(f"solution = {{{solution:f}}}, error = {{{error:f}}}") diff --git a/maths/numerical_analysis/newton_raphson_new.py b/maths/numerical_analysis/newton_raphson_new.py deleted file mode 100644 index f61841e2eb84..000000000000 --- a/maths/numerical_analysis/newton_raphson_new.py +++ /dev/null @@ -1,83 +0,0 @@ -# Implementing Newton Raphson method in Python -# Author: Saksham Gupta -# -# The Newton-Raphson method (also known as Newton's method) is a way to -# quickly find a good approximation for the root of a functreal-valued ion -# The method can also be extended to complex functions -# -# Newton's Method - https://en.wikipedia.org/wiki/Newton's_method - -from sympy import diff, lambdify, symbols -from sympy.functions import * # noqa: F403 - - -def newton_raphson( - function: str, - starting_point: complex, - variable: str = "x", - precision: float = 10**-10, - multiplicity: int = 1, -) -> complex: - """Finds root from the 'starting_point' onwards by Newton-Raphson method - Refer to https://docs.sympy.org/latest/modules/functions/index.html - for usable mathematical functions - - >>> newton_raphson("sin(x)", 2) - 3.141592653589793 - >>> newton_raphson("x**4 -5", 0.4 + 5j) - (-7.52316384526264e-37+1.4953487812212207j) - >>> newton_raphson('log(y) - 1', 2, variable='y') - 2.7182818284590455 - >>> newton_raphson('exp(x) - 1', 10, precision=0.005) - 1.2186556186174883e-10 - >>> newton_raphson('cos(x)', 0) - Traceback (most recent call last): - ... 
- ZeroDivisionError: Could not find root - """ - - x = symbols(variable) - func = lambdify(x, function) - diff_function = lambdify(x, diff(function, x)) - - prev_guess = starting_point - - while True: - if diff_function(prev_guess) != 0: - next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function( - prev_guess - ) - else: - raise ZeroDivisionError("Could not find root") from None - - # Precision is checked by comparing the difference of consecutive guesses - if abs(next_guess - prev_guess) < precision: - return next_guess - - prev_guess = next_guess - - -# Let's Execute -if __name__ == "__main__": - # Find root of trigonometric function - # Find value of pi - print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") - - # Find root of polynomial - # Find fourth Root of 5 - print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}") - - # Find value of e - print( - "The root of log(y) - 1 = 0 is ", - f"{newton_raphson('log(y) - 1', 2, variable='y')}", - ) - - # Exponential Roots - print( - "The root of exp(x) - 1 = 0 is", - f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}", - ) - - # Find root of cos(x) - print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}") From 6971af2416af051b13f888bebdfefa222c89c15d Mon Sep 17 00:00:00 2001 From: Marek Mazij <112333347+Mrk-Mzj@users.noreply.github.com> Date: Tue, 24 Oct 2023 00:22:09 +0200 Subject: [PATCH 1219/1543] feat: RGB to CMYK color converter (#10741) * feat: code functional, commented, tested * fix: compering types, exception msg, line length * fix: type hints --- conversions/rgb_cmyk_conversion.py | 71 ++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 conversions/rgb_cmyk_conversion.py diff --git a/conversions/rgb_cmyk_conversion.py b/conversions/rgb_cmyk_conversion.py new file mode 100644 index 000000000000..07d65b704c44 --- /dev/null +++ b/conversions/rgb_cmyk_conversion.py @@ -0,0 +1,71 @@ +def rgb_to_cmyk(r_input: int, g_input: int, b_input: int) -> tuple[int, int, int, int]: + """ + Simple RGB to CMYK conversion. Returns percentages of CMYK paint. + https://www.programmingalgorithms.com/algorithm/rgb-to-cmyk/ + + Note: this is a very popular algorithm that converts colors linearly and gives + only approximate results. Actual preparation for printing requires advanced color + conversion considering the color profiles and parameters of the target device. + + >>> rgb_to_cmyk(255, 200, "a") + Traceback (most recent call last): + ... + ValueError: Expected int, found (, , ) + + >>> rgb_to_cmyk(255, 255, 999) + Traceback (most recent call last): + ... 
+ ValueError: Expected int of the range 0..255 + + >>> rgb_to_cmyk(255, 255, 255) # white + (0, 0, 0, 0) + + >>> rgb_to_cmyk(128, 128, 128) # gray + (0, 0, 0, 50) + + >>> rgb_to_cmyk(0, 0, 0) # black + (0, 0, 0, 100) + + >>> rgb_to_cmyk(255, 0, 0) # red + (0, 100, 100, 0) + + >>> rgb_to_cmyk(0, 255, 0) # green + (100, 0, 100, 0) + + >>> rgb_to_cmyk(0, 0, 255) # blue + (100, 100, 0, 0) + """ + + if ( + not isinstance(r_input, int) + or not isinstance(g_input, int) + or not isinstance(b_input, int) + ): + msg = f"Expected int, found {type(r_input), type(g_input), type(b_input)}" + raise ValueError(msg) + + if not 0 <= r_input < 256 or not 0 <= g_input < 256 or not 0 <= b_input < 256: + raise ValueError("Expected int of the range 0..255") + + # changing range from 0..255 to 0..1 + r = r_input / 255 + g = g_input / 255 + b = b_input / 255 + + k = 1 - max(r, g, b) + + if k == 1: # pure black + return 0, 0, 0, 100 + + c = round(100 * (1 - r - k) / (1 - k)) + m = round(100 * (1 - g - k) / (1 - k)) + y = round(100 * (1 - b - k) / (1 - k)) + k = round(100 * k) + + return c, m, y, k + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 481aff7928b6a352c3cfa49045f0dd390d9d0868 Mon Sep 17 00:00:00 2001 From: Gourav Raj <59208847+gouravrajbit@users.noreply.github.com> Date: Tue, 24 Oct 2023 13:24:38 +0530 Subject: [PATCH 1220/1543] Add `Mirror a Binary Tree` solution (#9534) * Add `Invert a Binary Tree` solution * Add type * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add `doctest` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add `test` to `get_tree_inorder` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add `test` changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix lint errors * Fix precommit errors * Update and rename invert_binary_tree.py to mirror_binary_tree.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/mirror_binary_tree.py | 153 ++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 data_structures/binary_tree/mirror_binary_tree.py diff --git a/data_structures/binary_tree/mirror_binary_tree.py b/data_structures/binary_tree/mirror_binary_tree.py new file mode 100644 index 000000000000..39305c2a9da2 --- /dev/null +++ b/data_structures/binary_tree/mirror_binary_tree.py @@ -0,0 +1,153 @@ +""" +Given the root of a binary tree, mirror the tree, and return its root. + +Leetcode problem reference: https://leetcode.com/problems/mirror-binary-tree/ +""" +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass + + +@dataclass +class Node: + """ + A Node has value variable and pointers to Nodes to its left and right. + """ + + value: int + left: Node | None = None + right: Node | None = None + + def __iter__(self) -> Iterator[int]: + if self.left: + yield from self.left + yield self.value + if self.right: + yield from self.right + + def __len__(self) -> int: + return sum(1 for _ in self) + + def mirror(self) -> Node: + """ + Mirror the binary tree rooted at this node by swapping left and right children. 
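+ The swap is applied recursively to every child, so the whole tree is mirrored in O(n) time and the recursion depth equals the height of the tree.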
+ + >>> tree = Node(0) + >>> list(tree) + [0] + >>> list(tree.mirror()) + [0] + >>> tree = Node(1, Node(0), Node(3, Node(2), Node(4, None, Node(5)))) + >>> tuple(tree) + (0, 1, 2, 3, 4, 5) + >>> tuple(tree.mirror()) + (5, 4, 3, 2, 1, 0) + """ + self.left, self.right = self.right, self.left + if self.left: + self.left.mirror() + if self.right: + self.right.mirror() + return self + + +def make_tree_seven() -> Node: + r""" + Return a binary tree with 7 nodes that looks like this: + 1 + / \ + 2 3 + / \ / \ + 4 5 6 7 + + >>> tree_seven = make_tree_seven() + >>> len(tree_seven) + 7 + >>> list(tree_seven) + [4, 2, 5, 1, 6, 3, 7] + """ + tree = Node(1) + tree.left = Node(2) + tree.right = Node(3) + tree.left.left = Node(4) + tree.left.right = Node(5) + tree.right.left = Node(6) + tree.right.right = Node(7) + return tree + + +def make_tree_nine() -> Node: + r""" + Return a binary tree with 9 nodes that looks like this: + 1 + / \ + 2 3 + / \ \ + 4 5 6 + / \ \ + 7 8 9 + + >>> tree_nine = make_tree_nine() + >>> len(tree_nine) + 9 + >>> list(tree_nine) + [7, 4, 8, 2, 5, 9, 1, 3, 6] + """ + tree = Node(1) + tree.left = Node(2) + tree.right = Node(3) + tree.left.left = Node(4) + tree.left.right = Node(5) + tree.right.right = Node(6) + tree.left.left.left = Node(7) + tree.left.left.right = Node(8) + tree.left.right.right = Node(9) + return tree + + +def main() -> None: + r""" + Mirror binary trees with the given root and returns the root + + >>> tree = make_tree_nine() + >>> tuple(tree) + (7, 4, 8, 2, 5, 9, 1, 3, 6) + >>> tuple(tree.mirror()) + (6, 3, 1, 9, 5, 2, 8, 4, 7) + + nine_tree: + 1 + / \ + 2 3 + / \ \ + 4 5 6 + / \ \ + 7 8 9 + + The mirrored tree looks like this: + 1 + / \ + 3 2 + / / \ + 6 5 4 + / / \ + 9 8 7 + """ + trees = {"zero": Node(0), "seven": make_tree_seven(), "nine": make_tree_nine()} + for name, tree in trees.items(): + print(f" The {name} tree: {tuple(tree)}") + # (0,) + # (4, 2, 5, 1, 6, 3, 7) + # (7, 4, 8, 2, 5, 9, 1, 3, 6) + print(f"Mirror of {name} tree: {tuple(tree.mirror())}") + # (0,) + # (7, 3, 6, 1, 5, 2, 4) + # (6, 3, 1, 9, 5, 2, 8, 4, 7) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() From 17059b7ece0e9b2aa0f6e1789d635d6c3eef93ca Mon Sep 17 00:00:00 2001 From: Bhavesh Mathur <130584844+bhavesh1oo@users.noreply.github.com> Date: Tue, 24 Oct 2023 14:33:22 +0530 Subject: [PATCH 1221/1543] Added doctests , type hints for other/nested_brackets.py (#10872) * Added doctests , type hints * Update nested_brackets.py --------- Co-authored-by: Christian Clauss --- other/nested_brackets.py | 69 ++++++++++++++++++++++++++++------------ 1 file changed, 49 insertions(+), 20 deletions(-) diff --git a/other/nested_brackets.py b/other/nested_brackets.py index 19c6dd53c8b2..5760fa29b2fd 100644 --- a/other/nested_brackets.py +++ b/other/nested_brackets.py @@ -3,9 +3,9 @@ brackets are properly nested. A sequence of brackets s is considered properly nested if any of the following conditions are true: - - s is empty - - s has the form (U) or [U] or {U} where U is a properly nested string - - s has the form VW where V and W are properly nested strings + - s is empty + - s has the form (U) or [U] or {U} where U is a properly nested string + - s has the form VW where V and W are properly nested strings For example, the string "()()[()]" is properly nested but "[(()]" is not. 
@@ -14,31 +14,60 @@ """ -def is_balanced(s): - stack = [] - open_brackets = set({"(", "[", "{"}) - closed_brackets = set({")", "]", "}"}) +def is_balanced(s: str) -> bool: + """ + >>> is_balanced("") + True + >>> is_balanced("()") + True + >>> is_balanced("[]") + True + >>> is_balanced("{}") + True + >>> is_balanced("()[]{}") + True + >>> is_balanced("(())") + True + >>> is_balanced("[[") + False + >>> is_balanced("([{}])") + True + >>> is_balanced("(()[)]") + False + >>> is_balanced("([)]") + False + >>> is_balanced("[[()]]") + True + >>> is_balanced("(()(()))") + True + >>> is_balanced("]") + False + >>> is_balanced("Life is a bowl of cherries.") + True + >>> is_balanced("Life is a bowl of che{}ies.") + True + >>> is_balanced("Life is a bowl of che}{ies.") + False + """ open_to_closed = {"{": "}", "[": "]", "(": ")"} - - for i in range(len(s)): - if s[i] in open_brackets: - stack.append(s[i]) - - elif s[i] in closed_brackets and ( - len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i]) + stack = [] + for symbol in s: + if symbol in open_to_closed: + stack.append(symbol) + elif symbol in open_to_closed.values() and ( + not stack or open_to_closed[stack.pop()] != symbol ): return False - - return len(stack) == 0 + return not stack # stack should be empty def main(): s = input("Enter sequence of brackets: ") - if is_balanced(s): - print(s, "is balanced") - else: - print(s, "is not balanced") + print(f"'{s}' is {'' if is_balanced(s) else 'not '}balanced.") if __name__ == "__main__": + from doctest import testmod + + testmod() main() From eb17fcf8f5e77a6d3c870427db02b258515b4997 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 24 Oct 2023 14:45:36 +0200 Subject: [PATCH 1222/1543] Use dataclasses in circular_linked_list.py (#10884) * Use dataclasses in circular_linked_list.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + .../linked_list/circular_linked_list.py | 49 ++++++++----------- 2 files changed, 23 insertions(+), 28 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index f0b1f7c13c2b..5f8eabb6df88 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -157,6 +157,7 @@ * [Prefix Conversions](conversions/prefix_conversions.py) * [Prefix Conversions String](conversions/prefix_conversions_string.py) * [Pressure Conversions](conversions/pressure_conversions.py) + * [Rgb Cmyk Conversion](conversions/rgb_cmyk_conversion.py) * [Rgb Hsv Conversion](conversions/rgb_hsv_conversion.py) * [Roman Numerals](conversions/roman_numerals.py) * [Speed Conversions](conversions/speed_conversions.py) @@ -198,6 +199,7 @@ * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) * [Merge Two Binary Trees](data_structures/binary_tree/merge_two_binary_trees.py) + * [Mirror Binary Tree](data_structures/binary_tree/mirror_binary_tree.py) * [Non Recursive Segment Tree](data_structures/binary_tree/non_recursive_segment_tree.py) * [Number Of Possible Binary Trees](data_structures/binary_tree/number_of_possible_binary_trees.py) * [Red Black Tree](data_structures/binary_tree/red_black_tree.py) diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 54343c80a30f..bb64441d4560 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -1,27 +1,20 @@ from __future__ import 
annotations from collections.abc import Iterator +from dataclasses import dataclass from typing import Any +@dataclass class Node: - def __init__(self, data: Any): - """ - Initialize a new Node with the given data. - Args: - data: The data to be stored in the node. - """ - self.data: Any = data - self.next: Node | None = None # Reference to the next node + data: Any + next_node: Node | None = None +@dataclass class CircularLinkedList: - def __init__(self) -> None: - """ - Initialize an empty Circular Linked List. - """ - self.head: Node | None = None # Reference to the head (first node) - self.tail: Node | None = None # Reference to the tail (last node) + head: Node | None = None # Reference to the head (first node) + tail: Node | None = None # Reference to the tail (last node) def __iter__(self) -> Iterator[Any]: """ @@ -32,7 +25,7 @@ def __iter__(self) -> Iterator[Any]: node = self.head while node: yield node.data - node = node.next + node = node.next_node if node == self.head: break @@ -76,20 +69,20 @@ def insert_nth(self, index: int, data: Any) -> None: raise IndexError("list index out of range.") new_node: Node = Node(data) if self.head is None: - new_node.next = new_node # First node points to itself + new_node.next_node = new_node # First node points to itself self.tail = self.head = new_node elif index == 0: # Insert at the head - new_node.next = self.head + new_node.next_node = self.head assert self.tail is not None # List is not empty, tail exists - self.head = self.tail.next = new_node + self.head = self.tail.next_node = new_node else: temp: Node | None = self.head for _ in range(index - 1): assert temp is not None - temp = temp.next + temp = temp.next_node assert temp is not None - new_node.next = temp.next - temp.next = new_node + new_node.next_node = temp.next_node + temp.next_node = new_node if index == len(self) - 1: # Insert at the tail self.tail = new_node @@ -130,18 +123,18 @@ def delete_nth(self, index: int = 0) -> Any: if self.head == self.tail: # Just one node self.head = self.tail = None elif index == 0: # Delete head node - assert self.tail.next is not None - self.tail.next = self.tail.next.next - self.head = self.head.next + assert self.tail.next_node is not None + self.tail.next_node = self.tail.next_node.next_node + self.head = self.head.next_node else: temp: Node | None = self.head for _ in range(index - 1): assert temp is not None - temp = temp.next + temp = temp.next_node assert temp is not None - assert temp.next is not None - delete_node = temp.next - temp.next = temp.next.next + assert temp.next_node is not None + delete_node = temp.next_node + temp.next_node = temp.next_node.next_node if index == len(self) - 1: # Delete at tail self.tail = temp return delete_node.data From a23dd7ecbea89be8f6b3c7fcf214425274db0d02 Mon Sep 17 00:00:00 2001 From: SEIKH NABAB UDDIN <93948993+nababuddin@users.noreply.github.com> Date: Tue, 24 Oct 2023 18:42:32 +0530 Subject: [PATCH 1223/1543] Change from only weatherstack to both (#10882) * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see 
https://pre-commit.ci * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * Update current_weather.py * Update current_weather.py * Update current_weather.py * import requests --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- web_programming/current_weather.py | 64 ++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 22 deletions(-) diff --git a/web_programming/current_weather.py b/web_programming/current_weather.py index 3ed4c8a95a0c..3b6cd177cdfb 100644 --- a/web_programming/current_weather.py +++ b/web_programming/current_weather.py @@ -1,30 +1,50 @@ import requests -APPID = "" # <-- Put your OpenWeatherMap appid here! -URL_BASE = "https://api.openweathermap.org/data/2.5/" - - -def current_weather(q: str = "Chicago", appid: str = APPID) -> dict: - """https://openweathermap.org/api""" - return requests.get(URL_BASE + "weather", params=locals()).json() - - -def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict: - """https://openweathermap.org/forecast5""" - return requests.get(URL_BASE + "forecast", params=locals()).json() - - -def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict: - """https://openweathermap.org/api/one-call-api""" - return requests.get(URL_BASE + "onecall", params=locals()).json() +# Put your API key(s) here +OPENWEATHERMAP_API_KEY = "" +WEATHERSTACK_API_KEY = "" + +# Define the URL for the APIs with placeholders +OPENWEATHERMAP_URL_BASE = "https://api.openweathermap.org/data/2.5/weather" +WEATHERSTACK_URL_BASE = "http://api.weatherstack.com/current" + + +def current_weather(location: str) -> list[dict]: + """ + >>> current_weather("location") + Traceback (most recent call last): + ... + ValueError: No API keys provided or no valid data returned. + """ + weather_data = [] + if OPENWEATHERMAP_API_KEY: + params_openweathermap = {"q": location, "appid": OPENWEATHERMAP_API_KEY} + response_openweathermap = requests.get( + OPENWEATHERMAP_URL_BASE, params=params_openweathermap + ) + weather_data.append({"OpenWeatherMap": response_openweathermap.json()}) + if WEATHERSTACK_API_KEY: + params_weatherstack = {"query": location, "access_key": WEATHERSTACK_API_KEY} + response_weatherstack = requests.get( + WEATHERSTACK_URL_BASE, params=params_weatherstack + ) + weather_data.append({"Weatherstack": response_weatherstack.json()}) + if not weather_data: + raise ValueError("No API keys provided or no valid data returned.") + return weather_data if __name__ == "__main__": from pprint import pprint - while True: - location = input("Enter a location:").strip() + location = "to be determined..." 
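+ # Prompt repeatedly; the loop stops once location becomes an empty string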
+ while location: + location = input("Enter a location (city name or latitude,longitude): ").strip() if location: - pprint(current_weather(location)) - else: - break + try: + weather_data = current_weather(location) + for forecast in weather_data: + pprint(forecast) + except ValueError as e: + print(repr(e)) + location = "" From 28f4c16132170bf1e00d414809aff0c31d043e22 Mon Sep 17 00:00:00 2001 From: Saptadeep Banerjee <69459134+imSanko@users.noreply.github.com> Date: Tue, 24 Oct 2023 19:16:00 +0530 Subject: [PATCH 1224/1543] Tried new TESTS for the binomial_coefficient (#10822) * Tried new TESTS for the binomial_coefficient * Fix the tests request * Update binomial_coefficient.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update binomial_coefficient.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/binomial_coefficient.py | 46 +++++++++++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/maths/binomial_coefficient.py b/maths/binomial_coefficient.py index 6d5b46cb5861..24c54326e305 100644 --- a/maths/binomial_coefficient.py +++ b/maths/binomial_coefficient.py @@ -1,10 +1,48 @@ def binomial_coefficient(n: int, r: int) -> int: """ - Find binomial coefficient using pascals triangle. + Find binomial coefficient using Pascal's triangle. + + Calculate C(n, r) using Pascal's triangle. + + :param n: The total number of items. + :param r: The number of items to choose. + :return: The binomial coefficient C(n, r). >>> binomial_coefficient(10, 5) 252 + >>> binomial_coefficient(10, 0) + 1 + >>> binomial_coefficient(0, 10) + 1 + >>> binomial_coefficient(10, 10) + 1 + >>> binomial_coefficient(5, 2) + 10 + >>> binomial_coefficient(5, 6) + 0 + >>> binomial_coefficient(3, 5) + 0 + >>> binomial_coefficient(-2, 3) + Traceback (most recent call last): + ... + ValueError: n and r must be non-negative integers + >>> binomial_coefficient(5, -1) + Traceback (most recent call last): + ... + ValueError: n and r must be non-negative integers + >>> binomial_coefficient(10.1, 5) + Traceback (most recent call last): + ... + TypeError: 'float' object cannot be interpreted as an integer + >>> binomial_coefficient(10, 5.1) + Traceback (most recent call last): + ... 
+ TypeError: 'float' object cannot be interpreted as an integer """ + if n < 0 or r < 0: + raise ValueError("n and r must be non-negative integers") + if 0 in (n, r): + return 1 c = [0 for i in range(r + 1)] # nc0 = 1 c[0] = 1 @@ -17,4 +55,8 @@ def binomial_coefficient(n: int, r: int) -> int: return c[r] -print(binomial_coefficient(n=10, r=5)) +if __name__ == "__main__": + from doctest import testmod + + testmod() + print(binomial_coefficient(n=10, r=5)) From aeee0f42a5684e42cb77b664570dd2d29e04b7c1 Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Tue, 24 Oct 2023 20:06:24 +0530 Subject: [PATCH 1225/1543] Add doctests for fractional knapsack (#10891) * Add doctests for fractional knapsack * Update greedy_methods/fractional_knapsack.py Co-authored-by: Christian Clauss * Run doctests * Update greedy_methods/fractional_knapsack.py Co-authored-by: Christian Clauss * Update greedy_methods/fractional_knapsack.py --------- Co-authored-by: Christian Clauss --- greedy_methods/fractional_knapsack.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/greedy_methods/fractional_knapsack.py b/greedy_methods/fractional_knapsack.py index 58976d40c02b..d52b56f23569 100644 --- a/greedy_methods/fractional_knapsack.py +++ b/greedy_methods/fractional_knapsack.py @@ -6,6 +6,30 @@ def frac_knapsack(vl, wt, w, n): """ >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) 240.0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 10, 4) + 105.0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 8, 4) + 95.0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6], 8, 4) + 60.0 + >>> frac_knapsack([10, 40, 30], [5, 4, 6, 3], 8, 4) + 60.0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 0, 4) + 0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 8, 0) + 95.0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], -8, 4) + 0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 8, -4) + 95.0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 800, 4) + 130 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 8, 400) + 95.0 + >>> frac_knapsack("ABCD", [5, 4, 6, 3], 8, 400) + Traceback (most recent call last): + ... + TypeError: unsupported operand type(s) for /: 'str' and 'int' """ r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True) From 28302db9417daf769bec3aface9016afabeb5133 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Tue, 24 Oct 2023 21:23:17 +0530 Subject: [PATCH 1226/1543] Remove myself from CODEOWNERS (#10220) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 05cd709a8f62..a0531cdeec69 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,7 +7,7 @@ # Order is important. The last matching pattern has the most precedence. 
-/.* @cclauss @dhruvmanila +/.* @cclauss # /arithmetic_analysis/ From fd227d802661d4be4babae66075542dc153b4569 Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Wed, 25 Oct 2023 03:05:38 +0530 Subject: [PATCH 1227/1543] Add function docstrings, comments and type hints (#10893) * Add function docstrings, comments and type hints * Fix type mismatch * Fix type hint error * Fix float to int error * Update ford_fulkerson.py * Update ford_fulkerson.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ford_fulkerson.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- networking_flow/ford_fulkerson.py | 109 ++++++++++++++++++++++-------- 1 file changed, 80 insertions(+), 29 deletions(-) diff --git a/networking_flow/ford_fulkerson.py b/networking_flow/ford_fulkerson.py index 716ed508e679..7d5fb522e012 100644 --- a/networking_flow/ford_fulkerson.py +++ b/networking_flow/ford_fulkerson.py @@ -1,39 +1,95 @@ -# Ford-Fulkerson Algorithm for Maximum Flow Problem """ +Ford-Fulkerson Algorithm for Maximum Flow Problem +* https://en.wikipedia.org/wiki/Ford%E2%80%93Fulkerson_algorithm + Description: - (1) Start with initial flow as 0; - (2) Choose augmenting path from source to sink and add path to flow; + (1) Start with initial flow as 0 + (2) Choose the augmenting path from source to sink and add the path to flow """ +graph = [ + [0, 16, 13, 0, 0, 0], + [0, 0, 10, 12, 0, 0], + [0, 4, 0, 0, 14, 0], + [0, 0, 9, 0, 0, 20], + [0, 0, 0, 7, 0, 4], + [0, 0, 0, 0, 0, 0], +] + + +def breadth_first_search(graph: list, source: int, sink: int, parents: list) -> bool: + """ + This function returns True if there is a node that has not iterated. + + Args: + graph: Adjacency matrix of graph + source: Source + sink: Sink + parents: Parent list + + Returns: + True if there is a node that has not iterated. + >>> breadth_first_search(graph, 0, 5, [-1, -1, -1, -1, -1, -1]) + True + >>> breadth_first_search(graph, 0, 6, [-1, -1, -1, -1, -1, -1]) + Traceback (most recent call last): + ... + IndexError: list index out of range + """ + visited = [False] * len(graph) # Mark all nodes as not visited + queue = [] # breadth-first search queue -def bfs(graph, s, t, parent): - # Return True if there is node that has not iterated. - visited = [False] * len(graph) - queue = [] - queue.append(s) - visited[s] = True + # Source node + queue.append(source) + visited[source] = True while queue: - u = queue.pop(0) - for ind in range(len(graph[u])): - if visited[ind] is False and graph[u][ind] > 0: + u = queue.pop(0) # Pop the front node + # Traverse all adjacent nodes of u + for ind, node in enumerate(graph[u]): + if visited[ind] is False and node > 0: queue.append(ind) visited[ind] = True - parent[ind] = u + parents[ind] = u + return visited[sink] - return visited[t] +def ford_fulkerson(graph: list, source: int, sink: int) -> int: + """ + This function returns the maximum flow from source to sink in the given graph. -def ford_fulkerson(graph, source, sink): - # This array is filled by BFS and to store path + CAUTION: This function changes the given graph. + + Args: + graph: Adjacency matrix of graph + source: Source + sink: Sink + + Returns: + Maximum flow + + >>> test_graph = [ + ... [0, 16, 13, 0, 0, 0], + ... [0, 0, 10, 12, 0, 0], + ... [0, 4, 0, 0, 14, 0], + ... [0, 0, 9, 0, 0, 20], + ... [0, 0, 0, 7, 0, 4], + ... [0, 0, 0, 0, 0, 0], + ... 
] + >>> ford_fulkerson(test_graph, 0, 5) + 23 + """ + # This array is filled by breadth-first search and to store path parent = [-1] * (len(graph)) max_flow = 0 - while bfs(graph, source, sink, parent): - path_flow = float("Inf") + + # While there is a path from source to sink + while breadth_first_search(graph, source, sink, parent): + path_flow = int(1e9) # Infinite value s = sink while s != source: - # Find the minimum value in select path + # Find the minimum value in the selected path path_flow = min(path_flow, graph[parent[s]][s]) s = parent[s] @@ -45,17 +101,12 @@ def ford_fulkerson(graph, source, sink): graph[u][v] -= path_flow graph[v][u] += path_flow v = parent[v] + return max_flow -graph = [ - [0, 16, 13, 0, 0, 0], - [0, 0, 10, 12, 0, 0], - [0, 4, 0, 0, 14, 0], - [0, 0, 9, 0, 0, 20], - [0, 0, 0, 7, 0, 4], - [0, 0, 0, 0, 0, 0], -] +if __name__ == "__main__": + from doctest import testmod -source, sink = 0, 5 -print(ford_fulkerson(graph, source, sink)) + testmod() + print(f"{ford_fulkerson(graph, source=0, sink=5) = }") From dab4e648965a92a7f73aa5fe6ad8b8afc0fde7f9 Mon Sep 17 00:00:00 2001 From: Bisma nadeem <130698042+Bisma-Nadeemm@users.noreply.github.com> Date: Wed, 25 Oct 2023 02:51:04 +0500 Subject: [PATCH 1228/1543] Code enhancements in binary_insertion_sort.py (#10918) * Code enhancements in binary_insertion_sort.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/binary_insertion_sort.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/sorts/binary_insertion_sort.py b/sorts/binary_insertion_sort.py index 8d41025583b1..50653a99e7ce 100644 --- a/sorts/binary_insertion_sort.py +++ b/sorts/binary_insertion_sort.py @@ -12,10 +12,11 @@ def binary_insertion_sort(collection: list) -> list: - """Pure implementation of the binary insertion sort algorithm in Python - :param collection: some mutable ordered collection with heterogeneous - comparable items inside - :return: the same collection ordered by ascending + """ + Sorts a list using the binary insertion sort algorithm. + + :param collection: A mutable ordered collection with comparable items. + :return: The same collection ordered in ascending order. Examples: >>> binary_insertion_sort([0, 4, 1234, 4, 1]) @@ -39,23 +40,27 @@ def binary_insertion_sort(collection: list) -> list: n = len(collection) for i in range(1, n): - val = collection[i] + value_to_insert = collection[i] low = 0 high = i - 1 while low <= high: mid = (low + high) // 2 - if val < collection[mid]: + if value_to_insert < collection[mid]: high = mid - 1 else: low = mid + 1 for j in range(i, low, -1): collection[j] = collection[j - 1] - collection[low] = val + collection[low] = value_to_insert return collection -if __name__ == "__main__": +if __name__ == "__main": user_input = input("Enter numbers separated by a comma:\n").strip() - unsorted = [int(item) for item in user_input.split(",")] - print(binary_insertion_sort(unsorted)) + try: + unsorted = [int(item) for item in user_input.split(",")] + except ValueError: + print("Invalid input. 
Please enter valid integers separated by commas.") + raise + print(f"{binary_insertion_sort(unsorted) = }") From 76acc6de607eebdc0d0d5c68396030d8e240a6ea Mon Sep 17 00:00:00 2001 From: Iyiola Aloko <48067557+ialoko@users.noreply.github.com> Date: Tue, 24 Oct 2023 17:53:31 -0400 Subject: [PATCH 1229/1543] Adding doctests to frequency_finder.py (#10341) * Update frequency_finder.py * Update frequency_finder.py --------- Co-authored-by: Christian Clauss --- strings/frequency_finder.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/strings/frequency_finder.py b/strings/frequency_finder.py index 19f97afbbe37..8479c81ae464 100644 --- a/strings/frequency_finder.py +++ b/strings/frequency_finder.py @@ -49,6 +49,15 @@ def get_item_at_index_zero(x: tuple) -> str: def get_frequency_order(message: str) -> str: + """ + Get the frequency order of the letters in the given string + >>> get_frequency_order('Hello World') + 'LOWDRHEZQXJKVBPYGFMUCSNIAT' + >>> get_frequency_order('Hello@') + 'LHOEZQXJKVBPYGFWMUCDRSNIAT' + >>> get_frequency_order('h') + 'HZQXJKVBPYGFWMUCLDRSNIOATE' + """ letter_to_freq = get_letter_count(message) freq_to_letter: dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() From c2c6cb0f5c46346cab99121d236b2f5748e3c1df Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 25 Oct 2023 22:28:23 +0200 Subject: [PATCH 1230/1543] Add dataclasses to binary_search_tree.py (#10920) --- .../binary_tree/binary_search_tree.py | 69 ++++++++++++++++--- 1 file changed, 60 insertions(+), 9 deletions(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index a706d21e3bb2..38691c4755c9 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -14,6 +14,16 @@ >>> t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7) >>> print(" ".join(repr(i.value) for i in t.traversal_tree())) 8 3 1 6 4 7 10 14 13 + +>>> tuple(i.value for i in t.traversal_tree(inorder)) +(1, 3, 4, 6, 7, 8, 10, 13, 14) +>>> tuple(t) +(1, 3, 4, 6, 7, 8, 10, 13, 14) +>>> t.find_kth_smallest(3, t.root) +4 +>>> tuple(t)[3-1] +4 + >>> print(" ".join(repr(i.value) for i in t.traversal_tree(postorder))) 1 4 7 6 3 13 14 10 8 >>> t.remove(20) @@ -39,8 +49,12 @@ Test existence >>> t.search(6) is not None True +>>> 6 in t +True >>> t.search(-1) is not None False +>>> -1 in t +False >>> t.search(6).is_right True @@ -49,26 +63,47 @@ >>> t.get_max().value 14 +>>> max(t) +14 >>> t.get_min().value 1 +>>> min(t) +1 >>> t.empty() False +>>> not t +False >>> for i in testlist: ... 
t.remove(i) >>> t.empty() True +>>> not t +True """ +from __future__ import annotations -from collections.abc import Iterable +from collections.abc import Iterable, Iterator +from dataclasses import dataclass from typing import Any +@dataclass class Node: - def __init__(self, value: int | None = None): - self.value = value - self.parent: Node | None = None # Added in order to delete a node easier - self.left: Node | None = None - self.right: Node | None = None + value: int + left: Node | None = None + right: Node | None = None + parent: Node | None = None # Added in order to delete a node easier + + def __iter__(self) -> Iterator[int]: + """ + >>> list(Node(0)) + [0] + >>> list(Node(0, Node(-1), Node(1), None)) + [-1, 0, 1] + """ + yield from self.left or [] + yield self.value + yield from self.right or [] def __repr__(self) -> str: from pprint import pformat @@ -79,12 +114,18 @@ def __repr__(self) -> str: @property def is_right(self) -> bool: - return self.parent is not None and self is self.parent.right + return bool(self.parent and self is self.parent.right) +@dataclass class BinarySearchTree: - def __init__(self, root: Node | None = None): - self.root = root + root: Node | None = None + + def __bool__(self) -> bool: + return bool(self.root) + + def __iter__(self) -> Iterator[int]: + yield from self.root or [] def __str__(self) -> str: """ @@ -227,6 +268,16 @@ def find_kth_smallest(self, k: int, node: Node) -> int: return arr[k - 1] +def inorder(curr_node: Node | None) -> list[Node]: + """ + inorder (left, self, right) + """ + node_list = [] + if curr_node is not None: + node_list = inorder(curr_node.left) + [curr_node] + inorder(curr_node.right) + return node_list + + def postorder(curr_node: Node | None) -> list[Node]: """ postOrder (left, right, self) From 3d0a409ce119e1b7734ebaa2ffea660f5359080b Mon Sep 17 00:00:00 2001 From: AdityaAtoZ <129844626+AdityaAtoZ@users.noreply.github.com> Date: Thu, 26 Oct 2023 02:03:35 +0530 Subject: [PATCH 1231/1543] Improved Equilibrium Index of an Array. (#10899) * Improved Equilibrium Index of an Array. This is the modifications made to the original code: 1. Create Doctest Instructions: Python "doctest" can be executed by running the following command: python -m doctest -v equilibrium_index.py. 2. Deleted Argument {size}: Deleted the `size` argument because `len(arr)} allows the array's length to be determined inside the function, simplifying and improving the readability of the function signature. 3. Used {enumerate}: To improve code readability and indicate that we're working with element-index pairs, we iterated through the array using both elements and their indices using the `enumerate` function. 4. Optimized the Loop: To prevent pointless additions, the loop was improved by initializing {left_sum} with the value of the first element (arr[0]). Furthermore, since the beginning and last items (0 and size - 1) cannot be equilibrium indices, there is no need to check them, saving further computations. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../arrays/equilibrium_index_in_array.py | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/data_structures/arrays/equilibrium_index_in_array.py b/data_structures/arrays/equilibrium_index_in_array.py index 4099896d226d..8802db6206bb 100644 --- a/data_structures/arrays/equilibrium_index_in_array.py +++ b/data_structures/arrays/equilibrium_index_in_array.py @@ -2,7 +2,7 @@ Find the Equilibrium Index of an Array. Reference: https://www.geeksforgeeks.org/equilibrium-index-of-an-array/ -Python doctests can be run with the following command: +Python doctest can be run with the following command: python -m doctest -v equilibrium_index.py Given a sequence arr[] of size n, this function returns @@ -20,35 +20,34 @@ """ -def equilibrium_index(arr: list[int], size: int) -> int: +def equilibrium_index(arr: list[int]) -> int: """ Find the equilibrium index of an array. Args: - arr : The input array of integers. - size : The size of the array. + arr (list[int]): The input array of integers. Returns: int: The equilibrium index or -1 if no equilibrium index exists. Examples: - >>> equilibrium_index([-7, 1, 5, 2, -4, 3, 0], 7) + >>> equilibrium_index([-7, 1, 5, 2, -4, 3, 0]) 3 - >>> equilibrium_index([1, 2, 3, 4, 5], 5) + >>> equilibrium_index([1, 2, 3, 4, 5]) -1 - >>> equilibrium_index([1, 1, 1, 1, 1], 5) + >>> equilibrium_index([1, 1, 1, 1, 1]) 2 - >>> equilibrium_index([2, 4, 6, 8, 10, 3], 6) + >>> equilibrium_index([2, 4, 6, 8, 10, 3]) -1 """ total_sum = sum(arr) left_sum = 0 - for i in range(size): - total_sum -= arr[i] + for i, value in enumerate(arr): + total_sum -= value if left_sum == total_sum: return i - left_sum += arr[i] + left_sum += value return -1 From e1e5963812c3f59a60181307bccf15792ad2406c Mon Sep 17 00:00:00 2001 From: Bisma nadeem <130698042+Bisma-Nadeemm@users.noreply.github.com> Date: Thu, 26 Oct 2023 02:26:54 +0500 Subject: [PATCH 1232/1543] Code Enhancements in merge_sort.py (#10911) * Code Enhancements in merge_sort.py This enhanced code includes improved variable naming, error handling for user input, and more detailed docstrings. It's now more robust and readable. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- sorts/merge_sort.py | 47 ++++++++++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 20 deletions(-) diff --git a/sorts/merge_sort.py b/sorts/merge_sort.py index e80b1cb226ec..0628b848b794 100644 --- a/sorts/merge_sort.py +++ b/sorts/merge_sort.py @@ -12,9 +12,13 @@ def merge_sort(collection: list) -> list: """ - :param collection: some mutable ordered collection with heterogeneous - comparable items inside - :return: the same collection ordered by ascending + Sorts a list using the merge sort algorithm. + + :param collection: A mutable ordered collection with comparable items. + :return: The same collection ordered in ascending order. + + Time Complexity: O(n log n) + Examples: >>> merge_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] @@ -26,31 +30,34 @@ def merge_sort(collection: list) -> list: def merge(left: list, right: list) -> list: """ - Merge left and right. + Merge two sorted lists into a single sorted list. 
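+ The smaller of the two front elements is repeatedly moved into the result; any elements left over are appended afterwards.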
- :param left: left collection - :param right: right collection - :return: merge result + :param left: Left collection + :param right: Right collection + :return: Merged result """ - - def _merge(): - while left and right: - yield (left if left[0] <= right[0] else right).pop(0) - yield from left - yield from right - - return list(_merge()) + result = [] + while left and right: + result.append(left.pop(0) if left[0] <= right[0] else right.pop(0)) + result.extend(left) + result.extend(right) + return result if len(collection) <= 1: return collection - mid = len(collection) // 2 - return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:])) + mid_index = len(collection) // 2 + return merge(merge_sort(collection[:mid_index]), merge_sort(collection[mid_index:])) if __name__ == "__main__": import doctest doctest.testmod() - user_input = input("Enter numbers separated by a comma:\n").strip() - unsorted = [int(item) for item in user_input.split(",")] - print(*merge_sort(unsorted), sep=",") + + try: + user_input = input("Enter numbers separated by a comma:\n").strip() + unsorted = [int(item) for item in user_input.split(",")] + sorted_list = merge_sort(unsorted) + print(*sorted_list, sep=",") + except ValueError: + print("Invalid input. Please enter valid integers separated by commas.") From 0ffe506ea79fcd9820a6c9bf3194a3bfcd677b57 Mon Sep 17 00:00:00 2001 From: Humzafazal72 <125209604+Humzafazal72@users.noreply.github.com> Date: Thu, 26 Oct 2023 04:05:35 +0500 Subject: [PATCH 1233/1543] added mean absolute percentage error (#10464) * added mean absolute percentage error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added mean_absolute_percentage_error * added mean_absolute_percentage_error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added mean_absolute_percentage_error * added mean_absolute_percentage_error * added mean absolute percentage error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added mean absolute percentage error * added mean absolute percentage error * added mean absolute percentage error * added mean absolute percentage error * added mean absolute percentage error * Update machine_learning/loss_functions.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- machine_learning/loss_functions.py | 45 ++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index ef34296360e2..e5b7a713b6f2 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -297,6 +297,51 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> fl return np.mean(squared_logarithmic_errors) +def mean_absolute_percentage_error( + y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 +) -> float: + """ + Calculate the Mean Absolute Percentage Error between y_true and y_pred. + + Mean Absolute Percentage Error calculates the average of the absolute + percentage differences between the predicted and true values. + + Formula = (Σ|y_true[i]-Y_pred[i]/y_true[i]|)/n + + Source: https://stephenallwright.com/good-mape-score/ + + Parameters: + y_true (np.ndarray): Numpy array containing true/target values. + y_pred (np.ndarray): Numpy array containing predicted values. 
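+ epsilon (float): Small constant substituted for zeros in y_true to avoid division by zero.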
+ + Returns: + float: The Mean Absolute Percentage error between y_true and y_pred. + + Examples: + >>> y_true = np.array([10, 20, 30, 40]) + >>> y_pred = np.array([12, 18, 33, 45]) + >>> mean_absolute_percentage_error(y_true, y_pred) + 0.13125 + + >>> y_true = np.array([1, 2, 3, 4]) + >>> y_pred = np.array([2, 3, 4, 5]) + >>> mean_absolute_percentage_error(y_true, y_pred) + 0.5208333333333333 + + >>> y_true = np.array([34, 37, 44, 47, 48, 48, 46, 43, 32, 27, 26, 24]) + >>> y_pred = np.array([37, 40, 46, 44, 46, 50, 45, 44, 34, 30, 22, 23]) + >>> mean_absolute_percentage_error(y_true, y_pred) + 0.064671076436071 + """ + if len(y_true) != len(y_pred): + raise ValueError("The length of the two arrays should be the same.") + + y_true = np.where(y_true == 0, epsilon, y_true) + absolute_percentage_diff = np.abs((y_true - y_pred) / y_true) + + return np.mean(absolute_percentage_diff) + + if __name__ == "__main__": import doctest From 0e7f8284a32286534691e437d67405b6a09b10e1 Mon Sep 17 00:00:00 2001 From: Dale Dai <145884899+CouldNot@users.noreply.github.com> Date: Wed, 25 Oct 2023 22:27:46 -0700 Subject: [PATCH 1234/1543] Add error tests in doctest and fix error message (#10930) * Add error tests in doctest and fix error message * Change AssertationError to ValueError * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/prime_check.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/maths/prime_check.py b/maths/prime_check.py index c17877a57705..f1bc4def2469 100644 --- a/maths/prime_check.py +++ b/maths/prime_check.py @@ -29,12 +29,19 @@ def is_prime(number: int) -> bool: True >>> is_prime(67483) False + >>> is_prime(16.1) + Traceback (most recent call last): + ... + ValueError: is_prime() only accepts positive integers + >>> is_prime(-4) + Traceback (most recent call last): + ... 
+ ValueError: is_prime() only accepts positive integers """ # precondition - assert isinstance(number, int) and ( - number >= 0 - ), "'number' must been an int and positive" + if not isinstance(number, int) or not number >= 0: + raise ValueError("is_prime() only accepts positive integers") if 1 < number < 4: # 2 and 3 are primes @@ -64,7 +71,7 @@ def test_primes(self): assert is_prime(29) def test_not_primes(self): - with pytest.raises(AssertionError): + with pytest.raises(ValueError): is_prime(-19) assert not is_prime( 0 From 1a5d5cf93d30fc123af680ee9c58eb955932972b Mon Sep 17 00:00:00 2001 From: Megan Payne Date: Thu, 26 Oct 2023 07:31:47 +0200 Subject: [PATCH 1235/1543] Mean absolute error (#10927) * added mean absolute error to loss_functions.py * added doctest to mean absolute error to loss_functions.py * fixed long line in loss_functions.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed error in MAE * Update machine_learning/loss_functions.py Co-authored-by: Tianyi Zheng --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- machine_learning/loss_functions.py | 37 ++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index e5b7a713b6f2..ea1f390e358a 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -261,6 +261,43 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: return np.mean(squared_errors) +def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculates the Mean Absolute Error (MAE) between ground truth (observed) + and predicted values. + + MAE measures the absolute difference between true values and predicted values. + + Equation: + MAE = (1/n) * Σ(abs(y_true - y_pred)) + + Reference: https://en.wikipedia.org/wiki/Mean_absolute_error + + Parameters: + - y_true: The true values (ground truth) + - y_pred: The predicted values + + >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16) + True + >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16) + False + >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2]) + >>> mean_absolute_error(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. 
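+
+ A sanity check: identical arrays give an error of zero.
+ >>> true_values = np.array([1.0, 2.0, 3.0])
+ >>> np.isclose(mean_absolute_error(true_values, true_values), 0.0)
+ True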
+ """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + return np.mean(abs(y_true - y_pred)) + + def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: """ Calculate the mean squared logarithmic error (MSLE) between ground truth and From a8f05fe0a5d8b7e88d99c160b177ff3f3f07edcc Mon Sep 17 00:00:00 2001 From: Ed Date: Thu, 26 Oct 2023 00:02:35 -0700 Subject: [PATCH 1236/1543] Add doctests and type hints (#10974) * Add doctests and type hints * Apply suggestions from code review * Update tarjans_scc.py * Update tarjans_scc.py --------- Co-authored-by: Tianyi Zheng --- graphs/tarjans_scc.py | 35 +++++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/graphs/tarjans_scc.py b/graphs/tarjans_scc.py index dfd2e52704d5..a75dc4d2ca95 100644 --- a/graphs/tarjans_scc.py +++ b/graphs/tarjans_scc.py @@ -1,7 +1,7 @@ from collections import deque -def tarjan(g): +def tarjan(g: list[list[int]]) -> list[list[int]]: """ Tarjan's algo for finding strongly connected components in a directed graph @@ -19,15 +19,30 @@ def tarjan(g): Complexity: strong_connect() is called at most once for each node and has a complexity of O(|E|) as it is DFS. Therefore this has complexity O(|V| + |E|) for a graph G = (V, E) + + >>> tarjan([[2, 3, 4], [2, 3, 4], [0, 1, 3], [0, 1, 2], [1]]) + [[4, 3, 1, 2, 0]] + >>> tarjan([[], [], [], []]) + [[0], [1], [2], [3]] + >>> a = [0, 1, 2, 3, 4, 5, 4] + >>> b = [1, 0, 3, 2, 5, 4, 0] + >>> n = 7 + >>> sorted(tarjan(create_graph(n, list(zip(a, b))))) == sorted( + ... tarjan(create_graph(n, list(zip(a[::-1], b[::-1]))))) + True + >>> a = [0, 1, 2, 3, 4, 5, 6] + >>> b = [0, 1, 2, 3, 4, 5, 6] + >>> sorted(tarjan(create_graph(n, list(zip(a, b))))) + [[0], [1], [2], [3], [4], [5], [6]] """ n = len(g) - stack = deque() + stack: deque[int] = deque() on_stack = [False for _ in range(n)] index_of = [-1 for _ in range(n)] lowlink_of = index_of[:] - def strong_connect(v, index, components): + def strong_connect(v: int, index: int, components: list[list[int]]) -> int: index_of[v] = index # the number when this node is seen lowlink_of[v] = index # lowest rank node reachable from here index += 1 @@ -57,7 +72,7 @@ def strong_connect(v, index, components): components.append(component) return index - components = [] + components: list[list[int]] = [] for v in range(n): if index_of[v] == -1: strong_connect(v, 0, components) @@ -65,8 +80,16 @@ def strong_connect(v, index, components): return components -def create_graph(n, edges): - g = [[] for _ in range(n)] +def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]: + """ + >>> n = 7 + >>> source = [0, 0, 1, 2, 3, 3, 4, 4, 6] + >>> target = [1, 3, 2, 0, 1, 4, 5, 6, 5] + >>> edges = list(zip(source, target)) + >>> create_graph(n, edges) + [[1, 3], [2], [0], [1, 4], [5, 6], [], [5]] + """ + g: list[list[int]] = [[] for _ in range(n)] for u, v in edges: g[u].append(v) return g From c71c280726fb4e9487833993042e54598fe94fd9 Mon Sep 17 00:00:00 2001 From: Ravi Kumar <119737193+ravi-ivar-7@users.noreply.github.com> Date: Thu, 26 Oct 2023 12:50:28 +0530 Subject: [PATCH 1237/1543] added runge kutta gills method to maths/ numerical_analysis (#10967) * added runge kutta gills method * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- maths/numerical_analysis/runge_kutta_gills.py | 89 +++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 maths/numerical_analysis/runge_kutta_gills.py diff 
--git a/maths/numerical_analysis/runge_kutta_gills.py b/maths/numerical_analysis/runge_kutta_gills.py new file mode 100644 index 000000000000..2bd9cd6129b8 --- /dev/null +++ b/maths/numerical_analysis/runge_kutta_gills.py @@ -0,0 +1,89 @@ +""" +Use the Runge-Kutta-Gill's method of order 4 to solve Ordinary Differential Equations. + +https://www.geeksforgeeks.org/gills-4th-order-method-to-solve-differential-equations/ +Author : Ravi Kumar +""" +from collections.abc import Callable +from math import sqrt + +import numpy as np + + +def runge_kutta_gills( + func: Callable[[float, float], float], + x_initial: float, + y_initial: float, + step_size: float, + x_final: float, +) -> np.ndarray: + """ + Solve an Ordinary Differential Equations using Runge-Kutta-Gills Method of order 4. + + args: + func: An ordinary differential equation (ODE) as function of x and y. + x_initial: The initial value of x. + y_initial: The initial value of y. + step_size: The increment value of x. + x_final: The final value of x. + + Returns: + Solution of y at each nodal point + + >>> def f(x, y): + ... return (x-y)/2 + >>> y = runge_kutta_gills(f, 0, 3, 0.2, 5) + >>> y[-1] + 3.4104259225717537 + + >>> def f(x,y): + ... return x + >>> y = runge_kutta_gills(f, -1, 0, 0.2, 0) + >>> y + array([ 0. , -0.18, -0.32, -0.42, -0.48, -0.5 ]) + + >>> def f(x, y): + ... return x + y + >>> y = runge_kutta_gills(f, 0, 0, 0.2, -1) + Traceback (most recent call last): + ... + ValueError: The final value of x must be greater than initial value of x. + + >>> def f(x, y): + ... return x + >>> y = runge_kutta_gills(f, -1, 0, -0.2, 0) + Traceback (most recent call last): + ... + ValueError: Step size must be positive. + """ + if x_initial >= x_final: + raise ValueError( + "The final value of x must be greater than initial value of x." 
+ ) + + if step_size <= 0: + raise ValueError("Step size must be positive.") + + n = int((x_final - x_initial) / step_size) + y = np.zeros(n + 1) + y[0] = y_initial + for i in range(n): + k1 = step_size * func(x_initial, y[i]) + k2 = step_size * func(x_initial + step_size / 2, y[i] + k1 / 2) + k3 = step_size * func( + x_initial + step_size / 2, + y[i] + (-0.5 + 1 / sqrt(2)) * k1 + (1 - 1 / sqrt(2)) * k2, + ) + k4 = step_size * func( + x_initial + step_size, y[i] - (1 / sqrt(2)) * k2 + (1 + 1 / sqrt(2)) * k3 + ) + + y[i + 1] = y[i] + (k1 + (2 - sqrt(2)) * k2 + (2 + sqrt(2)) * k3 + k4) / 6 + x_initial += step_size + return y + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From dd7d18d49e9edc635f692b1f3db933e8ea717023 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Thu, 26 Oct 2023 13:25:56 +0530 Subject: [PATCH 1238/1543] Added doctest, docstring and typehint for sigmoid_function & cost_function (#10828) * Added doctest for sigmoid_function & cost_function * Update logistic_regression.py * Update logistic_regression.py * Minor formatting changes in doctests * Apply suggestions from code review * Made requested changes in logistic_regression.py * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- machine_learning/logistic_regression.py | 60 ++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py index f9da0104ab4b..59a70fd65cf9 100644 --- a/machine_learning/logistic_regression.py +++ b/machine_learning/logistic_regression.py @@ -27,7 +27,7 @@ # classification problems -def sigmoid_function(z): +def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray: """ Also known as Logistic Function. @@ -42,11 +42,63 @@ def sigmoid_function(z): @param z: input to the function @returns: returns value in the range 0 to 1 + + Examples: + >>> sigmoid_function(4) + 0.9820137900379085 + >>> sigmoid_function(np.array([-3, 3])) + array([0.04742587, 0.95257413]) + >>> sigmoid_function(np.array([-3, 3, 1])) + array([0.04742587, 0.95257413, 0.73105858]) + >>> sigmoid_function(np.array([-0.01, -2, -1.9])) + array([0.49750002, 0.11920292, 0.13010847]) + >>> sigmoid_function(np.array([-1.3, 5.3, 12])) + array([0.21416502, 0.9950332 , 0.99999386]) + >>> sigmoid_function(np.array([0.01, 0.02, 4.1])) + array([0.50249998, 0.50499983, 0.9836975 ]) + >>> sigmoid_function(np.array([0.8])) + array([0.68997448]) """ return 1 / (1 + np.exp(-z)) -def cost_function(h, y): +def cost_function(h: np.ndarray, y: np.ndarray) -> float: + """ + Cost function quantifies the error between predicted and expected values. + The cost function used in Logistic Regression is called Log Loss + or Cross Entropy Function. + + J(θ) = (1/m) * Σ [ -y * log(hθ(x)) - (1 - y) * log(1 - hθ(x)) ] + + Where: + - J(θ) is the cost that we want to minimize during training + - m is the number of training examples + - Σ represents the summation over all training examples + - y is the actual binary label (0 or 1) for a given example + - hθ(x) is the predicted probability that x belongs to the positive class + + @param h: the output of sigmoid function. 
It is the estimated probability + that the input example 'x' belongs to the positive class + + @param y: the actual binary label associated with input example 'x' + + Examples: + >>> estimations = sigmoid_function(np.array([0.3, -4.3, 8.1])) + >>> cost_function(h=estimations,y=np.array([1, 0, 1])) + 0.18937868932131605 + >>> estimations = sigmoid_function(np.array([4, 3, 1])) + >>> cost_function(h=estimations,y=np.array([1, 0, 0])) + 1.459999655669926 + >>> estimations = sigmoid_function(np.array([4, -3, -1])) + >>> cost_function(h=estimations,y=np.array([1,0,0])) + 0.1266663223365915 + >>> estimations = sigmoid_function(0) + >>> cost_function(h=estimations,y=np.array([1])) + 0.6931471805599453 + + References: + - https://en.wikipedia.org/wiki/Logistic_regression + """ return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean() @@ -75,6 +127,10 @@ def logistic_reg(alpha, x, y, max_iterations=70000): # In[68]: if __name__ == "__main__": + import doctest + + doctest.testmod() + iris = datasets.load_iris() x = iris.data[:, :2] y = (iris.target != 0) * 1 From e5a6a97c3277fbf849b77d1328720782128ecafd Mon Sep 17 00:00:00 2001 From: Sanjay <146640686+san-jay-14@users.noreply.github.com> Date: Thu, 26 Oct 2023 13:28:40 +0530 Subject: [PATCH 1239/1543] Added Lens formulae to the Physics repository (#10187) * Added Lens formulae to the Physics repository * Resolved the commented issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update lens_formulae.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- physics/lens_formulae.py | 131 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 131 insertions(+) create mode 100644 physics/lens_formulae.py diff --git a/physics/lens_formulae.py b/physics/lens_formulae.py new file mode 100644 index 000000000000..162f3a8f3b29 --- /dev/null +++ b/physics/lens_formulae.py @@ -0,0 +1,131 @@ +""" +This module has functions which calculate focal length of lens, distance of +image from the lens and distance of object from the lens. +The above is calculated using the lens formula. + +In optics, the relationship between the distance of the image (v), +the distance of the object (u), and +the focal length (f) of the lens is given by the formula known as the Lens formula. +The Lens formula is applicable for convex as well as concave lenses. The formula +is given as follows: + +------------------- +| 1/f = 1/v + 1/u | +------------------- + +Where + f = focal length of the lens in meters. + v = distance of the image from the lens in meters. + u = distance of the object from the lens in meters. + +To make our calculations easy few assumptions are made while deriving the formula +which are important to keep in mind before solving this equation. +The assumptions are as follows: + 1. The object O is a point object lying somewhere on the principle axis. + 2. The lens is thin. + 3. The aperture of the lens taken must be small. + 4. The angles of incidence and angle of refraction should be small. + +Sign convention is a set of rules to set signs for image distance, object distance, +focal length, etc +for mathematical analysis of image formation. According to it: + 1. Object is always placed to the left of lens. + 2. All distances are measured from the optical centre of the mirror. + 3. 
Distances measured in the direction of the incident ray are positive and + the distances measured in the direction opposite + to that of the incident rays are negative. + 4. Distances measured along y-axis above the principal axis are positive and + that measured along y-axis below the principal + axis are negative. + +Note: Sign convention can be reversed and will still give the correct results. + +Reference for Sign convention: +https://www.toppr.com/ask/content/concept/sign-convention-for-lenses-210246/ + +Reference for assumptions: +https://testbook.com/physics/derivation-of-lens-maker-formula +""" + + +def focal_length_of_lens( + object_distance_from_lens: float, image_distance_from_lens: float +) -> float: + """ + Doctests: + >>> from math import isclose + >>> isclose(focal_length_of_lens(10,4), 6.666666666666667) + True + >>> from math import isclose + >>> isclose(focal_length_of_lens(2.7,5.8), -5.0516129032258075) + True + >>> focal_length_of_lens(0, 20) # doctest: +NORMALIZE_WHITESPACE + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter non zero values with respect + to the sign convention. + """ + + if object_distance_from_lens == 0 or image_distance_from_lens == 0: + raise ValueError( + "Invalid inputs. Enter non zero values with respect to the sign convention." + ) + focal_length = 1 / ( + (1 / image_distance_from_lens) - (1 / object_distance_from_lens) + ) + return focal_length + + +def object_distance( + focal_length_of_lens: float, image_distance_from_lens: float +) -> float: + """ + Doctests: + >>> from math import isclose + >>> isclose(object_distance(10,40), -13.333333333333332) + True + + >>> from math import isclose + >>> isclose(object_distance(6.2,1.5), 1.9787234042553192) + True + + >>> object_distance(0, 20) # doctest: +NORMALIZE_WHITESPACE + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter non zero values with respect + to the sign convention. + """ + + if image_distance_from_lens == 0 or focal_length_of_lens == 0: + raise ValueError( + "Invalid inputs. Enter non zero values with respect to the sign convention." + ) + + object_distance = 1 / ((1 / image_distance_from_lens) - (1 / focal_length_of_lens)) + return object_distance + + +def image_distance( + focal_length_of_lens: float, object_distance_from_lens: float +) -> float: + """ + Doctests: + >>> from math import isclose + >>> isclose(image_distance(50,40), 22.22222222222222) + True + >>> from math import isclose + >>> isclose(image_distance(5.3,7.9), 3.1719696969696973) + True + + >>> object_distance(0, 20) # doctest: +NORMALIZE_WHITESPACE + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter non zero values with respect + to the sign convention. + """ + if object_distance_from_lens == 0 or focal_length_of_lens == 0: + raise ValueError( + "Invalid inputs. Enter non zero values with respect to the sign convention." 
+ ) + image_distance = 1 / ((1 / object_distance_from_lens) + (1 / focal_length_of_lens)) + return image_distance From e791a2067baf3b23c0413f32c7388e3b2a95744e Mon Sep 17 00:00:00 2001 From: Mary-0165 <146911989+Mary-0165@users.noreply.github.com> Date: Thu, 26 Oct 2023 13:40:13 +0530 Subject: [PATCH 1240/1543] Capacitor equivalence algorithm (#9814) * capacitor equivalence algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Update capacitor_equivalence.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- electronics/capacitor_equivalence.py | 53 ++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 electronics/capacitor_equivalence.py diff --git a/electronics/capacitor_equivalence.py b/electronics/capacitor_equivalence.py new file mode 100644 index 000000000000..274b18afb3ef --- /dev/null +++ b/electronics/capacitor_equivalence.py @@ -0,0 +1,53 @@ +# https://farside.ph.utexas.edu/teaching/316/lectures/node46.html + +from __future__ import annotations + + +def capacitor_parallel(capacitors: list[float]) -> float: + """ + Ceq = C1 + C2 + ... + Cn + Calculate the equivalent resistance for any number of capacitors in parallel. + >>> capacitor_parallel([5.71389, 12, 3]) + 20.71389 + >>> capacitor_parallel([5.71389, 12, -3]) + Traceback (most recent call last): + ... + ValueError: Capacitor at index 2 has a negative value! + """ + sum_c = 0.0 + for index, capacitor in enumerate(capacitors): + if capacitor < 0: + msg = f"Capacitor at index {index} has a negative value!" + raise ValueError(msg) + sum_c += capacitor + return sum_c + + +def capacitor_series(capacitors: list[float]) -> float: + """ + Ceq = 1/ (1/C1 + 1/C2 + ... + 1/Cn) + >>> capacitor_series([5.71389, 12, 3]) + 1.6901062252507735 + >>> capacitor_series([5.71389, 12, -3]) + Traceback (most recent call last): + ... + ValueError: Capacitor at index 2 has a negative or zero value! + >>> capacitor_series([5.71389, 12, 0.000]) + Traceback (most recent call last): + ... + ValueError: Capacitor at index 2 has a negative or zero value! + """ + + first_sum = 0.0 + for index, capacitor in enumerate(capacitors): + if capacitor <= 0: + msg = f"Capacitor at index {index} has a negative or zero value!" + raise ValueError(msg) + first_sum += 1 / capacitor + return 1 / first_sum + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From ade2837e410ec286819f0f4fd977bb411a95b379 Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Thu, 26 Oct 2023 13:55:08 +0530 Subject: [PATCH 1241/1543] Update capitalize.py (#10573) * Update capitalize.py * Update strings/capitalize.py --------- Co-authored-by: Tianyi Zheng --- strings/capitalize.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/strings/capitalize.py b/strings/capitalize.py index e7e97c2beb53..c0b45e0d9614 100644 --- a/strings/capitalize.py +++ b/strings/capitalize.py @@ -3,7 +3,8 @@ def capitalize(sentence: str) -> str: """ - This function will capitalize the first letter of a sentence or a word + Capitalizes the first letter of a sentence or word. 
+ >>> capitalize("hello world") 'Hello world' >>> capitalize("123 hello world") @@ -17,6 +18,10 @@ def capitalize(sentence: str) -> str: """ if not sentence: return "" + + # Create a dictionary that maps lowercase letters to uppercase letters + # Capitalize the first character if it's a lowercase letter + # Concatenate the capitalized character with the rest of the string lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase)) return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:] From 6497917352c73371730e50f063acd61cf4268076 Mon Sep 17 00:00:00 2001 From: Neha <129765919+neha3423@users.noreply.github.com> Date: Thu, 26 Oct 2023 14:24:30 +0530 Subject: [PATCH 1242/1543] Added Kth largest element algorithm (#10687) * neha3423 * neha3423 * neha3423 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * neha3423 * neha3423 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * neha323 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * neha3423 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * neha3423 * neha3423 * neha3423 * neha3423 * Added test case for tuple * Update kth_largest_element.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/arrays/kth_largest_element.py | 117 ++++++++++++++++++ 1 file changed, 117 insertions(+) create mode 100644 data_structures/arrays/kth_largest_element.py diff --git a/data_structures/arrays/kth_largest_element.py b/data_structures/arrays/kth_largest_element.py new file mode 100644 index 000000000000..f25cc68e9b72 --- /dev/null +++ b/data_structures/arrays/kth_largest_element.py @@ -0,0 +1,117 @@ +""" +Given an array of integers and an integer k, find the kth largest element in the array. + +https://stackoverflow.com/questions/251781 +""" + + +def partition(arr: list[int], low: int, high: int) -> int: + """ + Partitions list based on the pivot element. + + This function rearranges the elements in the input list 'elements' such that + all elements greater than or equal to the chosen pivot are on the right side + of the pivot, and all elements smaller than the pivot are on the left side. + + Args: + arr: The list to be partitioned + low: The lower index of the list + high: The higher index of the list + + Returns: + int: The index of pivot element after partitioning + + Examples: + >>> partition([3, 1, 4, 5, 9, 2, 6, 5, 3, 5], 0, 9) + 4 + >>> partition([7, 1, 4, 5, 9, 2, 6, 5, 8], 0, 8) + 1 + >>> partition(['apple', 'cherry', 'date', 'banana'], 0, 3) + 2 + >>> partition([3.1, 1.2, 5.6, 4.7], 0, 3) + 1 + """ + pivot = arr[high] + i = low - 1 + for j in range(low, high): + if arr[j] >= pivot: + i += 1 + arr[i], arr[j] = arr[j], arr[i] + arr[i + 1], arr[high] = arr[high], arr[i + 1] + return i + 1 + + +def kth_largest_element(arr: list[int], position: int) -> int: + """ + Finds the kth largest element in a list. + Should deliver similar results to: + ```python + def kth_largest_element(arr, position): + return sorted(arr)[-position] + ``` + + Args: + nums: The list of numbers. + k: The position of the desired kth largest element. + + Returns: + int: The kth largest element. 
+ + Examples: + >>> kth_largest_element([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5], 3) + 5 + >>> kth_largest_element([2, 5, 6, 1, 9, 3, 8, 4, 7, 3, 5], 1) + 9 + >>> kth_largest_element([2, 5, 6, 1, 9, 3, 8, 4, 7, 3, 5], -2) + Traceback (most recent call last): + ... + ValueError: Invalid value of 'position' + >>> kth_largest_element([9, 1, 3, 6, 7, 9, 8, 4, 2, 4, 9], 110) + Traceback (most recent call last): + ... + ValueError: Invalid value of 'position' + >>> kth_largest_element([1, 2, 4, 3, 5, 9, 7, 6, 5, 9, 3], 0) + Traceback (most recent call last): + ... + ValueError: Invalid value of 'position' + >>> kth_largest_element(['apple', 'cherry', 'date', 'banana'], 2) + 'cherry' + >>> kth_largest_element([3.1, 1.2, 5.6, 4.7,7.9,5,0], 2) + 5.6 + >>> kth_largest_element([-2, -5, -4, -1], 1) + -1 + >>> kth_largest_element([], 1) + -1 + >>> kth_largest_element([3.1, 1.2, 5.6, 4.7, 7.9, 5, 0], 1.5) + Traceback (most recent call last): + ... + ValueError: The position should be an integer + >>> kth_largest_element((4, 6, 1, 2), 4) + Traceback (most recent call last): + ... + TypeError: 'tuple' object does not support item assignment + """ + if not arr: + return -1 + if not isinstance(position, int): + raise ValueError("The position should be an integer") + if not 1 <= position <= len(arr): + raise ValueError("Invalid value of 'position'") + low, high = 0, len(arr) - 1 + while low <= high: + if low > len(arr) - 1 or high < 0: + return -1 + pivot_index = partition(arr, low, high) + if pivot_index == position - 1: + return arr[pivot_index] + elif pivot_index > position - 1: + high = pivot_index - 1 + else: + low = pivot_index + 1 + return -1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 42c49ee1174506dd04dc2dff422328cdb7dc7201 Mon Sep 17 00:00:00 2001 From: Habip Akyol <127725897+habipakyol@users.noreply.github.com> Date: Thu, 26 Oct 2023 14:24:17 +0300 Subject: [PATCH 1243/1543] Fix typo in haralick_descriptors.py (#10988) --- computer_vision/haralick_descriptors.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py index 413cea304f6c..007421e34263 100644 --- a/computer_vision/haralick_descriptors.py +++ b/computer_vision/haralick_descriptors.py @@ -253,13 +253,13 @@ def matrix_concurrency(image: np.ndarray, coordinate: tuple[int, int]) -> np.nda def haralick_descriptors(matrix: np.ndarray) -> list[float]: - """Calculates all 8 Haralick descriptors based on co-occurence input matrix. + """Calculates all 8 Haralick descriptors based on co-occurrence input matrix. All descriptors are as follows: Maximum probability, Inverse Difference, Homogeneity, Entropy, Energy, Dissimilarity, Contrast and Correlation Args: - matrix: Co-occurence matrix to use as base for calculating descriptors. + matrix: Co-occurrence matrix to use as base for calculating descriptors. 
Returns: Reverse ordered list of resulting descriptors From 29b8ccdc2f685e815f12fd6e9e8b9faee21e338d Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Thu, 26 Oct 2023 17:42:28 +0530 Subject: [PATCH 1244/1543] Added doctest to hash_table.py (#10984) --- data_structures/hashing/hash_table.py | 81 +++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index 7ca2f7c401cf..5bf431328da4 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -21,6 +21,29 @@ def __init__( self._keys: dict = {} def keys(self): + """ + The keys function returns a dictionary containing the key value pairs. + key being the index number in hash table and value being the data value. + + Examples: + 1. creating HashTable with size 10 and inserting 3 elements + >>> ht = HashTable(10) + >>> ht.insert_data(10) + >>> ht.insert_data(20) + >>> ht.insert_data(30) + >>> ht.keys() + {0: 10, 1: 20, 2: 30} + + 2. creating HashTable with size 5 and inserting 5 elements + >>> ht = HashTable(5) + >>> ht.insert_data(5) + >>> ht.insert_data(4) + >>> ht.insert_data(3) + >>> ht.insert_data(2) + >>> ht.insert_data(1) + >>> ht.keys() + {0: 5, 4: 4, 3: 3, 2: 2, 1: 1} + """ return self._keys def balanced_factor(self): @@ -37,6 +60,43 @@ def _step_by_step(self, step_ord): print(self.values) def bulk_insert(self, values): + """ + bulk_insert is used for entering more than one element at a time + in the HashTable. + + Examples: + 1. + >>> ht = HashTable(5) + >>> ht.bulk_insert((10,20,30)) + step 1 + [0, 1, 2, 3, 4] + [10, None, None, None, None] + step 2 + [0, 1, 2, 3, 4] + [10, 20, None, None, None] + step 3 + [0, 1, 2, 3, 4] + [10, 20, 30, None, None] + + 2. + >>> ht = HashTable(5) + >>> ht.bulk_insert([5,4,3,2,1]) + step 1 + [0, 1, 2, 3, 4] + [5, None, None, None, None] + step 2 + [0, 1, 2, 3, 4] + [5, None, None, None, 4] + step 3 + [0, 1, 2, 3, 4] + [5, None, None, 3, 4] + step 4 + [0, 1, 2, 3, 4] + [5, None, 2, 3, 4] + step 5 + [0, 1, 2, 3, 4] + [5, 1, 2, 3, 4] + """ i = 1 self.__aux_list = values for value in values: @@ -69,6 +129,21 @@ def rehashing(self): self.insert_data(value) def insert_data(self, data): + """ + insert_data is used for inserting a single element at a time in the HashTable. + + Examples: + + >>> ht = HashTable(3) + >>> ht.insert_data(5) + >>> ht.keys() + {2: 5} + >>> ht = HashTable(5) + >>> ht.insert_data(30) + >>> ht.insert_data(50) + >>> ht.keys() + {0: 30, 1: 50} + """ key = self.hash_function(data) if self.values[key] is None: @@ -84,3 +159,9 @@ def insert_data(self, data): else: self.rehashing() self.insert_data(data) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 69f7f3208e0297cea9ccd9d02b9fb690f2ee3b93 Mon Sep 17 00:00:00 2001 From: Akash_Jambulkar <97665573+Akash-Jambulkar@users.noreply.github.com> Date: Thu, 26 Oct 2023 17:57:31 +0530 Subject: [PATCH 1245/1543] Update cocktail_shaker_sort.py (#10987) * Update cocktail_shaker_sort.py Added a docstring with clear explanations of the function and its parameters. Changed variable names i, start, and end for better readability. Improved comments to describe the purpose of each section of the algorithm. Adjusted the loop ranges to make the code more concise and readable. Removed redundant comments and variable assignments. Provided a clear message when printing the sorted list. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update cocktail_shaker_sort.py * typing: ignore[operator] * Update cocktail_shaker_sort.py * Update cocktail_shaker_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/cocktail_shaker_sort.py | 52 +++++++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/sorts/cocktail_shaker_sort.py b/sorts/cocktail_shaker_sort.py index b738ff31d768..de126426d986 100644 --- a/sorts/cocktail_shaker_sort.py +++ b/sorts/cocktail_shaker_sort.py @@ -1,40 +1,62 @@ -""" https://en.wikipedia.org/wiki/Cocktail_shaker_sort """ +""" +An implementation of the cocktail shaker sort algorithm in pure Python. +https://en.wikipedia.org/wiki/Cocktail_shaker_sort +""" -def cocktail_shaker_sort(unsorted: list) -> list: + +def cocktail_shaker_sort(arr: list[int]) -> list[int]: """ - Pure implementation of the cocktail shaker sort algorithm in Python. + Sorts a list using the Cocktail Shaker Sort algorithm. + + :param arr: List of elements to be sorted. + :return: Sorted list. + >>> cocktail_shaker_sort([4, 5, 2, 1, 2]) [1, 2, 2, 4, 5] - >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) [-4, 0, 1, 2, 5, 11] - >>> cocktail_shaker_sort([0.1, -2.4, 4.4, 2.2]) [-2.4, 0.1, 2.2, 4.4] - >>> cocktail_shaker_sort([1, 2, 3, 4, 5]) [1, 2, 3, 4, 5] - >>> cocktail_shaker_sort([-4, -5, -24, -7, -11]) [-24, -11, -7, -5, -4] + >>> cocktail_shaker_sort(["elderberry", "banana", "date", "apple", "cherry"]) + ['apple', 'banana', 'cherry', 'date', 'elderberry'] + >>> cocktail_shaker_sort((-4, -5, -24, -7, -11)) + Traceback (most recent call last): + ... + TypeError: 'tuple' object does not support item assignment """ - for i in range(len(unsorted) - 1, 0, -1): + start, end = 0, len(arr) - 1 + + while start < end: swapped = False - for j in range(i, 0, -1): - if unsorted[j] < unsorted[j - 1]: - unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j] + # Pass from left to right + for i in range(start, end): + if arr[i] > arr[i + 1]: + arr[i], arr[i + 1] = arr[i + 1], arr[i] swapped = True - for j in range(i): - if unsorted[j] > unsorted[j + 1]: - unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j] + if not swapped: + break + + end -= 1 # Decrease the end pointer after each pass + + # Pass from right to left + for i in range(end, start, -1): + if arr[i] < arr[i - 1]: + arr[i], arr[i - 1] = arr[i - 1], arr[i] swapped = True if not swapped: break - return unsorted + + start += 1 # Increase the start pointer after each pass + + return arr if __name__ == "__main__": From 579250363db1975440c75f4f6d486b88ff568cdb Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 26 Oct 2023 08:36:53 -0400 Subject: [PATCH 1246/1543] Speed up `dijkstra_bankers_algorithm.py` (#10861) * updating DIRECTORY.md * Rename dijkstra_bankers_algorithm.py * Remove sleep() call * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 +- other/{dijkstra_bankers_algorithm.py => bankers_algorithm.py} | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) rename other/{dijkstra_bankers_algorithm.py => bankers_algorithm.py} (99%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 5f8eabb6df88..d108acf8dcfb 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -774,8 +774,8 @@ ## Other * [Activity Selection](other/activity_selection.py) * [Alternative List 
Arrange](other/alternative_list_arrange.py) + * [Bankers Algorithm](other/bankers_algorithm.py) * [Davis Putnam Logemann Loveland](other/davis_putnam_logemann_loveland.py) - * [Dijkstra Bankers Algorithm](other/dijkstra_bankers_algorithm.py) * [Doomsday](other/doomsday.py) * [Fischer Yates Shuffle](other/fischer_yates_shuffle.py) * [Gauss Easter](other/gauss_easter.py) diff --git a/other/dijkstra_bankers_algorithm.py b/other/bankers_algorithm.py similarity index 99% rename from other/dijkstra_bankers_algorithm.py rename to other/bankers_algorithm.py index be7bceba125d..858eb0b2c524 100644 --- a/other/dijkstra_bankers_algorithm.py +++ b/other/bankers_algorithm.py @@ -17,8 +17,6 @@ from __future__ import annotations -import time - import numpy as np test_claim_vector = [8, 5, 9, 7] @@ -216,7 +214,6 @@ def __pretty_data(self): "Initial Available Resources: " + " ".join(str(x) for x in self.__available_resources()) ) - time.sleep(1) if __name__ == "__main__": From 8adbf47c75e6881f8778fc4e9490628c71cc9fa1 Mon Sep 17 00:00:00 2001 From: Kishan Kumar Rai Date: Thu, 26 Oct 2023 18:21:28 +0530 Subject: [PATCH 1247/1543] Fix Typo & Grammatical Errors (#10980) --- CONTRIBUTING.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bf3420185c1a..096582e45afa 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,20 +2,20 @@ ## Before contributing -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms/community). +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before submitting your pull requests, please ensure that you __read the whole guidelines__. If you have any doubts about the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community on [Gitter](https://gitter.im/TheAlgorithms/community). ## Contributing ### Contributor -We are very happy that you are considering implementing algorithms and data structures for others! This repository is referenced and used by learners from all over the globe. Being one of our contributors, you agree and confirm that: +We are delighted that you are considering implementing algorithms and data structures for others! This repository is referenced and used by learners from all over the globe. By being one of our contributors, you agree and confirm that: -- You did your work - no plagiarism allowed +- You did your work - no plagiarism allowed. - Any plagiarized work will not be merged. -- Your work will be distributed under [MIT License](LICENSE.md) once your pull request is merged -- Your submitted work fulfils or mostly fulfils our styles and standards +- Your work will be distributed under [MIT License](LICENSE.md) once your pull request is merged. +- Your submitted work fulfills or mostly fulfills our styles and standards. -__New implementation__ is welcome! For example, new solutions for a problem, different representations for a graph data structure or algorithm designs with different complexity but __identical implementation__ of an existing implementation is not allowed. 
Please check whether the solution is already implemented or not before submitting your pull request. +__New implementation__ is welcome! For example, new solutions for a problem, different representations for a graph data structure or algorithm designs with different complexity, but __identical implementation__ of an existing implementation is not allowed. Please check whether the solution is already implemented or not before submitting your pull request. __Improving comments__ and __writing proper tests__ are also highly welcome. @@ -23,7 +23,7 @@ __Improving comments__ and __writing proper tests__ are also highly welcome. We appreciate any contribution, from fixing a grammar mistake in a comment to implementing complex algorithms. Please read this section if you are contributing your work. -Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the GitHub Actions output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. +Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button to read through the GitHub Actions output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. #### Issues @@ -58,7 +58,7 @@ Algorithms should: * contain doctests that test both valid and erroneous input values * return all calculation results instead of printing or plotting them -Algorithms in this repo should not be how-to examples for existing Python packages. Instead, they should perform internal calculations or manipulations to convert input values into different output values. Those calculations or manipulations can use data types, classes, or functions of existing Python packages but each algorithm in this repo should add unique value. +Algorithms in this repo should not be how-to examples for existing Python packages. Instead, they should perform internal calculations or manipulations to convert input values into different output values. Those calculations or manipulations can use data types, classes, or functions of existing Python packages but each algorithm in this repo should add unique value. #### Pre-commit plugin Use [pre-commit](https://pre-commit.com/#installation) to automatically format your code to match our coding style: @@ -77,7 +77,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.12+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. +- Please write in Python 3.12+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on the naming of functions, classes, and variables. 
Help your reader by using __descriptive names__ that can help you to remove redundant comments. - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. @@ -145,7 +145,7 @@ We want your work to be readable by others; therefore, we encourage you to note python3 -m doctest -v my_submission.py ``` - The use of the Python builtin `input()` function is __not__ encouraged: + The use of the Python built-in `input()` function is __not__ encouraged: ```python input('Enter your input:') From 34b25c0c769b417e82bc32cd4d3a801637ee57ab Mon Sep 17 00:00:00 2001 From: Tiela Rose Black-Law <26930264+tielarose@users.noreply.github.com> Date: Thu, 26 Oct 2023 05:52:47 -0700 Subject: [PATCH 1248/1543] Add doctest for maths/primelib (#10978) --- maths/primelib.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/maths/primelib.py b/maths/primelib.py index e2d432e1846a..a26b0eaeb328 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -454,6 +454,8 @@ def kg_v(number1, number2): 40 >>> kg_v(824,67) 55208 + >>> kg_v(1, 10) + 10 >>> kg_v(0) Traceback (most recent call last): ... From a8dfd403f6df2275272190a55edb6a739880f6a9 Mon Sep 17 00:00:00 2001 From: Ed Date: Thu, 26 Oct 2023 07:33:42 -0700 Subject: [PATCH 1249/1543] Add new algorithm index_2d_array_in_1d (#10973) * Add new algorithm index_2d_array_in_1d * Add doctest for iter function * The power of dataclasses * Update index_2d_array_in_1d.py --------- Co-authored-by: Christian Clauss --- .../arrays/index_2d_array_in_1d.py | 105 ++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 data_structures/arrays/index_2d_array_in_1d.py diff --git a/data_structures/arrays/index_2d_array_in_1d.py b/data_structures/arrays/index_2d_array_in_1d.py new file mode 100644 index 000000000000..27a9fa5f9121 --- /dev/null +++ b/data_structures/arrays/index_2d_array_in_1d.py @@ -0,0 +1,105 @@ +""" +Retrieves the value of an 0-indexed 1D index from a 2D array. +There are two ways to retrieve value(s): + +1. Index2DArrayIterator(matrix) -> Iterator[int] +This iterator allows you to iterate through a 2D array by passing in the matrix and +calling next(your_iterator). You can also use the iterator in a loop. +Examples: +list(Index2DArrayIterator(matrix)) +set(Index2DArrayIterator(matrix)) +tuple(Index2DArrayIterator(matrix)) +sum(Index2DArrayIterator(matrix)) +-5 in Index2DArrayIterator(matrix) + +2. index_2d_array_in_1d(array: list[int], index: int) -> int +This function allows you to provide a 2D array and a 0-indexed 1D integer index, +and retrieves the integer value at that index. 
+ +Python doctests can be run using this command: +python3 -m doctest -v index_2d_array_in_1d.py +""" + +from collections.abc import Iterator +from dataclasses import dataclass + + +@dataclass +class Index2DArrayIterator: + matrix: list[list[int]] + + def __iter__(self) -> Iterator[int]: + """ + >>> tuple(Index2DArrayIterator([[5], [-523], [-1], [34], [0]])) + (5, -523, -1, 34, 0) + >>> tuple(Index2DArrayIterator([[5, -523, -1], [34, 0]])) + (5, -523, -1, 34, 0) + >>> tuple(Index2DArrayIterator([[5, -523, -1, 34, 0]])) + (5, -523, -1, 34, 0) + >>> t = Index2DArrayIterator([[5, 2, 25], [23, 14, 5], [324, -1, 0]]) + >>> tuple(t) + (5, 2, 25, 23, 14, 5, 324, -1, 0) + >>> list(t) + [5, 2, 25, 23, 14, 5, 324, -1, 0] + >>> sorted(t) + [-1, 0, 2, 5, 5, 14, 23, 25, 324] + >>> tuple(t)[3] + 23 + >>> sum(t) + 397 + >>> -1 in t + True + >>> t = iter(Index2DArrayIterator([[5], [-523], [-1], [34], [0]])) + >>> next(t) + 5 + >>> next(t) + -523 + """ + for row in self.matrix: + yield from row + + +def index_2d_array_in_1d(array: list[list[int]], index: int) -> int: + """ + Retrieves the value of the one-dimensional index from a two-dimensional array. + + Args: + array: A 2D array of integers where all rows are the same size and all + columns are the same size. + index: A 1D index. + + Returns: + int: The 0-indexed value of the 1D index in the array. + + Examples: + >>> index_2d_array_in_1d([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], 5) + 5 + >>> index_2d_array_in_1d([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], -1) + Traceback (most recent call last): + ... + ValueError: index out of range + >>> index_2d_array_in_1d([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], 12) + Traceback (most recent call last): + ... + ValueError: index out of range + >>> index_2d_array_in_1d([[]], 0) + Traceback (most recent call last): + ... + ValueError: no items in array + """ + rows = len(array) + cols = len(array[0]) + + if rows == 0 or cols == 0: + raise ValueError("no items in array") + + if index < 0 or index >= rows * cols: + raise ValueError("index out of range") + + return array[index // cols][index % cols] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From fe4aad0ec94a2d2f28470dd8eaad3ff1bf74c5c8 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Thu, 26 Oct 2023 20:51:45 +0530 Subject: [PATCH 1250/1543] Added doctest & docstring to quadratic_probing.py (#10996) * Added doctest & docstring to quadratic_probing.py * Update quadratic_probing.py * Update quadratic_probing.py --- data_structures/hashing/quadratic_probing.py | 55 ++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/data_structures/hashing/quadratic_probing.py b/data_structures/hashing/quadratic_probing.py index 0930340a347f..2f3401ec8918 100644 --- a/data_structures/hashing/quadratic_probing.py +++ b/data_structures/hashing/quadratic_probing.py @@ -12,6 +12,55 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def _collision_resolution(self, key, data=None): + """ + Quadratic probing is an open addressing scheme used for resolving + collisions in hash table. + + It works by taking the original hash index and adding successive + values of an arbitrary quadratic polynomial until open slot is found. + + Hash + 1², Hash + 2², Hash + 3² .... Hash + n² + + reference: + - https://en.wikipedia.org/wiki/Quadratic_probing + e.g: + 1. 
Create hash table with size 7 + >>> qp = QuadraticProbing(7) + >>> qp.insert_data(90) + >>> qp.insert_data(340) + >>> qp.insert_data(24) + >>> qp.insert_data(45) + >>> qp.insert_data(99) + >>> qp.insert_data(73) + >>> qp.insert_data(7) + >>> qp.keys() + {11: 45, 14: 99, 7: 24, 0: 340, 5: 73, 6: 90, 8: 7} + + 2. Create hash table with size 8 + >>> qp = QuadraticProbing(8) + >>> qp.insert_data(0) + >>> qp.insert_data(999) + >>> qp.insert_data(111) + >>> qp.keys() + {0: 0, 7: 999, 3: 111} + + 3. Try to add three data elements when the size is two + >>> qp = QuadraticProbing(2) + >>> qp.insert_data(0) + >>> qp.insert_data(999) + >>> qp.insert_data(111) + >>> qp.keys() + {0: 0, 4: 999, 1: 111} + + 4. Try to add three data elements when the size is one + >>> qp = QuadraticProbing(1) + >>> qp.insert_data(0) + >>> qp.insert_data(999) + >>> qp.insert_data(111) + >>> qp.keys() + {4: 999, 1: 111} + """ + i = 1 new_key = self.hash_function(key + i * i) @@ -27,3 +76,9 @@ def _collision_resolution(self, key, data=None): break return new_key + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 5987f861926c7560cd46c1e33c3cc2c0506c0ee1 Mon Sep 17 00:00:00 2001 From: Poojan Smart <44301271+PoojanSmart@users.noreply.github.com> Date: Fri, 27 Oct 2023 14:17:24 +0530 Subject: [PATCH 1251/1543] Add automatic differentiation algorithm (#10977) * Added automatic differentiation algorithm * file name changed * Resolved pre commit errors * updated dependency * added noqa for ignoring check * adding typing_extension for adding Self type in __new__ * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * sorted requirement.text dependency * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolved ruff --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- machine_learning/automatic_differentiation.py | 327 ++++++++++++++++++ requirements.txt | 3 +- 2 files changed, 329 insertions(+), 1 deletion(-) create mode 100644 machine_learning/automatic_differentiation.py diff --git a/machine_learning/automatic_differentiation.py b/machine_learning/automatic_differentiation.py new file mode 100644 index 000000000000..cd2e5cdaa782 --- /dev/null +++ b/machine_learning/automatic_differentiation.py @@ -0,0 +1,327 @@ +""" +Demonstration of the Automatic Differentiation (Reverse mode). + +Reference: https://en.wikipedia.org/wiki/Automatic_differentiation + +Author: Poojan Smart +Email: smrtpoojan@gmail.com +""" +from __future__ import annotations + +from collections import defaultdict +from enum import Enum +from types import TracebackType +from typing import Any + +import numpy as np +from typing_extensions import Self # noqa: UP035 + + +class OpType(Enum): + """ + Class represents list of supported operations on Variable for gradient calculation. + """ + + ADD = 0 + SUB = 1 + MUL = 2 + DIV = 3 + MATMUL = 4 + POWER = 5 + NOOP = 6 + + +class Variable: + """ + Class represents n-dimensional object which is used to wrap numpy array on which + operations will be performed and the gradient will be calculated. + + Examples: + >>> Variable(5.0) + Variable(5.0) + >>> Variable([5.0, 2.9]) + Variable([5. 2.9]) + >>> Variable([5.0, 2.9]) + Variable([1.0, 5.5]) + Variable([6. 8.4]) + >>> Variable([[8.0, 10.0]]) + Variable([[ 8. 
10.]]) + """ + + def __init__(self, value: Any) -> None: + self.value = np.array(value) + + # pointers to the operations to which the Variable is input + self.param_to: list[Operation] = [] + # pointer to the operation of which the Variable is output of + self.result_of: Operation = Operation(OpType.NOOP) + + def __repr__(self) -> str: + return f"Variable({self.value})" + + def to_ndarray(self) -> np.ndarray: + return self.value + + def __add__(self, other: Variable) -> Variable: + result = Variable(self.value + other.value) + + with GradientTracker() as tracker: + # if tracker is enabled, computation graph will be updated + if tracker.enabled: + tracker.append(OpType.ADD, params=[self, other], output=result) + return result + + def __sub__(self, other: Variable) -> Variable: + result = Variable(self.value - other.value) + + with GradientTracker() as tracker: + # if tracker is enabled, computation graph will be updated + if tracker.enabled: + tracker.append(OpType.SUB, params=[self, other], output=result) + return result + + def __mul__(self, other: Variable) -> Variable: + result = Variable(self.value * other.value) + + with GradientTracker() as tracker: + # if tracker is enabled, computation graph will be updated + if tracker.enabled: + tracker.append(OpType.MUL, params=[self, other], output=result) + return result + + def __truediv__(self, other: Variable) -> Variable: + result = Variable(self.value / other.value) + + with GradientTracker() as tracker: + # if tracker is enabled, computation graph will be updated + if tracker.enabled: + tracker.append(OpType.DIV, params=[self, other], output=result) + return result + + def __matmul__(self, other: Variable) -> Variable: + result = Variable(self.value @ other.value) + + with GradientTracker() as tracker: + # if tracker is enabled, computation graph will be updated + if tracker.enabled: + tracker.append(OpType.MATMUL, params=[self, other], output=result) + return result + + def __pow__(self, power: int) -> Variable: + result = Variable(self.value**power) + + with GradientTracker() as tracker: + # if tracker is enabled, computation graph will be updated + if tracker.enabled: + tracker.append( + OpType.POWER, + params=[self], + output=result, + other_params={"power": power}, + ) + return result + + def add_param_to(self, param_to: Operation) -> None: + self.param_to.append(param_to) + + def add_result_of(self, result_of: Operation) -> None: + self.result_of = result_of + + +class Operation: + """ + Class represents operation between single or two Variable objects. + Operation objects contains type of operation, pointers to input Variable + objects and pointer to resulting Variable from the operation. + """ + + def __init__( + self, + op_type: OpType, + other_params: dict | None = None, + ) -> None: + self.op_type = op_type + self.other_params = {} if other_params is None else other_params + + def add_params(self, params: list[Variable]) -> None: + self.params = params + + def add_output(self, output: Variable) -> None: + self.output = output + + def __eq__(self, value) -> bool: + return self.op_type == value if isinstance(value, OpType) else False + + +class GradientTracker: + """ + Class contains methods to compute partial derivatives of Variable + based on the computation graph. + + Examples: + + >>> with GradientTracker() as tracker: + ... a = Variable([2.0, 5.0]) + ... b = Variable([1.0, 2.0]) + ... m = Variable([1.0, 2.0]) + ... c = a + b + ... d = a * b + ... 
e = c / d + >>> tracker.gradient(e, a) + array([-0.25, -0.04]) + >>> tracker.gradient(e, b) + array([-1. , -0.25]) + >>> tracker.gradient(e, m) is None + True + + >>> with GradientTracker() as tracker: + ... a = Variable([[2.0, 5.0]]) + ... b = Variable([[1.0], [2.0]]) + ... c = a @ b + >>> tracker.gradient(c, a) + array([[1., 2.]]) + >>> tracker.gradient(c, b) + array([[2.], + [5.]]) + + >>> with GradientTracker() as tracker: + ... a = Variable([[2.0, 5.0]]) + ... b = a ** 3 + >>> tracker.gradient(b, a) + array([[12., 75.]]) + """ + + instance = None + + def __new__(cls) -> Self: + """ + Executes at the creation of class object and returns if + object is already created. This class follows singleton + design pattern. + """ + if cls.instance is None: + cls.instance = super().__new__(cls) + return cls.instance + + def __init__(self) -> None: + self.enabled = False + + def __enter__(self) -> Self: + self.enabled = True + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.enabled = False + + def append( + self, + op_type: OpType, + params: list[Variable], + output: Variable, + other_params: dict | None = None, + ) -> None: + """ + Adds Operation object to the related Variable objects for + creating computational graph for calculating gradients. + + Args: + op_type: Operation type + params: Input parameters to the operation + output: Output variable of the operation + """ + operation = Operation(op_type, other_params=other_params) + param_nodes = [] + for param in params: + param.add_param_to(operation) + param_nodes.append(param) + output.add_result_of(operation) + + operation.add_params(param_nodes) + operation.add_output(output) + + def gradient(self, target: Variable, source: Variable) -> np.ndarray | None: + """ + Reverse accumulation of partial derivatives to calculate gradients + of target variable with respect to source variable. + + Args: + target: target variable for which gradients are calculated. + source: source variable with respect to which the gradients are + calculated. 
+ + Returns: + Gradient of the source variable with respect to the target variable + """ + + # partial derivatives with respect to target + partial_deriv = defaultdict(lambda: 0) + partial_deriv[target] = np.ones_like(target.to_ndarray()) + + # iterating through each operations in the computation graph + operation_queue = [target.result_of] + while len(operation_queue) > 0: + operation = operation_queue.pop() + for param in operation.params: + # as per the chain rule, multiplying partial derivatives + # of variables with respect to the target + dparam_doutput = self.derivative(param, operation) + dparam_dtarget = dparam_doutput * partial_deriv[operation.output] + partial_deriv[param] += dparam_dtarget + + if param.result_of and param.result_of != OpType.NOOP: + operation_queue.append(param.result_of) + + return partial_deriv.get(source) + + def derivative(self, param: Variable, operation: Operation) -> np.ndarray: + """ + Compute the derivative of given operation/function + + Args: + param: variable to be differentiated + operation: function performed on the input variable + + Returns: + Derivative of input variable with respect to the output of + the operation + """ + params = operation.params + + if operation == OpType.ADD: + return np.ones_like(params[0].to_ndarray(), dtype=np.float64) + if operation == OpType.SUB: + if params[0] == param: + return np.ones_like(params[0].to_ndarray(), dtype=np.float64) + return -np.ones_like(params[1].to_ndarray(), dtype=np.float64) + if operation == OpType.MUL: + return ( + params[1].to_ndarray().T + if params[0] == param + else params[0].to_ndarray().T + ) + if operation == OpType.DIV: + if params[0] == param: + return 1 / params[1].to_ndarray() + return -params[0].to_ndarray() / (params[1].to_ndarray() ** 2) + if operation == OpType.MATMUL: + return ( + params[1].to_ndarray().T + if params[0] == param + else params[0].to_ndarray().T + ) + if operation == OpType.POWER: + power = operation.other_params["power"] + return power * (params[0].to_ndarray() ** (power - 1)) + + err_msg = f"invalid operation type: {operation.op_type}" + raise ValueError(err_msg) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/requirements.txt b/requirements.txt index 05d9f1e8c545..8937f6bb0dae 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,5 +19,6 @@ statsmodels sympy tensorflow ; python_version < '3.12' tweepy -xgboost # yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed +typing_extensions +xgboost From 34eb9c529a74c3f3d1b878a1c7ca2529686b41f8 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Fri, 27 Oct 2023 18:06:43 +0530 Subject: [PATCH 1252/1543] Added doctest to hash_table.py (#11023) * Added doctest to hash_table.py * Update hash_table.py * Update hash_table.py * Update hash_table.py * Update hash_table.py * Apply suggestions from code review * Update hash_table.py --------- Co-authored-by: Christian Clauss --- data_structures/hashing/hash_table.py | 113 ++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index 5bf431328da4..7fe57068f6a3 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -52,6 +52,30 @@ def balanced_factor(self): ) def hash_function(self, key): + """ + Generates hash for the given key value + + Examples: + + Creating HashTable with size 5 + >>> ht = HashTable(5) + >>> ht.hash_function(10) 
+ 0 + >>> ht.hash_function(20) + 0 + >>> ht.hash_function(4) + 4 + >>> ht.hash_function(18) + 3 + >>> ht.hash_function(-18) + 2 + >>> ht.hash_function(18.5) + 3.5 + >>> ht.hash_function(0) + 0 + >>> ht.hash_function(-0) + 0 + """ return key % self.size_table def _step_by_step(self, step_ord): @@ -105,10 +129,99 @@ def bulk_insert(self, values): i += 1 def _set_value(self, key, data): + """ + _set_value functions allows to update value at a particular hash + + Examples: + 1. _set_value in HashTable of size 5 + >>> ht = HashTable(5) + >>> ht.insert_data(10) + >>> ht.insert_data(20) + >>> ht.insert_data(30) + >>> ht._set_value(0,15) + >>> ht.keys() + {0: 15, 1: 20, 2: 30} + + 2. _set_value in HashTable of size 2 + >>> ht = HashTable(2) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99) + >>> ht._set_value(3,15) + >>> ht.keys() + {3: 15, 2: 17, 4: 99} + + 3. _set_value in HashTable when hash is not present + >>> ht = HashTable(2) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99) + >>> ht._set_value(0,15) + >>> ht.keys() + {3: 18, 2: 17, 4: 99, 0: 15} + + 4. _set_value in HashTable when multiple hash are not present + >>> ht = HashTable(2) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99) + >>> ht._set_value(0,15) + >>> ht._set_value(1,20) + >>> ht.keys() + {3: 18, 2: 17, 4: 99, 0: 15, 1: 20} + """ self.values[key] = data self._keys[key] = data def _collision_resolution(self, key, data=None): + """ + This method is a type of open addressing which is used for handling collision. + + In this implementation the concept of linear probing has been used. + + The hash table is searched sequentially from the original location of the + hash, if the new hash/location we get is already occupied we check for the next + hash/location. + + references: + - https://en.wikipedia.org/wiki/Linear_probing + + Examples: + 1. The collision will be with keys 18 & 99, so new hash will be created for 99 + >>> ht = HashTable(3) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99) + >>> ht.keys() + {2: 17, 0: 18, 1: 99} + + 2. The collision will be with keys 17 & 101, so new hash + will be created for 101 + >>> ht = HashTable(4) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99) + >>> ht.insert_data(101) + >>> ht.keys() + {1: 17, 2: 18, 3: 99, 0: 101} + + 2. The collision will be with all keys, so new hash will be created for all + >>> ht = HashTable(1) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99) + >>> ht.keys() + {2: 17, 3: 18, 4: 99} + + 3. Trying to insert float key in hash + >>> ht = HashTable(1) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99.99) + Traceback (most recent call last): + ... 
+ TypeError: list indices must be integers or slices, not float + """ new_key = self.hash_function(key + 1) while self.values[new_key] is not None and self.values[new_key] != key: From e4eda145833565443be2e5ed4c805fbaaa9d964e Mon Sep 17 00:00:00 2001 From: Poojan Smart <44301271+PoojanSmart@users.noreply.github.com> Date: Fri, 27 Oct 2023 20:14:33 +0530 Subject: [PATCH 1253/1543] Add perplexity loss algorithm (#11028) --- machine_learning/loss_functions.py | 92 ++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index ea1f390e358a..36a760326f3d 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -379,6 +379,98 @@ def mean_absolute_percentage_error( return np.mean(absolute_percentage_diff) +def perplexity_loss( + y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-7 +) -> float: + """ + Calculate the perplexity for the y_true and y_pred. + + Compute the Perplexity which useful in predicting language model + accuracy in Natural Language Processing (NLP.) + Perplexity is measure of how certain the model in its predictions. + + Perplexity Loss = exp(-1/N (Σ ln(p(x))) + + Reference: + https://en.wikipedia.org/wiki/Perplexity + + Args: + y_true: Actual label encoded sentences of shape (batch_size, sentence_length) + y_pred: Predicted sentences of shape (batch_size, sentence_length, vocab_size) + epsilon: Small floating point number to avoid getting inf for log(0) + + Returns: + Perplexity loss between y_true and y_pred. + + >>> y_true = np.array([[1, 4], [2, 3]]) + >>> y_pred = np.array( + ... [[[0.28, 0.19, 0.21 , 0.15, 0.15], + ... [0.24, 0.19, 0.09, 0.18, 0.27]], + ... [[0.03, 0.26, 0.21, 0.18, 0.30], + ... [0.28, 0.10, 0.33, 0.15, 0.12]]] + ... ) + >>> perplexity_loss(y_true, y_pred) + 5.0247347775367945 + >>> y_true = np.array([[1, 4], [2, 3]]) + >>> y_pred = np.array( + ... [[[0.28, 0.19, 0.21 , 0.15, 0.15], + ... [0.24, 0.19, 0.09, 0.18, 0.27], + ... [0.30, 0.10, 0.20, 0.15, 0.25]], + ... [[0.03, 0.26, 0.21, 0.18, 0.30], + ... [0.28, 0.10, 0.33, 0.15, 0.12], + ... [0.30, 0.10, 0.20, 0.15, 0.25]],] + ... ) + >>> perplexity_loss(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: Sentence length of y_true and y_pred must be equal. + >>> y_true = np.array([[1, 4], [2, 11]]) + >>> y_pred = np.array( + ... [[[0.28, 0.19, 0.21 , 0.15, 0.15], + ... [0.24, 0.19, 0.09, 0.18, 0.27]], + ... [[0.03, 0.26, 0.21, 0.18, 0.30], + ... [0.28, 0.10, 0.33, 0.15, 0.12]]] + ... ) + >>> perplexity_loss(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: Label value must not be greater than vocabulary size. + >>> y_true = np.array([[1, 4]]) + >>> y_pred = np.array( + ... [[[0.28, 0.19, 0.21 , 0.15, 0.15], + ... [0.24, 0.19, 0.09, 0.18, 0.27]], + ... [[0.03, 0.26, 0.21, 0.18, 0.30], + ... [0.28, 0.10, 0.33, 0.15, 0.12]]] + ... ) + >>> perplexity_loss(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: Batch size of y_true and y_pred must be equal. 
+ """ + + vocab_size = y_pred.shape[2] + + if y_true.shape[0] != y_pred.shape[0]: + raise ValueError("Batch size of y_true and y_pred must be equal.") + if y_true.shape[1] != y_pred.shape[1]: + raise ValueError("Sentence length of y_true and y_pred must be equal.") + if np.max(y_true) > vocab_size: + raise ValueError("Label value must not be greater than vocabulary size.") + + # Matrix to select prediction value only for true class + filter_matrix = np.array( + [[list(np.eye(vocab_size)[word]) for word in sentence] for sentence in y_true] + ) + + # Getting the matrix containing prediction for only true class + true_class_pred = np.sum(y_pred * filter_matrix, axis=2).clip(epsilon, 1) + + # Calculating perplexity for each sentence + perp_losses = np.exp(np.negative(np.mean(np.log(true_class_pred), axis=1))) + + return np.mean(perp_losses) + + if __name__ == "__main__": import doctest From f336cca8f8b2989d612068845f147ce885676148 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Fri, 27 Oct 2023 22:10:42 +0530 Subject: [PATCH 1254/1543] Added doctest to double_hash.py (#11020) * Added doctest to double_hash.py * Update double_hash.py --- data_structures/hashing/double_hash.py | 33 ++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py index be21e74cadd0..76c6c86814ec 100644 --- a/data_structures/hashing/double_hash.py +++ b/data_structures/hashing/double_hash.py @@ -35,6 +35,33 @@ def __hash_double_function(self, key, data, increment): return (increment * self.__hash_function_2(key, data)) % self.size_table def _collision_resolution(self, key, data=None): + """ + Examples: + + 1. Try to add three data elements when the size is three + >>> dh = DoubleHash(3) + >>> dh.insert_data(10) + >>> dh.insert_data(20) + >>> dh.insert_data(30) + >>> dh.keys() + {1: 10, 2: 20, 0: 30} + + 2. Try to add three data elements when the size is two + >>> dh = DoubleHash(2) + >>> dh.insert_data(10) + >>> dh.insert_data(20) + >>> dh.insert_data(30) + >>> dh.keys() + {10: 10, 9: 20, 8: 30} + + 3. Try to add three data elements when the size is four + >>> dh = DoubleHash(4) + >>> dh.insert_data(10) + >>> dh.insert_data(20) + >>> dh.insert_data(30) + >>> dh.keys() + {9: 20, 10: 10, 8: 30} + """ i = 1 new_key = self.hash_function(data) @@ -50,3 +77,9 @@ def _collision_resolution(self, key, data=None): i += 1 return new_key + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 185a35589ab14bf27f23266a25d8e1bcced646b2 Mon Sep 17 00:00:00 2001 From: Khushi Shukla Date: Fri, 27 Oct 2023 22:12:34 +0530 Subject: [PATCH 1255/1543] Create monotonic_array.py (#11025) * Create monotonic_array.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update monotonic_array.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/arrays/monotonic_array.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 data_structures/arrays/monotonic_array.py diff --git a/data_structures/arrays/monotonic_array.py b/data_structures/arrays/monotonic_array.py new file mode 100644 index 000000000000..c50a21530814 --- /dev/null +++ b/data_structures/arrays/monotonic_array.py @@ -0,0 +1,23 @@ +# https://leetcode.com/problems/monotonic-array/ +def is_monotonic(nums: list[int]) -> bool: + """ + Check if a list is monotonic. 
+ + >>> is_monotonic([1, 2, 2, 3]) + True + >>> is_monotonic([6, 5, 4, 4]) + True + >>> is_monotonic([1, 3, 2]) + False + """ + return all(nums[i] <= nums[i + 1] for i in range(len(nums) - 1)) or all( + nums[i] >= nums[i + 1] for i in range(len(nums) - 1) + ) + + +# Test the function with your examples +if __name__ == "__main__": + # Test the function with your examples + print(is_monotonic([1, 2, 2, 3])) # Output: True + print(is_monotonic([6, 5, 4, 4])) # Output: True + print(is_monotonic([1, 3, 2])) # Output: False From b0837d39859452ed7bd6e5b7adbdf172f70228bf Mon Sep 17 00:00:00 2001 From: Adam Ross <14985050+R055A@users.noreply.github.com> Date: Fri, 27 Oct 2023 22:10:32 +0200 Subject: [PATCH 1256/1543] Increase code coverage for dijkstra algorithm (#10695) * Increase code coverage for dijkstra algorithm * Add missing code coverage Refactor to pass mypy * Fix missing code coverage * Remove code changes, keep doctest * Remove ALL of the code changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dijkstra_algorithm.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- graphs/dijkstra_algorithm.py | 313 +++++++++++++++++++++++++++++++++-- 1 file changed, 299 insertions(+), 14 deletions(-) diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py index 452138fe904b..2efa2cb634ff 100644 --- a/graphs/dijkstra_algorithm.py +++ b/graphs/dijkstra_algorithm.py @@ -11,35 +11,127 @@ class PriorityQueue: # Based on Min Heap def __init__(self): + """ + Priority queue class constructor method. + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.cur_size + 0 + >>> priority_queue_test.array + [] + >>> priority_queue_test.pos + {} + """ self.cur_size = 0 self.array = [] self.pos = {} # To store the pos of node in array def is_empty(self): + """ + Conditional boolean method to determine if the priority queue is empty or not. + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.is_empty() + True + >>> priority_queue_test.insert((2, 'A')) + >>> priority_queue_test.is_empty() + False + """ return self.cur_size == 0 def min_heapify(self, idx): + """ + Sorts the queue array so that the minimum element is root. + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.cur_size = 3 + >>> priority_queue_test.pos = {'A': 0, 'B': 1, 'C': 2} + + >>> priority_queue_test.array = [(5, 'A'), (10, 'B'), (15, 'C')] + >>> priority_queue_test.min_heapify(0) + Traceback (most recent call last): + ... + TypeError: 'list' object is not callable + >>> priority_queue_test.array + [(5, 'A'), (10, 'B'), (15, 'C')] + + >>> priority_queue_test.array = [(10, 'A'), (5, 'B'), (15, 'C')] + >>> priority_queue_test.min_heapify(0) + Traceback (most recent call last): + ... + TypeError: 'list' object is not callable + >>> priority_queue_test.array + [(10, 'A'), (5, 'B'), (15, 'C')] + + >>> priority_queue_test.array = [(10, 'A'), (15, 'B'), (5, 'C')] + >>> priority_queue_test.min_heapify(0) + Traceback (most recent call last): + ... 
+ TypeError: 'list' object is not callable + >>> priority_queue_test.array + [(10, 'A'), (15, 'B'), (5, 'C')] + + >>> priority_queue_test.array = [(10, 'A'), (5, 'B')] + >>> priority_queue_test.cur_size = len(priority_queue_test.array) + >>> priority_queue_test.pos = {'A': 0, 'B': 1} + >>> priority_queue_test.min_heapify(0) + Traceback (most recent call last): + ... + TypeError: 'list' object is not callable + >>> priority_queue_test.array + [(10, 'A'), (5, 'B')] + """ lc = self.left(idx) rc = self.right(idx) - if lc < self.cur_size and self.array(lc)[0] < self.array(idx)[0]: + if lc < self.cur_size and self.array(lc)[0] < self.array[idx][0]: smallest = lc else: smallest = idx - if rc < self.cur_size and self.array(rc)[0] < self.array(smallest)[0]: + if rc < self.cur_size and self.array(rc)[0] < self.array[smallest][0]: smallest = rc if smallest != idx: self.swap(idx, smallest) self.min_heapify(smallest) def insert(self, tup): - # Inserts a node into the Priority Queue + """ + Inserts a node into the Priority Queue. + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.insert((10, 'A')) + >>> priority_queue_test.array + [(10, 'A')] + >>> priority_queue_test.insert((15, 'B')) + >>> priority_queue_test.array + [(10, 'A'), (15, 'B')] + >>> priority_queue_test.insert((5, 'C')) + >>> priority_queue_test.array + [(5, 'C'), (10, 'A'), (15, 'B')] + """ self.pos[tup[1]] = self.cur_size self.cur_size += 1 self.array.append((sys.maxsize, tup[1])) self.decrease_key((sys.maxsize, tup[1]), tup[0]) def extract_min(self): - # Removes and returns the min element at top of priority queue + """ + Removes and returns the min element at top of priority queue. + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.array = [(10, 'A'), (15, 'B')] + >>> priority_queue_test.cur_size = len(priority_queue_test.array) + >>> priority_queue_test.pos = {'A': 0, 'B': 1} + >>> priority_queue_test.insert((5, 'C')) + >>> priority_queue_test.extract_min() + 'C' + >>> priority_queue_test.array[0] + (15, 'B') + """ min_node = self.array[0][1] self.array[0] = self.array[self.cur_size - 1] self.cur_size -= 1 @@ -48,20 +140,61 @@ def extract_min(self): return min_node def left(self, i): - # returns the index of left child + """ + Returns the index of left child + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.left(0) + 1 + >>> priority_queue_test.left(1) + 3 + """ return 2 * i + 1 def right(self, i): - # returns the index of right child + """ + Returns the index of right child + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.right(0) + 2 + >>> priority_queue_test.right(1) + 4 + """ return 2 * i + 2 def par(self, i): - # returns the index of parent + """ + Returns the index of parent + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.par(1) + 0 + >>> priority_queue_test.par(2) + 1 + >>> priority_queue_test.par(4) + 2 + """ return math.floor(i / 2) def swap(self, i, j): - # swaps array elements at indices i and j - # update the pos{} + """ + Swaps array elements at indices i and j, update the pos{} + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.array = [(10, 'A'), (15, 'B')] + >>> priority_queue_test.cur_size = len(priority_queue_test.array) + >>> priority_queue_test.pos = {'A': 0, 'B': 1} + >>> priority_queue_test.swap(0, 1) + >>> priority_queue_test.array + [(15, 'B'), (10, 'A')] + >>> priority_queue_test.pos + {'A': 1, 'B': 0} + """ 
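For comparison, the standard-library heapq module provides the same min-heap primitives that this PriorityQueue implements by hand, with decrease-key usually replaced by lazy deletion of stale entries. A compact sketch of Dijkstra over an adjacency dict; the graph shape and node labels here are illustrative assumptions, not the patch's API:

import heapq

def dijkstra_heapq(adj: dict[int, list[tuple[int, int]]], src: int) -> dict[int, int]:
    # adj maps node -> list of (neighbour, weight); returns shortest distances from src
    dist = {src: 0}
    heap = [(0, src)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist.get(u, float("inf")):
            continue  # stale entry left behind instead of an explicit decrease_key
        for v, w in adj.get(u, []):
            new_dist = d + w
            if new_dist < dist.get(v, float("inf")):
                dist[v] = new_dist
                heapq.heappush(heap, (new_dist, v))
    return dist

print(dijkstra_heapq({0: [(1, 4), (2, 1)], 2: [(1, 2)]}, 0))  # {0: 0, 1: 3, 2: 1}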
self.pos[self.array[i][1]] = j self.pos[self.array[j][1]] = i temp = self.array[i] @@ -69,6 +202,18 @@ def swap(self, i, j): self.array[j] = temp def decrease_key(self, tup, new_d): + """ + Decrease the key value for a given tuple, assuming the new_d is at most old_d. + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.array = [(10, 'A'), (15, 'B')] + >>> priority_queue_test.cur_size = len(priority_queue_test.array) + >>> priority_queue_test.pos = {'A': 0, 'B': 1} + >>> priority_queue_test.decrease_key((10, 'A'), 5) + >>> priority_queue_test.array + [(5, 'A'), (15, 'B')] + """ idx = self.pos[tup[1]] # assuming the new_d is atmost old_d self.array[idx] = (new_d, tup[1]) @@ -79,6 +224,20 @@ def decrease_key(self, tup, new_d): class Graph: def __init__(self, num): + """ + Graph class constructor + + Examples: + >>> graph_test = Graph(1) + >>> graph_test.num_nodes + 1 + >>> graph_test.dist + [0] + >>> graph_test.par + [-1] + >>> graph_test.adjList + {} + """ self.adjList = {} # To store graph: u -> (v,w) self.num_nodes = num # Number of nodes in graph # To store the distance from source vertex @@ -86,8 +245,16 @@ def __init__(self, num): self.par = [-1] * self.num_nodes # To store the path def add_edge(self, u, v, w): - # Edge going from node u to v and v to u with weight w - # u (w)-> v, v (w) -> u + """ + Add edge going from node u to v and v to u with weight w: u (w)-> v, v (w) -> u + + Examples: + >>> graph_test = Graph(1) + >>> graph_test.add_edge(1, 2, 1) + >>> graph_test.add_edge(2, 3, 2) + >>> graph_test.adjList + {1: [(2, 1)], 2: [(1, 1), (3, 2)], 3: [(2, 2)]} + """ # Check if u already in graph if u in self.adjList: self.adjList[u].append((v, w)) @@ -101,11 +268,99 @@ def add_edge(self, u, v, w): self.adjList[v] = [(u, w)] def show_graph(self): - # u -> v(w) + """ + Show the graph: u -> v(w) + + Examples: + >>> graph_test = Graph(1) + >>> graph_test.add_edge(1, 2, 1) + >>> graph_test.show_graph() + 1 -> 2(1) + 2 -> 1(1) + >>> graph_test.add_edge(2, 3, 2) + >>> graph_test.show_graph() + 1 -> 2(1) + 2 -> 1(1) -> 3(2) + 3 -> 2(2) + """ for u in self.adjList: print(u, "->", " -> ".join(str(f"{v}({w})") for v, w in self.adjList[u])) def dijkstra(self, src): + """ + Dijkstra algorithm + + Examples: + >>> graph_test = Graph(3) + >>> graph_test.add_edge(0, 1, 2) + >>> graph_test.add_edge(1, 2, 2) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 2 + Node 2 has distance: 4 + >>> graph_test.dist + [0, 2, 4] + + >>> graph_test = Graph(2) + >>> graph_test.add_edge(0, 1, 2) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 2 + >>> graph_test.dist + [0, 2] + + >>> graph_test = Graph(3) + >>> graph_test.add_edge(0, 1, 2) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 2 + Node 2 has distance: 0 + >>> graph_test.dist + [0, 2, 0] + + >>> graph_test = Graph(3) + >>> graph_test.add_edge(0, 1, 2) + >>> graph_test.add_edge(1, 2, 2) + >>> graph_test.add_edge(0, 2, 1) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 2 + Node 2 has distance: 1 + >>> graph_test.dist + [0, 2, 1] + + >>> graph_test = Graph(4) + >>> graph_test.add_edge(0, 1, 4) + >>> graph_test.add_edge(1, 2, 2) + >>> graph_test.add_edge(2, 3, 1) + >>> graph_test.add_edge(0, 2, 3) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 4 + Node 2 has distance: 3 + 
Node 3 has distance: 4 + >>> graph_test.dist + [0, 4, 3, 4] + + >>> graph_test = Graph(4) + >>> graph_test.add_edge(0, 1, 4) + >>> graph_test.add_edge(1, 2, 2) + >>> graph_test.add_edge(2, 3, 1) + >>> graph_test.add_edge(0, 2, 7) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 4 + Node 2 has distance: 6 + Node 3 has distance: 7 + >>> graph_test.dist + [0, 4, 6, 7] + """ # Flush old junk values in par[] self.par = [-1] * self.num_nodes # src is the source node @@ -135,13 +390,40 @@ def dijkstra(self, src): self.show_distances(src) def show_distances(self, src): + """ + Show the distances from src to all other nodes in a graph + + Examples: + >>> graph_test = Graph(1) + >>> graph_test.show_distances(0) + Distance from node: 0 + Node 0 has distance: 0 + """ print(f"Distance from node: {src}") for u in range(self.num_nodes): print(f"Node {u} has distance: {self.dist[u]}") def show_path(self, src, dest): - # To show the shortest path from src to dest - # WARNING: Use it *after* calling dijkstra + """ + Shows the shortest path from src to dest. + WARNING: Use it *after* calling dijkstra. + + Examples: + >>> graph_test = Graph(4) + >>> graph_test.add_edge(0, 1, 1) + >>> graph_test.add_edge(1, 2, 2) + >>> graph_test.add_edge(2, 3, 3) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 1 + Node 2 has distance: 3 + Node 3 has distance: 6 + >>> graph_test.show_path(0, 3) # doctest: +NORMALIZE_WHITESPACE + ----Path to reach 3 from 0---- + 0 -> 1 -> 2 -> 3 + Total cost of path: 6 + """ path = [] cost = 0 temp = dest @@ -167,6 +449,9 @@ def show_path(self, src, dest): if __name__ == "__main__": + from doctest import testmod + + testmod() graph = Graph(9) graph.add_edge(0, 1, 4) graph.add_edge(0, 7, 8) From 0eb1825af2114c60792dc5cbd43ca1259ae95a24 Mon Sep 17 00:00:00 2001 From: RaymondDashWu <33266041+RaymondDashWu@users.noreply.github.com> Date: Fri, 27 Oct 2023 13:13:32 -0700 Subject: [PATCH 1257/1543] Tests for odd_even_transposition_parallel (#10926) * [ADD] tests for odd_even_transposition_parallel * adding another test because build failed 6 hrs * comment out all tests to see if it fails * list(range(10)[::-1]) test uncommented * [a, x, c] test uncommented * [1.9, 42.0, 2.8] test uncommented * [False, True, False] test uncommented * [1, 32.0, 9] test uncommented * [1, 32.0, 9] test uncommented * [-442, -98, -554, 266, -491, 985, -53, -529, 82, -429] test uncommented * test non global lock * [DEL] Testing multiple data types. Couldn't get doctest to work * [ADD] Comment on why non global process lock --- sorts/odd_even_transposition_parallel.py | 25 ++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/sorts/odd_even_transposition_parallel.py b/sorts/odd_even_transposition_parallel.py index 9e0d228bdc5b..b8ab46df1e59 100644 --- a/sorts/odd_even_transposition_parallel.py +++ b/sorts/odd_even_transposition_parallel.py @@ -13,7 +13,8 @@ from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time -process_lock = Lock() +# NOTE This breaks testing on build runner. 
May work better locally +# process_lock = Lock() """ The function run by the processes that sorts the list @@ -28,7 +29,7 @@ def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): - global process_lock + process_lock = Lock() # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to @@ -72,6 +73,26 @@ def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): def odd_even_transposition(arr): + """ + >>> odd_even_transposition(list(range(10)[::-1])) == sorted(list(range(10)[::-1])) + True + >>> odd_even_transposition(["a", "x", "c"]) == sorted(["x", "a", "c"]) + True + >>> odd_even_transposition([1.9, 42.0, 2.8]) == sorted([1.9, 42.0, 2.8]) + True + >>> odd_even_transposition([False, True, False]) == sorted([False, False, True]) + True + >>> odd_even_transposition([1, 32.0, 9]) == sorted([False, False, True]) + False + >>> odd_even_transposition([1, 32.0, 9]) == sorted([1.0, 32, 9.0]) + True + >>> unsorted_list = [-442, -98, -554, 266, -491, 985, -53, -529, 82, -429] + >>> odd_even_transposition(unsorted_list) == sorted(unsorted_list) + True + >>> unsorted_list = [-442, -98, -554, 266, -491, 985, -53, -529, 82, -429] + >>> odd_even_transposition(unsorted_list) == sorted(unsorted_list + [1]) + False + """ process_array_ = [] result_pipe = [] # initialize the list of pipes where the values will be retrieved From 5df16f11eb536f76b74d468de33114f25c2c9ac1 Mon Sep 17 00:00:00 2001 From: Tiela Rose Black-Law <26930264+tielarose@users.noreply.github.com> Date: Fri, 27 Oct 2023 14:13:51 -0700 Subject: [PATCH 1258/1543] Add doctest to hashes/hamming_code.py (#10961) --- hashes/hamming_code.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index 8498ca920b36..4a6efcf23f63 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -77,6 +77,10 @@ def emitter_converter(size_par, data): >>> emitter_converter(4, "101010111111") ['1', '1', '1', '1', '0', '1', '0', '0', '1', '0', '1', '1', '1', '1', '1', '1'] + >>> emitter_converter(5, "101010111111") + Traceback (most recent call last): + ... 
+ ValueError: size of parity don't match with size of data """ if size_par + len(data) <= 2**size_par - (len(data) - 1): raise ValueError("size of parity don't match with size of data") From a0e80a74c817c8edd35737d2fbf7d38dd71fa43d Mon Sep 17 00:00:00 2001 From: Sanket Nikam <77570082+SannketNikam@users.noreply.github.com> Date: Sat, 28 Oct 2023 02:47:58 +0530 Subject: [PATCH 1259/1543] Added Gradient Boosting Classifier (#10944) * Added Gradient Boosting Classifier * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gradient_boosting_classifier.py * Update gradient_boosting_classifier.py * Update gradient_boosting_classifier.py * Update gradient_boosting_classifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../gradient_boosting_classifier.py | 118 ++++++++++++++++++ 1 file changed, 118 insertions(+) create mode 100644 machine_learning/gradient_boosting_classifier.py diff --git a/machine_learning/gradient_boosting_classifier.py b/machine_learning/gradient_boosting_classifier.py new file mode 100644 index 000000000000..2902394d8226 --- /dev/null +++ b/machine_learning/gradient_boosting_classifier.py @@ -0,0 +1,118 @@ +import numpy as np +from sklearn.datasets import load_iris +from sklearn.metrics import accuracy_score +from sklearn.model_selection import train_test_split +from sklearn.tree import DecisionTreeRegressor + + +class GradientBoostingClassifier: + def __init__(self, n_estimators: int = 100, learning_rate: float = 0.1) -> None: + """ + Initialize a GradientBoostingClassifier. + + Parameters: + - n_estimators (int): The number of weak learners to train. + - learning_rate (float): The learning rate for updating the model. + + Attributes: + - n_estimators (int): The number of weak learners. + - learning_rate (float): The learning rate. + - models (list): A list to store the trained weak learners. + """ + self.n_estimators = n_estimators + self.learning_rate = learning_rate + self.models: list[tuple[DecisionTreeRegressor, float]] = [] + + def fit(self, features: np.ndarray, target: np.ndarray) -> None: + """ + Fit the GradientBoostingClassifier to the training data. + + Parameters: + - features (np.ndarray): The training features. + - target (np.ndarray): The target values. + + Returns: + None + + >>> import numpy as np + >>> from sklearn.datasets import load_iris + >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1) + >>> iris = load_iris() + >>> X, y = iris.data, iris.target + >>> clf.fit(X, y) + >>> # Check if the model is trained + >>> len(clf.models) == 100 + True + """ + for _ in range(self.n_estimators): + # Calculate the pseudo-residuals + residuals = -self.gradient(target, self.predict(features)) + # Fit a weak learner (e.g., decision tree) to the residuals + model = DecisionTreeRegressor(max_depth=1) + model.fit(features, residuals) + # Update the model by adding the weak learner with a learning rate + self.models.append((model, self.learning_rate)) + + def predict(self, features: np.ndarray) -> np.ndarray: + """ + Make predictions on input data. + + Parameters: + - features (np.ndarray): The input data for making predictions. + + Returns: + - np.ndarray: An array of binary predictions (-1 or 1). 
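Because this class shares its name and fit/predict interface with scikit-learn's own estimator, its behaviour can be sanity-checked against the library version on the same dataset. An illustrative comparison, assuming scikit-learn is installed; SklearnGBC below is the sklearn.ensemble estimator, not the class defined in this patch:

from sklearn.datasets import load_iris
from sklearn.ensemble import GradientBoostingClassifier as SklearnGBC
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
reference = SklearnGBC(n_estimators=100, learning_rate=0.1)
reference.fit(X_train, y_train)
print(f"reference accuracy: {reference.score(X_test, y_test):.2f}")  # typically high on iris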
+ + >>> import numpy as np + >>> from sklearn.datasets import load_iris + >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1) + >>> iris = load_iris() + >>> X, y = iris.data, iris.target + >>> clf.fit(X, y) + >>> y_pred = clf.predict(X) + >>> # Check if the predictions have the correct shape + >>> y_pred.shape == y.shape + True + """ + # Initialize predictions with zeros + predictions = np.zeros(features.shape[0]) + for model, learning_rate in self.models: + predictions += learning_rate * model.predict(features) + return np.sign(predictions) # Convert to binary predictions (-1 or 1) + + def gradient(self, target: np.ndarray, y_pred: np.ndarray) -> np.ndarray: + """ + Calculate the negative gradient (pseudo-residuals) for logistic loss. + + Parameters: + - target (np.ndarray): The target values. + - y_pred (np.ndarray): The predicted values. + + Returns: + - np.ndarray: An array of pseudo-residuals. + + >>> import numpy as np + >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1) + >>> target = np.array([0, 1, 0, 1]) + >>> y_pred = np.array([0.2, 0.8, 0.3, 0.7]) + >>> residuals = clf.gradient(target, y_pred) + >>> # Check if residuals have the correct shape + >>> residuals.shape == target.shape + True + """ + return -target / (1 + np.exp(target * y_pred)) + + +if __name__ == "__main__": + iris = load_iris() + X, y = iris.data, iris.target + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=42 + ) + + clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1) + clf.fit(X_train, y_train) + + y_pred = clf.predict(X_test) + accuracy = accuracy_score(y_test, y_pred) + print(f"Accuracy: {accuracy:.2f}") From 1e1ee00782d300c22e3d7a425ace5d0c7cefb200 Mon Sep 17 00:00:00 2001 From: Manmita Das <34617961+manmita@users.noreply.github.com> Date: Sat, 28 Oct 2023 03:18:15 +0530 Subject: [PATCH 1260/1543] Excess 3 code (#11001) * added excess-3 code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated with fixes * updated with fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update excess_3_code.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- bit_manipulation/excess_3_code.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 bit_manipulation/excess_3_code.py diff --git a/bit_manipulation/excess_3_code.py b/bit_manipulation/excess_3_code.py new file mode 100644 index 000000000000..7beaabd90e8a --- /dev/null +++ b/bit_manipulation/excess_3_code.py @@ -0,0 +1,27 @@ +def excess_3_code(number: int) -> str: + """ + Find excess-3 code of integer base 10. + Add 3 to all digits in a decimal number then convert to a binary-coded decimal. 
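Worked example for the doctests that follow: 20 has digits 2 and 0; adding 3 gives 5 and 3, whose 4-bit BCD nibbles are 0101 and 0011, hence '0b01010011'. A one-line equivalent of the same digit-by-digit construction, shown only as an illustration:

print("0b" + "".join(f"{int(digit) + 3:04b}" for digit in str(20)))  # 0b01010011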
+ https://en.wikipedia.org/wiki/Excess-3 + + >>> excess_3_code(0) + '0b0011' + >>> excess_3_code(3) + '0b0110' + >>> excess_3_code(2) + '0b0101' + >>> excess_3_code(20) + '0b01010011' + >>> excess_3_code(120) + '0b010001010011' + """ + num = "" + for digit in str(max(0, number)): + num += str(bin(int(digit) + 3))[2:].zfill(4) + return "0b" + num + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f2436318cef6dba173282f83def4ebf7bd1d2aba Mon Sep 17 00:00:00 2001 From: Shreya123714 <95279016+Shreya123714@users.noreply.github.com> Date: Sun, 29 Oct 2023 00:02:12 +0530 Subject: [PATCH 1261/1543] Add FuzzySet Class for Triangular Fuzzy Sets (#11036) * Added Opertation for triangular fuzzy sets * Added Sources * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix the bug , for which test were failing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add type hints and improve parameter names * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add Test For fuzzy_operations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix the bug in fuzzy_operations.py * Add test_fuzzy_logic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix the bug in fuzzy_operations.py & deleted test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed the typo error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Again done * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * corrected wrong intendation due to which test fail * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * bug fixed * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add test_fuzzy_logic * Modified fuzzy_operations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed the bug, made a FuzzySet dataclass * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replaced assertEqual of unittest to assert python * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * lets see * Changed test * orderd the import statements * Add docstring and dataclass the FuzzySet * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fuzzy_operations.py * Delete fuzzy_logic/test_fuzzy_logic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * https://en.wikipedia.org/wiki/Fuzzy_set --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- fuzzy_logic/fuzzy_operations.py | 195 ++++++++++++++++++++++++++++++++ 1 file changed, 195 insertions(+) create mode 100644 fuzzy_logic/fuzzy_operations.py diff --git a/fuzzy_logic/fuzzy_operations.py 
b/fuzzy_logic/fuzzy_operations.py new file mode 100644 index 000000000000..e41cd2120049 --- /dev/null +++ b/fuzzy_logic/fuzzy_operations.py @@ -0,0 +1,195 @@ +""" +By @Shreya123714 + +https://en.wikipedia.org/wiki/Fuzzy_set +""" + +from __future__ import annotations + +from dataclasses import dataclass + +import matplotlib.pyplot as plt +import numpy as np + + +@dataclass +class FuzzySet: + """ + A class for representing and manipulating triangular fuzzy sets. + Attributes: + name: The name or label of the fuzzy set. + left_boundary: The left boundary of the fuzzy set. + peak: The peak (central) value of the fuzzy set. + right_boundary: The right boundary of the fuzzy set. + Methods: + membership(x): Calculate the membership value of an input 'x' in the fuzzy set. + union(other): Calculate the union of this fuzzy set with another fuzzy set. + intersection(other): Calculate the intersection of this fuzzy set with another. + complement(): Calculate the complement (negation) of this fuzzy set. + plot(): Plot the membership function of the fuzzy set. + + >>> sheru = FuzzySet("Sheru", 0.4, 1, 0.6) + >>> sheru + FuzzySet(name='Sheru', left_boundary=0.4, peak=1, right_boundary=0.6) + >>> str(sheru) + 'Sheru: [0.4, 1, 0.6]' + + >>> siya = FuzzySet("Siya", 0.5, 1, 0.7) + >>> siya + FuzzySet(name='Siya', left_boundary=0.5, peak=1, right_boundary=0.7) + + # Complement Operation + >>> sheru.complement() + FuzzySet(name='¬Sheru', left_boundary=0.4, peak=0.6, right_boundary=0) + >>> siya.complement() # doctest: +NORMALIZE_WHITESPACE + FuzzySet(name='¬Siya', left_boundary=0.30000000000000004, peak=0.5, + right_boundary=0) + + # Intersection Operation + >>> siya.intersection(sheru) + FuzzySet(name='Siya ∩ Sheru', left_boundary=0.5, peak=0.6, right_boundary=1.0) + + # Membership Operation + >>> sheru.membership(0.5) + 0.16666666666666663 + >>> sheru.membership(0.6) + 0.0 + + # Union Operations + >>> siya.union(sheru) + FuzzySet(name='Siya ∪ Sheru', left_boundary=0.4, peak=0.7, right_boundary=1.0) + """ + + name: str + left_boundary: float + peak: float + right_boundary: float + + def __str__(self) -> str: + """ + >>> FuzzySet("fuzzy_set", 0.1, 0.2, 0.3) + FuzzySet(name='fuzzy_set', left_boundary=0.1, peak=0.2, right_boundary=0.3) + """ + return ( + f"{self.name}: [{self.left_boundary}, {self.peak}, {self.right_boundary}]" + ) + + def complement(self) -> FuzzySet: + """ + Calculate the complement (negation) of this fuzzy set. + Returns: + FuzzySet: A new fuzzy set representing the complement. + + >>> FuzzySet("fuzzy_set", 0.1, 0.2, 0.3).complement() + FuzzySet(name='¬fuzzy_set', left_boundary=0.7, peak=0.9, right_boundary=0.8) + """ + return FuzzySet( + f"¬{self.name}", + 1 - self.right_boundary, + 1 - self.left_boundary, + 1 - self.peak, + ) + + def intersection(self, other) -> FuzzySet: + """ + Calculate the intersection of this fuzzy set + with another fuzzy set. + Args: + other: Another fuzzy set to intersect with. + Returns: + A new fuzzy set representing the intersection. + + >>> FuzzySet("a", 0.1, 0.2, 0.3).intersection(FuzzySet("b", 0.4, 0.5, 0.6)) + FuzzySet(name='a ∩ b', left_boundary=0.4, peak=0.3, right_boundary=0.35) + """ + return FuzzySet( + f"{self.name} ∩ {other.name}", + max(self.left_boundary, other.left_boundary), + min(self.right_boundary, other.right_boundary), + (self.peak + other.peak) / 2, + ) + + def membership(self, x: float) -> float: + """ + Calculate the membership value of an input 'x' in the fuzzy set. + Returns: + The membership value of 'x' in the fuzzy set. 
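For an ordered triangle with left <= peak <= right, the membership value has a standard closed form: min((x - left) / (peak - left), (right - x) / (right - peak)), clipped at 0 outside the support. A standalone sketch reproducing the two membership doctest values that follow; this is an illustration, not the class's method:

def triangular(x: float, left: float, peak: float, right: float) -> float:
    # Piecewise-linear triangular membership, zero outside the open interval (left, right)
    if left < x < right:
        return min((x - left) / (peak - left), (right - x) / (right - peak))
    return 0.0

print(triangular(0.1, 0, 0.5, 1))    # 0.2, as in FuzzySet("A", 0, 0.5, 1).membership(0.1)
print(triangular(0.6, 0.2, 0.7, 1))  # 0.8, as in FuzzySet("B", 0.2, 0.7, 1).membership(0.6)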
+ + >>> a = FuzzySet("a", 0.1, 0.2, 0.3) + >>> a.membership(0.09) + 0.0 + >>> a.membership(0.1) + 0.0 + >>> a.membership(0.11) + 0.09999999999999995 + >>> a.membership(0.4) + 0.0 + >>> FuzzySet("A", 0, 0.5, 1).membership(0.1) + 0.2 + >>> FuzzySet("B", 0.2, 0.7, 1).membership(0.6) + 0.8 + """ + if x <= self.left_boundary or x >= self.right_boundary: + return 0.0 + elif self.left_boundary < x <= self.peak: + return (x - self.left_boundary) / (self.peak - self.left_boundary) + elif self.peak < x < self.right_boundary: + return (self.right_boundary - x) / (self.right_boundary - self.peak) + msg = f"Invalid value {x} for fuzzy set {self}" + raise ValueError(msg) + + def union(self, other) -> FuzzySet: + """ + Calculate the union of this fuzzy set with another fuzzy set. + Args: + other (FuzzySet): Another fuzzy set to union with. + Returns: + FuzzySet: A new fuzzy set representing the union. + + >>> FuzzySet("a", 0.1, 0.2, 0.3).union(FuzzySet("b", 0.4, 0.5, 0.6)) + FuzzySet(name='a ∪ b', left_boundary=0.1, peak=0.6, right_boundary=0.35) + """ + return FuzzySet( + f"{self.name} ∪ {other.name}", + min(self.left_boundary, other.left_boundary), + max(self.right_boundary, other.right_boundary), + (self.peak + other.peak) / 2, + ) + + def plot(self): + """ + Plot the membership function of the fuzzy set. + """ + x = np.linspace(0, 1, 1000) + y = [self.membership(xi) for xi in x] + + plt.plot(x, y, label=self.name) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + a = FuzzySet("A", 0, 0.5, 1) + b = FuzzySet("B", 0.2, 0.7, 1) + + a.plot() + b.plot() + + plt.xlabel("x") + plt.ylabel("Membership") + plt.legend() + plt.show() + + union_ab = a.union(b) + intersection_ab = a.intersection(b) + complement_a = a.complement() + + union_ab.plot() + intersection_ab.plot() + complement_a.plot() + + plt.xlabel("x") + plt.ylabel("Membership") + plt.legend() + plt.show() From b51b833e0a0339421c76ee53662521689b1c9d62 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sun, 29 Oct 2023 01:13:20 +0530 Subject: [PATCH 1262/1543] Added doctest to heap.py (#11059) --- data_structures/heap/heap.py | 75 ++++++++++++++++++++++++++++++++++-- 1 file changed, 72 insertions(+), 3 deletions(-) diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index c1004f349479..29bff3af07e3 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -81,6 +81,9 @@ def right_child_idx(self, parent_idx: int) -> int | None: def max_heapify(self, index: int) -> None: """ correct a single violation of the heap property in a subtree's root. + + It is the function that is responsible for restoring the property + of Max heap i.e the maximum element is always at top. 
""" if index < self.heap_size: violation: int = index @@ -99,7 +102,29 @@ def max_heapify(self, index: int) -> None: self.max_heapify(violation) def build_max_heap(self, collection: Iterable[T]) -> None: - """build max heap from an unsorted array""" + """ + build max heap from an unsorted array + + >>> h = Heap() + >>> h.build_max_heap([20,40,50,20,10]) + >>> h + [50, 40, 20, 20, 10] + + >>> h = Heap() + >>> h.build_max_heap([1,2,3,4,5,6,7,8,9,0]) + >>> h + [9, 8, 7, 4, 5, 6, 3, 2, 1, 0] + + >>> h = Heap() + >>> h.build_max_heap([514,5,61,57,8,99,105]) + >>> h + [514, 57, 105, 5, 8, 99, 61] + + >>> h = Heap() + >>> h.build_max_heap([514,5,61.6,57,8,9.9,105]) + >>> h + [514, 57, 105, 5, 8, 9.9, 61.6] + """ self.h = list(collection) self.heap_size = len(self.h) if self.heap_size > 1: @@ -108,7 +133,24 @@ def build_max_heap(self, collection: Iterable[T]) -> None: self.max_heapify(i) def extract_max(self) -> T: - """get and remove max from heap""" + """ + get and remove max from heap + + >>> h = Heap() + >>> h.build_max_heap([20,40,50,20,10]) + >>> h.extract_max() + 50 + + >>> h = Heap() + >>> h.build_max_heap([514,5,61,57,8,99,105]) + >>> h.extract_max() + 514 + + >>> h = Heap() + >>> h.build_max_heap([1,2,3,4,5,6,7,8,9,0]) + >>> h.extract_max() + 9 + """ if self.heap_size >= 2: me = self.h[0] self.h[0] = self.h.pop(-1) @@ -122,7 +164,34 @@ def extract_max(self) -> T: raise Exception("Empty heap") def insert(self, value: T) -> None: - """insert a new value into the max heap""" + """ + insert a new value into the max heap + + >>> h = Heap() + >>> h.insert(10) + >>> h + [10] + + >>> h = Heap() + >>> h.insert(10) + >>> h.insert(10) + >>> h + [10, 10] + + >>> h = Heap() + >>> h.insert(10) + >>> h.insert(10.1) + >>> h + [10.1, 10] + + >>> h = Heap() + >>> h.insert(0.1) + >>> h.insert(0) + >>> h.insert(9) + >>> h.insert(5) + >>> h + [9, 5, 0.1, 0] + """ self.h.append(value) idx = (self.heap_size - 1) // 2 self.heap_size += 1 From d80ee90178d48e530a2df3966fee3b5e06ec3ecc Mon Sep 17 00:00:00 2001 From: Khushi Shukla Date: Sun, 29 Oct 2023 02:43:14 +0530 Subject: [PATCH 1263/1543] Create crossword_puzzle_solver.py (#11011) * Create crossword_puzzle_solver.py * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * Update backtracking/crossword_puzzle_solver.py * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update 
crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * Apply suggestions from code review * Update crossword_puzzle_solver.py * Update crossword_puzzle_solver.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- backtracking/crossword_puzzle_solver.py | 132 ++++++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 backtracking/crossword_puzzle_solver.py diff --git a/backtracking/crossword_puzzle_solver.py b/backtracking/crossword_puzzle_solver.py new file mode 100644 index 000000000000..b9c01c4efea9 --- /dev/null +++ b/backtracking/crossword_puzzle_solver.py @@ -0,0 +1,132 @@ +# https://www.geeksforgeeks.org/solve-crossword-puzzle/ + + +def is_valid( + puzzle: list[list[str]], word: str, row: int, col: int, vertical: bool +) -> bool: + """ + Check if a word can be placed at the given position. + + >>> puzzle = [ + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''] + ... ] + >>> is_valid(puzzle, 'word', 0, 0, True) + True + >>> puzzle = [ + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''] + ... ] + >>> is_valid(puzzle, 'word', 0, 0, False) + True + """ + for i in range(len(word)): + if vertical: + if row + i >= len(puzzle) or puzzle[row + i][col] != "": + return False + else: + if col + i >= len(puzzle[0]) or puzzle[row][col + i] != "": + return False + return True + + +def place_word( + puzzle: list[list[str]], word: str, row: int, col: int, vertical: bool +) -> None: + """ + Place a word at the given position. + + >>> puzzle = [ + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''] + ... ] + >>> place_word(puzzle, 'word', 0, 0, True) + >>> puzzle + [['w', '', '', ''], ['o', '', '', ''], ['r', '', '', ''], ['d', '', '', '']] + """ + for i, char in enumerate(word): + if vertical: + puzzle[row + i][col] = char + else: + puzzle[row][col + i] = char + + +def remove_word( + puzzle: list[list[str]], word: str, row: int, col: int, vertical: bool +) -> None: + """ + Remove a word from the given position. + + >>> puzzle = [ + ... ['w', '', '', ''], + ... ['o', '', '', ''], + ... ['r', '', '', ''], + ... ['d', '', '', ''] + ... ] + >>> remove_word(puzzle, 'word', 0, 0, True) + >>> puzzle + [['', '', '', ''], ['', '', '', ''], ['', '', '', ''], ['', '', '', '']] + """ + for i in range(len(word)): + if vertical: + puzzle[row + i][col] = "" + else: + puzzle[row][col + i] = "" + + +def solve_crossword(puzzle: list[list[str]], words: list[str]) -> bool: + """ + Solve the crossword puzzle using backtracking. + + >>> puzzle = [ + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''] + ... ] + + >>> words = ['word', 'four', 'more', 'last'] + >>> solve_crossword(puzzle, words) + True + >>> puzzle = [ + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''] + ... 
] + >>> words = ['word', 'four', 'more', 'paragraphs'] + >>> solve_crossword(puzzle, words) + False + """ + for row in range(len(puzzle)): + for col in range(len(puzzle[0])): + if puzzle[row][col] == "": + for word in words: + for vertical in [True, False]: + if is_valid(puzzle, word, row, col, vertical): + place_word(puzzle, word, row, col, vertical) + words.remove(word) + if solve_crossword(puzzle, words): + return True + words.append(word) + remove_word(puzzle, word, row, col, vertical) + return False + return True + + +if __name__ == "__main__": + PUZZLE = [[""] * 3 for _ in range(3)] + WORDS = ["cat", "dog", "car"] + + if solve_crossword(PUZZLE, WORDS): + print("Solution found:") + for row in PUZZLE: + print(" ".join(row)) + else: + print("No solution found:") From 444dfb0a0f7b1e9b0b2f171b426dca26bcd1937a Mon Sep 17 00:00:00 2001 From: Ravi Kumar <119737193+ravi-ivar-7@users.noreply.github.com> Date: Sun, 29 Oct 2023 03:42:17 +0530 Subject: [PATCH 1264/1543] Added adams-bashforth method of order 2, 3, 4, 5 (#10969) * added runge kutta gills method * added adams-bashforth method of order 2, 3, 4, 5 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update adams_bashforth.py * Deleted extraneous file, maths/numerical_analysis/runge_kutta_gills.py * Added doctests to each function adams_bashforth.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update adams_bashforth.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/numerical_analysis/adams_bashforth.py | 230 ++++++++++++++++++++ 1 file changed, 230 insertions(+) create mode 100644 maths/numerical_analysis/adams_bashforth.py diff --git a/maths/numerical_analysis/adams_bashforth.py b/maths/numerical_analysis/adams_bashforth.py new file mode 100644 index 000000000000..d61f022a413d --- /dev/null +++ b/maths/numerical_analysis/adams_bashforth.py @@ -0,0 +1,230 @@ +""" +Use the Adams-Bashforth methods to solve Ordinary Differential Equations. + +https://en.wikipedia.org/wiki/Linear_multistep_method +Author : Ravi Kumar +""" +from collections.abc import Callable +from dataclasses import dataclass + +import numpy as np + + +@dataclass +class AdamsBashforth: + """ + args: + func: An ordinary differential equation (ODE) as function of x and y. + x_initials: List containing initial required values of x. + y_initials: List containing initial required values of y. + step_size: The increment value of x. + x_final: The final value of x. + + Returns: Solution of y at each nodal point + + >>> def f(x, y): + ... return x + y + >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0.2, 1], 0.2, 1) # doctest: +ELLIPSIS + AdamsBashforth(func=..., x_initials=[0, 0.2, 0.4], y_initials=[0, 0.2, 1], step...) + >>> AdamsBashforth(f, [0, 0.2, 1], [0, 0, 0.04], 0.2, 1).step_2() + Traceback (most recent call last): + ... + ValueError: The final value of x must be greater than the initial values of x. + + >>> AdamsBashforth(f, [0, 0.2, 0.3], [0, 0, 0.04], 0.2, 1).step_3() + Traceback (most recent call last): + ... + ValueError: x-values must be equally spaced according to step size. + + >>> AdamsBashforth(f,[0,0.2,0.4,0.6,0.8],[0,0,0.04,0.128,0.307],-0.2,1).step_5() + Traceback (most recent call last): + ... + ValueError: Step size must be positive. 
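The two-step rule used by step_2 is y[k+2] = y[k+1] + h/2 * (3*f(x[k+1], y[k+1]) - f(x[k], y[k])). A minimal standalone sketch of just that recurrence, reproducing the values of the step_2 doctest for f(x, y) = x; the function and argument names here are illustrative, not the class's API:

def adams_bashforth_2(f, x0, x1, y0, y1, h, x_final):
    # Explicit two-step recurrence; the first two y-values must be supplied
    y = [y0, y1]
    for _ in range(int((x_final - x1) / h)):
        y.append(y[-1] + h / 2 * (3 * f(x1, y[-1]) - f(x0, y[-2])))
        x0, x1 = x1, x1 + h
    return y

print(adams_bashforth_2(lambda x, y: x, 0, 0.2, 0, 0, 0.2, 1))
# [0, 0, 0.06, 0.16, 0.3, 0.48] up to floating-point rounding, as in the step_2 doctest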
+ """ + + func: Callable[[float, float], float] + x_initials: list[float] + y_initials: list[float] + step_size: float + x_final: float + + def __post_init__(self) -> None: + if self.x_initials[-1] >= self.x_final: + raise ValueError( + "The final value of x must be greater than the initial values of x." + ) + + if self.step_size <= 0: + raise ValueError("Step size must be positive.") + + if not all( + round(x1 - x0, 10) == self.step_size + for x0, x1 in zip(self.x_initials, self.x_initials[1:]) + ): + raise ValueError("x-values must be equally spaced according to step size.") + + def step_2(self) -> np.ndarray: + """ + >>> def f(x, y): + ... return x + >>> AdamsBashforth(f, [0, 0.2], [0, 0], 0.2, 1).step_2() + array([0. , 0. , 0.06, 0.16, 0.3 , 0.48]) + + >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_2() + Traceback (most recent call last): + ... + ValueError: Insufficient initial points information. + """ + + if len(self.x_initials) != 2 or len(self.y_initials) != 2: + raise ValueError("Insufficient initial points information.") + + x_0, x_1 = self.x_initials[:2] + y_0, y_1 = self.y_initials[:2] + + n = int((self.x_final - x_1) / self.step_size) + y = np.zeros(n + 2) + y[0] = y_0 + y[1] = y_1 + + for i in range(n): + y[i + 2] = y[i + 1] + (self.step_size / 2) * ( + 3 * self.func(x_1, y[i + 1]) - self.func(x_0, y[i]) + ) + x_0 = x_1 + x_1 += self.step_size + + return y + + def step_3(self) -> np.ndarray: + """ + >>> def f(x, y): + ... return x + y + >>> y = AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_3() + >>> y[3] + 0.15533333333333332 + + >>> AdamsBashforth(f, [0, 0.2], [0, 0], 0.2, 1).step_3() + Traceback (most recent call last): + ... + ValueError: Insufficient initial points information. + """ + if len(self.x_initials) != 3 or len(self.y_initials) != 3: + raise ValueError("Insufficient initial points information.") + + x_0, x_1, x_2 = self.x_initials[:3] + y_0, y_1, y_2 = self.y_initials[:3] + + n = int((self.x_final - x_2) / self.step_size) + y = np.zeros(n + 4) + y[0] = y_0 + y[1] = y_1 + y[2] = y_2 + + for i in range(n + 1): + y[i + 3] = y[i + 2] + (self.step_size / 12) * ( + 23 * self.func(x_2, y[i + 2]) + - 16 * self.func(x_1, y[i + 1]) + + 5 * self.func(x_0, y[i]) + ) + x_0 = x_1 + x_1 = x_2 + x_2 += self.step_size + + return y + + def step_4(self) -> np.ndarray: + """ + >>> def f(x,y): + ... return x + y + >>> y = AdamsBashforth( + ... f, [0, 0.2, 0.4, 0.6], [0, 0, 0.04, 0.128], 0.2, 1).step_4() + >>> y[4] + 0.30699999999999994 + >>> y[5] + 0.5771083333333333 + + >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_4() + Traceback (most recent call last): + ... + ValueError: Insufficient initial points information. + """ + + if len(self.x_initials) != 4 or len(self.y_initials) != 4: + raise ValueError("Insufficient initial points information.") + + x_0, x_1, x_2, x_3 = self.x_initials[:4] + y_0, y_1, y_2, y_3 = self.y_initials[:4] + + n = int((self.x_final - x_3) / self.step_size) + y = np.zeros(n + 4) + y[0] = y_0 + y[1] = y_1 + y[2] = y_2 + y[3] = y_3 + + for i in range(n): + y[i + 4] = y[i + 3] + (self.step_size / 24) * ( + 55 * self.func(x_3, y[i + 3]) + - 59 * self.func(x_2, y[i + 2]) + + 37 * self.func(x_1, y[i + 1]) + - 9 * self.func(x_0, y[i]) + ) + x_0 = x_1 + x_1 = x_2 + x_2 = x_3 + x_3 += self.step_size + + return y + + def step_5(self) -> np.ndarray: + """ + >>> def f(x,y): + ... return x + y + >>> y = AdamsBashforth( + ... f, [0, 0.2, 0.4, 0.6, 0.8], [0, 0.02140, 0.02140, 0.22211, 0.42536], + ... 
0.2, 1).step_5() + >>> y[-1] + 0.05436839444444452 + + >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_5() + Traceback (most recent call last): + ... + ValueError: Insufficient initial points information. + """ + + if len(self.x_initials) != 5 or len(self.y_initials) != 5: + raise ValueError("Insufficient initial points information.") + + x_0, x_1, x_2, x_3, x_4 = self.x_initials[:5] + y_0, y_1, y_2, y_3, y_4 = self.y_initials[:5] + + n = int((self.x_final - x_4) / self.step_size) + y = np.zeros(n + 6) + y[0] = y_0 + y[1] = y_1 + y[2] = y_2 + y[3] = y_3 + y[4] = y_4 + + for i in range(n + 1): + y[i + 5] = y[i + 4] + (self.step_size / 720) * ( + 1901 * self.func(x_4, y[i + 4]) + - 2774 * self.func(x_3, y[i + 3]) + - 2616 * self.func(x_2, y[i + 2]) + - 1274 * self.func(x_1, y[i + 1]) + + 251 * self.func(x_0, y[i]) + ) + x_0 = x_1 + x_1 = x_2 + x_2 = x_3 + x_3 = x_4 + x_4 += self.step_size + + return y + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From aa5c97d72c2382ed07c54b17d0b0d74684ca4734 Mon Sep 17 00:00:00 2001 From: Tapas Singhal <98687345+Shocker-lov-t@users.noreply.github.com> Date: Sun, 29 Oct 2023 04:17:46 +0530 Subject: [PATCH 1265/1543] Create ipv4_conversion.py (#11008) * Create ipconversion.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update conversions/ipconversion.py * Update ipconversion.py * Rename ipconversion.py to ipv4_conversion.py * forward_propagation(32, 450_000) # Was 10_000_000 --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- conversions/ipv4_conversion.py | 85 +++++++++++++++++++++++++ neural_network/simple_neural_network.py | 2 +- 2 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 conversions/ipv4_conversion.py diff --git a/conversions/ipv4_conversion.py b/conversions/ipv4_conversion.py new file mode 100644 index 000000000000..862309b7251e --- /dev/null +++ b/conversions/ipv4_conversion.py @@ -0,0 +1,85 @@ +# https://www.geeksforgeeks.org/convert-ip-address-to-integer-and-vice-versa/ + + +def ipv4_to_decimal(ipv4_address: str) -> int: + """ + Convert an IPv4 address to its decimal representation. + + Args: + ip_address: A string representing an IPv4 address (e.g., "192.168.0.1"). + + Returns: + int: The decimal representation of the IP address. + + >>> ipv4_to_decimal("192.168.0.1") + 3232235521 + >>> ipv4_to_decimal("10.0.0.255") + 167772415 + >>> ipv4_to_decimal("10.0.255") + Traceback (most recent call last): + ... + ValueError: Invalid IPv4 address format + >>> ipv4_to_decimal("10.0.0.256") + Traceback (most recent call last): + ... + ValueError: Invalid IPv4 octet 256 + """ + + octets = [int(octet) for octet in ipv4_address.split(".")] + if len(octets) != 4: + raise ValueError("Invalid IPv4 address format") + + decimal_ipv4 = 0 + for octet in octets: + if not 0 <= octet <= 255: + raise ValueError(f"Invalid IPv4 octet {octet}") # noqa: EM102 + decimal_ipv4 = (decimal_ipv4 << 8) + int(octet) + + return decimal_ipv4 + + +def alt_ipv4_to_decimal(ipv4_address: str) -> int: + """ + >>> alt_ipv4_to_decimal("192.168.0.1") + 3232235521 + >>> alt_ipv4_to_decimal("10.0.0.255") + 167772415 + """ + return int("0x" + "".join(f"{int(i):02x}" for i in ipv4_address.split(".")), 16) + + +def decimal_to_ipv4(decimal_ipv4: int) -> str: + """ + Convert a decimal representation of an IP address to its IPv4 format. 
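Both conversion directions are also available through the standard library's ipaddress module, which makes a convenient cross-check for the doctest values; illustrative only, not used by the patch:

import ipaddress

print(int(ipaddress.IPv4Address("192.168.0.1")))  # 3232235521
print(str(ipaddress.IPv4Address(3232235521)))     # 192.168.0.1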
+ + Args: + decimal_ipv4: An integer representing the decimal IP address. + + Returns: + The IPv4 representation of the decimal IP address. + + >>> decimal_to_ipv4(3232235521) + '192.168.0.1' + >>> decimal_to_ipv4(167772415) + '10.0.0.255' + >>> decimal_to_ipv4(-1) + Traceback (most recent call last): + ... + ValueError: Invalid decimal IPv4 address + """ + + if not (0 <= decimal_ipv4 <= 4294967295): + raise ValueError("Invalid decimal IPv4 address") + + ip_parts = [] + for _ in range(4): + ip_parts.append(str(decimal_ipv4 & 255)) + decimal_ipv4 >>= 8 + + return ".".join(reversed(ip_parts)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/neural_network/simple_neural_network.py b/neural_network/simple_neural_network.py index f2a3234873b5..8751a38908cf 100644 --- a/neural_network/simple_neural_network.py +++ b/neural_network/simple_neural_network.py @@ -28,7 +28,7 @@ def sigmoid_function(value: float, deriv: bool = False) -> float: def forward_propagation(expected: int, number_propagations: int) -> float: """Return the value found after the forward propagation training. - >>> res = forward_propagation(32, 10000000) + >>> res = forward_propagation(32, 450_000) # Was 10_000_000 >>> res > 31 and res < 33 True From e3eb9daba41512280dd54205c532874ccd2f1b91 Mon Sep 17 00:00:00 2001 From: Ed Date: Sat, 28 Oct 2023 15:48:50 -0700 Subject: [PATCH 1266/1543] Add bitap_string_match algo (#11060) * Add bitap_string_match algo * Fix types * Fix spelling and add ignore word * Add suggested changes and change return type * Resolve suggestions --- pyproject.toml | 2 +- strings/bitap_string_match.py | 79 +++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 1 deletion(-) create mode 100644 strings/bitap_string_match.py diff --git a/pyproject.toml b/pyproject.toml index 790a328b3564..5d27142d16e2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -135,5 +135,5 @@ omit = [ sort = "Cover" [tool.codespell] -ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" +ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" diff --git a/strings/bitap_string_match.py b/strings/bitap_string_match.py new file mode 100644 index 000000000000..bd8a0f0d73ec --- /dev/null +++ b/strings/bitap_string_match.py @@ -0,0 +1,79 @@ +""" +Bitap exact string matching +https://en.wikipedia.org/wiki/Bitap_algorithm + +Searches for a pattern inside text, and returns the index of the first occurrence +of the pattern. Both text and pattern consist of lowercase alphabetical characters only. + +Complexity: O(m*n) + n = length of text + m = length of pattern + +Python doctests can be run using this command: +python3 -m doctest -v bitap_string_match.py +""" + + +def bitap_string_match(text: str, pattern: str) -> int: + """ + Retrieves the index of the first occurrence of pattern in text. + + Args: + text: A string consisting only of lowercase alphabetical characters. + pattern: A string consisting only of lowercase alphabetical characters. + + Returns: + int: The index where pattern first occurs. Return -1 if not found. 
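Since bitap performs exact matching here, its doctest results can be cross-checked against str.find, which returns the same index or -1; the bit-parallel state register is what distinguishes the algorithm, not the answer it gives. A trivial cross-check, illustrative only:

print("abdabababc".find("ababc"))     # 5, as in the first doctest below
print("abdabababc".find("fofosdfo"))  # -1 when the pattern does not occur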
+ + >>> bitap_string_match('abdabababc', 'ababc') + 5 + >>> bitap_string_match('aaaaaaaaaaaaaaaaaa', 'a') + 0 + >>> bitap_string_match('zxywsijdfosdfnso', 'zxywsijdfosdfnso') + 0 + >>> bitap_string_match('abdabababc', '') + 0 + >>> bitap_string_match('abdabababc', 'c') + 9 + >>> bitap_string_match('abdabababc', 'fofosdfo') + -1 + >>> bitap_string_match('abdab', 'fofosdfo') + -1 + """ + if not pattern: + return 0 + m = len(pattern) + if m > len(text): + return -1 + + # Initial state of bit string 1110 + state = ~1 + # Bit = 0 if character appears at index, and 1 otherwise + pattern_mask: list[int] = [~0] * 27 # 1111 + + for i, char in enumerate(pattern): + # For the pattern mask for this character, set the bit to 0 for each i + # the character appears. + pattern_index: int = ord(char) - ord("a") + pattern_mask[pattern_index] &= ~(1 << i) + + for i, char in enumerate(text): + text_index = ord(char) - ord("a") + # If this character does not appear in pattern, it's pattern mask is 1111. + # Performing a bitwise OR between state and 1111 will reset the state to 1111 + # and start searching the start of pattern again. + state |= pattern_mask[text_index] + state <<= 1 + + # If the mth bit (counting right to left) of the state is 0, then we have + # found pattern in text + if (state & (1 << m)) == 0: + return i - m + 1 + + return -1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 50195616817983e8c820daf41c252ecbabac0ae2 Mon Sep 17 00:00:00 2001 From: Tapas Singhal <98687345+Shocker-lov-t@users.noreply.github.com> Date: Sun, 29 Oct 2023 13:12:32 +0530 Subject: [PATCH 1267/1543] Create multiplexer.py (#11064) * Create multiplexer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Doctests should show how the algorithm fails --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- boolean_algebra/multiplexer.py | 42 ++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 boolean_algebra/multiplexer.py diff --git a/boolean_algebra/multiplexer.py b/boolean_algebra/multiplexer.py new file mode 100644 index 000000000000..7e65c785c829 --- /dev/null +++ b/boolean_algebra/multiplexer.py @@ -0,0 +1,42 @@ +def mux(input0: int, input1: int, select: int) -> int: + """ + Implement a 2-to-1 Multiplexer. + + :param input0: The first input value (0 or 1). + :param input1: The second input value (0 or 1). + :param select: The select signal (0 or 1) to choose between input0 and input1. + :return: The output based on the select signal. input1 if select else input0. + + https://www.electrically4u.com/solved-problems-on-multiplexer + https://en.wikipedia.org/wiki/Multiplexer + + >>> mux(0, 1, 0) + 0 + >>> mux(0, 1, 1) + 1 + >>> mux(1, 0, 0) + 1 + >>> mux(1, 0, 1) + 0 + >>> mux(2, 1, 0) + Traceback (most recent call last): + ... + ValueError: Inputs and select signal must be 0 or 1 + >>> mux(0, -1, 0) + Traceback (most recent call last): + ... + ValueError: Inputs and select signal must be 0 or 1 + >>> mux(0, 1, 1.1) + Traceback (most recent call last): + ... 
+ ValueError: Inputs and select signal must be 0 or 1 + """ + if all(i in (0, 1) for i in (input0, input1, select)): + return input1 if select else input0 + raise ValueError("Inputs and select signal must be 0 or 1") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From bad39cd15439f4adeab06707c7ceab2de85adb7f Mon Sep 17 00:00:00 2001 From: ojas wani <52542740+ojas-wani@users.noreply.github.com> Date: Sun, 29 Oct 2023 02:37:07 -0700 Subject: [PATCH 1268/1543] Add more doctest to intro_sort.py #9943 (#11068) * added laplacian_filter file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * required changes to laplacian file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian_filter.py * update laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * changed laplacian_filter.py * changed laplacian_filter.py * add matrix_multiplication.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update matrix_multiplication * update matrix_multiplication * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * make changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * update * updates * resolve conflict * add doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * make changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian.py * add doctests * more doctest added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * try to resolve ruff error * try to reslve ruff error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci 
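A side note on the 2-to-1 multiplexer added in patch 1267 above: wider multiplexers are normally built by cascading it. A minimal sketch, assuming the repository root is on sys.path so the new module is importable; mux4 is a hypothetical helper, not part of the repository:

    from boolean_algebra.multiplexer import mux

    def mux4(i0: int, i1: int, i2: int, i3: int, select_lo: int, select_hi: int) -> int:
        # Two first-level muxes pick within each pair; select_hi picks the pair.
        return mux(mux(i0, i1, select_lo), mux(i2, i3, select_lo), select_hi)

    assert mux4(0, 1, 0, 1, select_lo=1, select_hi=0) == 1  # routes i1
    assert mux4(0, 1, 1, 0, select_lo=0, select_hi=1) == 1  # routes i2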
* update doctest * attemp - resolve ruff error * resolve build error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolve build issue * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update build * doctest update * update doctest * update doctest * update doctest * fix ruff error * file location changed * Delete digital_image_processing/filters/laplacian_filter.py * Create laplacian_filter.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Add doctest to median_of_3 * add doctest to median_of_3 function * Update intro_sort.py * Update sorts/intro_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/intro_sort.py | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/sorts/intro_sort.py b/sorts/intro_sort.py index f0e3645adbb7..908d2886533a 100644 --- a/sorts/intro_sort.py +++ b/sorts/intro_sort.py @@ -1,5 +1,5 @@ """ -Introspective Sort is hybrid sort (Quick Sort + Heap Sort + Insertion Sort) +Introspective Sort is a hybrid sort (Quick Sort + Heap Sort + Insertion Sort) if the size of the list is under 16, use insertion sort https://en.wikipedia.org/wiki/Introsort """ @@ -9,7 +9,6 @@ def insertion_sort(array: list, start: int = 0, end: int = 0) -> list: """ >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] - >>> insertion_sort(array, 0, len(array)) [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] """ @@ -27,8 +26,7 @@ def insertion_sort(array: list, start: int = 0, end: int = 0) -> list: def heapify(array: list, index: int, heap_size: int) -> None: # Max Heap """ >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] - - >>> heapify(array, len(array) // 2 ,len(array)) + >>> heapify(array, len(array) // 2, len(array)) """ largest = index left_index = 2 * index + 1 # Left Node @@ -47,9 +45,7 @@ def heapify(array: list, index: int, heap_size: int) -> None: # Max Heap def heap_sort(array: list) -> list: """ - >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] - - >>> heap_sort(array) + >>> heap_sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] """ n = len(array) @@ -69,9 +65,14 @@ def median_of_3( ) -> int: """ >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] - - >>> median_of_3(array, 0, 0 + ((len(array) - 0) // 2) + 1, len(array) - 1) + >>> median_of_3(array, 0, ((len(array) - 0) // 2) + 1, len(array) - 1) 12 + >>> array = [13, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] + >>> median_of_3(array, 0, ((len(array) - 0) // 2) + 1, len(array) - 1) + 13 + >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 15, 14, 27, 79, 23, 45, 14, 16] + >>> median_of_3(array, 0, ((len(array) - 0) // 2) + 1, len(array) - 1) + 14 """ if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] @@ -88,7 +89,6 @@ def median_of_3( def partition(array: list, low: int, high: int, pivot: int) -> int: """ >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] - >>> partition(array, 0, len(array), 12) 8 """ @@ -115,22 +115,16 @@ def sort(array: list) -> list: Examples: >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 
14, 56, 27, 79, 23, 45, 14, 12]) [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] - >>> sort([-1, -5, -3, -13, -44]) [-44, -13, -5, -3, -1] - >>> sort([]) [] - >>> sort([5]) [5] - >>> sort([-3, 0, -7, 6, 23, -34]) [-34, -7, -3, 0, 6, 23] - >>> sort([1.7, 1.0, 3.3, 2.1, 0.3 ]) [0.3, 1.0, 1.7, 2.1, 3.3] - >>> sort(['d', 'a', 'b', 'e', 'c']) ['a', 'b', 'c', 'd', 'e'] """ @@ -146,9 +140,7 @@ def intro_sort( ) -> list: """ >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] - >>> max_depth = 2 * math.ceil(math.log2(len(array))) - >>> intro_sort(array, 0, len(array), 16, max_depth) [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] """ @@ -167,7 +159,6 @@ def intro_sort( import doctest doctest.testmod() - user_input = input("Enter numbers separated by a comma : ").strip() unsorted = [float(item) for item in user_input.split(",")] - print(sort(unsorted)) + print(f"{sort(unsorted) = }") From adb13a106389aa2382a6315e9f008f9f855a89f8 Mon Sep 17 00:00:00 2001 From: SEIKH NABAB UDDIN <93948993+nababuddin@users.noreply.github.com> Date: Sun, 29 Oct 2023 15:22:50 +0530 Subject: [PATCH 1269/1543] Update instagram_pic.py (#10957) * Update instagram_pic.py * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update instagram_pic.py * Update instagram_pic.py * Update instagram_pic.py * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fast fail instead of nested ifs and PEP8: Keep try/except blocks small * Update instagram_pic.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- web_programming/instagram_pic.py | 51 +++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 10 deletions(-) diff --git a/web_programming/instagram_pic.py b/web_programming/instagram_pic.py index 8521da674d7d..2630c8659232 100644 --- a/web_programming/instagram_pic.py +++ b/web_programming/instagram_pic.py @@ -3,14 +3,45 @@ import requests from bs4 import BeautifulSoup -if __name__ == "__main__": - url = input("Enter image url: ").strip() - print(f"Downloading image from {url} ...") - soup = BeautifulSoup(requests.get(url).content, "html.parser") - # The image URL is in the content field of the first meta tag with property og:image - image_url = soup.find("meta", {"property": "og:image"})["content"] - image_data = requests.get(image_url).content + +def download_image(url: str) -> str: + """ + Download an image from a given URL by scraping the 'og:image' meta tag. + + Parameters: + url: The URL to scrape. + + Returns: + A message indicating the result of the operation. 
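As a hedged aside, the og:image lookup this function performs can be exercised offline against an inline HTML snippet; the URL below is a placeholder, not a real image:

    from bs4 import BeautifulSoup

    html = '<meta property="og:image" content="https://example.com/pic.jpg"/>'
    tag = BeautifulSoup(html, "html.parser").find("meta", {"property": "og:image"})
    print(tag.get("content"))  # https://example.com/pic.jpg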
+ """ + try: + response = requests.get(url) + response.raise_for_status() + except requests.exceptions.RequestException as e: + return f"An error occurred during the HTTP request to {url}: {e!r}" + + soup = BeautifulSoup(response.text, "html.parser") + image_meta_tag = soup.find("meta", {"property": "og:image"}) + if not image_meta_tag: + return "No meta tag with property 'og:image' was found." + + image_url = image_meta_tag.get("content") + if not image_url: + return f"Image URL not found in meta tag {image_meta_tag}." + + try: + image_data = requests.get(image_url).content + except requests.exceptions.RequestException as e: + return f"An error occurred during the HTTP request to {image_url}: {e!r}" + if not image_data: + return f"Failed to download the image from {image_url}." + file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg" - with open(file_name, "wb") as fp: - fp.write(image_data) - print(f"Done. Image saved to disk as {file_name}.") + with open(file_name, "wb") as out_file: + out_file.write(image_data) + return f"Image downloaded and saved in the file {file_name}" + + +if __name__ == "__main__": + url = input("Enter image URL: ").strip() or "https://www.instagram.com" + print(f"download_image({url}): {download_image(url)}") From 8217f9bd35e5975e3660217b37b2aac62c1280da Mon Sep 17 00:00:00 2001 From: Tapas Singhal <98687345+Shocker-lov-t@users.noreply.github.com> Date: Sun, 29 Oct 2023 15:55:39 +0530 Subject: [PATCH 1270/1543] Create find_previous_power_of_two.py (#11004) * Create find_previous_power_of_two.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update find_previous_power_of_two.py This change avoids the unnecessary left shift operation * Update find_previous_power_of_two.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../find_previous_power_of_two.py | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 bit_manipulation/find_previous_power_of_two.py diff --git a/bit_manipulation/find_previous_power_of_two.py b/bit_manipulation/find_previous_power_of_two.py new file mode 100644 index 000000000000..8ac74ac98478 --- /dev/null +++ b/bit_manipulation/find_previous_power_of_two.py @@ -0,0 +1,30 @@ +def find_previous_power_of_two(number: int) -> int: + """ + Find the largest power of two that is less than or equal to a given integer. + https://stackoverflow.com/questions/1322510 + + >>> [find_previous_power_of_two(i) for i in range(18)] + [0, 1, 2, 2, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16, 16] + >>> find_previous_power_of_two(-5) + Traceback (most recent call last): + ... + ValueError: Input must be a non-negative integer + >>> find_previous_power_of_two(10.5) + Traceback (most recent call last): + ... 
+ ValueError: Input must be a non-negative integer + """ + if not isinstance(number, int) or number < 0: + raise ValueError("Input must be a non-negative integer") + if number == 0: + return 0 + power = 1 + while power <= number: + power <<= 1 # Equivalent to multiplying by 2 + return power >> 1 if number > 1 else 1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 403d3b8a29e754b8f3bbb1000a54fee42a87341b Mon Sep 17 00:00:00 2001 From: Aqib Javid Bhat Date: Sun, 29 Oct 2023 16:28:28 +0530 Subject: [PATCH 1271/1543] Add Integer Square Root Algorithm (#10949) * Add Integer Square Root Algorithm * Update integer_square_root.py * Update integer_square_root.py --------- Co-authored-by: Christian Clauss --- maths/integer_square_root.py | 73 ++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 maths/integer_square_root.py diff --git a/maths/integer_square_root.py b/maths/integer_square_root.py new file mode 100644 index 000000000000..27e874a43c79 --- /dev/null +++ b/maths/integer_square_root.py @@ -0,0 +1,73 @@ +""" +Integer Square Root Algorithm -- An efficient method to calculate the square root of a +non-negative integer 'num' rounded down to the nearest integer. It uses a binary search +approach to find the integer square root without using any built-in exponent functions +or operators. +* https://en.wikipedia.org/wiki/Integer_square_root +* https://docs.python.org/3/library/math.html#math.isqrt +Note: + - This algorithm is designed for non-negative integers only. + - The result is rounded down to the nearest integer. + - The algorithm has a time complexity of O(log(x)). + - Original algorithm idea based on binary search. +""" + + +def integer_square_root(num: int) -> int: + """ + Returns the integer square root of a non-negative integer num. + Args: + num: A non-negative integer. + Returns: + The integer square root of num. + Raises: + ValueError: If num is not an integer or is negative. + >>> [integer_square_root(i) for i in range(18)] + [0, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4] + >>> integer_square_root(625) + 25 + >>> integer_square_root(2_147_483_647) + 46340 + >>> from math import isqrt + >>> all(integer_square_root(i) == isqrt(i) for i in range(20)) + True + >>> integer_square_root(-1) + Traceback (most recent call last): + ... + ValueError: num must be non-negative integer + >>> integer_square_root(1.5) + Traceback (most recent call last): + ... + ValueError: num must be non-negative integer + >>> integer_square_root("0") + Traceback (most recent call last): + ... 
+ ValueError: num must be non-negative integer + """ + if not isinstance(num, int) or num < 0: + raise ValueError("num must be non-negative integer") + + if num < 2: + return num + + left_bound = 0 + right_bound = num // 2 + + while left_bound <= right_bound: + mid = left_bound + (right_bound - left_bound) // 2 + mid_squared = mid * mid + if mid_squared == num: + return mid + + if mid_squared < num: + left_bound = mid + 1 + else: + right_bound = mid - 1 + + return right_bound + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From cc22d0b0bac9fec13913ba07bc67d58c06482c83 Mon Sep 17 00:00:00 2001 From: aayushsoni4 <120650736+aayushsoni4@users.noreply.github.com> Date: Sun, 29 Oct 2023 18:25:31 +0530 Subject: [PATCH 1272/1543] Generate parentheses (#10903) * Add: Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * Add: Distinct Subsequences * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Distinct Subsequences * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Distinct Subsequences * Changes made in Distinct Subsequences * Changes made in Distinct Subsequences * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed Distinct Subsequences * Add: Generate Parentheses * Add: Generate Parentheses * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add: Generate Parentheses * Add: Generate Parentheses * [pre-commit.ci] auto fixes from pre-commit.com 
hooks for more information, see https://pre-commit.ci * Add: Generate Parentheses * Add: Generate Parentheses * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update backtracking/generate_parentheses.py * Delete matrix/matrix_prefix_sum.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- backtracking/generate_parentheses.py | 77 ++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 backtracking/generate_parentheses.py diff --git a/backtracking/generate_parentheses.py b/backtracking/generate_parentheses.py new file mode 100644 index 000000000000..18c21e2a9b51 --- /dev/null +++ b/backtracking/generate_parentheses.py @@ -0,0 +1,77 @@ +""" +author: Aayush Soni +Given n pairs of parentheses, write a function to generate all +combinations of well-formed parentheses. +Input: n = 2 +Output: ["(())","()()"] +Leetcode link: https://leetcode.com/problems/generate-parentheses/description/ +""" + + +def backtrack( + partial: str, open_count: int, close_count: int, n: int, result: list[str] +) -> None: + """ + Generate valid combinations of balanced parentheses using recursion. + + :param partial: A string representing the current combination. + :param open_count: An integer representing the count of open parentheses. + :param close_count: An integer representing the count of close parentheses. + :param n: An integer representing the total number of pairs. + :param result: A list to store valid combinations. + :return: None + + This function uses recursion to explore all possible combinations, + ensuring that at each step, the parentheses remain balanced. + + Example: + >>> result = [] + >>> backtrack("", 0, 0, 2, result) + >>> result + ['(())', '()()'] + """ + if len(partial) == 2 * n: + # When the combination is complete, add it to the result. + result.append(partial) + return + + if open_count < n: + # If we can add an open parenthesis, do so, and recurse. + backtrack(partial + "(", open_count + 1, close_count, n, result) + + if close_count < open_count: + # If we can add a close parenthesis (it won't make the combination invalid), + # do so, and recurse. + backtrack(partial + ")", open_count, close_count + 1, n, result) + + +def generate_parenthesis(n: int) -> list[str]: + """ + Generate valid combinations of balanced parentheses for a given n. + + :param n: An integer representing the number of pairs of parentheses. + :return: A list of strings with valid combinations. + + This function uses a recursive approach to generate the combinations. + + Time Complexity: O(2^(2n)) - In the worst case, we have 2^(2n) combinations. + Space Complexity: O(n) - where 'n' is the number of pairs. + + Example 1: + >>> generate_parenthesis(3) + ['((()))', '(()())', '(())()', '()(())', '()()()'] + + Example 2: + >>> generate_parenthesis(1) + ['()'] + """ + + result: list[str] = [] + backtrack("", 0, 0, n, result) + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 7c1dfec08644e4034717844b139e8db948706ccc Mon Sep 17 00:00:00 2001 From: Farzad Hayat Date: Sun, 29 Oct 2023 22:57:04 +1000 Subject: [PATCH 1273/1543] XOR Cipher: doctests and bug fixes (#10840) * Fixed bug with key modulus wrapping. Should be wrapping on 256, not 255. * Fixed bug with incorrect assertion type in decrypt function. 
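Returning to backtracking/generate_parentheses.py from patch 1272 above: the number of well-formed strings for n pairs is the nth Catalan number, which gives a cheap sanity check. A sketch only, assuming the repository root is on sys.path:

    from math import comb

    from backtracking.generate_parentheses import generate_parenthesis

    for n in range(1, 6):
        assert len(generate_parenthesis(n)) == comb(2 * n, n) // (n + 1)
    # 1, 2, 5, 14 and 42 combinations for n = 1 through 5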
* Added doctests for 4 out of 6 methods --- ciphers/xor_cipher.py | 91 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 82 insertions(+), 9 deletions(-) diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py index 559036d305c5..e30955d41ff1 100644 --- a/ciphers/xor_cipher.py +++ b/ciphers/xor_cipher.py @@ -35,6 +35,22 @@ def encrypt(self, content: str, key: int) -> list[str]: output: encrypted string 'content' as a list of chars if key not passed the method uses the key by the constructor. otherwise key = 1 + + Empty list + >>> XORCipher().encrypt("", 5) + [] + + One key + >>> XORCipher().encrypt("hallo welt", 1) + ['i', '`', 'm', 'm', 'n', '!', 'v', 'd', 'm', 'u'] + + Normal key + >>> XORCipher().encrypt("HALLO WELT", 32) + ['h', 'a', 'l', 'l', 'o', '\\x00', 'w', 'e', 'l', 't'] + + Key greater than 255 + >>> XORCipher().encrypt("hallo welt", 256) + ['h', 'a', 'l', 'l', 'o', ' ', 'w', 'e', 'l', 't'] """ # precondition @@ -44,7 +60,7 @@ def encrypt(self, content: str, key: int) -> list[str]: key = key or self.__key or 1 # make sure key is an appropriate size - key %= 255 + key %= 256 return [chr(ord(ch) ^ key) for ch in content] @@ -54,16 +70,32 @@ def decrypt(self, content: str, key: int) -> list[str]: output: decrypted string 'content' as a list of chars if key not passed the method uses the key by the constructor. otherwise key = 1 + + Empty list + >>> XORCipher().decrypt("", 5) + [] + + One key + >>> XORCipher().decrypt("hallo welt", 1) + ['i', '`', 'm', 'm', 'n', '!', 'v', 'd', 'm', 'u'] + + Normal key + >>> XORCipher().decrypt("HALLO WELT", 32) + ['h', 'a', 'l', 'l', 'o', '\\x00', 'w', 'e', 'l', 't'] + + Key greater than 255 + >>> XORCipher().decrypt("hallo welt", 256) + ['h', 'a', 'l', 'l', 'o', ' ', 'w', 'e', 'l', 't'] """ # precondition assert isinstance(key, int) - assert isinstance(content, list) + assert isinstance(content, str) key = key or self.__key or 1 # make sure key is an appropriate size - key %= 255 + key %= 256 return [chr(ord(ch) ^ key) for ch in content] @@ -73,6 +105,22 @@ def encrypt_string(self, content: str, key: int = 0) -> str: output: encrypted string 'content' if key not passed the method uses the key by the constructor. otherwise key = 1 + + Empty list + >>> XORCipher().encrypt_string("", 5) + '' + + One key + >>> XORCipher().encrypt_string("hallo welt", 1) + 'i`mmn!vdmu' + + Normal key + >>> XORCipher().encrypt_string("HALLO WELT", 32) + 'hallo\\x00welt' + + Key greater than 255 + >>> XORCipher().encrypt_string("hallo welt", 256) + 'hallo welt' """ # precondition @@ -81,9 +129,8 @@ def encrypt_string(self, content: str, key: int = 0) -> str: key = key or self.__key or 1 - # make sure key can be any size - while key > 255: - key -= 255 + # make sure key is an appropriate size + key %= 256 # This will be returned ans = "" @@ -99,6 +146,22 @@ def decrypt_string(self, content: str, key: int = 0) -> str: output: decrypted string 'content' if key not passed the method uses the key by the constructor. 
otherwise key = 1 + + Empty list + >>> XORCipher().decrypt_string("", 5) + '' + + One key + >>> XORCipher().decrypt_string("hallo welt", 1) + 'i`mmn!vdmu' + + Normal key + >>> XORCipher().decrypt_string("HALLO WELT", 32) + 'hallo\\x00welt' + + Key greater than 255 + >>> XORCipher().decrypt_string("hallo welt", 256) + 'hallo welt' """ # precondition @@ -107,9 +170,8 @@ def decrypt_string(self, content: str, key: int = 0) -> str: key = key or self.__key or 1 - # make sure key can be any size - while key > 255: - key -= 255 + # make sure key is an appropriate size + key %= 256 # This will be returned ans = "" @@ -132,6 +194,9 @@ def encrypt_file(self, file: str, key: int = 0) -> bool: assert isinstance(file, str) assert isinstance(key, int) + # make sure key is an appropriate size + key %= 256 + try: with open(file) as fin, open("encrypt.out", "w+") as fout: # actual encrypt-process @@ -156,6 +221,9 @@ def decrypt_file(self, file: str, key: int) -> bool: assert isinstance(file, str) assert isinstance(key, int) + # make sure key is an appropriate size + key %= 256 + try: with open(file) as fin, open("decrypt.out", "w+") as fout: # actual encrypt-process @@ -168,6 +236,11 @@ def decrypt_file(self, file: str, key: int) -> bool: return True +if __name__ == "__main__": + from doctest import testmod + + testmod() + # Tests # crypt = XORCipher() # key = 67 From 6b588e4d44085d8f2a60b023f09558442ea7ae91 Mon Sep 17 00:00:00 2001 From: Kento <75509362+nkstonks@users.noreply.github.com> Date: Sun, 29 Oct 2023 23:57:40 +1100 Subject: [PATCH 1274/1543] Added doctests for fibonacci.py (#10836) * added other possible cases * added test for correct output of truth table * few fibonacci tests added * updating DIRECTORY.md * Update nor_gate.py * updating DIRECTORY.md * Update fibonacci.py removed whitespace * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: = <=> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/fibonacci.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index e810add69dc7..8cdd6cdb160e 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -81,6 +81,18 @@ def fib_recursive(n: int) -> list[int]: def fib_recursive_term(i: int) -> int: """ Calculates the i-th (0-indexed) Fibonacci number using recursion + >>> fib_recursive_term(0) + 0 + >>> fib_recursive_term(1) + 1 + >>> fib_recursive_term(5) + 5 + >>> fib_recursive_term(10) + 55 + >>> fib_recursive_term(-1) + Traceback (most recent call last): + ... 
+ Exception: n is negative """ if i < 0: raise Exception("n is negative") @@ -197,6 +209,10 @@ def fib_binet(n: int) -> list[int]: if __name__ == "__main__": + import doctest + + doctest.testmod() + num = 30 time_func(fib_iterative, num) time_func(fib_recursive, num) # Around 3s runtime From d59cf1734fd8216d90fa21ed579e18a41b63755f Mon Sep 17 00:00:00 2001 From: Arshdeep Singh Sachdeva Date: Sun, 29 Oct 2023 07:55:37 -0700 Subject: [PATCH 1275/1543] Add running key cipher (#10834) * Add running key cipher * update running key cipher add doctests and hints * Add test case * Update return value * range(len()) is almost always a hint to use enumerate() --------- Co-authored-by: Christian Clauss --- ciphers/running_key_cipher.py | 75 +++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 ciphers/running_key_cipher.py diff --git a/ciphers/running_key_cipher.py b/ciphers/running_key_cipher.py new file mode 100644 index 000000000000..6bda417be898 --- /dev/null +++ b/ciphers/running_key_cipher.py @@ -0,0 +1,75 @@ +""" +https://en.wikipedia.org/wiki/Running_key_cipher +""" + + +def running_key_encrypt(key: str, plaintext: str) -> str: + """ + Encrypts the plaintext using the Running Key Cipher. + + :param key: The running key (long piece of text). + :param plaintext: The plaintext to be encrypted. + :return: The ciphertext. + """ + plaintext = plaintext.replace(" ", "").upper() + key = key.replace(" ", "").upper() + key_length = len(key) + ciphertext = [] + ord_a = ord("A") + + for i, char in enumerate(plaintext): + p = ord(char) - ord_a + k = ord(key[i % key_length]) - ord_a + c = (p + k) % 26 + ciphertext.append(chr(c + ord_a)) + + return "".join(ciphertext) + + +def running_key_decrypt(key: str, ciphertext: str) -> str: + """ + Decrypts the ciphertext using the Running Key Cipher. + + :param key: The running key (long piece of text). + :param ciphertext: The ciphertext to be decrypted. + :return: The plaintext. + """ + ciphertext = ciphertext.replace(" ", "").upper() + key = key.replace(" ", "").upper() + key_length = len(key) + plaintext = [] + ord_a = ord("A") + + for i, char in enumerate(ciphertext): + c = ord(char) - ord_a + k = ord(key[i % key_length]) - ord_a + p = (c - k) % 26 + plaintext.append(chr(p + ord_a)) + + return "".join(plaintext) + + +def test_running_key_encrypt() -> None: + """ + >>> key = "How does the duck know that? said Victor" + >>> ciphertext = running_key_encrypt(key, "DEFEND THIS") + >>> running_key_decrypt(key, ciphertext) == "DEFENDTHIS" + True + """ + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + test_running_key_encrypt() + + plaintext = input("Enter the plaintext: ").upper() + print(f"\n{plaintext = }") + + key = "How does the duck know that? 
said Victor" + encrypted_text = running_key_encrypt(key, plaintext) + print(f"{encrypted_text = }") + + decrypted_text = running_key_decrypt(key, encrypted_text) + print(f"{decrypted_text = }") From 3ad90cea831ee12d9c168735cbd6fab3acac446f Mon Sep 17 00:00:00 2001 From: dragon <51738561+08183080@users.noreply.github.com> Date: Sun, 29 Oct 2023 23:40:01 +0800 Subject: [PATCH 1276/1543] add a yield method to fibonaci (#10826) * add a yiled method to fibonaci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fibonaci * Update fibonacci.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fibonacci.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/fibonacci.py | 79 ++++++++++++++++++++++++++++++---------------- 1 file changed, 51 insertions(+), 28 deletions(-) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index 8cdd6cdb160e..927700b0418e 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -1,4 +1,3 @@ -# fibonacci.py """ Calculates the Fibonacci sequence using iteration, recursion, memoization, and a simplified form of Binet's formula @@ -9,14 +8,12 @@ NOTE 2: the Binet's formula function is much more limited in the size of inputs that it can handle due to the size limitations of Python floats -RESULTS: (n = 20) -fib_iterative runtime: 0.0055 ms -fib_recursive runtime: 6.5627 ms -fib_memoization runtime: 0.0107 ms -fib_binet runtime: 0.0174 ms +See benchmark numbers in __main__ for performance comparisons/ +https://en.wikipedia.org/wiki/Fibonacci_number for more information """ import functools +from collections.abc import Iterator from math import sqrt from time import time @@ -35,6 +32,31 @@ def time_func(func, *args, **kwargs): return output +def fib_iterative_yield(n: int) -> Iterator[int]: + """ + Calculates the first n (1-indexed) Fibonacci numbers using iteration with yield + >>> list(fib_iterative_yield(0)) + [0] + >>> tuple(fib_iterative_yield(1)) + (0, 1) + >>> tuple(fib_iterative_yield(5)) + (0, 1, 1, 2, 3, 5) + >>> tuple(fib_iterative_yield(10)) + (0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55) + >>> tuple(fib_iterative_yield(-1)) + Traceback (most recent call last): + ... + ValueError: n is negative + """ + if n < 0: + raise ValueError("n is negative") + a, b = 0, 1 + yield a + for _ in range(n): + yield b + a, b = b, a + b + + def fib_iterative(n: int) -> list[int]: """ Calculates the first n (0-indexed) Fibonacci numbers using iteration @@ -49,10 +71,10 @@ def fib_iterative(n: int) -> list[int]: >>> fib_iterative(-1) Traceback (most recent call last): ... - Exception: n is negative + ValueError: n is negative """ if n < 0: - raise Exception("n is negative") + raise ValueError("n is negative") if n == 0: return [0] fib = [0, 1] @@ -75,7 +97,7 @@ def fib_recursive(n: int) -> list[int]: >>> fib_iterative(-1) Traceback (most recent call last): ... 
- Exception: n is negative + ValueError: n is negative """ def fib_recursive_term(i: int) -> int: @@ -95,13 +117,13 @@ def fib_recursive_term(i: int) -> int: Exception: n is negative """ if i < 0: - raise Exception("n is negative") + raise ValueError("n is negative") if i < 2: return i return fib_recursive_term(i - 1) + fib_recursive_term(i - 2) if n < 0: - raise Exception("n is negative") + raise ValueError("n is negative") return [fib_recursive_term(i) for i in range(n + 1)] @@ -119,7 +141,7 @@ def fib_recursive_cached(n: int) -> list[int]: >>> fib_iterative(-1) Traceback (most recent call last): ... - Exception: n is negative + ValueError: n is negative """ @functools.cache @@ -128,13 +150,13 @@ def fib_recursive_term(i: int) -> int: Calculates the i-th (0-indexed) Fibonacci number using recursion """ if i < 0: - raise Exception("n is negative") + raise ValueError("n is negative") if i < 2: return i return fib_recursive_term(i - 1) + fib_recursive_term(i - 2) if n < 0: - raise Exception("n is negative") + raise ValueError("n is negative") return [fib_recursive_term(i) for i in range(n + 1)] @@ -152,10 +174,10 @@ def fib_memoization(n: int) -> list[int]: >>> fib_iterative(-1) Traceback (most recent call last): ... - Exception: n is negative + ValueError: n is negative """ if n < 0: - raise Exception("n is negative") + raise ValueError("n is negative") # Cache must be outside recursuive function # other it will reset every time it calls itself. cache: dict[int, int] = {0: 0, 1: 1, 2: 1} # Prefilled cache @@ -193,29 +215,30 @@ def fib_binet(n: int) -> list[int]: >>> fib_binet(-1) Traceback (most recent call last): ... - Exception: n is negative + ValueError: n is negative >>> fib_binet(1475) Traceback (most recent call last): ... - Exception: n is too large + ValueError: n is too large """ if n < 0: - raise Exception("n is negative") + raise ValueError("n is negative") if n >= 1475: - raise Exception("n is too large") + raise ValueError("n is too large") sqrt_5 = sqrt(5) phi = (1 + sqrt_5) / 2 return [round(phi**i / sqrt_5) for i in range(n + 1)] if __name__ == "__main__": - import doctest - - doctest.testmod() + from doctest import testmod + testmod() + # Time on an M1 MacBook Pro -- Fastest to slowest num = 30 - time_func(fib_iterative, num) - time_func(fib_recursive, num) # Around 3s runtime - time_func(fib_recursive_cached, num) # Around 0ms runtime - time_func(fib_memoization, num) - time_func(fib_binet, num) + time_func(fib_iterative_yield, num) # 0.0012 ms + time_func(fib_iterative, num) # 0.0031 ms + time_func(fib_binet, num) # 0.0062 ms + time_func(fib_memoization, num) # 0.0100 ms + time_func(fib_recursive_cached, num) # 0.0153 ms + time_func(fib_recursive, num) # 257.0910 ms From 67c85ee289b66f9c8ac02c6732240965eec879a2 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sun, 29 Oct 2023 21:31:54 +0530 Subject: [PATCH 1277/1543] Added doctest to hash_map.py (#11082) * Added doctest to heap.py * Added doctest to hash_map.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update hash_map.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/hashing/hash_map.py | 111 +++++++++++++++++++++++++++- 1 file changed, 110 insertions(+), 1 deletion(-) diff --git a/data_structures/hashing/hash_map.py b/data_structures/hashing/hash_map.py index 1dfcc8bbf906..1689e07afd9f 100644 --- 
a/data_structures/hashing/hash_map.py +++ b/data_structures/hashing/hash_map.py @@ -54,6 +54,14 @@ def _get_next_ind(self, ind: int) -> int: Get next index. Implements linear open addressing. + >>> HashMap(5)._get_next_ind(3) + 4 + >>> HashMap(5)._get_next_ind(5) + 1 + >>> HashMap(5)._get_next_ind(6) + 2 + >>> HashMap(5)._get_next_ind(9) + 0 """ return (ind + 1) % len(self._buckets) @@ -82,6 +90,14 @@ def _is_full(self) -> bool: Return true if we have reached safe capacity. So we need to increase the number of buckets to avoid collisions. + + >>> hm = HashMap(2) + >>> hm._add_item(1, 10) + >>> hm._add_item(2, 20) + >>> hm._is_full() + True + >>> HashMap(2)._is_full() + False """ limit = len(self._buckets) * self._capacity_factor return len(self) >= int(limit) @@ -114,17 +130,104 @@ def _iterate_buckets(self, key: KEY) -> Iterator[int]: ind = self._get_next_ind(ind) def _add_item(self, key: KEY, val: VAL) -> None: + """ + Try to add 3 elements when the size is 5 + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm._add_item(2, 20) + >>> hm._add_item(3, 30) + >>> hm + HashMap(1: 10, 2: 20, 3: 30) + + Try to add 3 elements when the size is 5 + >>> hm = HashMap(5) + >>> hm._add_item(-5, 10) + >>> hm._add_item(6, 30) + >>> hm._add_item(-7, 20) + >>> hm + HashMap(-5: 10, 6: 30, -7: 20) + + Try to add 3 elements when size is 1 + >>> hm = HashMap(1) + >>> hm._add_item(10, 13.2) + >>> hm._add_item(6, 5.26) + >>> hm._add_item(7, 5.155) + >>> hm + HashMap(10: 13.2) + + Trying to add an element with a key that is a floating point value + >>> hm = HashMap(5) + >>> hm._add_item(1.5, 10) + >>> hm + HashMap(1.5: 10) + + 5. Trying to add an item with the same key + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm._add_item(1, 20) + >>> hm + HashMap(1: 20) + """ for ind in self._iterate_buckets(key): if self._try_set(ind, key, val): break def __setitem__(self, key: KEY, val: VAL) -> None: + """ + 1. Changing value of item whose key is present + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm.__setitem__(1, 20) + >>> hm + HashMap(1: 20) + + 2. Changing value of item whose key is not present + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm.__setitem__(0, 20) + >>> hm + HashMap(0: 20, 1: 10) + + 3. Changing the value of the same item multiple times + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm.__setitem__(1, 20) + >>> hm.__setitem__(1, 30) + >>> hm + HashMap(1: 30) + """ if self._is_full(): self._size_up() self._add_item(key, val) def __delitem__(self, key: KEY) -> None: + """ + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm._add_item(2, 20) + >>> hm._add_item(3, 30) + >>> hm.__delitem__(3) + >>> hm + HashMap(1: 10, 2: 20) + >>> hm = HashMap(5) + >>> hm._add_item(-5, 10) + >>> hm._add_item(6, 30) + >>> hm._add_item(-7, 20) + >>> hm.__delitem__(-5) + >>> hm + HashMap(6: 30, -7: 20) + + # Trying to remove a non-existing item + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm._add_item(2, 20) + >>> hm._add_item(3, 30) + >>> hm.__delitem__(4) + Traceback (most recent call last): + ... 
+ KeyError: 4 + """ for ind in self._iterate_buckets(key): item = self._buckets[ind] if item is None: @@ -156,7 +259,13 @@ def __iter__(self) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__(self) -> str: - val_string = " ,".join( + val_string = ", ".join( f"{item.key}: {item.val}" for item in self._buckets if item ) return f"HashMap({val_string})" + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From be60f42a5fe29c0e83a049a803f17992bf66be47 Mon Sep 17 00:00:00 2001 From: Aqib Javid Bhat Date: Sun, 29 Oct 2023 22:12:41 +0530 Subject: [PATCH 1278/1543] Add Josephus Problem (#10928) * Add Josephus Problem * Add iterative implementation of Josephus Problem * Add descriptive variable names * Update maths/josephus_problem.py * Update josephus_problem.py --------- Co-authored-by: Christian Clauss --- maths/josephus_problem.py | 130 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+) create mode 100644 maths/josephus_problem.py diff --git a/maths/josephus_problem.py b/maths/josephus_problem.py new file mode 100644 index 000000000000..271292ba1d9f --- /dev/null +++ b/maths/josephus_problem.py @@ -0,0 +1,130 @@ +""" +The Josephus problem is a famous theoretical problem related to a certain +counting-out game. This module provides functions to solve the Josephus problem +for num_people and a step_size. + +The Josephus problem is defined as follows: +- num_people are standing in a circle. +- Starting with a specified person, you count around the circle, + skipping a fixed number of people (step_size). +- The person at which you stop counting is eliminated from the circle. +- The counting continues until only one person remains. + +For more information about the Josephus problem, refer to: +https://en.wikipedia.org/wiki/Josephus_problem +""" + + +def josephus_recursive(num_people: int, step_size: int) -> int: + """ + Solve the Josephus problem for num_people and a step_size recursively. + + Args: + num_people: A positive integer representing the number of people. + step_size: A positive integer representing the step size for elimination. + + Returns: + The position of the last person remaining. + + Raises: + ValueError: If num_people or step_size is not a positive integer. + + Examples: + >>> josephus_recursive(7, 3) + 3 + >>> josephus_recursive(10, 2) + 4 + >>> josephus_recursive(0, 2) + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. + >>> josephus_recursive(1.9, 2) + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. + >>> josephus_recursive(-2, 2) + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. + >>> josephus_recursive(7, 0) + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. + >>> josephus_recursive(7, -2) + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. + >>> josephus_recursive(1_000, 0.01) + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. + >>> josephus_recursive("cat", "dog") + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. 
+ """ + if ( + not isinstance(num_people, int) + or not isinstance(step_size, int) + or num_people <= 0 + or step_size <= 0 + ): + raise ValueError("num_people or step_size is not a positive integer.") + + if num_people == 1: + return 0 + + return (josephus_recursive(num_people - 1, step_size) + step_size) % num_people + + +def find_winner(num_people: int, step_size: int) -> int: + """ + Find the winner of the Josephus problem for num_people and a step_size. + + Args: + num_people (int): Number of people. + step_size (int): Step size for elimination. + + Returns: + int: The position of the last person remaining (1-based index). + + Examples: + >>> find_winner(7, 3) + 4 + >>> find_winner(10, 2) + 5 + """ + return josephus_recursive(num_people, step_size) + 1 + + +def josephus_iterative(num_people: int, step_size: int) -> int: + """ + Solve the Josephus problem for num_people and a step_size iteratively. + + Args: + num_people (int): The number of people in the circle. + step_size (int): The number of steps to take before eliminating someone. + + Returns: + int: The position of the last person standing. + + Examples: + >>> josephus_iterative(5, 2) + 3 + >>> josephus_iterative(7, 3) + 4 + """ + circle = list(range(1, num_people + 1)) + current = 0 + + while len(circle) > 1: + current = (current + step_size - 1) % len(circle) + circle.pop(current) + + return circle[0] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From eafdb8b86697eb9dbdc03916679719dff2f6425a Mon Sep 17 00:00:00 2001 From: dahhou ilyas <110790236+dahhou-ilyas@users.noreply.github.com> Date: Sun, 29 Oct 2023 18:24:37 +0100 Subject: [PATCH 1279/1543] Dahhou ilyas (#10058) * add new programme in dynamique programming wildcard_matching * add new programme in dynamique programming wildcard_matching * fix bug * fix * fix * fix * fix * fix * fix error recrusion * fix error recrusion * bug fix * add doctest * The power of enumerate() --------- Co-authored-by: Christian Clauss --- dynamic_programming/wildcard_matching.py | 92 +++++++++++++----------- 1 file changed, 49 insertions(+), 43 deletions(-) diff --git a/dynamic_programming/wildcard_matching.py b/dynamic_programming/wildcard_matching.py index 4ffc4b5d46aa..d9a1392720bd 100644 --- a/dynamic_programming/wildcard_matching.py +++ b/dynamic_programming/wildcard_matching.py @@ -1,62 +1,68 @@ """ -Given two strings, an input string and a pattern, -this program checks if the input string matches the pattern. +Author : ilyas dahhou +Date : Oct 7, 2023 -Example : -input_string = "baaabab" -pattern = "*****ba*****ab" -Output: True +Task: +Given an input string and a pattern, implement wildcard pattern matching with support +for '?' and '*' where: +'?' matches any single character. +'*' matches any sequence of characters (including the empty sequence). +The matching should cover the entire input string (not partial). -This problem can be solved using the concept of "DYNAMIC PROGRAMMING". - -We create a 2D boolean matrix, where each entry match_matrix[i][j] is True -if the first i characters in input_string match the first j characters -of pattern. We initialize the first row and first column based on specific -rules, then fill up the rest of the matrix using a bottom-up dynamic -programming approach. - -The amount of match that will be determined is equal to match_matrix[n][m] -where n and m are lengths of the input_string and pattern respectively. 
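To make the table described above concrete, a tiny worked instance (illustrative only) for string "aa" and pattern "*"; entry [i][j] is True when the first i characters of the string match the first j characters of the pattern:

    string, pattern = "aa", "*"
    dp = [[False] * (len(pattern) + 1) for _ in range(len(string) + 1)]
    dp[0][0] = True
    for j, p_char in enumerate(pattern, 1):  # a leading '*' matches the empty string
        if p_char == "*":
            dp[0][j] = dp[0][j - 1]
    for i, s_char in enumerate(string, 1):
        for j, p_char in enumerate(pattern, 1):
            if p_char in (s_char, "?"):
                dp[i][j] = dp[i - 1][j - 1]
            elif p_char == "*":
                dp[i][j] = dp[i - 1][j] or dp[i][j - 1]
    print(dp)  # [[True, True], [False, True], [False, True]]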
+Runtime complexity: O(m * n) +The implementation was tested on the +leetcode: https://leetcode.com/problems/wildcard-matching/ """ -def is_pattern_match(input_string: str, pattern: str) -> bool: +def is_match(string: str, pattern: str) -> bool: """ - >>> is_pattern_match('baaabab','*****ba*****ba') + >>> is_match("", "") + True + >>> is_match("aa", "a") False - >>> is_pattern_match('baaabab','*****ba*****ab') + >>> is_match("abc", "abc") + True + >>> is_match("abc", "*c") + True + >>> is_match("abc", "a*") True - >>> is_pattern_match('aa','*') + >>> is_match("abc", "*a*") + True + >>> is_match("abc", "?b?") + True + >>> is_match("abc", "*?") + True + >>> is_match("abc", "a*d") + False + >>> is_match("abc", "a*c?") + False + >>> is_match('baaabab','*****ba*****ba') + False + >>> is_match('baaabab','*****ba*****ab') + True + >>> is_match('aa','*') True """ - - input_length = len(input_string) - pattern_length = len(pattern) - - match_matrix = [[False] * (pattern_length + 1) for _ in range(input_length + 1)] - - match_matrix[0][0] = True - - for j in range(1, pattern_length + 1): - if pattern[j - 1] == "*": - match_matrix[0][j] = match_matrix[0][j - 1] - - for i in range(1, input_length + 1): - for j in range(1, pattern_length + 1): - if pattern[j - 1] in ("?", input_string[i - 1]): - match_matrix[i][j] = match_matrix[i - 1][j - 1] + dp = [[False] * (len(pattern) + 1) for _ in string + "1"] + dp[0][0] = True + # Fill in the first row + for j, char in enumerate(pattern, 1): + if char == "*": + dp[0][j] = dp[0][j - 1] + # Fill in the rest of the DP table + for i, s_char in enumerate(string, 1): + for j, p_char in enumerate(pattern, 1): + if p_char in (s_char, "?"): + dp[i][j] = dp[i - 1][j - 1] elif pattern[j - 1] == "*": - match_matrix[i][j] = match_matrix[i - 1][j] or match_matrix[i][j - 1] - else: - match_matrix[i][j] = False - - return match_matrix[input_length][pattern_length] + dp[i][j] = dp[i - 1][j] or dp[i][j - 1] + return dp[len(string)][len(pattern)] if __name__ == "__main__": import doctest doctest.testmod() - - print(f"{is_pattern_match('baaabab','*****ba*****ab')}") + print(f"{is_match('baaabab','*****ba*****ab') = }") From 760d9bedc1a7ff06a75fafaeb519a5b1979a2885 Mon Sep 17 00:00:00 2001 From: Aryansh B Date: Mon, 30 Oct 2023 02:27:37 +0530 Subject: [PATCH 1280/1543] Added Fast Inverse Square Root (#11054) * Feat: Added Fast inverse square root * Fix: Added typehint * Fix: Added doctests that break the code, changed var name * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: fixed length of docstring * Update fast_inverse_sqrt.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 10 +++++++ maths/fast_inverse_sqrt.py | 54 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) create mode 100644 maths/fast_inverse_sqrt.py diff --git a/DIRECTORY.md b/DIRECTORY.md index d108acf8dcfb..9b2c8ce735c3 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -34,6 +34,7 @@ * [Bitwise Addition Recursive](bit_manipulation/bitwise_addition_recursive.py) * [Count 1S Brian Kernighan Method](bit_manipulation/count_1s_brian_kernighan_method.py) * [Count Number Of One Bits](bit_manipulation/count_number_of_one_bits.py) + * [Excess 3 Code](bit_manipulation/excess_3_code.py) * [Gray Code 
Sequence](bit_manipulation/gray_code_sequence.py) * [Highest Set Bit](bit_manipulation/highest_set_bit.py) * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) @@ -170,7 +171,10 @@ * Arrays * [Equilibrium Index In Array](data_structures/arrays/equilibrium_index_in_array.py) * [Find Triplets With 0 Sum](data_structures/arrays/find_triplets_with_0_sum.py) + * [Index 2D Array In 1D](data_structures/arrays/index_2d_array_in_1d.py) + * [Kth Largest Element](data_structures/arrays/kth_largest_element.py) * [Median Two Array](data_structures/arrays/median_two_array.py) + * [Monotonic Array](data_structures/arrays/monotonic_array.py) * [Pairs With Given Sum](data_structures/arrays/pairs_with_given_sum.py) * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) @@ -368,6 +372,7 @@ ## Electronics * [Apparent Power](electronics/apparent_power.py) * [Builtin Voltage](electronics/builtin_voltage.py) + * [Capacitor Equivalence](electronics/capacitor_equivalence.py) * [Carrier Concentration](electronics/carrier_concentration.py) * [Charging Capacitor](electronics/charging_capacitor.py) * [Charging Inductor](electronics/charging_inductor.py) @@ -531,12 +536,14 @@ ## Machine Learning * [Apriori Algorithm](machine_learning/apriori_algorithm.py) * [Astar](machine_learning/astar.py) + * [Automatic Differentiation](machine_learning/automatic_differentiation.py) * [Data Transformations](machine_learning/data_transformations.py) * [Decision Tree](machine_learning/decision_tree.py) * [Dimensionality Reduction](machine_learning/dimensionality_reduction.py) * Forecasting * [Run](machine_learning/forecasting/run.py) * [Frequent Pattern Growth](machine_learning/frequent_pattern_growth.py) + * [Gradient Boosting Classifier](machine_learning/gradient_boosting_classifier.py) * [Gradient Descent](machine_learning/gradient_descent.py) * [K Means Clust](machine_learning/k_means_clust.py) * [K Nearest Neighbours](machine_learning/k_nearest_neighbours.py) @@ -598,6 +605,7 @@ * [Extended Euclidean Algorithm](maths/extended_euclidean_algorithm.py) * [Factorial](maths/factorial.py) * [Factors](maths/factors.py) + * [Fast Inverse Sqrt](maths/fast_inverse_sqrt.py) * [Fermat Little Theorem](maths/fermat_little_theorem.py) * [Fibonacci](maths/fibonacci.py) * [Find Max](maths/find_max.py) @@ -648,6 +656,7 @@ * [Numerical Integration](maths/numerical_analysis/numerical_integration.py) * [Runge Kutta](maths/numerical_analysis/runge_kutta.py) * [Runge Kutta Fehlberg 45](maths/numerical_analysis/runge_kutta_fehlberg_45.py) + * [Runge Kutta Gills](maths/numerical_analysis/runge_kutta_gills.py) * [Secant Method](maths/numerical_analysis/secant_method.py) * [Simpson Rule](maths/numerical_analysis/simpson_rule.py) * [Square Root](maths/numerical_analysis/square_root.py) @@ -814,6 +823,7 @@ * [Ideal Gas Law](physics/ideal_gas_law.py) * [In Static Equilibrium](physics/in_static_equilibrium.py) * [Kinetic Energy](physics/kinetic_energy.py) + * [Lens Formulae](physics/lens_formulae.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [Malus Law](physics/malus_law.py) * [Mass Energy Equivalence](physics/mass_energy_equivalence.py) diff --git a/maths/fast_inverse_sqrt.py b/maths/fast_inverse_sqrt.py new file mode 100644 index 000000000000..79385bb84877 --- /dev/null +++ b/maths/fast_inverse_sqrt.py @@ -0,0 +1,54 @@ +""" +Fast inverse square root (1/sqrt(x)) using the Quake III algorithm. 
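The heart of the trick is reinterpreting a float's IEEE 754 bits as an integer; a minimal sketch of that round-trip, using the same struct calls the implementation below relies on:

    import struct

    bits = struct.unpack(">i", struct.pack(">f", 1.0))[0]
    print(hex(bits))  # 0x3f800000, the single-precision encoding of 1.0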
+Reference: https://en.wikipedia.org/wiki/Fast_inverse_square_root +Accuracy: https://en.wikipedia.org/wiki/Fast_inverse_square_root#Accuracy +""" + +import struct + + +def fast_inverse_sqrt(number: float) -> float: + """ + Compute the fast inverse square root of a floating-point number using the famous + Quake III algorithm. + + :param float number: Input number for which to calculate the inverse square root. + :return float: The fast inverse square root of the input number. + + Example: + >>> fast_inverse_sqrt(10) + 0.3156857923527257 + >>> fast_inverse_sqrt(4) + 0.49915357479239103 + >>> fast_inverse_sqrt(4.1) + 0.4932849504615651 + >>> fast_inverse_sqrt(0) + Traceback (most recent call last): + ... + ValueError: Input must be a positive number. + >>> fast_inverse_sqrt(-1) + Traceback (most recent call last): + ... + ValueError: Input must be a positive number. + >>> from math import isclose, sqrt + >>> all(isclose(fast_inverse_sqrt(i), 1 / sqrt(i), rel_tol=0.00132) + ... for i in range(50, 60)) + True + """ + if number <= 0: + raise ValueError("Input must be a positive number.") + i = struct.unpack(">i", struct.pack(">f", number))[0] + i = 0x5F3759DF - (i >> 1) + y = struct.unpack(">f", struct.pack(">i", i))[0] + return y * (1.5 - 0.5 * number * y * y) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + # https://en.wikipedia.org/wiki/Fast_inverse_square_root#Accuracy + from math import sqrt + + for i in range(5, 101, 5): + print(f"{i:>3}: {(1 / sqrt(i)) - fast_inverse_sqrt(i):.5f}") From c7a1331b34d6644f546f049058c1d9738fbc9b4c Mon Sep 17 00:00:00 2001 From: Khushi Shukla Date: Mon, 30 Oct 2023 02:50:57 +0530 Subject: [PATCH 1281/1543] Create karnaugh_map_simplification.py (#11056) * Create karnaugh_map_simplification.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update karnaugh_map_simplification.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update karnaugh_map_simplification.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update karnaugh_map_simplification.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update karnaugh_map_simplification.py * Update boolean_algebra/karnaugh_map_simplification.py Co-authored-by: Christian Clauss * Update karnaugh_map_simplification.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update karnaugh_map_simplification.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update karnaugh_map_simplification.py * Update karnaugh_map_simplification.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../karnaugh_map_simplification.py | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 boolean_algebra/karnaugh_map_simplification.py diff --git a/boolean_algebra/karnaugh_map_simplification.py b/boolean_algebra/karnaugh_map_simplification.py new file mode 100644 index 000000000000..c7f2d4c6b897 --- /dev/null +++ b/boolean_algebra/karnaugh_map_simplification.py @@ -0,0 +1,55 @@ +""" +https://en.wikipedia.org/wiki/Karnaugh_map +https://www.allaboutcircuits.com/technical-articles/karnaugh-map-boolean-algebraic-simplification-technique +""" + + +def simplify_kmap(kmap: 
list[list[int]]) -> str: + """ + Simplify the Karnaugh map. + >>> simplify_kmap(kmap=[[0, 1], [1, 1]]) + "A'B + AB' + AB" + >>> simplify_kmap(kmap=[[0, 0], [0, 0]]) + '' + >>> simplify_kmap(kmap=[[0, 1], [1, -1]]) + "A'B + AB' + AB" + >>> simplify_kmap(kmap=[[0, 1], [1, 2]]) + "A'B + AB' + AB" + >>> simplify_kmap(kmap=[[0, 1], [1, 1.1]]) + "A'B + AB' + AB" + >>> simplify_kmap(kmap=[[0, 1], [1, 'a']]) + "A'B + AB' + AB" + """ + simplified_f = [] + for a, row in enumerate(kmap): + for b, item in enumerate(row): + if item: + term = ("A" if a else "A'") + ("B" if b else "B'") + simplified_f.append(term) + return " + ".join(simplified_f) + + +def main() -> None: + """ + Main function to create and simplify a K-Map. + + >>> main() + [0, 1] + [1, 1] + Simplified Expression: + A'B + AB' + AB + """ + kmap = [[0, 1], [1, 1]] + + # Manually generate the product of [0, 1] and [0, 1] + + for row in kmap: + print(row) + + print("Simplified Expression:") + print(simplify_kmap(kmap)) + + +if __name__ == "__main__": + main() + print(f"{simplify_kmap(kmap=[[0, 1], [1, 1]]) = }") From 13e66c18d2738dd7a223c12ebbfc989faa4bcfce Mon Sep 17 00:00:00 2001 From: chien liu Date: Sun, 29 Oct 2023 22:22:19 +0100 Subject: [PATCH 1282/1543] Fix typo power_using_recursion.py (#11083) --- maths/power_using_recursion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/power_using_recursion.py b/maths/power_using_recursion.py index 462fc45bff64..29283ca0f67c 100644 --- a/maths/power_using_recursion.py +++ b/maths/power_using_recursion.py @@ -43,7 +43,7 @@ def power(base: int, exponent: int) -> float: if __name__ == "__main__": - from doctests import testmod + from doctest import testmod testmod() print("Raise base to the power of exponent using recursion...") From 2531f8e221f04014821e16eb5eb1d3c52e5f174c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ab=C3=ADlio=20Azevedo?= Date: Sun, 29 Oct 2023 18:43:32 -0300 Subject: [PATCH 1283/1543] test: adding more tests to missing number algorithm (#10394) * test: adding more tests to missing number algorithm * Update missing_number.py --------- Co-authored-by: Christian Clauss --- bit_manipulation/missing_number.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py index 32b949daa717..554887b17562 100644 --- a/bit_manipulation/missing_number.py +++ b/bit_manipulation/missing_number.py @@ -11,6 +11,12 @@ def find_missing_number(nums: list[int]) -> int: Example: >>> find_missing_number([0, 1, 3, 4]) 2 + >>> find_missing_number([4, 3, 1, 0]) + 2 + >>> find_missing_number([-4, -3, -1, 0]) + -2 + >>> find_missing_number([-2, 2, 1, 3, 0]) + -1 >>> find_missing_number([1, 3, 4, 5, 6]) 2 >>> find_missing_number([6, 5, 4, 2, 1]) @@ -26,3 +32,9 @@ def find_missing_number(nums: list[int]) -> int: missing_number ^= i ^ nums[i - low] return missing_number + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c38b222212de921295440b2b1236376136f37136 Mon Sep 17 00:00:00 2001 From: dekomori_sanae09 Date: Mon, 30 Oct 2023 04:37:21 +0530 Subject: [PATCH 1284/1543] serialize deserialize binary tree (#9625) * added serialize and desrialize bin tree * format files * added type hints * added type hints * Use dataclass .__eq__(), .__iter__(), and .__repr__() --------- Co-authored-by: Christian Clauss --- .../serialize_deserialize_binary_tree.py | 140 ++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 
data_structures/binary_tree/serialize_deserialize_binary_tree.py diff --git a/data_structures/binary_tree/serialize_deserialize_binary_tree.py b/data_structures/binary_tree/serialize_deserialize_binary_tree.py new file mode 100644 index 000000000000..7d3e0c61f96d --- /dev/null +++ b/data_structures/binary_tree/serialize_deserialize_binary_tree.py @@ -0,0 +1,140 @@ +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass + + +@dataclass +class TreeNode: + """ + A binary tree node has a value, left child, and right child. + + Props: + value: The value of the node. + left: The left child of the node. + right: The right child of the node. + """ + + value: int = 0 + left: TreeNode | None = None + right: TreeNode | None = None + + def __post_init__(self): + if not isinstance(self.value, int): + raise TypeError("Value must be an integer.") + + def __iter__(self) -> Iterator[TreeNode]: + """ + Iterate through the tree in preorder. + + Returns: + An iterator of the tree nodes. + + >>> list(TreeNode(1)) + [1,null,null] + >>> tuple(TreeNode(1, TreeNode(2), TreeNode(3))) + (1,2,null,null,3,null,null, 2,null,null, 3,null,null) + """ + yield self + yield from self.left or () + yield from self.right or () + + def __len__(self) -> int: + """ + Count the number of nodes in the tree. + + Returns: + The number of nodes in the tree. + + >>> len(TreeNode(1)) + 1 + >>> len(TreeNode(1, TreeNode(2), TreeNode(3))) + 3 + """ + return sum(1 for _ in self) + + def __repr__(self) -> str: + """ + Represent the tree as a string. + + Returns: + A string representation of the tree. + + >>> repr(TreeNode(1)) + '1,null,null' + >>> repr(TreeNode(1, TreeNode(2), TreeNode(3))) + '1,2,null,null,3,null,null' + >>> repr(TreeNode(1, TreeNode(2), TreeNode(3, TreeNode(4), TreeNode(5)))) + '1,2,null,null,3,4,null,null,5,null,null' + """ + return f"{self.value},{self.left!r},{self.right!r}".replace("None", "null") + + @classmethod + def five_tree(cls) -> TreeNode: + """ + >>> repr(TreeNode.five_tree()) + '1,2,null,null,3,4,null,null,5,null,null' + """ + root = TreeNode(1) + root.left = TreeNode(2) + root.right = TreeNode(3) + root.right.left = TreeNode(4) + root.right.right = TreeNode(5) + return root + + +def deserialize(data: str) -> TreeNode | None: + """ + Deserialize a string to a binary tree. + + Args: + data(str): The serialized string. + + Returns: + The root of the binary tree. + + >>> root = TreeNode.five_tree() + >>> serialzed_data = repr(root) + >>> deserialized = deserialize(serialzed_data) + >>> root == deserialized + True + >>> root is deserialized # two separate trees + False + >>> root.right.right.value = 6 + >>> root == deserialized + False + >>> serialzed_data = repr(root) + >>> deserialized = deserialize(serialzed_data) + >>> root == deserialized + True + >>> deserialize("") + Traceback (most recent call last): + ... + ValueError: Data cannot be empty. 
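The explicit "null" markers are what keep a bare preorder string unambiguous:
without them, a left-skewed and a right-skewed tree would both serialize to the
same value sequence. One more illustrative round trip:

>>> skewed = TreeNode(1, TreeNode(2, TreeNode(3)))
>>> repr(skewed)
'1,2,3,null,null,null,null'
>>> deserialize(repr(skewed)) == skewed
True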
+ """ + + if not data: + raise ValueError("Data cannot be empty.") + + # Split the serialized string by a comma to get node values + nodes = data.split(",") + + def build_tree() -> TreeNode | None: + # Get the next value from the list + value = nodes.pop(0) + + if value == "null": + return None + + node = TreeNode(int(value)) + node.left = build_tree() # Recursively build left subtree + node.right = build_tree() # Recursively build right subtree + return node + + return build_tree() + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From ad9948d5d4d65d1457b58d278e780a1b9470a715 Mon Sep 17 00:00:00 2001 From: Mohammad Esfandiyar Date: Mon, 30 Oct 2023 16:50:47 +0330 Subject: [PATCH 1285/1543] implementation of Gaussian Elimination pivoting as a numerical linear algebra algorithm (#10457) * Adding new implementation Adding my python implementation of Gaussian Elimination pivoting as a numerical linear algebra algorithm * Delete linear_algebra/src/GaussianEliminationpivoting.py * Adding new implementation Adding my python implementation of Gaussian Elimination pivoting as a numerical linear algebra algorithm * Delete linear_algebra/src/gaussianeliminationpivoting.py * Adding new implementation Adding my python implementation of Gaussian Elimination pivoting as a numerical linear algebra algorithm for the third time because the last two times had conflict with the rules in PR * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Delete linear_algebra/src/gaussianeliminationpivoting.py * Adding gaussianeliminationpivoting.py Adding my python implementation of Gaussian Elimination pivoting as a numerical linear algebra algorithm for the fourth time because the last three times had conflict with the rules in PR and bots * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussianeliminationpivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussianeliminationpivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussianeliminationpivoting.py I changed a to matrix and coeff_matrix for better clarity * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussianeliminationpivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussianeliminationpivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussianeliminationpivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename gaussianeliminationpivoting.py to gaussian_elimination_pivoting.py renamed the file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update 
gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Delete linear_algebra/src/gaussian_elimination_pivoting.py * Add files via upload * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Delete linear_algebra/src/gaussian_elimination_pivoting/text.py * Add files via upload * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py Co-authored-by: Christian Clauss * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * 
Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../gaussian_elimination_pivoting.py | 101 ++++++++++++++++++ .../gaussian_elimination_pivoting/matrix.txt | 4 + 2 files changed, 105 insertions(+) create mode 100644 linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py create mode 100644 linear_algebra/src/gaussian_elimination_pivoting/matrix.txt diff --git a/linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py b/linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py new file mode 100644 index 000000000000..2a86350e9fc6 --- /dev/null +++ b/linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py @@ -0,0 +1,101 @@ +import numpy as np + +matrix = np.array( + [ + [5.0, -5.0, -3.0, 4.0, -11.0], + [1.0, -4.0, 6.0, -4.0, -10.0], + [-2.0, -5.0, 4.0, -5.0, -12.0], + [-3.0, -3.0, 5.0, -5.0, 8.0], + ], + dtype=float, +) + + +def solve_linear_system(matrix: np.ndarray) -> np.ndarray: + """ + Solve a linear system of equations using Gaussian elimination with partial pivoting + + Args: + - matrix: Coefficient matrix with the last column representing the constants. + + Returns: + - Solution vector. + + Raises: + - ValueError: If the matrix is not correct (i.e., singular). + + https://courses.engr.illinois.edu/cs357/su2013/lect.htm Lecture 7 + + Example: + >>> A = np.array([[2, 1, -1], [-3, -1, 2], [-2, 1, 2]], dtype=float) + >>> B = np.array([8, -11, -3], dtype=float) + >>> solution = solve_linear_system(np.column_stack((A, B))) + >>> np.allclose(solution, np.array([2., 3., -1.])) + True + >>> solve_linear_system(np.array([[0, 0], [0, 0]], dtype=float)) + array([nan, nan]) + """ + ab = np.copy(matrix) + num_of_rows = ab.shape[0] + num_of_columns = ab.shape[1] - 1 + x_lst: list[float] = [] + + # Lead element search + for column_num in range(num_of_rows): + for i in range(column_num, num_of_columns): + if abs(ab[i][column_num]) > abs(ab[column_num][column_num]): + ab[[column_num, i]] = ab[[i, column_num]] + if ab[column_num, column_num] == 0.0: + raise ValueError("Matrix is not correct") + else: + pass + if column_num != 0: + for i in range(column_num, num_of_rows): + ab[i, :] -= ( + ab[i, column_num - 1] + / ab[column_num - 1, column_num - 1] + * ab[column_num - 1, :] + ) + + # Upper triangular matrix + for column_num in range(num_of_rows): + for i in range(column_num, num_of_columns): + if abs(ab[i][column_num]) > abs(ab[column_num][column_num]): + ab[[column_num, i]] = ab[[i, column_num]] + if ab[column_num, column_num] == 0.0: + raise ValueError("Matrix is not correct") + else: + pass + if column_num != 0: + for i in range(column_num, num_of_rows): + ab[i, :] -= ( + ab[i, column_num - 1] + / ab[column_num - 1, column_num - 1] + * ab[column_num - 1, :] + ) + + # Find x vector (Back Substitution) + for column_num in range(num_of_rows - 1, -1, -1): + x = ab[column_num, -1] / ab[column_num, column_num] + x_lst.insert(0, x) + for i in range(column_num - 1, -1, -1): + ab[i, -1] -= ab[i, column_num] * x + + # Return the solution vector + return np.asarray(x_lst) + + +if __name__ == "__main__": + from doctest import testmod + from pathlib import Path + 
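    # Run the module doctests first, then demonstrate the solver on the
    # augmented matrix read from matrix.txt (last column holds the constants);
    # if that file is missing, fall back to the example matrix defined at
    # module level.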
+ testmod() + file_path = Path(__file__).parent / "matrix.txt" + try: + matrix = np.loadtxt(file_path) + except FileNotFoundError: + print(f"Error: {file_path} not found. Using default matrix instead.") + + # Example usage: + print(f"Matrix:\n{matrix}") + print(f"{solve_linear_system(matrix) = }") diff --git a/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt b/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt new file mode 100644 index 000000000000..dd895ad856ee --- /dev/null +++ b/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt @@ -0,0 +1,4 @@ +5.0 -5.0 -3.0 4.0 -11.0 +1.0 -4.0 6.0 -4.0 -10.0 +-2.0 -5.0 4.0 -5.0 -12.0 +-3.0 -3.0 5.0 -5.0 8.0 \ No newline at end of file From ddd4023fe66cd4a0605d4f7de5ae85680ac94167 Mon Sep 17 00:00:00 2001 From: Devashri Deulkar <95555641+Devadeut@users.noreply.github.com> Date: Mon, 30 Oct 2023 23:45:49 +0530 Subject: [PATCH 1286/1543] Happy number (new algorithm) (#10864) * Happy number (new algorithm) adding new algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/special_numbers/happy_number.py Co-authored-by: Christian Clauss * Update happy_number.py added new changes * Update happy_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update happy_number.py * Update happy_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update happy_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update happy_number.py added ValueError part in code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update happy_number.py modified and added raise Error code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update happy_number.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/special_numbers/happy_number.py | 48 +++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 maths/special_numbers/happy_number.py diff --git a/maths/special_numbers/happy_number.py b/maths/special_numbers/happy_number.py new file mode 100644 index 000000000000..eac3167e304b --- /dev/null +++ b/maths/special_numbers/happy_number.py @@ -0,0 +1,48 @@ +def is_happy_number(number: int) -> bool: + """ + A happy number is a number which eventually reaches 1 when replaced by the sum of + the square of each digit. + + :param number: The number to check for happiness. + :return: True if the number is a happy number, False otherwise. + + >>> is_happy_number(19) + True + >>> is_happy_number(2) + False + >>> is_happy_number(23) + True + >>> is_happy_number(1) + True + >>> is_happy_number(0) + Traceback (most recent call last): + ... + ValueError: number=0 must be a positive integer + >>> is_happy_number(-19) + Traceback (most recent call last): + ... + ValueError: number=-19 must be a positive integer + >>> is_happy_number(19.1) + Traceback (most recent call last): + ... + ValueError: number=19.1 must be a positive integer + >>> is_happy_number("happy") + Traceback (most recent call last): + ... 
+ ValueError: number='happy' must be a positive integer + """ + if not isinstance(number, int) or number <= 0: + msg = f"{number=} must be a positive integer" + raise ValueError(msg) + + seen = set() + while number != 1 and number not in seen: + seen.add(number) + number = sum(int(digit) ** 2 for digit in str(number)) + return number == 1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 79a327fc07388a093e132d9df94723f24c162315 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 19:17:00 +0100 Subject: [PATCH 1287/1543] [pre-commit.ci] pre-commit autoupdate (#11106) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.1 → v0.1.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.1...v0.1.3) - [github.com/psf/black: 23.10.0 → 23.10.1](https://github.com/psf/black/compare/23.10.0...23.10.1) - [github.com/tox-dev/pyproject-fmt: 1.2.0 → 1.3.0](https://github.com/tox-dev/pyproject-fmt/compare/1.2.0...1.3.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e0b9922fae7e..784993e6b00c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.1 + rev: v0.1.3 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.10.0 + rev: 23.10.1 hooks: - id: black @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.2.0" + rev: "1.3.0" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 9b2c8ce735c3..ee4a521f708b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -10,6 +10,8 @@ * [All Subsequences](backtracking/all_subsequences.py) * [Coloring](backtracking/coloring.py) * [Combination Sum](backtracking/combination_sum.py) + * [Crossword Puzzle Solver](backtracking/crossword_puzzle_solver.py) + * [Generate Parentheses](backtracking/generate_parentheses.py) * [Hamiltonian Cycle](backtracking/hamiltonian_cycle.py) * [Knight Tour](backtracking/knight_tour.py) * [Match Word Pattern](backtracking/match_word_pattern.py) @@ -35,6 +37,7 @@ * [Count 1S Brian Kernighan Method](bit_manipulation/count_1s_brian_kernighan_method.py) * [Count Number Of One Bits](bit_manipulation/count_number_of_one_bits.py) * [Excess 3 Code](bit_manipulation/excess_3_code.py) + * [Find Previous Power Of Two](bit_manipulation/find_previous_power_of_two.py) * [Gray Code Sequence](bit_manipulation/gray_code_sequence.py) * [Highest Set Bit](bit_manipulation/highest_set_bit.py) * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) @@ -54,6 +57,8 @@ ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) * [Imply Gate](boolean_algebra/imply_gate.py) + * [Karnaugh Map Simplification](boolean_algebra/karnaugh_map_simplification.py) + * [Multiplexer](boolean_algebra/multiplexer.py) * [Nand Gate](boolean_algebra/nand_gate.py) * [Nimply Gate](boolean_algebra/nimply_gate.py) * [Nor Gate](boolean_algebra/nor_gate.py) @@ -108,6 +113,7 @@ * [Rsa 
Cipher](ciphers/rsa_cipher.py) * [Rsa Factorization](ciphers/rsa_factorization.py) * [Rsa Key Generator](ciphers/rsa_key_generator.py) + * [Running Key Cipher](ciphers/running_key_cipher.py) * [Shuffled Shift Cipher](ciphers/shuffled_shift_cipher.py) * [Simple Keyword Cypher](ciphers/simple_keyword_cypher.py) * [Simple Substitution Cipher](ciphers/simple_substitution_cipher.py) @@ -150,6 +156,7 @@ * [Excel Title To Column](conversions/excel_title_to_column.py) * [Hex To Bin](conversions/hex_to_bin.py) * [Hexadecimal To Decimal](conversions/hexadecimal_to_decimal.py) + * [Ipv4 Conversion](conversions/ipv4_conversion.py) * [Length Conversion](conversions/length_conversion.py) * [Molecular Chemistry](conversions/molecular_chemistry.py) * [Octal To Binary](conversions/octal_to_binary.py) @@ -209,6 +216,7 @@ * [Red Black Tree](data_structures/binary_tree/red_black_tree.py) * [Segment Tree](data_structures/binary_tree/segment_tree.py) * [Segment Tree Other](data_structures/binary_tree/segment_tree_other.py) + * [Serialize Deserialize Binary Tree](data_structures/binary_tree/serialize_deserialize_binary_tree.py) * [Symmetric Tree](data_structures/binary_tree/symmetric_tree.py) * [Treap](data_structures/binary_tree/treap.py) * [Wavelet Tree](data_structures/binary_tree/wavelet_tree.py) @@ -410,6 +418,9 @@ * [Mandelbrot](fractals/mandelbrot.py) * [Sierpinski Triangle](fractals/sierpinski_triangle.py) +## Fuzzy Logic + * [Fuzzy Operations](fuzzy_logic/fuzzy_operations.py) + ## Genetic Algorithm * [Basic String](genetic_algorithm/basic_string.py) @@ -521,6 +532,8 @@ * [Lu Decomposition](linear_algebra/lu_decomposition.py) * Src * [Conjugate Gradient](linear_algebra/src/conjugate_gradient.py) + * Gaussian Elimination Pivoting + * [Gaussian Elimination Pivoting](linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py) * [Lib](linear_algebra/src/lib.py) * [Polynom For Points](linear_algebra/src/polynom_for_points.py) * [Power Iteration](linear_algebra/src/power_iteration.py) @@ -618,12 +631,14 @@ * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) + * [Integer Square Root](maths/integer_square_root.py) * [Interquartile Range](maths/interquartile_range.py) * [Is Int Palindrome](maths/is_int_palindrome.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) * [Joint Probability Distribution](maths/joint_probability_distribution.py) + * [Josephus Problem](maths/josephus_problem.py) * [Juggler Sequence](maths/juggler_sequence.py) * [Karatsuba](maths/karatsuba.py) * [Kth Lexicographic Permutation](maths/kth_lexicographic_permutation.py) @@ -646,6 +661,7 @@ * [Monte Carlo Dice](maths/monte_carlo_dice.py) * [Number Of Digits](maths/number_of_digits.py) * Numerical Analysis + * [Adams Bashforth](maths/numerical_analysis/adams_bashforth.py) * [Bisection](maths/numerical_analysis/bisection.py) * [Bisection 2](maths/numerical_analysis/bisection_2.py) * [Integration By Simpson Approx](maths/numerical_analysis/integration_by_simpson_approx.py) @@ -1223,6 +1239,7 @@ * [Anagrams](strings/anagrams.py) * [Autocomplete Using Trie](strings/autocomplete_using_trie.py) * [Barcode Validator](strings/barcode_validator.py) + * [Bitap String Match](strings/bitap_string_match.py) * [Boyer Moore Search](strings/boyer_moore_search.py) * [Camel Case To Snake 
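For context on what the added doctests exercise: a partially filled board is valid
when no digit repeats within any row, any column, or any of the nine 3x3 boxes,
and empty cells (".") are skipped. A minimal sketch of the usual single-pass
seen-set approach (the repository's own implementation may differ in its details
and error handling):

def is_valid_sudoku_sketch(board: list[list[str]]) -> bool:
    seen: set[tuple] = set()
    for row in range(9):
        for col in range(9):
            value = board[row][col]
            if value == ".":
                continue  # empty cells place no constraint
            keys = (
                ("row", row, value),
                ("col", col, value),
                ("box", row // 3, col // 3, value),
            )
            if any(key in seen for key in keys):
                return False  # digit already seen in that row, column, or box
            seen.update(keys)
    return True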
Case](strings/camel_case_to_snake_case.py) * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py) From b072ba657f045a899ad133006d54ce5c9035c7f4 Mon Sep 17 00:00:00 2001 From: Akshar Goyal Date: Mon, 30 Oct 2023 20:00:48 -0400 Subject: [PATCH 1288/1543] Added tests for validate_sudoku_board.py (#11108) --- matrix/validate_sudoku_board.py | 60 +++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/matrix/validate_sudoku_board.py b/matrix/validate_sudoku_board.py index 0ee7b3df0b83..a7e08d169059 100644 --- a/matrix/validate_sudoku_board.py +++ b/matrix/validate_sudoku_board.py @@ -54,6 +54,66 @@ def is_valid_sudoku_board(sudoku_board: list[list[str]]) -> bool: ... ,[".",".",".",".","8",".",".","7","9"] ... ]) False + >>> is_valid_sudoku_board([ + ... ["1","2","3","4","5","6","7","8","9"] + ... ,["4","5","6","7","8","9","1","2","3"] + ... ,["7","8","9","1","2","3","4","5","6"] + ... ,[".",".",".",".",".",".",".",".","."] + ... ,[".",".",".",".",".",".",".",".","."] + ... ,[".",".",".",".",".",".",".",".","."] + ... ,[".",".",".",".",".",".",".",".","."] + ... ,[".",".",".",".",".",".",".",".","."] + ... ,[".",".",".",".",".",".",".",".","."] + ... ]) + True + >>> is_valid_sudoku_board([ + ... ["1","2","3",".",".",".",".",".","."] + ... ,["4","5","6",".",".",".",".",".","."] + ... ,["7","8","9",".",".",".",".",".","."] + ... ,[".",".",".","4","5","6",".",".","."] + ... ,[".",".",".","7","8","9",".",".","."] + ... ,[".",".",".","1","2","3",".",".","."] + ... ,[".",".",".",".",".",".","7","8","9"] + ... ,[".",".",".",".",".",".","1","2","3"] + ... ,[".",".",".",".",".",".","4","5","6"] + ... ]) + True + >>> is_valid_sudoku_board([ + ... ["1","2","3",".",".",".","5","6","4"] + ... ,["4","5","6",".",".",".","8","9","7"] + ... ,["7","8","9",".",".",".","2","3","1"] + ... ,[".",".",".","4","5","6",".",".","."] + ... ,[".",".",".","7","8","9",".",".","."] + ... ,[".",".",".","1","2","3",".",".","."] + ... ,["3","1","2",".",".",".","7","8","9"] + ... ,["6","4","5",".",".",".","1","2","3"] + ... ,["9","7","8",".",".",".","4","5","6"] + ... ]) + True + >>> is_valid_sudoku_board([ + ... ["1","2","3","4","5","6","7","8","9"] + ... ,["2",".",".",".",".",".",".",".","8"] + ... ,["3",".",".",".",".",".",".",".","7"] + ... ,["4",".",".",".",".",".",".",".","6"] + ... ,["5",".",".",".",".",".",".",".","5"] + ... ,["6",".",".",".",".",".",".",".","4"] + ... ,["7",".",".",".",".",".",".",".","3"] + ... ,["8",".",".",".",".",".",".",".","2"] + ... ,["9","8","7","6","5","4","3","2","1"] + ... ]) + False + >>> is_valid_sudoku_board([ + ... ["1","2","3","8","9","7","5","6","4"] + ... ,["4","5","6","2","3","1","8","9","7"] + ... ,["7","8","9","5","6","4","2","3","1"] + ... ,["2","3","1","4","5","6","9","7","8"] + ... ,["5","6","4","7","8","9","3","1","2"] + ... ,["8","9","7","1","2","3","6","4","5"] + ... ,["3","1","2","6","4","5","7","8","9"] + ... ,["6","4","5","9","7","8","1","2","3"] + ... ,["9","7","8","3","1","2","4","5","6"] + ... ]) + True >>> is_valid_sudoku_board([["1", "2", "3", "4", "5", "6", "7", "8", "9"]]) Traceback (most recent call last): ... 
From 99f3a0e4c9b1a6d9ff5bba2adf65d90d55f2250a Mon Sep 17 00:00:00 2001 From: Arya Hariharan <84255987+Arya-Hari@users.noreply.github.com> Date: Tue, 31 Oct 2023 12:23:38 +0530 Subject: [PATCH 1289/1543] adding-docstrings (#11114) * adding-docstrings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update intro_sort.py * Update intro_sort.py * Remove blank lines --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/intro_sort.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/sorts/intro_sort.py b/sorts/intro_sort.py index 908d2886533a..5a5741dc8375 100644 --- a/sorts/intro_sort.py +++ b/sorts/intro_sort.py @@ -11,6 +11,18 @@ def insertion_sort(array: list, start: int = 0, end: int = 0) -> list: >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] >>> insertion_sort(array, 0, len(array)) [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] + >>> array = [21, 15, 11, 45, -2, -11, 46] + >>> insertion_sort(array, 0, len(array)) + [-11, -2, 11, 15, 21, 45, 46] + >>> array = [-2, 0, 89, 11, 48, 79, 12] + >>> insertion_sort(array, 0, len(array)) + [-2, 0, 11, 12, 48, 79, 89] + >>> array = ['a', 'z', 'd', 'p', 'v', 'l', 'o', 'o'] + >>> insertion_sort(array, 0, len(array)) + ['a', 'd', 'l', 'o', 'o', 'p', 'v', 'z'] + >>> array = [73.568, 73.56, -45.03, 1.7, 0, 89.45] + >>> insertion_sort(array, 0, len(array)) + [-45.03, 0, 1.7, 73.56, 73.568, 89.45] """ end = end or len(array) for i in range(start, end): @@ -47,6 +59,12 @@ def heap_sort(array: list) -> list: """ >>> heap_sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] + >>> heap_sort([-2, -11, 0, 0, 0, 87, 45, -69, 78, 12, 10, 103, 89, 52]) + [-69, -11, -2, 0, 0, 0, 10, 12, 45, 52, 78, 87, 89, 103] + >>> heap_sort(['b', 'd', 'e', 'f', 'g', 'p', 'x', 'z', 'b', 's', 'e', 'u', 'v']) + ['b', 'b', 'd', 'e', 'e', 'f', 'g', 'p', 's', 'u', 'v', 'x', 'z'] + >>> heap_sort([6.2, -45.54, 8465.20, 758.56, -457.0, 0, 1, 2.879, 1.7, 11.7]) + [-457.0, -45.54, 0, 1, 1.7, 2.879, 6.2, 11.7, 758.56, 8465.2] """ n = len(array) @@ -91,6 +109,15 @@ def partition(array: list, low: int, high: int, pivot: int) -> int: >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] >>> partition(array, 0, len(array), 12) 8 + >>> array = [21, 15, 11, 45, -2, -11, 46] + >>> partition(array, 0, len(array), 15) + 3 + >>> array = ['a', 'z', 'd', 'p', 'v', 'l', 'o', 'o'] + >>> partition(array, 0, len(array), 'p') + 5 + >>> array = [6.2, -45.54, 8465.20, 758.56, -457.0, 0, 1, 2.879, 1.7, 11.7] + >>> partition(array, 0, len(array), 2.879) + 6 """ i = low j = high From ebfdb127e76e76c122d3110155abf644474b9fa9 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sat, 4 Nov 2023 17:34:57 +0530 Subject: [PATCH 1290/1543] Added doctest to hash_map.py (#11105) * Added doctest to heap.py * Added doctest to hash_map.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update hash_map.py * Added doctest to hash_map.py * Added doctest to hash_map.py * Added doctest to detecting_english_programmatically.py * Update detecting_english_programmatically.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/hashing/hash_map.py 
| 33 +++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/data_structures/hashing/hash_map.py b/data_structures/hashing/hash_map.py index 1689e07afd9f..6a6f8e54d5e9 100644 --- a/data_structures/hashing/hash_map.py +++ b/data_structures/hashing/hash_map.py @@ -242,6 +242,25 @@ def __delitem__(self, key: KEY) -> None: self._size_down() def __getitem__(self, key: KEY) -> VAL: + """ + Returns the item at the given key + + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm.__getitem__(1) + 10 + + >>> hm = HashMap(5) + >>> hm._add_item(10, -10) + >>> hm._add_item(20, -20) + >>> hm.__getitem__(20) + -20 + + >>> hm = HashMap(5) + >>> hm._add_item(-1, 10) + >>> hm.__getitem__(-1) + 10 + """ for ind in self._iterate_buckets(key): item = self._buckets[ind] if item is None: @@ -253,6 +272,20 @@ def __getitem__(self, key: KEY) -> VAL: raise KeyError(key) def __len__(self) -> int: + """ + Returns the number of items present in hashmap + + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm._add_item(2, 20) + >>> hm._add_item(3, 30) + >>> hm.__len__() + 3 + + >>> hm = HashMap(5) + >>> hm.__len__() + 0 + """ return self._len def __iter__(self) -> Iterator[KEY]: From 257cfbdf6e2a55d48727f533ef15295065e0057b Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sun, 5 Nov 2023 13:46:00 +0530 Subject: [PATCH 1291/1543] Added doctest to decision_tree.py (#11143) * Added doctest to decision_tree.py * Update decision_tree.py * Update machine_learning/decision_tree.py * Update machine_learning/decision_tree.py * raise ValueError() * Update decision_tree.py --------- Co-authored-by: Christian Clauss --- machine_learning/decision_tree.py | 49 ++++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 14 deletions(-) diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index 7cd1b02c4181..c67e09c7f114 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -18,7 +18,7 @@ def __init__(self, depth=5, min_leaf_size=5): def mean_squared_error(self, labels, prediction): """ mean_squared_error: - @param labels: a one dimensional numpy array + @param labels: a one-dimensional numpy array @param prediction: a floating point value return value: mean_squared_error calculates the error if prediction is used to estimate the labels @@ -44,26 +44,47 @@ def mean_squared_error(self, labels, prediction): def train(self, x, y): """ train: - @param x: a one dimensional numpy array - @param y: a one dimensional numpy array. + @param x: a one-dimensional numpy array + @param y: a one-dimensional numpy array. The contents of y are the labels for the corresponding X values - train does not have a return value - """ - - """ - this section is to check that the inputs conform to our dimensionality + train() does not have a return value + + Examples: + 1. Try to train when x & y are of same length & 1 dimensions (No errors) + >>> dt = DecisionTree() + >>> dt.train(np.array([10,20,30,40,50]),np.array([0,0,0,1,1])) + + 2. Try to train when x is 2 dimensions + >>> dt = DecisionTree() + >>> dt.train(np.array([[1,2,3,4,5],[1,2,3,4,5]]),np.array([0,0,0,1,1])) + Traceback (most recent call last): + ... + ValueError: Input data set must be one-dimensional + + 3. Try to train when x and y are not of the same length + >>> dt = DecisionTree() + >>> dt.train(np.array([1,2,3,4,5]),np.array([[0,0,0,1,1],[0,0,0,1,1]])) + Traceback (most recent call last): + ... + ValueError: x and y have different lengths + + 4. 
Try to train when x & y are of the same length but different dimensions + >>> dt = DecisionTree() + >>> dt.train(np.array([1,2,3,4,5]),np.array([[1],[2],[3],[4],[5]])) + Traceback (most recent call last): + ... + ValueError: Data set labels must be one-dimensional + + This section is to check that the inputs conform to our dimensionality constraints """ if x.ndim != 1: - print("Error: Input data set must be one dimensional") - return + raise ValueError("Input data set must be one-dimensional") if len(x) != len(y): - print("Error: X and y have different lengths") - return + raise ValueError("x and y have different lengths") if y.ndim != 1: - print("Error: Data set labels must be one dimensional") - return + raise ValueError("Data set labels must be one-dimensional") if len(x) < 2 * self.min_leaf_size: self.prediction = np.mean(y) From 1e50cf366022a5c44abfa5adf5e01bef62524cc3 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sun, 5 Nov 2023 14:08:39 +0530 Subject: [PATCH 1292/1543] Added doctest to binary_search_tree.py (#11141) * Added doctest to binary_search_tree.py * Update binary_search_tree.py * Update binary_search_tree.py --------- Co-authored-by: Christian Clauss --- .../binary_tree/binary_search_tree.py | 32 +++++++++++++++---- 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 38691c4755c9..f08f278a8e47 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -10,8 +10,7 @@ / \ / 4 7 13 ->>> t = BinarySearchTree() ->>> t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7) +>>> t = BinarySearchTree().insert(8, 3, 6, 1, 10, 14, 13, 4, 7) >>> print(" ".join(repr(i.value) for i in t.traversal_tree())) 8 3 1 6 4 7 10 14 13 @@ -40,7 +39,16 @@ >>> testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7) >>> t = BinarySearchTree() >>> for i in testlist: -... t.insert(i) +... t.insert(i) # doctest: +ELLIPSIS +BinarySearchTree(root=8) +BinarySearchTree(root={'8': (3, None)}) +BinarySearchTree(root={'8': ({'3': (None, 6)}, None)}) +BinarySearchTree(root={'8': ({'3': (1, 6)}, None)}) +BinarySearchTree(root={'8': ({'3': (1, 6)}, 10)}) +BinarySearchTree(root={'8': ({'3': (1, 6)}, {'10': (None, 14)})}) +BinarySearchTree(root={'8': ({'3': (1, 6)}, {'10': (None, {'14': (13, None)})})}) +BinarySearchTree(root={'8': ({'3': (1, {'6': (4, None)})}, {'10': (None, {'14': ... +BinarySearchTree(root={'8': ({'3': (1, {'6': (4, 7)})}, {'10': (None, {'14': (13, ... Prints all the elements of the list in order traversal >>> print(t) @@ -84,7 +92,7 @@ from collections.abc import Iterable, Iterator from dataclasses import dataclass -from typing import Any +from typing import Any, Self @dataclass @@ -145,7 +153,18 @@ def __reassign_nodes(self, node: Node, new_children: Node | None) -> None: self.root = new_children def empty(self) -> bool: - return self.root is None + """ + Returns True if the tree does not have any element(s). + False if the tree has element(s). 
+ + >>> BinarySearchTree().empty() + True + >>> BinarySearchTree().insert(1).empty() + False + >>> BinarySearchTree().insert(8, 3, 6, 1, 10, 14, 13, 4, 7).empty() + False + """ + return not self.root def __insert(self, value) -> None: """ @@ -173,9 +192,10 @@ def __insert(self, value) -> None: parent_node = parent_node.right new_node.parent = parent_node - def insert(self, *values) -> None: + def insert(self, *values) -> Self: for value in values: self.__insert(value) + return self def search(self, value) -> Node | None: if self.empty(): From e48ea7d39643f3c15f830ccf63a363378858a001 Mon Sep 17 00:00:00 2001 From: SEIKH NABAB UDDIN <93948993+nababuddin@users.noreply.github.com> Date: Sun, 5 Nov 2023 14:13:52 +0530 Subject: [PATCH 1293/1543] Create get_ip_geolocation.py (#10902) * Create get_ip_geolocation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update get_ip_geolocation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update get_ip_geolocation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- web_programming/get_ip_geolocation.py | 40 +++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 web_programming/get_ip_geolocation.py diff --git a/web_programming/get_ip_geolocation.py b/web_programming/get_ip_geolocation.py new file mode 100644 index 000000000000..62eaeafceb7e --- /dev/null +++ b/web_programming/get_ip_geolocation.py @@ -0,0 +1,40 @@ +import requests + + +# Function to get geolocation data for an IP address +def get_ip_geolocation(ip_address: str) -> str: + try: + # Construct the URL for the IP geolocation API + url = f"https://ipinfo.io/{ip_address}/json" + + # Send a GET request to the API + response = requests.get(url) + + # Check if the HTTP request was successful + response.raise_for_status() + + # Parse the response as JSON + data = response.json() + + # Check if city, region, and country information is available + if "city" in data and "region" in data and "country" in data: + location = f"Location: {data['city']}, {data['region']}, {data['country']}" + else: + location = "Location data not found." 
+ + return location + except requests.exceptions.RequestException as e: + # Handle network-related exceptions + return f"Request error: {e}" + except ValueError as e: + # Handle JSON parsing errors + return f"JSON parsing error: {e}" + + +if __name__ == "__main__": + # Prompt the user to enter an IP address + ip_address = input("Enter an IP address: ") + + # Get the geolocation data and print it + location = get_ip_geolocation(ip_address) + print(location) From eb989c08cdbf82e1a4db6481371f3e9ccb3bcf99 Mon Sep 17 00:00:00 2001 From: Sunny Kumar <37464973+Skyad@users.noreply.github.com> Date: Mon, 6 Nov 2023 17:40:50 +0530 Subject: [PATCH 1294/1543] Data structures/arrays/triplet sum (#11134) * updated code for find triplets with 0 sum Signed-off-by: Skyad <777.sunnykumar@gmail.com> * extra line added at the end of file Signed-off-by: Sunny Kumar * extra line added at the end of file Signed-off-by: Skyad <777.sunnykumar@gmail.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * file updated with comments Signed-off-by: Skyad <777.sunnykumar@gmail.com> * updated the comments as suggested by community Signed-off-by: Sunny Kumar * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * file updated according to community comments Signed-off-by: Skyad <777.sunnykumar@gmail.com> * Update find_triplets_with_0_sum.py --------- Signed-off-by: Skyad <777.sunnykumar@gmail.com> Signed-off-by: Sunny Kumar Co-authored-by: Sunny Kumar Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../arrays/find_triplets_with_0_sum.py | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/data_structures/arrays/find_triplets_with_0_sum.py b/data_structures/arrays/find_triplets_with_0_sum.py index 8217ff857e3d..52e521906873 100644 --- a/data_structures/arrays/find_triplets_with_0_sum.py +++ b/data_structures/arrays/find_triplets_with_0_sum.py @@ -22,3 +22,66 @@ def find_triplets_with_0_sum(nums: list[int]) -> list[list[int]]: list(x) for x in sorted({abc for abc in combinations(sorted(nums), 3) if not sum(abc)}) ] + + +def find_triplets_with_0_sum_hashing(arr: list[int]) -> list[list[int]]: + """ + Function for finding the triplets with a given sum in the array using hashing. + + Given a list of integers, return elements a, b, c such that a + b + c = 0. + + Args: + nums: list of integers + Returns: + list of lists of integers where sum(each_list) == 0 + Examples: + >>> find_triplets_with_0_sum_hashing([-1, 0, 1, 2, -1, -4]) + [[-1, 0, 1], [-1, -1, 2]] + >>> find_triplets_with_0_sum_hashing([]) + [] + >>> find_triplets_with_0_sum_hashing([0, 0, 0]) + [[0, 0, 0]] + >>> find_triplets_with_0_sum_hashing([1, 2, 3, 0, -1, -2, -3]) + [[-1, 0, 1], [-3, 1, 2], [-2, 0, 2], [-2, -1, 3], [-3, 0, 3]] + + Time complexity: O(N^2) + Auxiliary Space: O(N) + + """ + target_sum = 0 + + # Initialize the final output array with blank. + output_arr = [] + + # Set the initial element as arr[i]. + for index, item in enumerate(arr[:-2]): + # to store second elements that can complement the final sum. + set_initialize = set() + + # current sum needed for reaching the target sum + current_sum = target_sum - item + + # Traverse the subarray arr[i+1:]. + for other_item in arr[index + 1 :]: + # required value for the second element + required_value = current_sum - other_item + + # Verify if the desired value exists in the set. 
+ if required_value in set_initialize: + # finding triplet elements combination. + combination_array = sorted([item, other_item, required_value]) + if combination_array not in output_arr: + output_arr.append(combination_array) + + # Include the current element in the set + # for subsequent complement verification. + set_initialize.add(other_item) + + # Return all the triplet combinations. + return output_arr + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From fa508d7b8bf9696805e97deac71e657256500ab7 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Mon, 6 Nov 2023 17:44:39 +0530 Subject: [PATCH 1295/1543] Added doctest to detecting_english_programmatically.py (#11135) --- strings/detecting_english_programmatically.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/strings/detecting_english_programmatically.py b/strings/detecting_english_programmatically.py index b9000101beb4..e30e2ea8dd8b 100644 --- a/strings/detecting_english_programmatically.py +++ b/strings/detecting_english_programmatically.py @@ -25,6 +25,18 @@ def get_english_count(message: str) -> float: def remove_non_letters(message: str) -> str: + """ + >>> remove_non_letters("Hi! how are you?") + 'Hi how are you' + >>> remove_non_letters("P^y%t)h@o*n") + 'Python' + >>> remove_non_letters("1+1=2") + '' + >>> remove_non_letters("www.google.com/") + 'wwwgooglecom' + >>> remove_non_letters("") + '' + """ return "".join(symbol for symbol in message if symbol in LETTERS_AND_SPACE) From 12e401650c8afd4b6cf69ddab09a882d1eb6ff5c Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Mon, 6 Nov 2023 17:48:41 +0530 Subject: [PATCH 1296/1543] Added doctest to string_switch_case.py (#11136) * Added doctest to string_switch_case.py * Update string_switch_case.py --- strings/string_switch_case.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/strings/string_switch_case.py b/strings/string_switch_case.py index 9a07472dfd71..c16d9fa552f9 100644 --- a/strings/string_switch_case.py +++ b/strings/string_switch_case.py @@ -28,6 +28,12 @@ def to_simple_case(str_: str) -> str: """ >>> to_simple_case("one two 31235three4four") 'OneTwo31235three4four' + >>> to_simple_case("This should be combined") + 'ThisShouldBeCombined' + >>> to_simple_case("The first letters are capitalized, then string is merged") + 'TheFirstLettersAreCapitalizedThenStringIsMerged' + >>> to_simple_case("special characters :, ', %, ^, $, are ignored") + 'SpecialCharactersAreIgnored' """ string_split = split_input(str_) return "".join( @@ -37,6 +43,14 @@ def to_simple_case(str_: str) -> str: def to_complex_case(text: str, upper: bool, separator: str) -> str: """ + Returns the string concatenated with the delimiter we provide. 
+ + Parameters: + @text: The string on which we want to perform operation + @upper: Boolean value to determine whether we want capitalized result or not + @separator: The delimiter with which we want to concatenate words + + Examples: >>> to_complex_case("one two 31235three4four", True, "_") 'ONE_TWO_31235THREE4FOUR' >>> to_complex_case("one two 31235three4four", False, "-") From a13e9c21374caf40652ee75cc3620f3ac0c72ff3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 06:49:09 +0600 Subject: [PATCH 1297/1543] [pre-commit.ci] pre-commit autoupdate (#11146) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.3 → v0.1.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.3...v0.1.4) - [github.com/tox-dev/pyproject-fmt: 1.3.0 → 1.4.1](https://github.com/tox-dev/pyproject-fmt/compare/1.3.0...1.4.1) * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 2 ++ pyproject.toml | 8 ++++---- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 784993e6b00c..1bb3de782275 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.3 + rev: v0.1.4 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.3.0" + rev: "1.4.1" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index ee4a521f708b..cb4b00b045b5 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -725,6 +725,7 @@ * [Carmichael Number](maths/special_numbers/carmichael_number.py) * [Catalan Number](maths/special_numbers/catalan_number.py) * [Hamming Numbers](maths/special_numbers/hamming_numbers.py) + * [Happy Number](maths/special_numbers/happy_number.py) * [Harshad Numbers](maths/special_numbers/harshad_numbers.py) * [Hexagonal Number](maths/special_numbers/hexagonal_number.py) * [Krishnamurthy Number](maths/special_numbers/krishnamurthy_number.py) @@ -1310,6 +1311,7 @@ * [Get Amazon Product Data](web_programming/get_amazon_product_data.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) + * [Get Ip Geolocation](web_programming/get_ip_geolocation.py) * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) * [Get User Tweets](web_programming/get_user_tweets.py) diff --git a/pyproject.toml b/pyproject.toml index 5d27142d16e2..c7163dc78371 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -117,6 +117,10 @@ max-branches = 20 # default: 12 max-returns = 8 # default: 6 max-statements = 88 # default: 50 +[tool.codespell] +ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" +skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" + [tool.pytest.ini_options] markers = [ "mat_ops: mark a test as utilizing 
matrix operations.", @@ -133,7 +137,3 @@ omit = [ "project_euler/*" ] sort = "Cover" - -[tool.codespell] -ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" -skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" From 8b7352626e54b619113b771a7e9586aabe603fa7 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sun, 12 Nov 2023 07:43:04 +0530 Subject: [PATCH 1298/1543] Added doctest to randomized_heap.py (#11151) --- data_structures/heap/randomized_heap.py | 30 +++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/data_structures/heap/randomized_heap.py b/data_structures/heap/randomized_heap.py index c0f9888f80c7..12888c1f4089 100644 --- a/data_structures/heap/randomized_heap.py +++ b/data_structures/heap/randomized_heap.py @@ -22,14 +22,40 @@ def __init__(self, value: T) -> None: @property def value(self) -> T: - """Return the value of the node.""" + """ + Return the value of the node. + + >>> rhn = RandomizedHeapNode(10) + >>> rhn.value + 10 + >>> rhn = RandomizedHeapNode(-10) + >>> rhn.value + -10 + """ return self._value @staticmethod def merge( root1: RandomizedHeapNode[T] | None, root2: RandomizedHeapNode[T] | None ) -> RandomizedHeapNode[T] | None: - """Merge 2 nodes together.""" + """ + Merge 2 nodes together. + + >>> rhn1 = RandomizedHeapNode(10) + >>> rhn2 = RandomizedHeapNode(20) + >>> RandomizedHeapNode.merge(rhn1, rhn2).value + 10 + + >>> rhn1 = RandomizedHeapNode(20) + >>> rhn2 = RandomizedHeapNode(10) + >>> RandomizedHeapNode.merge(rhn1, rhn2).value + 10 + + >>> rhn1 = RandomizedHeapNode(5) + >>> rhn2 = RandomizedHeapNode(0) + >>> RandomizedHeapNode.merge(rhn1, rhn2).value + 0 + """ if not root1: return root2 From fb17eeab7d1fbc608a538b6d154d2c08781e087d Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sun, 12 Nov 2023 07:46:43 +0530 Subject: [PATCH 1299/1543] Added doctest to stack.py (#11149) --- data_structures/stacks/stack.py | 92 +++++++++++++++++++++++++++++++-- 1 file changed, 87 insertions(+), 5 deletions(-) diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index a14f4648a399..93698f5aa116 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -33,7 +33,23 @@ def __str__(self) -> str: return str(self.stack) def push(self, data: T) -> None: - """Push an element to the top of the stack.""" + """ + Push an element to the top of the stack. + + >>> S = Stack(2) # stack size = 2 + >>> S.push(10) + >>> S.push(20) + >>> print(S) + [10, 20] + + >>> S = Stack(1) # stack size = 1 + >>> S.push(10) + >>> S.push(20) + Traceback (most recent call last): + ... + data_structures.stacks.stack.StackOverflowError + + """ if len(self.stack) >= self.limit: raise StackOverflowError self.stack.append(data) @@ -42,6 +58,12 @@ def pop(self) -> T: """ Pop an element off of the top of the stack. + >>> S = Stack() + >>> S.push(-5) + >>> S.push(10) + >>> S.pop() + 10 + >>> Stack().pop() Traceback (most recent call last): ... @@ -55,7 +77,13 @@ def peek(self) -> T: """ Peek at the top-most element of the stack. - >>> Stack().pop() + >>> S = Stack() + >>> S.push(-5) + >>> S.push(10) + >>> S.peek() + 10 + + >>> Stack().peek() Traceback (most recent call last): ... 
data_structures.stacks.stack.StackUnderflowError @@ -65,18 +93,68 @@ def peek(self) -> T: return self.stack[-1] def is_empty(self) -> bool: - """Check if a stack is empty.""" + """ + Check if a stack is empty. + + >>> S = Stack() + >>> S.is_empty() + True + + >>> S = Stack() + >>> S.push(10) + >>> S.is_empty() + False + """ return not bool(self.stack) def is_full(self) -> bool: + """ + >>> S = Stack() + >>> S.is_full() + False + + >>> S = Stack(1) + >>> S.push(10) + >>> S.is_full() + True + """ return self.size() == self.limit def size(self) -> int: - """Return the size of the stack.""" + """ + Return the size of the stack. + + >>> S = Stack(3) + >>> S.size() + 0 + + >>> S = Stack(3) + >>> S.push(10) + >>> S.size() + 1 + + >>> S = Stack(3) + >>> S.push(10) + >>> S.push(20) + >>> S.size() + 2 + """ return len(self.stack) def __contains__(self, item: T) -> bool: - """Check if item is in stack""" + """ + Check if item is in stack + + >>> S = Stack(3) + >>> S.push(10) + >>> 10 in S + True + + >>> S = Stack(3) + >>> S.push(10) + >>> 20 in S + False + """ return item in self.stack @@ -131,3 +209,7 @@ def test_stack() -> None: if __name__ == "__main__": test_stack() + + import doctest + + doctest.testmod() From 0e2e6abd6f24d0d816212ff0480a18abecd3028b Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Mon, 13 Nov 2023 16:35:22 +0530 Subject: [PATCH 1300/1543] Added doctest to heap.py (#11129) * Added doctest to heap.py * Update heap.py --- data_structures/heap/heap.py | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index 29bff3af07e3..7b15e69f13ca 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -53,7 +53,37 @@ def __repr__(self) -> str: return str(self.h) def parent_index(self, child_idx: int) -> int | None: - """return the parent index of given child""" + """ + returns the parent index based on the given child index + + >>> h = Heap() + >>> h.build_max_heap([103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5]) + >>> h + [209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5] + + >>> h.parent_index(-1) # returns none if index is <=0 + + >>> h.parent_index(0) # returns none if index is <=0 + + >>> h.parent_index(1) + 0 + >>> h.parent_index(2) + 0 + >>> h.parent_index(3) + 1 + >>> h.parent_index(4) + 1 + >>> h.parent_index(5) + 2 + >>> h.parent_index(10.5) + 4.0 + >>> h.parent_index(209.0) + 104.0 + >>> h.parent_index("Test") + Traceback (most recent call last): + ... 
+ TypeError: '>' not supported between instances of 'str' and 'int' + """ if child_idx > 0: return (child_idx - 1) // 2 return None From 5f61af4fbbab33704b4aebd6523c64f8e6360869 Mon Sep 17 00:00:00 2001 From: MC <129918860+FishyGitHubUser@users.noreply.github.com> Date: Thu, 16 Nov 2023 19:00:48 +0800 Subject: [PATCH 1301/1543] Fix ignore venv in build_directory_md.py (#11156) Co-authored-by: MICHAEL CASTLE --- scripts/build_directory_md.py | 6 +++++- web_programming/{get_imdbtop.py => get_imdbtop.py.DISABLED} | 0 2 files changed, 5 insertions(+), 1 deletion(-) rename web_programming/{get_imdbtop.py => get_imdbtop.py.DISABLED} (100%) diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py index 24bc00cd036f..aa95b95db4b5 100755 --- a/scripts/build_directory_md.py +++ b/scripts/build_directory_md.py @@ -6,7 +6,11 @@ def good_file_paths(top_dir: str = ".") -> Iterator[str]: for dir_path, dir_names, filenames in os.walk(top_dir): - dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"] + dir_names[:] = [ + d + for d in dir_names + if d != "scripts" and d[0] not in "._" and "venv" not in d + ] for filename in filenames: if filename == "__init__.py": continue diff --git a/web_programming/get_imdbtop.py b/web_programming/get_imdbtop.py.DISABLED similarity index 100% rename from web_programming/get_imdbtop.py rename to web_programming/get_imdbtop.py.DISABLED From 3999abfea392209fcb67c2218774a229878cf4cb Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Fri, 24 Nov 2023 20:00:21 +0200 Subject: [PATCH 1302/1543] adding a geometry module (#11138) * adding a geometry module * fixing errors and adding type hints * Create code_review_feedback.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * implementing suggestions * fixing ruff errors * Update geometry/code_review_feedback.py * Update geometry/code_review_feedback.py * Update geometry/geometry.py * Apply suggestions from code review * Delete geometry/code_review_feedback.py * Update geometry/geometry.py * Update geometry/geometry.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- geometry/geometry.py | 259 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 259 insertions(+) create mode 100644 geometry/geometry.py diff --git a/geometry/geometry.py b/geometry/geometry.py new file mode 100644 index 000000000000..9e353dee17a7 --- /dev/null +++ b/geometry/geometry.py @@ -0,0 +1,259 @@ +from __future__ import annotations + +import math +from dataclasses import dataclass, field +from types import NoneType +from typing import Self + +# Building block classes + + +@dataclass +class Angle: + """ + An Angle in degrees (unit of measurement) + + >>> Angle() + Angle(degrees=90) + >>> Angle(45.5) + Angle(degrees=45.5) + >>> Angle(-1) + Traceback (most recent call last): + ... + TypeError: degrees must be a numeric value between 0 and 360. + >>> Angle(361) + Traceback (most recent call last): + ... + TypeError: degrees must be a numeric value between 0 and 360. + """ + + degrees: float = 90 + + def __post_init__(self) -> None: + if not isinstance(self.degrees, (int, float)) or not 0 <= self.degrees <= 360: + raise TypeError("degrees must be a numeric value between 0 and 360.") + + +@dataclass +class Side: + """ + A side of a two dimensional Shape such as Polygon, etc. 
+ adjacent_sides: a list of sides which are adjacent to the current side + angle: the angle in degrees between each adjacent side + length: the length of the current side in meters + + >>> Side(5) + Side(length=5, angle=Angle(degrees=90), next_side=None) + >>> Side(5, Angle(45.6)) + Side(length=5, angle=Angle(degrees=45.6), next_side=None) + >>> Side(5, Angle(45.6), Side(1, Angle(2))) # doctest: +ELLIPSIS + Side(length=5, angle=Angle(degrees=45.6), next_side=Side(length=1, angle=Angle(d... + """ + + length: float + angle: Angle = field(default_factory=Angle) + next_side: Side | None = None + + def __post_init__(self) -> None: + if not isinstance(self.length, (int, float)) or self.length <= 0: + raise TypeError("length must be a positive numeric value.") + if not isinstance(self.angle, Angle): + raise TypeError("angle must be an Angle object.") + if not isinstance(self.next_side, (Side, NoneType)): + raise TypeError("next_side must be a Side or None.") + + +@dataclass +class Ellipse: + """ + A geometric Ellipse on a 2D surface + + >>> Ellipse(5, 10) + Ellipse(major_radius=5, minor_radius=10) + >>> Ellipse(5, 10) is Ellipse(5, 10) + False + >>> Ellipse(5, 10) == Ellipse(5, 10) + True + """ + + major_radius: float + minor_radius: float + + @property + def area(self) -> float: + """ + >>> Ellipse(5, 10).area + 157.07963267948966 + """ + return math.pi * self.major_radius * self.minor_radius + + @property + def perimeter(self) -> float: + """ + >>> Ellipse(5, 10).perimeter + 47.12388980384689 + """ + return math.pi * (self.major_radius + self.minor_radius) + + +class Circle(Ellipse): + """ + A geometric Circle on a 2D surface + + >>> Circle(5) + Circle(radius=5) + >>> Circle(5) is Circle(5) + False + >>> Circle(5) == Circle(5) + True + >>> Circle(5).area + 78.53981633974483 + >>> Circle(5).perimeter + 31.41592653589793 + """ + + def __init__(self, radius: float) -> None: + super().__init__(radius, radius) + self.radius = radius + + def __repr__(self) -> str: + return f"Circle(radius={self.radius})" + + @property + def diameter(self) -> float: + """ + >>> Circle(5).diameter + 10 + """ + return self.radius * 2 + + def max_parts(self, num_cuts: float) -> float: + """ + Return the maximum number of parts that circle can be divided into if cut + 'num_cuts' times. + + >>> circle = Circle(5) + >>> circle.max_parts(0) + 1.0 + >>> circle.max_parts(7) + 29.0 + >>> circle.max_parts(54) + 1486.0 + >>> circle.max_parts(22.5) + 265.375 + >>> circle.max_parts(-222) + Traceback (most recent call last): + ... + TypeError: num_cuts must be a positive numeric value. + >>> circle.max_parts("-222") + Traceback (most recent call last): + ... + TypeError: num_cuts must be a positive numeric value. + """ + if not isinstance(num_cuts, (int, float)) or num_cuts < 0: + raise TypeError("num_cuts must be a positive numeric value.") + return (num_cuts + 2 + num_cuts**2) * 0.5 + + +@dataclass +class Polygon: + """ + An abstract class which represents Polygon on a 2D surface. + + >>> Polygon() + Polygon(sides=[]) + """ + + sides: list[Side] = field(default_factory=list) + + def add_side(self, side: Side) -> Self: + """ + >>> Polygon().add_side(Side(5)) + Polygon(sides=[Side(length=5, angle=Angle(degrees=90), next_side=None)]) + """ + self.sides.append(side) + return self + + def get_side(self, index: int) -> Side: + """ + >>> Polygon().get_side(0) + Traceback (most recent call last): + ... 
+ IndexError: list index out of range + >>> Polygon().add_side(Side(5)).get_side(-1) + Side(length=5, angle=Angle(degrees=90), next_side=None) + """ + return self.sides[index] + + def set_side(self, index: int, side: Side) -> Self: + """ + >>> Polygon().set_side(0, Side(5)) + Traceback (most recent call last): + ... + IndexError: list assignment index out of range + >>> Polygon().add_side(Side(5)).set_side(0, Side(10)) + Polygon(sides=[Side(length=10, angle=Angle(degrees=90), next_side=None)]) + """ + self.sides[index] = side + return self + + +class Rectangle(Polygon): + """ + A geometric rectangle on a 2D surface. + + >>> rectangle_one = Rectangle(5, 10) + >>> rectangle_one.perimeter() + 30 + >>> rectangle_one.area() + 50 + """ + + def __init__(self, short_side_length: float, long_side_length: float) -> None: + super().__init__() + self.short_side_length = short_side_length + self.long_side_length = long_side_length + self.post_init() + + def post_init(self) -> None: + """ + >>> Rectangle(5, 10) # doctest: +NORMALIZE_WHITESPACE + Rectangle(sides=[Side(length=5, angle=Angle(degrees=90), next_side=None), + Side(length=10, angle=Angle(degrees=90), next_side=None)]) + """ + self.short_side = Side(self.short_side_length) + self.long_side = Side(self.long_side_length) + super().add_side(self.short_side) + super().add_side(self.long_side) + + def perimeter(self) -> float: + return (self.short_side.length + self.long_side.length) * 2 + + def area(self) -> float: + return self.short_side.length * self.long_side.length + + +@dataclass +class Square(Rectangle): + """ + a structure which represents a + geometrical square on a 2D surface + >>> square_one = Square(5) + >>> square_one.perimeter() + 20 + >>> square_one.area() + 25 + """ + + def __init__(self, side_length: float) -> None: + super().__init__(side_length, side_length) + + def perimeter(self) -> float: + return super().perimeter() + + def area(self) -> float: + return super().area() + + +if __name__ == "__main__": + __import__("doctest").testmod() From b8e7a4c76c4a4929ac2c7e784b0c151be47c1e6e Mon Sep 17 00:00:00 2001 From: MC <129918860+FishyGitHubUser@users.noreply.github.com> Date: Sat, 25 Nov 2023 19:17:59 +0800 Subject: [PATCH 1303/1543] Fix typo in knight_tour.py (#11173) --- backtracking/knight_tour.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py index cc88307b7fe8..5f7dee8d97bf 100644 --- a/backtracking/knight_tour.py +++ b/backtracking/knight_tour.py @@ -79,7 +79,7 @@ def open_knight_tour(n: int) -> list[list[int]]: >>> open_knight_tour(2) Traceback (most recent call last): ... 
- ValueError: Open Kight Tour cannot be performed on a board of size 2 + ValueError: Open Knight Tour cannot be performed on a board of size 2 """ board = [[0 for i in range(n)] for j in range(n)] @@ -91,7 +91,7 @@ def open_knight_tour(n: int) -> list[list[int]]: return board board[i][j] = 0 - msg = f"Open Kight Tour cannot be performed on a board of size {n}" + msg = f"Open Knight Tour cannot be performed on a board of size {n}" raise ValueError(msg) From 5898b9603bbe9b449cf5a2e331cf0c7d3245a788 Mon Sep 17 00:00:00 2001 From: Rahid Zeynalov <44039543+rahidzeynal@users.noreply.github.com> Date: Sat, 25 Nov 2023 15:25:46 +0400 Subject: [PATCH 1304/1543] Typo deicmal -> decimal (#11169) --- bit_manipulation/is_even.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bit_manipulation/is_even.py b/bit_manipulation/is_even.py index ba036f35aa1e..6f95a1160797 100644 --- a/bit_manipulation/is_even.py +++ b/bit_manipulation/is_even.py @@ -1,7 +1,7 @@ def is_even(number: int) -> bool: """ return true if the input integer is even - Explanation: Lets take a look at the following deicmal to binary conversions + Explanation: Lets take a look at the following decimal to binary conversions 2 => 10 14 => 1110 100 => 1100100 From 4151a13b57fbd881d3fce3bb61101fe58ad541ae Mon Sep 17 00:00:00 2001 From: Clark <1009013283@qq.com> Date: Sat, 25 Nov 2023 20:26:03 +0800 Subject: [PATCH 1305/1543] add graphs/ant_colony_optimization_algorithms.py (#11163) * add ant_colonyant_colony_optimization_algorithms.py * Modify details * Modify type annotation * Add tests for KeyError, IndexError, StopIteration, etc. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- graphs/ant_colony_optimization_algorithms.py | 226 +++++++++++++++++++ 1 file changed, 226 insertions(+) create mode 100644 graphs/ant_colony_optimization_algorithms.py diff --git a/graphs/ant_colony_optimization_algorithms.py b/graphs/ant_colony_optimization_algorithms.py new file mode 100644 index 000000000000..652ad6144297 --- /dev/null +++ b/graphs/ant_colony_optimization_algorithms.py @@ -0,0 +1,226 @@ +""" +Use an ant colony optimization algorithm to solve the travelling salesman problem (TSP) +which asks the following question: +"Given a list of cities and the distances between each pair of cities, what is the + shortest possible route that visits each city exactly once and returns to the origin + city?" + +https://en.wikipedia.org/wiki/Ant_colony_optimization_algorithms +https://en.wikipedia.org/wiki/Travelling_salesman_problem + +Author: Clark +""" + +import copy +import random + +cities = { + 0: [0, 0], + 1: [0, 5], + 2: [3, 8], + 3: [8, 10], + 4: [12, 8], + 5: [12, 4], + 6: [8, 0], + 7: [6, 2], +} + + +def main( + cities: dict[int, list[int]], + ants_num: int, + iterations_num: int, + pheromone_evaporation: float, + alpha: float, + beta: float, + q: float, # Pheromone system parameters Q,which is a constant +) -> tuple[list[int], float]: + """ + Ant colony algorithm main function + >>> main(cities=cities, ants_num=10, iterations_num=20, + ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10) + ([0, 1, 2, 3, 4, 5, 6, 7, 0], 37.909778143828696) + >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5, + ... 
pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10) + ([0, 1, 0], 5.656854249492381) + >>> main(cities={0: [0, 0], 1: [2, 2], 4: [4, 4]}, ants_num=5, iterations_num=5, + ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> main(cities={}, ants_num=5, iterations_num=5, + ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10) + Traceback (most recent call last): + ... + StopIteration + >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=0, iterations_num=5, + ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10) + ([], inf) + >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=0, + ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10) + ([], inf) + >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5, + ... pheromone_evaporation=1, alpha=1.0, beta=5.0, q=10) + ([0, 1, 0], 5.656854249492381) + >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5, + ... pheromone_evaporation=0, alpha=1.0, beta=5.0, q=10) + ([0, 1, 0], 5.656854249492381) + """ + # Initialize the pheromone matrix + cities_num = len(cities) + pheromone = [[1.0] * cities_num] * cities_num + + best_path: list[int] = [] + best_distance = float("inf") + for _ in range(iterations_num): + ants_route = [] + for _ in range(ants_num): + unvisited_cities = copy.deepcopy(cities) + current_city = {next(iter(cities.keys())): next(iter(cities.values()))} + del unvisited_cities[next(iter(current_city.keys()))] + ant_route = [next(iter(current_city.keys()))] + while unvisited_cities: + current_city, unvisited_cities = city_select( + pheromone, current_city, unvisited_cities, alpha, beta + ) + ant_route.append(next(iter(current_city.keys()))) + ant_route.append(0) + ants_route.append(ant_route) + + pheromone, best_path, best_distance = pheromone_update( + pheromone, + cities, + pheromone_evaporation, + ants_route, + q, + best_path, + best_distance, + ) + return best_path, best_distance + + +def distance(city1: list[int], city2: list[int]) -> float: + """ + Calculate the distance between two coordinate points + >>> distance([0, 0], [3, 4] ) + 5.0 + >>> distance([0, 0], [-3, 4] ) + 5.0 + >>> distance([0, 0], [-3, -4] ) + 5.0 + """ + return (((city1[0] - city2[0]) ** 2) + ((city1[1] - city2[1]) ** 2)) ** 0.5 + + +def pheromone_update( + pheromone: list[list[float]], + cities: dict[int, list[int]], + pheromone_evaporation: float, + ants_route: list[list[int]], + q: float, # Pheromone system parameters Q,which is a constant + best_path: list[int], + best_distance: float, +) -> tuple[list[list[float]], list[int], float]: + """ + Update pheromones on the route and update the best route + >>> + >>> pheromone_update(pheromone=[[1.0, 1.0], [1.0, 1.0]], + ... cities={0: [0,0], 1: [2,2]}, pheromone_evaporation=0.7, + ... ants_route=[[0, 1, 0]], q=10, best_path=[], + ... best_distance=float("inf")) + ([[0.7, 4.235533905932737], [4.235533905932737, 0.7]], [0, 1, 0], 5.656854249492381) + >>> pheromone_update(pheromone=[], + ... cities={0: [0,0], 1: [2,2]}, pheromone_evaporation=0.7, + ... ants_route=[[0, 1, 0]], q=10, best_path=[], + ... best_distance=float("inf")) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> pheromone_update(pheromone=[[1.0, 1.0], [1.0, 1.0]], + ... cities={}, pheromone_evaporation=0.7, + ... ants_route=[[0, 1, 0]], q=10, best_path=[], + ... best_distance=float("inf")) + Traceback (most recent call last): + ... 
+ KeyError: 0 + """ + for a in range(len(cities)): # Update the volatilization of pheromone on all routes + for b in range(len(cities)): + pheromone[a][b] *= pheromone_evaporation + for ant_route in ants_route: + total_distance = 0.0 + for i in range(len(ant_route) - 1): # Calculate total distance + total_distance += distance(cities[ant_route[i]], cities[ant_route[i + 1]]) + delta_pheromone = q / total_distance + for i in range(len(ant_route) - 1): # Update pheromones + pheromone[ant_route[i]][ant_route[i + 1]] += delta_pheromone + pheromone[ant_route[i + 1]][ant_route[i]] = pheromone[ant_route[i]][ + ant_route[i + 1] + ] + + if total_distance < best_distance: + best_path = ant_route + best_distance = total_distance + + return pheromone, best_path, best_distance + + +def city_select( + pheromone: list[list[float]], + current_city: dict[int, list[int]], + unvisited_cities: dict[int, list[int]], + alpha: float, + beta: float, +) -> tuple[dict[int, list[int]], dict[int, list[int]]]: + """ + Choose the next city for ants + >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={0: [0, 0]}, + ... unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0) + ({1: [2, 2]}, {}) + >>> city_select(pheromone=[], current_city={0: [0,0]}, + ... unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={}, + ... unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0) + Traceback (most recent call last): + ... + StopIteration + >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={0: [0, 0]}, + ... unvisited_cities={}, alpha=1.0, beta=5.0) + Traceback (most recent call last): + ... + IndexError: list index out of range + """ + probabilities = [] + for city in unvisited_cities: + city_distance = distance( + unvisited_cities[city], next(iter(current_city.values())) + ) + probability = (pheromone[city][next(iter(current_city.keys()))] ** alpha) * ( + (1 / city_distance) ** beta + ) + probabilities.append(probability) + + chosen_city_i = random.choices( + list(unvisited_cities.keys()), weights=probabilities + )[0] + chosen_city = {chosen_city_i: unvisited_cities[chosen_city_i]} + del unvisited_cities[next(iter(chosen_city.keys()))] + return chosen_city, unvisited_cities + + +if __name__ == "__main__": + best_path, best_distance = main( + cities=cities, + ants_num=10, + iterations_num=20, + pheromone_evaporation=0.7, + alpha=1.0, + beta=5.0, + q=10, + ) + + print(f"{best_path = }") + print(f"{best_distance = }") From 050b2a6e2cf0e474b75cf48abe4aa134b97643e4 Mon Sep 17 00:00:00 2001 From: moaldeen <132774635+moaldeen@users.noreply.github.com> Date: Sat, 25 Nov 2023 08:31:17 -0500 Subject: [PATCH 1306/1543] Bug fix combinations (#11158) * Update all_combinations.py The original implementation had limitations in handling edge cases and certain input parameters, leading to potential RecursionError. * Update all_combinations.py Added checks to handle cases where n or k are negative or where k is greater than n. In such scenarios, the function now returns an empty list, avoiding invalid recursive calls. * Update error handling Added checks to handle cases where `n` or `k` are negative or where `k` is greater than `n`. In such scenarios, the function now returns an empty list, avoiding invalid recursive calls. 
* Update backtracking/all_combinations.py * Update all_combinations.py --------- Co-authored-by: Christian Clauss --- backtracking/all_combinations.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py index ecbcc5882ec1..407304948c39 100644 --- a/backtracking/all_combinations.py +++ b/backtracking/all_combinations.py @@ -26,9 +26,11 @@ def generate_all_combinations(n: int, k: int) -> list[list[int]]: >>> generate_all_combinations(n=10, k=-1) Traceback (most recent call last): ... - RecursionError: maximum recursion depth exceeded + ValueError: k must not be negative >>> generate_all_combinations(n=-1, k=10) - [] + Traceback (most recent call last): + ... + ValueError: n must not be negative >>> generate_all_combinations(n=5, k=4) [[1, 2, 3, 4], [1, 2, 3, 5], [1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]] >>> from itertools import combinations @@ -36,6 +38,10 @@ def generate_all_combinations(n: int, k: int) -> list[list[int]]: ... for n in range(1, 6) for k in range(1, 6)) True """ + if k < 0: + raise ValueError("k must not be negative") + if n < 0: + raise ValueError("n must not be negative") result: list[list[int]] = [] create_all_state(1, n, k, [], result) From 8b39a0fb54d0f63489952606d2036d1a63f981e3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 25 Nov 2023 14:53:18 +0100 Subject: [PATCH 1307/1543] [pre-commit.ci] pre-commit autoupdate (#11154) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.4 → v0.1.6](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.4...v0.1.6) - [github.com/psf/black: 23.10.1 → 23.11.0](https://github.com/psf/black/compare/23.10.1...23.11.0) - [github.com/tox-dev/pyproject-fmt: 1.4.1 → 1.5.1](https://github.com/tox-dev/pyproject-fmt/compare/1.4.1...1.5.1) - [github.com/pre-commit/mirrors-mypy: v1.6.1 → v1.7.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.6.1...v1.7.0) * updating DIRECTORY.md * Update spiral_print.py * Update matrix/spiral_print.py * Update matrix/spiral_print.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 8 ++++---- DIRECTORY.md | 1 - matrix/spiral_print.py | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1bb3de782275..9a0f78fdde5a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.4 + rev: v0.1.6 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.10.1 + rev: 23.11.0 hooks: - id: black @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.4.1" + rev: "1.5.1" hooks: - id: pyproject-fmt @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.6.1 + rev: v1.7.0 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index cb4b00b045b5..438950325380 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1310,7 +1310,6 @@ * [Fetch Well Rx Price](web_programming/fetch_well_rx_price.py) * [Get Amazon Product 
Data](web_programming/get_amazon_product_data.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) - * [Get Imdbtop](web_programming/get_imdbtop.py) * [Get Ip Geolocation](web_programming/get_ip_geolocation.py) * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index 5eef263f7aef..7ba0a275157b 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -116,7 +116,7 @@ def spiral_traversal(matrix: list[list]) -> list[int]: [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7] + spiral_traversal([]) """ if matrix: - return list(matrix.pop(0)) + spiral_traversal(list(zip(*matrix))[::-1]) + return list(matrix.pop(0)) + spiral_traversal(list(zip(*matrix))[::-1]) # type: ignore else: return [] From 86ae30d29e4813c2ef071d7d27f1302b6be6cc0c Mon Sep 17 00:00:00 2001 From: Harsh Kumar <61012869+cyrixninja@users.noreply.github.com> Date: Sat, 25 Nov 2023 19:50:42 +0530 Subject: [PATCH 1308/1543] Create Spearman's rank correlation coefficient (#11155) * Create spearman_rank_correlation_coefficient.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed Issues * Added More Description * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed Issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Tried Fixing Issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Tried Fixing Issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed Issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed Issues * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/spearman_rank_correlation_coefficient.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../spearman_rank_correlation_coefficient.py | 82 +++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 maths/spearman_rank_correlation_coefficient.py diff --git a/maths/spearman_rank_correlation_coefficient.py b/maths/spearman_rank_correlation_coefficient.py new file mode 100644 index 000000000000..32ff6b9e3d71 --- /dev/null +++ b/maths/spearman_rank_correlation_coefficient.py @@ -0,0 +1,82 @@ +from collections.abc import Sequence + + +def assign_ranks(data: Sequence[float]) -> list[int]: + """ + Assigns ranks to elements in the array. + + :param data: List of floats. + :return: List of ints representing the ranks. + + Example: + >>> assign_ranks([3.2, 1.5, 4.0, 2.7, 5.1]) + [3, 1, 4, 2, 5] + + >>> assign_ranks([10.5, 8.1, 12.4, 9.3, 11.0]) + [3, 1, 5, 2, 4] + """ + ranked_data = sorted((value, index) for index, value in enumerate(data)) + ranks = [0] * len(data) + + for position, (_, index) in enumerate(ranked_data): + ranks[index] = position + 1 + + return ranks + + +def calculate_spearman_rank_correlation( + variable_1: Sequence[float], variable_2: Sequence[float] +) -> float: + """ + Calculates Spearman's rank correlation coefficient. + + :param variable_1: List of floats representing the first variable. 
+ :param variable_2: List of floats representing the second variable. + :return: Spearman's rank correlation coefficient. + + Example Usage: + + >>> x = [1, 2, 3, 4, 5] + >>> y = [5, 4, 3, 2, 1] + >>> calculate_spearman_rank_correlation(x, y) + -1.0 + + >>> x = [1, 2, 3, 4, 5] + >>> y = [2, 4, 6, 8, 10] + >>> calculate_spearman_rank_correlation(x, y) + 1.0 + + >>> x = [1, 2, 3, 4, 5] + >>> y = [5, 1, 2, 9, 5] + >>> calculate_spearman_rank_correlation(x, y) + 0.6 + """ + n = len(variable_1) + rank_var1 = assign_ranks(variable_1) + rank_var2 = assign_ranks(variable_2) + + # Calculate differences of ranks + d = [rx - ry for rx, ry in zip(rank_var1, rank_var2)] + + # Calculate the sum of squared differences + d_squared = sum(di**2 for di in d) + + # Calculate the Spearman's rank correlation coefficient + rho = 1 - (6 * d_squared) / (n * (n**2 - 1)) + + return rho + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Example usage: + print( + f"{calculate_spearman_rank_correlation([1, 2, 3, 4, 5], [2, 4, 6, 8, 10]) = }" + ) + + print(f"{calculate_spearman_rank_correlation([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]) = }") + + print(f"{calculate_spearman_rank_correlation([1, 2, 3, 4, 5], [5, 1, 2, 9, 5]) = }") From 84a1533fd5d262dae767a9298de1c1d7fcb2bec9 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sat, 25 Nov 2023 19:59:44 +0530 Subject: [PATCH 1309/1543] Added doctest to binary_search_tree.py (#11145) * Added doctest to binary_search_tree.py * Apply suggestions from code review --------- Co-authored-by: Christian Clauss --- .../binary_tree/binary_search_tree.py | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index f08f278a8e47..9071f03dcc8c 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -198,6 +198,30 @@ def insert(self, *values) -> Self: return self def search(self, value) -> Node | None: + """ + >>> tree = BinarySearchTree().insert(10, 20, 30, 40, 50) + >>> tree.search(10) + {'10': (None, {'20': (None, {'30': (None, {'40': (None, 50)})})})} + >>> tree.search(20) + {'20': (None, {'30': (None, {'40': (None, 50)})})} + >>> tree.search(30) + {'30': (None, {'40': (None, 50)})} + >>> tree.search(40) + {'40': (None, 50)} + >>> tree.search(50) + 50 + >>> tree.search(5) is None # element not present + True + >>> tree.search(0) is None # element not present + True + >>> tree.search(-5) is None # element not present + True + >>> BinarySearchTree().search(10) + Traceback (most recent call last): + ... + IndexError: Warning: Tree is empty! please use another. + """ + if self.empty(): raise IndexError("Warning: Tree is empty! 
please use another.") else: @@ -210,6 +234,15 @@ def search(self, value) -> Node | None: def get_max(self, node: Node | None = None) -> Node | None: """ We go deep on the right branch + + >>> BinarySearchTree().insert(10, 20, 30, 40, 50).get_max() + 50 + >>> BinarySearchTree().insert(-5, -1, 0.1, -0.3, -4.5).get_max() + {'0.1': (-0.3, None)} + >>> BinarySearchTree().insert(1, 78.3, 30, 74.0, 1).get_max() + {'78.3': ({'30': (1, 74.0)}, None)} + >>> BinarySearchTree().insert(1, 783, 30, 740, 1).get_max() + {'783': ({'30': (1, 740)}, None)} """ if node is None: if self.root is None: @@ -224,6 +257,15 @@ def get_max(self, node: Node | None = None) -> Node | None: def get_min(self, node: Node | None = None) -> Node | None: """ We go deep on the left branch + + >>> BinarySearchTree().insert(10, 20, 30, 40, 50).get_min() + {'10': (None, {'20': (None, {'30': (None, {'40': (None, 50)})})})} + >>> BinarySearchTree().insert(-5, -1, 0, -0.3, -4.5).get_min() + {'-5': (None, {'-1': (-4.5, {'0': (-0.3, None)})})} + >>> BinarySearchTree().insert(1, 78.3, 30, 74.0, 1).get_min() + {'1': (None, {'78.3': ({'30': (1, 74.0)}, None)})} + >>> BinarySearchTree().insert(1, 783, 30, 740, 1).get_min() + {'1': (None, {'783': ({'30': (1, 740)}, None)})} """ if node is None: node = self.root From 154e5e8681b7ae9711fbef0b89f0ce365a8bf5bf Mon Sep 17 00:00:00 2001 From: Pedram_Mohajer <48964282+pedram-mohajer@users.noreply.github.com> Date: Sun, 26 Nov 2023 17:46:54 -0500 Subject: [PATCH 1310/1543] Update levenshtein_distance.py (#11171) * Update levenshtein_distance.py * Update levenshtein_distance.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update levenshtein_distance.py * Update levenshtein_distance.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update levenshtein_distance.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update levenshtein_distance.py * Update levenshtein_distance.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- strings/levenshtein_distance.py | 93 ++++++++++++++++++++++++++------- 1 file changed, 74 insertions(+), 19 deletions(-) diff --git a/strings/levenshtein_distance.py b/strings/levenshtein_distance.py index 7be4074dc39b..3af6608723a5 100644 --- a/strings/levenshtein_distance.py +++ b/strings/levenshtein_distance.py @@ -1,20 +1,9 @@ -""" -This is a Python implementation of the levenshtein distance. -Levenshtein distance is a string metric for measuring the -difference between two sequences. - -For doctests run following command: -python -m doctest -v levenshtein-distance.py -or -python3 -m doctest -v levenshtein-distance.py - -For manual testing run: -python levenshtein-distance.py -""" +from collections.abc import Callable def levenshtein_distance(first_word: str, second_word: str) -> int: - """Implementation of the levenshtein distance in Python. + """ + Implementation of the Levenshtein distance in Python. :param first_word: the first word to measure the difference. :param second_word: the second word to measure the difference. :return: the levenshtein distance between the two words. 
@@ -47,7 +36,7 @@ def levenshtein_distance(first_word: str, second_word: str) -> int: current_row = [i + 1] for j, c2 in enumerate(second_word): - # Calculate insertions, deletions and substitutions + # Calculate insertions, deletions, and substitutions insertions = previous_row[j + 1] + 1 deletions = current_row[j] + 1 substitutions = previous_row[j] + (c1 != c2) @@ -62,9 +51,75 @@ def levenshtein_distance(first_word: str, second_word: str) -> int: return previous_row[-1] +def levenshtein_distance_optimized(first_word: str, second_word: str) -> int: + """ + Compute the Levenshtein distance between two words (strings). + The function is optimized for efficiency by modifying rows in place. + :param first_word: the first word to measure the difference. + :param second_word: the second word to measure the difference. + :return: the Levenshtein distance between the two words. + Examples: + >>> levenshtein_distance_optimized("planet", "planetary") + 3 + >>> levenshtein_distance_optimized("", "test") + 4 + >>> levenshtein_distance_optimized("book", "back") + 2 + >>> levenshtein_distance_optimized("book", "book") + 0 + >>> levenshtein_distance_optimized("test", "") + 4 + >>> levenshtein_distance_optimized("", "") + 0 + >>> levenshtein_distance_optimized("orchestration", "container") + 10 + """ + if len(first_word) < len(second_word): + return levenshtein_distance_optimized(second_word, first_word) + + if len(second_word) == 0: + return len(first_word) + + previous_row = list(range(len(second_word) + 1)) + + for i, c1 in enumerate(first_word): + current_row = [i + 1] + [0] * len(second_word) + + for j, c2 in enumerate(second_word): + insertions = previous_row[j + 1] + 1 + deletions = current_row[j] + 1 + substitutions = previous_row[j] + (c1 != c2) + current_row[j + 1] = min(insertions, deletions, substitutions) + + previous_row = current_row + + return previous_row[-1] + + +def benchmark_levenshtein_distance(func: Callable) -> None: + """ + Benchmark the Levenshtein distance function. + :param str: The name of the function being benchmarked. + :param func: The function to be benchmarked. 
+ """ + from timeit import timeit + + stmt = f"{func.__name__}('sitting', 'kitten')" + setup = f"from __main__ import {func.__name__}" + number = 25_000 + result = timeit(stmt=stmt, setup=setup, number=number) + print(f"{func.__name__:<30} finished {number:,} runs in {result:.5f} seconds") + + if __name__ == "__main__": - first_word = input("Enter the first word:\n").strip() - second_word = input("Enter the second word:\n").strip() + # Get user input for words + first_word = input("Enter the first word for Levenshtein distance:\n").strip() + second_word = input("Enter the second word for Levenshtein distance:\n").strip() + + # Calculate and print Levenshtein distances + print(f"{levenshtein_distance(first_word, second_word) = }") + print(f"{levenshtein_distance_optimized(first_word, second_word) = }") - result = levenshtein_distance(first_word, second_word) - print(f"Levenshtein distance between {first_word} and {second_word} is {result}") + # Benchmark the Levenshtein distance functions + benchmark_levenshtein_distance(levenshtein_distance) + benchmark_levenshtein_distance(levenshtein_distance_optimized) From b8600035768da179adc709814f4b455b844982cc Mon Sep 17 00:00:00 2001 From: Pedram_Mohajer <48964282+pedram-mohajer@users.noreply.github.com> Date: Mon, 27 Nov 2023 12:43:51 -0500 Subject: [PATCH 1311/1543] Add doctest to is_safe function (#11183) --- backtracking/n_queens.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index 0f237d95e7c8..2cd8c703fc72 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -24,6 +24,10 @@ def is_safe(board: list[list[int]], row: int, column: int) -> bool: Returns: Boolean Value + >>> is_safe([[0, 0, 0], [0, 0, 0], [0, 0, 0]], 1, 1) + True + >>> is_safe([[1, 0, 0], [0, 0, 0], [0, 0, 0]], 1, 1) + False """ n = len(board) # Size of the board From 0ac97f359f2c4b1a4b96db6a083fac95ca0cfe97 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 27 Nov 2023 19:13:24 +0100 Subject: [PATCH 1312/1543] [pre-commit.ci] pre-commit autoupdate (#11184) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/pre-commit/mirrors-mypy: v1.7.0 → v1.7.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.7.0...v1.7.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9a0f78fdde5a..28f83a638d7b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.7.0 + rev: v1.7.1 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 438950325380..ea0ba22bcc13 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -428,12 +428,16 @@ * [Haversine Distance](geodesy/haversine_distance.py) * [Lamberts Ellipsoidal Distance](geodesy/lamberts_ellipsoidal_distance.py) +## Geometry + * [Geometry](geometry/geometry.py) + ## Graphics * [Bezier Curve](graphics/bezier_curve.py) * [Vector3 For 2D Rendering](graphics/vector3_for_2d_rendering.py) ## Graphs * [A Star](graphs/a_star.py) + * [Ant Colony Optimization 
Algorithms](graphs/ant_colony_optimization_algorithms.py) * [Articulation Points](graphs/articulation_points.py) * [Basic Graphs](graphs/basic_graphs.py) * [Bellman Ford](graphs/bellman_ford.py) @@ -718,6 +722,7 @@ * [Sock Merchant](maths/sock_merchant.py) * [Softmax](maths/softmax.py) * [Solovay Strassen Primality Test](maths/solovay_strassen_primality_test.py) + * [Spearman Rank Correlation Coefficient](maths/spearman_rank_correlation_coefficient.py) * Special Numbers * [Armstrong Numbers](maths/special_numbers/armstrong_numbers.py) * [Automorphic Number](maths/special_numbers/automorphic_number.py) From 82e539dc8226abe803aa562402cfe9f19ded9e22 Mon Sep 17 00:00:00 2001 From: Pedram_Mohajer <48964282+pedram-mohajer@users.noreply.github.com> Date: Fri, 1 Dec 2023 11:53:47 -0500 Subject: [PATCH 1313/1543] Create smallestRange.py (#11179) * Create smallestRange.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update smallestRange.py * Update smallestRange.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update smallestRange.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename smallestRange.py to smallestrange.py * Update smallestrange.py * Update smallestrange.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update smallestrange.py * Rename smallestrange.py to smallest_range.py * Update smallest_range.py * Update smallest_range.py * Update smallest_range.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- greedy_methods/smallest_range.py | 71 ++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 greedy_methods/smallest_range.py diff --git a/greedy_methods/smallest_range.py b/greedy_methods/smallest_range.py new file mode 100644 index 000000000000..e2b7f8d7e96a --- /dev/null +++ b/greedy_methods/smallest_range.py @@ -0,0 +1,71 @@ +""" +smallest_range function takes a list of sorted integer lists and finds the smallest +range that includes at least one number from each list, using a min heap for efficiency. +""" + +from heapq import heappop, heappush +from sys import maxsize + + +def smallest_range(nums: list[list[int]]) -> list[int]: + """ + Find the smallest range from each list in nums. + + Uses min heap for efficiency. The range includes at least one number from each list. + + Args: + nums: List of k sorted integer lists. + + Returns: + list: Smallest range as a two-element list. + + Examples: + >>> smallest_range([[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]]) + [20, 24] + >>> smallest_range([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + [1, 1] + >>> smallest_range(((1, 2, 3), (1, 2, 3), (1, 2, 3))) + [1, 1] + >>> smallest_range(((-3, -2, -1), (0, 0, 0), (1, 2, 3))) + [-1, 1] + >>> smallest_range([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + [3, 7] + >>> smallest_range([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) + [0, 0] + >>> smallest_range([[], [], []]) + Traceback (most recent call last): + ... 
+ IndexError: list index out of range + """ + + min_heap: list[tuple[int, int, int]] = [] + current_max = -maxsize - 1 + + for i, items in enumerate(nums): + heappush(min_heap, (items[0], i, 0)) + current_max = max(current_max, items[0]) + + # Initialize smallest_range with large integer values + smallest_range = [-maxsize - 1, maxsize] + + while min_heap: + current_min, list_index, element_index = heappop(min_heap) + + if current_max - current_min < smallest_range[1] - smallest_range[0]: + smallest_range = [current_min, current_max] + + if element_index == len(nums[list_index]) - 1: + break + + next_element = nums[list_index][element_index + 1] + heappush(min_heap, (next_element, list_index, element_index + 1)) + current_max = max(current_max, next_element) + + return smallest_range + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + print(f"{smallest_range([[1, 2, 3], [1, 2, 3], [1, 2, 3]])}") # Output: [1, 1] From a73f37b2ecf29aeee1b0417ac53016f5ad0fbeee Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 20:00:34 +0100 Subject: [PATCH 1314/1543] [pre-commit.ci] pre-commit autoupdate (#11195) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/tox-dev/pyproject-fmt: 1.5.1 → 1.5.3](https://github.com/tox-dev/pyproject-fmt/compare/1.5.1...1.5.3) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 28f83a638d7b..5ec7a5765817 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.5.1" + rev: "1.5.3" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index ea0ba22bcc13..2ee72df37f3f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -507,6 +507,7 @@ * [Minimum Coin Change](greedy_methods/minimum_coin_change.py) * [Minimum Waiting Time](greedy_methods/minimum_waiting_time.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) + * [Smallest Range](greedy_methods/smallest_range.py) ## Hashes * [Adler32](hashes/adler32.py) From c14a580c9e7340ee1d826a52af0f95c077b564b4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 19:27:15 +0100 Subject: [PATCH 1315/1543] [pre-commit.ci] pre-commit autoupdate (#11210) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.6 → v0.1.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.6...v0.1.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5ec7a5765817..9688f1cbb5fc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.6 + rev: v0.1.7 hooks: - id: ruff From 2d0ed135a08dbe7da8c696d70ee7fb1a01f2cc91 Mon Sep 17 00:00:00 2001 From: 
"pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 21:17:46 +0100 Subject: [PATCH 1316/1543] [pre-commit.ci] pre-commit autoupdate (#11215) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.7 → v0.1.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.7...v0.1.8) - [github.com/psf/black: 23.11.0 → 23.12.0](https://github.com/psf/black/compare/23.11.0...23.12.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9688f1cbb5fc..c8a11e38aeab 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.7 + rev: v0.1.8 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.11.0 + rev: 23.12.0 hooks: - id: black From b46fc1de04350f91971187d831d8e3292ea0bace Mon Sep 17 00:00:00 2001 From: Indrajeet Mishra Date: Wed, 20 Dec 2023 04:35:27 +0530 Subject: [PATCH 1317/1543] Corrected the Python Doctest command in equilibrium_index_in_array.py script (#11212) Co-authored-by: Indrajeet Mishra --- data_structures/arrays/equilibrium_index_in_array.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/arrays/equilibrium_index_in_array.py b/data_structures/arrays/equilibrium_index_in_array.py index 8802db6206bb..0717a45d9f4b 100644 --- a/data_structures/arrays/equilibrium_index_in_array.py +++ b/data_structures/arrays/equilibrium_index_in_array.py @@ -3,7 +3,7 @@ Reference: https://www.geeksforgeeks.org/equilibrium-index-of-an-array/ Python doctest can be run with the following command: -python -m doctest -v equilibrium_index.py +python -m doctest -v equilibrium_index_in_array.py Given a sequence arr[] of size n, this function returns an equilibrium index (if any) or -1 if no equilibrium index exists. From 7b9f82cc447c2d2ce91373c097bf610d5b0f906a Mon Sep 17 00:00:00 2001 From: Tushar Pamnani <121151091+tusharpamnani@users.noreply.github.com> Date: Wed, 20 Dec 2023 07:29:51 +0530 Subject: [PATCH 1318/1543] optimize quicksort implementation (#11196) * optimize quicksort implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Update quick_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- sorts/quick_sort.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/sorts/quick_sort.py b/sorts/quick_sort.py index b79d3eac3e48..6b95fc144426 100644 --- a/sorts/quick_sort.py +++ b/sorts/quick_sort.py @@ -13,10 +13,10 @@ def quick_sort(collection: list) -> list: - """A pure Python implementation of quick sort algorithm + """A pure Python implementation of quicksort algorithm. 
:param collection: a mutable collection of comparable items - :return: the same collection ordered by ascending + :return: the same collection ordered in ascending order Examples: >>> quick_sort([0, 5, 3, 2, 2]) @@ -26,23 +26,26 @@ def quick_sort(collection: list) -> list: >>> quick_sort([-2, 5, 0, -45]) [-45, -2, 0, 5] """ + # Base case: if the collection has 0 or 1 elements, it is already sorted if len(collection) < 2: return collection - pivot_index = randrange(len(collection)) # Use random element as pivot - pivot = collection[pivot_index] - greater: list[int] = [] # All elements greater than pivot - lesser: list[int] = [] # All elements less than or equal to pivot - for element in collection[:pivot_index]: - (greater if element > pivot else lesser).append(element) + # Randomly select a pivot index and remove the pivot element from the collection + pivot_index = randrange(len(collection)) + pivot = collection.pop(pivot_index) - for element in collection[pivot_index + 1 :]: - (greater if element > pivot else lesser).append(element) + # Partition the remaining elements into two groups: lesser or equal, and greater + lesser = [item for item in collection if item <= pivot] + greater = [item for item in collection if item > pivot] + # Recursively sort the lesser and greater groups, and combine with the pivot return [*quick_sort(lesser), pivot, *quick_sort(greater)] if __name__ == "__main__": + # Get user input and convert it into a list of integers user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] + + # Print the result of sorting the user-provided list print(quick_sort(unsorted)) From 94c8e1ab73032d27bc8c60b733bb93393b9f1b02 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 25 Dec 2023 19:24:39 +0100 Subject: [PATCH 1319/1543] [pre-commit.ci] pre-commit autoupdate (#11223) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.8 → v0.1.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.8...v0.1.9) - [github.com/psf/black: 23.12.0 → 23.12.1](https://github.com/psf/black/compare/23.12.0...23.12.1) - [github.com/pre-commit/mirrors-mypy: v1.7.1 → v1.8.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.7.1...v1.8.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c8a11e38aeab..61ec3a54a69c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.8 + rev: v0.1.9 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.12.0 + rev: 23.12.1 hooks: - id: black @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.7.1 + rev: v1.8.0 hooks: - id: mypy args: From 51c5c87b9ab4eb04c3825cd20cfdba0f31a098f5 Mon Sep 17 00:00:00 2001 From: Param Thakkar <128291516+ParamThakkar123@users.noreply.github.com> Date: Wed, 27 Dec 2023 14:05:29 +0530 Subject: [PATCH 1320/1543] File moved to neural_network/activation_functions (#11216) * added GELU activation functions file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update 
gaussian_error_linear_unit.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_error_linear_unit.py * Delete neural_network/activation_functions/gaussian_error_linear_unit.py * Rename maths/gaussian_error_linear_unit.py to neural_network/activation_functions/gaussian_error_linear_unit.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../activation_functions}/gaussian_error_linear_unit.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {maths => neural_network/activation_functions}/gaussian_error_linear_unit.py (100%) diff --git a/maths/gaussian_error_linear_unit.py b/neural_network/activation_functions/gaussian_error_linear_unit.py similarity index 100% rename from maths/gaussian_error_linear_unit.py rename to neural_network/activation_functions/gaussian_error_linear_unit.py From 9caf4784aada17dc75348f77cc8c356df503c0f3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 19:16:25 +0100 Subject: [PATCH 1321/1543] [pre-commit.ci] pre-commit autoupdate (#11231) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.9 → v0.1.11](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.9...v0.1.11) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 61ec3a54a69c..0e06ba7a5250 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.9 + rev: v0.1.11 hooks: - id: ruff diff --git a/DIRECTORY.md b/DIRECTORY.md index 2ee72df37f3f..b5392fd09114 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -631,7 +631,6 @@ * [Floor](maths/floor.py) * [Gamma](maths/gamma.py) * [Gaussian](maths/gaussian.py) - * [Gaussian Error Linear Unit](maths/gaussian_error_linear_unit.py) * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) @@ -791,6 +790,7 @@ * Activation Functions * [Binary Step](neural_network/activation_functions/binary_step.py) * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) + * [Gaussian Error Linear Unit](neural_network/activation_functions/gaussian_error_linear_unit.py) * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) * [Mish](neural_network/activation_functions/mish.py) * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) From 227944eb2933b22a102eb88703b4a0b648f39af5 Mon Sep 17 00:00:00 2001 From: Piotr Idzik <65706193+vil02@users.noreply.github.com> Date: Fri, 12 Jan 2024 17:12:15 +0100 Subject: [PATCH 1322/1543] fix: consider months and days in `years_old` (#11234) * fix: do not consider months in `calculate_age` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update get_top_billionaires.py * Update get_top_billionaires.py * Update 
get_top_billionaires.py * TODAY = datetime.utcnow() * Update get_top_billionaires.py * Update build.yml --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .github/workflows/build.yml | 2 +- web_programming/get_top_billionaires.py | 72 ++++++++++++------------- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 60c1d6d119d0..1631feb2ba06 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3.12 allow-prereleases: true diff --git a/web_programming/get_top_billionaires.py b/web_programming/get_top_billionaires.py index 6f986acb9181..703b635eef82 100644 --- a/web_programming/get_top_billionaires.py +++ b/web_programming/get_top_billionaires.py @@ -3,7 +3,7 @@ This works for some of us but fails for others. """ -from datetime import UTC, datetime, timedelta +from datetime import UTC, date, datetime import requests from rich import box @@ -11,8 +11,7 @@ from rich import table as rich_table LIMIT = 10 -TODAY = datetime.now() - +TODAY = datetime.now(tz=UTC) API_URL = ( "https://www.forbes.com/forbesapi/person/rtb/0/position/true.json" "?fields=personName,gender,source,countryOfCitizenship,birthDate,finalWorth" @@ -20,40 +19,40 @@ ) -def calculate_age(unix_date: float) -> str: - """Calculates age from given unix time format. +def years_old(birth_timestamp: int, today: date | None = None) -> int: + """ + Calculate the age in years based on the given birth date. Only the year, month, + and day are used in the calculation. The time of day is ignored. + + Args: + birth_timestamp: The date of birth. + today: (useful for writing tests) or if None then datetime.date.today(). Returns: - Age as string - - >>> from datetime import datetime, UTC - >>> years_since_create = datetime.now(tz=UTC).year - 2022 - >>> int(calculate_age(-657244800000)) - years_since_create - 73 - >>> int(calculate_age(46915200000)) - years_since_create - 51 + int: The age in years. + + Examples: + >>> today = date(2024, 1, 12) + >>> years_old(birth_timestamp=datetime(1959, 11, 20).timestamp(), today=today) + 64 + >>> years_old(birth_timestamp=datetime(1970, 2, 13).timestamp(), today=today) + 53 + >>> all( + ... years_old(datetime(today.year - i, 1, 12).timestamp(), today=today) == i + ... for i in range(1, 111) + ... ) + True """ - # Convert date from milliseconds to seconds - unix_date /= 1000 - - if unix_date < 0: - # Handle timestamp before epoch - epoch = datetime.fromtimestamp(0, tz=UTC) - seconds_since_epoch = (datetime.now(tz=UTC) - epoch).seconds - birthdate = ( - epoch - timedelta(seconds=abs(unix_date) - seconds_since_epoch) - ).date() - else: - birthdate = datetime.fromtimestamp(unix_date, tz=UTC).date() - return str( - TODAY.year - - birthdate.year - - ((TODAY.month, TODAY.day) < (birthdate.month, birthdate.day)) + today = today or TODAY.date() + birth_date = datetime.fromtimestamp(birth_timestamp, tz=UTC).date() + return (today.year - birth_date.year) - ( + (today.month, today.day) < (birth_date.month, birth_date.day) ) -def get_forbes_real_time_billionaires() -> list[dict[str, str]]: - """Get top 10 realtime billionaires using forbes API. 
+def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]: + """ + Get the top 10 real-time billionaires using Forbes API. Returns: List of top 10 realtime billionaires data. @@ -66,21 +65,22 @@ def get_forbes_real_time_billionaires() -> list[dict[str, str]]: "Country": person["countryOfCitizenship"], "Gender": person["gender"], "Worth ($)": f"{person['finalWorth'] / 1000:.1f} Billion", - "Age": calculate_age(person["birthDate"]), + "Age": years_old(person["birthDate"]), } for person in response_json["personList"]["personsLists"] ] -def display_billionaires(forbes_billionaires: list[dict[str, str]]) -> None: - """Display Forbes real time billionaires in a rich table. +def display_billionaires(forbes_billionaires: list[dict[str, int | str]]) -> None: + """ + Display Forbes real-time billionaires in a rich table. Args: - forbes_billionaires (list): Forbes top 10 real time billionaires + forbes_billionaires (list): Forbes top 10 real-time billionaires """ table = rich_table.Table( - title=f"Forbes Top {LIMIT} Real Time Billionaires at {TODAY:%Y-%m-%d %H:%M}", + title=f"Forbes Top {LIMIT} Real-Time Billionaires at {TODAY:%Y-%m-%d %H:%M}", style="green", highlight=True, box=box.SQUARE, From a56f24e83d971c8f49d194b859b9b7acbf7df084 Mon Sep 17 00:00:00 2001 From: Piotr Idzik <65706193+vil02@users.noreply.github.com> Date: Fri, 12 Jan 2024 17:46:26 +0100 Subject: [PATCH 1323/1543] fix: use `GITHUB_ACTOR` in `git config` (#11233) --- .github/workflows/directory_writer.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/directory_writer.yml b/.github/workflows/directory_writer.yml index 702c15f1e29b..e92c93604904 100644 --- a/.github/workflows/directory_writer.yml +++ b/.github/workflows/directory_writer.yml @@ -15,8 +15,8 @@ jobs: - name: Write DIRECTORY.md run: | scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md - git config --global user.name github-actions - git config --global user.email '${GITHUB_ACTOR}@users.noreply.github.com' + git config --global user.name "$GITHUB_ACTOR" + git config --global user.email "$GITHUB_ACTOR@users.noreply.github.com" git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/$GITHUB_REPOSITORY - name: Update DIRECTORY.md run: | From ffb93adf46971be35699e7642d79e90284b3c7f1 Mon Sep 17 00:00:00 2001 From: Piotr Idzik <65706193+vil02@users.noreply.github.com> Date: Fri, 12 Jan 2024 18:25:59 +0100 Subject: [PATCH 1324/1543] chore: update `actions/setup-python` to `v5` (#11236) --- .github/workflows/directory_writer.yml | 2 +- .github/workflows/project_euler.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/directory_writer.yml b/.github/workflows/directory_writer.yml index e92c93604904..55d89f455a25 100644 --- a/.github/workflows/directory_writer.yml +++ b/.github/workflows/directory_writer.yml @@ -9,7 +9,7 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3.x - name: Write DIRECTORY.md diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index 7bbccf76e192..59e1208a650d 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3.x - name: Install pytest and pytest-cov @@ -27,7 +27,7 @@ jobs: runs-on: 
ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3.x - name: Install pytest and requests From 13559aee437dab6ed88ecb1a6737cb39094c9e24 Mon Sep 17 00:00:00 2001 From: Piotr Idzik <65706193+vil02@users.noreply.github.com> Date: Sat, 13 Jan 2024 12:24:58 +0100 Subject: [PATCH 1325/1543] style: use proper indentation in `ruff.yml` (#11237) * style: use proper indentation in `ruff.yml` * chore: run `prettier` on `yml` files * Update .pre-commit-config.yaml * Update .pre-commit-config.yaml * Update .pre-commit-config.yaml * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update .pre-commit-config.yaml * chore: run prettier on workflow files --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 10 +++++----- .github/workflows/ruff.yml | 6 +++--- .pre-commit-config.yaml | 8 +++++++- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1631feb2ba06..906edfdae1ed 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,10 +25,10 @@ jobs: - name: Run tests # TODO: #8818 Re-enable quantum tests run: pytest - --ignore=quantum/q_fourier_transform.py - --ignore=project_euler/ - --ignore=scripts/validate_solutions.py - --cov-report=term-missing:skip-covered - --cov=. . + --ignore=quantum/q_fourier_transform.py + --ignore=project_euler/ + --ignore=scripts/validate_solutions.py + --cov-report=term-missing:skip-covered + --cov=. . - if: ${{ success() }} run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index 496f1460e074..9ebabed3600a 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -11,6 +11,6 @@ jobs: ruff: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - run: pip install --user ruff - - run: ruff --output-format=github . + - uses: actions/checkout@v4 + - run: pip install --user ruff + - run: ruff --output-format=github . 
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0e06ba7a5250..31e141049441 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ repos: - repo: https://github.com/MarcoGorelli/auto-walrus rev: v0.2.2 hooks: - - id: auto-walrus + - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.1.11 @@ -59,3 +59,9 @@ repos: - --install-types # See mirrors-mypy README.md - --non-interactive additional_dependencies: [types-requests] + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: "v3.1.0" + hooks: + - id: prettier + types_or: [toml, yaml] From dd47651bfca06b31941827ed3f41517bf5718508 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 Jan 2024 19:19:36 +0100 Subject: [PATCH 1326/1543] [pre-commit.ci] pre-commit autoupdate (#11246) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.11 → v0.1.13](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.11...v0.1.13) - [github.com/tox-dev/pyproject-fmt: 1.5.3 → 1.6.0](https://github.com/tox-dev/pyproject-fmt/compare/1.5.3...1.6.0) - [github.com/pre-commit/mirrors-prettier: v3.1.0 → v4.0.0-alpha.8](https://github.com/pre-commit/mirrors-prettier/compare/v3.1.0...v4.0.0-alpha.8) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 31e141049441..97603510b426 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.11 + rev: v0.1.13 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.5.3" + rev: "1.6.0" hooks: - id: pyproject-fmt @@ -61,7 +61,7 @@ repos: additional_dependencies: [types-requests] - repo: https://github.com/pre-commit/mirrors-prettier - rev: "v3.1.0" + rev: "v4.0.0-alpha.8" hooks: - id: prettier types_or: [toml, yaml] From 4b6f688344b8347f555f10ca04b80ee36b5a1e82 Mon Sep 17 00:00:00 2001 From: AtomicVar Date: Tue, 16 Jan 2024 16:39:54 +0800 Subject: [PATCH 1327/1543] Use compiled black as the pre-commit formatter (#11247) * Use compiled black as the pre-commit formatter * ruff-format * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Keep GitHub Actions up to date with Dependabot --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/.github/dependabot.yml | 8 ++++++++ .pre-commit-config.yaml | 6 +----- audio_filters/butterworth_filter.py | 16 ++++++++++++---- conversions/convert_number_to_words.py | 2 +- digital_image_processing/filters/gabor_filter.py | 6 +++--- ...rian_path_and_circuit_for_undirected_graph.py | 2 +- physics/n_body_simulation.py | 4 +--- project_euler/problem_056/sol1.py | 4 +--- 8 files changed, 28 insertions(+), 20 deletions(-) create mode 100644 .github/.github/dependabot.yml diff --git a/.github/.github/dependabot.yml b/.github/.github/dependabot.yml new file mode 100644 index 000000000000..15e494ec867e --- /dev/null +++ b/.github/.github/dependabot.yml @@ -0,0 +1,8 @@ +# Keep GitHub Actions up to date with Dependabot... 
+# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 97603510b426..38cc7c8fc3ff 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -19,11 +19,7 @@ repos: rev: v0.1.13 hooks: - id: ruff - - - repo: https://github.com/psf/black - rev: 23.12.1 - hooks: - - id: black + - id: ruff-format - repo: https://github.com/codespell-project/codespell rev: v2.2.6 diff --git a/audio_filters/butterworth_filter.py b/audio_filters/butterworth_filter.py index cffedb7a68fd..6449bc3f3dce 100644 --- a/audio_filters/butterworth_filter.py +++ b/audio_filters/butterworth_filter.py @@ -11,7 +11,9 @@ def make_lowpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 + frequency: int, + samplerate: int, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates a low-pass filter @@ -39,7 +41,9 @@ def make_lowpass( def make_highpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 + frequency: int, + samplerate: int, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates a high-pass filter @@ -67,7 +71,9 @@ def make_highpass( def make_bandpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 + frequency: int, + samplerate: int, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates a band-pass filter @@ -96,7 +102,9 @@ def make_bandpass( def make_allpass( - frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2) # noqa: B008 + frequency: int, + samplerate: int, + q_factor: float = 1 / sqrt(2), # noqa: B008 ) -> IIRFilter: """ Creates an all-pass filter diff --git a/conversions/convert_number_to_words.py b/conversions/convert_number_to_words.py index 0c428928b31d..dbab44c72e1f 100644 --- a/conversions/convert_number_to_words.py +++ b/conversions/convert_number_to_words.py @@ -41,7 +41,7 @@ def max_value(cls, system: str) -> int: >>> NumberingSystem.max_value("indian") == 10**19 - 1 True """ - match (system_enum := cls[system.upper()]): + match system_enum := cls[system.upper()]: case cls.SHORT: max_exp = system_enum.value[0][0] + 3 case cls.LONG: diff --git a/digital_image_processing/filters/gabor_filter.py b/digital_image_processing/filters/gabor_filter.py index 8f9212a35a79..aaec567f4c99 100644 --- a/digital_image_processing/filters/gabor_filter.py +++ b/digital_image_processing/filters/gabor_filter.py @@ -48,9 +48,9 @@ def gabor_filter_kernel( _y = -sin_theta * px + cos_theta * py # fill kernel - gabor[y, x] = np.exp( - -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) - ) * np.cos(2 * np.pi * _x / lambd + psi) + gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos( + 2 * np.pi * _x / lambd + psi + ) return gabor diff --git a/graphs/eulerian_path_and_circuit_for_undirected_graph.py b/graphs/eulerian_path_and_circuit_for_undirected_graph.py index 6b4ea8e21e8b..5b146eaa845b 100644 --- a/graphs/eulerian_path_and_circuit_for_undirected_graph.py +++ b/graphs/eulerian_path_and_circuit_for_undirected_graph.py @@ -56,7 +56,7 @@ def main(): g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]} g5 = { 1: [], - 2: [] + 2: [], # all degree is zero } max_node = 10 diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index 46330844df61..ec008784ba62 100644 --- a/physics/n_body_simulation.py +++ 
b/physics/n_body_simulation.py @@ -165,9 +165,7 @@ def update_system(self, delta_time: float) -> None: # Calculation of the distance using Pythagoras's theorem # Extra factor due to the softening technique - distance = (dif_x**2 + dif_y**2 + self.softening_factor) ** ( - 1 / 2 - ) + distance = (dif_x**2 + dif_y**2 + self.softening_factor) ** (1 / 2) # Newton's law of universal gravitation. force_x += ( diff --git a/project_euler/problem_056/sol1.py b/project_euler/problem_056/sol1.py index c772bec58692..828dbd3a8ddf 100644 --- a/project_euler/problem_056/sol1.py +++ b/project_euler/problem_056/sol1.py @@ -30,9 +30,7 @@ def solution(a: int = 100, b: int = 100) -> int: # RETURN the MAXIMUM from the list of SUMs of the list of INT converted from STR of # BASE raised to the POWER return max( - sum(int(x) for x in str(base**power)) - for base in range(a) - for power in range(b) + sum(int(x) for x in str(base**power)) for base in range(a) for power in range(b) ) From 0101dd42dc83f567bddebc386b17f2b4f6bbaa36 Mon Sep 17 00:00:00 2001 From: Ataf Fazledin Ahamed Date: Tue, 16 Jan 2024 14:43:33 +0600 Subject: [PATCH 1328/1543] Fixed Inappropriate Logical Expression (#11203) Signed-off-by: fazledyn-or --- data_structures/binary_tree/red_black_tree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index 4ebe0e927ca0..fc299301da8a 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -451,7 +451,7 @@ def is_left(self) -> bool: """Returns true iff this node is the left child of its parent.""" if self.parent is None: return False - return self.parent.left is self.parent.left is self + return self.parent.left is self def is_right(self) -> bool: """Returns true iff this node is the right child of its parent.""" From 05a5cdacc3cfd9814ad6f5cb2d4dec86109b640a Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Thu, 18 Jan 2024 18:09:27 +0530 Subject: [PATCH 1329/1543] Added doctest to skew_heap.py (#11147) * Added doctest to skew_heap.py * Update skew_heap.py * Update data_structures/heap/skew_heap.py Co-authored-by: Saptadeep Banerjee <69459134+imSanko@users.noreply.github.com> * Update skew_heap.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update skew_heap.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Saptadeep Banerjee <69459134+imSanko@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/heap/skew_heap.py | 45 +++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/data_structures/heap/skew_heap.py b/data_structures/heap/skew_heap.py index c4c13b08276a..0839db711cb1 100644 --- a/data_structures/heap/skew_heap.py +++ b/data_structures/heap/skew_heap.py @@ -21,14 +21,55 @@ def __init__(self, value: T) -> None: @property def value(self) -> T: - """Return the value of the node.""" + """ + Return the value of the node. 
+ + >>> SkewNode(0).value + 0 + >>> SkewNode(3.14159).value + 3.14159 + >>> SkewNode("hello").value + 'hello' + >>> SkewNode(None).value + + >>> SkewNode(True).value + True + >>> SkewNode([]).value + [] + >>> SkewNode({}).value + {} + >>> SkewNode(set()).value + set() + >>> SkewNode(0.0).value + 0.0 + >>> SkewNode(-1e-10).value + -1e-10 + >>> SkewNode(10).value + 10 + >>> SkewNode(-10.5).value + -10.5 + >>> SkewNode().value + Traceback (most recent call last): + ... + TypeError: SkewNode.__init__() missing 1 required positional argument: 'value' + """ return self._value @staticmethod def merge( root1: SkewNode[T] | None, root2: SkewNode[T] | None ) -> SkewNode[T] | None: - """Merge 2 nodes together.""" + """ + Merge 2 nodes together. + >>> SkewNode.merge(SkewNode(10),SkewNode(-10.5)).value + -10.5 + >>> SkewNode.merge(SkewNode(10),SkewNode(10.5)).value + 10 + >>> SkewNode.merge(SkewNode(10),SkewNode(10)).value + 10 + >>> SkewNode.merge(SkewNode(-100),SkewNode(-10.5)).value + -100 + """ if not root1: return root2 From 3952ba703a5b84a37891a001037c5c366d20941a Mon Sep 17 00:00:00 2001 From: AtomicVar Date: Thu, 18 Jan 2024 20:41:29 +0800 Subject: [PATCH 1330/1543] Add categorical focal cross-entropy loss algorithm (#11248) --- machine_learning/loss_functions.py | 102 +++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index 36a760326f3d..f05fa0cbe686 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -148,6 +148,108 @@ def categorical_cross_entropy( return -np.sum(y_true * np.log(y_pred)) +def categorical_focal_cross_entropy( + y_true: np.ndarray, + y_pred: np.ndarray, + alpha: np.ndarray = None, + gamma: float = 2.0, + epsilon: float = 1e-15, +) -> float: + """ + Calculate the mean categorical focal cross-entropy (CFCE) loss between true + labels and predicted probabilities for multi-class classification. + + CFCE loss is a generalization of binary focal cross-entropy for multi-class + classification. It addresses class imbalance by focusing on hard examples. + + CFCE = -Σ alpha * (1 - y_pred)**gamma * y_true * log(y_pred) + + Reference: [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf) + + Parameters: + - y_true: True labels in one-hot encoded form. + - y_pred: Predicted probabilities for each class. + - alpha: Array of weighting factors for each class. + - gamma: Focusing parameter for modulating the loss (default: 2.0). + - epsilon: Small constant to avoid numerical instability. + + Returns: + - The mean categorical focal cross-entropy loss. + + >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) + >>> alpha = np.array([0.6, 0.2, 0.7]) + >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + 0.0025966118981496423 + + >>> true_labels = np.array([[0, 1, 0], [0, 0, 1]]) + >>> pred_probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) + >>> alpha = np.array([0.25, 0.25, 0.25]) + >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + 0.23315276982014324 + + >>> true_labels = np.array([[1, 0], [0, 1]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same shape. 
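# A hedged worked check (illustrative, separate from the diff content): with
# gamma = 2 and one-hot targets, only the true class of each row contributes
# to the inner sum, so the first doctest above reduces to three per-sample
# terms whose mean is the expected 0.0025966...:
from math import log

pairs = [(0.6, 0.9), (0.2, 0.7), (0.7, 0.9)]  # (alpha, predicted prob) for each true class
losses = [alpha * (1 - p) ** 2 * -log(p) for alpha, p in pairs]
print(sum(losses) / len(losses))  # ~0.0025966118981496423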
+ + >>> true_labels = np.array([[2, 0, 1], [1, 0, 0]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_focal_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: y_true must be one-hot encoded. + + >>> true_labels = np.array([[1, 0, 1], [1, 0, 0]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_focal_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: y_true must be one-hot encoded. + + >>> true_labels = np.array([[1, 0, 0], [0, 1, 0]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.1], [0.2, 0.7, 0.1]]) + >>> categorical_focal_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: Predicted probabilities must sum to approximately 1. + + >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) + >>> alpha = np.array([0.6, 0.2]) + >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + Traceback (most recent call last): + ... + ValueError: Length of alpha must match the number of classes. + """ + if y_true.shape != y_pred.shape: + raise ValueError("Shape of y_true and y_pred must be the same.") + + if alpha is None: + alpha = np.ones(y_true.shape[1]) + + if np.any((y_true != 0) & (y_true != 1)) or np.any(y_true.sum(axis=1) != 1): + raise ValueError("y_true must be one-hot encoded.") + + if len(alpha) != y_true.shape[1]: + raise ValueError("Length of alpha must match the number of classes.") + + if not np.all(np.isclose(np.sum(y_pred, axis=1), 1, rtol=epsilon, atol=epsilon)): + raise ValueError("Predicted probabilities must sum to approximately 1.") + + # Clip predicted probabilities to avoid log(0) + y_pred = np.clip(y_pred, epsilon, 1 - epsilon) + + # Calculate loss for each class and sum across classes + cfce_loss = -np.sum( + alpha * np.power(1 - y_pred, gamma) * y_true * np.log(y_pred), axis=1 + ) + + return np.mean(cfce_loss) + + def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float: """ Calculate the mean hinge loss for between true labels and predicted probabilities From b01571dc4f5754d3da44b8a0b6dabb44986c666e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 19:20:43 +0100 Subject: [PATCH 1331/1543] [pre-commit.ci] pre-commit autoupdate (#11255) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.13 → v0.1.14](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.13...v0.1.14) - [github.com/tox-dev/pyproject-fmt: 1.6.0 → 1.7.0](https://github.com/tox-dev/pyproject-fmt/compare/1.6.0...1.7.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 38cc7c8fc3ff..7fae092d043c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.13 + rev: v0.1.14 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.6.0" + rev: "1.7.0" hooks: - id: pyproject-fmt From b092d7755cf94b4758440b68bc97ac30154f4c55 Mon Sep 17 00:00:00 2001 From: Geoffrey Logovi 
<52314615+geoffreylgv@users.noreply.github.com> Date: Wed, 24 Jan 2024 06:15:39 +0000 Subject: [PATCH 1332/1543] fixes #11256 : computer vision link update in .computer_vision/README.md (#11257) --- computer_vision/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/computer_vision/README.md b/computer_vision/README.md index 8d2f4a130d05..1657128fd25e 100644 --- a/computer_vision/README.md +++ b/computer_vision/README.md @@ -8,4 +8,4 @@ Image processing and computer vision are a little different from each other. Ima While computer vision comes from modelling image processing using the techniques of machine learning, computer vision applies machine learning to recognize patterns for interpretation of images (much like the process of visual reasoning of human vision). * -* +* From c0e700c91c63c1b3ea50575b10a6c1665dfd6404 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 29 Jan 2024 21:00:37 +0100 Subject: [PATCH 1333/1543] [pre-commit.ci] pre-commit autoupdate (#11261) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/abravalheri/validate-pyproject: v0.15 → v0.16](https://github.com/abravalheri/validate-pyproject/compare/v0.15...v0.16) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7fae092d043c..0d13745a5a47 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.15 + rev: v0.16 hooks: - id: validate-pyproject From c1d29ba459648bf8111e19e32988cb36ee8a94b0 Mon Sep 17 00:00:00 2001 From: AtomicVar Date: Tue, 30 Jan 2024 16:18:56 +0800 Subject: [PATCH 1334/1543] Add smooth l1 loss algorithm (#11239) --- machine_learning/loss_functions.py | 56 ++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index f05fa0cbe686..16e5a3278b73 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -573,6 +573,62 @@ def perplexity_loss( return np.mean(perp_losses) +def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> float: + """ + Calculate the Smooth L1 Loss between y_true and y_pred. + + The Smooth L1 Loss is less sensitive to outliers than the L2 Loss and is often used + in regression problems, such as object detection. + + Smooth L1 Loss = + 0.5 * (x - y)^2 / beta, if |x - y| < beta + |x - y| - 0.5 * beta, otherwise + + Reference: + https://pytorch.org/docs/stable/generated/torch.nn.SmoothL1Loss.html + + Args: + y_true: Array of true values. + y_pred: Array of predicted values. + beta: Specifies the threshold at which to change between L1 and L2 loss. + + Returns: + The calculated Smooth L1 Loss between y_true and y_pred. + + Raises: + ValueError: If the length of the two arrays is not the same. 
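# A hedged worked example (illustrative, separate from the diff content): with
# beta = 1.0, element-wise differences below beta take the quadratic branch and
# the rest take the linear branch, exactly as in the formula above. For
# absolute differences of [0.1, 0.2, 0.1, 0.2] every element stays quadratic:
beta = 1.0
diffs = [0.1, 0.2, 0.1, 0.2]  # |y_true - y_pred| for the first doctest below
losses = [0.5 * d * d / beta if d < beta else d - 0.5 * beta for d in diffs]
print(sum(losses) / len(losses))  # 0.0125, up to float rounding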
+ + >>> y_true = np.array([3, 5, 2, 7]) + >>> y_pred = np.array([2.9, 4.8, 2.1, 7.2]) + >>> smooth_l1_loss(y_true, y_pred, 1.0) + 0.012500000000000022 + + >>> y_true = np.array([2, 4, 6]) + >>> y_pred = np.array([1, 5, 7]) + >>> smooth_l1_loss(y_true, y_pred, 1.0) + 0.5 + + >>> y_true = np.array([1, 3, 5, 7]) + >>> y_pred = np.array([1, 3, 5, 7]) + >>> smooth_l1_loss(y_true, y_pred, 1.0) + 0.0 + + >>> y_true = np.array([1, 3, 5]) + >>> y_pred = np.array([1, 3, 5, 7]) + >>> smooth_l1_loss(y_true, y_pred, 1.0) + Traceback (most recent call last): + ... + ValueError: The length of the two arrays should be the same. + """ + + if len(y_true) != len(y_pred): + raise ValueError("The length of the two arrays should be the same.") + + diff = np.abs(y_true - y_pred) + loss = np.where(diff < beta, 0.5 * diff**2 / beta, diff - 0.5 * beta) + return np.mean(loss) + + if __name__ == "__main__": import doctest From 8995f45cb505e9cb1aafe3b35c6a00d9aff5f871 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 1 Feb 2024 07:10:35 +0100 Subject: [PATCH 1335/1543] Rename .github/.github/dependabot.yml to .github/dependabot.yml (#11264) * Rename .github/.github/dependabot.yml to .github/dependabot.yml * runs-on: macos-14 # ubuntu-latest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update build.yml --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/{.github => }/dependabot.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/{.github => }/dependabot.yml (100%) diff --git a/.github/.github/dependabot.yml b/.github/dependabot.yml similarity index 100% rename from .github/.github/dependabot.yml rename to .github/dependabot.yml From 6a169740e8c71c6c8236b09eb7b523895fedcfbd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Feb 2024 08:11:41 +0100 Subject: [PATCH 1336/1543] Bump actions/cache from 3 to 4 (#11265) Bumps [actions/cache](https://github.com/actions/cache) from 3 to 4. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 906edfdae1ed..a113b4608678 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -14,7 +14,7 @@ jobs: with: python-version: 3.12 allow-prereleases: true - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} From 4128f19170135cf7ccadb2afa8b2ab0a464c5765 Mon Sep 17 00:00:00 2001 From: Anthony Klarman <148516349+tonguegrease@users.noreply.github.com> Date: Fri, 2 Feb 2024 03:22:58 -0500 Subject: [PATCH 1337/1543] Fixed lines that needed to be uncommented after Hacktoberfest (#11267) * uncommented lines * uncommented lines * Update CODEOWNERS --------- Co-authored-by: Christian Clauss --- .github/CODEOWNERS | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a0531cdeec69..d2ac43c7df31 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -21,15 +21,15 @@ # /cellular_automata/ -# /ciphers/ @cclauss # TODO: Uncomment this line after Hacktoberfest +# /ciphers/ # /compression/ # /computer_vision/ -# /conversions/ @cclauss # TODO: Uncomment this line after Hacktoberfest +# /conversions/ -# /data_structures/ @cclauss # TODO: Uncomment this line after Hacktoberfest +# /data_structures/ # /digital_image_processing/ @@ -67,7 +67,7 @@ # /neural_network/ -# /other/ @cclauss # TODO: Uncomment this line after Hacktoberfest +# /other/ # /project_euler/ @@ -81,7 +81,7 @@ # /sorts/ -# /strings/ @cclauss # TODO: Uncomment this line after Hacktoberfest +# /strings/ # /traversals/ From ed8d9209daff975eb3be6e0bf8cfa13e330347ca Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 5 Feb 2024 20:48:10 +0100 Subject: [PATCH 1338/1543] [pre-commit.ci] pre-commit autoupdate (#11275) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.14 → v0.2.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.14...v0.2.0) * Upgrade pyproject.toml * Revert sudoku_solver.py RUF017 Avoid quadratic list summation --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- ciphers/mixed_keyword_cypher.py | 2 +- data_structures/arrays/sudoku_solver.py | 2 +- data_structures/linked_list/is_palindrome.py | 4 +--- .../filters/gaussian_filter.py | 4 +--- electronics/resistor_equivalence.py | 8 ++------ hashes/hamming_code.py | 18 ++++++------------ machine_learning/k_means_clust.py | 2 +- .../sequential_minimum_optimization.py | 2 +- neural_network/convolution_neural_network.py | 6 +++--- pyproject.toml | 14 +++++++------- 11 files changed, 25 insertions(+), 39 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0d13745a5a47..c29c6982643e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.14 + rev: v0.2.0 hooks: - id: ruff - id: ruff-format diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index 
b984808fced6..1b186108a73e 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -67,7 +67,7 @@ def mixed_keyword( if verbose: print(mapping) # create the encrypted text by mapping the plaintext to the modified alphabet - return "".join(mapping[char] if char in mapping else char for char in plaintext) + return "".join(mapping.get(char, char) for char in plaintext) if __name__ == "__main__": diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 8d38bd7295ea..20ac32e3b071 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -22,7 +22,7 @@ def cross(items_a, items_b): + [cross(rs, cs) for rs in ("ABC", "DEF", "GHI") for cs in ("123", "456", "789")] ) units = {s: [u for u in unitlist if s in u] for s in squares} -peers = {s: set(sum(units[s], [])) - {s} for s in squares} +peers = {s: set(sum(units[s], [])) - {s} for s in squares} # noqa: RUF017 def test(): diff --git a/data_structures/linked_list/is_palindrome.py b/data_structures/linked_list/is_palindrome.py index f949d9a2f201..da788e3e5045 100644 --- a/data_structures/linked_list/is_palindrome.py +++ b/data_structures/linked_list/is_palindrome.py @@ -171,11 +171,9 @@ def is_palindrome_dict(head: ListNode | None) -> bool: if len(v) % 2 != 0: middle += 1 else: - step = 0 - for i in range(len(v)): + for step, i in enumerate(range(len(v))): if v[i] + v[len(v) - 1 - step] != checksum: return False - step += 1 if middle > 1: return False return True diff --git a/digital_image_processing/filters/gaussian_filter.py b/digital_image_processing/filters/gaussian_filter.py index 87fa67fb65ea..634d836e5edc 100644 --- a/digital_image_processing/filters/gaussian_filter.py +++ b/digital_image_processing/filters/gaussian_filter.py @@ -22,11 +22,9 @@ def gaussian_filter(image, k_size, sigma): # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows image_array = zeros((dst_height * dst_width, k_size * k_size)) - row = 0 - for i, j in product(range(dst_height), range(dst_width)): + for row, (i, j) in enumerate(product(range(dst_height), range(dst_width))): window = ravel(image[i : i + k_size, j : j + k_size]) image_array[row, :] = window - row += 1 # turn the kernel into shape(k*k, 1) gaussian_kernel = gen_gaussian_kernel(k_size, sigma) diff --git a/electronics/resistor_equivalence.py b/electronics/resistor_equivalence.py index 55e7f2d6b5d2..c4ea7d4b757e 100644 --- a/electronics/resistor_equivalence.py +++ b/electronics/resistor_equivalence.py @@ -20,13 +20,11 @@ def resistor_parallel(resistors: list[float]) -> float: """ first_sum = 0.00 - index = 0 - for resistor in resistors: + for index, resistor in enumerate(resistors): if resistor <= 0: msg = f"Resistor at index {index} has a negative or zero value!" raise ValueError(msg) first_sum += 1 / float(resistor) - index += 1 return 1 / first_sum @@ -44,13 +42,11 @@ def resistor_series(resistors: list[float]) -> float: ValueError: Resistor at index 2 has a negative value! """ sum_r = 0.00 - index = 0 - for resistor in resistors: + for index, resistor in enumerate(resistors): sum_r += resistor if resistor < 0: msg = f"Resistor at index {index} has a negative value!" 
raise ValueError(msg) - index += 1 return sum_r diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index 4a6efcf23f63..b34fdd4c7a74 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -123,8 +123,7 @@ def emitter_converter(size_par, data): # Bit counter one for a given parity cont_bo = 0 # counter to control the loop reading - cont_loop = 0 - for x in data_ord: + for cont_loop, x in enumerate(data_ord): if x is not None: try: aux = (bin_pos[cont_loop])[-1 * (bp)] @@ -132,7 +131,6 @@ def emitter_converter(size_par, data): aux = "0" if aux == "1" and x == "1": cont_bo += 1 - cont_loop += 1 parity.append(cont_bo % 2) qtd_bp += 1 @@ -164,10 +162,10 @@ def receptor_converter(size_par, data): parity_received = [] data_output = [] - for x in range(1, len(data) + 1): + for i, item in enumerate(data, 1): # Performs a template of bit positions - who should be given, # and who should be parity - if qtd_bp < size_par and (np.log(x) / np.log(2)).is_integer(): + if qtd_bp < size_par and (np.log(i) / np.log(2)).is_integer(): data_out_gab.append("P") qtd_bp = qtd_bp + 1 else: @@ -175,10 +173,9 @@ def receptor_converter(size_par, data): # Sorts the data to the new output size if data_out_gab[-1] == "D": - data_output.append(data[cont_data]) + data_output.append(item) else: - parity_received.append(data[cont_data]) - cont_data += 1 + parity_received.append(item) # -----------calculates the parity with the data data_out = [] @@ -215,9 +212,7 @@ def receptor_converter(size_par, data): for bp in range(1, size_par + 1): # Bit counter one for a certain parity cont_bo = 0 - # Counter to control loop reading - cont_loop = 0 - for x in data_ord: + for cont_loop, x in enumerate(data_ord): if x is not None: try: aux = (bin_pos[cont_loop])[-1 * (bp)] @@ -225,7 +220,6 @@ def receptor_converter(size_par, data): aux = "0" if aux == "1" and x == "1": cont_bo += 1 - cont_loop += 1 parity.append(str(cont_bo % 2)) qtd_bp += 1 diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index ebad66ac8e8f..4a219edc3bb1 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -237,7 +237,7 @@ def report_generator( [ ("sum", "sum"), ("mean_with_zeros", lambda x: np.mean(np.nan_to_num(x))), - ("mean_without_zeros", lambda x: x.replace(0, np.NaN).mean()), + ("mean_without_zeros", lambda x: x.replace(0, np.nan).mean()), ( "mean_25-75", lambda x: np.mean( diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 9e2304859f8d..9ee8c52fb2e9 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -589,7 +589,7 @@ def plot_partition_boundary( ax.contour( xrange, yrange, - np.mat(grid).T, + np.asmatrix(grid).T, levels=(-1, 0, 1), linestyles=("--", "-", "--"), linewidths=(1, 1, 1), diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index f2e88fe7bd88..e9726a0cb4a7 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -41,11 +41,11 @@ def __init__( self.rate_weight = rate_w self.rate_thre = rate_t self.w_conv1 = [ - np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) + np.asmatrix(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) for i in range(self.conv1[1]) ] - self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5) - self.vji = np.mat(-1 * np.random.rand(self.num_bp2, 
self.num_bp1) + 0.5) + self.wkj = np.asmatrix(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5) + self.vji = np.asmatrix(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5) self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1 self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1 self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1 diff --git a/pyproject.toml b/pyproject.toml index c7163dc78371..2e7da519da8b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [tool.ruff] -ignore = [ # `ruff rule S101` for a description of that rule +lint.ignore = [ # `ruff rule S101` for a description of that rule "ARG001", # Unused function argument `amount` -- FIX ME? "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME @@ -31,7 +31,7 @@ ignore = [ # `ruff rule S101` for a description of that rule "SLF001", # Private member accessed: `_Iterator` -- FIX ME "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] -select = [ # https://beta.ruff.rs/docs/rules +lint.select = [ # https://beta.ruff.rs/docs/rules "A", # flake8-builtins "ARG", # flake8-unused-arguments "ASYNC", # flake8-async @@ -84,13 +84,13 @@ select = [ # https://beta.ruff.rs/docs/rules # "TCH", # flake8-type-checking # "TRY", # tryceratops ] -show-source = true -target-version = "py311" +output-format = "full" +target-version = "py312" -[tool.ruff.mccabe] # DO NOT INCREASE THIS VALUE +[tool.ruff.lint.mccabe] # DO NOT INCREASE THIS VALUE max-complexity = 17 # default: 10 -[tool.ruff.per-file-ignores] +[tool.ruff.lint.per-file-ignores] "arithmetic_analysis/newton_raphson.py" = ["PGH001"] "audio_filters/show_response.py" = ["ARG002"] "data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] @@ -110,7 +110,7 @@ max-complexity = 17 # default: 10 "project_euler/problem_099/sol1.py" = ["SIM115"] "sorts/external_sort.py" = ["SIM115"] -[tool.ruff.pylint] # DO NOT INCREASE THESE VALUES +[tool.ruff.lint.pylint] # DO NOT INCREASE THESE VALUES allow-magic-value-types = ["float", "int", "str"] max-args = 10 # default: 5 max-branches = 20 # default: 12 From 5d6846b2bd1fa16edfc89025e00f69a802774faa Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 21:53:20 +0100 Subject: [PATCH 1339/1543] [pre-commit.ci] pre-commit autoupdate (#11292) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.2.0 → v0.2.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.2.0...v0.2.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c29c6982643e..79d7d58d0863 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.2.0 + rev: v0.2.1 hooks: - id: ruff - id: ruff-format From c6ca1942e14a6e88c7ea1b96ef3a6d17ca843f52 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 23:00:06 +0100 Subject: [PATCH 1340/1543] [pre-commit.ci] pre-commit autoupdate (#11296) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - 
[github.com/astral-sh/ruff-pre-commit: v0.2.1 → v0.2.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.2.1...v0.2.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 79d7d58d0863..be8364a7fc0b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.2.1 + rev: v0.2.2 hooks: - id: ruff - id: ruff-format From fd27953d44416a5f1541ed6e6923844b6070d086 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 12 Mar 2024 11:35:49 +0300 Subject: [PATCH 1341/1543] Reenable files when TensorFlow supports the current Python (#11318) * Remove python_version < '3.12' for tensorflow * Reenable dynamic_programming/k_means_clustering_tensorflow.py * updating DIRECTORY.md * Try to fix ruff * Try to fix ruff * Try to fix ruff * Try to fix ruff * Try to fix ruff * Reenable machine_learning/lstm/lstm_prediction.py * updating DIRECTORY.md * Try to fix ruff * Reenable computer_vision/cnn_classification.py * updating DIRECTORY.md * Reenable neural_network/input_data.py * updating DIRECTORY.md * Try to fix ruff * Try to fix ruff * Try to fix mypy * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Try to fix ruff * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: MaximSmolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 5 +++++ ciphers/rsa_cipher.py | 17 +++++++---------- ...on.py.DISABLED.txt => cnn_classification.py} | 0 ...LED.txt => k_means_clustering_tensorflow.py} | 0 ...ction.py.DISABLED.txt => lstm_prediction.py} | 8 ++++---- ...put_data.py.DEPRECATED.txt => input_data.py} | 9 +++++++-- other/lfu_cache.py | 5 +++-- requirements.txt | 2 +- 8 files changed, 27 insertions(+), 19 deletions(-) rename computer_vision/{cnn_classification.py.DISABLED.txt => cnn_classification.py} (100%) rename dynamic_programming/{k_means_clustering_tensorflow.py.DISABLED.txt => k_means_clustering_tensorflow.py} (100%) rename machine_learning/lstm/{lstm_prediction.py.DISABLED.txt => lstm_prediction.py} (90%) rename neural_network/{input_data.py.DEPRECATED.txt => input_data.py} (98%) diff --git a/DIRECTORY.md b/DIRECTORY.md index b5392fd09114..2f828aa512a9 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -134,6 +134,7 @@ * [Run Length Encoding](compression/run_length_encoding.py) ## Computer Vision + * [Cnn Classification](computer_vision/cnn_classification.py) * [Flip Augmentation](computer_vision/flip_augmentation.py) * [Haralick Descriptors](computer_vision/haralick_descriptors.py) * [Harris Corner](computer_vision/harris_corner.py) @@ -344,6 +345,7 @@ * [Floyd Warshall](dynamic_programming/floyd_warshall.py) * [Integer Partition](dynamic_programming/integer_partition.py) * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) + * [K Means Clustering Tensorflow](dynamic_programming/k_means_clustering_tensorflow.py) * [Knapsack](dynamic_programming/knapsack.py) * [Largest Divisible Subset](dynamic_programming/largest_divisible_subset.py) * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) @@ -571,6 +573,8 @@ * [Local Weighted 
Learning](machine_learning/local_weighted_learning/local_weighted_learning.py) * [Logistic Regression](machine_learning/logistic_regression.py) * [Loss Functions](machine_learning/loss_functions.py) + * Lstm + * [Lstm Prediction](machine_learning/lstm/lstm_prediction.py) * [Mfcc](machine_learning/mfcc.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polynomial Regression](machine_learning/polynomial_regression.py) @@ -801,6 +805,7 @@ * [Swish](neural_network/activation_functions/swish.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) + * [Input Data](neural_network/input_data.py) * [Simple Neural Network](neural_network/simple_neural_network.py) ## Other diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py index 9c41cdc5d472..3bc2ebe5fc74 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ -76,11 +76,9 @@ def encrypt_and_write_to_file( key_size, n, e = read_key_file(key_filename) if key_size < block_size * 8: sys.exit( - "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher " - "requires the block size to be equal to or greater than the key size. " - "Either decrease the block size or use different keys.".format( - block_size * 8, key_size - ) + f"ERROR: Block size is {block_size * 8} bits and key size is {key_size} " + "bits. The RSA cipher requires the block size to be equal to or greater " + "than the key size. Either decrease the block size or use different keys." ) encrypted_blocks = [str(i) for i in encrypt_message(message, (n, e), block_size)] @@ -102,11 +100,10 @@ def read_from_file_and_decrypt(message_filename: str, key_filename: str) -> str: if key_size < block_size * 8: sys.exit( - "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher " - "requires the block size to be equal to or greater than the key size. " - "Did you specify the correct key file and encrypted file?".format( - block_size * 8, key_size - ) + f"ERROR: Block size is {block_size * 8} bits and key size is {key_size} " + "bits. The RSA cipher requires the block size to be equal to or greater " + "than the key size. Did you specify the correct key file and encrypted " + "file?" ) encrypted_blocks = [] diff --git a/computer_vision/cnn_classification.py.DISABLED.txt b/computer_vision/cnn_classification.py similarity index 100% rename from computer_vision/cnn_classification.py.DISABLED.txt rename to computer_vision/cnn_classification.py diff --git a/dynamic_programming/k_means_clustering_tensorflow.py.DISABLED.txt b/dynamic_programming/k_means_clustering_tensorflow.py similarity index 100% rename from dynamic_programming/k_means_clustering_tensorflow.py.DISABLED.txt rename to dynamic_programming/k_means_clustering_tensorflow.py diff --git a/machine_learning/lstm/lstm_prediction.py.DISABLED.txt b/machine_learning/lstm/lstm_prediction.py similarity index 90% rename from machine_learning/lstm/lstm_prediction.py.DISABLED.txt rename to machine_learning/lstm/lstm_prediction.py index 16530e935ea7..ecbd451266ad 100644 --- a/machine_learning/lstm/lstm_prediction.py.DISABLED.txt +++ b/machine_learning/lstm/lstm_prediction.py @@ -17,11 +17,11 @@ make sure you set the price column on line number 21. Here we use a dataset which have the price on 3rd column. 
""" - df = pd.read_csv("sample_data.csv", header=None) - len_data = df.shape[:1][0] + sample_data = pd.read_csv("sample_data.csv", header=None) + len_data = sample_data.shape[:1][0] # If you're using some other dataset input the target column - actual_data = df.iloc[:, 1:2] - actual_data = actual_data.values.reshape(len_data, 1) + actual_data = sample_data.iloc[:, 1:2] + actual_data = actual_data.to_numpy().reshape(len_data, 1) actual_data = MinMaxScaler().fit_transform(actual_data) look_back = 10 forward_days = 5 diff --git a/neural_network/input_data.py.DEPRECATED.txt b/neural_network/input_data.py similarity index 98% rename from neural_network/input_data.py.DEPRECATED.txt rename to neural_network/input_data.py index a58e64907e45..2128449c03e9 100644 --- a/neural_network/input_data.py.DEPRECATED.txt +++ b/neural_network/input_data.py @@ -18,9 +18,9 @@ """ -import collections import gzip import os +import typing import urllib import numpy @@ -28,7 +28,12 @@ from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated -_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"]) + +class _Datasets(typing.NamedTuple): + train: "_DataSet" + validation: "_DataSet" + test: "_DataSet" + # CVDF mirror of http://yann.lecun.com/exdb/mnist/ DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/" diff --git a/other/lfu_cache.py b/other/lfu_cache.py index b68ba3a4605c..788fdf19bb60 100644 --- a/other/lfu_cache.py +++ b/other/lfu_cache.py @@ -24,8 +24,9 @@ def __init__(self, key: T | None, val: U | None): self.prev: DoubleLinkedListNode[T, U] | None = None def __repr__(self) -> str: - return "Node: key: {}, val: {}, freq: {}, has next: {}, has prev: {}".format( - self.key, self.val, self.freq, self.next is not None, self.prev is not None + return ( + f"Node: key: {self.key}, val: {self.val}, freq: {self.freq}, " + f"has next: {self.next is not None}, has prev: {self.prev is not None}" ) diff --git a/requirements.txt b/requirements.txt index 8937f6bb0dae..bb3d671393b9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,7 +17,7 @@ rich scikit-learn statsmodels sympy -tensorflow ; python_version < '3.12' +tensorflow tweepy # yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed typing_extensions From 5f95d6f805088aa7e21849b1ba97cdcf059333a9 Mon Sep 17 00:00:00 2001 From: guangwu Date: Tue, 12 Mar 2024 16:40:32 +0800 Subject: [PATCH 1342/1543] fix: function name typo (#11319) * fix: function name typo Signed-off-by: guoguangwu * lfu_cache.py: Use f-strings * rsa_cipher.py: Use f-strings --------- Signed-off-by: guoguangwu Co-authored-by: Christian Clauss --- ciphers/rsa_cipher.py | 3 +-- machine_learning/astar.py | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py index 3bc2ebe5fc74..ac9782a49fff 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ -102,8 +102,7 @@ def read_from_file_and_decrypt(message_filename: str, key_filename: str) -> str: sys.exit( f"ERROR: Block size is {block_size * 8} bits and key size is {key_size} " "bits. The RSA cipher requires the block size to be equal to or greater " - "than the key size. Did you specify the correct key file and encrypted " - "file?" + "than the key size. Were the correct key file and encrypted file specified?" 
) encrypted_blocks = [] diff --git a/machine_learning/astar.py b/machine_learning/astar.py index 7a60ed225a2d..ff5208266343 100644 --- a/machine_learning/astar.py +++ b/machine_learning/astar.py @@ -57,7 +57,7 @@ def __init__(self, world_size=(5, 5)): def show(self): print(self.w) - def get_neigbours(self, cell): + def get_neighbours(self, cell): """ Return the neighbours of cell """ @@ -110,7 +110,7 @@ def astar(world, start, goal): _closed.append(_open.pop(min_f)) if current == goal: break - for n in world.get_neigbours(current): + for n in world.get_neighbours(current): for c in _closed: if c == n: continue From bc8df6de3143b417c4d174200fd7edd0dbba4ce3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 13 Mar 2024 07:52:41 +0100 Subject: [PATCH 1343/1543] [pre-commit.ci] pre-commit autoupdate (#11322) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.2.2 → v0.3.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.2.2...v0.3.2) - [github.com/pre-commit/mirrors-mypy: v1.8.0 → v1.9.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.8.0...v1.9.0) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 +- backtracking/all_combinations.py | 7 +- backtracking/all_permutations.py | 9 ++- backtracking/all_subsequences.py | 1 + backtracking/coloring.py | 8 +- backtracking/hamiltonian_cycle.py | 10 +-- backtracking/minimax.py | 1 + backtracking/n_queens.py | 11 +-- backtracking/n_queens_math.py | 1 + backtracking/sudoku.py | 1 + backtracking/sum_of_subsets.py | 11 +-- boolean_algebra/nor_gate.py | 1 + cellular_automata/conways_game_of_life.py | 1 + cellular_automata/game_of_life.py | 3 +- cellular_automata/nagel_schrekenberg.py | 1 + ciphers/a1z26.py | 1 + ciphers/atbash.py | 3 +- ciphers/base32.py | 1 + ciphers/enigma_machine2.py | 1 + ciphers/fractionated_morse_cipher.py | 1 + ciphers/hill_cipher.py | 1 + ciphers/permutation_cipher.py | 1 + ciphers/rail_fence_cipher.py | 2 +- ciphers/rsa_factorization.py | 1 + ciphers/xor_cipher.py | 33 ++++---- compression/burrows_wheeler.py | 1 + compression/lempel_ziv.py | 4 +- compression/lempel_ziv_decompress.py | 4 +- compression/lz77.py | 1 - computer_vision/haralick_descriptors.py | 1 + computer_vision/horn_schunck.py | 16 ++-- conversions/decimal_to_hexadecimal.py | 2 +- conversions/prefix_conversions.py | 1 + conversions/temperature_conversions.py | 2 +- .../arrays/pairs_with_given_sum.py | 1 + data_structures/arrays/sparse_table.py | 15 ++-- data_structures/arrays/sudoku_solver.py | 1 + data_structures/binary_tree/avl_tree.py | 1 + .../binary_tree/binary_search_tree.py | 1 + .../binary_search_tree_recursive.py | 1 + .../binary_tree/binary_tree_node_sum.py | 1 - .../binary_tree/diameter_of_binary_tree.py | 1 + .../flatten_binarytree_to_linkedlist.py | 1 + .../binary_tree/floor_and_ceiling.py | 1 + data_structures/binary_tree/is_sorted.py | 1 + data_structures/binary_tree/is_sum_tree.py | 1 + .../binary_tree/merge_two_binary_trees.py | 1 + .../binary_tree/mirror_binary_tree.py | 1 + .../binary_tree/non_recursive_segment_tree.py | 1 + .../number_of_possible_binary_trees.py | 1 + data_structures/binary_tree/red_black_tree.py | 1 + .../binary_tree/segment_tree_other.py | 1 
+ data_structures/binary_tree/symmetric_tree.py | 1 + data_structures/binary_tree/wavelet_tree.py | 1 + data_structures/disjoint_set/disjoint_set.py | 4 +- data_structures/hashing/bloom_filter.py | 1 + data_structures/hashing/double_hash.py | 1 + data_structures/hashing/hash_map.py | 1 + .../hashing/number_theory/prime_numbers.py | 2 +- data_structures/linked_list/__init__.py | 1 + .../linked_list/merge_two_lists.py | 1 + data_structures/linked_list/skip_list.py | 1 + data_structures/queue/double_ended_queue.py | 1 + data_structures/queue/linked_queue.py | 3 +- .../queue/queue_on_pseudo_stack.py | 1 + .../stacks/dijkstras_two_stack_algorithm.py | 1 + .../stacks/stack_with_singly_linked_list.py | 3 +- .../convert_to_negative.py | 3 +- digital_image_processing/dithering/burkes.py | 1 + .../filters/bilateral_filter.py | 1 + .../filters/gaussian_filter.py | 1 + .../filters/median_filter.py | 1 + .../histogram_stretch.py | 1 + digital_image_processing/resize/resize.py | 3 +- digital_image_processing/sepia.py | 3 +- .../test_digital_image_processing.py | 1 + divide_and_conquer/convex_hull.py | 1 + divide_and_conquer/kth_order_statistic.py | 1 + divide_and_conquer/max_subarray.py | 1 + divide_and_conquer/peak.py | 1 + dynamic_programming/all_construct.py | 1 + dynamic_programming/bitmask.py | 1 + dynamic_programming/fast_fibonacci.py | 1 + .../iterating_through_submasks.py | 1 + .../longest_increasing_subsequence.py | 1 + .../matrix_chain_multiplication.py | 1 + dynamic_programming/max_subarray_sum.py | 1 + electronics/charging_capacitor.py | 1 + electronics/charging_inductor.py | 1 + electronics/resistor_color_code.py | 1 + financial/exponential_moving_average.py | 16 ++-- financial/simple_moving_average.py | 1 + fractals/koch_snowflake.py | 1 - fractals/mandelbrot.py | 1 - fractals/sierpinski_triangle.py | 1 + graphs/bi_directional_dijkstra.py | 1 - graphs/bidirectional_a_star.py | 1 + graphs/bidirectional_breadth_first_search.py | 1 + graphs/boruvka.py | 37 ++++----- graphs/breadth_first_search.py | 3 +- graphs/breadth_first_search_2.py | 1 + graphs/breadth_first_search_shortest_path.py | 1 + .../breadth_first_search_shortest_path_2.py | 9 ++- ...dth_first_search_zero_one_shortest_path.py | 1 + graphs/deep_clone_graph.py | 1 + graphs/depth_first_search.py | 1 + graphs/depth_first_search_2.py | 2 +- graphs/dijkstra.py | 1 + graphs/even_tree.py | 1 + graphs/frequent_pattern_graph_miner.py | 1 + graphs/graph_adjacency_list.py | 1 + graphs/graph_adjacency_matrix.py | 1 + graphs/graphs_floyd_warshall.py | 4 +- graphs/minimum_spanning_tree_prims2.py | 1 + graphs/page_rank.py | 1 + graphs/prim.py | 4 +- greedy_methods/gas_station.py | 1 + hashes/adler32.py | 12 +-- hashes/hamming_code.py | 76 +++++++++---------- hashes/luhn.py | 3 +- hashes/sdbm.py | 30 ++++---- hashes/sha1.py | 1 + knapsack/knapsack.py | 5 +- knapsack/tests/test_knapsack.py | 1 + linear_algebra/gaussian_elimination.py | 1 - linear_algebra/jacobi_iteration_method.py | 1 + linear_algebra/lu_decomposition.py | 1 + linear_algebra/src/conjugate_gradient.py | 1 + linear_algebra/src/lib.py | 13 ++-- linear_algebra/src/rayleigh_quotient.py | 1 + linear_algebra/src/test_linear_algebra.py | 1 + linear_algebra/src/transformations_2d.py | 1 + linear_programming/simplex.py | 1 + machine_learning/apriori_algorithm.py | 1 + machine_learning/astar.py | 1 + machine_learning/automatic_differentiation.py | 1 + machine_learning/data_transformations.py | 1 + machine_learning/decision_tree.py | 1 + machine_learning/frequent_pattern_growth.py | 1 + 
machine_learning/gradient_descent.py | 1 + machine_learning/k_means_clust.py | 1 + .../linear_discriminant_analysis.py | 65 ++++++++-------- machine_learning/linear_regression.py | 1 + machine_learning/logistic_regression.py | 1 + machine_learning/lstm/lstm_prediction.py | 9 ++- machine_learning/mfcc.py | 1 - machine_learning/self_organizing_map.py | 1 + .../sequential_minimum_optimization.py | 1 - machine_learning/similarity_search.py | 1 + maths/allocation_number.py | 1 + maths/area.py | 1 + maths/area_under_curve.py | 1 + maths/basic_maths.py | 1 + maths/binomial_distribution.py | 3 +- maths/chinese_remainder_theorem.py | 1 + maths/continued_fraction.py | 1 - maths/entropy.py | 1 + maths/gamma.py | 1 + maths/gaussian.py | 1 + maths/interquartile_range.py | 1 + maths/is_square_free.py | 1 + maths/karatsuba.py | 2 +- maths/lucas_lehmer_primality_test.py | 14 ++-- maths/maclaurin_series.py | 1 + maths/max_sum_sliding_window.py | 1 + maths/modular_exponential.py | 8 +- maths/monte_carlo.py | 1 + maths/numerical_analysis/adams_bashforth.py | 1 + maths/numerical_analysis/nevilles_method.py | 14 ++-- maths/numerical_analysis/newton_raphson.py | 1 + .../numerical_integration.py | 1 + maths/numerical_analysis/runge_kutta_gills.py | 1 + maths/numerical_analysis/secant_method.py | 1 + maths/prime_factors.py | 1 + maths/series/geometric_series.py | 1 - maths/series/p_series.py | 1 - maths/sieve_of_eratosthenes.py | 1 + maths/solovay_strassen_primality_test.py | 1 - maths/special_numbers/armstrong_numbers.py | 1 + maths/special_numbers/weird_number.py | 1 + maths/tanh.py | 1 + maths/triplet_sum.py | 1 + maths/two_pointer.py | 1 + maths/two_sum.py | 1 + maths/volume.py | 1 + matrix/matrix_multiplication_recursion.py | 1 + networking_flow/ford_fulkerson.py | 1 + .../activation_functions/binary_step.py | 1 - .../rectified_linear_unit.py | 1 + .../soboleva_modified_hyperbolic_tangent.py | 1 - .../back_propagation_neural_network.py | 1 + neural_network/convolution_neural_network.py | 27 +++---- neural_network/input_data.py | 1 - other/davis_putnam_logemann_loveland.py | 1 + other/fischer_yates_shuffle.py | 1 + other/gauss_easter.py | 1 + other/majority_vote_algorithm.py | 1 + other/quine.py | 1 + other/word_search.py | 1 - .../archimedes_principle_of_buoyant_force.py | 1 - physics/center_of_mass.py | 1 + physics/in_static_equilibrium.py | 1 + physics/n_body_simulation.py | 1 - physics/rms_speed_of_molecule.py | 1 - project_euler/problem_002/sol4.py | 1 + project_euler/problem_003/sol1.py | 1 + project_euler/problem_006/sol3.py | 1 + project_euler/problem_007/sol2.py | 1 + project_euler/problem_007/sol3.py | 1 + project_euler/problem_008/sol2.py | 1 + project_euler/problem_008/sol3.py | 1 + project_euler/problem_010/sol2.py | 1 + project_euler/problem_013/sol1.py | 1 + project_euler/problem_014/sol2.py | 1 + project_euler/problem_015/sol1.py | 1 + project_euler/problem_018/solution.py | 1 + project_euler/problem_020/sol2.py | 1 + project_euler/problem_020/sol3.py | 1 + project_euler/problem_021/sol1.py | 1 + project_euler/problem_022/sol1.py | 1 + project_euler/problem_022/sol2.py | 1 + project_euler/problem_024/sol1.py | 1 + project_euler/problem_025/sol2.py | 1 + project_euler/problem_030/sol1.py | 3 +- project_euler/problem_032/sol32.py | 1 + project_euler/problem_033/sol1.py | 1 + project_euler/problem_035/sol1.py | 1 + project_euler/problem_036/sol1.py | 1 + project_euler/problem_038/sol1.py | 1 + project_euler/problem_041/sol1.py | 1 + project_euler/problem_042/solution42.py | 1 + 
project_euler/problem_043/sol1.py | 1 - project_euler/problem_050/sol1.py | 1 + project_euler/problem_051/sol1.py | 1 + project_euler/problem_053/sol1.py | 1 + project_euler/problem_054/sol1.py | 1 + project_euler/problem_058/sol1.py | 1 + project_euler/problem_059/sol1.py | 1 + project_euler/problem_067/sol1.py | 1 + project_euler/problem_067/sol2.py | 1 + project_euler/problem_070/sol1.py | 1 + project_euler/problem_074/sol1.py | 1 - project_euler/problem_074/sol2.py | 1 + project_euler/problem_077/sol1.py | 1 + project_euler/problem_079/sol1.py | 1 + project_euler/problem_080/sol1.py | 1 + project_euler/problem_081/sol1.py | 1 + project_euler/problem_085/sol1.py | 1 + project_euler/problem_086/sol1.py | 1 - project_euler/problem_091/sol1.py | 1 - project_euler/problem_101/sol1.py | 1 + project_euler/problem_102/sol1.py | 1 + project_euler/problem_107/sol1.py | 1 + project_euler/problem_123/sol1.py | 1 + project_euler/problem_144/sol1.py | 1 - project_euler/problem_145/sol1.py | 1 + project_euler/problem_173/sol1.py | 1 - project_euler/problem_180/sol1.py | 1 + project_euler/problem_191/sol1.py | 1 - project_euler/problem_203/sol1.py | 1 + project_euler/problem_551/sol1.py | 1 - scheduling/highest_response_ratio_next.py | 1 + scheduling/job_sequence_with_deadline.py | 1 + .../non_preemptive_shortest_job_first.py | 1 - scheduling/round_robin.py | 1 + scheduling/shortest_job_first.py | 1 + searches/binary_search.py | 1 + searches/binary_tree_traversal.py | 1 + searches/fibonacci_search.py | 1 + searches/jump_search.py | 3 +- searches/quick_select.py | 1 + searches/simple_binary_search.py | 1 + searches/tabu_search.py | 1 + searches/ternary_search.py | 1 + sorts/bitonic_sort.py | 1 + sorts/bucket_sort.py | 1 + sorts/dutch_national_flag_sort.py | 1 - sorts/insertion_sort.py | 3 +- sorts/intro_sort.py | 1 + sorts/msd_radix_sort.py | 1 + sorts/odd_even_transposition_parallel.py | 1 + sorts/pigeon_sort.py | 15 ++-- sorts/quick_sort.py | 1 + sorts/radix_sort.py | 1 + sorts/recursive_insertion_sort.py | 1 + sorts/slowsort.py | 1 + sorts/tree_sort.py | 1 + strings/boyer_moore_search.py | 1 + strings/check_anagrams.py | 1 + strings/top_k_frequent_words.py | 1 - web_programming/co2_emission.py | 1 + web_programming/emails_from_url.py | 1 + web_programming/fetch_github_info.py | 1 + web_programming/fetch_jobs.py | 1 + web_programming/get_amazon_product_data.py | 1 - web_programming/recaptcha_verification.py | 1 + web_programming/search_books_by_isbn.py | 1 + 297 files changed, 498 insertions(+), 295 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index be8364a7fc0b..a17c4c323c30 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.2.2 + rev: v0.3.2 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.8.0 + rev: v1.9.0 hooks: - id: mypy args: diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py index 407304948c39..390decf3a05b 100644 --- a/backtracking/all_combinations.py +++ b/backtracking/all_combinations.py @@ -1,9 +1,10 @@ """ - In this problem, we want to determine all possible combinations of k - numbers out of 1 ... n. We use backtracking to solve this problem. +In this problem, we want to determine all possible combinations of k +numbers out of 1 ... n. We use backtracking to solve this problem. 
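(Editor's aside, not part of this commit.) The docstring above describes generating all combinations of k numbers out of 1..n by backtracking; a minimal sketch of that idea, with names invented for the example, could look like:

def combinations_backtrack(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []

    def backtrack(start: int, current: list[int]) -> None:
        if len(current) == k:  # a full combination of k numbers has been built
            result.append(current[:])
            return
        for number in range(start, n + 1):
            current.append(number)          # choose the next candidate
            backtrack(number + 1, current)  # explore with it included
            current.pop()                   # undo the choice (backtrack)

    backtrack(1, [])
    return result

# combinations_backtrack(4, 2) -> [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]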
- Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!))), +Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!))), """ + from __future__ import annotations from itertools import combinations diff --git a/backtracking/all_permutations.py b/backtracking/all_permutations.py index ff8a53e0dd0e..c483cd62c99b 100644 --- a/backtracking/all_permutations.py +++ b/backtracking/all_permutations.py @@ -1,10 +1,11 @@ """ - In this problem, we want to determine all possible permutations - of the given sequence. We use backtracking to solve this problem. +In this problem, we want to determine all possible permutations +of the given sequence. We use backtracking to solve this problem. - Time complexity: O(n! * n), - where n denotes the length of the given sequence. +Time complexity: O(n! * n), +where n denotes the length of the given sequence. """ + from __future__ import annotations diff --git a/backtracking/all_subsequences.py b/backtracking/all_subsequences.py index c465fc542407..7844a829d046 100644 --- a/backtracking/all_subsequences.py +++ b/backtracking/all_subsequences.py @@ -5,6 +5,7 @@ Time complexity: O(2^n), where n denotes the length of the given sequence. """ + from __future__ import annotations from typing import Any diff --git a/backtracking/coloring.py b/backtracking/coloring.py index 9d539de8a3c4..f10cdbcf9d26 100644 --- a/backtracking/coloring.py +++ b/backtracking/coloring.py @@ -1,9 +1,9 @@ """ - Graph Coloring also called "m coloring problem" - consists of coloring a given graph with at most m colors - such that no adjacent vertices are assigned the same color +Graph Coloring also called "m coloring problem" +consists of coloring a given graph with at most m colors +such that no adjacent vertices are assigned the same color - Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring +Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring """ diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py index e9916f83f861..f6e4212e47f4 100644 --- a/backtracking/hamiltonian_cycle.py +++ b/backtracking/hamiltonian_cycle.py @@ -1,10 +1,10 @@ """ - A Hamiltonian cycle (Hamiltonian circuit) is a graph cycle - through a graph that visits each node exactly once. - Determining whether such paths and cycles exist in graphs - is the 'Hamiltonian path problem', which is NP-complete. +A Hamiltonian cycle (Hamiltonian circuit) is a graph cycle +through a graph that visits each node exactly once. +Determining whether such paths and cycles exist in graphs +is the 'Hamiltonian path problem', which is NP-complete. - Wikipedia: https://en.wikipedia.org/wiki/Hamiltonian_path +Wikipedia: https://en.wikipedia.org/wiki/Hamiltonian_path """ diff --git a/backtracking/minimax.py b/backtracking/minimax.py index 6dece2990a1c..4eef90b75483 100644 --- a/backtracking/minimax.py +++ b/backtracking/minimax.py @@ -7,6 +7,7 @@ leaves of game tree is stored in scores[] height is maximum height of Game tree """ + from __future__ import annotations import math diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index 2cd8c703fc72..81668b17a0ac 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -1,12 +1,13 @@ """ - The nqueens problem is of placing N queens on a N * N - chess board such that no queen can attack any other queens placed - on that chess board. - This means that one queen cannot have any other queen on its horizontal, vertical and - diagonal lines. 
+The nqueens problem is of placing N queens on a N * N +chess board such that no queen can attack any other queens placed +on that chess board. +This means that one queen cannot have any other queen on its horizontal, vertical and +diagonal lines. """ + from __future__ import annotations solution = [] diff --git a/backtracking/n_queens_math.py b/backtracking/n_queens_math.py index f3b08ab0a05f..287d1f090373 100644 --- a/backtracking/n_queens_math.py +++ b/backtracking/n_queens_math.py @@ -75,6 +75,7 @@ for another one or vice versa. """ + from __future__ import annotations diff --git a/backtracking/sudoku.py b/backtracking/sudoku.py index 6e4e3e8780f2..8f5459c76d45 100644 --- a/backtracking/sudoku.py +++ b/backtracking/sudoku.py @@ -9,6 +9,7 @@ have solved the puzzle. else, we backtrack and place another number in that cell and repeat this process. """ + from __future__ import annotations Matrix = list[list[int]] diff --git a/backtracking/sum_of_subsets.py b/backtracking/sum_of_subsets.py index c5e23321cb0c..f34d3ca34339 100644 --- a/backtracking/sum_of_subsets.py +++ b/backtracking/sum_of_subsets.py @@ -1,11 +1,12 @@ """ - The sum-of-subsetsproblem states that a set of non-negative integers, and a - value M, determine all possible subsets of the given set whose summation sum - equal to given M. +The sum-of-subsetsproblem states that a set of non-negative integers, and a +value M, determine all possible subsets of the given set whose summation sum +equal to given M. - Summation of the chosen numbers must be equal to given number M and one number - can be used only once. +Summation of the chosen numbers must be equal to given number M and one number +can be used only once. """ + from __future__ import annotations diff --git a/boolean_algebra/nor_gate.py b/boolean_algebra/nor_gate.py index 0c8ab1c0af61..d4d6f0da23ea 100644 --- a/boolean_algebra/nor_gate.py +++ b/boolean_algebra/nor_gate.py @@ -12,6 +12,7 @@ Code provided by Akshaj Vishwanathan https://www.geeksforgeeks.org/logic-gates-in-python """ + from collections.abc import Callable diff --git a/cellular_automata/conways_game_of_life.py b/cellular_automata/conways_game_of_life.py index 84f4d5be40da..364a34c3aba6 100644 --- a/cellular_automata/conways_game_of_life.py +++ b/cellular_automata/conways_game_of_life.py @@ -2,6 +2,7 @@ Conway's Game of Life implemented in Python. https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life """ + from __future__ import annotations from PIL import Image diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index d691a2b73af0..67e647d6475b 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -26,7 +26,8 @@ 4. Any dead cell with exactly three live neighbours be- comes a live cell, as if by reproduction. 
- """ +""" + import random import sys diff --git a/cellular_automata/nagel_schrekenberg.py b/cellular_automata/nagel_schrekenberg.py index 3fd6afca0153..bcdca902afee 100644 --- a/cellular_automata/nagel_schrekenberg.py +++ b/cellular_automata/nagel_schrekenberg.py @@ -24,6 +24,7 @@ >>> simulate(construct_highway(5, 2, -2), 3, 0, 2) [[0, -1, 0, -1, 0], [0, -1, 0, -1, -1], [0, -1, -1, 1, -1], [-1, 1, -1, 0, -1]] """ + from random import randint, random diff --git a/ciphers/a1z26.py b/ciphers/a1z26.py index 0f0eb7c5c083..a1377ea6d397 100644 --- a/ciphers/a1z26.py +++ b/ciphers/a1z26.py @@ -5,6 +5,7 @@ https://www.dcode.fr/letter-number-cipher http://bestcodes.weebly.com/a1z26.html """ + from __future__ import annotations diff --git a/ciphers/atbash.py b/ciphers/atbash.py index 0a86a800c51a..4e8f663ed02d 100644 --- a/ciphers/atbash.py +++ b/ciphers/atbash.py @@ -1,4 +1,5 @@ -""" https://en.wikipedia.org/wiki/Atbash """ +"""https://en.wikipedia.org/wiki/Atbash""" + import string diff --git a/ciphers/base32.py b/ciphers/base32.py index 1924d1e185d7..911afa2452c0 100644 --- a/ciphers/base32.py +++ b/ciphers/base32.py @@ -3,6 +3,7 @@ https://en.wikipedia.org/wiki/Base32 """ + B32_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index ec0d44e4a6c6..163aa7172c11 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -14,6 +14,7 @@ Created by TrapinchO """ + from __future__ import annotations RotorPositionT = tuple[int, int, int] diff --git a/ciphers/fractionated_morse_cipher.py b/ciphers/fractionated_morse_cipher.py index c1d5dc6d50aa..6c4c415abac1 100644 --- a/ciphers/fractionated_morse_cipher.py +++ b/ciphers/fractionated_morse_cipher.py @@ -8,6 +8,7 @@ http://practicalcryptography.com/ciphers/fractionated-morse-cipher/ """ + import string MORSE_CODE_DICT = { diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index 1201fda901e5..ea337a72dc04 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -35,6 +35,7 @@ https://www.youtube.com/watch?v=4RhLNDqcjpA """ + import string import numpy diff --git a/ciphers/permutation_cipher.py b/ciphers/permutation_cipher.py index c3f3fd1f7f94..9e1c64a7b4ea 100644 --- a/ciphers/permutation_cipher.py +++ b/ciphers/permutation_cipher.py @@ -7,6 +7,7 @@ For more info: https://www.nku.edu/~christensen/1402%20permutation%20ciphers.pdf """ + import random diff --git a/ciphers/rail_fence_cipher.py b/ciphers/rail_fence_cipher.py index 47ee7db89831..5b2311a115e4 100644 --- a/ciphers/rail_fence_cipher.py +++ b/ciphers/rail_fence_cipher.py @@ -1,4 +1,4 @@ -""" https://en.wikipedia.org/wiki/Rail_fence_cipher """ +"""https://en.wikipedia.org/wiki/Rail_fence_cipher""" def encrypt(input_string: str, key: int) -> str: diff --git a/ciphers/rsa_factorization.py b/ciphers/rsa_factorization.py index 9ee52777ed83..0a358a4fc2d4 100644 --- a/ciphers/rsa_factorization.py +++ b/ciphers/rsa_factorization.py @@ -7,6 +7,7 @@ More readable source: https://www.di-mgt.com.au/rsa_factorize_n.html large number can take minutes to factor, therefore are not included in doctest. 
""" + from __future__ import annotations import math diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py index e30955d41ff1..24d88a0fd588 100644 --- a/ciphers/xor_cipher.py +++ b/ciphers/xor_cipher.py @@ -1,21 +1,22 @@ """ - author: Christian Bender - date: 21.12.2017 - class: XORCipher - - This class implements the XOR-cipher algorithm and provides - some useful methods for encrypting and decrypting strings and - files. - - Overview about methods - - - encrypt : list of char - - decrypt : list of char - - encrypt_string : str - - decrypt_string : str - - encrypt_file : boolean - - decrypt_file : boolean +author: Christian Bender +date: 21.12.2017 +class: XORCipher + +This class implements the XOR-cipher algorithm and provides +some useful methods for encrypting and decrypting strings and +files. + +Overview about methods + +- encrypt : list of char +- decrypt : list of char +- encrypt_string : str +- decrypt_string : str +- encrypt_file : boolean +- decrypt_file : boolean """ + from __future__ import annotations diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py index 52bb045d9398..ce493a70c8f9 100644 --- a/compression/burrows_wheeler.py +++ b/compression/burrows_wheeler.py @@ -10,6 +10,7 @@ original character. The BWT is thus a "free" method of improving the efficiency of text compression algorithms, costing only some extra computation. """ + from __future__ import annotations from typing import TypedDict diff --git a/compression/lempel_ziv.py b/compression/lempel_ziv.py index ea6f33944a91..ac3f0c6cfc06 100644 --- a/compression/lempel_ziv.py +++ b/compression/lempel_ziv.py @@ -1,6 +1,6 @@ """ - One of the several implementations of Lempel–Ziv–Welch compression algorithm - https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch +One of the several implementations of Lempel–Ziv–Welch compression algorithm +https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch """ import math diff --git a/compression/lempel_ziv_decompress.py b/compression/lempel_ziv_decompress.py index ddedc3d6d32a..0e49c83fb790 100644 --- a/compression/lempel_ziv_decompress.py +++ b/compression/lempel_ziv_decompress.py @@ -1,6 +1,6 @@ """ - One of the several implementations of Lempel–Ziv–Welch decompression algorithm - https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch +One of the several implementations of Lempel–Ziv–Welch decompression algorithm +https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch """ import math diff --git a/compression/lz77.py b/compression/lz77.py index 1b201c59f186..09b8b021e9d5 100644 --- a/compression/lz77.py +++ b/compression/lz77.py @@ -28,7 +28,6 @@ en.wikipedia.org/wiki/LZ77_and_LZ78 """ - from dataclasses import dataclass __version__ = "0.1" diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py index 007421e34263..712bd49668f8 100644 --- a/computer_vision/haralick_descriptors.py +++ b/computer_vision/haralick_descriptors.py @@ -2,6 +2,7 @@ https://en.wikipedia.org/wiki/Image_texture https://en.wikipedia.org/wiki/Co-occurrence_matrix#Application_to_image_analysis """ + import imageio.v2 as imageio import numpy as np diff --git a/computer_vision/horn_schunck.py b/computer_vision/horn_schunck.py index b63e0268294c..f33b5b1c794b 100644 --- a/computer_vision/horn_schunck.py +++ b/computer_vision/horn_schunck.py @@ -1,12 +1,12 @@ """ - The Horn-Schunck method estimates the optical flow for every single pixel of - a sequence of images. 
- It works by assuming brightness constancy between two consecutive frames - and smoothness in the optical flow. - - Useful resources: - Wikipedia: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method - Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf +The Horn-Schunck method estimates the optical flow for every single pixel of +a sequence of images. +It works by assuming brightness constancy between two consecutive frames +and smoothness in the optical flow. + +Useful resources: +Wikipedia: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method +Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf """ from typing import SupportsIndex diff --git a/conversions/decimal_to_hexadecimal.py b/conversions/decimal_to_hexadecimal.py index b1fb4f082242..ee79592de5ca 100644 --- a/conversions/decimal_to_hexadecimal.py +++ b/conversions/decimal_to_hexadecimal.py @@ -1,4 +1,4 @@ -""" Convert Base 10 (Decimal) Values to Hexadecimal Representations """ +"""Convert Base 10 (Decimal) Values to Hexadecimal Representations""" # set decimal value for each hexadecimal digit values = { diff --git a/conversions/prefix_conversions.py b/conversions/prefix_conversions.py index 06b759e355a7..714677f3b242 100644 --- a/conversions/prefix_conversions.py +++ b/conversions/prefix_conversions.py @@ -1,6 +1,7 @@ """ Convert International System of Units (SI) and Binary prefixes """ + from __future__ import annotations from enum import Enum diff --git a/conversions/temperature_conversions.py b/conversions/temperature_conversions.py index f7af6c8f1e2b..dde1d2f0f166 100644 --- a/conversions/temperature_conversions.py +++ b/conversions/temperature_conversions.py @@ -1,4 +1,4 @@ -""" Convert between different units of temperature """ +"""Convert between different units of temperature""" def celsius_to_fahrenheit(celsius: float, ndigits: int = 2) -> float: diff --git a/data_structures/arrays/pairs_with_given_sum.py b/data_structures/arrays/pairs_with_given_sum.py index c4a5ceeae456..b27bd78e1e0f 100644 --- a/data_structures/arrays/pairs_with_given_sum.py +++ b/data_structures/arrays/pairs_with_given_sum.py @@ -6,6 +6,7 @@ https://practice.geeksforgeeks.org/problems/count-pairs-with-given-sum5022/0 """ + from itertools import combinations diff --git a/data_structures/arrays/sparse_table.py b/data_structures/arrays/sparse_table.py index a15d5649e712..4606fe908607 100644 --- a/data_structures/arrays/sparse_table.py +++ b/data_structures/arrays/sparse_table.py @@ -1,15 +1,16 @@ """ - Sparse table is a data structure that allows answering range queries on - a static number list, i.e. the elements do not change throughout all the queries. +Sparse table is a data structure that allows answering range queries on +a static number list, i.e. the elements do not change throughout all the queries. - The implementation below will solve the problem of Range Minimum Query: - Finding the minimum value of a subset [L..R] of a static number list. +The implementation below will solve the problem of Range Minimum Query: +Finding the minimum value of a subset [L..R] of a static number list. 
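(Editor's aside, not part of this commit.) The Range Minimum Query description above can be illustrated with a small sparse-table sketch: minima over power-of-two-length intervals are precomputed in O(n log n), after which any [left, right] query is answered in O(1) by overlapping two such intervals. Function names are invented for the example.

from math import log2

def build_sparse_table(numbers: list[int]) -> list[list[int]]:
    n = len(numbers)
    levels = int(log2(n)) + 1
    table = [[0] * n for _ in range(levels)]
    table[0] = numbers[:]  # level 0 holds intervals of length 1
    for j in range(1, levels):
        for i in range(n - (1 << j) + 1):  # intervals of length 2**j starting at i
            table[j][i] = min(table[j - 1][i], table[j - 1][i + (1 << (j - 1))])
    return table

def query_min(table: list[list[int]], left: int, right: int) -> int:
    j = int(log2(right - left + 1))  # largest power of two that fits in the range
    return min(table[j][left], table[j][right - (1 << j) + 1])

# query_min(build_sparse_table([0, 5, 2, 8]), 1, 3) -> 2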
- Overall time complexity: O(nlogn) - Overall space complexity: O(nlogn) +Overall time complexity: O(nlogn) +Overall space complexity: O(nlogn) - Wikipedia link: https://en.wikipedia.org/wiki/Range_minimum_query +Wikipedia link: https://en.wikipedia.org/wiki/Range_minimum_query """ + from math import log2 diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 20ac32e3b071..c9dffcde2379 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -3,6 +3,7 @@ only minimal changes to work with modern versions of Python. If you have improvements, please make them in a separate file. """ + import random import time diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 4c1fb17afe86..041ed7e36d16 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -5,6 +5,7 @@ For testing run: python avl_tree.py """ + from __future__ import annotations import math diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 9071f03dcc8c..08a60a12065d 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -88,6 +88,7 @@ >>> not t True """ + from __future__ import annotations from collections.abc import Iterable, Iterator diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index 13b9b392175c..6af1b053f42c 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -7,6 +7,7 @@ To run an example: python binary_search_tree_recursive.py """ + from __future__ import annotations import unittest diff --git a/data_structures/binary_tree/binary_tree_node_sum.py b/data_structures/binary_tree/binary_tree_node_sum.py index 5a13e74e3c9f..066617b616c4 100644 --- a/data_structures/binary_tree/binary_tree_node_sum.py +++ b/data_structures/binary_tree/binary_tree_node_sum.py @@ -8,7 +8,6 @@ frames that could be in memory is `n` """ - from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/binary_tree/diameter_of_binary_tree.py b/data_structures/binary_tree/diameter_of_binary_tree.py index bbe70b028d24..75e5e7373323 100644 --- a/data_structures/binary_tree/diameter_of_binary_tree.py +++ b/data_structures/binary_tree/diameter_of_binary_tree.py @@ -2,6 +2,7 @@ The diameter/width of a tree is defined as the number of nodes on the longest path between two end nodes. 
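(Editor's aside, not part of this commit.) Using the node-counting definition quoted above, the diameter can be obtained from subtree heights in a single DFS; a self-contained sketch with invented names:

from __future__ import annotations

from dataclasses import dataclass

@dataclass
class Node:
    value: int
    left: Node | None = None
    right: Node | None = None

def diameter(root: Node | None) -> int:
    best = 0

    def height(node: Node | None) -> int:
        nonlocal best
        if node is None:
            return 0
        left_height = height(node.left)
        right_height = height(node.right)
        # longest path through this node, measured in nodes
        best = max(best, left_height + right_height + 1)
        return 1 + max(left_height, right_height)

    height(root)
    return best

# diameter(Node(1, Node(2), Node(3))) -> 3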
""" + from __future__ import annotations from dataclasses import dataclass diff --git a/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py b/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py index 8820a509ecba..9b2c7b9af24b 100644 --- a/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py +++ b/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py @@ -10,6 +10,7 @@ Author: Arunkumar A Date: 04/09/2023 """ + from __future__ import annotations diff --git a/data_structures/binary_tree/floor_and_ceiling.py b/data_structures/binary_tree/floor_and_ceiling.py index f8a1adbd967b..b464aefad3a2 100644 --- a/data_structures/binary_tree/floor_and_ceiling.py +++ b/data_structures/binary_tree/floor_and_ceiling.py @@ -9,6 +9,7 @@ Author : Arunkumar Date : 14th October 2023 """ + from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/binary_tree/is_sorted.py b/data_structures/binary_tree/is_sorted.py index 5876c5a9c96a..509a426611e5 100644 --- a/data_structures/binary_tree/is_sorted.py +++ b/data_structures/binary_tree/is_sorted.py @@ -13,6 +13,7 @@ Runtime: O(n) Space: O(1) """ + from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/binary_tree/is_sum_tree.py b/data_structures/binary_tree/is_sum_tree.py index 3f9cf1d560a6..846bea0fe0f2 100644 --- a/data_structures/binary_tree/is_sum_tree.py +++ b/data_structures/binary_tree/is_sum_tree.py @@ -3,6 +3,7 @@ of the values of its left and right subtrees? https://www.geeksforgeeks.org/check-if-a-given-binary-tree-is-sumtree """ + from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/binary_tree/merge_two_binary_trees.py b/data_structures/binary_tree/merge_two_binary_trees.py index 3380f8c5fb31..6bbb30428704 100644 --- a/data_structures/binary_tree/merge_two_binary_trees.py +++ b/data_structures/binary_tree/merge_two_binary_trees.py @@ -5,6 +5,7 @@ both nodes to the new value of the merged node. Otherwise, the NOT null node will be used as the node of new tree. 
""" + from __future__ import annotations diff --git a/data_structures/binary_tree/mirror_binary_tree.py b/data_structures/binary_tree/mirror_binary_tree.py index 39305c2a9da2..62e2f08dd4e0 100644 --- a/data_structures/binary_tree/mirror_binary_tree.py +++ b/data_structures/binary_tree/mirror_binary_tree.py @@ -3,6 +3,7 @@ Leetcode problem reference: https://leetcode.com/problems/mirror-binary-tree/ """ + from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index 04164e5cba4e..42c78a3a1be0 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -35,6 +35,7 @@ >>> st.query(0, 2) [1, 2, 3] """ + from __future__ import annotations from collections.abc import Callable diff --git a/data_structures/binary_tree/number_of_possible_binary_trees.py b/data_structures/binary_tree/number_of_possible_binary_trees.py index 684c518b1eb6..1c3dff37e7d9 100644 --- a/data_structures/binary_tree/number_of_possible_binary_trees.py +++ b/data_structures/binary_tree/number_of_possible_binary_trees.py @@ -6,6 +6,7 @@ Further details at Wikipedia: https://en.wikipedia.org/wiki/Catalan_number """ + """ Our Contribution: Basically we Create the 2 function: diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index fc299301da8a..3b5845cd957b 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -2,6 +2,7 @@ psf/black : true ruff : passed """ + from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/binary_tree/segment_tree_other.py b/data_structures/binary_tree/segment_tree_other.py index cc77c4951f1a..95f21ddd4777 100644 --- a/data_structures/binary_tree/segment_tree_other.py +++ b/data_structures/binary_tree/segment_tree_other.py @@ -3,6 +3,7 @@ allowing queries to be done later in log(N) time function takes 2 values and returns a same type value """ + from collections.abc import Sequence from queue import Queue diff --git a/data_structures/binary_tree/symmetric_tree.py b/data_structures/binary_tree/symmetric_tree.py index 331a25849c1c..98a766cab988 100644 --- a/data_structures/binary_tree/symmetric_tree.py +++ b/data_structures/binary_tree/symmetric_tree.py @@ -4,6 +4,7 @@ Leetcode reference: https://leetcode.com/problems/symmetric-tree/ """ + from __future__ import annotations from dataclasses import dataclass diff --git a/data_structures/binary_tree/wavelet_tree.py b/data_structures/binary_tree/wavelet_tree.py index 041e140f5b15..2da571e8d326 100644 --- a/data_structures/binary_tree/wavelet_tree.py +++ b/data_structures/binary_tree/wavelet_tree.py @@ -7,6 +7,7 @@ 2. https://www.youtube.com/watch?v=4aSv9PcecDw&t=811s 3. https://www.youtube.com/watch?v=CybAgVF-MMc&t=1178s """ + from __future__ import annotations test_array = [2, 1, 4, 5, 6, 0, 8, 9, 1, 2, 0, 6, 4, 2, 0, 6, 5, 3, 2, 7] diff --git a/data_structures/disjoint_set/disjoint_set.py b/data_structures/disjoint_set/disjoint_set.py index 12dafb2d935e..edc4736b6132 100644 --- a/data_structures/disjoint_set/disjoint_set.py +++ b/data_structures/disjoint_set/disjoint_set.py @@ -1,6 +1,6 @@ """ - Disjoint set. - Reference: https://en.wikipedia.org/wiki/Disjoint-set_data_structure +Disjoint set. 
+Reference: https://en.wikipedia.org/wiki/Disjoint-set_data_structure """ diff --git a/data_structures/hashing/bloom_filter.py b/data_structures/hashing/bloom_filter.py index 7fd0985bdc33..eb2cb4b79c46 100644 --- a/data_structures/hashing/bloom_filter.py +++ b/data_structures/hashing/bloom_filter.py @@ -58,6 +58,7 @@ >>> bloom.bitstring '01100101' """ + from hashlib import md5, sha256 HASH_FUNCTIONS = (sha256, md5) diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py index 76c6c86814ec..324282cbfd8d 100644 --- a/data_structures/hashing/double_hash.py +++ b/data_structures/hashing/double_hash.py @@ -11,6 +11,7 @@ Reference: https://en.wikipedia.org/wiki/Double_hashing """ + from .hash_table import HashTable from .number_theory.prime_numbers import is_prime, next_prime diff --git a/data_structures/hashing/hash_map.py b/data_structures/hashing/hash_map.py index 6a6f8e54d5e9..9213d6930f67 100644 --- a/data_structures/hashing/hash_map.py +++ b/data_structures/hashing/hash_map.py @@ -7,6 +7,7 @@ Modern Dictionaries by Raymond Hettinger https://www.youtube.com/watch?v=p33CVV29OG8 """ + from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar diff --git a/data_structures/hashing/number_theory/prime_numbers.py b/data_structures/hashing/number_theory/prime_numbers.py index 0c25896f9880..2549a1477b2b 100644 --- a/data_structures/hashing/number_theory/prime_numbers.py +++ b/data_structures/hashing/number_theory/prime_numbers.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 """ - module to operations with prime numbers +module to operations with prime numbers """ import math diff --git a/data_structures/linked_list/__init__.py b/data_structures/linked_list/__init__.py index 225113f72cee..00ef337a1211 100644 --- a/data_structures/linked_list/__init__.py +++ b/data_structures/linked_list/__init__.py @@ -5,6 +5,7 @@ head node gives us access of the complete list - Last node: points to null """ + from __future__ import annotations from typing import Any diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py index ca0d3bb48540..e47dbdadcf39 100644 --- a/data_structures/linked_list/merge_two_lists.py +++ b/data_structures/linked_list/merge_two_lists.py @@ -1,6 +1,7 @@ """ Algorithm that merges two sorted linked lists into one sorted linked list. """ + from __future__ import annotations from collections.abc import Iterable, Iterator diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index 4413c53e520e..88d3e0daddf0 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -2,6 +2,7 @@ Based on "Skip Lists: A Probabilistic Alternative to Balanced Trees" by William Pugh https://epaperpress.com/sortsearch/download/skiplist.pdf """ + from __future__ import annotations from random import random diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 17a23038d288..607d0bda3df4 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -1,6 +1,7 @@ """ Implementation of double ended queue. 
""" + from __future__ import annotations from collections.abc import Iterable diff --git a/data_structures/queue/linked_queue.py b/data_structures/queue/linked_queue.py index 3af97d28e4f7..80f6d309af9a 100644 --- a/data_structures/queue/linked_queue.py +++ b/data_structures/queue/linked_queue.py @@ -1,4 +1,5 @@ -""" A Queue using a linked list like structure """ +"""A Queue using a linked list like structure""" + from __future__ import annotations from collections.abc import Iterator diff --git a/data_structures/queue/queue_on_pseudo_stack.py b/data_structures/queue/queue_on_pseudo_stack.py index d9845100008e..2da67ecc263c 100644 --- a/data_structures/queue/queue_on_pseudo_stack.py +++ b/data_structures/queue/queue_on_pseudo_stack.py @@ -1,4 +1,5 @@ """Queue represented by a pseudo stack (represented by a list with pop and append)""" + from typing import Any diff --git a/data_structures/stacks/dijkstras_two_stack_algorithm.py b/data_structures/stacks/dijkstras_two_stack_algorithm.py index 976c9a53c931..94d19156f1c3 100644 --- a/data_structures/stacks/dijkstras_two_stack_algorithm.py +++ b/data_structures/stacks/dijkstras_two_stack_algorithm.py @@ -29,6 +29,7 @@ NOTE: It only works with whole numbers. """ + __author__ = "Alexander Joslin" import operator as op diff --git a/data_structures/stacks/stack_with_singly_linked_list.py b/data_structures/stacks/stack_with_singly_linked_list.py index f5ce83b863ce..8e77c2b967ef 100644 --- a/data_structures/stacks/stack_with_singly_linked_list.py +++ b/data_structures/stacks/stack_with_singly_linked_list.py @@ -1,4 +1,5 @@ -""" A Stack using a linked list like structure """ +"""A Stack using a linked list like structure""" + from __future__ import annotations from collections.abc import Iterator diff --git a/digital_image_processing/convert_to_negative.py b/digital_image_processing/convert_to_negative.py index 7df44138973c..9bf2d8f2c075 100644 --- a/digital_image_processing/convert_to_negative.py +++ b/digital_image_processing/convert_to_negative.py @@ -1,6 +1,7 @@ """ - Implemented an algorithm using opencv to convert a colored image into its negative +Implemented an algorithm using opencv to convert a colored image into its negative """ + from cv2 import destroyAllWindows, imread, imshow, waitKey diff --git a/digital_image_processing/dithering/burkes.py b/digital_image_processing/dithering/burkes.py index 35aedc16d404..4b59356d8f08 100644 --- a/digital_image_processing/dithering/burkes.py +++ b/digital_image_processing/dithering/burkes.py @@ -1,6 +1,7 @@ """ Implementation Burke's algorithm (dithering) """ + import numpy as np from cv2 import destroyAllWindows, imread, imshow, waitKey diff --git a/digital_image_processing/filters/bilateral_filter.py b/digital_image_processing/filters/bilateral_filter.py index 199ac4d9939a..6ef4434d959c 100644 --- a/digital_image_processing/filters/bilateral_filter.py +++ b/digital_image_processing/filters/bilateral_filter.py @@ -9,6 +9,7 @@ Output: img:A 2d zero padded image with values in between 0 and 1 """ + import math import sys diff --git a/digital_image_processing/filters/gaussian_filter.py b/digital_image_processing/filters/gaussian_filter.py index 634d836e5edc..0c34e59fafe5 100644 --- a/digital_image_processing/filters/gaussian_filter.py +++ b/digital_image_processing/filters/gaussian_filter.py @@ -1,6 +1,7 @@ """ Implementation of gaussian filter algorithm """ + from itertools import product from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey diff --git 
a/digital_image_processing/filters/median_filter.py b/digital_image_processing/filters/median_filter.py index 174018569d62..fc8b582ef67a 100644 --- a/digital_image_processing/filters/median_filter.py +++ b/digital_image_processing/filters/median_filter.py @@ -1,6 +1,7 @@ """ Implementation of median filter algorithm """ + from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import divide, int8, multiply, ravel, sort, zeros_like diff --git a/digital_image_processing/histogram_equalization/histogram_stretch.py b/digital_image_processing/histogram_equalization/histogram_stretch.py index 5ea7773e32d9..1270c964dee6 100644 --- a/digital_image_processing/histogram_equalization/histogram_stretch.py +++ b/digital_image_processing/histogram_equalization/histogram_stretch.py @@ -3,6 +3,7 @@ @author: Binish125 """ + import copy import os diff --git a/digital_image_processing/resize/resize.py b/digital_image_processing/resize/resize.py index 4836521f9f58..7bde118da69b 100644 --- a/digital_image_processing/resize/resize.py +++ b/digital_image_processing/resize/resize.py @@ -1,4 +1,5 @@ -""" Multiple image resizing techniques """ +"""Multiple image resizing techniques""" + import numpy as np from cv2 import destroyAllWindows, imread, imshow, waitKey diff --git a/digital_image_processing/sepia.py b/digital_image_processing/sepia.py index e9dd2c06066d..1924a80451e5 100644 --- a/digital_image_processing/sepia.py +++ b/digital_image_processing/sepia.py @@ -1,6 +1,7 @@ """ - Implemented an algorithm using opencv to tone an image with sepia technique +Implemented an algorithm using opencv to tone an image with sepia technique """ + from cv2 import destroyAllWindows, imread, imshow, waitKey diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index 7993110d6bdd..d1200f4d65ca 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -1,6 +1,7 @@ """ PyTest's for Digital Image Processing """ + import numpy as np from cv2 import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uint8 diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index 1d1bf301def5..a5d8b713bdbc 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -12,6 +12,7 @@ which have not been implemented here, yet. """ + from __future__ import annotations from collections.abc import Iterable diff --git a/divide_and_conquer/kth_order_statistic.py b/divide_and_conquer/kth_order_statistic.py index 666ad1a39b8a..23fd8be5ea47 100644 --- a/divide_and_conquer/kth_order_statistic.py +++ b/divide_and_conquer/kth_order_statistic.py @@ -8,6 +8,7 @@ For more information of this algorithm: https://web.stanford.edu/class/archive/cs/cs161/cs161.1138/lectures/08/Small08.pdf """ + from __future__ import annotations from random import choice diff --git a/divide_and_conquer/max_subarray.py b/divide_and_conquer/max_subarray.py index 851ef621a24c..0fad7ab5d920 100644 --- a/divide_and_conquer/max_subarray.py +++ b/divide_and_conquer/max_subarray.py @@ -6,6 +6,7 @@ This divide-and-conquer algorithm finds the maximum subarray in O(n log n) time. """ + from __future__ import annotations import time diff --git a/divide_and_conquer/peak.py b/divide_and_conquer/peak.py index e60f28bfbe29..71ab5ac86574 100644 --- a/divide_and_conquer/peak.py +++ b/divide_and_conquer/peak.py @@ -7,6 +7,7 @@ (From Kleinberg and Tardos. 
Algorithm Design. Addison Wesley 2006: Chapter 5 Solved Exercise 1) """ + from __future__ import annotations diff --git a/dynamic_programming/all_construct.py b/dynamic_programming/all_construct.py index 6e53a702cbb1..5d585fc7fcec 100644 --- a/dynamic_programming/all_construct.py +++ b/dynamic_programming/all_construct.py @@ -2,6 +2,7 @@ Program to list all the ways a target string can be constructed from the given list of substrings """ + from __future__ import annotations diff --git a/dynamic_programming/bitmask.py b/dynamic_programming/bitmask.py index 56bb8e96ba02..a6e6a0cda7bf 100644 --- a/dynamic_programming/bitmask.py +++ b/dynamic_programming/bitmask.py @@ -8,6 +8,7 @@ a person can do only one task and a task is performed only by one person. Find the total no of ways in which the tasks can be distributed. """ + from collections import defaultdict diff --git a/dynamic_programming/fast_fibonacci.py b/dynamic_programming/fast_fibonacci.py index f48186a34c25..9f956ca2f979 100644 --- a/dynamic_programming/fast_fibonacci.py +++ b/dynamic_programming/fast_fibonacci.py @@ -4,6 +4,7 @@ This program calculates the nth Fibonacci number in O(log(n)). It's possible to calculate F(1_000_000) in less than a second. """ + from __future__ import annotations import sys diff --git a/dynamic_programming/iterating_through_submasks.py b/dynamic_programming/iterating_through_submasks.py index 4d0a250e8dfe..372dd2c74a71 100644 --- a/dynamic_programming/iterating_through_submasks.py +++ b/dynamic_programming/iterating_through_submasks.py @@ -5,6 +5,7 @@ its submasks. The mask s is submask of m if only bits that were included in bitmask are set """ + from __future__ import annotations diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py index d827893763c5..2a78e2e7ad1d 100644 --- a/dynamic_programming/longest_increasing_subsequence.py +++ b/dynamic_programming/longest_increasing_subsequence.py @@ -10,6 +10,7 @@ Example: [10, 22, 9, 33, 21, 50, 41, 60, 80] as input will return [10, 22, 33, 41, 60, 80] as output """ + from __future__ import annotations diff --git a/dynamic_programming/matrix_chain_multiplication.py b/dynamic_programming/matrix_chain_multiplication.py index 084254a61f6c..da6e525ce816 100644 --- a/dynamic_programming/matrix_chain_multiplication.py +++ b/dynamic_programming/matrix_chain_multiplication.py @@ -38,6 +38,7 @@ arr = [40, 20, 30, 10, 30] output: 26000 """ + from collections.abc import Iterator from contextlib import contextmanager from functools import cache diff --git a/dynamic_programming/max_subarray_sum.py b/dynamic_programming/max_subarray_sum.py index c76943472b97..8c1dc0889a85 100644 --- a/dynamic_programming/max_subarray_sum.py +++ b/dynamic_programming/max_subarray_sum.py @@ -9,6 +9,7 @@ Reference: https://en.wikipedia.org/wiki/Maximum_subarray_problem """ + from collections.abc import Sequence diff --git a/electronics/charging_capacitor.py b/electronics/charging_capacitor.py index 4029b0ecf267..0021e4e345e0 100644 --- a/electronics/charging_capacitor.py +++ b/electronics/charging_capacitor.py @@ -14,6 +14,7 @@ time 't' from the initiation of charging a capacitor with the help of the exponential function containing RC. Both at charging and discharging of a capacitor. 
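(Editor's aside, not part of this commit.) The exponential relation the docstring refers to is v(t) = V * (1 - e^(-t / RC)) while charging; a sketch under that assumption, with an invented function name and rounding chosen for readability:

from math import exp

def charging_voltage(
    source_voltage: float, resistance: float, capacitance: float, time_sec: float
) -> float:
    # voltage across the capacitor after time_sec seconds of charging
    return round(source_voltage * (1 - exp(-time_sec / (resistance * capacitance))), 3)

# charging_voltage(8.0, 100.0, 2.0, 0.5) -> 0.02  (RC = 200 s, so charging has barely begun)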
""" + from math import exp # value of exp = 2.718281828459… diff --git a/electronics/charging_inductor.py b/electronics/charging_inductor.py index e5c0126c248a..8a3bbc0bbfcd 100644 --- a/electronics/charging_inductor.py +++ b/electronics/charging_inductor.py @@ -25,6 +25,7 @@ in its 'magnetic field'.with the help 'RL-time-constant' we can find current at any time in inductor while it is charging. """ + from math import exp # value of exp = 2.718281828459… diff --git a/electronics/resistor_color_code.py b/electronics/resistor_color_code.py index b0534b813def..189d19946d9d 100644 --- a/electronics/resistor_color_code.py +++ b/electronics/resistor_color_code.py @@ -58,6 +58,7 @@ https://learn.parallax.com/support/reference/resistor-color-codes https://byjus.com/physics/resistor-colour-codes/ """ + valid_colors: list = [ "Black", "Brown", diff --git a/financial/exponential_moving_average.py b/financial/exponential_moving_average.py index 0b6cea3b4c91..b56eb2712415 100644 --- a/financial/exponential_moving_average.py +++ b/financial/exponential_moving_average.py @@ -1,12 +1,12 @@ """ - Calculate the exponential moving average (EMA) on the series of stock prices. - Wikipedia Reference: https://en.wikipedia.org/wiki/Exponential_smoothing - https://www.investopedia.com/terms/e/ema.asp#toc-what-is-an-exponential - -moving-average-ema - - Exponential moving average is used in finance to analyze changes stock prices. - EMA is used in conjunction with Simple moving average (SMA), EMA reacts to the - changes in the value quicker than SMA, which is one of the advantages of using EMA. +Calculate the exponential moving average (EMA) on the series of stock prices. +Wikipedia Reference: https://en.wikipedia.org/wiki/Exponential_smoothing +https://www.investopedia.com/terms/e/ema.asp#toc-what-is-an-exponential +-moving-average-ema + +Exponential moving average is used in finance to analyze changes stock prices. +EMA is used in conjunction with Simple moving average (SMA), EMA reacts to the +changes in the value quicker than SMA, which is one of the advantages of using EMA. 
""" from collections.abc import Iterator diff --git a/financial/simple_moving_average.py b/financial/simple_moving_average.py index d5d68ffd3dab..f5ae444fd027 100644 --- a/financial/simple_moving_average.py +++ b/financial/simple_moving_average.py @@ -6,6 +6,7 @@ Reference: https://en.wikipedia.org/wiki/Moving_average """ + from collections.abc import Sequence diff --git a/fractals/koch_snowflake.py b/fractals/koch_snowflake.py index b0aaa86b11d8..30cd4b39c7c1 100644 --- a/fractals/koch_snowflake.py +++ b/fractals/koch_snowflake.py @@ -20,7 +20,6 @@ - numpy """ - from __future__ import annotations import matplotlib.pyplot as plt # type: ignore diff --git a/fractals/mandelbrot.py b/fractals/mandelbrot.py index 84dbda997562..5eb9af0aafe1 100644 --- a/fractals/mandelbrot.py +++ b/fractals/mandelbrot.py @@ -15,7 +15,6 @@ (see also https://en.wikipedia.org/wiki/Plotting_algorithms_for_the_Mandelbrot_set ) """ - import colorsys from PIL import Image # type: ignore diff --git a/fractals/sierpinski_triangle.py b/fractals/sierpinski_triangle.py index 45f7ab84cfff..ceb2001b681d 100644 --- a/fractals/sierpinski_triangle.py +++ b/fractals/sierpinski_triangle.py @@ -22,6 +22,7 @@ This code was written by editing the code from https://www.riannetrujillo.com/blog/python-fractal/ """ + import sys import turtle diff --git a/graphs/bi_directional_dijkstra.py b/graphs/bi_directional_dijkstra.py index 529a235db625..7b9eac6c8587 100644 --- a/graphs/bi_directional_dijkstra.py +++ b/graphs/bi_directional_dijkstra.py @@ -10,7 +10,6 @@ # Author: Swayam Singh (https://github.com/practice404) - from queue import PriorityQueue from typing import Any diff --git a/graphs/bidirectional_a_star.py b/graphs/bidirectional_a_star.py index 373d67142aa9..00f623de3493 100644 --- a/graphs/bidirectional_a_star.py +++ b/graphs/bidirectional_a_star.py @@ -1,6 +1,7 @@ """ https://en.wikipedia.org/wiki/Bidirectional_search """ + from __future__ import annotations import time diff --git a/graphs/bidirectional_breadth_first_search.py b/graphs/bidirectional_breadth_first_search.py index 511b080a9add..71c5a9aff08f 100644 --- a/graphs/bidirectional_breadth_first_search.py +++ b/graphs/bidirectional_breadth_first_search.py @@ -1,6 +1,7 @@ """ https://en.wikipedia.org/wiki/Bidirectional_search """ + from __future__ import annotations import time diff --git a/graphs/boruvka.py b/graphs/boruvka.py index 2715a3085948..3dc059ff6a62 100644 --- a/graphs/boruvka.py +++ b/graphs/boruvka.py @@ -1,29 +1,30 @@ """Borůvka's algorithm. - Determines the minimum spanning tree (MST) of a graph using the Borůvka's algorithm. - Borůvka's algorithm is a greedy algorithm for finding a minimum spanning tree in a - connected graph, or a minimum spanning forest if a graph that is not connected. +Determines the minimum spanning tree (MST) of a graph using the Borůvka's algorithm. +Borůvka's algorithm is a greedy algorithm for finding a minimum spanning tree in a +connected graph, or a minimum spanning forest if a graph that is not connected. - The time complexity of this algorithm is O(ELogV), where E represents the number - of edges, while V represents the number of nodes. - O(number_of_edges Log number_of_nodes) +The time complexity of this algorithm is O(ELogV), where E represents the number +of edges, while V represents the number of nodes. 
+O(number_of_edges Log number_of_nodes) - The space complexity of this algorithm is O(V + E), since we have to keep a couple - of lists whose sizes are equal to the number of nodes, as well as keep all the - edges of a graph inside of the data structure itself. +The space complexity of this algorithm is O(V + E), since we have to keep a couple +of lists whose sizes are equal to the number of nodes, as well as keep all the +edges of a graph inside of the data structure itself. - Borůvka's algorithm gives us pretty much the same result as other MST Algorithms - - they all find the minimum spanning tree, and the time complexity is approximately - the same. +Borůvka's algorithm gives us pretty much the same result as other MST Algorithms - +they all find the minimum spanning tree, and the time complexity is approximately +the same. - One advantage that Borůvka's algorithm has compared to the alternatives is that it - doesn't need to presort the edges or maintain a priority queue in order to find the - minimum spanning tree. - Even though that doesn't help its complexity, since it still passes the edges logE - times, it is a bit simpler to code. +One advantage that Borůvka's algorithm has compared to the alternatives is that it +doesn't need to presort the edges or maintain a priority queue in order to find the +minimum spanning tree. +Even though that doesn't help its complexity, since it still passes the edges logE +times, it is a bit simpler to code. - Details: https://en.wikipedia.org/wiki/Bor%C5%AFvka%27s_algorithm +Details: https://en.wikipedia.org/wiki/Bor%C5%AFvka%27s_algorithm """ + from __future__ import annotations from typing import Any diff --git a/graphs/breadth_first_search.py b/graphs/breadth_first_search.py index 171d3875f3c5..cab79be39ed3 100644 --- a/graphs/breadth_first_search.py +++ b/graphs/breadth_first_search.py @@ -1,6 +1,7 @@ #!/usr/bin/python -""" Author: OMKAR PATHAK """ +"""Author: OMKAR PATHAK""" + from __future__ import annotations from queue import Queue diff --git a/graphs/breadth_first_search_2.py b/graphs/breadth_first_search_2.py index a0b92b90b456..ccadfa346bf1 100644 --- a/graphs/breadth_first_search_2.py +++ b/graphs/breadth_first_search_2.py @@ -12,6 +12,7 @@ mark w as explored add w to Q (at the end) """ + from __future__ import annotations from collections import deque diff --git a/graphs/breadth_first_search_shortest_path.py b/graphs/breadth_first_search_shortest_path.py index d489b110b3a7..c06440bccef3 100644 --- a/graphs/breadth_first_search_shortest_path.py +++ b/graphs/breadth_first_search_shortest_path.py @@ -1,6 +1,7 @@ """Breath First Search (BFS) can be used when finding the shortest path from a given source node to a target node in an unweighted graph. """ + from __future__ import annotations graph = { diff --git a/graphs/breadth_first_search_shortest_path_2.py b/graphs/breadth_first_search_shortest_path_2.py index b0c8d353ba04..4f9b6e65bdf3 100644 --- a/graphs/breadth_first_search_shortest_path_2.py +++ b/graphs/breadth_first_search_shortest_path_2.py @@ -1,9 +1,10 @@ """Breadth-first search shortest path implementations. 
- doctest: - python -m doctest -v bfs_shortest_path.py - Manual test: - python bfs_shortest_path.py +doctest: +python -m doctest -v bfs_shortest_path.py +Manual test: +python bfs_shortest_path.py """ + demo_graph = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], diff --git a/graphs/breadth_first_search_zero_one_shortest_path.py b/graphs/breadth_first_search_zero_one_shortest_path.py index 78047c5d2237..d3a255bac1ef 100644 --- a/graphs/breadth_first_search_zero_one_shortest_path.py +++ b/graphs/breadth_first_search_zero_one_shortest_path.py @@ -3,6 +3,7 @@ 0-1-graph is the weighted graph with the weights equal to 0 or 1. Link: https://codeforces.com/blog/entry/22276 """ + from __future__ import annotations from collections import deque diff --git a/graphs/deep_clone_graph.py b/graphs/deep_clone_graph.py index 55678b4c01ec..18ea99c6a52d 100644 --- a/graphs/deep_clone_graph.py +++ b/graphs/deep_clone_graph.py @@ -9,6 +9,7 @@ Each node in the graph contains a value (int) and a list (List[Node]) of its neighbors. """ + from dataclasses import dataclass diff --git a/graphs/depth_first_search.py b/graphs/depth_first_search.py index f20a503ca395..a666e74ce607 100644 --- a/graphs/depth_first_search.py +++ b/graphs/depth_first_search.py @@ -1,4 +1,5 @@ """Non recursive implementation of a DFS algorithm.""" + from __future__ import annotations diff --git a/graphs/depth_first_search_2.py b/graphs/depth_first_search_2.py index 5ff13af33168..8fe48b7f2b42 100644 --- a/graphs/depth_first_search_2.py +++ b/graphs/depth_first_search_2.py @@ -1,6 +1,6 @@ #!/usr/bin/python -""" Author: OMKAR PATHAK """ +"""Author: OMKAR PATHAK""" class Graph: diff --git a/graphs/dijkstra.py b/graphs/dijkstra.py index b0bdfab60649..87e9d2233bb2 100644 --- a/graphs/dijkstra.py +++ b/graphs/dijkstra.py @@ -30,6 +30,7 @@ distance between each vertex that makes up the path from start vertex to target vertex. """ + import heapq diff --git a/graphs/even_tree.py b/graphs/even_tree.py index 92ffb4b232f7..7d47899527a7 100644 --- a/graphs/even_tree.py +++ b/graphs/even_tree.py @@ -12,6 +12,7 @@ Note: The tree input will be such that it can always be decomposed into components containing an even number of nodes. 
""" + # pylint: disable=invalid-name from collections import defaultdict diff --git a/graphs/frequent_pattern_graph_miner.py b/graphs/frequent_pattern_graph_miner.py index 208e57f9b32f..f8da73f3438e 100644 --- a/graphs/frequent_pattern_graph_miner.py +++ b/graphs/frequent_pattern_graph_miner.py @@ -8,6 +8,7 @@ URL: https://www.researchgate.net/publication/235255851 """ + # fmt: off edge_array = [ ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'bh-e12', 'cd-e2', 'ce-e4', diff --git a/graphs/graph_adjacency_list.py b/graphs/graph_adjacency_list.py index d0b94f03e9b4..abc75311cd60 100644 --- a/graphs/graph_adjacency_list.py +++ b/graphs/graph_adjacency_list.py @@ -15,6 +15,7 @@ - Make edge weights and vertex values customizable to store whatever the client wants - Support multigraph functionality if the client wants it """ + from __future__ import annotations import random diff --git a/graphs/graph_adjacency_matrix.py b/graphs/graph_adjacency_matrix.py index cdef388d9098..059a6aa9ffb5 100644 --- a/graphs/graph_adjacency_matrix.py +++ b/graphs/graph_adjacency_matrix.py @@ -15,6 +15,7 @@ - Make edge weights and vertex values customizable to store whatever the client wants - Support multigraph functionality if the client wants it """ + from __future__ import annotations import random diff --git a/graphs/graphs_floyd_warshall.py b/graphs/graphs_floyd_warshall.py index 56cf8b9e382b..aaed9ac5df8b 100644 --- a/graphs/graphs_floyd_warshall.py +++ b/graphs/graphs_floyd_warshall.py @@ -1,7 +1,7 @@ # floyd_warshall.py """ - The problem is to find the shortest distance between all pairs of vertices in a - weighted directed graph that can have negative edge weights. +The problem is to find the shortest distance between all pairs of vertices in a +weighted directed graph that can have negative edge weights. """ diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py index 81f30ef615fe..cc918f81dfe8 100644 --- a/graphs/minimum_spanning_tree_prims2.py +++ b/graphs/minimum_spanning_tree_prims2.py @@ -6,6 +6,7 @@ at a time, from an arbitrary starting vertex, at each step adding the cheapest possible connection from the tree to another vertex. """ + from __future__ import annotations from sys import maxsize diff --git a/graphs/page_rank.py b/graphs/page_rank.py index b9e4c4a72a93..c0ce3a94c76b 100644 --- a/graphs/page_rank.py +++ b/graphs/page_rank.py @@ -1,6 +1,7 @@ """ Author: https://github.com/bhushan-borole """ + """ The input graph for the algorithm is: diff --git a/graphs/prim.py b/graphs/prim.py index 6cb1a6def359..5b3ce04441ec 100644 --- a/graphs/prim.py +++ b/graphs/prim.py @@ -1,8 +1,8 @@ """Prim's Algorithm. - Determines the minimum spanning tree(MST) of a graph using the Prim's Algorithm. +Determines the minimum spanning tree(MST) of a graph using the Prim's Algorithm. - Details: https://en.wikipedia.org/wiki/Prim%27s_algorithm +Details: https://en.wikipedia.org/wiki/Prim%27s_algorithm """ import heapq as hq diff --git a/greedy_methods/gas_station.py b/greedy_methods/gas_station.py index 2427375d2664..6391ce379329 100644 --- a/greedy_methods/gas_station.py +++ b/greedy_methods/gas_station.py @@ -23,6 +23,7 @@ start checking from the next station. """ + from dataclasses import dataclass diff --git a/hashes/adler32.py b/hashes/adler32.py index 611ebc88b80f..38d76ab12aa0 100644 --- a/hashes/adler32.py +++ b/hashes/adler32.py @@ -1,11 +1,11 @@ """ - Adler-32 is a checksum algorithm which was invented by Mark Adler in 1995. 
- Compared to a cyclic redundancy check of the same length, it trades reliability for - speed (preferring the latter). - Adler-32 is more reliable than Fletcher-16, and slightly less reliable than - Fletcher-32.[2] +Adler-32 is a checksum algorithm which was invented by Mark Adler in 1995. +Compared to a cyclic redundancy check of the same length, it trades reliability for +speed (preferring the latter). +Adler-32 is more reliable than Fletcher-16, and slightly less reliable than +Fletcher-32.[2] - source: https://en.wikipedia.org/wiki/Adler-32 +source: https://en.wikipedia.org/wiki/Adler-32 """ MOD_ADLER = 65521 diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index b34fdd4c7a74..b3095852ac51 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -4,44 +4,44 @@ # Black: True """ - * This code implement the Hamming code: - https://en.wikipedia.org/wiki/Hamming_code - In telecommunication, - Hamming codes are a family of linear error-correcting codes. Hamming - codes can detect up to two-bit errors or correct one-bit errors - without detection of uncorrected errors. By contrast, the simple - parity code cannot correct errors, and can detect only an odd number - of bits in error. Hamming codes are perfect codes, that is, they - achieve the highest possible rate for codes with their block length - and minimum distance of three. - - * the implemented code consists of: - * a function responsible for encoding the message (emitterConverter) - * return the encoded message - * a function responsible for decoding the message (receptorConverter) - * return the decoded message and a ack of data integrity - - * how to use: - to be used you must declare how many parity bits (sizePari) - you want to include in the message. - it is desired (for test purposes) to select a bit to be set - as an error. This serves to check whether the code is working correctly. - Lastly, the variable of the message/word that must be desired to be - encoded (text). - - * how this work: - declaration of variables (sizePari, be, text) - - converts the message/word (text) to binary using the - text_to_bits function - encodes the message using the rules of hamming encoding - decodes the message using the rules of hamming encoding - print the original message, the encoded message and the - decoded message - - forces an error in the coded text variable - decodes the message that was forced the error - print the original message, the encoded message, the bit changed - message and the decoded message +* This code implement the Hamming code: + https://en.wikipedia.org/wiki/Hamming_code - In telecommunication, +Hamming codes are a family of linear error-correcting codes. Hamming +codes can detect up to two-bit errors or correct one-bit errors +without detection of uncorrected errors. By contrast, the simple +parity code cannot correct errors, and can detect only an odd number +of bits in error. Hamming codes are perfect codes, that is, they +achieve the highest possible rate for codes with their block length +and minimum distance of three. + +* the implemented code consists of: + * a function responsible for encoding the message (emitterConverter) + * return the encoded message + * a function responsible for decoding the message (receptorConverter) + * return the decoded message and a ack of data integrity + +* how to use: + to be used you must declare how many parity bits (sizePari) + you want to include in the message. + it is desired (for test purposes) to select a bit to be set + as an error. 
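For the hashes/adler32.py hunk above, which describes Adler-32 as a checksum trading reliability for speed and keeps the MOD_ADLER = 65521 constant, a compact sketch of the textbook formula may help; the function name and bytes-based signature are assumptions for illustration, not necessarily the interface of the patched file:

    MOD_ADLER = 65521  # the same prime modulus the file defines

    def adler32_sketch(data: bytes) -> int:
        # `a` accumulates the bytes, `b` accumulates the running values of `a`;
        # the checksum packs b into the high 16 bits and a into the low 16 bits.
        a, b = 1, 0
        for byte in data:
            a = (a + byte) % MOD_ADLER
            b = (b + a) % MOD_ADLER
        return (b << 16) | a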
This serves to check whether the code is working correctly. + Lastly, the variable of the message/word that must be desired to be + encoded (text). + +* how this work: + declaration of variables (sizePari, be, text) + + converts the message/word (text) to binary using the + text_to_bits function + encodes the message using the rules of hamming encoding + decodes the message using the rules of hamming encoding + print the original message, the encoded message and the + decoded message + + forces an error in the coded text variable + decodes the message that was forced the error + print the original message, the encoded message, the bit changed + message and the decoded message """ # Imports diff --git a/hashes/luhn.py b/hashes/luhn.py index bb77fd05c556..a29bf39e3d82 100644 --- a/hashes/luhn.py +++ b/hashes/luhn.py @@ -1,4 +1,5 @@ -""" Luhn Algorithm """ +"""Luhn Algorithm""" + from __future__ import annotations diff --git a/hashes/sdbm.py b/hashes/sdbm.py index a5432874ba7d..a5abc6f3185b 100644 --- a/hashes/sdbm.py +++ b/hashes/sdbm.py @@ -1,21 +1,21 @@ """ - This algorithm was created for sdbm (a public-domain reimplementation of ndbm) - database library. - It was found to do well in scrambling bits, causing better distribution of the keys - and fewer splits. - It also happens to be a good general hashing function with good distribution. - The actual function (pseudo code) is: - for i in i..len(str): - hash(i) = hash(i - 1) * 65599 + str[i]; +This algorithm was created for sdbm (a public-domain reimplementation of ndbm) +database library. +It was found to do well in scrambling bits, causing better distribution of the keys +and fewer splits. +It also happens to be a good general hashing function with good distribution. +The actual function (pseudo code) is: + for i in i..len(str): + hash(i) = hash(i - 1) * 65599 + str[i]; - What is included below is the faster version used in gawk. [there is even a faster, - duff-device version] - The magic constant 65599 was picked out of thin air while experimenting with - different constants. - It turns out to be a prime. - This is one of the algorithms used in berkeley db (see sleepycat) and elsewhere. +What is included below is the faster version used in gawk. [there is even a faster, +duff-device version] +The magic constant 65599 was picked out of thin air while experimenting with +different constants. +It turns out to be a prime. +This is one of the algorithms used in berkeley db (see sleepycat) and elsewhere. 
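The hashes/sdbm.py docstring above spells out the recurrence hash(i) = hash(i - 1) * 65599 + str[i]. A direct transcription into Python looks roughly like the following; the 64-bit mask and the function name are illustrative assumptions, since the pseudo code itself says nothing about word size:

    def sdbm_sketch(plain_text: str) -> int:
        # hash(i) = hash(i - 1) * 65599 + ord(character), truncated to 64 bits
        # here only to keep the running value bounded.
        hash_value = 0
        for character in plain_text:
            hash_value = (hash_value * 65599 + ord(character)) & 0xFFFFFFFFFFFFFFFF
        return hash_value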
- source: http://www.cse.yorku.ca/~oz/hash.html +source: http://www.cse.yorku.ca/~oz/hash.html """ diff --git a/hashes/sha1.py b/hashes/sha1.py index a0fa688f863e..75a1423e9b5f 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -25,6 +25,7 @@ Reference: https://deadhacker.com/2006/02/21/sha-1-illustrated/ """ + import argparse import hashlib # hashlib is only used inside the Test class import struct diff --git a/knapsack/knapsack.py b/knapsack/knapsack.py index 18a36c3bcdda..bb507be1ba3c 100644 --- a/knapsack/knapsack.py +++ b/knapsack/knapsack.py @@ -1,6 +1,7 @@ -""" A naive recursive implementation of 0-1 Knapsack Problem - https://en.wikipedia.org/wiki/Knapsack_problem +"""A naive recursive implementation of 0-1 Knapsack Problem +https://en.wikipedia.org/wiki/Knapsack_problem """ + from __future__ import annotations diff --git a/knapsack/tests/test_knapsack.py b/knapsack/tests/test_knapsack.py index 6932bbb3536b..7bfb8780627b 100644 --- a/knapsack/tests/test_knapsack.py +++ b/knapsack/tests/test_knapsack.py @@ -6,6 +6,7 @@ This file contains the test-suite for the knapsack problem. """ + import unittest from knapsack import knapsack as k diff --git a/linear_algebra/gaussian_elimination.py b/linear_algebra/gaussian_elimination.py index a1a35131b157..724773c0db98 100644 --- a/linear_algebra/gaussian_elimination.py +++ b/linear_algebra/gaussian_elimination.py @@ -3,7 +3,6 @@ Gaussian elimination - https://en.wikipedia.org/wiki/Gaussian_elimination """ - import numpy as np from numpy import float64 from numpy.typing import NDArray diff --git a/linear_algebra/jacobi_iteration_method.py b/linear_algebra/jacobi_iteration_method.py index 8c91a19ef1b0..2cc9c103018b 100644 --- a/linear_algebra/jacobi_iteration_method.py +++ b/linear_algebra/jacobi_iteration_method.py @@ -1,6 +1,7 @@ """ Jacobi Iteration Method - https://en.wikipedia.org/wiki/Jacobi_method """ + from __future__ import annotations import numpy as np diff --git a/linear_algebra/lu_decomposition.py b/linear_algebra/lu_decomposition.py index 094b20abfecc..1d364163d9a7 100644 --- a/linear_algebra/lu_decomposition.py +++ b/linear_algebra/lu_decomposition.py @@ -15,6 +15,7 @@ Reference: https://en.wikipedia.org/wiki/LU_decomposition """ + from __future__ import annotations import numpy as np diff --git a/linear_algebra/src/conjugate_gradient.py b/linear_algebra/src/conjugate_gradient.py index 4cf566ec9e36..4c0b58deb978 100644 --- a/linear_algebra/src/conjugate_gradient.py +++ b/linear_algebra/src/conjugate_gradient.py @@ -3,6 +3,7 @@ - https://en.wikipedia.org/wiki/Conjugate_gradient_method - https://en.wikipedia.org/wiki/Definite_symmetric_matrix """ + from typing import Any import numpy as np diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 5074faf31d1d..5af6c62e3ad4 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -18,6 +18,7 @@ - function square_zero_matrix(N) - function random_matrix(W, H, a, b) """ + from __future__ import annotations import math @@ -96,12 +97,10 @@ def __sub__(self, other: Vector) -> Vector: raise Exception("must have the same size") @overload - def __mul__(self, other: float) -> Vector: - ... + def __mul__(self, other: float) -> Vector: ... @overload - def __mul__(self, other: Vector) -> float: - ... + def __mul__(self, other: Vector) -> float: ... 
def __mul__(self, other: float | Vector) -> float | Vector: """ @@ -309,12 +308,10 @@ def __sub__(self, other: Matrix) -> Matrix: raise Exception("matrices must have the same dimension!") @overload - def __mul__(self, other: float) -> Matrix: - ... + def __mul__(self, other: float) -> Matrix: ... @overload - def __mul__(self, other: Vector) -> Vector: - ... + def __mul__(self, other: Vector) -> Vector: ... def __mul__(self, other: float | Vector) -> Vector | Matrix: """ diff --git a/linear_algebra/src/rayleigh_quotient.py b/linear_algebra/src/rayleigh_quotient.py index 4773429cbf1b..46bf1671d2b1 100644 --- a/linear_algebra/src/rayleigh_quotient.py +++ b/linear_algebra/src/rayleigh_quotient.py @@ -1,6 +1,7 @@ """ https://en.wikipedia.org/wiki/Rayleigh_quotient """ + from typing import Any import numpy as np diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index 95ab408b3d86..fc5f90fd5cbe 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -6,6 +6,7 @@ This file contains the test-suite for the linear algebra library. """ + import unittest import pytest diff --git a/linear_algebra/src/transformations_2d.py b/linear_algebra/src/transformations_2d.py index cdf42100d5d9..b4185cd2848f 100644 --- a/linear_algebra/src/transformations_2d.py +++ b/linear_algebra/src/transformations_2d.py @@ -11,6 +11,7 @@ reflection(45) = [[0.05064397763545947, 0.893996663600558], [0.893996663600558, 0.7018070490682369]] """ + from math import cos, sin diff --git a/linear_programming/simplex.py b/linear_programming/simplex.py index bbc97d8e22bf..dc171bacd3a2 100644 --- a/linear_programming/simplex.py +++ b/linear_programming/simplex.py @@ -12,6 +12,7 @@ https://en.wikipedia.org/wiki/Simplex_algorithm https://tinyurl.com/simplex4beginners """ + from typing import Any import numpy as np diff --git a/machine_learning/apriori_algorithm.py b/machine_learning/apriori_algorithm.py index d9fd1f82ea3c..09a89ac236bd 100644 --- a/machine_learning/apriori_algorithm.py +++ b/machine_learning/apriori_algorithm.py @@ -10,6 +10,7 @@ WIKI: https://en.wikipedia.org/wiki/Apriori_algorithm Examples: https://www.kaggle.com/code/earthian/apriori-association-rules-mining """ + from itertools import combinations diff --git a/machine_learning/astar.py b/machine_learning/astar.py index ff5208266343..a5859e51fe70 100644 --- a/machine_learning/astar.py +++ b/machine_learning/astar.py @@ -12,6 +12,7 @@ https://en.wikipedia.org/wiki/A*_search_algorithm """ + import numpy as np diff --git a/machine_learning/automatic_differentiation.py b/machine_learning/automatic_differentiation.py index cd2e5cdaa782..5c2708247c21 100644 --- a/machine_learning/automatic_differentiation.py +++ b/machine_learning/automatic_differentiation.py @@ -6,6 +6,7 @@ Author: Poojan Smart Email: smrtpoojan@gmail.com """ + from __future__ import annotations from collections import defaultdict diff --git a/machine_learning/data_transformations.py b/machine_learning/data_transformations.py index ecfd3b9e27c2..a1c28d514fd5 100644 --- a/machine_learning/data_transformations.py +++ b/machine_learning/data_transformations.py @@ -25,6 +25,7 @@ 2. non-gaussian (non-normal) distributions work better with normalization 3. 
If a column or list of values has extreme values / outliers, use standardization """ + from statistics import mean, stdev diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index c67e09c7f114..7f129919a3ce 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -3,6 +3,7 @@ Input data set: The input data set must be 1-dimensional with continuous labels. Output: The decision tree maps a real number input to a real number output. """ + import numpy as np diff --git a/machine_learning/frequent_pattern_growth.py b/machine_learning/frequent_pattern_growth.py index 205d598464a1..6b9870f5e1d2 100644 --- a/machine_learning/frequent_pattern_growth.py +++ b/machine_learning/frequent_pattern_growth.py @@ -9,6 +9,7 @@ Examples: https://www.javatpoint.com/fp-growth-algorithm-in-data-mining """ + from __future__ import annotations from dataclasses import dataclass, field diff --git a/machine_learning/gradient_descent.py b/machine_learning/gradient_descent.py index 9ffc02bbc284..db38b3c95b52 100644 --- a/machine_learning/gradient_descent.py +++ b/machine_learning/gradient_descent.py @@ -2,6 +2,7 @@ Implementation of gradient descent algorithm for minimizing cost of a linear hypothesis function. """ + import numpy # List of input, output pairs diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 4a219edc3bb1..9f6646944458 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -40,6 +40,7 @@ 5. Transfers Dataframe into excel format it must have feature called 'Clust' with k means clustering numbers in it. """ + import warnings import numpy as np diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index 88c047157893..606e11f3698e 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -1,47 +1,48 @@ """ - Linear Discriminant Analysis +Linear Discriminant Analysis - Assumptions About Data : - 1. The input variables has a gaussian distribution. - 2. The variance calculated for each input variables by class grouping is the - same. - 3. The mix of classes in your training set is representative of the problem. +Assumptions About Data : + 1. The input variables has a gaussian distribution. + 2. The variance calculated for each input variables by class grouping is the + same. + 3. The mix of classes in your training set is representative of the problem. - Learning The Model : - The LDA model requires the estimation of statistics from the training data : - 1. Mean of each input value for each class. - 2. Probability of an instance belong to each class. - 3. Covariance for the input data for each class +Learning The Model : + The LDA model requires the estimation of statistics from the training data : + 1. Mean of each input value for each class. + 2. Probability of an instance belong to each class. + 3. 
Covariance for the input data for each class - Calculate the class means : - mean(x) = 1/n ( for i = 1 to i = n --> sum(xi)) + Calculate the class means : + mean(x) = 1/n ( for i = 1 to i = n --> sum(xi)) - Calculate the class probabilities : - P(y = 0) = count(y = 0) / (count(y = 0) + count(y = 1)) - P(y = 1) = count(y = 1) / (count(y = 0) + count(y = 1)) + Calculate the class probabilities : + P(y = 0) = count(y = 0) / (count(y = 0) + count(y = 1)) + P(y = 1) = count(y = 1) / (count(y = 0) + count(y = 1)) - Calculate the variance : - We can calculate the variance for dataset in two steps : - 1. Calculate the squared difference for each input variable from the - group mean. - 2. Calculate the mean of the squared difference. - ------------------------------------------------ - Squared_Difference = (x - mean(k)) ** 2 - Variance = (1 / (count(x) - count(classes))) * - (for i = 1 to i = n --> sum(Squared_Difference(xi))) + Calculate the variance : + We can calculate the variance for dataset in two steps : + 1. Calculate the squared difference for each input variable from the + group mean. + 2. Calculate the mean of the squared difference. + ------------------------------------------------ + Squared_Difference = (x - mean(k)) ** 2 + Variance = (1 / (count(x) - count(classes))) * + (for i = 1 to i = n --> sum(Squared_Difference(xi))) - Making Predictions : - discriminant(x) = x * (mean / variance) - - ((mean ** 2) / (2 * variance)) + Ln(probability) - --------------------------------------------------------------------------- - After calculating the discriminant value for each class, the class with the - largest discriminant value is taken as the prediction. +Making Predictions : + discriminant(x) = x * (mean / variance) - + ((mean ** 2) / (2 * variance)) + Ln(probability) + --------------------------------------------------------------------------- + After calculating the discriminant value for each class, the class with the + largest discriminant value is taken as the prediction. - Author: @EverLookNeverSee +Author: @EverLookNeverSee """ + from collections.abc import Callable from math import log from os import name, system diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index 0847112ad538..39bee5712c16 100644 --- a/machine_learning/linear_regression.py +++ b/machine_learning/linear_regression.py @@ -7,6 +7,7 @@ fit our dataset. In this particular code, I had used a CSGO dataset (ADR vs Rating). We try to best fit a line through dataset and estimate the parameters. 
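The linear_discriminant_analysis.py docstring reindented above gives the scoring rule explicitly: discriminant(x) = x * (mean / variance) - mean^2 / (2 * variance) + ln(probability), with the largest discriminant deciding the class. A small sketch of exactly that rule; the function and parameter names here are mine, chosen for illustration:

    from math import log

    def discriminant(x: float, mean: float, variance: float, probability: float) -> float:
        # x * (mean / variance) - mean^2 / (2 * variance) + ln(class probability)
        return x * (mean / variance) - mean**2 / (2 * variance) + log(probability)

    def predict(x: float, means: list[float], variance: float, probabilities: list[float]) -> int:
        # The class with the largest discriminant value is taken as the prediction.
        scores = [discriminant(x, m, variance, p) for m, p in zip(means, probabilities)]
        return scores.index(max(scores))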
""" + import numpy as np import requests diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py index 59a70fd65cf9..090af5382185 100644 --- a/machine_learning/logistic_regression.py +++ b/machine_learning/logistic_regression.py @@ -14,6 +14,7 @@ Coursera ML course https://medium.com/@martinpella/logistic-regression-from-scratch-in-python-124c5636b8ac """ + import numpy as np from matplotlib import pyplot as plt from sklearn import datasets diff --git a/machine_learning/lstm/lstm_prediction.py b/machine_learning/lstm/lstm_prediction.py index ecbd451266ad..f0fd12c9de7f 100644 --- a/machine_learning/lstm/lstm_prediction.py +++ b/machine_learning/lstm/lstm_prediction.py @@ -1,9 +1,10 @@ """ - Create a Long Short Term Memory (LSTM) network model - An LSTM is a type of Recurrent Neural Network (RNN) as discussed at: - * https://colah.github.io/posts/2015-08-Understanding-LSTMs - * https://en.wikipedia.org/wiki/Long_short-term_memory +Create a Long Short Term Memory (LSTM) network model +An LSTM is a type of Recurrent Neural Network (RNN) as discussed at: +* https://colah.github.io/posts/2015-08-Understanding-LSTMs +* https://en.wikipedia.org/wiki/Long_short-term_memory """ + import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler diff --git a/machine_learning/mfcc.py b/machine_learning/mfcc.py index 7ce8ceb50ff2..a1e99ce4ad40 100644 --- a/machine_learning/mfcc.py +++ b/machine_learning/mfcc.py @@ -57,7 +57,6 @@ Author: Amir Lavasani """ - import logging import numpy as np diff --git a/machine_learning/self_organizing_map.py b/machine_learning/self_organizing_map.py index 32fdf1d2b41d..fb9d0074e791 100644 --- a/machine_learning/self_organizing_map.py +++ b/machine_learning/self_organizing_map.py @@ -1,6 +1,7 @@ """ https://en.wikipedia.org/wiki/Self-organizing_map """ + import math diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 9ee8c52fb2e9..be16baca1a4c 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -30,7 +30,6 @@ https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf """ - import os import sys import urllib.request diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index 7a23ec463c8f..0bc3b17d7e5a 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -7,6 +7,7 @@ 1. the nearest vector 2. 
distance between the vector and the nearest vector (float) """ + from __future__ import annotations import math diff --git a/maths/allocation_number.py b/maths/allocation_number.py index d419e74d01ff..52f1ac4bdb23 100644 --- a/maths/allocation_number.py +++ b/maths/allocation_number.py @@ -5,6 +5,7 @@ for i in allocation_list: requests.get(url,headers={'Range':f'bytes={i}'}) """ + from __future__ import annotations diff --git a/maths/area.py b/maths/area.py index ea7216c8fe3f..31a654206977 100644 --- a/maths/area.py +++ b/maths/area.py @@ -2,6 +2,7 @@ Find the area of various geometric shapes Wikipedia reference: https://en.wikipedia.org/wiki/Area """ + from math import pi, sqrt, tan diff --git a/maths/area_under_curve.py b/maths/area_under_curve.py index 0da6546b2e36..10aec768fa09 100644 --- a/maths/area_under_curve.py +++ b/maths/area_under_curve.py @@ -1,6 +1,7 @@ """ Approximates the area under the curve using the trapezoidal rule """ + from __future__ import annotations from collections.abc import Callable diff --git a/maths/basic_maths.py b/maths/basic_maths.py index c9e3d00fa23b..833f31c18b9e 100644 --- a/maths/basic_maths.py +++ b/maths/basic_maths.py @@ -1,4 +1,5 @@ """Implementation of Basic Math in Python.""" + import math diff --git a/maths/binomial_distribution.py b/maths/binomial_distribution.py index 5b56f2d59244..eabcaea0d1b2 100644 --- a/maths/binomial_distribution.py +++ b/maths/binomial_distribution.py @@ -1,5 +1,6 @@ """For more information about the Binomial Distribution - - https://en.wikipedia.org/wiki/Binomial_distribution""" +https://en.wikipedia.org/wiki/Binomial_distribution""" + from math import factorial diff --git a/maths/chinese_remainder_theorem.py b/maths/chinese_remainder_theorem.py index d3e75e77922a..18af63d106e8 100644 --- a/maths/chinese_remainder_theorem.py +++ b/maths/chinese_remainder_theorem.py @@ -11,6 +11,7 @@ 1. Use extended euclid algorithm to find x,y such that a*x + b*y = 1 2. Take n = ra*by + rb*ax """ + from __future__ import annotations diff --git a/maths/continued_fraction.py b/maths/continued_fraction.py index 04ff0b6ff0d2..2c38bf88b1e9 100644 --- a/maths/continued_fraction.py +++ b/maths/continued_fraction.py @@ -4,7 +4,6 @@ https://en.wikipedia.org/wiki/Continued_fraction """ - from fractions import Fraction from math import floor diff --git a/maths/entropy.py b/maths/entropy.py index 23753d884484..76fac4ee717d 100644 --- a/maths/entropy.py +++ b/maths/entropy.py @@ -4,6 +4,7 @@ Implementation of entropy of information https://en.wikipedia.org/wiki/Entropy_(information_theory) """ + from __future__ import annotations import math diff --git a/maths/gamma.py b/maths/gamma.py index 822bbc74456f..e328cd8b22b7 100644 --- a/maths/gamma.py +++ b/maths/gamma.py @@ -8,6 +8,7 @@ the non-positive integers Python's Standard Library math.gamma() function overflows around gamma(171.624). 
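The chinese_remainder_theorem.py docstring above reduces the two-modulus case to two steps: find x, y with a*x + b*y = 1 via the extended Euclidean algorithm, then take n = ra*b*y + rb*a*x. A sketch of those two steps, assuming a and b are coprime (names are illustrative):

    def extended_euclid(a: int, b: int) -> tuple[int, int]:
        # Returns (x, y) such that a*x + b*y = gcd(a, b).
        if b == 0:
            return 1, 0
        x, y = extended_euclid(b, a % b)
        return y, x - (a // b) * y

    def chinese_remainder(remainder_a: int, a: int, remainder_b: int, b: int) -> int:
        # Step 1: a*x + b*y = 1; step 2: n = ra*b*y + rb*a*x, reduced mod a*b.
        x, y = extended_euclid(a, b)
        return (remainder_a * b * y + remainder_b * a * x) % (a * b)

For example, chinese_remainder(2, 3, 3, 5) returns 8, the smallest n with n % 3 == 2 and n % 5 == 3.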
""" + import math from numpy import inf diff --git a/maths/gaussian.py b/maths/gaussian.py index 51ebc2e25849..0e02010a9c67 100644 --- a/maths/gaussian.py +++ b/maths/gaussian.py @@ -1,6 +1,7 @@ """ Reference: https://en.wikipedia.org/wiki/Gaussian_function """ + from numpy import exp, pi, sqrt diff --git a/maths/interquartile_range.py b/maths/interquartile_range.py index d4d72e73ef49..e91a651647d4 100644 --- a/maths/interquartile_range.py +++ b/maths/interquartile_range.py @@ -7,6 +7,7 @@ Script inspired by this Wikipedia article: https://en.wikipedia.org/wiki/Interquartile_range """ + from __future__ import annotations diff --git a/maths/is_square_free.py b/maths/is_square_free.py index 08c70dc32c38..a336c37e8dbc 100644 --- a/maths/is_square_free.py +++ b/maths/is_square_free.py @@ -3,6 +3,7 @@ psf/black : True ruff : True """ + from __future__ import annotations diff --git a/maths/karatsuba.py b/maths/karatsuba.py index 3d29e31d2107..0e063fb44b83 100644 --- a/maths/karatsuba.py +++ b/maths/karatsuba.py @@ -1,4 +1,4 @@ -""" Multiply two numbers using Karatsuba algorithm """ +"""Multiply two numbers using Karatsuba algorithm""" def karatsuba(a: int, b: int) -> int: diff --git a/maths/lucas_lehmer_primality_test.py b/maths/lucas_lehmer_primality_test.py index 0a5621aacd79..292387414dee 100644 --- a/maths/lucas_lehmer_primality_test.py +++ b/maths/lucas_lehmer_primality_test.py @@ -1,13 +1,13 @@ """ - In mathematics, the Lucas–Lehmer test (LLT) is a primality test for Mersenne - numbers. https://en.wikipedia.org/wiki/Lucas%E2%80%93Lehmer_primality_test +In mathematics, the Lucas–Lehmer test (LLT) is a primality test for Mersenne +numbers. https://en.wikipedia.org/wiki/Lucas%E2%80%93Lehmer_primality_test - A Mersenne number is a number that is one less than a power of two. - That is M_p = 2^p - 1 - https://en.wikipedia.org/wiki/Mersenne_prime +A Mersenne number is a number that is one less than a power of two. +That is M_p = 2^p - 1 +https://en.wikipedia.org/wiki/Mersenne_prime - The Lucas–Lehmer test is the primality test used by the - Great Internet Mersenne Prime Search (GIMPS) to locate large primes. +The Lucas–Lehmer test is the primality test used by the +Great Internet Mersenne Prime Search (GIMPS) to locate large primes. """ diff --git a/maths/maclaurin_series.py b/maths/maclaurin_series.py index d5c3c3ab958b..6ec5551a5e6e 100644 --- a/maths/maclaurin_series.py +++ b/maths/maclaurin_series.py @@ -1,6 +1,7 @@ """ https://en.wikipedia.org/wiki/Taylor_series#Trigonometric_functions """ + from math import factorial, pi diff --git a/maths/max_sum_sliding_window.py b/maths/max_sum_sliding_window.py index c6f9b4ed0ad7..090117429604 100644 --- a/maths/max_sum_sliding_window.py +++ b/maths/max_sum_sliding_window.py @@ -6,6 +6,7 @@ called 'Window sliding technique' where the nested loops can be converted to a single loop to reduce time complexity. """ + from __future__ import annotations diff --git a/maths/modular_exponential.py b/maths/modular_exponential.py index 42987dbf3a24..a27e29ebc02a 100644 --- a/maths/modular_exponential.py +++ b/maths/modular_exponential.py @@ -1,8 +1,8 @@ """ - Modular Exponential. - Modular exponentiation is a type of exponentiation performed over a modulus. - For more explanation, please check - https://en.wikipedia.org/wiki/Modular_exponentiation +Modular Exponential. +Modular exponentiation is a type of exponentiation performed over a modulus. 
+For more explanation, please check +https://en.wikipedia.org/wiki/Modular_exponentiation """ """Calculate Modular Exponential.""" diff --git a/maths/monte_carlo.py b/maths/monte_carlo.py index 474f1f65deb4..d174a0b188a2 100644 --- a/maths/monte_carlo.py +++ b/maths/monte_carlo.py @@ -1,6 +1,7 @@ """ @author: MatteoRaso """ + from collections.abc import Callable from math import pi, sqrt from random import uniform diff --git a/maths/numerical_analysis/adams_bashforth.py b/maths/numerical_analysis/adams_bashforth.py index d61f022a413d..fb406171098a 100644 --- a/maths/numerical_analysis/adams_bashforth.py +++ b/maths/numerical_analysis/adams_bashforth.py @@ -4,6 +4,7 @@ https://en.wikipedia.org/wiki/Linear_multistep_method Author : Ravi Kumar """ + from collections.abc import Callable from dataclasses import dataclass diff --git a/maths/numerical_analysis/nevilles_method.py b/maths/numerical_analysis/nevilles_method.py index 1f48b43fbd22..256b61f5f218 100644 --- a/maths/numerical_analysis/nevilles_method.py +++ b/maths/numerical_analysis/nevilles_method.py @@ -1,11 +1,11 @@ """ - Python program to show how to interpolate and evaluate a polynomial - using Neville's method. - Neville’s method evaluates a polynomial that passes through a - given set of x and y points for a particular x value (x0) using the - Newton polynomial form. - Reference: - https://rpubs.com/aaronsc32/nevilles-method-polynomial-interpolation +Python program to show how to interpolate and evaluate a polynomial +using Neville's method. +Neville’s method evaluates a polynomial that passes through a +given set of x and y points for a particular x value (x0) using the +Newton polynomial form. +Reference: + https://rpubs.com/aaronsc32/nevilles-method-polynomial-interpolation """ diff --git a/maths/numerical_analysis/newton_raphson.py b/maths/numerical_analysis/newton_raphson.py index feee38f905dd..10fb244bf426 100644 --- a/maths/numerical_analysis/newton_raphson.py +++ b/maths/numerical_analysis/newton_raphson.py @@ -9,6 +9,7 @@ Reference: https://en.wikipedia.org/wiki/Newton%27s_method """ + from collections.abc import Callable RealFunc = Callable[[float], float] diff --git a/maths/numerical_analysis/numerical_integration.py b/maths/numerical_analysis/numerical_integration.py index 4ac562644a07..f64436ec48c1 100644 --- a/maths/numerical_analysis/numerical_integration.py +++ b/maths/numerical_analysis/numerical_integration.py @@ -1,6 +1,7 @@ """ Approximates the area under the curve using the trapezoidal rule """ + from __future__ import annotations from collections.abc import Callable diff --git a/maths/numerical_analysis/runge_kutta_gills.py b/maths/numerical_analysis/runge_kutta_gills.py index 2bd9cd6129b8..451cde4cb935 100644 --- a/maths/numerical_analysis/runge_kutta_gills.py +++ b/maths/numerical_analysis/runge_kutta_gills.py @@ -4,6 +4,7 @@ https://www.geeksforgeeks.org/gills-4th-order-method-to-solve-differential-equations/ Author : Ravi Kumar """ + from collections.abc import Callable from math import sqrt diff --git a/maths/numerical_analysis/secant_method.py b/maths/numerical_analysis/secant_method.py index d39cb0ff30ef..9fff8222cdde 100644 --- a/maths/numerical_analysis/secant_method.py +++ b/maths/numerical_analysis/secant_method.py @@ -2,6 +2,7 @@ Implementing Secant method in Python Author: dimgrichr """ + from math import exp diff --git a/maths/prime_factors.py b/maths/prime_factors.py index e520ae3a6d04..47abcf10e618 100644 --- a/maths/prime_factors.py +++ b/maths/prime_factors.py @@ -1,6 +1,7 @@ """ 
python/black : True """ + from __future__ import annotations diff --git a/maths/series/geometric_series.py b/maths/series/geometric_series.py index b8d6a86206be..55c42fd90e99 100644 --- a/maths/series/geometric_series.py +++ b/maths/series/geometric_series.py @@ -9,7 +9,6 @@ python3 geometric_series.py """ - from __future__ import annotations diff --git a/maths/series/p_series.py b/maths/series/p_series.py index a091a6f3fecf..93812f443857 100644 --- a/maths/series/p_series.py +++ b/maths/series/p_series.py @@ -9,7 +9,6 @@ python3 p_series.py """ - from __future__ import annotations diff --git a/maths/sieve_of_eratosthenes.py b/maths/sieve_of_eratosthenes.py index a0520aa5cf50..3923dc3e1612 100644 --- a/maths/sieve_of_eratosthenes.py +++ b/maths/sieve_of_eratosthenes.py @@ -10,6 +10,7 @@ doctest provider: Bruno Simas Hadlich (https://github.com/brunohadlich) Also thanks to Dmitry (https://github.com/LizardWizzard) for finding the problem """ + from __future__ import annotations import math diff --git a/maths/solovay_strassen_primality_test.py b/maths/solovay_strassen_primality_test.py index 1d11d458369a..b2d905b07bed 100644 --- a/maths/solovay_strassen_primality_test.py +++ b/maths/solovay_strassen_primality_test.py @@ -9,7 +9,6 @@ https://en.wikipedia.org/wiki/Solovay%E2%80%93Strassen_primality_test """ - import random diff --git a/maths/special_numbers/armstrong_numbers.py b/maths/special_numbers/armstrong_numbers.py index b037aacb16c3..b2b4010a8f5b 100644 --- a/maths/special_numbers/armstrong_numbers.py +++ b/maths/special_numbers/armstrong_numbers.py @@ -8,6 +8,7 @@ On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188 """ + PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401) FAILING: tuple = (-153, -1, 0, 1.2, 200, "A", [], {}, None) diff --git a/maths/special_numbers/weird_number.py b/maths/special_numbers/weird_number.py index 2834a9fee31e..5c9240d0ea4e 100644 --- a/maths/special_numbers/weird_number.py +++ b/maths/special_numbers/weird_number.py @@ -3,6 +3,7 @@ Fun fact: The set of weird numbers has positive asymptotic density. """ + from math import sqrt diff --git a/maths/tanh.py b/maths/tanh.py index 38a369d9118d..011d6f17e22b 100644 --- a/maths/tanh.py +++ b/maths/tanh.py @@ -9,6 +9,7 @@ Script inspired from its corresponding Wikipedia article https://en.wikipedia.org/wiki/Activation_function """ + import numpy as np diff --git a/maths/triplet_sum.py b/maths/triplet_sum.py index af77ed145bce..e74f67daad47 100644 --- a/maths/triplet_sum.py +++ b/maths/triplet_sum.py @@ -3,6 +3,7 @@ we are required to find a triplet from the array such that it's sum is equal to the target. """ + from __future__ import annotations from itertools import permutations diff --git a/maths/two_pointer.py b/maths/two_pointer.py index d0fb0fc9c2f1..8a6d8eb7aff0 100644 --- a/maths/two_pointer.py +++ b/maths/two_pointer.py @@ -17,6 +17,7 @@ [1]: https://github.com/TheAlgorithms/Python/blob/master/other/two_sum.py """ + from __future__ import annotations diff --git a/maths/two_sum.py b/maths/two_sum.py index 12ad332d6c4e..58c933a5078a 100644 --- a/maths/two_sum.py +++ b/maths/two_sum.py @@ -11,6 +11,7 @@ Because nums[0] + nums[1] = 2 + 7 = 9, return [0, 1]. 
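The maths/two_sum.py example right above (nums[0] + nums[1] = 2 + 7 = 9 giving [0, 1]) is the classic single-pass hash-map problem. A short sketch of that approach; the function name is illustrative and this is not claimed to be the exact technique the file itself uses:

    def two_sum_sketch(nums: list[int], target: int) -> list[int]:
        # Map each value to its index; for every number check whether its
        # complement (target - number) has been seen already.
        seen: dict[int, int] = {}
        for index, number in enumerate(nums):
            if target - number in seen:
                return [seen[target - number], index]
            seen[number] = index
        return []

two_sum_sketch([2, 7, 11, 15], 9) returns [0, 1], matching the example in the docstring.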
""" + from __future__ import annotations diff --git a/maths/volume.py b/maths/volume.py index b4df4e475783..33be9bdd131a 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -3,6 +3,7 @@ * https://en.wikipedia.org/wiki/Volume * https://en.wikipedia.org/wiki/Spherical_cap """ + from __future__ import annotations from math import pi, pow diff --git a/matrix/matrix_multiplication_recursion.py b/matrix/matrix_multiplication_recursion.py index 287142480ce7..57c4d80de017 100644 --- a/matrix/matrix_multiplication_recursion.py +++ b/matrix/matrix_multiplication_recursion.py @@ -7,6 +7,7 @@ Perform matrix multiplication using a recursive algorithm. https://en.wikipedia.org/wiki/Matrix_multiplication """ + # type Matrix = list[list[int]] # psf/black currenttly fails on this line Matrix = list[list[int]] diff --git a/networking_flow/ford_fulkerson.py b/networking_flow/ford_fulkerson.py index 7d5fb522e012..b47d3b68f3d1 100644 --- a/networking_flow/ford_fulkerson.py +++ b/networking_flow/ford_fulkerson.py @@ -6,6 +6,7 @@ (1) Start with initial flow as 0 (2) Choose the augmenting path from source to sink and add the path to flow """ + graph = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], diff --git a/neural_network/activation_functions/binary_step.py b/neural_network/activation_functions/binary_step.py index 8f8f4d405fd2..d3d774602182 100644 --- a/neural_network/activation_functions/binary_step.py +++ b/neural_network/activation_functions/binary_step.py @@ -8,7 +8,6 @@ https://en.wikipedia.org/wiki/Activation_function """ - import numpy as np diff --git a/neural_network/activation_functions/rectified_linear_unit.py b/neural_network/activation_functions/rectified_linear_unit.py index 458c6bd5c391..2d5cf96fd387 100644 --- a/neural_network/activation_functions/rectified_linear_unit.py +++ b/neural_network/activation_functions/rectified_linear_unit.py @@ -9,6 +9,7 @@ Script inspired from its corresponding Wikipedia article https://en.wikipedia.org/wiki/Rectifier_(neural_networks) """ + from __future__ import annotations import numpy as np diff --git a/neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py b/neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py index 603ac0b7e120..a053e690ba44 100644 --- a/neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py +++ b/neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py @@ -8,7 +8,6 @@ https://en.wikipedia.org/wiki/Soboleva_modified_hyperbolic_tangent """ - import numpy as np diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index bdd096b3f653..7e0bdbbe2857 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -17,6 +17,7 @@ Date: 2017.11.23 """ + import numpy as np from matplotlib import pyplot as plt diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index e9726a0cb4a7..07cc456b7466 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -1,18 +1,19 @@ """ - - - - - - -- - - - - - - - - - - - - - - - - - - - - - - - Name - - CNN - Convolution Neural Network For Photo Recognizing - Goal - - Recognize Handing Writing Word Photo - Detail: Total 5 layers neural network - * Convolution layer - * Pooling layer - * Input layer layer of BP - * Hidden layer of BP - * Output layer of BP - Author: Stephen Lee - Github: 245885195@qq.com - Date: 
2017.9.20 - - - - - - -- - - - - - - - - - - - - - - - - - - - - - - + - - - - - -- - - - - - - - - - - - - - - - - - - - - - - +Name - - CNN - Convolution Neural Network For Photo Recognizing +Goal - - Recognize Handing Writing Word Photo +Detail: Total 5 layers neural network + * Convolution layer + * Pooling layer + * Input layer layer of BP + * Hidden layer of BP + * Output layer of BP +Author: Stephen Lee +Github: 245885195@qq.com +Date: 2017.9.20 +- - - - - -- - - - - - - - - - - - - - - - - - - - - - - """ + import pickle import numpy as np diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 2128449c03e9..f7ae86b48e65 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -17,7 +17,6 @@ This module and all its submodules are deprecated. """ - import gzip import os import typing diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index f5fb103ba528..436577eb5b5d 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -8,6 +8,7 @@ For more information about the algorithm: https://en.wikipedia.org/wiki/DPLL_algorithm """ + from __future__ import annotations import random diff --git a/other/fischer_yates_shuffle.py b/other/fischer_yates_shuffle.py index fa2f4dce9db0..37e11479a4c9 100644 --- a/other/fischer_yates_shuffle.py +++ b/other/fischer_yates_shuffle.py @@ -5,6 +5,7 @@ For more details visit wikipedia/Fischer-Yates-Shuffle. """ + import random from typing import Any diff --git a/other/gauss_easter.py b/other/gauss_easter.py index 4447d4ab86af..d1c525593f79 100644 --- a/other/gauss_easter.py +++ b/other/gauss_easter.py @@ -1,6 +1,7 @@ """ https://en.wikipedia.org/wiki/Computus#Gauss'_Easter_algorithm """ + import math from datetime import datetime, timedelta diff --git a/other/majority_vote_algorithm.py b/other/majority_vote_algorithm.py index ab8b386dd2e5..8d3b56707d06 100644 --- a/other/majority_vote_algorithm.py +++ b/other/majority_vote_algorithm.py @@ -4,6 +4,7 @@ We have to solve in O(n) time and O(1) Space. 
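The other/majority_vote_algorithm.py docstring above asks for the Boyer-Moore majority vote in O(n) time and O(1) space. For the single-majority form of the problem, a minimal sketch (the file may solve a more general variant, so this is illustration only):

    from __future__ import annotations

    def majority_vote_sketch(votes: list[int]) -> int | None:
        # Keep one candidate and a counter; a final count confirms the candidate
        # really holds a strict majority, otherwise None is returned.
        candidate, count = None, 0
        for vote in votes:
            if count == 0:
                candidate, count = vote, 1
            elif vote == candidate:
                count += 1
            else:
                count -= 1
        return candidate if votes.count(candidate) > len(votes) // 2 else None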
URL : https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_majority_vote_algorithm """ + from collections import Counter diff --git a/other/quine.py b/other/quine.py index 500a351d38dc..08e885bc1ce7 100644 --- a/other/quine.py +++ b/other/quine.py @@ -8,4 +8,5 @@ More info on: https://en.wikipedia.org/wiki/Quine_(computing) """ + print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))")) diff --git a/other/word_search.py b/other/word_search.py index a4796e220c7c..9e8acadbd9a4 100644 --- a/other/word_search.py +++ b/other/word_search.py @@ -5,7 +5,6 @@ @ https://en.wikipedia.org/wiki/Word_search """ - from random import choice, randint, shuffle # The words to display on the word search - diff --git a/physics/archimedes_principle_of_buoyant_force.py b/physics/archimedes_principle_of_buoyant_force.py index 5f569837220f..71043e0e1111 100644 --- a/physics/archimedes_principle_of_buoyant_force.py +++ b/physics/archimedes_principle_of_buoyant_force.py @@ -8,7 +8,6 @@ https://en.wikipedia.org/wiki/Archimedes%27_principle """ - # Acceleration Constant on Earth (unit m/s^2) g = 9.80665 # Also available in scipy.constants.g diff --git a/physics/center_of_mass.py b/physics/center_of_mass.py index bd9ba2480584..59c3b807f401 100644 --- a/physics/center_of_mass.py +++ b/physics/center_of_mass.py @@ -24,6 +24,7 @@ Reference: https://en.wikipedia.org/wiki/Center_of_mass """ + from collections import namedtuple Particle = namedtuple("Particle", "x y z mass") # noqa: PYI024 diff --git a/physics/in_static_equilibrium.py b/physics/in_static_equilibrium.py index d56299f60858..e3c2f9d07aed 100644 --- a/physics/in_static_equilibrium.py +++ b/physics/in_static_equilibrium.py @@ -1,6 +1,7 @@ """ Checks if a system of forces is in static equilibrium. """ + from __future__ import annotations from numpy import array, cos, cross, float64, radians, sin diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index ec008784ba62..4d555716199a 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -11,7 +11,6 @@ (See also http://www.shodor.org/refdesk/Resources/Algorithms/EulersMethod/ ) """ - from __future__ import annotations import random diff --git a/physics/rms_speed_of_molecule.py b/physics/rms_speed_of_molecule.py index 478cee01c7fd..fb23eb8a21cf 100644 --- a/physics/rms_speed_of_molecule.py +++ b/physics/rms_speed_of_molecule.py @@ -20,7 +20,6 @@ alternative method. """ - UNIVERSAL_GAS_CONSTANT = 8.3144598 diff --git a/project_euler/problem_002/sol4.py b/project_euler/problem_002/sol4.py index 70b7d6a80a1d..3a2e4fce341c 100644 --- a/project_euler/problem_002/sol4.py +++ b/project_euler/problem_002/sol4.py @@ -14,6 +14,7 @@ References: - https://en.wikipedia.org/wiki/Fibonacci_number """ + import math from decimal import Decimal, getcontext diff --git a/project_euler/problem_003/sol1.py b/project_euler/problem_003/sol1.py index a7d01bb041ba..d1c0e61cf1a6 100644 --- a/project_euler/problem_003/sol1.py +++ b/project_euler/problem_003/sol1.py @@ -10,6 +10,7 @@ References: - https://en.wikipedia.org/wiki/Prime_number#Unique_factorization """ + import math diff --git a/project_euler/problem_006/sol3.py b/project_euler/problem_006/sol3.py index 529f233c9f8e..16445258c2b7 100644 --- a/project_euler/problem_006/sol3.py +++ b/project_euler/problem_006/sol3.py @@ -15,6 +15,7 @@ Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum. 
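Problem 6, quoted above, asks for the difference between the square of the sum and the sum of the squares of the first one hundred natural numbers. A brute-force sketch (the repository solution may use the closed-form identities instead):

    def sum_square_difference(n: int = 100) -> int:
        square_of_sum = sum(range(1, n + 1)) ** 2
        sum_of_squares = sum(i * i for i in range(1, n + 1))
        return square_of_sum - sum_of_squares

For n = 10 this gives 3025 - 385 = 2640.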
""" + import math diff --git a/project_euler/problem_007/sol2.py b/project_euler/problem_007/sol2.py index 75d351889ea8..fd99453c1100 100644 --- a/project_euler/problem_007/sol2.py +++ b/project_euler/problem_007/sol2.py @@ -11,6 +11,7 @@ References: - https://en.wikipedia.org/wiki/Prime_number """ + import math diff --git a/project_euler/problem_007/sol3.py b/project_euler/problem_007/sol3.py index 774260db99a0..39db51a93427 100644 --- a/project_euler/problem_007/sol3.py +++ b/project_euler/problem_007/sol3.py @@ -11,6 +11,7 @@ References: - https://en.wikipedia.org/wiki/Prime_number """ + import itertools import math diff --git a/project_euler/problem_008/sol2.py b/project_euler/problem_008/sol2.py index 889c3a3143c2..f83cb1db30b6 100644 --- a/project_euler/problem_008/sol2.py +++ b/project_euler/problem_008/sol2.py @@ -30,6 +30,7 @@ Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product? """ + from functools import reduce N = ( diff --git a/project_euler/problem_008/sol3.py b/project_euler/problem_008/sol3.py index c6081aa05e2c..bf3bcb05b7e9 100644 --- a/project_euler/problem_008/sol3.py +++ b/project_euler/problem_008/sol3.py @@ -30,6 +30,7 @@ Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product? """ + import sys N = ( diff --git a/project_euler/problem_010/sol2.py b/project_euler/problem_010/sol2.py index 245cca1d1720..1a1fc0f33cb3 100644 --- a/project_euler/problem_010/sol2.py +++ b/project_euler/problem_010/sol2.py @@ -10,6 +10,7 @@ References: - https://en.wikipedia.org/wiki/Prime_number """ + import math from collections.abc import Iterator from itertools import takewhile diff --git a/project_euler/problem_013/sol1.py b/project_euler/problem_013/sol1.py index 7a414a9379e0..87d0e0a60e9b 100644 --- a/project_euler/problem_013/sol1.py +++ b/project_euler/problem_013/sol1.py @@ -5,6 +5,7 @@ Work out the first ten digits of the sum of the following one-hundred 50-digit numbers. """ + import os diff --git a/project_euler/problem_014/sol2.py b/project_euler/problem_014/sol2.py index 2448e652ce5b..797b0f9886fe 100644 --- a/project_euler/problem_014/sol2.py +++ b/project_euler/problem_014/sol2.py @@ -25,6 +25,7 @@ Which starting number, under one million, produces the longest chain? """ + from __future__ import annotations COLLATZ_SEQUENCE_LENGTHS = {1: 1} diff --git a/project_euler/problem_015/sol1.py b/project_euler/problem_015/sol1.py index fb2020d6179f..fd9014a406f6 100644 --- a/project_euler/problem_015/sol1.py +++ b/project_euler/problem_015/sol1.py @@ -5,6 +5,7 @@ the right and down, there are exactly 6 routes to the bottom right corner. How many such routes are there through a 20×20 grid? """ + from math import factorial diff --git a/project_euler/problem_018/solution.py b/project_euler/problem_018/solution.py index 70306148bb9e..cbe8743be15f 100644 --- a/project_euler/problem_018/solution.py +++ b/project_euler/problem_018/solution.py @@ -27,6 +27,7 @@ 63 66 04 68 89 53 67 30 73 16 69 87 40 31 04 62 98 27 23 09 70 98 73 93 38 53 60 04 23 """ + import os diff --git a/project_euler/problem_020/sol2.py b/project_euler/problem_020/sol2.py index 676e96e7836a..a1d56ade7708 100644 --- a/project_euler/problem_020/sol2.py +++ b/project_euler/problem_020/sol2.py @@ -8,6 +8,7 @@ Find the sum of the digits in the number 100! 
""" + from math import factorial diff --git a/project_euler/problem_020/sol3.py b/project_euler/problem_020/sol3.py index 4f28ac5fcfde..1886e05463f4 100644 --- a/project_euler/problem_020/sol3.py +++ b/project_euler/problem_020/sol3.py @@ -8,6 +8,7 @@ Find the sum of the digits in the number 100! """ + from math import factorial diff --git a/project_euler/problem_021/sol1.py b/project_euler/problem_021/sol1.py index 353510ae8f94..f6dbfa8864db 100644 --- a/project_euler/problem_021/sol1.py +++ b/project_euler/problem_021/sol1.py @@ -13,6 +13,7 @@ Evaluate the sum of all the amicable numbers under 10000. """ + from math import sqrt diff --git a/project_euler/problem_022/sol1.py b/project_euler/problem_022/sol1.py index 982906245e87..b6386186e7df 100644 --- a/project_euler/problem_022/sol1.py +++ b/project_euler/problem_022/sol1.py @@ -14,6 +14,7 @@ What is the total of all the name scores in the file? """ + import os diff --git a/project_euler/problem_022/sol2.py b/project_euler/problem_022/sol2.py index 5ae41c84686e..f7092ea1cd12 100644 --- a/project_euler/problem_022/sol2.py +++ b/project_euler/problem_022/sol2.py @@ -14,6 +14,7 @@ What is the total of all the name scores in the file? """ + import os diff --git a/project_euler/problem_024/sol1.py b/project_euler/problem_024/sol1.py index 1c6378b38260..3fb1bd4ec582 100644 --- a/project_euler/problem_024/sol1.py +++ b/project_euler/problem_024/sol1.py @@ -9,6 +9,7 @@ What is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9? """ + from itertools import permutations diff --git a/project_euler/problem_025/sol2.py b/project_euler/problem_025/sol2.py index 6f49e89fb465..9e950b355f7a 100644 --- a/project_euler/problem_025/sol2.py +++ b/project_euler/problem_025/sol2.py @@ -23,6 +23,7 @@ What is the index of the first term in the Fibonacci sequence to contain 1000 digits? """ + from collections.abc import Generator diff --git a/project_euler/problem_030/sol1.py b/project_euler/problem_030/sol1.py index 2c6b4e4e85d5..7d83e314523f 100644 --- a/project_euler/problem_030/sol1.py +++ b/project_euler/problem_030/sol1.py @@ -1,4 +1,4 @@ -""" Problem Statement (Digit Fifth Powers): https://projecteuler.net/problem=30 +"""Problem Statement (Digit Fifth Powers): https://projecteuler.net/problem=30 Surprisingly there are only three numbers that can be written as the sum of fourth powers of their digits: @@ -21,7 +21,6 @@ and hence a number between 1000 and 1000000 """ - DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)} diff --git a/project_euler/problem_032/sol32.py b/project_euler/problem_032/sol32.py index c4d11e86c877..a402b5584061 100644 --- a/project_euler/problem_032/sol32.py +++ b/project_euler/problem_032/sol32.py @@ -12,6 +12,7 @@ HINT: Some products can be obtained in more than one way so be sure to only include it once in your sum. """ + import itertools diff --git a/project_euler/problem_033/sol1.py b/project_euler/problem_033/sol1.py index 32be424b6a7b..187fd61bde6c 100644 --- a/project_euler/problem_033/sol1.py +++ b/project_euler/problem_033/sol1.py @@ -14,6 +14,7 @@ If the product of these four fractions is given in its lowest common terms, find the value of the denominator. 
""" + from __future__ import annotations from fractions import Fraction diff --git a/project_euler/problem_035/sol1.py b/project_euler/problem_035/sol1.py index 644c992ed8a5..cf9f6821d798 100644 --- a/project_euler/problem_035/sol1.py +++ b/project_euler/problem_035/sol1.py @@ -15,6 +15,7 @@ we will rule out the numbers which contain an even digit. After this we will generate each circular combination of the number and check if all are prime. """ + from __future__ import annotations sieve = [True] * 1000001 diff --git a/project_euler/problem_036/sol1.py b/project_euler/problem_036/sol1.py index 1d27356ec51e..3865b2a39ea9 100644 --- a/project_euler/problem_036/sol1.py +++ b/project_euler/problem_036/sol1.py @@ -14,6 +14,7 @@ (Please note that the palindromic number, in either base, may not include leading zeros.) """ + from __future__ import annotations diff --git a/project_euler/problem_038/sol1.py b/project_euler/problem_038/sol1.py index e4a6d09f8f7d..5bef273ea2a9 100644 --- a/project_euler/problem_038/sol1.py +++ b/project_euler/problem_038/sol1.py @@ -37,6 +37,7 @@ => 100 <= a < 334, candidate = a * 10^6 + 2a * 10^3 + 3a = 1002003 * a """ + from __future__ import annotations diff --git a/project_euler/problem_041/sol1.py b/project_euler/problem_041/sol1.py index 2ef0120684c3..0c37f5469a6c 100644 --- a/project_euler/problem_041/sol1.py +++ b/project_euler/problem_041/sol1.py @@ -10,6 +10,7 @@ So we will check only 7 digit pandigital numbers to obtain the largest possible pandigital prime. """ + from __future__ import annotations import math diff --git a/project_euler/problem_042/solution42.py b/project_euler/problem_042/solution42.py index f8a54e40eaab..f678bcdef710 100644 --- a/project_euler/problem_042/solution42.py +++ b/project_euler/problem_042/solution42.py @@ -13,6 +13,7 @@ containing nearly two-thousand common English words, how many are triangle words? """ + import os # Precomputes a list of the 100 first triangular numbers diff --git a/project_euler/problem_043/sol1.py b/project_euler/problem_043/sol1.py index c533f40da9c9..f3a2c71edc4e 100644 --- a/project_euler/problem_043/sol1.py +++ b/project_euler/problem_043/sol1.py @@ -18,7 +18,6 @@ Find the sum of all 0 to 9 pandigital numbers with this property. """ - from itertools import permutations diff --git a/project_euler/problem_050/sol1.py b/project_euler/problem_050/sol1.py index fc6e6f2b9a5d..0a5f861f0ef0 100644 --- a/project_euler/problem_050/sol1.py +++ b/project_euler/problem_050/sol1.py @@ -15,6 +15,7 @@ Which prime, below one-million, can be written as the sum of the most consecutive primes? """ + from __future__ import annotations diff --git a/project_euler/problem_051/sol1.py b/project_euler/problem_051/sol1.py index 921704bc4455..dc740c8b947d 100644 --- a/project_euler/problem_051/sol1.py +++ b/project_euler/problem_051/sol1.py @@ -15,6 +15,7 @@ Find the smallest prime which, by replacing part of the number (not necessarily adjacent digits) with the same digit, is part of an eight prime value family. """ + from __future__ import annotations from collections import Counter diff --git a/project_euler/problem_053/sol1.py b/project_euler/problem_053/sol1.py index 0692bbe0ebb8..a32b73c545d6 100644 --- a/project_euler/problem_053/sol1.py +++ b/project_euler/problem_053/sol1.py @@ -16,6 +16,7 @@ How many, not necessarily distinct, values of nCr, for 1 ≤ n ≤ 100, are greater than one-million? 
""" + from math import factorial diff --git a/project_euler/problem_054/sol1.py b/project_euler/problem_054/sol1.py index 86dfa5edd2f5..66aa3a0826f5 100644 --- a/project_euler/problem_054/sol1.py +++ b/project_euler/problem_054/sol1.py @@ -40,6 +40,7 @@ https://www.codewars.com/kata/ranking-poker-hands https://www.codewars.com/kata/sortable-poker-hands """ + from __future__ import annotations import os diff --git a/project_euler/problem_058/sol1.py b/project_euler/problem_058/sol1.py index 6a991c58b6b8..1d2f406eafdb 100644 --- a/project_euler/problem_058/sol1.py +++ b/project_euler/problem_058/sol1.py @@ -33,6 +33,7 @@ count of current primes. """ + import math diff --git a/project_euler/problem_059/sol1.py b/project_euler/problem_059/sol1.py index b795dd243b08..65bfd3f0b0fb 100644 --- a/project_euler/problem_059/sol1.py +++ b/project_euler/problem_059/sol1.py @@ -25,6 +25,7 @@ must contain common English words, decrypt the message and find the sum of the ASCII values in the original text. """ + from __future__ import annotations import string diff --git a/project_euler/problem_067/sol1.py b/project_euler/problem_067/sol1.py index 2b41fedc6784..171ff8c268f6 100644 --- a/project_euler/problem_067/sol1.py +++ b/project_euler/problem_067/sol1.py @@ -11,6 +11,7 @@ 'Save Link/Target As...'), a 15K text file containing a triangle with one-hundred rows. """ + import os diff --git a/project_euler/problem_067/sol2.py b/project_euler/problem_067/sol2.py index 2e88a57170a8..4fb093d49956 100644 --- a/project_euler/problem_067/sol2.py +++ b/project_euler/problem_067/sol2.py @@ -11,6 +11,7 @@ 'Save Link/Target As...'), a 15K text file containing a triangle with one-hundred rows. """ + import os diff --git a/project_euler/problem_070/sol1.py b/project_euler/problem_070/sol1.py index f1114a280a31..9874b7418559 100644 --- a/project_euler/problem_070/sol1.py +++ b/project_euler/problem_070/sol1.py @@ -28,6 +28,7 @@ Finding totients https://en.wikipedia.org/wiki/Euler's_totient_function#Euler's_product_formula """ + from __future__ import annotations import numpy as np diff --git a/project_euler/problem_074/sol1.py b/project_euler/problem_074/sol1.py index a257d4d94fa8..91440b3fd02b 100644 --- a/project_euler/problem_074/sol1.py +++ b/project_euler/problem_074/sol1.py @@ -27,7 +27,6 @@ non-repeating terms? """ - DIGIT_FACTORIALS = { "0": 1, "1": 1, diff --git a/project_euler/problem_074/sol2.py b/project_euler/problem_074/sol2.py index b54bc023e387..52a996bfa51d 100644 --- a/project_euler/problem_074/sol2.py +++ b/project_euler/problem_074/sol2.py @@ -33,6 +33,7 @@ is greater then the desired one. After generating each chain, the length is checked and the counter increases. """ + from math import factorial DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)} diff --git a/project_euler/problem_077/sol1.py b/project_euler/problem_077/sol1.py index 6098ea9e50a6..e8f4e979a625 100644 --- a/project_euler/problem_077/sol1.py +++ b/project_euler/problem_077/sol1.py @@ -12,6 +12,7 @@ What is the first value which can be written as the sum of primes in over five thousand different ways? 
""" + from __future__ import annotations from functools import lru_cache diff --git a/project_euler/problem_079/sol1.py b/project_euler/problem_079/sol1.py index d34adcd243b0..74392e9bd094 100644 --- a/project_euler/problem_079/sol1.py +++ b/project_euler/problem_079/sol1.py @@ -13,6 +13,7 @@ Given that the three characters are always asked for in order, analyse the file so as to determine the shortest possible secret passcode of unknown length. """ + import itertools from pathlib import Path diff --git a/project_euler/problem_080/sol1.py b/project_euler/problem_080/sol1.py index 916998bdd8ad..8cfcbd41b588 100644 --- a/project_euler/problem_080/sol1.py +++ b/project_euler/problem_080/sol1.py @@ -6,6 +6,7 @@ square roots. Time: 5 October 2020, 18:30 """ + import decimal diff --git a/project_euler/problem_081/sol1.py b/project_euler/problem_081/sol1.py index aef6106b54df..293027bddd0e 100644 --- a/project_euler/problem_081/sol1.py +++ b/project_euler/problem_081/sol1.py @@ -13,6 +13,7 @@ and down in matrix.txt (https://projecteuler.net/project/resources/p081_matrix.txt), a 31K text file containing an 80 by 80 matrix. """ + import os diff --git a/project_euler/problem_085/sol1.py b/project_euler/problem_085/sol1.py index d0f29796498c..d0b361ee750d 100644 --- a/project_euler/problem_085/sol1.py +++ b/project_euler/problem_085/sol1.py @@ -44,6 +44,7 @@ Reference: https://en.wikipedia.org/wiki/Triangular_number https://en.wikipedia.org/wiki/Quadratic_formula """ + from __future__ import annotations from math import ceil, floor, sqrt diff --git a/project_euler/problem_086/sol1.py b/project_euler/problem_086/sol1.py index 064af215c049..cbd2b648e0ac 100644 --- a/project_euler/problem_086/sol1.py +++ b/project_euler/problem_086/sol1.py @@ -66,7 +66,6 @@ """ - from math import sqrt diff --git a/project_euler/problem_091/sol1.py b/project_euler/problem_091/sol1.py index 6c9aa3fa6c70..7db98fca0049 100644 --- a/project_euler/problem_091/sol1.py +++ b/project_euler/problem_091/sol1.py @@ -11,7 +11,6 @@ Given that 0 ≤ x1, y1, x2, y2 ≤ 50, how many right triangles can be formed? """ - from itertools import combinations, product diff --git a/project_euler/problem_101/sol1.py b/project_euler/problem_101/sol1.py index d5c503af796a..2d209333cf31 100644 --- a/project_euler/problem_101/sol1.py +++ b/project_euler/problem_101/sol1.py @@ -41,6 +41,7 @@ Find the sum of FITs for the BOPs. """ + from __future__ import annotations from collections.abc import Callable diff --git a/project_euler/problem_102/sol1.py b/project_euler/problem_102/sol1.py index 4f6e6361e3e8..85fe5eac1e22 100644 --- a/project_euler/problem_102/sol1.py +++ b/project_euler/problem_102/sol1.py @@ -18,6 +18,7 @@ NOTE: The first two examples in the file represent the triangles in the example given above. """ + from __future__ import annotations from pathlib import Path diff --git a/project_euler/problem_107/sol1.py b/project_euler/problem_107/sol1.py index 4659eac24bd3..3fe75909e2ea 100644 --- a/project_euler/problem_107/sol1.py +++ b/project_euler/problem_107/sol1.py @@ -27,6 +27,7 @@ We use Prim's algorithm to find a Minimum Spanning Tree. Reference: https://en.wikipedia.org/wiki/Prim%27s_algorithm """ + from __future__ import annotations import os diff --git a/project_euler/problem_123/sol1.py b/project_euler/problem_123/sol1.py index f74cdd999401..7239e13a51e9 100644 --- a/project_euler/problem_123/sol1.py +++ b/project_euler/problem_123/sol1.py @@ -37,6 +37,7 @@ r = 2pn when n is odd r = 2 when n is even. 
""" + from __future__ import annotations from collections.abc import Generator diff --git a/project_euler/problem_144/sol1.py b/project_euler/problem_144/sol1.py index b5f103b64ff5..bc16bf985f41 100644 --- a/project_euler/problem_144/sol1.py +++ b/project_euler/problem_144/sol1.py @@ -29,7 +29,6 @@ How many times does the beam hit the internal surface of the white cell before exiting? """ - from math import isclose, sqrt diff --git a/project_euler/problem_145/sol1.py b/project_euler/problem_145/sol1.py index 71b851178fdb..ce4438289722 100644 --- a/project_euler/problem_145/sol1.py +++ b/project_euler/problem_145/sol1.py @@ -13,6 +13,7 @@ How many reversible numbers are there below one-billion (10^9)? """ + EVEN_DIGITS = [0, 2, 4, 6, 8] ODD_DIGITS = [1, 3, 5, 7, 9] diff --git a/project_euler/problem_173/sol1.py b/project_euler/problem_173/sol1.py index 5416e25462cc..9235d00e1752 100644 --- a/project_euler/problem_173/sol1.py +++ b/project_euler/problem_173/sol1.py @@ -11,7 +11,6 @@ Using up to one million tiles how many different square laminae can be formed? """ - from math import ceil, sqrt diff --git a/project_euler/problem_180/sol1.py b/project_euler/problem_180/sol1.py index 12e34dcaa76b..72baed42b99e 100644 --- a/project_euler/problem_180/sol1.py +++ b/project_euler/problem_180/sol1.py @@ -44,6 +44,7 @@ Reference: https://en.wikipedia.org/wiki/Fermat%27s_Last_Theorem """ + from __future__ import annotations from fractions import Fraction diff --git a/project_euler/problem_191/sol1.py b/project_euler/problem_191/sol1.py index 6bff9d54eeca..efb2a5d086ad 100644 --- a/project_euler/problem_191/sol1.py +++ b/project_euler/problem_191/sol1.py @@ -25,7 +25,6 @@ https://projecteuler.net/problem=191 """ - cache: dict[tuple[int, int, int], int] = {} diff --git a/project_euler/problem_203/sol1.py b/project_euler/problem_203/sol1.py index da9436246a7c..8ad089ec09aa 100644 --- a/project_euler/problem_203/sol1.py +++ b/project_euler/problem_203/sol1.py @@ -27,6 +27,7 @@ References: - https://en.wikipedia.org/wiki/Pascal%27s_triangle """ + from __future__ import annotations diff --git a/project_euler/problem_551/sol1.py b/project_euler/problem_551/sol1.py index 2cd75efbb68d..100e9d41dd31 100644 --- a/project_euler/problem_551/sol1.py +++ b/project_euler/problem_551/sol1.py @@ -12,7 +12,6 @@ Find a(10^15) """ - ks = range(2, 20 + 1) base = [10**k for k in range(ks[-1] + 1)] memo: dict[int, dict[int, list[list[int]]]] = {} diff --git a/scheduling/highest_response_ratio_next.py b/scheduling/highest_response_ratio_next.py index 057bd64cc729..112c2a85220f 100644 --- a/scheduling/highest_response_ratio_next.py +++ b/scheduling/highest_response_ratio_next.py @@ -4,6 +4,7 @@ to mitigate the problem of process starvation. 
https://en.wikipedia.org/wiki/Highest_response_ratio_next """ + from statistics import mean import numpy as np diff --git a/scheduling/job_sequence_with_deadline.py b/scheduling/job_sequence_with_deadline.py index fccb49cd88e8..ee1fdbd0e55c 100644 --- a/scheduling/job_sequence_with_deadline.py +++ b/scheduling/job_sequence_with_deadline.py @@ -13,6 +13,7 @@ Time Complexity - O(n log n) https://medium.com/@nihardudhat2000/job-sequencing-with-deadline-17ddbb5890b5 """ + from dataclasses import dataclass from operator import attrgetter diff --git a/scheduling/non_preemptive_shortest_job_first.py b/scheduling/non_preemptive_shortest_job_first.py index 69c974b0044d..cb7ffd3abd9c 100644 --- a/scheduling/non_preemptive_shortest_job_first.py +++ b/scheduling/non_preemptive_shortest_job_first.py @@ -5,7 +5,6 @@ https://en.wikipedia.org/wiki/Shortest_job_next """ - from __future__ import annotations from statistics import mean diff --git a/scheduling/round_robin.py b/scheduling/round_robin.py index e8d54dd9a553..5f6c7f341baa 100644 --- a/scheduling/round_robin.py +++ b/scheduling/round_robin.py @@ -3,6 +3,7 @@ In Round Robin each process is assigned a fixed time slot in a cyclic way. https://en.wikipedia.org/wiki/Round-robin_scheduling """ + from __future__ import annotations from statistics import mean diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index 871de8207308..cfd0417ea62d 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -3,6 +3,7 @@ Please note arrival time and burst Please use spaces to separate times entered. """ + from __future__ import annotations import pandas as pd diff --git a/searches/binary_search.py b/searches/binary_search.py index 586be39c9a0d..2e66b672d5b4 100644 --- a/searches/binary_search.py +++ b/searches/binary_search.py @@ -9,6 +9,7 @@ For manual testing run: python3 binary_search.py """ + from __future__ import annotations import bisect diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index 6fb841af4294..4897ef17299c 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -1,6 +1,7 @@ """ This is pure Python implementation of tree traversal algorithms """ + from __future__ import annotations import queue diff --git a/searches/fibonacci_search.py b/searches/fibonacci_search.py index 55fc05d39eeb..ec3dfa7f30f6 100644 --- a/searches/fibonacci_search.py +++ b/searches/fibonacci_search.py @@ -10,6 +10,7 @@ For manual testing run: python3 fibonacci_search.py """ + from functools import lru_cache diff --git a/searches/jump_search.py b/searches/jump_search.py index 3bc3c37809a1..e72d85e8a868 100644 --- a/searches/jump_search.py +++ b/searches/jump_search.py @@ -14,8 +14,7 @@ class Comparable(Protocol): - def __lt__(self, other: Any, /) -> bool: - ... + def __lt__(self, other: Any, /) -> bool: ... 
T = TypeVar("T", bound=Comparable) diff --git a/searches/quick_select.py b/searches/quick_select.py index 5ede8c4dd07f..c8282e1fa5fc 100644 --- a/searches/quick_select.py +++ b/searches/quick_select.py @@ -4,6 +4,7 @@ sorted, even if it is not already sorted https://en.wikipedia.org/wiki/Quickselect """ + import random diff --git a/searches/simple_binary_search.py b/searches/simple_binary_search.py index ff043d7369af..00e83ff9e4a3 100644 --- a/searches/simple_binary_search.py +++ b/searches/simple_binary_search.py @@ -7,6 +7,7 @@ For manual testing run: python3 simple_binary_search.py """ + from __future__ import annotations diff --git a/searches/tabu_search.py b/searches/tabu_search.py index d998ddc55976..fd482a81224c 100644 --- a/searches/tabu_search.py +++ b/searches/tabu_search.py @@ -24,6 +24,7 @@ -s size_of_tabu_search e.g. python tabu_search.py -f tabudata2.txt -i 4 -s 3 """ + import argparse import copy diff --git a/searches/ternary_search.py b/searches/ternary_search.py index cb36e72faac6..8dcd6b5bde2e 100644 --- a/searches/ternary_search.py +++ b/searches/ternary_search.py @@ -6,6 +6,7 @@ Time Complexity : O(log3 N) Space Complexity : O(1) """ + from __future__ import annotations # This is the precision for this function which can be altered. diff --git a/sorts/bitonic_sort.py b/sorts/bitonic_sort.py index b65f877a45e3..600f8139603a 100644 --- a/sorts/bitonic_sort.py +++ b/sorts/bitonic_sort.py @@ -3,6 +3,7 @@ Note that this program works only when size of input is a power of 2. """ + from __future__ import annotations diff --git a/sorts/bucket_sort.py b/sorts/bucket_sort.py index c016e9e26e73..1c1320a58a7d 100644 --- a/sorts/bucket_sort.py +++ b/sorts/bucket_sort.py @@ -27,6 +27,7 @@ Source: https://en.wikipedia.org/wiki/Bucket_sort """ + from __future__ import annotations diff --git a/sorts/dutch_national_flag_sort.py b/sorts/dutch_national_flag_sort.py index 758e3a887b84..b4f1665cea00 100644 --- a/sorts/dutch_national_flag_sort.py +++ b/sorts/dutch_national_flag_sort.py @@ -23,7 +23,6 @@ python dnf_sort.py """ - # Python program to sort a sequence containing only 0, 1 and 2 in a single pass. red = 0 # The first color of the flag. white = 1 # The second color of the flag. diff --git a/sorts/insertion_sort.py b/sorts/insertion_sort.py index f11ddac349a0..46b263d84a33 100644 --- a/sorts/insertion_sort.py +++ b/sorts/insertion_sort.py @@ -18,8 +18,7 @@ class Comparable(Protocol): - def __lt__(self, other: Any, /) -> bool: - ... + def __lt__(self, other: Any, /) -> bool: ... T = TypeVar("T", bound=Comparable) diff --git a/sorts/intro_sort.py b/sorts/intro_sort.py index 5a5741dc8375..1184b381b05d 100644 --- a/sorts/intro_sort.py +++ b/sorts/intro_sort.py @@ -3,6 +3,7 @@ if the size of the list is under 16, use insertion sort https://en.wikipedia.org/wiki/Introsort """ + import math diff --git a/sorts/msd_radix_sort.py b/sorts/msd_radix_sort.py index 03f84c75b9d8..6aba4263663a 100644 --- a/sorts/msd_radix_sort.py +++ b/sorts/msd_radix_sort.py @@ -4,6 +4,7 @@ them. https://en.wikipedia.org/wiki/Radix_sort """ + from __future__ import annotations diff --git a/sorts/odd_even_transposition_parallel.py b/sorts/odd_even_transposition_parallel.py index b8ab46df1e59..9d2bcdbd7576 100644 --- a/sorts/odd_even_transposition_parallel.py +++ b/sorts/odd_even_transposition_parallel.py @@ -10,6 +10,7 @@ They are synchronized with locks and message passing but other forms of synchronization could be used. 
""" + from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time diff --git a/sorts/pigeon_sort.py b/sorts/pigeon_sort.py index 3e6d4c09c46f..fdfa692f4680 100644 --- a/sorts/pigeon_sort.py +++ b/sorts/pigeon_sort.py @@ -1,14 +1,15 @@ """ - This is an implementation of Pigeon Hole Sort. - For doctests run following command: +This is an implementation of Pigeon Hole Sort. +For doctests run following command: - python3 -m doctest -v pigeon_sort.py - or - python -m doctest -v pigeon_sort.py +python3 -m doctest -v pigeon_sort.py +or +python -m doctest -v pigeon_sort.py - For manual testing run: - python pigeon_sort.py +For manual testing run: +python pigeon_sort.py """ + from __future__ import annotations diff --git a/sorts/quick_sort.py b/sorts/quick_sort.py index 6b95fc144426..374d52e75c81 100644 --- a/sorts/quick_sort.py +++ b/sorts/quick_sort.py @@ -7,6 +7,7 @@ For manual testing run: python3 quick_sort.py """ + from __future__ import annotations from random import randrange diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py index 832b6162f349..1dbf5fbd1365 100644 --- a/sorts/radix_sort.py +++ b/sorts/radix_sort.py @@ -3,6 +3,7 @@ Source: https://en.wikipedia.org/wiki/Radix_sort """ + from __future__ import annotations RADIX = 10 diff --git a/sorts/recursive_insertion_sort.py b/sorts/recursive_insertion_sort.py index 297dbe9457e6..93465350bee2 100644 --- a/sorts/recursive_insertion_sort.py +++ b/sorts/recursive_insertion_sort.py @@ -1,6 +1,7 @@ """ A recursive implementation of the insertion sort algorithm """ + from __future__ import annotations diff --git a/sorts/slowsort.py b/sorts/slowsort.py index a5f4e873ebb2..394e6eed50b1 100644 --- a/sorts/slowsort.py +++ b/sorts/slowsort.py @@ -8,6 +8,7 @@ Source: https://en.wikipedia.org/wiki/Slowsort """ + from __future__ import annotations diff --git a/sorts/tree_sort.py b/sorts/tree_sort.py index dc95856f44c8..056864957d4d 100644 --- a/sorts/tree_sort.py +++ b/sorts/tree_sort.py @@ -3,6 +3,7 @@ Build a Binary Search Tree and then iterate thru it to get a sorted list. 
""" + from __future__ import annotations from collections.abc import Iterator diff --git a/strings/boyer_moore_search.py b/strings/boyer_moore_search.py index 117305d32fd3..9615d2fd659b 100644 --- a/strings/boyer_moore_search.py +++ b/strings/boyer_moore_search.py @@ -17,6 +17,7 @@ n=length of main string m=length of pattern string """ + from __future__ import annotations diff --git a/strings/check_anagrams.py b/strings/check_anagrams.py index 9dcdffcfb921..d747368b2373 100644 --- a/strings/check_anagrams.py +++ b/strings/check_anagrams.py @@ -1,6 +1,7 @@ """ wiki: https://en.wikipedia.org/wiki/Anagram """ + from collections import defaultdict diff --git a/strings/top_k_frequent_words.py b/strings/top_k_frequent_words.py index f3d1e0cd5ca7..40fa7fc85cd1 100644 --- a/strings/top_k_frequent_words.py +++ b/strings/top_k_frequent_words.py @@ -13,7 +13,6 @@ def top_k_frequent_words(words, k_value): return [x[0] for x in Counter(words).most_common(k_value)] """ - from collections import Counter from functools import total_ordering diff --git a/web_programming/co2_emission.py b/web_programming/co2_emission.py index 97927e7ef541..88a426cb976d 100644 --- a/web_programming/co2_emission.py +++ b/web_programming/co2_emission.py @@ -1,6 +1,7 @@ """ Get CO2 emission data from the UK CarbonIntensity API """ + from datetime import date import requests diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index 074ef878c0d7..6b4bacfe7d5a 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -1,4 +1,5 @@ """Get the site emails from URL.""" + from __future__ import annotations __author__ = "Muhammad Umer Farooq" diff --git a/web_programming/fetch_github_info.py b/web_programming/fetch_github_info.py index aa4e1d7b1963..7a4985b68841 100644 --- a/web_programming/fetch_github_info.py +++ b/web_programming/fetch_github_info.py @@ -17,6 +17,7 @@ #!/usr/bin/env bash export USER_TOKEN="" """ + from __future__ import annotations import os diff --git a/web_programming/fetch_jobs.py b/web_programming/fetch_jobs.py index 5af90a0bb239..49abd3c88eec 100644 --- a/web_programming/fetch_jobs.py +++ b/web_programming/fetch_jobs.py @@ -1,6 +1,7 @@ """ Scraping jobs given job title and location from indeed website """ + from __future__ import annotations from collections.abc import Generator diff --git a/web_programming/get_amazon_product_data.py b/web_programming/get_amazon_product_data.py index a16175688667..c2f2ac5ab291 100644 --- a/web_programming/get_amazon_product_data.py +++ b/web_programming/get_amazon_product_data.py @@ -4,7 +4,6 @@ information will include title, URL, price, ratings, and the discount available. """ - from itertools import zip_longest import requests diff --git a/web_programming/recaptcha_verification.py b/web_programming/recaptcha_verification.py index 47c6c42f2ad0..b03afb28ec53 100644 --- a/web_programming/recaptcha_verification.py +++ b/web_programming/recaptcha_verification.py @@ -31,6 +31,7 @@ Below a Django function for the views.py file contains a login form for demonstrating recaptcha verification. 
""" + import requests try: diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py index d5d4cfe92f20..07429e9a9678 100644 --- a/web_programming/search_books_by_isbn.py +++ b/web_programming/search_books_by_isbn.py @@ -3,6 +3,7 @@ ISBN: https://en.wikipedia.org/wiki/International_Standard_Book_Number """ + from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests From 435309a61aa70303133306c9fe06a3df118c9a5c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 18 Mar 2024 20:46:32 +0100 Subject: [PATCH 1344/1543] [pre-commit.ci] pre-commit autoupdate (#11325) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.3.2 → v0.3.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.2...v0.3.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a17c4c323c30..c4b30f29a5b5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.2 + rev: v0.3.3 hooks: - id: ruff - id: ruff-format From 8faf823e83a1b7a036e2f2569c0c185924c05307 Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:33:40 +0200 Subject: [PATCH 1345/1543] adding a proper fractions algorithm (#11224) * adding a proper fractions algorithm * Implementing suggestions in maths/numerical_analysis/proper_fractions.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Implementing suggestions to proper_fractions.py * Fixing ruff errors in proper_fractions.py * Apply suggestions from code review * ruff check --output-format=github . * Update maths/numerical_analysis/proper_fractions.py * Update proper_fractions.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/ruff.yml | 2 +- maths/numerical_analysis/proper_fractions.py | 40 ++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 maths/numerical_analysis/proper_fractions.py diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index 9ebabed3600a..d354eba672ae 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -13,4 +13,4 @@ jobs: steps: - uses: actions/checkout@v4 - run: pip install --user ruff - - run: ruff --output-format=github . + - run: ruff check --output-format=github . 
diff --git a/maths/numerical_analysis/proper_fractions.py b/maths/numerical_analysis/proper_fractions.py new file mode 100644 index 000000000000..774ce9a24876 --- /dev/null +++ b/maths/numerical_analysis/proper_fractions.py @@ -0,0 +1,40 @@ +from math import gcd + + +def proper_fractions(denominator: int) -> list[str]: + """ + this algorithm returns a list of proper fractions, in the + range between 0 and 1, which can be formed with the given denominator + https://en.wikipedia.org/wiki/Fraction#Proper_and_improper_fractions + + >>> proper_fractions(10) + ['1/10', '3/10', '7/10', '9/10'] + >>> proper_fractions(5) + ['1/5', '2/5', '3/5', '4/5'] + >>> proper_fractions(-15) + Traceback (most recent call last): + ... + ValueError: The Denominator Cannot be less than 0 + >>> proper_fractions(0) + [] + >>> proper_fractions(1.2) + Traceback (most recent call last): + ... + ValueError: The Denominator must be an integer + """ + + if denominator < 0: + raise ValueError("The Denominator Cannot be less than 0") + elif isinstance(denominator, float): + raise ValueError("The Denominator must be an integer") + return [ + f"{numerator}/{denominator}" + for numerator in range(1, denominator) + if gcd(numerator, denominator) == 1 + ] + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From a936e94704b09841784358a4ac002401f3faceed Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 20 Mar 2024 17:00:17 +0300 Subject: [PATCH 1346/1543] Enable ruff ARG001 rule (#11321) * Enable ruff ARG001 rule * Fix dynamic_programming/combination_sum_iv.py * Fix machine_learning/frequent_pattern_growth.py * Fix other/davis_putnam_logemann_loveland.py * Fix other/password.py * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix physics/n_body_simulation.py * Fix project_euler/problem_145/sol1.py * Fix project_euler/problem_174/sol1.py * Fix scheduling/highest_response_ratio_next.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Fix * Fix scheduling/job_sequencing_with_deadline.py * Fix scheduling/job_sequencing_with_deadline.py * Fix * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/combination_sum_iv.py | 11 +++++------ machine_learning/frequent_pattern_growth.py | 4 ++-- other/davis_putnam_logemann_loveland.py | 3 ++- other/password.py | 12 ------------ physics/n_body_simulation.py | 2 +- project_euler/problem_145/sol1.py | 2 +- project_euler/problem_174/sol1.py | 4 +++- pyproject.toml | 1 - scheduling/highest_response_ratio_next.py | 5 ++++- scheduling/job_sequencing_with_deadline.py | 7 +++---- web_programming/nasa_data.py | 2 +- 11 files changed, 22 insertions(+), 31 deletions(-) diff --git a/dynamic_programming/combination_sum_iv.py b/dynamic_programming/combination_sum_iv.py index b2aeb0824f64..4526729b70b7 100644 --- a/dynamic_programming/combination_sum_iv.py +++ b/dynamic_programming/combination_sum_iv.py @@ -22,12 +22,12 @@ """ -def combination_sum_iv(n: int, array: list[int], target: int) -> int: +def combination_sum_iv(array: list[int], target: int) -> int: """ Function checks the all possible combinations, and returns the count of possible combination in exponential Time Complexity. 
- >>> combination_sum_iv(3, [1,2,5], 5) + >>> combination_sum_iv([1,2,5], 5) 9 """ @@ -41,13 +41,13 @@ def count_of_possible_combinations(target: int) -> int: return count_of_possible_combinations(target) -def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int: +def combination_sum_iv_dp_array(array: list[int], target: int) -> int: """ Function checks the all possible combinations, and returns the count of possible combination in O(N^2) Time Complexity as we are using Dynamic programming array here. - >>> combination_sum_iv_dp_array(3, [1,2,5], 5) + >>> combination_sum_iv_dp_array([1,2,5], 5) 9 """ @@ -96,7 +96,6 @@ def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int: import doctest doctest.testmod() - n = 3 target = 5 array = [1, 2, 5] - print(combination_sum_iv(n, array, target)) + print(combination_sum_iv(array, target)) diff --git a/machine_learning/frequent_pattern_growth.py b/machine_learning/frequent_pattern_growth.py index 6b9870f5e1d2..947f8692f298 100644 --- a/machine_learning/frequent_pattern_growth.py +++ b/machine_learning/frequent_pattern_growth.py @@ -240,7 +240,7 @@ def ascend_tree(leaf_node: TreeNode, prefix_path: list[str]) -> None: ascend_tree(leaf_node.parent, prefix_path) -def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict: +def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict: # noqa: ARG001 """ Find the conditional pattern base for a given base pattern. @@ -277,7 +277,7 @@ def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict: def mine_tree( - in_tree: TreeNode, + in_tree: TreeNode, # noqa: ARG001 header_table: dict, min_sup: int, pre_fix: set, diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index 436577eb5b5d..5c6e2d9ffd5e 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -227,7 +227,8 @@ def find_pure_symbols( def find_unit_clauses( - clauses: list[Clause], model: dict[str, bool | None] + clauses: list[Clause], + model: dict[str, bool | None], # noqa: ARG001 ) -> tuple[list[str], dict[str, bool | None]]: """ Returns the unit symbols and their values to satisfy clause. diff --git a/other/password.py b/other/password.py index 1ce0d52316e6..dff1316c049c 100644 --- a/other/password.py +++ b/other/password.py @@ -51,18 +51,6 @@ def random(chars_incl: str, i: int) -> str: return "".join(secrets.choice(chars_incl) for _ in range(i)) -def random_number(chars_incl, i): - pass # Put your code here... - - -def random_letters(chars_incl, i): - pass # Put your code here... - - -def random_characters(chars_incl, i): - pass # Put your code here... - - def is_strong_password(password: str, min_length: int = 8) -> bool: """ This will check whether a given password is strong or not. 
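
For context on the rule this patch enables: ruff's ARG001 check reports function parameters that are never used in the function body. The hunks above apply two remediation patterns - drop the parameter when nothing depends on the signature (combination_sum_iv), or keep it and silence the check explicitly when the signature must stay stable (find_prefix_path, mine_tree, find_unit_clauses). A minimal, hypothetical sketch of both patterns follows; the functions below are illustrative only and are not part of the repository.

def area_before(width: float, height: float, scale: float) -> float:
    # `scale` is accepted but never used, so `ruff check` reports ARG001 here.
    return width * height


def area_without_unused_arg(width: float, height: float) -> float:
    # Pattern 1: remove the unused parameter and update the call sites.
    return width * height


def area_with_noqa(
    width: float,
    height: float,
    scale: float,  # noqa: ARG001  # Pattern 2: keep the parameter, suppress the rule.
) -> float:
    # Keeping the parameter is useful when the signature is part of a stable
    # or callback-style interface that other code relies on.
    return width * height
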
The password must be at diff --git a/physics/n_body_simulation.py b/physics/n_body_simulation.py index 4d555716199a..9bfb6b3c6864 100644 --- a/physics/n_body_simulation.py +++ b/physics/n_body_simulation.py @@ -239,7 +239,7 @@ def plot( ax.add_patch(patch) # Function called at each step of the animation - def update(frame: int) -> list[plt.Circle]: + def update(frame: int) -> list[plt.Circle]: # noqa: ARG001 update_step(body_system, DELTA_TIME, patches) return patches diff --git a/project_euler/problem_145/sol1.py b/project_euler/problem_145/sol1.py index ce4438289722..583bb03a0a90 100644 --- a/project_euler/problem_145/sol1.py +++ b/project_euler/problem_145/sol1.py @@ -110,7 +110,7 @@ def reversible_numbers( if (length - 1) % 4 == 0: return 0 - return slow_reversible_numbers(length, 0, [0] * length, length) + return slow_reversible_numbers(remaining_length, remainder, digits, length) def solution(max_power: int = 9) -> int: diff --git a/project_euler/problem_174/sol1.py b/project_euler/problem_174/sol1.py index cbc0df5a9d65..33c1b158adbb 100644 --- a/project_euler/problem_174/sol1.py +++ b/project_euler/problem_174/sol1.py @@ -26,6 +26,8 @@ def solution(t_limit: int = 1000000, n_limit: int = 10) -> int: Return the sum of N(n) for 1 <= n <= n_limit. >>> solution(1000,5) + 222 + >>> solution(1000,10) 249 >>> solution(10000,10) 2383 @@ -45,7 +47,7 @@ def solution(t_limit: int = 1000000, n_limit: int = 10) -> int: for hole_width in range(hole_width_lower_bound, outer_width - 1, 2): count[outer_width * outer_width - hole_width * hole_width] += 1 - return sum(1 for n in count.values() if 1 <= n <= 10) + return sum(1 for n in count.values() if 1 <= n <= n_limit) if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index 2e7da519da8b..a69ab7aa6437 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,5 @@ [tool.ruff] lint.ignore = [ # `ruff rule S101` for a description of that rule - "ARG001", # Unused function argument `amount` -- FIX ME? "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME "DTZ001", # The use of `datetime.datetime()` without `tzinfo` argument is not allowed -- FIX ME diff --git a/scheduling/highest_response_ratio_next.py b/scheduling/highest_response_ratio_next.py index 112c2a85220f..b549835616bf 100644 --- a/scheduling/highest_response_ratio_next.py +++ b/scheduling/highest_response_ratio_next.py @@ -75,7 +75,10 @@ def calculate_turn_around_time( def calculate_waiting_time( - process_name: list, turn_around_time: list, burst_time: list, no_of_process: int + process_name: list, # noqa: ARG001 + turn_around_time: list, + burst_time: list, + no_of_process: int, ) -> list: """ Calculate the waiting time of each processes. 
diff --git a/scheduling/job_sequencing_with_deadline.py b/scheduling/job_sequencing_with_deadline.py index 7b23c0b3575f..13946948492f 100644 --- a/scheduling/job_sequencing_with_deadline.py +++ b/scheduling/job_sequencing_with_deadline.py @@ -1,9 +1,8 @@ -def job_sequencing_with_deadlines(num_jobs: int, jobs: list) -> list: +def job_sequencing_with_deadlines(jobs: list) -> list: """ Function to find the maximum profit by doing jobs in a given time frame Args: - num_jobs [int]: Number of jobs jobs [list]: A list of tuples of (job_id, deadline, profit) Returns: @@ -11,10 +10,10 @@ def job_sequencing_with_deadlines(num_jobs: int, jobs: list) -> list: in a given time frame Examples: - >>> job_sequencing_with_deadlines(4, + >>> job_sequencing_with_deadlines( ... [(1, 4, 20), (2, 1, 10), (3, 1, 40), (4, 1, 30)]) [2, 60] - >>> job_sequencing_with_deadlines(5, + >>> job_sequencing_with_deadlines( ... [(1, 2, 100), (2, 1, 19), (3, 2, 27), (4, 1, 25), (5, 1, 15)]) [2, 127] """ diff --git a/web_programming/nasa_data.py b/web_programming/nasa_data.py index c0a2c4fdd1a7..81125e0a4f05 100644 --- a/web_programming/nasa_data.py +++ b/web_programming/nasa_data.py @@ -3,7 +3,7 @@ import requests -def get_apod_data(api_key: str, download: bool = False, path: str = ".") -> dict: +def get_apod_data(api_key: str) -> dict: """ Get the APOD(Astronomical Picture of the day) data Get your API Key from: https://api.nasa.gov/ From 481c071e8423ed3b17ddff96b905da3d27d4f7b4 Mon Sep 17 00:00:00 2001 From: Mehdi Oudghiri <144174136+PAxitoo@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:07:55 +0100 Subject: [PATCH 1347/1543] add vicsek to fractals (#11306) Co-authored-by: BastosLaG --- fractals/vicsek.py | 76 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 fractals/vicsek.py diff --git a/fractals/vicsek.py b/fractals/vicsek.py new file mode 100644 index 000000000000..290fe95b79b4 --- /dev/null +++ b/fractals/vicsek.py @@ -0,0 +1,76 @@ +"""Authors Bastien Capiaux & Mehdi Oudghiri + +The Vicsek fractal algorithm is a recursive algorithm that creates a +pattern known as the Vicsek fractal or the Vicsek square. +It is based on the concept of self-similarity, where the pattern at each +level of recursion resembles the overall pattern. +The algorithm involves dividing a square into 9 equal smaller squares, +removing the center square, and then repeating this process on the remaining 8 squares. +This results in a pattern that exhibits self-similarity and has a +square-shaped outline with smaller squares within it. + +Source: https://en.wikipedia.org/wiki/Vicsek_fractal +""" + +import turtle + + +def draw_cross(x: float, y: float, length: float): + """ + Draw a cross at the specified position and with the specified length. + """ + turtle.up() + turtle.goto(x - length / 2, y - length / 6) + turtle.down() + turtle.seth(0) + turtle.begin_fill() + for _ in range(4): + turtle.fd(length / 3) + turtle.right(90) + turtle.fd(length / 3) + turtle.left(90) + turtle.fd(length / 3) + turtle.left(90) + turtle.end_fill() + + +def draw_fractal_recursive(x: float, y: float, length: float, depth: float): + """ + Recursively draw the Vicsek fractal at the specified position, with the + specified length and depth. 
+ """ + if depth == 0: + draw_cross(x, y, length) + return + + draw_fractal_recursive(x, y, length / 3, depth - 1) + draw_fractal_recursive(x + length / 3, y, length / 3, depth - 1) + draw_fractal_recursive(x - length / 3, y, length / 3, depth - 1) + draw_fractal_recursive(x, y + length / 3, length / 3, depth - 1) + draw_fractal_recursive(x, y - length / 3, length / 3, depth - 1) + + +def set_color(rgb: str): + turtle.color(rgb) + + +def draw_vicsek_fractal(x: float, y: float, length: float, depth: float, color="blue"): + """ + Draw the Vicsek fractal at the specified position, with the specified + length and depth. + """ + turtle.speed(0) + turtle.hideturtle() + set_color(color) + draw_fractal_recursive(x, y, length, depth) + turtle.Screen().update() + + +def main(): + draw_vicsek_fractal(0, 0, 800, 4) + + turtle.done() + + +if __name__ == "__main__": + main() From 102e9a31b673e5444678fd55640a0038b6a16a9d Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 25 Mar 2024 10:43:24 +0300 Subject: [PATCH 1348/1543] Enable ruff DTZ001 rule (#11326) * updating DIRECTORY.md * Enable ruff DTZ001 rule * Fix other/gauss_easter.py * Fix * Fix * Fix * Fix * Fix * Fix --------- Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 2 ++ other/gauss_easter.py | 16 ++++++++-------- pyproject.toml | 1 - 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 2f828aa512a9..01667c9feee8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -419,6 +419,7 @@ * [Koch Snowflake](fractals/koch_snowflake.py) * [Mandelbrot](fractals/mandelbrot.py) * [Sierpinski Triangle](fractals/sierpinski_triangle.py) + * [Vicsek](fractals/vicsek.py) ## Fuzzy Logic * [Fuzzy Operations](fuzzy_logic/fuzzy_operations.py) @@ -678,6 +679,7 @@ * [Newton Forward Interpolation](maths/numerical_analysis/newton_forward_interpolation.py) * [Newton Raphson](maths/numerical_analysis/newton_raphson.py) * [Numerical Integration](maths/numerical_analysis/numerical_integration.py) + * [Proper Fractions](maths/numerical_analysis/proper_fractions.py) * [Runge Kutta](maths/numerical_analysis/runge_kutta.py) * [Runge Kutta Fehlberg 45](maths/numerical_analysis/runge_kutta_fehlberg_45.py) * [Runge Kutta Gills](maths/numerical_analysis/runge_kutta_gills.py) diff --git a/other/gauss_easter.py b/other/gauss_easter.py index d1c525593f79..7ccea7f5bbf0 100644 --- a/other/gauss_easter.py +++ b/other/gauss_easter.py @@ -3,7 +3,7 @@ """ import math -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta def gauss_easter(year: int) -> datetime: @@ -11,16 +11,16 @@ def gauss_easter(year: int) -> datetime: Calculation Gregorian easter date for given year >>> gauss_easter(2007) - datetime.datetime(2007, 4, 8, 0, 0) + datetime.datetime(2007, 4, 8, 0, 0, tzinfo=datetime.timezone.utc) >>> gauss_easter(2008) - datetime.datetime(2008, 3, 23, 0, 0) + datetime.datetime(2008, 3, 23, 0, 0, tzinfo=datetime.timezone.utc) >>> gauss_easter(2020) - datetime.datetime(2020, 4, 12, 0, 0) + datetime.datetime(2020, 4, 12, 0, 0, tzinfo=datetime.timezone.utc) >>> gauss_easter(2021) - datetime.datetime(2021, 4, 4, 0, 0) + datetime.datetime(2021, 4, 4, 0, 0, tzinfo=datetime.timezone.utc) """ metonic_cycle = year % 19 julian_leap_year = year % 4 @@ -45,11 +45,11 @@ def gauss_easter(year: int) -> datetime: ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: - return datetime(year, 4, 19) + return datetime(year, 4, 19, tzinfo=UTC) elif days_to_add == 28 and days_from_phm_to_sunday == 6: - return datetime(year, 4, 
18) + return datetime(year, 4, 18, tzinfo=UTC) else: - return datetime(year, 3, 22) + timedelta( + return datetime(year, 3, 22, tzinfo=UTC) + timedelta( days=int(days_to_add + days_from_phm_to_sunday) ) diff --git a/pyproject.toml b/pyproject.toml index a69ab7aa6437..09093433a47a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "DTZ001", # The use of `datetime.datetime()` without `tzinfo` argument is not allowed -- FIX ME "DTZ005", # The use of `datetime.datetime.now()` without `tzinfo` argument is not allowed -- FIX ME "E741", # Ambiguous variable name 'l' -- FIX ME "EM101", # Exception must not use a string literal, assign to variable first From ead54314f26615769ce8b055b25e25f9dbbb1f83 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 20:21:21 +0100 Subject: [PATCH 1349/1543] [pre-commit.ci] pre-commit autoupdate (#11328) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.3.3 → v0.3.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.3...v0.3.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c4b30f29a5b5..8b101207d5ff 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.3 + rev: v0.3.4 hooks: - id: ruff - id: ruff-format From b5cb1fba0debb5df7e5aea6bb069c6e3f130dba5 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 25 Mar 2024 23:54:11 +0300 Subject: [PATCH 1350/1543] Enable ruff DTZ005 rule (#11327) * Enable ruff DTZ005 rule * Fix other/gauss_easter.py * Fix * Fix web_programming/instagram_pic.py * Fix web_programming/instagram_video.py * Apply suggestions from code review * Update instagram_pic.py * datetime.now(tz=UTC).astimezone() * .astimezone() * Fix --------- Co-authored-by: Christian Clauss --- other/gauss_easter.py | 4 ++-- pyproject.toml | 1 - web_programming/instagram_pic.py | 4 ++-- web_programming/instagram_video.py | 4 ++-- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/other/gauss_easter.py b/other/gauss_easter.py index 7ccea7f5bbf0..8c8c37c92796 100644 --- a/other/gauss_easter.py +++ b/other/gauss_easter.py @@ -55,6 +55,6 @@ def gauss_easter(year: int) -> datetime: if __name__ == "__main__": - for year in (1994, 2000, 2010, 2021, 2023): - tense = "will be" if year > datetime.now().year else "was" + for year in (1994, 2000, 2010, 2021, 2023, 2032, 2100): + tense = "will be" if year > datetime.now(tz=UTC).year else "was" print(f"Easter in {year} {tense} {gauss_easter(year)}") diff --git a/pyproject.toml b/pyproject.toml index 09093433a47a..5187491e5ee7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "B904", # Within an `except` clause, raise exceptions with `raise ... 
from err` -- FIX ME "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "DTZ005", # The use of `datetime.datetime.now()` without `tzinfo` argument is not allowed -- FIX ME "E741", # Ambiguous variable name 'l' -- FIX ME "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable" -- FIX ME diff --git a/web_programming/instagram_pic.py b/web_programming/instagram_pic.py index 2630c8659232..2d987c1766dc 100644 --- a/web_programming/instagram_pic.py +++ b/web_programming/instagram_pic.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import UTC, datetime import requests from bs4 import BeautifulSoup @@ -36,7 +36,7 @@ def download_image(url: str) -> str: if not image_data: return f"Failed to download the image from {image_url}." - file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg" + file_name = f"{datetime.now(tz=UTC).astimezone():%Y-%m-%d_%H:%M:%S}.jpg" with open(file_name, "wb") as out_file: out_file.write(image_data) return f"Image downloaded and saved in the file {file_name}" diff --git a/web_programming/instagram_video.py b/web_programming/instagram_video.py index 243cece1a50e..1f1b0e297034 100644 --- a/web_programming/instagram_video.py +++ b/web_programming/instagram_video.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import UTC, datetime import requests @@ -11,7 +11,7 @@ def download_video(url: str) -> bytes: if __name__ == "__main__": url = input("Enter Video/IGTV url: ").strip() - file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4" + file_name = f"{datetime.now(tz=UTC).astimezone():%Y-%m-%d_%H:%M:%S}.mp4" with open(file_name, "wb") as fp: fp.write(download_video(url)) print(f"Done. Video saved to disk as {file_name}.") From 19fd435042a3191f6a5787a6eaf58e9c47920845 Mon Sep 17 00:00:00 2001 From: MrBubb1es <63935943+MrBubb1es@users.noreply.github.com> Date: Thu, 28 Mar 2024 12:19:51 -0500 Subject: [PATCH 1351/1543] Improved doctests for some functions (#11334) --- .../binary_tree/binary_tree_traversals.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 2b33cdca4fed..49c208335b2c 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -97,6 +97,8 @@ def level_order(root: Node | None) -> Generator[int, None, None]: """ Returns a list of nodes value from a whole binary tree in Level Order Traverse. Level Order traverse: Visit nodes of the tree level-by-level. + >>> list(level_order(make_tree())) + [1, 2, 3, 4, 5] """ if root is None: @@ -120,6 +122,10 @@ def get_nodes_from_left_to_right( """ Returns a list of nodes value from a particular level: Left to right direction of the binary tree. + >>> list(get_nodes_from_left_to_right(make_tree(), 1)) + [1] + >>> list(get_nodes_from_left_to_right(make_tree(), 2)) + [2, 3] """ def populate_output(root: Node | None, level: int) -> Generator[int, None, None]: @@ -140,10 +146,14 @@ def get_nodes_from_right_to_left( """ Returns a list of nodes value from a particular level: Right to left direction of the binary tree. 
+ >>> list(get_nodes_from_right_to_left(make_tree(), 1)) + [1] + >>> list(get_nodes_from_right_to_left(make_tree(), 2)) + [3, 2] """ def populate_output(root: Node | None, level: int) -> Generator[int, None, None]: - if root is None: + if not root: return if level == 1: yield root.data @@ -158,6 +168,8 @@ def zigzag(root: Node | None) -> Generator[int, None, None]: """ ZigZag traverse: Returns a list of nodes value from left to right and right to left, alternatively. + >>> list(zigzag(make_tree())) + [1, 3, 2, 4, 5] """ if root is None: return From 516a3028d1f6b6e7e11ae4501fdaee50a0965464 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 20:25:41 +0300 Subject: [PATCH 1352/1543] Enable ruff PLR5501 rule (#11332) * Enable ruff PLR5501 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- backtracking/crossword_puzzle_solver.py | 5 +- cellular_automata/game_of_life.py | 5 +- ciphers/decrypt_caesar_with_chi_squared.py | 21 +++--- data_structures/binary_tree/avl_tree.py | 10 +-- .../binary_tree/binary_search_tree.py | 9 ++- .../binary_search_tree_recursive.py | 22 +++---- data_structures/binary_tree/red_black_tree.py | 66 +++++++++---------- data_structures/binary_tree/treap.py | 29 ++++---- data_structures/heap/max_heap.py | 7 +- .../stacks/infix_to_prefix_conversion.py | 13 ++-- data_structures/trie/radix_tree.py | 45 +++++++------ divide_and_conquer/convex_hull.py | 15 ++--- graphs/graph_list.py | 46 ++++++------- graphs/minimum_spanning_tree_prims.py | 7 +- graphs/multi_heuristic_astar.py | 33 +++++----- machine_learning/forecasting/run.py | 7 +- maths/largest_of_very_large_numbers.py | 9 ++- maths/pollard_rho.py | 13 ++-- matrix/cramers_rule_2x2.py | 15 ++--- project_euler/problem_019/sol1.py | 7 +- pyproject.toml | 1 - searches/hill_climbing.py | 7 +- searches/interpolation_search.py | 35 +++++----- strings/min_cost_string_conversion.py | 23 ++++--- 24 files changed, 211 insertions(+), 239 deletions(-) diff --git a/backtracking/crossword_puzzle_solver.py b/backtracking/crossword_puzzle_solver.py index b9c01c4efea9..e702c7e52153 100644 --- a/backtracking/crossword_puzzle_solver.py +++ b/backtracking/crossword_puzzle_solver.py @@ -28,9 +28,8 @@ def is_valid( if vertical: if row + i >= len(puzzle) or puzzle[row + i][col] != "": return False - else: - if col + i >= len(puzzle[0]) or puzzle[row][col + i] != "": - return False + elif col + i >= len(puzzle[0]) or puzzle[row][col + i] != "": + return False return True diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index 67e647d6475b..76276b272d65 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -101,9 +101,8 @@ def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool: state = True elif alive > 3: state = False - else: - if alive == 3: - state = True + elif alive == 3: + state = True return state diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py index 6c36860207cd..10832203e531 100644 --- a/ciphers/decrypt_caesar_with_chi_squared.py +++ b/ciphers/decrypt_caesar_with_chi_squared.py @@ -206,20 +206,19 @@ def decrypt_caesar_with_chi_squared( # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value - else: - if letter.lower() in frequencies: - # Get the amount of times the letter occurs in the 
message - occurrences = decrypted_with_shift.count(letter) + elif letter.lower() in frequencies: + # Get the amount of times the letter occurs in the message + occurrences = decrypted_with_shift.count(letter) - # Get the excepcted amount of times the letter should appear based - # on letter frequencies - expected = frequencies[letter] * occurrences + # Get the excepcted amount of times the letter should appear based + # on letter frequencies + expected = frequencies[letter] * occurrences - # Complete the chi squared statistic formula - chi_letter_value = ((occurrences - expected) ** 2) / expected + # Complete the chi squared statistic formula + chi_letter_value = ((occurrences - expected) ** 2) / expected - # Add the margin of error to the total chi squared statistic - chi_squared_statistic += chi_letter_value + # Add the margin of error to the total chi squared statistic + chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary chi_squared_statistic_values[shift] = ( diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 041ed7e36d16..9fca7237404c 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -215,11 +215,11 @@ def del_node(root: MyNode, data: Any) -> MyNode | None: return root else: root.set_left(del_node(left_child, data)) - else: # root.get_data() < data - if right_child is None: - return root - else: - root.set_right(del_node(right_child, data)) + # root.get_data() < data + elif right_child is None: + return root + else: + root.set_right(del_node(right_child, data)) if get_height(right_child) - get_height(left_child) == 2: assert right_child is not None diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 08a60a12065d..090e3e25fe6d 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -185,12 +185,11 @@ def __insert(self, value) -> None: break else: parent_node = parent_node.left + elif parent_node.right is None: + parent_node.right = new_node + break else: - if parent_node.right is None: - parent_node.right = new_node - break - else: - parent_node = parent_node.right + parent_node = parent_node.right new_node.parent = parent_node def insert(self, *values) -> Self: diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index 6af1b053f42c..d94ac5253360 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -74,14 +74,13 @@ def put(self, label: int) -> None: def _put(self, node: Node | None, label: int, parent: Node | None = None) -> Node: if node is None: node = Node(label, parent) + elif label < node.label: + node.left = self._put(node.left, label, node) + elif label > node.label: + node.right = self._put(node.right, label, node) else: - if label < node.label: - node.left = self._put(node.left, label, node) - elif label > node.label: - node.right = self._put(node.right, label, node) - else: - msg = f"Node with label {label} already exists" - raise ValueError(msg) + msg = f"Node with label {label} already exists" + raise ValueError(msg) return node @@ -106,11 +105,10 @@ def _search(self, node: Node | None, label: int) -> Node: if node is None: msg = f"Node with label {label} does not exist" raise ValueError(msg) - else: - if label < 
node.label: - node = self._search(node.left, label) - elif label > node.label: - node = self._search(node.right, label) + elif label < node.label: + node = self._search(node.left, label) + elif label > node.label: + node = self._search(node.right, label) return node diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index 3b5845cd957b..bdd808c828e0 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -107,12 +107,11 @@ def insert(self, label: int) -> RedBlackTree: else: self.left = RedBlackTree(label, 1, self) self.left._insert_repair() + elif self.right: + self.right.insert(label) else: - if self.right: - self.right.insert(label) - else: - self.right = RedBlackTree(label, 1, self) - self.right._insert_repair() + self.right = RedBlackTree(label, 1, self) + self.right._insert_repair() return self.parent or self def _insert_repair(self) -> None: @@ -178,36 +177,34 @@ def remove(self, label: int) -> RedBlackTree: # noqa: PLR0912 self.parent.left = None else: self.parent.right = None - else: - # The node is black - if child is None: - # This node and its child are black - if self.parent is None: - # The tree is now empty - return RedBlackTree(None) - else: - self._remove_repair() - if self.is_left(): - self.parent.left = None - else: - self.parent.right = None - self.parent = None + # The node is black + elif child is None: + # This node and its child are black + if self.parent is None: + # The tree is now empty + return RedBlackTree(None) else: - # This node is black and its child is red - # Move the child node here and make it black - self.label = child.label - self.left = child.left - self.right = child.right - if self.left: - self.left.parent = self - if self.right: - self.right.parent = self + self._remove_repair() + if self.is_left(): + self.parent.left = None + else: + self.parent.right = None + self.parent = None + else: + # This node is black and its child is red + # Move the child node here and make it black + self.label = child.label + self.left = child.left + self.right = child.right + if self.left: + self.left.parent = self + if self.right: + self.right.parent = self elif self.label is not None and self.label > label: if self.left: self.left.remove(label) - else: - if self.right: - self.right.remove(label) + elif self.right: + self.right.remove(label) return self.parent or self def _remove_repair(self) -> None: @@ -369,11 +366,10 @@ def search(self, label: int) -> RedBlackTree | None: return None else: return self.right.search(label) + elif self.left is None: + return None else: - if self.left is None: - return None - else: - return self.left.search(label) + return self.left.search(label) def floor(self, label: int) -> int | None: """Returns the largest element in this tree which is at most label. diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py index a53ac566ed54..e7ddf931b83a 100644 --- a/data_structures/binary_tree/treap.py +++ b/data_structures/binary_tree/treap.py @@ -43,22 +43,21 @@ def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]: return None, None elif root.value is None: return None, None + elif value < root.value: + """ + Right tree's root will be current node. 
+ Now we split(with the same value) current node's left son + Left tree: left part of that split + Right tree's left son: right part of that split + """ + left, root.left = split(root.left, value) + return left, root else: - if value < root.value: - """ - Right tree's root will be current node. - Now we split(with the same value) current node's left son - Left tree: left part of that split - Right tree's left son: right part of that split - """ - left, root.left = split(root.left, value) - return left, root - else: - """ - Just symmetric to previous case - """ - root.right, right = split(root.right, value) - return root, right + """ + Just symmetric to previous case + """ + root.right, right = split(root.right, value) + return root, right def merge(left: Node | None, right: Node | None) -> Node | None: diff --git a/data_structures/heap/max_heap.py b/data_structures/heap/max_heap.py index fbc8eed09226..5a9f9cf88433 100644 --- a/data_structures/heap/max_heap.py +++ b/data_structures/heap/max_heap.py @@ -40,11 +40,10 @@ def __swap_down(self, i: int) -> None: while self.__size >= 2 * i: if 2 * i + 1 > self.__size: bigger_child = 2 * i + elif self.__heap[2 * i] > self.__heap[2 * i + 1]: + bigger_child = 2 * i else: - if self.__heap[2 * i] > self.__heap[2 * i + 1]: - bigger_child = 2 * i - else: - bigger_child = 2 * i + 1 + bigger_child = 2 * i + 1 temporary = self.__heap[i] if self.__heap[i] < self.__heap[bigger_child]: self.__heap[i] = self.__heap[bigger_child] diff --git a/data_structures/stacks/infix_to_prefix_conversion.py b/data_structures/stacks/infix_to_prefix_conversion.py index beff421c0cfa..878473b93c19 100644 --- a/data_structures/stacks/infix_to_prefix_conversion.py +++ b/data_structures/stacks/infix_to_prefix_conversion.py @@ -95,13 +95,12 @@ def infix_2_postfix(infix: str) -> str: while stack[-1] != "(": post_fix.append(stack.pop()) # Pop stack & add the content to Postfix stack.pop() - else: - if len(stack) == 0: - stack.append(x) # If stack is empty, push x to stack - else: # while priority of x is not > priority of element in the stack - while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]: - post_fix.append(stack.pop()) # pop stack & add to Postfix - stack.append(x) # push x to stack + elif len(stack) == 0: + stack.append(x) # If stack is empty, push x to stack + else: # while priority of x is not > priority of element in the stack + while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]: + post_fix.append(stack.pop()) # pop stack & add to Postfix + stack.append(x) # push x to stack print( x.center(8), diff --git a/data_structures/trie/radix_tree.py b/data_structures/trie/radix_tree.py index fadc50cb49a7..caf566a6ce30 100644 --- a/data_structures/trie/radix_tree.py +++ b/data_structures/trie/radix_tree.py @@ -153,31 +153,30 @@ def delete(self, word: str) -> bool: # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(remaining_word) + # If it is not a leaf, we don't have to delete + elif not incoming_node.is_leaf: + return False else: - # If it is not a leaf, we don't have to delete - if not incoming_node.is_leaf: - return False + # We delete the nodes if no edges go from it + if len(incoming_node.nodes) == 0: + del self.nodes[word[0]] + # We merge the current node with its only child + if len(self.nodes) == 1 and not self.is_leaf: + merging_node = next(iter(self.nodes.values())) + self.is_leaf = merging_node.is_leaf + self.prefix += merging_node.prefix + self.nodes = merging_node.nodes + # 
If there is more than 1 edge, we just mark it as non-leaf + elif len(incoming_node.nodes) > 1: + incoming_node.is_leaf = False + # If there is 1 edge, we merge it with its child else: - # We delete the nodes if no edges go from it - if len(incoming_node.nodes) == 0: - del self.nodes[word[0]] - # We merge the current node with its only child - if len(self.nodes) == 1 and not self.is_leaf: - merging_node = next(iter(self.nodes.values())) - self.is_leaf = merging_node.is_leaf - self.prefix += merging_node.prefix - self.nodes = merging_node.nodes - # If there is more than 1 edge, we just mark it as non-leaf - elif len(incoming_node.nodes) > 1: - incoming_node.is_leaf = False - # If there is 1 edge, we merge it with its child - else: - merging_node = next(iter(incoming_node.nodes.values())) - incoming_node.is_leaf = merging_node.is_leaf - incoming_node.prefix += merging_node.prefix - incoming_node.nodes = merging_node.nodes - - return True + merging_node = next(iter(incoming_node.nodes.values())) + incoming_node.is_leaf = merging_node.is_leaf + incoming_node.prefix += merging_node.prefix + incoming_node.nodes = merging_node.nodes + + return True def print_tree(self, height: int = 0) -> None: """Print the tree diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index a5d8b713bdbc..93f6daf1f88c 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -274,14 +274,13 @@ def convex_hull_bf(points: list[Point]) -> list[Point]: points_left_of_ij = True elif det_k < 0: points_right_of_ij = True - else: - # point[i], point[j], point[k] all lie on a straight line - # if point[k] is to the left of point[i] or it's to the - # right of point[j], then point[i], point[j] cannot be - # part of the convex hull of A - if points[k] < points[i] or points[k] > points[j]: - ij_part_of_convex_hull = False - break + # point[i], point[j], point[k] all lie on a straight line + # if point[k] is to the left of point[i] or it's to the + # right of point[j], then point[i], point[j] cannot be + # part of the convex hull of A + elif points[k] < points[i] or points[k] > points[j]: + ij_part_of_convex_hull = False + break if points_left_of_ij and points_right_of_ij: ij_part_of_convex_hull = False diff --git a/graphs/graph_list.py b/graphs/graph_list.py index e871f3b8a9d6..6563cbb76132 100644 --- a/graphs/graph_list.py +++ b/graphs/graph_list.py @@ -120,29 +120,29 @@ def add_edge( else: self.adj_list[source_vertex] = [destination_vertex] self.adj_list[destination_vertex] = [source_vertex] - else: # For directed graphs - # if both source vertex and destination vertex are present in adjacency - # list, add destination vertex to source vertex list of adjacent vertices. 
- if source_vertex in self.adj_list and destination_vertex in self.adj_list: - self.adj_list[source_vertex].append(destination_vertex) - # if only source vertex is present in adjacency list, add destination - # vertex to source vertex list of adjacent vertices and create a new vertex - # with destination vertex as key, which has no adjacent vertex - elif source_vertex in self.adj_list: - self.adj_list[source_vertex].append(destination_vertex) - self.adj_list[destination_vertex] = [] - # if only destination vertex is present in adjacency list, create a new - # vertex with source vertex as key and assign a list containing destination - # vertex as first adjacent vertex - elif destination_vertex in self.adj_list: - self.adj_list[source_vertex] = [destination_vertex] - # if both source vertex and destination vertex are not present in adjacency - # list, create a new vertex with source vertex as key and a list containing - # destination vertex as it's first adjacent vertex. Then create a new vertex - # with destination vertex as key, which has no adjacent vertex - else: - self.adj_list[source_vertex] = [destination_vertex] - self.adj_list[destination_vertex] = [] + # For directed graphs + # if both source vertex and destination vertex are present in adjacency + # list, add destination vertex to source vertex list of adjacent vertices. + elif source_vertex in self.adj_list and destination_vertex in self.adj_list: + self.adj_list[source_vertex].append(destination_vertex) + # if only source vertex is present in adjacency list, add destination + # vertex to source vertex list of adjacent vertices and create a new vertex + # with destination vertex as key, which has no adjacent vertex + elif source_vertex in self.adj_list: + self.adj_list[source_vertex].append(destination_vertex) + self.adj_list[destination_vertex] = [] + # if only destination vertex is present in adjacency list, create a new + # vertex with source vertex as key and assign a list containing destination + # vertex as first adjacent vertex + elif destination_vertex in self.adj_list: + self.adj_list[source_vertex] = [destination_vertex] + # if both source vertex and destination vertex are not present in adjacency + # list, create a new vertex with source vertex as key and a list containing + # destination vertex as it's first adjacent vertex. 
Then create a new vertex + # with destination vertex as key, which has no adjacent vertex + else: + self.adj_list[source_vertex] = [destination_vertex] + self.adj_list[destination_vertex] = [] return self diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 5a08ec57ff4d..90c9f4c91e86 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -18,11 +18,10 @@ def top_to_bottom(self, heap, start, size, positions): else: if 2 * start + 2 >= size: smallest_child = 2 * start + 1 + elif heap[2 * start + 1] < heap[2 * start + 2]: + smallest_child = 2 * start + 1 else: - if heap[2 * start + 1] < heap[2 * start + 2]: - smallest_child = 2 * start + 1 - else: - smallest_child = 2 * start + 2 + smallest_child = 2 * start + 2 if heap[smallest_child] < heap[start]: temp, temp1 = heap[smallest_child], positions[smallest_child] heap[smallest_child], positions[smallest_child] = ( diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index 0a18ede6ed41..6af9a187a4e9 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -270,24 +270,23 @@ def multi_a_star(start: TPos, goal: TPos, n_heuristic: int): back_pointer, ) close_list_inad.append(get_s) + elif g_function[goal] <= open_list[0].minkey(): + if g_function[goal] < float("inf"): + do_something(back_pointer, goal, start) else: - if g_function[goal] <= open_list[0].minkey(): - if g_function[goal] < float("inf"): - do_something(back_pointer, goal, start) - else: - get_s = open_list[0].top_show() - visited.add(get_s) - expand_state( - get_s, - 0, - visited, - g_function, - close_list_anchor, - close_list_inad, - open_list, - back_pointer, - ) - close_list_anchor.append(get_s) + get_s = open_list[0].top_show() + visited.add(get_s) + expand_state( + get_s, + 0, + visited, + g_function, + close_list_anchor, + close_list_inad, + open_list, + back_pointer, + ) + close_list_anchor.append(get_s) print("No path found to goal") print() for i in range(n - 1, -1, -1): diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py index 64e719daacc2..dbb86caf8568 100644 --- a/machine_learning/forecasting/run.py +++ b/machine_learning/forecasting/run.py @@ -113,11 +113,10 @@ def data_safety_checker(list_vote: list, actual_result: float) -> bool: for i in list_vote: if i > actual_result: safe = not_safe + 1 + elif abs(abs(i) - abs(actual_result)) <= 0.1: + safe += 1 else: - if abs(abs(i) - abs(actual_result)) <= 0.1: - safe += 1 - else: - not_safe += 1 + not_safe += 1 return safe > not_safe diff --git a/maths/largest_of_very_large_numbers.py b/maths/largest_of_very_large_numbers.py index eb5c121fd262..edee50371e02 100644 --- a/maths/largest_of_very_large_numbers.py +++ b/maths/largest_of_very_large_numbers.py @@ -20,11 +20,10 @@ def res(x, y): if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.log10(x) - else: - if x == 0: # 0 raised to any number is 0 - return 0 - elif y == 0: - return 1 # any number raised to 0 is 1 + elif x == 0: # 0 raised to any number is 0 + return 0 + elif y == 0: + return 1 # any number raised to 0 is 1 raise AssertionError("This should never happen") diff --git a/maths/pollard_rho.py b/maths/pollard_rho.py index 5082f54f71a8..e8bc89cef6c5 100644 --- a/maths/pollard_rho.py +++ b/maths/pollard_rho.py @@ -94,14 +94,13 @@ def rand_fn(value: int, step: int, modulus: int) -> int: if divisor == 1: # No common divisor yet, just keep searching. 
continue + # We found a common divisor! + elif divisor == num: + # Unfortunately, the divisor is ``num`` itself and is useless. + break else: - # We found a common divisor! - if divisor == num: - # Unfortunately, the divisor is ``num`` itself and is useless. - break - else: - # The divisor is a nontrivial factor of ``num``! - return divisor + # The divisor is a nontrivial factor of ``num``! + return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare diff --git a/matrix/cramers_rule_2x2.py b/matrix/cramers_rule_2x2.py index 4f52dbe646ad..081035bec002 100644 --- a/matrix/cramers_rule_2x2.py +++ b/matrix/cramers_rule_2x2.py @@ -73,12 +73,11 @@ def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, raise ValueError("Infinite solutions. (Consistent system)") else: raise ValueError("No solution. (Inconsistent system)") + elif determinant_x == determinant_y == 0: + # Trivial solution (Inconsistent system) + return (0.0, 0.0) else: - if determinant_x == determinant_y == 0: - # Trivial solution (Inconsistent system) - return (0.0, 0.0) - else: - x = determinant_x / determinant - y = determinant_y / determinant - # Non-Trivial Solution (Consistent system) - return (x, y) + x = determinant_x / determinant + y = determinant_y / determinant + # Non-Trivial Solution (Consistent system) + return (x, y) diff --git a/project_euler/problem_019/sol1.py b/project_euler/problem_019/sol1.py index 0e38137d4f01..656f104c390d 100644 --- a/project_euler/problem_019/sol1.py +++ b/project_euler/problem_019/sol1.py @@ -46,10 +46,9 @@ def solution(): elif day > 29 and month == 2: month += 1 day = day - 29 - else: - if day > days_per_month[month - 1]: - month += 1 - day = day - days_per_month[month - 2] + elif day > days_per_month[month - 1]: + month += 1 + day = day - days_per_month[month - 2] if month > 12: year += 1 diff --git a/pyproject.toml b/pyproject.toml index 5187491e5ee7..290a6b7599be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLR5501", # Consider using `elif` instead of `else` -- FIX ME "PLW0120", # `else` clause on loop without a `break` statement -- FIX ME "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME diff --git a/searches/hill_climbing.py b/searches/hill_climbing.py index 83a3b8b74e27..689b7e5cca8f 100644 --- a/searches/hill_climbing.py +++ b/searches/hill_climbing.py @@ -137,11 +137,10 @@ def hill_climbing( if change > max_change and change > 0: max_change = change next_state = neighbor - else: # finding min + elif change < min_change and change < 0: # finding min # to direction with greatest descent - if change < min_change and change < 0: - min_change = change - next_state = neighbor + min_change = change + next_state = neighbor if next_state is not None: # we found at least one neighbor which improved the current state current_state = next_state diff --git a/searches/interpolation_search.py b/searches/interpolation_search.py index 49194c2600a0..0591788aa40b 100644 --- a/searches/interpolation_search.py +++ b/searches/interpolation_search.py @@ -33,18 +33,16 @@ def 
interpolation_search(sorted_collection, item): current_item = sorted_collection[point] if current_item == item: return point + elif point < left: + right = left + left = point + elif point > right: + left = right + right = point + elif item < current_item: + right = point - 1 else: - if point < left: - right = left - left = point - elif point > right: - left = right - right = point - else: - if item < current_item: - right = point - 1 - else: - left = point + 1 + left = point + 1 return None @@ -79,15 +77,14 @@ def interpolation_search_by_recursion(sorted_collection, item, left, right): return interpolation_search_by_recursion(sorted_collection, item, point, left) elif point > right: return interpolation_search_by_recursion(sorted_collection, item, right, left) + elif sorted_collection[point] > item: + return interpolation_search_by_recursion( + sorted_collection, item, left, point - 1 + ) else: - if sorted_collection[point] > item: - return interpolation_search_by_recursion( - sorted_collection, item, left, point - 1 - ) - else: - return interpolation_search_by_recursion( - sorted_collection, item, point + 1, right - ) + return interpolation_search_by_recursion( + sorted_collection, item, point + 1, right + ) def __assert_sorted(collection): diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index 0fad0b88c370..d147a9d7954c 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -60,19 +60,18 @@ def compute_transform_tables( def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: if i == 0 and j == 0: return [] + elif ops[i][j][0] in {"C", "R"}: + seq = assemble_transformation(ops, i - 1, j - 1) + seq.append(ops[i][j]) + return seq + elif ops[i][j][0] == "D": + seq = assemble_transformation(ops, i - 1, j) + seq.append(ops[i][j]) + return seq else: - if ops[i][j][0] in {"C", "R"}: - seq = assemble_transformation(ops, i - 1, j - 1) - seq.append(ops[i][j]) - return seq - elif ops[i][j][0] == "D": - seq = assemble_transformation(ops, i - 1, j) - seq.append(ops[i][j]) - return seq - else: - seq = assemble_transformation(ops, i, j - 1) - seq.append(ops[i][j]) - return seq + seq = assemble_transformation(ops, i, j - 1) + seq.append(ops[i][j]) + return seq if __name__ == "__main__": From da47d5c88ccf18e27c5b8f10830376031ad1792a Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 20:26:41 +0300 Subject: [PATCH 1353/1543] Enable ruff N999 rule (#11331) * Enable ruff N999 rule * updating DIRECTORY.md --------- Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 6 +++--- ...(nlogn).py => longest_increasing_subsequence_o_nlogn.py} | 0 ...)_graph.py => directed_and_undirected_weighted_graph.py} | 0 ...eural_network.py => two_hidden_layers_neural_network.py} | 0 pyproject.toml | 1 - 5 files changed, 3 insertions(+), 4 deletions(-) rename dynamic_programming/{longest_increasing_subsequence_o(nlogn).py => longest_increasing_subsequence_o_nlogn.py} (100%) rename graphs/{directed_and_undirected_(weighted)_graph.py => directed_and_undirected_weighted_graph.py} (100%) rename neural_network/{2_hidden_layers_neural_network.py => two_hidden_layers_neural_network.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 01667c9feee8..f6d6cb463faa 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -351,7 +351,7 @@ * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) * [Longest Common Substring](dynamic_programming/longest_common_substring.py) * [Longest Increasing 
Subsequence](dynamic_programming/longest_increasing_subsequence.py) - * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py) + * [Longest Increasing Subsequence O Nlogn](dynamic_programming/longest_increasing_subsequence_o_nlogn.py) * [Longest Palindromic Subsequence](dynamic_programming/longest_palindromic_subsequence.py) * [Matrix Chain Multiplication](dynamic_programming/matrix_chain_multiplication.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) @@ -465,7 +465,7 @@ * [Dijkstra Alternate](graphs/dijkstra_alternate.py) * [Dijkstra Binary Grid](graphs/dijkstra_binary_grid.py) * [Dinic](graphs/dinic.py) - * [Directed And Undirected (Weighted) Graph](graphs/directed_and_undirected_(weighted)_graph.py) + * [Directed And Undirected Weighted Graph](graphs/directed_and_undirected_weighted_graph.py) * [Edmonds Karp Multiple Source And Sink](graphs/edmonds_karp_multiple_source_and_sink.py) * [Eulerian Path And Circuit For Undirected Graph](graphs/eulerian_path_and_circuit_for_undirected_graph.py) * [Even Tree](graphs/even_tree.py) @@ -792,7 +792,6 @@ * [Minimum Cut](networking_flow/minimum_cut.py) ## Neural Network - * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) * Activation Functions * [Binary Step](neural_network/activation_functions/binary_step.py) * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) @@ -809,6 +808,7 @@ * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Input Data](neural_network/input_data.py) * [Simple Neural Network](neural_network/simple_neural_network.py) + * [Two Hidden Layers Neural Network](neural_network/two_hidden_layers_neural_network.py) ## Other * [Activity Selection](other/activity_selection.py) diff --git a/dynamic_programming/longest_increasing_subsequence_o(nlogn).py b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py similarity index 100% rename from dynamic_programming/longest_increasing_subsequence_o(nlogn).py rename to dynamic_programming/longest_increasing_subsequence_o_nlogn.py diff --git a/graphs/directed_and_undirected_(weighted)_graph.py b/graphs/directed_and_undirected_weighted_graph.py similarity index 100% rename from graphs/directed_and_undirected_(weighted)_graph.py rename to graphs/directed_and_undirected_weighted_graph.py diff --git a/neural_network/2_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py similarity index 100% rename from neural_network/2_hidden_layers_neural_network.py rename to neural_network/two_hidden_layers_neural_network.py diff --git a/pyproject.toml b/pyproject.toml index 290a6b7599be..5b2eb07b4555 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "G004", # Logging statement uses f-string "ICN001", # `matplotlib.pyplot` should be imported as `plt` -- FIX ME "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. 
-- FIX ME - "N999", # Invalid module name -- FIX ME "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey From efb7463cde48305cfebb8a547273c93edbdaaee5 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 20:28:54 +0300 Subject: [PATCH 1354/1543] Enable ruff PLW0120 rule (#11330) Co-authored-by: Christian Clauss --- pyproject.toml | 1 - searches/fibonacci_search.py | 3 +-- searches/ternary_search.py | 3 +-- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5b2eb07b4555..b9f3115df92a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLW0120", # `else` clause on loop without a `break` statement -- FIX ME "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception diff --git a/searches/fibonacci_search.py b/searches/fibonacci_search.py index ec3dfa7f30f6..7b2252a68be2 100644 --- a/searches/fibonacci_search.py +++ b/searches/fibonacci_search.py @@ -123,8 +123,7 @@ def fibonacci_search(arr: list, val: int) -> int: elif val > item_k_1: offset += fibonacci(fibb_k - 1) fibb_k -= 2 - else: - return -1 + return -1 if __name__ == "__main__": diff --git a/searches/ternary_search.py b/searches/ternary_search.py index 8dcd6b5bde2e..73e4b1ddc68b 100644 --- a/searches/ternary_search.py +++ b/searches/ternary_search.py @@ -106,8 +106,7 @@ def ite_ternary_search(array: list[int], target: int) -> int: else: left = one_third + 1 right = two_third - 1 - else: - return -1 + return -1 def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int: From f2246ce7fd539d94fd9299bd2fe42469dafab03f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Thu, 28 Mar 2024 21:03:23 +0300 Subject: [PATCH 1355/1543] Enable ruff ICN001 rule (#11329) * Enable ruff ICN001 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ciphers/hill_cipher.py | 38 ++++----- fractals/julia_sets.py | 54 ++++++------ fractals/koch_snowflake.py | 34 ++++---- graphics/bezier_curve.py | 2 +- machine_learning/gradient_descent.py | 4 +- neural_network/input_data.py | 32 +++---- .../two_hidden_layers_neural_network.py | 84 +++++++++---------- pyproject.toml | 1 - 8 files changed, 121 insertions(+), 128 deletions(-) diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index ea337a72dc04..33b2529f017b 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -38,7 +38,7 @@ import string -import numpy +import numpy as np from maths.greatest_common_divisor import greatest_common_divisor @@ -49,11 +49,11 @@ class HillCipher: # i.e. 
a total of 36 characters # take x and return x % len(key_string) - modulus = numpy.vectorize(lambda x: x % 36) + modulus = np.vectorize(lambda x: x % 36) - to_int = numpy.vectorize(round) + to_int = np.vectorize(round) - def __init__(self, encrypt_key: numpy.ndarray) -> None: + def __init__(self, encrypt_key: np.ndarray) -> None: """ encrypt_key is an NxN numpy array """ @@ -63,7 +63,7 @@ def __init__(self, encrypt_key: numpy.ndarray) -> None: def replace_letters(self, letter: str) -> int: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.replace_letters('T') 19 >>> hill_cipher.replace_letters('0') @@ -73,7 +73,7 @@ def replace_letters(self, letter: str) -> int: def replace_digits(self, num: int) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.replace_digits(19) 'T' >>> hill_cipher.replace_digits(26) @@ -83,10 +83,10 @@ def replace_digits(self, num: int) -> str: def check_determinant(self) -> None: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.check_determinant() """ - det = round(numpy.linalg.det(self.encrypt_key)) + det = round(np.linalg.det(self.encrypt_key)) if det < 0: det = det % len(self.key_string) @@ -101,7 +101,7 @@ def check_determinant(self) -> None: def process_text(self, text: str) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.process_text('Testing Hill Cipher') 'TESTINGHILLCIPHERR' >>> hill_cipher.process_text('hello') @@ -117,7 +117,7 @@ def process_text(self, text: str) -> str: def encrypt(self, text: str) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.encrypt('testing hill cipher') 'WHXYJOLM9C6XT085LL' >>> hill_cipher.encrypt('hello') @@ -129,7 +129,7 @@ def encrypt(self, text: str) -> str: for i in range(0, len(text) - self.break_key + 1, self.break_key): batch = text[i : i + self.break_key] vec = [self.replace_letters(char) for char in batch] - batch_vec = numpy.array([vec]).T + batch_vec = np.array([vec]).T batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[ 0 ] @@ -140,14 +140,14 @@ def encrypt(self, text: str) -> str: return encrypted - def make_decrypt_key(self) -> numpy.ndarray: + def make_decrypt_key(self) -> np.ndarray: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.make_decrypt_key() array([[ 6, 25], [ 5, 26]]) """ - det = round(numpy.linalg.det(self.encrypt_key)) + det = round(np.linalg.det(self.encrypt_key)) if det < 0: det = det % len(self.key_string) @@ -158,16 +158,14 @@ def make_decrypt_key(self) -> numpy.ndarray: break inv_key = ( - det_inv - * numpy.linalg.det(self.encrypt_key) - * numpy.linalg.inv(self.encrypt_key) + det_inv * np.linalg.det(self.encrypt_key) * np.linalg.inv(self.encrypt_key) ) return self.to_int(self.modulus(inv_key)) def decrypt(self, text: str) -> str: """ - >>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]])) + >>> hill_cipher = HillCipher(np.array([[2, 5], [1, 6]])) >>> hill_cipher.decrypt('WHXYJOLM9C6XT085LL') 'TESTINGHILLCIPHERR' >>> hill_cipher.decrypt('85FF00') @@ -180,7 +178,7 @@ def decrypt(self, text: str) -> str: for i 
in range(0, len(text) - self.break_key + 1, self.break_key): batch = text[i : i + self.break_key] vec = [self.replace_letters(char) for char in batch] - batch_vec = numpy.array([vec]).T + batch_vec = np.array([vec]).T batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0] decrypted_batch = "".join( self.replace_digits(num) for num in batch_decrypted @@ -199,7 +197,7 @@ def main() -> None: row = [int(x) for x in input().split()] hill_matrix.append(row) - hc = HillCipher(numpy.array(hill_matrix)) + hc = HillCipher(np.array(hill_matrix)) print("Would you like to encrypt or decrypt some text? (1 or 2)") option = input("\n1. Encrypt\n2. Decrypt\n") diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 482e1eddfecc..1eef4573ba19 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -25,8 +25,8 @@ from collections.abc import Callable from typing import Any -import numpy -from matplotlib import pyplot +import matplotlib.pyplot as plt +import numpy as np c_cauliflower = 0.25 + 0.0j c_polynomial_1 = -0.4 + 0.6j @@ -37,22 +37,20 @@ nb_pixels = 666 -def eval_exponential(c_parameter: complex, z_values: numpy.ndarray) -> numpy.ndarray: +def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray: """ Evaluate $e^z + c$. >>> eval_exponential(0, 0) 1.0 - >>> abs(eval_exponential(1, numpy.pi*1.j)) < 1e-15 + >>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15 True >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15 True """ - return numpy.exp(z_values) + c_parameter + return np.exp(z_values) + c_parameter -def eval_quadratic_polynomial( - c_parameter: complex, z_values: numpy.ndarray -) -> numpy.ndarray: +def eval_quadratic_polynomial(c_parameter: complex, z_values: np.ndarray) -> np.ndarray: """ >>> eval_quadratic_polynomial(0, 2) 4 @@ -66,7 +64,7 @@ def eval_quadratic_polynomial( return z_values * z_values + c_parameter -def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray: +def prepare_grid(window_size: float, nb_pixels: int) -> np.ndarray: """ Create a grid of complex values of size nb_pixels*nb_pixels with real and imaginary parts ranging from -window_size to window_size (inclusive). @@ -77,20 +75,20 @@ def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray: [ 0.-1.j, 0.+0.j, 0.+1.j], [ 1.-1.j, 1.+0.j, 1.+1.j]]) """ - x = numpy.linspace(-window_size, window_size, nb_pixels) + x = np.linspace(-window_size, window_size, nb_pixels) x = x.reshape((nb_pixels, 1)) - y = numpy.linspace(-window_size, window_size, nb_pixels) + y = np.linspace(-window_size, window_size, nb_pixels) y = y.reshape((1, nb_pixels)) return x + 1.0j * y def iterate_function( - eval_function: Callable[[Any, numpy.ndarray], numpy.ndarray], + eval_function: Callable[[Any, np.ndarray], np.ndarray], function_params: Any, nb_iterations: int, - z_0: numpy.ndarray, + z_0: np.ndarray, infinity: float | None = None, -) -> numpy.ndarray: +) -> np.ndarray: """ Iterate the function "eval_function" exactly nb_iterations times. The first argument of the function is a parameter which is contained in @@ -98,22 +96,22 @@ def iterate_function( values to iterate from. This function returns the final iterates. - >>> iterate_function(eval_quadratic_polynomial, 0, 3, numpy.array([0,1,2])).shape + >>> iterate_function(eval_quadratic_polynomial, 0, 3, np.array([0,1,2])).shape (3,) - >>> numpy.round(iterate_function(eval_quadratic_polynomial, + >>> np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... numpy.array([0,1,2]))[0]) + ... 
np.array([0,1,2]))[0]) 0j - >>> numpy.round(iterate_function(eval_quadratic_polynomial, + >>> np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... numpy.array([0,1,2]))[1]) + ... np.array([0,1,2]))[1]) (1+0j) - >>> numpy.round(iterate_function(eval_quadratic_polynomial, + >>> np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... numpy.array([0,1,2]))[2]) + ... np.array([0,1,2]))[2]) (256+0j) """ @@ -121,8 +119,8 @@ def iterate_function( for _ in range(nb_iterations): z_n = eval_function(function_params, z_n) if infinity is not None: - numpy.nan_to_num(z_n, copy=False, nan=infinity) - z_n[abs(z_n) == numpy.inf] = infinity + np.nan_to_num(z_n, copy=False, nan=infinity) + z_n[abs(z_n) == np.inf] = infinity return z_n @@ -130,21 +128,21 @@ def show_results( function_label: str, function_params: Any, escape_radius: float, - z_final: numpy.ndarray, + z_final: np.ndarray, ) -> None: """ Plots of whether the absolute value of z_final is greater than the value of escape_radius. Adds the function_label and function_params to the title. - >>> show_results('80', 0, 1, numpy.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]])) + >>> show_results('80', 0, 1, np.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]])) """ abs_z_final = (abs(z_final)).transpose() abs_z_final[:, :] = abs_z_final[::-1, :] - pyplot.matshow(abs_z_final < escape_radius) - pyplot.title(f"Julia set of ${function_label}$, $c={function_params}$") - pyplot.show() + plt.matshow(abs_z_final < escape_radius) + plt.title(f"Julia set of ${function_label}$, $c={function_params}$") + plt.show() def ignore_overflow_warnings() -> None: diff --git a/fractals/koch_snowflake.py b/fractals/koch_snowflake.py index 30cd4b39c7c1..724b78f41a69 100644 --- a/fractals/koch_snowflake.py +++ b/fractals/koch_snowflake.py @@ -22,25 +22,25 @@ from __future__ import annotations -import matplotlib.pyplot as plt # type: ignore -import numpy +import matplotlib.pyplot as plt +import numpy as np # initial triangle of Koch snowflake -VECTOR_1 = numpy.array([0, 0]) -VECTOR_2 = numpy.array([0.5, 0.8660254]) -VECTOR_3 = numpy.array([1, 0]) +VECTOR_1 = np.array([0, 0]) +VECTOR_2 = np.array([0.5, 0.8660254]) +VECTOR_3 = np.array([1, 0]) INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] # uncomment for simple Koch curve instead of Koch snowflake # INITIAL_VECTORS = [VECTOR_1, VECTOR_3] -def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]: +def iterate(initial_vectors: list[np.ndarray], steps: int) -> list[np.ndarray]: """ Go through the number of iterations determined by the argument "steps". Be careful with high values (above 5) since the time to calculate increases exponentially. - >>> iterate([numpy.array([0, 0]), numpy.array([1, 0])], 1) + >>> iterate([np.array([0, 0]), np.array([1, 0])], 1) [array([0, 0]), array([0.33333333, 0. ]), array([0.5 , \ 0.28867513]), array([0.66666667, 0. ]), array([1, 0])] """ @@ -50,13 +50,13 @@ def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndar return vectors -def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]: +def iteration_step(vectors: list[np.ndarray]) -> list[np.ndarray]: """ Loops through each pair of adjacent vectors. Each line between two adjacent vectors is divided into 4 segments by adding 3 additional vectors in-between the original two vectors. The vector in the middle is constructed through a 60 degree rotation so it is bent outwards. 
- >>> iteration_step([numpy.array([0, 0]), numpy.array([1, 0])]) + >>> iteration_step([np.array([0, 0]), np.array([1, 0])]) [array([0, 0]), array([0.33333333, 0. ]), array([0.5 , \ 0.28867513]), array([0.66666667, 0. ]), array([1, 0])] """ @@ -74,22 +74,22 @@ def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]: return new_vectors -def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray: +def rotate(vector: np.ndarray, angle_in_degrees: float) -> np.ndarray: """ Standard rotation of a 2D vector with a rotation matrix (see https://en.wikipedia.org/wiki/Rotation_matrix ) - >>> rotate(numpy.array([1, 0]), 60) + >>> rotate(np.array([1, 0]), 60) array([0.5 , 0.8660254]) - >>> rotate(numpy.array([1, 0]), 90) + >>> rotate(np.array([1, 0]), 90) array([6.123234e-17, 1.000000e+00]) """ - theta = numpy.radians(angle_in_degrees) - c, s = numpy.cos(theta), numpy.sin(theta) - rotation_matrix = numpy.array(((c, -s), (s, c))) - return numpy.dot(rotation_matrix, vector) + theta = np.radians(angle_in_degrees) + c, s = np.cos(theta), np.sin(theta) + rotation_matrix = np.array(((c, -s), (s, c))) + return np.dot(rotation_matrix, vector) -def plot(vectors: list[numpy.ndarray]) -> None: +def plot(vectors: list[np.ndarray]) -> None: """ Utility function to plot the vectors using matplotlib.pyplot No doctest was implemented since this function does not have a return value diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 7c22329ad8b4..6eeb89da6bdf 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -78,7 +78,7 @@ def plot_curve(self, step_size: float = 0.01): step_size: defines the step(s) at which to evaluate the Bezier curve. The smaller the step size, the finer the curve produced. """ - from matplotlib import pyplot as plt # type: ignore + from matplotlib import pyplot as plt to_plot_x: list[float] = [] # x coordinates of points to plot to_plot_y: list[float] = [] # y coordinates of points to plot diff --git a/machine_learning/gradient_descent.py b/machine_learning/gradient_descent.py index db38b3c95b52..95463faf5635 100644 --- a/machine_learning/gradient_descent.py +++ b/machine_learning/gradient_descent.py @@ -3,7 +3,7 @@ function. 
""" -import numpy +import numpy as np # List of input, output pairs train_data = ( @@ -116,7 +116,7 @@ def run_gradient_descent(): temp_parameter_vector[i] = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) - if numpy.allclose( + if np.allclose( parameter_vector, temp_parameter_vector, atol=absolute_error_limit, diff --git a/neural_network/input_data.py b/neural_network/input_data.py index f7ae86b48e65..9d4195487dbb 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -22,7 +22,7 @@ import typing import urllib -import numpy +import numpy as np from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated @@ -39,8 +39,8 @@ class _Datasets(typing.NamedTuple): def _read32(bytestream): - dt = numpy.dtype(numpy.uint32).newbyteorder(">") - return numpy.frombuffer(bytestream.read(4), dtype=dt)[0] + dt = np.dtype(np.uint32).newbyteorder(">") + return np.frombuffer(bytestream.read(4), dtype=dt)[0] @deprecated(None, "Please use tf.data to implement this functionality.") @@ -68,7 +68,7 @@ def _extract_images(f): rows = _read32(bytestream) cols = _read32(bytestream) buf = bytestream.read(rows * cols * num_images) - data = numpy.frombuffer(buf, dtype=numpy.uint8) + data = np.frombuffer(buf, dtype=np.uint8) data = data.reshape(num_images, rows, cols, 1) return data @@ -77,8 +77,8 @@ def _extract_images(f): def _dense_to_one_hot(labels_dense, num_classes): """Convert class labels from scalars to one-hot vectors.""" num_labels = labels_dense.shape[0] - index_offset = numpy.arange(num_labels) * num_classes - labels_one_hot = numpy.zeros((num_labels, num_classes)) + index_offset = np.arange(num_labels) * num_classes + labels_one_hot = np.zeros((num_labels, num_classes)) labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 return labels_one_hot @@ -107,7 +107,7 @@ def _extract_labels(f, one_hot=False, num_classes=10): ) num_items = _read32(bytestream) buf = bytestream.read(num_items) - labels = numpy.frombuffer(buf, dtype=numpy.uint8) + labels = np.frombuffer(buf, dtype=np.uint8) if one_hot: return _dense_to_one_hot(labels, num_classes) return labels @@ -153,7 +153,7 @@ def __init__( """ seed1, seed2 = random_seed.get_seed(seed) # If op level seed is not set, use whatever graph level seed is returned - numpy.random.seed(seed1 if seed is None else seed2) + np.random.seed(seed1 if seed is None else seed2) dtype = dtypes.as_dtype(dtype).base_dtype if dtype not in (dtypes.uint8, dtypes.float32): raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype) @@ -175,8 +175,8 @@ def __init__( ) if dtype == dtypes.float32: # Convert from [0, 255] -> [0.0, 1.0]. 
- images = images.astype(numpy.float32) - images = numpy.multiply(images, 1.0 / 255.0) + images = images.astype(np.float32) + images = np.multiply(images, 1.0 / 255.0) self._images = images self._labels = labels self._epochs_completed = 0 @@ -210,8 +210,8 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): start = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: - perm0 = numpy.arange(self._num_examples) - numpy.random.shuffle(perm0) + perm0 = np.arange(self._num_examples) + np.random.shuffle(perm0) self._images = self.images[perm0] self._labels = self.labels[perm0] # Go to the next epoch @@ -224,8 +224,8 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): labels_rest_part = self._labels[start : self._num_examples] # Shuffle the data if shuffle: - perm = numpy.arange(self._num_examples) - numpy.random.shuffle(perm) + perm = np.arange(self._num_examples) + np.random.shuffle(perm) self._images = self.images[perm] self._labels = self.labels[perm] # Start next epoch @@ -235,8 +235,8 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): images_new_part = self._images[start:end] labels_new_part = self._labels[start:end] return ( - numpy.concatenate((images_rest_part, images_new_part), axis=0), - numpy.concatenate((labels_rest_part, labels_new_part), axis=0), + np.concatenate((images_rest_part, images_new_part), axis=0), + np.concatenate((labels_rest_part, labels_new_part), axis=0), ) else: self._index_in_epoch += batch_size diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py index 7b374a93d039..dea7e2342d9f 100644 --- a/neural_network/two_hidden_layers_neural_network.py +++ b/neural_network/two_hidden_layers_neural_network.py @@ -5,11 +5,11 @@ - https://en.wikipedia.org/wiki/Feedforward_neural_network (Feedforward) """ -import numpy +import numpy as np class TwoHiddenLayerNeuralNetwork: - def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None: + def __init__(self, input_array: np.ndarray, output_array: np.ndarray) -> None: """ This function initializes the TwoHiddenLayerNeuralNetwork class with random weights for every layer and initializes predicted output with zeroes. @@ -28,30 +28,28 @@ def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> N # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. - self.input_layer_and_first_hidden_layer_weights = numpy.random.rand( + self.input_layer_and_first_hidden_layer_weights = np.random.rand( self.input_array.shape[1], 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. - self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand( - 4, 3 - ) + self.first_hidden_layer_and_second_hidden_layer_weights = np.random.rand(4, 3) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. - self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1) + self.second_hidden_layer_and_output_layer_weights = np.random.rand(3, 1) # Real output values provided. self.output_array = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. 
- self.predicted_output = numpy.zeros(output_array.shape) + self.predicted_output = np.zeros(output_array.shape) - def feedforward(self) -> numpy.ndarray: + def feedforward(self) -> np.ndarray: """ The information moves in only one direction i.e. forward from the input nodes, through the two hidden nodes and to the output nodes. @@ -60,24 +58,24 @@ def feedforward(self) -> numpy.ndarray: Return layer_between_second_hidden_layer_and_output (i.e the last layer of the neural network). - >>> input_val = numpy.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float) - >>> output_val = numpy.array(([0], [0], [0]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float) + >>> output_val = np.array(([0], [0], [0]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> res = nn.feedforward() - >>> array_sum = numpy.sum(res) - >>> numpy.isnan(array_sum) + >>> array_sum = np.sum(res) + >>> np.isnan(array_sum) False """ # Layer_between_input_and_first_hidden_layer is the layer connecting the # input nodes with the first hidden layer nodes. self.layer_between_input_and_first_hidden_layer = sigmoid( - numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights) + np.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid( - numpy.dot( + np.dot( self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ) @@ -86,7 +84,7 @@ def feedforward(self) -> numpy.ndarray: # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. self.layer_between_second_hidden_layer_and_output = sigmoid( - numpy.dot( + np.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ) @@ -100,8 +98,8 @@ def back_propagation(self) -> None: error rate obtained in the previous epoch (i.e., iteration). Updation is done using derivative of sogmoid activation function. 
- >>> input_val = numpy.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float) - >>> output_val = numpy.array(([0], [0], [0]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float) + >>> output_val = np.array(([0], [0], [0]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> res = nn.feedforward() >>> nn.back_propagation() @@ -110,15 +108,15 @@ def back_propagation(self) -> None: False """ - updated_second_hidden_layer_and_output_layer_weights = numpy.dot( + updated_second_hidden_layer_and_output_layer_weights = np.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T, 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output), ) - updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot( + updated_first_hidden_layer_and_second_hidden_layer_weights = np.dot( self.layer_between_input_and_first_hidden_layer.T, - numpy.dot( + np.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output), @@ -128,10 +126,10 @@ def back_propagation(self) -> None: self.layer_between_first_hidden_layer_and_second_hidden_layer ), ) - updated_input_layer_and_first_hidden_layer_weights = numpy.dot( + updated_input_layer_and_first_hidden_layer_weights = np.dot( self.input_array.T, - numpy.dot( - numpy.dot( + np.dot( + np.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output), @@ -155,7 +153,7 @@ def back_propagation(self) -> None: updated_second_hidden_layer_and_output_layer_weights ) - def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None: + def train(self, output: np.ndarray, iterations: int, give_loss: bool) -> None: """ Performs the feedforwarding and back propagation process for the given number of iterations. @@ -166,8 +164,8 @@ def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None give_loss : boolean value, If True then prints loss for each iteration, If False then nothing is printed - >>> input_val = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) - >>> output_val = numpy.array(([0], [1], [1]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) + >>> output_val = np.array(([0], [1], [1]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> first_iteration_weights = nn.feedforward() >>> nn.back_propagation() @@ -179,10 +177,10 @@ def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None self.output = self.feedforward() self.back_propagation() if give_loss: - loss = numpy.mean(numpy.square(output - self.feedforward())) + loss = np.mean(np.square(output - self.feedforward())) print(f"Iteration {iteration} Loss: {loss}") - def predict(self, input_arr: numpy.ndarray) -> int: + def predict(self, input_arr: np.ndarray) -> int: """ Predict's the output for the given input values using the trained neural network. @@ -192,8 +190,8 @@ def predict(self, input_arr: numpy.ndarray) -> int: than the threshold value else returns 0, as the real output values are in binary. 
- >>> input_val = numpy.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) - >>> output_val = numpy.array(([0], [1], [1]), dtype=float) + >>> input_val = np.array(([0, 0, 0], [0, 1, 0], [0, 0, 1]), dtype=float) + >>> output_val = np.array(([0], [1], [1]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> nn.train(output_val, 1000, False) >>> nn.predict([0, 1, 0]) in (0, 1) @@ -204,18 +202,18 @@ def predict(self, input_arr: numpy.ndarray) -> int: self.array = input_arr self.layer_between_input_and_first_hidden_layer = sigmoid( - numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights) + np.dot(self.array, self.input_layer_and_first_hidden_layer_weights) ) self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid( - numpy.dot( + np.dot( self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ) ) self.layer_between_second_hidden_layer_and_output = sigmoid( - numpy.dot( + np.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ) @@ -224,26 +222,26 @@ def predict(self, input_arr: numpy.ndarray) -> int: return int((self.layer_between_second_hidden_layer_and_output > 0.6)[0]) -def sigmoid(value: numpy.ndarray) -> numpy.ndarray: +def sigmoid(value: np.ndarray) -> np.ndarray: """ Applies sigmoid activation function. return normalized values - >>> sigmoid(numpy.array(([1, 0, 2], [1, 0, 0]), dtype=numpy.float64)) + >>> sigmoid(np.array(([1, 0, 2], [1, 0, 0]), dtype=np.float64)) array([[0.73105858, 0.5 , 0.88079708], [0.73105858, 0.5 , 0.5 ]]) """ - return 1 / (1 + numpy.exp(-value)) + return 1 / (1 + np.exp(-value)) -def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray: +def sigmoid_derivative(value: np.ndarray) -> np.ndarray: """ Provides the derivative value of the sigmoid function. returns derivative of the sigmoid value - >>> sigmoid_derivative(numpy.array(([1, 0, 2], [1, 0, 0]), dtype=numpy.float64)) + >>> sigmoid_derivative(np.array(([1, 0, 2], [1, 0, 0]), dtype=np.float64)) array([[ 0., 0., -2.], [ 0., 0., 0.]]) """ @@ -264,7 +262,7 @@ def example() -> int: True """ # Input values. - test_input = numpy.array( + test_input = np.array( ( [0, 0, 0], [0, 0, 1], @@ -275,11 +273,11 @@ def example() -> int: [1, 1, 0], [1, 1, 1], ), - dtype=numpy.float64, + dtype=np.float64, ) # True output values for the given input values. - output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64) + output = np.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=np.float64) # Calling neural network class. neural_network = TwoHiddenLayerNeuralNetwork( @@ -290,7 +288,7 @@ def example() -> int: # Set give_loss to True if you want to see loss in every iteration. 
neural_network.train(output=output, iterations=10, give_loss=False) - return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64)) + return neural_network.predict(np.array(([1, 1, 1]), dtype=np.float64)) if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index b9f3115df92a..22da7cb777b5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string - "ICN001", # `matplotlib.pyplot` should be imported as `plt` -- FIX ME "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. -- FIX ME "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME From c328b000ecdd4ad08d029999144e7ec702022390 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 21:35:37 +0200 Subject: [PATCH 1356/1543] [pre-commit.ci] pre-commit autoupdate (#11339) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.3.4 → v0.3.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.4...v0.3.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8b101207d5ff..e6b1b0442c04 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.4 + rev: v0.3.5 hooks: - id: ruff - id: ruff-format From 39daaf8248b37404f69e8459d0378d77b59c6c0f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 1 Apr 2024 22:36:41 +0300 Subject: [PATCH 1357/1543] Enable ruff RUF100 rule (#11337) --- audio_filters/butterworth_filter.py | 14 +++++++------- data_structures/binary_tree/basic_binary_tree.py | 2 +- .../binary_tree/non_recursive_segment_tree.py | 2 +- data_structures/binary_tree/red_black_tree.py | 2 +- data_structures/binary_tree/segment_tree.py | 6 +++--- data_structures/heap/min_heap.py | 2 +- dynamic_programming/longest_common_subsequence.py | 2 +- .../longest_increasing_subsequence_o_nlogn.py | 4 ++-- graphs/articulation_points.py | 2 +- graphs/dinic.py | 2 +- other/sdes.py | 4 ++-- project_euler/problem_011/sol2.py | 2 +- pyproject.toml | 1 - strings/manacher.py | 2 +- 14 files changed, 23 insertions(+), 24 deletions(-) diff --git a/audio_filters/butterworth_filter.py b/audio_filters/butterworth_filter.py index 6449bc3f3dce..4e6ea1b18fb4 100644 --- a/audio_filters/butterworth_filter.py +++ b/audio_filters/butterworth_filter.py @@ -13,7 +13,7 @@ def make_lowpass( frequency: int, samplerate: int, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a low-pass filter @@ -43,7 +43,7 @@ def make_lowpass( def make_highpass( frequency: int, samplerate: int, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a high-pass filter @@ -73,7 +73,7 @@ def make_highpass( def make_bandpass( frequency: int, samplerate: int, - q_factor: float = 1 / sqrt(2), # noqa: 
B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a band-pass filter @@ -104,7 +104,7 @@ def make_bandpass( def make_allpass( frequency: int, samplerate: int, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates an all-pass filter @@ -132,7 +132,7 @@ def make_peak( frequency: int, samplerate: int, gain_db: float, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a peak filter @@ -164,7 +164,7 @@ def make_lowshelf( frequency: int, samplerate: int, gain_db: float, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a low-shelf filter @@ -201,7 +201,7 @@ def make_highshelf( frequency: int, samplerate: int, gain_db: float, - q_factor: float = 1 / sqrt(2), # noqa: B008 + q_factor: float = 1 / sqrt(2), ) -> IIRFilter: """ Creates a high-shelf filter diff --git a/data_structures/binary_tree/basic_binary_tree.py b/data_structures/binary_tree/basic_binary_tree.py index 0439413d95b5..9d4c1bdbb57a 100644 --- a/data_structures/binary_tree/basic_binary_tree.py +++ b/data_structures/binary_tree/basic_binary_tree.py @@ -85,7 +85,7 @@ def depth(self) -> int: """ return self._depth(self.root) - def _depth(self, node: Node | None) -> int: # noqa: UP007 + def _depth(self, node: Node | None) -> int: if not node: return 0 return 1 + max(self._depth(node.left), self._depth(node.right)) diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index 42c78a3a1be0..45c476701d79 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -87,7 +87,7 @@ def update(self, p: int, v: T) -> None: p = p // 2 self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1]) - def query(self, l: int, r: int) -> T | None: # noqa: E741 + def query(self, l: int, r: int) -> T | None: """ Get range query value in log(N) time :param l: left element index diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index bdd808c828e0..e68d8d1e3735 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -152,7 +152,7 @@ def _insert_repair(self) -> None: self.grandparent.color = 1 self.grandparent._insert_repair() - def remove(self, label: int) -> RedBlackTree: # noqa: PLR0912 + def remove(self, label: int) -> RedBlackTree: """Remove label from this tree.""" if self.label == label: if self.left and self.right: diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index 3b0b32946f6e..bb9c1ae2268b 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -35,7 +35,7 @@ def right(self, idx): """ return idx * 2 + 1 - def build(self, idx, l, r): # noqa: E741 + def build(self, idx, l, r): if l == r: self.st[idx] = self.A[l] else: @@ -56,7 +56,7 @@ def update(self, a, b, val): """ return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val) - def update_recursive(self, idx, l, r, a, b, val): # noqa: E741 + def update_recursive(self, idx, l, r, a, b, val): """ update(1, 1, N, a, b, v) for update val v to [a,b] """ @@ -83,7 +83,7 @@ def query(self, a, b): """ return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1) - def query_recursive(self, idx, l, r, a, b): # noqa: E741 + def query_recursive(self, idx, l, r, a, 
b): """ query(1, 1, N, a, b) for query max of [a,b] """ diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index ecb1876493b0..39f6d99e8a4c 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -66,7 +66,7 @@ def build_heap(self, array): # this is min-heapify method def sift_down(self, idx, array): while True: - l = self.get_left_child_idx(idx) # noqa: E741 + l = self.get_left_child_idx(idx) r = self.get_right_child_idx(idx) smallest = idx diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index 178b4169b213..22f50a166ae4 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -38,7 +38,7 @@ def longest_common_subsequence(x: str, y: str): n = len(y) # declaring the array for storing the dp values - l = [[0] * (n + 1) for _ in range(m + 1)] # noqa: E741 + l = [[0] * (n + 1) for _ in range(m + 1)] for i in range(1, m + 1): for j in range(1, n + 1): diff --git a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py index 5e11d729f395..44e333e97779 100644 --- a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py +++ b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py @@ -7,13 +7,13 @@ from __future__ import annotations -def ceil_index(v, l, r, key): # noqa: E741 +def ceil_index(v, l, r, key): while r - l > 1: m = (l + r) // 2 if v[m] >= key: r = m else: - l = m # noqa: E741 + l = m return r diff --git a/graphs/articulation_points.py b/graphs/articulation_points.py index d28045282425..3fcaffd73725 100644 --- a/graphs/articulation_points.py +++ b/graphs/articulation_points.py @@ -1,5 +1,5 @@ # Finding Articulation Points in Undirected Graph -def compute_ap(l): # noqa: E741 +def compute_ap(l): n = len(l) out_edge_count = 0 low = [0] * n diff --git a/graphs/dinic.py b/graphs/dinic.py index aaf3a119525c..4f5e81236984 100644 --- a/graphs/dinic.py +++ b/graphs/dinic.py @@ -37,7 +37,7 @@ def depth_first_search(self, vertex, sink, flow): # Here we calculate the flow that reaches the sink def max_flow(self, source, sink): flow, self.q[0] = 0, source - for l in range(31): # noqa: E741 l = 30 maybe faster for random data + for l in range(31): # l = 30 maybe faster for random data while True: self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q) qi, qe, self.lvl[source] = 0, 1, 1 diff --git a/other/sdes.py b/other/sdes.py index 31105984b9bb..a69add3430c3 100644 --- a/other/sdes.py +++ b/other/sdes.py @@ -44,9 +44,9 @@ def function(expansion, s0, s1, key, message): right = message[4:] temp = apply_table(right, expansion) temp = xor(temp, key) - l = apply_sbox(s0, temp[:4]) # noqa: E741 + l = apply_sbox(s0, temp[:4]) r = apply_sbox(s1, temp[4:]) - l = "0" * (2 - len(l)) + l # noqa: E741 + l = "0" * (2 - len(l)) + l r = "0" * (2 - len(r)) + r temp = apply_table(l + r, p4_table) temp = xor(left, temp) diff --git a/project_euler/problem_011/sol2.py b/project_euler/problem_011/sol2.py index 9ea0db991aaf..2958305331a9 100644 --- a/project_euler/problem_011/sol2.py +++ b/project_euler/problem_011/sol2.py @@ -35,7 +35,7 @@ def solution(): 70600674 """ with open(os.path.dirname(__file__) + "/grid.txt") as f: - l = [] # noqa: E741 + l = [] for _ in range(20): l.append([int(x) for x in f.readline().split()]) diff --git a/pyproject.toml b/pyproject.toml index 22da7cb777b5..c8a8744abc83 100644 --- a/pyproject.toml +++ 
b/pyproject.toml @@ -15,7 +15,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts "RUF00", # Ambiguous unicode character and other rules - "RUF100", # Unused `noqa` directive -- FIX ME "S101", # Use of `assert` detected -- DO NOT FIX "S105", # Possible hardcoded password: 'password' "S113", # Probable use of requests call without timeout -- FIX ME diff --git a/strings/manacher.py b/strings/manacher.py index c58c7c19ec44..ca546e533acd 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -50,7 +50,7 @@ def palindromic_string(input_string: str) -> str: # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: - l = j - k + 1 # noqa: E741 + l = j - k + 1 r = j + k - 1 # update max_length and start position From f8a948914b928d9fd3c0e32c034bd90315caa389 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 1 Apr 2024 22:39:31 +0300 Subject: [PATCH 1358/1543] Enable ruff NPY002 rule (#11336) --- linear_algebra/src/conjugate_gradient.py | 6 ++++-- machine_learning/decision_tree.py | 3 ++- machine_learning/k_means_clust.py | 6 +++--- machine_learning/sequential_minimum_optimization.py | 5 +++-- neural_network/back_propagation_neural_network.py | 8 +++++--- neural_network/convolution_neural_network.py | 13 +++++++------ neural_network/input_data.py | 6 +++--- neural_network/two_hidden_layers_neural_network.py | 9 +++++---- pyproject.toml | 1 - 9 files changed, 32 insertions(+), 25 deletions(-) diff --git a/linear_algebra/src/conjugate_gradient.py b/linear_algebra/src/conjugate_gradient.py index 4c0b58deb978..45da35813978 100644 --- a/linear_algebra/src/conjugate_gradient.py +++ b/linear_algebra/src/conjugate_gradient.py @@ -61,7 +61,8 @@ def _create_spd_matrix(dimension: int) -> Any: >>> _is_matrix_spd(spd_matrix) True """ - random_matrix = np.random.randn(dimension, dimension) + rng = np.random.default_rng() + random_matrix = rng.normal(size=(dimension, dimension)) spd_matrix = np.dot(random_matrix, random_matrix.T) assert _is_matrix_spd(spd_matrix) return spd_matrix @@ -157,7 +158,8 @@ def test_conjugate_gradient() -> None: # Create linear system with SPD matrix and known solution x_true. dimension = 3 spd_matrix = _create_spd_matrix(dimension) - x_true = np.random.randn(dimension, 1) + rng = np.random.default_rng() + x_true = rng.normal(size=(dimension, 1)) b = np.dot(spd_matrix, x_true) # Numpy solution. 
diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index 7f129919a3ce..e48905eeac6a 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -187,7 +187,8 @@ def main(): tree = DecisionTree(depth=10, min_leaf_size=10) tree.train(x, y) - test_cases = (np.random.rand(10) * 2) - 1 + rng = np.random.default_rng() + test_cases = (rng.random(10) * 2) - 1 predictions = np.array([tree.predict(x) for x in test_cases]) avg_error = np.mean((predictions - test_cases) ** 2) diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 9f6646944458..a926362fc18b 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -55,12 +55,12 @@ def get_initial_centroids(data, k, seed=None): """Randomly choose k data points as initial centroids""" - if seed is not None: # useful for obtaining consistent results - np.random.seed(seed) + # useful for obtaining consistent results + rng = np.random.default_rng(seed) n = data.shape[0] # number of data points # Pick K indices from range [0, N). - rand_indices = np.random.randint(0, n, k) + rand_indices = rng.integers(0, n, k) # Keep centroids as dense format, as many entries will be nonzero due to averaging. # As long as at least one document in a cluster contains a word, diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index be16baca1a4c..408d59ab5d29 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -289,12 +289,13 @@ def _choose_a2(self, i1): if cmd is None: return - for i2 in np.roll(self.unbound, np.random.choice(self.length)): + rng = np.random.default_rng() + for i2 in np.roll(self.unbound, rng.choice(self.length)): cmd = yield i1, i2 if cmd is None: return - for i2 in np.roll(self._all_samples, np.random.choice(self.length)): + for i2 in np.roll(self._all_samples, rng.choice(self.length)): cmd = yield i1, i2 if cmd is None: return diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index 7e0bdbbe2857..6131a13e945e 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -51,8 +51,9 @@ def __init__( self.is_input_layer = is_input_layer def initializer(self, back_units): - self.weight = np.asmatrix(np.random.normal(0, 0.5, (self.units, back_units))) - self.bias = np.asmatrix(np.random.normal(0, 0.5, self.units)).T + rng = np.random.default_rng() + self.weight = np.asmatrix(rng.normal(0, 0.5, (self.units, back_units))) + self.bias = np.asmatrix(rng.normal(0, 0.5, self.units)).T if self.activation is None: self.activation = sigmoid @@ -174,7 +175,8 @@ def plot_loss(self): def example(): - x = np.random.randn(10, 10) + rng = np.random.default_rng() + x = rng.normal(size=(10, 10)) y = np.asarray( [ [0.8, 0.4], diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index 07cc456b7466..3c551924442d 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -41,15 +41,16 @@ def __init__( self.size_pooling1 = size_p1 self.rate_weight = rate_w self.rate_thre = rate_t + rng = np.random.default_rng() self.w_conv1 = [ - np.asmatrix(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) + np.asmatrix(-1 * rng.random((self.conv1[0], self.conv1[0])) + 0.5) for i in 
range(self.conv1[1]) ] - self.wkj = np.asmatrix(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5) - self.vji = np.asmatrix(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5) - self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1 - self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1 - self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1 + self.wkj = np.asmatrix(-1 * rng.random((self.num_bp3, self.num_bp2)) + 0.5) + self.vji = np.asmatrix(-1 * rng.random((self.num_bp2, self.num_bp1)) + 0.5) + self.thre_conv1 = -2 * rng.random(self.conv1[1]) + 1 + self.thre_bp2 = -2 * rng.random(self.num_bp2) + 1 + self.thre_bp3 = -2 * rng.random(self.num_bp3) + 1 def save_model(self, save_path): # save model dict with pickle diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 9d4195487dbb..d189e3f9e0d9 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -153,7 +153,7 @@ def __init__( """ seed1, seed2 = random_seed.get_seed(seed) # If op level seed is not set, use whatever graph level seed is returned - np.random.seed(seed1 if seed is None else seed2) + self._rng = np.random.default_rng(seed1 if seed is None else seed2) dtype = dtypes.as_dtype(dtype).base_dtype if dtype not in (dtypes.uint8, dtypes.float32): raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype) @@ -211,7 +211,7 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: perm0 = np.arange(self._num_examples) - np.random.shuffle(perm0) + self._rng.shuffle(perm0) self._images = self.images[perm0] self._labels = self.labels[perm0] # Go to the next epoch @@ -225,7 +225,7 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): # Shuffle the data if shuffle: perm = np.arange(self._num_examples) - np.random.shuffle(perm) + self._rng.shuffle(perm) self._images = self.images[perm] self._labels = self.labels[perm] # Start next epoch diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py index dea7e2342d9f..d488de590cc2 100644 --- a/neural_network/two_hidden_layers_neural_network.py +++ b/neural_network/two_hidden_layers_neural_network.py @@ -28,19 +28,20 @@ def __init__(self, input_array: np.ndarray, output_array: np.ndarray) -> None: # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. - self.input_layer_and_first_hidden_layer_weights = np.random.rand( - self.input_array.shape[1], 4 + rng = np.random.default_rng() + self.input_layer_and_first_hidden_layer_weights = rng.random( + (self.input_array.shape[1], 4) ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. - self.first_hidden_layer_and_second_hidden_layer_weights = np.random.rand(4, 3) + self.first_hidden_layer_and_second_hidden_layer_weights = rng.random((4, 3)) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. - self.second_hidden_layer_and_output_layer_weights = np.random.rand(3, 1) + self.second_hidden_layer_and_output_layer_weights = rng.random((3, 1)) # Real output values provided. 
self.output_array = output_array diff --git a/pyproject.toml b/pyproject.toml index c8a8744abc83..50cd38005f09 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. -- FIX ME - "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX From 93fb555e0a97096f62a122e73cfdc6f0579cefbe Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 04:27:56 +0300 Subject: [PATCH 1359/1543] Enable ruff SIM102 rule (#11341) * Enable ruff SIM102 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/arrays/sudoku_solver.py | 7 +++---- .../stacks/balanced_parentheses.py | 7 ++++--- graphs/a_star.py | 20 ++++++++++++------- graphs/bi_directional_dijkstra.py | 8 +++++--- other/davis_putnam_logemann_loveland.py | 7 +++---- project_euler/problem_033/sol1.py | 10 +++++++--- project_euler/problem_037/sol1.py | 7 ++++--- project_euler/problem_107/sol1.py | 9 +++++---- project_euler/problem_207/sol1.py | 8 +++++--- pyproject.toml | 1 - scheduling/shortest_job_first.py | 13 +++++++----- scripts/validate_solutions.py | 11 ++++++---- web_programming/emails_from_url.py | 15 ++++++++------ 13 files changed, 73 insertions(+), 50 deletions(-) diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index c9dffcde2379..5c1cff06f9d4 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -92,10 +92,9 @@ def eliminate(values, s, d): dplaces = [s for s in u if d in values[s]] if len(dplaces) == 0: return False ## Contradiction: no place for this value - elif len(dplaces) == 1: - # d can only be in one place in unit; assign it there - if not assign(values, dplaces[0], d): - return False + # d can only be in one place in unit; assign it there + elif len(dplaces) == 1 and not assign(values, dplaces[0], d): + return False return values diff --git a/data_structures/stacks/balanced_parentheses.py b/data_structures/stacks/balanced_parentheses.py index 3c036c220e5c..928815bb2111 100644 --- a/data_structures/stacks/balanced_parentheses.py +++ b/data_structures/stacks/balanced_parentheses.py @@ -19,9 +19,10 @@ def balanced_parentheses(parentheses: str) -> bool: for bracket in parentheses: if bracket in bracket_pairs: stack.push(bracket) - elif bracket in (")", "]", "}"): - if stack.is_empty() or bracket_pairs[stack.pop()] != bracket: - return False + elif bracket in (")", "]", "}") and ( + stack.is_empty() or bracket_pairs[stack.pop()] != bracket + ): + return False return stack.is_empty() diff --git a/graphs/a_star.py b/graphs/a_star.py index 06da3b5cd863..1d7063ccc55a 100644 --- a/graphs/a_star.py +++ b/graphs/a_star.py @@ -75,13 +75,19 @@ def search( for i in range(len(DIRECTIONS)): # to try out different valid actions x2 = x + 
DIRECTIONS[i][0] y2 = y + DIRECTIONS[i][1] - if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]): - if closed[x2][y2] == 0 and grid[x2][y2] == 0: - g2 = g + cost - f2 = g2 + heuristic[x2][y2] - cell.append([f2, g2, x2, y2]) - closed[x2][y2] = 1 - action[x2][y2] = i + if ( + x2 >= 0 + and x2 < len(grid) + and y2 >= 0 + and y2 < len(grid[0]) + and closed[x2][y2] == 0 + and grid[x2][y2] == 0 + ): + g2 = g + cost + f2 = g2 + heuristic[x2][y2] + cell.append([f2, g2, x2, y2]) + closed[x2][y2] = 1 + action[x2][y2] = i invpath = [] x = goal[0] y = goal[1] diff --git a/graphs/bi_directional_dijkstra.py b/graphs/bi_directional_dijkstra.py index 7b9eac6c8587..d2c4030b921b 100644 --- a/graphs/bi_directional_dijkstra.py +++ b/graphs/bi_directional_dijkstra.py @@ -36,9 +36,11 @@ def pass_and_relaxation( queue.put((new_cost_f, nxt)) cst_fwd[nxt] = new_cost_f parent[nxt] = v - if nxt in visited_backward: - if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: - shortest_distance = cst_fwd[v] + d + cst_bwd[nxt] + if ( + nxt in visited_backward + and cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance + ): + shortest_distance = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index 5c6e2d9ffd5e..3a76f3dfef08 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -64,10 +64,9 @@ def assign(self, model: dict[str, bool | None]) -> None: value = model[symbol] else: continue - if value is not None: - # Complement assignment if literal is in complemented form - if literal.endswith("'"): - value = not value + # Complement assignment if literal is in complemented form + if value is not None and literal.endswith("'"): + value = not value self.literals[literal] = value def evaluate(self, model: dict[str, bool | None]) -> bool | None: diff --git a/project_euler/problem_033/sol1.py b/project_euler/problem_033/sol1.py index 187fd61bde6c..71790d34fbed 100644 --- a/project_euler/problem_033/sol1.py +++ b/project_euler/problem_033/sol1.py @@ -44,9 +44,13 @@ def fraction_list(digit_len: int) -> list[str]: last_digit = int("1" + "0" * digit_len) for num in range(den, last_digit): while den <= 99: - if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): - if is_digit_cancelling(num, den): - solutions.append(f"{num}/{den}") + if ( + (num != den) + and (num % 10 == den // 10) + and (den % 10 != 0) + and is_digit_cancelling(num, den) + ): + solutions.append(f"{num}/{den}") den += 1 num += 1 den = 10 diff --git a/project_euler/problem_037/sol1.py b/project_euler/problem_037/sol1.py index ef7686cbcb96..9c09065f4bd0 100644 --- a/project_euler/problem_037/sol1.py +++ b/project_euler/problem_037/sol1.py @@ -85,9 +85,10 @@ def validate(n: int) -> bool: >>> validate(3797) True """ - if len(str(n)) > 3: - if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])): - return False + if len(str(n)) > 3 and ( + not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])) + ): + return False return True diff --git a/project_euler/problem_107/sol1.py b/project_euler/problem_107/sol1.py index 3fe75909e2ea..79cdd937042e 100644 --- a/project_euler/problem_107/sol1.py +++ b/project_euler/problem_107/sol1.py @@ -81,10 +81,11 @@ def prims_algorithm(self) -> Graph: while len(subgraph.vertices) < len(self.vertices): min_weight = max(self.edges.values()) + 1 for edge, weight in self.edges.items(): - if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): - if 
weight < min_weight: - min_edge = edge - min_weight = weight + if (edge[0] in subgraph.vertices) ^ ( + edge[1] in subgraph.vertices + ) and weight < min_weight: + min_edge = edge + min_weight = weight subgraph.add_edge(min_edge, min_weight) diff --git a/project_euler/problem_207/sol1.py b/project_euler/problem_207/sol1.py index 2b3591f51cfa..c83dc1d4aaef 100644 --- a/project_euler/problem_207/sol1.py +++ b/project_euler/problem_207/sol1.py @@ -88,9 +88,11 @@ def solution(max_proportion: float = 1 / 12345) -> int: total_partitions += 1 if check_partition_perfect(partition_candidate): perfect_partitions += 1 - if perfect_partitions > 0: - if perfect_partitions / total_partitions < max_proportion: - return int(partition_candidate) + if ( + perfect_partitions > 0 + and perfect_partitions / total_partitions < max_proportion + ): + return int(partition_candidate) integer += 1 diff --git a/pyproject.toml b/pyproject.toml index 50cd38005f09..e3cf42c92c54 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "S105", # Possible hardcoded password: 'password' "S113", # Probable use of requests call without timeout -- FIX ME "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME - "SIM102", # Use a single `if` statement instead of nested `if` statements -- FIX ME "SLF001", # Private member accessed: `_Iterator` -- FIX ME "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index cfd0417ea62d..6899ec87c591 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -37,11 +37,14 @@ def calculate_waitingtime( # Process until all processes are completed while complete != no_of_processes: for j in range(no_of_processes): - if arrival_time[j] <= increment_time and remaining_time[j] > 0: - if remaining_time[j] < minm: - minm = remaining_time[j] - short = j - check = True + if ( + arrival_time[j] <= increment_time + and remaining_time[j] > 0 + and remaining_time[j] < minm + ): + minm = remaining_time[j] + short = j + check = True if not check: increment_time += 1 diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index ca4af5261a8f..0afbdde315c7 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -71,10 +71,13 @@ def added_solution_file_path() -> list[pathlib.Path]: def collect_solution_file_paths() -> list[pathlib.Path]: - if os.environ.get("CI") and os.environ.get("GITHUB_EVENT_NAME") == "pull_request": - # Return only if there are any, otherwise default to all solutions - if filepaths := added_solution_file_path(): - return filepaths + # Return only if there are any, otherwise default to all solutions + if ( + os.environ.get("CI") + and os.environ.get("GITHUB_EVENT_NAME") == "pull_request" + and (filepaths := added_solution_file_path()) + ): + return filepaths return all_solution_file_paths() diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index 6b4bacfe7d5a..26c88e1b13a5 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -30,12 +30,15 @@ def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None if tag == "a": # Check the list of defined attributes. for name, value in attrs: - # If href is defined, and not empty nor # print it. - if name == "href" and value != "#" and value != "": - # If not already in urls. 
- if value not in self.urls: - url = parse.urljoin(self.domain, value) - self.urls.append(url) + # If href is defined, not empty nor # print it and not already in urls. + if ( + name == "href" + and value != "#" + and value != "" + and value not in self.urls + ): + url = parse.urljoin(self.domain, value) + self.urls.append(url) # Get main domain name (example.com) From f8cdb3e9482ddca85cd1bffa96c038afc13f9c85 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 19:44:37 +0300 Subject: [PATCH 1360/1543] Enable ruff S105 rule (#11343) * Enable ruff S105 rule * Update web_programming/recaptcha_verification.py --------- Co-authored-by: Christian Clauss --- pyproject.toml | 1 - web_programming/recaptcha_verification.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e3cf42c92c54..65a0754d678c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PT018", # Assertion should be broken down into multiple parts "RUF00", # Ambiguous unicode character and other rules "S101", # Use of `assert` detected -- DO NOT FIX - "S105", # Possible hardcoded password: 'password' "S113", # Probable use of requests call without timeout -- FIX ME "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME "SLF001", # Private member accessed: `_Iterator` -- FIX ME diff --git a/web_programming/recaptcha_verification.py b/web_programming/recaptcha_verification.py index b03afb28ec53..c9b691b28a8b 100644 --- a/web_programming/recaptcha_verification.py +++ b/web_programming/recaptcha_verification.py @@ -43,7 +43,7 @@ def login_using_recaptcha(request): # Enter your recaptcha secret key here - secret_key = "secretKey" + secret_key = "secretKey" # noqa: S105 url = "https://www.google.com/recaptcha/api/siteverify" # when method is not POST, direct user to login page From f437f922792b8c7e3fbb168a1ec6bfdf183a7304 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 22:13:56 +0300 Subject: [PATCH 1361/1543] Enable ruff INP001 rule (#11346) * Enable ruff INP001 rule * Fix * Fix * Fix * Fix * Fix --- data_structures/arrays/__init__.py | 0 data_structures/hashing/tests/__init__.py | 0 digital_image_processing/morphological_operations/__init__.py | 0 electronics/__init__.py | 0 electronics/circular_convolution.py | 3 +-- fractals/__init__.py | 0 geometry/__init__.py | 0 greedy_methods/__init__.py | 0 linear_algebra/src/gaussian_elimination_pivoting/__init__.py | 0 linear_programming/__init__.py | 0 maths/numerical_analysis/__init__.py | 0 maths/special_numbers/__init__.py | 0 neural_network/activation_functions/__init__.py | 0 neural_network/activation_functions/mish.py | 3 ++- pyproject.toml | 1 - 15 files changed, 3 insertions(+), 4 deletions(-) create mode 100644 data_structures/arrays/__init__.py create mode 100644 data_structures/hashing/tests/__init__.py create mode 100644 digital_image_processing/morphological_operations/__init__.py create mode 100644 electronics/__init__.py create mode 100644 fractals/__init__.py create mode 100644 geometry/__init__.py create mode 100644 greedy_methods/__init__.py create mode 100644 linear_algebra/src/gaussian_elimination_pivoting/__init__.py create mode 100644 linear_programming/__init__.py create mode 100644 maths/numerical_analysis/__init__.py create mode 100644 maths/special_numbers/__init__.py create mode 100644 neural_network/activation_functions/__init__.py diff --git 
a/data_structures/arrays/__init__.py b/data_structures/arrays/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/hashing/tests/__init__.py b/data_structures/hashing/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/digital_image_processing/morphological_operations/__init__.py b/digital_image_processing/morphological_operations/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/electronics/__init__.py b/electronics/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/electronics/circular_convolution.py b/electronics/circular_convolution.py index f2e35742e944..768f2ad941bc 100644 --- a/electronics/circular_convolution.py +++ b/electronics/circular_convolution.py @@ -37,8 +37,7 @@ def circular_convolution(self) -> list[float]: using matrix method Usage: - >>> import circular_convolution as cc - >>> convolution = cc.CircularConvolution() + >>> convolution = CircularConvolution() >>> convolution.circular_convolution() [10, 10, 6, 14] diff --git a/fractals/__init__.py b/fractals/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/geometry/__init__.py b/geometry/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/greedy_methods/__init__.py b/greedy_methods/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/linear_algebra/src/gaussian_elimination_pivoting/__init__.py b/linear_algebra/src/gaussian_elimination_pivoting/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/linear_programming/__init__.py b/linear_programming/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/maths/numerical_analysis/__init__.py b/maths/numerical_analysis/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/maths/special_numbers/__init__.py b/maths/special_numbers/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/neural_network/activation_functions/__init__.py b/neural_network/activation_functions/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/neural_network/activation_functions/mish.py b/neural_network/activation_functions/mish.py index e51655df8a3f..57a91413fe50 100644 --- a/neural_network/activation_functions/mish.py +++ b/neural_network/activation_functions/mish.py @@ -7,7 +7,8 @@ """ import numpy as np -from softplus import softplus + +from .softplus import softplus def mish(vector: np.ndarray) -> np.ndarray: diff --git a/pyproject.toml b/pyproject.toml index 65a0754d678c..9689cf2b37aa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string - "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. 
-- FIX ME "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX From f5bbea3776a5038d0e428ce3c06c25086076e212 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 22:18:47 +0300 Subject: [PATCH 1362/1543] Enable ruff RUF005 rule (#11344) --- data_structures/binary_tree/binary_search_tree.py | 2 +- dynamic_programming/subset_generation.py | 2 +- maths/odd_sieve.py | 2 +- pyproject.toml | 5 ++++- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 090e3e25fe6d..32194ddc2043 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -336,7 +336,7 @@ def inorder(curr_node: Node | None) -> list[Node]: """ node_list = [] if curr_node is not None: - node_list = inorder(curr_node.left) + [curr_node] + inorder(curr_node.right) + node_list = [*inorder(curr_node.left), curr_node, *inorder(curr_node.right)] return node_list diff --git a/dynamic_programming/subset_generation.py b/dynamic_programming/subset_generation.py index 1be412b9374d..d490bca737ba 100644 --- a/dynamic_programming/subset_generation.py +++ b/dynamic_programming/subset_generation.py @@ -45,7 +45,7 @@ def subset_combinations(elements: list[int], n: int) -> list: for i in range(1, r + 1): for j in range(i, 0, -1): for prev_combination in dp[j - 1]: - dp[j].append(tuple(prev_combination) + (elements[i - 1],)) + dp[j].append((*prev_combination, elements[i - 1])) try: return sorted(dp[n]) diff --git a/maths/odd_sieve.py b/maths/odd_sieve.py index 60e92921a94c..06605ca54296 100644 --- a/maths/odd_sieve.py +++ b/maths/odd_sieve.py @@ -33,7 +33,7 @@ def odd_sieve(num: int) -> list[int]: 0, ceil((num - i_squared) / (i << 1)) ) - return [2] + list(compress(range(3, num, 2), sieve)) + return [2, *list(compress(range(3, num, 2), sieve))] if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index 9689cf2b37aa..e1d7dc91b2b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,7 +12,10 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts - "RUF00", # Ambiguous unicode character and other rules + "RUF001", # String contains ambiguous {}. Did you mean {}? + "RUF002", # Docstring contains ambiguous {}. Did you mean {}? + "RUF003", # Comment contains ambiguous {}. Did you mean {}? 
+ "RUF007", # Prefer itertools.pairwise() over zip() when iterating over successive pairs "S101", # Use of `assert` detected -- DO NOT FIX "S113", # Probable use of requests call without timeout -- FIX ME "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME From 53b2926704f3ad3ec2134a114be3a338e755e28a Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 2 Apr 2024 22:29:34 +0300 Subject: [PATCH 1363/1543] Enable ruff PGH003 rule (#11345) * Enable ruff PGH003 rule * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- compression/huffman.py | 4 ++-- data_structures/binary_tree/binary_search_tree.py | 4 ++-- data_structures/linked_list/rotate_to_the_right.py | 2 +- fractals/mandelbrot.py | 2 +- graphics/bezier_curve.py | 2 +- maths/entropy.py | 4 ++-- matrix/spiral_print.py | 4 +++- matrix/tests/test_matrix_operation.py | 2 +- project_euler/problem_092/sol1.py | 2 +- project_euler/problem_104/sol1.py | 2 +- pyproject.toml | 1 - scripts/validate_filenames.py | 2 +- scripts/validate_solutions.py | 6 +++--- web_programming/covid_stats_via_xpath.py | 2 +- 14 files changed, 20 insertions(+), 19 deletions(-) diff --git a/compression/huffman.py b/compression/huffman.py index 65e5c2f25385..44eda6c03180 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -40,7 +40,7 @@ def build_tree(letters: list[Letter]) -> Letter | TreeNode: Run through the list of Letters and build the min heap for the Huffman Tree. """ - response: list[Letter | TreeNode] = letters # type: ignore + response: list[Letter | TreeNode] = list(letters) while len(response) > 1: left = response.pop(0) right = response.pop(0) @@ -59,7 +59,7 @@ def traverse_tree(root: Letter | TreeNode, bitstring: str) -> list[Letter]: if isinstance(root, Letter): root.bitstring[root.letter] = bitstring return [root] - treenode: TreeNode = root # type: ignore + treenode: TreeNode = root letters = [] letters += traverse_tree(treenode.left, bitstring + "0") letters += traverse_tree(treenode.right, bitstring + "1") diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 32194ddc2043..3f214d0113a4 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -294,9 +294,9 @@ def remove(self, value: int) -> None: predecessor = self.get_max( node.left ) # Gets the max value of the left branch - self.remove(predecessor.value) # type: ignore + self.remove(predecessor.value) # type: ignore[union-attr] node.value = ( - predecessor.value # type: ignore + predecessor.value # type: ignore[union-attr] ) # Assigns the value to the node to delete and keep tree structure def preorder_traverse(self, node: Node | None) -> Iterable: diff --git a/data_structures/linked_list/rotate_to_the_right.py b/data_structures/linked_list/rotate_to_the_right.py index 51b10481c0ce..6b1c54f4be4d 100644 --- a/data_structures/linked_list/rotate_to_the_right.py +++ b/data_structures/linked_list/rotate_to_the_right.py @@ -63,7 +63,7 @@ def insert_node(head: Node | None, data: int) -> Node: while temp_node.next_node: temp_node = temp_node.next_node - temp_node.next_node = new_node # type: ignore + temp_node.next_node = new_node return head diff --git a/fractals/mandelbrot.py b/fractals/mandelbrot.py index 5eb9af0aafe1..359d965a882d 100644 --- 
a/fractals/mandelbrot.py +++ b/fractals/mandelbrot.py @@ -17,7 +17,7 @@ import colorsys -from PIL import Image # type: ignore +from PIL import Image def get_distance(x: float, y: float, max_step: int) -> float: diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 6eeb89da6bdf..9d906f179c92 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -2,7 +2,7 @@ # https://www.tutorialspoint.com/computer_graphics/computer_graphics_curves.htm from __future__ import annotations -from scipy.special import comb # type: ignore +from scipy.special import comb class BezierCurve: diff --git a/maths/entropy.py b/maths/entropy.py index 76fac4ee717d..39ec67bea038 100644 --- a/maths/entropy.py +++ b/maths/entropy.py @@ -96,8 +96,8 @@ def analyze_text(text: str) -> tuple[dict, dict]: The first dictionary stores the frequency of single character strings. The second dictionary stores the frequency of two character strings. """ - single_char_strings = Counter() # type: ignore - two_char_strings = Counter() # type: ignore + single_char_strings = Counter() # type: ignore[var-annotated] + two_char_strings = Counter() # type: ignore[var-annotated] single_char_strings[text[-1]] += 1 # first case when we have space at start. diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index 7ba0a275157b..c16dde69cb56 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -116,7 +116,9 @@ def spiral_traversal(matrix: list[list]) -> list[int]: [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7] + spiral_traversal([]) """ if matrix: - return list(matrix.pop(0)) + spiral_traversal(list(zip(*matrix))[::-1]) # type: ignore + return list(matrix.pop(0)) + spiral_traversal( + [list(row) for row in zip(*matrix)][::-1] + ) else: return [] diff --git a/matrix/tests/test_matrix_operation.py b/matrix/tests/test_matrix_operation.py index 638f97daa2ed..addc870ca205 100644 --- a/matrix/tests/test_matrix_operation.py +++ b/matrix/tests/test_matrix_operation.py @@ -12,7 +12,7 @@ import sys import numpy as np -import pytest # type: ignore +import pytest # Custom/local libraries from matrix import matrix_operation as matop diff --git a/project_euler/problem_092/sol1.py b/project_euler/problem_092/sol1.py index 8d3f0c9ddd7b..3e45e82207a7 100644 --- a/project_euler/problem_092/sol1.py +++ b/project_euler/problem_092/sol1.py @@ -68,7 +68,7 @@ def chain(number: int) -> bool: """ if CHAINS[number - 1] is not None: - return CHAINS[number - 1] # type: ignore + return CHAINS[number - 1] # type: ignore[return-value] number_chain = chain(next_number(number)) CHAINS[number - 1] = number_chain diff --git a/project_euler/problem_104/sol1.py b/project_euler/problem_104/sol1.py index 60fd6fe99adb..d84dbcfc9c65 100644 --- a/project_euler/problem_104/sol1.py +++ b/project_euler/problem_104/sol1.py @@ -15,7 +15,7 @@ import sys -sys.set_int_max_str_digits(0) # type: ignore +sys.set_int_max_str_digits(0) def check(number: int) -> bool: diff --git a/pyproject.toml b/pyproject.toml index e1d7dc91b2b8..7eac811395ae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable" -- FIX ME "G004", # Logging statement uses f-string - "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no 
assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME diff --git a/scripts/validate_filenames.py b/scripts/validate_filenames.py index ed23f3907114..0890024dd349 100755 --- a/scripts/validate_filenames.py +++ b/scripts/validate_filenames.py @@ -4,7 +4,7 @@ try: from .build_directory_md import good_file_paths except ImportError: - from build_directory_md import good_file_paths # type: ignore + from build_directory_md import good_file_paths # type: ignore[no-redef] filepaths = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index 0afbdde315c7..68dcd68b3947 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -21,8 +21,8 @@ def convert_path_to_module(file_path: pathlib.Path) -> ModuleType: """Converts a file path to a Python module""" spec = importlib.util.spec_from_file_location(file_path.name, str(file_path)) - module = importlib.util.module_from_spec(spec) # type: ignore - spec.loader.exec_module(module) # type: ignore + module = importlib.util.module_from_spec(spec) # type: ignore[arg-type] + spec.loader.exec_module(module) # type: ignore[union-attr] return module @@ -92,7 +92,7 @@ def test_project_euler(solution_path: pathlib.Path) -> None: problem_number: str = solution_path.parent.name[8:].zfill(3) expected: str = PROBLEM_ANSWERS[problem_number] solution_module = convert_path_to_module(solution_path) - answer = str(solution_module.solution()) # type: ignore + answer = str(solution_module.solution()) answer = hashlib.sha256(answer.encode()).hexdigest() assert ( answer == expected diff --git a/web_programming/covid_stats_via_xpath.py b/web_programming/covid_stats_via_xpath.py index a95130badad9..7011a02bffa8 100644 --- a/web_programming/covid_stats_via_xpath.py +++ b/web_programming/covid_stats_via_xpath.py @@ -7,7 +7,7 @@ from typing import NamedTuple import requests -from lxml import html # type: ignore +from lxml import html class CovidData(NamedTuple): From cc2f5b13088b8a98181983b5589f48749016d4ce Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 8 Apr 2024 14:22:54 +0300 Subject: [PATCH 1364/1543] Do not fix ruff EXE001 rule (#11350) * Do not fix ruff EXE001 rule * Fix --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 7eac811395ae..264f06d1f750 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME "E741", # Ambiguous variable name 'l' -- FIX ME "EM101", # Exception must not use a string literal, assign to variable first - "EXE001", # Shebang is present but file is not executable" -- FIX ME + "EXE001", # Shebang is present but file is not executable -- DO NOT FIX "G004", # Logging statement uses f-string "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX From 9e55c9d9845c07ce6390ab92a2d86be4816d4a69 Mon Sep 17 00:00:00 2001 From: Jiayou Qin <90779499+Jiayoqin@users.noreply.github.com> Date: Mon, 8 Apr 2024 07:35:22 -0400 Subject: [PATCH 1365/1543] Added documentations (#11352) * Added documentations * Update data_structures/queue/circular_queue.py --------- Co-authored-by: Christian Clauss --- data_structures/queue/circular_queue.py | 7 +++++-- data_structures/queue/circular_queue_linked_list.py | 2 +- 2 
files changed, 6 insertions(+), 3 deletions(-) diff --git a/data_structures/queue/circular_queue.py b/data_structures/queue/circular_queue.py index 93a6ef805c7c..f2fb4c01e467 100644 --- a/data_structures/queue/circular_queue.py +++ b/data_structures/queue/circular_queue.py @@ -25,6 +25,7 @@ def __len__(self) -> int: def is_empty(self) -> bool: """ + Checks whether the queue is empty or not >>> cq = CircularQueue(5) >>> cq.is_empty() True @@ -35,6 +36,7 @@ def is_empty(self) -> bool: def first(self): """ + Returns the first element of the queue >>> cq = CircularQueue(5) >>> cq.first() False @@ -45,7 +47,8 @@ def first(self): def enqueue(self, data): """ - This function insert an element in the queue using self.rear value as an index + This function inserts an element at the end of the queue using self.rear value + as an index. >>> cq = CircularQueue(5) >>> cq.enqueue("A") # doctest: +ELLIPSIS >> cq = CircularQueue(5) >>> cq.dequeue() Traceback (most recent call last): diff --git a/data_structures/queue/circular_queue_linked_list.py b/data_structures/queue/circular_queue_linked_list.py index 62042c4bce96..da8629678e52 100644 --- a/data_structures/queue/circular_queue_linked_list.py +++ b/data_structures/queue/circular_queue_linked_list.py @@ -39,7 +39,7 @@ def create_linked_list(self, initial_capacity: int) -> None: def is_empty(self) -> bool: """ - Checks where the queue is empty or not + Checks whether the queue is empty or not >>> cq = CircularQueueLinkedList() >>> cq.is_empty() True From 14ca726951473dd1993b6b13993105ea3b077ac3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 9 Apr 2024 07:23:51 +0200 Subject: [PATCH 1366/1543] [pre-commit.ci] pre-commit autoupdate (#11355) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.5.0 → v4.6.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.5.0...v4.6.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e6b1b0442c04..d4b8d1136ed7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v4.6.0 hooks: - id: check-executables-have-shebangs - id: check-toml From 0a9a860eb1174a513b231db2cf1a3378ff7c5b33 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 Apr 2024 22:21:33 +0200 Subject: [PATCH 1367/1543] [pre-commit.ci] pre-commit autoupdate (#11364) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/MarcoGorelli/auto-walrus: v0.2.2 → 0.3.3](https://github.com/MarcoGorelli/auto-walrus/compare/v0.2.2...0.3.3) - [github.com/astral-sh/ruff-pre-commit: v0.3.5 → v0.3.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.5...v0.3.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d4b8d1136ed7..9472bcfa3e07 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,12 +11,12 @@ repos: - id: requirements-txt-fixer - repo: 
https://github.com/MarcoGorelli/auto-walrus - rev: v0.2.2 + rev: 0.3.3 hooks: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.5 + rev: v0.3.7 hooks: - id: ruff - id: ruff-format From a42eb357027328085f928a4ab6c7aa770aeb1d6b Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Fri, 19 Apr 2024 22:30:22 +0300 Subject: [PATCH 1368/1543] Enable ruff E741 rule (#11370) * Enable ruff E741 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../binary_tree/non_recursive_segment_tree.py | 22 ++++++------ data_structures/binary_tree/segment_tree.py | 36 +++++++++---------- data_structures/heap/min_heap.py | 12 +++---- .../longest_common_subsequence.py | 10 +++--- .../longest_increasing_subsequence_o_nlogn.py | 14 ++++---- graphs/articulation_points.py | 10 +++--- graphs/dinic.py | 2 +- .../sequential_minimum_optimization.py | 4 +-- maths/pi_generator.py | 10 +++--- other/sdes.py | 10 +++--- project_euler/problem_011/sol2.py | 22 ++++++++---- pyproject.toml | 1 - strings/jaro_winkler.py | 8 ++--- strings/manacher.py | 33 ++++++++--------- 14 files changed, 102 insertions(+), 92 deletions(-) diff --git a/data_structures/binary_tree/non_recursive_segment_tree.py b/data_structures/binary_tree/non_recursive_segment_tree.py index 45c476701d79..ca0d5c111c4f 100644 --- a/data_structures/binary_tree/non_recursive_segment_tree.py +++ b/data_structures/binary_tree/non_recursive_segment_tree.py @@ -87,12 +87,12 @@ def update(self, p: int, v: T) -> None: p = p // 2 self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1]) - def query(self, l: int, r: int) -> T | None: + def query(self, left: int, right: int) -> T | None: """ Get range query value in log(N) time - :param l: left element index - :param r: right element index - :return: element combined in the range [l, r] + :param left: left element index + :param right: right element index + :return: element combined in the range [left, right] >>> st = SegmentTree([1, 2, 3, 4], lambda a, b: a + b) >>> st.query(0, 2) @@ -104,15 +104,15 @@ def query(self, l: int, r: int) -> T | None: >>> st.query(2, 3) 7 """ - l, r = l + self.N, r + self.N + left, right = left + self.N, right + self.N res: T | None = None - while l <= r: - if l % 2 == 1: - res = self.st[l] if res is None else self.fn(res, self.st[l]) - if r % 2 == 0: - res = self.st[r] if res is None else self.fn(res, self.st[r]) - l, r = (l + 1) // 2, (r - 1) // 2 + while left <= right: + if left % 2 == 1: + res = self.st[left] if res is None else self.fn(res, self.st[left]) + if right % 2 == 0: + res = self.st[right] if res is None else self.fn(res, self.st[right]) + left, right = (left + 1) // 2, (right - 1) // 2 return res diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index bb9c1ae2268b..c7069b3f6069 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -35,13 +35,13 @@ def right(self, idx): """ return idx * 2 + 1 - def build(self, idx, l, r): - if l == r: - self.st[idx] = self.A[l] + def build(self, idx, left, right): + if left == right: + self.st[idx] = self.A[left] else: - mid = (l + r) // 2 - self.build(self.left(idx), l, mid) - self.build(self.right(idx), mid + 1, r) + mid = (left + right) // 2 + self.build(self.left(idx), left, mid) + self.build(self.right(idx), mid + 1, right) self.st[idx] = 
max(self.st[self.left(idx)], self.st[self.right(idx)]) def update(self, a, b, val): @@ -56,18 +56,18 @@ def update(self, a, b, val): """ return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val) - def update_recursive(self, idx, l, r, a, b, val): + def update_recursive(self, idx, left, right, a, b, val): """ update(1, 1, N, a, b, v) for update val v to [a,b] """ - if r < a or l > b: + if right < a or left > b: return True - if l == r: + if left == right: self.st[idx] = val return True - mid = (l + r) // 2 - self.update_recursive(self.left(idx), l, mid, a, b, val) - self.update_recursive(self.right(idx), mid + 1, r, a, b, val) + mid = (left + right) // 2 + self.update_recursive(self.left(idx), left, mid, a, b, val) + self.update_recursive(self.right(idx), mid + 1, right, a, b, val) self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)]) return True @@ -83,17 +83,17 @@ def query(self, a, b): """ return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1) - def query_recursive(self, idx, l, r, a, b): + def query_recursive(self, idx, left, right, a, b): """ query(1, 1, N, a, b) for query max of [a,b] """ - if r < a or l > b: + if right < a or left > b: return -math.inf - if l >= a and r <= b: + if left >= a and right <= b: return self.st[idx] - mid = (l + r) // 2 - q1 = self.query_recursive(self.left(idx), l, mid, a, b) - q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b) + mid = (left + right) // 2 + q1 = self.query_recursive(self.left(idx), left, mid, a, b) + q2 = self.query_recursive(self.right(idx), mid + 1, right, a, b) return max(q1, q2) def show_data(self): diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index 39f6d99e8a4c..ce7ed570a58d 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -66,14 +66,14 @@ def build_heap(self, array): # this is min-heapify method def sift_down(self, idx, array): while True: - l = self.get_left_child_idx(idx) - r = self.get_right_child_idx(idx) + left = self.get_left_child_idx(idx) + right = self.get_right_child_idx(idx) smallest = idx - if l < len(array) and array[l] < array[idx]: - smallest = l - if r < len(array) and array[r] < array[smallest]: - smallest = r + if left < len(array) and array[left] < array[idx]: + smallest = left + if right < len(array) and array[right] < array[smallest]: + smallest = right if smallest != idx: array[idx], array[smallest] = array[smallest], array[idx] diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index 22f50a166ae4..9a98b1736ed5 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -38,30 +38,30 @@ def longest_common_subsequence(x: str, y: str): n = len(y) # declaring the array for storing the dp values - l = [[0] * (n + 1) for _ in range(m + 1)] + dp = [[0] * (n + 1) for _ in range(m + 1)] for i in range(1, m + 1): for j in range(1, n + 1): match = 1 if x[i - 1] == y[j - 1] else 0 - l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match) + dp[i][j] = max(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1] + match) seq = "" i, j = m, n while i > 0 and j > 0: match = 1 if x[i - 1] == y[j - 1] else 0 - if l[i][j] == l[i - 1][j - 1] + match: + if dp[i][j] == dp[i - 1][j - 1] + match: if match == 1: seq = x[i - 1] + seq i -= 1 j -= 1 - elif l[i][j] == l[i - 1][j]: + elif dp[i][j] == dp[i - 1][j]: i -= 1 else: j -= 1 - return l[m][n], seq + return dp[m][n], seq if __name__ == "__main__": 
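For context on the renames above and below: ruff E741 flags the single-character names l, I and O because they are easily confused with the digits 1 and 0. Most hunks in this commit resolve it by renaming (l/r to left/right, l to dp or grid), while dinic.py and sequential_minimum_optimization.py below keep the short name and silence the rule with a targeted noqa. A small illustration of the rule itself, not taken from any file in this patch:

l = [1, 2, 3]          # flagged by ruff as E741 (ambiguous variable name 'l')

left = [1, 2, 3]       # preferred fix: a descriptive name, as used in most hunks here

l = [1, 2, 3]  # noqa: E741  (targeted suppression, as kept in dinic.py and the SMO solver)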
diff --git a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py index 44e333e97779..bbc7a62b6b5c 100644 --- a/dynamic_programming/longest_increasing_subsequence_o_nlogn.py +++ b/dynamic_programming/longest_increasing_subsequence_o_nlogn.py @@ -7,14 +7,14 @@ from __future__ import annotations -def ceil_index(v, l, r, key): - while r - l > 1: - m = (l + r) // 2 - if v[m] >= key: - r = m +def ceil_index(v, left, right, key): + while right - left > 1: + middle = (left + right) // 2 + if v[middle] >= key: + right = middle else: - l = m - return r + left = middle + return right def longest_increasing_subsequence_length(v: list[int]) -> int: diff --git a/graphs/articulation_points.py b/graphs/articulation_points.py index 3fcaffd73725..0bf16e55bc04 100644 --- a/graphs/articulation_points.py +++ b/graphs/articulation_points.py @@ -1,6 +1,6 @@ # Finding Articulation Points in Undirected Graph -def compute_ap(l): - n = len(l) +def compute_ap(graph): + n = len(graph) out_edge_count = 0 low = [0] * n visited = [False] * n @@ -12,7 +12,7 @@ def dfs(root, at, parent, out_edge_count): visited[at] = True low[at] = at - for to in l[at]: + for to in graph[at]: if to == parent: pass elif not visited[to]: @@ -41,7 +41,7 @@ def dfs(root, at, parent, out_edge_count): # Adjacency list of graph -data = { +graph = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], @@ -52,4 +52,4 @@ def dfs(root, at, parent, out_edge_count): 7: [6, 8], 8: [5, 7], } -compute_ap(data) +compute_ap(graph) diff --git a/graphs/dinic.py b/graphs/dinic.py index 4f5e81236984..7919e6bc060a 100644 --- a/graphs/dinic.py +++ b/graphs/dinic.py @@ -37,7 +37,7 @@ def depth_first_search(self, vertex, sink, flow): # Here we calculate the flow that reaches the sink def max_flow(self, source, sink): flow, self.q[0] = 0, source - for l in range(31): # l = 30 maybe faster for random data + for l in range(31): # l = 30 maybe faster for random data # noqa: E741 while True: self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q) qi, qe, self.lvl[source] = 0, 1, 1 diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 408d59ab5d29..3abdd6ccbed8 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -309,9 +309,9 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): # calculate L and H which bound the new alpha2 s = y1 * y2 if s == -1: - l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) + l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) # noqa: E741 else: - l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) + l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) # noqa: E741 if l == h: return None, None diff --git a/maths/pi_generator.py b/maths/pi_generator.py index addd921747ba..97f2c540c1ce 100644 --- a/maths/pi_generator.py +++ b/maths/pi_generator.py @@ -41,7 +41,7 @@ def calculate_pi(limit: int) -> str: t = 1 k = 1 n = 3 - l = 3 + m = 3 decimal = limit counter = 0 @@ -65,11 +65,11 @@ def calculate_pi(limit: int) -> str: q *= 10 r = nr else: - nr = (2 * q + r) * l - nn = (q * (7 * k) + 2 + (r * l)) // (t * l) + nr = (2 * q + r) * m + nn = (q * (7 * k) + 2 + (r * m)) // (t * m) q *= k - t *= l - l += 2 + t *= m + m += 2 k += 1 n = nn r = nr diff --git a/other/sdes.py b/other/sdes.py index a69add3430c3..42186f453a3d 100644 --- a/other/sdes.py +++ b/other/sdes.py @@ -44,11 +44,11 @@ def function(expansion, s0, s1, key, 
message): right = message[4:] temp = apply_table(right, expansion) temp = xor(temp, key) - l = apply_sbox(s0, temp[:4]) - r = apply_sbox(s1, temp[4:]) - l = "0" * (2 - len(l)) + l - r = "0" * (2 - len(r)) + r - temp = apply_table(l + r, p4_table) + left_bin_str = apply_sbox(s0, temp[:4]) + right_bin_str = apply_sbox(s1, temp[4:]) + left_bin_str = "0" * (2 - len(left_bin_str)) + left_bin_str + right_bin_str = "0" * (2 - len(right_bin_str)) + right_bin_str + temp = apply_table(left_bin_str + right_bin_str, p4_table) temp = xor(left, temp) return temp + right diff --git a/project_euler/problem_011/sol2.py b/project_euler/problem_011/sol2.py index 2958305331a9..09bf315702c5 100644 --- a/project_euler/problem_011/sol2.py +++ b/project_euler/problem_011/sol2.py @@ -35,37 +35,47 @@ def solution(): 70600674 """ with open(os.path.dirname(__file__) + "/grid.txt") as f: - l = [] + grid = [] for _ in range(20): - l.append([int(x) for x in f.readline().split()]) + grid.append([int(x) for x in f.readline().split()]) maximum = 0 # right for i in range(20): for j in range(17): - temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] + temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] if temp > maximum: maximum = temp # down for i in range(17): for j in range(20): - temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] + temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j] if temp > maximum: maximum = temp # diagonal 1 for i in range(17): for j in range(17): - temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] + temp = ( + grid[i][j] + * grid[i + 1][j + 1] + * grid[i + 2][j + 2] + * grid[i + 3][j + 3] + ) if temp > maximum: maximum = temp # diagonal 2 for i in range(17): for j in range(3, 20): - temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] + temp = ( + grid[i][j] + * grid[i + 1][j - 1] + * grid[i + 2][j - 2] + * grid[i + 3][j - 3] + ) if temp > maximum: maximum = temp return maximum diff --git a/pyproject.toml b/pyproject.toml index 264f06d1f750..1ac70b2fab93 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "B904", # Within an `except` clause, raise exceptions with `raise ... 
from err` -- FIX ME "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "E741", # Ambiguous variable name 'l' -- FIX ME "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable -- DO NOT FIX "G004", # Logging statement uses f-string diff --git a/strings/jaro_winkler.py b/strings/jaro_winkler.py index f4a8fbad3ac8..c18f0d85d9f4 100644 --- a/strings/jaro_winkler.py +++ b/strings/jaro_winkler.py @@ -28,12 +28,12 @@ def jaro_winkler(str1: str, str2: str) -> float: def get_matched_characters(_str1: str, _str2: str) -> str: matched = [] limit = min(len(_str1), len(_str2)) // 2 - for i, l in enumerate(_str1): + for i, char in enumerate(_str1): left = int(max(0, i - limit)) right = int(min(i + limit + 1, len(_str2))) - if l in _str2[left:right]: - matched.append(l) - _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}" + if char in _str2[left:right]: + matched.append(char) + _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}" return "".join(matched) diff --git a/strings/manacher.py b/strings/manacher.py index ca546e533acd..fc8b01cd9c1c 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -9,9 +9,9 @@ def palindromic_string(input_string: str) -> str: 1. first this convert input_string("xyx") into new_string("x|y|x") where odd positions are actual input characters. - 2. for each character in new_string it find corresponding length and store the - length and l,r to store previously calculated info.(please look the explanation - for details) + 2. for each character in new_string it find corresponding length and + store the length and left,right to store previously calculated info. + (please look the explanation for details) 3. return corresponding output_string by removing all "|" """ @@ -29,7 +29,7 @@ def palindromic_string(input_string: str) -> str: # we will store the starting and ending of previous furthest ending palindromic # substring - l, r = 0, 0 + left, right = 0, 0 # length[i] shows the length of palindromic substring with center i length = [1 for i in range(len(new_input_string))] @@ -37,7 +37,7 @@ def palindromic_string(input_string: str) -> str: # for each character in new_string find corresponding palindromic string start = 0 for j in range(len(new_input_string)): - k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1) + k = 1 if j > right else min(length[left + right - j] // 2, right - j + 1) while ( j - k >= 0 and j + k < len(new_input_string) @@ -47,11 +47,11 @@ def palindromic_string(input_string: str) -> str: length[j] = 2 * k - 1 - # does this string is ending after the previously explored end (that is r) ? - # if yes the update the new r to the last index of this - if j + k - 1 > r: - l = j - k + 1 - r = j + k - 1 + # does this string is ending after the previously explored end (that is right) ? + # if yes the update the new right to the last index of this + if j + k - 1 > right: + left = j - k + 1 + right = j + k - 1 # update max_length and start position if max_length < length[j]: @@ -78,8 +78,9 @@ def palindromic_string(input_string: str) -> str: consider the string for which we are calculating the longest palindromic substring is shown above where ... 
are some characters in between and right now we are calculating the length of palindromic substring with center at a5 with following conditions : -i) we have stored the length of palindromic substring which has center at a3 (starts at - l ends at r) and it is the furthest ending till now, and it has ending after a6 +i) we have stored the length of palindromic substring which has center at a3 + (starts at left ends at right) and it is the furthest ending till now, + and it has ending after a6 ii) a2 and a4 are equally distant from a3 so char(a2) == char(a4) iii) a0 and a6 are equally distant from a3 so char(a0) == char(a6) iv) a1 is corresponding equal character of a5 in palindrome with center a3 (remember @@ -98,11 +99,11 @@ def palindromic_string(input_string: str) -> str: a1 but this only holds if a0 and a6 are inside the limits of palindrome centered at a3 so finally .. -len_of_palindrome__at(a5) = min(len_of_palindrome_at(a1), r-a5) -where a3 lies from l to r and we have to keep updating that +len_of_palindrome__at(a5) = min(len_of_palindrome_at(a1), right-a5) +where a3 lies from left to right and we have to keep updating that -and if the a5 lies outside of l,r boundary we calculate length of palindrome with -bruteforce and update l,r. +and if the a5 lies outside of left,right boundary we calculate length of palindrome with +bruteforce and update left,right. it gives the linear time complexity just like z-function """ From 42593489d974feff169cf4f3455e3f209d7bdfcf Mon Sep 17 00:00:00 2001 From: Kelvin Date: Sat, 20 Apr 2024 16:20:37 +0530 Subject: [PATCH 1369/1543] Add doctests in all functions in basic_string.py (#11374) * Add doctests in all functions in basic_string.py * Revert back to original basic_string.py * Add doctest in basic_string.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update genetic_algorithm/basic_string.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- genetic_algorithm/basic_string.py | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 089c5c99a1ec..a906ce85a779 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -33,7 +33,12 @@ def evaluate(item: str, main_target: str) -> tuple[str, float]: def crossover(parent_1: str, parent_2: str) -> tuple[str, str]: - """Slice and combine two string at a random point.""" + """ + Slice and combine two strings at a random point. + >>> random.seed(42) + >>> crossover("123456", "abcdef") + ('12345f', 'abcde6') + """ random_slice = random.randint(0, len(parent_1) - 1) child_1 = parent_1[:random_slice] + parent_2[random_slice:] child_2 = parent_2[:random_slice] + parent_1[random_slice:] @@ -41,7 +46,12 @@ def crossover(parent_1: str, parent_2: str) -> tuple[str, str]: def mutate(child: str, genes: list[str]) -> str: - """Mutate a random gene of a child with another one from the list.""" + """ + Mutate a random gene of a child with another one from the list. 
+ >>> random.seed(123) + >>> mutate("123456", list("ABCDEF")) + '12345A' + """ child_list = list(child) if random.uniform(0, 1) < MUTATION_PROBABILITY: child_list[random.randint(0, len(child)) - 1] = random.choice(genes) @@ -54,7 +64,22 @@ def select( population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]: - """Select the second parent and generate new population""" + """ + Select the second parent and generate new population + + >>> random.seed(42) + >>> parent_1 = ("123456", 8.0) + >>> population_score = [("abcdef", 4.0), ("ghijkl", 5.0), ("mnopqr", 7.0)] + >>> genes = list("ABCDEF") + >>> child_n = int(min(parent_1[1] + 1, 10)) + >>> population = [] + >>> for _ in range(child_n): + ... parent_2 = population_score[random.randrange(len(population_score))][0] + ... child_1, child_2 = crossover(parent_1[0], parent_2) + ... population.extend((mutate(child_1, genes), mutate(child_2, genes))) + >>> len(population) == (int(parent_1[1]) + 1) * 2 + True + """ pop = [] # Generate more children proportionally to the fitness score. child_n = int(parent_1[1] * 100) + 1 From 7b88e15b1cc67c784872b0d16189e516474cf5a5 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 20 Apr 2024 17:20:27 +0300 Subject: [PATCH 1370/1543] Enable ruff RUF007 rule (#11349) * Enable ruff RUF005 rule * Enable ruff RUF007 rule * Fix * Fix * Fix * Update sorts/bead_sort.py Co-authored-by: Christian Clauss * Update sorts/bead_sort.py * Revert "Update sorts/bead_sort.py" This reverts commit b10e5632e4479c2117c8b67113b5aa6545f127aa. * Revert "Update sorts/bead_sort.py" This reverts commit 2c1816bf102eeec5aa39cb2f1806afb64b672d14. * Update sorts/bead_sort.py --------- Co-authored-by: Christian Clauss --- data_structures/linked_list/skip_list.py | 3 ++- pyproject.toml | 1 - sorts/bead_sort.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py index 88d3e0daddf0..13e9a94a8698 100644 --- a/data_structures/linked_list/skip_list.py +++ b/data_structures/linked_list/skip_list.py @@ -5,6 +5,7 @@ from __future__ import annotations +from itertools import pairwise from random import random from typing import Generic, TypeVar @@ -389,7 +390,7 @@ def traverse_keys(node): def test_iter_always_yields_sorted_values(): def is_sorted(lst): - return all(next_item >= item for item, next_item in zip(lst, lst[1:])) + return all(next_item >= item for item, next_item in pairwise(lst)) skip_list = SkipList() for i in range(10): diff --git a/pyproject.toml b/pyproject.toml index 1ac70b2fab93..e46293a8d526 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "RUF001", # String contains ambiguous {}. Did you mean {}? "RUF002", # Docstring contains ambiguous {}. Did you mean {}? "RUF003", # Comment contains ambiguous {}. Did you mean {}? 
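The RUF007 rule enabled by this commit prefers itertools.pairwise() over zip(lst, lst[1:]) when walking successive pairs, as in the skip_list change above. A minimal sketch of the equivalence (Python 3.10+; the sample list is purely illustrative):

from itertools import pairwise

values = [3, 1, 4, 1, 5]

# For read-only iteration the two spellings yield identical pairs.
assert list(pairwise(values)) == list(zip(values, values[1:]))

# They differ once the list is mutated mid-loop: zip() pairs the live list with
# a snapshot copy (values[1:]), while pairwise() only ever reads the live list,
# which is presumably why bead_sort below keeps the zip() form under a noqa.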
- "RUF007", # Prefer itertools.pairwise() over zip() when iterating over successive pairs "S101", # Use of `assert` detected -- DO NOT FIX "S113", # Probable use of requests call without timeout -- FIX ME "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME diff --git a/sorts/bead_sort.py b/sorts/bead_sort.py index e51173643d81..8ce0619fd573 100644 --- a/sorts/bead_sort.py +++ b/sorts/bead_sort.py @@ -31,7 +31,7 @@ def bead_sort(sequence: list) -> list: if any(not isinstance(x, int) or x < 0 for x in sequence): raise TypeError("Sequence must be list of non-negative integers") for _ in range(len(sequence)): - for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])): + for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])): # noqa: RUF007 if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower From 2702bf9400faece97a1ebc76d0f91b9cfe9658f6 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 21 Apr 2024 20:34:18 +0300 Subject: [PATCH 1371/1543] Enable ruff S113 rule (#11375) * Enable ruff S113 rule * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- machine_learning/linear_regression.py | 3 ++- pyproject.toml | 1 - scripts/validate_solutions.py | 2 +- web_programming/co2_emission.py | 4 ++-- web_programming/covid_stats_via_xpath.py | 4 +++- web_programming/crawl_google_results.py | 2 +- web_programming/crawl_google_scholar_citation.py | 4 +++- web_programming/currency_converter.py | 2 +- web_programming/current_stock_price.py | 4 +++- web_programming/current_weather.py | 4 ++-- web_programming/daily_horoscope.py | 2 +- web_programming/download_images_from_google_query.py | 4 +++- web_programming/emails_from_url.py | 4 ++-- web_programming/fetch_anime_and_play.py | 8 +++++--- web_programming/fetch_bbc_news.py | 2 +- web_programming/fetch_github_info.py | 2 +- web_programming/fetch_jobs.py | 4 +++- web_programming/fetch_quotes.py | 4 ++-- web_programming/fetch_well_rx_price.py | 2 +- web_programming/get_amazon_product_data.py | 4 +++- web_programming/get_imdb_top_250_movies_csv.py | 2 +- web_programming/get_ip_geolocation.py | 2 +- web_programming/get_top_billionaires.py | 2 +- web_programming/get_top_hn_posts.py | 4 ++-- web_programming/giphy.py | 2 +- web_programming/instagram_crawler.py | 2 +- web_programming/instagram_pic.py | 4 ++-- web_programming/instagram_video.py | 4 ++-- web_programming/nasa_data.py | 6 +++--- web_programming/open_google_results.py | 1 + web_programming/random_anime_character.py | 6 ++++-- web_programming/recaptcha_verification.py | 4 +++- web_programming/reddit.py | 1 + web_programming/search_books_by_isbn.py | 2 +- web_programming/slack_message.py | 4 +++- web_programming/world_covid19_stats.py | 2 +- 36 files changed, 68 insertions(+), 46 deletions(-) diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index 39bee5712c16..839a5366d1cc 100644 --- a/machine_learning/linear_regression.py +++ b/machine_learning/linear_regression.py @@ -19,7 +19,8 @@ def collect_dataset(): """ response = requests.get( "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/" - "master/Week1/ADRvsRating.csv" + "master/Week1/ADRvsRating.csv", + timeout=10, ) lines = response.text.splitlines() data = [] diff --git a/pyproject.toml b/pyproject.toml index 
e46293a8d526..ff22fba81c8a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "RUF002", # Docstring contains ambiguous {}. Did you mean {}? "RUF003", # Comment contains ambiguous {}. Did you mean {}? "S101", # Use of `assert` detected -- DO NOT FIX - "S113", # Probable use of requests call without timeout -- FIX ME "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME "SLF001", # Private member accessed: `_Iterator` -- FIX ME "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index 68dcd68b3947..325c245e0d77 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -57,7 +57,7 @@ def added_solution_file_path() -> list[pathlib.Path]: "Accept": "application/vnd.github.v3+json", "Authorization": "token " + os.environ["GITHUB_TOKEN"], } - files = requests.get(get_files_url(), headers=headers).json() + files = requests.get(get_files_url(), headers=headers, timeout=10).json() for file in files: filepath = pathlib.Path.cwd().joinpath(file["filename"]) if ( diff --git a/web_programming/co2_emission.py b/web_programming/co2_emission.py index 88a426cb976d..19af70489d1d 100644 --- a/web_programming/co2_emission.py +++ b/web_programming/co2_emission.py @@ -11,13 +11,13 @@ # Emission in the last half hour def fetch_last_half_hour() -> str: - last_half_hour = requests.get(BASE_URL).json()["data"][0] + last_half_hour = requests.get(BASE_URL, timeout=10).json()["data"][0] return last_half_hour["intensity"]["actual"] # Emissions in a specific date range def fetch_from_to(start, end) -> list: - return requests.get(f"{BASE_URL}/{start}/{end}").json()["data"] + return requests.get(f"{BASE_URL}/{start}/{end}", timeout=10).json()["data"] if __name__ == "__main__": diff --git a/web_programming/covid_stats_via_xpath.py b/web_programming/covid_stats_via_xpath.py index 7011a02bffa8..c27a5d12bb3f 100644 --- a/web_programming/covid_stats_via_xpath.py +++ b/web_programming/covid_stats_via_xpath.py @@ -18,7 +18,9 @@ class CovidData(NamedTuple): def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> CovidData: xpath_str = '//div[@class = "maincounter-number"]/span/text()' - return CovidData(*html.fromstring(requests.get(url).content).xpath(xpath_str)) + return CovidData( + *html.fromstring(requests.get(url, timeout=10).content).xpath(xpath_str) + ) fmt = """Total COVID-19 cases in the world: {} diff --git a/web_programming/crawl_google_results.py b/web_programming/crawl_google_results.py index 1f5e6d31992b..cb75d450ff82 100644 --- a/web_programming/crawl_google_results.py +++ b/web_programming/crawl_google_results.py @@ -8,7 +8,7 @@ if __name__ == "__main__": print("Googling.....") url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:]) - res = requests.get(url, headers={"UserAgent": UserAgent().random}) + res = requests.get(url, headers={"UserAgent": UserAgent().random}, timeout=10) # res.raise_for_status() with open("project1a.html", "wb") as out_file: # only for knowing the class for data in res.iter_content(10000): diff --git a/web_programming/crawl_google_scholar_citation.py b/web_programming/crawl_google_scholar_citation.py index f92a3d139520..5f2ccad5f414 100644 --- a/web_programming/crawl_google_scholar_citation.py +++ b/web_programming/crawl_google_scholar_citation.py @@ -11,7 +11,9 @@ def get_citation(base_url: str, params: dict) -> str: 
""" Return the citation number. """ - soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser") + soup = BeautifulSoup( + requests.get(base_url, params=params, timeout=10).content, "html.parser" + ) div = soup.find("div", attrs={"class": "gs_ri"}) anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a") return anchors[2].get_text() diff --git a/web_programming/currency_converter.py b/web_programming/currency_converter.py index 3bbcafa8f89b..9623504b89ea 100644 --- a/web_programming/currency_converter.py +++ b/web_programming/currency_converter.py @@ -176,7 +176,7 @@ def convert_currency( params = locals() # from is a reserved keyword params["from"] = params.pop("from_") - res = requests.get(URL_BASE, params=params).json() + res = requests.get(URL_BASE, params=params, timeout=10).json() return str(res["amount"]) if res["error"] == 0 else res["error_message"] diff --git a/web_programming/current_stock_price.py b/web_programming/current_stock_price.py index 0c06354d8998..9567c05b0558 100644 --- a/web_programming/current_stock_price.py +++ b/web_programming/current_stock_price.py @@ -4,7 +4,9 @@ def stock_price(symbol: str = "AAPL") -> str: url = f"https://finance.yahoo.com/quote/{symbol}?p={symbol}" - yahoo_finance_source = requests.get(url, headers={"USER-AGENT": "Mozilla/5.0"}).text + yahoo_finance_source = requests.get( + url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10 + ).text soup = BeautifulSoup(yahoo_finance_source, "html.parser") specific_fin_streamer_tag = soup.find("fin-streamer", {"data-test": "qsp-price"}) diff --git a/web_programming/current_weather.py b/web_programming/current_weather.py index 3b6cd177cdfb..4a8fa5e3c845 100644 --- a/web_programming/current_weather.py +++ b/web_programming/current_weather.py @@ -20,13 +20,13 @@ def current_weather(location: str) -> list[dict]: if OPENWEATHERMAP_API_KEY: params_openweathermap = {"q": location, "appid": OPENWEATHERMAP_API_KEY} response_openweathermap = requests.get( - OPENWEATHERMAP_URL_BASE, params=params_openweathermap + OPENWEATHERMAP_URL_BASE, params=params_openweathermap, timeout=10 ) weather_data.append({"OpenWeatherMap": response_openweathermap.json()}) if WEATHERSTACK_API_KEY: params_weatherstack = {"query": location, "access_key": WEATHERSTACK_API_KEY} response_weatherstack = requests.get( - WEATHERSTACK_URL_BASE, params=params_weatherstack + WEATHERSTACK_URL_BASE, params=params_weatherstack, timeout=10 ) weather_data.append({"Weatherstack": response_weatherstack.json()}) if not weather_data: diff --git a/web_programming/daily_horoscope.py b/web_programming/daily_horoscope.py index b0dd1cd65924..75e637d8e52c 100644 --- a/web_programming/daily_horoscope.py +++ b/web_programming/daily_horoscope.py @@ -7,7 +7,7 @@ def horoscope(zodiac_sign: int, day: str) -> str: "https://www.horoscope.com/us/horoscopes/general/" f"horoscope-general-daily-{day}.aspx?sign={zodiac_sign}" ) - soup = BeautifulSoup(requests.get(url).content, "html.parser") + soup = BeautifulSoup(requests.get(url, timeout=10).content, "html.parser") return soup.find("div", class_="main-horoscope").p.text diff --git a/web_programming/download_images_from_google_query.py b/web_programming/download_images_from_google_query.py index 441347459f8e..235cd35763ef 100644 --- a/web_programming/download_images_from_google_query.py +++ b/web_programming/download_images_from_google_query.py @@ -39,7 +39,9 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) "ijn": "0", } - html = 
requests.get("https://www.google.com/search", params=params, headers=headers) + html = requests.get( + "https://www.google.com/search", params=params, headers=headers, timeout=10 + ) soup = BeautifulSoup(html.text, "html.parser") matched_images_data = "".join( re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))) diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index 26c88e1b13a5..43fd78dcf5a4 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -77,7 +77,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]: try: # Open URL - r = requests.get(url) + r = requests.get(url, timeout=10) # pass the raw HTML to the parser to get links parser.feed(r.text) @@ -88,7 +88,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]: # open URL. # read = requests.get(link) try: - read = requests.get(link) + read = requests.get(link, timeout=10) # Get the valid email. emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text) # If not in list then append it. diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py index 366807785e85..fd7c3a3a7381 100644 --- a/web_programming/fetch_anime_and_play.py +++ b/web_programming/fetch_anime_and_play.py @@ -28,7 +28,7 @@ def search_scraper(anime_name: str) -> list: search_url = f"{BASE_URL}/search/{anime_name}" response = requests.get( - search_url, headers={"UserAgent": UserAgent().chrome} + search_url, headers={"UserAgent": UserAgent().chrome}, timeout=10 ) # request the url. # Is the response ok? @@ -82,7 +82,9 @@ def search_anime_episode_list(episode_endpoint: str) -> list: request_url = f"{BASE_URL}{episode_endpoint}" - response = requests.get(url=request_url, headers={"UserAgent": UserAgent().chrome}) + response = requests.get( + url=request_url, headers={"UserAgent": UserAgent().chrome}, timeout=10 + ) response.raise_for_status() soup = BeautifulSoup(response.text, "html.parser") @@ -132,7 +134,7 @@ def get_anime_episode(episode_endpoint: str) -> list: episode_page_url = f"{BASE_URL}{episode_endpoint}" response = requests.get( - url=episode_page_url, headers={"User-Agent": UserAgent().chrome} + url=episode_page_url, headers={"User-Agent": UserAgent().chrome}, timeout=10 ) response.raise_for_status() diff --git a/web_programming/fetch_bbc_news.py b/web_programming/fetch_bbc_news.py index 7f8bc57b69f5..e5cd864a9d83 100644 --- a/web_programming/fetch_bbc_news.py +++ b/web_programming/fetch_bbc_news.py @@ -7,7 +7,7 @@ def fetch_bbc_news(bbc_news_api_key: str) -> None: # fetching a list of articles in json format - bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json() + bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key, timeout=10).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page["articles"], 1): print(f"{i}.) 
{article['title']}") diff --git a/web_programming/fetch_github_info.py b/web_programming/fetch_github_info.py index 7a4985b68841..25d44245bb58 100644 --- a/web_programming/fetch_github_info.py +++ b/web_programming/fetch_github_info.py @@ -42,7 +42,7 @@ def fetch_github_info(auth_token: str) -> dict[Any, Any]: "Authorization": f"token {auth_token}", "Accept": "application/vnd.github.v3+json", } - return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json() + return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers, timeout=10).json() if __name__ == "__main__": # pragma: no cover diff --git a/web_programming/fetch_jobs.py b/web_programming/fetch_jobs.py index 49abd3c88eec..0d89bf45de57 100644 --- a/web_programming/fetch_jobs.py +++ b/web_programming/fetch_jobs.py @@ -13,7 +13,9 @@ def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]: - soup = BeautifulSoup(requests.get(url + location).content, "html.parser") + soup = BeautifulSoup( + requests.get(url + location, timeout=10).content, "html.parser" + ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}): job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip() diff --git a/web_programming/fetch_quotes.py b/web_programming/fetch_quotes.py index d557e2d95e74..cf0add43f002 100644 --- a/web_programming/fetch_quotes.py +++ b/web_programming/fetch_quotes.py @@ -14,11 +14,11 @@ def quote_of_the_day() -> list: - return requests.get(API_ENDPOINT_URL + "/today").json() + return requests.get(API_ENDPOINT_URL + "/today", timeout=10).json() def random_quotes() -> list: - return requests.get(API_ENDPOINT_URL + "/random").json() + return requests.get(API_ENDPOINT_URL + "/random", timeout=10).json() if __name__ == "__main__": diff --git a/web_programming/fetch_well_rx_price.py b/web_programming/fetch_well_rx_price.py index ee51b9a5051b..93be2a9235d9 100644 --- a/web_programming/fetch_well_rx_price.py +++ b/web_programming/fetch_well_rx_price.py @@ -42,7 +42,7 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None: return None request_url = BASE_URL.format(drug_name, zip_code) - response = get(request_url) + response = get(request_url, timeout=10) # Is the response ok? 
response.raise_for_status() diff --git a/web_programming/get_amazon_product_data.py b/web_programming/get_amazon_product_data.py index c2f2ac5ab291..b98ff2c030af 100644 --- a/web_programming/get_amazon_product_data.py +++ b/web_programming/get_amazon_product_data.py @@ -24,7 +24,9 @@ def get_amazon_product_data(product: str = "laptop") -> DataFrame: ), "Accept-Language": "en-US, en;q=0.5", } - soup = BeautifulSoup(requests.get(url, headers=header).text, features="lxml") + soup = BeautifulSoup( + requests.get(url, headers=header, timeout=10).text, features="lxml" + ) # Initialize a Pandas dataframe with the column titles data_frame = DataFrame( columns=[ diff --git a/web_programming/get_imdb_top_250_movies_csv.py b/web_programming/get_imdb_top_250_movies_csv.py index e54b076ebd94..c914b29cb3b3 100644 --- a/web_programming/get_imdb_top_250_movies_csv.py +++ b/web_programming/get_imdb_top_250_movies_csv.py @@ -8,7 +8,7 @@ def get_imdb_top_250_movies(url: str = "") -> dict[str, float]: url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250" - soup = BeautifulSoup(requests.get(url).text, "html.parser") + soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser") titles = soup.find_all("td", attrs="titleColumn") ratings = soup.find_all("td", class_="ratingColumn imdbRating") return { diff --git a/web_programming/get_ip_geolocation.py b/web_programming/get_ip_geolocation.py index 62eaeafceb7e..574d287f0db1 100644 --- a/web_programming/get_ip_geolocation.py +++ b/web_programming/get_ip_geolocation.py @@ -8,7 +8,7 @@ def get_ip_geolocation(ip_address: str) -> str: url = f"https://ipinfo.io/{ip_address}/json" # Send a GET request to the API - response = requests.get(url) + response = requests.get(url, timeout=10) # Check if the HTTP request was successful response.raise_for_status() diff --git a/web_programming/get_top_billionaires.py b/web_programming/get_top_billionaires.py index 703b635eef82..24828b6d787c 100644 --- a/web_programming/get_top_billionaires.py +++ b/web_programming/get_top_billionaires.py @@ -57,7 +57,7 @@ def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]: Returns: List of top 10 realtime billionaires data. 
""" - response_json = requests.get(API_URL).json() + response_json = requests.get(API_URL, timeout=10).json() return [ { "Name": person["personName"], diff --git a/web_programming/get_top_hn_posts.py b/web_programming/get_top_hn_posts.py index fbb7c051a88e..f5d4f874c6c6 100644 --- a/web_programming/get_top_hn_posts.py +++ b/web_programming/get_top_hn_posts.py @@ -5,7 +5,7 @@ def get_hackernews_story(story_id: str) -> dict: url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty" - return requests.get(url).json() + return requests.get(url, timeout=10).json() def hackernews_top_stories(max_stories: int = 10) -> list[dict]: @@ -13,7 +13,7 @@ def hackernews_top_stories(max_stories: int = 10) -> list[dict]: Get the top max_stories posts from HackerNews - https://news.ycombinator.com/ """ url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty" - story_ids = requests.get(url).json()[:max_stories] + story_ids = requests.get(url, timeout=10).json()[:max_stories] return [get_hackernews_story(story_id) for story_id in story_ids] diff --git a/web_programming/giphy.py b/web_programming/giphy.py index a5c3f8f7493e..2bf3e3ea9c0b 100644 --- a/web_programming/giphy.py +++ b/web_programming/giphy.py @@ -11,7 +11,7 @@ def get_gifs(query: str, api_key: str = giphy_api_key) -> list: """ formatted_query = "+".join(query.split()) url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}" - gifs = requests.get(url).json()["data"] + gifs = requests.get(url, timeout=10).json()["data"] return [gif["url"] for gif in gifs] diff --git a/web_programming/instagram_crawler.py b/web_programming/instagram_crawler.py index 0816cd181051..df62735fb328 100644 --- a/web_programming/instagram_crawler.py +++ b/web_programming/instagram_crawler.py @@ -39,7 +39,7 @@ def get_json(self) -> dict: """ Return a dict of user information """ - html = requests.get(self.url, headers=headers).text + html = requests.get(self.url, headers=headers, timeout=10).text scripts = BeautifulSoup(html, "html.parser").find_all("script") try: return extract_user_profile(scripts[4]) diff --git a/web_programming/instagram_pic.py b/web_programming/instagram_pic.py index 2d987c1766dc..292cacc16c04 100644 --- a/web_programming/instagram_pic.py +++ b/web_programming/instagram_pic.py @@ -15,7 +15,7 @@ def download_image(url: str) -> str: A message indicating the result of the operation. """ try: - response = requests.get(url) + response = requests.get(url, timeout=10) response.raise_for_status() except requests.exceptions.RequestException as e: return f"An error occurred during the HTTP request to {url}: {e!r}" @@ -30,7 +30,7 @@ def download_image(url: str) -> str: return f"Image URL not found in meta tag {image_meta_tag}." 
try: - image_data = requests.get(image_url).content + image_data = requests.get(image_url, timeout=10).content except requests.exceptions.RequestException as e: return f"An error occurred during the HTTP request to {image_url}: {e!r}" if not image_data: diff --git a/web_programming/instagram_video.py b/web_programming/instagram_video.py index 1f1b0e297034..a4cddce25138 100644 --- a/web_programming/instagram_video.py +++ b/web_programming/instagram_video.py @@ -5,8 +5,8 @@ def download_video(url: str) -> bytes: base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url=" - video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"] - return requests.get(video_url).content + video_url = requests.get(base_url + url, timeout=10).json()[0]["urls"][0]["src"] + return requests.get(video_url, timeout=10).content if __name__ == "__main__": diff --git a/web_programming/nasa_data.py b/web_programming/nasa_data.py index 81125e0a4f05..33a6406c52a6 100644 --- a/web_programming/nasa_data.py +++ b/web_programming/nasa_data.py @@ -9,14 +9,14 @@ def get_apod_data(api_key: str) -> dict: Get your API Key from: https://api.nasa.gov/ """ url = "https://api.nasa.gov/planetary/apod" - return requests.get(url, params={"api_key": api_key}).json() + return requests.get(url, params={"api_key": api_key}, timeout=10).json() def save_apod(api_key: str, path: str = ".") -> dict: apod_data = get_apod_data(api_key) img_url = apod_data["url"] img_name = img_url.split("/")[-1] - response = requests.get(img_url, stream=True) + response = requests.get(img_url, stream=True, timeout=10) with open(f"{path}/{img_name}", "wb+") as img_file: shutil.copyfileobj(response.raw, img_file) @@ -29,7 +29,7 @@ def get_archive_data(query: str) -> dict: Get the data of a particular query from NASA archives """ url = "https://images-api.nasa.gov/search" - return requests.get(url, params={"q": query}).json() + return requests.get(url, params={"q": query}, timeout=10).json() if __name__ == "__main__": diff --git a/web_programming/open_google_results.py b/web_programming/open_google_results.py index f61e3666dd7e..52dd37d7b91a 100644 --- a/web_programming/open_google_results.py +++ b/web_programming/open_google_results.py @@ -16,6 +16,7 @@ res = requests.get( url, headers={"User-Agent": str(UserAgent().random)}, + timeout=10, ) try: diff --git a/web_programming/random_anime_character.py b/web_programming/random_anime_character.py index f15a9c05d9e5..aed932866258 100644 --- a/web_programming/random_anime_character.py +++ b/web_programming/random_anime_character.py @@ -12,7 +12,7 @@ def save_image(image_url: str, image_title: str) -> None: """ Saves the image of anime character """ - image = requests.get(image_url, headers=headers) + image = requests.get(image_url, headers=headers, timeout=10) with open(image_title, "wb") as file: file.write(image.content) @@ -21,7 +21,9 @@ def random_anime_character() -> tuple[str, str, str]: """ Returns the Title, Description, and Image Title of a random anime character . 
""" - soup = BeautifulSoup(requests.get(URL, headers=headers).text, "html.parser") + soup = BeautifulSoup( + requests.get(URL, headers=headers, timeout=10).text, "html.parser" + ) title = soup.find("meta", attrs={"property": "og:title"}).attrs["content"] image_url = soup.find("meta", attrs={"property": "og:image"}).attrs["content"] description = soup.find("p", id="description").get_text() diff --git a/web_programming/recaptcha_verification.py b/web_programming/recaptcha_verification.py index c9b691b28a8b..168862204fa9 100644 --- a/web_programming/recaptcha_verification.py +++ b/web_programming/recaptcha_verification.py @@ -56,7 +56,9 @@ def login_using_recaptcha(request): client_key = request.POST.get("g-recaptcha-response") # post recaptcha response to Google's recaptcha api - response = requests.post(url, data={"secret": secret_key, "response": client_key}) + response = requests.post( + url, data={"secret": secret_key, "response": client_key}, timeout=10 + ) # if the recaptcha api verified our keys if response.json().get("success", False): # authenticate the user diff --git a/web_programming/reddit.py b/web_programming/reddit.py index 1c165ecc49ec..6cc1a6b62009 100644 --- a/web_programming/reddit.py +++ b/web_programming/reddit.py @@ -31,6 +31,7 @@ def get_subreddit_data( response = requests.get( f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"User-agent": "A random string"}, + timeout=10, ) if response.status_code == 429: raise requests.HTTPError(response=response) diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py index 07429e9a9678..6b69018e6639 100644 --- a/web_programming/search_books_by_isbn.py +++ b/web_programming/search_books_by_isbn.py @@ -25,7 +25,7 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict: if new_olid.count("/") != 1: msg = f"{olid} is not a valid Open Library olid" raise ValueError(msg) - return requests.get(f"https://openlibrary.org/{new_olid}.json").json() + return requests.get(f"https://openlibrary.org/{new_olid}.json", timeout=10).json() def summarize_book(ol_book_data: dict) -> dict: diff --git a/web_programming/slack_message.py b/web_programming/slack_message.py index 5e97d6b64c75..d4d5658898ac 100644 --- a/web_programming/slack_message.py +++ b/web_programming/slack_message.py @@ -5,7 +5,9 @@ def send_slack_message(message_body: str, slack_url: str) -> None: headers = {"Content-Type": "application/json"} - response = requests.post(slack_url, json={"text": message_body}, headers=headers) + response = requests.post( + slack_url, json={"text": message_body}, headers=headers, timeout=10 + ) if response.status_code != 200: msg = ( "Request to slack returned an error " diff --git a/web_programming/world_covid19_stats.py b/web_programming/world_covid19_stats.py index ca81abdc4ce9..4948d8cfd43c 100644 --- a/web_programming/world_covid19_stats.py +++ b/web_programming/world_covid19_stats.py @@ -13,7 +13,7 @@ def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") """ Return a dict of current worldwide COVID-19 statistics """ - soup = BeautifulSoup(requests.get(url).text, "html.parser") + soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser") keys = soup.findAll("h1") values = soup.findAll("div", {"class": "maincounter-number"}) keys += soup.findAll("span", {"class": "panel-title"}) From dbfa21813ff6fe2d7b439dfd6daa60b14a64d24f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 
22 Apr 2024 21:43:19 +0200 Subject: [PATCH 1372/1543] [pre-commit.ci] pre-commit autoupdate (#11380) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.3.7 → v0.4.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.3.7...v0.4.1) - [github.com/tox-dev/pyproject-fmt: 1.7.0 → 1.8.0](https://github.com/tox-dev/pyproject-fmt/compare/1.7.0...1.8.0) * from keras import layers, models * Update lstm_prediction.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- computer_vision/cnn_classification.py | 2 +- machine_learning/lstm/lstm_prediction.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9472bcfa3e07..eedf6d939748 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.3.7 + rev: v0.4.1 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.7.0" + rev: "1.8.0" hooks: - id: pyproject-fmt diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py index b813b71033f3..115333eba0d1 100644 --- a/computer_vision/cnn_classification.py +++ b/computer_vision/cnn_classification.py @@ -25,7 +25,7 @@ # Importing the Keras libraries and packages import tensorflow as tf -from tensorflow.keras import layers, models +from keras import layers, models if __name__ == "__main__": # Initialising the CNN diff --git a/machine_learning/lstm/lstm_prediction.py b/machine_learning/lstm/lstm_prediction.py index f0fd12c9de7f..81ac5f01d3d6 100644 --- a/machine_learning/lstm/lstm_prediction.py +++ b/machine_learning/lstm/lstm_prediction.py @@ -7,9 +7,9 @@ import numpy as np import pandas as pd +from keras.layers import LSTM, Dense +from keras.models import Sequential from sklearn.preprocessing import MinMaxScaler -from tensorflow.keras.layers import LSTM, Dense -from tensorflow.keras.models import Sequential if __name__ == "__main__": """ From 79dc7c97acc492d657b5f2f50686cee5b0f64b30 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 22 Apr 2024 22:45:24 +0300 Subject: [PATCH 1373/1543] Enable ruff RUF001 rule (#11378) * Enable ruff RUF001 rule * Fix * Fix --- fuzzy_logic/fuzzy_operations.py | 6 +++--- physics/basic_orbital_capture.py | 6 +++--- physics/malus_law.py | 2 +- pyproject.toml | 1 - 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/fuzzy_logic/fuzzy_operations.py b/fuzzy_logic/fuzzy_operations.py index e41cd2120049..c5e4cbde019d 100644 --- a/fuzzy_logic/fuzzy_operations.py +++ b/fuzzy_logic/fuzzy_operations.py @@ -57,7 +57,7 @@ class FuzzySet: # Union Operations >>> siya.union(sheru) - FuzzySet(name='Siya ∪ Sheru', left_boundary=0.4, peak=0.7, right_boundary=1.0) + FuzzySet(name='Siya U Sheru', left_boundary=0.4, peak=0.7, right_boundary=1.0) """ name: str @@ -147,10 +147,10 @@ def union(self, other) -> FuzzySet: FuzzySet: A new fuzzy set representing the union. 
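RUF001 flags "ambiguous" Unicode characters in string literals, which is why this commit and the two that follow swap the union sign, en dashes, minus signs, multiplication signs and curly quotes for plain ASCII. A small standard-library sketch for identifying such look-alikes; the sample string simply collects a few of the characters replaced in these commits:

import unicodedata

for char in "∪–−×’":
    # e.g. U+2013 EN DASH, U+2212 MINUS SIGN, U+2019 RIGHT SINGLE QUOTATION MARK
    print(f"U+{ord(char):04X} {unicodedata.name(char)}")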
>>> FuzzySet("a", 0.1, 0.2, 0.3).union(FuzzySet("b", 0.4, 0.5, 0.6)) - FuzzySet(name='a ∪ b', left_boundary=0.1, peak=0.6, right_boundary=0.35) + FuzzySet(name='a U b', left_boundary=0.1, peak=0.6, right_boundary=0.35) """ return FuzzySet( - f"{self.name} ∪ {other.name}", + f"{self.name} U {other.name}", min(self.left_boundary, other.left_boundary), max(self.right_boundary, other.right_boundary), (self.peak + other.peak) / 2, diff --git a/physics/basic_orbital_capture.py b/physics/basic_orbital_capture.py index eeb45e60240c..a5434b5cb7cb 100644 --- a/physics/basic_orbital_capture.py +++ b/physics/basic_orbital_capture.py @@ -4,14 +4,14 @@ """ These two functions will return the radii of impact for a target object -of mass M and radius R as well as it's effective cross sectional area σ(sigma). -That is to say any projectile with velocity v passing within σ, will impact the +of mass M and radius R as well as it's effective cross sectional area sigma. +That is to say any projectile with velocity v passing within sigma, will impact the target object with mass M. The derivation of which is given at the bottom of this file. The derivation shows that a projectile does not need to aim directly at the target body in order to hit it, as R_capture>R_target. Astronomers refer to the effective -cross section for capture as σ=π*R_capture**2. +cross section for capture as sigma=π*R_capture**2. This algorithm does not account for an N-body problem. diff --git a/physics/malus_law.py b/physics/malus_law.py index ae77d45cf614..374b3423f8ff 100644 --- a/physics/malus_law.py +++ b/physics/malus_law.py @@ -31,7 +31,7 @@ Real polarizers are also not perfect blockers of the polarization orthogonal to their polarization axis; the ratio of the transmission of the unwanted component to the wanted component is called the extinction ratio, and varies from around -1:500 for Polaroid to about 1:106 for Glan–Taylor prism polarizers. +1:500 for Polaroid to about 1:106 for Glan-Taylor prism polarizers. Reference : "https://en.wikipedia.org/wiki/Polarizer#Malus's_law_and_other_properties" """ diff --git a/pyproject.toml b/pyproject.toml index ff22fba81c8a..0185f4d7b987 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts - "RUF001", # String contains ambiguous {}. Did you mean {}? "RUF002", # Docstring contains ambiguous {}. Did you mean {}? "RUF003", # Comment contains ambiguous {}. Did you mean {}? 
"S101", # Use of `assert` detected -- DO NOT FIX From 4700297b3e332701eed1d0667f3afefc5b9b66be Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 22 Apr 2024 22:51:47 +0300 Subject: [PATCH 1374/1543] Enable ruff RUF002 rule (#11377) * Enable ruff RUF002 rule * Fix --------- Co-authored-by: Christian Clauss --- backtracking/sudoku.py | 4 ++-- .../single_bit_manipulation_operations.py | 14 +++++++------- compression/burrows_wheeler.py | 2 +- compression/lempel_ziv.py | 4 ++-- compression/lempel_ziv_decompress.py | 4 ++-- data_structures/binary_tree/red_black_tree.py | 2 +- digital_image_processing/edge_detection/canny.py | 4 ++-- digital_image_processing/index_calculation.py | 2 +- dynamic_programming/combination_sum_iv.py | 2 +- electronics/coulombs_law.py | 4 ++-- hashes/fletcher16.py | 2 +- linear_algebra/lu_decomposition.py | 2 +- linear_algebra/src/schur_complement.py | 2 +- machine_learning/polynomial_regression.py | 4 ++-- maths/chudnovsky_algorithm.py | 2 +- maths/entropy.py | 4 ++-- maths/lucas_lehmer_primality_test.py | 4 ++-- maths/modular_division.py | 2 +- maths/numerical_analysis/bisection_2.py | 2 +- maths/numerical_analysis/nevilles_method.py | 2 +- maths/simultaneous_linear_equation_solver.py | 6 +++--- matrix/largest_square_area_in_matrix.py | 4 ++-- matrix/spiral_print.py | 2 +- neural_network/back_propagation_neural_network.py | 4 ++-- other/davis_putnam_logemann_loveland.py | 2 +- other/fischer_yates_shuffle.py | 2 +- physics/archimedes_principle_of_buoyant_force.py | 2 +- physics/center_of_mass.py | 8 ++++---- physics/centripetal_force.py | 2 +- physics/lorentz_transformation_four_vector.py | 14 +++++++------- physics/reynolds_number.py | 4 ++-- physics/terminal_velocity.py | 4 ++-- project_euler/problem_004/sol1.py | 2 +- project_euler/problem_004/sol2.py | 2 +- project_euler/problem_008/sol1.py | 2 +- project_euler/problem_008/sol2.py | 2 +- project_euler/problem_008/sol3.py | 2 +- project_euler/problem_015/sol1.py | 4 ++-- project_euler/problem_020/sol1.py | 4 ++-- project_euler/problem_020/sol2.py | 4 ++-- project_euler/problem_020/sol3.py | 4 ++-- project_euler/problem_020/sol4.py | 4 ++-- project_euler/problem_022/sol1.py | 2 +- project_euler/problem_022/sol2.py | 2 +- project_euler/problem_025/sol1.py | 2 +- project_euler/problem_025/sol2.py | 2 +- project_euler/problem_025/sol3.py | 2 +- project_euler/problem_027/sol1.py | 8 ++++---- project_euler/problem_031/sol1.py | 10 +++++----- project_euler/problem_031/sol2.py | 12 ++++++------ project_euler/problem_032/sol32.py | 2 +- project_euler/problem_038/sol1.py | 6 +++--- project_euler/problem_040/sol1.py | 2 +- project_euler/problem_044/sol1.py | 6 +++--- project_euler/problem_045/sol1.py | 4 ++-- project_euler/problem_046/sol1.py | 12 ++++++------ project_euler/problem_047/sol1.py | 10 +++++----- project_euler/problem_053/sol1.py | 2 +- project_euler/problem_097/sol1.py | 4 ++-- project_euler/problem_104/sol1.py | 2 +- project_euler/problem_120/sol1.py | 2 +- project_euler/problem_123/sol1.py | 2 +- project_euler/problem_135/sol1.py | 4 ++-- project_euler/problem_144/sol1.py | 4 ++-- project_euler/problem_174/sol1.py | 2 +- pyproject.toml | 1 + strings/jaro_winkler.py | 2 +- strings/manacher.py | 2 +- strings/prefix_function.py | 2 +- 69 files changed, 132 insertions(+), 131 deletions(-) diff --git a/backtracking/sudoku.py b/backtracking/sudoku.py index 8f5459c76d45..cabeebb90433 100644 --- a/backtracking/sudoku.py +++ b/backtracking/sudoku.py @@ -1,7 +1,7 @@ """ -Given a partially filled 9×9 2D array, the 
objective is to fill a 9×9 +Given a partially filled 9x9 2D array, the objective is to fill a 9x9 square grid with digits numbered 1 to 9, so that every row, column, and -and each of the nine 3×3 sub-grids contains all of the digits. +and each of the nine 3x3 sub-grids contains all of the digits. This can be solved using Backtracking and is similar to n-queens. We check to see if a cell is safe or not and recursively call the diff --git a/bit_manipulation/single_bit_manipulation_operations.py b/bit_manipulation/single_bit_manipulation_operations.py index b43ff07b776f..fcbf033ccb24 100644 --- a/bit_manipulation/single_bit_manipulation_operations.py +++ b/bit_manipulation/single_bit_manipulation_operations.py @@ -8,8 +8,8 @@ def set_bit(number: int, position: int) -> int: Set the bit at position to 1. Details: perform bitwise or for given number and X. - Where X is a number with all the bits – zeroes and bit on given - position – one. + Where X is a number with all the bits - zeroes and bit on given + position - one. >>> set_bit(0b1101, 1) # 0b1111 15 @@ -26,8 +26,8 @@ def clear_bit(number: int, position: int) -> int: Set the bit at position to 0. Details: perform bitwise and for given number and X. - Where X is a number with all the bits – ones and bit on given - position – zero. + Where X is a number with all the bits - ones and bit on given + position - zero. >>> clear_bit(0b10010, 1) # 0b10000 16 @@ -42,8 +42,8 @@ def flip_bit(number: int, position: int) -> int: Flip the bit at position. Details: perform bitwise xor for given number and X. - Where X is a number with all the bits – zeroes and bit on given - position – one. + Where X is a number with all the bits - zeroes and bit on given + position - one. >>> flip_bit(0b101, 1) # 0b111 7 @@ -79,7 +79,7 @@ def get_bit(number: int, position: int) -> int: Get the bit at the given position Details: perform bitwise and for the given number and X, - Where X is a number with all the bits – zeroes and bit on given position – one. + Where X is a number with all the bits - zeroes and bit on given position - one. If the result is not equal to 0, then the bit on the given position is 1, else 0. >>> get_bit(0b1010, 0) diff --git a/compression/burrows_wheeler.py b/compression/burrows_wheeler.py index ce493a70c8f9..857d677c904e 100644 --- a/compression/burrows_wheeler.py +++ b/compression/burrows_wheeler.py @@ -1,7 +1,7 @@ """ https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform -The Burrows–Wheeler transform (BWT, also called block-sorting compression) +The Burrows-Wheeler transform (BWT, also called block-sorting compression) rearranges a character string into runs of similar characters. 
This is useful for compression, since it tends to be easy to compress a string that has runs of repeated characters by techniques such as move-to-front transform and diff --git a/compression/lempel_ziv.py b/compression/lempel_ziv.py index ac3f0c6cfc06..2751a0ebcdb6 100644 --- a/compression/lempel_ziv.py +++ b/compression/lempel_ziv.py @@ -1,5 +1,5 @@ """ -One of the several implementations of Lempel–Ziv–Welch compression algorithm +One of the several implementations of Lempel-Ziv-Welch compression algorithm https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch """ @@ -43,7 +43,7 @@ def add_key_to_lexicon( def compress_data(data_bits: str) -> str: """ - Compresses given data_bits using Lempel–Ziv–Welch compression algorithm + Compresses given data_bits using Lempel-Ziv-Welch compression algorithm and returns the result as a string """ lexicon = {"0": "0", "1": "1"} diff --git a/compression/lempel_ziv_decompress.py b/compression/lempel_ziv_decompress.py index 0e49c83fb790..225e96236c2c 100644 --- a/compression/lempel_ziv_decompress.py +++ b/compression/lempel_ziv_decompress.py @@ -1,5 +1,5 @@ """ -One of the several implementations of Lempel–Ziv–Welch decompression algorithm +One of the several implementations of Lempel-Ziv-Welch decompression algorithm https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch """ @@ -26,7 +26,7 @@ def read_file_binary(file_path: str) -> str: def decompress_data(data_bits: str) -> str: """ - Decompresses given data_bits using Lempel–Ziv–Welch compression algorithm + Decompresses given data_bits using Lempel-Ziv-Welch compression algorithm and returns the result as a string """ lexicon = {"0": "0", "1": "1"} diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index e68d8d1e3735..a9ecf897c701 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -17,7 +17,7 @@ class RedBlackTree: and slower for reading in the average case, though, because they're both balanced binary search trees, both will get the same asymptotic performance. - To read more about them, https://en.wikipedia.org/wiki/Red–black_tree + To read more about them, https://en.wikipedia.org/wiki/Red-black_tree Unless otherwise specified, all asymptotic runtimes are specified in terms of the size of the tree. """ diff --git a/digital_image_processing/edge_detection/canny.py b/digital_image_processing/edge_detection/canny.py index f8cbeedb3874..944161c31cfc 100644 --- a/digital_image_processing/edge_detection/canny.py +++ b/digital_image_processing/edge_detection/canny.py @@ -74,9 +74,9 @@ def detect_high_low_threshold( image_shape, destination, threshold_low, threshold_high, weak, strong ): """ - High-Low threshold detection. If an edge pixel’s gradient value is higher + High-Low threshold detection. If an edge pixel's gradient value is higher than the high threshold value, it is marked as a strong edge pixel. If an - edge pixel’s gradient value is smaller than the high threshold value and + edge pixel's gradient value is smaller than the high threshold value and larger than the low threshold value, it is marked as a weak edge pixel. If an edge pixel's value is smaller than the low threshold value, it will be suppressed. 
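The reworded canny.py docstring above describes the double-threshold step: pixels above the high threshold become strong edges, pixels between the two thresholds become weak edges, and the rest are suppressed. A compact NumPy sketch of that classification on a made-up gradient map with illustrative thresholds:

import numpy as np

gradient = np.array([[0.10, 0.50, 0.90],
                     [0.30, 0.70, 0.20],
                     [0.80, 0.05, 0.60]])
threshold_low, threshold_high = 0.2, 0.6

strong = gradient >= threshold_high                                # kept as edges
weak = (gradient >= threshold_low) & (gradient < threshold_high)   # kept only if connected to a strong edge
suppressed = gradient < threshold_low                              # discarded

print(strong.sum(), weak.sum(), suppressed.sum())  # 4 3 2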
diff --git a/digital_image_processing/index_calculation.py b/digital_image_processing/index_calculation.py index 67830668b0da..988f8e72b9a8 100644 --- a/digital_image_processing/index_calculation.py +++ b/digital_image_processing/index_calculation.py @@ -182,7 +182,7 @@ def arv12(self): Atmospherically Resistant Vegetation Index 2 https://www.indexdatabase.de/db/i-single.php?id=396 :return: index - −0.18+1.17*(self.nir−self.red)/(self.nir+self.red) + -0.18+1.17*(self.nir-self.red)/(self.nir+self.red) """ return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) diff --git a/dynamic_programming/combination_sum_iv.py b/dynamic_programming/combination_sum_iv.py index 4526729b70b7..113c06a27a9e 100644 --- a/dynamic_programming/combination_sum_iv.py +++ b/dynamic_programming/combination_sum_iv.py @@ -18,7 +18,7 @@ The basic idea is to go over recursively to find the way such that the sum of chosen elements is “tar”. For every element, we have two choices 1. Include the element in our set of chosen elements. - 2. Don’t include the element in our set of chosen elements. + 2. Don't include the element in our set of chosen elements. """ diff --git a/electronics/coulombs_law.py b/electronics/coulombs_law.py index 18c1a8179eb6..74bbea5ea8ec 100644 --- a/electronics/coulombs_law.py +++ b/electronics/coulombs_law.py @@ -20,8 +20,8 @@ def couloumbs_law( Reference ---------- - Coulomb (1785) "Premier mémoire sur l’électricité et le magnétisme," - Histoire de l’Académie Royale des Sciences, pp. 569–577. + Coulomb (1785) "Premier mémoire sur l'électricité et le magnétisme," + Histoire de l'Académie Royale des Sciences, pp. 569-577. Parameters ---------- diff --git a/hashes/fletcher16.py b/hashes/fletcher16.py index 7c23c98d72c5..add8e185bc06 100644 --- a/hashes/fletcher16.py +++ b/hashes/fletcher16.py @@ -1,6 +1,6 @@ """ The Fletcher checksum is an algorithm for computing a position-dependent -checksum devised by John G. Fletcher (1934–2012) at Lawrence Livermore Labs +checksum devised by John G. Fletcher (1934-2012) at Lawrence Livermore Labs in the late 1970s.[1] The objective of the Fletcher checksum was to provide error-detection properties approaching those of a cyclic redundancy check but with the lower computational effort associated diff --git a/linear_algebra/lu_decomposition.py b/linear_algebra/lu_decomposition.py index 1d364163d9a7..3620674835cd 100644 --- a/linear_algebra/lu_decomposition.py +++ b/linear_algebra/lu_decomposition.py @@ -1,5 +1,5 @@ """ -Lower–upper (LU) decomposition factors a matrix as a product of a lower +Lower-upper (LU) decomposition factors a matrix as a product of a lower triangular matrix and an upper triangular matrix. A square matrix has an LU decomposition under the following conditions: - If the matrix is invertible, then it has an LU decomposition if and only diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py index 1cc084043856..7c79bb70abfc 100644 --- a/linear_algebra/src/schur_complement.py +++ b/linear_algebra/src/schur_complement.py @@ -18,7 +18,7 @@ def schur_complement( the pseudo_inv argument. 
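Assuming the usual 2x2 block convention M = [[A, B], [Bᵀ, C]] (the exact convention used by schur_complement.py is not shown in this hunk), the Schur complement of A is C - Bᵀ A⁻¹ B. A small NumPy check with made-up blocks:

import numpy as np

a = np.array([[4.0, 0.0], [0.0, 4.0]])  # illustrative symmetric positive-definite block
b = np.array([[1.0], [2.0]])
c = np.array([[3.0]])

schur_of_a = c - b.T @ np.linalg.inv(a) @ b
print(schur_of_a)  # [[1.75]]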
Link to Wiki: https://en.wikipedia.org/wiki/Schur_complement - See also Convex Optimization – Boyd and Vandenberghe, A.5.5 + See also Convex Optimization - Boyd and Vandenberghe, A.5.5 >>> import numpy as np >>> a = np.array([[1, 2], [2, 1]]) >>> b = np.array([[0, 3], [3, 0]]) diff --git a/machine_learning/polynomial_regression.py b/machine_learning/polynomial_regression.py index 5bafea96f41e..19f7dc994017 100644 --- a/machine_learning/polynomial_regression.py +++ b/machine_learning/polynomial_regression.py @@ -11,7 +11,7 @@ β = (XᵀX)⁻¹Xᵀy = X⁺y -where X is the design matrix, y is the response vector, and X⁺ denotes the Moore–Penrose +where X is the design matrix, y is the response vector, and X⁺ denotes the Moore-Penrose pseudoinverse of X. In the case of polynomial regression, the design matrix is |1 x₁ x₁² ⋯ x₁ᵐ| @@ -106,7 +106,7 @@ def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None: β = (XᵀX)⁻¹Xᵀy = X⁺y - where X⁺ denotes the Moore–Penrose pseudoinverse of the design matrix X. This + where X⁺ denotes the Moore-Penrose pseudoinverse of the design matrix X. This function computes X⁺ using singular value decomposition (SVD). References: diff --git a/maths/chudnovsky_algorithm.py b/maths/chudnovsky_algorithm.py index aaee7462822e..d122bf0756f7 100644 --- a/maths/chudnovsky_algorithm.py +++ b/maths/chudnovsky_algorithm.py @@ -5,7 +5,7 @@ def pi(precision: int) -> str: """ The Chudnovsky algorithm is a fast method for calculating the digits of PI, - based on Ramanujan’s PI formulae. + based on Ramanujan's PI formulae. https://en.wikipedia.org/wiki/Chudnovsky_algorithm diff --git a/maths/entropy.py b/maths/entropy.py index 39ec67bea038..b816f1d193f7 100644 --- a/maths/entropy.py +++ b/maths/entropy.py @@ -21,10 +21,10 @@ def calculate_prob(text: str) -> None: :return: Prints 1) Entropy of information based on 1 alphabet 2) Entropy of information based on couples of 2 alphabet - 3) print Entropy of H(X n∣Xn−1) + 3) print Entropy of H(X n|Xn-1) Text from random books. Also, random quotes. - >>> text = ("Behind Winston’s back the voice " + >>> text = ("Behind Winston's back the voice " ... "from the telescreen was still " ... "babbling and the overfulfilment") >>> calculate_prob(text) diff --git a/maths/lucas_lehmer_primality_test.py b/maths/lucas_lehmer_primality_test.py index 292387414dee..af5c81133044 100644 --- a/maths/lucas_lehmer_primality_test.py +++ b/maths/lucas_lehmer_primality_test.py @@ -1,12 +1,12 @@ """ -In mathematics, the Lucas–Lehmer test (LLT) is a primality test for Mersenne +In mathematics, the Lucas-Lehmer test (LLT) is a primality test for Mersenne numbers. https://en.wikipedia.org/wiki/Lucas%E2%80%93Lehmer_primality_test A Mersenne number is a number that is one less than a power of two. That is M_p = 2^p - 1 https://en.wikipedia.org/wiki/Mersenne_prime -The Lucas–Lehmer test is the primality test used by the +The Lucas-Lehmer test is the primality test used by the Great Internet Mersenne Prime Search (GIMPS) to locate large primes. """ diff --git a/maths/modular_division.py b/maths/modular_division.py index 260d5683705d..2f8f4479b27d 100644 --- a/maths/modular_division.py +++ b/maths/modular_division.py @@ -9,7 +9,7 @@ def modular_division(a: int, b: int, n: int) -> int: GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor ) Given three integers a, b, and n, such that gcd(a,n)=1 and n>1, the algorithm should - return an integer x such that 0≤x≤n−1, and b/a=x(modn) (that is, b=ax(modn)). 
+ return an integer x such that 0≤x≤n-1, and b/a=x(modn) (that is, b=ax(modn)). Theorem: a has a multiplicative inverse modulo n iff gcd(a,n) = 1 diff --git a/maths/numerical_analysis/bisection_2.py b/maths/numerical_analysis/bisection_2.py index 45f26d8d88e4..68ba6577ce29 100644 --- a/maths/numerical_analysis/bisection_2.py +++ b/maths/numerical_analysis/bisection_2.py @@ -1,5 +1,5 @@ """ -Given a function on floating number f(x) and two floating numbers ‘a’ and ‘b’ such that +Given a function on floating number f(x) and two floating numbers `a` and `b` such that f(a) * f(b) < 0 and f(x) is continuous in [a, b]. Here f(x) represents algebraic or transcendental equation. Find root of function in interval [a, b] (Or find a value of x such that f(x) is 0) diff --git a/maths/numerical_analysis/nevilles_method.py b/maths/numerical_analysis/nevilles_method.py index 256b61f5f218..25c93ac6c531 100644 --- a/maths/numerical_analysis/nevilles_method.py +++ b/maths/numerical_analysis/nevilles_method.py @@ -1,7 +1,7 @@ """ Python program to show how to interpolate and evaluate a polynomial using Neville's method. -Neville’s method evaluates a polynomial that passes through a +Neville's method evaluates a polynomial that passes through a given set of x and y points for a particular x value (x0) using the Newton polynomial form. Reference: diff --git a/maths/simultaneous_linear_equation_solver.py b/maths/simultaneous_linear_equation_solver.py index 1287b2002d00..9685a33e82fe 100644 --- a/maths/simultaneous_linear_equation_solver.py +++ b/maths/simultaneous_linear_equation_solver.py @@ -2,10 +2,10 @@ https://en.wikipedia.org/wiki/Augmented_matrix This algorithm solves simultaneous linear equations of the form -λa + λb + λc + λd + ... = γ as [λ, λ, λ, λ, ..., γ] -Where λ & γ are individual coefficients, the no. of equations = no. of coefficients - 1 +λa + λb + λc + λd + ... = y as [λ, λ, λ, λ, ..., y] +Where λ & y are individual coefficients, the no. of equations = no. of coefficients - 1 -Note in order to work there must exist 1 equation where all instances of λ and γ != 0 +Note in order to work there must exist 1 equation where all instances of λ and y != 0 """ diff --git a/matrix/largest_square_area_in_matrix.py b/matrix/largest_square_area_in_matrix.py index a93369c56bbd..16263fb798f1 100644 --- a/matrix/largest_square_area_in_matrix.py +++ b/matrix/largest_square_area_in_matrix.py @@ -31,7 +31,7 @@ Approach: We initialize another matrix (dp) with the same dimensions -as the original one initialized with all 0’s. +as the original one initialized with all 0's. dp_array(i,j) represents the side length of the maximum square whose bottom right corner is the cell with index (i,j) in the original matrix. @@ -39,7 +39,7 @@ Starting from index (0,0), for every 1 found in the original matrix, we update the value of the current element as -dp_array(i,j)=dp_array(dp(i−1,j),dp_array(i−1,j−1),dp_array(i,j−1)) + 1. +dp_array(i,j)=dp_array(dp(i-1,j),dp_array(i-1,j-1),dp_array(i,j-1)) + 1. """ diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index c16dde69cb56..88bde1db594d 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -89,7 +89,7 @@ def spiral_traversal(matrix: list[list]) -> list[int]: Algorithm: Step 1. first pop the 0 index list. (which is [1,2,3,4] and concatenate the output of [step 2]) - Step 2. Now perform matrix’s Transpose operation (Change rows to column + Step 2. 
Now perform matrix's Transpose operation (Change rows to column and vice versa) and reverse the resultant matrix. Step 3. Pass the output of [2nd step], to same recursive function till base case hits. diff --git a/neural_network/back_propagation_neural_network.py b/neural_network/back_propagation_neural_network.py index 6131a13e945e..182f759c5fc7 100644 --- a/neural_network/back_propagation_neural_network.py +++ b/neural_network/back_propagation_neural_network.py @@ -2,10 +2,10 @@ """ -A Framework of Back Propagation Neural Network(BP) model +A Framework of Back Propagation Neural Network (BP) model Easy to use: - * add many layers as you want !!! + * add many layers as you want ! ! ! * clearly see how the loss decreasing Easy to expand: * more activation functions diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index 3a76f3dfef08..0f3100b1bc2e 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 """ -Davis–Putnam–Logemann–Loveland (DPLL) algorithm is a complete, backtracking-based +Davis-Putnam-Logemann-Loveland (DPLL) algorithm is a complete, backtracking-based search algorithm for deciding the satisfiability of propositional logic formulae in conjunctive normal form, i.e, for solving the Conjunctive Normal Form SATisfiability (CNF-SAT) problem. diff --git a/other/fischer_yates_shuffle.py b/other/fischer_yates_shuffle.py index 37e11479a4c9..5e90b10edd89 100644 --- a/other/fischer_yates_shuffle.py +++ b/other/fischer_yates_shuffle.py @@ -1,6 +1,6 @@ #!/usr/bin/python """ -The Fisher–Yates shuffle is an algorithm for generating a random permutation of a +The Fisher-Yates shuffle is an algorithm for generating a random permutation of a finite sequence. For more details visit wikipedia/Fischer-Yates-Shuffle. diff --git a/physics/archimedes_principle_of_buoyant_force.py b/physics/archimedes_principle_of_buoyant_force.py index 71043e0e1111..38f1a0a83832 100644 --- a/physics/archimedes_principle_of_buoyant_force.py +++ b/physics/archimedes_principle_of_buoyant_force.py @@ -3,7 +3,7 @@ fluid. This principle was discovered by the Greek mathematician Archimedes. Equation for calculating buoyant force: -Fb = ρ * V * g +Fb = p * V * g https://en.wikipedia.org/wiki/Archimedes%27_principle """ diff --git a/physics/center_of_mass.py b/physics/center_of_mass.py index 59c3b807f401..7a20e71be801 100644 --- a/physics/center_of_mass.py +++ b/physics/center_of_mass.py @@ -16,8 +16,8 @@ is the particle equivalent of a given object for the application of Newton's laws of motion. -In the case of a system of particles P_i, i = 1, ..., n , each with mass m_i that are -located in space with coordinates r_i, i = 1, ..., n , the coordinates R of the center +In the case of a system of particles P_i, i = 1, ..., n , each with mass m_i that are +located in space with coordinates r_i, i = 1, ..., n , the coordinates R of the center of mass corresponds to: R = (Σ(mi * ri) / Σ(mi)) @@ -36,8 +36,8 @@ def center_of_mass(particles: list[Particle]) -> Coord3D: Input Parameters ---------------- particles: list(Particle): - A list of particles where each particle is a tuple with it´s (x, y, z) position and - it´s mass. + A list of particles where each particle is a tuple with it's (x, y, z) position and + it's mass. 
Returns ------- diff --git a/physics/centripetal_force.py b/physics/centripetal_force.py index 04069d256468..a4c624582475 100644 --- a/physics/centripetal_force.py +++ b/physics/centripetal_force.py @@ -6,7 +6,7 @@ The unit of centripetal force is newton. The centripetal force is always directed perpendicular to the -direction of the object’s displacement. Using Newton’s second +direction of the object's displacement. Using Newton's second law of motion, it is found that the centripetal force of an object moving in a circular path always acts towards the centre of the circle. The Centripetal Force Formula is given as the product of mass (in kg) diff --git a/physics/lorentz_transformation_four_vector.py b/physics/lorentz_transformation_four_vector.py index f4fda4dff8cd..3b0fd83d45df 100644 --- a/physics/lorentz_transformation_four_vector.py +++ b/physics/lorentz_transformation_four_vector.py @@ -12,13 +12,13 @@ with respect to X, then the Lorentz transformation from X to X' is X' = BX, where - | γ -γβ 0 0| -B = |-γβ γ 0 0| + | y -γβ 0 0| +B = |-γβ y 0 0| | 0 0 1 0| | 0 0 0 1| is the matrix describing the Lorentz boost between X and X', -γ = 1 / √(1 - v²/c²) is the Lorentz factor, and β = v/c is the velocity as +y = 1 / √(1 - v²/c²) is the Lorentz factor, and β = v/c is the velocity as a fraction of c. Reference: https://en.wikipedia.org/wiki/Lorentz_transformation @@ -63,7 +63,7 @@ def beta(velocity: float) -> float: def gamma(velocity: float) -> float: """ - Calculate the Lorentz factor γ = 1 / √(1 - v²/c²) for a given velocity + Calculate the Lorentz factor y = 1 / √(1 - v²/c²) for a given velocity >>> gamma(4) 1.0000000000000002 >>> gamma(1e5) @@ -90,12 +90,12 @@ def transformation_matrix(velocity: float) -> np.ndarray: """ Calculate the Lorentz transformation matrix for movement in the x direction: - | γ -γβ 0 0| - |-γβ γ 0 0| + | y -γβ 0 0| + |-γβ y 0 0| | 0 0 1 0| | 0 0 0 1| - where γ is the Lorentz factor and β is the velocity as a fraction of c + where y is the Lorentz factor and β is the velocity as a fraction of c >>> transformation_matrix(29979245) array([[ 1.00503781, -0.10050378, 0. , 0. ], [-0.10050378, 1.00503781, 0. , 0. ], diff --git a/physics/reynolds_number.py b/physics/reynolds_number.py index dffe690f8822..c24a9e002855 100644 --- a/physics/reynolds_number.py +++ b/physics/reynolds_number.py @@ -8,10 +8,10 @@ viscous forces. R = Inertial Forces / Viscous Forces -R = (ρ * V * D)/μ +R = (p * V * D)/μ where : -ρ = Density of fluid (in Kg/m^3) +p = Density of fluid (in Kg/m^3) D = Diameter of pipe through which fluid flows (in m) V = Velocity of flow of the fluid (in m/s) μ = Viscosity of the fluid (in Ns/m^2) diff --git a/physics/terminal_velocity.py b/physics/terminal_velocity.py index cec54162e2b4..16714bd02671 100644 --- a/physics/terminal_velocity.py +++ b/physics/terminal_velocity.py @@ -8,13 +8,13 @@ object. The acceleration of the object is zero as the net force acting on the object is zero. 
-Vt = ((2 * m * g)/(ρ * A * Cd))^0.5 +Vt = ((2 * m * g)/(p * A * Cd))^0.5 where : Vt = Terminal velocity (in m/s) m = Mass of the falling object (in Kg) g = Acceleration due to gravity (value taken : imported from scipy) -ρ = Density of the fluid through which the object is falling (in Kg/m^3) +p = Density of the fluid through which the object is falling (in Kg/m^3) A = Projected area of the object (in m^2) Cd = Drag coefficient (dimensionless) diff --git a/project_euler/problem_004/sol1.py b/project_euler/problem_004/sol1.py index f237afdd942d..f80a3253e741 100644 --- a/project_euler/problem_004/sol1.py +++ b/project_euler/problem_004/sol1.py @@ -4,7 +4,7 @@ Largest palindrome product A palindromic number reads the same both ways. The largest palindrome made -from the product of two 2-digit numbers is 9009 = 91 × 99. +from the product of two 2-digit numbers is 9009 = 91 x 99. Find the largest palindrome made from the product of two 3-digit numbers. diff --git a/project_euler/problem_004/sol2.py b/project_euler/problem_004/sol2.py index abc880966d58..1fa75e7d0c83 100644 --- a/project_euler/problem_004/sol2.py +++ b/project_euler/problem_004/sol2.py @@ -4,7 +4,7 @@ Largest palindrome product A palindromic number reads the same both ways. The largest palindrome made -from the product of two 2-digit numbers is 9009 = 91 × 99. +from the product of two 2-digit numbers is 9009 = 91 x 99. Find the largest palindrome made from the product of two 3-digit numbers. diff --git a/project_euler/problem_008/sol1.py b/project_euler/problem_008/sol1.py index 69dd1b4736c1..adbac8d5ad1f 100644 --- a/project_euler/problem_008/sol1.py +++ b/project_euler/problem_008/sol1.py @@ -4,7 +4,7 @@ Largest product in a series The four adjacent digits in the 1000-digit number that have the greatest -product are 9 × 9 × 8 × 9 = 5832. +product are 9 x 9 x 8 x 9 = 5832. 73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 diff --git a/project_euler/problem_008/sol2.py b/project_euler/problem_008/sol2.py index f83cb1db30b6..e48231e4023b 100644 --- a/project_euler/problem_008/sol2.py +++ b/project_euler/problem_008/sol2.py @@ -4,7 +4,7 @@ Largest product in a series The four adjacent digits in the 1000-digit number that have the greatest -product are 9 × 9 × 8 × 9 = 5832. +product are 9 x 9 x 8 x 9 = 5832. 73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 diff --git a/project_euler/problem_008/sol3.py b/project_euler/problem_008/sol3.py index bf3bcb05b7e9..0d319b9684dd 100644 --- a/project_euler/problem_008/sol3.py +++ b/project_euler/problem_008/sol3.py @@ -4,7 +4,7 @@ Largest product in a series The four adjacent digits in the 1000-digit number that have the greatest -product are 9 × 9 × 8 × 9 = 5832. +product are 9 x 9 x 8 x 9 = 5832. 73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 diff --git a/project_euler/problem_015/sol1.py b/project_euler/problem_015/sol1.py index fd9014a406f6..3c9dae1aed77 100644 --- a/project_euler/problem_015/sol1.py +++ b/project_euler/problem_015/sol1.py @@ -1,9 +1,9 @@ """ Problem 15: https://projecteuler.net/problem=15 -Starting in the top left corner of a 2×2 grid, and only being able to move to +Starting in the top left corner of a 2x2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom right corner. -How many such routes are there through a 20×20 grid? 
+How many such routes are there through a 20x20 grid? """ from math import factorial diff --git a/project_euler/problem_020/sol1.py b/project_euler/problem_020/sol1.py index b472024e54c0..1439bdca38e6 100644 --- a/project_euler/problem_020/sol1.py +++ b/project_euler/problem_020/sol1.py @@ -1,9 +1,9 @@ """ Problem 20: https://projecteuler.net/problem=20 -n! means n × (n − 1) × ... × 3 × 2 × 1 +n! means n x (n - 1) x ... x 3 x 2 x 1 -For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, +For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! diff --git a/project_euler/problem_020/sol2.py b/project_euler/problem_020/sol2.py index a1d56ade7708..61684cd5ef6d 100644 --- a/project_euler/problem_020/sol2.py +++ b/project_euler/problem_020/sol2.py @@ -1,9 +1,9 @@ """ Problem 20: https://projecteuler.net/problem=20 -n! means n × (n − 1) × ... × 3 × 2 × 1 +n! means n x (n - 1) x ... x 3 x 2 x 1 -For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, +For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! diff --git a/project_euler/problem_020/sol3.py b/project_euler/problem_020/sol3.py index 1886e05463f4..8984def9c34e 100644 --- a/project_euler/problem_020/sol3.py +++ b/project_euler/problem_020/sol3.py @@ -1,9 +1,9 @@ """ Problem 20: https://projecteuler.net/problem=20 -n! means n × (n − 1) × ... × 3 × 2 × 1 +n! means n x (n - 1) x ... x 3 x 2 x 1 -For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, +For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! diff --git a/project_euler/problem_020/sol4.py b/project_euler/problem_020/sol4.py index b32ce309dfa6..511ac81e176b 100644 --- a/project_euler/problem_020/sol4.py +++ b/project_euler/problem_020/sol4.py @@ -1,9 +1,9 @@ """ Problem 20: https://projecteuler.net/problem=20 -n! means n × (n − 1) × ... × 3 × 2 × 1 +n! means n x (n - 1) x ... x 3 x 2 x 1 -For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, +For example, 10! = 10 x 9 x ... x 3 x 2 x 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! diff --git a/project_euler/problem_022/sol1.py b/project_euler/problem_022/sol1.py index b6386186e7df..c4af5dfa81df 100644 --- a/project_euler/problem_022/sol1.py +++ b/project_euler/problem_022/sol1.py @@ -10,7 +10,7 @@ For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would -obtain a score of 938 × 53 = 49714. +obtain a score of 938 x 53 = 49714. What is the total of all the name scores in the file? """ diff --git a/project_euler/problem_022/sol2.py b/project_euler/problem_022/sol2.py index f7092ea1cd12..9c22b6bba0cc 100644 --- a/project_euler/problem_022/sol2.py +++ b/project_euler/problem_022/sol2.py @@ -10,7 +10,7 @@ For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would -obtain a score of 938 × 53 = 49714. +obtain a score of 938 x 53 = 49714. What is the total of all the name scores in the file? 
""" diff --git a/project_euler/problem_025/sol1.py b/project_euler/problem_025/sol1.py index 803464b5d786..b3bbb56d20be 100644 --- a/project_euler/problem_025/sol1.py +++ b/project_euler/problem_025/sol1.py @@ -1,7 +1,7 @@ """ The Fibonacci sequence is defined by the recurrence relation: - Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. + Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1. Hence the first 12 terms will be: diff --git a/project_euler/problem_025/sol2.py b/project_euler/problem_025/sol2.py index 9e950b355f7a..a0f056023bc9 100644 --- a/project_euler/problem_025/sol2.py +++ b/project_euler/problem_025/sol2.py @@ -1,7 +1,7 @@ """ The Fibonacci sequence is defined by the recurrence relation: - Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. + Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1. Hence the first 12 terms will be: diff --git a/project_euler/problem_025/sol3.py b/project_euler/problem_025/sol3.py index 0b9f3a0c84ef..e33b159ac65c 100644 --- a/project_euler/problem_025/sol3.py +++ b/project_euler/problem_025/sol3.py @@ -1,7 +1,7 @@ """ The Fibonacci sequence is defined by the recurrence relation: - Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. + Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1. Hence the first 12 terms will be: diff --git a/project_euler/problem_027/sol1.py b/project_euler/problem_027/sol1.py index c93e2b4fa251..48755ec19763 100644 --- a/project_euler/problem_027/sol1.py +++ b/project_euler/problem_027/sol1.py @@ -9,12 +9,12 @@ It turns out that the formula will produce 40 primes for the consecutive values n = 0 to 39. However, when n = 40, 402 + 40 + 41 = 40(40 + 1) + 41 is divisible by 41, and certainly when n = 41, 412 + 41 + 41 is clearly divisible by 41. -The incredible formula n2 − 79n + 1601 was discovered, which produces 80 primes -for the consecutive values n = 0 to 79. The product of the coefficients, −79 and -1601, is −126479. +The incredible formula n2 - 79n + 1601 was discovered, which produces 80 primes +for the consecutive values n = 0 to 79. The product of the coefficients, -79 and +1601, is -126479. Considering quadratics of the form: n² + an + b, where |a| < 1000 and |b| < 1000 -where |n| is the modulus/absolute value of ne.g. |11| = 11 and |−4| = 4 +where |n| is the modulus/absolute value of ne.g. |11| = 11 and |-4| = 4 Find the product of the coefficients, a and b, for the quadratic expression that produces the maximum number of primes for consecutive values of n, starting with n = 0. diff --git a/project_euler/problem_031/sol1.py b/project_euler/problem_031/sol1.py index ba40cf383175..4c9c533eecb7 100644 --- a/project_euler/problem_031/sol1.py +++ b/project_euler/problem_031/sol1.py @@ -2,14 +2,14 @@ Coin sums Problem 31: https://projecteuler.net/problem=31 -In England the currency is made up of pound, £, and pence, p, and there are +In England the currency is made up of pound, f, and pence, p, and there are eight coins in general circulation: -1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p). -It is possible to make £2 in the following way: +1p, 2p, 5p, 10p, 20p, 50p, f1 (100p) and f2 (200p). +It is possible to make f2 in the following way: -1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p -How many different ways can £2 be made using any number of coins? +1xf1 + 1x50p + 2x20p + 1x5p + 1x2p + 3x1p +How many different ways can f2 be made using any number of coins? 
""" diff --git a/project_euler/problem_031/sol2.py b/project_euler/problem_031/sol2.py index f9e4dc384bff..574f8d4107a1 100644 --- a/project_euler/problem_031/sol2.py +++ b/project_euler/problem_031/sol2.py @@ -3,17 +3,17 @@ Coin sums -In England the currency is made up of pound, £, and pence, p, and there are +In England the currency is made up of pound, f, and pence, p, and there are eight coins in general circulation: -1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p). -It is possible to make £2 in the following way: +1p, 2p, 5p, 10p, 20p, 50p, f1 (100p) and f2 (200p). +It is possible to make f2 in the following way: -1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p -How many different ways can £2 be made using any number of coins? +1xf1 + 1x50p + 2x20p + 1x5p + 1x2p + 3x1p +How many different ways can f2 be made using any number of coins? Hint: - > There are 100 pence in a pound (£1 = 100p) + > There are 100 pence in a pound (f1 = 100p) > There are coins(in pence) are available: 1, 2, 5, 10, 20, 50, 100 and 200. > how many different ways you can combine these values to create 200 pence. diff --git a/project_euler/problem_032/sol32.py b/project_euler/problem_032/sol32.py index a402b5584061..c0ca2ce10791 100644 --- a/project_euler/problem_032/sol32.py +++ b/project_euler/problem_032/sol32.py @@ -3,7 +3,7 @@ digits 1 to n exactly once; for example, the 5-digit number, 15234, is 1 through 5 pandigital. -The product 7254 is unusual, as the identity, 39 × 186 = 7254, containing +The product 7254 is unusual, as the identity, 39 x 186 = 7254, containing multiplicand, multiplier, and product is 1 through 9 pandigital. Find the sum of all products whose multiplicand/multiplier/product identity can diff --git a/project_euler/problem_038/sol1.py b/project_euler/problem_038/sol1.py index 5bef273ea2a9..382892723b7d 100644 --- a/project_euler/problem_038/sol1.py +++ b/project_euler/problem_038/sol1.py @@ -3,9 +3,9 @@ Take the number 192 and multiply it by each of 1, 2, and 3: -192 × 1 = 192 -192 × 2 = 384 -192 × 3 = 576 +192 x 1 = 192 +192 x 2 = 384 +192 x 3 = 576 By concatenating each product we get the 1 to 9 pandigital, 192384576. We will call 192384576 the concatenated product of 192 and (1,2,3) diff --git a/project_euler/problem_040/sol1.py b/project_euler/problem_040/sol1.py index 69be377723a5..721bd063c28a 100644 --- a/project_euler/problem_040/sol1.py +++ b/project_euler/problem_040/sol1.py @@ -11,7 +11,7 @@ If dn represents the nth digit of the fractional part, find the value of the following expression. -d1 × d10 × d100 × d1000 × d10000 × d100000 × d1000000 +d1 x d10 x d100 x d1000 x d10000 x d100000 x d1000000 """ diff --git a/project_euler/problem_044/sol1.py b/project_euler/problem_044/sol1.py index 3b75b6a56a8e..2613563a4bf1 100644 --- a/project_euler/problem_044/sol1.py +++ b/project_euler/problem_044/sol1.py @@ -1,14 +1,14 @@ """ Problem 44: https://projecteuler.net/problem=44 -Pentagonal numbers are generated by the formula, Pn=n(3n−1)/2. The first ten +Pentagonal numbers are generated by the formula, Pn=n(3n-1)/2. The first ten pentagonal numbers are: 1, 5, 12, 22, 35, 51, 70, 92, 117, 145, ... It can be seen that P4 + P7 = 22 + 70 = 92 = P8. However, their difference, -70 − 22 = 48, is not pentagonal. +70 - 22 = 48, is not pentagonal. Find the pair of pentagonal numbers, Pj and Pk, for which their sum and difference -are pentagonal and D = |Pk − Pj| is minimised; what is the value of D? +are pentagonal and D = |Pk - Pj| is minimised; what is the value of D? 
""" diff --git a/project_euler/problem_045/sol1.py b/project_euler/problem_045/sol1.py index d921b2802c2d..8d016de6e542 100644 --- a/project_euler/problem_045/sol1.py +++ b/project_euler/problem_045/sol1.py @@ -3,8 +3,8 @@ Triangle, pentagonal, and hexagonal numbers are generated by the following formulae: Triangle T(n) = (n * (n + 1)) / 2 1, 3, 6, 10, 15, ... -Pentagonal P(n) = (n * (3 * n − 1)) / 2 1, 5, 12, 22, 35, ... -Hexagonal H(n) = n * (2 * n − 1) 1, 6, 15, 28, 45, ... +Pentagonal P(n) = (n * (3 * n - 1)) / 2 1, 5, 12, 22, 35, ... +Hexagonal H(n) = n * (2 * n - 1) 1, 6, 15, 28, 45, ... It can be verified that T(285) = P(165) = H(143) = 40755. Find the next triangle number that is also pentagonal and hexagonal. diff --git a/project_euler/problem_046/sol1.py b/project_euler/problem_046/sol1.py index 07dd9bbf84c8..f27f658e63e5 100644 --- a/project_euler/problem_046/sol1.py +++ b/project_euler/problem_046/sol1.py @@ -4,12 +4,12 @@ It was proposed by Christian Goldbach that every odd composite number can be written as the sum of a prime and twice a square. -9 = 7 + 2 × 12 -15 = 7 + 2 × 22 -21 = 3 + 2 × 32 -25 = 7 + 2 × 32 -27 = 19 + 2 × 22 -33 = 31 + 2 × 12 +9 = 7 + 2 x 12 +15 = 7 + 2 x 22 +21 = 3 + 2 x 32 +25 = 7 + 2 x 32 +27 = 19 + 2 x 22 +33 = 31 + 2 x 12 It turns out that the conjecture was false. diff --git a/project_euler/problem_047/sol1.py b/project_euler/problem_047/sol1.py index 1287e0d9e107..c9c44a9832dd 100644 --- a/project_euler/problem_047/sol1.py +++ b/project_euler/problem_047/sol1.py @@ -5,14 +5,14 @@ The first two consecutive numbers to have two distinct prime factors are: -14 = 2 × 7 -15 = 3 × 5 +14 = 2 x 7 +15 = 3 x 5 The first three consecutive numbers to have three distinct prime factors are: -644 = 2² × 7 × 23 -645 = 3 × 5 × 43 -646 = 2 × 17 × 19. +644 = 2² x 7 x 23 +645 = 3 x 5 x 43 +646 = 2 x 17 x 19. Find the first four consecutive integers to have four distinct prime factors each. What is the first of these numbers? diff --git a/project_euler/problem_053/sol1.py b/project_euler/problem_053/sol1.py index a32b73c545d6..192cbf25e50c 100644 --- a/project_euler/problem_053/sol1.py +++ b/project_euler/problem_053/sol1.py @@ -10,7 +10,7 @@ In general, -nCr = n!/(r!(n−r)!),where r ≤ n, n! = n×(n−1)×...×3×2×1, and 0! = 1. +nCr = n!/(r!(n-r)!),where r ≤ n, n! = nx(n-1)x...x3x2x1, and 0! = 1. It is not until n = 23, that a value exceeds one-million: 23C10 = 1144066. How many, not necessarily distinct, values of nCr, for 1 ≤ n ≤ 100, are greater diff --git a/project_euler/problem_097/sol1.py b/project_euler/problem_097/sol1.py index 2807e893ded0..a349f3a1dbc9 100644 --- a/project_euler/problem_097/sol1.py +++ b/project_euler/problem_097/sol1.py @@ -1,7 +1,7 @@ """ The first known prime found to exceed one million digits was discovered in 1999, -and is a Mersenne prime of the form 2**6972593 − 1; it contains exactly 2,098,960 -digits. Subsequently other Mersenne primes, of the form 2**p − 1, have been found +and is a Mersenne prime of the form 2**6972593 - 1; it contains exactly 2,098,960 +digits. Subsequently other Mersenne primes, of the form 2**p - 1, have been found which contain more digits. However, in 2004 there was found a massive non-Mersenne prime which contains 2,357,207 digits: (28433 * (2 ** 7830457 + 1)). 
diff --git a/project_euler/problem_104/sol1.py b/project_euler/problem_104/sol1.py index d84dbcfc9c65..a0267faa6a38 100644 --- a/project_euler/problem_104/sol1.py +++ b/project_euler/problem_104/sol1.py @@ -3,7 +3,7 @@ The Fibonacci sequence is defined by the recurrence relation: -Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. +Fn = Fn-1 + Fn-2, where F1 = 1 and F2 = 1. It turns out that F541, which contains 113 digits, is the first Fibonacci number for which the last nine digits are 1-9 pandigital (contain all the digits 1 to 9, but not necessarily in order). And F2749, which contains 575 digits, is the first diff --git a/project_euler/problem_120/sol1.py b/project_euler/problem_120/sol1.py index 0e6821214560..2f403972502f 100644 --- a/project_euler/problem_120/sol1.py +++ b/project_euler/problem_120/sol1.py @@ -3,7 +3,7 @@ Description: -Let r be the remainder when (a−1)^n + (a+1)^n is divided by a^2. +Let r be the remainder when (a-1)^n + (a+1)^n is divided by a^2. For example, if a = 7 and n = 3, then r = 42: 6^3 + 8^3 = 728 ≡ 42 mod 49. And as n varies, so too will r, but for a = 7 it turns out that r_max = 42. For 3 ≤ a ≤ 1000, find ∑ r_max. diff --git a/project_euler/problem_123/sol1.py b/project_euler/problem_123/sol1.py index 7239e13a51e9..3dd31a2e8505 100644 --- a/project_euler/problem_123/sol1.py +++ b/project_euler/problem_123/sol1.py @@ -4,7 +4,7 @@ Name: Prime square remainders Let pn be the nth prime: 2, 3, 5, 7, 11, ..., and -let r be the remainder when (pn−1)^n + (pn+1)^n is divided by pn^2. +let r be the remainder when (pn-1)^n + (pn+1)^n is divided by pn^2. For example, when n = 3, p3 = 5, and 43 + 63 = 280 ≡ 5 mod 25. The least value of n for which the remainder first exceeds 10^9 is 7037. diff --git a/project_euler/problem_135/sol1.py b/project_euler/problem_135/sol1.py index ac91fa4e2b9d..d57ace489191 100644 --- a/project_euler/problem_135/sol1.py +++ b/project_euler/problem_135/sol1.py @@ -3,9 +3,9 @@ Given the positive integers, x, y, and z, are consecutive terms of an arithmetic progression, the least value of the positive integer, n, for which the equation, -x2 − y2 − z2 = n, has exactly two solutions is n = 27: +x2 - y2 - z2 = n, has exactly two solutions is n = 27: -342 − 272 − 202 = 122 − 92 − 62 = 27 +342 - 272 - 202 = 122 - 92 - 62 = 27 It turns out that n = 1155 is the least value which has exactly ten solutions. diff --git a/project_euler/problem_144/sol1.py b/project_euler/problem_144/sol1.py index bc16bf985f41..9070455de79f 100644 --- a/project_euler/problem_144/sol1.py +++ b/project_euler/problem_144/sol1.py @@ -6,7 +6,7 @@ The specific white cell we will be considering is an ellipse with the equation 4x^2 + y^2 = 100 -The section corresponding to −0.01 ≤ x ≤ +0.01 at the top is missing, allowing the +The section corresponding to -0.01 ≤ x ≤ +0.01 at the top is missing, allowing the light to enter and exit through the hole.  The light beam in this problem starts at the point (0.0,10.1) just outside the white @@ -20,7 +20,7 @@ the laser beam and the wall of the white cell; the blue line shows the line tangent to the ellipse at the point of incidence of the first bounce. -The slope m of the tangent line at any point (x,y) of the given ellipse is: m = −4x/y +The slope m of the tangent line at any point (x,y) of the given ellipse is: m = -4x/y The normal line is perpendicular to this tangent line at the point of incidence. 
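The problem_120 and problem_123 docstrings above both define r as the remainder of (a-1)^n + (a+1)^n modulo a^2. As a hedged illustration (these lines are a sketch, not code from either solution), Python's built-in three-argument pow reproduces the worked examples quoted in those docstrings:

# problem_120 example: a = 7, n = 3 gives r = 42, since 6**3 + 8**3 = 728 and 728 % 49 == 42.
a, n = 7, 3
print((pow(a - 1, n, a * a) + pow(a + 1, n, a * a)) % (a * a))  # 42

# problem_123 example: p3 = 5 and n = 3, so 4**3 + 6**3 = 280 and 280 % 25 == 5.
p = 5
print((pow(p - 1, 3, p * p) + pow(p + 1, 3, p * p)) % (p * p))  # 5

Three-argument pow performs modular exponentiation directly, so neither line ever builds the full power.
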
diff --git a/project_euler/problem_174/sol1.py b/project_euler/problem_174/sol1.py index 33c1b158adbb..9a75e8638880 100644 --- a/project_euler/problem_174/sol1.py +++ b/project_euler/problem_174/sol1.py @@ -14,7 +14,7 @@ Let N(n) be the number of t ≤ 1000000 such that t is type L(n); for example, N(15) = 832. -What is ∑ N(n) for 1 ≤ n ≤ 10? +What is sum N(n) for 1 ≤ n ≤ 10? """ from collections import defaultdict diff --git a/pyproject.toml b/pyproject.toml index 0185f4d7b987..ff22fba81c8a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,6 +10,7 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts + "RUF001", # String contains ambiguous {}. Did you mean {}? "RUF002", # Docstring contains ambiguous {}. Did you mean {}? "RUF003", # Comment contains ambiguous {}. Did you mean {}? "S101", # Use of `assert` detected -- DO NOT FIX diff --git a/strings/jaro_winkler.py b/strings/jaro_winkler.py index c18f0d85d9f4..cae2068fabc1 100644 --- a/strings/jaro_winkler.py +++ b/strings/jaro_winkler.py @@ -3,7 +3,7 @@ def jaro_winkler(str1: str, str2: str) -> float: """ - Jaro–Winkler distance is a string metric measuring an edit distance between two + Jaro-Winkler distance is a string metric measuring an edit distance between two sequences. Output value is between 0.0 and 1.0. diff --git a/strings/manacher.py b/strings/manacher.py index fc8b01cd9c1c..af1b10cf81fb 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -5,7 +5,7 @@ def palindromic_string(input_string: str) -> str: >>> palindromic_string('ababa') 'ababa' - Manacher’s algorithm which finds Longest palindromic Substring in linear time. + Manacher's algorithm which finds Longest palindromic Substring in linear time. 1. first this convert input_string("xyx") into new_string("x|y|x") where odd positions are actual input characters. 
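The first step quoted in the manacher.py docstring above, turning "xyx" into "x|y|x", is simply an interleaving of a separator character between the input characters. A minimal sketch of that step (the interleave name is made up for illustration and is not a function from this repository):

def interleave(input_string: str, separator: str = "|") -> str:
    # "xyx" -> "x|y|x": the original characters end up at the odd positions.
    return separator.join(input_string)

print(interleave("xyx"))  # x|y|x
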
diff --git a/strings/prefix_function.py b/strings/prefix_function.py index 65bbe9100735..04987deef469 100644 --- a/strings/prefix_function.py +++ b/strings/prefix_function.py @@ -1,7 +1,7 @@ """ https://cp-algorithms.com/string/prefix-function.html -Prefix function Knuth–Morris–Pratt algorithm +Prefix function Knuth-Morris-Pratt algorithm Different algorithm than Knuth-Morris-Pratt pattern finding From d016fda51c08a604738e556a7ccb19e0f9c81dcb Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 22 Apr 2024 22:56:14 +0300 Subject: [PATCH 1375/1543] Enable ruff RUF003 rule (#11376) * Enable ruff RUF003 rule * Update pyproject.toml --------- Co-authored-by: Christian Clauss --- dynamic_programming/fast_fibonacci.py | 2 +- graphs/ant_colony_optimization_algorithms.py | 4 ++-- machine_learning/polynomial_regression.py | 2 +- pyproject.toml | 3 --- strings/credit_card_validator.py | 2 +- 5 files changed, 5 insertions(+), 8 deletions(-) diff --git a/dynamic_programming/fast_fibonacci.py b/dynamic_programming/fast_fibonacci.py index 9f956ca2f979..d04a5ac8249b 100644 --- a/dynamic_programming/fast_fibonacci.py +++ b/dynamic_programming/fast_fibonacci.py @@ -26,7 +26,7 @@ def _fib(n: int) -> tuple[int, int]: if n == 0: # (F(0), F(1)) return (0, 1) - # F(2n) = F(n)[2F(n+1) − F(n)] + # F(2n) = F(n)[2F(n+1) - F(n)] # F(2n+1) = F(n+1)^2+F(n)^2 a, b = _fib(n // 2) c = a * (b * 2 - a) diff --git a/graphs/ant_colony_optimization_algorithms.py b/graphs/ant_colony_optimization_algorithms.py index 652ad6144297..13637da44874 100644 --- a/graphs/ant_colony_optimization_algorithms.py +++ b/graphs/ant_colony_optimization_algorithms.py @@ -33,7 +33,7 @@ def main( pheromone_evaporation: float, alpha: float, beta: float, - q: float, # Pheromone system parameters Q,which is a constant + q: float, # Pheromone system parameters Q, which is a constant ) -> tuple[list[int], float]: """ Ant colony algorithm main function @@ -117,7 +117,7 @@ def pheromone_update( cities: dict[int, list[int]], pheromone_evaporation: float, ants_route: list[list[int]], - q: float, # Pheromone system parameters Q,which is a constant + q: float, # Pheromone system parameters Q, which is a constant best_path: list[int], best_distance: float, ) -> tuple[list[list[float]], list[int], float]: diff --git a/machine_learning/polynomial_regression.py b/machine_learning/polynomial_regression.py index 19f7dc994017..212f40bea197 100644 --- a/machine_learning/polynomial_regression.py +++ b/machine_learning/polynomial_regression.py @@ -146,7 +146,7 @@ def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None: "Design matrix is not full rank, can't compute coefficients" ) - # np.linalg.pinv() computes the Moore–Penrose pseudoinverse using SVD + # np.linalg.pinv() computes the Moore-Penrose pseudoinverse using SVD self.params = np.linalg.pinv(X) @ y_train def predict(self, data: np.ndarray) -> np.ndarray: diff --git a/pyproject.toml b/pyproject.toml index ff22fba81c8a..1134b773308e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,9 +10,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "PLW2901", # PLW2901: Redefined loop variable -- FIX ME "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception "PT018", # Assertion should be broken down into multiple parts - "RUF001", # String contains ambiguous {}. Did you mean {}? - "RUF002", # Docstring contains ambiguous {}. Did you mean {}? - "RUF003", # Comment contains ambiguous {}. Did you mean {}? 
"S101", # Use of `assert` detected -- DO NOT FIX "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME "SLF001", # Private member accessed: `_Iterator` -- FIX ME diff --git a/strings/credit_card_validator.py b/strings/credit_card_validator.py index 78bf45740a63..b8da1c745124 100644 --- a/strings/credit_card_validator.py +++ b/strings/credit_card_validator.py @@ -36,7 +36,7 @@ def luhn_validation(credit_card_number: str) -> bool: digit = int(cc_number[i]) digit *= 2 # If doubling of a number results in a two digit number - # i.e greater than 9(e.g., 6 × 2 = 12), + # i.e greater than 9(e.g., 6 x 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: From 3925b8155bebd84eababfba0f5a12e5129cfaa44 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 24 Apr 2024 07:32:25 +0300 Subject: [PATCH 1376/1543] Fix ARG005 per file ignore (#11383) --- machine_learning/linear_discriminant_analysis.py | 2 +- pyproject.toml | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index 606e11f3698e..86f28aef671a 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -256,7 +256,7 @@ def valid_input( input_type: Callable[[object], num], # Usually float or int input_msg: str, err_msg: str, - condition: Callable[[num], bool] = lambda x: True, + condition: Callable[[num], bool] = lambda _: True, default: str | None = None, ) -> num: """ diff --git a/pyproject.toml b/pyproject.toml index 1134b773308e..37ebeeb9ce37 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -86,7 +86,6 @@ max-complexity = 17 # default: 10 "graphs/minimum_spanning_tree_prims.py" = ["SIM114"] "hashes/enigma_machine.py" = ["BLE001"] "machine_learning/decision_tree.py" = ["SIM114"] -"machine_learning/linear_discriminant_analysis.py" = ["ARG005"] "machine_learning/sequential_minimum_optimization.py" = ["SIM115"] "matrix/sherman_morrison.py" = ["SIM103", "SIM114"] "other/l*u_cache.py" = ["RUF012"] From 2d6be5fbb0be2b738d2c246138db9ccda9b6a853 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 30 Apr 2024 07:40:26 +0300 Subject: [PATCH 1377/1543] Enable ruff UP031 rule (#11388) --- data_structures/arrays/sudoku_solver.py | 4 ++-- neural_network/input_data.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 5c1cff06f9d4..a8157a520c97 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -150,7 +150,7 @@ def time_solve(grid): display(grid_values(grid)) if values: display(values) - print("(%.5f seconds)\n" % t) + print(f"({t:.5f} seconds)\n") return (t, solved(values)) times, results = zip(*[time_solve(grid) for grid in grids]) @@ -217,4 +217,4 @@ def shuffled(seq): start = time.monotonic() solve(puzzle) t = time.monotonic() - start - print("Solved: %.5f sec" % t) + print(f"Solved: {t:.5f} sec") diff --git a/neural_network/input_data.py b/neural_network/input_data.py index d189e3f9e0d9..f90287fe3f5b 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -156,7 +156,8 @@ def __init__( self._rng = np.random.default_rng(seed1 if seed is None else seed2) dtype = dtypes.as_dtype(dtype).base_dtype if dtype not in (dtypes.uint8, dtypes.float32): - raise TypeError("Invalid image 
dtype %r, expected uint8 or float32" % dtype) + msg = f"Invalid image dtype {dtype!r}, expected uint8 or float32" + raise TypeError(msg) if fake_data: self._num_examples = 10000 self.one_hot = one_hot From a7e0b141d8eac30e8f9c4f01c3050e6cdb90f7d4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 06:58:03 +0200 Subject: [PATCH 1378/1543] [pre-commit.ci] pre-commit autoupdate (#11387) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/MarcoGorelli/auto-walrus: 0.3.3 → 0.3.4](https://github.com/MarcoGorelli/auto-walrus/compare/0.3.3...0.3.4) - [github.com/astral-sh/ruff-pre-commit: v0.4.1 → v0.4.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.1...v0.4.2) - [github.com/pre-commit/mirrors-mypy: v1.9.0 → v1.10.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.9.0...v1.10.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index eedf6d939748..744efc55f41b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,12 +11,12 @@ repos: - id: requirements-txt-fixer - repo: https://github.com/MarcoGorelli/auto-walrus - rev: 0.3.3 + rev: 0.3.4 hooks: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.1 + rev: v0.4.2 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.9.0 + rev: v1.10.0 hooks: - id: mypy args: From c026b1952f92836c58e63017f4c75e76c43448a1 Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Wed, 1 May 2024 13:42:54 +0300 Subject: [PATCH 1379/1543] adding a matrix equalization algorithm (#11360) * adding a matrix equalization algorithm * Adding url for more details * Implementing suggestions --- matrix/matrix_equalization.py | 55 +++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 matrix/matrix_equalization.py diff --git a/matrix/matrix_equalization.py b/matrix/matrix_equalization.py new file mode 100644 index 000000000000..e7e76505cf63 --- /dev/null +++ b/matrix/matrix_equalization.py @@ -0,0 +1,55 @@ +from sys import maxsize + + +def array_equalization(vector: list[int], step_size: int) -> int: + """ + This algorithm equalizes all elements of the input vector + to a common value, by making the minimal number of + "updates" under the constraint of a step size (step_size). + + >>> array_equalization([1, 1, 6, 2, 4, 6, 5, 1, 7, 2, 2, 1, 7, 2, 2], 4) + 4 + >>> array_equalization([22, 81, 88, 71, 22, 81, 632, 81, 81, 22, 92], 2) + 5 + >>> array_equalization([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 5) + 0 + >>> array_equalization([22, 22, 22, 33, 33, 33], 2) + 2 + >>> array_equalization([1, 2, 3], 0) + Traceback (most recent call last): + ValueError: Step size must be positive and non-zero. + >>> array_equalization([1, 2, 3], -1) + Traceback (most recent call last): + ValueError: Step size must be positive and non-zero. + >>> array_equalization([1, 2, 3], 0.5) + Traceback (most recent call last): + ValueError: Step size must be an integer. 
+ >>> array_equalization([1, 2, 3], maxsize) + 1 + """ + if step_size <= 0: + raise ValueError("Step size must be positive and non-zero.") + if not isinstance(step_size, int): + raise ValueError("Step size must be an integer.") + + unique_elements = set(vector) + min_updates = maxsize + + for element in unique_elements: + elem_index = 0 + updates = 0 + while elem_index < len(vector): + if vector[elem_index] != element: + updates += 1 + elem_index += step_size + else: + elem_index += 1 + min_updates = min(min_updates, updates) + + return min_updates + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 5131e3145dcec9e232c8e8a807ad387f4f9a3d38 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 1 May 2024 22:27:59 +0300 Subject: [PATCH 1380/1543] Fix some ARG002 per file ignores (#11382) * Fix some ARG002 per file ignores * Fix * updating DIRECTORY.md * Fix review issue * Fix review issue --------- Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 1 + audio_filters/show_response.py | 3 ++- data_structures/hashing/hash_table.py | 3 +++ data_structures/hashing/quadratic_probing.py | 2 +- pyproject.toml | 3 --- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index f6d6cb463faa..4a053a3f1b7f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -773,6 +773,7 @@ * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Largest Square Area In Matrix](matrix/largest_square_area_in_matrix.py) * [Matrix Class](matrix/matrix_class.py) + * [Matrix Equalization](matrix/matrix_equalization.py) * [Matrix Multiplication Recursion](matrix/matrix_multiplication_recursion.py) * [Matrix Operation](matrix/matrix_operation.py) * [Max Area Of Island](matrix/max_area_of_island.py) diff --git a/audio_filters/show_response.py b/audio_filters/show_response.py index 097b8152b4e6..f9c9537c047c 100644 --- a/audio_filters/show_response.py +++ b/audio_filters/show_response.py @@ -1,5 +1,6 @@ from __future__ import annotations +from abc import abstractmethod from math import pi from typing import Protocol @@ -8,6 +9,7 @@ class FilterType(Protocol): + @abstractmethod def process(self, sample: float) -> float: """ Calculate y[n] @@ -15,7 +17,6 @@ def process(self, sample: float) -> float: >>> issubclass(FilterType, Protocol) True """ - return 0.0 def get_bounds( diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index 7fe57068f6a3..40fcad9a3dab 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -1,4 +1,6 @@ #!/usr/bin/env python3 +from abc import abstractmethod + from .number_theory.prime_numbers import next_prime @@ -173,6 +175,7 @@ def _set_value(self, key, data): self.values[key] = data self._keys[key] = data + @abstractmethod def _collision_resolution(self, key, data=None): """ This method is a type of open addressing which is used for handling collision. diff --git a/data_structures/hashing/quadratic_probing.py b/data_structures/hashing/quadratic_probing.py index 2f3401ec8918..56d4926eee9b 100644 --- a/data_structures/hashing/quadratic_probing.py +++ b/data_structures/hashing/quadratic_probing.py @@ -11,7 +11,7 @@ class QuadraticProbing(HashTable): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - def _collision_resolution(self, key, data=None): + def _collision_resolution(self, key, data=None): # noqa: ARG002 """ Quadratic probing is an open addressing scheme used for resolving collisions in hash table. 
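Behind the quadratic probing docstring above is the idea that a colliding key is retried at offsets that grow as successive squares. A hedged sketch of that probe sequence (the probe_sequence helper is hypothetical and is not the _collision_resolution method patched above):

def probe_sequence(key: int, size_table: int, attempts: int = 5) -> list[int]:
    # The i-th retry inspects slot (key + i**2) % size_table.
    return [(key + i * i) % size_table for i in range(1, attempts + 1)]

print(probe_sequence(key=17, size_table=11))  # [7, 10, 4, 0, 9]
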
diff --git a/pyproject.toml b/pyproject.toml index 37ebeeb9ce37..4c512ca896b4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -76,11 +76,8 @@ max-complexity = 17 # default: 10 [tool.ruff.lint.per-file-ignores] "arithmetic_analysis/newton_raphson.py" = ["PGH001"] -"audio_filters/show_response.py" = ["ARG002"] "data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] "data_structures/binary_tree/treap.py" = ["SIM114"] -"data_structures/hashing/hash_table.py" = ["ARG002"] -"data_structures/hashing/quadratic_probing.py" = ["ARG002"] "data_structures/hashing/tests/test_hash_map.py" = ["BLE001"] "data_structures/heap/max_heap.py" = ["SIM114"] "graphs/minimum_spanning_tree_prims.py" = ["SIM114"] From ea53051576a9c5e7398ca2ae6a0823ca54ac3947 Mon Sep 17 00:00:00 2001 From: Xuehai Pan Date: Fri, 3 May 2024 00:43:59 +0800 Subject: [PATCH 1381/1543] Use `spawn` start method in multiprocessing programs (#11391) * Use `spawn` start method in multiprocessing programs * Set `spawn` start method in doctest * Use `with` statement for locks * Pass multiprocessing context explicitly --- sorts/odd_even_transposition_parallel.py | 79 ++++++++++++++++-------- 1 file changed, 53 insertions(+), 26 deletions(-) diff --git a/sorts/odd_even_transposition_parallel.py b/sorts/odd_even_transposition_parallel.py index 9d2bcdbd7576..5d4e09b211c0 100644 --- a/sorts/odd_even_transposition_parallel.py +++ b/sorts/odd_even_transposition_parallel.py @@ -11,11 +11,11 @@ synchronization could be used. """ -from multiprocessing import Lock, Pipe, Process +import multiprocessing as mp # lock used to ensure that two processes do not access a pipe at the same time # NOTE This breaks testing on build runner. May work better locally -# process_lock = Lock() +# process_lock = mp.Lock() """ The function run by the processes that sorts the list @@ -29,8 +29,17 @@ """ -def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): - process_lock = Lock() +def oe_process( + position, + value, + l_send, + r_send, + lr_cv, + rr_cv, + result_pipe, + multiprocessing_context, +): + process_lock = multiprocessing_context.Lock() # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to @@ -38,27 +47,23 @@ def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): for i in range(10): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor - process_lock.acquire() - r_send[1].send(value) - process_lock.release() + with process_lock: + r_send[1].send(value) # receive your right neighbor's value - process_lock.acquire() - temp = rr_cv[0].recv() - process_lock.release() + with process_lock: + temp = rr_cv[0].recv() # take the lower value since you are on the left value = min(value, temp) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor - process_lock.acquire() - l_send[1].send(value) - process_lock.release() + with process_lock: + l_send[1].send(value) # receive your left neighbor's value - process_lock.acquire() - temp = lr_cv[0].recv() - process_lock.release() + with process_lock: + temp = lr_cv[0].recv() # take the higher value since you are on the right value = max(value, temp) @@ -94,39 +99,60 @@ def odd_even_transposition(arr): >>> odd_even_transposition(unsorted_list) == sorted(unsorted_list + [1]) False """ + # spawn method is considered safer than fork + multiprocessing_context = mp.get_context("spawn") + process_array_ = [] result_pipe 
= [] # initialize the list of pipes where the values will be retrieved for _ in arr: - result_pipe.append(Pipe()) + result_pipe.append(multiprocessing_context.Pipe()) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop - temp_rs = Pipe() - temp_rr = Pipe() + temp_rs = multiprocessing_context.Pipe() + temp_rr = multiprocessing_context.Pipe() process_array_.append( - Process( + multiprocessing_context.Process( target=oe_process, - args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), + args=( + 0, + arr[0], + None, + temp_rs, + None, + temp_rr, + result_pipe[0], + multiprocessing_context, + ), ) ) temp_lr = temp_rs temp_ls = temp_rr for i in range(1, len(arr) - 1): - temp_rs = Pipe() - temp_rr = Pipe() + temp_rs = multiprocessing_context.Pipe() + temp_rr = multiprocessing_context.Pipe() process_array_.append( - Process( + multiprocessing_context.Process( target=oe_process, - args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), + args=( + i, + arr[i], + temp_ls, + temp_rs, + temp_lr, + temp_rr, + result_pipe[i], + multiprocessing_context, + ), ) ) temp_lr = temp_rs temp_ls = temp_rr process_array_.append( - Process( + multiprocessing_context.Process( target=oe_process, args=( len(arr) - 1, @@ -136,6 +162,7 @@ def odd_even_transposition(arr): temp_lr, None, result_pipe[len(arr) - 1], + multiprocessing_context, ), ) ) From 1868c0b6375188a9034478a2711e40c343d00c2e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 6 May 2024 21:38:58 +0200 Subject: [PATCH 1382/1543] [pre-commit.ci] pre-commit autoupdate (#11394) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.2 → v0.4.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.2...v0.4.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 744efc55f41b..210b7494036e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.2 + rev: v0.4.3 hooks: - id: ruff - id: ruff-format From c599f6c9107a1b09c08ddce17053d7b5d0895a83 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Fri, 10 May 2024 22:59:53 +0300 Subject: [PATCH 1383/1543] Fix some SIM114 per file ignores (#11395) * updating DIRECTORY.md * Fix some SIM114 per file ignores * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix review issue --------- Co-authored-by: MaximSmolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/binary_tree/treap.py | 4 +--- data_structures/heap/max_heap.py | 2 +- graphs/minimum_spanning_tree_prims.py | 2 +- machine_learning/decision_tree.py | 2 +- matrix/sherman_morrison.py | 2 +- pyproject.toml | 6 +----- 6 files changed, 6 insertions(+), 12 deletions(-) diff --git a/data_structures/binary_tree/treap.py b/data_structures/binary_tree/treap.py index e7ddf931b83a..3114c6fa1c26 100644 --- a/data_structures/binary_tree/treap.py +++ b/data_structures/binary_tree/treap.py @@ -39,9 +39,7 @@ def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]: Left tree contains all values less 
than split value. Right tree contains all values greater or equal, than split value """ - if root is None: # None tree is split into 2 Nones - return None, None - elif root.value is None: + if root is None or root.value is None: # None tree is split into 2 Nones return None, None elif value < root.value: """ diff --git a/data_structures/heap/max_heap.py b/data_structures/heap/max_heap.py index 5a9f9cf88433..589f2595a8da 100644 --- a/data_structures/heap/max_heap.py +++ b/data_structures/heap/max_heap.py @@ -38,7 +38,7 @@ def insert(self, value: int) -> None: def __swap_down(self, i: int) -> None: """Swap the element down""" while self.__size >= 2 * i: - if 2 * i + 1 > self.__size: + if 2 * i + 1 > self.__size: # noqa: SIM114 bigger_child = 2 * i elif self.__heap[2 * i] > self.__heap[2 * i + 1]: bigger_child = 2 * i diff --git a/graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py index 90c9f4c91e86..d0b45d7ef139 100644 --- a/graphs/minimum_spanning_tree_prims.py +++ b/graphs/minimum_spanning_tree_prims.py @@ -16,7 +16,7 @@ def top_to_bottom(self, heap, start, size, positions): if start > size // 2 - 1: return else: - if 2 * start + 2 >= size: + if 2 * start + 2 >= size: # noqa: SIM114 smallest_child = 2 * start + 1 elif heap[2 * start + 1] < heap[2 * start + 2]: smallest_child = 2 * start + 1 diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index e48905eeac6a..d0bd6ab0b555 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -105,7 +105,7 @@ def train(self, x, y): the predictor """ for i in range(len(x)): - if len(x[:i]) < self.min_leaf_size: + if len(x[:i]) < self.min_leaf_size: # noqa: SIM114 continue elif len(x[i:]) < self.min_leaf_size: continue diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 7f10ae706e85..e2a09c1d0070 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -65,7 +65,7 @@ def validate_indices(self, loc: tuple[int, int]) -> bool: >>> a.validate_indices((0, 0)) True """ - if not (isinstance(loc, (list, tuple)) and len(loc) == 2): + if not (isinstance(loc, (list, tuple)) and len(loc) == 2): # noqa: SIM114 return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False diff --git a/pyproject.toml b/pyproject.toml index 4c512ca896b4..c07bc9c48e51 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -77,14 +77,10 @@ max-complexity = 17 # default: 10 [tool.ruff.lint.per-file-ignores] "arithmetic_analysis/newton_raphson.py" = ["PGH001"] "data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] -"data_structures/binary_tree/treap.py" = ["SIM114"] "data_structures/hashing/tests/test_hash_map.py" = ["BLE001"] -"data_structures/heap/max_heap.py" = ["SIM114"] -"graphs/minimum_spanning_tree_prims.py" = ["SIM114"] "hashes/enigma_machine.py" = ["BLE001"] -"machine_learning/decision_tree.py" = ["SIM114"] "machine_learning/sequential_minimum_optimization.py" = ["SIM115"] -"matrix/sherman_morrison.py" = ["SIM103", "SIM114"] +"matrix/sherman_morrison.py" = ["SIM103"] "other/l*u_cache.py" = ["RUF012"] "physics/newtons_second_law_of_motion.py" = ["BLE001"] "project_euler/problem_099/sol1.py" = ["SIM115"] From 1f368da06d361e3d1415a2ec7d8857068b746586 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 14 May 2024 13:38:55 +0200 Subject: [PATCH 1384/1543] [pre-commit.ci] pre-commit autoupdate (#11402) MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.3 → v0.4.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.3...v0.4.4) - [github.com/tox-dev/pyproject-fmt: 1.8.0 → 2.0.4](https://github.com/tox-dev/pyproject-fmt/compare/1.8.0...2.0.4) - [github.com/abravalheri/validate-pyproject: v0.16 → v0.17](https://github.com/abravalheri/validate-pyproject/compare/v0.16...v0.17) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +- pyproject.toml | 184 +++++++++++++++++++++++----------------- 2 files changed, 107 insertions(+), 83 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 210b7494036e..521769096369 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.3 + rev: v0.4.4 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.8.0" + rev: "2.0.4" hooks: - id: pyproject-fmt @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.16 + rev: v0.17 hooks: - id: validate-pyproject diff --git a/pyproject.toml b/pyproject.toml index c07bc9c48e51..89ed22bc6ab1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,61 +1,61 @@ [tool.ruff] -lint.ignore = [ # `ruff rule S101` for a description of that rule - "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME - "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "EM101", # Exception must not use a string literal, assign to variable first - "EXE001", # Shebang is present but file is not executable -- DO NOT FIX - "G004", # Logging statement uses f-string - "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX - "PLW2901", # PLW2901: Redefined loop variable -- FIX ME - "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception - "PT018", # Assertion should be broken down into multiple parts - "S101", # Use of `assert` detected -- DO NOT FIX - "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME - "SLF001", # Private member accessed: `_Iterator` -- FIX ME - "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX -] -lint.select = [ # https://beta.ruff.rs/docs/rules - "A", # flake8-builtins - "ARG", # flake8-unused-arguments - "ASYNC", # flake8-async - "B", # flake8-bugbear - "BLE", # flake8-blind-except - "C4", # flake8-comprehensions - "C90", # McCabe cyclomatic complexity - "DJ", # flake8-django - "DTZ", # flake8-datetimez - "E", # pycodestyle - "EM", # flake8-errmsg - "EXE", # flake8-executable - "F", # Pyflakes - "FA", # flake8-future-annotations - "FLY", # flynt - "G", # flake8-logging-format - "I", # isort - "ICN", # flake8-import-conventions - "INP", # flake8-no-pep420 - "INT", # flake8-gettext - "ISC", # flake8-implicit-str-concat - "N", # pep8-naming - "NPY", # NumPy-specific rules - "PD", # pandas-vet - "PGH", # pygrep-hooks - "PIE", # flake8-pie - "PL", # Pylint - "PT", # flake8-pytest-style - "PYI", # flake8-pyi - "RSE", # 
flake8-raise - "RUF", # Ruff-specific rules - "S", # flake8-bandit - "SIM", # flake8-simplify - "SLF", # flake8-self - "T10", # flake8-debugger - "TD", # flake8-todos - "TID", # flake8-tidy-imports - "UP", # pyupgrade - "W", # pycodestyle - "YTT", # flake8-2020 +lint.ignore = [ # `ruff rule S101` for a description of that rule + "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME + "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME + "EM101", # Exception must not use a string literal, assign to variable first + "EXE001", # Shebang is present but file is not executable -- DO NOT FIX + "G004", # Logging statement uses f-string + "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey + "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception + "PT018", # Assertion should be broken down into multiple parts + "S101", # Use of `assert` detected -- DO NOT FIX + "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +] +lint.select = [ # https://beta.ruff.rs/docs/rules + "A", # flake8-builtins + "ARG", # flake8-unused-arguments + "ASYNC", # flake8-async + "B", # flake8-bugbear + "BLE", # flake8-blind-except + "C4", # flake8-comprehensions + "C90", # McCabe cyclomatic complexity + "DJ", # flake8-django + "DTZ", # flake8-datetimez + "E", # pycodestyle + "EM", # flake8-errmsg + "EXE", # flake8-executable + "F", # Pyflakes + "FA", # flake8-future-annotations + "FLY", # flynt + "G", # flake8-logging-format + "I", # isort + "ICN", # flake8-import-conventions + "INP", # flake8-no-pep420 + "INT", # flake8-gettext + "ISC", # flake8-implicit-str-concat + "N", # pep8-naming + "NPY", # NumPy-specific rules + "PD", # pandas-vet + "PGH", # pygrep-hooks + "PIE", # flake8-pie + "PL", # Pylint + "PT", # flake8-pytest-style + "PYI", # flake8-pyi + "RSE", # flake8-raise + "RUF", # Ruff-specific rules + "S", # flake8-bandit + "SIM", # flake8-simplify + "SLF", # flake8-self + "T10", # flake8-debugger + "TD", # flake8-todos + "TID", # flake8-tidy-imports + "UP", # pyupgrade + "W", # pycodestyle + "YTT", # flake8-2020 # "ANN", # flake8-annotations # FIX ME? # "COM", # flake8-commas # "D", # pydocstyle -- FIX ME? 
@@ -71,27 +71,51 @@ lint.select = [ # https://beta.ruff.rs/docs/rules output-format = "full" target-version = "py312" -[tool.ruff.lint.mccabe] # DO NOT INCREASE THIS VALUE -max-complexity = 17 # default: 10 +[tool.ruff.lint.mccabe] # DO NOT INCREASE THIS VALUE +max-complexity = 17 # default: 10 [tool.ruff.lint.per-file-ignores] -"arithmetic_analysis/newton_raphson.py" = ["PGH001"] -"data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] -"data_structures/hashing/tests/test_hash_map.py" = ["BLE001"] -"hashes/enigma_machine.py" = ["BLE001"] -"machine_learning/sequential_minimum_optimization.py" = ["SIM115"] -"matrix/sherman_morrison.py" = ["SIM103"] -"other/l*u_cache.py" = ["RUF012"] -"physics/newtons_second_law_of_motion.py" = ["BLE001"] -"project_euler/problem_099/sol1.py" = ["SIM115"] -"sorts/external_sort.py" = ["SIM115"] +"arithmetic_analysis/newton_raphson.py" = [ + "PGH001", +] +"data_structures/binary_tree/binary_search_tree_recursive.py" = [ + "BLE001", +] +"data_structures/hashing/tests/test_hash_map.py" = [ + "BLE001", +] +"hashes/enigma_machine.py" = [ + "BLE001", +] +"machine_learning/sequential_minimum_optimization.py" = [ + "SIM115", +] +"matrix/sherman_morrison.py" = [ + "SIM103", +] +"other/l*u_cache.py" = [ + "RUF012", +] +"physics/newtons_second_law_of_motion.py" = [ + "BLE001", +] +"project_euler/problem_099/sol1.py" = [ + "SIM115", +] +"sorts/external_sort.py" = [ + "SIM115", +] -[tool.ruff.lint.pylint] # DO NOT INCREASE THESE VALUES -allow-magic-value-types = ["float", "int", "str"] -max-args = 10 # default: 5 -max-branches = 20 # default: 12 -max-returns = 8 # default: 6 -max-statements = 88 # default: 50 +[tool.ruff.lint.pylint] # DO NOT INCREASE THESE VALUES +allow-magic-value-types = [ + "float", + "int", + "str", +] +max-args = 10 # default: 5 +max-branches = 20 # default: 12 +max-returns = 8 # default: 6 +max-statements = 88 # default: 50 [tool.codespell] ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" @@ -99,17 +123,17 @@ skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_n [tool.pytest.ini_options] markers = [ - "mat_ops: mark a test as utilizing matrix operations.", + "mat_ops: mark a test as utilizing matrix operations.", ] addopts = [ - "--durations=10", - "--doctest-modules", - "--showlocals", + "--durations=10", + "--doctest-modules", + "--showlocals", ] [tool.coverage.report] omit = [ ".env/*", - "project_euler/*" + "project_euler/*", ] sort = "Cover" From 0139143abb286027bd3954f3862aab4558642019 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 20 May 2024 22:44:57 +0200 Subject: [PATCH 1385/1543] [pre-commit.ci] pre-commit autoupdate (#11408) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/tox-dev/pyproject-fmt: 2.0.4 → 2.1.1](https://github.com/tox-dev/pyproject-fmt/compare/2.0.4...2.1.1) - [github.com/abravalheri/validate-pyproject: v0.17 → v0.18](https://github.com/abravalheri/validate-pyproject/compare/v0.17...v0.18) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 +-- pyproject.toml | 79 ++++++++++++++++++++--------------------- 2 files changed, 40 insertions(+), 43 deletions(-) 
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 521769096369..b63457ca85e3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.0.4" + rev: "2.1.1" hooks: - id: pyproject-fmt @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.17 + rev: v0.18 hooks: - id: validate-pyproject diff --git a/pyproject.toml b/pyproject.toml index 89ed22bc6ab1..5b8ce4e72dfd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,21 +1,9 @@ [tool.ruff] -lint.ignore = [ # `ruff rule S101` for a description of that rule - "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME - "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "EM101", # Exception must not use a string literal, assign to variable first - "EXE001", # Shebang is present but file is not executable -- DO NOT FIX - "G004", # Logging statement uses f-string - "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX - "PLW2901", # PLW2901: Redefined loop variable -- FIX ME - "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception - "PT018", # Assertion should be broken down into multiple parts - "S101", # Use of `assert` detected -- DO NOT FIX - "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME - "SLF001", # Private member accessed: `_Iterator` -- FIX ME - "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX -] -lint.select = [ # https://beta.ruff.rs/docs/rules +target-version = "py312" + +output-format = "full" +lint.select = [ + # https://beta.ruff.rs/docs/rules "A", # flake8-builtins "ARG", # flake8-unused-arguments "ASYNC", # flake8-async @@ -68,54 +56,63 @@ lint.select = [ # https://beta.ruff.rs/docs/rules # "TCH", # flake8-type-checking # "TRY", # tryceratops ] -output-format = "full" -target-version = "py312" - -[tool.ruff.lint.mccabe] # DO NOT INCREASE THIS VALUE -max-complexity = 17 # default: 10 - -[tool.ruff.lint.per-file-ignores] -"arithmetic_analysis/newton_raphson.py" = [ +lint.per-file-ignores."arithmetic_analysis/newton_raphson.py" = [ "PGH001", ] -"data_structures/binary_tree/binary_search_tree_recursive.py" = [ +lint.per-file-ignores."data_structures/binary_tree/binary_search_tree_recursive.py" = [ "BLE001", ] -"data_structures/hashing/tests/test_hash_map.py" = [ +lint.per-file-ignores."data_structures/hashing/tests/test_hash_map.py" = [ "BLE001", ] -"hashes/enigma_machine.py" = [ +lint.per-file-ignores."hashes/enigma_machine.py" = [ "BLE001", ] -"machine_learning/sequential_minimum_optimization.py" = [ +lint.per-file-ignores."machine_learning/sequential_minimum_optimization.py" = [ "SIM115", ] -"matrix/sherman_morrison.py" = [ +lint.per-file-ignores."matrix/sherman_morrison.py" = [ "SIM103", ] -"other/l*u_cache.py" = [ +lint.per-file-ignores."other/l*u_cache.py" = [ "RUF012", ] -"physics/newtons_second_law_of_motion.py" = [ +lint.per-file-ignores."physics/newtons_second_law_of_motion.py" = [ "BLE001", ] -"project_euler/problem_099/sol1.py" = [ +lint.per-file-ignores."project_euler/problem_099/sol1.py" = [ "SIM115", ] -"sorts/external_sort.py" = [ +lint.per-file-ignores."sorts/external_sort.py" = [ "SIM115", ] - -[tool.ruff.lint.pylint] # DO NOT INCREASE THESE VALUES 
-allow-magic-value-types = [ +lint.mccabe.max-complexity = 17 # default: 10 +lint.pylint.allow-magic-value-types = [ "float", "int", "str", ] -max-args = 10 # default: 5 -max-branches = 20 # default: 12 -max-returns = 8 # default: 6 -max-statements = 88 # default: 50 +lint.pylint.max-args = 10 # default: 5 +lint.pylint.max-branches = 20 # default: 12 +lint.pylint.max-returns = 8 # default: 6 +lint.pylint.max-statements = 88 # default: 50 +lint.ignore = [ + # `ruff rule S101` for a description of that rule + "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME + "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME + "EM101", # Exception must not use a string literal, assign to variable first + "EXE001", # Shebang is present but file is not executable -- DO NOT FIX + "G004", # Logging statement uses f-string + "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey + "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception + "PT018", # Assertion should be broken down into multiple parts + "S101", # Use of `assert` detected -- DO NOT FIX + "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +] [tool.codespell] ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" From 82aa909db7736d8022532bee4dc381072d8c5b1f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 27 May 2024 21:56:48 -0400 Subject: [PATCH 1386/1543] [pre-commit.ci] pre-commit autoupdate (#11417) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.4 → v0.4.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.4...v0.4.5) - [github.com/codespell-project/codespell: v2.2.6 → v2.3.0](https://github.com/codespell-project/codespell/compare/v2.2.6...v2.3.0) - [github.com/tox-dev/pyproject-fmt: 2.1.1 → 2.1.3](https://github.com/tox-dev/pyproject-fmt/compare/2.1.1...2.1.3) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * iterable * at most --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 6 +++--- graphs/dijkstra_algorithm.py | 2 +- project_euler/problem_047/sol1.py | 2 +- pyproject.toml | 35 ++++++++++++++++--------------- 4 files changed, 23 insertions(+), 22 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b63457ca85e3..43bf547dec6e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,20 +16,20 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.4 + rev: v0.4.5 hooks: - id: ruff - id: ruff-format - repo: https://github.com/codespell-project/codespell - rev: v2.2.6 + rev: v2.3.0 hooks: - id: codespell additional_dependencies: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.1.1" + rev: "2.1.3" hooks: - id: pyproject-fmt diff --git a/graphs/dijkstra_algorithm.py 
b/graphs/dijkstra_algorithm.py index 2efa2cb634ff..51412b790bac 100644 --- a/graphs/dijkstra_algorithm.py +++ b/graphs/dijkstra_algorithm.py @@ -215,7 +215,7 @@ def decrease_key(self, tup, new_d): [(5, 'A'), (15, 'B')] """ idx = self.pos[tup[1]] - # assuming the new_d is atmost old_d + # assuming the new_d is at most old_d self.array[idx] = (new_d, tup[1]) while idx > 0 and self.array[self.par(idx)][0] > self.array[idx][0]: self.swap(idx, self.par(idx)) diff --git a/project_euler/problem_047/sol1.py b/project_euler/problem_047/sol1.py index c9c44a9832dd..4ecd4f4b44c1 100644 --- a/project_euler/problem_047/sol1.py +++ b/project_euler/problem_047/sol1.py @@ -58,7 +58,7 @@ def upf_len(num: int) -> int: def equality(iterable: list) -> bool: """ - Check equality of ALL elements in an interable. + Check equality of ALL elements in an iterable >>> equality([1, 2, 3, 4]) False >>> equality([2, 2, 2, 2]) diff --git a/pyproject.toml b/pyproject.toml index 5b8ce4e72dfd..429f4fab9a52 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,6 +56,24 @@ lint.select = [ # "TCH", # flake8-type-checking # "TRY", # tryceratops ] +lint.ignore = [ + # `ruff rule S101` for a description of that rule + "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME + "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME + "EM101", # Exception must not use a string literal, assign to variable first + "EXE001", # Shebang is present but file is not executable -- DO NOT FIX + "G004", # Logging statement uses f-string + "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey + "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception + "PT018", # Assertion should be broken down into multiple parts + "S101", # Use of `assert` detected -- DO NOT FIX + "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +] + lint.per-file-ignores."arithmetic_analysis/newton_raphson.py" = [ "PGH001", ] @@ -96,23 +114,6 @@ lint.pylint.max-args = 10 # default: 5 lint.pylint.max-branches = 20 # default: 12 lint.pylint.max-returns = 8 # default: 6 lint.pylint.max-statements = 88 # default: 50 -lint.ignore = [ - # `ruff rule S101` for a description of that rule - "B904", # Within an `except` clause, raise exceptions with `raise ... 
from err` -- FIX ME - "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "EM101", # Exception must not use a string literal, assign to variable first - "EXE001", # Shebang is present but file is not executable -- DO NOT FIX - "G004", # Logging statement uses f-string - "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey - "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX - "PLW2901", # PLW2901: Redefined loop variable -- FIX ME - "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception - "PT018", # Assertion should be broken down into multiple parts - "S101", # Use of `assert` detected -- DO NOT FIX - "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME - "SLF001", # Private member accessed: `_Iterator` -- FIX ME - "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX -] [tool.codespell] ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" From b8afb214f8c8d185dc42dafb9676becf512ca7fa Mon Sep 17 00:00:00 2001 From: Marco-campione-github <80974790+Marco-campione-github@users.noreply.github.com> Date: Fri, 31 May 2024 10:11:09 +0200 Subject: [PATCH 1387/1543] Changed the N to self.N in show_data in segment_tree.py (#11276) --- data_structures/binary_tree/segment_tree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index c7069b3f6069..084fcf84955d 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -98,7 +98,7 @@ def query_recursive(self, idx, left, right, a, b): def show_data(self): show_list = [] - for i in range(1, N + 1): + for i in range(1, self.N + 1): show_list += [self.query(i, i)] print(show_list) From 70bd06db4642a2323ff397b041d40bc95ed6a5bf Mon Sep 17 00:00:00 2001 From: Pedram_Mohajer <48964282+pedram-mohajer@users.noreply.github.com> Date: Sat, 1 Jun 2024 05:09:03 -0400 Subject: [PATCH 1388/1543] add doctest/document to actual_power and document to power (#11187) * Update power.py * Update divide_and_conquer/power.py --------- Co-authored-by: Tianyi Zheng --- divide_and_conquer/power.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/divide_and_conquer/power.py b/divide_and_conquer/power.py index f2e023afd536..faf6a3476d40 100644 --- a/divide_and_conquer/power.py +++ b/divide_and_conquer/power.py @@ -2,6 +2,20 @@ def actual_power(a: int, b: int): """ Function using divide and conquer to calculate a^b. It only works for integer a,b. + + :param a: The base of the power operation, an integer. + :param b: The exponent of the power operation, a non-negative integer. + :return: The result of a^b. + + Examples: + >>> actual_power(3, 2) + 9 + >>> actual_power(5, 3) + 125 + >>> actual_power(2, 5) + 32 + >>> actual_power(7, 0) + 1 """ if b == 0: return 1 @@ -13,6 +27,10 @@ def actual_power(a: int, b: int): def power(a: int, b: int) -> float: """ + :param a: The base (integer). + :param b: The exponent (integer). + :return: The result of a^b, as a float for negative exponents. 
+ >>> power(4,6) 4096 >>> power(2,3) From 723cf9c42839c47e9e6fb83362a7391177355505 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 1 Jun 2024 02:17:07 -0700 Subject: [PATCH 1389/1543] Remove duplicate implementation of median of two arrays algorithm (#11420) * Remove duplicate implementation of median of two arrays algorithm Remove maths/median_of_two_arrays.py because the repo has two implementations of this algorithm, with data_structures/arrays/median_two_array.py being the other. Even though maths/median_of_two_arrays.py is the older implementation, the newer implementation is better documented, has better error handling, and is already located in a more appropriate directory. * updating DIRECTORY.md --------- Co-authored-by: tianyizheng02 --- DIRECTORY.md | 1 - maths/median_of_two_arrays.py | 33 --------------------------------- 2 files changed, 34 deletions(-) delete mode 100644 maths/median_of_two_arrays.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 4a053a3f1b7f..2094fc3a980e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -661,7 +661,6 @@ * [Manhattan Distance](maths/manhattan_distance.py) * [Matrix Exponentiation](maths/matrix_exponentiation.py) * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) - * [Median Of Two Arrays](maths/median_of_two_arrays.py) * [Minkowski Distance](maths/minkowski_distance.py) * [Mobius Function](maths/mobius_function.py) * [Modular Division](maths/modular_division.py) diff --git a/maths/median_of_two_arrays.py b/maths/median_of_two_arrays.py deleted file mode 100644 index 55aa587a9c4b..000000000000 --- a/maths/median_of_two_arrays.py +++ /dev/null @@ -1,33 +0,0 @@ -from __future__ import annotations - - -def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float: - """ - >>> median_of_two_arrays([1, 2], [3]) - 2 - >>> median_of_two_arrays([0, -1.1], [2.5, 1]) - 0.5 - >>> median_of_two_arrays([], [2.5, 1]) - 1.75 - >>> median_of_two_arrays([], [0]) - 0 - >>> median_of_two_arrays([], []) - Traceback (most recent call last): - ... - IndexError: list index out of range - """ - all_numbers = sorted(nums1 + nums2) - div, mod = divmod(len(all_numbers), 2) - if mod == 1: - return all_numbers[div] - else: - return (all_numbers[div] + all_numbers[div - 1]) / 2 - - -if __name__ == "__main__": - import doctest - - doctest.testmod() - array_1 = [float(x) for x in input("Enter the elements of first array: ").split()] - array_2 = [float(x) for x in input("Enter the elements of second array: ").split()] - print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}") From edee8e644b09a21a1f70d3a59d57feed51c74004 Mon Sep 17 00:00:00 2001 From: Vishal Kumar Gupta Date: Sun, 2 Jun 2024 02:41:40 +0100 Subject: [PATCH 1390/1543] use format to remove '0b' (#11307) * use format to remove '0b' * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: error message for float input --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- bit_manipulation/binary_and_operator.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bit_manipulation/binary_and_operator.py b/bit_manipulation/binary_and_operator.py index 36f6c668d9b3..f33b8b1c0ab4 100644 --- a/bit_manipulation/binary_and_operator.py +++ b/bit_manipulation/binary_and_operator.py @@ -26,7 +26,7 @@ def binary_and(a: int, b: int) -> str: >>> binary_and(0, 1.1) Traceback (most recent call last): ... 
- TypeError: 'float' object cannot be interpreted as an integer + ValueError: Unknown format code 'b' for object of type 'float' >>> binary_and("0", "1") Traceback (most recent call last): ... @@ -35,8 +35,8 @@ def binary_and(a: int, b: int) -> str: if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive") - a_binary = str(bin(a))[2:] # remove the leading "0b" - b_binary = str(bin(b))[2:] # remove the leading "0b" + a_binary = format(a, "b") + b_binary = format(b, "b") max_len = max(len(a_binary), len(b_binary)) From 2f1704dae579295ea2f47584ef80b4b321a284d7 Mon Sep 17 00:00:00 2001 From: Mandeep Singh <135956602+MannCode@users.noreply.github.com> Date: Sun, 2 Jun 2024 18:27:35 -0700 Subject: [PATCH 1391/1543] issue #11150 Ensure explicit column selection and data type setting in data reading process. (#11302) * issue #11150 Ensure explicit column selection and data type setting in data reading process. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- machine_learning/sequential_minimum_optimization.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 3abdd6ccbed8..2ebdeb764a80 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -463,7 +463,11 @@ def test_cancel_data(): with open(r"cancel_data.csv", "w") as f: f.write(content) - data = pd.read_csv(r"cancel_data.csv", header=None) + data = pd.read_csv( + "cancel_data.csv", + header=None, + dtype={0: str}, # Assuming the first column contains string data + ) # 1: pre-processing data del data[data.columns.tolist()[0]] From ffaa976f6c5a5de30e284ae2fc8122f40cd3fa6a Mon Sep 17 00:00:00 2001 From: Harsh buddhdev Date: Sun, 2 Jun 2024 23:00:26 -0400 Subject: [PATCH 1392/1543] Fixes #9943 (#10252) * added doctest for all_permutations.py * added doctest for all_subsequences.py * added doctest for all_subsequences.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * doctest added * updated * Update backtracking/all_subsequences.py --------- Co-authored-by: Harsh Buddhdev Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- backtracking/all_permutations.py | 36 ++++++++++++++++++++++ backtracking/all_subsequences.py | 52 +++++++++++++++++++++++++++++++- 2 files changed, 87 insertions(+), 1 deletion(-) diff --git a/backtracking/all_permutations.py b/backtracking/all_permutations.py index c483cd62c99b..f376e6fa0945 100644 --- a/backtracking/all_permutations.py +++ b/backtracking/all_permutations.py @@ -23,6 +23,42 @@ def create_state_space_tree( Creates a state space tree to iterate through each branch using DFS. We know that each state has exactly len(sequence) - index children. It terminates when it reaches the end of the given sequence. + + :param sequence: The input sequence for which permutations are generated. + :param current_sequence: The current permutation being built. + :param index: The current index in the sequence. + :param index_used: list to track which elements are used in permutation. 
+ + Example 1: + >>> sequence = [1, 2, 3] + >>> current_sequence = [] + >>> index_used = [False, False, False] + >>> create_state_space_tree(sequence, current_sequence, 0, index_used) + [1, 2, 3] + [1, 3, 2] + [2, 1, 3] + [2, 3, 1] + [3, 1, 2] + [3, 2, 1] + + Example 2: + >>> sequence = ["A", "B", "C"] + >>> current_sequence = [] + >>> index_used = [False, False, False] + >>> create_state_space_tree(sequence, current_sequence, 0, index_used) + ['A', 'B', 'C'] + ['A', 'C', 'B'] + ['B', 'A', 'C'] + ['B', 'C', 'A'] + ['C', 'A', 'B'] + ['C', 'B', 'A'] + + Example 3: + >>> sequence = [1] + >>> current_sequence = [] + >>> index_used = [False] + >>> create_state_space_tree(sequence, current_sequence, 0, index_used) + [1] """ if index == len(sequence): diff --git a/backtracking/all_subsequences.py b/backtracking/all_subsequences.py index 7844a829d046..18696054eb7e 100644 --- a/backtracking/all_subsequences.py +++ b/backtracking/all_subsequences.py @@ -22,6 +22,56 @@ def create_state_space_tree( Creates a state space tree to iterate through each branch using DFS. We know that each state has exactly two children. It terminates when it reaches the end of the given sequence. + + :param sequence: The input sequence for which subsequences are generated. + :param current_subsequence: The current subsequence being built. + :param index: The current index in the sequence. + + Example: + >>> sequence = [3, 2, 1] + >>> current_subsequence = [] + >>> create_state_space_tree(sequence, current_subsequence, 0) + [] + [1] + [2] + [2, 1] + [3] + [3, 1] + [3, 2] + [3, 2, 1] + + >>> sequence = ["A", "B"] + >>> current_subsequence = [] + >>> create_state_space_tree(sequence, current_subsequence, 0) + [] + ['B'] + ['A'] + ['A', 'B'] + + >>> sequence = [] + >>> current_subsequence = [] + >>> create_state_space_tree(sequence, current_subsequence, 0) + [] + + >>> sequence = [1, 2, 3, 4] + >>> current_subsequence = [] + >>> create_state_space_tree(sequence, current_subsequence, 0) + [] + [4] + [3] + [3, 4] + [2] + [2, 4] + [2, 3] + [2, 3, 4] + [1] + [1, 4] + [1, 3] + [1, 3, 4] + [1, 2] + [1, 2, 4] + [1, 2, 3] + [1, 2, 3, 4] """ if index == len(sequence): @@ -35,7 +85,7 @@ def create_state_space_tree( if __name__ == "__main__": - seq: list[Any] = [3, 1, 2, 4] + seq: list[Any] = [1, 2, 3] generate_all_subsequences(seq) seq.clear() From c919579869ae9f57d6878336af6de6bc9a001c61 Mon Sep 17 00:00:00 2001 From: AtomicVar Date: Mon, 3 Jun 2024 11:15:01 +0800 Subject: [PATCH 1393/1543] Add KL divergence loss algorithm (#11238) * Add KL divergence loss algorithm * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- machine_learning/loss_functions.py | 34 ++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index 16e5a3278b73..150035661eb7 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -629,6 +629,40 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> return np.mean(loss) +def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the Kullback-Leibler divergence (KL divergence) loss between true labels + and predicted probabilities. + + KL divergence loss quantifies dissimilarity between true labels and predicted + probabilities. It's often used in training generative models. 
+ + KL = Σ(y_true * ln(y_true / y_pred)) + + Reference: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence + + Parameters: + - y_true: True class probabilities + - y_pred: Predicted class probabilities + + >>> true_labels = np.array([0.2, 0.3, 0.5]) + >>> predicted_probs = np.array([0.3, 0.3, 0.4]) + >>> kullback_leibler_divergence(true_labels, predicted_probs) + 0.030478754035472025 + >>> true_labels = np.array([0.2, 0.3, 0.5]) + >>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5]) + >>> kullback_leibler_divergence(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + kl_loss = y_true * np.log(y_true / y_pred) + return np.sum(kl_loss) + + if __name__ == "__main__": import doctest From 5827aac79a36f0d43e9bd9f1c9ca11da07b2d623 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 3 Jun 2024 18:21:27 -0300 Subject: [PATCH 1394/1543] [pre-commit.ci] pre-commit autoupdate (#11430) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.5 → v0.4.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.5...v0.4.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 43bf547dec6e..a04f4f8b2165 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.5 + rev: v0.4.7 hooks: - id: ruff - id: ruff-format From 41a1cdf38d9cb1a14c9149d2d815efa2259679ef Mon Sep 17 00:00:00 2001 From: Yuri Batista Ishizawa Date: Tue, 11 Jun 2024 06:45:00 -0300 Subject: [PATCH 1395/1543] Add rainfall intensity calculation function (#11432) * Add rainfall intensity calculation function * chore: improve fuction and coefficient documentation * Update physics/rainfall_intensity.py --------- Co-authored-by: Tianyi Zheng --- physics/rainfall_intensity.py | 143 ++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 physics/rainfall_intensity.py diff --git a/physics/rainfall_intensity.py b/physics/rainfall_intensity.py new file mode 100644 index 000000000000..cee8d50ddc2f --- /dev/null +++ b/physics/rainfall_intensity.py @@ -0,0 +1,143 @@ +""" +Rainfall Intensity +================== +This module contains functions to calculate the intensity of +a rainfall event for a given duration and return period. + +This function uses the Sherman intensity-duration-frequency curve. + +References +---------- +- Aparicio, F. (1997): Fundamentos de Hidrología de Superficie. + Balderas, México, Limusa. 303 p. +- https://en.wikipedia.org/wiki/Intensity-duration-frequency_curve +""" + + +def rainfall_intensity( + coefficient_k: float, + coefficient_a: float, + coefficient_b: float, + coefficient_c: float, + return_period: float, + duration: float, +) -> float: + """ + Calculate the intensity of a rainfall event for a given duration and return period. 
+ It's based on the Sherman intensity-duration-frequency curve: + + I = k * T^a / (D + b)^c + + where: + I = Intensity of the rainfall event [mm/h] + k, a, b, c = Coefficients obtained through statistical distribution adjust + T = Return period in years + D = Rainfall event duration in minutes + + Parameters + ---------- + coefficient_k : float + Coefficient obtained through statistical distribution adjust. + coefficient_a : float + Coefficient obtained through statistical distribution adjust. + coefficient_b : float + Coefficient obtained through statistical distribution adjust. + coefficient_c : float + Coefficient obtained through statistical distribution adjust. + return_period : float + Return period in years. + duration : float + Rainfall event duration in minutes. + + Returns + ------- + intensity : float + Intensity of the rainfall event in mm/h. + + Raises + ------ + ValueError + If any of the parameters are not positive. + + Examples + -------- + + >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 10, 60) + 49.83339231138578 + + >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 10, 30) + 77.36319588106228 + + >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 5, 60) + 43.382487747633625 + + >>> rainfall_intensity(0, 0.2, 11.6, 0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, -0.2, 11.6, 0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0.2, -11.6, 0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0.2, 11.6, -0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0, 11.6, 0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0.2, 0, 0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0.2, 11.6, 0, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(0, 0.2, 11.6, 0.81, 10, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 0, 60) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. + + >>> rainfall_intensity(1000, 0.2, 11.6, 0.81, 10, 0) + Traceback (most recent call last): + ... + ValueError: All parameters must be positive. 
+ + """ + if ( + coefficient_k <= 0 + or coefficient_a <= 0 + or coefficient_b <= 0 + or coefficient_c <= 0 + or return_period <= 0 + or duration <= 0 + ): + raise ValueError("All parameters must be positive.") + intensity = (coefficient_k * (return_period**coefficient_a)) / ( + (duration + coefficient_b) ** coefficient_c + ) + return intensity + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 446742387e83f94f3d54ce640cb07004180130ee Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 13 Jun 2024 14:47:29 -0700 Subject: [PATCH 1396/1543] Fix grammar and spelling mistakes in sequential_minimum_optimization.py (#11427) --- .../sequential_minimum_optimization.py | 135 +++++++++--------- 1 file changed, 66 insertions(+), 69 deletions(-) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index 2ebdeb764a80..625fc28fe60c 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -1,11 +1,9 @@ """ - Implementation of sequential minimal optimization (SMO) for support vector machines - (SVM). +Sequential minimal optimization (SMO) for support vector machines (SVM) - Sequential minimal optimization (SMO) is an algorithm for solving the quadratic - programming (QP) problem that arises during the training of support vector - machines. - It was invented by John Platt in 1998. +Sequential minimal optimization (SMO) is an algorithm for solving the quadratic +programming (QP) problem that arises during the training of SVMs. It was invented by +John Platt in 1998. Input: 0: type: numpy.ndarray. @@ -124,8 +122,7 @@ def fit(self): b_old = self._b self._b = b - # 4: update error value,here we only calculate those non-bound samples' - # error + # 4: update error, here we only calculate the error for non-bound samples self._unbound = [i for i in self._all_samples if self._is_unbound(i)] for s in self.unbound: if s in (i1, i2): @@ -136,7 +133,7 @@ def fit(self): + (self._b - b_old) ) - # if i1 or i2 is non-bound,update there error value to zero + # if i1 or i2 is non-bound, update their error value to zero if self._is_unbound(i1): self._error[i1] = 0 if self._is_unbound(i2): @@ -161,7 +158,7 @@ def predict(self, test_samples, classify=True): results.append(result) return np.array(results) - # Check if alpha violate KKT condition + # Check if alpha violates the KKT condition def _check_obey_kkt(self, index): alphas = self.alphas tol = self._tol @@ -172,20 +169,19 @@ def _check_obey_kkt(self, index): # Get value calculated from kernel function def _k(self, i1, i2): - # for test samples,use Kernel function + # for test samples, use kernel function if isinstance(i2, np.ndarray): return self.Kernel(self.samples[i1], i2) - # for train samples,Kernel values have been saved in matrix + # for training samples, kernel values have been saved in matrix else: return self._K_matrix[i1, i2] - # Get sample's error + # Get error for sample def _e(self, index): """ Two cases: - 1:Sample[index] is non-bound,Fetch error from list: _error - 2:sample[index] is bound,Use predicted value deduct true value: g(xi) - yi - + 1: Sample[index] is non-bound, fetch error from list: _error + 2: sample[index] is bound, use predicted value minus true value: g(xi) - yi """ # get from error data if self._is_unbound(index): @@ -196,7 +192,7 @@ def _e(self, index): yi = self.tags[index] return gx - yi - # Calculate Kernel matrix of all possible i1,i2 ,saving time + # Calculate kernel 
matrix of all possible i1, i2, saving time def _calculate_k_matrix(self): k_matrix = np.zeros([self.length, self.length]) for i in self._all_samples: @@ -206,7 +202,7 @@ def _calculate_k_matrix(self): ) return k_matrix - # Predict test sample's tag + # Predict tag for test sample def _predict(self, sample): k = self._k predicted_value = ( @@ -222,30 +218,31 @@ def _predict(self, sample): # Choose alpha1 and alpha2 def _choose_alphas(self): - locis = yield from self._choose_a1() - if not locis: + loci = yield from self._choose_a1() + if not loci: return None - return locis + return loci def _choose_a1(self): """ - Choose first alpha ;steps: - 1:First loop over all sample - 2:Second loop over all non-bound samples till all non-bound samples does not - voilate kkt condition. - 3:Repeat this two process endlessly,till all samples does not voilate kkt - condition samples after first loop. + Choose first alpha + Steps: + 1: First loop over all samples + 2: Second loop over all non-bound samples until no non-bound samples violate + the KKT condition. + 3: Repeat these two processes until no samples violate the KKT condition + after the first loop. """ while True: all_not_obey = True # all sample - print("scanning all sample!") + print("Scanning all samples!") for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]: all_not_obey = False yield from self._choose_a2(i1) # non-bound sample - print("scanning non-bound sample!") + print("Scanning non-bound samples!") while True: not_obey = True for i1 in [ @@ -256,20 +253,21 @@ def _choose_a1(self): not_obey = False yield from self._choose_a2(i1) if not_obey: - print("all non-bound samples fit the KKT condition!") + print("All non-bound samples satisfy the KKT condition!") break if all_not_obey: - print("all samples fit the KKT condition! Optimization done!") + print("All samples satisfy the KKT condition!") break return False def _choose_a2(self, i1): """ - Choose the second alpha by using heuristic algorithm ;steps: - 1: Choose alpha2 which gets the maximum step size (|E1 - E2|). - 2: Start in a random point,loop over all non-bound samples till alpha1 and + Choose the second alpha using a heuristic algorithm + Steps: + 1: Choose alpha2 that maximizes the step size (|E1 - E2|). + 2: Start in a random point, loop over all non-bound samples till alpha1 and alpha2 are optimized. - 3: Start in a random point,loop over all samples till alpha1 and alpha2 are + 3: Start in a random point, loop over all samples till alpha1 and alpha2 are optimized. 
""" self._unbound = [i for i in self._all_samples if self._is_unbound(i)] @@ -306,7 +304,7 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): if i1 == i2: return None, None - # calculate L and H which bound the new alpha2 + # calculate L and H which bound the new alpha2 s = y1 * y2 if s == -1: l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) # noqa: E741 @@ -320,7 +318,7 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): k22 = k(i2, i2) k12 = k(i1, i2) - # select the new alpha2 which could get the minimal objectives + # select the new alpha2 which could achieve the minimal objectives if (eta := k11 + k22 - 2.0 * k12) > 0.0: a2_new_unc = a2 + (y2 * (e1 - e2)) / eta # a2_new has a boundary @@ -335,7 +333,7 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): l1 = a1 + s * (a2 - l) h1 = a1 + s * (a2 - h) - # way 1 + # Method 1 f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2) f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2) ol = ( @@ -353,9 +351,8 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): + s * h * h1 * k(i1, i2) ) """ - # way 2 - Use objective function check which alpha2 new could get the minimal - objectives + Method 2: Use objective function to check which alpha2_new could achieve the + minimal objectives """ if ol < (oh - self._eps): a2_new = l @@ -375,7 +372,7 @@ def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2): return a1_new, a2_new - # Normalise data using min_max way + # Normalize data using min-max method def _norm(self, data): if self._init: self._min = np.min(data, axis=0) @@ -424,7 +421,7 @@ def _rbf(self, v1, v2): def _check(self): if self._kernel == self._rbf and self.gamma < 0: - raise ValueError("gamma value must greater than 0") + raise ValueError("gamma value must be non-negative") def _get_kernel(self, kernel_name): maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf} @@ -444,27 +441,27 @@ def call_func(*args, **kwargs): start_time = time.time() func(*args, **kwargs) end_time = time.time() - print(f"smo algorithm cost {end_time - start_time} seconds") + print(f"SMO algorithm cost {end_time - start_time} seconds") return call_func @count_time -def test_cancel_data(): - print("Hello!\nStart test svm by smo algorithm!") +def test_cancer_data(): + print("Hello!\nStart test SVM using the SMO algorithm!") # 0: download dataset and load into pandas' dataframe - if not os.path.exists(r"cancel_data.csv"): + if not os.path.exists(r"cancer_data.csv"): request = urllib.request.Request( # noqa: S310 CANCER_DATASET_URL, headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}, ) response = urllib.request.urlopen(request) # noqa: S310 content = response.read().decode("utf-8") - with open(r"cancel_data.csv", "w") as f: + with open(r"cancer_data.csv", "w") as f: f.write(content) data = pd.read_csv( - "cancel_data.csv", + "cancer_data.csv", header=None, dtype={0: str}, # Assuming the first column contains string data ) @@ -479,14 +476,14 @@ def test_cancel_data(): train_data, test_data = samples[:328, :], samples[328:, :] test_tags, test_samples = test_data[:, 0], test_data[:, 1:] - # 3: choose kernel function,and set initial alphas to zero(optional) - mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) + # 3: choose kernel function, and set initial alphas to zero (optional) + my_kernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) al = np.zeros(train_data.shape[0]) # 4: calculating best alphas using SMO algorithm and predict test_data samples mysvm = SmoSVM( 
train=train_data, - kernel_func=mykernel, + kernel_func=my_kernel, alpha_list=al, cost=0.4, b=0.0, @@ -501,30 +498,30 @@ def test_cancel_data(): for i in range(test_tags.shape[0]): if test_tags[i] == predict[i]: score += 1 - print(f"\nall: {test_num}\nright: {score}\nfalse: {test_num - score}") + print(f"\nAll: {test_num}\nCorrect: {score}\nIncorrect: {test_num - score}") print(f"Rough Accuracy: {score / test_tags.shape[0]}") def test_demonstration(): # change stdout - print("\nStart plot,please wait!!!") + print("\nStarting plot, please wait!") sys.stdout = open(os.devnull, "w") ax1 = plt.subplot2grid((2, 2), (0, 0)) ax2 = plt.subplot2grid((2, 2), (0, 1)) ax3 = plt.subplot2grid((2, 2), (1, 0)) ax4 = plt.subplot2grid((2, 2), (1, 1)) - ax1.set_title("linear svm,cost:0.1") + ax1.set_title("Linear SVM, cost = 0.1") test_linear_kernel(ax1, cost=0.1) - ax2.set_title("linear svm,cost:500") + ax2.set_title("Linear SVM, cost = 500") test_linear_kernel(ax2, cost=500) - ax3.set_title("rbf kernel svm,cost:0.1") + ax3.set_title("RBF kernel SVM, cost = 0.1") test_rbf_kernel(ax3, cost=0.1) - ax4.set_title("rbf kernel svm,cost:500") + ax4.set_title("RBF kernel SVM, cost = 500") test_rbf_kernel(ax4, cost=500) sys.stdout = sys.__stdout__ - print("Plot done!!!") + print("Plot done!") def test_linear_kernel(ax, cost): @@ -535,10 +532,10 @@ def test_linear_kernel(ax, cost): scaler = StandardScaler() train_x_scaled = scaler.fit_transform(train_x, train_y) train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled)) - mykernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5) + my_kernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5) mysvm = SmoSVM( train=train_data, - kernel_func=mykernel, + kernel_func=my_kernel, cost=cost, tolerance=0.001, auto_norm=False, @@ -555,10 +552,10 @@ def test_rbf_kernel(ax, cost): scaler = StandardScaler() train_x_scaled = scaler.fit_transform(train_x, train_y) train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled)) - mykernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) + my_kernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5) mysvm = SmoSVM( train=train_data, - kernel_func=mykernel, + kernel_func=my_kernel, cost=cost, tolerance=0.001, auto_norm=False, @@ -571,11 +568,11 @@ def plot_partition_boundary( model, train_data, ax, resolution=100, colors=("b", "k", "r") ): """ - We can not get the optimum w of our kernel svm model which is different from linear - svm. For this reason, we generate randomly distributed points with high desity and - prediced values of these points are calculated by using our trained model. Then we - could use this prediced values to draw contour map. - And this contour map can represent svm's partition boundary. + We cannot get the optimal w of our kernel SVM model, which is different from a + linear SVM. For this reason, we generate randomly distributed points with high + density, and predicted values of these points are calculated using our trained + model. Then we could use this predicted values to draw contour map, and this contour + map represents the SVM's partition boundary. 
""" train_data_x = train_data[:, 1] train_data_y = train_data[:, 2] @@ -620,6 +617,6 @@ def plot_partition_boundary( if __name__ == "__main__": - test_cancel_data() + test_cancer_data() test_demonstration() plt.show() From af6a45e982213ef52a2f747dec6b58d668bfce5b Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 17 Jun 2024 00:19:32 +0300 Subject: [PATCH 1397/1543] Remove some per file ignores (#11381) * Remove some per file ignores * updating DIRECTORY.md * updating DIRECTORY.md --------- Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 1 + pyproject.toml | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 2094fc3a980e..04551fad3685 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -863,6 +863,7 @@ * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) * [Photoelectric Effect](physics/photoelectric_effect.py) * [Potential Energy](physics/potential_energy.py) + * [Rainfall Intensity](physics/rainfall_intensity.py) * [Reynolds Number](physics/reynolds_number.py) * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) diff --git a/pyproject.toml b/pyproject.toml index 429f4fab9a52..bb8657183164 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,12 +74,6 @@ lint.ignore = [ "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] -lint.per-file-ignores."arithmetic_analysis/newton_raphson.py" = [ - "PGH001", -] -lint.per-file-ignores."data_structures/binary_tree/binary_search_tree_recursive.py" = [ - "BLE001", -] lint.per-file-ignores."data_structures/hashing/tests/test_hash_map.py" = [ "BLE001", ] From df94d460ac8d220f97851f358abc0102ae47d3db Mon Sep 17 00:00:00 2001 From: raj <64704676+ra230537@users.noreply.github.com> Date: Sun, 16 Jun 2024 19:17:55 -0300 Subject: [PATCH 1398/1543] Fix/fixes get top billionaries code (#11466) * fix: modify the depracated code and add new tests * fix: remove test from pr * fix: remove the useless utc import * fix: add explicit tz argument * fix: fixes ruff checking * Remove UP017 #noqa comments from code * Update get_top_billionaires.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update get_top_billionaires.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- web_programming/get_top_billionaires.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/web_programming/get_top_billionaires.py b/web_programming/get_top_billionaires.py index 24828b6d787c..99f6e0be948a 100644 --- a/web_programming/get_top_billionaires.py +++ b/web_programming/get_top_billionaires.py @@ -65,7 +65,7 @@ def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]: "Country": person["countryOfCitizenship"], "Gender": person["gender"], "Worth ($)": f"{person['finalWorth'] / 1000:.1f} Billion", - "Age": years_old(person["birthDate"]), + "Age": str(years_old(person["birthDate"] / 1000)), } for person in response_json["personList"]["personsLists"] ] @@ -95,4 +95,7 @@ def display_billionaires(forbes_billionaires: list[dict[str, int | str]]) -> Non if __name__ == "__main__": + from doctest import testmod + + testmod() display_billionaires(get_forbes_real_time_billionaires()) From 31d1cd8402ba48aca26d9f1d2774f929610e7180 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 17 Jun 2024 08:31:32 -0400 Subject: [PATCH 
1399/1543] [pre-commit.ci] pre-commit autoupdate (#11435) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.7 → v0.4.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.7...v0.4.8) * Update .pre-commit-config.yaml --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a04f4f8b2165..fc8545b5159b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.7 + rev: v0.4.9 hooks: - id: ruff - id: ruff-format From 1cfca52db73ee18b9e9e08febe9e7d42f96e43db Mon Sep 17 00:00:00 2001 From: Snoppy Date: Mon, 17 Jun 2024 21:27:07 +0800 Subject: [PATCH 1400/1543] chore: fix typos (#11467) * chore: fix typos Signed-off-by: snoppy * Apply suggestions from code review Co-authored-by: Tianyi Zheng --------- Signed-off-by: snoppy Co-authored-by: Christian Clauss Co-authored-by: Tianyi Zheng --- computer_vision/haralick_descriptors.py | 2 +- graphs/strongly_connected_components.py | 2 +- maths/points_are_collinear_3d.py | 10 +++++----- neural_network/convolution_neural_network.py | 8 ++++---- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py index 712bd49668f8..634f0495797b 100644 --- a/computer_vision/haralick_descriptors.py +++ b/computer_vision/haralick_descriptors.py @@ -141,7 +141,7 @@ def transform( center_x, center_y = (x // 2 for x in kernel.shape) - # Use padded image when applying convolotion + # Use padded image when applying convolution # to not go out of bounds of the original the image transformed = np.zeros(image.shape, dtype=np.uint8) padded = np.pad(image, 1, "constant", constant_values=constant) diff --git a/graphs/strongly_connected_components.py b/graphs/strongly_connected_components.py index 325e5c1f33a3..4d4cf88035b5 100644 --- a/graphs/strongly_connected_components.py +++ b/graphs/strongly_connected_components.py @@ -38,7 +38,7 @@ def find_components( reversed_graph: dict[int, list[int]], vert: int, visited: list[bool] ) -> list[int]: """ - Use depth first search to find strongliy connected + Use depth first search to find strongly connected vertices. Now graph is reversed >>> find_components({0: [1], 1: [2], 2: [0]}, 0, 5 * [False]) [0, 1, 2] diff --git a/maths/points_are_collinear_3d.py b/maths/points_are_collinear_3d.py index 3bc0b3b9ebe5..c7adddda9494 100644 --- a/maths/points_are_collinear_3d.py +++ b/maths/points_are_collinear_3d.py @@ -76,9 +76,9 @@ def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d: def is_zero_vector(vector: Vector3d, accuracy: int) -> bool: """ - Check if vector is equal to (0, 0, 0) of not. + Check if vector is equal to (0, 0, 0) or not. - Sine the algorithm is very accurate, we will never get a zero vector, + Since the algorithm is very accurate, we will never get a zero vector, so we need to round the vector axis, because we want a result that is either True or False. In other applications, we can return a float that represents the collinearity ratio. 
@@ -97,9 +97,9 @@ def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> boo """ Check if three points are collinear or not. - 1- Create tow vectors AB and AC. - 2- Get the cross vector of the tow vectors. - 3- Calcolate the length of the cross vector. + 1- Create two vectors AB and AC. + 2- Get the cross vector of the two vectors. + 3- Calculate the length of the cross vector. 4- If the length is zero then the points are collinear, else they are not. The use of the accuracy parameter is explained in is_zero_vector docstring. diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index 3c551924442d..d4ac360a98de 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -1,7 +1,7 @@ """ - - - - - -- - - - - - - - - - - - - - - - - - - - - - - Name - - CNN - Convolution Neural Network For Photo Recognizing -Goal - - Recognize Handing Writing Word Photo +Goal - - Recognize Handwriting Word Photo Detail: Total 5 layers neural network * Convolution layer * Pooling layer @@ -135,7 +135,7 @@ def convolute(self, data, convs, w_convs, thre_convs, conv_step): ) data_featuremap.append(featuremap) - # expanding the data slice to One dimenssion + # expanding the data slice to one dimension focus1_list = [] for each_focus in data_focus: focus1_list.extend(self.Expand_Mat(each_focus)) @@ -304,7 +304,7 @@ def draw_error(): plt.grid(True, alpha=0.5) plt.show() - print("------------------Training Complished---------------------") + print("------------------Training Complete---------------------") print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}")) if draw_e: draw_error() @@ -353,5 +353,5 @@ def convolution(self, data): if __name__ == "__main__": """ - I will put the example on other file + I will put the example in another file """ From 75b86671879cfbb83d241c3a3487b32c6dac9d91 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 25 Jun 2024 00:00:47 +0200 Subject: [PATCH 1401/1543] [pre-commit.ci] pre-commit autoupdate (#11472) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.9 → v0.4.10](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.9...v0.4.10) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fc8545b5159b..1eddff7ab0e6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.9 + rev: v0.4.10 hooks: - id: ruff - id: ruff-format From 6882a8b80806f2dc53d53a0ecc00c2c98bec3fba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Vitor?= <92267577+ShiryuReb@users.noreply.github.com> Date: Wed, 26 Jun 2024 03:06:57 -0300 Subject: [PATCH 1402/1543] Tests/add new test case weight_conversion (#11468) * add new test * add new test --- conversions/weight_conversion.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py index e8326e0b688f..0777aead9f02 100644 --- a/conversions/weight_conversion.py +++ b/conversions/weight_conversion.py @@ -297,6 +297,12 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float: 
1.660540199e-23 >>> weight_conversion("atomic-mass-unit","atomic-mass-unit",2) 1.999999998903455 + >>> weight_conversion("slug", "kilogram", 1) + Traceback (most recent call last): + ... + ValueError: Invalid 'from_type' or 'to_type' value: 'slug', 'kilogram' + Supported values are: kilogram, gram, milligram, metric-ton, long-ton, short-ton, \ +pound, stone, ounce, carrat, atomic-mass-unit """ if to_type not in KILOGRAM_CHART or from_type not in WEIGHT_TYPE_CHART: msg = ( From 716bdeb68b1e81aafe886e382319c6dab882dacc Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 07:02:29 +0200 Subject: [PATCH 1403/1543] [pre-commit.ci] pre-commit autoupdate (#11473) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.10 → v0.5.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.10...v0.5.0) - [github.com/pre-commit/mirrors-mypy: v1.10.0 → v1.10.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.10.0...v1.10.1) * Fix ruff issues * Fix ruff issues --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 5 ++- backtracking/knight_tour.py | 6 +-- data_structures/binary_tree/is_sorted.py | 6 +-- data_structures/binary_tree/red_black_tree.py | 37 +++++-------------- docs/source/__init__.py | 0 graphs/graph_adjacency_matrix.py | 8 ++-- graphs/multi_heuristic_astar.py | 4 +- graphs/tarjans_scc.py | 2 +- hashes/md5.py | 4 +- maths/radix2_fft.py | 1 - project_euler/problem_034/__init__.py | 1 - project_euler/problem_035/__init__.py | 1 - project_euler/problem_037/__init__.py | 1 - project_euler/problem_037/sol1.py | 9 ++--- project_euler/problem_039/__init__.py | 1 - project_euler/problem_041/__init__.py | 1 - project_euler/problem_043/__init__.py | 1 - project_euler/problem_044/__init__.py | 1 - project_euler/problem_045/__init__.py | 1 - project_euler/problem_046/__init__.py | 1 - project_euler/problem_055/__init__.py | 1 - project_euler/problem_058/__init__.py | 1 - project_euler/problem_063/__init__.py | 1 - project_euler/problem_072/sol1.py | 2 +- project_euler/problem_089/__init__.py | 1 - project_euler/problem_097/__init__.py | 1 - searches/binary_tree_traversal.py | 6 +-- sorts/external_sort.py | 5 +-- source/__init__.py | 0 .../can_string_be_rearranged_as_palindrome.py | 4 +- strings/is_valid_email_address.py | 4 +- strings/text_justification.py | 12 +++--- 32 files changed, 44 insertions(+), 85 deletions(-) create mode 100644 docs/source/__init__.py create mode 100644 source/__init__.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1eddff7ab0e6..a3f5a5e51855 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.10 + rev: v0.5.0 hooks: - id: ruff - id: ruff-format @@ -47,10 +47,11 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.10.0 + rev: v1.10.1 hooks: - id: mypy args: + - --explicit-package-bases - --ignore-missing-imports - --install-types # See mirrors-mypy README.md - --non-interactive diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py index 5f7dee8d97bf..8906aaa1094c 100644 --- a/backtracking/knight_tour.py +++ b/backtracking/knight_tour.py @@ -24,10 +24,10 @@ 
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]: ] permissible_positions = [] - for position in positions: - y_test, x_test = position + for inner_position in positions: + y_test, x_test = inner_position if 0 <= y_test < n and 0 <= x_test < n: - permissible_positions.append(position) + permissible_positions.append(inner_position) return permissible_positions diff --git a/data_structures/binary_tree/is_sorted.py b/data_structures/binary_tree/is_sorted.py index 509a426611e5..91fc8ca82633 100644 --- a/data_structures/binary_tree/is_sorted.py +++ b/data_structures/binary_tree/is_sorted.py @@ -80,9 +80,9 @@ def is_sorted(self) -> bool: """ if self.left and (self.data < self.left.data or not self.left.is_sorted): return False - if self.right and (self.data > self.right.data or not self.right.is_sorted): - return False - return True + return not ( + self.right and (self.data > self.right.data or not self.right.is_sorted) + ) if __name__ == "__main__": diff --git a/data_structures/binary_tree/red_black_tree.py b/data_structures/binary_tree/red_black_tree.py index a9ecf897c701..752db1e7026c 100644 --- a/data_structures/binary_tree/red_black_tree.py +++ b/data_structures/binary_tree/red_black_tree.py @@ -1,8 +1,3 @@ -""" -psf/black : true -ruff : passed -""" - from __future__ import annotations from collections.abc import Iterator @@ -321,9 +316,7 @@ def check_coloring(self) -> bool: return False if self.left and not self.left.check_coloring(): return False - if self.right and not self.right.check_coloring(): - return False - return True + return not (self.right and not self.right.check_coloring()) def black_height(self) -> int | None: """Returns the number of black nodes from this node to the @@ -561,9 +554,7 @@ def test_rotations() -> bool: right_rot.right.right = RedBlackTree(10, parent=right_rot.right) right_rot.right.right.left = RedBlackTree(5, parent=right_rot.right.right) right_rot.right.right.right = RedBlackTree(20, parent=right_rot.right.right) - if tree != right_rot: - return False - return True + return tree == right_rot def test_insertion_speed() -> bool: @@ -606,13 +597,11 @@ def test_insert_and_search() -> bool: tree.insert(12) tree.insert(10) tree.insert(11) - if 5 in tree or -6 in tree or -10 in tree or 13 in tree: + if any(i in tree for i in (5, -6, -10, 13)): # Found something not in there return False - if not (11 in tree and 12 in tree and -8 in tree and 0 in tree): - # Didn't find something in there - return False - return True + # Find all these things in there + return all(i in tree for i in (11, 12, -8, 0)) def test_insert_delete() -> bool: @@ -634,9 +623,7 @@ def test_insert_delete() -> bool: tree = tree.remove(9) if not tree.check_color_properties(): return False - if list(tree.inorder_traverse()) != [-8, 0, 4, 8, 10, 11, 12]: - return False - return True + return list(tree.inorder_traverse()) == [-8, 0, 4, 8, 10, 11, 12] def test_floor_ceil() -> bool: @@ -664,9 +651,7 @@ def test_min_max() -> bool: tree.insert(24) tree.insert(20) tree.insert(22) - if tree.get_max() != 22 or tree.get_min() != -16: - return False - return True + return not (tree.get_max() != 22 or tree.get_min() != -16) def test_tree_traversal() -> bool: @@ -682,9 +667,7 @@ def test_tree_traversal() -> bool: return False if list(tree.preorder_traverse()) != [0, -16, 16, 8, 22, 20, 24]: return False - if list(tree.postorder_traverse()) != [-16, 8, 20, 24, 22, 16, 0]: - return False - return True + return list(tree.postorder_traverse()) == [-16, 8, 20, 24, 22, 16, 0] def 
test_tree_chaining() -> bool: @@ -695,9 +678,7 @@ def test_tree_chaining() -> bool: return False if list(tree.preorder_traverse()) != [0, -16, 16, 8, 22, 20, 24]: return False - if list(tree.postorder_traverse()) != [-16, 8, 20, 24, 22, 16, 0]: - return False - return True + return list(tree.postorder_traverse()) == [-16, 8, 20, 24, 22, 16, 0] def print_results(msg: str, passes: bool) -> None: diff --git a/docs/source/__init__.py b/docs/source/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/graphs/graph_adjacency_matrix.py b/graphs/graph_adjacency_matrix.py index 059a6aa9ffb5..568c84166e4b 100644 --- a/graphs/graph_adjacency_matrix.py +++ b/graphs/graph_adjacency_matrix.py @@ -156,9 +156,11 @@ def remove_vertex(self, vertex: T) -> None: self.vertex_to_index.pop(vertex) # decrement indices for vertices shifted by the deleted vertex in the adj matrix - for vertex in self.vertex_to_index: - if self.vertex_to_index[vertex] >= start_index: - self.vertex_to_index[vertex] = self.vertex_to_index[vertex] - 1 + for inner_vertex in self.vertex_to_index: + if self.vertex_to_index[inner_vertex] >= start_index: + self.vertex_to_index[inner_vertex] = ( + self.vertex_to_index[inner_vertex] - 1 + ) def contains_vertex(self, vertex: T) -> bool: """ diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index 6af9a187a4e9..47509beb8efb 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -123,9 +123,7 @@ def do_something(back_pointer, goal, start): def valid(p: TPos): if p[0] < 0 or p[0] > n - 1: return False - if p[1] < 0 or p[1] > n - 1: - return False - return True + return not (p[1] < 0 or p[1] > n - 1) def expand_state( diff --git a/graphs/tarjans_scc.py b/graphs/tarjans_scc.py index a75dc4d2ca95..b4a3bd5c4c35 100644 --- a/graphs/tarjans_scc.py +++ b/graphs/tarjans_scc.py @@ -103,4 +103,4 @@ def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]: edges = list(zip(source, target)) g = create_graph(n_vertices, edges) - assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g) + assert tarjan(g) == [[5], [6], [4], [3, 2, 1, 0]] diff --git a/hashes/md5.py b/hashes/md5.py index 2187006ec8a9..622a50d290e1 100644 --- a/hashes/md5.py +++ b/hashes/md5.py @@ -82,8 +82,8 @@ def reformat_hex(i: int) -> bytes: hex_rep = format(i, "08x")[-8:] little_endian_hex = b"" - for i in [3, 2, 1, 0]: - little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8") + for j in [3, 2, 1, 0]: + little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8") return little_endian_hex diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index 2c5cdc004d1d..d41dc82d5588 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -84,7 +84,6 @@ def __dft(self, which): # Corner case if len(dft) <= 1: return dft[0] - # next_ncol = self.c_max_length // 2 while next_ncol > 0: new_dft = [[] for i in range(next_ncol)] diff --git a/project_euler/problem_034/__init__.py b/project_euler/problem_034/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_034/__init__.py +++ b/project_euler/problem_034/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_035/__init__.py b/project_euler/problem_035/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_035/__init__.py +++ b/project_euler/problem_035/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_037/__init__.py b/project_euler/problem_037/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- 
a/project_euler/problem_037/__init__.py +++ b/project_euler/problem_037/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_037/sol1.py b/project_euler/problem_037/sol1.py index 9c09065f4bd0..c66eb9fb1735 100644 --- a/project_euler/problem_037/sol1.py +++ b/project_euler/problem_037/sol1.py @@ -85,11 +85,10 @@ def validate(n: int) -> bool: >>> validate(3797) True """ - if len(str(n)) > 3 and ( - not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])) - ): - return False - return True + return not ( + len(str(n)) > 3 + and (not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3]))) + ) def compute_truncated_primes(count: int = 11) -> list[int]: diff --git a/project_euler/problem_039/__init__.py b/project_euler/problem_039/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_039/__init__.py +++ b/project_euler/problem_039/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_041/__init__.py b/project_euler/problem_041/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_041/__init__.py +++ b/project_euler/problem_041/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_043/__init__.py b/project_euler/problem_043/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_043/__init__.py +++ b/project_euler/problem_043/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_044/__init__.py b/project_euler/problem_044/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_044/__init__.py +++ b/project_euler/problem_044/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_045/__init__.py b/project_euler/problem_045/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_045/__init__.py +++ b/project_euler/problem_045/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_046/__init__.py b/project_euler/problem_046/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_046/__init__.py +++ b/project_euler/problem_046/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_055/__init__.py b/project_euler/problem_055/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_055/__init__.py +++ b/project_euler/problem_055/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_058/__init__.py b/project_euler/problem_058/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_058/__init__.py +++ b/project_euler/problem_058/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_063/__init__.py b/project_euler/problem_063/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_063/__init__.py +++ b/project_euler/problem_063/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_072/sol1.py b/project_euler/problem_072/sol1.py index 5a28be564556..f09db0673323 100644 --- a/project_euler/problem_072/sol1.py +++ b/project_euler/problem_072/sol1.py @@ -43,7 +43,7 @@ def solution(limit: int = 1_000_000) -> int: ind = np.arange(2 * i, limit + 1, i) # indexes for selection phi[ind] -= phi[ind] // i - return np.sum(phi[2 : limit + 1]) + return int(np.sum(phi[2 : limit + 1])) if __name__ == "__main__": diff --git a/project_euler/problem_089/__init__.py b/project_euler/problem_089/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_089/__init__.py +++ b/project_euler/problem_089/__init__.py @@ -1 +0,0 @@ -# diff --git a/project_euler/problem_097/__init__.py 
b/project_euler/problem_097/__init__.py index 792d6005489e..e69de29bb2d1 100644 --- a/project_euler/problem_097/__init__.py +++ b/project_euler/problem_097/__init__.py @@ -1 +0,0 @@ -# diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index 4897ef17299c..47af57f7f94d 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -36,7 +36,7 @@ def build_tree() -> TreeNode: right_node = TreeNode(int(check)) node_found.right = right_node q.put(right_node) - raise + raise ValueError("Something went wrong") def pre_order(node: TreeNode) -> None: @@ -164,8 +164,8 @@ def level_order_actual(node: TreeNode) -> None: if node_dequeued.right: list_.append(node_dequeued.right) print() - for node in list_: - q.put(node) + for inner_node in list_: + q.put(inner_node) # iteration version diff --git a/sorts/external_sort.py b/sorts/external_sort.py index e6b0d47f79f5..3fa7cacc0592 100644 --- a/sorts/external_sort.py +++ b/sorts/external_sort.py @@ -77,10 +77,7 @@ def refresh(self): self.empty.add(i) self.files[i].close() - if len(self.empty) == self.num_buffers: - return False - - return True + return len(self.empty) != self.num_buffers def unshift(self, index): value = self.buffers[index] diff --git a/source/__init__.py b/source/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/strings/can_string_be_rearranged_as_palindrome.py b/strings/can_string_be_rearranged_as_palindrome.py index 21d653db1405..95cda8b72180 100644 --- a/strings/can_string_be_rearranged_as_palindrome.py +++ b/strings/can_string_be_rearranged_as_palindrome.py @@ -72,9 +72,7 @@ def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool: for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 - if odd_char > 1: - return False - return True + return not odd_char > 1 def benchmark(input_str: str = "") -> None: diff --git a/strings/is_valid_email_address.py b/strings/is_valid_email_address.py index 205394f81297..c3bf7df7349d 100644 --- a/strings/is_valid_email_address.py +++ b/strings/is_valid_email_address.py @@ -101,9 +101,7 @@ def is_valid_email_address(email: str) -> bool: return False # (7.) Validate the placement of "." characters - if domain.startswith(".") or domain.endswith(".") or ".." in domain: - return False - return True + return not (domain.startswith(".") or domain.endswith(".") or ".." 
in domain) if __name__ == "__main__": diff --git a/strings/text_justification.py b/strings/text_justification.py index b0ef12231224..e025edcfe13f 100644 --- a/strings/text_justification.py +++ b/strings/text_justification.py @@ -67,19 +67,19 @@ def justify(line: list, width: int, max_width: int) -> str: answer = [] line: list[str] = [] width = 0 - for word in words: - if width + len(word) + len(line) <= max_width: + for inner_word in words: + if width + len(inner_word) + len(line) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) - # len(word) = length of current word + # len(inner_word) = length of current inner_word # len(line) = number of overall_spaces_count to insert between words - line.append(word) - width += len(word) + line.append(inner_word) + width += len(inner_word) else: # justify the line and add it to result answer.append(justify(line, width, max_width)) # reset new line and new width - line, width = [word], len(word) + line, width = [inner_word], len(inner_word) remaining_spaces = max_width - width - len(line) answer.append(" ".join(line) + (remaining_spaces + 1) * " ") return answer From c1dc8e97f7992c132c671da2da60da9d926d0fca Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Thu, 4 Jul 2024 23:46:24 +0530 Subject: [PATCH 1404/1543] Create count_vowels.py (#11474) * Create count_vowels.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- strings/count_vowels.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 strings/count_vowels.py diff --git a/strings/count_vowels.py b/strings/count_vowels.py new file mode 100644 index 000000000000..8a52b331c81b --- /dev/null +++ b/strings/count_vowels.py @@ -0,0 +1,34 @@ +def count_vowels(s: str) -> int: + """ + Count the number of vowels in a given string. + + :param s: Input string to count vowels in. + :return: Number of vowels in the input string. 
+ + Examples: + >>> count_vowels("hello world") + 3 + >>> count_vowels("HELLO WORLD") + 3 + >>> count_vowels("123 hello world") + 3 + >>> count_vowels("") + 0 + >>> count_vowels("a quick brown fox") + 5 + >>> count_vowels("the quick BROWN fox") + 5 + >>> count_vowels("PYTHON") + 1 + """ + if not isinstance(s, str): + raise ValueError("Input must be a string") + + vowels = "aeiouAEIOU" + return sum(1 for char in s if char in vowels) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 9190888f89c55d927881c7b08f6df361ab1b0af4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 22:55:30 +0200 Subject: [PATCH 1405/1543] [pre-commit.ci] pre-commit autoupdate (#11481) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.0 → v0.5.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.0...v0.5.1) - [github.com/tox-dev/pyproject-fmt: 2.1.3 → 2.1.4](https://github.com/tox-dev/pyproject-fmt/compare/2.1.3...2.1.4) * updating DIRECTORY.md * grid = np.char.chararray((n, n)) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 + graphs/multi_heuristic_astar.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a3f5a5e51855..7fd689adca3b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.0 + rev: v0.5.1 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.1.3" + rev: "2.1.4" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 04551fad3685..54bb8f148c32 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1260,6 +1260,7 @@ * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py) * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) + * [Count Vowels](strings/count_vowels.py) * [Credit Card Validator](strings/credit_card_validator.py) * [Damerau Levenshtein Distance](strings/damerau_levenshtein_distance.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) diff --git a/graphs/multi_heuristic_astar.py b/graphs/multi_heuristic_astar.py index 47509beb8efb..38b07e1ca675 100644 --- a/graphs/multi_heuristic_astar.py +++ b/graphs/multi_heuristic_astar.py @@ -79,7 +79,7 @@ def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]): def do_something(back_pointer, goal, start): - grid = np.chararray((n, n)) + grid = np.char.chararray((n, n)) for i in range(n): for j in range(n): grid[i][j] = "*" From 2d8f22ab615085d36c53346283528f33b18a3b6d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 21:52:48 +0200 Subject: [PATCH 1406/1543] [pre-commit.ci] pre-commit autoupdate (#11489) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.1 → v0.5.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.1...v0.5.2) Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7fd689adca3b..c72b55fdec44 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.1 + rev: v0.5.2 hooks: - id: ruff - id: ruff-format From d9ded0727a7a209bfcbf9bd81c5c75183cfd026f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 23 Jul 2024 10:40:10 +0200 Subject: [PATCH 1407/1543] [pre-commit.ci] pre-commit autoupdate (#11495) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.2 → v0.5.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.2...v0.5.4) - [github.com/pre-commit/mirrors-mypy: v1.10.1 → v1.11.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.10.1...v1.11.0) * ruff rule PLR1714 Consider merging multiple comparisons * ruff rule RUF005 Consider `[*self.urls, "", "#"]` instead of concatenation * Update emails_from_url.py * Update emails_from_url.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- web_programming/emails_from_url.py | 7 +------ 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c72b55fdec44..e9f57a7b746a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.2 + rev: v0.5.4 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.10.1 + rev: v1.11.0 hooks: - id: mypy args: diff --git a/web_programming/emails_from_url.py b/web_programming/emails_from_url.py index 43fd78dcf5a4..d41dc4893608 100644 --- a/web_programming/emails_from_url.py +++ b/web_programming/emails_from_url.py @@ -31,12 +31,7 @@ def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None # Check the list of defined attributes. for name, value in attrs: # If href is defined, not empty nor # print it and not already in urls. 
- if ( - name == "href" - and value != "#" - and value != "" - and value not in self.urls - ): + if name == "href" and value not in (*self.urls, "", "#"): url = parse.urljoin(self.domain, value) self.urls.append(url) From 146800307c5d2a4393d57b7c97c63b89a21abba1 Mon Sep 17 00:00:00 2001 From: Ihor Pryyma <83470037+Ihor-Pryyma@users.noreply.github.com> Date: Thu, 25 Jul 2024 18:56:31 +0300 Subject: [PATCH 1408/1543] Add doctests to interpolation_search.py (#11492) * Add doctests to interpolation_search.py * update docs * update tests * update tests 2 * clean code --- searches/interpolation_search.py | 139 ++++++++++++++++--------------- 1 file changed, 70 insertions(+), 69 deletions(-) diff --git a/searches/interpolation_search.py b/searches/interpolation_search.py index 0591788aa40b..cb3e0011d0da 100644 --- a/searches/interpolation_search.py +++ b/searches/interpolation_search.py @@ -3,13 +3,41 @@ """ -def interpolation_search(sorted_collection, item): - """Pure implementation of interpolation search algorithm in Python - Be careful collection must be ascending sorted, otherwise result will be - unpredictable - :param sorted_collection: some ascending sorted collection with comparable items - :param item: item value to search - :return: index of found item or None if item is not found +def interpolation_search(sorted_collection: list[int], item: int) -> int | None: + """ + Searches for an item in a sorted collection by interpolation search algorithm. + + Args: + sorted_collection: sorted list of integers + item: item value to search + + Returns: + int: The index of the found item, or None if the item is not found. + Examples: + >>> interpolation_search([1, 2, 3, 4, 5], 2) + 1 + >>> interpolation_search([1, 2, 3, 4, 5], 4) + 3 + >>> interpolation_search([1, 2, 3, 4, 5], 6) is None + True + >>> interpolation_search([], 1) is None + True + >>> interpolation_search([100], 100) + 0 + >>> interpolation_search([1, 2, 3, 4, 5], 0) is None + True + >>> interpolation_search([1, 2, 3, 4, 5], 7) is None + True + >>> interpolation_search([1, 2, 3, 4, 5], 2) + 1 + >>> interpolation_search([1, 2, 3, 4, 5], 0) is None + True + >>> interpolation_search([1, 2, 3, 4, 5], 7) is None + True + >>> interpolation_search([1, 2, 3, 4, 5], 2) + 1 + >>> interpolation_search([5, 5, 5, 5, 5], 3) is None + True """ left = 0 right = len(sorted_collection) - 1 @@ -19,8 +47,7 @@ def interpolation_search(sorted_collection, item): if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left - else: - return None + return None point = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] @@ -33,7 +60,7 @@ def interpolation_search(sorted_collection, item): current_item = sorted_collection[point] if current_item == item: return point - elif point < left: + if point < left: right = left left = point elif point > right: @@ -46,22 +73,42 @@ def interpolation_search(sorted_collection, item): return None -def interpolation_search_by_recursion(sorted_collection, item, left, right): +def interpolation_search_by_recursion( + sorted_collection: list[int], item: int, left: int = 0, right: int | None = None +) -> int | None: """Pure implementation of interpolation search algorithm in Python by recursion Be careful collection must be ascending sorted, otherwise result will be unpredictable First recursion should be started with left=0 and right=(len(sorted_collection)-1) - :param sorted_collection: some ascending sorted collection with 
comparable items - :param item: item value to search - :return: index of found item or None if item is not found - """ + Args: + sorted_collection: some sorted collection with comparable items + item: item value to search + left: left index in collection + right: right index in collection + + Returns: + index of item in collection or None if item is not present + + Examples: + >>> interpolation_search_by_recursion([0, 5, 7, 10, 15], 0) + 0 + >>> interpolation_search_by_recursion([0, 5, 7, 10, 15], 15) + 4 + >>> interpolation_search_by_recursion([0, 5, 7, 10, 15], 5) + 1 + >>> interpolation_search_by_recursion([0, 5, 7, 10, 15], 100) is None + True + >>> interpolation_search_by_recursion([5, 5, 5, 5, 5], 3) is None + True + """ + if right is None: + right = len(sorted_collection) - 1 # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left - else: - return None + return None point = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] @@ -73,64 +120,18 @@ def interpolation_search_by_recursion(sorted_collection, item, left, right): if sorted_collection[point] == item: return point - elif point < left: + if point < left: return interpolation_search_by_recursion(sorted_collection, item, point, left) - elif point > right: + if point > right: return interpolation_search_by_recursion(sorted_collection, item, right, left) - elif sorted_collection[point] > item: + if sorted_collection[point] > item: return interpolation_search_by_recursion( sorted_collection, item, left, point - 1 ) - else: - return interpolation_search_by_recursion( - sorted_collection, item, point + 1, right - ) - - -def __assert_sorted(collection): - """Check if collection is ascending sorted, if not - raises :py:class:`ValueError` - :param collection: collection - :return: True if collection is ascending sorted - :raise: :py:class:`ValueError` if collection is not ascending sorted - Examples: - >>> __assert_sorted([0, 1, 2, 4]) - True - >>> __assert_sorted([10, -1, 5]) - Traceback (most recent call last): - ... 
- ValueError: Collection must be ascending sorted - """ - if collection != sorted(collection): - raise ValueError("Collection must be ascending sorted") - return True + return interpolation_search_by_recursion(sorted_collection, item, point + 1, right) if __name__ == "__main__": - import sys + import doctest - """ - user_input = input('Enter numbers separated by comma:\n').strip() - collection = [int(item) for item in user_input.split(',')] - try: - __assert_sorted(collection) - except ValueError: - sys.exit('Sequence must be ascending sorted to apply interpolation search') - - target_input = input('Enter a single number to be found in the list:\n') - target = int(target_input) - """ - - debug = 0 - if debug == 1: - collection = [10, 30, 40, 45, 50, 66, 77, 93] - try: - __assert_sorted(collection) - except ValueError: - sys.exit("Sequence must be ascending sorted to apply interpolation search") - target = 67 - - result = interpolation_search(collection, target) - if result is not None: - print(f"{target} found at positions: {result}") - else: - print("Not found") + doctest.testmod() From 240d1b7cd47df86d86b26f4d658b26e3656a27d9 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 29 Jul 2024 21:41:09 +0200 Subject: [PATCH 1409/1543] [pre-commit.ci] pre-commit autoupdate (#11500) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.4 → v0.5.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.4...v0.5.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e9f57a7b746a..09542dd7e255 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.4 + rev: v0.5.5 hooks: - id: ruff - id: ruff-format From dfe67954f7218703e3aadca1768a0ad4c97c73a1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 6 Aug 2024 00:11:14 +0200 Subject: [PATCH 1410/1543] [pre-commit.ci] pre-commit autoupdate (#11507) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.5 → v0.5.6](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.5...v0.5.6) - [github.com/tox-dev/pyproject-fmt: 2.1.4 → 2.2.1](https://github.com/tox-dev/pyproject-fmt/compare/2.1.4...2.2.1) - [github.com/pre-commit/mirrors-mypy: v1.11.0 → v1.11.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.11.0...v1.11.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 09542dd7e255..c112b6d86da0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.5 + rev: v0.5.6 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.1.4" + rev: "2.2.1" hooks: - id: pyproject-fmt @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: 
https://github.com/pre-commit/mirrors-mypy - rev: v1.11.0 + rev: v1.11.1 hooks: - id: mypy args: From ed1900f1b37234f25486cfb3223988b3295a5549 Mon Sep 17 00:00:00 2001 From: CarlosZamG <54159355+CarlosZamG@users.noreply.github.com> Date: Tue, 6 Aug 2024 02:44:58 -0600 Subject: [PATCH 1411/1543] Fix typo in integration_by_simpson_approx.py (#11501) --- maths/numerical_analysis/integration_by_simpson_approx.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/numerical_analysis/integration_by_simpson_approx.py b/maths/numerical_analysis/integration_by_simpson_approx.py index f77ae76135ee..934299997aac 100644 --- a/maths/numerical_analysis/integration_by_simpson_approx.py +++ b/maths/numerical_analysis/integration_by_simpson_approx.py @@ -4,7 +4,7 @@ Purpose : You have one function f(x) which takes float integer and returns float you have to integrate the function in limits a to b. -The approximation proposed by Thomas Simpsons in 1743 is one way to calculate +The approximation proposed by Thomas Simpson in 1743 is one way to calculate integration. ( read article : https://cp-algorithms.com/num_methods/simpson-integration.html ) From 31c424fc8654877d3731bdcb50dcc1ce5d6860ab Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 22:55:46 +0200 Subject: [PATCH 1412/1543] [pre-commit.ci] pre-commit autoupdate (#11515) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.5.6 → v0.5.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.5.6...v0.5.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c112b6d86da0..c797af6c5088 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.6 + rev: v0.5.7 hooks: - id: ruff - id: ruff-format From 48418280b1331d1efaa14dc48da62d313dfcee43 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 22 Aug 2024 09:42:40 -0700 Subject: [PATCH 1413/1543] Remove separate directory for `gaussian_elimination_pivoting.py` (#11445) * updating DIRECTORY.md * Remove separate directory for gaussian_elimination_pivoting.py Delete the directory linear_algebra/src/gaussian_elimination_pivoting/ and move its algorithm file, gaussian_elimination_pivoting.py, into the parent src/ directory. The gaussian_elimination_pivoting/ directory only exists because gaussian_elimination_pivoting.py reads an example numpy array from matrix.txt, but this input file and IO operation is entirely unnecessary because gaussian_elimination_pivoting.py already has the exact same array hard-coded into a variable. 
* updating DIRECTORY.md --------- Co-authored-by: tianyizheng02 --- DIRECTORY.md | 3 +- .../gaussian_elimination_pivoting.py | 33 ++++++++----------- .../gaussian_elimination_pivoting/__init__.py | 0 .../gaussian_elimination_pivoting/matrix.txt | 4 --- 4 files changed, 14 insertions(+), 26 deletions(-) rename linear_algebra/src/{gaussian_elimination_pivoting => }/gaussian_elimination_pivoting.py (83%) delete mode 100644 linear_algebra/src/gaussian_elimination_pivoting/__init__.py delete mode 100644 linear_algebra/src/gaussian_elimination_pivoting/matrix.txt diff --git a/DIRECTORY.md b/DIRECTORY.md index 54bb8f148c32..11de569a2c25 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -540,8 +540,7 @@ * [Lu Decomposition](linear_algebra/lu_decomposition.py) * Src * [Conjugate Gradient](linear_algebra/src/conjugate_gradient.py) - * Gaussian Elimination Pivoting - * [Gaussian Elimination Pivoting](linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py) + * [Gaussian Elimination Pivoting](linear_algebra/src/gaussian_elimination_pivoting.py) * [Lib](linear_algebra/src/lib.py) * [Polynom For Points](linear_algebra/src/polynom_for_points.py) * [Power Iteration](linear_algebra/src/power_iteration.py) diff --git a/linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py b/linear_algebra/src/gaussian_elimination_pivoting.py similarity index 83% rename from linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py rename to linear_algebra/src/gaussian_elimination_pivoting.py index 2a86350e9fc6..ecaacce19a31 100644 --- a/linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py +++ b/linear_algebra/src/gaussian_elimination_pivoting.py @@ -1,15 +1,5 @@ import numpy as np -matrix = np.array( - [ - [5.0, -5.0, -3.0, 4.0, -11.0], - [1.0, -4.0, 6.0, -4.0, -10.0], - [-2.0, -5.0, 4.0, -5.0, -12.0], - [-3.0, -3.0, 5.0, -5.0, 8.0], - ], - dtype=float, -) - def solve_linear_system(matrix: np.ndarray) -> np.ndarray: """ @@ -87,15 +77,18 @@ def solve_linear_system(matrix: np.ndarray) -> np.ndarray: if __name__ == "__main__": from doctest import testmod - from pathlib import Path testmod() - file_path = Path(__file__).parent / "matrix.txt" - try: - matrix = np.loadtxt(file_path) - except FileNotFoundError: - print(f"Error: {file_path} not found. 
Using default matrix instead.") - - # Example usage: - print(f"Matrix:\n{matrix}") - print(f"{solve_linear_system(matrix) = }") + + example_matrix = np.array( + [ + [5.0, -5.0, -3.0, 4.0, -11.0], + [1.0, -4.0, 6.0, -4.0, -10.0], + [-2.0, -5.0, 4.0, -5.0, -12.0], + [-3.0, -3.0, 5.0, -5.0, 8.0], + ], + dtype=float, + ) + + print(f"Matrix:\n{example_matrix}") + print(f"{solve_linear_system(example_matrix) = }") diff --git a/linear_algebra/src/gaussian_elimination_pivoting/__init__.py b/linear_algebra/src/gaussian_elimination_pivoting/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt b/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt deleted file mode 100644 index dd895ad856ee..000000000000 --- a/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt +++ /dev/null @@ -1,4 +0,0 @@ -5.0 -5.0 -3.0 4.0 -11.0 -1.0 -4.0 6.0 -4.0 -10.0 --2.0 -5.0 4.0 -5.0 -12.0 --3.0 -3.0 5.0 -5.0 8.0 \ No newline at end of file From e3fa014a5ab4887f93aae7bb193b152bb155323a Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 25 Aug 2024 18:33:11 +0300 Subject: [PATCH 1414/1543] Fix ruff (#11527) * updating DIRECTORY.md * Fix ruff * Fix * Fix * Fix * Revert "Fix" This reverts commit 5bc3bf342208dd707da02dea7173c059317b6bc6. * find_max.py: noqa: PLR1730 --------- Co-authored-by: MaximSmolskiy Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- .../binary_tree/number_of_possible_binary_trees.py | 3 +-- divide_and_conquer/closest_pair_of_points.py | 6 ++---- graphs/kahns_algorithm_long.py | 3 +-- maths/find_max.py | 2 +- maths/special_numbers/bell_numbers.py | 3 +-- matrix/tests/test_matrix_operation.py | 12 ++++++------ project_euler/problem_008/sol1.py | 3 +-- project_euler/problem_009/sol2.py | 3 +-- project_euler/problem_011/sol1.py | 3 +-- project_euler/problem_011/sol2.py | 12 ++++-------- scheduling/highest_response_ratio_next.py | 3 +-- scheduling/shortest_job_first.py | 3 +-- 13 files changed, 22 insertions(+), 36 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c797af6c5088..06f8ba00494a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.7 + rev: v0.6.2 hooks: - id: ruff - id: ruff-format diff --git a/data_structures/binary_tree/number_of_possible_binary_trees.py b/data_structures/binary_tree/number_of_possible_binary_trees.py index 1c3dff37e7d9..b39cbafd0a61 100644 --- a/data_structures/binary_tree/number_of_possible_binary_trees.py +++ b/data_structures/binary_tree/number_of_possible_binary_trees.py @@ -31,8 +31,7 @@ def binomial_coefficient(n: int, k: int) -> int: """ result = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) - if k > (n - k): - k = n - k + k = min(k, n - k) # Calculate C(n,k) for i in range(k): result *= n - i diff --git a/divide_and_conquer/closest_pair_of_points.py b/divide_and_conquer/closest_pair_of_points.py index cb7fa00d1c8f..534cbba9b718 100644 --- a/divide_and_conquer/closest_pair_of_points.py +++ b/divide_and_conquer/closest_pair_of_points.py @@ -54,8 +54,7 @@ def dis_between_closest_pair(points, points_counts, min_dis=float("inf")): for i in range(points_counts - 1): for j in range(i + 1, points_counts): current_dis = euclidean_distance_sqr(points[i], points[j]) - if current_dis < min_dis: - min_dis = current_dis + min_dis = min(min_dis, current_dis) return min_dis @@ -76,8 +75,7 @@ def 
dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")): for i in range(min(6, points_counts - 1), points_counts): for j in range(max(0, i - 6), i): current_dis = euclidean_distance_sqr(points[i], points[j]) - if current_dis < min_dis: - min_dis = current_dis + min_dis = min(min_dis, current_dis) return min_dis diff --git a/graphs/kahns_algorithm_long.py b/graphs/kahns_algorithm_long.py index 63cbeb909a8a..1f16b90c0745 100644 --- a/graphs/kahns_algorithm_long.py +++ b/graphs/kahns_algorithm_long.py @@ -17,8 +17,7 @@ def longest_distance(graph): for x in graph[vertex]: indegree[x] -= 1 - if long_dist[vertex] + 1 > long_dist[x]: - long_dist[x] = long_dist[vertex] + 1 + long_dist[x] = max(long_dist[x], long_dist[vertex] + 1) if indegree[x] == 0: queue.append(x) diff --git a/maths/find_max.py b/maths/find_max.py index 729a80ab421c..4765d300634e 100644 --- a/maths/find_max.py +++ b/maths/find_max.py @@ -20,7 +20,7 @@ def find_max_iterative(nums: list[int | float]) -> int | float: raise ValueError("find_max_iterative() arg is an empty sequence") max_num = nums[0] for x in nums: - if x > max_num: + if x > max_num: # noqa: PLR1730 max_num = x return max_num diff --git a/maths/special_numbers/bell_numbers.py b/maths/special_numbers/bell_numbers.py index 660ec6e6aa09..5d99334d7add 100644 --- a/maths/special_numbers/bell_numbers.py +++ b/maths/special_numbers/bell_numbers.py @@ -61,8 +61,7 @@ def _binomial_coefficient(total_elements: int, elements_to_choose: int) -> int: if elements_to_choose in {0, total_elements}: return 1 - if elements_to_choose > total_elements - elements_to_choose: - elements_to_choose = total_elements - elements_to_choose + elements_to_choose = min(elements_to_choose, total_elements - elements_to_choose) coefficient = 1 for i in range(elements_to_choose): diff --git a/matrix/tests/test_matrix_operation.py b/matrix/tests/test_matrix_operation.py index addc870ca205..21ed7e371fd8 100644 --- a/matrix/tests/test_matrix_operation.py +++ b/matrix/tests/test_matrix_operation.py @@ -31,7 +31,7 @@ logger.addHandler(stream_handler) -@pytest.mark.mat_ops() +@pytest.mark.mat_ops @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) @@ -51,7 +51,7 @@ def test_addition(mat1, mat2): matop.add(mat1, mat2) -@pytest.mark.mat_ops() +@pytest.mark.mat_ops @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) @@ -71,7 +71,7 @@ def test_subtraction(mat1, mat2): assert matop.subtract(mat1, mat2) -@pytest.mark.mat_ops() +@pytest.mark.mat_ops @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) @@ -93,21 +93,21 @@ def test_multiplication(mat1, mat2): assert matop.subtract(mat1, mat2) -@pytest.mark.mat_ops() +@pytest.mark.mat_ops def test_scalar_multiply(): act = (3.5 * np.array(mat_a)).tolist() theo = matop.scalar_multiply(mat_a, 3.5) assert theo == act -@pytest.mark.mat_ops() +@pytest.mark.mat_ops def test_identity(): act = (np.identity(5)).tolist() theo = matop.identity(5) assert theo == act -@pytest.mark.mat_ops() +@pytest.mark.mat_ops @pytest.mark.parametrize("mat", [mat_a, mat_b, mat_c, mat_d, mat_e, mat_f]) def test_transpose(mat): if (np.array(mat)).shape < (2, 2): diff --git a/project_euler/problem_008/sol1.py b/project_euler/problem_008/sol1.py index adbac8d5ad1f..a38b2045f996 100644 --- a/project_euler/problem_008/sol1.py +++ b/project_euler/problem_008/sol1.py @@ -75,8 +75,7 @@ def solution(n: str = N) -> 
int: product = 1 for j in range(13): product *= int(n[i + j]) - if product > largest_product: - largest_product = product + largest_product = max(largest_product, product) return largest_product diff --git a/project_euler/problem_009/sol2.py b/project_euler/problem_009/sol2.py index 722ad522ee45..443a529571cc 100644 --- a/project_euler/problem_009/sol2.py +++ b/project_euler/problem_009/sol2.py @@ -39,8 +39,7 @@ def solution(n: int = 1000) -> int: c = n - a - b if c * c == (a * a + b * b): candidate = a * b * c - if candidate >= product: - product = candidate + product = max(product, candidate) return product diff --git a/project_euler/problem_011/sol1.py b/project_euler/problem_011/sol1.py index ad45f0983a7c..3d3e864f927b 100644 --- a/project_euler/problem_011/sol1.py +++ b/project_euler/problem_011/sol1.py @@ -63,8 +63,7 @@ def largest_product(grid): max_product = max( vert_product, horz_product, lr_diag_product, rl_diag_product ) - if max_product > largest: - largest = max_product + largest = max(largest, max_product) return largest diff --git a/project_euler/problem_011/sol2.py b/project_euler/problem_011/sol2.py index 09bf315702c5..7637deafc3cb 100644 --- a/project_euler/problem_011/sol2.py +++ b/project_euler/problem_011/sol2.py @@ -45,15 +45,13 @@ def solution(): for i in range(20): for j in range(17): temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] - if temp > maximum: - maximum = temp + maximum = max(maximum, temp) # down for i in range(17): for j in range(20): temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j] - if temp > maximum: - maximum = temp + maximum = max(maximum, temp) # diagonal 1 for i in range(17): @@ -64,8 +62,7 @@ def solution(): * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) - if temp > maximum: - maximum = temp + maximum = max(maximum, temp) # diagonal 2 for i in range(17): @@ -76,8 +73,7 @@ def solution(): * grid[i + 2][j - 2] * grid[i + 3][j - 3] ) - if temp > maximum: - maximum = temp + maximum = max(maximum, temp) return maximum diff --git a/scheduling/highest_response_ratio_next.py b/scheduling/highest_response_ratio_next.py index b549835616bf..f858be2ee44a 100644 --- a/scheduling/highest_response_ratio_next.py +++ b/scheduling/highest_response_ratio_next.py @@ -46,8 +46,7 @@ def calculate_turn_around_time( i = 0 while finished_process[i] == 1: i += 1 - if current_time < arrival_time[i]: - current_time = arrival_time[i] + current_time = max(current_time, arrival_time[i]) response_ratio = 0 # Index showing the location of the process being performed diff --git a/scheduling/shortest_job_first.py b/scheduling/shortest_job_first.py index 6899ec87c591..91012ee3ac35 100644 --- a/scheduling/shortest_job_first.py +++ b/scheduling/shortest_job_first.py @@ -66,8 +66,7 @@ def calculate_waitingtime( finar = finish_time - arrival_time[short] waiting_time[short] = finar - burst_time[short] - if waiting_time[short] < 0: - waiting_time[short] = 0 + waiting_time[short] = max(waiting_time[short], 0) # Increment time increment_time += 1 From c8e131b86c35c8fa4ca14aa85edbd4a106575882 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2024 21:49:42 +0200 Subject: [PATCH 1415/1543] [pre-commit.ci] pre-commit autoupdate (#11522) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/abravalheri/validate-pyproject: v0.18 → v0.19](https://github.com/abravalheri/validate-pyproject/compare/v0.18...v0.19) - 
[github.com/pre-commit/mirrors-mypy: v1.11.1 → v1.11.2](https://github.com/pre-commit/mirrors-mypy/compare/v1.11.1...v1.11.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 06f8ba00494a..2724dff230e7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -42,12 +42,12 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.18 + rev: v0.19 hooks: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.11.1 + rev: v1.11.2 hooks: - id: mypy args: From bd8085cfc18784a21d792a44dcd683e11e802c6b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 21:41:55 +0200 Subject: [PATCH 1416/1543] [pre-commit.ci] pre-commit autoupdate (#11535) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.2 → v0.6.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.2...v0.6.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2724dff230e7..e363197497ac 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.2 + rev: v0.6.3 hooks: - id: ruff - id: ruff-format From f16d38f26f13683cf3ea75caf0474dedde059b86 Mon Sep 17 00:00:00 2001 From: Ramy <126559907+Ramy-Badr-Ahmed@users.noreply.github.com> Date: Tue, 3 Sep 2024 14:39:09 +0200 Subject: [PATCH 1417/1543] kd tree data structure implementation (#11532) * Implemented KD-Tree Data Structure * Implemented KD-Tree Data Structure. updated DIRECTORY.md. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replaced legacy `np.random.rand` call with `np.random.Generator` in kd_tree/example_usage.py * Replaced legacy `np.random.rand` call with `np.random.Generator` in kd_tree/hypercube_points.py * added typehints and docstrings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * docstring for search() * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added tests. Updated docstrings/typehints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated tests and used | for type annotations * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * E501 for build_kdtree.py, hypercube_points.py, nearest_neighbour_search.py * I001 for example_usage.py and test_kdtree.py * I001 for example_usage.py and test_kdtree.py * Update data_structures/kd_tree/build_kdtree.py Co-authored-by: Christian Clauss * Update data_structures/kd_tree/example/hypercube_points.py Co-authored-by: Christian Clauss * Update data_structures/kd_tree/example/hypercube_points.py Co-authored-by: Christian Clauss * Added new test cases requested in Review. 
Refactored the test_build_kdtree() to include various checks. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considered ruff errors * Considered ruff errors * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update kd_node.py * imported annotations from __future__ * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 6 ++ data_structures/kd_tree/__init__.py | 0 data_structures/kd_tree/build_kdtree.py | 35 ++++++ data_structures/kd_tree/example/__init__.py | 0 .../kd_tree/example/example_usage.py | 38 +++++++ .../kd_tree/example/hypercube_points.py | 21 ++++ data_structures/kd_tree/kd_node.py | 30 ++++++ .../kd_tree/nearest_neighbour_search.py | 71 +++++++++++++ data_structures/kd_tree/tests/__init__.py | 0 data_structures/kd_tree/tests/test_kdtree.py | 100 ++++++++++++++++++ 10 files changed, 301 insertions(+) create mode 100644 data_structures/kd_tree/__init__.py create mode 100644 data_structures/kd_tree/build_kdtree.py create mode 100644 data_structures/kd_tree/example/__init__.py create mode 100644 data_structures/kd_tree/example/example_usage.py create mode 100644 data_structures/kd_tree/example/hypercube_points.py create mode 100644 data_structures/kd_tree/kd_node.py create mode 100644 data_structures/kd_tree/nearest_neighbour_search.py create mode 100644 data_structures/kd_tree/tests/__init__.py create mode 100644 data_structures/kd_tree/tests/test_kdtree.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 11de569a2c25..1ca537b991c8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -285,6 +285,12 @@ * Trie * [Radix Tree](data_structures/trie/radix_tree.py) * [Trie](data_structures/trie/trie.py) + * KD Tree + * [KD Tree Node](data_structures/kd_tree/kd_node.py) + * [Build KD Tree](data_structures/kd_tree/build_kdtree.py) + * [Nearest Neighbour Search](data_structures/kd_tree/nearest_neighbour_search.py) + * [Hypercibe Points](data_structures/kd_tree/example/hypercube_points.py) + * [Example Usage](data_structures/kd_tree/example/example_usage.py) ## Digital Image Processing * [Change Brightness](digital_image_processing/change_brightness.py) diff --git a/data_structures/kd_tree/__init__.py b/data_structures/kd_tree/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/kd_tree/build_kdtree.py b/data_structures/kd_tree/build_kdtree.py new file mode 100644 index 000000000000..c5b800a2c992 --- /dev/null +++ b/data_structures/kd_tree/build_kdtree.py @@ -0,0 +1,35 @@ +from data_structures.kd_tree.kd_node import KDNode + + +def build_kdtree(points: list[list[float]], depth: int = 0) -> KDNode | None: + """ + Builds a KD-Tree from a list of points. + + Args: + points: The list of points to build the KD-Tree from. + depth: The current depth in the tree + (used to determine axis for splitting). + + Returns: + The root node of the KD-Tree, + or None if no points are provided. 
+ """ + if not points: + return None + + k = len(points[0]) # Dimensionality of the points + axis = depth % k + + # Sort point list and choose median as pivot element + points.sort(key=lambda point: point[axis]) + median_idx = len(points) // 2 + + # Create node and construct subtrees + left_points = points[:median_idx] + right_points = points[median_idx + 1 :] + + return KDNode( + point=points[median_idx], + left=build_kdtree(left_points, depth + 1), + right=build_kdtree(right_points, depth + 1), + ) diff --git a/data_structures/kd_tree/example/__init__.py b/data_structures/kd_tree/example/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/kd_tree/example/example_usage.py b/data_structures/kd_tree/example/example_usage.py new file mode 100644 index 000000000000..e270f0cdd245 --- /dev/null +++ b/data_structures/kd_tree/example/example_usage.py @@ -0,0 +1,38 @@ +import numpy as np + +from data_structures.kd_tree.build_kdtree import build_kdtree +from data_structures.kd_tree.example.hypercube_points import hypercube_points +from data_structures.kd_tree.nearest_neighbour_search import nearest_neighbour_search + + +def main() -> None: + """ + Demonstrates the use of KD-Tree by building it from random points + in a 10-dimensional hypercube and performing a nearest neighbor search. + """ + num_points: int = 5000 + cube_size: float = 10.0 # Size of the hypercube (edge length) + num_dimensions: int = 10 + + # Generate random points within the hypercube + points: np.ndarray = hypercube_points(num_points, cube_size, num_dimensions) + hypercube_kdtree = build_kdtree(points.tolist()) + + # Generate a random query point within the same space + rng = np.random.default_rng() + query_point: list[float] = rng.random(num_dimensions).tolist() + + # Perform nearest neighbor search + nearest_point, nearest_dist, nodes_visited = nearest_neighbour_search( + hypercube_kdtree, query_point + ) + + # Print the results + print(f"Query point: {query_point}") + print(f"Nearest point: {nearest_point}") + print(f"Distance: {nearest_dist:.4f}") + print(f"Nodes visited: {nodes_visited}") + + +if __name__ == "__main__": + main() diff --git a/data_structures/kd_tree/example/hypercube_points.py b/data_structures/kd_tree/example/hypercube_points.py new file mode 100644 index 000000000000..2d8800ac9338 --- /dev/null +++ b/data_structures/kd_tree/example/hypercube_points.py @@ -0,0 +1,21 @@ +import numpy as np + + +def hypercube_points( + num_points: int, hypercube_size: float, num_dimensions: int +) -> np.ndarray: + """ + Generates random points uniformly distributed within an n-dimensional hypercube. + + Args: + num_points: Number of points to generate. + hypercube_size: Size of the hypercube. + num_dimensions: Number of dimensions of the hypercube. + + Returns: + An array of shape (num_points, num_dimensions) + with generated points. + """ + rng = np.random.default_rng() + shape = (num_points, num_dimensions) + return hypercube_size * rng.random(shape) diff --git a/data_structures/kd_tree/kd_node.py b/data_structures/kd_tree/kd_node.py new file mode 100644 index 000000000000..e1011027938d --- /dev/null +++ b/data_structures/kd_tree/kd_node.py @@ -0,0 +1,30 @@ +from __future__ import annotations + + +class KDNode: + """ + Represents a node in a KD-Tree. + + Attributes: + point: The point stored in this node. + left: The left child node. + right: The right child node. 
+ """ + + def __init__( + self, + point: list[float], + left: KDNode | None = None, + right: KDNode | None = None, + ) -> None: + """ + Initializes a KDNode with the given point and child nodes. + + Args: + point (list[float]): The point stored in this node. + left (Optional[KDNode]): The left child node. + right (Optional[KDNode]): The right child node. + """ + self.point = point + self.left = left + self.right = right diff --git a/data_structures/kd_tree/nearest_neighbour_search.py b/data_structures/kd_tree/nearest_neighbour_search.py new file mode 100644 index 000000000000..d9727736f21c --- /dev/null +++ b/data_structures/kd_tree/nearest_neighbour_search.py @@ -0,0 +1,71 @@ +from data_structures.kd_tree.kd_node import KDNode + + +def nearest_neighbour_search( + root: KDNode | None, query_point: list[float] +) -> tuple[list[float] | None, float, int]: + """ + Performs a nearest neighbor search in a KD-Tree for a given query point. + + Args: + root (KDNode | None): The root node of the KD-Tree. + query_point (list[float]): The point for which the nearest neighbor + is being searched. + + Returns: + tuple[list[float] | None, float, int]: + - The nearest point found in the KD-Tree to the query point, + or None if no point is found. + - The squared distance to the nearest point. + - The number of nodes visited during the search. + """ + nearest_point: list[float] | None = None + nearest_dist: float = float("inf") + nodes_visited: int = 0 + + def search(node: KDNode | None, depth: int = 0) -> None: + """ + Recursively searches for the nearest neighbor in the KD-Tree. + + Args: + node: The current node in the KD-Tree. + depth: The current depth in the KD-Tree. + """ + nonlocal nearest_point, nearest_dist, nodes_visited + if node is None: + return + + nodes_visited += 1 + + # Calculate the current distance (squared distance) + current_point = node.point + current_dist = sum( + (query_coord - point_coord) ** 2 + for query_coord, point_coord in zip(query_point, current_point) + ) + + # Update nearest point if the current node is closer + if nearest_point is None or current_dist < nearest_dist: + nearest_point = current_point + nearest_dist = current_dist + + # Determine which subtree to search first (based on axis and query point) + k = len(query_point) # Dimensionality of points + axis = depth % k + + if query_point[axis] <= current_point[axis]: + nearer_subtree = node.left + further_subtree = node.right + else: + nearer_subtree = node.right + further_subtree = node.left + + # Search the nearer subtree first + search(nearer_subtree, depth + 1) + + # If the further subtree has a closer point + if (query_point[axis] - current_point[axis]) ** 2 < nearest_dist: + search(further_subtree, depth + 1) + + search(root, 0) + return nearest_point, nearest_dist, nodes_visited diff --git a/data_structures/kd_tree/tests/__init__.py b/data_structures/kd_tree/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/kd_tree/tests/test_kdtree.py b/data_structures/kd_tree/tests/test_kdtree.py new file mode 100644 index 000000000000..81f2cc990074 --- /dev/null +++ b/data_structures/kd_tree/tests/test_kdtree.py @@ -0,0 +1,100 @@ +import numpy as np +import pytest + +from data_structures.kd_tree.build_kdtree import build_kdtree +from data_structures.kd_tree.example.hypercube_points import hypercube_points +from data_structures.kd_tree.kd_node import KDNode +from data_structures.kd_tree.nearest_neighbour_search import nearest_neighbour_search + + +@pytest.mark.parametrize( + 
("num_points", "cube_size", "num_dimensions", "depth", "expected_result"), + [ + (0, 10.0, 2, 0, None), # Empty points list + (10, 10.0, 2, 2, KDNode), # Depth = 2, 2D points + (10, 10.0, 3, -2, KDNode), # Depth = -2, 3D points + ], +) +def test_build_kdtree(num_points, cube_size, num_dimensions, depth, expected_result): + """ + Test that KD-Tree is built correctly. + + Cases: + - Empty points list. + - Positive depth value. + - Negative depth value. + """ + points = ( + hypercube_points(num_points, cube_size, num_dimensions).tolist() + if num_points > 0 + else [] + ) + + kdtree = build_kdtree(points, depth=depth) + + if expected_result is None: + # Empty points list case + assert kdtree is None, f"Expected None for empty points list, got {kdtree}" + else: + # Check if root node is not None + assert kdtree is not None, "Expected a KDNode, got None" + + # Check if root has correct dimensions + assert ( + len(kdtree.point) == num_dimensions + ), f"Expected point dimension {num_dimensions}, got {len(kdtree.point)}" + + # Check that the tree is balanced to some extent (simplistic check) + assert isinstance( + kdtree, KDNode + ), f"Expected KDNode instance, got {type(kdtree)}" + + +def test_nearest_neighbour_search(): + """ + Test the nearest neighbor search function. + """ + num_points = 10 + cube_size = 10.0 + num_dimensions = 2 + points = hypercube_points(num_points, cube_size, num_dimensions) + kdtree = build_kdtree(points.tolist()) + + rng = np.random.default_rng() + query_point = rng.random(num_dimensions).tolist() + + nearest_point, nearest_dist, nodes_visited = nearest_neighbour_search( + kdtree, query_point + ) + + # Check that nearest point is not None + assert nearest_point is not None + + # Check that distance is a non-negative number + assert nearest_dist >= 0 + + # Check that nodes visited is a non-negative integer + assert nodes_visited >= 0 + + +def test_edge_cases(): + """ + Test edge cases such as an empty KD-Tree. 
+ """ + empty_kdtree = build_kdtree([]) + query_point = [0.0] * 2 # Using a default 2D query point + + nearest_point, nearest_dist, nodes_visited = nearest_neighbour_search( + empty_kdtree, query_point + ) + + # With an empty KD-Tree, nearest_point should be None + assert nearest_point is None + assert nearest_dist == float("inf") + assert nodes_visited == 0 + + +if __name__ == "__main__": + import pytest + + pytest.main() From 729c1f923bb621ed246983a5d3309135c3b1fc8c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 22:15:17 +0200 Subject: [PATCH 1418/1543] [pre-commit.ci] pre-commit autoupdate (#11557) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.3 → v0.6.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.3...v0.6.4) - [github.com/tox-dev/pyproject-fmt: 2.2.1 → 2.2.3](https://github.com/tox-dev/pyproject-fmt/compare/2.2.1...2.2.3) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 15 +++++++++------ 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e363197497ac..ff76e87a3aa1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.3 + rev: v0.6.4 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.2.1" + rev: "2.2.3" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 1ca537b991c8..e965d3b32ccf 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -243,6 +243,15 @@ * [Min Heap](data_structures/heap/min_heap.py) * [Randomized Heap](data_structures/heap/randomized_heap.py) * [Skew Heap](data_structures/heap/skew_heap.py) + * Kd Tree + * [Build Kdtree](data_structures/kd_tree/build_kdtree.py) + * Example + * [Example Usage](data_structures/kd_tree/example/example_usage.py) + * [Hypercube Points](data_structures/kd_tree/example/hypercube_points.py) + * [Kd Node](data_structures/kd_tree/kd_node.py) + * [Nearest Neighbour Search](data_structures/kd_tree/nearest_neighbour_search.py) + * Tests + * [Test Kdtree](data_structures/kd_tree/tests/test_kdtree.py) * Linked List * [Circular Linked List](data_structures/linked_list/circular_linked_list.py) * [Deque Doubly](data_structures/linked_list/deque_doubly.py) @@ -285,12 +294,6 @@ * Trie * [Radix Tree](data_structures/trie/radix_tree.py) * [Trie](data_structures/trie/trie.py) - * KD Tree - * [KD Tree Node](data_structures/kd_tree/kd_node.py) - * [Build KD Tree](data_structures/kd_tree/build_kdtree.py) - * [Nearest Neighbour Search](data_structures/kd_tree/nearest_neighbour_search.py) - * [Hypercibe Points](data_structures/kd_tree/example/hypercube_points.py) - * [Example Usage](data_structures/kd_tree/example/example_usage.py) ## Digital Image Processing * [Change Brightness](digital_image_processing/change_brightness.py) From 77bbe584216c0925e249e0baab77fef34561ecaa Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 00:14:55 +0200 Subject: [PATCH 1419/1543] [pre-commit.ci] pre-commit autoupdate (#11568) 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.4 → v0.6.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.4...v0.6.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ff76e87a3aa1..a4a45686537d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.4 + rev: v0.6.5 hooks: - id: ruff - id: ruff-format From 50cc00bb2da26fd234dabdfa7f93c96d6b7d72d5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 23 Sep 2024 21:45:14 +0200 Subject: [PATCH 1420/1543] [pre-commit.ci] pre-commit autoupdate (#11579) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.5 → v0.6.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.5...v0.6.7) - [github.com/tox-dev/pyproject-fmt: 2.2.3 → 2.2.4](https://github.com/tox-dev/pyproject-fmt/compare/2.2.3...2.2.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a4a45686537d..7b219597f7b6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.5 + rev: v0.6.7 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.2.3" + rev: "2.2.4" hooks: - id: pyproject-fmt From 9b5641d2d333d04eb474ecbcb15c40ccf18a3d7b Mon Sep 17 00:00:00 2001 From: apples53 Date: Tue, 24 Sep 2024 13:00:36 +0530 Subject: [PATCH 1421/1543] balance parenthesis (add closing bracket) (#11563) * balance parenthesis (add closing bracket) * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- fuzzy_logic/fuzzy_operations.py.DISABLED.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fuzzy_logic/fuzzy_operations.py.DISABLED.txt b/fuzzy_logic/fuzzy_operations.py.DISABLED.txt index 0786ef8b0c67..67fd587f4baf 100644 --- a/fuzzy_logic/fuzzy_operations.py.DISABLED.txt +++ b/fuzzy_logic/fuzzy_operations.py.DISABLED.txt @@ -28,7 +28,7 @@ if __name__ == "__main__": union = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1] - # 3. Complement (A) = (1- min(µA(x)) + # 3. Complement (A) = (1 - min(µA(x))) complement_a = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] From 976e385c1d9df92c075575125475b22c423205b9 Mon Sep 17 00:00:00 2001 From: Ramy Date: Sat, 28 Sep 2024 15:37:00 +0200 Subject: [PATCH 1422/1543] Implemented Suffix Tree Data Structure (#11554) * Implemented KD-Tree Data Structure * Implemented KD-Tree Data Structure. updated DIRECTORY.md. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replaced legacy `np.random.rand` call with `np.random.Generator` in kd_tree/example_usage.py * Replaced legacy `np.random.rand` call with `np.random.Generator` in kd_tree/hypercube_points.py * added typehints and docstrings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * docstring for search() * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added tests. Updated docstrings/typehints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated tests and used | for type annotations * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * E501 for build_kdtree.py, hypercube_points.py, nearest_neighbour_search.py * I001 for example_usage.py and test_kdtree.py * I001 for example_usage.py and test_kdtree.py * Update data_structures/kd_tree/build_kdtree.py Co-authored-by: Christian Clauss * Update data_structures/kd_tree/example/hypercube_points.py Co-authored-by: Christian Clauss * Update data_structures/kd_tree/example/hypercube_points.py Co-authored-by: Christian Clauss * Added new test cases requested in Review. Refactored the test_build_kdtree() to include various checks. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considered ruff errors * Considered ruff errors * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update kd_node.py * imported annotations from __future__ * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Implementation of the suffix tree data structure * Adding data to DIRECTORY.md * Minor file renaming * minor correction * renaming in DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-1 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-3 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-4 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Considering ruff part-5 * Implemented Suffix Tree Data Structure. Added some comments to my files in #11532, #11554. * updating DIRECTORY.md * Implemented Suffix Tree Data Structure. Added some comments to my files in #11532, #11554. 
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Ramy-Badr-Ahmed --- DIRECTORY.md | 7 ++ data_structures/kd_tree/build_kdtree.py | 8 +++ .../kd_tree/example/example_usage.py | 8 +++ .../kd_tree/example/hypercube_points.py | 8 +++ data_structures/kd_tree/kd_node.py | 8 +++ .../kd_tree/nearest_neighbour_search.py | 8 +++ data_structures/kd_tree/tests/test_kdtree.py | 8 +++ data_structures/suffix_tree/__init__.py | 0 .../suffix_tree/example/__init__.py | 0 .../suffix_tree/example/example_usage.py | 37 +++++++++++ data_structures/suffix_tree/suffix_tree.py | 66 +++++++++++++++++++ .../suffix_tree/suffix_tree_node.py | 36 ++++++++++ data_structures/suffix_tree/tests/__init__.py | 0 .../suffix_tree/tests/test_suffix_tree.py | 59 +++++++++++++++++ 14 files changed, 253 insertions(+) create mode 100644 data_structures/suffix_tree/__init__.py create mode 100644 data_structures/suffix_tree/example/__init__.py create mode 100644 data_structures/suffix_tree/example/example_usage.py create mode 100644 data_structures/suffix_tree/suffix_tree.py create mode 100644 data_structures/suffix_tree/suffix_tree_node.py create mode 100644 data_structures/suffix_tree/tests/__init__.py create mode 100644 data_structures/suffix_tree/tests/test_suffix_tree.py diff --git a/DIRECTORY.md b/DIRECTORY.md index e965d3b32ccf..955001e2aa23 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -291,6 +291,13 @@ * [Stack With Doubly Linked List](data_structures/stacks/stack_with_doubly_linked_list.py) * [Stack With Singly Linked List](data_structures/stacks/stack_with_singly_linked_list.py) * [Stock Span Problem](data_structures/stacks/stock_span_problem.py) + * Suffix Tree + * Example + * [Example Usage](data_structures/suffix_tree/example/example_usage.py) + * [Suffix Tree](data_structures/suffix_tree/suffix_tree.py) + * [Suffix Tree Node](data_structures/suffix_tree/suffix_tree_node.py) + * Tests + * [Test Suffix Tree](data_structures/suffix_tree/tests/test_suffix_tree.py) * Trie * [Radix Tree](data_structures/trie/radix_tree.py) * [Trie](data_structures/trie/trie.py) diff --git a/data_structures/kd_tree/build_kdtree.py b/data_structures/kd_tree/build_kdtree.py index c5b800a2c992..074a5dac4d42 100644 --- a/data_structures/kd_tree/build_kdtree.py +++ b/data_structures/kd_tree/build_kdtree.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + from data_structures.kd_tree.kd_node import KDNode diff --git a/data_structures/kd_tree/example/example_usage.py b/data_structures/kd_tree/example/example_usage.py index e270f0cdd245..892c3b8c4a2a 100644 --- a/data_structures/kd_tree/example/example_usage.py +++ b/data_structures/kd_tree/example/example_usage.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! 
+ import numpy as np from data_structures.kd_tree.build_kdtree import build_kdtree diff --git a/data_structures/kd_tree/example/hypercube_points.py b/data_structures/kd_tree/example/hypercube_points.py index 2d8800ac9338..66744856e6d5 100644 --- a/data_structures/kd_tree/example/hypercube_points.py +++ b/data_structures/kd_tree/example/hypercube_points.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + import numpy as np diff --git a/data_structures/kd_tree/kd_node.py b/data_structures/kd_tree/kd_node.py index e1011027938d..5a22ef609077 100644 --- a/data_structures/kd_tree/kd_node.py +++ b/data_structures/kd_tree/kd_node.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + from __future__ import annotations diff --git a/data_structures/kd_tree/nearest_neighbour_search.py b/data_structures/kd_tree/nearest_neighbour_search.py index d9727736f21c..8104944c08f0 100644 --- a/data_structures/kd_tree/nearest_neighbour_search.py +++ b/data_structures/kd_tree/nearest_neighbour_search.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + from data_structures.kd_tree.kd_node import KDNode diff --git a/data_structures/kd_tree/tests/test_kdtree.py b/data_structures/kd_tree/tests/test_kdtree.py index 81f2cc990074..dce5e4f34ff4 100644 --- a/data_structures/kd_tree/tests/test_kdtree.py +++ b/data_structures/kd_tree/tests/test_kdtree.py @@ -1,3 +1,11 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11532 +# https://github.com/TheAlgorithms/Python/pull/11532 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + import numpy as np import pytest diff --git a/data_structures/suffix_tree/__init__.py b/data_structures/suffix_tree/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/suffix_tree/example/__init__.py b/data_structures/suffix_tree/example/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/suffix_tree/example/example_usage.py b/data_structures/suffix_tree/example/example_usage.py new file mode 100644 index 000000000000..724ac57e8bfb --- /dev/null +++ b/data_structures/suffix_tree/example/example_usage.py @@ -0,0 +1,37 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11554 +# https://github.com/TheAlgorithms/Python/pull/11554 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + +from data_structures.suffix_tree.suffix_tree import SuffixTree + + +def main() -> None: + """ + Demonstrate the usage of the SuffixTree class. + + - Initializes a SuffixTree with a predefined text. + - Defines a list of patterns to search for within the suffix tree. 
+ - Searches for each pattern in the suffix tree. + + Patterns tested: + - "ana" (found) --> True + - "ban" (found) --> True + - "na" (found) --> True + - "xyz" (not found) --> False + - "mon" (found) --> True + """ + text = "monkey banana" + suffix_tree = SuffixTree(text) + + patterns = ["ana", "ban", "na", "xyz", "mon"] + for pattern in patterns: + found = suffix_tree.search(pattern) + print(f"Pattern '{pattern}' found: {found}") + + +if __name__ == "__main__": + main() diff --git a/data_structures/suffix_tree/suffix_tree.py b/data_structures/suffix_tree/suffix_tree.py new file mode 100644 index 000000000000..ad54fb0ba009 --- /dev/null +++ b/data_structures/suffix_tree/suffix_tree.py @@ -0,0 +1,66 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11554 +# https://github.com/TheAlgorithms/Python/pull/11554 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + +from data_structures.suffix_tree.suffix_tree_node import SuffixTreeNode + + +class SuffixTree: + def __init__(self, text: str) -> None: + """ + Initializes the suffix tree with the given text. + + Args: + text (str): The text for which the suffix tree is to be built. + """ + self.text: str = text + self.root: SuffixTreeNode = SuffixTreeNode() + self.build_suffix_tree() + + def build_suffix_tree(self) -> None: + """ + Builds the suffix tree for the given text by adding all suffixes. + """ + text = self.text + n = len(text) + for i in range(n): + suffix = text[i:] + self._add_suffix(suffix, i) + + def _add_suffix(self, suffix: str, index: int) -> None: + """ + Adds a suffix to the suffix tree. + + Args: + suffix (str): The suffix to add. + index (int): The starting index of the suffix in the original text. + """ + node = self.root + for char in suffix: + if char not in node.children: + node.children[char] = SuffixTreeNode() + node = node.children[char] + node.is_end_of_string = True + node.start = index + node.end = index + len(suffix) - 1 + + def search(self, pattern: str) -> bool: + """ + Searches for a pattern in the suffix tree. + + Args: + pattern (str): The pattern to search for. + + Returns: + bool: True if the pattern is found, False otherwise. + """ + node = self.root + for char in pattern: + if char not in node.children: + return False + node = node.children[char] + return True diff --git a/data_structures/suffix_tree/suffix_tree_node.py b/data_structures/suffix_tree/suffix_tree_node.py new file mode 100644 index 000000000000..e5b628645063 --- /dev/null +++ b/data_structures/suffix_tree/suffix_tree_node.py @@ -0,0 +1,36 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11554 +# https://github.com/TheAlgorithms/Python/pull/11554 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + +from __future__ import annotations + + +class SuffixTreeNode: + def __init__( + self, + children: dict[str, SuffixTreeNode] | None = None, + is_end_of_string: bool = False, + start: int | None = None, + end: int | None = None, + suffix_link: SuffixTreeNode | None = None, + ) -> None: + """ + Initializes a suffix tree node. + + Parameters: + children (dict[str, SuffixTreeNode] | None): The children of this node. + is_end_of_string (bool): Indicates if this node represents + the end of a string. + start (int | None): The start index of the suffix in the text. 
+ end (int | None): The end index of the suffix in the text. + suffix_link (SuffixTreeNode | None): Link to another suffix tree node. + """ + self.children = children or {} + self.is_end_of_string = is_end_of_string + self.start = start + self.end = end + self.suffix_link = suffix_link diff --git a/data_structures/suffix_tree/tests/__init__.py b/data_structures/suffix_tree/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/data_structures/suffix_tree/tests/test_suffix_tree.py b/data_structures/suffix_tree/tests/test_suffix_tree.py new file mode 100644 index 000000000000..45c6790ac48a --- /dev/null +++ b/data_structures/suffix_tree/tests/test_suffix_tree.py @@ -0,0 +1,59 @@ +# Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed) +# in Pull Request: #11554 +# https://github.com/TheAlgorithms/Python/pull/11554 +# +# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request +# addressing bugs/corrections to this file. +# Thank you! + +import unittest + +from data_structures.suffix_tree.suffix_tree import SuffixTree + + +class TestSuffixTree(unittest.TestCase): + def setUp(self) -> None: + """Set up the initial conditions for each test.""" + self.text = "banana" + self.suffix_tree = SuffixTree(self.text) + + def test_search_existing_patterns(self) -> None: + """Test searching for patterns that exist in the suffix tree.""" + patterns = ["ana", "ban", "na"] + for pattern in patterns: + with self.subTest(pattern=pattern): + assert self.suffix_tree.search( + pattern + ), f"Pattern '{pattern}' should be found." + + def test_search_non_existing_patterns(self) -> None: + """Test searching for patterns that do not exist in the suffix tree.""" + patterns = ["xyz", "apple", "cat"] + for pattern in patterns: + with self.subTest(pattern=pattern): + assert not self.suffix_tree.search( + pattern + ), f"Pattern '{pattern}' should not be found." + + def test_search_empty_pattern(self) -> None: + """Test searching for an empty pattern.""" + assert self.suffix_tree.search(""), "An empty pattern should be found." + + def test_search_full_text(self) -> None: + """Test searching for the full text.""" + assert self.suffix_tree.search( + self.text + ), "The full text should be found in the suffix tree." + + def test_search_substrings(self) -> None: + """Test searching for substrings of the full text.""" + substrings = ["ban", "ana", "a", "na"] + for substring in substrings: + with self.subTest(substring=substring): + assert self.suffix_tree.search( + substring + ), f"Substring '{substring}' should be found." 
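For readers skimming the patch, a minimal interactive sketch of the SuffixTree API that the tests above exercise. It assumes only what this pull request adds (the data_structures.suffix_tree package and its SuffixTree class); the printed values follow from the character-by-character suffix insertion in suffix_tree.py.

    from data_structures.suffix_tree.suffix_tree import SuffixTree

    tree = SuffixTree("banana")   # inserts the suffixes banana, anana, nana, ana, na, a
    print(tree.search("ana"))     # True  -- "ana" occurs at indices 1 and 3 of "banana"
    print(tree.search("nab"))     # False -- "nab" is not a substring of "banana"
    print(tree.search(""))        # True  -- the empty pattern matches trivially at the root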
+ + +if __name__ == "__main__": + unittest.main() From a9ca110d6b6e4921119fdcca3b2a01e7f649f1ed Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 30 Sep 2024 12:49:31 +0200 Subject: [PATCH 1423/1543] Scripts for closing pull requests for Hacktoberfest (#11587) * Scripts for closing pull requests for Hacktoberfest * --limit=500 * Lose 2024 --- ...ose_pull_requests_with_awaiting_changes.sh | 22 +++++++++++++++++++ .../close_pull_requests_with_failing_tests.sh | 22 +++++++++++++++++++ ...requests_with_require_descriptive_names.sh | 21 ++++++++++++++++++ .../close_pull_requests_with_require_tests.sh | 22 +++++++++++++++++++ ...e_pull_requests_with_require_type_hints.sh | 21 ++++++++++++++++++ 5 files changed, 108 insertions(+) create mode 100755 scripts/close_pull_requests_with_awaiting_changes.sh create mode 100755 scripts/close_pull_requests_with_failing_tests.sh create mode 100755 scripts/close_pull_requests_with_require_descriptive_names.sh create mode 100755 scripts/close_pull_requests_with_require_tests.sh create mode 100755 scripts/close_pull_requests_with_require_type_hints.sh diff --git a/scripts/close_pull_requests_with_awaiting_changes.sh b/scripts/close_pull_requests_with_awaiting_changes.sh new file mode 100755 index 000000000000..55e19c980596 --- /dev/null +++ b/scripts/close_pull_requests_with_awaiting_changes.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "awaiting changes" label is present + awaiting_changes=$(echo "$pr_labels" | jq -r '.[] | select(.name == "awaiting changes")') + echo "Checking PR #$pr_number $pr_title ($awaiting_changes) ($pr_labels)" + + # If awaiting_changes, close the pull request + if [[ -n "$awaiting_changes" ]]; then + echo "Closing PR #$pr_number $pr_title due to awaiting_changes label" + gh pr close "$pr_number" --comment "Closing awaiting_changes PRs to prepare for Hacktoberfest" + sleep 2 + fi +done diff --git a/scripts/close_pull_requests_with_failing_tests.sh b/scripts/close_pull_requests_with_failing_tests.sh new file mode 100755 index 000000000000..3ec5960aed27 --- /dev/null +++ b/scripts/close_pull_requests_with_failing_tests.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "tests are failing" label is present + tests_are_failing=$(echo "$pr_labels" | jq -r '.[] | select(.name == "tests are failing")') + echo "Checking PR #$pr_number $pr_title ($tests_are_failing) ($pr_labels)" + + # If there are failing tests, close the pull request + if [[ -n "$tests_are_failing" ]]; then + echo "Closing PR #$pr_number $pr_title due to tests_are_failing label" + gh pr close "$pr_number" --comment "Closing tests_are_failing PRs to prepare for Hacktoberfest" + sleep 2 + fi +done diff --git a/scripts/close_pull_requests_with_require_descriptive_names.sh b/scripts/close_pull_requests_with_require_descriptive_names.sh new file mode 100755 index 000000000000..0fc3cec1d247 --- /dev/null +++ 
b/scripts/close_pull_requests_with_require_descriptive_names.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "require descriptive names" label is present + require_descriptive_names=$(echo "$pr_labels" | jq -r '.[] | select(.name == "require descriptive names")') + echo "Checking PR #$pr_number $pr_title ($require_descriptive_names) ($pr_labels)" + + # If there are require_descriptive_names, close the pull request + if [[ -n "$require_descriptive_names" ]]; then + echo "Closing PR #$pr_number $pr_title due to require_descriptive_names label" + gh pr close "$pr_number" --comment "Closing require_descriptive_names PRs to prepare for Hacktoberfest" + fi +done diff --git a/scripts/close_pull_requests_with_require_tests.sh b/scripts/close_pull_requests_with_require_tests.sh new file mode 100755 index 000000000000..89a54996b584 --- /dev/null +++ b/scripts/close_pull_requests_with_require_tests.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "require_tests" label is present + require_tests=$(echo "$pr_labels" | jq -r '.[] | select(.name == "require tests")') + echo "Checking PR #$pr_number $pr_title ($require_tests) ($pr_labels)" + + # If there require tests, close the pull request + if [[ -n "$require_tests" ]]; then + echo "Closing PR #$pr_number $pr_title due to require_tests label" + gh pr close "$pr_number" --comment "Closing require_tests PRs to prepare for Hacktoberfest" + # sleep 2 + fi +done diff --git a/scripts/close_pull_requests_with_require_type_hints.sh b/scripts/close_pull_requests_with_require_type_hints.sh new file mode 100755 index 000000000000..df5d88289cf0 --- /dev/null +++ b/scripts/close_pull_requests_with_require_type_hints.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# List all open pull requests +prs=$(gh pr list --state open --json number,title,labels --limit 500) + +# Loop through each pull request +echo "$prs" | jq -c '.[]' | while read -r pr; do + pr_number=$(echo "$pr" | jq -r '.number') + pr_title=$(echo "$pr" | jq -r '.title') + pr_labels=$(echo "$pr" | jq -r '.labels') + + # Check if the "require type hints" label is present + require_type_hints=$(echo "$pr_labels" | jq -r '.[] | select(.name == "require type hints")') + echo "Checking PR #$pr_number $pr_title ($require_type_hints) ($pr_labels)" + + # If require_type_hints, close the pull request + if [[ -n "$require_type_hints" ]]; then + echo "Closing PR #$pr_number $pr_title due to require_type_hints label" + gh pr close "$pr_number" --comment "Closing require_type_hints PRs to prepare for Hacktoberfest" + fi +done From a7bfa224554f277ed68be9e4ef3f6d1cd89008af Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 22:16:17 +0200 Subject: [PATCH 1424/1543] [pre-commit.ci] pre-commit autoupdate (#11594) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - 
[github.com/astral-sh/ruff-pre-commit: v0.6.7 → v0.6.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.7...v0.6.8) - [github.com/abravalheri/validate-pyproject: v0.19 → v0.20.2](https://github.com/abravalheri/validate-pyproject/compare/v0.19...v0.20.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7b219597f7b6..8a8e5c1f6ad9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.7 + rev: v0.6.8 hooks: - id: ruff - id: ruff-format @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.19 + rev: v0.20.2 hooks: - id: validate-pyproject From 0177ae1cd596f4f3c0ee7490666d74504deb0298 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 30 Sep 2024 23:01:15 +0200 Subject: [PATCH 1425/1543] Upgrade to Python 3.13 (#11588) --- .github/workflows/build.yml | 6 ++- DIRECTORY.md | 1 - computer_vision/haralick_descriptors.py | 8 ++-- data_structures/heap/binomial_heap.py | 6 +-- electronics/circular_convolution.py | 6 +-- fractals/julia_sets.py | 18 ++++----- graphics/bezier_curve.py | 8 ++-- graphs/dijkstra_binary_grid.py | 2 +- linear_algebra/src/power_iteration.py | 2 +- linear_programming/simplex.py | 32 +++++++-------- machine_learning/decision_tree.py | 8 ++-- machine_learning/forecasting/run.py | 8 ++-- machine_learning/k_nearest_neighbours.py | 2 +- machine_learning/logistic_regression.py | 4 +- machine_learning/loss_functions.py | 40 +++++++++---------- machine_learning/mfcc.py | 13 +++--- .../multilayer_perceptron_classifier.py | 2 +- machine_learning/scoring_functions.py | 22 +++++----- machine_learning/similarity_search.py | 2 +- machine_learning/support_vector_machines.py | 6 +-- maths/euclidean_distance.py | 8 ++-- maths/euler_method.py | 2 +- maths/euler_modified.py | 4 +- maths/gaussian.py | 16 ++++---- maths/minkowski_distance.py | 2 +- maths/numerical_analysis/adams_bashforth.py | 8 ++-- maths/numerical_analysis/runge_kutta.py | 2 +- .../runge_kutta_fehlberg_45.py | 4 +- maths/numerical_analysis/runge_kutta_gills.py | 2 +- maths/softmax.py | 2 +- .../two_hidden_layers_neural_network.py | 6 +-- other/bankers_algorithm.py | 8 ++-- physics/in_static_equilibrium.py | 2 +- requirements.txt | 4 +- ..._tweets.py => get_user_tweets.py.DISABLED} | 0 35 files changed, 135 insertions(+), 131 deletions(-) rename web_programming/{get_user_tweets.py => get_user_tweets.py.DISABLED} (100%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a113b4608678..dad2b2fac086 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: 3.12 + python-version: 3.13 allow-prereleases: true - uses: actions/cache@v4 with: @@ -26,6 +26,10 @@ jobs: # TODO: #8818 Re-enable quantum tests run: pytest --ignore=quantum/q_fourier_transform.py + --ignore=computer_vision/cnn_classification.py + --ignore=dynamic_programming/k_means_clustering_tensorflow.py + --ignore=machine_learning/lstm/lstm_prediction.py + --ignore=neural_network/input_data.py --ignore=project_euler/ --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered diff --git a/DIRECTORY.md b/DIRECTORY.md index 
955001e2aa23..56ab8377f16b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1343,7 +1343,6 @@ * [Get Ip Geolocation](web_programming/get_ip_geolocation.py) * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) - * [Get User Tweets](web_programming/get_user_tweets.py) * [Giphy](web_programming/giphy.py) * [Instagram Crawler](web_programming/instagram_crawler.py) * [Instagram Pic](web_programming/instagram_pic.py) diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py index 634f0495797b..54632160dcf2 100644 --- a/computer_vision/haralick_descriptors.py +++ b/computer_vision/haralick_descriptors.py @@ -19,7 +19,7 @@ def root_mean_square_error(original: np.ndarray, reference: np.ndarray) -> float >>> root_mean_square_error(np.array([1, 2, 3]), np.array([6, 4, 2])) 3.1622776601683795 """ - return np.sqrt(((original - reference) ** 2).mean()) + return float(np.sqrt(((original - reference) ** 2).mean())) def normalize_image( @@ -273,7 +273,7 @@ def haralick_descriptors(matrix: np.ndarray) -> list[float]: >>> morphological = opening_filter(binary) >>> mask_1 = binary_mask(gray, morphological)[0] >>> concurrency = matrix_concurrency(mask_1, (0, 1)) - >>> haralick_descriptors(concurrency) + >>> [float(f) for f in haralick_descriptors(concurrency)] [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] """ # Function np.indices could be used for bigger input types, @@ -335,7 +335,7 @@ def get_descriptors( return np.concatenate(descriptors, axis=None) -def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32: +def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> float: """ Simple method for calculating the euclidean distance between two points, with type np.ndarray. @@ -346,7 +346,7 @@ def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> np.float32: >>> euclidean(a, b) 3.3166247903554 """ - return np.sqrt(np.sum(np.square(point_1 - point_2))) + return float(np.sqrt(np.sum(np.square(point_1 - point_2)))) def get_distances(descriptors: np.ndarray, base: int) -> list[tuple[int, float]]: diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py index 099bd2871023..9cfdf0c12fe0 100644 --- a/data_structures/heap/binomial_heap.py +++ b/data_structures/heap/binomial_heap.py @@ -73,7 +73,7 @@ class BinomialHeap: 30 Deleting - delete() test - >>> [first_heap.delete_min() for _ in range(20)] + >>> [int(first_heap.delete_min()) for _ in range(20)] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] Create a new Heap @@ -118,7 +118,7 @@ class BinomialHeap: values in merged heap; (merge is inplace) >>> results = [] >>> while not first_heap.is_empty(): - ... results.append(first_heap.delete_min()) + ... 
results.append(int(first_heap.delete_min())) >>> results [17, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34] """ @@ -354,7 +354,7 @@ def delete_min(self): # Merge heaps self.merge_heaps(new_heap) - return min_value + return int(min_value) def pre_order(self): """ diff --git a/electronics/circular_convolution.py b/electronics/circular_convolution.py index 768f2ad941bc..d06e76be759b 100644 --- a/electronics/circular_convolution.py +++ b/electronics/circular_convolution.py @@ -39,7 +39,7 @@ def circular_convolution(self) -> list[float]: Usage: >>> convolution = CircularConvolution() >>> convolution.circular_convolution() - [10, 10, 6, 14] + [10.0, 10.0, 6.0, 14.0] >>> convolution.first_signal = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4, 1.6] >>> convolution.second_signal = [0.1, 0.3, 0.5, 0.7, 0.9, 1.1, 1.3, 1.5] @@ -54,7 +54,7 @@ def circular_convolution(self) -> list[float]: >>> convolution.first_signal = [1, -1, 2, 3, -1] >>> convolution.second_signal = [1, 2, 3] >>> convolution.circular_convolution() - [8, -2, 3, 4, 11] + [8.0, -2.0, 3.0, 4.0, 11.0] """ @@ -91,7 +91,7 @@ def circular_convolution(self) -> list[float]: final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal)) # rounding-off to two decimal places - return [round(i, 2) for i in final_signal] + return [float(round(i, 2)) for i in final_signal] if __name__ == "__main__": diff --git a/fractals/julia_sets.py b/fractals/julia_sets.py index 1eef4573ba19..bea599d44339 100644 --- a/fractals/julia_sets.py +++ b/fractals/julia_sets.py @@ -40,11 +40,11 @@ def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray: """ Evaluate $e^z + c$. - >>> eval_exponential(0, 0) + >>> float(eval_exponential(0, 0)) 1.0 - >>> abs(eval_exponential(1, np.pi*1.j)) < 1e-15 + >>> bool(abs(eval_exponential(1, np.pi*1.j)) < 1e-15) True - >>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15 + >>> bool(abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15) True """ return np.exp(z_values) + c_parameter @@ -98,20 +98,20 @@ def iterate_function( >>> iterate_function(eval_quadratic_polynomial, 0, 3, np.array([0,1,2])).shape (3,) - >>> np.round(iterate_function(eval_quadratic_polynomial, + >>> complex(np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... np.array([0,1,2]))[0]) + ... np.array([0,1,2]))[0])) 0j - >>> np.round(iterate_function(eval_quadratic_polynomial, + >>> complex(np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... np.array([0,1,2]))[1]) + ... np.array([0,1,2]))[1])) (1+0j) - >>> np.round(iterate_function(eval_quadratic_polynomial, + >>> complex(np.round(iterate_function(eval_quadratic_polynomial, ... 0, ... 3, - ... np.array([0,1,2]))[2]) + ... np.array([0,1,2]))[2])) (256+0j) """ diff --git a/graphics/bezier_curve.py b/graphics/bezier_curve.py index 9d906f179c92..6c7dcd4f06e7 100644 --- a/graphics/bezier_curve.py +++ b/graphics/bezier_curve.py @@ -30,9 +30,9 @@ def basis_function(self, t: float) -> list[float]: returns the x, y values of basis function at time t >>> curve = BezierCurve([(1,1), (1,2)]) - >>> curve.basis_function(0) + >>> [float(x) for x in curve.basis_function(0)] [1.0, 0.0] - >>> curve.basis_function(1) + >>> [float(x) for x in curve.basis_function(1)] [0.0, 1.0] """ assert 0 <= t <= 1, "Time t must be between 0 and 1." @@ -55,9 +55,9 @@ def bezier_curve_function(self, t: float) -> tuple[float, float]: The last point in the curve is when t = 1. 
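A note on the pattern running through these doctest edits: the expected values are unchanged, only the calls are wrapped in int()/float()/bool(), or the functions themselves now cast their return values to builtins. The NumPy releases that support Python 3.13 are the 2.x line, and since NumPy 2.0 the repr of a NumPy scalar includes its type name, which would no longer match the plain numbers these doctests expect. A minimal sketch of the behaviour being worked around, assuming NumPy >= 2.0:

    import numpy as np  # assumes NumPy >= 2.0, where scalar reprs changed (NEP 51)

    value = np.float64(0.25)
    print(repr(value))          # np.float64(0.25) -> would fail a doctest expecting 0.25
    print(repr(float(value)))   # 0.25             -> matches the plain expected output

    flag = np.bool_(True)
    print(repr(flag))           # np.True_
    print(repr(bool(flag)))     # True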
>>> curve = BezierCurve([(1,1), (1,2)]) - >>> curve.bezier_curve_function(0) + >>> tuple(float(x) for x in curve.bezier_curve_function(0)) (1.0, 1.0) - >>> curve.bezier_curve_function(1) + >>> tuple(float(x) for x in curve.bezier_curve_function(1)) (1.0, 2.0) """ diff --git a/graphs/dijkstra_binary_grid.py b/graphs/dijkstra_binary_grid.py index c23d8234328a..06293a87da2d 100644 --- a/graphs/dijkstra_binary_grid.py +++ b/graphs/dijkstra_binary_grid.py @@ -69,7 +69,7 @@ def dijkstra( x, y = predecessors[x, y] path.append(source) # add the source manually path.reverse() - return matrix[destination], path + return float(matrix[destination]), path for i in range(len(dx)): nx, ny = x + dx[i], y + dy[i] diff --git a/linear_algebra/src/power_iteration.py b/linear_algebra/src/power_iteration.py index 24fbd9a5e002..83c2ce48c3a0 100644 --- a/linear_algebra/src/power_iteration.py +++ b/linear_algebra/src/power_iteration.py @@ -78,7 +78,7 @@ def power_iteration( if is_complex: lambda_ = np.real(lambda_) - return lambda_, vector + return float(lambda_), vector def test_power_iteration() -> None: diff --git a/linear_programming/simplex.py b/linear_programming/simplex.py index dc171bacd3a2..a8affe1b72d2 100644 --- a/linear_programming/simplex.py +++ b/linear_programming/simplex.py @@ -107,8 +107,8 @@ def generate_col_titles(self) -> list[str]: def find_pivot(self) -> tuple[Any, Any]: """Finds the pivot row and column. - >>> Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], [1,2,0,1,7.]]), - ... 2, 0).find_pivot() + >>> tuple(int(x) for x in Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6], + ... [1,2,0,1,7.]]), 2, 0).find_pivot()) (1, 0) """ objective = self.objectives[-1] @@ -215,8 +215,8 @@ def run_simplex(self) -> dict[Any, Any]: Max: x1 + x2 ST: x1 + 3x2 <= 4 3x1 + x2 <= 4 - >>> Tableau(np.array([[-1,-1,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]), - ... 2, 0).run_simplex() + >>> {key: float(value) for key, value in Tableau(np.array([[-1,-1,0,0,0], + ... [1,3,1,0,4],[3,1,0,1,4.]]), 2, 0).run_simplex().items()} {'P': 2.0, 'x1': 1.0, 'x2': 1.0} # Standard linear program with 3 variables: @@ -224,21 +224,21 @@ def run_simplex(self) -> dict[Any, Any]: ST: 2x1 + x2 + x3 ≤ 2 x1 + 2x2 + 3x3 ≤ 5 2x1 + 2x2 + x3 ≤ 6 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [-3,-1,-3,0,0,0,0], ... [2,1,1,1,0,0,2], ... [1,2,3,0,1,0,5], ... [2,2,1,0,0,1,6.] - ... ]),3,0).run_simplex() # doctest: +ELLIPSIS + ... ]),3,0).run_simplex().items()} # doctest: +ELLIPSIS {'P': 5.4, 'x1': 0.199..., 'x3': 1.6} # Optimal tableau input: - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [0, 0, 0.25, 0.25, 2], ... [0, 1, 0.375, -0.125, 1], ... [1, 0, -0.125, 0.375, 1] - ... ]), 2, 0).run_simplex() + ... ]), 2, 0).run_simplex().items()} {'P': 2.0, 'x1': 1.0, 'x2': 1.0} # Non-standard: >= constraints @@ -246,25 +246,25 @@ def run_simplex(self) -> dict[Any, Any]: ST: x1 + x2 + x3 <= 40 2x1 + x2 - x3 >= 10 - x2 + x3 >= 10 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [2, 0, 0, 0, -1, -1, 0, 0, 20], ... [-2, -3, -1, 0, 0, 0, 0, 0, 0], ... [1, 1, 1, 1, 0, 0, 0, 0, 40], ... [2, 1, -1, 0, -1, 0, 1, 0, 10], ... [0, -1, 1, 0, 0, -1, 0, 1, 10.] - ... ]), 3, 2).run_simplex() + ... ]), 3, 2).run_simplex().items()} {'P': 70.0, 'x1': 10.0, 'x2': 10.0, 'x3': 20.0} # Non standard: minimisation and equalities Min: x1 + x2 ST: 2x1 + x2 = 12 6x1 + 5x2 = 40 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... 
[8, 6, 0, 0, 52], ... [1, 1, 0, 0, 0], ... [2, 1, 1, 0, 12], ... [6, 5, 0, 1, 40.], - ... ]), 2, 2).run_simplex() + ... ]), 2, 2).run_simplex().items()} {'P': 7.0, 'x1': 5.0, 'x2': 2.0} @@ -275,7 +275,7 @@ def run_simplex(self) -> dict[Any, Any]: 2x1 + 4x2 <= 48 x1 + x2 >= 10 x1 >= 2 - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [2, 1, 0, 0, 0, -1, -1, 0, 0, 12.0], ... [-8, -6, 0, 0, 0, 0, 0, 0, 0, 0.0], ... [1, 3, 1, 0, 0, 0, 0, 0, 0, 33.0], @@ -283,7 +283,7 @@ def run_simplex(self) -> dict[Any, Any]: ... [2, 4, 0, 0, 1, 0, 0, 0, 0, 48.0], ... [1, 1, 0, 0, 0, -1, 0, 1, 0, 10.0], ... [1, 0, 0, 0, 0, 0, -1, 0, 1, 2.0] - ... ]), 2, 2).run_simplex() # doctest: +ELLIPSIS + ... ]), 2, 2).run_simplex().items()} # doctest: +ELLIPSIS {'P': 132.0, 'x1': 12.000... 'x2': 5.999...} """ # Stop simplex algorithm from cycling. @@ -307,11 +307,11 @@ def run_simplex(self) -> dict[Any, Any]: def interpret_tableau(self) -> dict[str, float]: """Given the final tableau, add the corresponding values of the basic decision variables to the `output_dict` - >>> Tableau(np.array([ + >>> {key: float(value) for key, value in Tableau(np.array([ ... [0,0,0.875,0.375,5], ... [0,1,0.375,-0.125,1], ... [1,0,-0.125,0.375,1] - ... ]),2, 0).interpret_tableau() + ... ]),2, 0).interpret_tableau().items()} {'P': 5.0, 'x1': 1.0, 'x2': 1.0} """ # P = RHS of final tableau diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index d0bd6ab0b555..72970431c3fc 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -26,15 +26,15 @@ def mean_squared_error(self, labels, prediction): >>> tester = DecisionTree() >>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10]) >>> test_prediction = float(6) - >>> tester.mean_squared_error(test_labels, test_prediction) == ( + >>> bool(tester.mean_squared_error(test_labels, test_prediction) == ( ... TestDecisionTree.helper_mean_squared_error_test(test_labels, - ... test_prediction)) + ... test_prediction))) True >>> test_labels = np.array([1,2,3]) >>> test_prediction = float(2) - >>> tester.mean_squared_error(test_labels, test_prediction) == ( + >>> bool(tester.mean_squared_error(test_labels, test_prediction) == ( ... TestDecisionTree.helper_mean_squared_error_test(test_labels, - ... test_prediction)) + ... 
test_prediction))) True """ if labels.ndim != 1: diff --git a/machine_learning/forecasting/run.py b/machine_learning/forecasting/run.py index dbb86caf8568..9d81b03cd09e 100644 --- a/machine_learning/forecasting/run.py +++ b/machine_learning/forecasting/run.py @@ -28,7 +28,7 @@ def linear_regression_prediction( input : training data (date, total_user, total_event) in list of float output : list of total user prediction in float >>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2]) - >>> abs(n - 5.0) < 1e-6 # Checking precision because of floating point errors + >>> bool(abs(n - 5.0) < 1e-6) # Checking precision because of floating point errors True """ x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)]) @@ -56,7 +56,7 @@ def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> ) model_fit = model.fit(disp=False, maxiter=600, method="nm") result = model_fit.predict(1, len(test_match), exog=[test_match]) - return result[0] + return float(result[0]) def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float: @@ -75,7 +75,7 @@ def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> f regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1) regressor.fit(x_train, train_user) y_pred = regressor.predict(x_test) - return y_pred[0] + return float(y_pred[0]) def interquartile_range_checker(train_user: list) -> float: @@ -92,7 +92,7 @@ def interquartile_range_checker(train_user: list) -> float: q3 = np.percentile(train_user, 75) iqr = q3 - q1 low_lim = q1 - (iqr * 0.1) - return low_lim + return float(low_lim) def data_safety_checker(list_vote: list, actual_result: float) -> bool: diff --git a/machine_learning/k_nearest_neighbours.py b/machine_learning/k_nearest_neighbours.py index a43757c5c20e..fbc1b8bd227e 100644 --- a/machine_learning/k_nearest_neighbours.py +++ b/machine_learning/k_nearest_neighbours.py @@ -42,7 +42,7 @@ def _euclidean_distance(a: np.ndarray[float], b: np.ndarray[float]) -> float: >>> KNN._euclidean_distance(np.array([1, 2, 3]), np.array([1, 8, 11])) 10.0 """ - return np.linalg.norm(a - b) + return float(np.linalg.norm(a - b)) def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str: """ diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py index 090af5382185..496026631fbe 100644 --- a/machine_learning/logistic_regression.py +++ b/machine_learning/logistic_regression.py @@ -45,7 +45,7 @@ def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray: @returns: returns value in the range 0 to 1 Examples: - >>> sigmoid_function(4) + >>> float(sigmoid_function(4)) 0.9820137900379085 >>> sigmoid_function(np.array([-3, 3])) array([0.04742587, 0.95257413]) @@ -100,7 +100,7 @@ def cost_function(h: np.ndarray, y: np.ndarray) -> float: References: - https://en.wikipedia.org/wiki/Logistic_regression """ - return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean() + return float((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()) def log_likelihood(x, y, weights): diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index 150035661eb7..0bd9aa8b5401 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -22,7 +22,7 @@ def binary_cross_entropy( >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) - >>> binary_cross_entropy(true_labels, predicted_probs) + >>> float(binary_cross_entropy(true_labels, 
predicted_probs)) 0.2529995012327421 >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -68,7 +68,7 @@ def binary_focal_cross_entropy( >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) - >>> binary_focal_cross_entropy(true_labels, predicted_probs) + >>> float(binary_focal_cross_entropy(true_labels, predicted_probs)) 0.008257977659239775 >>> true_labels = np.array([0, 1, 1, 0, 1]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -108,7 +108,7 @@ def categorical_cross_entropy( >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) - >>> categorical_cross_entropy(true_labels, pred_probs) + >>> float(categorical_cross_entropy(true_labels, pred_probs)) 0.567395975254385 >>> true_labels = np.array([[1, 0], [0, 1]]) >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) @@ -179,13 +179,13 @@ def categorical_focal_cross_entropy( >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) >>> alpha = np.array([0.6, 0.2, 0.7]) - >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha)) 0.0025966118981496423 >>> true_labels = np.array([[0, 1, 0], [0, 0, 1]]) >>> pred_probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]]) >>> alpha = np.array([0.25, 0.25, 0.25]) - >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha) + >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha)) 0.23315276982014324 >>> true_labels = np.array([[1, 0], [0, 1]]) @@ -265,7 +265,7 @@ def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float: >>> true_labels = np.array([-1, 1, 1, -1, 1]) >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) - >>> hinge_loss(true_labels, pred) + >>> float(hinge_loss(true_labels, pred)) 1.52 >>> true_labels = np.array([-1, 1, 1, -1, 1, 1]) >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) @@ -309,11 +309,11 @@ def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float: >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102) + >>> bool(np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102)) True >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0]) >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) - >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164) + >>> bool(np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164)) True >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0]) >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) @@ -347,7 +347,7 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028) + >>> bool(np.isclose(mean_squared_error(true_values, predicted_values), 0.028)) True >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -381,11 +381,11 @@ def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> 
np.isclose(mean_absolute_error(true_values, predicted_values), 0.16) + >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 0.16)) True >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16) + >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 2.16)) False >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2]) @@ -420,7 +420,7 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> fl >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> mean_squared_logarithmic_error(true_values, predicted_values) + >>> float(mean_squared_logarithmic_error(true_values, predicted_values)) 0.0030860877925181344 >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) @@ -459,17 +459,17 @@ def mean_absolute_percentage_error( Examples: >>> y_true = np.array([10, 20, 30, 40]) >>> y_pred = np.array([12, 18, 33, 45]) - >>> mean_absolute_percentage_error(y_true, y_pred) + >>> float(mean_absolute_percentage_error(y_true, y_pred)) 0.13125 >>> y_true = np.array([1, 2, 3, 4]) >>> y_pred = np.array([2, 3, 4, 5]) - >>> mean_absolute_percentage_error(y_true, y_pred) + >>> float(mean_absolute_percentage_error(y_true, y_pred)) 0.5208333333333333 >>> y_true = np.array([34, 37, 44, 47, 48, 48, 46, 43, 32, 27, 26, 24]) >>> y_pred = np.array([37, 40, 46, 44, 46, 50, 45, 44, 34, 30, 22, 23]) - >>> mean_absolute_percentage_error(y_true, y_pred) + >>> float(mean_absolute_percentage_error(y_true, y_pred)) 0.064671076436071 """ if len(y_true) != len(y_pred): @@ -511,7 +511,7 @@ def perplexity_loss( ... [[0.03, 0.26, 0.21, 0.18, 0.30], ... [0.28, 0.10, 0.33, 0.15, 0.12]]] ... 
) - >>> perplexity_loss(y_true, y_pred) + >>> float(perplexity_loss(y_true, y_pred)) 5.0247347775367945 >>> y_true = np.array([[1, 4], [2, 3]]) >>> y_pred = np.array( @@ -600,17 +600,17 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> >>> y_true = np.array([3, 5, 2, 7]) >>> y_pred = np.array([2.9, 4.8, 2.1, 7.2]) - >>> smooth_l1_loss(y_true, y_pred, 1.0) + >>> float(smooth_l1_loss(y_true, y_pred, 1.0)) 0.012500000000000022 >>> y_true = np.array([2, 4, 6]) >>> y_pred = np.array([1, 5, 7]) - >>> smooth_l1_loss(y_true, y_pred, 1.0) + >>> float(smooth_l1_loss(y_true, y_pred, 1.0)) 0.5 >>> y_true = np.array([1, 3, 5, 7]) >>> y_pred = np.array([1, 3, 5, 7]) - >>> smooth_l1_loss(y_true, y_pred, 1.0) + >>> float(smooth_l1_loss(y_true, y_pred, 1.0)) 0.0 >>> y_true = np.array([1, 3, 5]) @@ -647,7 +647,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float >>> true_labels = np.array([0.2, 0.3, 0.5]) >>> predicted_probs = np.array([0.3, 0.3, 0.4]) - >>> kullback_leibler_divergence(true_labels, predicted_probs) + >>> float(kullback_leibler_divergence(true_labels, predicted_probs)) 0.030478754035472025 >>> true_labels = np.array([0.2, 0.3, 0.5]) >>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5]) diff --git a/machine_learning/mfcc.py b/machine_learning/mfcc.py index a1e99ce4ad40..dcc3151d5a1a 100644 --- a/machine_learning/mfcc.py +++ b/machine_learning/mfcc.py @@ -162,9 +162,9 @@ def normalize(audio: np.ndarray) -> np.ndarray: Examples: >>> audio = np.array([1, 2, 3, 4, 5]) >>> normalized_audio = normalize(audio) - >>> np.max(normalized_audio) + >>> float(np.max(normalized_audio)) 1.0 - >>> np.min(normalized_audio) + >>> float(np.min(normalized_audio)) 0.2 """ # Divide the entire audio signal by the maximum absolute value @@ -229,7 +229,8 @@ def calculate_fft(audio_windowed: np.ndarray, ftt_size: int = 1024) -> np.ndarra Examples: >>> audio_windowed = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) >>> audio_fft = calculate_fft(audio_windowed, ftt_size=4) - >>> np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j, -1.5-0.8660254j])) + >>> bool(np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j, + ... -1.5-0.8660254j]))) True """ # Transpose the audio data to have time in rows and channels in columns @@ -281,7 +282,7 @@ def freq_to_mel(freq: float) -> float: The frequency in mel scale. Examples: - >>> round(freq_to_mel(1000), 2) + >>> float(round(freq_to_mel(1000), 2)) 999.99 """ # Use the formula to convert frequency to the mel scale @@ -321,7 +322,7 @@ def mel_spaced_filterbank( Mel-spaced filter bank. Examples: - >>> round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10) + >>> float(round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10)) 0.0004603981 """ freq_min = 0 @@ -438,7 +439,7 @@ def discrete_cosine_transform(dct_filter_num: int, filter_num: int) -> np.ndarra The DCT basis matrix. 
Examples: - >>> round(discrete_cosine_transform(3, 5)[0][0], 5) + >>> float(round(discrete_cosine_transform(3, 5)[0][0], 5)) 0.44721 """ basis = np.empty((dct_filter_num, filter_num)) diff --git a/machine_learning/multilayer_perceptron_classifier.py b/machine_learning/multilayer_perceptron_classifier.py index e99a4131e972..40f998c7dfa2 100644 --- a/machine_learning/multilayer_perceptron_classifier.py +++ b/machine_learning/multilayer_perceptron_classifier.py @@ -17,7 +17,7 @@ def wrapper(y): """ - >>> wrapper(Y) + >>> [int(x) for x in wrapper(Y)] [0, 0, 1] """ return list(y) diff --git a/machine_learning/scoring_functions.py b/machine_learning/scoring_functions.py index 08b969a95c3b..f6b685f4f98a 100644 --- a/machine_learning/scoring_functions.py +++ b/machine_learning/scoring_functions.py @@ -20,11 +20,11 @@ def mae(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] - >>> np.around(mae(predict,actual),decimals = 2) + >>> float(np.around(mae(predict,actual),decimals = 2)) 0.67 >>> actual = [1,1,1];predict = [1,1,1] - >>> mae(predict,actual) + >>> float(mae(predict,actual)) 0.0 """ predict = np.array(predict) @@ -41,11 +41,11 @@ def mse(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] - >>> np.around(mse(predict,actual),decimals = 2) + >>> float(np.around(mse(predict,actual),decimals = 2)) 1.33 >>> actual = [1,1,1];predict = [1,1,1] - >>> mse(predict,actual) + >>> float(mse(predict,actual)) 0.0 """ predict = np.array(predict) @@ -63,11 +63,11 @@ def rmse(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] - >>> np.around(rmse(predict,actual),decimals = 2) + >>> float(np.around(rmse(predict,actual),decimals = 2)) 1.15 >>> actual = [1,1,1];predict = [1,1,1] - >>> rmse(predict,actual) + >>> float(rmse(predict,actual)) 0.0 """ predict = np.array(predict) @@ -84,12 +84,10 @@ def rmse(predict, actual): def rmsle(predict, actual): """ Examples(rounded for precision): - >>> actual = [10,10,30];predict = [10,2,30] - >>> np.around(rmsle(predict,actual),decimals = 2) + >>> float(np.around(rmsle(predict=[10, 2, 30], actual=[10, 10, 30]), decimals=2)) 0.75 - >>> actual = [1,1,1];predict = [1,1,1] - >>> rmsle(predict,actual) + >>> float(rmsle(predict=[1, 1, 1], actual=[1, 1, 1])) 0.0 """ predict = np.array(predict) @@ -117,12 +115,12 @@ def mbd(predict, actual): Here the model overpredicts >>> actual = [1,2,3];predict = [2,3,4] - >>> np.around(mbd(predict,actual),decimals = 2) + >>> float(np.around(mbd(predict,actual),decimals = 2)) 50.0 Here the model underpredicts >>> actual = [1,2,3];predict = [0,1,1] - >>> np.around(mbd(predict,actual),decimals = 2) + >>> float(np.around(mbd(predict,actual),decimals = 2)) -66.67 """ predict = np.array(predict) diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index 0bc3b17d7e5a..c8a573796882 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -153,7 +153,7 @@ def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float: >>> cosine_similarity(np.array([1, 2]), np.array([6, 32])) 0.9615239476408232 """ - return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b)) + return float(np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))) if __name__ == "__main__": diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py index 24046115ebc4..d17c9044a3e9 100644 --- 
a/machine_learning/support_vector_machines.py +++ b/machine_learning/support_vector_machines.py @@ -14,11 +14,11 @@ def norm_squared(vector: ndarray) -> float: Returns: float: squared second norm of vector - >>> norm_squared([1, 2]) + >>> int(norm_squared([1, 2])) 5 - >>> norm_squared(np.asarray([1, 2])) + >>> int(norm_squared(np.asarray([1, 2]))) 5 - >>> norm_squared([0, 0]) + >>> int(norm_squared([0, 0])) 0 """ return np.dot(vector, vector) diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py index 9b29b37b0ce6..aa7f3efc7684 100644 --- a/maths/euclidean_distance.py +++ b/maths/euclidean_distance.py @@ -13,13 +13,13 @@ def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut: """ Calculate the distance between the two endpoints of two vectors. A vector is defined as a list, tuple, or numpy 1D array. - >>> euclidean_distance((0, 0), (2, 2)) + >>> float(euclidean_distance((0, 0), (2, 2))) 2.8284271247461903 - >>> euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2])) + >>> float(euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2]))) 3.4641016151377544 - >>> euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])) + >>> float(euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]))) 8.0 - >>> euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8]) + >>> float(euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8])) 8.0 """ return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2)) diff --git a/maths/euler_method.py b/maths/euler_method.py index 30f193e6daa5..c6adb07e2d3d 100644 --- a/maths/euler_method.py +++ b/maths/euler_method.py @@ -26,7 +26,7 @@ def explicit_euler( ... return y >>> y0 = 1 >>> y = explicit_euler(f, y0, 0.0, 0.01, 5) - >>> y[-1] + >>> float(y[-1]) 144.77277243257308 """ n = int(np.ceil((x_end - x0) / step_size)) diff --git a/maths/euler_modified.py b/maths/euler_modified.py index d02123e1e2fb..bb282e9f0ab9 100644 --- a/maths/euler_modified.py +++ b/maths/euler_modified.py @@ -24,13 +24,13 @@ def euler_modified( >>> def f1(x, y): ... return -2*x*(y**2) >>> y = euler_modified(f1, 1.0, 0.0, 0.2, 1.0) - >>> y[-1] + >>> float(y[-1]) 0.503338255442106 >>> import math >>> def f2(x, y): ... return -2*y + (x**3)*math.exp(-2*x) >>> y = euler_modified(f2, 1.0, 0.0, 0.1, 0.3) - >>> y[-1] + >>> float(y[-1]) 0.5525976431951775 """ n = int(np.ceil((x_end - x0) / step_size)) diff --git a/maths/gaussian.py b/maths/gaussian.py index 0e02010a9c67..b1e62ea77fe2 100644 --- a/maths/gaussian.py +++ b/maths/gaussian.py @@ -5,18 +5,18 @@ from numpy import exp, pi, sqrt -def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: +def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float: """ - >>> gaussian(1) + >>> float(gaussian(1)) 0.24197072451914337 - >>> gaussian(24) + >>> float(gaussian(24)) 3.342714441794458e-126 - >>> gaussian(1, 4, 2) + >>> float(gaussian(1, 4, 2)) 0.06475879783294587 - >>> gaussian(1, 5, 3) + >>> float(gaussian(1, 5, 3)) 0.05467002489199788 Supports NumPy Arrays @@ -29,7 +29,7 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: 5.05227108e-15, 1.02797736e-18, 7.69459863e-23, 2.11881925e-27, 2.14638374e-32, 7.99882776e-38, 1.09660656e-43]) - >>> gaussian(15) + >>> float(gaussian(15)) 5.530709549844416e-50 >>> gaussian([1,2, 'string']) @@ -47,10 +47,10 @@ def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> int: ... 
OverflowError: (34, 'Result too large') - >>> gaussian(10**-326) + >>> float(gaussian(10**-326)) 0.3989422804014327 - >>> gaussian(2523, mu=234234, sigma=3425) + >>> float(gaussian(2523, mu=234234, sigma=3425)) 0.0 """ return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2)) diff --git a/maths/minkowski_distance.py b/maths/minkowski_distance.py index 3237124e8d36..99f02e31e417 100644 --- a/maths/minkowski_distance.py +++ b/maths/minkowski_distance.py @@ -19,7 +19,7 @@ def minkowski_distance( >>> minkowski_distance([1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], 2) 8.0 >>> import numpy as np - >>> np.isclose(5.0, minkowski_distance([5.0], [0.0], 3)) + >>> bool(np.isclose(5.0, minkowski_distance([5.0], [0.0], 3))) True >>> minkowski_distance([1.0], [2.0], -1) Traceback (most recent call last): diff --git a/maths/numerical_analysis/adams_bashforth.py b/maths/numerical_analysis/adams_bashforth.py index fb406171098a..26244a58552f 100644 --- a/maths/numerical_analysis/adams_bashforth.py +++ b/maths/numerical_analysis/adams_bashforth.py @@ -102,7 +102,7 @@ def step_3(self) -> np.ndarray: >>> def f(x, y): ... return x + y >>> y = AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_3() - >>> y[3] + >>> float(y[3]) 0.15533333333333332 >>> AdamsBashforth(f, [0, 0.2], [0, 0], 0.2, 1).step_3() @@ -140,9 +140,9 @@ def step_4(self) -> np.ndarray: ... return x + y >>> y = AdamsBashforth( ... f, [0, 0.2, 0.4, 0.6], [0, 0, 0.04, 0.128], 0.2, 1).step_4() - >>> y[4] + >>> float(y[4]) 0.30699999999999994 - >>> y[5] + >>> float(y[5]) 0.5771083333333333 >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_4() @@ -185,7 +185,7 @@ def step_5(self) -> np.ndarray: >>> y = AdamsBashforth( ... f, [0, 0.2, 0.4, 0.6, 0.8], [0, 0.02140, 0.02140, 0.22211, 0.42536], ... 0.2, 1).step_5() - >>> y[-1] + >>> float(y[-1]) 0.05436839444444452 >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_5() diff --git a/maths/numerical_analysis/runge_kutta.py b/maths/numerical_analysis/runge_kutta.py index 4cac017ee89e..3a25b0fb0173 100644 --- a/maths/numerical_analysis/runge_kutta.py +++ b/maths/numerical_analysis/runge_kutta.py @@ -19,7 +19,7 @@ def runge_kutta(f, y0, x0, h, x_end): ... return y >>> y0 = 1 >>> y = runge_kutta(f, y0, 0.0, 0.01, 5) - >>> y[-1] + >>> float(y[-1]) 148.41315904125113 """ n = int(np.ceil((x_end - x0) / h)) diff --git a/maths/numerical_analysis/runge_kutta_fehlberg_45.py b/maths/numerical_analysis/runge_kutta_fehlberg_45.py index 8181fe3015fc..0fbd60a35c1a 100644 --- a/maths/numerical_analysis/runge_kutta_fehlberg_45.py +++ b/maths/numerical_analysis/runge_kutta_fehlberg_45.py @@ -34,12 +34,12 @@ def runge_kutta_fehlberg_45( >>> def f(x, y): ... return 1 + y**2 >>> y = runge_kutta_fehlberg_45(f, 0, 0, 0.2, 1) - >>> y[1] + >>> float(y[1]) 0.2027100937470787 >>> def f(x,y): ... return x >>> y = runge_kutta_fehlberg_45(f, -1, 0, 0.2, 0) - >>> y[1] + >>> float(y[1]) -0.18000000000000002 >>> y = runge_kutta_fehlberg_45(5, 0, 0, 0.1, 1) Traceback (most recent call last): diff --git a/maths/numerical_analysis/runge_kutta_gills.py b/maths/numerical_analysis/runge_kutta_gills.py index 451cde4cb935..5d9672679813 100644 --- a/maths/numerical_analysis/runge_kutta_gills.py +++ b/maths/numerical_analysis/runge_kutta_gills.py @@ -34,7 +34,7 @@ def runge_kutta_gills( >>> def f(x, y): ... 
return (x-y)/2 >>> y = runge_kutta_gills(f, 0, 3, 0.2, 5) - >>> y[-1] + >>> float(y[-1]) 3.4104259225717537 >>> def f(x,y): diff --git a/maths/softmax.py b/maths/softmax.py index 04cf77525420..95c95e66f59e 100644 --- a/maths/softmax.py +++ b/maths/softmax.py @@ -28,7 +28,7 @@ def softmax(vector): The softmax vector adds up to one. We need to ceil to mitigate for precision - >>> np.ceil(np.sum(softmax([1,2,3,4]))) + >>> float(np.ceil(np.sum(softmax([1,2,3,4])))) 1.0 >>> vec = np.array([5,5]) diff --git a/neural_network/two_hidden_layers_neural_network.py b/neural_network/two_hidden_layers_neural_network.py index d488de590cc2..1b7c0beed3ba 100644 --- a/neural_network/two_hidden_layers_neural_network.py +++ b/neural_network/two_hidden_layers_neural_network.py @@ -64,7 +64,7 @@ def feedforward(self) -> np.ndarray: >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> res = nn.feedforward() >>> array_sum = np.sum(res) - >>> np.isnan(array_sum) + >>> bool(np.isnan(array_sum)) False """ # Layer_between_input_and_first_hidden_layer is the layer connecting the @@ -105,7 +105,7 @@ def back_propagation(self) -> None: >>> res = nn.feedforward() >>> nn.back_propagation() >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights - >>> (res == updated_weights).all() + >>> bool((res == updated_weights).all()) False """ @@ -171,7 +171,7 @@ def train(self, output: np.ndarray, iterations: int, give_loss: bool) -> None: >>> first_iteration_weights = nn.feedforward() >>> nn.back_propagation() >>> updated_weights = nn.second_hidden_layer_and_output_layer_weights - >>> (first_iteration_weights == updated_weights).all() + >>> bool((first_iteration_weights == updated_weights).all()) False """ for iteration in range(1, iterations + 1): diff --git a/other/bankers_algorithm.py b/other/bankers_algorithm.py index 858eb0b2c524..d4254f479a4f 100644 --- a/other/bankers_algorithm.py +++ b/other/bankers_algorithm.py @@ -87,9 +87,11 @@ def __need_index_manager(self) -> dict[int, list[int]]: This function builds an index control dictionary to track original ids/indices of processes when altered during execution of method "main" Return: {0: [a: int, b: int], 1: [c: int, d: int]} - >>> (BankersAlgorithm(test_claim_vector, test_allocated_res_table, - ... test_maximum_claim_table)._BankersAlgorithm__need_index_manager() - ... ) # doctest: +NORMALIZE_WHITESPACE + >>> index_control = BankersAlgorithm( + ... test_claim_vector, test_allocated_res_table, test_maximum_claim_table + ... )._BankersAlgorithm__need_index_manager() + >>> {key: [int(x) for x in value] for key, value + ... 
in index_control.items()} # doctest: +NORMALIZE_WHITESPACE {0: [1, 2, 0, 3], 1: [0, 1, 3, 1], 2: [1, 1, 0, 2], 3: [1, 3, 2, 0], 4: [2, 0, 0, 3]} """ diff --git a/physics/in_static_equilibrium.py b/physics/in_static_equilibrium.py index e3c2f9d07aed..fb5a9b5fff66 100644 --- a/physics/in_static_equilibrium.py +++ b/physics/in_static_equilibrium.py @@ -53,7 +53,7 @@ def in_static_equilibrium( # summation of moments is zero moments: NDArray[float64] = cross(location, forces) sum_moments: float = sum(moments) - return abs(sum_moments) < eps + return bool(abs(sum_moments) < eps) if __name__ == "__main__": diff --git a/requirements.txt b/requirements.txt index bb3d671393b9..afbf25ba6edc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ beautifulsoup4 fake_useragent imageio -keras ; python_version < '3.12' +keras lxml matplotlib numpy @@ -17,7 +17,7 @@ rich scikit-learn statsmodels sympy -tensorflow +tensorflow ; python_version < '3.13' tweepy # yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed typing_extensions diff --git a/web_programming/get_user_tweets.py b/web_programming/get_user_tweets.py.DISABLED similarity index 100% rename from web_programming/get_user_tweets.py rename to web_programming/get_user_tweets.py.DISABLED From 0abeeab39f4a612968a10b0541f630239b78f34f Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 1 Oct 2024 17:32:31 +0200 Subject: [PATCH 1426/1543] Drop six from our GitHub Actions (#11621) Drop https://six.readthedocs.io --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index dad2b2fac086..f54cc982d1ec 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -20,7 +20,7 @@ jobs: key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} - name: Install dependencies run: | - python -m pip install --upgrade pip setuptools six wheel + python -m pip install --upgrade pip setuptools wheel python -m pip install pytest-cov -r requirements.txt - name: Run tests # TODO: #8818 Re-enable quantum tests From 43a47e01eb2c2b681fa377b02150edba5cc76e32 Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Wed, 2 Oct 2024 08:18:17 +0530 Subject: [PATCH 1427/1543] Add word ladder algorithm in backtracking (#11590) * Add word ladder algorithm in backtracking * Improve comments and implement ruff checks * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change BFS to Backtracking * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Incorporate PR Changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add type hints for backtrack function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Hardvan Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + backtracking/word_ladder.py | 100 ++++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+) create mode 100644 backtracking/word_ladder.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 56ab8377f16b..cdbbac684fd2 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -22,6 +22,7 @@ * [Rat In Maze](backtracking/rat_in_maze.py) * [Sudoku](backtracking/sudoku.py) * [Sum Of 
Subsets](backtracking/sum_of_subsets.py) + * [Word Ladder](backtracking/word_ladder.py) * [Word Search](backtracking/word_search.py) ## Bit Manipulation diff --git a/backtracking/word_ladder.py b/backtracking/word_ladder.py new file mode 100644 index 000000000000..7d9fd00f6669 --- /dev/null +++ b/backtracking/word_ladder.py @@ -0,0 +1,100 @@ +""" +Word Ladder is a classic problem in computer science. +The problem is to transform a start word into an end word +by changing one letter at a time. +Each intermediate word must be a valid word from a given list of words. +The goal is to find a transformation sequence +from the start word to the end word. + +Wikipedia: https://en.wikipedia.org/wiki/Word_ladder +""" + +import string + + +def backtrack( + current_word: str, path: list[str], end_word: str, word_set: set[str] +) -> list[str]: + """ + Helper function to perform backtracking to find the transformation + from the current_word to the end_word. + + Parameters: + current_word (str): The current word in the transformation sequence. + path (list[str]): The list of transformations from begin_word to current_word. + end_word (str): The target word for transformation. + word_set (set[str]): The set of valid words for transformation. + + Returns: + list[str]: The list of transformations from begin_word to end_word. + Returns an empty list if there is no valid + transformation from current_word to end_word. + + Example: + >>> backtrack("hit", ["hit"], "cog", {"hot", "dot", "dog", "lot", "log", "cog"}) + ['hit', 'hot', 'dot', 'lot', 'log', 'cog'] + + >>> backtrack("hit", ["hit"], "cog", {"hot", "dot", "dog", "lot", "log"}) + [] + + >>> backtrack("lead", ["lead"], "gold", {"load", "goad", "gold", "lead", "lord"}) + ['lead', 'lead', 'load', 'goad', 'gold'] + + >>> backtrack("game", ["game"], "code", {"came", "cage", "code", "cade", "gave"}) + ['game', 'came', 'cade', 'code'] + """ + + # Base case: If the current word is the end word, return the path + if current_word == end_word: + return path + + # Try all possible single-letter transformations + for i in range(len(current_word)): + for c in string.ascii_lowercase: # Try changing each letter + transformed_word = current_word[:i] + c + current_word[i + 1 :] + if transformed_word in word_set: + word_set.remove(transformed_word) + # Recur with the new word added to the path + result = backtrack( + transformed_word, [*path, transformed_word], end_word, word_set + ) + if result: # valid transformation found + return result + word_set.add(transformed_word) # backtrack + + return [] # No valid transformation found + + +def word_ladder(begin_word: str, end_word: str, word_set: set[str]) -> list[str]: + """ + Solve the Word Ladder problem using Backtracking and return + the list of transformations from begin_word to end_word. + + Parameters: + begin_word (str): The word from which the transformation starts. + end_word (str): The target word for transformation. + word_list (list[str]): The list of valid words for transformation. + + Returns: + list[str]: The list of transformations from begin_word to end_word. + Returns an empty list if there is no valid transformation. 
+ + Example: + >>> word_ladder("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]) + ['hit', 'hot', 'dot', 'lot', 'log', 'cog'] + + >>> word_ladder("hit", "cog", ["hot", "dot", "dog", "lot", "log"]) + [] + + >>> word_ladder("lead", "gold", ["load", "goad", "gold", "lead", "lord"]) + ['lead', 'lead', 'load', 'goad', 'gold'] + + >>> word_ladder("game", "code", ["came", "cage", "code", "cade", "gave"]) + ['game', 'came', 'cade', 'code'] + """ + + if end_word not in word_set: # no valid transformation possible + return [] + + # Perform backtracking starting from the begin_word + return backtrack(begin_word, [begin_word], end_word, word_set) From 00e9d862248a27281d4de24c8c7eb2d7b018531c Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Wed, 2 Oct 2024 08:24:12 +0530 Subject: [PATCH 1428/1543] Improve comments, add doctests in symmetric_tree.py (#11619) --- data_structures/binary_tree/symmetric_tree.py | 67 +++++++++++++++++-- 1 file changed, 62 insertions(+), 5 deletions(-) diff --git a/data_structures/binary_tree/symmetric_tree.py b/data_structures/binary_tree/symmetric_tree.py index 98a766cab988..2bfeac98b2c9 100644 --- a/data_structures/binary_tree/symmetric_tree.py +++ b/data_structures/binary_tree/symmetric_tree.py @@ -13,7 +13,21 @@ @dataclass class Node: """ - A Node has data variable and pointers to Nodes to its left and right. + A Node represents an element of a binary tree, which contains: + + Attributes: + data: The value stored in the node (int). + left: Pointer to the left child node (Node or None). + right: Pointer to the right child node (Node or None). + + Example: + >>> node = Node(1, Node(2), Node(3)) + >>> node.data + 1 + >>> node.left.data + 2 + >>> node.right.data + 3 """ data: int @@ -24,12 +38,25 @@ class Node: def make_symmetric_tree() -> Node: r""" Create a symmetric tree for testing. + The tree looks like this: 1 / \ 2 2 / \ / \ 3 4 4 3 + + Returns: + Node: Root node of a symmetric tree. + + Example: + >>> tree = make_symmetric_tree() + >>> tree.data + 1 + >>> tree.left.data == tree.right.data + True + >>> tree.left.left.data == tree.right.right.data + True """ root = Node(1) root.left = Node(2) @@ -43,13 +70,26 @@ def make_symmetric_tree() -> Node: def make_asymmetric_tree() -> Node: r""" - Create a asymmetric tree for testing. + Create an asymmetric tree for testing. + The tree looks like this: 1 / \ 2 2 / \ / \ 3 4 3 4 + + Returns: + Node: Root node of an asymmetric tree. + + Example: + >>> tree = make_asymmetric_tree() + >>> tree.data + 1 + >>> tree.left.data == tree.right.data + True + >>> tree.left.left.data == tree.right.right.data + False """ root = Node(1) root.left = Node(2) @@ -63,7 +103,15 @@ def make_asymmetric_tree() -> Node: def is_symmetric_tree(tree: Node) -> bool: """ - Test cases for is_symmetric_tree function + Check if a binary tree is symmetric (i.e., a mirror of itself). + + Parameters: + tree: The root node of the binary tree. + + Returns: + bool: True if the tree is symmetric, False otherwise. + + Example: >>> is_symmetric_tree(make_symmetric_tree()) True >>> is_symmetric_tree(make_asymmetric_tree()) @@ -76,8 +124,17 @@ def is_symmetric_tree(tree: Node) -> bool: def is_mirror(left: Node | None, right: Node | None) -> bool: """ + Check if two subtrees are mirror images of each other. + + Parameters: + left: The root node of the left subtree. + right: The root node of the right subtree. + + Returns: + bool: True if the two subtrees are mirrors of each other, False otherwise. 
+ + Example: >>> tree1 = make_symmetric_tree() - >>> tree1.right.right = Node(3) >>> is_mirror(tree1.left, tree1.right) True >>> tree2 = make_asymmetric_tree() @@ -91,7 +148,7 @@ def is_mirror(left: Node | None, right: Node | None) -> bool: # One side is empty while the other is not, which is not symmetric. return False if left.data == right.data: - # The values match, so check the subtree + # The values match, so check the subtrees recursively. return is_mirror(left.left, right.right) and is_mirror(left.right, right.left) return False From 918fa8bb8ae1f052921fffd188d229d4713c73c9 Mon Sep 17 00:00:00 2001 From: 1227haran <68032825+1227haran@users.noreply.github.com> Date: Wed, 2 Oct 2024 23:37:07 +0530 Subject: [PATCH 1429/1543] Optimized O(n) to O(1) (#11669) --- data_structures/linked_list/has_loop.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data_structures/linked_list/has_loop.py b/data_structures/linked_list/has_loop.py index bc06ffe150e8..f49e01579adc 100644 --- a/data_structures/linked_list/has_loop.py +++ b/data_structures/linked_list/has_loop.py @@ -14,11 +14,11 @@ def __init__(self, data: Any) -> None: def __iter__(self): node = self - visited = [] + visited = set() while node: if node in visited: raise ContainsLoopError - visited.append(node) + visited.add(node) yield node.data node = node.next_node From f4b4ac159a17e0621e7f37141b165d58ca655b81 Mon Sep 17 00:00:00 2001 From: Ali Rashid <110668489+alirashidAR@users.noreply.github.com> Date: Thu, 3 Oct 2024 05:24:56 +0530 Subject: [PATCH 1430/1543] Adding Doctests to floyd_warshall.py (#11690) * Ruff test resolution * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/floyd_warshall.py | 47 +++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/dynamic_programming/floyd_warshall.py b/dynamic_programming/floyd_warshall.py index 2331f3e65483..b92c6667fb5c 100644 --- a/dynamic_programming/floyd_warshall.py +++ b/dynamic_programming/floyd_warshall.py @@ -12,19 +12,58 @@ def __init__(self, n=0): # a graph with Node 0,1,...,N-1 ] # dp[i][j] stores minimum distance from i to j def add_edge(self, u, v, w): + """ + Adds a directed edge from node u + to node v with weight w. + + >>> g = Graph(3) + >>> g.add_edge(0, 1, 5) + >>> g.dp[0][1] + 5 + """ self.dp[u][v] = w def floyd_warshall(self): + """ + Computes the shortest paths between all pairs of + nodes using the Floyd-Warshall algorithm. + + >>> g = Graph(3) + >>> g.add_edge(0, 1, 1) + >>> g.add_edge(1, 2, 2) + >>> g.floyd_warshall() + >>> g.show_min(0, 2) + 3 + >>> g.show_min(2, 0) + inf + """ for k in range(self.n): for i in range(self.n): for j in range(self.n): self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j]) def show_min(self, u, v): + """ + Returns the minimum distance from node u to node v. 
+ + >>> g = Graph(3) + >>> g.add_edge(0, 1, 3) + >>> g.add_edge(1, 2, 4) + >>> g.floyd_warshall() + >>> g.show_min(0, 2) + 7 + >>> g.show_min(1, 0) + inf + """ return self.dp[u][v] if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Example usage graph = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) @@ -38,5 +77,9 @@ def show_min(self, u, v): graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() - graph.show_min(1, 4) - graph.show_min(0, 3) + print( + graph.show_min(1, 4) + ) # Should output the minimum distance from node 1 to node 4 + print( + graph.show_min(0, 3) + ) # Should output the minimum distance from node 0 to node 3 From 080e7903a06765808c12c0c9c0b242f485cb9ce7 Mon Sep 17 00:00:00 2001 From: Aswin P Kumar <118362715+AswinPKumar01@users.noreply.github.com> Date: Thu, 3 Oct 2024 05:33:48 +0530 Subject: [PATCH 1431/1543] Add Word Break algorithm (#11687) * Add Word Break algorithm * Add Word Break algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- backtracking/word_break.py | 71 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 backtracking/word_break.py diff --git a/backtracking/word_break.py b/backtracking/word_break.py new file mode 100644 index 000000000000..1f2ab073f499 --- /dev/null +++ b/backtracking/word_break.py @@ -0,0 +1,71 @@ +""" +Word Break Problem is a well-known problem in computer science. +Given a string and a dictionary of words, the task is to determine if +the string can be segmented into a sequence of one or more dictionary words. + +Wikipedia: https://en.wikipedia.org/wiki/Word_break_problem +""" + + +def backtrack(input_string: str, word_dict: set[str], start: int) -> bool: + """ + Helper function that uses backtracking to determine if a valid + word segmentation is possible starting from index 'start'. + + Parameters: + input_string (str): The input string to be segmented. + word_dict (set[str]): A set of valid dictionary words. + start (int): The starting index of the substring to be checked. + + Returns: + bool: True if a valid segmentation is possible, otherwise False. + + Example: + >>> backtrack("leetcode", {"leet", "code"}, 0) + True + + >>> backtrack("applepenapple", {"apple", "pen"}, 0) + True + + >>> backtrack("catsandog", {"cats", "dog", "sand", "and", "cat"}, 0) + False + """ + + # Base case: if the starting index has reached the end of the string + if start == len(input_string): + return True + + # Try every possible substring from 'start' to 'end' + for end in range(start + 1, len(input_string) + 1): + if input_string[start:end] in word_dict and backtrack( + input_string, word_dict, end + ): + return True + + return False + + +def word_break(input_string: str, word_dict: set[str]) -> bool: + """ + Determines if the input string can be segmented into a sequence of + valid dictionary words using backtracking. + + Parameters: + input_string (str): The input string to segment. + word_dict (set[str]): The set of valid words. + + Returns: + bool: True if the string can be segmented into valid words, otherwise False. 
+ + Example: + >>> word_break("leetcode", {"leet", "code"}) + True + + >>> word_break("applepenapple", {"apple", "pen"}) + True + + >>> word_break("catsandog", {"cats", "dog", "sand", "and", "cat"}) + False + """ + + return backtrack(input_string, word_dict, 0) From 40f65e8150045dc82a7a58fe7cff6bfb353999f2 Mon Sep 17 00:00:00 2001 From: JeevaRamanathan <64531160+JeevaRamanathan@users.noreply.github.com> Date: Thu, 3 Oct 2024 05:48:01 +0530 Subject: [PATCH 1432/1543] Improve comments, docstrings in next_greatest_element.py (#11685) * Improve comments in next_greatest_element.py Signed-off-by: JeevaRamanathan * few changes Signed-off-by: JeevaRamanathan * updated descriptions of the functions parameters Signed-off-by: JeevaRamanathan --------- Signed-off-by: JeevaRamanathan --- .../stacks/next_greater_element.py | 60 ++++++++++++++----- 1 file changed, 46 insertions(+), 14 deletions(-) diff --git a/data_structures/stacks/next_greater_element.py b/data_structures/stacks/next_greater_element.py index 7d76d1f47dfa..216850b4b894 100644 --- a/data_structures/stacks/next_greater_element.py +++ b/data_structures/stacks/next_greater_element.py @@ -6,9 +6,20 @@ def next_greatest_element_slow(arr: list[float]) -> list[float]: """ - Get the Next Greatest Element (NGE) for all elements in a list. - Maximum element present after the current one which is also greater than the - current one. + Get the Next Greatest Element (NGE) for each element in the array + by checking all subsequent elements to find the next greater one. + + This is a brute-force implementation, and it has a time complexity + of O(n^2), where n is the size of the array. + + Args: + arr: List of numbers for which the NGE is calculated. + + Returns: + List containing the next greatest elements. If no + greater element is found, -1 is placed in the result. + + Example: >>> next_greatest_element_slow(arr) == expect True """ @@ -28,9 +39,21 @@ def next_greatest_element_slow(arr: list[float]) -> list[float]: def next_greatest_element_fast(arr: list[float]) -> list[float]: """ - Like next_greatest_element_slow() but changes the loops to use - enumerate() instead of range(len()) for the outer loop and - for in a slice of arr for the inner loop. + Find the Next Greatest Element (NGE) for each element in the array + using a more readable approach. This implementation utilizes + enumerate() for the outer loop and slicing for the inner loop. + + While this improves readability over next_greatest_element_slow(), + it still has a time complexity of O(n^2). + + Args: + arr: List of numbers for which the NGE is calculated. + + Returns: + List containing the next greatest elements. If no + greater element is found, -1 is placed in the result. + + Example: >>> next_greatest_element_fast(arr) == expect True """ @@ -47,14 +70,23 @@ def next_greatest_element_fast(arr: list[float]) -> list[float]: def next_greatest_element(arr: list[float]) -> list[float]: """ - Get the Next Greatest Element (NGE) for all elements in a list. - Maximum element present after the current one which is also greater than the - current one. - - A naive way to solve this is to take two loops and check for the next bigger - number but that will make the time complexity as O(n^2). The better way to solve - this would be to use a stack to keep track of maximum number giving a linear time - solution. + Efficient solution to find the Next Greatest Element (NGE) for all elements + using a stack. The time complexity is reduced to O(n), making it suitable + for larger arrays. 
+ + The stack keeps track of elements for which the next greater element hasn't + been found yet. By iterating through the array in reverse (from the last + element to the first), the stack is used to efficiently determine the next + greatest element for each element. + + Args: + arr: List of numbers for which the NGE is calculated. + + Returns: + List containing the next greatest elements. If no + greater element is found, -1 is placed in the result. + + Example: >>> next_greatest_element(arr) == expect True """ From e20b503b24fc271321a23584772ad8f0db17daf2 Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Fri, 4 Oct 2024 14:36:08 +0530 Subject: [PATCH 1433/1543] Improve comments, add doctests for kahns_algorithm_topo.py (#11668) * Improve comments, add doctests for kahns_algorithm_topo.py * Improve function docstring * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename variables, remove print --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- graphs/kahns_algorithm_topo.py | 67 +++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 21 deletions(-) diff --git a/graphs/kahns_algorithm_topo.py b/graphs/kahns_algorithm_topo.py index b1260bd5bd9b..c956cf9f48fd 100644 --- a/graphs/kahns_algorithm_topo.py +++ b/graphs/kahns_algorithm_topo.py @@ -1,36 +1,61 @@ -def topological_sort(graph): +def topological_sort(graph: dict[int, list[int]]) -> list[int] | None: """ - Kahn's Algorithm is used to find Topological ordering of Directed Acyclic Graph - using BFS + Perform topological sorting of a Directed Acyclic Graph (DAG) + using Kahn's Algorithm via Breadth-First Search (BFS). + + Topological sorting is a linear ordering of vertices in a graph such that for + every directed edge u → v, vertex u comes before vertex v in the ordering. + + Parameters: + graph: Adjacency list representing the directed graph where keys are + vertices, and values are lists of adjacent vertices. + + Returns: + The topologically sorted order of vertices if the graph is a DAG. + Returns None if the graph contains a cycle. 
+ + Example: + >>> graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} + >>> topological_sort(graph) + [0, 1, 2, 3, 4, 5] + + >>> graph_with_cycle = {0: [1], 1: [2], 2: [0]} + >>> topological_sort(graph_with_cycle) """ + indegree = [0] * len(graph) queue = [] - topo = [] - cnt = 0 + topo_order = [] + processed_vertices_count = 0 + # Calculate the indegree of each vertex for values in graph.values(): for i in values: indegree[i] += 1 + # Add all vertices with 0 indegree to the queue for i in range(len(indegree)): if indegree[i] == 0: queue.append(i) + # Perform BFS while queue: vertex = queue.pop(0) - cnt += 1 - topo.append(vertex) - for x in graph[vertex]: - indegree[x] -= 1 - if indegree[x] == 0: - queue.append(x) - - if cnt != len(graph): - print("Cycle exists") - else: - print(topo) - - -# Adjacency List of Graph -graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} -topological_sort(graph) + processed_vertices_count += 1 + topo_order.append(vertex) + + # Traverse neighbors + for neighbor in graph[vertex]: + indegree[neighbor] -= 1 + if indegree[neighbor] == 0: + queue.append(neighbor) + + if processed_vertices_count != len(graph): + return None # no topological ordering exists due to cycle + return topo_order # valid topological ordering + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 917ad62105dc829e45c0732d9ac2aae7ef358627 Mon Sep 17 00:00:00 2001 From: Sai Aswin Madhavan Date: Fri, 4 Oct 2024 14:58:50 +0530 Subject: [PATCH 1434/1543] Removed incorrect type hints (#11711) --- strings/min_cost_string_conversion.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index d147a9d7954c..40d54f0e8420 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -21,7 +21,6 @@ def compute_transform_tables( destination_seq = list(destination_string) len_source_seq = len(source_seq) len_destination_seq = len(destination_seq) - costs = [ [0 for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1) ] @@ -31,28 +30,28 @@ def compute_transform_tables( for i in range(1, len_source_seq + 1): costs[i][0] = i * delete_cost - ops[i][0] = f"D{source_seq[i - 1]:c}" + ops[i][0] = f"D{source_seq[i - 1]}" for i in range(1, len_destination_seq + 1): costs[0][i] = i * insert_cost - ops[0][i] = f"I{destination_seq[i - 1]:c}" + ops[0][i] = f"I{destination_seq[i - 1]}" for i in range(1, len_source_seq + 1): for j in range(1, len_destination_seq + 1): if source_seq[i - 1] == destination_seq[j - 1]: costs[i][j] = costs[i - 1][j - 1] + copy_cost - ops[i][j] = f"C{source_seq[i - 1]:c}" + ops[i][j] = f"C{source_seq[i - 1]}" else: costs[i][j] = costs[i - 1][j - 1] + replace_cost - ops[i][j] = f"R{source_seq[i - 1]:c}" + str(destination_seq[j - 1]) + ops[i][j] = f"R{source_seq[i - 1]}" + str(destination_seq[j - 1]) if costs[i - 1][j] + delete_cost < costs[i][j]: costs[i][j] = costs[i - 1][j] + delete_cost - ops[i][j] = f"D{source_seq[i - 1]:c}" + ops[i][j] = f"D{source_seq[i - 1]}" if costs[i][j - 1] + insert_cost < costs[i][j]: costs[i][j] = costs[i][j - 1] + insert_cost - ops[i][j] = f"I{destination_seq[j - 1]:c}" + ops[i][j] = f"I{destination_seq[j - 1]}" return costs, ops From 59ff87dc55b704dc7d3683bb6fabc7c4dc0afade Mon Sep 17 00:00:00 2001 From: Lonercode <91500485+Lonercode@users.noreply.github.com> Date: Fri, 4 Oct 2024 10:36:14 +0100 Subject: [PATCH 1435/1543] Added doctests to 
min_cost_string_conversion.py and removed :c specifier (#11721) * Added doctests to min_cost_string_conversion.py and removed :c specifier * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolved line length issues based on ruff requirements * modified in compliance with ruff for line length * Update strings/min_cost_string_conversion.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- strings/min_cost_string_conversion.py | 35 +++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index 40d54f0e8420..a5a3c4a4e3f8 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -17,6 +17,23 @@ def compute_transform_tables( delete_cost: int, insert_cost: int, ) -> tuple[list[list[int]], list[list[str]]]: + """ + Finds the most cost efficient sequence + for converting one string into another. + + >>> costs, operations = compute_transform_tables("cat", "cut", 1, 2, 3, 3) + >>> costs[0][:4] + [0, 3, 6, 9] + >>> costs[2][:4] + [6, 4, 3, 6] + >>> operations[0][:4] + ['0', 'Ic', 'Iu', 'It'] + >>> operations[3][:4] + ['Dt', 'Dt', 'Rtu', 'Ct'] + + >>> compute_transform_tables("", "", 1, 2, 3, 3) + ([[0]], [['0']]) + """ source_seq = list(source_string) destination_seq = list(destination_string) len_source_seq = len(source_seq) @@ -57,6 +74,24 @@ def compute_transform_tables( def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: + """ + Assembles the transformations based on the ops table. + + >>> ops = [['0', 'Ic', 'Iu', 'It'], + ... ['Dc', 'Cc', 'Iu', 'It'], + ... ['Da', 'Da', 'Rau', 'Rat'], + ... ['Dt', 'Dt', 'Rtu', 'Ct']] + >>> x = len(ops) - 1 + >>> y = len(ops[0]) - 1 + >>> assemble_transformation(ops, x, y) + ['Cc', 'Rau', 'Ct'] + + >>> ops1 = [['0']] + >>> x1 = len(ops1) - 1 + >>> y1 = len(ops1[0]) - 1 + >>> assemble_transformation(ops1, x1, y1) + [] + """ if i == 0 and j == 0: return [] elif ops[i][j][0] in {"C", "R"}: From 9a572dec2b6011e7c2c0d82f50989b3a404ea426 Mon Sep 17 00:00:00 2001 From: ARNAV RAJ <126798788+Acuspeedster@users.noreply.github.com> Date: Fri, 4 Oct 2024 21:59:39 +0530 Subject: [PATCH 1436/1543] feat: Implemented Matrix Exponentiation Method (#11747) * feat: add Matrix Exponentiation method docs: updated the header documentation and added new documentation for the new function. 
* feat: added new function matrix exponetiation method * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * feat: This function uses the tail-recursive form of the Euclidean algorithm to calculate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * reduced the number of characters per line in the comments * removed unwanted code * feat: Implemented a new function to swaap numbers without dummy variable * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed previos code * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Done with the required changes * Done with the required changes * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/fibonacci.py Co-authored-by: Tianyi Zheng * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Done with the required changes * Done with the required changes * Done with the required changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/fibonacci.py | 88 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index 927700b0418e..24b2d7ae449e 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -7,6 +7,8 @@ NOTE 2: the Binet's formula function is much more limited in the size of inputs that it can handle due to the size limitations of Python floats +NOTE 3: the matrix function is the fastest and most memory efficient for large n + See benchmark numbers in __main__ for performance comparisons/ https://en.wikipedia.org/wiki/Fibonacci_number for more information @@ -17,6 +19,9 @@ from math import sqrt from time import time +import numpy as np +from numpy import ndarray + def time_func(func, *args, **kwargs): """ @@ -230,6 +235,88 @@ def fib_binet(n: int) -> list[int]: return [round(phi**i / sqrt_5) for i in range(n + 1)] +def matrix_pow_np(m: ndarray, power: int) -> ndarray: + """ + Raises a matrix to the power of 'power' using binary exponentiation. + + Args: + m: Matrix as a numpy array. + power: The power to which the matrix is to be raised. + + Returns: + The matrix raised to the power. + + Raises: + ValueError: If power is negative. + + >>> m = np.array([[1, 1], [1, 0]], dtype=int) + >>> matrix_pow_np(m, 0) # Identity matrix when raised to the power of 0 + array([[1, 0], + [0, 1]]) + + >>> matrix_pow_np(m, 1) # Same matrix when raised to the power of 1 + array([[1, 1], + [1, 0]]) + + >>> matrix_pow_np(m, 5) + array([[8, 5], + [5, 3]]) + + >>> matrix_pow_np(m, -1) + Traceback (most recent call last): + ... 
+ ValueError: power is negative + """ + result = np.array([[1, 0], [0, 1]], dtype=int) # Identity Matrix + base = m + if power < 0: # Negative power is not allowed + raise ValueError("power is negative") + while power: + if power % 2 == 1: + result = np.dot(result, base) + base = np.dot(base, base) + power //= 2 + return result + + +def fib_matrix_np(n: int) -> int: + """ + Calculates the n-th Fibonacci number using matrix exponentiation. + https://www.nayuki.io/page/fast-fibonacci-algorithms#:~:text= + Summary:%20The%20two%20fast%20Fibonacci%20algorithms%20are%20matrix + + Args: + n: Fibonacci sequence index + + Returns: + The n-th Fibonacci number. + + Raises: + ValueError: If n is negative. + + >>> fib_matrix_np(0) + 0 + >>> fib_matrix_np(1) + 1 + >>> fib_matrix_np(5) + 5 + >>> fib_matrix_np(10) + 55 + >>> fib_matrix_np(-1) + Traceback (most recent call last): + ... + ValueError: n is negative + """ + if n < 0: + raise ValueError("n is negative") + if n == 0: + return 0 + + m = np.array([[1, 1], [1, 0]], dtype=int) + result = matrix_pow_np(m, n - 1) + return int(result[0, 0]) + + if __name__ == "__main__": from doctest import testmod @@ -242,3 +329,4 @@ def fib_binet(n: int) -> list[int]: time_func(fib_memoization, num) # 0.0100 ms time_func(fib_recursive_cached, num) # 0.0153 ms time_func(fib_recursive, num) # 257.0910 ms + time_func(fib_matrix_np, num) # 0.0000 ms From 5a8655d306d872085112d965067fcdc440286928 Mon Sep 17 00:00:00 2001 From: 1227haran <68032825+1227haran@users.noreply.github.com> Date: Sat, 5 Oct 2024 22:49:58 +0530 Subject: [PATCH 1437/1543] Added new algorithm to generate numbers in lexicographical order (#11674) * Added algorithm to generate numbers in lexicographical order * Removed the test cases * Updated camelcase to snakecase * Added doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added descriptive name for n * Reduced the number of letters * Updated the return type * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated import statement * Updated return type to Iterator[int] * removed parentheses --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../stacks/lexicographical_numbers.py | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 data_structures/stacks/lexicographical_numbers.py diff --git a/data_structures/stacks/lexicographical_numbers.py b/data_structures/stacks/lexicographical_numbers.py new file mode 100644 index 000000000000..6a174e7d9e95 --- /dev/null +++ b/data_structures/stacks/lexicographical_numbers.py @@ -0,0 +1,38 @@ +from collections.abc import Iterator + + +def lexical_order(max_number: int) -> Iterator[int]: + """ + Generate numbers in lexical order from 1 to max_number. 
+ + >>> " ".join(map(str, lexical_order(13))) + '1 10 11 12 13 2 3 4 5 6 7 8 9' + >>> list(lexical_order(1)) + [1] + >>> " ".join(map(str, lexical_order(20))) + '1 10 11 12 13 14 15 16 17 18 19 2 20 3 4 5 6 7 8 9' + >>> " ".join(map(str, lexical_order(25))) + '1 10 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 3 4 5 6 7 8 9' + >>> list(lexical_order(12)) + [1, 10, 11, 12, 2, 3, 4, 5, 6, 7, 8, 9] + """ + + stack = [1] + + while stack: + num = stack.pop() + if num > max_number: + continue + + yield num + if (num % 10) != 9: + stack.append(num + 1) + + stack.append(num * 10) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + print(f"Numbers from 1 to 25 in lexical order: {list(lexical_order(26))}") From 50aca04c67315ef7de7ef03e51a018075d8d026b Mon Sep 17 00:00:00 2001 From: Jeel Rupapara Date: Sat, 5 Oct 2024 22:51:43 +0530 Subject: [PATCH 1438/1543] feat: increase test coverage of longest_common_subsequence to 75% (#11777) --- .../longest_common_subsequence.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/dynamic_programming/longest_common_subsequence.py b/dynamic_programming/longest_common_subsequence.py index 9a98b1736ed5..4a6c880aff61 100644 --- a/dynamic_programming/longest_common_subsequence.py +++ b/dynamic_programming/longest_common_subsequence.py @@ -28,6 +28,24 @@ def longest_common_subsequence(x: str, y: str): (2, 'ph') >>> longest_common_subsequence("computer", "food") (1, 'o') + >>> longest_common_subsequence("", "abc") # One string is empty + (0, '') + >>> longest_common_subsequence("abc", "") # Other string is empty + (0, '') + >>> longest_common_subsequence("", "") # Both strings are empty + (0, '') + >>> longest_common_subsequence("abc", "def") # No common subsequence + (0, '') + >>> longest_common_subsequence("abc", "abc") # Identical strings + (3, 'abc') + >>> longest_common_subsequence("a", "a") # Single character match + (1, 'a') + >>> longest_common_subsequence("a", "b") # Single character no match + (0, '') + >>> longest_common_subsequence("abcdef", "ace") # Interleaved subsequence + (3, 'ace') + >>> longest_common_subsequence("ABCD", "ACBD") # No repeated characters + (3, 'ABD') """ # find the length of strings From ad6395d3408b9d80a0bef4d180d1e7613a55d807 Mon Sep 17 00:00:00 2001 From: Andrey Ivanov <97749666+ivnvxd@users.noreply.github.com> Date: Sat, 5 Oct 2024 18:24:58 +0100 Subject: [PATCH 1439/1543] Update ruff usage example in CONTRIBUTING.md (#11772) * Update ruff usage example * Update CONTRIBUTING.md Co-authored-by: Tianyi Zheng --------- Co-authored-by: Tianyi Zheng --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 096582e45afa..b5113212929a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -96,7 +96,7 @@ We want your work to be readable by others; therefore, we encourage you to note ```bash python3 -m pip install ruff # only required the first time - ruff . + ruff check ``` - Original code submission require docstrings or comments to describe your work. 
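Note on the longest_common_subsequence doctests added above (PATCH 1438): expected outputs such as (3, 'ABD') for "ABCD" vs "ACBD" follow from the textbook LCS dynamic-programming recurrence. The sketch below is only a minimal, self-contained reference for that recurrence; the helper name lcs_sketch is illustrative and not part of the repository, and this is not the implementation in dynamic_programming/longest_common_subsequence.py, whose full body is not shown in that hunk.

def lcs_sketch(x: str, y: str) -> tuple[int, str]:
    """Illustrative LCS sketch (not the repository implementation).

    >>> lcs_sketch("ABCD", "ACBD")
    (3, 'ABD')
    >>> lcs_sketch("abcdef", "ace")
    (3, 'ace')
    >>> lcs_sketch("abc", "def")
    (0, '')
    """
    m, n = len(x), len(y)
    # dp[i][j] holds the LCS length of the prefixes x[:i] and y[:j].
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if x[i - 1] == y[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
            else:
                dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
    # Walk the table backwards to recover one longest subsequence.
    chars: list[str] = []
    i, j = m, n
    while i > 0 and j > 0:
        if x[i - 1] == y[j - 1]:
            chars.append(x[i - 1])
            i -= 1
            j -= 1
        elif dp[i - 1][j] >= dp[i][j - 1]:
            i -= 1  # on ties, prefer moving up the table
        else:
            j -= 1
    return dp[m][n], "".join(reversed(chars))

In this sketch the tie-breaking choice during the walk-back decides which of several equally long subsequences is reported: preferring to move up the table on ties yields 'ABD' for "ABCD" vs "ACBD", while preferring to move left would yield 'ACD' of the same length.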
From fcf82a1eda21dcf36254a8fcaadc913f6a94c8da Mon Sep 17 00:00:00 2001 From: Vineet Kumar <108144301+whyvineet@users.noreply.github.com> Date: Sat, 5 Oct 2024 23:04:48 +0530 Subject: [PATCH 1440/1543] =?UTF-8?q?Implemented=20Exponential=20Search=20?= =?UTF-8?q?with=20binary=20search=20for=20improved=20perfor=E2=80=A6=20(#1?= =?UTF-8?q?1666)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Implemented Exponential Search with binary search for improved performance on large sorted arrays. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added type hints and doctests for binary_search and exponential_search functions. Improved code documentation and ensured testability. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename Exponential_Search.py to exponential_search.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- searches/exponential_search.py | 113 +++++++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 searches/exponential_search.py diff --git a/searches/exponential_search.py b/searches/exponential_search.py new file mode 100644 index 000000000000..ed09b14e101c --- /dev/null +++ b/searches/exponential_search.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 + +""" +Pure Python implementation of exponential search algorithm + +For more information, see the Wikipedia page: +https://en.wikipedia.org/wiki/Exponential_search + +For doctests run the following command: +python3 -m doctest -v exponential_search.py + +For manual testing run: +python3 exponential_search.py +""" + +from __future__ import annotations + + +def binary_search_by_recursion( + sorted_collection: list[int], item: int, left: int = 0, right: int = -1 +) -> int: + """Pure implementation of binary search algorithm in Python using recursion + + Be careful: the collection must be ascending sorted otherwise, the result will be + unpredictable. + + :param sorted_collection: some ascending sorted collection with comparable items + :param item: item value to search + :param left: starting index for the search + :param right: ending index for the search + :return: index of the found item or -1 if the item is not found + + Examples: + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 0, 0, 4) + 0 + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 15, 0, 4) + 4 + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 5, 0, 4) + 1 + >>> binary_search_by_recursion([0, 5, 7, 10, 15], 6, 0, 4) + -1 + """ + if right < 0: + right = len(sorted_collection) - 1 + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") + if right < left: + return -1 + + midpoint = left + (right - left) // 2 + + if sorted_collection[midpoint] == item: + return midpoint + elif sorted_collection[midpoint] > item: + return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1) + else: + return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right) + + +def exponential_search(sorted_collection: list[int], item: int) -> int: + """ + Pure implementation of an exponential search algorithm in Python. 
+ For more information, refer to: + https://en.wikipedia.org/wiki/Exponential_search + + Be careful: the collection must be ascending sorted, otherwise the result will be + unpredictable. + + :param sorted_collection: some ascending sorted collection with comparable items + :param item: item value to search + :return: index of the found item or -1 if the item is not found + + The time complexity of this algorithm is O(log i) where i is the index of the item. + + Examples: + >>> exponential_search([0, 5, 7, 10, 15], 0) + 0 + >>> exponential_search([0, 5, 7, 10, 15], 15) + 4 + >>> exponential_search([0, 5, 7, 10, 15], 5) + 1 + >>> exponential_search([0, 5, 7, 10, 15], 6) + -1 + """ + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") + + if sorted_collection[0] == item: + return 0 + + bound = 1 + while bound < len(sorted_collection) and sorted_collection[bound] < item: + bound *= 2 + + left = bound // 2 + right = min(bound, len(sorted_collection) - 1) + return binary_search_by_recursion(sorted_collection, item, left, right) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Manual testing + user_input = input("Enter numbers separated by commas: ").strip() + collection = sorted(int(item) for item in user_input.split(",")) + target = int(input("Enter a number to search for: ")) + result = exponential_search(sorted_collection=collection, item=target) + if result == -1: + print(f"{target} was not found in {collection}.") + else: + print(f"{target} was found at index {result} in {collection}.") From 3422ebc75bda6aba9b234eb217a79f25bec65f21 Mon Sep 17 00:00:00 2001 From: Jeel Rupapara Date: Mon, 7 Oct 2024 12:00:11 +0530 Subject: [PATCH 1441/1543] feat: add testcase of polynom_for_points (#11811) * feat: add testcase of polynom_for_points * fix: remove the print from the testcase of points_to_polynomial * fix: remove print statement from old test cases --- linear_algebra/src/polynom_for_points.py | 42 ++++++++++++++---------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/linear_algebra/src/polynom_for_points.py b/linear_algebra/src/polynom_for_points.py index a9a9a8117c18..452f3edd4aee 100644 --- a/linear_algebra/src/polynom_for_points.py +++ b/linear_algebra/src/polynom_for_points.py @@ -3,30 +3,36 @@ def points_to_polynomial(coordinates: list[list[int]]) -> str: coordinates is a two dimensional matrix: [[x, y], [x, y], ...] number of points you want to use - >>> print(points_to_polynomial([])) + >>> points_to_polynomial([]) Traceback (most recent call last): ... ValueError: The program cannot work out a fitting polynomial. - >>> print(points_to_polynomial([[]])) + >>> points_to_polynomial([[]]) + Traceback (most recent call last): + ... + ValueError: The program cannot work out a fitting polynomial. 
+ >>> points_to_polynomial([[1, 0], [2, 0], [3, 0]]) + 'f(x)=x^2*0.0+x^1*-0.0+x^0*0.0' + >>> points_to_polynomial([[1, 1], [2, 1], [3, 1]]) + 'f(x)=x^2*0.0+x^1*-0.0+x^0*1.0' + >>> points_to_polynomial([[1, 3], [2, 3], [3, 3]]) + 'f(x)=x^2*0.0+x^1*-0.0+x^0*3.0' + >>> points_to_polynomial([[1, 1], [2, 2], [3, 3]]) + 'f(x)=x^2*0.0+x^1*1.0+x^0*0.0' + >>> points_to_polynomial([[1, 1], [2, 4], [3, 9]]) + 'f(x)=x^2*1.0+x^1*-0.0+x^0*0.0' + >>> points_to_polynomial([[1, 3], [2, 6], [3, 11]]) + 'f(x)=x^2*1.0+x^1*-0.0+x^0*2.0' + >>> points_to_polynomial([[1, -3], [2, -6], [3, -11]]) + 'f(x)=x^2*-1.0+x^1*-0.0+x^0*-2.0' + >>> points_to_polynomial([[1, 5], [2, 2], [3, 9]]) + 'f(x)=x^2*5.0+x^1*-18.0+x^0*18.0' + >>> points_to_polynomial([[1, 1], [1, 2], [1, 3]]) + 'x=1' + >>> points_to_polynomial([[1, 1], [2, 2], [2, 2]]) Traceback (most recent call last): ... ValueError: The program cannot work out a fitting polynomial. - >>> print(points_to_polynomial([[1, 0], [2, 0], [3, 0]])) - f(x)=x^2*0.0+x^1*-0.0+x^0*0.0 - >>> print(points_to_polynomial([[1, 1], [2, 1], [3, 1]])) - f(x)=x^2*0.0+x^1*-0.0+x^0*1.0 - >>> print(points_to_polynomial([[1, 3], [2, 3], [3, 3]])) - f(x)=x^2*0.0+x^1*-0.0+x^0*3.0 - >>> print(points_to_polynomial([[1, 1], [2, 2], [3, 3]])) - f(x)=x^2*0.0+x^1*1.0+x^0*0.0 - >>> print(points_to_polynomial([[1, 1], [2, 4], [3, 9]])) - f(x)=x^2*1.0+x^1*-0.0+x^0*0.0 - >>> print(points_to_polynomial([[1, 3], [2, 6], [3, 11]])) - f(x)=x^2*1.0+x^1*-0.0+x^0*2.0 - >>> print(points_to_polynomial([[1, -3], [2, -6], [3, -11]])) - f(x)=x^2*-1.0+x^1*-0.0+x^0*-2.0 - >>> print(points_to_polynomial([[1, 5], [2, 2], [3, 9]])) - f(x)=x^2*5.0+x^1*-18.0+x^0*18.0 """ if len(coordinates) == 0 or not all(len(pair) == 2 for pair in coordinates): raise ValueError("The program cannot work out a fitting polynomial.") From cfd6d095f122d1d3ef2f3c2cdcf84864aac56fa7 Mon Sep 17 00:00:00 2001 From: 1227haran <68032825+1227haran@users.noreply.github.com> Date: Mon, 7 Oct 2024 14:06:15 +0530 Subject: [PATCH 1442/1543] Added max_sum_bst.py (#11832) * Added new algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated filename * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * Updated code * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated the code * Updated code * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * updated * [pre-commit.ci] auto fixes from pre-commit.com hooks for more 
information, see https://pre-commit.ci * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Update maximum_sum_bst.py * def max_sum_bst(root: TreeNode | None) -> int: * def solver(node: TreeNode | None) -> tuple[bool, int, int, int]: --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/maximum_sum_bst.py | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 data_structures/binary_tree/maximum_sum_bst.py diff --git a/data_structures/binary_tree/maximum_sum_bst.py b/data_structures/binary_tree/maximum_sum_bst.py new file mode 100644 index 000000000000..7dadc7b95920 --- /dev/null +++ b/data_structures/binary_tree/maximum_sum_bst.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +import sys +from dataclasses import dataclass + +INT_MIN = -sys.maxsize + 1 +INT_MAX = sys.maxsize - 1 + + +@dataclass +class TreeNode: + val: int = 0 + left: TreeNode | None = None + right: TreeNode | None = None + + +def max_sum_bst(root: TreeNode | None) -> int: + """ + The solution traverses a binary tree to find the maximum sum of + keys in any subtree that is a Binary Search Tree (BST). It uses + recursion to validate BST properties and calculates sums, returning + the highest sum found among all valid BST subtrees. 
+ + >>> t1 = TreeNode(4) + >>> t1.left = TreeNode(3) + >>> t1.left.left = TreeNode(1) + >>> t1.left.right = TreeNode(2) + >>> print(max_sum_bst(t1)) + 2 + >>> t2 = TreeNode(-4) + >>> t2.left = TreeNode(-2) + >>> t2.right = TreeNode(-5) + >>> print(max_sum_bst(t2)) + 0 + >>> t3 = TreeNode(1) + >>> t3.left = TreeNode(4) + >>> t3.left.left = TreeNode(2) + >>> t3.left.right = TreeNode(4) + >>> t3.right = TreeNode(3) + >>> t3.right.left = TreeNode(2) + >>> t3.right.right = TreeNode(5) + >>> t3.right.right.left = TreeNode(4) + >>> t3.right.right.right = TreeNode(6) + >>> print(max_sum_bst(t3)) + 20 + """ + ans: int = 0 + + def solver(node: TreeNode | None) -> tuple[bool, int, int, int]: + """ + Returns the maximum sum by making recursive calls + >>> t1 = TreeNode(1) + >>> print(solver(t1)) + 1 + """ + nonlocal ans + + if not node: + return True, INT_MAX, INT_MIN, 0 # Valid BST, min, max, sum + + is_left_valid, min_left, max_left, sum_left = solver(node.left) + is_right_valid, min_right, max_right, sum_right = solver(node.right) + + if is_left_valid and is_right_valid and max_left < node.val < min_right: + total_sum = sum_left + sum_right + node.val + ans = max(ans, total_sum) + return True, min(min_left, node.val), max(max_right, node.val), total_sum + + return False, -1, -1, -1 # Not a valid BST + + solver(root) + return ans + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From dba8eecb47cea7f11ac383344524afbc0ca7cf5b Mon Sep 17 00:00:00 2001 From: Lonercode <91500485+Lonercode@users.noreply.github.com> Date: Mon, 7 Oct 2024 10:58:07 +0100 Subject: [PATCH 1443/1543] added gronsfeld cipher implementation (#11835) * added gronsfeld cipher implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * from string import ascii_uppercase * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gronsfeld_cipher.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ciphers/gronsfeld_cipher.py | 45 +++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 ciphers/gronsfeld_cipher.py diff --git a/ciphers/gronsfeld_cipher.py b/ciphers/gronsfeld_cipher.py new file mode 100644 index 000000000000..8fbeab4307fc --- /dev/null +++ b/ciphers/gronsfeld_cipher.py @@ -0,0 +1,45 @@ +from string import ascii_uppercase + + +def gronsfeld(text: str, key: str) -> str: + """ + Encrypt plaintext with the Gronsfeld cipher + + >>> gronsfeld('hello', '412') + 'LFNPP' + >>> gronsfeld('hello', '123') + 'IGOMQ' + >>> gronsfeld('', '123') + '' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '0') + 'YES, ¥€$ - _!@#%?' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '01') + 'YFS, ¥€$ - _!@#%?' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '012') + 'YFU, ¥€$ - _!@#%?' + >>> gronsfeld('yes, ¥€$ - _!@#%?', '') + Traceback (most recent call last): + ... 
+ ZeroDivisionError: integer modulo by zero + """ + ascii_len = len(ascii_uppercase) + key_len = len(key) + encrypted_text = "" + keys = [int(char) for char in key] + upper_case_text = text.upper() + + for i, char in enumerate(upper_case_text): + if char in ascii_uppercase: + new_position = (ascii_uppercase.index(char) + keys[i % key_len]) % ascii_len + shifted_letter = ascii_uppercase[new_position] + encrypted_text += shifted_letter + else: + encrypted_text += char + + return encrypted_text + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 2d671df073770f0122658f462c17b838ddbe4d2a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 22:49:29 +0200 Subject: [PATCH 1444/1543] [pre-commit.ci] pre-commit autoupdate (#11874) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/pre-commit/pre-commit-hooks: v4.6.0 → v5.0.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.6.0...v5.0.0) - [github.com/astral-sh/ruff-pre-commit: v0.6.8 → v0.6.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.8...v0.6.9) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8a8e5c1f6ad9..77541027afb3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: check-executables-have-shebangs - id: check-toml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.8 + rev: v0.6.9 hooks: - id: ruff - id: ruff-format diff --git a/DIRECTORY.md b/DIRECTORY.md index cdbbac684fd2..0a3be2a06533 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -22,6 +22,7 @@ * [Rat In Maze](backtracking/rat_in_maze.py) * [Sudoku](backtracking/sudoku.py) * [Sum Of Subsets](backtracking/sum_of_subsets.py) + * [Word Break](backtracking/word_break.py) * [Word Ladder](backtracking/word_ladder.py) * [Word Search](backtracking/word_search.py) @@ -99,6 +100,7 @@ * [Elgamal Key Generator](ciphers/elgamal_key_generator.py) * [Enigma Machine2](ciphers/enigma_machine2.py) * [Fractionated Morse Cipher](ciphers/fractionated_morse_cipher.py) + * [Gronsfeld Cipher](ciphers/gronsfeld_cipher.py) * [Hill Cipher](ciphers/hill_cipher.py) * [Mixed Keyword Cypher](ciphers/mixed_keyword_cypher.py) * [Mono Alphabetic Ciphers](ciphers/mono_alphabetic_ciphers.py) @@ -211,6 +213,7 @@ * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) + * [Maximum Sum Bst](data_structures/binary_tree/maximum_sum_bst.py) * [Merge Two Binary Trees](data_structures/binary_tree/merge_two_binary_trees.py) * [Mirror Binary Tree](data_structures/binary_tree/mirror_binary_tree.py) * [Non Recursive Segment Tree](data_structures/binary_tree/non_recursive_segment_tree.py) @@ -284,6 +287,7 @@ * [Dijkstras Two Stack Algorithm](data_structures/stacks/dijkstras_two_stack_algorithm.py) * [Infix To Postfix 
Conversion](data_structures/stacks/infix_to_postfix_conversion.py) * [Infix To Prefix Conversion](data_structures/stacks/infix_to_prefix_conversion.py) + * [Lexicographical Numbers](data_structures/stacks/lexicographical_numbers.py) * [Next Greater Element](data_structures/stacks/next_greater_element.py) * [Postfix Evaluation](data_structures/stacks/postfix_evaluation.py) * [Prefix Evaluation](data_structures/stacks/prefix_evaluation.py) @@ -1201,6 +1205,7 @@ * [Binary Tree Traversal](searches/binary_tree_traversal.py) * [Double Linear Search](searches/double_linear_search.py) * [Double Linear Search Recursion](searches/double_linear_search_recursion.py) + * [Exponential Search](searches/exponential_search.py) * [Fibonacci Search](searches/fibonacci_search.py) * [Hill Climbing](searches/hill_climbing.py) * [Interpolation Search](searches/interpolation_search.py) From 260e3d8b350c64e927ecb1d62b953b8bf25490ea Mon Sep 17 00:00:00 2001 From: Jeel Rupapara Date: Tue, 8 Oct 2024 17:03:28 +0530 Subject: [PATCH 1445/1543] feat: add test cases in cipher's autokey (#11881) --- ciphers/autokey.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/ciphers/autokey.py b/ciphers/autokey.py index 8683e6d37001..05d8c066b139 100644 --- a/ciphers/autokey.py +++ b/ciphers/autokey.py @@ -24,6 +24,14 @@ def encrypt(plaintext: str, key: str) -> str: Traceback (most recent call last): ... ValueError: plaintext is empty + >>> encrypt("coffee is good as python", "") + Traceback (most recent call last): + ... + ValueError: key is empty + >>> encrypt(527.26, "TheAlgorithms") + Traceback (most recent call last): + ... + TypeError: plaintext must be a string """ if not isinstance(plaintext, str): raise TypeError("plaintext must be a string") @@ -80,6 +88,14 @@ def decrypt(ciphertext: str, key: str) -> str: Traceback (most recent call last): ... TypeError: ciphertext must be a string + >>> decrypt("", "TheAlgorithms") + Traceback (most recent call last): + ... + ValueError: ciphertext is empty + >>> decrypt("vvjfpk wj ohvp su ddylsv", 2) + Traceback (most recent call last): + ... 
+ TypeError: key must be a string """ if not isinstance(ciphertext, str): raise TypeError("ciphertext must be a string") From e9e7c964655015819e0120694465928df1abefb0 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 8 Oct 2024 19:09:28 +0200 Subject: [PATCH 1446/1543] Create GitHub Pages docs with Sphinx (#11888) --- .devcontainer/Dockerfile | 2 +- .devcontainer/devcontainer.json | 2 +- .github/CODEOWNERS | 2 - .github/workflows/build.yml | 3 +- .github/workflows/sphinx.yml | 50 +++++++++ CONTRIBUTING.md | 2 +- DIRECTORY.md | 3 + LICENSE.md | 2 +- docs/{source => }/__init__.py | 0 docs/conf.py | 3 + financial/{ABOUT.md => README.md} | 2 +- index.md | 10 ++ .../{local_weighted_learning.md => README.md} | 0 pyproject.toml | 106 +++++++++++++++++- requirements.txt | 1 + source/__init__.py | 0 16 files changed, 179 insertions(+), 9 deletions(-) create mode 100644 .github/workflows/sphinx.yml rename docs/{source => }/__init__.py (100%) create mode 100644 docs/conf.py rename financial/{ABOUT.md => README.md} (97%) create mode 100644 index.md rename machine_learning/local_weighted_learning/{local_weighted_learning.md => README.md} (100%) delete mode 100644 source/__init__.py diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 6aa0073bf95b..a0bd05f47ec8 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,5 +1,5 @@ # https://github.com/microsoft/vscode-dev-containers/blob/main/containers/python-3/README.md -ARG VARIANT=3.12-bookworm +ARG VARIANT=3.13-bookworm FROM mcr.microsoft.com/vscode/devcontainers/python:${VARIANT} COPY requirements.txt /tmp/pip-tmp/ RUN python3 -m pip install --upgrade pip \ diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index ae1d4fb7494d..e23263f5b9de 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -7,7 +7,7 @@ // Update 'VARIANT' to pick a Python version: 3, 3.11, 3.10, 3.9, 3.8 // Append -bullseye or -buster to pin to an OS version. // Use -bullseye variants on local on arm64/Apple Silicon. - "VARIANT": "3.12-bookworm", + "VARIANT": "3.13-bookworm", } }, diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d2ac43c7df31..3cc25d1bae1c 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -9,8 +9,6 @@ /.* @cclauss -# /arithmetic_analysis/ - # /backtracking/ # /bit_manipulation/ diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f54cc982d1ec..b5703e2f1ab6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,12 +25,13 @@ jobs: - name: Run tests # TODO: #8818 Re-enable quantum tests run: pytest - --ignore=quantum/q_fourier_transform.py --ignore=computer_vision/cnn_classification.py + --ignore=docs/conf.py --ignore=dynamic_programming/k_means_clustering_tensorflow.py --ignore=machine_learning/lstm/lstm_prediction.py --ignore=neural_network/input_data.py --ignore=project_euler/ + --ignore=quantum/q_fourier_transform.py --ignore=scripts/validate_solutions.py --cov-report=term-missing:skip-covered --cov=. . 
diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml new file mode 100644 index 000000000000..9dfe344f9743 --- /dev/null +++ b/.github/workflows/sphinx.yml @@ -0,0 +1,50 @@ +name: sphinx + +on: + # Triggers the workflow on push or pull request events but only for the "master" branch + push: + branches: ["master"] + pull_request: + branches: ["master"] + # Or manually from the Actions tab + workflow_dispatch: + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + build_docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.13 + allow-prereleases: true + - run: pip install --upgrade pip + - run: pip install myst-parser sphinx-autoapi sphinx-pyproject + - uses: actions/configure-pages@v5 + - run: sphinx-build -c docs . docs/_build/html + - uses: actions/upload-pages-artifact@v3 + with: + path: docs/_build/html + + deploy_docs: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + if: github.event_name != 'pull_request' + needs: build_docs + runs-on: ubuntu-latest + steps: + - uses: actions/deploy-pages@v4 + id: deployment diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b5113212929a..3df39f95b784 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -77,7 +77,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.12+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. +- Please write in Python 3.13+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. 
diff --git a/DIRECTORY.md b/DIRECTORY.md index 0a3be2a06533..f0a34a553946 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -351,6 +351,9 @@ * [Power](divide_and_conquer/power.py) * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) +## Docs + * [Conf](docs/conf.py) + ## Dynamic Programming * [Abbreviation](dynamic_programming/abbreviation.py) * [All Construct](dynamic_programming/all_construct.py) diff --git a/LICENSE.md b/LICENSE.md index 2897d02e2a01..de631c3ef333 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,4 +1,4 @@ -MIT License +## MIT License Copyright (c) 2016-2022 TheAlgorithms and contributors diff --git a/docs/source/__init__.py b/docs/__init__.py similarity index 100% rename from docs/source/__init__.py rename to docs/__init__.py diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000000..f2481f107267 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,3 @@ +from sphinx_pyproject import SphinxConfig + +project = SphinxConfig("../pyproject.toml", globalns=globals()).name diff --git a/financial/ABOUT.md b/financial/README.md similarity index 97% rename from financial/ABOUT.md rename to financial/README.md index f6b0647f8201..e5d3a84c8381 100644 --- a/financial/ABOUT.md +++ b/financial/README.md @@ -1,4 +1,4 @@ -### Interest +# Interest * Compound Interest: "Compound interest is calculated by multiplying the initial principal amount by one plus the annual interest rate raised to the number of compound periods minus one." [Compound Interest](https://www.investopedia.com/) * Simple Interest: "Simple interest paid or received over a certain period is a fixed percentage of the principal amount that was borrowed or lent. " [Simple Interest](https://www.investopedia.com/) diff --git a/index.md b/index.md new file mode 100644 index 000000000000..134520cb94aa --- /dev/null +++ b/index.md @@ -0,0 +1,10 @@ +# TheAlgorithms/Python +```{toctree} +:maxdepth: 2 +:caption: index.md + + +CONTRIBUTING.md +README.md +LICENSE.md +``` diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.md b/machine_learning/local_weighted_learning/README.md similarity index 100% rename from machine_learning/local_weighted_learning/local_weighted_learning.md rename to machine_learning/local_weighted_learning/README.md diff --git a/pyproject.toml b/pyproject.toml index bb8657183164..c57419e79db3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,22 @@ +[project] +name = "thealgorithms-python" +version = "0.0.1" +description = "TheAlgorithms in Python" +authors = [ { name = "TheAlgorithms Contributors" } ] +requires-python = ">=3.13" +classifiers = [ + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.13", + +] +optional-dependencies.docs = [ + "myst-parser", + "sphinx-autoapi", + "sphinx-pyproject", +] + [tool.ruff] -target-version = "py312" +target-version = "py313" output-format = "full" lint.select = [ @@ -113,6 +130,9 @@ lint.pylint.max-statements = 88 # default: 50 ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" +[tool.pyproject-fmt] +max_supported_python = "3.13" + [tool.pytest.ini_options] markers = [ "mat_ops: mark a test as utilizing matrix operations.", @@ -129,3 +149,87 @@ omit = [ "project_euler/*", ] sort = "Cover" + +[tool.sphinx-pyproject] +copyright = "2014, TheAlgorithms" 
+autoapi_dirs = [ + "audio_filters", + "backtracking", + "bit_manipulation", + "blockchain", + "boolean_algebra", + "cellular_automata", + "ciphers", + "compression", + "computer_vision", + "conversions", + "data_structures", + "digital_image_processing", + "divide_and_conquer", + "dynamic_programming", + "electronics", + "file_transfer", + "financial", + "fractals", + "fuzzy_logic", + "genetic_algorithm", + "geodesy", + "geometry", + "graphics", + "graphs", + "greedy_methods", + "hashes", + "knapsack", + "linear_algebra", + "linear_programming", + "machine_learning", + "maths", + "matrix", + "networking_flow", + "neural_network", + "other", + "physics", + "project_euler", + "quantum", + "scheduling", + "searches", + "sorts", + "strings", + "web_programming", +] +autoapi_member_order = "groupwise" +# autoapi_python_use_implicit_namespaces = true +exclude_patterns = [ + ".*/*", + "docs/", +] +extensions = [ + "autoapi.extension", + "myst_parser", +] +html_static_path = [ "_static" ] +html_theme = "alabaster" +myst_enable_extensions = [ + "amsmath", + "attrs_inline", + "colon_fence", + "deflist", + "dollarmath", + "fieldlist", + "html_admonition", + "html_image", + # "linkify", + "replacements", + "smartquotes", + "strikethrough", + "substitution", + "tasklist", +] +myst_fence_as_directive = [ + "include", +] +templates_path = [ "_templates" ] +[tool.sphinx-pyproject.source_suffix] +".rst" = "restructuredtext" +# ".txt" = "markdown" +".md" = "markdown" diff --git a/requirements.txt b/requirements.txt index afbf25ba6edc..6754363332c4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,6 +15,7 @@ requests rich # scikit-fuzzy # uncomment once fuzzy_logic/fuzzy_operations.py is fixed scikit-learn +sphinx_pyproject statsmodels sympy tensorflow ; python_version < '3.13' diff --git a/source/__init__.py b/source/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 From 03a42510b01c574292ca9c6525cbf0572ff5a2a5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 22:42:24 +0200 Subject: [PATCH 1447/1543] [pre-commit.ci] pre-commit autoupdate (#12071) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/tox-dev/pyproject-fmt: 2.2.4 → 2.3.0](https://github.com/tox-dev/pyproject-fmt/compare/2.2.4...2.3.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 77541027afb3..e1d185fabc12 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.2.4" + rev: "2.3.0" hooks: - id: pyproject-fmt From 6e24935f8860965dd7f2f5a50fd05724e84e9e8d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 11:22:34 +0200 Subject: [PATCH 1448/1543] [pre-commit.ci] pre-commit autoupdate (#12234) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.9 → v0.7.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.9...v0.7.0) - [github.com/tox-dev/pyproject-fmt: 2.3.0 → 2.4.3](https://github.com/tox-dev/pyproject-fmt/compare/2.3.0...2.4.3) - 
[github.com/abravalheri/validate-pyproject: v0.20.2 → v0.21](https://github.com/abravalheri/validate-pyproject/compare/v0.20.2...v0.21) - [github.com/pre-commit/mirrors-mypy: v1.11.2 → v1.12.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.11.2...v1.12.1) * project_euler/problem_047/sol1.py: def solution(n: int = 4) -> int | None: * Update sol1.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 8 ++++---- project_euler/problem_047/sol1.py | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e1d185fabc12..a849de0c4e16 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.9 + rev: v0.7.0 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.3.0" + rev: "2.4.3" hooks: - id: pyproject-fmt @@ -42,12 +42,12 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.20.2 + rev: v0.21 hooks: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.11.2 + rev: v1.12.1 hooks: - id: mypy args: diff --git a/project_euler/problem_047/sol1.py b/project_euler/problem_047/sol1.py index 4ecd4f4b44c1..d174de27dcd0 100644 --- a/project_euler/problem_047/sol1.py +++ b/project_euler/problem_047/sol1.py @@ -24,7 +24,7 @@ def unique_prime_factors(n: int) -> set: """ Find unique prime factors of an integer. - Tests include sorting because only the set really matters, + Tests include sorting because only the set matters, not the order in which it is produced. >>> sorted(set(unique_prime_factors(14))) [2, 7] @@ -58,7 +58,7 @@ def upf_len(num: int) -> int: def equality(iterable: list) -> bool: """ - Check equality of ALL elements in an iterable + Check the equality of ALL elements in an iterable >>> equality([1, 2, 3, 4]) False >>> equality([2, 2, 2, 2]) @@ -69,7 +69,7 @@ def equality(iterable: list) -> bool: return len(set(iterable)) in (0, 1) -def run(n: int) -> list: +def run(n: int) -> list[int]: """ Runs core process to find problem solution. >>> run(3) @@ -77,7 +77,7 @@ def run(n: int) -> list: """ # Incrementor variable for our group list comprehension. - # This serves as the first number in each list of values + # This is the first number in each list of values # to test. base = 2 @@ -85,7 +85,7 @@ def run(n: int) -> list: # Increment each value of a generated range group = [base + i for i in range(n)] - # Run elements through out unique_prime_factors function + # Run elements through the unique_prime_factors function # Append our target number to the end. checker = [upf_len(x) for x in group] checker.append(n) @@ -98,7 +98,7 @@ def run(n: int) -> list: base += 1 -def solution(n: int = 4) -> int: +def solution(n: int = 4) -> int | None: """Return the first value of the first four consecutive integers to have four distinct prime factors each. 
>>> solution() From 52602ea5b6dd8179aa662c002891c6506f519435 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 21:27:00 +0100 Subject: [PATCH 1449/1543] [pre-commit.ci] pre-commit autoupdate (#12313) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.0 → v0.7.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.0...v0.7.1) - [github.com/tox-dev/pyproject-fmt: 2.4.3 → v2.4.3](https://github.com/tox-dev/pyproject-fmt/compare/2.4.3...v2.4.3) - [github.com/abravalheri/validate-pyproject: v0.21 → v0.22](https://github.com/abravalheri/validate-pyproject/compare/v0.21...v0.22) - [github.com/pre-commit/mirrors-mypy: v1.12.1 → v1.13.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.12.1...v1.13.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a849de0c4e16..0828b715106d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.0 + rev: v0.7.1 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "2.4.3" + rev: "v2.4.3" hooks: - id: pyproject-fmt @@ -42,12 +42,12 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.21 + rev: v0.22 hooks: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.12.1 + rev: v1.13.0 hooks: - id: mypy args: From a19bede190ddb4fa3c1c9850b612a47fc69d6709 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 1 Nov 2024 13:40:09 +0100 Subject: [PATCH 1450/1543] Add scripts/find_git_conflicts.sh (#12343) --- scripts/find_git_conflicts.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100755 scripts/find_git_conflicts.sh diff --git a/scripts/find_git_conflicts.sh b/scripts/find_git_conflicts.sh new file mode 100755 index 000000000000..8af33fa75279 --- /dev/null +++ b/scripts/find_git_conflicts.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# Replace with your repository (format: owner/repo) +REPO="TheAlgorithms/Python" + +# Fetch open pull requests with conflicts into a variable +echo "Checking for pull requests with conflicts in $REPO..." + +prs=$(gh pr list --repo "$REPO" --state open --json number,title,mergeable --jq '.[] | select(.mergeable == "CONFLICTING") | {number, title}' --limit 500) + +# Process each conflicting PR +echo "$prs" | jq -c '.[]' | while read -r pr; do + PR_NUMBER=$(echo "$pr" | jq -r '.number') + PR_TITLE=$(echo "$pr" | jq -r '.title') + echo "PR #$PR_NUMBER - $PR_TITLE has conflicts." 
+done From 3e9ca92ca972bbe752d32b43c71a88789dce94c0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 21:09:03 +0100 Subject: [PATCH 1451/1543] [pre-commit.ci] pre-commit autoupdate (#12349) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.1 → v0.7.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.1...v0.7.2) - [github.com/tox-dev/pyproject-fmt: v2.4.3 → v2.5.0](https://github.com/tox-dev/pyproject-fmt/compare/v2.4.3...v2.5.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0828b715106d..f112ee553b51 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.1 + rev: v0.7.2 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "v2.4.3" + rev: "v2.5.0" hooks: - id: pyproject-fmt From e3f3d668be4ada7aee82eea0bc75c50436c1ab3a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 21:05:50 +0100 Subject: [PATCH 1452/1543] [pre-commit.ci] pre-commit autoupdate (#12370) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.2 → v0.7.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.2...v0.7.3) - [github.com/abravalheri/validate-pyproject: v0.22 → v0.23](https://github.com/abravalheri/validate-pyproject/compare/v0.22...v0.23) * Update sudoku_solver.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- data_structures/arrays/sudoku_solver.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f112ee553b51..9d794473cc01 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.2 + rev: v0.7.3 hooks: - id: ruff - id: ruff-format @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.22 + rev: v0.23 hooks: - id: validate-pyproject diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index a8157a520c97..70bcdc748195 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -172,7 +172,7 @@ def unitsolved(unit): def from_file(filename, sep="\n"): "Parse a file into a list of strings, separated by sep." 
- return open(filename).read().strip().split(sep) # noqa: SIM115 + return open(filename).read().strip().split(sep) def random_puzzle(assignments=17): From e3bd7721c8241a6db77254bac44757dced1b96f8 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 15 Nov 2024 14:59:14 +0100 Subject: [PATCH 1453/1543] `validate_filenames.py` Shebang `python` for Windows (#12371) --- scripts/validate_filenames.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/validate_filenames.py b/scripts/validate_filenames.py index 0890024dd349..e76b4dbfe288 100755 --- a/scripts/validate_filenames.py +++ b/scripts/validate_filenames.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!python import os try: From f3f32ae3ca818f64de2ed3267803882956681044 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 22:07:12 +0100 Subject: [PATCH 1454/1543] [pre-commit.ci] pre-commit autoupdate (#12385) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.3 → v0.7.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.3...v0.7.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9d794473cc01..6ad19f1fdcb1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.3 + rev: v0.7.4 hooks: - id: ruff - id: ruff-format From fc33c505935e9927cffb6142591891f721a7bcd9 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 21:46:20 +0100 Subject: [PATCH 1455/1543] [pre-commit.ci] pre-commit autoupdate (#12398) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.4.10 → v0.5.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.4.10...v0.5.0) - [github.com/pre-commit/mirrors-mypy: v1.10.0 → v1.10.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.10.0...v1.10.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- cellular_automata/conways_game_of_life.py | 6 ++--- ciphers/playfair_cipher.py | 2 +- ciphers/simple_keyword_cypher.py | 2 +- ciphers/transposition_cipher.py | 6 ++--- compression/lempel_ziv.py | 4 ++-- data_structures/arrays/sudoku_solver.py | 2 +- .../binary_tree/binary_tree_traversals.py | 24 ++++++++----------- data_structures/linked_list/deque_doubly.py | 2 +- data_structures/queue/double_ended_queue.py | 2 +- docs/source/__init__.py | 0 electronics/electrical_impedance.py | 2 +- graphs/ant_colony_optimization_algorithms.py | 6 ++--- graphs/basic_graphs.py | 22 ++++++++--------- graphs/minimum_spanning_tree_boruvka.py | 8 +++---- hashes/md5.py | 2 +- machine_learning/frequent_pattern_growth.py | 4 ++-- maths/collatz_sequence.py | 2 +- maths/prime_numbers.py | 6 ++--- maths/volume.py | 2 +- neural_network/input_data.py | 10 ++++---- physics/basic_orbital_capture.py | 9 ++++--- physics/grahams_law.py | 2 +- project_euler/problem_025/sol2.py | 2 +- project_euler/problem_123/sol1.py | 2 +- pyproject.toml | 1 + source/__init__.py | 0 strings/frequency_finder.py | 2 +- 
strings/min_cost_string_conversion.py | 8 +++---- web_programming/fetch_jobs.py | 2 +- 30 files changed, 66 insertions(+), 78 deletions(-) create mode 100644 docs/source/__init__.py create mode 100644 source/__init__.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6ad19f1fdcb1..64d9a833cd21 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.4 + rev: v0.8.0 hooks: - id: ruff - id: ruff-format diff --git a/cellular_automata/conways_game_of_life.py b/cellular_automata/conways_game_of_life.py index 364a34c3aba6..485f0d47bd8b 100644 --- a/cellular_automata/conways_game_of_life.py +++ b/cellular_automata/conways_game_of_life.py @@ -58,10 +58,8 @@ def new_generation(cells: list[list[int]]) -> list[list[int]]: # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. alive = cells[i][j] == 1 - if ( - (alive and 2 <= neighbour_count <= 3) - or not alive - and neighbour_count == 3 + if (alive and 2 <= neighbour_count <= 3) or ( + not alive and neighbour_count == 3 ): next_generation_row.append(1) else: diff --git a/ciphers/playfair_cipher.py b/ciphers/playfair_cipher.py index 86b45bc4fb6a..d48f113f02e0 100644 --- a/ciphers/playfair_cipher.py +++ b/ciphers/playfair_cipher.py @@ -24,7 +24,7 @@ from collections.abc import Generator, Iterable -def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]: +def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...]]: it = iter(seq) while True: chunk = tuple(itertools.islice(it, size)) diff --git a/ciphers/simple_keyword_cypher.py b/ciphers/simple_keyword_cypher.py index 1635471aebd1..9dc624e7762c 100644 --- a/ciphers/simple_keyword_cypher.py +++ b/ciphers/simple_keyword_cypher.py @@ -10,7 +10,7 @@ def remove_duplicates(key: str) -> str: key_no_dups = "" for ch in key: - if ch == " " or ch not in key_no_dups and ch.isalpha(): + if ch == " " or (ch not in key_no_dups and ch.isalpha()): key_no_dups += ch return key_no_dups diff --git a/ciphers/transposition_cipher.py b/ciphers/transposition_cipher.py index f1f07ddc3f35..76178cb6a1bc 100644 --- a/ciphers/transposition_cipher.py +++ b/ciphers/transposition_cipher.py @@ -52,10 +52,8 @@ def decrypt_message(key: int, message: str) -> str: plain_text[col] += symbol col += 1 - if ( - (col == num_cols) - or (col == num_cols - 1) - and (row >= num_rows - num_shaded_boxes) + if (col == num_cols) or ( + (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): col = 0 row += 1 diff --git a/compression/lempel_ziv.py b/compression/lempel_ziv.py index 2751a0ebcdb6..648b029471bd 100644 --- a/compression/lempel_ziv.py +++ b/compression/lempel_ziv.py @@ -35,8 +35,8 @@ def add_key_to_lexicon( lexicon[curr_string + "0"] = last_match_id if math.log2(index).is_integer(): - for curr_key in lexicon: - lexicon[curr_key] = "0" + lexicon[curr_key] + for curr_key, value in lexicon.items(): + lexicon[curr_key] = f"0{value}" lexicon[curr_string + "1"] = bin(index)[2:] diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 70bcdc748195..7e38e1465728 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -156,7 +156,7 @@ def time_solve(grid): times, results = zip(*[time_solve(grid) for grid in grids]) if (n := len(grids)) > 1: print( - "Solved %d of %d %s puzzles (avg %.2f secs (%d Hz), max %.2f secs)." 
+ "Solved %d of %d %s puzzles (avg %.2f secs (%d Hz), max %.2f secs)." # noqa: UP031 % (sum(results), n, name, sum(times) / n, n / sum(times), max(times)) ) diff --git a/data_structures/binary_tree/binary_tree_traversals.py b/data_structures/binary_tree/binary_tree_traversals.py index 49c208335b2c..5ba149d0cbc6 100644 --- a/data_structures/binary_tree/binary_tree_traversals.py +++ b/data_structures/binary_tree/binary_tree_traversals.py @@ -30,7 +30,7 @@ def make_tree() -> Node | None: return tree -def preorder(root: Node | None) -> Generator[int, None, None]: +def preorder(root: Node | None) -> Generator[int]: """ Pre-order traversal visits root node, left subtree, right subtree. >>> list(preorder(make_tree())) @@ -43,7 +43,7 @@ def preorder(root: Node | None) -> Generator[int, None, None]: yield from preorder(root.right) -def postorder(root: Node | None) -> Generator[int, None, None]: +def postorder(root: Node | None) -> Generator[int]: """ Post-order traversal visits left subtree, right subtree, root node. >>> list(postorder(make_tree())) @@ -56,7 +56,7 @@ def postorder(root: Node | None) -> Generator[int, None, None]: yield root.data -def inorder(root: Node | None) -> Generator[int, None, None]: +def inorder(root: Node | None) -> Generator[int]: """ In-order traversal visits left subtree, root node, right subtree. >>> list(inorder(make_tree())) @@ -69,7 +69,7 @@ def inorder(root: Node | None) -> Generator[int, None, None]: yield from inorder(root.right) -def reverse_inorder(root: Node | None) -> Generator[int, None, None]: +def reverse_inorder(root: Node | None) -> Generator[int]: """ Reverse in-order traversal visits right subtree, root node, left subtree. >>> list(reverse_inorder(make_tree())) @@ -93,7 +93,7 @@ def height(root: Node | None) -> int: return (max(height(root.left), height(root.right)) + 1) if root else 0 -def level_order(root: Node | None) -> Generator[int, None, None]: +def level_order(root: Node | None) -> Generator[int]: """ Returns a list of nodes value from a whole binary tree in Level Order Traverse. Level Order traverse: Visit nodes of the tree level-by-level. @@ -116,9 +116,7 @@ def level_order(root: Node | None) -> Generator[int, None, None]: process_queue.append(node.right) -def get_nodes_from_left_to_right( - root: Node | None, level: int -) -> Generator[int, None, None]: +def get_nodes_from_left_to_right(root: Node | None, level: int) -> Generator[int]: """ Returns a list of nodes value from a particular level: Left to right direction of the binary tree. @@ -128,7 +126,7 @@ def get_nodes_from_left_to_right( [2, 3] """ - def populate_output(root: Node | None, level: int) -> Generator[int, None, None]: + def populate_output(root: Node | None, level: int) -> Generator[int]: if not root: return if level == 1: @@ -140,9 +138,7 @@ def populate_output(root: Node | None, level: int) -> Generator[int, None, None] yield from populate_output(root, level) -def get_nodes_from_right_to_left( - root: Node | None, level: int -) -> Generator[int, None, None]: +def get_nodes_from_right_to_left(root: Node | None, level: int) -> Generator[int]: """ Returns a list of nodes value from a particular level: Right to left direction of the binary tree. 
@@ -152,7 +148,7 @@ def get_nodes_from_right_to_left( [3, 2] """ - def populate_output(root: Node | None, level: int) -> Generator[int, None, None]: + def populate_output(root: Node | None, level: int) -> Generator[int]: if not root: return if level == 1: @@ -164,7 +160,7 @@ def populate_output(root: Node | None, level: int) -> Generator[int, None, None] yield from populate_output(root, level) -def zigzag(root: Node | None) -> Generator[int, None, None]: +def zigzag(root: Node | None) -> Generator[int]: """ ZigZag traverse: Returns a list of nodes value from left to right and right to left, alternatively. diff --git a/data_structures/linked_list/deque_doubly.py b/data_structures/linked_list/deque_doubly.py index 2b9d70c223c4..e554ead91c5a 100644 --- a/data_structures/linked_list/deque_doubly.py +++ b/data_structures/linked_list/deque_doubly.py @@ -12,7 +12,7 @@ class _DoublyLinkedBase: """A Private class (to be inherited)""" class _Node: - __slots__ = "_prev", "_data", "_next" + __slots__ = "_data", "_next", "_prev" def __init__(self, link_p, element, link_n): self._prev = link_p diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 607d0bda3df4..c28d46c65168 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -33,7 +33,7 @@ class Deque: the number of nodes """ - __slots__ = ("_front", "_back", "_len") + __slots__ = ("_back", "_front", "_len") @dataclass class _Node: diff --git a/docs/source/__init__.py b/docs/source/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/electronics/electrical_impedance.py b/electronics/electrical_impedance.py index 44041ff790b6..4f4f1d308293 100644 --- a/electronics/electrical_impedance.py +++ b/electronics/electrical_impedance.py @@ -6,7 +6,7 @@ from __future__ import annotations -from math import pow, sqrt +from math import pow, sqrt # noqa: A004 def electrical_impedance( diff --git a/graphs/ant_colony_optimization_algorithms.py b/graphs/ant_colony_optimization_algorithms.py index 13637da44874..753f4c0962c8 100644 --- a/graphs/ant_colony_optimization_algorithms.py +++ b/graphs/ant_colony_optimization_algorithms.py @@ -194,10 +194,8 @@ def city_select( IndexError: list index out of range """ probabilities = [] - for city in unvisited_cities: - city_distance = distance( - unvisited_cities[city], next(iter(current_city.values())) - ) + for city, value in unvisited_cities.items(): + city_distance = distance(value, next(iter(current_city.values()))) probability = (pheromone[city][next(iter(current_city.keys()))] ** alpha) * ( (1 / city_distance) ** beta ) diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index 25c8045b3d2b..567fa65040ae 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -133,18 +133,18 @@ def dijk(g, s): if len(known) == len(g) - 1: break mini = 100000 - for i in dist: - if i not in known and dist[i] < mini: - mini = dist[i] - u = i + for key, value in dist: + if key not in known and value < mini: + mini = value + u = key known.add(u) for v in g[u]: if v[0] not in known and dist[u] + v[1] < dist.get(v[0], 100000): dist[v[0]] = dist[u] + v[1] path[v[0]] = u - for i in dist: - if i != s: - print(dist[i]) + for key, value in dist.items(): + if key != s: + print(value) """ @@ -255,10 +255,10 @@ def prim(g, s): if len(known) == len(g) - 1: break mini = 100000 - for i in dist: - if i not in known and dist[i] < mini: - mini = dist[i] - u = i + for key, value in dist.items(): + if key 
not in known and value < mini: + mini = value + u = key known.add(u) for v in g[u]: if v[0] not in known and v[1] < dist.get(v[0], 100000): diff --git a/graphs/minimum_spanning_tree_boruvka.py b/graphs/minimum_spanning_tree_boruvka.py index 3c6888037948..f234d65ab765 100644 --- a/graphs/minimum_spanning_tree_boruvka.py +++ b/graphs/minimum_spanning_tree_boruvka.py @@ -185,12 +185,12 @@ def boruvka_mst(graph): if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight: cheap_edge[set2] = [head, tail, weight] - for vertex in cheap_edge: - if cheap_edge[vertex] != -1: - head, tail, weight = cheap_edge[vertex] + for head_tail_weight in cheap_edge.values(): + if head_tail_weight != -1: + head, tail, weight = head_tail_weight if union_find.find(head) != union_find.find(tail): union_find.union(head, tail) - mst_edges.append(cheap_edge[vertex]) + mst_edges.append(head_tail_weight) num_components = num_components - 1 mst = Graph.build(edges=mst_edges) return mst diff --git a/hashes/md5.py b/hashes/md5.py index 622a50d290e1..f9d802ff0308 100644 --- a/hashes/md5.py +++ b/hashes/md5.py @@ -131,7 +131,7 @@ def preprocess(message: bytes) -> bytes: return bit_string -def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]: +def get_block_words(bit_string: bytes) -> Generator[list[int]]: """ Splits bit string into blocks of 512 chars and yields each block as a list of 32-bit words diff --git a/machine_learning/frequent_pattern_growth.py b/machine_learning/frequent_pattern_growth.py index 947f8692f298..fae2df16efb1 100644 --- a/machine_learning/frequent_pattern_growth.py +++ b/machine_learning/frequent_pattern_growth.py @@ -107,8 +107,8 @@ def create_tree(data_set: list, min_sup: int = 1) -> tuple[TreeNode, dict]: if not (freq_item_set := set(header_table)): return TreeNode("Null Set", 1, None), {} - for k in header_table: - header_table[k] = [header_table[k], None] + for key, value in header_table.items(): + header_table[key] = [value, None] fp_tree = TreeNode("Null Set", 1, None) # Parent is None for the root node for tran_set in data_set: diff --git a/maths/collatz_sequence.py b/maths/collatz_sequence.py index b47017146a1e..b00dca8d70b7 100644 --- a/maths/collatz_sequence.py +++ b/maths/collatz_sequence.py @@ -17,7 +17,7 @@ from collections.abc import Generator -def collatz_sequence(n: int) -> Generator[int, None, None]: +def collatz_sequence(n: int) -> Generator[int]: """ Generate the Collatz sequence starting at n. >>> tuple(collatz_sequence(2.1)) diff --git a/maths/prime_numbers.py b/maths/prime_numbers.py index 38cc6670385d..5ad12baf3dc3 100644 --- a/maths/prime_numbers.py +++ b/maths/prime_numbers.py @@ -2,7 +2,7 @@ from collections.abc import Generator -def slow_primes(max_n: int) -> Generator[int, None, None]: +def slow_primes(max_n: int) -> Generator[int]: """ Return a list of all primes numbers up to max. >>> list(slow_primes(0)) @@ -29,7 +29,7 @@ def slow_primes(max_n: int) -> Generator[int, None, None]: yield i -def primes(max_n: int) -> Generator[int, None, None]: +def primes(max_n: int) -> Generator[int]: """ Return a list of all primes numbers up to max. >>> list(primes(0)) @@ -58,7 +58,7 @@ def primes(max_n: int) -> Generator[int, None, None]: yield i -def fast_primes(max_n: int) -> Generator[int, None, None]: +def fast_primes(max_n: int) -> Generator[int]: """ Return a list of all primes numbers up to max. 
>>> list(fast_primes(0)) diff --git a/maths/volume.py b/maths/volume.py index 33be9bdd131a..23fcf6be6ef1 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -6,7 +6,7 @@ from __future__ import annotations -from math import pi, pow +from math import pi, pow # noqa: A004 def vol_cube(side_length: float) -> float: diff --git a/neural_network/input_data.py b/neural_network/input_data.py index f90287fe3f5b..72debabb566a 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -61,9 +61,8 @@ def _extract_images(f): with gzip.GzipFile(fileobj=f) as bytestream: magic = _read32(bytestream) if magic != 2051: - raise ValueError( - "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) - ) + msg = f"Invalid magic number {magic} in MNIST image file: {f.name}" + raise ValueError(msg) num_images = _read32(bytestream) rows = _read32(bytestream) cols = _read32(bytestream) @@ -102,9 +101,8 @@ def _extract_labels(f, one_hot=False, num_classes=10): with gzip.GzipFile(fileobj=f) as bytestream: magic = _read32(bytestream) if magic != 2049: - raise ValueError( - "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) - ) + msg = f"Invalid magic number {magic} in MNIST label file: {f.name}" + raise ValueError(msg) num_items = _read32(bytestream) buf = bytestream.read(num_items) labels = np.frombuffer(buf, dtype=np.uint8) diff --git a/physics/basic_orbital_capture.py b/physics/basic_orbital_capture.py index a5434b5cb7cb..eb1fdd9d6420 100644 --- a/physics/basic_orbital_capture.py +++ b/physics/basic_orbital_capture.py @@ -1,7 +1,3 @@ -from math import pow, sqrt - -from scipy.constants import G, c, pi - """ These two functions will return the radii of impact for a target object of mass M and radius R as well as it's effective cross sectional area sigma. @@ -14,9 +10,12 @@ cross section for capture as sigma=π*R_capture**2. This algorithm does not account for an N-body problem. - """ +from math import pow, sqrt # noqa: A004 + +from scipy.constants import G, c, pi + def capture_radii( target_body_radius: float, target_body_mass: float, projectile_velocity: float diff --git a/physics/grahams_law.py b/physics/grahams_law.py index 6e5d75127e83..c56359280ea4 100644 --- a/physics/grahams_law.py +++ b/physics/grahams_law.py @@ -14,7 +14,7 @@ (Description adapted from https://en.wikipedia.org/wiki/Graham%27s_law) """ -from math import pow, sqrt +from math import pow, sqrt # noqa: A004 def validate(*values: float) -> bool: diff --git a/project_euler/problem_025/sol2.py b/project_euler/problem_025/sol2.py index a0f056023bc9..4094b6251d50 100644 --- a/project_euler/problem_025/sol2.py +++ b/project_euler/problem_025/sol2.py @@ -27,7 +27,7 @@ from collections.abc import Generator -def fibonacci_generator() -> Generator[int, None, None]: +def fibonacci_generator() -> Generator[int]: """ A generator that produces numbers in the Fibonacci sequence diff --git a/project_euler/problem_123/sol1.py b/project_euler/problem_123/sol1.py index 3dd31a2e8505..265348d2d4c8 100644 --- a/project_euler/problem_123/sol1.py +++ b/project_euler/problem_123/sol1.py @@ -43,7 +43,7 @@ from collections.abc import Generator -def sieve() -> Generator[int, None, None]: +def sieve() -> Generator[int]: """ Returns a prime number generator using sieve method. 
>>> type(sieve()) diff --git a/pyproject.toml b/pyproject.toml index c57419e79db3..c60ec246144e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -80,6 +80,7 @@ lint.ignore = [ "EM101", # Exception must not use a string literal, assign to variable first "EXE001", # Shebang is present but file is not executable -- DO NOT FIX "G004", # Logging statement uses f-string + "ISC001", # Conflicts with ruff format -- DO NOT FIX "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME diff --git a/source/__init__.py b/source/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/strings/frequency_finder.py b/strings/frequency_finder.py index 8479c81ae464..e5afee891bd9 100644 --- a/strings/frequency_finder.py +++ b/strings/frequency_finder.py @@ -67,7 +67,7 @@ def get_frequency_order(message: str) -> str: freq_to_letter_str: dict[int, str] = {} - for freq in freq_to_letter: + for freq in freq_to_letter: # noqa: PLC0206 freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True) freq_to_letter_str[freq] = "".join(freq_to_letter[freq]) diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index a5a3c4a4e3f8..93791e2a7ed3 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -124,7 +124,7 @@ def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: print("".join(string)) if op[0] == "C": - file.write("%-16s" % "Copy %c" % op[1]) + file.write("%-16s" % "Copy %c" % op[1]) # noqa: UP031 file.write("\t\t\t" + "".join(string)) file.write("\r\n") @@ -132,7 +132,7 @@ def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: elif op[0] == "R": string[i] = op[2] - file.write("%-16s" % ("Replace %c" % op[1] + " with " + str(op[2]))) + file.write("%-16s" % ("Replace %c" % op[1] + " with " + str(op[2]))) # noqa: UP031 file.write("\t\t" + "".join(string)) file.write("\r\n") @@ -140,7 +140,7 @@ def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: elif op[0] == "D": string.pop(i) - file.write("%-16s" % "Delete %c" % op[1]) + file.write("%-16s" % "Delete %c" % op[1]) # noqa: UP031 file.write("\t\t\t" + "".join(string)) file.write("\r\n") @@ -148,7 +148,7 @@ def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: else: string.insert(i, op[1]) - file.write("%-16s" % "Insert %c" % op[1]) + file.write("%-16s" % "Insert %c" % op[1]) # noqa: UP031 file.write("\t\t\t" + "".join(string)) file.write("\r\n") diff --git a/web_programming/fetch_jobs.py b/web_programming/fetch_jobs.py index 0d89bf45de57..3753d25bbe5f 100644 --- a/web_programming/fetch_jobs.py +++ b/web_programming/fetch_jobs.py @@ -12,7 +12,7 @@ url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l=" -def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]: +def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str]]: soup = BeautifulSoup( requests.get(url + location, timeout=10).content, "html.parser" ) From c7921226326f35932bbc9d214e9742c2f3d310bf Mon Sep 17 00:00:00 2001 From: Anamaria Miranda Date: Mon, 2 Dec 2024 11:57:04 +0100 Subject: [PATCH 1456/1543] Added matrix based color game algorithm (#12400) * Added matrix based color game * updating DIRECTORY.md --------- Co-authored-by: Miranda13 --- DIRECTORY.md | 1 + matrix/matrix_based_game.py | 284 
++++++++++++++++++++++++++++++++++ 2 files changed, 285 insertions(+) create mode 100644 matrix/matrix_based_game.py diff --git a/DIRECTORY.md b/DIRECTORY.md index f0a34a553946..d234d366df06 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -794,6 +794,7 @@ * [Cramers Rule 2X2](matrix/cramers_rule_2x2.py) * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Largest Square Area In Matrix](matrix/largest_square_area_in_matrix.py) + * [Matrix Based Game](matrix/matrix_based_game.py) * [Matrix Class](matrix/matrix_class.py) * [Matrix Equalization](matrix/matrix_equalization.py) * [Matrix Multiplication Recursion](matrix/matrix_multiplication_recursion.py) diff --git a/matrix/matrix_based_game.py b/matrix/matrix_based_game.py new file mode 100644 index 000000000000..1ff0cbe93435 --- /dev/null +++ b/matrix/matrix_based_game.py @@ -0,0 +1,284 @@ +""" +Matrix-Based Game Script +========================= +This script implements a matrix-based game where players interact with a grid of +elements. The primary goals are to: +- Identify connected elements of the same type from a selected position. +- Remove those elements, adjust the matrix by simulating gravity, and reorganize empty + columns. +- Calculate and display the score based on the number of elements removed in each move. + +Functions: +----------- +1. `find_repeat`: Finds all connected elements of the same type. +2. `increment_score`: Calculates the score for a given move. +3. `move_x`: Simulates gravity in a column. +4. `move_y`: Reorganizes the matrix by shifting columns leftward when a column becomes + empty. +5. `play`: Executes a single move, updating the matrix and returning the score. + +Input Format: +-------------- +1. Matrix size (`lines`): Integer specifying the size of the matrix (N x N). +2. Matrix content (`matrix`): Rows of the matrix, each consisting of characters. +3. Number of moves (`movs`): Integer indicating the number of moves. +4. List of moves (`movements`): A comma-separated string of coordinates for each move. + +Moves are (x, y) pairs where (0,0) is the bottom-left cell: x counts columns from + left to right and y counts rows from bottom to top. + + +Example Input: +--------------- +4 +RRBG +RBBG +YYGG +XYGG +2 +0 1,1 1 + +In the example above, the element at (0,0) is X. + +Output: +-------- +The script outputs the total score after processing all moves. + +Usage: +------- +Run the script and provide the required inputs as prompted. + +""" + + +def validate_matrix_size(size: int) -> None: + """ + >>> validate_matrix_size(-1) + Traceback (most recent call last): + ... + ValueError: Matrix size must be a positive integer. + """ + if not isinstance(size, int) or size <= 0: + raise ValueError("Matrix size must be a positive integer.") + + +def validate_matrix_content(matrix: list[str], size: int) -> None: + """ + Validates that the number of elements in the matrix matches the given size. + + >>> validate_matrix_content(['aaaa', 'aaaa', 'aaaa', 'aaaa'], 3) + Traceback (most recent call last): + ... + ValueError: The matrix does not match the given size. + >>> validate_matrix_content(['aa%', 'aaa', 'aaa'], 3) + Traceback (most recent call last): + ... + ValueError: Matrix rows can only contain letters and numbers. + >>> validate_matrix_content(['aaa', 'aaa', 'aaaa'], 3) + Traceback (most recent call last): + ... + ValueError: Each row in the matrix must have exactly 3 characters. + """ + print(matrix) + if len(matrix) != size: + raise ValueError("The matrix does not match the given size.") + for row in matrix: + if len(row) != size: + msg = f"Each row in the matrix must have exactly {size} characters."
+ raise ValueError(msg) + if not all(char.isalnum() for char in row): + raise ValueError("Matrix rows can only contain letters and numbers.") + + +def validate_moves(moves: list[tuple[int, int]], size: int) -> None: + """ + >>> validate_moves([(1, 2), (-1, 0)], 3) + Traceback (most recent call last): + ... + ValueError: Move is out of bounds for a matrix. + """ + for move in moves: + x, y = move + if not (0 <= x < size and 0 <= y < size): + raise ValueError("Move is out of bounds for a matrix.") + + +def parse_moves(input_str: str) -> list[tuple[int, int]]: + """ + >>> parse_moves("0 1, 1 1") + [(0, 1), (1, 1)] + >>> parse_moves("0 1, 1 1, 2") + Traceback (most recent call last): + ... + ValueError: Each move must have exactly two numbers. + >>> parse_moves("0 1, 1 1, 2 4 5 6") + Traceback (most recent call last): + ... + ValueError: Each move must have exactly two numbers. + """ + moves = [] + for pair in input_str.split(","): + parts = pair.strip().split() + if len(parts) != 2: + raise ValueError("Each move must have exactly two numbers.") + x, y = map(int, parts) + moves.append((x, y)) + return moves + + +def find_repeat( + matrix_g: list[list[str]], row: int, column: int, size: int +) -> set[tuple[int, int]]: + """ + Finds all connected elements of the same type from a given position. + + >>> find_repeat([['A', 'B', 'A'], ['A', 'B', 'A'], ['A', 'A', 'A']], 0, 0, 3) + {(1, 2), (2, 1), (0, 0), (2, 0), (0, 2), (2, 2), (1, 0)} + >>> find_repeat([['-', '-', '-'], ['-', '-', '-'], ['-', '-', '-']], 1, 1, 3) + set() + """ + + column = size - 1 - column + visited = set() + repeated = set() + + if (color := matrix_g[column][row]) != "-": + + def dfs(row_n: int, column_n: int) -> None: + if row_n < 0 or row_n >= size or column_n < 0 or column_n >= size: + return + if (row_n, column_n) in visited: + return + visited.add((row_n, column_n)) + if matrix_g[row_n][column_n] == color: + repeated.add((row_n, column_n)) + dfs(row_n - 1, column_n) + dfs(row_n + 1, column_n) + dfs(row_n, column_n - 1) + dfs(row_n, column_n + 1) + + dfs(column, row) + + return repeated + + +def increment_score(count: int) -> int: + """ + Calculates the score for a move based on the number of elements removed. + + >>> increment_score(3) + 6 + >>> increment_score(0) + 0 + """ + return int(count * (count + 1) / 2) + + +def move_x(matrix_g: list[list[str]], column: int, size: int) -> list[list[str]]: + """ + Simulates gravity in a specific column. + + >>> move_x([['-', 'A'], ['-', '-'], ['-', 'C']], 1, 2) + [['-', '-'], ['-', 'A'], ['-', 'C']] + """ + + new_list = [] + + for row in range(size): + if matrix_g[row][column] != "-": + new_list.append(matrix_g[row][column]) + else: + new_list.insert(0, matrix_g[row][column]) + for row in range(size): + matrix_g[row][column] = new_list[row] + return matrix_g + + +def move_y(matrix_g: list[list[str]], size: int) -> list[list[str]]: + """ + Shifts all columns leftward when an entire column becomes empty. 
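# Editor's aside (not part of the new module): increment_score above returns the
# count-th triangular number, so clearing n connected cells is worth 1 + 2 + ... + n
# points. A quick self-contained check of that reading (helper name is illustrative):
def summed_score(count: int) -> int:
    return sum(range(1, count + 1))


assert all(summed_score(n) == n * (n + 1) // 2 for n in range(20))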
+ + >>> move_y([['-', 'A'], ['-', '-'], ['-', 'C']], 2) + [['A', '-'], ['-', '-'], ['-', 'C']] + """ + + empty_columns = [] + + for column in range(size - 1, -1, -1): + if all(matrix_g[row][column] == "-" for row in range(size)): + empty_columns.append(column) + + for column in empty_columns: + for col in range(column + 1, size): + for row in range(size): + matrix_g[row][col - 1] = matrix_g[row][col] + for row in range(size): + matrix_g[row][-1] = "-" + + return matrix_g + + +def play( + matrix_g: list[list[str]], pos_x: int, pos_y: int, size: int +) -> tuple[list[list[str]], int]: + """ + Processes a single move, updating the matrix and calculating the score. + + >>> play([['R', 'G'], ['R', 'G']], 0, 0, 2) + ([['G', '-'], ['G', '-']], 3) + """ + + same_colors = find_repeat(matrix_g, pos_x, pos_y, size) + + if len(same_colors) != 0: + for pos in same_colors: + matrix_g[pos[0]][pos[1]] = "-" + for column in range(size): + matrix_g = move_x(matrix_g, column, size) + + matrix_g = move_y(matrix_g, size) + + return (matrix_g, increment_score(len(same_colors))) + + +def process_game(size: int, matrix: list[str], moves: list[tuple[int, int]]) -> int: + """Processes the game logic for the given matrix and moves. + + Args: + size (int): Size of the game board. + matrix (List[str]): Initial game matrix. + moves (List[Tuple[int, int]]): List of moves as (x, y) coordinates. + + Returns: + int: The total score obtained. + >>> process_game(3, ['aaa', 'bbb', 'ccc'], [(0, 0)]) + 6 + """ + + game_matrix = [list(row) for row in matrix] + total_score = 0 + + for move in moves: + pos_x, pos_y = move + game_matrix, score = play(game_matrix, pos_x, pos_y, size) + total_score += score + + return total_score + + +if __name__ == "__main__": + import doctest + + doctest.testmod(verbose=True) + try: + size = int(input("Enter the size of the matrix: ")) + validate_matrix_size(size) + print(f"Enter the {size} rows of the matrix:") + matrix = [input(f"Row {i+1}: ") for i in range(size)] + validate_matrix_content(matrix, size) + moves_input = input("Enter the moves (e.g., '0 0, 1 1'): ") + moves = parse_moves(moves_input) + validate_moves(moves, size) + score = process_game(size, matrix, moves) + print(f"Total score: {score}") + except ValueError as e: + print(f"{e}") From b22fab0ea46c7b625d8137d1fb07d082e20d6d7b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 22:35:21 +0100 Subject: [PATCH 1457/1543] [pre-commit.ci] pre-commit autoupdate (#12404) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.0 → v0.8.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.0...v0.8.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 64d9a833cd21..bef251749c19 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.0 + rev: v0.8.1 hooks: - id: ruff - id: ruff-format From 0bcdfbdb34e03e24e2f5da90a7236226b721981d Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 5 Dec 2024 05:34:48 +0100 Subject: [PATCH 1458/1543] Use Astral uv (#12402) * Use Astral uv * uvx vs uv run * uv sync --group=euler-validate,test * uv sync --group=euler-validate 
--group=test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * --group=test --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 15 +- .github/workflows/project_euler.yml | 16 +- .github/workflows/ruff.yml | 4 +- .github/workflows/sphinx.yml | 6 +- pyproject.toml | 48 +- requirements.txt | 6 - uv.lock | 1246 +++++++++++++++++++++++++++ 7 files changed, 1301 insertions(+), 40 deletions(-) create mode 100644 uv.lock diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b5703e2f1ab6..a6f308715cc2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,21 +10,18 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v4 + with: + enable-cache: true + cache-dependency-glob: uv.lock - uses: actions/setup-python@v5 with: python-version: 3.13 allow-prereleases: true - - uses: actions/cache@v4 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip setuptools wheel - python -m pip install pytest-cov -r requirements.txt + - run: uv sync --group=test - name: Run tests # TODO: #8818 Re-enable quantum tests - run: pytest + run: uv run pytest --ignore=computer_vision/cnn_classification.py --ignore=docs/conf.py --ignore=dynamic_programming/k_means_clustering_tensorflow.py diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index 59e1208a650d..84c55335451e 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -15,25 +15,21 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v4 - uses: actions/setup-python@v5 with: python-version: 3.x - - name: Install pytest and pytest-cov - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade numpy pytest pytest-cov - - run: pytest --doctest-modules --cov-report=term-missing:skip-covered --cov=project_euler/ project_euler/ + - run: uv sync --group=euler-validate --group=test + - run: uv run pytest --doctest-modules --cov-report=term-missing:skip-covered --cov=project_euler/ project_euler/ validate-solutions: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v4 - uses: actions/setup-python@v5 with: python-version: 3.x - - name: Install pytest and requests - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade numpy pytest requests - - run: pytest scripts/validate_solutions.py + - run: uv sync --group=euler-validate --group=test + - run: uv run pytest scripts/validate_solutions.py env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index d354eba672ae..2c6f92fcf7bf 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -12,5 +12,5 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - run: pip install --user ruff - - run: ruff check --output-format=github . + - uses: astral-sh/setup-uv@v4 + - run: uvx ruff check --output-format=github . 
diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index 9dfe344f9743..e3e2ce81a95d 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -26,14 +26,14 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v4 - uses: actions/setup-python@v5 with: python-version: 3.13 allow-prereleases: true - - run: pip install --upgrade pip - - run: pip install myst-parser sphinx-autoapi sphinx-pyproject + - run: uv sync --group=docs - uses: actions/configure-pages@v5 - - run: sphinx-build -c docs . docs/_build/html + - run: uv run sphinx-build -c docs . docs/_build/html - uses: actions/upload-pages-artifact@v3 with: path: docs/_build/html diff --git a/pyproject.toml b/pyproject.toml index c60ec246144e..7b7176705c44 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,12 +7,43 @@ requires-python = ">=3.13" classifiers = [ "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.13", +] +dependencies = [ + "beautifulsoup4>=4.12.3", + "fake-useragent>=1.5.1", + "imageio>=2.36.1", + "keras>=3.7", + "lxml>=5.3", + "matplotlib>=3.9.3", + "numpy>=2.1.3", + "opencv-python>=4.10.0.84", + "pandas>=2.2.3", + "pillow>=11", + "requests>=2.32.3", + "rich>=13.9.4", + "scikit-learn>=1.5.2", + "sphinx-pyproject>=0.3", + "statsmodels>=0.14.4", + "sympy>=1.13.3", + "tweepy>=4.14", + "typing-extensions>=4.12.2", + "xgboost>=2.1.3", +] +[dependency-groups] +test = [ + "pytest>=8.3.4", + "pytest-cov>=6", ] -optional-dependencies.docs = [ - "myst-parser", - "sphinx-autoapi", - "sphinx-pyproject", + +docs = [ + "myst-parser>=4", + "sphinx-autoapi>=3.4", + "sphinx-pyproject>=0.3", +] +euler-validate = [ + "numpy>=2.1.3", + "requests>=2.32.3", ] [tool.ruff] @@ -61,8 +92,8 @@ lint.select = [ "UP", # pyupgrade "W", # pycodestyle "YTT", # flake8-2020 - # "ANN", # flake8-annotations # FIX ME? - # "COM", # flake8-commas + # "ANN", # flake8-annotations -- FIX ME? + # "COM", # flake8-commas -- DO NOT FIX # "D", # pydocstyle -- FIX ME? 
# "ERA", # eradicate -- DO NOT FIX # "FBT", # flake8-boolean-trap # FIX ME @@ -129,10 +160,7 @@ lint.pylint.max-statements = 88 # default: 50 [tool.codespell] ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" -skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" - -[tool.pyproject-fmt] -max_supported_python = "3.13" +skip = "./.*,*.json,*.lock,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" [tool.pytest.ini_options] markers = [ diff --git a/requirements.txt b/requirements.txt index 6754363332c4..4cc83f44987d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,18 +8,12 @@ numpy opencv-python pandas pillow -# projectq # uncomment once quantum/quantum_random.py is fixed -qiskit ; python_version < '3.12' -qiskit-aer ; python_version < '3.12' requests rich -# scikit-fuzzy # uncomment once fuzzy_logic/fuzzy_operations.py is fixed scikit-learn sphinx_pyproject statsmodels sympy -tensorflow ; python_version < '3.13' tweepy -# yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed typing_extensions xgboost diff --git a/uv.lock b/uv.lock new file mode 100644 index 000000000000..077288f041a1 --- /dev/null +++ b/uv.lock @@ -0,0 +1,1246 @@ +version = 1 +requires-python = ">=3.13" +resolution-markers = [ + "platform_system == 'Darwin'", + "platform_machine == 'aarch64' and platform_system == 'Linux'", + "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')", +] + +[[package]] +name = "absl-py" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7a/8f/fc001b92ecc467cc32ab38398bd0bfb45df46e7523bf33c2ad22a505f06e/absl-py-2.1.0.tar.gz", hash = "sha256:7820790efbb316739cde8b4e19357243fc3608a152024288513dd968d7d959ff", size = 118055 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/ad/e0d3c824784ff121c03cc031f944bc7e139a8f1870ffd2845cc2dd76f6c4/absl_py-2.1.0-py3-none-any.whl", hash = "sha256:526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308", size = 133706 }, +] + +[[package]] +name = "alabaster" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a6/f8/d9c74d0daf3f742840fd818d69cfae176fa332022fd44e3469487d5a9420/alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e", size = 24210 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/b3/6b4067be973ae96ba0d615946e314c5ae35f9f993eca561b356540bb0c2b/alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b", size = 13929 }, +] + +[[package]] +name = "astroid" +version = "3.3.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/38/1e/326fb1d3d83a3bb77c9f9be29d31f2901e35acb94b0605c3f2e5085047f9/astroid-3.3.5.tar.gz", hash = "sha256:5cfc40ae9f68311075d27ef68a4841bdc5cc7f6cf86671b49f00607d30188e2d", size = 397229 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/30/624365383fa4a40329c0f0bbbc151abc4a64e30dfc110fc8f6e2afcd02bb/astroid-3.3.5-py3-none-any.whl", hash = "sha256:a9d1c946ada25098d790e079ba2a1b112157278f3fb7e718ae6a9252f5835dc8", size = 274586 }, 
+] + +[[package]] +name = "babel" +version = "2.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/74/f1bc80f23eeba13393b7222b11d95ca3af2c1e28edca18af487137eefed9/babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316", size = 9348104 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/20/bc79bc575ba2e2a7f70e8a1155618bb1301eaa5132a8271373a6903f73f8/babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b", size = 9587599 }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.12.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b3/ca/824b1195773ce6166d388573fc106ce56d4a805bd7427b624e063596ec58/beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051", size = 581181 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/fe/e8c672695b37eecc5cbf43e1d0638d88d66ba3a44c4d321c796f4e59167f/beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed", size = 147925 }, +] + +[[package]] +name = "certifi" +version = "2024.8.30" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/ee/9b19140fe824b367c04c5e1b369942dd754c4c5462d5674002f75c4dedc1/certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9", size = 168507 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/90/3c9ff0512038035f59d279fddeb79f5f1eccd8859f06d6163c58798b9487/certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", size = 167321 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/4f/e1808dc01273379acc506d18f1504eb2d299bd4131743b9fc54d7be4df1e/charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e", size = 106620 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/89/68a4c86f1a0002810a27f12e9a7b22feb198c59b2f05231349fbce5c06f4/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114", size = 194617 }, + { url = "https://files.pythonhosted.org/packages/4f/cd/8947fe425e2ab0aa57aceb7807af13a0e4162cd21eee42ef5b053447edf5/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed", size = 125310 }, + { url = "https://files.pythonhosted.org/packages/5b/f0/b5263e8668a4ee9becc2b451ed909e9c27058337fda5b8c49588183c267a/charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250", size = 119126 }, + { url = "https://files.pythonhosted.org/packages/ff/6e/e445afe4f7fda27a533f3234b627b3e515a1b9429bc981c9a5e2aa5d97b6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920", size = 139342 }, + { url = 
"https://files.pythonhosted.org/packages/a1/b2/4af9993b532d93270538ad4926c8e37dc29f2111c36f9c629840c57cd9b3/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64", size = 149383 }, + { url = "https://files.pythonhosted.org/packages/fb/6f/4e78c3b97686b871db9be6f31d64e9264e889f8c9d7ab33c771f847f79b7/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23", size = 142214 }, + { url = "https://files.pythonhosted.org/packages/2b/c9/1c8fe3ce05d30c87eff498592c89015b19fade13df42850aafae09e94f35/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc", size = 144104 }, + { url = "https://files.pythonhosted.org/packages/ee/68/efad5dcb306bf37db7db338338e7bb8ebd8cf38ee5bbd5ceaaaa46f257e6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d", size = 146255 }, + { url = "https://files.pythonhosted.org/packages/0c/75/1ed813c3ffd200b1f3e71121c95da3f79e6d2a96120163443b3ad1057505/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88", size = 140251 }, + { url = "https://files.pythonhosted.org/packages/7d/0d/6f32255c1979653b448d3c709583557a4d24ff97ac4f3a5be156b2e6a210/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90", size = 148474 }, + { url = "https://files.pythonhosted.org/packages/ac/a0/c1b5298de4670d997101fef95b97ac440e8c8d8b4efa5a4d1ef44af82f0d/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b", size = 151849 }, + { url = "https://files.pythonhosted.org/packages/04/4f/b3961ba0c664989ba63e30595a3ed0875d6790ff26671e2aae2fdc28a399/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d", size = 149781 }, + { url = "https://files.pythonhosted.org/packages/d8/90/6af4cd042066a4adad58ae25648a12c09c879efa4849c705719ba1b23d8c/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482", size = 144970 }, + { url = "https://files.pythonhosted.org/packages/cc/67/e5e7e0cbfefc4ca79025238b43cdf8a2037854195b37d6417f3d0895c4c2/charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67", size = 94973 }, + { url = "https://files.pythonhosted.org/packages/65/97/fc9bbc54ee13d33dc54a7fcf17b26368b18505500fc01e228c27b5222d80/charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b", size = 102308 }, + { url = "https://files.pythonhosted.org/packages/bf/9b/08c0432272d77b04803958a4598a51e2a4b51c06640af8b8f0f908c18bf2/charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", size = 49446 }, +] + +[[package]] +name = "codespell" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url 
= "https://files.pythonhosted.org/packages/a0/a9/98353dfc7afcdf18cffd2dd3e959a25eaaf2728cf450caa59af89648a8e4/codespell-2.3.0.tar.gz", hash = "sha256:360c7d10f75e65f67bad720af7007e1060a5d395670ec11a7ed1fed9dd17471f", size = 329791 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/20/b6019add11e84f821184234cea0ad91442373489ef7ccfa3d73a71b908fa/codespell-2.3.0-py3-none-any.whl", hash = "sha256:a9c7cef2501c9cfede2110fd6d4e5e62296920efe9abfb84648df866e47f58d1", size = 329167 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "contourpy" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/25/c2/fc7193cc5383637ff390a712e88e4ded0452c9fbcf84abe3de5ea3df1866/contourpy-1.3.1.tar.gz", hash = "sha256:dfd97abd83335045a913e3bcc4a09c0ceadbe66580cf573fe961f4a825efa699", size = 13465753 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/e7/de62050dce687c5e96f946a93546910bc67e483fe05324439e329ff36105/contourpy-1.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a761d9ccfc5e2ecd1bf05534eda382aa14c3e4f9205ba5b1684ecfe400716ef2", size = 271548 }, + { url = "https://files.pythonhosted.org/packages/78/4d/c2a09ae014ae984c6bdd29c11e74d3121b25eaa117eca0bb76340efd7e1c/contourpy-1.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:523a8ee12edfa36f6d2a49407f705a6ef4c5098de4f498619787e272de93f2d5", size = 255576 }, + { url = "https://files.pythonhosted.org/packages/ab/8a/915380ee96a5638bda80cd061ccb8e666bfdccea38d5741cb69e6dbd61fc/contourpy-1.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece6df05e2c41bd46776fbc712e0996f7c94e0d0543af1656956d150c4ca7c81", size = 306635 }, + { url = "https://files.pythonhosted.org/packages/29/5c/c83ce09375428298acd4e6582aeb68b1e0d1447f877fa993d9bf6cd3b0a0/contourpy-1.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:573abb30e0e05bf31ed067d2f82500ecfdaec15627a59d63ea2d95714790f5c2", size = 345925 }, + { url = "https://files.pythonhosted.org/packages/29/63/5b52f4a15e80c66c8078a641a3bfacd6e07106835682454647aca1afc852/contourpy-1.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fa36448e6a3a1a9a2ba23c02012c43ed88905ec80163f2ffe2421c7192a5d7", size = 318000 }, + { url = "https://files.pythonhosted.org/packages/9a/e2/30ca086c692691129849198659bf0556d72a757fe2769eb9620a27169296/contourpy-1.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ea9924d28fc5586bf0b42d15f590b10c224117e74409dd7a0be3b62b74a501c", size = 322689 }, + { url = "https://files.pythonhosted.org/packages/6b/77/f37812ef700f1f185d348394debf33f22d531e714cf6a35d13d68a7003c7/contourpy-1.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b75aa69cb4d6f137b36f7eb2ace9280cfb60c55dc5f61c731fdf6f037f958a3", size = 1268413 }, + { url = 
"https://files.pythonhosted.org/packages/3f/6d/ce84e79cdd128542ebeb268f84abb4b093af78e7f8ec504676673d2675bc/contourpy-1.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:041b640d4ec01922083645a94bb3b2e777e6b626788f4095cf21abbe266413c1", size = 1326530 }, + { url = "https://files.pythonhosted.org/packages/72/22/8282f4eae20c73c89bee7a82a19c4e27af9b57bb602ecaa00713d5bdb54d/contourpy-1.3.1-cp313-cp313-win32.whl", hash = "sha256:36987a15e8ace5f58d4d5da9dca82d498c2bbb28dff6e5d04fbfcc35a9cb3a82", size = 175315 }, + { url = "https://files.pythonhosted.org/packages/e3/d5/28bca491f65312b438fbf076589dcde7f6f966b196d900777f5811b9c4e2/contourpy-1.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:a7895f46d47671fa7ceec40f31fae721da51ad34bdca0bee83e38870b1f47ffd", size = 220987 }, + { url = "https://files.pythonhosted.org/packages/2f/24/a4b285d6adaaf9746e4700932f579f1a7b6f9681109f694cfa233ae75c4e/contourpy-1.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9ddeb796389dadcd884c7eb07bd14ef12408aaae358f0e2ae24114d797eede30", size = 285001 }, + { url = "https://files.pythonhosted.org/packages/48/1d/fb49a401b5ca4f06ccf467cd6c4f1fd65767e63c21322b29b04ec40b40b9/contourpy-1.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19c1555a6801c2f084c7ddc1c6e11f02eb6a6016ca1318dd5452ba3f613a1751", size = 268553 }, + { url = "https://files.pythonhosted.org/packages/79/1e/4aef9470d13fd029087388fae750dccb49a50c012a6c8d1d634295caa644/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:841ad858cff65c2c04bf93875e384ccb82b654574a6d7f30453a04f04af71342", size = 310386 }, + { url = "https://files.pythonhosted.org/packages/b0/34/910dc706ed70153b60392b5305c708c9810d425bde12499c9184a1100888/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4318af1c925fb9a4fb190559ef3eec206845f63e80fb603d47f2d6d67683901c", size = 349806 }, + { url = "https://files.pythonhosted.org/packages/31/3c/faee6a40d66d7f2a87f7102236bf4780c57990dd7f98e5ff29881b1b1344/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14c102b0eab282427b662cb590f2e9340a9d91a1c297f48729431f2dcd16e14f", size = 321108 }, + { url = "https://files.pythonhosted.org/packages/17/69/390dc9b20dd4bb20585651d7316cc3054b7d4a7b4f8b710b2b698e08968d/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05e806338bfeaa006acbdeba0ad681a10be63b26e1b17317bfac3c5d98f36cda", size = 327291 }, + { url = "https://files.pythonhosted.org/packages/ef/74/7030b67c4e941fe1e5424a3d988080e83568030ce0355f7c9fc556455b01/contourpy-1.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4d76d5993a34ef3df5181ba3c92fabb93f1eaa5729504fb03423fcd9f3177242", size = 1263752 }, + { url = "https://files.pythonhosted.org/packages/f0/ed/92d86f183a8615f13f6b9cbfc5d4298a509d6ce433432e21da838b4b63f4/contourpy-1.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:89785bb2a1980c1bd87f0cb1517a71cde374776a5f150936b82580ae6ead44a1", size = 1318403 }, + { url = "https://files.pythonhosted.org/packages/b3/0e/c8e4950c77dcfc897c71d61e56690a0a9df39543d2164040301b5df8e67b/contourpy-1.3.1-cp313-cp313t-win32.whl", hash = "sha256:8eb96e79b9f3dcadbad2a3891672f81cdcab7f95b27f28f1c67d75f045b6b4f1", size = 185117 }, + { url = "https://files.pythonhosted.org/packages/c1/31/1ae946f11dfbd229222e6d6ad8e7bd1891d3d48bde5fbf7a0beb9491f8e3/contourpy-1.3.1-cp313-cp313t-win_amd64.whl", hash = 
"sha256:287ccc248c9e0d0566934e7d606201abd74761b5703d804ff3df8935f523d546", size = 236668 }, +] + +[[package]] +name = "coverage" +version = "7.6.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ab/75/aecfd0a3adbec6e45753976bc2a9fed62b42cea9a206d10fd29244a77953/coverage-7.6.8.tar.gz", hash = "sha256:8b2b8503edb06822c86d82fa64a4a5cb0760bb8f31f26e138ec743f422f37cfc", size = 801425 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/84/6f0ccf94a098ac3d6d6f236bd3905eeac049a9e0efcd9a63d4feca37ac4b/coverage-7.6.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0b0c69f4f724c64dfbfe79f5dfb503b42fe6127b8d479b2677f2b227478db2eb", size = 207313 }, + { url = "https://files.pythonhosted.org/packages/db/2b/e3b3a3a12ebec738c545897ac9f314620470fcbc368cdac88cf14974ba20/coverage-7.6.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c15b32a7aca8038ed7644f854bf17b663bc38e1671b5d6f43f9a2b2bd0c46f63", size = 207574 }, + { url = "https://files.pythonhosted.org/packages/db/c0/5bf95d42b6a8d21dfce5025ce187f15db57d6460a59b67a95fe8728162f1/coverage-7.6.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63068a11171e4276f6ece913bde059e77c713b48c3a848814a6537f35afb8365", size = 240090 }, + { url = "https://files.pythonhosted.org/packages/57/b8/d6fd17d1a8e2b0e1a4e8b9cb1f0f261afd422570735899759c0584236916/coverage-7.6.8-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f4548c5ead23ad13fb7a2c8ea541357474ec13c2b736feb02e19a3085fac002", size = 237237 }, + { url = "https://files.pythonhosted.org/packages/d4/e4/a91e9bb46809c8b63e68fc5db5c4d567d3423b6691d049a4f950e38fbe9d/coverage-7.6.8-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b4b4299dd0d2c67caaaf286d58aef5e75b125b95615dda4542561a5a566a1e3", size = 239225 }, + { url = "https://files.pythonhosted.org/packages/31/9c/9b99b0591ec4555b7292d271e005f27b465388ce166056c435b288db6a69/coverage-7.6.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c9ebfb2507751f7196995142f057d1324afdab56db1d9743aab7f50289abd022", size = 238888 }, + { url = "https://files.pythonhosted.org/packages/a6/85/285c2df9a04bc7c31f21fd9d4a24d19e040ec5e2ff06e572af1f6514c9e7/coverage-7.6.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c1b4474beee02ede1eef86c25ad4600a424fe36cff01a6103cb4533c6bf0169e", size = 236974 }, + { url = "https://files.pythonhosted.org/packages/cb/a1/95ec8522206f76cdca033bf8bb61fff56429fb414835fc4d34651dfd29fc/coverage-7.6.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d9fd2547e6decdbf985d579cf3fc78e4c1d662b9b0ff7cc7862baaab71c9cc5b", size = 238815 }, + { url = "https://files.pythonhosted.org/packages/8d/ac/687e9ba5e6d0979e9dab5c02e01c4f24ac58260ef82d88d3b433b3f84f1e/coverage-7.6.8-cp313-cp313-win32.whl", hash = "sha256:8aae5aea53cbfe024919715eca696b1a3201886ce83790537d1c3668459c7146", size = 209957 }, + { url = "https://files.pythonhosted.org/packages/2f/a3/b61cc8e3fcf075293fb0f3dee405748453c5ba28ac02ceb4a87f52bdb105/coverage-7.6.8-cp313-cp313-win_amd64.whl", hash = "sha256:ae270e79f7e169ccfe23284ff5ea2d52a6f401dc01b337efb54b3783e2ce3f28", size = 210711 }, + { url = "https://files.pythonhosted.org/packages/ee/4b/891c8b9acf1b62c85e4a71dac142ab9284e8347409b7355de02e3f38306f/coverage-7.6.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:de38add67a0af869b0d79c525d3e4588ac1ffa92f39116dbe0ed9753f26eba7d", size = 208053 }, + { url 
= "https://files.pythonhosted.org/packages/18/a9/9e330409b291cc002723d339346452800e78df1ce50774ca439ade1d374f/coverage-7.6.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b07c25d52b1c16ce5de088046cd2432b30f9ad5e224ff17c8f496d9cb7d1d451", size = 208329 }, + { url = "https://files.pythonhosted.org/packages/9c/0d/33635fd429f6589c6e1cdfc7bf581aefe4c1792fbff06383f9d37f59db60/coverage-7.6.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62a66ff235e4c2e37ed3b6104d8b478d767ff73838d1222132a7a026aa548764", size = 251052 }, + { url = "https://files.pythonhosted.org/packages/23/32/8a08da0e46f3830bbb9a5b40614241b2e700f27a9c2889f53122486443ed/coverage-7.6.8-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09b9f848b28081e7b975a3626e9081574a7b9196cde26604540582da60235fdf", size = 246765 }, + { url = "https://files.pythonhosted.org/packages/56/3f/3b86303d2c14350fdb1c6c4dbf9bc76000af2382f42ca1d4d99c6317666e/coverage-7.6.8-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:093896e530c38c8e9c996901858ac63f3d4171268db2c9c8b373a228f459bbc5", size = 249125 }, + { url = "https://files.pythonhosted.org/packages/36/cb/c4f081b9023f9fd8646dbc4ef77be0df090263e8f66f4ea47681e0dc2cff/coverage-7.6.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9a7b8ac36fd688c8361cbc7bf1cb5866977ece6e0b17c34aa0df58bda4fa18a4", size = 248615 }, + { url = "https://files.pythonhosted.org/packages/32/ee/53bdbf67760928c44b57b2c28a8c0a4bf544f85a9ee129a63ba5c78fdee4/coverage-7.6.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:38c51297b35b3ed91670e1e4efb702b790002e3245a28c76e627478aa3c10d83", size = 246507 }, + { url = "https://files.pythonhosted.org/packages/57/49/5a57910bd0af6d8e802b4ca65292576d19b54b49f81577fd898505dee075/coverage-7.6.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2e4e0f60cb4bd7396108823548e82fdab72d4d8a65e58e2c19bbbc2f1e2bfa4b", size = 247785 }, + { url = "https://files.pythonhosted.org/packages/bd/37/e450c9f6b297c79bb9858407396ed3e084dcc22990dd110ab01d5ceb9770/coverage-7.6.8-cp313-cp313t-win32.whl", hash = "sha256:6535d996f6537ecb298b4e287a855f37deaf64ff007162ec0afb9ab8ba3b8b71", size = 210605 }, + { url = "https://files.pythonhosted.org/packages/44/79/7d0c7dd237c6905018e2936cd1055fe1d42e7eba2ebab3c00f4aad2a27d7/coverage-7.6.8-cp313-cp313t-win_amd64.whl", hash = "sha256:c79c0685f142ca53256722a384540832420dff4ab15fec1863d7e5bc8691bdcc", size = 211777 }, +] + +[[package]] +name = "cycler" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/95/a3dbbb5028f35eafb79008e7522a75244477d2838f38cbb722248dabc2a8/cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c", size = 7615 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321 }, +] + +[[package]] +name = "docutils" +version = "0.21.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408 }, +] + +[[package]] +name = "dom-toml" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "domdf-python-tools" }, + { name = "tomli" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/34/f7690cf288eaa86b55c8f1b890d0834e6df44a026a88eca12274fcd624ab/dom_toml-2.0.0.tar.gz", hash = "sha256:3c07e8436538994974127b1ae037661d1a779ac915c44fd06b3ab5fe140ff589", size = 11133 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/99/b6fc87dff3138491d81676bdcbf1531080925ba41486ec1dafd86e33fdbc/dom_toml-2.0.0-py3-none-any.whl", hash = "sha256:0b6d02a72bcbc6be8175c61afc30623bbb6b74c4650f2a806fbc3fb7fe86935d", size = 13376 }, +] + +[[package]] +name = "domdf-python-tools" +version = "3.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "natsort" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6b/78/974e10c583ba9d2302e748c9585313a7f2c7ba00e4f600324f432e38fe68/domdf_python_tools-3.9.0.tar.gz", hash = "sha256:1f8a96971178333a55e083e35610d7688cd7620ad2b99790164e1fc1a3614c18", size = 103792 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/de/e9/7447a88b217650a74927d3444a89507986479a69b83741900eddd34167fe/domdf_python_tools-3.9.0-py3-none-any.whl", hash = "sha256:4e1ef365cbc24627d6d1e90cf7d46d8ab8df967e1237f4a26885f6986c78872e", size = 127106 }, +] + +[[package]] +name = "fake-useragent" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/a1/1f662631ab153975fa8dbf09296324ecbaf53370dce922054e8de6b57370/fake-useragent-1.5.1.tar.gz", hash = "sha256:6387269f5a2196b5ba7ed8935852f75486845a1c95c50e72460e6a8e762f5c49", size = 22631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/99/60d8cf1b26938c2e0a57e232f7f15641dfcd6f8deda454d73e4145910ff6/fake_useragent-1.5.1-py3-none-any.whl", hash = "sha256:57415096557c8a4e23b62a375c21c55af5fd4ba30549227f562d2c4f5b60e3b3", size = 17190 }, +] + +[[package]] +name = "fonttools" +version = "4.55.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4e/053fe1b5c0ce346c0a9d0557492c654362bafb14f026eae0d3ee98009152/fonttools-4.55.0.tar.gz", hash = "sha256:7636acc6ab733572d5e7eec922b254ead611f1cdad17be3f0be7418e8bfaca71", size = 3490431 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c3/87/a669ac26c6077e37ffb06abf29c5571789eefe518d06c52df392181ee694/fonttools-4.55.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8118dc571921dc9e4b288d9cb423ceaf886d195a2e5329cc427df82bba872cd9", size = 2752519 }, + { url = "https://files.pythonhosted.org/packages/0c/e9/4822ad238fe215133c7df20f1cdb1a58cfb634a31523e77ff0fb2033970a/fonttools-4.55.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01124f2ca6c29fad4132d930da69158d3f49b2350e4a779e1efbe0e82bd63f6c", size = 2286819 }, + { url = "https://files.pythonhosted.org/packages/3e/a4/d7941c3897129e60fe68d20e4819fda4d0c4858d77badae0e80ca6440b36/fonttools-4.55.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ffd58d2691f11f7c8438796e9f21c374828805d33e83ff4b76e4635633674c", size = 4770382 }, + { url = 
"https://files.pythonhosted.org/packages/31/cf/c51ea1348f9fba9c627439afad9dee0090040809ab431f4422b5bfdda34c/fonttools-4.55.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5435e5f1eb893c35c2bc2b9cd3c9596b0fcb0a59e7a14121562986dd4c47b8dd", size = 4858336 }, + { url = "https://files.pythonhosted.org/packages/73/be/36c1fe0e5c9a96b068ddd7e82001243bbe7fe12549c8d14e1bd025bf40c9/fonttools-4.55.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d12081729280c39d001edd0f4f06d696014c26e6e9a0a55488fabc37c28945e4", size = 4756072 }, + { url = "https://files.pythonhosted.org/packages/5c/18/6dd381c29f215a017f79aa9fea0722424a0046b47991c4390a78ff87ce0c/fonttools-4.55.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a7ad1f1b98ab6cb927ab924a38a8649f1ffd7525c75fe5b594f5dab17af70e18", size = 5008668 }, + { url = "https://files.pythonhosted.org/packages/b8/95/316f20092b389b927dba1d1dccd3f541853f96e707e210f1b9f4e7bacdd5/fonttools-4.55.0-cp313-cp313-win32.whl", hash = "sha256:abe62987c37630dca69a104266277216de1023cf570c1643bb3a19a9509e7a1b", size = 2155841 }, + { url = "https://files.pythonhosted.org/packages/35/ca/b4638aa3e446184892e2f9cc8ef44bb506f47fea04580df7fb84f5a4363d/fonttools-4.55.0-cp313-cp313-win_amd64.whl", hash = "sha256:2863555ba90b573e4201feaf87a7e71ca3b97c05aa4d63548a4b69ea16c9e998", size = 2200587 }, + { url = "https://files.pythonhosted.org/packages/b4/4a/786589606d4989cb34d8bc766cd687d955aaf3039c367fe7104bcf82dc98/fonttools-4.55.0-py3-none-any.whl", hash = "sha256:12db5888cd4dd3fcc9f0ee60c6edd3c7e1fd44b7dd0f31381ea03df68f8a153f", size = 1100249 }, +] + +[[package]] +name = "h5py" +version = "3.12.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cc/0c/5c2b0a88158682aeafb10c1c2b735df5bc31f165bfe192f2ee9f2a23b5f1/h5py-3.12.1.tar.gz", hash = "sha256:326d70b53d31baa61f00b8aa5f95c2fcb9621a3ee8365d770c551a13dbbcbfdf", size = 411457 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/23/1c/ecdd0efab52c24f2a9bf2324289828b860e8dd1e3c5ada3cf0889e14fdc1/h5py-3.12.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:513171e90ed92236fc2ca363ce7a2fc6f2827375efcbb0cc7fbdd7fe11fecafc", size = 3346239 }, + { url = "https://files.pythonhosted.org/packages/93/cd/5b6f574bf3e318bbe305bc93ba45181676550eb44ba35e006d2e98004eaa/h5py-3.12.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:59400f88343b79655a242068a9c900001a34b63e3afb040bd7cdf717e440f653", size = 2843416 }, + { url = "https://files.pythonhosted.org/packages/8a/4f/b74332f313bfbe94ba03fff784219b9db385e6139708e55b11490149f90a/h5py-3.12.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3e465aee0ec353949f0f46bf6c6f9790a2006af896cee7c178a8c3e5090aa32", size = 5154390 }, + { url = "https://files.pythonhosted.org/packages/1a/57/93ea9e10a6457ea8d3b867207deb29a527e966a08a84c57ffd954e32152a/h5py-3.12.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba51c0c5e029bb5420a343586ff79d56e7455d496d18a30309616fdbeed1068f", size = 5378244 }, + { url = "https://files.pythonhosted.org/packages/50/51/0bbf3663062b2eeee78aa51da71e065f8a0a6e3cb950cc7020b4444999e6/h5py-3.12.1-cp313-cp313-win_amd64.whl", hash = "sha256:52ab036c6c97055b85b2a242cb540ff9590bacfda0c03dd0cf0661b311f522f8", size = 2979760 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "imageio" +version = "2.36.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "pillow" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/aa/2e7a49259339e691ff2b477ae0696b1784a09313c5872700bbbdd00a3030/imageio-2.36.1.tar.gz", hash = "sha256:e4e1d231f47f9a9e16100b0f7ce1a86e8856fb4d1c0fa2c4365a316f1746be62", size = 389522 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/f9/f78e7f5ac8077c481bf6b43b8bc736605363034b3d5eb3ce8eb79f53f5f1/imageio-2.36.1-py3-none-any.whl", hash = "sha256:20abd2cae58e55ca1af8a8dcf43293336a59adf0391f1917bf8518633cfc2cdf", size = 315435 }, +] + +[[package]] +name = "imagesize" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a", size = 1280026 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769 }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, +] + +[[package]] +name = "jinja2" +version = "3.1.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/55/39036716d19cab0747a5020fc7e907f362fbf48c984b14e62127f7e68e5d/jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369", size = 240245 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/80/3a54838c3fb461f6fec263ebf3a3a41771bd05190238de3486aae8540c36/jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d", size = 133271 }, +] + +[[package]] +name = "joblib" +version = "1.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/64/33/60135848598c076ce4b231e1b1895170f45fbcaeaa2c9d5e38b04db70c35/joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e", size = 2116621 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/91/29/df4b9b42f2be0b623cbd5e2140cafcaa2bef0759a00b7b70104dcfe2fb51/joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6", size = 301817 }, +] + +[[package]] +name = "keras" +version = "3.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py" }, + { name = "h5py" }, + { name = "ml-dtypes" }, + { name = "namex" }, + { name = "numpy" }, + { name = "optree" }, + { name = "packaging" }, + { name = "rich" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/c3/56fc6800c5eab94bd0f5e930751bd4c0fa1ee0aee272fad4a72723ffae87/keras-3.7.0.tar.gz", hash = "sha256:a4451a5591e75dfb414d0b84a3fd2fb9c0240cc87ebe7e397f547ce10b0e67b7", size = 924719 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/bf/9e3f10e55df30b0fb4bf6c2ee7d50bda2e070599b86f62ea3f9954af172b/keras-3.7.0-py3-none-any.whl", hash = "sha256:546a64f302e4779c129c06d9826fa586de752cdfd43d7dc4010c31b282587969", size = 1228365 }, +] + +[[package]] +name = "kiwisolver" +version = "1.4.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/85/4d/2255e1c76304cbd60b48cee302b66d1dde4468dc5b1160e4b7cb43778f2a/kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60", size = 97286 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/06/7da99b04259b0f18b557a4effd1b9c901a747f7fdd84cf834ccf520cb0b2/kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e", size = 121913 }, + { url = "https://files.pythonhosted.org/packages/97/f5/b8a370d1aa593c17882af0a6f6755aaecd643640c0ed72dcfd2eafc388b9/kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6", size = 65627 }, + { url = "https://files.pythonhosted.org/packages/2a/fc/6c0374f7503522539e2d4d1b497f5ebad3f8ed07ab51aed2af988dd0fb65/kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750", size = 63888 }, + { url = "https://files.pythonhosted.org/packages/bf/3e/0b7172793d0f41cae5c923492da89a2ffcd1adf764c16159ca047463ebd3/kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d", size = 1369145 }, + { url = "https://files.pythonhosted.org/packages/77/92/47d050d6f6aced2d634258123f2688fbfef8ded3c5baf2c79d94d91f1f58/kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379", size = 1461448 }, + { url = "https://files.pythonhosted.org/packages/9c/1b/8f80b18e20b3b294546a1adb41701e79ae21915f4175f311a90d042301cf/kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c", size = 1578750 }, + { url = "https://files.pythonhosted.org/packages/a4/fe/fe8e72f3be0a844f257cadd72689c0848c6d5c51bc1d60429e2d14ad776e/kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34", size = 1507175 }, + { url = 
"https://files.pythonhosted.org/packages/39/fa/cdc0b6105d90eadc3bee525fecc9179e2b41e1ce0293caaf49cb631a6aaf/kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1", size = 1463963 }, + { url = "https://files.pythonhosted.org/packages/6e/5c/0c03c4e542720c6177d4f408e56d1c8315899db72d46261a4e15b8b33a41/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f", size = 2248220 }, + { url = "https://files.pythonhosted.org/packages/3d/ee/55ef86d5a574f4e767df7da3a3a7ff4954c996e12d4fbe9c408170cd7dcc/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b", size = 2404463 }, + { url = "https://files.pythonhosted.org/packages/0f/6d/73ad36170b4bff4825dc588acf4f3e6319cb97cd1fb3eb04d9faa6b6f212/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27", size = 2352842 }, + { url = "https://files.pythonhosted.org/packages/0b/16/fa531ff9199d3b6473bb4d0f47416cdb08d556c03b8bc1cccf04e756b56d/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a", size = 2501635 }, + { url = "https://files.pythonhosted.org/packages/78/7e/aa9422e78419db0cbe75fb86d8e72b433818f2e62e2e394992d23d23a583/kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee", size = 2314556 }, + { url = "https://files.pythonhosted.org/packages/a8/b2/15f7f556df0a6e5b3772a1e076a9d9f6c538ce5f05bd590eca8106508e06/kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07", size = 46364 }, + { url = "https://files.pythonhosted.org/packages/0b/db/32e897e43a330eee8e4770bfd2737a9584b23e33587a0812b8e20aac38f7/kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76", size = 55887 }, + { url = "https://files.pythonhosted.org/packages/c8/a4/df2bdca5270ca85fd25253049eb6708d4127be2ed0e5c2650217450b59e9/kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650", size = 48530 }, +] + +[[package]] +name = "lxml" +version = "5.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/6b/20c3a4b24751377aaa6307eb230b66701024012c29dd374999cc92983269/lxml-5.3.0.tar.gz", hash = "sha256:4e109ca30d1edec1ac60cdbe341905dc3b8f55b16855e03a54aaf59e51ec8c6f", size = 3679318 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/6a/42141e4d373903bfea6f8e94b2f554d05506dfda522ada5343c651410dc8/lxml-5.3.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8c72e9563347c7395910de6a3100a4840a75a6f60e05af5e58566868d5eb2d6a", size = 8156284 }, + { url = "https://files.pythonhosted.org/packages/91/5e/fa097f0f7d8b3d113fb7312c6308af702f2667f22644441715be961f2c7e/lxml-5.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e92ce66cd919d18d14b3856906a61d3f6b6a8500e0794142338da644260595cd", size = 4432407 }, + { url = 
"https://files.pythonhosted.org/packages/2d/a1/b901988aa6d4ff937f2e5cfc114e4ec561901ff00660c3e56713642728da/lxml-5.3.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d04f064bebdfef9240478f7a779e8c5dc32b8b7b0b2fc6a62e39b928d428e51", size = 5048331 }, + { url = "https://files.pythonhosted.org/packages/30/0f/b2a54f48e52de578b71bbe2a2f8160672a8a5e103df3a78da53907e8c7ed/lxml-5.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c2fb570d7823c2bbaf8b419ba6e5662137f8166e364a8b2b91051a1fb40ab8b", size = 4744835 }, + { url = "https://files.pythonhosted.org/packages/82/9d/b000c15538b60934589e83826ecbc437a1586488d7c13f8ee5ff1f79a9b8/lxml-5.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c120f43553ec759f8de1fee2f4794452b0946773299d44c36bfe18e83caf002", size = 5316649 }, + { url = "https://files.pythonhosted.org/packages/e3/ee/ffbb9eaff5e541922611d2c56b175c45893d1c0b8b11e5a497708a6a3b3b/lxml-5.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:562e7494778a69086f0312ec9689f6b6ac1c6b65670ed7d0267e49f57ffa08c4", size = 4812046 }, + { url = "https://files.pythonhosted.org/packages/15/ff/7ff89d567485c7b943cdac316087f16b2399a8b997007ed352a1248397e5/lxml-5.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:423b121f7e6fa514ba0c7918e56955a1d4470ed35faa03e3d9f0e3baa4c7e492", size = 4918597 }, + { url = "https://files.pythonhosted.org/packages/c6/a3/535b6ed8c048412ff51268bdf4bf1cf052a37aa7e31d2e6518038a883b29/lxml-5.3.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:c00f323cc00576df6165cc9d21a4c21285fa6b9989c5c39830c3903dc4303ef3", size = 4738071 }, + { url = "https://files.pythonhosted.org/packages/7a/8f/cbbfa59cb4d4fd677fe183725a76d8c956495d7a3c7f111ab8f5e13d2e83/lxml-5.3.0-cp313-cp313-manylinux_2_28_ppc64le.whl", hash = "sha256:1fdc9fae8dd4c763e8a31e7630afef517eab9f5d5d31a278df087f307bf601f4", size = 5342213 }, + { url = "https://files.pythonhosted.org/packages/5c/fb/db4c10dd9958d4b52e34d1d1f7c1f434422aeaf6ae2bbaaff2264351d944/lxml-5.3.0-cp313-cp313-manylinux_2_28_s390x.whl", hash = "sha256:658f2aa69d31e09699705949b5fc4719cbecbd4a97f9656a232e7d6c7be1a367", size = 4893749 }, + { url = "https://files.pythonhosted.org/packages/f2/38/bb4581c143957c47740de18a3281a0cab7722390a77cc6e610e8ebf2d736/lxml-5.3.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:1473427aff3d66a3fa2199004c3e601e6c4500ab86696edffdbc84954c72d832", size = 4945901 }, + { url = "https://files.pythonhosted.org/packages/fc/d5/18b7de4960c731e98037bd48fa9f8e6e8f2558e6fbca4303d9b14d21ef3b/lxml-5.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a87de7dd873bf9a792bf1e58b1c3887b9264036629a5bf2d2e6579fe8e73edff", size = 4815447 }, + { url = "https://files.pythonhosted.org/packages/97/a8/cd51ceaad6eb849246559a8ef60ae55065a3df550fc5fcd27014361c1bab/lxml-5.3.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0d7b36afa46c97875303a94e8f3ad932bf78bace9e18e603f2085b652422edcd", size = 5411186 }, + { url = "https://files.pythonhosted.org/packages/89/c3/1e3dabab519481ed7b1fdcba21dcfb8832f57000733ef0e71cf6d09a5e03/lxml-5.3.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:cf120cce539453ae086eacc0130a324e7026113510efa83ab42ef3fcfccac7fb", size = 5324481 }, + { url = "https://files.pythonhosted.org/packages/b6/17/71e9984cf0570cd202ac0a1c9ed5c1b8889b0fc8dc736f5ef0ffb181c284/lxml-5.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:df5c7333167b9674aa8ae1d4008fa4bc17a313cc490b2cca27838bbdcc6bb15b", size = 5011053 }, + { url = "https://files.pythonhosted.org/packages/69/68/9f7e6d3312a91e30829368c2b3217e750adef12a6f8eb10498249f4e8d72/lxml-5.3.0-cp313-cp313-win32.whl", hash = "sha256:c802e1c2ed9f0c06a65bc4ed0189d000ada8049312cfeab6ca635e39c9608957", size = 3485634 }, + { url = "https://files.pythonhosted.org/packages/7d/db/214290d58ad68c587bd5d6af3d34e56830438733d0d0856c0275fde43652/lxml-5.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:406246b96d552e0503e17a1006fd27edac678b3fcc9f1be71a2f94b4ff61528d", size = 3814417 }, +] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528 }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", 
hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, + { url = 
"https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, +] + +[[package]] +name = "matplotlib" +version = "3.9.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "contourpy" }, + { name = "cycler" }, + { name = "fonttools" }, + { name = "kiwisolver" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "pyparsing" }, + { name = "python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/75/9f/562ed484b11ac9f4bb4f9d2d7546954ec106a8c0f06cc755d6f63e519274/matplotlib-3.9.3.tar.gz", hash = "sha256:cd5dbbc8e25cad5f706845c4d100e2c8b34691b412b93717ce38d8ae803bcfa5", size = 36113438 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/04/949640040982822416c471d9ebe4e9e6c69ca9f9bb6ba82ed30808863c02/matplotlib-3.9.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:203d18df84f5288973b2d56de63d4678cc748250026ca9e1ad8f8a0fd8a75d83", size = 7883417 }, + { url = "https://files.pythonhosted.org/packages/9f/90/ebd37143cd3150b6c650ee1580024df3dd649d176e68d346f826b8d24e37/matplotlib-3.9.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b651b0d3642991259109dc0351fc33ad44c624801367bb8307be9bfc35e427ad", size = 7768720 }, + { url = "https://files.pythonhosted.org/packages/dc/84/6591e6b55d755d16dacdc113205067031867c1f5e3c08b32c01aad831420/matplotlib-3.9.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66d7b171fecf96940ce069923a08ba3df33ef542de82c2ff4fe8caa8346fa95a", size = 8192723 }, + { url = "https://files.pythonhosted.org/packages/29/09/146a17d37e32313507f11ac984e65311f2d5805d731eb981d4f70eb928dc/matplotlib-3.9.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be0ba61f6ff2e6b68e4270fb63b6813c9e7dec3d15fc3a93f47480444fd72f0", size = 8305801 }, + { url = "https://files.pythonhosted.org/packages/85/cb/d2690572c08f19ca7c0f44b1fb4d11c121d63467a57b508cc3656ff80b43/matplotlib-3.9.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d6b2e8856dec3a6db1ae51aec85c82223e834b228c1d3228aede87eee2b34f9", size = 9086564 }, + { url = "https://files.pythonhosted.org/packages/28/dd/0a5176027c1cb94fe75f69f76cb274180c8abf740df6fc0e6a1e4cbaec3f/matplotlib-3.9.3-cp313-cp313-win_amd64.whl", hash = "sha256:90a85a004fefed9e583597478420bf904bb1a065b0b0ee5b9d8d31b04b0f3f70", size = 7833257 }, + { url = "https://files.pythonhosted.org/packages/42/d4/e477d50a8e4b437c2afbb5c665cb8e5d79b06abe6fe3c6915d6f7f0c2ef2/matplotlib-3.9.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3119b2f16de7f7b9212ba76d8fe6a0e9f90b27a1e04683cd89833a991682f639", size = 7911906 }, + { url = "https://files.pythonhosted.org/packages/ae/a1/ba5ab89666c42ace8e31b4ff5a2c76a17e4d6f91aefce476b064c56ff61d/matplotlib-3.9.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:87ad73763d93add1b6c1f9fcd33af662fd62ed70e620c52fcb79f3ac427cf3a6", size = 7801336 }, + { url = "https://files.pythonhosted.org/packages/77/59/4dcdb3a6695af6c698a95aec13016a550ef2f85144d22f61f81d1e064148/matplotlib-3.9.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:026bdf3137ab6022c866efa4813b6bbeddc2ed4c9e7e02f0e323a7bca380dfa0", size = 8218178 }, + { url = 
"https://files.pythonhosted.org/packages/4f/27/7c72db0d0ee35d9237572565ffa3c0eb25fc46a3f47e0f16412a587bc9d8/matplotlib-3.9.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:760a5e89ebbb172989e8273024a1024b0f084510b9105261b3b00c15e9c9f006", size = 8327768 }, + { url = "https://files.pythonhosted.org/packages/de/ad/213eee624feadba7b77e881c9d2c04c1e036efe69d19031e3fa927fdb5dc/matplotlib-3.9.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a42b9dc42de2cfe357efa27d9c50c7833fc5ab9b2eb7252ccd5d5f836a84e1e4", size = 9094075 }, + { url = "https://files.pythonhosted.org/packages/19/1b/cb8e99a5fe2e2b14e3b8234cb1649a675be63f74a5224a648ae4ab61f60c/matplotlib-3.9.3-cp313-cp313t-win_amd64.whl", hash = "sha256:e0fcb7da73fbf67b5f4bdaa57d85bb585a4e913d4a10f3e15b32baea56a67f0a", size = 7888937 }, +] + +[[package]] +name = "mdit-py-plugins" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/03/a2ecab526543b152300717cf232bb4bb8605b6edb946c845016fa9c9c9fd/mdit_py_plugins-0.4.2.tar.gz", hash = "sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5", size = 43542 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/f7/7782a043553ee469c1ff49cfa1cdace2d6bf99a1f333cf38676b3ddf30da/mdit_py_plugins-0.4.2-py3-none-any.whl", hash = "sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636", size = 55316 }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 }, +] + +[[package]] +name = "ml-dtypes" +version = "0.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "python_full_version >= '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/79/717c5e22ad25d63ce3acdfe8ff8d64bdedec18914256c59b838218708b16/ml_dtypes-0.5.0.tar.gz", hash = "sha256:3e7d3a380fe73a63c884f06136f8baa7a5249cc8e9fdec677997dd78549f8128", size = 699367 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/4a/18f670a2703e771a6775fbc354208e597ff062a88efb0cecc220a282210b/ml_dtypes-0.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d3b3db9990c3840986a0e70524e122cfa32b91139c3653df76121ba7776e015f", size = 753345 }, + { url = "https://files.pythonhosted.org/packages/ed/c6/358d85e274e22d53def0c85f3cbe0933475fa3cf6922e9dca66eb25cb22f/ml_dtypes-0.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e04fde367b2fe901b1d47234426fe8819909bd1dd862a5adb630f27789c20599", size = 4424962 }, + { url = "https://files.pythonhosted.org/packages/4c/b4/d766586e24e7a073333c8eb8bd9275f3c6fe0569b509ae7b1699d4f00c74/ml_dtypes-0.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54415257f00eb44fbcc807454efac3356f75644f1cbfc2d4e5522a72ae1dacab", size = 4475201 }, + { url = 
"https://files.pythonhosted.org/packages/14/87/30323ad2e52f56262019a4493fe5f5e71067c5561ce7e2f9c75de520f5e8/ml_dtypes-0.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:cb5cc7b25acabd384f75bbd78892d0c724943f3e2e1986254665a1aa10982e07", size = 213195 }, +] + +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198 }, +] + +[[package]] +name = "myst-parser" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "jinja2" }, + { name = "markdown-it-py" }, + { name = "mdit-py-plugins" }, + { name = "pyyaml" }, + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/55/6d1741a1780e5e65038b74bce6689da15f620261c490c3511eb4c12bac4b/myst_parser-4.0.0.tar.gz", hash = "sha256:851c9dfb44e36e56d15d05e72f02b80da21a9e0d07cba96baf5e2d476bb91531", size = 93858 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/b4/b036f8fdb667587bb37df29dc6644681dd78b7a2a6321a34684b79412b28/myst_parser-4.0.0-py3-none-any.whl", hash = "sha256:b9317997552424448c6096c2558872fdb6f81d3ecb3a40ce84a7518798f3f28d", size = 84563 }, +] + +[[package]] +name = "namex" +version = "0.0.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9d/48/d275cdb6216c6bb4f9351675795a0b48974e138f16b1ffe0252c1f8faa28/namex-0.0.8.tar.gz", hash = "sha256:32a50f6c565c0bb10aa76298c959507abdc0e850efe085dc38f3440fcb3aa90b", size = 6623 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/59/7854fbfb59f8ae35483ce93493708be5942ebb6328cd85b3a609df629736/namex-0.0.8-py3-none-any.whl", hash = "sha256:7ddb6c2bb0e753a311b7590f84f6da659dd0c05e65cb89d519d54c0a250c0487", size = 5806 }, +] + +[[package]] +name = "natsort" +version = "8.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e2/a9/a0c57aee75f77794adaf35322f8b6404cbd0f89ad45c87197a937764b7d0/natsort-8.4.0.tar.gz", hash = "sha256:45312c4a0e5507593da193dedd04abb1469253b601ecaf63445ad80f0a1ea581", size = 76575 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/82/7a9d0550484a62c6da82858ee9419f3dd1ccc9aa1c26a1e43da3ecd20b0d/natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c", size = 38268 }, +] + +[[package]] +name = "numpy" +version = "2.1.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/25/ca/1166b75c21abd1da445b97bf1fa2f14f423c6cfb4fc7c4ef31dccf9f6a94/numpy-2.1.3.tar.gz", hash = "sha256:aa08e04e08aaf974d4458def539dece0d28146d866a39da5639596f4921fd761", size = 20166090 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/0b/620591441457e25f3404c8057eb924d04f161244cb8a3680d529419aa86e/numpy-2.1.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96fe52fcdb9345b7cd82ecd34547fca4321f7656d500eca497eb7ea5a926692f", size = 20836263 }, + { url = 
"https://files.pythonhosted.org/packages/45/e1/210b2d8b31ce9119145433e6ea78046e30771de3fe353f313b2778142f34/numpy-2.1.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f653490b33e9c3a4c1c01d41bc2aef08f9475af51146e4a7710c450cf9761598", size = 13507771 }, + { url = "https://files.pythonhosted.org/packages/55/44/aa9ee3caee02fa5a45f2c3b95cafe59c44e4b278fbbf895a93e88b308555/numpy-2.1.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dc258a761a16daa791081d026f0ed4399b582712e6fc887a95af09df10c5ca57", size = 5075805 }, + { url = "https://files.pythonhosted.org/packages/78/d6/61de6e7e31915ba4d87bbe1ae859e83e6582ea14c6add07c8f7eefd8488f/numpy-2.1.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:016d0f6f5e77b0f0d45d77387ffa4bb89816b57c835580c3ce8e099ef830befe", size = 6608380 }, + { url = "https://files.pythonhosted.org/packages/3e/46/48bdf9b7241e317e6cf94276fe11ba673c06d1fdf115d8b4ebf616affd1a/numpy-2.1.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c181ba05ce8299c7aa3125c27b9c2167bca4a4445b7ce73d5febc411ca692e43", size = 13602451 }, + { url = "https://files.pythonhosted.org/packages/70/50/73f9a5aa0810cdccda9c1d20be3cbe4a4d6ea6bfd6931464a44c95eef731/numpy-2.1.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5641516794ca9e5f8a4d17bb45446998c6554704d888f86df9b200e66bdcce56", size = 16039822 }, + { url = "https://files.pythonhosted.org/packages/ad/cd/098bc1d5a5bc5307cfc65ee9369d0ca658ed88fbd7307b0d49fab6ca5fa5/numpy-2.1.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ea4dedd6e394a9c180b33c2c872b92f7ce0f8e7ad93e9585312b0c5a04777a4a", size = 16411822 }, + { url = "https://files.pythonhosted.org/packages/83/a2/7d4467a2a6d984549053b37945620209e702cf96a8bc658bc04bba13c9e2/numpy-2.1.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0df3635b9c8ef48bd3be5f862cf71b0a4716fa0e702155c45067c6b711ddcef", size = 14079598 }, + { url = "https://files.pythonhosted.org/packages/e9/6a/d64514dcecb2ee70bfdfad10c42b76cab657e7ee31944ff7a600f141d9e9/numpy-2.1.3-cp313-cp313-win32.whl", hash = "sha256:50ca6aba6e163363f132b5c101ba078b8cbd3fa92c7865fd7d4d62d9779ac29f", size = 6236021 }, + { url = "https://files.pythonhosted.org/packages/bb/f9/12297ed8d8301a401e7d8eb6b418d32547f1d700ed3c038d325a605421a4/numpy-2.1.3-cp313-cp313-win_amd64.whl", hash = "sha256:747641635d3d44bcb380d950679462fae44f54b131be347d5ec2bce47d3df9ed", size = 12560405 }, + { url = "https://files.pythonhosted.org/packages/a7/45/7f9244cd792e163b334e3a7f02dff1239d2890b6f37ebf9e82cbe17debc0/numpy-2.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:996bb9399059c5b82f76b53ff8bb686069c05acc94656bb259b1d63d04a9506f", size = 20859062 }, + { url = "https://files.pythonhosted.org/packages/b1/b4/a084218e7e92b506d634105b13e27a3a6645312b93e1c699cc9025adb0e1/numpy-2.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:45966d859916ad02b779706bb43b954281db43e185015df6eb3323120188f9e4", size = 13515839 }, + { url = "https://files.pythonhosted.org/packages/27/45/58ed3f88028dcf80e6ea580311dc3edefdd94248f5770deb980500ef85dd/numpy-2.1.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:baed7e8d7481bfe0874b566850cb0b85243e982388b7b23348c6db2ee2b2ae8e", size = 5116031 }, + { url = "https://files.pythonhosted.org/packages/37/a8/eb689432eb977d83229094b58b0f53249d2209742f7de529c49d61a124a0/numpy-2.1.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f7f672a3388133335589cfca93ed468509cb7b93ba3105fce780d04a6576a0", size = 6629977 }, + { url = 
"https://files.pythonhosted.org/packages/42/a3/5355ad51ac73c23334c7caaed01adadfda49544f646fcbfbb4331deb267b/numpy-2.1.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7aac50327da5d208db2eec22eb11e491e3fe13d22653dce51b0f4109101b408", size = 13575951 }, + { url = "https://files.pythonhosted.org/packages/c4/70/ea9646d203104e647988cb7d7279f135257a6b7e3354ea6c56f8bafdb095/numpy-2.1.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4394bc0dbd074b7f9b52024832d16e019decebf86caf909d94f6b3f77a8ee3b6", size = 16022655 }, + { url = "https://files.pythonhosted.org/packages/14/ce/7fc0612903e91ff9d0b3f2eda4e18ef9904814afcae5b0f08edb7f637883/numpy-2.1.3-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:50d18c4358a0a8a53f12a8ba9d772ab2d460321e6a93d6064fc22443d189853f", size = 16399902 }, + { url = "https://files.pythonhosted.org/packages/ef/62/1d3204313357591c913c32132a28f09a26357e33ea3c4e2fe81269e0dca1/numpy-2.1.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:14e253bd43fc6b37af4921b10f6add6925878a42a0c5fe83daee390bca80bc17", size = 14067180 }, + { url = "https://files.pythonhosted.org/packages/24/d7/78a40ed1d80e23a774cb8a34ae8a9493ba1b4271dde96e56ccdbab1620ef/numpy-2.1.3-cp313-cp313t-win32.whl", hash = "sha256:08788d27a5fd867a663f6fc753fd7c3ad7e92747efc73c53bca2f19f8bc06f48", size = 6291907 }, + { url = "https://files.pythonhosted.org/packages/86/09/a5ab407bd7f5f5599e6a9261f964ace03a73e7c6928de906981c31c38082/numpy-2.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:2564fbdf2b99b3f815f2107c1bbc93e2de8ee655a69c261363a1172a79a257d4", size = 12644098 }, +] + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.23.4" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/3a/0112397396dec37ffc8edd7836d48261b4d14ca60ec8ed7bc857cce1d916/nvidia_nccl_cu12-2.23.4-py3-none-manylinux2014_aarch64.whl", hash = "sha256:aa946c8327e22ced28e7cef508a334673abc42064ec85f02d005ba1785ea4cec", size = 198953892 }, + { url = "https://files.pythonhosted.org/packages/ed/1f/6482380ec8dcec4894e7503490fc536d846b0d59694acad9cf99f27d0e7d/nvidia_nccl_cu12-2.23.4-py3-none-manylinux2014_x86_64.whl", hash = "sha256:b097258d9aab2fa9f686e33c6fe40ae57b27df60cedbd15d139701bb5509e0c1", size = 198954603 }, +] + +[[package]] +name = "oauthlib" +version = "3.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/fa/fbf4001037904031639e6bfbfc02badfc7e12f137a8afa254df6c4c8a670/oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918", size = 177352 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/80/cab10959dc1faead58dc8384a781dfbf93cb4d33d50988f7a69f1b7c9bbe/oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca", size = 151688 }, +] + +[[package]] +name = "opencv-python" +version = "4.10.0.84" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "python_full_version >= '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/b70a2d9ab205110d715906fc8ec83fbb00404aeb3a37a0654fdb68eb0c8c/opencv-python-4.10.0.84.tar.gz", hash = "sha256:72d234e4582e9658ffea8e9cae5b63d488ad06994ef12d81dc303b17472f3526", size = 95103981 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/66/82/564168a349148298aca281e342551404ef5521f33fba17b388ead0a84dc5/opencv_python-4.10.0.84-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:fc182f8f4cda51b45f01c64e4cbedfc2f00aff799debebc305d8d0210c43f251", size = 54835524 }, + { url = "https://files.pythonhosted.org/packages/64/4a/016cda9ad7cf18c58ba074628a4eaae8aa55f3fd06a266398cef8831a5b9/opencv_python-4.10.0.84-cp37-abi3-macosx_12_0_x86_64.whl", hash = "sha256:71e575744f1d23f79741450254660442785f45a0797212852ee5199ef12eed98", size = 56475426 }, + { url = "https://files.pythonhosted.org/packages/81/e4/7a987ebecfe5ceaf32db413b67ff18eb3092c598408862fff4d7cc3fd19b/opencv_python-4.10.0.84-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09a332b50488e2dda866a6c5573ee192fe3583239fb26ff2f7f9ceb0bc119ea6", size = 41746971 }, + { url = "https://files.pythonhosted.org/packages/3f/a4/d2537f47fd7fcfba966bd806e3ec18e7ee1681056d4b0a9c8d983983e4d5/opencv_python-4.10.0.84-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ace140fc6d647fbe1c692bcb2abce768973491222c067c131d80957c595b71f", size = 62548253 }, + { url = "https://files.pythonhosted.org/packages/1e/39/bbf57e7b9dab623e8773f6ff36385456b7ae7fa9357a5e53db732c347eac/opencv_python-4.10.0.84-cp37-abi3-win32.whl", hash = "sha256:2db02bb7e50b703f0a2d50c50ced72e95c574e1e5a0bb35a8a86d0b35c98c236", size = 28737688 }, + { url = "https://files.pythonhosted.org/packages/ec/6c/fab8113424af5049f85717e8e527ca3773299a3c6b02506e66436e19874f/opencv_python-4.10.0.84-cp37-abi3-win_amd64.whl", hash = "sha256:32dbbd94c26f611dc5cc6979e6b7aa1f55a64d6b463cc1dcd3c95505a63e48fe", size = 38842521 }, +] + +[[package]] +name = "optree" +version = "0.13.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f7/f2/56afdaeaae36b076659be7db8e72be0924dd64ebd1c131675c77f7e704a6/optree-0.13.1.tar.gz", hash = "sha256:af67856aa8073d237fe67313d84f8aeafac32c1cef7239c628a2768d02679c43", size = 155738 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/53/f3727cad24f16a06666f328f1212476988cadac9b9e7919ddfb2c22eb662/optree-0.13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f788b2ad120deb73b4908a74473cd6de79cfb9f33bbe9dcb59cea2e2477d4e28", size = 608270 }, + { url = "https://files.pythonhosted.org/packages/64/f2/68beb9da2dd52baa50e7a589ed2bd8434fdd70cdba06754aa5910263da06/optree-0.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2909cb42add6bb1a5a2b0243bdd8c4b861bf072f3741e26239481907ac8ad4e6", size = 325703 }, + { url = "https://files.pythonhosted.org/packages/45/db/08921e56f3425bf649eb593eb28775263c935d029985d35572dc5690cc1a/optree-0.13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbc5fa2ff5090389f3a906567446f01d692bd6fe5cfcc5ae2d5861f24e8e0e4d", size = 355813 }, + { url = "https://files.pythonhosted.org/packages/e5/e3/587e0d28dc2cee064902adfebca97db124e12b275dbe9c2b05a70a22345f/optree-0.13.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4711f5cac5a2a49c3d6c9f0eca7b77c22b452170bb33ea01c3214ebb17931db9", size = 402566 }, + { url = "https://files.pythonhosted.org/packages/8a/1d/0d5bbab8c99580b732b89ef2c5fcdd6ef410478295949fdf2984fa1bfc28/optree-0.13.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c4ab1d391b89cb88eb3c63383d5eb0930bc21141de9d5acd277feed9e38eb65", size = 397005 }, + { url = 
"https://files.pythonhosted.org/packages/16/fa/fc2a8183e14f0d195d25824bf65095ff32b34bd469614a6c30d0a596a30f/optree-0.13.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5e5f09c85ae558a6bdaea57e63168082e728e777391393e9e2792f0d15b7b59", size = 369400 }, + { url = "https://files.pythonhosted.org/packages/9f/42/8c08ce4ebb3d9a6e4415f1a97830c84879e2d1a43710a7c8a18b2c3e169d/optree-0.13.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c8ee1e988c634a451146b87d9ebdbf650a75dc1f52a9cffcd89fabb7289321c", size = 390179 }, + { url = "https://files.pythonhosted.org/packages/06/02/3a701d6307fdfefe4fcecbac644803e2a4314ab2406ff465e03129cc85f6/optree-0.13.1-cp313-cp313-win32.whl", hash = "sha256:5b6531cd4eb23fadbbf77faf834e1119da06d7af3154f55786b59953cd87bb8a", size = 264264 }, + { url = "https://files.pythonhosted.org/packages/ef/f9/8a1421181c5eb0c0f81d1423a900baeb3faba68a48747bbdffb7581239ac/optree-0.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:27d81dc43b522ba47ba7d2e7d91dbb486940348b1bf85caeb0afc2815c0aa492", size = 293682 }, + { url = "https://files.pythonhosted.org/packages/80/34/d1b1849a6240385c4a3af5da9425b11912204d0b1cf142d802815319b73a/optree-0.13.1-cp313-cp313-win_arm64.whl", hash = "sha256:f39c7174a3f3cdc3f5fe6fb4b832f608c40ac174d7567ed6734b2ee952094631", size = 293670 }, + { url = "https://files.pythonhosted.org/packages/0d/d6/f81e6748bcc3f35a2f570a814014e3418b0ed425d7cbc2b42d88d12863d5/optree-0.13.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:3010ae24e994f6e00071098d34e98e78eb995b7454a2ef629a0bf7df17441b24", size = 702861 }, + { url = "https://files.pythonhosted.org/packages/08/7f/70a2d02110ccb245bc57bd9ad57668acfea0ff364c27d7dfe1735ede79ed/optree-0.13.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5b5626c38d4a18a144063db5c1dbb558431d83ca10682324f74665a12214801f", size = 370740 }, + { url = "https://files.pythonhosted.org/packages/63/37/4ddf05267467809236203e2007e9443519c4d55e0744ce7eea1aa74dffee/optree-0.13.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1935639dd498a42367633e3877797e1330e39d44d48bbca1a136bb4dbe4c1bc9", size = 374695 }, + { url = "https://files.pythonhosted.org/packages/19/f2/51a63a799f6dce31813d7e02a7547394aebcb39f407e62038ecbd999d490/optree-0.13.1-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01819c3df950696f32c91faf8d376ae6b695ffdba18f330f1cab6b8e314e4612", size = 418671 }, + { url = "https://files.pythonhosted.org/packages/f0/7c/a08191e0c9202f2be9c415057eea3cf3a5af18e9a6d81f4c7b0e6faf0a1f/optree-0.13.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48c29d9c6c64c8dc48c8ee97f7c1d5cdb83e37320f0be0857c06ce4b97994aea", size = 414966 }, + { url = "https://files.pythonhosted.org/packages/8f/37/7bf815f4da7234e387863228b17246b42b8c02553882581a4013a64a88d0/optree-0.13.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:025d23400b8b579462a251420f0a9ae77d3d3593f84276f3465985731d79d722", size = 389219 }, + { url = "https://files.pythonhosted.org/packages/3d/84/bb521a66d3a84fe2f1500ef67d245c2cc1a26277fcaaf4bc70b22c06e99b/optree-0.13.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55e82426bef151149cfa41d68ac957730fcd420996c0db8324fca81aa6a810ba", size = 405377 }, + { url = "https://files.pythonhosted.org/packages/06/99/3eb53829c4c0b6dc20115d957d2d8e945630ddf40c656dc4e39c5a6e51f2/optree-0.13.1-cp313-cp313t-win32.whl", hash = 
"sha256:e40f018f522fcfd244688d1b3a360518e636ba7f636385aae0566eae3e7d29bc", size = 292734 }, + { url = "https://files.pythonhosted.org/packages/2f/59/d7601959ad0b90d309794c0975a256304488b4c5671f24e3e12101ade7ef/optree-0.13.1-cp313-cp313t-win_amd64.whl", hash = "sha256:d580f1bf23bb352c4db6b3544f282f1ac08dcb0d9ab537d25e56220353438cf7", size = 331457 }, + { url = "https://files.pythonhosted.org/packages/8b/36/c01a5bc34660d46c6a3b1fe090bbdc8c76af7b5c1a6613cc671aa6df8349/optree-0.13.1-cp313-cp313t-win_arm64.whl", hash = "sha256:c4d13f55dbd509d27be3af54d53b4ca0751bc518244ced6d0567e518e51452a2", size = 331470 }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, +] + +[[package]] +name = "pandas" +version = "2.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "python_full_version >= '3.13'" }, + { name = "python-dateutil" }, + { name = "pytz" }, + { name = "tzdata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9c/d6/9f8431bacc2e19dca897724cd097b1bb224a6ad5433784a44b587c7c13af/pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667", size = 4399213 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/22/3b8f4e0ed70644e85cfdcd57454686b9057c6c38d2f74fe4b8bc2527214a/pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015", size = 12477643 }, + { url = "https://files.pythonhosted.org/packages/e4/93/b3f5d1838500e22c8d793625da672f3eec046b1a99257666c94446969282/pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28", size = 11281573 }, + { url = "https://files.pythonhosted.org/packages/f5/94/6c79b07f0e5aab1dcfa35a75f4817f5c4f677931d4234afcd75f0e6a66ca/pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0", size = 15196085 }, + { url = "https://files.pythonhosted.org/packages/e8/31/aa8da88ca0eadbabd0a639788a6da13bb2ff6edbbb9f29aa786450a30a91/pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24", size = 12711809 }, + { url = "https://files.pythonhosted.org/packages/ee/7c/c6dbdb0cb2a4344cacfb8de1c5808ca885b2e4dcfde8008266608f9372af/pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659", size = 16356316 }, + { url = "https://files.pythonhosted.org/packages/57/b7/8b757e7d92023b832869fa8881a992696a0bfe2e26f72c9ae9f255988d42/pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb", size = 14022055 }, + { url = 
"https://files.pythonhosted.org/packages/3b/bc/4b18e2b8c002572c5a441a64826252ce5da2aa738855747247a971988043/pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d", size = 11481175 }, + { url = "https://files.pythonhosted.org/packages/76/a3/a5d88146815e972d40d19247b2c162e88213ef51c7c25993942c39dbf41d/pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468", size = 12615650 }, + { url = "https://files.pythonhosted.org/packages/9c/8c/f0fd18f6140ddafc0c24122c8a964e48294acc579d47def376fef12bcb4a/pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18", size = 11290177 }, + { url = "https://files.pythonhosted.org/packages/ed/f9/e995754eab9c0f14c6777401f7eece0943840b7a9fc932221c19d1abee9f/pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2", size = 14651526 }, + { url = "https://files.pythonhosted.org/packages/25/b0/98d6ae2e1abac4f35230aa756005e8654649d305df9a28b16b9ae4353bff/pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4", size = 11871013 }, + { url = "https://files.pythonhosted.org/packages/cc/57/0f72a10f9db6a4628744c8e8f0df4e6e21de01212c7c981d31e50ffc8328/pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d", size = 15711620 }, + { url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436 }, +] + +[[package]] +name = "patsy" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d1/81/74f6a65b848ffd16c18f920620ce999fe45fe27f01ab3911260ce4ed85e4/patsy-1.0.1.tar.gz", hash = "sha256:e786a9391eec818c054e359b737bbce692f051aee4c661f4141cc88fb459c0c4", size = 396010 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/2b/b50d3d08ea0fc419c183a84210571eba005328efa62b6b98bc28e9ead32a/patsy-1.0.1-py2.py3-none-any.whl", hash = "sha256:751fb38f9e97e62312e921a1954b81e1bb2bcda4f5eeabaf94db251ee791509c", size = 232923 }, +] + +[[package]] +name = "pillow" +version = "11.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a5/26/0d95c04c868f6bdb0c447e3ee2de5564411845e36a858cfd63766bc7b563/pillow-11.0.0.tar.gz", hash = "sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739", size = 46737780 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/63/24/e2e15e392d00fcf4215907465d8ec2a2f23bcec1481a8ebe4ae760459995/pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699", size = 3147300 }, + { url = "https://files.pythonhosted.org/packages/43/72/92ad4afaa2afc233dc44184adff289c2e77e8cd916b3ddb72ac69495bda3/pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38", size = 2978742 }, + { url = 
"https://files.pythonhosted.org/packages/9e/da/c8d69c5bc85d72a8523fe862f05ababdc52c0a755cfe3d362656bb86552b/pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2", size = 4194349 }, + { url = "https://files.pythonhosted.org/packages/cd/e8/686d0caeed6b998351d57796496a70185376ed9c8ec7d99e1d19ad591fc6/pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2", size = 4298714 }, + { url = "https://files.pythonhosted.org/packages/ec/da/430015cec620d622f06854be67fd2f6721f52fc17fca8ac34b32e2d60739/pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527", size = 4208514 }, + { url = "https://files.pythonhosted.org/packages/44/ae/7e4f6662a9b1cb5f92b9cc9cab8321c381ffbee309210940e57432a4063a/pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa", size = 4380055 }, + { url = "https://files.pythonhosted.org/packages/74/d5/1a807779ac8a0eeed57f2b92a3c32ea1b696e6140c15bd42eaf908a261cd/pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f", size = 4296751 }, + { url = "https://files.pythonhosted.org/packages/38/8c/5fa3385163ee7080bc13026d59656267daaaaf3c728c233d530e2c2757c8/pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb", size = 4430378 }, + { url = "https://files.pythonhosted.org/packages/ca/1d/ad9c14811133977ff87035bf426875b93097fb50af747793f013979facdb/pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798", size = 2249588 }, + { url = "https://files.pythonhosted.org/packages/fb/01/3755ba287dac715e6afdb333cb1f6d69740a7475220b4637b5ce3d78cec2/pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de", size = 2567509 }, + { url = "https://files.pythonhosted.org/packages/c0/98/2c7d727079b6be1aba82d195767d35fcc2d32204c7a5820f822df5330152/pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84", size = 2254791 }, + { url = "https://files.pythonhosted.org/packages/eb/38/998b04cc6f474e78b563716b20eecf42a2fa16a84589d23c8898e64b0ffd/pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b", size = 3150854 }, + { url = "https://files.pythonhosted.org/packages/13/8e/be23a96292113c6cb26b2aa3c8b3681ec62b44ed5c2bd0b258bd59503d3c/pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003", size = 2982369 }, + { url = "https://files.pythonhosted.org/packages/97/8a/3db4eaabb7a2ae8203cd3a332a005e4aba00067fc514aaaf3e9721be31f1/pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2", size = 4333703 }, + { url = "https://files.pythonhosted.org/packages/28/ac/629ffc84ff67b9228fe87a97272ab125bbd4dc462745f35f192d37b822f1/pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a", size = 4412550 }, + { 
url = "https://files.pythonhosted.org/packages/d6/07/a505921d36bb2df6868806eaf56ef58699c16c388e378b0dcdb6e5b2fb36/pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8", size = 4461038 }, + { url = "https://files.pythonhosted.org/packages/d6/b9/fb620dd47fc7cc9678af8f8bd8c772034ca4977237049287e99dda360b66/pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8", size = 2253197 }, + { url = "https://files.pythonhosted.org/packages/df/86/25dde85c06c89d7fc5db17940f07aae0a56ac69aa9ccb5eb0f09798862a8/pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904", size = 2572169 }, + { url = "https://files.pythonhosted.org/packages/51/85/9c33f2517add612e17f3381aee7c4072779130c634921a756c97bc29fb49/pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3", size = 2256828 }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, +] + +[[package]] +name = "pygments" +version = "2.18.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/62/8336eff65bcbc8e4cb5d05b55faf041285951b6e80f33e2bff2024788f31/pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", size = 4891905 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/3f/01c8b82017c199075f8f788d0d906b9ffbbc5a47dc9918a945e13d5a2bda/pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a", size = 1205513 }, +] + +[[package]] +name = "pyparsing" +version = "3.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/d5/e5aeee5387091148a19e1145f63606619cb5f20b83fccb63efae6474e7b2/pyparsing-3.2.0.tar.gz", hash = "sha256:cbf74e27246d595d9a74b186b810f6fbb86726dbf3b9532efb343f6d7294fe9c", size = 920984 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/ec/2eb3cd785efd67806c46c13a17339708ddc346cbb684eade7a6e6f79536a/pyparsing-3.2.0-py3-none-any.whl", hash = "sha256:93d9577b88da0bbea8cc8334ee8b918ed014968fd2ec383e868fb8afb1ccef84", size = 106921 }, +] + +[[package]] +name = "pytest" +version = "8.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, +] + +[[package]] +name = "pytest-cov" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/be/45/9b538de8cef30e17c7b45ef42f538a94889ed6a16f2387a6c89e73220651/pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0", size = 66945 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/3b/48e79f2cd6a61dbbd4807b4ed46cb564b4fd50a76166b1c4ea5c1d9e2371/pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35", size = 22949 }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892 }, +] + +[[package]] +name = "pytz" +version = "2024.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3a/31/3c70bf7603cc2dca0f19bdc53b4537a797747a58875b552c8c413d963a3f/pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a", size = 319692 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/c3/005fcca25ce078d2cc29fd559379817424e94885510568bc1bc53d7d5846/pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725", size = 508002 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = 
"https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, +] + +[[package]] +name = "requests-oauthlib" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/95/52/531ef197b426646f26b53815a7d2a67cb7a331ef098bb276db26a68ac49f/requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a", size = 52027 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6f/bb/5deac77a9af870143c684ab46a7934038a53eb4aa975bc0687ed6ca2c610/requests_oauthlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:2577c501a2fb8d05a304c09d090d6e47c306fef15809d102b327cf8364bddab5", size = 23892 }, +] + +[[package]] +name = "rich" +version = "13.9.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/3a/0316b28d0761c6734d6bc14e770d85506c986c85ffb239e688eeaab2c2bc/rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098", size = 223149 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90", size = 242424 }, +] + +[[package]] +name = "ruff" +version = "0.8.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/d0/8ff5b189d125f4260f2255d143bf2fa413b69c2610c405ace7a0a8ec81ec/ruff-0.8.1.tar.gz", hash = "sha256:3583db9a6450364ed5ca3f3b4225958b24f78178908d5c4bc0f46251ccca898f", size = 3313222 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/d6/1a6314e568db88acdbb5121ed53e2c52cebf3720d3437a76f82f923bf171/ruff-0.8.1-py3-none-linux_armv6l.whl", hash = "sha256:fae0805bd514066f20309f6742f6ee7904a773eb9e6c17c45d6b1600ca65c9b5", size = 10532605 }, + { url = "https://files.pythonhosted.org/packages/89/a8/a957a8812e31facffb6a26a30be0b5b4af000a6e30c7d43a22a5232a3398/ruff-0.8.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b8a4f7385c2285c30f34b200ca5511fcc865f17578383db154e098150ce0a087", size = 10278243 }, + { url = "https://files.pythonhosted.org/packages/a8/23/9db40fa19c453fabf94f7a35c61c58f20e8200b4734a20839515a19da790/ruff-0.8.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cd054486da0c53e41e0086e1730eb77d1f698154f910e0cd9e0d64274979a209", size = 9917739 }, + { url = "https://files.pythonhosted.org/packages/e2/a0/6ee2d949835d5701d832fc5acd05c0bfdad5e89cfdd074a171411f5ccad5/ruff-0.8.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2029b8c22da147c50ae577e621a5bfbc5d1fed75d86af53643d7a7aee1d23871", size = 10779153 }, + { url = "https://files.pythonhosted.org/packages/7a/25/9c11dca9404ef1eb24833f780146236131a3c7941de394bc356912ef1041/ruff-0.8.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2666520828dee7dfc7e47ee4ea0d928f40de72056d929a7c5292d95071d881d1", size = 10304387 }, + { url = "https://files.pythonhosted.org/packages/c8/b9/84c323780db1b06feae603a707d82dbbd85955c8c917738571c65d7d5aff/ruff-0.8.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:333c57013ef8c97a53892aa56042831c372e0bb1785ab7026187b7abd0135ad5", size = 11360351 }, + { url = "https://files.pythonhosted.org/packages/6b/e1/9d4bbb2ace7aad14ded20e4674a48cda5b902aed7a1b14e6b028067060c4/ruff-0.8.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:288326162804f34088ac007139488dcb43de590a5ccfec3166396530b58fb89d", size = 12022879 }, + { url = "https://files.pythonhosted.org/packages/75/28/752ff6120c0e7f9981bc4bc275d540c7f36db1379ba9db9142f69c88db21/ruff-0.8.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b12c39b9448632284561cbf4191aa1b005882acbc81900ffa9f9f471c8ff7e26", size = 11610354 }, + { url = "https://files.pythonhosted.org/packages/ba/8c/967b61c2cc8ebd1df877607fbe462bc1e1220b4a30ae3352648aec8c24bd/ruff-0.8.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:364e6674450cbac8e998f7b30639040c99d81dfb5bbc6dfad69bc7a8f916b3d1", size = 12813976 }, + { url = "https://files.pythonhosted.org/packages/7f/29/e059f945d6bd2d90213387b8c360187f2fefc989ddcee6bbf3c241329b92/ruff-0.8.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b22346f845fec132aa39cd29acb94451d030c10874408dbf776af3aaeb53284c", size = 11154564 }, + { url = 
"https://files.pythonhosted.org/packages/55/47/cbd05e5a62f3fb4c072bc65c1e8fd709924cad1c7ec60a1000d1e4ee8307/ruff-0.8.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b2f2f7a7e7648a2bfe6ead4e0a16745db956da0e3a231ad443d2a66a105c04fa", size = 10760604 }, + { url = "https://files.pythonhosted.org/packages/bb/ee/4c3981c47147c72647a198a94202633130cfda0fc95cd863a553b6f65c6a/ruff-0.8.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:adf314fc458374c25c5c4a4a9270c3e8a6a807b1bec018cfa2813d6546215540", size = 10391071 }, + { url = "https://files.pythonhosted.org/packages/6b/e6/083eb61300214590b188616a8ac6ae1ef5730a0974240fb4bec9c17de78b/ruff-0.8.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a885d68342a231b5ba4d30b8c6e1b1ee3a65cf37e3d29b3c74069cdf1ee1e3c9", size = 10896657 }, + { url = "https://files.pythonhosted.org/packages/77/bd/aacdb8285d10f1b943dbeb818968efca35459afc29f66ae3bd4596fbf954/ruff-0.8.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d2c16e3508c8cc73e96aa5127d0df8913d2290098f776416a4b157657bee44c5", size = 11228362 }, + { url = "https://files.pythonhosted.org/packages/39/72/fcb7ad41947f38b4eaa702aca0a361af0e9c2bf671d7fd964480670c297e/ruff-0.8.1-py3-none-win32.whl", hash = "sha256:93335cd7c0eaedb44882d75a7acb7df4b77cd7cd0d2255c93b28791716e81790", size = 8803476 }, + { url = "https://files.pythonhosted.org/packages/e4/ea/cae9aeb0f4822c44651c8407baacdb2e5b4dcd7b31a84e1c5df33aa2cc20/ruff-0.8.1-py3-none-win_amd64.whl", hash = "sha256:2954cdbe8dfd8ab359d4a30cd971b589d335a44d444b6ca2cb3d1da21b75e4b6", size = 9614463 }, + { url = "https://files.pythonhosted.org/packages/eb/76/fbb4bd23dfb48fa7758d35b744413b650a9fd2ddd93bca77e30376864414/ruff-0.8.1-py3-none-win_arm64.whl", hash = "sha256:55873cc1a473e5ac129d15eccb3c008c096b94809d693fc7053f588b67822737", size = 8959621 }, +] + +[[package]] +name = "scikit-learn" +version = "1.5.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "joblib" }, + { name = "numpy" }, + { name = "scipy" }, + { name = "threadpoolctl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/37/59/44985a2bdc95c74e34fef3d10cb5d93ce13b0e2a7baefffe1b53853b502d/scikit_learn-1.5.2.tar.gz", hash = "sha256:b4237ed7b3fdd0a4882792e68ef2545d5baa50aca3bb45aa7df468138ad8f94d", size = 7001680 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/50/8891028437858cc510e13578fe7046574a60c2aaaa92b02d64aac5b1b412/scikit_learn-1.5.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9a702e2de732bbb20d3bad29ebd77fc05a6b427dc49964300340e4c9328b3f5", size = 12025584 }, + { url = "https://files.pythonhosted.org/packages/d2/79/17feef8a1c14149436083bec0e61d7befb4812e272d5b20f9d79ea3e9ab1/scikit_learn-1.5.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:b0768ad641981f5d3a198430a1d31c3e044ed2e8a6f22166b4d546a5116d7908", size = 10959795 }, + { url = "https://files.pythonhosted.org/packages/b1/c8/f08313f9e2e656bd0905930ae8bf99a573ea21c34666a813b749c338202f/scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:178ddd0a5cb0044464fc1bfc4cca5b1833bfc7bb022d70b05db8530da4bb3dd3", size = 12077302 }, + { url = "https://files.pythonhosted.org/packages/a7/48/fbfb4dc72bed0fe31fe045fb30e924909ad03f717c36694351612973b1a9/scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7284ade780084d94505632241bf78c44ab3b6f1e8ccab3d2af58e0e950f9c12", size = 13002811 }, + { url = 
"https://files.pythonhosted.org/packages/a5/e7/0c869f9e60d225a77af90d2aefa7a4a4c0e745b149325d1450f0f0ce5399/scikit_learn-1.5.2-cp313-cp313-win_amd64.whl", hash = "sha256:b7b0f9a0b1040830d38c39b91b3a44e1b643f4b36e36567b80b7c6bd2202a27f", size = 10951354 }, +] + +[[package]] +name = "scipy" +version = "1.14.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/62/11/4d44a1f274e002784e4dbdb81e0ea96d2de2d1045b2132d5af62cc31fd28/scipy-1.14.1.tar.gz", hash = "sha256:5a275584e726026a5699459aa72f828a610821006228e841b94275c4a7c08417", size = 58620554 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/ef/ac98346db016ff18a6ad7626a35808f37074d25796fd0234c2bb0ed1e054/scipy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1729560c906963fc8389f6aac023739ff3983e727b1a4d87696b7bf108316a79", size = 39091068 }, + { url = "https://files.pythonhosted.org/packages/b9/cc/70948fe9f393b911b4251e96b55bbdeaa8cca41f37c26fd1df0232933b9e/scipy-1.14.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:4079b90df244709e675cdc8b93bfd8a395d59af40b72e339c2287c91860deb8e", size = 29875417 }, + { url = "https://files.pythonhosted.org/packages/3b/2e/35f549b7d231c1c9f9639f9ef49b815d816bf54dd050da5da1c11517a218/scipy-1.14.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e0cf28db0f24a38b2a0ca33a85a54852586e43cf6fd876365c86e0657cfe7d73", size = 23084508 }, + { url = "https://files.pythonhosted.org/packages/3f/d6/b028e3f3e59fae61fb8c0f450db732c43dd1d836223a589a8be9f6377203/scipy-1.14.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0c2f95de3b04e26f5f3ad5bb05e74ba7f68b837133a4492414b3afd79dfe540e", size = 25503364 }, + { url = "https://files.pythonhosted.org/packages/a7/2f/6c142b352ac15967744d62b165537a965e95d557085db4beab2a11f7943b/scipy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99722ea48b7ea25e8e015e8341ae74624f72e5f21fc2abd45f3a93266de4c5d", size = 35292639 }, + { url = "https://files.pythonhosted.org/packages/56/46/2449e6e51e0d7c3575f289f6acb7f828938eaab8874dbccfeb0cd2b71a27/scipy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5149e3fd2d686e42144a093b206aef01932a0059c2a33ddfa67f5f035bdfe13e", size = 40798288 }, + { url = "https://files.pythonhosted.org/packages/32/cd/9d86f7ed7f4497c9fd3e39f8918dd93d9f647ba80d7e34e4946c0c2d1a7c/scipy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4f5a7c49323533f9103d4dacf4e4f07078f360743dec7f7596949149efeec06", size = 42524647 }, + { url = "https://files.pythonhosted.org/packages/f5/1b/6ee032251bf4cdb0cc50059374e86a9f076308c1512b61c4e003e241efb7/scipy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:baff393942b550823bfce952bb62270ee17504d02a1801d7fd0719534dfb9c84", size = 44469524 }, +] + +[[package]] +name = "six" +version = "1.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/71/39/171f1c67cd00715f190ba0b100d606d440a28c93c7714febeca8b79af85e/six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", size = 34041 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/5a/e7c31adbe875f2abbb91bd84cf2dc52d792b5a01506781dbcf25c91daf11/six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254", size = 11053 }, +] + +[[package]] +name = "snowballstemmer" +version = "2.2.0" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/44/7b/af302bebf22c749c56c9c3e8ae13190b5b5db37a33d9068652e8f73b7089/snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1", size = 86699 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/dc/c02e01294f7265e63a7315fe086dd1df7dacb9f840a804da846b96d01b96/snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a", size = 93002 }, +] + +[[package]] +name = "soupsieve" +version = "2.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/ce/fbaeed4f9fb8b2daa961f90591662df6a86c1abf25c548329a86920aedfb/soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb", size = 101569 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/c2/fe97d779f3ef3b15f05c94a2f1e3d21732574ed441687474db9d342a7315/soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9", size = 36186 }, +] + +[[package]] +name = "sphinx" +version = "8.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "alabaster" }, + { name = "babel" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "docutils" }, + { name = "imagesize" }, + { name = "jinja2" }, + { name = "packaging" }, + { name = "pygments" }, + { name = "requests" }, + { name = "snowballstemmer" }, + { name = "sphinxcontrib-applehelp" }, + { name = "sphinxcontrib-devhelp" }, + { name = "sphinxcontrib-htmlhelp" }, + { name = "sphinxcontrib-jsmath" }, + { name = "sphinxcontrib-qthelp" }, + { name = "sphinxcontrib-serializinghtml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/be0b61178fe2cdcb67e2a92fc9ebb488e3c51c4f74a36a7824c0adf23425/sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927", size = 8184611 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/60/1ddff83a56d33aaf6f10ec8ce84b4c007d9368b21008876fceda7e7381ef/sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2", size = 3487125 }, +] + +[[package]] +name = "sphinx-autoapi" +version = "3.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astroid", marker = "python_full_version >= '3.13'" }, + { name = "jinja2" }, + { name = "pyyaml" }, + { name = "sphinx" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4a/eb/cc243583bb1d518ca3b10998c203d919a8ed90affd4831f2b61ad09043d2/sphinx_autoapi-3.4.0.tar.gz", hash = "sha256:e6d5371f9411bbb9fca358c00a9e57aef3ac94cbfc5df4bab285946462f69e0c", size = 29292 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/de/d6/f2acdc2567337fd5f5dc091a4e58d8a0fb14927b9779fc1e5ecee96d9824/sphinx_autoapi-3.4.0-py3-none-any.whl", hash = "sha256:4027fef2875a22c5f2a57107c71641d82f6166bf55beb407a47aaf3ef14e7b92", size = 34095 }, +] + +[[package]] +name = "sphinx-pyproject" +version = "0.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dom-toml" }, + { name = "domdf-python-tools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/39/97/aa8cec3da3e78f2c396b63332e2fe92fe43f7ff2ad19b3998735f28b0a7f/sphinx_pyproject-0.3.0.tar.gz", hash = "sha256:efc4ee9d96f579c4e4ed1ac273868c64565e88c8e37fe6ec2dc59fbcd57684ab", 
size = 7695 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/d5/89cb47c6399fd57ca451af15361499813c5d53e588cb6e00d89411ce724f/sphinx_pyproject-0.3.0-py3-none-any.whl", hash = "sha256:3aca968919f5ecd390f96874c3f64a43c9c7fcfdc2fd4191a781ad9228501b52", size = 23076 }, +] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/6e/b837e84a1a704953c62ef8776d45c3e8d759876b4a84fe14eba2859106fe/sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1", size = 20053 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/85/9ebeae2f76e9e77b952f4b274c27238156eae7979c5421fba91a28f4970d/sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5", size = 119300 }, +] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/d2/5beee64d3e4e747f316bae86b55943f51e82bb86ecd325883ef65741e7da/sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad", size = 12967 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/35/7a/987e583882f985fe4d7323774889ec58049171828b58c2217e7f79cdf44e/sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2", size = 82530 }, +] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/93/983afd9aa001e5201eab16b5a444ed5b9b0a7a010541e0ddfbbfd0b2470c/sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9", size = 22617 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/7b/18a8c0bcec9182c05a0b3ec2a776bba4ead82750a55ff798e8d406dae604/sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8", size = 98705 }, +] + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8", size = 5787 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071 }, +] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/68/bc/9104308fc285eb3e0b31b67688235db556cd5b0ef31d96f30e45f2e51cae/sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab", size = 17165 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/83/859ecdd180cacc13b1f7e857abf8582a64552ea7a061057a6c716e790fce/sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb", size = 
88743 }, +] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3b/44/6716b257b0aa6bfd51a1b31665d1c205fb12cb5ad56de752dfa15657de2f/sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d", size = 16080 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072 }, +] + +[[package]] +name = "statsmodels" +version = "0.14.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "packaging" }, + { name = "pandas" }, + { name = "patsy" }, + { name = "scipy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/3b/963a015dd8ea17e10c7b0e2f14d7c4daec903baf60a017e756b57953a4bf/statsmodels-0.14.4.tar.gz", hash = "sha256:5d69e0f39060dc72c067f9bb6e8033b6dccdb0bae101d76a7ef0bcc94e898b67", size = 20354802 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/f8/2662e6a101315ad336f75168fa9bac71f913ebcb92a6be84031d84a0f21f/statsmodels-0.14.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5a24f5d2c22852d807d2b42daf3a61740820b28d8381daaf59dcb7055bf1a79", size = 10186886 }, + { url = "https://files.pythonhosted.org/packages/fa/c0/ee6e8ed35fc1ca9c7538c592f4974547bf72274bc98db1ae4a6e87481a83/statsmodels-0.14.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df4f7864606fa843d7e7c0e6af288f034a2160dba14e6ccc09020a3cf67cb092", size = 9880066 }, + { url = "https://files.pythonhosted.org/packages/d1/97/3380ca6d8fd66cfb3d12941e472642f26e781a311c355a4e97aab2ed0216/statsmodels-0.14.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91341cbde9e8bea5fb419a76e09114e221567d03f34ca26e6d67ae2c27d8fe3c", size = 10283521 }, + { url = "https://files.pythonhosted.org/packages/fe/2a/55c5b5c5e5124a202ea3fe0bcdbdeceaf91b4ec6164b8434acb9dd97409c/statsmodels-0.14.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1322286a7bfdde2790bf72d29698a1b76c20b8423a55bdcd0d457969d0041f72", size = 10723228 }, + { url = "https://files.pythonhosted.org/packages/4f/76/67747e49dc758daae06f33aad8247b718cd7d224f091d2cd552681215bb2/statsmodels-0.14.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e31b95ac603415887c9f0d344cb523889cf779bc52d68e27e2d23c358958fec7", size = 10859503 }, + { url = "https://files.pythonhosted.org/packages/1d/eb/cb8b01f5edf8f135eb3d0553d159db113a35b2948d0e51eeb735e7ae09ea/statsmodels-0.14.4-cp313-cp313-win_amd64.whl", hash = "sha256:81030108d27aecc7995cac05aa280cf8c6025f6a6119894eef648997936c2dd0", size = 9817574 }, +] + +[[package]] +name = "sympy" +version = "1.13.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/11/8a/5a7fd6284fa8caac23a26c9ddf9c30485a48169344b4bd3b0f02fef1890f/sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9", size = 7533196 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/ff/c87e0622b1dadea79d2fb0b25ade9ed98954c9033722eb707053d310d4f3/sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73", size = 6189483 }, +] + 
+[[package]] +name = "thealgorithms-python" +version = "0.0.1" +source = { virtual = "." } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "fake-useragent" }, + { name = "imageio" }, + { name = "keras" }, + { name = "lxml" }, + { name = "matplotlib" }, + { name = "numpy" }, + { name = "opencv-python" }, + { name = "pandas" }, + { name = "pillow" }, + { name = "requests" }, + { name = "rich" }, + { name = "scikit-learn" }, + { name = "sphinx-pyproject" }, + { name = "statsmodels" }, + { name = "sympy" }, + { name = "tweepy" }, + { name = "typing-extensions" }, + { name = "xgboost" }, +] + +[package.dev-dependencies] +dev = [ + { name = "pytest" }, + { name = "pytest-cov" }, +] +docs = [ + { name = "myst-parser" }, + { name = "sphinx-autoapi" }, + { name = "sphinx-pyproject" }, +] +euler-validate = [ + { name = "numpy" }, + { name = "pytest" }, + { name = "pytest-cov" }, + { name = "requests" }, +] +lint = [ + { name = "codespell" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "beautifulsoup4", specifier = ">=4.12.3" }, + { name = "fake-useragent", specifier = ">=1.5.1" }, + { name = "imageio", specifier = ">=2.36.1" }, + { name = "keras", specifier = ">=3.7" }, + { name = "lxml", specifier = ">=5.3" }, + { name = "matplotlib", specifier = ">=3.9.3" }, + { name = "numpy", specifier = ">=2.1.3" }, + { name = "opencv-python", specifier = ">=4.10.0.84" }, + { name = "pandas", specifier = ">=2.2.3" }, + { name = "pillow", specifier = ">=11" }, + { name = "requests", specifier = ">=2.32.3" }, + { name = "rich", specifier = ">=13.9.4" }, + { name = "scikit-learn", specifier = ">=1.5.2" }, + { name = "sphinx-pyproject", specifier = ">=0.3" }, + { name = "statsmodels", specifier = ">=0.14.4" }, + { name = "sympy", specifier = ">=1.13.3" }, + { name = "tweepy", specifier = ">=4.14" }, + { name = "typing-extensions", specifier = ">=4.12.2" }, + { name = "xgboost", specifier = ">=2.1.3" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "pytest", specifier = ">=8.3.4" }, + { name = "pytest-cov", specifier = ">=6" }, +] +docs = [ + { name = "myst-parser", specifier = ">=4.0.0" }, + { name = "sphinx-autoapi", specifier = ">=3.4.0" }, + { name = "sphinx-pyproject", specifier = ">=0.3.0" }, +] +euler-validate = [ + { name = "numpy", specifier = ">=2.1.3" }, + { name = "pytest", specifier = ">=8.3.4" }, + { name = "pytest-cov", specifier = ">=6.0.0" }, + { name = "requests", specifier = ">=2.32.3" }, +] +lint = [ + { name = "codespell", specifier = ">=2.3" }, + { name = "ruff", specifier = ">=0.8.1" }, +] + +[[package]] +name = "threadpoolctl" +version = "3.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bd/55/b5148dcbf72f5cde221f8bfe3b6a540da7aa1842f6b491ad979a6c8b84af/threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107", size = 41936 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/2c/ffbf7a134b9ab11a67b0cf0726453cedd9c5043a4fe7a35d1cefa9a1bcfb/threadpoolctl-3.5.0-py3-none-any.whl", hash = "sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467", size = 18414 }, +] + +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 
17175 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708 }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582 }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543 }, + { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691 }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170 }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530 }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666 }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954 }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724 }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383 }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, +] + +[[package]] +name = "tweepy" +version = "4.14.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, + { name = "requests-oauthlib" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/75/1c/0db8c3cf9d31bf63853ff612d201060ae78e6db03468a70e063bef0eda62/tweepy-4.14.0.tar.gz", hash = "sha256:1f9f1707d6972de6cff6c5fd90dfe6a449cd2e0d70bd40043ffab01e07a06c8c", size = 88623 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/4d/78/ba0065d5636bbf4a35b78c4f81b74e7858b609cdf69e629d6da5c91b9d92/tweepy-4.14.0-py3-none-any.whl", hash = "sha256:db6d3844ccc0c6d27f339f12ba8acc89912a961da513c1ae50fa2be502a56afb", size = 98520 }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 }, +] + +[[package]] +name = "tzdata" +version = "2024.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e1/34/943888654477a574a86a98e9896bae89c7aa15078ec29f490fef2f1e5384/tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc", size = 193282 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a6/ab/7e5f53c3b9d14972843a647d8d7a853969a58aecc7559cb3267302c94774/tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd", size = 346586 }, +] + +[[package]] +name = "urllib3" +version = "2.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ed/63/22ba4ebfe7430b76388e7cd448d5478814d3032121827c12a2cc287e2260/urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9", size = 300677 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", size = 126338 }, +] + +[[package]] +name = "xgboost" +version = "2.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "nvidia-nccl-cu12", marker = "platform_machine != 'aarch64' and platform_system == 'Linux'" }, + { name = "scipy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/48/b0/131ffc4a15fd3acee9be3a7baa6b2fa6faa479799c51b880de9fc3ddf550/xgboost-2.1.3.tar.gz", hash = "sha256:7699ec4226156887d3afc665c63ab87469db9d46e361c702ba9fccd22535730c", size = 1090326 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cd/c6/773ebd84414879bd0566788868ae46a6574f6efaf81e694f01ea1fed3277/xgboost-2.1.3-py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.macosx_12_0_x86_64.whl", hash = "sha256:c9b0c92f13e3650e1e1cf92ff9ecef3efc6f5dc3d10ce17858df2081a89976ef", size = 2139909 }, + { url = "https://files.pythonhosted.org/packages/28/3c/ddf5d9eb742cdb7fbcd5c854bce07471bad01194ac37de91db64fbef0c58/xgboost-2.1.3-py3-none-macosx_12_0_arm64.whl", hash = "sha256:fcbf1912a852bd07a7007be350c8dc3a484c5e775b612f2b3cd082fc76240eb3", size = 1938631 }, + { url = "https://files.pythonhosted.org/packages/4a/3a/8cd69a216993fd9d54ceb079d1b357b7ef50678b3c2695d8a71962b8d0aa/xgboost-2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:27af88df1162cee016c67f267a0a16c3db1c48f256e12f64c45c8f8edf9571cd", size = 4441261 }, + { url = 
"https://files.pythonhosted.org/packages/48/bc/05d7db90d421c5e3d681a12fd1eb087e37bf2e9bbe2b105422d6319ecc92/xgboost-2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:32a43526208fe676527c698cb852e0e9515e6d7294143780e476d335290a131b", size = 4532380 }, + { url = "https://files.pythonhosted.org/packages/0f/c8/f679a816c06a4a6d23da3f4b448d5f0615b51de2886ad3e3e695d17121b3/xgboost-2.1.3-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:5d33090880f3d474f8cf5dda557c7bf8dbceefb62f2fd655c77efcabb9cac222", size = 4207000 }, + { url = "https://files.pythonhosted.org/packages/32/93/66826e2f50cefecbb0a44bd1e667316bf0a3c8e78cd1f0cdf52f5b2c5c6f/xgboost-2.1.3-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:8d85d38553855a1f8c40b8fbccca86af19202f91b244e2c7f77afbb2a6d9d785", size = 153894508 }, + { url = "https://files.pythonhosted.org/packages/70/58/2f94976df39470fb00eec2cb4f914dde44cd0df8d96483208bf7db4bc97e/xgboost-2.1.3-py3-none-win_amd64.whl", hash = "sha256:25c0ffcbd62aac5bc22c79e08b5b2edad1d5e37f16610ebefa5f06f3e2ea3d96", size = 124909665 }, +] From 98391e33ea2a87375a7f744eba3d57918237b4e7 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 12 Dec 2024 18:40:43 +0100 Subject: [PATCH 1459/1543] [pre-commit.ci] pre-commit autoupdate (#12428) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.1 → v0.8.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.1...v0.8.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bef251749c19..884b10661a49 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.1 + rev: v0.8.2 hooks: - id: ruff - id: ruff-format From f8e595e048f1cbd763e0a1f8c0ffb4dff335b841 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 20:53:36 +0100 Subject: [PATCH 1460/1543] [pre-commit.ci] pre-commit autoupdate (#12439) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.2 → v0.8.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.2...v0.8.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 884b10661a49..0c8108ac55be 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.2 + rev: v0.8.3 hooks: - id: ruff - id: ruff-format From 4abfce2791c081f65580bc1fefdf5a4d8ee7b5fc Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 23 Dec 2024 06:55:22 +0300 Subject: [PATCH 1461/1543] Fix sphinx/build_docs warnings for audio_filters (#12449) * updating DIRECTORY.md * Fix sphinx/build_docs warnings for audio_filters * Improve * Fix * Fix * Fix --------- Co-authored-by: MaximSmolskiy --- audio_filters/iir_filter.py | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/audio_filters/iir_filter.py 
b/audio_filters/iir_filter.py index f3c1ad43b001..fa3e6c54b33f 100644 --- a/audio_filters/iir_filter.py +++ b/audio_filters/iir_filter.py @@ -10,13 +10,17 @@ class IIRFilter: Implementation details: Based on the 2nd-order function from - https://en.wikipedia.org/wiki/Digital_biquad_filter, + https://en.wikipedia.org/wiki/Digital_biquad_filter, this generalized N-order function was made. Using the following transfer function - H(z)=\frac{b_{0}+b_{1}z^{-1}+b_{2}z^{-2}+...+b_{k}z^{-k}}{a_{0}+a_{1}z^{-1}+a_{2}z^{-2}+...+a_{k}z^{-k}} + .. math:: H(z)=\frac{b_{0}+b_{1}z^{-1}+b_{2}z^{-2}+...+b_{k}z^{-k}} + {a_{0}+a_{1}z^{-1}+a_{2}z^{-2}+...+a_{k}z^{-k}} + we can rewrite this to - y[n]={\frac{1}{a_{0}}}\left(\left(b_{0}x[n]+b_{1}x[n-1]+b_{2}x[n-2]+...+b_{k}x[n-k]\right)-\left(a_{1}y[n-1]+a_{2}y[n-2]+...+a_{k}y[n-k]\right)\right) + .. math:: y[n]={\frac{1}{a_{0}}} + \left(\left(b_{0}x[n]+b_{1}x[n-1]+b_{2}x[n-2]+...+b_{k}x[n-k]\right)- + \left(a_{1}y[n-1]+a_{2}y[n-2]+...+a_{k}y[n-k]\right)\right) """ def __init__(self, order: int) -> None: @@ -34,17 +38,19 @@ def __init__(self, order: int) -> None: def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None: """ - Set the coefficients for the IIR filter. These should both be of size order + 1. - a_0 may be left out, and it will use 1.0 as default value. + Set the coefficients for the IIR filter. + These should both be of size `order` + 1. + :math:`a_0` may be left out, and it will use 1.0 as default value. This method works well with scipy's filter design functions - >>> # Make a 2nd-order 1000Hz butterworth lowpass filter - >>> import scipy.signal - >>> b_coeffs, a_coeffs = scipy.signal.butter(2, 1000, - ... btype='lowpass', - ... fs=48000) - >>> filt = IIRFilter(2) - >>> filt.set_coefficients(a_coeffs, b_coeffs) + + >>> # Make a 2nd-order 1000Hz butterworth lowpass filter + >>> import scipy.signal + >>> b_coeffs, a_coeffs = scipy.signal.butter(2, 1000, + ... btype='lowpass', + ... fs=48000) + >>> filt = IIRFilter(2) + >>> filt.set_coefficients(a_coeffs, b_coeffs) """ if len(a_coeffs) < self.order: a_coeffs = [1.0, *a_coeffs] @@ -68,7 +74,7 @@ def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None def process(self, sample: float) -> float: """ - Calculate y[n] + Calculate :math:`y[n]` >>> filt = IIRFilter(2) >>> filt.process(0) From 47cd21a110d8e2fc038414bc7f3c7ca8e91d6653 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 23 Dec 2024 14:56:42 +0300 Subject: [PATCH 1462/1543] Fix sphinx/build_docs warnings for cellular_automata (#12454) * updating DIRECTORY.md * Fix sphinx/build_docs warnings for cellular_automata * Fix * Improve --------- Co-authored-by: MaximSmolskiy --- cellular_automata/wa_tor.py | 54 ++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/cellular_automata/wa_tor.py b/cellular_automata/wa_tor.py index e423d1595bdb..29f7ea510bfe 100644 --- a/cellular_automata/wa_tor.py +++ b/cellular_automata/wa_tor.py @@ -1,9 +1,9 @@ """ Wa-Tor algorithm (1984) -@ https://en.wikipedia.org/wiki/Wa-Tor -@ https://beltoforion.de/en/wator/ -@ https://beltoforion.de/en/wator/images/wator_medium.webm +| @ https://en.wikipedia.org/wiki/Wa-Tor +| @ https://beltoforion.de/en/wator/ +| @ https://beltoforion.de/en/wator/images/wator_medium.webm This solution aims to completely remove any systematic approach to the Wa-Tor planet, and utilise fully random methods. 
@@ -97,8 +97,8 @@ class WaTor: :attr time_passed: A function that is called every time time passes (a chronon) in order to visually display - the new Wa-Tor planet. The time_passed function can block - using time.sleep to slow the algorithm progression. + the new Wa-Tor planet. The `time_passed` function can block + using ``time.sleep`` to slow the algorithm progression. >>> wt = WaTor(10, 15) >>> wt.width @@ -216,7 +216,7 @@ def get_surrounding_prey(self, entity: Entity) -> list[Entity]: """ Returns all the prey entities around (N, S, E, W) a predator entity. - Subtly different to the try_to_move_to_unoccupied square. + Subtly different to the `move_and_reproduce`. >>> wt = WaTor(WIDTH, HEIGHT) >>> wt.set_planet([ @@ -260,7 +260,7 @@ def move_and_reproduce( """ Attempts to move to an unoccupied neighbouring square in either of the four directions (North, South, East, West). - If the move was successful and the remaining_reproduction time is + If the move was successful and the `remaining_reproduction_time` is equal to 0, then a new prey or predator can also be created in the previous square. @@ -351,12 +351,12 @@ def perform_prey_actions( Performs the actions for a prey entity For prey the rules are: - 1. At each chronon, a prey moves randomly to one of the adjacent unoccupied - squares. If there are no free squares, no movement takes place. - 2. Once a prey has survived a certain number of chronons it may reproduce. - This is done as it moves to a neighbouring square, - leaving behind a new prey in its old position. - Its reproduction time is also reset to zero. + 1. At each chronon, a prey moves randomly to one of the adjacent unoccupied + squares. If there are no free squares, no movement takes place. + 2. Once a prey has survived a certain number of chronons it may reproduce. + This is done as it moves to a neighbouring square, + leaving behind a new prey in its old position. + Its reproduction time is also reset to zero. >>> wt = WaTor(WIDTH, HEIGHT) >>> reproducable_entity = Entity(True, coords=(0, 1)) @@ -382,15 +382,15 @@ def perform_predator_actions( :param occupied_by_prey_coords: Move to this location if there is prey there For predators the rules are: - 1. At each chronon, a predator moves randomly to an adjacent square occupied - by a prey. If there is none, the predator moves to a random adjacent - unoccupied square. If there are no free squares, no movement takes place. - 2. At each chronon, each predator is deprived of a unit of energy. - 3. Upon reaching zero energy, a predator dies. - 4. If a predator moves to a square occupied by a prey, - it eats the prey and earns a certain amount of energy. - 5. Once a predator has survived a certain number of chronons - it may reproduce in exactly the same way as the prey. + 1. At each chronon, a predator moves randomly to an adjacent square occupied + by a prey. If there is none, the predator moves to a random adjacent + unoccupied square. If there are no free squares, no movement takes place. + 2. At each chronon, each predator is deprived of a unit of energy. + 3. Upon reaching zero energy, a predator dies. + 4. If a predator moves to a square occupied by a prey, + it eats the prey and earns a certain amount of energy. + 5. Once a predator has survived a certain number of chronons + it may reproduce in exactly the same way as the prey. 
>>> wt = WaTor(WIDTH, HEIGHT) >>> wt.set_planet([[Entity(True, coords=(0, 0)), Entity(False, coords=(0, 1))]]) @@ -430,7 +430,7 @@ def perform_predator_actions( def run(self, *, iteration_count: int) -> None: """ - Emulate time passing by looping iteration_count times + Emulate time passing by looping `iteration_count` times >>> wt = WaTor(WIDTH, HEIGHT) >>> wt.run(iteration_count=PREDATOR_INITIAL_ENERGY_VALUE - 1) @@ -484,11 +484,9 @@ def visualise(wt: WaTor, iter_number: int, *, colour: bool = True) -> None: an ascii code in terminal to clear and re-print the Wa-Tor planet at intervals. - Uses ascii colour codes to colourfully display - the predators and prey. - - (0x60f197) Prey = # - (0xfffff) Predator = x + Uses ascii colour codes to colourfully display the predators and prey: + * (0x60f197) Prey = ``#`` + * (0xfffff) Predator = ``x`` >>> wt = WaTor(30, 30) >>> wt.set_planet([ From c5e603ae4234e5d516d700b01d47f78d42c18008 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 23 Dec 2024 15:43:16 +0300 Subject: [PATCH 1463/1543] Fix sphinx/build_docs warnings for geodesy (#12462) * updating DIRECTORY.md * Fix sphinx/build_docs warnings for geodesy/haversine_distance.py * Improve --------- Co-authored-by: MaximSmolskiy --- geodesy/haversine_distance.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/geodesy/haversine_distance.py b/geodesy/haversine_distance.py index 93e625770f9d..39cd250af965 100644 --- a/geodesy/haversine_distance.py +++ b/geodesy/haversine_distance.py @@ -21,10 +21,11 @@ def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> fl computation like Haversine can be handy for shorter range distances. Args: - lat1, lon1: latitude and longitude of coordinate 1 - lat2, lon2: latitude and longitude of coordinate 2 + * `lat1`, `lon1`: latitude and longitude of coordinate 1 + * `lat2`, `lon2`: latitude and longitude of coordinate 2 Returns: geographical distance between two points in metres + >>> from collections import namedtuple >>> point_2d = namedtuple("point_2d", "lat lon") >>> SAN_FRANCISCO = point_2d(37.774856, -122.424227) From b0cb13fea54854b3a60eced27026db9a9c5dc5ab Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 23 Dec 2024 16:11:58 +0300 Subject: [PATCH 1464/1543] Fix sphinx/build_docs warnings for greedy_methods (#12463) * updating DIRECTORY.md * Fix sphinx/build_docs warnings for greedy_methods * Improve --------- Co-authored-by: MaximSmolskiy --- greedy_methods/smallest_range.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/greedy_methods/smallest_range.py b/greedy_methods/smallest_range.py index e2b7f8d7e96a..9adb12bf9029 100644 --- a/greedy_methods/smallest_range.py +++ b/greedy_methods/smallest_range.py @@ -14,12 +14,13 @@ def smallest_range(nums: list[list[int]]) -> list[int]: Uses min heap for efficiency. The range includes at least one number from each list. Args: - nums: List of k sorted integer lists. + `nums`: List of k sorted integer lists. Returns: list: Smallest range as a two-element list. 
Examples: + >>> smallest_range([[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]]) [20, 24] >>> smallest_range([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) From 04fbfd6eae38b9897c1b8ff6aee487dd2523665b Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 24 Dec 2024 03:14:11 +0300 Subject: [PATCH 1465/1543] Fix sphinx/build_docs warnings for maths/volume (#12464) * Fix sphinx/build_docs warnings for maths/volume * Fix * Fix * Fix * Fix * Fix * Fix * Fix --- maths/volume.py | 149 ++++++++++++++++++++++++++++++------------------ 1 file changed, 95 insertions(+), 54 deletions(-) diff --git a/maths/volume.py b/maths/volume.py index 23fcf6be6ef1..08bdf72b013b 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -1,5 +1,6 @@ """ Find the volume of various shapes. + * https://en.wikipedia.org/wiki/Volume * https://en.wikipedia.org/wiki/Spherical_cap """ @@ -12,6 +13,7 @@ def vol_cube(side_length: float) -> float: """ Calculate the Volume of a Cube. + >>> vol_cube(1) 1.0 >>> vol_cube(3) @@ -33,6 +35,7 @@ def vol_cube(side_length: float) -> float: def vol_spherical_cap(height: float, radius: float) -> float: """ Calculate the volume of the spherical cap. + >>> vol_spherical_cap(1, 2) 5.235987755982988 >>> vol_spherical_cap(1.6, 2.6) @@ -57,20 +60,29 @@ def vol_spherical_cap(height: float, radius: float) -> float: def vol_spheres_intersect( radius_1: float, radius_2: float, centers_distance: float ) -> float: - """ + r""" Calculate the volume of the intersection of two spheres. + The intersection is composed by two spherical caps and therefore its volume is the - sum of the volumes of the spherical caps. First, it calculates the heights (h1, h2) - of the spherical caps, then the two volumes and it returns the sum. + sum of the volumes of the spherical caps. + First, it calculates the heights :math:`(h_1, h_2)` of the spherical caps, + then the two volumes and it returns the sum. The height formulas are - h1 = (radius_1 - radius_2 + centers_distance) - * (radius_1 + radius_2 - centers_distance) - / (2 * centers_distance) - h2 = (radius_2 - radius_1 + centers_distance) - * (radius_2 + radius_1 - centers_distance) - / (2 * centers_distance) - if centers_distance is 0 then it returns the volume of the smallers sphere - :return vol_spherical_cap(h1, radius_2) + vol_spherical_cap(h2, radius_1) + + .. math:: + h_1 = \frac{(radius_1 - radius_2 + centers\_distance) + \cdot (radius_1 + radius_2 - centers\_distance)} + {2 \cdot centers\_distance} + + h_2 = \frac{(radius_2 - radius_1 + centers\_distance) + \cdot (radius_2 + radius_1 - centers\_distance)} + {2 \cdot centers\_distance} + + if `centers_distance` is 0 then it returns the volume of the smallers sphere + + :return: ``vol_spherical_cap`` (:math:`h_1`, :math:`radius_2`) + + ``vol_spherical_cap`` (:math:`h_2`, :math:`radius_1`) + >>> vol_spheres_intersect(2, 2, 1) 21.205750411731103 >>> vol_spheres_intersect(2.6, 2.6, 1.6) @@ -112,14 +124,18 @@ def vol_spheres_intersect( def vol_spheres_union( radius_1: float, radius_2: float, centers_distance: float ) -> float: - """ + r""" Calculate the volume of the union of two spheres that possibly intersect. - It is the sum of sphere A and sphere B minus their intersection. - First, it calculates the volumes (v1, v2) of the spheres, - then the volume of the intersection (i) and it returns the sum v1+v2-i. 
- If centers_distance is 0 then it returns the volume of the larger sphere - :return vol_sphere(radius_1) + vol_sphere(radius_2) - - vol_spheres_intersect(radius_1, radius_2, centers_distance) + + It is the sum of sphere :math:`A` and sphere :math:`B` minus their intersection. + First, it calculates the volumes :math:`(v_1, v_2)` of the spheres, + then the volume of the intersection :math:`i` and + it returns the sum :math:`v_1 + v_2 - i`. + If `centers_distance` is 0 then it returns the volume of the larger sphere + + :return: ``vol_sphere`` (:math:`radius_1`) + ``vol_sphere`` (:math:`radius_2`) + - ``vol_spheres_intersect`` + (:math:`radius_1`, :math:`radius_2`, :math:`centers\_distance`) >>> vol_spheres_union(2, 2, 1) 45.814892864851146 @@ -157,7 +173,9 @@ def vol_spheres_union( def vol_cuboid(width: float, height: float, length: float) -> float: """ Calculate the Volume of a Cuboid. - :return multiple of width, length and height + + :return: multiple of `width`, `length` and `height` + >>> vol_cuboid(1, 1, 1) 1.0 >>> vol_cuboid(1, 2, 3) @@ -185,10 +203,12 @@ def vol_cuboid(width: float, height: float, length: float) -> float: def vol_cone(area_of_base: float, height: float) -> float: - """ - Calculate the Volume of a Cone. - Wikipedia reference: https://en.wikipedia.org/wiki/Cone - :return (1/3) * area_of_base * height + r""" + | Calculate the Volume of a Cone. + | Wikipedia reference: https://en.wikipedia.org/wiki/Cone + + :return: :math:`\frac{1}{3} \cdot area\_of\_base \cdot height` + >>> vol_cone(10, 3) 10.0 >>> vol_cone(1, 1) @@ -212,10 +232,12 @@ def vol_cone(area_of_base: float, height: float) -> float: def vol_right_circ_cone(radius: float, height: float) -> float: - """ - Calculate the Volume of a Right Circular Cone. - Wikipedia reference: https://en.wikipedia.org/wiki/Cone - :return (1/3) * pi * radius^2 * height + r""" + | Calculate the Volume of a Right Circular Cone. + | Wikipedia reference: https://en.wikipedia.org/wiki/Cone + + :return: :math:`\frac{1}{3} \cdot \pi \cdot radius^2 \cdot height` + >>> vol_right_circ_cone(2, 3) 12.566370614359172 >>> vol_right_circ_cone(0, 0) @@ -237,10 +259,12 @@ def vol_right_circ_cone(radius: float, height: float) -> float: def vol_prism(area_of_base: float, height: float) -> float: - """ - Calculate the Volume of a Prism. - Wikipedia reference: https://en.wikipedia.org/wiki/Prism_(geometry) - :return V = Bh + r""" + | Calculate the Volume of a Prism. + | Wikipedia reference: https://en.wikipedia.org/wiki/Prism_(geometry) + + :return: :math:`V = B \cdot h` + >>> vol_prism(10, 2) 20.0 >>> vol_prism(11, 1) @@ -264,10 +288,12 @@ def vol_prism(area_of_base: float, height: float) -> float: def vol_pyramid(area_of_base: float, height: float) -> float: - """ - Calculate the Volume of a Pyramid. - Wikipedia reference: https://en.wikipedia.org/wiki/Pyramid_(geometry) - :return (1/3) * Bh + r""" + | Calculate the Volume of a Pyramid. + | Wikipedia reference: https://en.wikipedia.org/wiki/Pyramid_(geometry) + + :return: :math:`\frac{1}{3} \cdot B \cdot h` + >>> vol_pyramid(10, 3) 10.0 >>> vol_pyramid(1.5, 3) @@ -291,10 +317,12 @@ def vol_pyramid(area_of_base: float, height: float) -> float: def vol_sphere(radius: float) -> float: - """ - Calculate the Volume of a Sphere. - Wikipedia reference: https://en.wikipedia.org/wiki/Sphere - :return (4/3) * pi * r^3 + r""" + | Calculate the Volume of a Sphere. 
+ | Wikipedia reference: https://en.wikipedia.org/wiki/Sphere + + :return: :math:`\frac{4}{3} \cdot \pi \cdot r^3` + >>> vol_sphere(5) 523.5987755982989 >>> vol_sphere(1) @@ -315,10 +343,13 @@ def vol_sphere(radius: float) -> float: def vol_hemisphere(radius: float) -> float: - """Calculate the volume of a hemisphere - Wikipedia reference: https://en.wikipedia.org/wiki/Hemisphere - Other references: https://www.cuemath.com/geometry/hemisphere - :return 2/3 * pi * radius^3 + r""" + | Calculate the volume of a hemisphere + | Wikipedia reference: https://en.wikipedia.org/wiki/Hemisphere + | Other references: https://www.cuemath.com/geometry/hemisphere + + :return: :math:`\frac{2}{3} \cdot \pi \cdot radius^3` + >>> vol_hemisphere(1) 2.0943951023931953 >>> vol_hemisphere(7) @@ -339,9 +370,12 @@ def vol_hemisphere(radius: float) -> float: def vol_circular_cylinder(radius: float, height: float) -> float: - """Calculate the Volume of a Circular Cylinder. - Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder - :return pi * radius^2 * height + r""" + | Calculate the Volume of a Circular Cylinder. + | Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder + + :return: :math:`\pi \cdot radius^2 \cdot height` + >>> vol_circular_cylinder(1, 1) 3.141592653589793 >>> vol_circular_cylinder(4, 3) @@ -368,7 +402,9 @@ def vol_circular_cylinder(radius: float, height: float) -> float: def vol_hollow_circular_cylinder( inner_radius: float, outer_radius: float, height: float ) -> float: - """Calculate the Volume of a Hollow Circular Cylinder. + """ + Calculate the Volume of a Hollow Circular Cylinder. + >>> vol_hollow_circular_cylinder(1, 2, 3) 28.274333882308138 >>> vol_hollow_circular_cylinder(1.6, 2.6, 3.6) @@ -405,8 +441,9 @@ def vol_hollow_circular_cylinder( def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> float: - """Calculate the Volume of a Conical Frustum. - Wikipedia reference: https://en.wikipedia.org/wiki/Frustum + """ + | Calculate the Volume of a Conical Frustum. + | Wikipedia reference: https://en.wikipedia.org/wiki/Frustum >>> vol_conical_frustum(45, 7, 28) 48490.482608158454 @@ -443,9 +480,12 @@ def vol_conical_frustum(height: float, radius_1: float, radius_2: float) -> floa def vol_torus(torus_radius: float, tube_radius: float) -> float: - """Calculate the Volume of a Torus. - Wikipedia reference: https://en.wikipedia.org/wiki/Torus - :return 2pi^2 * torus_radius * tube_radius^2 + r""" + | Calculate the Volume of a Torus. + | Wikipedia reference: https://en.wikipedia.org/wiki/Torus + + :return: :math:`2 \pi^2 \cdot torus\_radius \cdot tube\_radius^2` + >>> vol_torus(1, 1) 19.739208802178716 >>> vol_torus(4, 3) @@ -471,8 +511,9 @@ def vol_torus(torus_radius: float, tube_radius: float) -> float: def vol_icosahedron(tri_side: float) -> float: - """Calculate the Volume of an Icosahedron. - Wikipedia reference: https://en.wikipedia.org/wiki/Regular_icosahedron + """ + | Calculate the Volume of an Icosahedron. 
+ | Wikipedia reference: https://en.wikipedia.org/wiki/Regular_icosahedron >>> from math import isclose >>> isclose(vol_icosahedron(2.5), 34.088984228514256) From e9721aad59743d01e82582017884db528bad3e21 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 24 Dec 2024 06:06:59 +0300 Subject: [PATCH 1466/1543] Fix sphinx/build_docs warnings for physics/horizontal_projectile_motion (#12467) --- physics/horizontal_projectile_motion.py | 68 +++++++++++++++---------- 1 file changed, 41 insertions(+), 27 deletions(-) diff --git a/physics/horizontal_projectile_motion.py b/physics/horizontal_projectile_motion.py index 80f85a1b7146..60f21c2b39c4 100644 --- a/physics/horizontal_projectile_motion.py +++ b/physics/horizontal_projectile_motion.py @@ -1,15 +1,18 @@ """ Horizontal Projectile Motion problem in physics. + This algorithm solves a specific problem in which -the motion starts from the ground as can be seen below: - (v = 0) - * * - * * - * * - * * - * * - * * -GROUND GROUND +the motion starts from the ground as can be seen below:: + + (v = 0) + * * + * * + * * + * * + * * + * * + GROUND GROUND + For more info: https://en.wikipedia.org/wiki/Projectile_motion """ @@ -43,14 +46,17 @@ def check_args(init_velocity: float, angle: float) -> None: def horizontal_distance(init_velocity: float, angle: float) -> float: - """ + r""" Returns the horizontal distance that the object cover + Formula: - v_0^2 * sin(2 * alpha) - --------------------- - g - v_0 - initial velocity - alpha - angle + .. math:: + \frac{v_0^2 \cdot \sin(2 \alpha)}{g} + + v_0 - \text{initial velocity} + + \alpha - \text{angle} + >>> horizontal_distance(30, 45) 91.77 >>> horizontal_distance(100, 78) @@ -70,14 +76,17 @@ def horizontal_distance(init_velocity: float, angle: float) -> float: def max_height(init_velocity: float, angle: float) -> float: - """ + r""" Returns the maximum height that the object reach + Formula: - v_0^2 * sin^2(alpha) - -------------------- - 2g - v_0 - initial velocity - alpha - angle + .. math:: + \frac{v_0^2 \cdot \sin^2 (\alpha)}{2 g} + + v_0 - \text{initial velocity} + + \alpha - \text{angle} + >>> max_height(30, 45) 22.94 >>> max_height(100, 78) @@ -97,14 +106,17 @@ def max_height(init_velocity: float, angle: float) -> float: def total_time(init_velocity: float, angle: float) -> float: - """ + r""" Returns total time of the motion + Formula: - 2 * v_0 * sin(alpha) - -------------------- - g - v_0 - initial velocity - alpha - angle + .. math:: + \frac{2 v_0 \cdot \sin (\alpha)}{g} + + v_0 - \text{initial velocity} + + \alpha - \text{angle} + >>> total_time(30, 45) 4.33 >>> total_time(100, 78) @@ -125,6 +137,8 @@ def total_time(init_velocity: float, angle: float) -> float: def test_motion() -> None: """ + Test motion + >>> test_motion() """ v0, angle = 25, 20 From c36aaf0fbcbc0f1a6c82b689ee87e383104b9e96 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 24 Dec 2024 11:48:37 +0300 Subject: [PATCH 1467/1543] Fix sphinx/build_docs warnings for graphs/check_bipatrite (#12469) * Fix sphinx/build_docs warnings for graphs/check_bipatrite * Fix --- graphs/check_bipatrite.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/graphs/check_bipatrite.py b/graphs/check_bipatrite.py index 10b9cc965251..213f3f9480b5 100644 --- a/graphs/check_bipatrite.py +++ b/graphs/check_bipatrite.py @@ -6,16 +6,17 @@ def is_bipartite_dfs(graph: defaultdict[int, list[int]]) -> bool: Check if a graph is bipartite using depth-first search (DFS). 
Args: - graph: Adjacency list representing the graph. + `graph`: Adjacency list representing the graph. Returns: - True if bipartite, False otherwise. + ``True`` if bipartite, ``False`` otherwise. Checks if the graph can be divided into two sets of vertices, such that no two vertices within the same set are connected by an edge. Examples: - # FIXME: This test should pass. + + >>> # FIXME: This test should pass. >>> is_bipartite_dfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4]})) Traceback (most recent call last): ... @@ -37,7 +38,7 @@ def is_bipartite_dfs(graph: defaultdict[int, list[int]]) -> bool: ... KeyError: 0 - # FIXME: This test should fails with KeyError: 4. + >>> # FIXME: This test should fails with KeyError: 4. >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) False >>> is_bipartite_dfs({0: [-1, 3], 1: [0, -2]}) @@ -51,7 +52,8 @@ def is_bipartite_dfs(graph: defaultdict[int, list[int]]) -> bool: ... KeyError: 0 - # FIXME: This test should fails with TypeError: list indices must be integers or... + >>> # FIXME: This test should fails with + >>> # TypeError: list indices must be integers or... >>> is_bipartite_dfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) True >>> is_bipartite_dfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) @@ -95,16 +97,17 @@ def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: Check if a graph is bipartite using a breadth-first search (BFS). Args: - graph: Adjacency list representing the graph. + `graph`: Adjacency list representing the graph. Returns: - True if bipartite, False otherwise. + ``True`` if bipartite, ``False`` otherwise. Check if the graph can be divided into two sets of vertices, such that no two vertices within the same set are connected by an edge. Examples: - # FIXME: This test should pass. + + >>> # FIXME: This test should pass. >>> is_bipartite_bfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4]})) Traceback (most recent call last): ... @@ -126,7 +129,7 @@ def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: ... KeyError: 0 - # FIXME: This test should fails with KeyError: 4. + >>> # FIXME: This test should fails with KeyError: 4. >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) False >>> is_bipartite_bfs({0: [-1, 3], 1: [0, -2]}) @@ -140,7 +143,8 @@ def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: ... KeyError: 0 - # FIXME: This test should fails with TypeError: list indices must be integers or... + >>> # FIXME: This test should fails with + >>> # TypeError: list indices must be integers or... 
>>> is_bipartite_bfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) True >>> is_bipartite_bfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) From ae28fa7fe362c8cb0238dbb6b237d42179e8beb3 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 24 Dec 2024 16:17:22 +0300 Subject: [PATCH 1468/1543] Fix sphinx/build_docs warnings for data_structures/binary_tree/mirror_binary_tree (#12470) --- .../binary_tree/mirror_binary_tree.py | 52 +++++++++++-------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/data_structures/binary_tree/mirror_binary_tree.py b/data_structures/binary_tree/mirror_binary_tree.py index 62e2f08dd4e0..f6611d66d676 100644 --- a/data_structures/binary_tree/mirror_binary_tree.py +++ b/data_structures/binary_tree/mirror_binary_tree.py @@ -56,6 +56,8 @@ def mirror(self) -> Node: def make_tree_seven() -> Node: r""" Return a binary tree with 7 nodes that looks like this: + :: + 1 / \ 2 3 @@ -81,13 +83,15 @@ def make_tree_seven() -> Node: def make_tree_nine() -> Node: r""" Return a binary tree with 9 nodes that looks like this: - 1 - / \ - 2 3 - / \ \ - 4 5 6 - / \ \ - 7 8 9 + :: + + 1 + / \ + 2 3 + / \ \ + 4 5 6 + / \ \ + 7 8 9 >>> tree_nine = make_tree_nine() >>> len(tree_nine) @@ -117,23 +121,25 @@ def main() -> None: >>> tuple(tree.mirror()) (6, 3, 1, 9, 5, 2, 8, 4, 7) - nine_tree: - 1 - / \ - 2 3 - / \ \ - 4 5 6 - / \ \ - 7 8 9 - - The mirrored tree looks like this: + nine_tree:: + + 1 + / \ + 2 3 + / \ \ + 4 5 6 + / \ \ + 7 8 9 + + The mirrored tree looks like this:: + 1 - / \ - 3 2 - / / \ - 6 5 4 - / / \ - 9 8 7 + / \ + 3 2 + / / \ + 6 5 4 + / / \ + 9 8 7 """ trees = {"zero": Node(0), "seven": make_tree_seven(), "nine": make_tree_nine()} for name, tree in trees.items(): From eb652cf3d48fbd3b51450e95640ce5aec63a066b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 28 Dec 2024 00:18:08 +0300 Subject: [PATCH 1469/1543] Bump astral-sh/setup-uv from 4 to 5 (#12445) Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 4 to 5. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/v4...v5) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- .github/workflows/project_euler.yml | 4 ++-- .github/workflows/ruff.yml | 2 +- .github/workflows/sphinx.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a6f308715cc2..62829b2b45a5 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v4 + - uses: astral-sh/setup-uv@v5 with: enable-cache: true cache-dependency-glob: uv.lock diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index 84c55335451e..8d51ad8850cf 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v4 + - uses: astral-sh/setup-uv@v5 - uses: actions/setup-python@v5 with: python-version: 3.x @@ -25,7 +25,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v4 + - uses: astral-sh/setup-uv@v5 - uses: actions/setup-python@v5 with: python-version: 3.x diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index 2c6f92fcf7bf..cfe127b3521f 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -12,5 +12,5 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v4 + - uses: astral-sh/setup-uv@v5 - run: uvx ruff check --output-format=github . diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index e3e2ce81a95d..d02435d98028 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -26,7 +26,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v4 + - uses: astral-sh/setup-uv@v5 - uses: actions/setup-python@v5 with: python-version: 3.13 From 5bef6ac9296c20250db7d494bbbc9c8bf4bfccdc Mon Sep 17 00:00:00 2001 From: Scarfinos <158184182+Scarfinos@users.noreply.github.com> Date: Fri, 27 Dec 2024 23:22:36 +0100 Subject: [PATCH 1470/1543] Improve coverage special_numbers (#12414) * Improve coverage bell_numbers * improve more function * Update hamming_numbers.py --------- Co-authored-by: Maxim Smolskiy --- maths/special_numbers/bell_numbers.py | 4 ++++ maths/special_numbers/hamming_numbers.py | 6 +++++- maths/special_numbers/harshad_numbers.py | 8 ++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/maths/special_numbers/bell_numbers.py b/maths/special_numbers/bell_numbers.py index 5d99334d7add..d573e7a3962d 100644 --- a/maths/special_numbers/bell_numbers.py +++ b/maths/special_numbers/bell_numbers.py @@ -21,6 +21,10 @@ def bell_numbers(max_set_length: int) -> list[int]: list: A list of Bell numbers for sets of lengths from 0 to max_set_length. Examples: + >>> bell_numbers(-2) + Traceback (most recent call last): + ... 
+ ValueError: max_set_length must be non-negative >>> bell_numbers(0) [1] >>> bell_numbers(1) diff --git a/maths/special_numbers/hamming_numbers.py b/maths/special_numbers/hamming_numbers.py index 4575119c8a95..a473cc93883b 100644 --- a/maths/special_numbers/hamming_numbers.py +++ b/maths/special_numbers/hamming_numbers.py @@ -13,6 +13,10 @@ def hamming(n_element: int) -> list: :param n_element: The number of elements on the list :return: The nth element of the list + >>> hamming(-5) + Traceback (most recent call last): + ... + ValueError: n_element should be a positive number >>> hamming(5) [1, 2, 3, 4, 5] >>> hamming(10) @@ -22,7 +26,7 @@ def hamming(n_element: int) -> list: """ n_element = int(n_element) if n_element < 1: - my_error = ValueError("a should be a positive number") + my_error = ValueError("n_element should be a positive number") raise my_error hamming_list = [1] diff --git a/maths/special_numbers/harshad_numbers.py b/maths/special_numbers/harshad_numbers.py index 61667adfa127..417120bd840e 100644 --- a/maths/special_numbers/harshad_numbers.py +++ b/maths/special_numbers/harshad_numbers.py @@ -11,6 +11,8 @@ def int_to_base(number: int, base: int) -> str: Where 'base' ranges from 2 to 36. Examples: + >>> int_to_base(0, 21) + '0' >>> int_to_base(23, 2) '10111' >>> int_to_base(58, 5) @@ -26,6 +28,10 @@ def int_to_base(number: int, base: int) -> str: Traceback (most recent call last): ... ValueError: 'base' must be between 2 and 36 inclusive + >>> int_to_base(-99, 16) + Traceback (most recent call last): + ... + ValueError: number must be a positive integer """ if base < 2 or base > 36: @@ -101,6 +107,8 @@ def harshad_numbers_in_base(limit: int, base: int) -> list[str]: Traceback (most recent call last): ... ValueError: 'base' must be between 2 and 36 inclusive + >>> harshad_numbers_in_base(-12, 6) + [] """ if base < 2 or base > 36: From 8bbe8caa256882ef2ebdbb3274e6f99f804716bd Mon Sep 17 00:00:00 2001 From: Scarfinos <158184182+Scarfinos@users.noreply.github.com> Date: Fri, 27 Dec 2024 23:40:35 +0100 Subject: [PATCH 1471/1543] Improve test coverage for matrix exponentiation (#12388) * #9943 : Adding coverage test for basic_graphs.py * #9943 : Adding coverage test for basic_graphs.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Solve problem of line too long * Improving coverage for matrix_exponentiation.py * fix more than one file * Update matrix_exponentiation.py * Update matrix_exponentiation.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- maths/matrix_exponentiation.py | 36 ++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/maths/matrix_exponentiation.py b/maths/matrix_exponentiation.py index 7c37151c87ca..7cdac9d34674 100644 --- a/maths/matrix_exponentiation.py +++ b/maths/matrix_exponentiation.py @@ -39,6 +39,21 @@ def modular_exponentiation(a, b): def fibonacci_with_matrix_exponentiation(n, f1, f2): + """ + Returns the nth number of the Fibonacci sequence that + starts with f1 and f2 + Uses the matrix exponentiation + >>> fibonacci_with_matrix_exponentiation(1, 5, 6) + 5 + >>> fibonacci_with_matrix_exponentiation(2, 10, 11) + 11 + >>> fibonacci_with_matrix_exponentiation(13, 0, 1) + 144 + >>> fibonacci_with_matrix_exponentiation(10, 5, 9) + 411 + >>> fibonacci_with_matrix_exponentiation(9, 2, 3) + 89 + """ # Trivial Cases if n == 1: return f1 @@ -50,21 +65,34 @@ def 
fibonacci_with_matrix_exponentiation(n, f1, f2): def simple_fibonacci(n, f1, f2): + """ + Returns the nth number of the Fibonacci sequence that + starts with f1 and f2 + Uses the definition + >>> simple_fibonacci(1, 5, 6) + 5 + >>> simple_fibonacci(2, 10, 11) + 11 + >>> simple_fibonacci(13, 0, 1) + 144 + >>> simple_fibonacci(10, 5, 9) + 411 + >>> simple_fibonacci(9, 2, 3) + 89 + """ # Trivial Cases if n == 1: return f1 elif n == 2: return f2 - fn_1 = f1 - fn_2 = f2 n -= 2 while n > 0: - fn_1, fn_2 = fn_1 + fn_2, fn_1 + f2, f1 = f1 + f2, f2 n -= 1 - return fn_1 + return f2 def matrix_exponentiation_time(): From 76471819bd5b9df6fe5fde4c763396412ce45edc Mon Sep 17 00:00:00 2001 From: Scarfinos <158184182+Scarfinos@users.noreply.github.com> Date: Fri, 27 Dec 2024 23:52:40 +0100 Subject: [PATCH 1472/1543] Improve test coverage for armstrong numbers (#12327) --- maths/special_numbers/armstrong_numbers.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/maths/special_numbers/armstrong_numbers.py b/maths/special_numbers/armstrong_numbers.py index b2b4010a8f5b..a3cb69b814de 100644 --- a/maths/special_numbers/armstrong_numbers.py +++ b/maths/special_numbers/armstrong_numbers.py @@ -43,9 +43,9 @@ def armstrong_number(n: int) -> bool: def pluperfect_number(n: int) -> bool: """Return True if n is a pluperfect number or False if it is not - >>> all(armstrong_number(n) for n in PASSING) + >>> all(pluperfect_number(n) for n in PASSING) True - >>> any(armstrong_number(n) for n in FAILING) + >>> any(pluperfect_number(n) for n in FAILING) False """ if not isinstance(n, int) or n < 1: @@ -70,9 +70,9 @@ def pluperfect_number(n: int) -> bool: def narcissistic_number(n: int) -> bool: """Return True if n is a narcissistic number or False if it is not. - >>> all(armstrong_number(n) for n in PASSING) + >>> all(narcissistic_number(n) for n in PASSING) True - >>> any(armstrong_number(n) for n in FAILING) + >>> any(narcissistic_number(n) for n in FAILING) False """ if not isinstance(n, int) or n < 1: From 2ae9534fc68b1901d8056331aa2a4dbedc9d947e Mon Sep 17 00:00:00 2001 From: Anamaria Miranda Date: Sat, 28 Dec 2024 00:03:13 +0100 Subject: [PATCH 1473/1543] Added test to linear regression (#12353) --- machine_learning/linear_regression.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/machine_learning/linear_regression.py b/machine_learning/linear_regression.py index 839a5366d1cc..1d11e5a9cc2b 100644 --- a/machine_learning/linear_regression.py +++ b/machine_learning/linear_regression.py @@ -41,6 +41,14 @@ def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta): :param theta : Feature vector (weight's for our model) ;param return : Updated Feature's, using curr_features - alpha_ * gradient(w.r.t. 
feature) + >>> import numpy as np + >>> data_x = np.array([[1, 2], [3, 4]]) + >>> data_y = np.array([5, 6]) + >>> len_data = len(data_x) + >>> alpha = 0.01 + >>> theta = np.array([0.1, 0.2]) + >>> run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta) + array([0.196, 0.343]) """ n = len_data @@ -58,6 +66,12 @@ def sum_of_square_error(data_x, data_y, len_data, theta): :param len_data : len of the dataset :param theta : contains the feature vector :return : sum of square error computed from given feature's + + Example: + >>> vc_x = np.array([[1.1], [2.1], [3.1]]) + >>> vc_y = np.array([1.2, 2.2, 3.2]) + >>> round(sum_of_square_error(vc_x, vc_y, 3, np.array([1])),3) + np.float64(0.005) """ prod = np.dot(theta, data_x.transpose()) prod -= data_y.transpose() @@ -93,6 +107,11 @@ def mean_absolute_error(predicted_y, original_y): :param predicted_y : contains the output of prediction (result vector) :param original_y : contains values of expected outcome :return : mean absolute error computed from given feature's + + >>> predicted_y = [3, -0.5, 2, 7] + >>> original_y = [2.5, 0.0, 2, 8] + >>> mean_absolute_error(predicted_y, original_y) + 0.5 """ total = sum(abs(y - predicted_y[i]) for i, y in enumerate(original_y)) return total / len(original_y) @@ -114,4 +133,7 @@ def main(): if __name__ == "__main__": + import doctest + + doctest.testmod() main() From 1652d05e9ee25d54eea5576976d537975dcad9bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julia=20Arag=C3=A3o?= <101305675+juliaaragao@users.noreply.github.com> Date: Sat, 28 Dec 2024 00:26:29 +0100 Subject: [PATCH 1474/1543] adding test to electronics/electric_power.py (#12387) * test electric_power * Update electric_power.py * Update electric_power.py * Update electric_power.py * Update electric_power.py --------- Co-authored-by: Julia Co-authored-by: Maxim Smolskiy --- electronics/electric_power.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/electronics/electric_power.py b/electronics/electric_power.py index 8b92e320ace3..8e3454e39c3f 100644 --- a/electronics/electric_power.py +++ b/electronics/electric_power.py @@ -23,20 +23,22 @@ def electric_power(voltage: float, current: float, power: float) -> tuple: >>> electric_power(voltage=2, current=4, power=2) Traceback (most recent call last): ... - ValueError: Only one argument must be 0 + ValueError: Exactly one argument must be 0 >>> electric_power(voltage=0, current=0, power=2) Traceback (most recent call last): ... - ValueError: Only one argument must be 0 + ValueError: Exactly one argument must be 0 >>> electric_power(voltage=0, current=2, power=-4) Traceback (most recent call last): ... 
ValueError: Power cannot be negative in any electrical/electronics system >>> electric_power(voltage=2.2, current=2.2, power=0) Result(name='power', value=4.84) + >>> electric_power(current=0, power=6, voltage=2) + Result(name='current', value=3.0) """ if (voltage, current, power).count(0) != 1: - raise ValueError("Only one argument must be 0") + raise ValueError("Exactly one argument must be 0") elif power < 0: raise ValueError( "Power cannot be negative in any electrical/electronics system" @@ -48,7 +50,7 @@ def electric_power(voltage: float, current: float, power: float) -> tuple: elif power == 0: return Result("power", float(round(abs(voltage * current), 2))) else: - raise ValueError("Exactly one argument must be 0") + raise AssertionError if __name__ == "__main__": From 929b7dc057cd56f90b260cd665fb67886bcadeea Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 28 Dec 2024 11:43:25 +0300 Subject: [PATCH 1475/1543] Fix Gaussian elimination pivoting (#11393) * updating DIRECTORY.md * Fix Gaussian elimination pivoting * Fix review issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: MaximSmolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../src/gaussian_elimination_pivoting.py | 39 ++++++++----------- 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/linear_algebra/src/gaussian_elimination_pivoting.py b/linear_algebra/src/gaussian_elimination_pivoting.py index ecaacce19a31..efc1ddd64a2e 100644 --- a/linear_algebra/src/gaussian_elimination_pivoting.py +++ b/linear_algebra/src/gaussian_elimination_pivoting.py @@ -22,40 +22,33 @@ def solve_linear_system(matrix: np.ndarray) -> np.ndarray: >>> solution = solve_linear_system(np.column_stack((A, B))) >>> np.allclose(solution, np.array([2., 3., -1.])) True - >>> solve_linear_system(np.array([[0, 0], [0, 0]], dtype=float)) - array([nan, nan]) + >>> solve_linear_system(np.array([[0, 0, 0]], dtype=float)) + Traceback (most recent call last): + ... + ValueError: Matrix is not square + >>> solve_linear_system(np.array([[0, 0, 0], [0, 0, 0]], dtype=float)) + Traceback (most recent call last): + ... 
+ ValueError: Matrix is singular """ ab = np.copy(matrix) num_of_rows = ab.shape[0] num_of_columns = ab.shape[1] - 1 x_lst: list[float] = [] - # Lead element search - for column_num in range(num_of_rows): - for i in range(column_num, num_of_columns): - if abs(ab[i][column_num]) > abs(ab[column_num][column_num]): - ab[[column_num, i]] = ab[[i, column_num]] - if ab[column_num, column_num] == 0.0: - raise ValueError("Matrix is not correct") - else: - pass - if column_num != 0: - for i in range(column_num, num_of_rows): - ab[i, :] -= ( - ab[i, column_num - 1] - / ab[column_num - 1, column_num - 1] - * ab[column_num - 1, :] - ) + if num_of_rows != num_of_columns: + raise ValueError("Matrix is not square") - # Upper triangular matrix for column_num in range(num_of_rows): + # Lead element search for i in range(column_num, num_of_columns): if abs(ab[i][column_num]) > abs(ab[column_num][column_num]): ab[[column_num, i]] = ab[[i, column_num]] - if ab[column_num, column_num] == 0.0: - raise ValueError("Matrix is not correct") - else: - pass + + # Upper triangular matrix + if abs(ab[column_num, column_num]) < 1e-8: + raise ValueError("Matrix is singular") + if column_num != 0: for i in range(column_num, num_of_rows): ab[i, :] -= ( From b5c8fbf2e8254b53056b741aacce3842736ba177 Mon Sep 17 00:00:00 2001 From: Joy Khandelwal <116290658+joy-programs@users.noreply.github.com> Date: Sat, 28 Dec 2024 14:21:28 +0530 Subject: [PATCH 1476/1543] Add additional doctests, fix grammatical errors for maths/perfect_number.py (#12477) * Add additional doctests for the perfect number algorithm and fix grammatical errors. Contributes to #9943 * Added newline at End of file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/perfect_number.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/maths/perfect_number.py b/maths/perfect_number.py index df6b6e3d91d8..52c816cc7895 100644 --- a/maths/perfect_number.py +++ b/maths/perfect_number.py @@ -46,17 +46,27 @@ def perfect(number: int) -> bool: False >>> perfect(-1) False + >>> perfect(33550336) # Large perfect number + True + >>> perfect(33550337) # Just above a large perfect number + False + >>> perfect(1) # Edge case: 1 is not a perfect number + False + >>> perfect("123") # String representation of a number + Traceback (most recent call last): + ... + ValueError: number must be an integer >>> perfect(12.34) Traceback (most recent call last): ... - ValueError: number must an integer + ValueError: number must be an integer >>> perfect("Hello") Traceback (most recent call last): ... 
- ValueError: number must an integer + ValueError: number must be an integer """ if not isinstance(number, int): - raise ValueError("number must an integer") + raise ValueError("number must be an integer") if number <= 0: return False return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number @@ -70,8 +80,7 @@ def perfect(number: int) -> bool: try: number = int(input("Enter a positive integer: ").strip()) except ValueError: - msg = "number must an integer" - print(msg) + msg = "number must be an integer" raise ValueError(msg) print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.") From d496d5611fe266b7123b4a1f3efb4bcb7c2f38f2 Mon Sep 17 00:00:00 2001 From: Shi Entong <144505619+setbit123@users.noreply.github.com> Date: Sat, 28 Dec 2024 17:22:07 +0800 Subject: [PATCH 1477/1543] Remove inaccessible URL in computer_vision/README.md (#12383) Remove inaccessible URL. --- computer_vision/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/computer_vision/README.md b/computer_vision/README.md index 1657128fd25e..61462567b662 100644 --- a/computer_vision/README.md +++ b/computer_vision/README.md @@ -8,4 +8,3 @@ Image processing and computer vision are a little different from each other. Ima While computer vision comes from modelling image processing using the techniques of machine learning, computer vision applies machine learning to recognize patterns for interpretation of images (much like the process of visual reasoning of human vision). * -* From 1909f2272f11ebe7626d2dee78c11a91134e39e7 Mon Sep 17 00:00:00 2001 From: jperezr <122382210+MRJPEREZR@users.noreply.github.com> Date: Sat, 28 Dec 2024 11:03:24 +0100 Subject: [PATCH 1478/1543] adding doctests to maths/trapezoidal_rule.py (#12193) * adding doctests to trapezoidal_rule.py * adding algorithm delta-star transformation * updating DIRECTORY.md * delete file star_delta_transform.py * updating DIRECTORY.md * modified: ../DIRECTORY.md --------- Co-authored-by: MRJPEREZR --- maths/trapezoidal_rule.py | 48 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/maths/trapezoidal_rule.py b/maths/trapezoidal_rule.py index 9a4ddc8af66b..0186629ee378 100644 --- a/maths/trapezoidal_rule.py +++ b/maths/trapezoidal_rule.py @@ -5,13 +5,25 @@ method 1: "extended trapezoidal rule" +int(f) = dx/2 * (f1 + 2f2 + ... + fn) """ def method_1(boundary, steps): - # "extended trapezoidal rule" - # int(f) = dx/2 * (f1 + 2f2 + ... + fn) + """ + Apply the extended trapezoidal rule to approximate the integral of function f(x) + over the interval defined by 'boundary' with the number of 'steps'. + + Args: + boundary (list of floats): A list containing the start and end values [a, b]. + steps (int): The number of steps or subintervals. + Returns: + float: Approximation of the integral of f(x) over [a, b]. + Examples: + >>> method_1([0, 1], 10) + 0.3349999999999999 + """ h = (boundary[1] - boundary[0]) / steps a = boundary[0] b = boundary[1] @@ -26,13 +38,40 @@ def method_1(boundary, steps): def make_points(a, b, h): + """ + Generates points between 'a' and 'b' with step size 'h', excluding the end points. 
+ Args: + a (float): Start value + b (float): End value + h (float): Step size + Examples: + >>> list(make_points(0, 10, 2.5)) + [2.5, 5.0, 7.5] + + >>> list(make_points(0, 10, 2)) + [2, 4, 6, 8] + + >>> list(make_points(1, 21, 5)) + [6, 11, 16] + + >>> list(make_points(1, 5, 2)) + [3] + + >>> list(make_points(1, 4, 3)) + [] + """ x = a + h - while x < (b - h): + while x <= (b - h): yield x x = x + h def f(x): # enter your function here + """ + Example: + >>> f(2) + 4 + """ y = (x - 0) * (x - 0) return y @@ -47,4 +86,7 @@ def main(): if __name__ == "__main__": + import doctest + + doctest.testmod() main() From 2b58ab040295fbbf2e463ba8cd77ad935d942968 Mon Sep 17 00:00:00 2001 From: Andrwaa <165920381+Andrwaa@users.noreply.github.com> Date: Sat, 28 Dec 2024 12:17:48 +0100 Subject: [PATCH 1479/1543] compare-method added to Vector class in lib.py (#12448) * compare-method added to Vector class in lib.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated lib.py with suggestions * Updated lib.py with suggestions * Updated lib.py with __eq__ method --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- linear_algebra/src/lib.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index 5af6c62e3ad4..0d6a348475cd 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -46,7 +46,6 @@ class Vector: change_component(pos: int, value: float): changes specified component euclidean_length(): returns the euclidean length of the vector angle(other: Vector, deg: bool): returns the angle between two vectors - TODO: compare-operator """ def __init__(self, components: Collection[float] | None = None) -> None: @@ -96,6 +95,16 @@ def __sub__(self, other: Vector) -> Vector: else: # error case raise Exception("must have the same size") + def __eq__(self, other: object) -> bool: + """ + performs the comparison between two vectors + """ + if not isinstance(other, Vector): + return NotImplemented + if len(self) != len(other): + return False + return all(self.component(i) == other.component(i) for i in range(len(self))) + @overload def __mul__(self, other: float) -> Vector: ... 
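The patch above wires equality into the Vector class by checking the operand type, comparing lengths first, and then comparing every component pairwise. As a quick illustration of that comparison semantics, here is a minimal standalone sketch; the helper name and the plain-list representation are assumptions made for the example, not the repository's Vector API.

# Illustrative sketch of the component-wise equality added in the patch above;
# this is NOT the repository's Vector class, just the same comparison logic
# applied to plain lists of floats.
def vectors_equal(a: list[float], b: list[float]) -> bool:
    """Return True only when both vectors have the same size and equal components.

    >>> vectors_equal([1.0, 2.0, 3.0], [1.0, 2.0, 3.0])
    True
    >>> vectors_equal([1.0, 2.0], [1.0, 2.0, 3.0])
    False
    >>> vectors_equal([1.0, 2.0], [1.0, 2.5])
    False
    """
    if len(a) != len(b):
        # Different dimensions can never be equal.
        return False
    # Compare the components pairwise, exactly as the patched __eq__ does
    # via Vector.component(i).
    return all(x == y for x, y in zip(a, b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()

Note on the design choice in the patch itself: returning NotImplemented for a non-Vector operand lets Python try the other operand's comparison and, failing that, fall back to its default inequality, instead of raising inside __eq__.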
From 2d68bb50e5f12532b5a0d616305c4f805d2b8ff9 Mon Sep 17 00:00:00 2001 From: KICH Yassine Date: Sun, 29 Dec 2024 12:56:36 +0100 Subject: [PATCH 1480/1543] Fix split function to handle trailing delimiters correctly (#12423) * Fix split function to handle trailing delimiters correctly * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update split.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- strings/split.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/strings/split.py b/strings/split.py index b62b86d2401f..ed194ec69c2f 100644 --- a/strings/split.py +++ b/strings/split.py @@ -14,6 +14,9 @@ def split(string: str, separator: str = " ") -> list: >>> split("12:43:39",separator = ":") ['12', '43', '39'] + + >>> split(";abbb;;c;", separator=';') + ['', 'abbb', '', 'c', ''] """ split_words = [] @@ -23,7 +26,7 @@ def split(string: str, separator: str = " ") -> list: if char == separator: split_words.append(string[last_index:index]) last_index = index + 1 - elif index + 1 == len(string): + if index + 1 == len(string): split_words.append(string[last_index : index + 1]) return split_words From 972a5c1e432e0a3fa9e990422318269219192a53 Mon Sep 17 00:00:00 2001 From: RajdeepBakolia2004 <144157867+RajdeepBakolia2004@users.noreply.github.com> Date: Sun, 29 Dec 2024 19:05:33 +0530 Subject: [PATCH 1481/1543] fixed the issue in strings/join.py (#12434) * fixed the issue in strings/join.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update join.py * Update join.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- strings/join.py | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/strings/join.py b/strings/join.py index 5c02f65a20ce..cdcc3a1377f4 100644 --- a/strings/join.py +++ b/strings/join.py @@ -24,6 +24,8 @@ def join(separator: str, separated: list[str]) -> str: 'a' >>> join(" ", ["You", "are", "amazing!"]) 'You are amazing!' + >>> join(",", ["", "", ""]) + ',,' This example should raise an exception for non-string elements: @@ -37,15 +39,33 @@ def join(separator: str, separated: list[str]) -> str: 'apple-banana-cherry' """ - joined = "" + # Check that all elements are strings for word_or_phrase in separated: + # If the element is not a string, raise an exception if not isinstance(word_or_phrase, str): raise Exception("join() accepts only strings") + + joined: str = "" + """ + The last element of the list is not followed by the separator. + So, we need to iterate through the list and join each element + with the separator except the last element. + """ + last_index: int = len(separated) - 1 + """ + Iterate through the list and join each element with the separator. + Except the last element, all other elements are followed by the separator. + """ + for word_or_phrase in separated[:last_index]: + # join the element with the separator. joined += word_or_phrase + separator - # Remove the trailing separator - # by stripping it from the result - return joined.strip(separator) + # If the list is not empty, join the last element. + if separated != []: + joined += separated[last_index] + + # Return the joined string. 
+ return joined if __name__ == "__main__": From d9092d88dd8b47323d14f87025195e0c76fe7889 Mon Sep 17 00:00:00 2001 From: Sankalpa Sarkar <137193167+sanks011@users.noreply.github.com> Date: Sun, 29 Dec 2024 19:23:31 +0530 Subject: [PATCH 1482/1543] fixes requirements error (#12438) * fixes join.py action * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixes split.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed two requirements * Custom Implementation of join.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated join.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update split.py * Update join.py * Update join.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 4cc83f44987d..b104505e01bc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ beautifulsoup4 -fake_useragent +fake-useragent imageio keras lxml @@ -11,7 +11,7 @@ pillow requests rich scikit-learn -sphinx_pyproject +sphinx-pyproject statsmodels sympy tweepy From bfc804a41c6fb7f3c2e371b15d50ba4830bab3a7 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 29 Dec 2024 18:41:28 +0300 Subject: [PATCH 1483/1543] Fix sphinx/build_docs warnings for physics/newtons_second_law_of_motion (#12480) * Fix sphinx/build_docs warnings for physics/newtons_second_law_of_motion * Fix * Fix * Fix review issue --- physics/newtons_second_law_of_motion.py | 83 ++++++++++++++----------- 1 file changed, 47 insertions(+), 36 deletions(-) diff --git a/physics/newtons_second_law_of_motion.py b/physics/newtons_second_law_of_motion.py index 53fab6ce78b9..4149e2494f31 100644 --- a/physics/newtons_second_law_of_motion.py +++ b/physics/newtons_second_law_of_motion.py @@ -1,18 +1,22 @@ -""" -Description : -Newton's second law of motion pertains to the behavior of objects for which -all existing forces are not balanced. -The second law states that the acceleration of an object is dependent upon two variables -- the net force acting upon the object and the mass of the object. -The acceleration of an object depends directly -upon the net force acting upon the object, -and inversely upon the mass of the object. -As the force acting upon an object is increased, -the acceleration of the object is increased. -As the mass of an object is increased, the acceleration of the object is decreased. +r""" +Description: + Newton's second law of motion pertains to the behavior of objects for which + all existing forces are not balanced. + The second law states that the acceleration of an object is dependent upon + two variables - the net force acting upon the object and the mass of the object. + The acceleration of an object depends directly + upon the net force acting upon the object, + and inversely upon the mass of the object. + As the force acting upon an object is increased, + the acceleration of the object is increased. + As the mass of an object is increased, the acceleration of the object is decreased. 
+ Source: https://www.physicsclassroom.com/class/newtlaws/Lesson-3/Newton-s-Second-Law -Formulation: Fnet = m • a -Diagrammatic Explanation: + +Formulation: F_net = m • a + +Diagrammatic Explanation:: + Forces are unbalanced | | @@ -26,35 +30,42 @@ / \ / \ / \ - __________________ ____ ________________ - |The acceleration | |The acceleration | - |depends directly | |depends inversely | - |on the net Force | |upon the object's | - |_________________| |mass_______________| -Units: -1 Newton = 1 kg X meters / (seconds^2) + __________________ ____________________ + | The acceleration | | The acceleration | + | depends directly | | depends inversely | + | on the net force | | upon the object's | + | | | mass | + |__________________| |____________________| + +Units: 1 Newton = 1 kg • meters/seconds^2 + How to use? -Inputs: - ___________________________________________________ - |Name | Units | Type | - |-------------|-------------------------|-----------| - |mass | (in kgs) | float | - |-------------|-------------------------|-----------| - |acceleration | (in meters/(seconds^2)) | float | - |_____________|_________________________|___________| - -Output: - ___________________________________________________ - |Name | Units | Type | - |-------------|-------------------------|-----------| - |force | (in Newtons) | float | - |_____________|_________________________|___________| + +Inputs:: + + ______________ _____________________ ___________ + | Name | Units | Type | + |--------------|---------------------|-----------| + | mass | in kgs | float | + |--------------|---------------------|-----------| + | acceleration | in meters/seconds^2 | float | + |______________|_____________________|___________| + +Output:: + + ______________ _______________________ ___________ + | Name | Units | Type | + |--------------|-----------------------|-----------| + | force | in Newtons | float | + |______________|_______________________|___________| """ def newtons_second_law_of_motion(mass: float, acceleration: float) -> float: """ + Calculates force from `mass` and `acceleration` + >>> newtons_second_law_of_motion(10, 10) 100 >>> newtons_second_law_of_motion(2.0, 1) From c93288389d220297f972137293f4565c62131516 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sun, 29 Dec 2024 18:16:45 +0100 Subject: [PATCH 1484/1543] [pre-commit.ci] pre-commit autoupdate (#12466) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.3 → v0.8.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.3...v0.8.4) - [github.com/pre-commit/mirrors-mypy: v1.13.0 → v1.14.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.13.0...v1.14.0) * Update convert_number_to_words.py * Update convert_number_to_words.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- conversions/convert_number_to_words.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0c8108ac55be..71ac72c29b5f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.3 + rev: v0.8.4 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject 
- repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.13.0 + rev: v1.14.0 hooks: - id: mypy args: diff --git a/conversions/convert_number_to_words.py b/conversions/convert_number_to_words.py index dbab44c72e1f..6aa43738b9fe 100644 --- a/conversions/convert_number_to_words.py +++ b/conversions/convert_number_to_words.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import ClassVar, Literal +from typing import Literal class NumberingSystem(Enum): @@ -54,7 +54,7 @@ def max_value(cls, system: str) -> int: class NumberWords(Enum): - ONES: ClassVar[dict[int, str]] = { + ONES = { # noqa: RUF012 0: "", 1: "one", 2: "two", @@ -67,7 +67,7 @@ class NumberWords(Enum): 9: "nine", } - TEENS: ClassVar[dict[int, str]] = { + TEENS = { # noqa: RUF012 0: "ten", 1: "eleven", 2: "twelve", @@ -80,7 +80,7 @@ class NumberWords(Enum): 9: "nineteen", } - TENS: ClassVar[dict[int, str]] = { + TENS = { # noqa: RUF012 2: "twenty", 3: "thirty", 4: "forty", From bfb0447efb73dd049c6a56331cea36cb1345686b Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 29 Dec 2024 20:29:48 +0300 Subject: [PATCH 1485/1543] Fix sphinx/build_docs warnings for maths/zellers_congruence (#12481) * Fix sphinx/build_docs warnings for maths/zellers_congruence * Fix --- maths/zellers_congruence.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/maths/zellers_congruence.py b/maths/zellers_congruence.py index 483fb000f86b..b958ed3b8659 100644 --- a/maths/zellers_congruence.py +++ b/maths/zellers_congruence.py @@ -4,13 +4,14 @@ def zeller(date_input: str) -> str: """ - Zellers Congruence Algorithm - Find the day of the week for nearly any Gregorian or Julian calendar date + | Zellers Congruence Algorithm + | Find the day of the week for nearly any Gregorian or Julian calendar date >>> zeller('01-31-2010') 'Your date 01-31-2010, is a Sunday!' - Validate out of range month + Validate out of range month: + >>> zeller('13-31-2010') Traceback (most recent call last): ... @@ -21,6 +22,7 @@ def zeller(date_input: str) -> str: ValueError: invalid literal for int() with base 10: '.2' Validate out of range date: + >>> zeller('01-33-2010') Traceback (most recent call last): ... @@ -31,30 +33,35 @@ def zeller(date_input: str) -> str: ValueError: invalid literal for int() with base 10: '.4' Validate second separator: + >>> zeller('01-31*2010') Traceback (most recent call last): ... ValueError: Date separator must be '-' or '/' Validate first separator: + >>> zeller('01^31-2010') Traceback (most recent call last): ... ValueError: Date separator must be '-' or '/' Validate out of range year: + >>> zeller('01-31-8999') Traceback (most recent call last): ... ValueError: Year out of range. There has to be some sort of limit...right? Test null input: + >>> zeller() Traceback (most recent call last): ... TypeError: zeller() missing 1 required positional argument: 'date_input' - Test length of date_input: + Test length of `date_input`: + >>> zeller('') Traceback (most recent call last): ... 
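The docstring cleanup above never shows the formula that zeller() actually evaluates, so a compact standalone sketch of Zeller's congruence for the Gregorian calendar may help readers follow the doctests. The function name, integer signature, and weekday table below are illustrative assumptions for this sketch, not the repository's implementation.

# Minimal sketch of Zeller's congruence (Gregorian calendar). Not the
# repository's zeller(): it skips the string parsing and validation that the
# patched docstrings exercise and works on plain integers instead.
def day_of_week(day: int, month: int, year: int) -> str:
    """Return the weekday name for a Gregorian calendar date.

    >>> day_of_week(31, 1, 2010)  # same date as the docstring example above
    'Sunday'
    >>> day_of_week(1, 1, 2000)
    'Saturday'
    """
    # January and February count as months 13 and 14 of the previous year.
    if month < 3:
        month += 12
        year -= 1
    k = year % 100  # year of the century
    j = year // 100  # zero-based century
    h = (day + 13 * (month + 1) // 5 + k + k // 4 + j // 4 + 5 * j) % 7
    # Zeller's result runs 0=Saturday, 1=Sunday, ..., 6=Friday.
    names = ("Saturday", "Sunday", "Monday", "Tuesday",
             "Wednesday", "Thursday", "Friday")
    return names[h]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

The repository function instead parses an 'MM-DD-YYYY' string and formats a sentence around the weekday, which is why its doctests concentrate on input validation rather than on the arithmetic itself.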
From ce036db2131626b86b94ab87854c82a9bc6c3d0e Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 29 Dec 2024 23:01:15 +0300 Subject: [PATCH 1486/1543] Fix sphinx/build_docs warnings for physics/speeds_of_gas_molecules (#12471) * Fix sphinx/build_docs warnings for physics/speeds_of_gas_molecules * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Fix review issue * Fix * Fix * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- physics/speeds_of_gas_molecules.py | 36 ++++++++++++++++-------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/physics/speeds_of_gas_molecules.py b/physics/speeds_of_gas_molecules.py index a50d1c0f6d76..42f90a9fd6f3 100644 --- a/physics/speeds_of_gas_molecules.py +++ b/physics/speeds_of_gas_molecules.py @@ -4,43 +4,43 @@ distribution is a probability distribution that describes the distribution of speeds of particles in an ideal gas. -The distribution is given by the following equation: +The distribution is given by the following equation:: ------------------------------------------------- | f(v) = (M/2πRT)^(3/2) * 4πv^2 * e^(-Mv^2/2RT) | ------------------------------------------------- where: - f(v) is the fraction of molecules with a speed v - M is the molar mass of the gas in kg/mol - R is the gas constant - T is the absolute temperature + * ``f(v)`` is the fraction of molecules with a speed ``v`` + * ``M`` is the molar mass of the gas in kg/mol + * ``R`` is the gas constant + * ``T`` is the absolute temperature More information about the Maxwell-Boltzmann distribution can be found here: https://en.wikipedia.org/wiki/Maxwell%E2%80%93Boltzmann_distribution The average speed can be calculated by integrating the Maxwell-Boltzmann distribution -from 0 to infinity and dividing by the total number of molecules. The result is: +from 0 to infinity and dividing by the total number of molecules. The result is:: - --------------------- - | vavg = √(8RT/πM) | - --------------------- + ---------------------- + | v_avg = √(8RT/πM) | + ---------------------- The most probable speed is the speed at which the Maxwell-Boltzmann distribution is at its maximum. This can be found by differentiating the Maxwell-Boltzmann -distribution with respect to v and setting the result equal to zero. The result is: +distribution with respect to ``v`` and setting the result equal to zero. The result is:: - --------------------- - | vmp = √(2RT/M) | - --------------------- + ---------------------- + | v_mp = √(2RT/M) | + ---------------------- The root-mean-square speed is another measure of the average speed of the molecules in a gas. It is calculated by taking the square root -of the average of the squares of the speeds of the molecules. The result is: +of the average of the squares of the speeds of the molecules. The result is:: - --------------------- - | vrms = √(3RT/M) | - --------------------- + ---------------------- + | v_rms = √(3RT/M) | + ---------------------- Here we have defined functions to calculate the average and most probable speeds of molecules in a gas given the @@ -57,6 +57,7 @@ def avg_speed_of_molecule(temperature: float, molar_mass: float) -> float: and returns the average speed of a molecule in the gas (in m/s). 
Examples: + >>> avg_speed_of_molecule(273, 0.028) # nitrogen at 273 K 454.3488755020387 >>> avg_speed_of_molecule(300, 0.032) # oxygen at 300 K @@ -84,6 +85,7 @@ def mps_speed_of_molecule(temperature: float, molar_mass: float) -> float: and returns the most probable speed of a molecule in the gas (in m/s). Examples: + >>> mps_speed_of_molecule(273, 0.028) # nitrogen at 273 K 402.65620701908966 >>> mps_speed_of_molecule(300, 0.032) # oxygen at 300 K From 3622e940c9db74ebac06a5b12f83fd638d7c5511 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 29 Dec 2024 23:31:53 +0300 Subject: [PATCH 1487/1543] Fix sphinx/build_docs warnings for other (#12482) * Fix sphinx/build_docs warnings for other * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- other/bankers_algorithm.py | 16 +++-- other/davis_putnam_logemann_loveland.py | 94 ++++++++++++++----------- other/scoring_algorithm.py | 30 ++++---- 3 files changed, 77 insertions(+), 63 deletions(-) diff --git a/other/bankers_algorithm.py b/other/bankers_algorithm.py index d4254f479a4f..b1da851fc0f3 100644 --- a/other/bankers_algorithm.py +++ b/other/bankers_algorithm.py @@ -10,9 +10,10 @@ predetermined maximum possible amounts of all resources, and then makes a "s-state" check to test for possible deadlock conditions for all other pending activities, before deciding whether allocation should be allowed to continue. -[Source] Wikipedia -[Credit] Rosetta Code C implementation helped very much. - (https://rosettacode.org/wiki/Banker%27s_algorithm) + +| [Source] Wikipedia +| [Credit] Rosetta Code C implementation helped very much. +| (https://rosettacode.org/wiki/Banker%27s_algorithm) """ from __future__ import annotations @@ -75,7 +76,7 @@ def __available_resources(self) -> list[int]: def __need(self) -> list[list[int]]: """ Implement safety checker that calculates the needs by ensuring that - max_claim[i][j] - alloc_table[i][j] <= avail[j] + ``max_claim[i][j] - alloc_table[i][j] <= avail[j]`` """ return [ list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource)) @@ -86,7 +87,9 @@ def __need_index_manager(self) -> dict[int, list[int]]: """ This function builds an index control dictionary to track original ids/indices of processes when altered during execution of method "main" - Return: {0: [a: int, b: int], 1: [c: int, d: int]} + + :Return: {0: [a: int, b: int], 1: [c: int, d: int]} + >>> index_control = BankersAlgorithm( ... test_claim_vector, test_allocated_res_table, test_maximum_claim_table ... )._BankersAlgorithm__need_index_manager() @@ -100,7 +103,8 @@ def __need_index_manager(self) -> dict[int, list[int]]: def main(self, **kwargs) -> None: """ Utilize various methods in this class to simulate the Banker's algorithm - Return: None + :Return: None + >>> BankersAlgorithm(test_claim_vector, test_allocated_res_table, ... test_maximum_claim_table).main(describe=True) Allocated Resource Table diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index 0f3100b1bc2e..e95bf371a817 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -17,13 +17,15 @@ class Clause: """ - A clause represented in Conjunctive Normal Form. - A clause is a set of literals, either complemented or otherwise. + | A clause represented in Conjunctive Normal Form. 
+ | A clause is a set of literals, either complemented or otherwise. + For example: - {A1, A2, A3'} is the clause (A1 v A2 v A3') - {A5', A2', A1} is the clause (A5' v A2' v A1) + * {A1, A2, A3'} is the clause (A1 v A2 v A3') + * {A5', A2', A1} is the clause (A5' v A2' v A1) Create model + >>> clause = Clause(["A1", "A2'", "A3"]) >>> clause.evaluate({"A1": True}) True @@ -39,6 +41,7 @@ def __init__(self, literals: list[str]) -> None: def __str__(self) -> str: """ To print a clause as in Conjunctive Normal Form. + >>> str(Clause(["A1", "A2'", "A3"])) "{A1 , A2' , A3}" """ @@ -47,6 +50,7 @@ def __str__(self) -> str: def __len__(self) -> int: """ To print a clause as in Conjunctive Normal Form. + >>> len(Clause([])) 0 >>> len(Clause(["A1", "A2'", "A3"])) @@ -72,11 +76,13 @@ def assign(self, model: dict[str, bool | None]) -> None: def evaluate(self, model: dict[str, bool | None]) -> bool | None: """ Evaluates the clause with the assignments in model. + This has the following steps: - 1. Return True if both a literal and its complement exist in the clause. - 2. Return True if a single literal has the assignment True. - 3. Return None(unable to complete evaluation) if a literal has no assignment. - 4. Compute disjunction of all values assigned in clause. + 1. Return ``True`` if both a literal and its complement exist in the clause. + 2. Return ``True`` if a single literal has the assignment ``True``. + 3. Return ``None`` (unable to complete evaluation) + if a literal has no assignment. + 4. Compute disjunction of all values assigned in clause. """ for literal in self.literals: symbol = literal.rstrip("'") if literal.endswith("'") else literal + "'" @@ -92,10 +98,10 @@ def evaluate(self, model: dict[str, bool | None]) -> bool | None: class Formula: """ - A formula represented in Conjunctive Normal Form. - A formula is a set of clauses. - For example, - {{A1, A2, A3'}, {A5', A2', A1}} is ((A1 v A2 v A3') and (A5' v A2' v A1)) + | A formula represented in Conjunctive Normal Form. + | A formula is a set of clauses. + | For example, + | {{A1, A2, A3'}, {A5', A2', A1}} is ((A1 v A2 v A3') and (A5' v A2' v A1)) """ def __init__(self, clauses: Iterable[Clause]) -> None: @@ -107,7 +113,8 @@ def __init__(self, clauses: Iterable[Clause]) -> None: def __str__(self) -> str: """ To print a formula as in Conjunctive Normal Form. - str(Formula([Clause(["A1", "A2'", "A3"]), Clause(["A5'", "A2'", "A1"])])) + + >>> str(Formula([Clause(["A1", "A2'", "A3"]), Clause(["A5'", "A2'", "A1"])])) "{{A1 , A2' , A3} , {A5' , A2' , A1}}" """ return "{" + " , ".join(str(clause) for clause in self.clauses) + "}" @@ -115,8 +122,8 @@ def __str__(self) -> str: def generate_clause() -> Clause: """ - Randomly generate a clause. - All literals have the name Ax, where x is an integer from 1 to 5. + | Randomly generate a clause. + | All literals have the name Ax, where x is an integer from ``1`` to ``5``. """ literals = [] no_of_literals = random.randint(1, 5) @@ -149,11 +156,12 @@ def generate_formula() -> Formula: def generate_parameters(formula: Formula) -> tuple[list[Clause], list[str]]: """ - Return the clauses and symbols from a formula. - A symbol is the uncomplemented form of a literal. + | Return the clauses and symbols from a formula. + | A symbol is the uncomplemented form of a literal. + For example, - Symbol of A3 is A3. - Symbol of A5' is A5. + * Symbol of A3 is A3. + * Symbol of A5' is A5. 
>>> formula = Formula([Clause(["A1", "A2'", "A3"]), Clause(["A5'", "A2'", "A1"])]) >>> clauses, symbols = generate_parameters(formula) @@ -177,21 +185,20 @@ def find_pure_symbols( clauses: list[Clause], symbols: list[str], model: dict[str, bool | None] ) -> tuple[list[str], dict[str, bool | None]]: """ - Return pure symbols and their values to satisfy clause. - Pure symbols are symbols in a formula that exist only - in one form, either complemented or otherwise. - For example, - { { A4 , A3 , A5' , A1 , A3' } , { A4 } , { A3 } } has - pure symbols A4, A5' and A1. + | Return pure symbols and their values to satisfy clause. + | Pure symbols are symbols in a formula that exist only in one form, + | either complemented or otherwise. + | For example, + | {{A4 , A3 , A5' , A1 , A3'} , {A4} , {A3}} has pure symbols A4, A5' and A1. + This has the following steps: - 1. Ignore clauses that have already evaluated to be True. - 2. Find symbols that occur only in one form in the rest of the clauses. - 3. Assign value True or False depending on whether the symbols occurs - in normal or complemented form respectively. + 1. Ignore clauses that have already evaluated to be ``True``. + 2. Find symbols that occur only in one form in the rest of the clauses. + 3. Assign value ``True`` or ``False`` depending on whether the symbols occurs + in normal or complemented form respectively. >>> formula = Formula([Clause(["A1", "A2'", "A3"]), Clause(["A5'", "A2'", "A1"])]) >>> clauses, symbols = generate_parameters(formula) - >>> pure_symbols, values = find_pure_symbols(clauses, symbols, {}) >>> pure_symbols ['A1', 'A2', 'A3', 'A5'] @@ -231,20 +238,21 @@ def find_unit_clauses( ) -> tuple[list[str], dict[str, bool | None]]: """ Returns the unit symbols and their values to satisfy clause. + Unit symbols are symbols in a formula that are: - - Either the only symbol in a clause - - Or all other literals in that clause have been assigned False + - Either the only symbol in a clause + - Or all other literals in that clause have been assigned ``False`` + This has the following steps: - 1. Find symbols that are the only occurrences in a clause. - 2. Find symbols in a clause where all other literals are assigned False. - 3. Assign True or False depending on whether the symbols occurs in - normal or complemented form respectively. + 1. Find symbols that are the only occurrences in a clause. + 2. Find symbols in a clause where all other literals are assigned ``False``. + 3. Assign ``True`` or ``False`` depending on whether the symbols occurs in + normal or complemented form respectively. >>> clause1 = Clause(["A4", "A3", "A5'", "A1", "A3'"]) >>> clause2 = Clause(["A4"]) >>> clause3 = Clause(["A3"]) >>> clauses, symbols = generate_parameters(Formula([clause1, clause2, clause3])) - >>> unit_clauses, values = find_unit_clauses(clauses, {}) >>> unit_clauses ['A4', 'A3'] @@ -278,16 +286,16 @@ def dpll_algorithm( clauses: list[Clause], symbols: list[str], model: dict[str, bool | None] ) -> tuple[bool | None, dict[str, bool | None] | None]: """ - Returns the model if the formula is satisfiable, else None + Returns the model if the formula is satisfiable, else ``None`` + This has the following steps: - 1. If every clause in clauses is True, return True. - 2. If some clause in clauses is False, return False. - 3. Find pure symbols. - 4. Find unit symbols. + 1. If every clause in clauses is ``True``, return ``True``. + 2. If some clause in clauses is ``False``, return ``False``. + 3. Find pure symbols. + 4. Find unit symbols. 
>>> formula = Formula([Clause(["A4", "A3", "A5'", "A1", "A3'"]), Clause(["A4"])]) >>> clauses, symbols = generate_parameters(formula) - >>> soln, model = dpll_algorithm(clauses, symbols, {}) >>> soln True diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index af04f432e433..0185d7a2e0c0 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -1,25 +1,26 @@ """ -developed by: markmelnic -original repo: https://github.com/markmelnic/Scoring-Algorithm +| developed by: markmelnic +| original repo: https://github.com/markmelnic/Scoring-Algorithm Analyse data using a range based percentual proximity algorithm and calculate the linear maximum likelihood estimation. The basic principle is that all values supplied will be broken -down to a range from 0 to 1 and each column's score will be added +down to a range from ``0`` to ``1`` and each column's score will be added up to get the total score. -========== Example for data of vehicles -price|mileage|registration_year -20k |60k |2012 -22k |50k |2011 -23k |90k |2015 -16k |210k |2010 +:: + + price|mileage|registration_year + 20k |60k |2012 + 22k |50k |2011 + 23k |90k |2015 + 16k |210k |2010 We want the vehicle with the lowest price, lowest mileage but newest registration year. Thus the weights for each column are as follows: -[0, 0, 1] +``[0, 0, 1]`` """ @@ -97,10 +98,11 @@ def procentual_proximity( source_data: list[list[float]], weights: list[int] ) -> list[list[float]]: """ - weights - int list - possible values - 0 / 1 - 0 if lower values have higher weight in the data set - 1 if higher values have higher weight in the data set + | `weights` - ``int`` list + | possible values - ``0`` / ``1`` + + * ``0`` if lower values have higher weight in the data set + * ``1`` if higher values have higher weight in the data set >>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1]) [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]] From 94b3777936101bcc592fc5ef143ac08ad49195e7 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 30 Dec 2024 00:35:34 +0300 Subject: [PATCH 1488/1543] Fix sphinx/build_docs warnings for linear_algebra (#12483) * Fix sphinx/build_docs warnings for linear_algebra/ * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- linear_algebra/gaussian_elimination.py | 28 +++++++++++++------ linear_algebra/lu_decomposition.py | 20 +++++++------ .../src/gaussian_elimination_pivoting.py | 7 +++-- linear_algebra/src/rank_of_matrix.py | 6 +++- linear_algebra/src/schur_complement.py | 13 +++++---- linear_algebra/src/transformations_2d.py | 14 ++++++---- 6 files changed, 54 insertions(+), 34 deletions(-) diff --git a/linear_algebra/gaussian_elimination.py b/linear_algebra/gaussian_elimination.py index 724773c0db98..6f4075b710fd 100644 --- a/linear_algebra/gaussian_elimination.py +++ b/linear_algebra/gaussian_elimination.py @@ -1,6 +1,6 @@ """ -Gaussian elimination method for solving a system of linear equations. -Gaussian elimination - https://en.wikipedia.org/wiki/Gaussian_elimination +| Gaussian elimination method for solving a system of linear equations. 
+| Gaussian elimination - https://en.wikipedia.org/wiki/Gaussian_elimination """ import numpy as np @@ -13,12 +13,17 @@ def retroactive_resolution( ) -> NDArray[float64]: """ This function performs a retroactive linear system resolution - for triangular matrix + for triangular matrix Examples: - 2x1 + 2x2 - 1x3 = 5 2x1 + 2x2 = -1 - 0x1 - 2x2 - 1x3 = -7 0x1 - 2x2 = -1 - 0x1 + 0x2 + 5x3 = 15 + 1. + * 2x1 + 2x2 - 1x3 = 5 + * 0x1 - 2x2 - 1x3 = -7 + * 0x1 + 0x2 + 5x3 = 15 + 2. + * 2x1 + 2x2 = -1 + * 0x1 - 2x2 = -1 + >>> gaussian_elimination([[2, 2, -1], [0, -2, -1], [0, 0, 5]], [[5], [-7], [15]]) array([[2.], [2.], @@ -45,9 +50,14 @@ def gaussian_elimination( This function performs Gaussian elimination method Examples: - 1x1 - 4x2 - 2x3 = -2 1x1 + 2x2 = 5 - 5x1 + 2x2 - 2x3 = -3 5x1 + 2x2 = 5 - 1x1 - 1x2 + 0x3 = 4 + 1. + * 1x1 - 4x2 - 2x3 = -2 + * 5x1 + 2x2 - 2x3 = -3 + * 1x1 - 1x2 + 0x3 = 4 + 2. + * 1x1 + 2x2 = 5 + * 5x1 + 2x2 = 5 + >>> gaussian_elimination([[1, -4, -2], [5, 2, -2], [1, -1, 0]], [[-2], [-3], [4]]) array([[ 2.3 ], [-1.7 ], diff --git a/linear_algebra/lu_decomposition.py b/linear_algebra/lu_decomposition.py index 3620674835cd..3d89b53a48fb 100644 --- a/linear_algebra/lu_decomposition.py +++ b/linear_algebra/lu_decomposition.py @@ -2,13 +2,14 @@ Lower-upper (LU) decomposition factors a matrix as a product of a lower triangular matrix and an upper triangular matrix. A square matrix has an LU decomposition under the following conditions: + - If the matrix is invertible, then it has an LU decomposition if and only - if all of its leading principal minors are non-zero (see - https://en.wikipedia.org/wiki/Minor_(linear_algebra) for an explanation of - leading principal minors of a matrix). + if all of its leading principal minors are non-zero (see + https://en.wikipedia.org/wiki/Minor_(linear_algebra) for an explanation of + leading principal minors of a matrix). - If the matrix is singular (i.e., not invertible) and it has a rank of k - (i.e., it has k linearly independent columns), then it has an LU - decomposition if its first k leading principal minors are non-zero. + (i.e., it has k linearly independent columns), then it has an LU + decomposition if its first k leading principal minors are non-zero. This algorithm will simply attempt to perform LU decomposition on any square matrix and raise an error if no such decomposition exists. @@ -25,6 +26,7 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray """ Perform LU decomposition on a given matrix and raises an error if the matrix isn't square or if no such decomposition exists + >>> matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]]) >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) >>> lower_mat @@ -45,7 +47,7 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray array([[ 4. , 3. ], [ 0. , -1.5]]) - # Matrix is not square + >>> # Matrix is not square >>> matrix = np.array([[2, -2, 1], [0, 1, 2]]) >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) Traceback (most recent call last): @@ -54,14 +56,14 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray [[ 2 -2 1] [ 0 1 2]] - # Matrix is invertible, but its first leading principal minor is 0 + >>> # Matrix is invertible, but its first leading principal minor is 0 >>> matrix = np.array([[0, 1], [1, 0]]) >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) Traceback (most recent call last): ... 
ArithmeticError: No LU decomposition exists - # Matrix is singular, but its first leading principal minor is 1 + >>> # Matrix is singular, but its first leading principal minor is 1 >>> matrix = np.array([[1, 0], [1, 0]]) >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) >>> lower_mat @@ -71,7 +73,7 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray array([[1., 0.], [0., 0.]]) - # Matrix is singular, but its first leading principal minor is 0 + >>> # Matrix is singular, but its first leading principal minor is 0 >>> matrix = np.array([[0, 1], [0, 1]]) >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) Traceback (most recent call last): diff --git a/linear_algebra/src/gaussian_elimination_pivoting.py b/linear_algebra/src/gaussian_elimination_pivoting.py index efc1ddd64a2e..540f57b0cff6 100644 --- a/linear_algebra/src/gaussian_elimination_pivoting.py +++ b/linear_algebra/src/gaussian_elimination_pivoting.py @@ -6,17 +6,18 @@ def solve_linear_system(matrix: np.ndarray) -> np.ndarray: Solve a linear system of equations using Gaussian elimination with partial pivoting Args: - - matrix: Coefficient matrix with the last column representing the constants. + - `matrix`: Coefficient matrix with the last column representing the constants. Returns: - - Solution vector. + - Solution vector. Raises: - - ValueError: If the matrix is not correct (i.e., singular). + - ``ValueError``: If the matrix is not correct (i.e., singular). https://courses.engr.illinois.edu/cs357/su2013/lect.htm Lecture 7 Example: + >>> A = np.array([[2, 1, -1], [-3, -1, 2], [-2, 1, 2]], dtype=float) >>> B = np.array([8, -11, -3], dtype=float) >>> solution = solve_linear_system(np.column_stack((A, B))) diff --git a/linear_algebra/src/rank_of_matrix.py b/linear_algebra/src/rank_of_matrix.py index 7ff3c1699a69..2c4fe2a8d1da 100644 --- a/linear_algebra/src/rank_of_matrix.py +++ b/linear_algebra/src/rank_of_matrix.py @@ -8,11 +8,15 @@ def rank_of_matrix(matrix: list[list[int | float]]) -> int: """ Finds the rank of a matrix. + Args: - matrix: The matrix as a list of lists. + `matrix`: The matrix as a list of lists. + Returns: The rank of the matrix. + Example: + >>> matrix1 = [[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9]] diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py index 7c79bb70abfc..74ac75e3fce2 100644 --- a/linear_algebra/src/schur_complement.py +++ b/linear_algebra/src/schur_complement.py @@ -12,13 +12,14 @@ def schur_complement( ) -> np.ndarray: """ Schur complement of a symmetric matrix X given as a 2x2 block matrix - consisting of matrices A, B and C. - Matrix A must be quadratic and non-singular. - In case A is singular, a pseudo-inverse may be provided using - the pseudo_inv argument. + consisting of matrices `A`, `B` and `C`. + Matrix `A` must be quadratic and non-singular. + In case `A` is singular, a pseudo-inverse may be provided using + the `pseudo_inv` argument. 
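Editor's note: for readers new to the term, the textbook definition can be checked with a few lines of NumPy. For a symmetric block matrix [[A, B], [B.T, C]] with invertible A, the Schur complement of the block A is C - B.T @ inv(A) @ B. The a and b values below follow the doctest of this function; c is an illustrative choice, and the snippet does not use the module's schur_complement() signature.

import numpy as np

a = np.array([[1.0, 2.0], [2.0, 1.0]])
b = np.array([[0.0, 3.0], [3.0, 0.0]])
c = np.array([[2.0, 1.0], [6.0, 3.0]])

# Schur complement of the block a in [[a, b], [b.T, c]]
s = c - b.T @ np.linalg.inv(a) @ b
print(s)  # [[ 5. -5.]
          #  [ 0.  6.]]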
+ + | Link to Wiki: https://en.wikipedia.org/wiki/Schur_complement + | See also Convex Optimization - Boyd and Vandenberghe, A.5.5 - Link to Wiki: https://en.wikipedia.org/wiki/Schur_complement - See also Convex Optimization - Boyd and Vandenberghe, A.5.5 >>> import numpy as np >>> a = np.array([[1, 2], [2, 1]]) >>> b = np.array([[0, 3], [3, 0]]) diff --git a/linear_algebra/src/transformations_2d.py b/linear_algebra/src/transformations_2d.py index b4185cd2848f..5dee59024752 100644 --- a/linear_algebra/src/transformations_2d.py +++ b/linear_algebra/src/transformations_2d.py @@ -3,13 +3,15 @@ I have added the codes for reflection, projection, scaling and rotation 2D matrices. +.. code-block:: python + scaling(5) = [[5.0, 0.0], [0.0, 5.0]] - rotation(45) = [[0.5253219888177297, -0.8509035245341184], - [0.8509035245341184, 0.5253219888177297]] -projection(45) = [[0.27596319193541496, 0.446998331800279], - [0.446998331800279, 0.7240368080645851]] -reflection(45) = [[0.05064397763545947, 0.893996663600558], - [0.893996663600558, 0.7018070490682369]] + rotation(45) = [[0.5253219888177297, -0.8509035245341184], + [0.8509035245341184, 0.5253219888177297]] + projection(45) = [[0.27596319193541496, 0.446998331800279], + [0.446998331800279, 0.7240368080645851]] + reflection(45) = [[0.05064397763545947, 0.893996663600558], + [0.893996663600558, 0.7018070490682369]] """ from math import cos, sin From f45e392cf6e94259eca8c47b13cd3ae22bcd901e Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 30 Dec 2024 12:56:24 +0300 Subject: [PATCH 1489/1543] Fix sphinx/build_docs warnings for ciphers (#12485) * Fix sphinx/build_docs warnings for ciphers * Fix --- ciphers/autokey.py | 7 +- ciphers/caesar_cipher.py | 77 ++++++++++------- ciphers/decrypt_caesar_with_chi_squared.py | 99 +++++++++++----------- ciphers/enigma_machine2.py | 64 +++++++------- ciphers/rsa_factorization.py | 13 +-- ciphers/simple_keyword_cypher.py | 10 ++- ciphers/trifid_cipher.py | 25 +++--- 7 files changed, 170 insertions(+), 125 deletions(-) diff --git a/ciphers/autokey.py b/ciphers/autokey.py index 05d8c066b139..7751a32d7546 100644 --- a/ciphers/autokey.py +++ b/ciphers/autokey.py @@ -1,5 +1,6 @@ """ https://en.wikipedia.org/wiki/Autokey_cipher + An autokey cipher (also known as the autoclave cipher) is a cipher that incorporates the message (the plaintext) into the key. The key is generated from the message in some automated fashion, @@ -10,8 +11,9 @@ def encrypt(plaintext: str, key: str) -> str: """ - Encrypt a given plaintext (string) and key (string), returning the + Encrypt a given `plaintext` (string) and `key` (string), returning the encrypted ciphertext. + >>> encrypt("hello world", "coffee") 'jsqqs avvwo' >>> encrypt("coffee is good as python", "TheAlgorithms") @@ -74,8 +76,9 @@ def encrypt(plaintext: str, key: str) -> str: def decrypt(ciphertext: str, key: str) -> str: """ - Decrypt a given ciphertext (string) and key (string), returning the decrypted + Decrypt a given `ciphertext` (string) and `key` (string), returning the decrypted ciphertext. 
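Editor's note: to make the "key is generated from the message" idea of the autokey cipher concrete, here is a minimal standalone sketch of plaintext-autokey encryption. The name autokey_encrypt_sketch is invented for this illustration, and it skips the validation that the module's encrypt() performs; it reproduces the doctest value shown above.

def autokey_encrypt_sketch(plaintext: str, key: str) -> str:
    # The running key is the keyword followed by the plaintext's own letters.
    letters = [ch for ch in plaintext.lower() if ch.isalpha()]
    key_stream = iter((key.lower() + "".join(letters))[: len(letters)])
    result = []
    for ch in plaintext.lower():
        if ch.isalpha():
            shift = ord(next(key_stream)) - ord("a")
            result.append(chr((ord(ch) - ord("a") + shift) % 26 + ord("a")))
        else:
            result.append(ch)  # keep spaces and punctuation as they are
    return "".join(result)


print(autokey_encrypt_sketch("hello world", "coffee"))  # jsqqs avvwo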
+ >>> decrypt("jsqqs avvwo", "coffee") 'hello world' >>> decrypt("vvjfpk wj ohvp su ddylsv", "TheAlgorithms") diff --git a/ciphers/caesar_cipher.py b/ciphers/caesar_cipher.py index d19b9a337221..9c096fe8a7da 100644 --- a/ciphers/caesar_cipher.py +++ b/ciphers/caesar_cipher.py @@ -7,24 +7,29 @@ def encrypt(input_string: str, key: int, alphabet: str | None = None) -> str: """ encrypt ======= + Encodes a given string with the caesar cipher and returns the encoded message Parameters: ----------- - * input_string: the plain-text that needs to be encoded - * key: the number of letters to shift the message by + + * `input_string`: the plain-text that needs to be encoded + * `key`: the number of letters to shift the message by Optional: - * alphabet (None): the alphabet used to encode the cipher, if not + + * `alphabet` (``None``): the alphabet used to encode the cipher, if not specified, the standard english alphabet with upper and lowercase letters is used Returns: + * A string containing the encoded cipher-text More on the caesar cipher ========================= + The caesar cipher is named after Julius Caesar who used it when sending secret military messages to his troops. This is a simple substitution cipher where every character in the plain-text is shifted by a certain number known @@ -32,26 +37,28 @@ def encrypt(input_string: str, key: int, alphabet: str | None = None) -> str: Example: Say we have the following message: - "Hello, captain" + ``Hello, captain`` And our alphabet is made up of lower and uppercase letters: - "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + ``abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`` - And our shift is "2" + And our shift is ``2`` - We can then encode the message, one letter at a time. "H" would become "J", - since "J" is two letters away, and so on. If the shift is ever two large, or + We can then encode the message, one letter at a time. ``H`` would become ``J``, + since ``J`` is two letters away, and so on. If the shift is ever two large, or our letter is at the end of the alphabet, we just start at the beginning - ("Z" would shift to "a" then "b" and so on). + (``Z`` would shift to ``a`` then ``b`` and so on). - Our final message would be "Jgnnq, ecrvckp" + Our final message would be ``Jgnnq, ecrvckp`` Further reading =============== + * https://en.m.wikipedia.org/wiki/Caesar_cipher Doctests ======== + >>> encrypt('The quick brown fox jumps over the lazy dog', 8) 'bpm yCqks jzwEv nwF rCuxA wDmz Bpm tiHG lwo' @@ -85,23 +92,28 @@ def decrypt(input_string: str, key: int, alphabet: str | None = None) -> str: """ decrypt ======= + Decodes a given string of cipher-text and returns the decoded plain-text Parameters: ----------- - * input_string: the cipher-text that needs to be decoded - * key: the number of letters to shift the message backwards by to decode + + * `input_string`: the cipher-text that needs to be decoded + * `key`: the number of letters to shift the message backwards by to decode Optional: - * alphabet (None): the alphabet used to decode the cipher, if not + + * `alphabet` (``None``): the alphabet used to decode the cipher, if not specified, the standard english alphabet with upper and lowercase letters is used Returns: + * A string containing the decoded plain-text More on the caesar cipher ========================= + The caesar cipher is named after Julius Caesar who used it when sending secret military messages to his troops. 
This is a simple substitution cipher where very character in the plain-text is shifted by a certain number known @@ -110,27 +122,29 @@ def decrypt(input_string: str, key: int, alphabet: str | None = None) -> str: Example: Say we have the following cipher-text: - "Jgnnq, ecrvckp" + ``Jgnnq, ecrvckp`` And our alphabet is made up of lower and uppercase letters: - "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + ``abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`` - And our shift is "2" + And our shift is ``2`` To decode the message, we would do the same thing as encoding, but in - reverse. The first letter, "J" would become "H" (remember: we are decoding) - because "H" is two letters in reverse (to the left) of "J". We would - continue doing this. A letter like "a" would shift back to the end of - the alphabet, and would become "Z" or "Y" and so on. + reverse. The first letter, ``J`` would become ``H`` (remember: we are decoding) + because ``H`` is two letters in reverse (to the left) of ``J``. We would + continue doing this. A letter like ``a`` would shift back to the end of + the alphabet, and would become ``Z`` or ``Y`` and so on. - Our final message would be "Hello, captain" + Our final message would be ``Hello, captain`` Further reading =============== + * https://en.m.wikipedia.org/wiki/Caesar_cipher Doctests ======== + >>> decrypt('bpm yCqks jzwEv nwF rCuxA wDmz Bpm tiHG lwo', 8) 'The quick brown fox jumps over the lazy dog' @@ -150,41 +164,44 @@ def brute_force(input_string: str, alphabet: str | None = None) -> dict[int, str """ brute_force =========== + Returns all the possible combinations of keys and the decoded strings in the form of a dictionary Parameters: ----------- - * input_string: the cipher-text that needs to be used during brute-force + + * `input_string`: the cipher-text that needs to be used during brute-force Optional: - * alphabet: (None): the alphabet used to decode the cipher, if not + + * `alphabet` (``None``): the alphabet used to decode the cipher, if not specified, the standard english alphabet with upper and lowercase letters is used More about brute force ====================== + Brute force is when a person intercepts a message or password, not knowing the key and tries every single combination. This is easy with the caesar cipher since there are only all the letters in the alphabet. The more complex the cipher, the larger amount of time it will take to do brute force Ex: - Say we have a 5 letter alphabet (abcde), for simplicity and we intercepted the - following message: - - "dbc" - + Say we have a ``5`` letter alphabet (``abcde``), for simplicity and we intercepted + the following message: ``dbc``, we could then just write out every combination: - ecd... and so on, until we reach a combination that makes sense: - "cab" + ``ecd``... and so on, until we reach a combination that makes sense: + ``cab`` Further reading =============== + * https://en.wikipedia.org/wiki/Brute_force Doctests ======== + >>> brute_force("jFyuMy xIH'N vLONy zILwy Gy!")[20] "Please don't brute force me!" 
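Editor's note: the brute-force loop described above can be boiled down to a few lines. The sketch below is restricted to lowercase letters for brevity (the module's brute_force() uses the full mixed-case alphabet), and brute_force_sketch is a name made up for this example.

def brute_force_sketch(ciphertext: str) -> dict[int, str]:
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    candidates = {}
    for key in range(1, len(alphabet) + 1):
        # Undo a shift of `key` for every letter; leave other characters alone.
        candidates[key] = "".join(
            alphabet[(alphabet.index(ch) - key) % 26] if ch in alphabet else ch
            for ch in ciphertext
        )
    return candidates


print(brute_force_sketch("jgnnq, ecrvckp")[2])  # hello, captain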
diff --git a/ciphers/decrypt_caesar_with_chi_squared.py b/ciphers/decrypt_caesar_with_chi_squared.py index 10832203e531..fb95c0f90628 100644 --- a/ciphers/decrypt_caesar_with_chi_squared.py +++ b/ciphers/decrypt_caesar_with_chi_squared.py @@ -11,33 +11,31 @@ def decrypt_caesar_with_chi_squared( """ Basic Usage =========== + Arguments: - * ciphertext (str): the text to decode (encoded with the caesar cipher) + * `ciphertext` (str): the text to decode (encoded with the caesar cipher) Optional Arguments: - * cipher_alphabet (list): the alphabet used for the cipher (each letter is - a string separated by commas) - * frequencies_dict (dict): a dictionary of word frequencies where keys are - the letters and values are a percentage representation of the frequency as - a decimal/float - * case_sensitive (bool): a boolean value: True if the case matters during - decryption, False if it doesn't + * `cipher_alphabet` (list): the alphabet used for the cipher (each letter is + a string separated by commas) + * `frequencies_dict` (dict): a dictionary of word frequencies where keys are + the letters and values are a percentage representation of the frequency as + a decimal/float + * `case_sensitive` (bool): a boolean value: ``True`` if the case matters during + decryption, ``False`` if it doesn't Returns: - * A tuple in the form of: - ( - most_likely_cipher, - most_likely_cipher_chi_squared_value, - decoded_most_likely_cipher - ) + * A tuple in the form of: + (`most_likely_cipher`, `most_likely_cipher_chi_squared_value`, + `decoded_most_likely_cipher`) - where... - - most_likely_cipher is an integer representing the shift of the smallest - chi-squared statistic (most likely key) - - most_likely_cipher_chi_squared_value is a float representing the - chi-squared statistic of the most likely shift - - decoded_most_likely_cipher is a string with the decoded cipher - (decoded by the most_likely_cipher key) + where... + - `most_likely_cipher` is an integer representing the shift of the smallest + chi-squared statistic (most likely key) + - `most_likely_cipher_chi_squared_value` is a float representing the + chi-squared statistic of the most likely shift + - `decoded_most_likely_cipher` is a string with the decoded cipher + (decoded by the most_likely_cipher key) The Chi-squared test @@ -45,52 +43,57 @@ def decrypt_caesar_with_chi_squared( The caesar cipher ----------------- + The caesar cipher is a very insecure encryption algorithm, however it has been used since Julius Caesar. The cipher is a simple substitution cipher where each character in the plain text is replaced by a character in the alphabet a certain number of characters after the original character. The number of characters away is called the shift or key. For example: - Plain text: hello - Key: 1 - Cipher text: ifmmp - (each letter in hello has been shifted one to the right in the eng. alphabet) + | Plain text: ``hello`` + | Key: ``1`` + | Cipher text: ``ifmmp`` + | (each letter in ``hello`` has been shifted one to the right in the eng. alphabet) As you can imagine, this doesn't provide lots of security. In fact decrypting ciphertext by brute-force is extremely easy even by hand. However - one way to do that is the chi-squared test. + one way to do that is the chi-squared test. The chi-squared test - ------------------- + -------------------- + Each letter in the english alphabet has a frequency, or the amount of times it shows up compared to other letters (usually expressed as a decimal representing the percentage likelihood). 
The most common letter in the - english language is "e" with a frequency of 0.11162 or 11.162%. The test is - completed in the following fashion. + english language is ``e`` with a frequency of ``0.11162`` or ``11.162%``. + The test is completed in the following fashion. 1. The ciphertext is decoded in a brute force way (every combination of the - 26 possible combinations) + ``26`` possible combinations) 2. For every combination, for each letter in the combination, the average amount of times the letter should appear the message is calculated by - multiplying the total number of characters by the frequency of the letter + multiplying the total number of characters by the frequency of the letter. + + | For example: + | In a message of ``100`` characters, ``e`` should appear around ``11.162`` + times. - For example: - In a message of 100 characters, e should appear around 11.162 times. + 3. Then, to calculate the margin of error (the amount of times the letter + SHOULD appear with the amount of times the letter DOES appear), we use + the chi-squared test. The following formula is used: - 3. Then, to calculate the margin of error (the amount of times the letter - SHOULD appear with the amount of times the letter DOES appear), we use - the chi-squared test. The following formula is used: + Let: + - n be the number of times the letter actually appears + - p be the predicted value of the number of times the letter should + appear (see item ``2``) + - let v be the chi-squared test result (referred to here as chi-squared + value/statistic) - Let: - - n be the number of times the letter actually appears - - p be the predicted value of the number of times the letter should - appear (see #2) - - let v be the chi-squared test result (referred to here as chi-squared - value/statistic) + :: - (n - p)^2 - --------- = v - p + (n - p)^2 + --------- = v + p 4. Each chi squared value for each letter is then added up to the total. The total is the chi-squared statistic for that encryption key. @@ -98,16 +101,16 @@ def decrypt_caesar_with_chi_squared( to be the decoded answer. Further Reading - ================ + =============== - * http://practicalcryptography.com/cryptanalysis/text-characterisation/chi-squared- - statistic/ + * http://practicalcryptography.com/cryptanalysis/text-characterisation/chi-squared-statistic/ * https://en.wikipedia.org/wiki/Letter_frequency * https://en.wikipedia.org/wiki/Chi-squared_test * https://en.m.wikipedia.org/wiki/Caesar_cipher Doctests ======== + >>> decrypt_caesar_with_chi_squared( ... 'dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!' ... ) # doctest: +NORMALIZE_WHITESPACE diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index 163aa7172c11..e42fdd82ed41 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -1,14 +1,16 @@ """ -Wikipedia: https://en.wikipedia.org/wiki/Enigma_machine -Video explanation: https://youtu.be/QwQVMqfoB2E -Also check out Numberphile's and Computerphile's videos on this topic +| Wikipedia: https://en.wikipedia.org/wiki/Enigma_machine +| Video explanation: https://youtu.be/QwQVMqfoB2E +| Also check out Numberphile's and Computerphile's videos on this topic -This module contains function 'enigma' which emulates +This module contains function ``enigma`` which emulates the famous Enigma machine from WWII. 
+ Module includes: -- enigma function + +- ``enigma`` function - showcase of function usage -- 9 randomly generated rotors +- ``9`` randomly generated rotors - reflector (aka static rotor) - original alphabet @@ -73,7 +75,7 @@ def _validator( rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: """ - Checks if the values can be used for the 'enigma' function + Checks if the values can be used for the ``enigma`` function >>> _validator((1,1,1), (rotor1, rotor2, rotor3), 'POLAND') ((1, 1, 1), ('EGZWVONAHDCLFQMSIPJBYUKXTR', 'FOBHMDKEXQNRAULPGSJVTYICZW', \ @@ -83,7 +85,7 @@ def _validator( :param rotpos: rotor_positon :param rotsel: rotor_selection :param pb: plugb -> validated and transformed - :return: (rotpos, rotsel, pb) + :return: (`rotpos`, `rotsel`, `pb`) """ # Checks if there are 3 unique rotors @@ -118,9 +120,10 @@ def _plugboard(pbstring: str) -> dict[str, str]: >>> _plugboard('POLAND') {'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'} - In the code, 'pb' stands for 'plugboard' + In the code, ``pb`` stands for ``plugboard`` Pairs can be separated by spaces + :param pbstring: string containing plugboard setting for the Enigma machine :return: dictionary containing converted pairs """ @@ -168,31 +171,34 @@ def enigma( plugb: str = "", ) -> str: """ - The only difference with real-world enigma is that I allowed string input. + The only difference with real-world enigma is that ``I`` allowed string input. All characters are converted to uppercase. (non-letter symbol are ignored) - How it works: - (for every letter in the message) + + | How it works: + | (for every letter in the message) - Input letter goes into the plugboard. - If it is connected to another one, switch it. + If it is connected to another one, switch it. + + - Letter goes through ``3`` rotors. + Each rotor can be represented as ``2`` sets of symbol, where one is shuffled. + Each symbol from the first set has corresponding symbol in + the second set and vice versa. - - Letter goes through 3 rotors. - Each rotor can be represented as 2 sets of symbol, where one is shuffled. - Each symbol from the first set has corresponding symbol in - the second set and vice versa. + example:: - example: - | ABCDEFGHIJKLMNOPQRSTUVWXYZ | e.g. F=D and D=F - | VKLEPDBGRNWTFCJOHQAMUZYIXS | + | ABCDEFGHIJKLMNOPQRSTUVWXYZ | e.g. F=D and D=F + | VKLEPDBGRNWTFCJOHQAMUZYIXS | - Symbol then goes through reflector (static rotor). - There it is switched with paired symbol - The reflector can be represented as2 sets, each with half of the alphanet. - There are usually 10 pairs of letters. + There it is switched with paired symbol. + The reflector can be represented as ``2`` sets, each with half of the alphanet. + There are usually ``10`` pairs of letters. + + Example:: - Example: - | ABCDEFGHIJKLM | e.g. E is paired to X - | ZYXWVUTSRQPON | so when E goes in X goes out and vice versa + | ABCDEFGHIJKLM | e.g. E is paired to X + | ZYXWVUTSRQPON | so when E goes in X goes out and vice versa - Letter then goes through the rotors again @@ -211,9 +217,9 @@ def enigma( :param text: input message - :param rotor_position: tuple with 3 values in range 1..26 - :param rotor_selection: tuple with 3 rotors () - :param plugb: string containing plugboard configuration (default '') + :param rotor_position: tuple with ``3`` values in range ``1``.. 
``26`` + :param rotor_selection: tuple with ``3`` rotors + :param plugb: string containing plugboard configuration (default ``''``) :return: en/decrypted string """ diff --git a/ciphers/rsa_factorization.py b/ciphers/rsa_factorization.py index 0a358a4fc2d4..585b21fac856 100644 --- a/ciphers/rsa_factorization.py +++ b/ciphers/rsa_factorization.py @@ -3,8 +3,10 @@ The program can efficiently factor RSA prime number given the private key d and public key e. -Source: on page 3 of https://crypto.stanford.edu/~dabo/papers/RSA-survey.pdf -More readable source: https://www.di-mgt.com.au/rsa_factorize_n.html + +| Source: on page ``3`` of https://crypto.stanford.edu/~dabo/papers/RSA-survey.pdf +| More readable source: https://www.di-mgt.com.au/rsa_factorize_n.html + large number can take minutes to factor, therefore are not included in doctest. """ @@ -17,13 +19,14 @@ def rsafactor(d: int, e: int, n: int) -> list[int]: """ This function returns the factors of N, where p*q=N - Return: [p, q] + + Return: [p, q] We call N the RSA modulus, e the encryption exponent, and d the decryption exponent. The pair (N, e) is the public key. As its name suggests, it is public and is used to - encrypt messages. + encrypt messages. The pair (N, d) is the secret key or private key and is known only to the recipient - of encrypted messages. + of encrypted messages. >>> rsafactor(3, 16971, 25777) [149, 173] diff --git a/ciphers/simple_keyword_cypher.py b/ciphers/simple_keyword_cypher.py index 9dc624e7762c..bde137d826c3 100644 --- a/ciphers/simple_keyword_cypher.py +++ b/ciphers/simple_keyword_cypher.py @@ -1,9 +1,11 @@ def remove_duplicates(key: str) -> str: """ Removes duplicate alphabetic characters in a keyword (letter is ignored after its - first appearance). + first appearance). + :param key: Keyword to use :return: String with duplicates removed + >>> remove_duplicates('Hello World!!') 'Helo Wrd' """ @@ -18,6 +20,7 @@ def remove_duplicates(key: str) -> str: def create_cipher_map(key: str) -> dict[str, str]: """ Returns a cipher map given a keyword. + :param key: keyword to use :return: dictionary cipher map """ @@ -43,9 +46,11 @@ def create_cipher_map(key: str) -> dict[str, str]: def encipher(message: str, cipher_map: dict[str, str]) -> str: """ Enciphers a message given a cipher map. + :param message: Message to encipher :param cipher_map: Cipher map :return: enciphered string + >>> encipher('Hello World!!', create_cipher_map('Goodbye!!')) 'CYJJM VMQJB!!' """ @@ -55,9 +60,11 @@ def encipher(message: str, cipher_map: dict[str, str]) -> str: def decipher(message: str, cipher_map: dict[str, str]) -> str: """ Deciphers a message given a cipher map + :param message: Message to decipher :param cipher_map: Dictionary mapping to use :return: Deciphered string + >>> cipher_map = create_cipher_map('Goodbye!!') >>> decipher(encipher('Hello World!!', cipher_map), cipher_map) 'HELLO WORLD!!' 
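Editor's note: the substitution map these keyword-cipher helpers build can be illustrated with a short standalone sketch. keyword_cipher_map_sketch is a hypothetical name, the sketch handles uppercase only, and it reproduces the doctest value shown above.

import string

def keyword_cipher_map_sketch(keyword: str) -> dict[str, str]:
    # Keyword letters (first occurrence only) come first, then the rest of
    # the alphabet, and each plain letter is substituted by position.
    seen: list[str] = []
    for ch in keyword.upper():
        if ch.isalpha() and ch not in seen:
            seen.append(ch)
    cipher_alphabet = seen + [c for c in string.ascii_uppercase if c not in seen]
    return dict(zip(string.ascii_uppercase, cipher_alphabet))


cipher_map = keyword_cipher_map_sketch("Goodbye!!")
print("".join(cipher_map.get(ch, ch) for ch in "HELLO WORLD!!"))  # CYJJM VMQJB!!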
@@ -70,6 +77,7 @@ def decipher(message: str, cipher_map: dict[str, str]) -> str: def main() -> None: """ Handles I/O + :return: void """ message = input("Enter message to encode or decode: ").strip() diff --git a/ciphers/trifid_cipher.py b/ciphers/trifid_cipher.py index 16b9faf67688..9613cee0669d 100644 --- a/ciphers/trifid_cipher.py +++ b/ciphers/trifid_cipher.py @@ -22,7 +22,7 @@ def __encrypt_part(message_part: str, character_to_number: dict[str, str]) -> str: """ - Arrange the triagram value of each letter of 'message_part' vertically and join + Arrange the triagram value of each letter of `message_part` vertically and join them horizontally. >>> __encrypt_part('ASK', TEST_CHARACTER_TO_NUMBER) @@ -65,8 +65,8 @@ def __prepare( """ A helper function that generates the triagrams and assigns each letter of the alphabet to its corresponding triagram and stores this in a dictionary - ("character_to_number" and "number_to_character") after confirming if the - alphabet's length is 27. + (`character_to_number` and `number_to_character`) after confirming if the + alphabet's length is ``27``. >>> test = __prepare('I aM a BOy','abCdeFghijkLmnopqrStuVwxYZ+') >>> expected = ('IAMABOY','ABCDEFGHIJKLMNOPQRSTUVWXYZ+', @@ -75,24 +75,28 @@ def __prepare( True Testing with incomplete alphabet + >>> __prepare('I aM a BOy','abCdeFghijkLmnopqrStuVw') Traceback (most recent call last): ... KeyError: 'Length of alphabet has to be 27.' Testing with extra long alphabets + >>> __prepare('I aM a BOy','abCdeFghijkLmnopqrStuVwxyzzwwtyyujjgfd') Traceback (most recent call last): ... KeyError: 'Length of alphabet has to be 27.' Testing with punctuations that are not in the given alphabet + >>> __prepare('am i a boy?','abCdeFghijkLmnopqrStuVwxYZ+') Traceback (most recent call last): ... ValueError: Each message character has to be included in alphabet! Testing with numbers + >>> __prepare(500,'abCdeFghijkLmnopqrStuVwxYZ+') Traceback (most recent call last): ... @@ -130,9 +134,9 @@ def encrypt_message( PARAMETERS ---------- - * message: The message you want to encrypt. - * alphabet (optional): The characters to be used for the cipher . - * period (optional): The number of characters you want in a group whilst + * `message`: The message you want to encrypt. + * `alphabet` (optional): The characters to be used for the cipher . + * `period` (optional): The number of characters you want in a group whilst encrypting. >>> encrypt_message('I am a boy') @@ -169,20 +173,21 @@ def decrypt_message( decrypt_message =============== - Decrypts a trifid_cipher encrypted message . + Decrypts a trifid_cipher encrypted message. PARAMETERS ---------- - * message: The message you want to decrypt . - * alphabet (optional): The characters used for the cipher. - * period (optional): The number of characters used in grouping when it + * `message`: The message you want to decrypt. + * `alphabet` (optional): The characters used for the cipher. + * `period` (optional): The number of characters used in grouping when it was encrypted. 
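Editor's note: since trifid encryption and decryption mirror each other, the triagram regrouping idea can be checked with a self-contained round-trip sketch. The triagram assignment below is an arbitrary one invented for illustration, so the intermediate ciphertext will generally differ from the module's doctests even though the round trip holds; the function name is hypothetical.

def trifid_round_trip_sketch(message: str, alphabet: str, period: int) -> None:
    # Assign every character of a 27-letter alphabet an arbitrary base-3
    # triple made of the digits 1-3 (27 = 3 * 3 * 3 distinct triples).
    to_digits = {
        ch: f"{i // 9 + 1}{i % 9 // 3 + 1}{i % 3 + 1}" for i, ch in enumerate(alphabet)
    }
    to_char = {digits: ch for ch, digits in to_digits.items()}

    def encrypt(plain: str) -> str:
        out = []
        for start in range(0, len(plain), period):
            group = plain[start : start + period]
            rows = ["", "", ""]
            for ch in group:  # write each triple vertically ...
                for row, digit in enumerate(to_digits[ch]):
                    rows[row] += digit
            digits = "".join(rows)  # ... then read the rows horizontally
            out += [to_char[digits[i : i + 3]] for i in range(0, len(digits), 3)]
        return "".join(out)

    def decrypt(cipher: str) -> str:
        out = []
        for start in range(0, len(cipher), period):
            group = cipher[start : start + period]
            digits = "".join(to_digits[ch] for ch in group)
            width = len(group)
            rows = [digits[row * width : (row + 1) * width] for row in range(3)]
            out += [to_char[rows[0][i] + rows[1][i] + rows[2][i]] for i in range(width)]
        return "".join(out)

    encrypted = encrypt(message)
    print(encrypted, decrypt(encrypted) == message)  # some ciphertext, then True


trifid_round_trip_sketch("IAMABOY", "ABCDEFGHIJKLMNOPQRSTUVWXYZ+", 5)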
>>> decrypt_message('BCDGBQY') 'IAMABOY' Decrypting with your own alphabet and period + >>> decrypt_message('FMJFVOISSUFTFPUFEQQC','FELIXMARDSTBCGHJKNOPQUVWYZ+',5) 'AIDETOILECIELTAIDERA' """ From 68b4c6b4793867126f71ebf2a399402b02472edb Mon Sep 17 00:00:00 2001 From: mahdi tavasoli Date: Mon, 30 Dec 2024 13:52:20 +0330 Subject: [PATCH 1490/1543] fix is_ip_v4_address_valid.py (#12394) * fix is_ip_v4_address_valid * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update is_ip_v4_address_valid.py --------- Co-authored-by: m.tavasoli Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- maths/is_ip_v4_address_valid.py | 37 +++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/maths/is_ip_v4_address_valid.py b/maths/is_ip_v4_address_valid.py index 0ae8e021ead1..305afabffed3 100644 --- a/maths/is_ip_v4_address_valid.py +++ b/maths/is_ip_v4_address_valid.py @@ -1,13 +1,15 @@ """ +wiki: https://en.wikipedia.org/wiki/IPv4 + Is IP v4 address valid? A valid IP address must be four octets in the form of A.B.C.D, -where A,B,C and D are numbers from 0-254 -for example: 192.168.23.1, 172.254.254.254 are valid IP address - 192.168.255.0, 255.192.3.121 are invalid IP address +where A, B, C and D are numbers from 0-255 +for example: 192.168.23.1, 172.255.255.255 are valid IP address + 192.168.256.0, 256.192.3.121 are invalid IP address """ -def is_ip_v4_address_valid(ip_v4_address: str) -> bool: +def is_ip_v4_address_valid(ip: str) -> bool: """ print "Valid IP address" If IP is valid. or @@ -16,13 +18,13 @@ def is_ip_v4_address_valid(ip_v4_address: str) -> bool: >>> is_ip_v4_address_valid("192.168.0.23") True - >>> is_ip_v4_address_valid("192.255.15.8") + >>> is_ip_v4_address_valid("192.256.15.8") False >>> is_ip_v4_address_valid("172.100.0.8") True - >>> is_ip_v4_address_valid("254.255.0.255") + >>> is_ip_v4_address_valid("255.256.0.256") False >>> is_ip_v4_address_valid("1.2.33333333.4") @@ -45,12 +47,29 @@ def is_ip_v4_address_valid(ip_v4_address: str) -> bool: >>> is_ip_v4_address_valid("1.2.3.") False + + >>> is_ip_v4_address_valid("1.2.3.05") + False """ - octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()] - return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets) + octets = ip.split(".") + if len(octets) != 4: + return False + + for octet in octets: + if not octet.isdigit(): + return False + + number = int(octet) + if len(str(number)) != len(octet): + return False + + if not 0 <= number <= 255: + return False + + return True if __name__ == "__main__": ip = input().strip() valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid" - print(f"{ip} is a {valid_or_invalid} IP v4 address.") + print(f"{ip} is a {valid_or_invalid} IPv4 address.") From 2ca96b7c8ec2134a7282bed13f1cc93358c13c45 Mon Sep 17 00:00:00 2001 From: jperezr <122382210+MRJPEREZR@users.noreply.github.com> Date: Mon, 30 Dec 2024 11:37:21 +0100 Subject: [PATCH 1491/1543] current_stock_price test added (#12390) * adding test to web_programming/current_stock_price * adding test to web_programming/current_stock_price * Update current_stock_price.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_stock_price.py --------- Co-authored-by: Maxim Smolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- 
web_programming/current_stock_price.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/web_programming/current_stock_price.py b/web_programming/current_stock_price.py index 9567c05b0558..d0a65e9aac84 100644 --- a/web_programming/current_stock_price.py +++ b/web_programming/current_stock_price.py @@ -1,14 +1,30 @@ import requests from bs4 import BeautifulSoup +""" +Get the HTML code of finance yahoo and select the current qsp-price +Current AAPL stock price is 228.43 +Current AMZN stock price is 201.85 +Current IBM stock price is 210.30 +Current GOOG stock price is 177.86 +Current MSFT stock price is 414.82 +Current ORCL stock price is 188.87 +""" + def stock_price(symbol: str = "AAPL") -> str: + """ + >>> stock_price("EEEE") + '-' + >>> isinstance(float(stock_price("GOOG")),float) + True + """ url = f"https://finance.yahoo.com/quote/{symbol}?p={symbol}" yahoo_finance_source = requests.get( url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10 ).text soup = BeautifulSoup(yahoo_finance_source, "html.parser") - specific_fin_streamer_tag = soup.find("fin-streamer", {"data-test": "qsp-price"}) + specific_fin_streamer_tag = soup.find("fin-streamer", {"data-testid": "qsp-price"}) if specific_fin_streamer_tag: text = specific_fin_streamer_tag.get_text() @@ -18,5 +34,9 @@ def stock_price(symbol: str = "AAPL") -> str: # Search for the symbol at https://finance.yahoo.com/lookup if __name__ == "__main__": + from doctest import testmod + + testmod() + for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}") From 24923ee635973a05f7713dd672fea07361fa0466 Mon Sep 17 00:00:00 2001 From: Shikhar Maheshwari <83123897+shikhar-sm@users.noreply.github.com> Date: Mon, 30 Dec 2024 16:21:10 +0530 Subject: [PATCH 1492/1543] Add doctest to maths/numerical_analysis/intersection.py (#12148) --- maths/numerical_analysis/intersection.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/maths/numerical_analysis/intersection.py b/maths/numerical_analysis/intersection.py index 826c0ead0a00..325abeaca996 100644 --- a/maths/numerical_analysis/intersection.py +++ b/maths/numerical_analysis/intersection.py @@ -42,6 +42,11 @@ def intersection(function: Callable[[float], float], x0: float, x1: float) -> fl def f(x: float) -> float: + """ + function is f(x) = x^3 - 2x - 5 + >>> f(2) + -1.0 + """ return math.pow(x, 3) - (2 * x) - 5 From da587d06ac88e338e7db8f10fa8ca2ae556e7bae Mon Sep 17 00:00:00 2001 From: Sankabapur <152031570+Parthjhalani07@users.noreply.github.com> Date: Mon, 30 Dec 2024 16:29:03 +0530 Subject: [PATCH 1493/1543] Added doctest to /maths/power_using_recursion.py (#11994) --- maths/power_using_recursion.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/maths/power_using_recursion.py b/maths/power_using_recursion.py index 29283ca0f67c..eb775b161ae8 100644 --- a/maths/power_using_recursion.py +++ b/maths/power_using_recursion.py @@ -38,6 +38,14 @@ def power(base: int, exponent: int) -> float: Traceback (most recent call last): ... 
RecursionError: maximum recursion depth exceeded + >>> power(0, 0) + 1 + >>> power(0, 1) + 0 + >>> power(5,6) + 15625 + >>> power(23, 12) + 21914624432020321 """ return base * power(base, (exponent - 1)) if exponent else 1 From 493a7c153c1ca1805e60c109842ee1a1ee63cde2 Mon Sep 17 00:00:00 2001 From: Jeel Rupapara Date: Mon, 30 Dec 2024 16:40:44 +0530 Subject: [PATCH 1494/1543] feat: add testcase of assemble_transformation (#11810) --- strings/min_cost_string_conversion.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/strings/min_cost_string_conversion.py b/strings/min_cost_string_conversion.py index 93791e2a7ed3..87eb5189e16a 100644 --- a/strings/min_cost_string_conversion.py +++ b/strings/min_cost_string_conversion.py @@ -91,6 +91,14 @@ def assemble_transformation(ops: list[list[str]], i: int, j: int) -> list[str]: >>> y1 = len(ops1[0]) - 1 >>> assemble_transformation(ops1, x1, y1) [] + + >>> ops2 = [['0', 'I1', 'I2', 'I3'], + ... ['D1', 'C1', 'I2', 'I3'], + ... ['D2', 'D2', 'R23', 'R23']] + >>> x2 = len(ops2) - 1 + >>> y2 = len(ops2[0]) - 1 + >>> assemble_transformation(ops2, x2, y2) + ['C1', 'I2', 'R23'] """ if i == 0 and j == 0: return [] From 7fa9b4bf1bc9822517bb0046aebc2e8b2997d3e1 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 30 Dec 2024 14:52:03 +0300 Subject: [PATCH 1495/1543] Fix sphinx/build_docs warnings for dynamic_programming (#12484) * Fix sphinx/build_docs warnings for dynamic_programming * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Fix * Fix * Fix * Fix * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/all_construct.py | 7 +- dynamic_programming/combination_sum_iv.py | 23 +- dynamic_programming/fizz_buzz.py | 11 +- dynamic_programming/knapsack.py | 40 ++-- .../longest_common_substring.py | 14 +- .../longest_increasing_subsequence.py | 13 +- .../matrix_chain_multiplication.py | 89 ++++---- dynamic_programming/max_product_subarray.py | 3 +- .../minimum_squares_to_represent_a_number.py | 1 + dynamic_programming/regex_match.py | 22 +- dynamic_programming/rod_cutting.py | 83 ++++--- dynamic_programming/subset_generation.py | 63 +++--- dynamic_programming/viterbi.py | 212 ++++++++---------- 13 files changed, 295 insertions(+), 286 deletions(-) diff --git a/dynamic_programming/all_construct.py b/dynamic_programming/all_construct.py index 5d585fc7fcec..ca00f2beb06a 100644 --- a/dynamic_programming/all_construct.py +++ b/dynamic_programming/all_construct.py @@ -8,9 +8,10 @@ def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]: """ - returns the list containing all the possible - combinations a string(target) can be constructed from - the given list of substrings(word_bank) + returns the list containing all the possible + combinations a string(`target`) can be constructed from + the given list of substrings(`word_bank`) + >>> all_construct("hello", ["he", "l", "o"]) [['he', 'l', 'l', 'o']] >>> all_construct("purple",["purp","p","ur","le","purpl"]) diff --git a/dynamic_programming/combination_sum_iv.py b/dynamic_programming/combination_sum_iv.py index 113c06a27a9e..ed8dcd88e6fd 100644 --- a/dynamic_programming/combination_sum_iv.py +++ b/dynamic_programming/combination_sum_iv.py @@ -1,24 +1,25 @@ """ Question: -You are given an array of distinct integers and you have to tell how many -different ways of selecting the elements from the array are there such that -the sum of chosen elements 
is equal to the target number tar. + You are given an array of distinct integers and you have to tell how many + different ways of selecting the elements from the array are there such that + the sum of chosen elements is equal to the target number tar. Example Input: -N = 3 -target = 5 -array = [1, 2, 5] + * N = 3 + * target = 5 + * array = [1, 2, 5] Output: -9 + 9 Approach: -The basic idea is to go over recursively to find the way such that the sum -of chosen elements is “tar”. For every element, we have two choices - 1. Include the element in our set of chosen elements. - 2. Don't include the element in our set of chosen elements. + The basic idea is to go over recursively to find the way such that the sum + of chosen elements is `target`. For every element, we have two choices + + 1. Include the element in our set of chosen elements. + 2. Don't include the element in our set of chosen elements. """ diff --git a/dynamic_programming/fizz_buzz.py b/dynamic_programming/fizz_buzz.py index e29116437a93..0cb48897875b 100644 --- a/dynamic_programming/fizz_buzz.py +++ b/dynamic_programming/fizz_buzz.py @@ -3,11 +3,12 @@ def fizz_buzz(number: int, iterations: int) -> str: """ - Plays FizzBuzz. - Prints Fizz if number is a multiple of 3. - Prints Buzz if its a multiple of 5. - Prints FizzBuzz if its a multiple of both 3 and 5 or 15. - Else Prints The Number Itself. + | Plays FizzBuzz. + | Prints Fizz if number is a multiple of ``3``. + | Prints Buzz if its a multiple of ``5``. + | Prints FizzBuzz if its a multiple of both ``3`` and ``5`` or ``15``. + | Else Prints The Number Itself. + >>> fizz_buzz(1,7) '1 2 Fizz 4 Buzz Fizz 7 ' >>> fizz_buzz(1,0) diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py index 489b5ada450a..28c5b19dbe36 100644 --- a/dynamic_programming/knapsack.py +++ b/dynamic_programming/knapsack.py @@ -11,7 +11,7 @@ def mf_knapsack(i, wt, val, j): """ This code involves the concept of memory functions. Here we solve the subproblems which are needed unlike the below example - F is a 2D array with -1s filled up + F is a 2D array with ``-1`` s filled up """ global f # a global dp table for knapsack if f[i][j] < 0: @@ -45,22 +45,24 @@ def knapsack_with_example_solution(w: int, wt: list, val: list): the several possible optimal subsets. Parameters - --------- + ---------- - W: int, the total maximum weight for the given knapsack problem. - wt: list, the vector of weights for all items where wt[i] is the weight - of the i-th item. - val: list, the vector of values for all items where val[i] is the value - of the i-th item + * `w`: int, the total maximum weight for the given knapsack problem. + * `wt`: list, the vector of weights for all items where ``wt[i]`` is the weight + of the ``i``-th item. + * `val`: list, the vector of values for all items where ``val[i]`` is the value + of the ``i``-th item Returns ------- - optimal_val: float, the optimal value for the given knapsack problem - example_optional_set: set, the indices of one of the optimal subsets - which gave rise to the optimal value. + + * `optimal_val`: float, the optimal value for the given knapsack problem + * `example_optional_set`: set, the indices of one of the optimal subsets + which gave rise to the optimal value. 
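Editor's note: as a compact reference for the bottom-up recurrence behind knapsack tables like the one used here, the following standalone sketch returns only the optimal value (not an example subset); knapsack_value_sketch is a name invented for the illustration.

def knapsack_value_sketch(capacity: int, weights: list[int], values: list[int]) -> int:
    # dp[i][j]: best value using the first i items with weight budget j.
    items = len(weights)
    dp = [[0] * (capacity + 1) for _ in range(items + 1)]
    for i in range(1, items + 1):
        for j in range(capacity + 1):
            dp[i][j] = dp[i - 1][j]  # option 1: skip item i
            if weights[i - 1] <= j:  # option 2: take item i if it fits
                dp[i][j] = max(
                    dp[i][j], dp[i - 1][j - weights[i - 1]] + values[i - 1]
                )
    return dp[items][capacity]


print(knapsack_value_sketch(10, [1, 3, 5, 2], [10, 20, 100, 22]))  # 142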
Examples - ------- + -------- + >>> knapsack_with_example_solution(10, [1, 3, 5, 2], [10, 20, 100, 22]) (142, {2, 3, 4}) >>> knapsack_with_example_solution(6, [4, 3, 2, 3], [3, 2, 4, 4]) @@ -104,19 +106,19 @@ def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set): a filled DP table and the vector of weights Parameters - --------- - - dp: list of list, the table of a solved integer weight dynamic programming problem + ---------- - wt: list or tuple, the vector of weights of the items - i: int, the index of the item under consideration - j: int, the current possible maximum weight - optimal_set: set, the optimal subset so far. This gets modified by the function. + * `dp`: list of list, the table of a solved integer weight dynamic programming + problem + * `wt`: list or tuple, the vector of weights of the items + * `i`: int, the index of the item under consideration + * `j`: int, the current possible maximum weight + * `optimal_set`: set, the optimal subset so far. This gets modified by the function. Returns ------- - None + ``None`` """ # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). diff --git a/dynamic_programming/longest_common_substring.py b/dynamic_programming/longest_common_substring.py index e2f944a5e336..ea5233eb2d17 100644 --- a/dynamic_programming/longest_common_substring.py +++ b/dynamic_programming/longest_common_substring.py @@ -1,15 +1,19 @@ """ -Longest Common Substring Problem Statement: Given two sequences, find the -longest common substring present in both of them. A substring is -necessarily continuous. -Example: "abcdef" and "xabded" have two longest common substrings, "ab" or "de". -Therefore, algorithm should return any one of them. +Longest Common Substring Problem Statement: + Given two sequences, find the + longest common substring present in both of them. A substring is + necessarily continuous. + +Example: + ``abcdef`` and ``xabded`` have two longest common substrings, ``ab`` or ``de``. + Therefore, algorithm should return any one of them. """ def longest_common_substring(text1: str, text2: str) -> str: """ Finds the longest common substring between two strings. + >>> longest_common_substring("", "") '' >>> longest_common_substring("a","") diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py index 2a78e2e7ad1d..d839757f6da5 100644 --- a/dynamic_programming/longest_increasing_subsequence.py +++ b/dynamic_programming/longest_increasing_subsequence.py @@ -4,11 +4,13 @@ This is a pure Python implementation of Dynamic Programming solution to the longest increasing subsequence of a given sequence. -The problem is : -Given an array, to find the longest and increasing sub-array in that given array and -return it. -Example: [10, 22, 9, 33, 21, 50, 41, 60, 80] as input will return - [10, 22, 33, 41, 60, 80] as output +The problem is: + Given an array, to find the longest and increasing sub-array in that given array and + return it. 
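Editor's note: a standalone sketch of the quadratic dynamic-programming approach to the problem just stated is given below. It may return a different (but equally long) increasing subsequence than the example in this docstring, and the function name is invented for the illustration.

def longest_increasing_subsequence_sketch(array: list[int]) -> list[int]:
    if not array:
        return []
    # lengths[i]: length of the best increasing subsequence ending at i;
    # parents[i]: index of the previous element on that subsequence.
    lengths = [1] * len(array)
    parents = list(range(len(array)))
    for i in range(1, len(array)):
        for j in range(i):
            if array[j] < array[i] and lengths[j] + 1 > lengths[i]:
                lengths[i] = lengths[j] + 1
                parents[i] = j
    index = max(range(len(array)), key=lambda k: lengths[k])
    result = [array[index]]
    while parents[index] != index:  # walk the parent chain back to the start
        index = parents[index]
        result.append(array[index])
    return result[::-1]


print(longest_increasing_subsequence_sketch([10, 22, 9, 33, 21, 50, 41, 60, 80]))
# one valid answer of length 6, e.g. [10, 22, 33, 50, 60, 80]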
+ +Example: + ``[10, 22, 9, 33, 21, 50, 41, 60, 80]`` as input will return + ``[10, 22, 33, 41, 60, 80]`` as output """ from __future__ import annotations @@ -17,6 +19,7 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recursive """ Some examples + >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) [10, 22, 33, 41, 60, 80] >>> longest_subsequence([4, 8, 7, 5, 1, 12, 2, 3, 9]) diff --git a/dynamic_programming/matrix_chain_multiplication.py b/dynamic_programming/matrix_chain_multiplication.py index da6e525ce816..10e136b9f0db 100644 --- a/dynamic_programming/matrix_chain_multiplication.py +++ b/dynamic_programming/matrix_chain_multiplication.py @@ -1,42 +1,48 @@ """ -Find the minimum number of multiplications needed to multiply chain of matrices. -Reference: https://www.geeksforgeeks.org/matrix-chain-multiplication-dp-8/ +| Find the minimum number of multiplications needed to multiply chain of matrices. +| Reference: https://www.geeksforgeeks.org/matrix-chain-multiplication-dp-8/ -The algorithm has interesting real-world applications. Example: -1. Image transformations in Computer Graphics as images are composed of matrix. -2. Solve complex polynomial equations in the field of algebra using least processing - power. -3. Calculate overall impact of macroeconomic decisions as economic equations involve a - number of variables. -4. Self-driving car navigation can be made more accurate as matrix multiplication can - accurately determine position and orientation of obstacles in short time. +The algorithm has interesting real-world applications. -Python doctests can be run with the following command: -python -m doctest -v matrix_chain_multiply.py +Example: + 1. Image transformations in Computer Graphics as images are composed of matrix. + 2. Solve complex polynomial equations in the field of algebra using least processing + power. + 3. Calculate overall impact of macroeconomic decisions as economic equations involve a + number of variables. + 4. Self-driving car navigation can be made more accurate as matrix multiplication can + accurately determine position and orientation of obstacles in short time. -Given a sequence arr[] that represents chain of 2D matrices such that the dimension of -the ith matrix is arr[i-1]*arr[i]. -So suppose arr = [40, 20, 30, 10, 30] means we have 4 matrices of dimensions -40*20, 20*30, 30*10 and 10*30. +Python doctests can be run with the following command:: -matrix_chain_multiply() returns an integer denoting minimum number of multiplications to -multiply the chain. + python -m doctest -v matrix_chain_multiply.py + +Given a sequence ``arr[]`` that represents chain of 2D matrices such that the dimension +of the ``i`` th matrix is ``arr[i-1]*arr[i]``. +So suppose ``arr = [40, 20, 30, 10, 30]`` means we have ``4`` matrices of dimensions +``40*20``, ``20*30``, ``30*10`` and ``10*30``. + +``matrix_chain_multiply()`` returns an integer denoting minimum number of +multiplications to multiply the chain. We do not need to perform actual multiplication here. We only need to decide the order in which to perform the multiplication. Hints: -1. Number of multiplications (ie cost) to multiply 2 matrices -of size m*p and p*n is m*p*n. -2. Cost of matrix multiplication is associative ie (M1*M2)*M3 != M1*(M2*M3) -3. Matrix multiplication is not commutative. So, M1*M2 does not mean M2*M1 can be done. -4. To determine the required order, we can try different combinations. + 1. 
Number of multiplications (ie cost) to multiply ``2`` matrices + of size ``m*p`` and ``p*n`` is ``m*p*n``. + 2. Cost of matrix multiplication is not associative ie ``(M1*M2)*M3 != M1*(M2*M3)`` + 3. Matrix multiplication is not commutative. So, ``M1*M2`` does not mean ``M2*M1`` + can be done. + 4. To determine the required order, we can try different combinations. + So, this problem has overlapping sub-problems and can be solved using recursion. We use Dynamic Programming for optimal time complexity. Example input: -arr = [40, 20, 30, 10, 30] -output: 26000 + ``arr = [40, 20, 30, 10, 30]`` +output: + ``26000`` """ from collections.abc import Iterator @@ -50,25 +56,25 @@ def matrix_chain_multiply(arr: list[int]) -> int: Find the minimum number of multiplcations required to multiply the chain of matrices Args: - arr: The input array of integers. + `arr`: The input array of integers. Returns: Minimum number of multiplications needed to multiply the chain Examples: - >>> matrix_chain_multiply([1, 2, 3, 4, 3]) - 30 - >>> matrix_chain_multiply([10]) - 0 - >>> matrix_chain_multiply([10, 20]) - 0 - >>> matrix_chain_multiply([19, 2, 19]) - 722 - >>> matrix_chain_multiply(list(range(1, 100))) - 323398 - - # >>> matrix_chain_multiply(list(range(1, 251))) - # 2626798 + + >>> matrix_chain_multiply([1, 2, 3, 4, 3]) + 30 + >>> matrix_chain_multiply([10]) + 0 + >>> matrix_chain_multiply([10, 20]) + 0 + >>> matrix_chain_multiply([19, 2, 19]) + 722 + >>> matrix_chain_multiply(list(range(1, 100))) + 323398 + >>> # matrix_chain_multiply(list(range(1, 251))) + # 2626798 """ if len(arr) < 2: return 0 @@ -93,8 +99,10 @@ def matrix_chain_multiply(arr: list[int]) -> int: def matrix_chain_order(dims: list[int]) -> int: """ Source: https://en.wikipedia.org/wiki/Matrix_chain_multiplication + The dynamic programming solution is faster than cached the recursive solution and can handle larger inputs. + >>> matrix_chain_order([1, 2, 3, 4, 3]) 30 >>> matrix_chain_order([10]) @@ -105,8 +113,7 @@ def matrix_chain_order(dims: list[int]) -> int: 722 >>> matrix_chain_order(list(range(1, 100))) 323398 - - # >>> matrix_chain_order(list(range(1, 251))) # Max before RecursionError is raised + >>> # matrix_chain_order(list(range(1, 251))) # Max before RecursionError is raised # 2626798 """ diff --git a/dynamic_programming/max_product_subarray.py b/dynamic_programming/max_product_subarray.py index 425859bc03e3..6f4f38e38942 100644 --- a/dynamic_programming/max_product_subarray.py +++ b/dynamic_programming/max_product_subarray.py @@ -1,9 +1,10 @@ def max_product_subarray(numbers: list[int]) -> int: """ Returns the maximum product that can be obtained by multiplying a - contiguous subarray of the given integer list `nums`. + contiguous subarray of the given integer list `numbers`. 
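Editor's note: one common way to realize this, sketched below for a non-empty list only (the module's function also validates its input), is to track both the running maximum and minimum products, since a negative element can swap their roles; the helper name is invented for the illustration.

def max_product_subarray_sketch(numbers: list[int]) -> int:
    best = current_max = current_min = numbers[0]
    for number in numbers[1:]:
        # A negative `number` can turn the smallest product into the largest.
        candidates = (number, current_max * number, current_min * number)
        current_max, current_min = max(candidates), min(candidates)
        best = max(best, current_max)
    return best


print(max_product_subarray_sketch([2, 3, -2, 4]))  # 6
print(max_product_subarray_sketch([-2, 0, -1]))    # 0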
Example: + >>> max_product_subarray([2, 3, -2, 4]) 6 >>> max_product_subarray((-2, 0, -1)) diff --git a/dynamic_programming/minimum_squares_to_represent_a_number.py b/dynamic_programming/minimum_squares_to_represent_a_number.py index bf5849f5bcb3..98c0602fa831 100644 --- a/dynamic_programming/minimum_squares_to_represent_a_number.py +++ b/dynamic_programming/minimum_squares_to_represent_a_number.py @@ -5,6 +5,7 @@ def minimum_squares_to_represent_a_number(number: int) -> int: """ Count the number of minimum squares to represent a number + >>> minimum_squares_to_represent_a_number(25) 1 >>> minimum_squares_to_represent_a_number(37) diff --git a/dynamic_programming/regex_match.py b/dynamic_programming/regex_match.py index 200a882831c0..e94d82093c8b 100644 --- a/dynamic_programming/regex_match.py +++ b/dynamic_programming/regex_match.py @@ -1,23 +1,25 @@ """ Regex matching check if a text matches pattern or not. Pattern: - '.' Matches any single character. - '*' Matches zero or more of the preceding element. + + 1. ``.`` Matches any single character. + 2. ``*`` Matches zero or more of the preceding element. + More info: https://medium.com/trick-the-interviwer/regular-expression-matching-9972eb74c03 """ def recursive_match(text: str, pattern: str) -> bool: - """ + r""" Recursive matching algorithm. - Time complexity: O(2 ^ (|text| + |pattern|)) - Space complexity: Recursion depth is O(|text| + |pattern|). + | Time complexity: O(2^(\|text\| + \|pattern\|)) + | Space complexity: Recursion depth is O(\|text\| + \|pattern\|). :param text: Text to match. :param pattern: Pattern to match. - :return: True if text matches pattern, False otherwise. + :return: ``True`` if `text` matches `pattern`, ``False`` otherwise. >>> recursive_match('abc', 'a.c') True @@ -48,15 +50,15 @@ def recursive_match(text: str, pattern: str) -> bool: def dp_match(text: str, pattern: str) -> bool: - """ + r""" Dynamic programming matching algorithm. - Time complexity: O(|text| * |pattern|) - Space complexity: O(|text| * |pattern|) + | Time complexity: O(\|text\| * \|pattern\|) + | Space complexity: O(\|text\| * \|pattern\|) :param text: Text to match. :param pattern: Pattern to match. - :return: True if text matches pattern, False otherwise. + :return: ``True`` if `text` matches `pattern`, ``False`` otherwise. >>> dp_match('abc', 'a.c') True diff --git a/dynamic_programming/rod_cutting.py b/dynamic_programming/rod_cutting.py index f80fa440ae86..d12c759dc928 100644 --- a/dynamic_programming/rod_cutting.py +++ b/dynamic_programming/rod_cutting.py @@ -1,7 +1,7 @@ """ This module provides two implementations for the rod-cutting problem: -1. A naive recursive implementation which has an exponential runtime -2. Two dynamic programming implementations which have quadratic runtime + 1. A naive recursive implementation which has an exponential runtime + 2. Two dynamic programming implementations which have quadratic runtime The rod-cutting problem is the problem of finding the maximum possible revenue obtainable from a rod of length ``n`` given a list of prices for each integral piece @@ -20,18 +20,21 @@ def naive_cut_rod_recursive(n: int, prices: list): Runtime: O(2^n) Arguments - ------- - n: int, the length of the rod - prices: list, the prices for each piece of rod. ``p[i-i]`` is the - price for a rod of length ``i`` + --------- + + * `n`: int, the length of the rod + * `prices`: list, the prices for each piece of rod. 
``p[i-i]`` is the + price for a rod of length ``i`` Returns ------- - The maximum revenue obtainable for a rod of length n given the list of prices + + The maximum revenue obtainable for a rod of length `n` given the list of prices for each piece. Examples -------- + >>> naive_cut_rod_recursive(4, [1, 5, 8, 9]) 10 >>> naive_cut_rod_recursive(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) @@ -54,28 +57,30 @@ def top_down_cut_rod(n: int, prices: list): """ Constructs a top-down dynamic programming solution for the rod-cutting problem via memoization. This function serves as a wrapper for - _top_down_cut_rod_recursive + ``_top_down_cut_rod_recursive`` Runtime: O(n^2) Arguments - -------- - n: int, the length of the rod - prices: list, the prices for each piece of rod. ``p[i-i]`` is the - price for a rod of length ``i`` + --------- - Note - ---- - For convenience and because Python's lists using 0-indexing, length(max_rev) = - n + 1, to accommodate for the revenue obtainable from a rod of length 0. + * `n`: int, the length of the rod + * `prices`: list, the prices for each piece of rod. ``p[i-i]`` is the + price for a rod of length ``i`` + + .. note:: + For convenience and because Python's lists using ``0``-indexing, ``length(max_rev) + = n + 1``, to accommodate for the revenue obtainable from a rod of length ``0``. Returns ------- - The maximum revenue obtainable for a rod of length n given the list of prices + + The maximum revenue obtainable for a rod of length `n` given the list of prices for each piece. Examples - ------- + -------- + >>> top_down_cut_rod(4, [1, 5, 8, 9]) 10 >>> top_down_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) @@ -94,16 +99,18 @@ def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list): Runtime: O(n^2) Arguments - -------- - n: int, the length of the rod - prices: list, the prices for each piece of rod. ``p[i-i]`` is the - price for a rod of length ``i`` - max_rev: list, the computed maximum revenue for a piece of rod. - ``max_rev[i]`` is the maximum revenue obtainable for a rod of length ``i`` + --------- + + * `n`: int, the length of the rod + * `prices`: list, the prices for each piece of rod. ``p[i-i]`` is the + price for a rod of length ``i`` + * `max_rev`: list, the computed maximum revenue for a piece of rod. + ``max_rev[i]`` is the maximum revenue obtainable for a rod of length ``i`` Returns ------- - The maximum revenue obtainable for a rod of length n given the list of prices + + The maximum revenue obtainable for a rod of length `n` given the list of prices for each piece. """ if max_rev[n] >= 0: @@ -130,18 +137,21 @@ def bottom_up_cut_rod(n: int, prices: list): Runtime: O(n^2) Arguments - ---------- - n: int, the maximum length of the rod. - prices: list, the prices for each piece of rod. ``p[i-i]`` is the - price for a rod of length ``i`` + --------- + + * `n`: int, the maximum length of the rod. + * `prices`: list, the prices for each piece of rod. ``p[i-i]`` is the + price for a rod of length ``i`` Returns ------- - The maximum revenue obtainable from cutting a rod of length n given + + The maximum revenue obtainable from cutting a rod of length `n` given the prices for each piece of rod p. Examples - ------- + -------- + >>> bottom_up_cut_rod(4, [1, 5, 8, 9]) 10 >>> bottom_up_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]) @@ -168,13 +178,12 @@ def _enforce_args(n: int, prices: list): """ Basic checks on the arguments to the rod-cutting algorithms - n: int, the length of the rod - prices: list, the price list for each piece of rod. 
- - Throws ValueError: + * `n`: int, the length of the rod + * `prices`: list, the price list for each piece of rod. - if n is negative or there are fewer items in the price list than the length of - the rod + Throws ``ValueError``: + if `n` is negative or there are fewer items in the price list than the length of + the rod """ if n < 0: msg = f"n must be greater than or equal to 0. Got n = {n}" diff --git a/dynamic_programming/subset_generation.py b/dynamic_programming/subset_generation.py index d490bca737ba..08daaac6f88a 100644 --- a/dynamic_programming/subset_generation.py +++ b/dynamic_programming/subset_generation.py @@ -1,38 +1,41 @@ def subset_combinations(elements: list[int], n: int) -> list: """ Compute n-element combinations from a given list using dynamic programming. + Args: - elements: The list of elements from which combinations will be generated. - n: The number of elements in each combination. + * `elements`: The list of elements from which combinations will be generated. + * `n`: The number of elements in each combination. + Returns: - A list of tuples, each representing a combination of n elements. - >>> subset_combinations(elements=[10, 20, 30, 40], n=2) - [(10, 20), (10, 30), (10, 40), (20, 30), (20, 40), (30, 40)] - >>> subset_combinations(elements=[1, 2, 3], n=1) - [(1,), (2,), (3,)] - >>> subset_combinations(elements=[1, 2, 3], n=3) - [(1, 2, 3)] - >>> subset_combinations(elements=[42], n=1) - [(42,)] - >>> subset_combinations(elements=[6, 7, 8, 9], n=4) - [(6, 7, 8, 9)] - >>> subset_combinations(elements=[10, 20, 30, 40, 50], n=0) - [()] - >>> subset_combinations(elements=[1, 2, 3, 4], n=2) - [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] - >>> subset_combinations(elements=[1, 'apple', 3.14], n=2) - [(1, 'apple'), (1, 3.14), ('apple', 3.14)] - >>> subset_combinations(elements=['single'], n=0) - [()] - >>> subset_combinations(elements=[], n=9) - [] - >>> from itertools import combinations - >>> all(subset_combinations(items, n) == list(combinations(items, n)) - ... for items, n in ( - ... ([10, 20, 30, 40], 2), ([1, 2, 3], 1), ([1, 2, 3], 3), ([42], 1), - ... ([6, 7, 8, 9], 4), ([10, 20, 30, 40, 50], 1), ([1, 2, 3, 4], 2), - ... ([1, 'apple', 3.14], 2), (['single'], 0), ([], 9))) - True + A list of tuples, each representing a combination of `n` elements. + + >>> subset_combinations(elements=[10, 20, 30, 40], n=2) + [(10, 20), (10, 30), (10, 40), (20, 30), (20, 40), (30, 40)] + >>> subset_combinations(elements=[1, 2, 3], n=1) + [(1,), (2,), (3,)] + >>> subset_combinations(elements=[1, 2, 3], n=3) + [(1, 2, 3)] + >>> subset_combinations(elements=[42], n=1) + [(42,)] + >>> subset_combinations(elements=[6, 7, 8, 9], n=4) + [(6, 7, 8, 9)] + >>> subset_combinations(elements=[10, 20, 30, 40, 50], n=0) + [()] + >>> subset_combinations(elements=[1, 2, 3, 4], n=2) + [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + >>> subset_combinations(elements=[1, 'apple', 3.14], n=2) + [(1, 'apple'), (1, 3.14), ('apple', 3.14)] + >>> subset_combinations(elements=['single'], n=0) + [()] + >>> subset_combinations(elements=[], n=9) + [] + >>> from itertools import combinations + >>> all(subset_combinations(items, n) == list(combinations(items, n)) + ... for items, n in ( + ... ([10, 20, 30, 40], 2), ([1, 2, 3], 1), ([1, 2, 3], 3), ([42], 1), + ... ([6, 7, 8, 9], 4), ([10, 20, 30, 40, 50], 1), ([1, 2, 3, 4], 2), + ... 
([1, 'apple', 3.14], 2), (['single'], 0), ([], 9))) + True """ r = len(elements) if n > r: diff --git a/dynamic_programming/viterbi.py b/dynamic_programming/viterbi.py index 764d45dc2c05..5b78fa9e46d0 100644 --- a/dynamic_programming/viterbi.py +++ b/dynamic_programming/viterbi.py @@ -9,119 +9,102 @@ def viterbi( emission_probabilities: dict, ) -> list: """ - Viterbi Algorithm, to find the most likely path of - states from the start and the expected output. - https://en.wikipedia.org/wiki/Viterbi_algorithm - sdafads - Wikipedia example - >>> observations = ["normal", "cold", "dizzy"] - >>> states = ["Healthy", "Fever"] - >>> start_p = {"Healthy": 0.6, "Fever": 0.4} - >>> trans_p = { - ... "Healthy": {"Healthy": 0.7, "Fever": 0.3}, - ... "Fever": {"Healthy": 0.4, "Fever": 0.6}, - ... } - >>> emit_p = { - ... "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1}, - ... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}, - ... } - >>> viterbi(observations, states, start_p, trans_p, emit_p) - ['Healthy', 'Healthy', 'Fever'] + Viterbi Algorithm, to find the most likely path of + states from the start and the expected output. - >>> viterbi((), states, start_p, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: There's an empty parameter - - >>> viterbi(observations, (), start_p, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: There's an empty parameter - - >>> viterbi(observations, states, {}, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: There's an empty parameter - - >>> viterbi(observations, states, start_p, {}, emit_p) - Traceback (most recent call last): - ... - ValueError: There's an empty parameter - - >>> viterbi(observations, states, start_p, trans_p, {}) - Traceback (most recent call last): - ... - ValueError: There's an empty parameter - - >>> viterbi("invalid", states, start_p, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: observations_space must be a list + https://en.wikipedia.org/wiki/Viterbi_algorithm - >>> viterbi(["valid", 123], states, start_p, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: observations_space must be a list of strings - - >>> viterbi(observations, "invalid", start_p, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: states_space must be a list - - >>> viterbi(observations, ["valid", 123], start_p, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: states_space must be a list of strings - - >>> viterbi(observations, states, "invalid", trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: initial_probabilities must be a dict - - >>> viterbi(observations, states, {2:2}, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: initial_probabilities all keys must be strings - - >>> viterbi(observations, states, {"a":2}, trans_p, emit_p) - Traceback (most recent call last): - ... - ValueError: initial_probabilities all values must be float + Wikipedia example - >>> viterbi(observations, states, start_p, "invalid", emit_p) - Traceback (most recent call last): - ... - ValueError: transition_probabilities must be a dict - - >>> viterbi(observations, states, start_p, {"a":2}, emit_p) - Traceback (most recent call last): - ... - ValueError: transition_probabilities all values must be dict - - >>> viterbi(observations, states, start_p, {2:{2:2}}, emit_p) - Traceback (most recent call last): - ... 
- ValueError: transition_probabilities all keys must be strings - - >>> viterbi(observations, states, start_p, {"a":{2:2}}, emit_p) - Traceback (most recent call last): - ... - ValueError: transition_probabilities all keys must be strings - - >>> viterbi(observations, states, start_p, {"a":{"b":2}}, emit_p) - Traceback (most recent call last): - ... - ValueError: transition_probabilities nested dictionary all values must be float - - >>> viterbi(observations, states, start_p, trans_p, "invalid") - Traceback (most recent call last): - ... - ValueError: emission_probabilities must be a dict - - >>> viterbi(observations, states, start_p, trans_p, None) - Traceback (most recent call last): - ... - ValueError: There's an empty parameter + >>> observations = ["normal", "cold", "dizzy"] + >>> states = ["Healthy", "Fever"] + >>> start_p = {"Healthy": 0.6, "Fever": 0.4} + >>> trans_p = { + ... "Healthy": {"Healthy": 0.7, "Fever": 0.3}, + ... "Fever": {"Healthy": 0.4, "Fever": 0.6}, + ... } + >>> emit_p = { + ... "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1}, + ... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}, + ... } + >>> viterbi(observations, states, start_p, trans_p, emit_p) + ['Healthy', 'Healthy', 'Fever'] + >>> viterbi((), states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + >>> viterbi(observations, (), start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + >>> viterbi(observations, states, {}, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + >>> viterbi(observations, states, start_p, {}, emit_p) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + >>> viterbi(observations, states, start_p, trans_p, {}) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter + >>> viterbi("invalid", states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: observations_space must be a list + >>> viterbi(["valid", 123], states, start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: observations_space must be a list of strings + >>> viterbi(observations, "invalid", start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: states_space must be a list + >>> viterbi(observations, ["valid", 123], start_p, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: states_space must be a list of strings + >>> viterbi(observations, states, "invalid", trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: initial_probabilities must be a dict + >>> viterbi(observations, states, {2:2}, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: initial_probabilities all keys must be strings + >>> viterbi(observations, states, {"a":2}, trans_p, emit_p) + Traceback (most recent call last): + ... + ValueError: initial_probabilities all values must be float + >>> viterbi(observations, states, start_p, "invalid", emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities must be a dict + >>> viterbi(observations, states, start_p, {"a":2}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all values must be dict + >>> viterbi(observations, states, start_p, {2:{2:2}}, emit_p) + Traceback (most recent call last): + ... 
+ ValueError: transition_probabilities all keys must be strings + >>> viterbi(observations, states, start_p, {"a":{2:2}}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities all keys must be strings + >>> viterbi(observations, states, start_p, {"a":{"b":2}}, emit_p) + Traceback (most recent call last): + ... + ValueError: transition_probabilities nested dictionary all values must be float + >>> viterbi(observations, states, start_p, trans_p, "invalid") + Traceback (most recent call last): + ... + ValueError: emission_probabilities must be a dict + >>> viterbi(observations, states, start_p, trans_p, None) + Traceback (most recent call last): + ... + ValueError: There's an empty parameter """ _validation( @@ -213,7 +196,6 @@ def _validation( ... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}, ... } >>> _validation(observations, states, start_p, trans_p, emit_p) - >>> _validation([], states, start_p, trans_p, emit_p) Traceback (most recent call last): ... @@ -242,7 +224,6 @@ def _validate_not_empty( """ >>> _validate_not_empty(["a"], ["b"], {"c":0.5}, ... {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) - >>> _validate_not_empty(["a"], ["b"], {"c":0.5}, {}, {"f": {"g": 0.7}}) Traceback (most recent call last): ... @@ -267,12 +248,10 @@ def _validate_not_empty( def _validate_lists(observations_space: Any, states_space: Any) -> None: """ >>> _validate_lists(["a"], ["b"]) - >>> _validate_lists(1234, ["b"]) Traceback (most recent call last): ... ValueError: observations_space must be a list - >>> _validate_lists(["a"], [3]) Traceback (most recent call last): ... @@ -285,7 +264,6 @@ def _validate_lists(observations_space: Any, states_space: Any) -> None: def _validate_list(_object: Any, var_name: str) -> None: """ >>> _validate_list(["a"], "mock_name") - >>> _validate_list("a", "mock_name") Traceback (most recent call last): ... @@ -294,7 +272,6 @@ def _validate_list(_object: Any, var_name: str) -> None: Traceback (most recent call last): ... ValueError: mock_name must be a list of strings - """ if not isinstance(_object, list): msg = f"{var_name} must be a list" @@ -313,7 +290,6 @@ def _validate_dicts( ) -> None: """ >>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) - >>> _validate_dicts("invalid", {"d": {"e": 0.6}}, {"f": {"g": 0.7}}) Traceback (most recent call last): ... @@ -339,7 +315,6 @@ def _validate_dicts( def _validate_nested_dict(_object: Any, var_name: str) -> None: """ >>> _validate_nested_dict({"a":{"b": 0.5}}, "mock_name") - >>> _validate_nested_dict("invalid", "mock_name") Traceback (most recent call last): ... @@ -367,7 +342,6 @@ def _validate_dict( ) -> None: """ >>> _validate_dict({"b": 0.5}, "mock_name", float) - >>> _validate_dict("invalid", "mock_name", float) Traceback (most recent call last): ... 
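For context, the recurrence these doctests exercise can be written very compactly. The sketch below reuses the Wikipedia Healthy/Fever example from the docstring; the function name and structure are illustrative and independent of the module above.

# Compact standalone sketch of the Viterbi recurrence, using the same
# Wikipedia "Healthy"/"Fever" example as the doctests above.
def viterbi_sketch(observations, states, start_p, trans_p, emit_p):
    # probs[t][s] = (best probability of a path ending in state s at step t, previous state)
    probs = [{s: (start_p[s] * emit_p[s][observations[0]], None) for s in states}]
    for obs in observations[1:]:
        row = {}
        for s in states:
            prev = max(states, key=lambda p: probs[-1][p][0] * trans_p[p][s])
            row[s] = (probs[-1][prev][0] * trans_p[prev][s] * emit_p[s][obs], prev)
        probs.append(row)
    state = max(states, key=lambda s: probs[-1][s][0])  # most probable final state
    path = [state]
    for row in reversed(probs[1:]):  # walk the back-pointers
        state = row[state][1]
        path.append(state)
    return list(reversed(path))

observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
result = viterbi_sketch(observations, states, start_p, trans_p, emit_p)
assert result == ["Healthy", "Healthy", "Fever"]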
From a2be5adf673c32720bbfb649c368af8fd7ae9dbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Julia=20Arag=C3=A3o?= <101305675+juliaaragao@users.noreply.github.com> Date: Mon, 30 Dec 2024 13:36:55 +0100 Subject: [PATCH 1496/1543] Tests electronics/electric_conductivity.py #9943 (#12437) * Function conversion rectangular number to polar * #9943 : adding test to elelectronics/electric_conductivity.py * updating DIRECTORY.md * Apply suggestions from code review * updating DIRECTORY.md * Rename rec_to_pol.py to rectangular_to_polar.py * updating DIRECTORY.md * Update conversions/rectangular_to_polar.py * Update conversions/rectangular_to_polar.py --------- Co-authored-by: Julia Co-authored-by: juliaaragao Co-authored-by: Christian Clauss Co-authored-by: cclauss --- DIRECTORY.md | 1 + conversions/rectangular_to_polar.py | 32 ++++++++++++++++++++++++++++ electronics/electric_conductivity.py | 20 +++++++++++++++++ 3 files changed, 53 insertions(+) create mode 100644 conversions/rectangular_to_polar.py diff --git a/DIRECTORY.md b/DIRECTORY.md index d234d366df06..44d0414a37c8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -169,6 +169,7 @@ * [Prefix Conversions](conversions/prefix_conversions.py) * [Prefix Conversions String](conversions/prefix_conversions_string.py) * [Pressure Conversions](conversions/pressure_conversions.py) + * [Rectangular To Polar](conversions/rectangular_to_polar.py) * [Rgb Cmyk Conversion](conversions/rgb_cmyk_conversion.py) * [Rgb Hsv Conversion](conversions/rgb_hsv_conversion.py) * [Roman Numerals](conversions/roman_numerals.py) diff --git a/conversions/rectangular_to_polar.py b/conversions/rectangular_to_polar.py new file mode 100644 index 000000000000..bed97d7410ec --- /dev/null +++ b/conversions/rectangular_to_polar.py @@ -0,0 +1,32 @@ +import math + + +def rectangular_to_polar(real: float, img: float) -> tuple[float, float]: + """ + https://en.wikipedia.org/wiki/Polar_coordinate_system + + >>> rectangular_to_polar(5,-5) + (7.07, -45.0) + >>> rectangular_to_polar(-1,1) + (1.41, 135.0) + >>> rectangular_to_polar(-1,-1) + (1.41, -135.0) + >>> rectangular_to_polar(1e-10,1e-10) + (0.0, 45.0) + >>> rectangular_to_polar(-1e-10,1e-10) + (0.0, 135.0) + >>> rectangular_to_polar(9.75,5.93) + (11.41, 31.31) + >>> rectangular_to_polar(10000,99999) + (100497.76, 84.29) + """ + + mod = round(math.sqrt((real**2) + (img**2)), 2) + ang = round(math.degrees(math.atan2(img, real)), 2) + return (mod, ang) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/electronics/electric_conductivity.py b/electronics/electric_conductivity.py index 11f2a607d214..65bb6c5ceaf0 100644 --- a/electronics/electric_conductivity.py +++ b/electronics/electric_conductivity.py @@ -21,6 +21,26 @@ def electric_conductivity( ('conductivity', 5.12672e-14) >>> electric_conductivity(conductivity=1000, electron_conc=0, mobility=1200) ('electron_conc', 5.201506356240767e+18) + >>> electric_conductivity(conductivity=-10, electron_conc=100, mobility=0) + Traceback (most recent call last): + ... + ValueError: Conductivity cannot be negative + >>> electric_conductivity(conductivity=50, electron_conc=-10, mobility=0) + Traceback (most recent call last): + ... + ValueError: Electron concentration cannot be negative + >>> electric_conductivity(conductivity=50, electron_conc=0, mobility=-10) + Traceback (most recent call last): + ... + ValueError: mobility cannot be negative + >>> electric_conductivity(conductivity=50, electron_conc=0, mobility=0) + Traceback (most recent call last): + ... 
+ ValueError: You cannot supply more or less than 2 values + >>> electric_conductivity(conductivity=50, electron_conc=200, mobility=300) + Traceback (most recent call last): + ... + ValueError: You cannot supply more or less than 2 values """ if (conductivity, electron_conc, mobility).count(0) != 1: raise ValueError("You cannot supply more or less than 2 values") From f24ddba5b2600486f7c3a4c5807cf2aeed421870 Mon Sep 17 00:00:00 2001 From: Matej <83732219+IsxImattI@users.noreply.github.com> Date: Mon, 30 Dec 2024 16:04:28 +0100 Subject: [PATCH 1497/1543] Implemented doctests for geometry-related classes (#12368) * Implemented doctests for geometry-related classes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed unused noqa directive * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactored sudoku_solver.py * refactored sudoku_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * context manager for file handling changed too in from_file function --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- data_structures/arrays/sudoku_solver.py | 5 +++-- geometry/geometry.py | 29 +++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 7e38e1465728..fd1a4f3e37b8 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -23,7 +23,7 @@ def cross(items_a, items_b): + [cross(rs, cs) for rs in ("ABC", "DEF", "GHI") for cs in ("123", "456", "789")] ) units = {s: [u for u in unitlist if s in u] for s in squares} -peers = {s: set(sum(units[s], [])) - {s} for s in squares} # noqa: RUF017 +peers = {s: {x for u in units[s] for x in u} - {s} for s in squares} def test(): @@ -172,7 +172,8 @@ def unitsolved(unit): def from_file(filename, sep="\n"): "Parse a file into a list of strings, separated by sep." - return open(filename).read().strip().split(sep) + with open(filename) as file: + return file.read().strip().split(sep) def random_puzzle(assignments=17): diff --git a/geometry/geometry.py b/geometry/geometry.py index 9e353dee17a7..a0be8eb3befc 100644 --- a/geometry/geometry.py +++ b/geometry/geometry.py @@ -48,6 +48,18 @@ class Side: Side(length=5, angle=Angle(degrees=45.6), next_side=None) >>> Side(5, Angle(45.6), Side(1, Angle(2))) # doctest: +ELLIPSIS Side(length=5, angle=Angle(degrees=45.6), next_side=Side(length=1, angle=Angle(d... + >>> Side(-1) + Traceback (most recent call last): + ... + TypeError: length must be a positive numeric value. + >>> Side(5, None) + Traceback (most recent call last): + ... + TypeError: angle must be an Angle object. + >>> Side(5, Angle(90), "Invalid next_side") + Traceback (most recent call last): + ... + TypeError: next_side must be a Side or None. """ length: float @@ -162,6 +174,19 @@ class Polygon: >>> Polygon() Polygon(sides=[]) + >>> polygon = Polygon() + >>> polygon.add_side(Side(5)).get_side(0) + Side(length=5, angle=Angle(degrees=90), next_side=None) + >>> polygon.get_side(1) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> polygon.set_side(0, Side(10)).get_side(0) + Side(length=10, angle=Angle(degrees=90), next_side=None) + >>> polygon.set_side(1, Side(10)) + Traceback (most recent call last): + ... 
+ IndexError: list assignment index out of range """ sides: list[Side] = field(default_factory=list) @@ -207,6 +232,10 @@ class Rectangle(Polygon): 30 >>> rectangle_one.area() 50 + >>> Rectangle(-5, 10) + Traceback (most recent call last): + ... + TypeError: length must be a positive numeric value. """ def __init__(self, short_side_length: float, long_side_length: float) -> None: From 77425364c87908bf061ad78b770ec840086b4efb Mon Sep 17 00:00:00 2001 From: SUDO_USER <110802232+AtharvMalusare@users.noreply.github.com> Date: Mon, 30 Dec 2024 20:42:04 +0530 Subject: [PATCH 1498/1543] Intensity_based_Segmentation (#12491) * Add files via upload * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update intensity-based_segmentation.py * Update and rename intensity-based_segmentation.py to intensity_based_segmentation.py * Update intensity_based_segmentation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * [0, 1, 1]], dtype=int32) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../intensity_based_segmentation.py | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 computer_vision/intensity_based_segmentation.py diff --git a/computer_vision/intensity_based_segmentation.py b/computer_vision/intensity_based_segmentation.py new file mode 100644 index 000000000000..7f2b1141acc4 --- /dev/null +++ b/computer_vision/intensity_based_segmentation.py @@ -0,0 +1,62 @@ +# Source: "https://www.ijcse.com/docs/IJCSE11-02-03-117.pdf" + +# Importing necessary libraries +import matplotlib.pyplot as plt +import numpy as np +from PIL import Image + + +def segment_image(image: np.ndarray, thresholds: list[int]) -> np.ndarray: + """ + Performs image segmentation based on intensity thresholds. + + Args: + image: Input grayscale image as a 2D array. + thresholds: Intensity thresholds to define segments. + + Returns: + A labeled 2D array where each region corresponds to a threshold range. 
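# Aside: for thresholds sorted in ascending order, this labelling is
# equivalent to counting how many thresholds each pixel strictly exceeds,
# which numpy.digitize can do directly. Small illustrative check, separate
# from the module and assuming sorted thresholds.
import numpy as np

img = np.array([[80, 120, 180], [40, 90, 150], [20, 60, 100]])
thresholds = [50, 100, 150]
labels = np.digitize(img, thresholds, right=True)
assert labels.tolist() == [[1, 2, 3], [0, 1, 2], [0, 1, 1]]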
+ + Example: + >>> img = np.array([[80, 120, 180], [40, 90, 150], [20, 60, 100]]) + >>> segment_image(img, [50, 100, 150]) + array([[1, 2, 3], + [0, 1, 2], + [0, 1, 1]], dtype=int32) + """ + # Initialize segmented array with zeros + segmented = np.zeros_like(image, dtype=np.int32) + + # Assign labels based on thresholds + for i, threshold in enumerate(thresholds): + segmented[image > threshold] = i + 1 + + return segmented + + +if __name__ == "__main__": + # Load the image + image_path = "path_to_image" # Replace with your image path + original_image = Image.open(image_path).convert("L") + image_array = np.array(original_image) + + # Define thresholds + thresholds = [50, 100, 150, 200] + + # Perform segmentation + segmented_image = segment_image(image_array, thresholds) + + # Display the results + plt.figure(figsize=(10, 5)) + + plt.subplot(1, 2, 1) + plt.title("Original Image") + plt.imshow(image_array, cmap="gray") + plt.axis("off") + + plt.subplot(1, 2, 2) + plt.title("Segmented Image") + plt.imshow(segmented_image, cmap="tab20") + plt.axis("off") + + plt.show() From 75c5c411133f7e0f339c8d68c7c76c8054eb4249 Mon Sep 17 00:00:00 2001 From: Scarfinos <158184182+Scarfinos@users.noreply.github.com> Date: Mon, 30 Dec 2024 16:12:26 +0100 Subject: [PATCH 1499/1543] #9943 : Adding coverage test for basic_graphs.py (#12354) * #9943 : Adding coverage test for basic_graphs.py * #9943 : Adding coverage test for basic_graphs.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Solve problem of line too long --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- graphs/basic_graphs.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index 567fa65040ae..286e9b195796 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -77,6 +77,14 @@ def initialize_weighted_undirected_graph( def dfs(g, s): + """ + >>> dfs({1: [2, 3], 2: [4, 5], 3: [], 4: [], 5: []}, 1) + 1 + 2 + 4 + 5 + 3 + """ vis, _s = {s}, [s] print(s) while _s: @@ -104,6 +112,17 @@ def dfs(g, s): def bfs(g, s): + """ + >>> bfs({1: [2, 3], 2: [4, 5], 3: [6, 7], 4: [], 5: [8], 6: [], 7: [], 8: []}, 1) + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + """ vis, q = {s}, deque([s]) print(s) while q: @@ -128,6 +147,19 @@ def bfs(g, s): def dijk(g, s): + """ + dijk({1: [(2, 7), (3, 9), (6, 14)], + 2: [(1, 7), (3, 10), (4, 15)], + 3: [(1, 9), (2, 10), (4, 11), (6, 2)], + 4: [(2, 15), (3, 11), (5, 6)], + 5: [(4, 6), (6, 9)], + 6: [(1, 14), (3, 2), (5, 9)]}, 1) + 7 + 9 + 11 + 20 + 20 + """ dist, known, path = {s: 0}, set(), {s: 0} while True: if len(known) == len(g) - 1: From 7e55fb6474a06ecf0000fe11494fe5eefeeb54ab Mon Sep 17 00:00:00 2001 From: Jeffrey Yancey Date: Mon, 30 Dec 2024 12:00:30 -0700 Subject: [PATCH 1500/1543] - Implemented `find_lanczos_eigenvectors` to approximate the largest eigenvalues and corresponding eigenvectors of a graph based on its adjacency list. (#11906) - Utilized `lanczos_iteration` to construct tridiagonal matrices, optimized for large, sparse matrices. - Added `multiply_matrix_vector` for efficient matrix-vector multiplication using adjacency lists. - Included `validate_adjacency_list` for input validation. - Supports varied graph analysis applications, particularly for analyzing graph centrality. - Included type hints, comprehensive docstrings, and doctests. - PEP-8 compliant, with optimized handling of inputs and outputs. 
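As background: the dominant eigenvector of a graph's adjacency matrix is what eigenvector centrality measures, and on a tiny graph it can be approximated with plain power iteration. The sketch below is illustrative only and independent of the module's API.

# Illustrative power iteration for the dominant adjacency eigenvector
# (eigenvector centrality) of a triangle graph, where every node is equally central.
graph = [[1, 2], [0, 2], [0, 1]]
vector = [1.0] * len(graph)
for _ in range(50):
    nxt = [sum(vector[j] for j in neighbors) for neighbors in graph]
    norm = sum(x * x for x in nxt) ** 0.5
    vector = [x / norm for x in nxt]
rayleigh = sum(sum(vector[j] for j in nbrs) * vector[i] for i, nbrs in enumerate(graph))
assert abs(rayleigh - 2.0) < 1e-9  # largest adjacency eigenvalue of a triangle is 2
assert max(vector) - min(vector) < 1e-9  # symmetric graph -> equal centrality scores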
This module provides essential tools for eigenvalue-based graph analysis, ideal for centrality insights and structural assessments. --- graphs/lanczos_eigenvectors.py | 206 +++++++++++++++++++++++++++++++++ 1 file changed, 206 insertions(+) create mode 100644 graphs/lanczos_eigenvectors.py diff --git a/graphs/lanczos_eigenvectors.py b/graphs/lanczos_eigenvectors.py new file mode 100644 index 000000000000..581a81a1127f --- /dev/null +++ b/graphs/lanczos_eigenvectors.py @@ -0,0 +1,206 @@ +""" +Lanczos Method for Finding Eigenvalues and Eigenvectors of a Graph. + +This module demonstrates the Lanczos method to approximate the largest eigenvalues +and corresponding eigenvectors of a symmetric matrix represented as a graph's +adjacency list. The method efficiently handles large, sparse matrices by converting +the graph to a tridiagonal matrix, whose eigenvalues and eigenvectors are then +computed. + +Key Functions: +- `find_lanczos_eigenvectors`: Computes the k largest eigenvalues and vectors. +- `lanczos_iteration`: Constructs the tridiagonal matrix and orthonormal basis vectors. +- `multiply_matrix_vector`: Multiplies an adjacency list graph with a vector. + +Complexity: +- Time: O(k * n), where k is the number of eigenvalues and n is the matrix size. +- Space: O(n), due to sparse representation and tridiagonal matrix structure. + +Further Reading: +- Lanczos Algorithm: https://en.wikipedia.org/wiki/Lanczos_algorithm +- Eigenvector Centrality: https://en.wikipedia.org/wiki/Eigenvector_centrality + +Example Usage: +Given a graph represented by an adjacency list, the `find_lanczos_eigenvectors` +function returns the largest eigenvalues and eigenvectors. This can be used to +analyze graph centrality. +""" + +import numpy as np + + +def validate_adjacency_list(graph: list[list[int | None]]) -> None: + """Validates the adjacency list format for the graph. + + Args: + graph: A list of lists where each sublist contains the neighbors of a node. + + Raises: + ValueError: If the graph is not a list of lists, or if any node has + invalid neighbors (e.g., out-of-range or non-integer values). + + >>> validate_adjacency_list([[1, 2], [0], [0, 1]]) + >>> validate_adjacency_list([[]]) # No neighbors, valid case + >>> validate_adjacency_list([[1], [2], [-1]]) # Invalid neighbor + Traceback (most recent call last): + ... + ValueError: Invalid neighbor -1 in node 2 adjacency list. + """ + if not isinstance(graph, list): + raise ValueError("Graph should be a list of lists.") + + for node_index, neighbors in enumerate(graph): + if not isinstance(neighbors, list): + no_neighbors_message: str = ( + f"Node {node_index} should have a list of neighbors." + ) + raise ValueError(no_neighbors_message) + for neighbor_index in neighbors: + if ( + not isinstance(neighbor_index, int) + or neighbor_index < 0 + or neighbor_index >= len(graph) + ): + invalid_neighbor_message: str = ( + f"Invalid neighbor {neighbor_index} in node {node_index} " + f"adjacency list." + ) + raise ValueError(invalid_neighbor_message) + + +def lanczos_iteration( + graph: list[list[int | None]], num_eigenvectors: int +) -> tuple[np.ndarray, np.ndarray]: + """Constructs the tridiagonal matrix and orthonormal basis vectors using the + Lanczos method. + + Args: + graph: The graph represented as a list of adjacency lists. + num_eigenvectors: The number of largest eigenvalues and eigenvectors + to approximate. + + Returns: + A tuple containing: + - tridiagonal_matrix: A (num_eigenvectors x num_eigenvectors) symmetric + matrix. 
+ - orthonormal_basis: A (num_nodes x num_eigenvectors) matrix of orthonormal + basis vectors. + + Raises: + ValueError: If num_eigenvectors is less than 1 or greater than the number of + nodes. + + >>> graph = [[1, 2], [0, 2], [0, 1]] + >>> T, Q = lanczos_iteration(graph, 2) + >>> T.shape == (2, 2) and Q.shape == (3, 2) + True + """ + num_nodes: int = len(graph) + if not (1 <= num_eigenvectors <= num_nodes): + raise ValueError( + "Number of eigenvectors must be between 1 and the number of " + "nodes in the graph." + ) + + orthonormal_basis: np.ndarray = np.zeros((num_nodes, num_eigenvectors)) + tridiagonal_matrix: np.ndarray = np.zeros((num_eigenvectors, num_eigenvectors)) + + rng = np.random.default_rng() + initial_vector: np.ndarray = rng.random(num_nodes) + initial_vector /= np.sqrt(np.dot(initial_vector, initial_vector)) + orthonormal_basis[:, 0] = initial_vector + + prev_beta: float = 0.0 + for iter_index in range(num_eigenvectors): + result_vector: np.ndarray = multiply_matrix_vector( + graph, orthonormal_basis[:, iter_index] + ) + if iter_index > 0: + result_vector -= prev_beta * orthonormal_basis[:, iter_index - 1] + alpha_value: float = np.dot(orthonormal_basis[:, iter_index], result_vector) + result_vector -= alpha_value * orthonormal_basis[:, iter_index] + + prev_beta = np.sqrt(np.dot(result_vector, result_vector)) + if iter_index < num_eigenvectors - 1 and prev_beta > 1e-10: + orthonormal_basis[:, iter_index + 1] = result_vector / prev_beta + tridiagonal_matrix[iter_index, iter_index] = alpha_value + if iter_index < num_eigenvectors - 1: + tridiagonal_matrix[iter_index, iter_index + 1] = prev_beta + tridiagonal_matrix[iter_index + 1, iter_index] = prev_beta + return tridiagonal_matrix, orthonormal_basis + + +def multiply_matrix_vector( + graph: list[list[int | None]], vector: np.ndarray +) -> np.ndarray: + """Performs multiplication of a graph's adjacency list representation with a vector. + + Args: + graph: The adjacency list of the graph. + vector: A 1D numpy array representing the vector to multiply. + + Returns: + A numpy array representing the product of the adjacency list and the vector. + + Raises: + ValueError: If the vector's length does not match the number of nodes in the + graph. + + >>> multiply_matrix_vector([[1, 2], [0, 2], [0, 1]], np.array([1, 1, 1])) + array([2., 2., 2.]) + >>> multiply_matrix_vector([[1, 2], [0, 2], [0, 1]], np.array([0, 1, 0])) + array([1., 0., 1.]) + """ + num_nodes: int = len(graph) + if vector.shape[0] != num_nodes: + raise ValueError("Vector length must match the number of nodes in the graph.") + + result: np.ndarray = np.zeros(num_nodes) + for node_index, neighbors in enumerate(graph): + for neighbor_index in neighbors: + result[node_index] += vector[neighbor_index] + return result + + +def find_lanczos_eigenvectors( + graph: list[list[int | None]], num_eigenvectors: int +) -> tuple[np.ndarray, np.ndarray]: + """Computes the largest eigenvalues and their corresponding eigenvectors using the + Lanczos method. + + Args: + graph: The graph as a list of adjacency lists. + num_eigenvectors: Number of largest eigenvalues and eigenvectors to compute. + + Returns: + A tuple containing: + - eigenvalues: 1D array of the largest eigenvalues in descending order. + - eigenvectors: 2D array where each column is an eigenvector corresponding + to an eigenvalue. + + Raises: + ValueError: If the graph format is invalid or num_eigenvectors is out of bounds. + + >>> eigenvalues, eigenvectors = find_lanczos_eigenvectors( + ... 
[[1, 2], [0, 2], [0, 1]], 2 + ... ) + >>> len(eigenvalues) == 2 and eigenvectors.shape[1] == 2 + True + """ + validate_adjacency_list(graph) + tridiagonal_matrix, orthonormal_basis = lanczos_iteration(graph, num_eigenvectors) + eigenvalues, eigenvectors = np.linalg.eigh(tridiagonal_matrix) + return eigenvalues[::-1], np.dot(orthonormal_basis, eigenvectors[:, ::-1]) + + +def main() -> None: + """ + Main driver function for testing the implementation with doctests. + """ + import doctest + + doctest.testmod() + + +if __name__ == "__main__": + main() From 8921b56a8517cdc9455d764d7cddb10b2d7f2145 Mon Sep 17 00:00:00 2001 From: Melih Mehmet Sahin Date: Mon, 30 Dec 2024 19:53:50 +0000 Subject: [PATCH 1501/1543] Adding tests to monotonic_array.py (#12073) * Contributes to #9943 by adding tests to monotonic_array.py Addeded doctest in the if __name__. Checks for negaitves and an array of same integers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/arrays/monotonic_array.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/data_structures/arrays/monotonic_array.py b/data_structures/arrays/monotonic_array.py index c50a21530814..342d443a9cfc 100644 --- a/data_structures/arrays/monotonic_array.py +++ b/data_structures/arrays/monotonic_array.py @@ -9,6 +9,16 @@ def is_monotonic(nums: list[int]) -> bool: True >>> is_monotonic([1, 3, 2]) False + >>> is_monotonic([1,2,3,4,5,6,5]) + False + >>> is_monotonic([-3,-2,-1]) + True + >>> is_monotonic([-5,-6,-7]) + True + >>> is_monotonic([0,0,0]) + True + >>> is_monotonic([-100,0,100]) + True """ return all(nums[i] <= nums[i + 1] for i in range(len(nums) - 1)) or all( nums[i] >= nums[i + 1] for i in range(len(nums) - 1) @@ -21,3 +31,7 @@ def is_monotonic(nums: list[int]) -> bool: print(is_monotonic([1, 2, 2, 3])) # Output: True print(is_monotonic([6, 5, 4, 4])) # Output: True print(is_monotonic([1, 3, 2])) # Output: False + + import doctest + + doctest.testmod() From 5942059cb571b213e5ec82fe9b45e5a9bef4864b Mon Sep 17 00:00:00 2001 From: Giulio Tantaro Date: Mon, 30 Dec 2024 21:03:31 +0100 Subject: [PATCH 1502/1543] add doctest for quick_sort_3_partition (#11779) --- sorts/quick_sort_3_partition.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/sorts/quick_sort_3_partition.py b/sorts/quick_sort_3_partition.py index 1a6db6a364f0..279b9a68f5a6 100644 --- a/sorts/quick_sort_3_partition.py +++ b/sorts/quick_sort_3_partition.py @@ -1,4 +1,27 @@ def quick_sort_3partition(sorting: list, left: int, right: int) -> None: + """ " + Python implementation of quick sort algorithm with 3-way partition. + The idea of 3-way quick sort is based on "Dutch National Flag algorithm". 
+ + :param sorting: sort list + :param left: left endpoint of sorting + :param right: right endpoint of sorting + :return: None + + Examples: + >>> array1 = [5, -1, -1, 5, 5, 24, 0] + >>> quick_sort_3partition(array1, 0, 6) + >>> array1 + [-1, -1, 0, 5, 5, 5, 24] + >>> array2 = [9, 0, 2, 6] + >>> quick_sort_3partition(array2, 0, 3) + >>> array2 + [0, 2, 6, 9] + >>> array3 = [] + >>> quick_sort_3partition(array3, 0, 0) + >>> array3 + [] + """ if right <= left: return a = i = left From 8767d1d72436b8aff89f9c11d045ad95bec02ba4 Mon Sep 17 00:00:00 2001 From: Rodrigo Castro Date: Mon, 30 Dec 2024 21:36:41 -0300 Subject: [PATCH 1503/1543] add some documentation for heap sort (#9949) * add some documentation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix typing * Update heap_sort.py * Update heap_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/heap_sort.py | 47 +++++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/sorts/heap_sort.py b/sorts/heap_sort.py index 4dca879bd89c..44ee1d4b39f1 100644 --- a/sorts/heap_sort.py +++ b/sorts/heap_sort.py @@ -1,17 +1,22 @@ """ -This is a pure Python implementation of the heap sort algorithm. - -For doctests run following command: -python -m doctest -v heap_sort.py -or -python3 -m doctest -v heap_sort.py - -For manual testing run: -python heap_sort.py +A pure Python implementation of the heap sort algorithm. """ -def heapify(unsorted, index, heap_size): +def heapify(unsorted: list[int], index: int, heap_size: int) -> None: + """ + :param unsorted: unsorted list containing integers numbers + :param index: index + :param heap_size: size of the heap + :return: None + >>> unsorted = [1, 4, 3, 5, 2] + >>> heapify(unsorted, 0, len(unsorted)) + >>> unsorted + [4, 5, 3, 1, 2] + >>> heapify(unsorted, 0, len(unsorted)) + >>> unsorted + [5, 4, 3, 1, 2] + """ largest = index left_index = 2 * index + 1 right_index = 2 * index + 2 @@ -22,26 +27,26 @@ def heapify(unsorted, index, heap_size): largest = right_index if largest != index: - unsorted[largest], unsorted[index] = unsorted[index], unsorted[largest] + unsorted[largest], unsorted[index] = (unsorted[index], unsorted[largest]) heapify(unsorted, largest, heap_size) -def heap_sort(unsorted): +def heap_sort(unsorted: list[int]) -> list[int]: """ - Pure implementation of the heap sort algorithm in Python - :param collection: some mutable ordered collection with heterogeneous - comparable items inside + A pure Python implementation of the heap sort algorithm + + :param collection: a mutable ordered collection of heterogeneous comparable items :return: the same collection ordered by ascending Examples: >>> heap_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] - >>> heap_sort([]) [] - >>> heap_sort([-2, -5, -45]) [-45, -5, -2] + >>> heap_sort([3, 7, 9, 28, 123, -5, 8, -30, -200, 0, 4]) + [-200, -30, -5, 0, 3, 4, 7, 8, 9, 28, 123] """ n = len(unsorted) for i in range(n // 2 - 1, -1, -1): @@ -53,6 +58,10 @@ def heap_sort(unsorted): if __name__ == "__main__": + import doctest + + doctest.testmod() user_input = input("Enter numbers separated by a comma:\n").strip() - unsorted = [int(item) for item in user_input.split(",")] - print(heap_sort(unsorted)) + if user_input: + unsorted = [int(item) for item in user_input.split(",")] + print(f"{heap_sort(unsorted) = }") From 8439fa8d1da94370250d153cd57f9bdcc382a062 Mon Sep 17 00:00:00 
2001 From: Paarth Goyal <138299656+pluto-tofu@users.noreply.github.com> Date: Tue, 31 Dec 2024 06:17:41 +0530 Subject: [PATCH 1504/1543] Added the algorithm to compute the time period of a simple pendulum (#10265) * Added the algorithm to compute the time period of a simple pendulum * imported g form scipy and changed doctests accordingly * fixed formatting * applied all suggested changes from code review * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Tianyi Zheng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- physics/period_of_pendulum.py | 53 +++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 physics/period_of_pendulum.py diff --git a/physics/period_of_pendulum.py b/physics/period_of_pendulum.py new file mode 100644 index 000000000000..2e3c7bc3ef1e --- /dev/null +++ b/physics/period_of_pendulum.py @@ -0,0 +1,53 @@ +""" +Title : Computing the time period of a simple pendulum + +The simple pendulum is a mechanical system that sways or moves in an +oscillatory motion. The simple pendulum comprises of a small bob of +mass m suspended by a thin string of length L and secured to a platform +at its upper end. Its motion occurs in a vertical plane and is mainly +driven by gravitational force. The period of the pendulum depends on the +length of the string and the amplitude (the maximum angle) of oscillation. +However, the effect of the amplitude can be ignored if the amplitude is +small. It should be noted that the period does not depend on the mass of +the bob. + +For small amplitudes, the period of a simple pendulum is given by the +following approximation: +T ≈ 2π * √(L / g) + +where: +L = length of string from which the bob is hanging (in m) +g = acceleration due to gravity (approx 9.8 m/s²) + +Reference : https://byjus.com/jee/simple-pendulum/ +""" + +from math import pi + +from scipy.constants import g + + +def period_of_pendulum(length: float) -> float: + """ + >>> period_of_pendulum(1.23) + 2.2252155506257845 + >>> period_of_pendulum(2.37) + 3.0888278441908574 + >>> period_of_pendulum(5.63) + 4.76073193364765 + >>> period_of_pendulum(-12) + Traceback (most recent call last): + ... 
+ ValueError: The length should be non-negative + >>> period_of_pendulum(0) + 0.0 + """ + if length < 0: + raise ValueError("The length should be non-negative") + return 2 * pi * (length / g) ** 0.5 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a4399022e516dfba1097f91cedc0b7f4213bab84 Mon Sep 17 00:00:00 2001 From: Julien Richard Date: Tue, 31 Dec 2024 02:11:29 +0100 Subject: [PATCH 1505/1543] chore: improve comments and add tests to trapezoidal rule (#11640) * chore: improve comments and add tests to trapezoidal rule * fix: too much characters in line * Update maths/trapezoidal_rule.py Co-authored-by: Tianyi Zheng * Update maths/trapezoidal_rule.py Co-authored-by: Tianyi Zheng * Update maths/trapezoidal_rule.py Co-authored-by: Tianyi Zheng * Update maths/trapezoidal_rule.py Co-authored-by: Tianyi Zheng * fix: change function name in calls * modify tests, changes numbers to remove coma * updating DIRECTORY.md * Fix doctest whitespace * Try to fix line length in doctest --------- Co-authored-by: Tianyi Zheng Co-authored-by: tianyizheng02 --- DIRECTORY.md | 3 ++ maths/trapezoidal_rule.py | 97 ++++++++++++++++++++++----------------- 2 files changed, 58 insertions(+), 42 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 44d0414a37c8..1248a290d294 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -142,6 +142,7 @@ * [Haralick Descriptors](computer_vision/haralick_descriptors.py) * [Harris Corner](computer_vision/harris_corner.py) * [Horn Schunck](computer_vision/horn_schunck.py) + * [Intensity Based Segmentation](computer_vision/intensity_based_segmentation.py) * [Mean Threshold](computer_vision/mean_threshold.py) * [Mosaic Augmentation](computer_vision/mosaic_augmentation.py) * [Pooling Functions](computer_vision/pooling_functions.py) @@ -507,6 +508,7 @@ * [Kahns Algorithm Long](graphs/kahns_algorithm_long.py) * [Kahns Algorithm Topo](graphs/kahns_algorithm_topo.py) * [Karger](graphs/karger.py) + * [Lanczos Eigenvectors](graphs/lanczos_eigenvectors.py) * [Markov Chain](graphs/markov_chain.py) * [Matching Min Vertex Cover](graphs/matching_min_vertex_cover.py) * [Minimum Path Sum](graphs/minimum_path_sum.py) @@ -886,6 +888,7 @@ * [N Body Simulation](physics/n_body_simulation.py) * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) + * [Period Of Pendulum](physics/period_of_pendulum.py) * [Photoelectric Effect](physics/photoelectric_effect.py) * [Potential Energy](physics/potential_energy.py) * [Rainfall Intensity](physics/rainfall_intensity.py) diff --git a/maths/trapezoidal_rule.py b/maths/trapezoidal_rule.py index 0186629ee378..21b10b239b5f 100644 --- a/maths/trapezoidal_rule.py +++ b/maths/trapezoidal_rule.py @@ -1,28 +1,25 @@ """ Numerical integration or quadrature for a smooth function f with known values at x_i - -This method is the classical approach of suming 'Equally Spaced Abscissas' - -method 1: -"extended trapezoidal rule" -int(f) = dx/2 * (f1 + 2f2 + ... + fn) - """ -def method_1(boundary, steps): +def trapezoidal_rule(boundary, steps): """ - Apply the extended trapezoidal rule to approximate the integral of function f(x) - over the interval defined by 'boundary' with the number of 'steps'. - - Args: - boundary (list of floats): A list containing the start and end values [a, b]. - steps (int): The number of steps or subintervals. - Returns: - float: Approximation of the integral of f(x) over [a, b]. 
- Examples: - >>> method_1([0, 1], 10) - 0.3349999999999999 + Implements the extended trapezoidal rule for numerical integration. + The function f(x) is provided below. + + :param boundary: List containing the lower and upper bounds of integration [a, b] + :param steps: The number of steps (intervals) used in the approximation + :return: The numerical approximation of the integral + + >>> abs(trapezoidal_rule([0, 1], 10) - 0.33333) < 0.01 + True + >>> abs(trapezoidal_rule([0, 1], 100) - 0.33333) < 0.01 + True + >>> abs(trapezoidal_rule([0, 2], 1000) - 2.66667) < 0.01 + True + >>> abs(trapezoidal_rule([1, 2], 1000) - 2.33333) < 0.01 + True """ h = (boundary[1] - boundary[0]) / steps a = boundary[0] @@ -31,7 +28,6 @@ def method_1(boundary, steps): y = 0.0 y += (h / 2.0) * f(a) for i in x_i: - # print(i) y += h * f(i) y += (h / 2.0) * f(b) return y @@ -39,49 +35,66 @@ def method_1(boundary, steps): def make_points(a, b, h): """ - Generates points between 'a' and 'b' with step size 'h', excluding the end points. - Args: - a (float): Start value - b (float): End value - h (float): Step size - Examples: + Generates points between a and b with step size h for trapezoidal integration. + + :param a: The lower bound of integration + :param b: The upper bound of integration + :param h: The step size + :yield: The next x-value in the range (a, b) + + >>> list(make_points(0, 1, 0.1)) # doctest: +NORMALIZE_WHITESPACE + [0.1, 0.2, 0.30000000000000004, 0.4, 0.5, 0.6, 0.7, 0.7999999999999999, \ + 0.8999999999999999] >>> list(make_points(0, 10, 2.5)) [2.5, 5.0, 7.5] - >>> list(make_points(0, 10, 2)) [2, 4, 6, 8] - >>> list(make_points(1, 21, 5)) [6, 11, 16] - >>> list(make_points(1, 5, 2)) [3] - >>> list(make_points(1, 4, 3)) [] """ x = a + h while x <= (b - h): yield x - x = x + h + x += h -def f(x): # enter your function here +def f(x): """ - Example: - >>> f(2) - 4 + This is the function to integrate, f(x) = (x - 0)^2 = x^2. + + :param x: The input value + :return: The value of f(x) + + >>> f(0) + 0 + >>> f(1) + 1 + >>> f(0.5) + 0.25 """ - y = (x - 0) * (x - 0) - return y + return x**2 def main(): - a = 0.0 # Lower bound of integration - b = 1.0 # Upper bound of integration - steps = 10.0 # define number of steps or resolution - boundary = [a, b] # define boundary of integration - y = method_1(boundary, steps) + """ + Main function to test the trapezoidal rule. 
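# Sanity check on the doctests above: the composite trapezoidal rule has
# O(h^2) error, so doubling the number of steps should roughly quarter the
# error against the exact value of the integral of x^2 over [0, 1], which is 1/3.
# Standalone illustration, independent of the module's f and trapezoidal_rule.
def trapezoid(func, a, b, steps):
    h = (b - a) / steps
    return h * ((func(a) + func(b)) / 2 + sum(func(a + i * h) for i in range(1, steps)))

exact = 1 / 3
error_10 = abs(trapezoid(lambda x: x * x, 0.0, 1.0, 10) - exact)
error_20 = abs(trapezoid(lambda x: x * x, 0.0, 1.0, 20) - exact)
assert 3.5 < error_10 / error_20 < 4.5  # halving h roughly quarters the error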
+ :a: Lower bound of integration + :b: Upper bound of integration + :steps: define number of steps or resolution + :boundary: define boundary of integration + + >>> main() + y = 0.3349999999999999 + """ + a = 0.0 + b = 1.0 + steps = 10.0 + boundary = [a, b] + y = trapezoidal_rule(boundary, steps) print(f"y = {y}") From 91a22c2e36477623b1f81518ff18c6f8617f81fb Mon Sep 17 00:00:00 2001 From: SEIKH NABAB UDDIN <93948993+nababuddin@users.noreply.github.com> Date: Tue, 31 Dec 2024 07:39:14 +0530 Subject: [PATCH 1506/1543] Create digital differential analyzer_line.py (#10929) * Create DDA_line_drawing.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename DDA_line_drawing.py to digital differential analyzer_line_drawing.py * Rename DDA_line_drawing.py to digital_differential_analyzer_line_drawing.py * Update digital_differential_analyzer_line_drawing.py * Update digital_differential_analyzer_line_drawing.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update digital_differential_analyzer_line_drawing.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update digital_differential_analyzer_line_drawing.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update digital_differential_analyzer_line_drawing.py * Update digital_differential_analyzer_line_drawing.py * Update digital_differential_analyzer_line_drawing.py * Update digital_differential_analyzer_line_drawing.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review Co-authored-by: Tianyi Zheng * Update and rename digital_differential_analyzer_line_drawing.py to digital_differential_analyzer_line.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update digital_differential_analyzer_line.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update digital_differential_analyzer_line.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update digital_differential_analyzer_line.py * Update digital_differential_analyzer_line.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update digital_differential_analyzer_line.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update digital_differential_analyzer_line.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update digital_differential_analyzer_line.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Fix doctest * Trigger GH workflows * Fix function call in main block --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- .../digital_differential_analyzer_line.py | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 graphics/digital_differential_analyzer_line.py diff --git a/graphics/digital_differential_analyzer_line.py b/graphics/digital_differential_analyzer_line.py new file mode 100644 index 000000000000..a51cb0b8dc37 --- /dev/null +++ 
b/graphics/digital_differential_analyzer_line.py @@ -0,0 +1,52 @@ +import matplotlib.pyplot as plt + + +def digital_differential_analyzer_line( + p1: tuple[int, int], p2: tuple[int, int] +) -> list[tuple[int, int]]: + """ + Draws a line between two points using the DDA algorithm. + + Args: + - p1: Coordinates of the starting point. + - p2: Coordinates of the ending point. + Returns: + - List of coordinate points that form the line. + + >>> digital_differential_analyzer_line((1, 1), (4, 4)) + [(2, 2), (3, 3), (4, 4)] + """ + x1, y1 = p1 + x2, y2 = p2 + dx = x2 - x1 + dy = y2 - y1 + steps = max(abs(dx), abs(dy)) + x_increment = dx / float(steps) + y_increment = dy / float(steps) + coordinates = [] + x: float = x1 + y: float = y1 + for _ in range(steps): + x += x_increment + y += y_increment + coordinates.append((int(round(x)), int(round(y)))) + return coordinates + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + x1 = int(input("Enter the x-coordinate of the starting point: ")) + y1 = int(input("Enter the y-coordinate of the starting point: ")) + x2 = int(input("Enter the x-coordinate of the ending point: ")) + y2 = int(input("Enter the y-coordinate of the ending point: ")) + coordinates = digital_differential_analyzer_line((x1, y1), (x2, y2)) + x_points, y_points = zip(*coordinates) + plt.plot(x_points, y_points, marker="o") + plt.title("Digital Differential Analyzer Line Drawing Algorithm") + plt.xlabel("X-axis") + plt.ylabel("Y-axis") + plt.grid() + plt.show() From 12b1023a9d97ca19be761c10129cba5509c9b450 Mon Sep 17 00:00:00 2001 From: Kaustubh Mani Tripathi <129510465+kmtGryffindor20@users.noreply.github.com> Date: Tue, 31 Dec 2024 07:46:32 +0530 Subject: [PATCH 1507/1543] [ADDED] Implementation of Geometric Mean. (#10421) * [ADDED] Implementation of Geometric Mean. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rectified type hints * Typo * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/geometric_mean.py | 55 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 maths/geometric_mean.py diff --git a/maths/geometric_mean.py b/maths/geometric_mean.py new file mode 100644 index 000000000000..240d519ad398 --- /dev/null +++ b/maths/geometric_mean.py @@ -0,0 +1,55 @@ +""" +The Geometric Mean of n numbers is defined as the n-th root of the product +of those numbers. It is used to measure the central tendency of the numbers. +https://en.wikipedia.org/wiki/Geometric_mean +""" + + +def compute_geometric_mean(*args: int) -> float: + """ + Return the geometric mean of the argument numbers. + >>> compute_geometric_mean(2,8) + 4.0 + >>> compute_geometric_mean('a', 4) + Traceback (most recent call last): + ... + TypeError: Not a Number + >>> compute_geometric_mean(5, 125) + 25.0 + >>> compute_geometric_mean(1, 0) + 0.0 + >>> compute_geometric_mean(1, 5, 25, 5) + 5.0 + >>> compute_geometric_mean(2, -2) + Traceback (most recent call last): + ... + ArithmeticError: Cannot Compute Geometric Mean for these numbers. + >>> compute_geometric_mean(-5, 25, 1) + -5.0 + """ + product = 1 + for number in args: + if not isinstance(number, int) and not isinstance(number, float): + raise TypeError("Not a Number") + product *= number + # Cannot calculate the even root for negative product. + # Frequently they are restricted to being positive. 
+ if product < 0 and len(args) % 2 == 0: + raise ArithmeticError("Cannot Compute Geometric Mean for these numbers.") + mean = abs(product) ** (1 / len(args)) + # Since python calculates complex roots for negative products with odd roots. + if product < 0: + mean = -mean + # Since it does floating point arithmetic, it gives 64**(1/3) as 3.99999996 + possible_mean = float(round(mean)) + # To check if the rounded number is actually the mean. + if possible_mean ** len(args) == product: + mean = possible_mean + return mean + + +if __name__ == "__main__": + from doctest import testmod + + testmod(name="compute_geometric_mean") + print(compute_geometric_mean(-3, -27)) From bae33acf9008aa6c80351b3c68f492ba0c4a1352 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 21:12:13 +0100 Subject: [PATCH 1508/1543] [pre-commit.ci] pre-commit autoupdate (#12507) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.4 → v0.8.6](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.4...v0.8.6) - [github.com/pre-commit/mirrors-mypy: v1.14.0 → v1.14.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.14.0...v1.14.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 71ac72c29b5f..ec1dbca3a41c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.4 + rev: v0.8.6 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.14.0 + rev: v1.14.1 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 1248a290d294..3f0a5dbb140f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -462,6 +462,7 @@ ## Graphics * [Bezier Curve](graphics/bezier_curve.py) + * [Digital Differential Analyzer Line](graphics/digital_differential_analyzer_line.py) * [Vector3 For 2D Rendering](graphics/vector3_for_2d_rendering.py) ## Graphs @@ -663,6 +664,7 @@ * [Gamma](maths/gamma.py) * [Gaussian](maths/gaussian.py) * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) + * [Geometric Mean](maths/geometric_mean.py) * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) From b653aee627a95de423f1cad97f283de904271ff7 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sun, 12 Jan 2025 19:05:08 +0300 Subject: [PATCH 1509/1543] Fix ruff (#12515) * Empty commit * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Fix * Fix * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 4 ++-- ciphers/{base64.py => base64_cipher.py} | 0 project_euler/problem_002/sol4.py | 2 +- strings/{wave.py => wave_string.py} | 0 4 files changed, 3 insertions(+), 3 deletions(-) rename ciphers/{base64.py => base64_cipher.py} (100%) rename strings/{wave.py => wave_string.py} (100%) diff --git 
a/DIRECTORY.md b/DIRECTORY.md index 3f0a5dbb140f..aad6c72aa8ee 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -86,7 +86,7 @@ * [Baconian Cipher](ciphers/baconian_cipher.py) * [Base16](ciphers/base16.py) * [Base32](ciphers/base32.py) - * [Base64](ciphers/base64.py) + * [Base64 Cipher](ciphers/base64_cipher.py) * [Base85](ciphers/base85.py) * [Beaufort Cipher](ciphers/beaufort_cipher.py) * [Bifid](ciphers/bifid.py) @@ -1331,7 +1331,7 @@ * [Title](strings/title.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) - * [Wave](strings/wave.py) + * [Wave String](strings/wave_string.py) * [Wildcard Pattern Matching](strings/wildcard_pattern_matching.py) * [Word Occurrence](strings/word_occurrence.py) * [Word Patterns](strings/word_patterns.py) diff --git a/ciphers/base64.py b/ciphers/base64_cipher.py similarity index 100% rename from ciphers/base64.py rename to ciphers/base64_cipher.py diff --git a/project_euler/problem_002/sol4.py b/project_euler/problem_002/sol4.py index 3a2e4fce341c..a13d34fd760e 100644 --- a/project_euler/problem_002/sol4.py +++ b/project_euler/problem_002/sol4.py @@ -61,7 +61,7 @@ def solution(n: int = 4000000) -> int: if n <= 0: raise ValueError("Parameter n must be greater than or equal to one.") getcontext().prec = 100 - phi = (Decimal(5) ** Decimal(0.5) + 1) / Decimal(2) + phi = (Decimal(5) ** Decimal("0.5") + 1) / Decimal(2) index = (math.floor(math.log(n * (phi + 2), phi) - 1) // 3) * 3 + 2 num = Decimal(round(phi ** Decimal(index + 1))) / (phi + 2) diff --git a/strings/wave.py b/strings/wave_string.py similarity index 100% rename from strings/wave.py rename to strings/wave_string.py From 4c92de5e03310811a376058e110db8d615769087 Mon Sep 17 00:00:00 2001 From: Sanjay Muthu Date: Mon, 13 Jan 2025 05:05:22 +0530 Subject: [PATCH 1510/1543] Fix dynamic_programming/longest_increasing_subsequence.py (#12517) * Fix #12510 * Added the doctest mentioned in the issue * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed Grammer Mistake * Update longest_increasing_subsequence.py * Update longest_increasing_subsequence.py * Update longest_increasing_subsequence.py * Update longest_increasing_subsequence.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- dynamic_programming/longest_increasing_subsequence.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/dynamic_programming/longest_increasing_subsequence.py b/dynamic_programming/longest_increasing_subsequence.py index d839757f6da5..1863a882c41e 100644 --- a/dynamic_programming/longest_increasing_subsequence.py +++ b/dynamic_programming/longest_increasing_subsequence.py @@ -24,8 +24,10 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recu [10, 22, 33, 41, 60, 80] >>> longest_subsequence([4, 8, 7, 5, 1, 12, 2, 3, 9]) [1, 2, 3, 9] + >>> longest_subsequence([28, 26, 12, 23, 35, 39]) + [12, 23, 35, 39] >>> longest_subsequence([9, 8, 7, 6, 5, 7]) - [8] + [5, 7] >>> longest_subsequence([1, 1, 1]) [1, 1, 1] >>> longest_subsequence([]) @@ -44,7 +46,7 @@ def longest_subsequence(array: list[int]) -> list[int]: # This function is recu while not is_found and i < array_length: if array[i] < pivot: is_found = True - temp_array = [element for element in array[i:] if element >= array[i]] + temp_array = array[i:] temp_array = longest_subsequence(temp_array) if len(temp_array) > len(longest_subseq): longest_subseq = 
temp_array From 787aa5d3b59640b2d9161b56ca8fde763597efe4 Mon Sep 17 00:00:00 2001 From: Siddhant <87547498+Siddhant231xyz@users.noreply.github.com> Date: Sun, 12 Jan 2025 20:54:23 -0500 Subject: [PATCH 1511/1543] doctest all_combinations.py (#12506) * doctest in all_combinations.py * added doctest in all_combinations.py * doctests in all_combinations.py * add doctest all_combinations.py * add --------- Co-authored-by: Siddhant Jain --- backtracking/all_combinations.py | 34 ++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py index 390decf3a05b..1d15c6263e14 100644 --- a/backtracking/all_combinations.py +++ b/backtracking/all_combinations.py @@ -12,6 +12,8 @@ def combination_lists(n: int, k: int) -> list[list[int]]: """ + Generates all possible combinations of k numbers out of 1 ... n using itertools. + >>> combination_lists(n=4, k=2) [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] """ @@ -20,6 +22,8 @@ def combination_lists(n: int, k: int) -> list[list[int]]: def generate_all_combinations(n: int, k: int) -> list[list[int]]: """ + Generates all possible combinations of k numbers out of 1 ... n using backtracking. + >>> generate_all_combinations(n=4, k=2) [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] >>> generate_all_combinations(n=0, k=0) @@ -34,6 +38,14 @@ def generate_all_combinations(n: int, k: int) -> list[list[int]]: ValueError: n must not be negative >>> generate_all_combinations(n=5, k=4) [[1, 2, 3, 4], [1, 2, 3, 5], [1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]] + >>> generate_all_combinations(n=3, k=3) + [[1, 2, 3]] + >>> generate_all_combinations(n=3, k=1) + [[1], [2], [3]] + >>> generate_all_combinations(n=1, k=0) + [[]] + >>> generate_all_combinations(n=1, k=1) + [[1]] >>> from itertools import combinations >>> all(generate_all_combinations(n, k) == combination_lists(n, k) ... for n in range(1, 6) for k in range(1, 6)) @@ -56,6 +68,28 @@ def create_all_state( current_list: list[int], total_list: list[list[int]], ) -> None: + """ + Helper function to recursively build all combinations. 
+ + >>> create_all_state(1, 4, 2, [], result := []) + >>> result + [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] + >>> create_all_state(1, 3, 3, [], result := []) + >>> result + [[1, 2, 3]] + >>> create_all_state(2, 2, 1, [1], result := []) + >>> result + [[1, 2]] + >>> create_all_state(1, 0, 0, [], result := []) + >>> result + [[]] + >>> create_all_state(1, 4, 0, [1, 2], result := []) + >>> result + [[1, 2]] + >>> create_all_state(5, 4, 2, [1, 2], result := []) + >>> result + [] + """ if level == 0: total_list.append(current_list[:]) return From cfcc84edf7d14cb56f52ba6fbd8c8deb2e9a7852 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Mon, 13 Jan 2025 23:49:07 +0300 Subject: [PATCH 1512/1543] Fix build (#12516) * Empty commit * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- web_programming/current_stock_price.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/web_programming/current_stock_price.py b/web_programming/current_stock_price.py index d0a65e9aac84..573e1f575c8e 100644 --- a/web_programming/current_stock_price.py +++ b/web_programming/current_stock_price.py @@ -15,7 +15,7 @@ def stock_price(symbol: str = "AAPL") -> str: """ >>> stock_price("EEEE") - '-' + '- ' >>> isinstance(float(stock_price("GOOG")),float) True """ @@ -24,12 +24,10 @@ def stock_price(symbol: str = "AAPL") -> str: url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10 ).text soup = BeautifulSoup(yahoo_finance_source, "html.parser") - specific_fin_streamer_tag = soup.find("fin-streamer", {"data-testid": "qsp-price"}) - if specific_fin_streamer_tag: - text = specific_fin_streamer_tag.get_text() - return text - return "No tag with the specified data-test attribute found." + if specific_fin_streamer_tag := soup.find("span", {"data-testid": "qsp-price"}): + return specific_fin_streamer_tag.get_text() + return "No tag with the specified data-testid attribute found." # Search for the symbol at https://finance.yahoo.com/lookup From 4fe50bc1fcf82fceb61839bae314720c092c0692 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 21:52:12 +0100 Subject: [PATCH 1513/1543] [pre-commit.ci] pre-commit autoupdate -- ruff 2025 stable format (#12521) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.6 → v0.9.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.6...v0.9.1) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/dual_number_automatic_differentiation.py * Update maths/dual_number_automatic_differentiation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dual_number_automatic_differentiation.py * Update dual_number_automatic_differentiation.py * No tag with the specified data-test attribute found. 
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- ciphers/base64_cipher.py | 12 +++--- ciphers/caesar_cipher.py | 2 +- computer_vision/flip_augmentation.py | 2 +- computer_vision/mosaic_augmentation.py | 2 +- .../hashing/number_theory/prime_numbers.py | 6 +-- data_structures/heap/min_heap.py | 6 +-- data_structures/kd_tree/tests/test_kdtree.py | 12 +++--- .../suffix_tree/tests/test_suffix_tree.py | 24 +++++------ dynamic_programming/climbing_stairs.py | 6 +-- .../iterating_through_submasks.py | 6 +-- .../matrix_chain_multiplication.py | 2 +- .../linear_discriminant_analysis.py | 4 +- .../dual_number_automatic_differentiation.py | 6 +-- maths/max_sum_sliding_window.py | 4 +- .../integration_by_simpson_approx.py | 12 +++--- maths/prime_check.py | 12 +++--- maths/primelib.py | 42 +++++++++---------- matrix/matrix_based_game.py | 2 +- neural_network/input_data.py | 6 +-- scripts/validate_solutions.py | 6 +-- strings/jaro_winkler.py | 4 +- web_programming/fetch_anime_and_play.py | 4 +- 23 files changed, 93 insertions(+), 91 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ec1dbca3a41c..3b1dd9658d7f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.6 + rev: v0.9.1 hooks: - id: ruff - id: ruff-format diff --git a/ciphers/base64_cipher.py b/ciphers/base64_cipher.py index 2b950b1be37d..038d13963d95 100644 --- a/ciphers/base64_cipher.py +++ b/ciphers/base64_cipher.py @@ -105,13 +105,13 @@ def base64_decode(encoded_data: str) -> bytes: # Check if the encoded string contains non base64 characters if padding: - assert all( - char in B64_CHARSET for char in encoded_data[:-padding] - ), "Invalid base64 character(s) found." + assert all(char in B64_CHARSET for char in encoded_data[:-padding]), ( + "Invalid base64 character(s) found." + ) else: - assert all( - char in B64_CHARSET for char in encoded_data - ), "Invalid base64 character(s) found." + assert all(char in B64_CHARSET for char in encoded_data), ( + "Invalid base64 character(s) found." 
+ ) # Check the padding assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding" diff --git a/ciphers/caesar_cipher.py b/ciphers/caesar_cipher.py index 9c096fe8a7da..1cf4d67cbaed 100644 --- a/ciphers/caesar_cipher.py +++ b/ciphers/caesar_cipher.py @@ -225,7 +225,7 @@ def brute_force(input_string: str, alphabet: str | None = None) -> dict[int, str if __name__ == "__main__": while True: - print(f'\n{"-" * 10}\n Menu\n{"-" * 10}') + print(f"\n{'-' * 10}\n Menu\n{'-' * 10}") print(*["1.Encrypt", "2.Decrypt", "3.BruteForce", "4.Quit"], sep="\n") # get user input diff --git a/computer_vision/flip_augmentation.py b/computer_vision/flip_augmentation.py index 77a8cbd7b14f..7301424824df 100644 --- a/computer_vision/flip_augmentation.py +++ b/computer_vision/flip_augmentation.py @@ -33,7 +33,7 @@ def main() -> None: file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0] file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}" cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85]) - print(f"Success {index+1}/{len(new_images)} with {file_name}") + print(f"Success {index + 1}/{len(new_images)} with {file_name}") annos_list = [] for anno in new_annos[index]: obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}" diff --git a/computer_vision/mosaic_augmentation.py b/computer_vision/mosaic_augmentation.py index cd923dfe095f..d881347121ea 100644 --- a/computer_vision/mosaic_augmentation.py +++ b/computer_vision/mosaic_augmentation.py @@ -41,7 +41,7 @@ def main() -> None: file_name = path.split(os.sep)[-1].rsplit(".", 1)[0] file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}" cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85]) - print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}") + print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}") annos_list = [] for anno in new_annos: width = anno[3] - anno[1] diff --git a/data_structures/hashing/number_theory/prime_numbers.py b/data_structures/hashing/number_theory/prime_numbers.py index 2549a1477b2b..82071b5e9f09 100644 --- a/data_structures/hashing/number_theory/prime_numbers.py +++ b/data_structures/hashing/number_theory/prime_numbers.py @@ -32,9 +32,9 @@ def is_prime(number: int) -> bool: """ # precondition - assert isinstance(number, int) and ( - number >= 0 - ), "'number' must been an int and positive" + assert isinstance(number, int) and (number >= 0), ( + "'number' must been an int and positive" + ) if 1 < number < 4: # 2 and 3 are primes diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py index ce7ed570a58d..577b98d788a1 100644 --- a/data_structures/heap/min_heap.py +++ b/data_structures/heap/min_heap.py @@ -124,9 +124,9 @@ def is_empty(self): return len(self.heap) == 0 def decrease_key(self, node, new_value): - assert ( - self.heap[self.idx_of_element[node]].val > new_value - ), "newValue must be less that current value" + assert self.heap[self.idx_of_element[node]].val > new_value, ( + "newValue must be less that current value" + ) node.val = new_value self.heap_dict[node.name] = new_value self.sift_up(self.idx_of_element[node]) diff --git a/data_structures/kd_tree/tests/test_kdtree.py b/data_structures/kd_tree/tests/test_kdtree.py index dce5e4f34ff4..d6a4a66dd24d 100644 --- a/data_structures/kd_tree/tests/test_kdtree.py +++ b/data_structures/kd_tree/tests/test_kdtree.py @@ -48,14 +48,14 @@ def test_build_kdtree(num_points, cube_size, num_dimensions, depth, expected_res assert kdtree is not None, "Expected a KDNode, got None" # Check if 
root has correct dimensions - assert ( - len(kdtree.point) == num_dimensions - ), f"Expected point dimension {num_dimensions}, got {len(kdtree.point)}" + assert len(kdtree.point) == num_dimensions, ( + f"Expected point dimension {num_dimensions}, got {len(kdtree.point)}" + ) # Check that the tree is balanced to some extent (simplistic check) - assert isinstance( - kdtree, KDNode - ), f"Expected KDNode instance, got {type(kdtree)}" + assert isinstance(kdtree, KDNode), ( + f"Expected KDNode instance, got {type(kdtree)}" + ) def test_nearest_neighbour_search(): diff --git a/data_structures/suffix_tree/tests/test_suffix_tree.py b/data_structures/suffix_tree/tests/test_suffix_tree.py index 45c6790ac48a..c9dbe199d19d 100644 --- a/data_structures/suffix_tree/tests/test_suffix_tree.py +++ b/data_structures/suffix_tree/tests/test_suffix_tree.py @@ -22,18 +22,18 @@ def test_search_existing_patterns(self) -> None: patterns = ["ana", "ban", "na"] for pattern in patterns: with self.subTest(pattern=pattern): - assert self.suffix_tree.search( - pattern - ), f"Pattern '{pattern}' should be found." + assert self.suffix_tree.search(pattern), ( + f"Pattern '{pattern}' should be found." + ) def test_search_non_existing_patterns(self) -> None: """Test searching for patterns that do not exist in the suffix tree.""" patterns = ["xyz", "apple", "cat"] for pattern in patterns: with self.subTest(pattern=pattern): - assert not self.suffix_tree.search( - pattern - ), f"Pattern '{pattern}' should not be found." + assert not self.suffix_tree.search(pattern), ( + f"Pattern '{pattern}' should not be found." + ) def test_search_empty_pattern(self) -> None: """Test searching for an empty pattern.""" @@ -41,18 +41,18 @@ def test_search_empty_pattern(self) -> None: def test_search_full_text(self) -> None: """Test searching for the full text.""" - assert self.suffix_tree.search( - self.text - ), "The full text should be found in the suffix tree." + assert self.suffix_tree.search(self.text), ( + "The full text should be found in the suffix tree." + ) def test_search_substrings(self) -> None: """Test searching for substrings of the full text.""" substrings = ["ban", "ana", "a", "na"] for substring in substrings: with self.subTest(substring=substring): - assert self.suffix_tree.search( - substring - ), f"Substring '{substring}' should be found." + assert self.suffix_tree.search(substring), ( + f"Substring '{substring}' should be found." + ) if __name__ == "__main__": diff --git a/dynamic_programming/climbing_stairs.py b/dynamic_programming/climbing_stairs.py index d6273d025f08..38bdb427eedc 100644 --- a/dynamic_programming/climbing_stairs.py +++ b/dynamic_programming/climbing_stairs.py @@ -25,9 +25,9 @@ def climb_stairs(number_of_steps: int) -> int: ... 
AssertionError: number_of_steps needs to be positive integer, your input -7 """ - assert ( - isinstance(number_of_steps, int) and number_of_steps > 0 - ), f"number_of_steps needs to be positive integer, your input {number_of_steps}" + assert isinstance(number_of_steps, int) and number_of_steps > 0, ( + f"number_of_steps needs to be positive integer, your input {number_of_steps}" + ) if number_of_steps == 1: return 1 previous, current = 1, 1 diff --git a/dynamic_programming/iterating_through_submasks.py b/dynamic_programming/iterating_through_submasks.py index 372dd2c74a71..efab6dacff3f 100644 --- a/dynamic_programming/iterating_through_submasks.py +++ b/dynamic_programming/iterating_through_submasks.py @@ -37,9 +37,9 @@ def list_of_submasks(mask: int) -> list[int]: """ - assert ( - isinstance(mask, int) and mask > 0 - ), f"mask needs to be positive integer, your input {mask}" + assert isinstance(mask, int) and mask > 0, ( + f"mask needs to be positive integer, your input {mask}" + ) """ first submask iterated will be mask itself then operation will be performed diff --git a/dynamic_programming/matrix_chain_multiplication.py b/dynamic_programming/matrix_chain_multiplication.py index 10e136b9f0db..4c0c771f9092 100644 --- a/dynamic_programming/matrix_chain_multiplication.py +++ b/dynamic_programming/matrix_chain_multiplication.py @@ -134,7 +134,7 @@ def elapsed_time(msg: str) -> Iterator: start = perf_counter_ns() yield - print(f"Finished: {msg} in {(perf_counter_ns() - start) / 10 ** 9} seconds.") + print(f"Finished: {msg} in {(perf_counter_ns() - start) / 10**9} seconds.") if __name__ == "__main__": diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index 86f28aef671a..8528ccbbae51 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -322,7 +322,7 @@ def main(): user_count = valid_input( input_type=int, condition=lambda x: x > 0, - input_msg=(f"Enter The number of instances for class_{i+1}: "), + input_msg=(f"Enter The number of instances for class_{i + 1}: "), err_msg="Number of instances should be positive!", ) counts.append(user_count) @@ -333,7 +333,7 @@ def main(): for a in range(n_classes): user_mean = valid_input( input_type=float, - input_msg=(f"Enter the value of mean for class_{a+1}: "), + input_msg=(f"Enter the value of mean for class_{a + 1}: "), err_msg="This is an invalid value.", ) user_means.append(user_mean) diff --git a/maths/dual_number_automatic_differentiation.py b/maths/dual_number_automatic_differentiation.py index f98997c8be4d..09aeb17a4aea 100644 --- a/maths/dual_number_automatic_differentiation.py +++ b/maths/dual_number_automatic_differentiation.py @@ -17,10 +17,8 @@ def __init__(self, real, rank): self.duals = rank def __repr__(self): - return ( - f"{self.real}+" - f"{'+'.join(str(dual)+'E'+str(n+1)for n,dual in enumerate(self.duals))}" - ) + s = "+".join(f"{dual}E{n}" for n, dual in enumerate(self.duals, 1)) + return f"{self.real}+{s}" def reduce(self): cur = self.duals.copy() diff --git a/maths/max_sum_sliding_window.py b/maths/max_sum_sliding_window.py index 090117429604..c7492978a6c9 100644 --- a/maths/max_sum_sliding_window.py +++ b/maths/max_sum_sliding_window.py @@ -43,4 +43,6 @@ def max_sum_in_array(array: list[int], k: int) -> int: testmod() array = [randint(-1000, 1000) for i in range(100)] k = randint(0, 110) - print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}") + print( + f"The maximum sum of 
{k} consecutive elements is {max_sum_in_array(array, k)}" + ) diff --git a/maths/numerical_analysis/integration_by_simpson_approx.py b/maths/numerical_analysis/integration_by_simpson_approx.py index 934299997aac..043f3a9a72af 100644 --- a/maths/numerical_analysis/integration_by_simpson_approx.py +++ b/maths/numerical_analysis/integration_by_simpson_approx.py @@ -88,18 +88,18 @@ def simpson_integration(function, a: float, b: float, precision: int = 4) -> flo AssertionError: precision should be positive integer your input : -1 """ - assert callable( - function - ), f"the function(object) passed should be callable your input : {function}" + assert callable(function), ( + f"the function(object) passed should be callable your input : {function}" + ) assert isinstance(a, (float, int)), f"a should be float or integer your input : {a}" assert isinstance(function(a), (float, int)), ( "the function should return integer or float return type of your function, " f"{type(a)}" ) assert isinstance(b, (float, int)), f"b should be float or integer your input : {b}" - assert ( - isinstance(precision, int) and precision > 0 - ), f"precision should be positive integer your input : {precision}" + assert isinstance(precision, int) and precision > 0, ( + f"precision should be positive integer your input : {precision}" + ) # just applying the formula of simpson for approximate integration written in # mentioned article in first comment of this file and above this function diff --git a/maths/prime_check.py b/maths/prime_check.py index f1bc4def2469..a757c4108f24 100644 --- a/maths/prime_check.py +++ b/maths/prime_check.py @@ -73,12 +73,12 @@ def test_primes(self): def test_not_primes(self): with pytest.raises(ValueError): is_prime(-19) - assert not is_prime( - 0 - ), "Zero doesn't have any positive factors, primes must have exactly two." - assert not is_prime( - 1 - ), "One only has 1 positive factor, primes must have exactly two." + assert not is_prime(0), ( + "Zero doesn't have any positive factors, primes must have exactly two." + ) + assert not is_prime(1), ( + "One only has 1 positive factor, primes must have exactly two." 
+ ) assert not is_prime(2 * 2) assert not is_prime(2 * 3) assert not is_prime(3 * 3) diff --git a/maths/primelib.py b/maths/primelib.py index a26b0eaeb328..3a966e5cd936 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -66,9 +66,9 @@ def is_prime(number: int) -> bool: """ # precondition - assert isinstance(number, int) and ( - number >= 0 - ), "'number' must been an int and positive" + assert isinstance(number, int) and (number >= 0), ( + "'number' must been an int and positive" + ) status = True @@ -254,9 +254,9 @@ def greatest_prime_factor(number): """ # precondition - assert isinstance(number, int) and ( - number >= 0 - ), "'number' must been an int and >= 0" + assert isinstance(number, int) and (number >= 0), ( + "'number' must been an int and >= 0" + ) ans = 0 @@ -296,9 +296,9 @@ def smallest_prime_factor(number): """ # precondition - assert isinstance(number, int) and ( - number >= 0 - ), "'number' must been an int and >= 0" + assert isinstance(number, int) and (number >= 0), ( + "'number' must been an int and >= 0" + ) ans = 0 @@ -399,9 +399,9 @@ def goldbach(number): """ # precondition - assert ( - isinstance(number, int) and (number > 2) and is_even(number) - ), "'number' must been an int, even and > 2" + assert isinstance(number, int) and (number > 2) and is_even(number), ( + "'number' must been an int, even and > 2" + ) ans = [] # this list will returned @@ -525,9 +525,9 @@ def kg_v(number1, number2): done.append(n) # precondition - assert isinstance(ans, int) and ( - ans >= 0 - ), "'ans' must been from type int and positive" + assert isinstance(ans, int) and (ans >= 0), ( + "'ans' must been from type int and positive" + ) return ans @@ -574,9 +574,9 @@ def get_prime(n): ans += 1 # precondition - assert isinstance(ans, int) and is_prime( - ans - ), "'ans' must been a prime number and from type int" + assert isinstance(ans, int) and is_prime(ans), ( + "'ans' must been a prime number and from type int" + ) return ans @@ -705,9 +705,9 @@ def is_perfect_number(number): """ # precondition - assert isinstance(number, int) and ( - number > 1 - ), "'number' must been an int and >= 1" + assert isinstance(number, int) and (number > 1), ( + "'number' must been an int and >= 1" + ) divisors = get_divisors(number) diff --git a/matrix/matrix_based_game.py b/matrix/matrix_based_game.py index 1ff0cbe93435..6181086c6704 100644 --- a/matrix/matrix_based_game.py +++ b/matrix/matrix_based_game.py @@ -273,7 +273,7 @@ def process_game(size: int, matrix: list[str], moves: list[tuple[int, int]]) -> size = int(input("Enter the size of the matrix: ")) validate_matrix_size(size) print(f"Enter the {size} rows of the matrix:") - matrix = [input(f"Row {i+1}: ") for i in range(size)] + matrix = [input(f"Row {i + 1}: ") for i in range(size)] validate_matrix_content(matrix, size) moves_input = input("Enter the moves (e.g., '0 0, 1 1'): ") moves = parse_moves(moves_input) diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 72debabb566a..3a8628f939f8 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -160,9 +160,9 @@ def __init__( self._num_examples = 10000 self.one_hot = one_hot else: - assert ( - images.shape[0] == labels.shape[0] - ), f"images.shape: {images.shape} labels.shape: {labels.shape}" + assert images.shape[0] == labels.shape[0], ( + f"images.shape: {images.shape} labels.shape: {labels.shape}" + ) self._num_examples = images.shape[0] # Convert shape from [num examples, rows, columns, depth] diff --git a/scripts/validate_solutions.py 
b/scripts/validate_solutions.py index 325c245e0d77..df5d01086bbe 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -94,6 +94,6 @@ def test_project_euler(solution_path: pathlib.Path) -> None: solution_module = convert_path_to_module(solution_path) answer = str(solution_module.solution()) answer = hashlib.sha256(answer.encode()).hexdigest() - assert ( - answer == expected - ), f"Expected solution to {problem_number} to have hash {expected}, got {answer}" + assert answer == expected, ( + f"Expected solution to {problem_number} to have hash {expected}, got {answer}" + ) diff --git a/strings/jaro_winkler.py b/strings/jaro_winkler.py index cae2068fabc1..0ce5d83b3c41 100644 --- a/strings/jaro_winkler.py +++ b/strings/jaro_winkler.py @@ -33,7 +33,9 @@ def get_matched_characters(_str1: str, _str2: str) -> str: right = int(min(i + limit + 1, len(_str2))) if char in _str2[left:right]: matched.append(char) - _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}" + _str2 = ( + f"{_str2[0 : _str2.index(char)]} {_str2[_str2.index(char) + 1 :]}" + ) return "".join(matched) diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py index fd7c3a3a7381..e56b7124eeb5 100644 --- a/web_programming/fetch_anime_and_play.py +++ b/web_programming/fetch_anime_and_play.py @@ -165,7 +165,7 @@ def get_anime_episode(episode_endpoint: str) -> list: print(f"Found {len(anime_list)} results: ") for i, anime in enumerate(anime_list): anime_title = anime["title"] - print(f"{i+1}. {anime_title}") + print(f"{i + 1}. {anime_title}") anime_choice = int(input("\nPlease choose from the following list: ").strip()) chosen_anime = anime_list[anime_choice - 1] @@ -177,7 +177,7 @@ def get_anime_episode(episode_endpoint: str) -> list: else: print(f"Found {len(episode_list)} results: ") for i, episode in enumerate(episode_list): - print(f"{i+1}. {episode['title']}") + print(f"{i + 1}. {episode['title']}") episode_choice = int(input("\nChoose an episode by serial no: ").strip()) chosen_episode = episode_list[episode_choice - 1] From f04d308431266759dce36265d8701dfb106932af Mon Sep 17 00:00:00 2001 From: Sanjay Muthu Date: Wed, 15 Jan 2025 02:19:04 +0530 Subject: [PATCH 1514/1543] Create longest_increasing_subsequence_iterative.py (#12524) * Create longest_increasing_subsequence_iterative.py * Update longest_increasing_subsequence_iterative.py * Update longest_increasing_subsequence_iterative.py --------- Co-authored-by: Maxim Smolskiy --- ...ongest_increasing_subsequence_iterative.py | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 dynamic_programming/longest_increasing_subsequence_iterative.py diff --git a/dynamic_programming/longest_increasing_subsequence_iterative.py b/dynamic_programming/longest_increasing_subsequence_iterative.py new file mode 100644 index 000000000000..665c86a35d2e --- /dev/null +++ b/dynamic_programming/longest_increasing_subsequence_iterative.py @@ -0,0 +1,72 @@ +""" +Author : Sanjay Muthu + +This is a pure Python implementation of Dynamic Programming solution to the longest +increasing subsequence of a given sequence. + +The problem is: + Given an array, to find the longest and increasing sub-array in that given array and + return it. 
+ +Example: + ``[10, 22, 9, 33, 21, 50, 41, 60, 80]`` as input will return + ``[10, 22, 33, 50, 60, 80]`` as output +""" + +from __future__ import annotations + +import copy + + +def longest_subsequence(array: list[int]) -> list[int]: + """ + Some examples + + >>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) + [10, 22, 33, 50, 60, 80] + >>> longest_subsequence([4, 8, 7, 5, 1, 12, 2, 3, 9]) + [1, 2, 3, 9] + >>> longest_subsequence([9, 8, 7, 6, 5, 7]) + [7, 7] + >>> longest_subsequence([28, 26, 12, 23, 35, 39]) + [12, 23, 35, 39] + >>> longest_subsequence([1, 1, 1]) + [1, 1, 1] + >>> longest_subsequence([]) + [] + """ + n = len(array) + # The longest increasing subsequence ending at array[i] + longest_increasing_subsequence = [] + for i in range(n): + longest_increasing_subsequence.append([array[i]]) + + for i in range(1, n): + for prev in range(i): + # If array[prev] is less than or equal to array[i], then + # longest_increasing_subsequence[prev] + array[i] + # is a valid increasing subsequence + + # longest_increasing_subsequence[i] is only set to + # longest_increasing_subsequence[prev] + array[i] if the length is longer. + + if array[prev] <= array[i] and len( + longest_increasing_subsequence[prev] + ) + 1 > len(longest_increasing_subsequence[i]): + longest_increasing_subsequence[i] = copy.copy( + longest_increasing_subsequence[prev] + ) + longest_increasing_subsequence[i].append(array[i]) + + result: list[int] = [] + for i in range(n): + if len(longest_increasing_subsequence[i]) > len(result): + result = longest_increasing_subsequence[i] + + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0040ad47f928f299dadbb97c5cea00bc1daf8c75 Mon Sep 17 00:00:00 2001 From: aydinomer00 <109145643+aydinomer00@users.noreply.github.com> Date: Wed, 15 Jan 2025 00:24:36 +0300 Subject: [PATCH 1515/1543] Add butterfly pattern implementation (#12493) * Add butterfly pattern implementation * Add butterfly pattern implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add finalized butterfly pattern implementation and test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Delete graphics/test_butterfly_pattern.py * Update butterfly_pattern.py * Update butterfly_pattern.py * Update butterfly_pattern.py * Update butterfly_pattern.py * Update butterfly_pattern.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- graphics/butterfly_pattern.py | 46 +++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 graphics/butterfly_pattern.py diff --git a/graphics/butterfly_pattern.py b/graphics/butterfly_pattern.py new file mode 100644 index 000000000000..7913b03a7e95 --- /dev/null +++ b/graphics/butterfly_pattern.py @@ -0,0 +1,46 @@ +def butterfly_pattern(n: int) -> str: + """ + Creates a butterfly pattern of size n and returns it as a string. 
+ + >>> print(butterfly_pattern(3)) + * * + ** ** + ***** + ** ** + * * + >>> print(butterfly_pattern(5)) + * * + ** ** + *** *** + **** **** + ********* + **** **** + *** *** + ** ** + * * + """ + result = [] + + # Upper part + for i in range(1, n): + left_stars = "*" * i + spaces = " " * (2 * (n - i) - 1) + right_stars = "*" * i + result.append(left_stars + spaces + right_stars) + + # Middle part + result.append("*" * (2 * n - 1)) + + # Lower part + for i in range(n - 1, 0, -1): + left_stars = "*" * i + spaces = " " * (2 * (n - i) - 1) + right_stars = "*" * i + result.append(left_stars + spaces + right_stars) + + return "\n".join(result) + + +if __name__ == "__main__": + n = int(input("Enter the size of the butterfly pattern: ")) + print(butterfly_pattern(n)) From 533767ff46bbcf5c594ff8196894ae2e8130bc3e Mon Sep 17 00:00:00 2001 From: Nguyen Thi Thanh Minh <140883075+minh-swinburne@users.noreply.github.com> Date: Sat, 18 Jan 2025 10:07:44 +0700 Subject: [PATCH 1516/1543] Doomsday Algorithm: Fix leap year check (#12396) * Fix leap year check Replace `!=` in `(year % 400) != 0` (line 49) with `==` Justification: Years that are divisible by 100 (centurian == 100) but not by 400 (year % 400 != 0) are skipped and NOT leap year. * Update parentheses Correct the parentheses to make clear the precedence of the conditional check * Update other/doomsday.py Co-authored-by: Tianyi Zheng --------- Co-authored-by: Tianyi Zheng --- other/doomsday.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/other/doomsday.py b/other/doomsday.py index d8fe261156a1..be3b18eeecaa 100644 --- a/other/doomsday.py +++ b/other/doomsday.py @@ -46,7 +46,7 @@ def get_week_day(year: int, month: int, day: int) -> str: ) % 7 day_anchor = ( DOOMSDAY_NOT_LEAP[month - 1] - if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0) + if year % 4 != 0 or (centurian == 0 and year % 400 != 0) else DOOMSDAY_LEAP[month - 1] ) week_day = (dooms_day + day - day_anchor) % 7 From 91ebea1d99735ee2798b01ebcea0fc06e9a6af49 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 19 Jan 2025 08:33:35 +0100 Subject: [PATCH 1517/1543] Sphinx runs on ubuntu 24.04 arm (#12530) * Speed up our Sphinx GitHub Action with ARM # `runs-on: ubuntu-24.04-arm` https://docs.github.com/en/actions/using-github-hosted-runners/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources * updating DIRECTORY.md --------- Co-authored-by: cclauss --- .github/workflows/sphinx.yml | 2 +- DIRECTORY.md | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index d02435d98028..16ff284a74f2 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -23,7 +23,7 @@ concurrency: jobs: build_docs: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04-arm steps: - uses: actions/checkout@v4 - uses: astral-sh/setup-uv@v5 diff --git a/DIRECTORY.md b/DIRECTORY.md index aad6c72aa8ee..941e30dfe721 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -377,6 +377,7 @@ * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) * [Longest Common Substring](dynamic_programming/longest_common_substring.py) * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) + * [Longest Increasing Subsequence Iterative](dynamic_programming/longest_increasing_subsequence_iterative.py) * [Longest Increasing Subsequence O Nlogn](dynamic_programming/longest_increasing_subsequence_o_nlogn.py) * [Longest 
Palindromic Subsequence](dynamic_programming/longest_palindromic_subsequence.py) * [Matrix Chain Multiplication](dynamic_programming/matrix_chain_multiplication.py) @@ -462,6 +463,7 @@ ## Graphics * [Bezier Curve](graphics/bezier_curve.py) + * [Butterfly Pattern](graphics/butterfly_pattern.py) * [Digital Differential Analyzer Line](graphics/digital_differential_analyzer_line.py) * [Vector3 For 2D Rendering](graphics/vector3_for_2d_rendering.py) From 1f74db0c06df7557e7ae3a17ebcc303f753f824e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 21:22:02 +0100 Subject: [PATCH 1518/1543] [pre-commit.ci] pre-commit autoupdate (#12536) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.1 → v0.9.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.1...v0.9.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3b1dd9658d7f..c4480f47faa1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.1 + rev: v0.9.2 hooks: - id: ruff - id: ruff-format From 9fb51b4169e0f7a4952e9eb460b91f4d7ffb819f Mon Sep 17 00:00:00 2001 From: Ronald Ngounou <74538524+ronaldngounou@users.noreply.github.com> Date: Thu, 23 Jan 2025 09:02:46 +0100 Subject: [PATCH 1519/1543] Update docstrings in the functions definitions. (#11797) --- data_structures/arrays/sudoku_solver.py | 61 +++++++++++++++++-------- 1 file changed, 43 insertions(+), 18 deletions(-) diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index fd1a4f3e37b8..e1714e57ece8 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -9,7 +9,9 @@ def cross(items_a, items_b): - "Cross product of elements in A and elements in B." + """ + Cross product of elements in A and elements in B. + """ return [a + b for a in items_a for b in items_b] @@ -27,7 +29,7 @@ def cross(items_a, items_b): def test(): - "A set of unit tests." + """A set of unit tests.""" assert len(squares) == 81 assert len(unitlist) == 27 assert all(len(units[s]) == 3 for s in squares) @@ -47,8 +49,10 @@ def test(): def parse_grid(grid): - """Convert grid to a dict of possible values, {square: digits}, or - return False if a contradiction is detected.""" + """ + Convert grid to a dict of possible values, {square: digits}, or + return False if a contradiction is detected. + """ ## To start, every square can be any digit; then assign values from the grid. values = {s: digits for s in squares} for s, d in grid_values(grid).items(): @@ -58,15 +62,19 @@ def parse_grid(grid): def grid_values(grid): - "Convert grid into a dict of {square: char} with '0' or '.' for empties." + """ + Convert grid into a dict of {square: char} with '0' or '.' for empties. + """ chars = [c for c in grid if c in digits or c in "0."] assert len(chars) == 81 return dict(zip(squares, chars)) def assign(values, s, d): - """Eliminate all the other values (except d) from values[s] and propagate. - Return values, except return False if a contradiction is detected.""" + """ + Eliminate all the other values (except d) from values[s] and propagate. 
+ Return values, except return False if a contradiction is detected. + """ other_values = values[s].replace(d, "") if all(eliminate(values, s, d2) for d2 in other_values): return values @@ -75,8 +83,10 @@ def assign(values, s, d): def eliminate(values, s, d): - """Eliminate d from values[s]; propagate when values or places <= 2. - Return values, except return False if a contradiction is detected.""" + """ + Eliminate d from values[s]; propagate when values or places <= 2. + Return values, except return False if a contradiction is detected. + """ if d not in values[s]: return values ## Already eliminated values[s] = values[s].replace(d, "") @@ -99,7 +109,9 @@ def eliminate(values, s, d): def display(values): - "Display these values as a 2-D grid." + """ + Display these values as a 2-D grid. + """ width = 1 + max(len(values[s]) for s in squares) line = "+".join(["-" * (width * 3)] * 3) for r in rows: @@ -114,11 +126,14 @@ def display(values): def solve(grid): + """ + Solve the grid. + """ return search(parse_grid(grid)) def some(seq): - "Return some element of seq that is true." + """Return some element of seq that is true.""" for e in seq: if e: return e @@ -126,7 +141,9 @@ def some(seq): def search(values): - "Using depth-first search and propagation, try all possible values." + """ + Using depth-first search and propagation, try all possible values. + """ if values is False: return False ## Failed earlier if all(len(values[s]) == 1 for s in squares): @@ -137,9 +154,11 @@ def search(values): def solve_all(grids, name="", showif=0.0): - """Attempt to solve a sequence of grids. Report results. + """ + Attempt to solve a sequence of grids. Report results. When showif is a number of seconds, display puzzles that take longer. - When showif is None, don't display any puzzles.""" + When showif is None, don't display any puzzles. + """ def time_solve(grid): start = time.monotonic() @@ -162,7 +181,9 @@ def time_solve(grid): def solved(values): - "A puzzle is solved if each unit is a permutation of the digits 1 to 9." + """ + A puzzle is solved if each unit is a permutation of the digits 1 to 9. + """ def unitsolved(unit): return {values[s] for s in unit} == set(digits) @@ -177,9 +198,11 @@ def from_file(filename, sep="\n"): def random_puzzle(assignments=17): - """Make a random puzzle with N or more assignments. Restart on contradictions. + """ + Make a random puzzle with N or more assignments. Restart on contradictions. Note the resulting puzzle is not guaranteed to be solvable, but empirically - about 99.8% of them are solvable. Some have multiple solutions.""" + about 99.8% of them are solvable. Some have multiple solutions. + """ values = {s: digits for s in squares} for s in shuffled(squares): if not assign(values, s, random.choice(values[s])): @@ -191,7 +214,9 @@ def random_puzzle(assignments=17): def shuffled(seq): - "Return a randomly shuffled copy of the input sequence." + """ + Return a randomly shuffled copy of the input sequence. 
+ """ seq = list(seq) random.shuffle(seq) return seq From c666db3729b6d9f73e2f7756a3974f53279caa50 Mon Sep 17 00:00:00 2001 From: Vijayalaxmi Wakode Date: Fri, 24 Jan 2025 03:31:47 +0530 Subject: [PATCH 1520/1543] Add Doc test bubble sort (#12070) * The string manipulation - replace() * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update replace.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updating DIRECTORY.md * Add doc test to bubble_sort * Update DIRECTORY.md * Delete strings/replace.py * Update bubble_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: vijayalaxmi777 Co-authored-by: Maxim Smolskiy --- sorts/bubble_sort.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sorts/bubble_sort.py b/sorts/bubble_sort.py index bdf85c70dd35..9ec3d5384f38 100644 --- a/sorts/bubble_sort.py +++ b/sorts/bubble_sort.py @@ -85,6 +85,8 @@ def bubble_sort_recursive(collection: list[Any]) -> list[Any]: [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7] >>> bubble_sort_recursive([1, 3.3, 5, 7.7, 2, 4.4, 6]) [1, 2, 3.3, 4.4, 5, 6, 7.7] + >>> bubble_sort_recursive(['a', 'Z', 'B', 'C', 'A', 'c']) + ['A', 'B', 'C', 'Z', 'a', 'c'] >>> import random >>> collection_arg = random.sample(range(-50, 50), 100) >>> bubble_sort_recursive(collection_arg) == sorted(collection_arg) From 13e4d3e76cfaa74d8b14314d319fb6c089aa051e Mon Sep 17 00:00:00 2001 From: Rachel Spears <103690982+Rosepetal2022@users.noreply.github.com> Date: Thu, 23 Jan 2025 21:59:36 -0800 Subject: [PATCH 1521/1543] Fix error in avl_tree del_node function (#11510) * fixed error in del_node function * Update avl_tree.py --------- Co-authored-by: Maxim Smolskiy --- data_structures/binary_tree/avl_tree.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/data_structures/binary_tree/avl_tree.py b/data_structures/binary_tree/avl_tree.py index 9fca7237404c..8558305eefe4 100644 --- a/data_structures/binary_tree/avl_tree.py +++ b/data_structures/binary_tree/avl_tree.py @@ -221,6 +221,10 @@ def del_node(root: MyNode, data: Any) -> MyNode | None: else: root.set_right(del_node(right_child, data)) + # Re-fetch left_child and right_child references + left_child = root.get_left() + right_child = root.get_right() + if get_height(right_child) - get_height(left_child) == 2: assert right_child is not None if get_height(right_child.get_right()) > get_height(right_child.get_left()): From 6c92c5a539276d387b85eedc89be1f888962647d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 22:05:20 +0100 Subject: [PATCH 1522/1543] [pre-commit.ci] pre-commit autoupdate (#12542) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.2 → v0.9.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.2...v0.9.3) - [github.com/codespell-project/codespell: v2.3.0 → v2.4.0](https://github.com/codespell-project/codespell/compare/v2.3.0...v2.4.0) * Update trifid_cipher.py * Update pyproject.toml * Update trifid_cipher.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- ciphers/trifid_cipher.py | 4 ++-- pyproject.toml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git 
a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c4480f47faa1..e34b563b05dd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,13 +16,13 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.2 + rev: v0.9.3 hooks: - id: ruff - id: ruff-format - repo: https://github.com/codespell-project/codespell - rev: v2.3.0 + rev: v2.4.0 hooks: - id: codespell additional_dependencies: diff --git a/ciphers/trifid_cipher.py b/ciphers/trifid_cipher.py index 9613cee0669d..13a47e9dd03b 100644 --- a/ciphers/trifid_cipher.py +++ b/ciphers/trifid_cipher.py @@ -88,7 +88,7 @@ def __prepare( ... KeyError: 'Length of alphabet has to be 27.' - Testing with punctuations that are not in the given alphabet + Testing with punctuation not in the given alphabet >>> __prepare('am i a boy?','abCdeFghijkLmnopqrStuVwxYZ+') Traceback (most recent call last): @@ -128,7 +128,7 @@ def encrypt_message( encrypt_message =============== - Encrypts a message using the trifid_cipher. Any punctuatuions that + Encrypts a message using the trifid_cipher. Any punctuatuion chars that would be used should be added to the alphabet. PARAMETERS diff --git a/pyproject.toml b/pyproject.toml index 7b7176705c44..2135f1f5825a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -159,7 +159,7 @@ lint.pylint.max-returns = 8 # default: 6 lint.pylint.max-statements = 88 # default: 50 [tool.codespell] -ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" +ignore-words-list = "3rt,abd,aer,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" skip = "./.*,*.json,*.lock,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" [tool.pytest.ini_options] From e59d819d091efdb30e385f4ecfe9ab5d36c3be71 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 20:47:41 +0100 Subject: [PATCH 1523/1543] [pre-commit.ci] pre-commit autoupdate (#12554) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.3 → v0.9.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.3...v0.9.4) - [github.com/codespell-project/codespell: v2.4.0 → v2.4.1](https://github.com/codespell-project/codespell/compare/v2.4.0...v2.4.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e34b563b05dd..d9477e216b96 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,13 +16,13 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.3 + rev: v0.9.4 hooks: - id: ruff - id: ruff-format - repo: https://github.com/codespell-project/codespell - rev: v2.4.0 + rev: v2.4.1 hooks: - id: codespell additional_dependencies: From 338cbafe0d5b07d57f83060ea0f9ba3a6c1155e7 Mon Sep 17 00:00:00 2001 From: lighting9999 <120090117+lighting9999@users.noreply.github.com> Date: Mon, 10 Feb 2025 01:51:18 +0800 Subject: [PATCH 1524/1543] Improve power.py (#12567) * Fix And Add power.py To fix the inaccuracies and allow handling of negative exponents and bases, the key issue lies in how negative numbers are handled in the power calculation, especially when dividing. 
## Example Output: ```python >>> power(4, 6) 4096 >>> power(2, 3) 8 >>> power(-2, 3) -8 >>> power(2, -3) 0.125 >>> power(-2, -3) -0.125 ``` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update power.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update power.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update power.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update power.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- divide_and_conquer/power.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/divide_and_conquer/power.py b/divide_and_conquer/power.py index faf6a3476d40..492ee6dd12f0 100644 --- a/divide_and_conquer/power.py +++ b/divide_and_conquer/power.py @@ -1,4 +1,4 @@ -def actual_power(a: int, b: int): +def actual_power(a: int, b: int) -> int: """ Function using divide and conquer to calculate a^b. It only works for integer a,b. @@ -19,10 +19,12 @@ def actual_power(a: int, b: int): """ if b == 0: return 1 + half = actual_power(a, b // 2) + if (b % 2) == 0: - return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2)) + return half * half else: - return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2)) + return a * half * half def power(a: int, b: int) -> float: @@ -43,9 +45,9 @@ def power(a: int, b: int) -> float: -0.125 """ if b < 0: - return 1 / actual_power(a, b) + return 1 / actual_power(a, -b) return actual_power(a, b) if __name__ == "__main__": - print(power(-2, -3)) + print(power(-2, -3)) # output -0.125 From 738253e80030ffdd35ac57ff64cda816f85eda71 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 14 Feb 2025 10:05:23 +0100 Subject: [PATCH 1525/1543] git mv data_structures/queue data_structures/queues (#12577) Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 18 +++++++++--------- data_structures/{queue => queues}/__init__.py | 0 .../{queue => queues}/circular_queue.py | 10 +++++++--- .../circular_queue_linked_list.py | 0 .../{queue => queues}/double_ended_queue.py | 0 .../{queue => queues}/linked_queue.py | 0 .../priority_queue_using_list.py | 6 +++--- .../{queue => queues}/queue_by_list.py | 0 .../{queue => queues}/queue_by_two_stacks.py | 0 .../{queue => queues}/queue_on_pseudo_stack.py | 0 11 files changed, 21 insertions(+), 17 deletions(-) rename data_structures/{queue => queues}/__init__.py (100%) rename data_structures/{queue => queues}/circular_queue.py (87%) rename data_structures/{queue => queues}/circular_queue_linked_list.py (100%) rename data_structures/{queue => queues}/double_ended_queue.py (100%) rename data_structures/{queue => queues}/linked_queue.py (100%) rename data_structures/{queue => queues}/priority_queue_using_list.py (96%) rename data_structures/{queue => queues}/queue_by_list.py (100%) rename data_structures/{queue => queues}/queue_by_two_stacks.py (100%) rename data_structures/{queue => queues}/queue_on_pseudo_stack.py (100%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d9477e216b96..a603109fd79f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: 
https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.4 + rev: v0.9.6 hooks: - id: ruff - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.14.1 + rev: v1.15.0 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 941e30dfe721..a535f12cb59a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -275,15 +275,15 @@ * [Singly Linked List](data_structures/linked_list/singly_linked_list.py) * [Skip List](data_structures/linked_list/skip_list.py) * [Swap Nodes](data_structures/linked_list/swap_nodes.py) - * Queue - * [Circular Queue](data_structures/queue/circular_queue.py) - * [Circular Queue Linked List](data_structures/queue/circular_queue_linked_list.py) - * [Double Ended Queue](data_structures/queue/double_ended_queue.py) - * [Linked Queue](data_structures/queue/linked_queue.py) - * [Priority Queue Using List](data_structures/queue/priority_queue_using_list.py) - * [Queue By List](data_structures/queue/queue_by_list.py) - * [Queue By Two Stacks](data_structures/queue/queue_by_two_stacks.py) - * [Queue On Pseudo Stack](data_structures/queue/queue_on_pseudo_stack.py) + * Queues + * [Circular Queue](data_structures/queues/circular_queue.py) + * [Circular Queue Linked List](data_structures/queues/circular_queue_linked_list.py) + * [Double Ended Queue](data_structures/queues/double_ended_queue.py) + * [Linked Queue](data_structures/queues/linked_queue.py) + * [Priority Queue Using List](data_structures/queues/priority_queue_using_list.py) + * [Queue By List](data_structures/queues/queue_by_list.py) + * [Queue By Two Stacks](data_structures/queues/queue_by_two_stacks.py) + * [Queue On Pseudo Stack](data_structures/queues/queue_on_pseudo_stack.py) * Stacks * [Balanced Parentheses](data_structures/stacks/balanced_parentheses.py) * [Dijkstras Two Stack Algorithm](data_structures/stacks/dijkstras_two_stack_algorithm.py) diff --git a/data_structures/queue/__init__.py b/data_structures/queues/__init__.py similarity index 100% rename from data_structures/queue/__init__.py rename to data_structures/queues/__init__.py diff --git a/data_structures/queue/circular_queue.py b/data_structures/queues/circular_queue.py similarity index 87% rename from data_structures/queue/circular_queue.py rename to data_structures/queues/circular_queue.py index f2fb4c01e467..efbf1efdc42d 100644 --- a/data_structures/queue/circular_queue.py +++ b/data_structures/queues/circular_queue.py @@ -17,7 +17,9 @@ def __len__(self) -> int: >>> len(cq) 0 >>> cq.enqueue("A") # doctest: +ELLIPSIS - >> cq.array + ['A', None, None, None, None] >>> len(cq) 1 """ @@ -51,11 +53,13 @@ def enqueue(self, data): as an index. 
>>> cq = CircularQueue(5) >>> cq.enqueue("A") # doctest: +ELLIPSIS - >> (cq.size, cq.first()) (1, 'A') >>> cq.enqueue("B") # doctest: +ELLIPSIS - >> cq.array + ['A', 'B', None, None, None] >>> (cq.size, cq.first()) (2, 'A') """ diff --git a/data_structures/queue/circular_queue_linked_list.py b/data_structures/queues/circular_queue_linked_list.py similarity index 100% rename from data_structures/queue/circular_queue_linked_list.py rename to data_structures/queues/circular_queue_linked_list.py diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queues/double_ended_queue.py similarity index 100% rename from data_structures/queue/double_ended_queue.py rename to data_structures/queues/double_ended_queue.py diff --git a/data_structures/queue/linked_queue.py b/data_structures/queues/linked_queue.py similarity index 100% rename from data_structures/queue/linked_queue.py rename to data_structures/queues/linked_queue.py diff --git a/data_structures/queue/priority_queue_using_list.py b/data_structures/queues/priority_queue_using_list.py similarity index 96% rename from data_structures/queue/priority_queue_using_list.py rename to data_structures/queues/priority_queue_using_list.py index f61b5e8e664d..15e56c557069 100644 --- a/data_structures/queue/priority_queue_using_list.py +++ b/data_structures/queues/priority_queue_using_list.py @@ -59,12 +59,12 @@ class FixedPriorityQueue: >>> fpq.dequeue() Traceback (most recent call last): ... - data_structures.queue.priority_queue_using_list.UnderFlowError: All queues are empty + data_structures.queues.priority_queue_using_list.UnderFlowError: All queues are empty >>> print(fpq) Priority 0: [] Priority 1: [] Priority 2: [] - """ + """ # noqa: E501 def __init__(self): self.queues = [ @@ -141,7 +141,7 @@ class ElementPriorityQueue: >>> epq.dequeue() Traceback (most recent call last): ... 
- data_structures.queue.priority_queue_using_list.UnderFlowError: The queue is empty + data_structures.queues.priority_queue_using_list.UnderFlowError: The queue is empty >>> print(epq) [] """ diff --git a/data_structures/queue/queue_by_list.py b/data_structures/queues/queue_by_list.py similarity index 100% rename from data_structures/queue/queue_by_list.py rename to data_structures/queues/queue_by_list.py diff --git a/data_structures/queue/queue_by_two_stacks.py b/data_structures/queues/queue_by_two_stacks.py similarity index 100% rename from data_structures/queue/queue_by_two_stacks.py rename to data_structures/queues/queue_by_two_stacks.py diff --git a/data_structures/queue/queue_on_pseudo_stack.py b/data_structures/queues/queue_on_pseudo_stack.py similarity index 100% rename from data_structures/queue/queue_on_pseudo_stack.py rename to data_structures/queues/queue_on_pseudo_stack.py From a5aed92b4c20fd3e99c6e7a9202afcc9cf502883 Mon Sep 17 00:00:00 2001 From: Maxim Evtush <154841002+maximevtush@users.noreply.github.com> Date: Thu, 20 Feb 2025 21:09:01 +0100 Subject: [PATCH 1526/1543] fix: typo in data_structures/linked_list/from_sequence.py (#12584) --- data_structures/linked_list/from_sequence.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/linked_list/from_sequence.py b/data_structures/linked_list/from_sequence.py index 94b44f15037f..fa43f4d10e08 100644 --- a/data_structures/linked_list/from_sequence.py +++ b/data_structures/linked_list/from_sequence.py @@ -1,4 +1,4 @@ -# Recursive Prorgam to create a Linked List from a sequence and +# Recursive Program to create a Linked List from a sequence and # print a string representation of it. From 183fa06f40e80c6e86ceda6e7c7d23eaf91507ac Mon Sep 17 00:00:00 2001 From: sector <104625848+infrablue1@users.noreply.github.com> Date: Sat, 22 Feb 2025 16:16:29 +0800 Subject: [PATCH 1527/1543] Fix n-queens problem (#12583) * Fix n-queens problem * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update n_queens.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update n_queens.py * Update n_queens.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- backtracking/n_queens.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/backtracking/n_queens.py b/backtracking/n_queens.py index 81668b17a0ac..d10181f319b3 100644 --- a/backtracking/n_queens.py +++ b/backtracking/n_queens.py @@ -27,21 +27,28 @@ def is_safe(board: list[list[int]], row: int, column: int) -> bool: >>> is_safe([[0, 0, 0], [0, 0, 0], [0, 0, 0]], 1, 1) True + >>> is_safe([[0, 1, 0], [0, 0, 0], [0, 0, 0]], 1, 1) + False >>> is_safe([[1, 0, 0], [0, 0, 0], [0, 0, 0]], 1, 1) False + >>> is_safe([[0, 0, 1], [0, 0, 0], [0, 0, 0]], 1, 1) + False """ n = len(board) # Size of the board - # Check if there is any queen in the same row, column, - # left upper diagonal, and right upper diagonal + # Check if there is any queen in the same upper column, + # left upper diagonal and right upper diagonal return ( - all(board[i][j] != 1 for i, j in zip(range(row, -1, -1), range(column, n))) + all(board[i][j] != 1 for i, j in zip(range(row), [column] * row)) + and all( + board[i][j] != 1 + for i, j in zip(range(row - 1, -1, -1), range(column - 1, -1, -1)) + ) and all( - board[i][j] != 1 for i, j in zip(range(row, -1, -1), range(column, -1, -1)) 
+ board[i][j] != 1 + for i, j in zip(range(row - 1, -1, -1), range(column + 1, n)) ) - and all(board[i][j] != 1 for i, j in zip(range(row, n), range(column, n))) - and all(board[i][j] != 1 for i, j in zip(range(row, n), range(column, -1, -1))) ) From 114d4283b98e52396e2460c802f18d45eeacd90c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 24 Feb 2025 19:27:10 +0100 Subject: [PATCH 1528/1543] [pre-commit.ci] pre-commit autoupdate (#12591) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.6 → v0.9.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.6...v0.9.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a603109fd79f..8de90b11767f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.6 + rev: v0.9.7 hooks: - id: ruff - id: ruff-format From f528ce350b366ce40e0494fc94da65cfd4509c7d Mon Sep 17 00:00:00 2001 From: Sanjay Muthu Date: Thu, 27 Feb 2025 17:01:08 +0530 Subject: [PATCH 1529/1543] Added dynamic_programming/range_sum_query.py (#12592) * Create prefix_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix pre-commit and ruff errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename prefix_sum.py to range_sum_query.py * Refactor description * Fix * Refactor code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- dynamic_programming/range_sum_query.py | 92 ++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 dynamic_programming/range_sum_query.py diff --git a/dynamic_programming/range_sum_query.py b/dynamic_programming/range_sum_query.py new file mode 100644 index 000000000000..484fcf785fda --- /dev/null +++ b/dynamic_programming/range_sum_query.py @@ -0,0 +1,92 @@ +""" +Author: Sanjay Muthu + +This is an implementation of the Dynamic Programming solution to the Range Sum Query. + +The problem statement is: + Given an array and q queries, + each query stating you to find the sum of elements from l to r (inclusive) + +Example: + arr = [1, 4, 6, 2, 61, 12] + queries = 3 + l_1 = 2, r_1 = 5 + l_2 = 1, r_2 = 5 + l_3 = 3, r_3 = 4 + + as input will return + + [81, 85, 63] + + as output + +0-indexing: +NOTE: 0-indexing means the indexing of the array starts from 0 +Example: a = [1, 2, 3, 4, 5, 6] + Here, the 0th index of a is 1, + the 1st index of a is 2, + and so forth + +Time Complexity: O(N + Q) +* O(N) pre-calculation time to calculate the prefix sum array +* and O(1) time per each query = O(1 * Q) = O(Q) time + +Space Complexity: O(N) +* O(N) to store the prefix sum + +Algorithm: +So, first we calculate the prefix sum (dp) of the array. +The prefix sum of the index i is the sum of all elements indexed +from 0 to i (inclusive). +The prefix sum of the index i is the prefix sum of index (i - 1) + the current element. +So, the state of the dp is dp[i] = dp[i - 1] + a[i]. 
+ +After we calculate the prefix sum, +for each query [l, r] +the answer is dp[r] - dp[l - 1] (we need to be careful because l might be 0). +For example take this array: + [4, 2, 1, 6, 3] +The prefix sum calculated for this array would be: + [4, 4 + 2, 4 + 2 + 1, 4 + 2 + 1 + 6, 4 + 2 + 1 + 6 + 3] + ==> [4, 6, 7, 13, 16] +If the query was l = 3, r = 4, +the answer would be 6 + 3 = 9 but this would require O(r - l + 1) time ≈ O(N) time + +If we use prefix sums we can find it in O(1) by using the formula +prefix[r] - prefix[l - 1]. +This formula works because prefix[r] is the sum of elements from [0, r] +and prefix[l - 1] is the sum of elements from [0, l - 1], +so if we do prefix[r] - prefix[l - 1] it will be +[0, r] - [0, l - 1] = [0, l - 1] + [l, r] - [0, l - 1] = [l, r] +""" + + +def prefix_sum(array: list[int], queries: list[tuple[int, int]]) -> list[int]: + """ + >>> prefix_sum([1, 4, 6, 2, 61, 12], [(2, 5), (1, 5), (3, 4)]) + [81, 85, 63] + >>> prefix_sum([4, 2, 1, 6, 3], [(3, 4), (1, 3), (0, 2)]) + [9, 9, 7] + """ + # The prefix sum array + dp = [0] * len(array) + dp[0] = array[0] + for i in range(1, len(array)): + dp[i] = dp[i - 1] + array[i] + + # See Algorithm section (Line 44) + result = [] + for query in queries: + left, right = query + res = dp[right] + if left > 0: + res -= dp[left - 1] + result.append(res) + + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 8826ad3a4d75f7a4e1d7b1a682682528c2c73672 Mon Sep 17 00:00:00 2001 From: PARIKSHIT SINGH <90330646+parikshit2111@users.noreply.github.com> Date: Sun, 2 Mar 2025 16:33:12 +0530 Subject: [PATCH 1530/1543] feat: Implement Principal Component Analysis (PCA) (#12596) - Added PCA implementation with dataset standardization. - Used Singular Value Decomposition (SVD) for computing principal components. - Fixed import sorting to comply with PEP 8 (Ruff I001). - Ensured type hints and docstrings for better readability. - Added doctests to validate correctness. - Passed all Ruff checks and automated tests. 
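The patch below standardizes the features with `StandardScaler` and then fits scikit-learn's `PCA`, which computes the principal components via SVD internally. The sketch that follows is illustrative only and not part of the patch: it shows the same standardize-then-SVD pipeline with plain NumPy, and the names `pca_via_svd` and `sample` are hypothetical.

```python
# Illustrative only, not part of the patch. A NumPy-only sketch of the
# standardize-then-SVD pipeline that the new module delegates to
# StandardScaler + sklearn's PCA; pca_via_svd and sample are hypothetical names.
import numpy as np


def pca_via_svd(data: np.ndarray, n_components: int) -> tuple[np.ndarray, np.ndarray]:
    # Standardize each feature to zero mean and unit variance.
    standardized = (data - data.mean(axis=0)) / data.std(axis=0)
    # Thin SVD: the rows of vt are the principal directions.
    _, singular_values, vt = np.linalg.svd(standardized, full_matrices=False)
    # Project the standardized data onto the leading directions.
    transformed = standardized @ vt[:n_components].T
    # Variance explained by each component, as a fraction of the total.
    explained_variance = singular_values**2 / (data.shape[0] - 1)
    variance_ratio = explained_variance[:n_components] / explained_variance.sum()
    return transformed, variance_ratio


if __name__ == "__main__":
    rng = np.random.default_rng(seed=0)
    sample = rng.normal(size=(150, 4))  # same shape as the Iris feature matrix
    reduced, ratio = pca_via_svd(sample, n_components=2)
    print(reduced.shape, ratio)  # (150, 2) and two ratios that sum to <= 1
```

Either route spans the same subspace up to the sign of each component, which is one reason the doctests in the patch check only the transformed shape and the length of the variance ratio.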
--- DIRECTORY.md | 2 + .../principle_component_analysis.py | 85 +++++++++++++++++++ 2 files changed, 87 insertions(+) create mode 100644 machine_learning/principle_component_analysis.py diff --git a/DIRECTORY.md b/DIRECTORY.md index a535f12cb59a..ab3259b9a766 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -395,6 +395,7 @@ * [Minimum Tickets Cost](dynamic_programming/minimum_tickets_cost.py) * [Optimal Binary Search Tree](dynamic_programming/optimal_binary_search_tree.py) * [Palindrome Partitioning](dynamic_programming/palindrome_partitioning.py) + * [Range Sum Query](dynamic_programming/range_sum_query.py) * [Regex Match](dynamic_programming/regex_match.py) * [Rod Cutting](dynamic_programming/rod_cutting.py) * [Smith Waterman](dynamic_programming/smith_waterman.py) @@ -608,6 +609,7 @@ * [Mfcc](machine_learning/mfcc.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polynomial Regression](machine_learning/polynomial_regression.py) + * [Principle Component Analysis](machine_learning/principle_component_analysis.py) * [Scoring Functions](machine_learning/scoring_functions.py) * [Self Organizing Map](machine_learning/self_organizing_map.py) * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) diff --git a/machine_learning/principle_component_analysis.py b/machine_learning/principle_component_analysis.py new file mode 100644 index 000000000000..46ccdb968494 --- /dev/null +++ b/machine_learning/principle_component_analysis.py @@ -0,0 +1,85 @@ +""" +Principal Component Analysis (PCA) is a dimensionality reduction technique +used in machine learning. It transforms high-dimensional data into a lower-dimensional +representation while retaining as much variance as possible. + +This implementation follows best practices, including: +- Standardizing the dataset. +- Computing principal components using Singular Value Decomposition (SVD). +- Returning transformed data and explained variance ratio. +""" + +import doctest + +import numpy as np +from sklearn.datasets import load_iris +from sklearn.decomposition import PCA +from sklearn.preprocessing import StandardScaler + + +def collect_dataset() -> tuple[np.ndarray, np.ndarray]: + """ + Collects the dataset (Iris dataset) and returns feature matrix and target values. + + :return: Tuple containing feature matrix (X) and target labels (y) + + Example: + >>> X, y = collect_dataset() + >>> X.shape + (150, 4) + >>> y.shape + (150,) + """ + data = load_iris() + return np.array(data.data), np.array(data.target) + + +def apply_pca(data_x: np.ndarray, n_components: int) -> tuple[np.ndarray, np.ndarray]: + """ + Applies Principal Component Analysis (PCA) to reduce dimensionality. + + :param data_x: Original dataset (features) + :param n_components: Number of principal components to retain + :return: Tuple containing transformed dataset and explained variance ratio + + Example: + >>> X, _ = collect_dataset() + >>> transformed_X, variance = apply_pca(X, 2) + >>> transformed_X.shape + (150, 2) + >>> len(variance) == 2 + True + """ + # Standardizing the dataset + scaler = StandardScaler() + data_x_scaled = scaler.fit_transform(data_x) + + # Applying PCA + pca = PCA(n_components=n_components) + principal_components = pca.fit_transform(data_x_scaled) + + return principal_components, pca.explained_variance_ratio_ + + +def main() -> None: + """ + Driver function to execute PCA and display results. 
+ """ + data_x, data_y = collect_dataset() + + # Number of principal components to retain + n_components = 2 + + # Apply PCA + transformed_data, variance_ratio = apply_pca(data_x, n_components) + + print("Transformed Dataset (First 5 rows):") + print(transformed_data[:5]) + + print("\nExplained Variance Ratio:") + print(variance_ratio) + + +if __name__ == "__main__": + doctest.testmod() + main() From fff34ed528a7c1af373aeae68693d67639ff616b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 3 Mar 2025 19:10:41 +0100 Subject: [PATCH 1531/1543] [pre-commit.ci] pre-commit autoupdate (#12599) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.7 → v0.9.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.7...v0.9.9) - [github.com/tox-dev/pyproject-fmt: v2.5.0 → v2.5.1](https://github.com/tox-dev/pyproject-fmt/compare/v2.5.0...v2.5.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8de90b11767f..a0952928a775 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.7 + rev: v0.9.9 hooks: - id: ruff - id: ruff-format @@ -29,7 +29,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "v2.5.0" + rev: "v2.5.1" hooks: - id: pyproject-fmt From a415a953c3f1bb741f14a4ba06f067e6d94653ed Mon Sep 17 00:00:00 2001 From: Ankana Pari <143877643+ankana2113@users.noreply.github.com> Date: Sun, 9 Mar 2025 03:05:07 +0530 Subject: [PATCH 1532/1543] Add largest rectangle histogram (#12269) * added ridge regression * added ridge regression * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added ridge regression * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ridge regression * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolved errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolved conflicts * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ruff and minor checks * minor chenges * minor checks * minor checks * minor changes * descriptive names * Fix ruff check in loss_functions.py * fixed pre-commit issues * added largest rectangle histogram function * added largest rectangle histogram function * Update frequent_pattern_growth.py * Update loss_functions.py * Delete machine_learning/ridge_regression/__init__.py * Delete machine_learning/ridge_regression/ADRvsRating.csv * Delete machine_learning/ridge_regression/ridge_regression.py * Delete machine_learning/ridge_regression/test_ridge_regression.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- .../stacks/largest_rectangle_histogram.py | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 data_structures/stacks/largest_rectangle_histogram.py diff --git 
a/data_structures/stacks/largest_rectangle_histogram.py b/data_structures/stacks/largest_rectangle_histogram.py new file mode 100644 index 000000000000..7575bd9f628d --- /dev/null +++ b/data_structures/stacks/largest_rectangle_histogram.py @@ -0,0 +1,39 @@ +def largest_rectangle_area(heights: list[int]) -> int: + """ + Inputs an array of integers representing the heights of bars, + and returns the area of the largest rectangle that can be formed + + >>> largest_rectangle_area([2, 1, 5, 6, 2, 3]) + 10 + + >>> largest_rectangle_area([2, 4]) + 4 + + >>> largest_rectangle_area([6, 2, 5, 4, 5, 1, 6]) + 12 + + >>> largest_rectangle_area([1]) + 1 + """ + stack: list[int] = [] + max_area = 0 + heights = [*heights, 0] # make a new list by appending the sentinel 0 + n = len(heights) + + for i in range(n): + # make sure the stack remains in increasing order + while stack and heights[i] < heights[stack[-1]]: + h = heights[stack.pop()] # height of the bar + # if stack is empty, it means entire width can be taken from index 0 to i-1 + w = i if not stack else i - stack[-1] - 1 # calculate width + max_area = max(max_area, h * w) + + stack.append(i) + + return max_area + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 4fbd350b6e08aeb22741d694ce2e64182c66ac92 Mon Sep 17 00:00:00 2001 From: PAUL ADUTWUM Date: Sat, 8 Mar 2025 16:47:04 -0500 Subject: [PATCH 1533/1543] Improved test coverage in decimal_to_fraction.py (#12608) * Imporved test coverage in decimal_to_fraction.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update decimal_to_fraction.py * Update decimal_to_fraction.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- maths/decimal_to_fraction.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/maths/decimal_to_fraction.py b/maths/decimal_to_fraction.py index 2aa8e3c3dfd6..7f1299b33c5c 100644 --- a/maths/decimal_to_fraction.py +++ b/maths/decimal_to_fraction.py @@ -16,6 +16,20 @@ def decimal_to_fraction(decimal: float | str) -> tuple[int, int]: >>> decimal_to_fraction("78td") Traceback (most recent call last): ValueError: Please enter a valid number + >>> decimal_to_fraction(0) + (0, 1) + >>> decimal_to_fraction(-2.5) + (-5, 2) + >>> decimal_to_fraction(0.125) + (1, 8) + >>> decimal_to_fraction(1000000.25) + (4000001, 4) + >>> decimal_to_fraction(1.3333) + (13333, 10000) + >>> decimal_to_fraction("1.23e2") + (123, 1) + >>> decimal_to_fraction("0.500") + (1, 2) """ try: decimal = float(decimal) From e3fb5309da98e2d07699ae39eb0a55836a063532 Mon Sep 17 00:00:00 2001 From: PAUL ADUTWUM Date: Sat, 8 Mar 2025 16:52:20 -0500 Subject: [PATCH 1534/1543] Improve decimal_to_fraction.py (#12611) * Update decimal_to_fraction.py * Update decimal_to_fraction.py --------- Co-authored-by: Maxim Smolskiy --- maths/decimal_to_fraction.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/maths/decimal_to_fraction.py b/maths/decimal_to_fraction.py index 7f1299b33c5c..be42b9fb3b5a 100644 --- a/maths/decimal_to_fraction.py +++ b/maths/decimal_to_fraction.py @@ -48,8 +48,8 @@ def decimal_to_fraction(decimal: float | str) -> tuple[int, int]: if remainder == 0: break dividend, divisor = divisor, remainder - numerator, denominator = numerator / divisor, denominator / divisor - return int(numerator), int(denominator) + numerator, denominator = numerator // divisor, denominator // divisor + return numerator, denominator if 
__name__ == "__main__": From 23eb17462940e20b830aacce5d2eb80113a7f973 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 08:51:28 +0100 Subject: [PATCH 1535/1543] [pre-commit.ci] pre-commit autoupdate (#12614) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.9 → v0.9.10](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.9...v0.9.10) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a0952928a775..32580f8c7398 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.9 + rev: v0.9.10 hooks: - id: ruff - id: ruff-format diff --git a/DIRECTORY.md b/DIRECTORY.md index ab3259b9a766..1c02c191bd14 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -289,6 +289,7 @@ * [Dijkstras Two Stack Algorithm](data_structures/stacks/dijkstras_two_stack_algorithm.py) * [Infix To Postfix Conversion](data_structures/stacks/infix_to_postfix_conversion.py) * [Infix To Prefix Conversion](data_structures/stacks/infix_to_prefix_conversion.py) + * [Largest Rectangle Histogram](data_structures/stacks/largest_rectangle_histogram.py) * [Lexicographical Numbers](data_structures/stacks/lexicographical_numbers.py) * [Next Greater Element](data_structures/stacks/next_greater_element.py) * [Postfix Evaluation](data_structures/stacks/postfix_evaluation.py) From 7ce998b91c45090bd9c4cdfac6ed0220497b4810 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 11 Mar 2025 17:29:13 +0300 Subject: [PATCH 1536/1543] Fix some RUF012 per file ignores (#11399) * updating DIRECTORY.md * Fix some RUF012 per file ignores * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * Improve * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Improve * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: MaximSmolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- other/lfu_cache.py | 18 ++++++++++-------- other/lru_cache.py | 18 ++++++++++-------- pyproject.toml | 3 --- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/other/lfu_cache.py b/other/lfu_cache.py index 788fdf19bb60..5a143c739b9d 100644 --- a/other/lfu_cache.py +++ b/other/lfu_cache.py @@ -196,9 +196,6 @@ class LFUCache(Generic[T, U]): CacheInfo(hits=196, misses=100, capacity=100, current_size=100) """ - # class variable to map the decorator functions to their respective instance - decorator_function_to_instance_map: dict[Callable[[T], U], LFUCache[T, U]] = {} - def __init__(self, capacity: int): self.list: DoubleLinkedList[T, U] = DoubleLinkedList() self.capacity = capacity @@ -291,18 +288,23 @@ def decorator( """ def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]: + # variable to map the decorator functions 
to their respective instance + decorator_function_to_instance_map: dict[ + Callable[[T], U], LFUCache[T, U] + ] = {} + def cache_decorator_wrapper(*args: T) -> U: - if func not in cls.decorator_function_to_instance_map: - cls.decorator_function_to_instance_map[func] = LFUCache(size) + if func not in decorator_function_to_instance_map: + decorator_function_to_instance_map[func] = LFUCache(size) - result = cls.decorator_function_to_instance_map[func].get(args[0]) + result = decorator_function_to_instance_map[func].get(args[0]) if result is None: result = func(*args) - cls.decorator_function_to_instance_map[func].put(args[0], result) + decorator_function_to_instance_map[func].put(args[0], result) return result def cache_info() -> LFUCache[T, U]: - return cls.decorator_function_to_instance_map[func] + return decorator_function_to_instance_map[func] setattr(cache_decorator_wrapper, "cache_info", cache_info) # noqa: B010 diff --git a/other/lru_cache.py b/other/lru_cache.py index 1e5eeac45b4e..4f0c843c86cc 100644 --- a/other/lru_cache.py +++ b/other/lru_cache.py @@ -209,9 +209,6 @@ class LRUCache(Generic[T, U]): CacheInfo(hits=194, misses=99, capacity=100, current size=99) """ - # class variable to map the decorator functions to their respective instance - decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {} - def __init__(self, capacity: int): self.list: DoubleLinkedList[T, U] = DoubleLinkedList() self.capacity = capacity @@ -308,18 +305,23 @@ def decorator( """ def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]: + # variable to map the decorator functions to their respective instance + decorator_function_to_instance_map: dict[ + Callable[[T], U], LRUCache[T, U] + ] = {} + def cache_decorator_wrapper(*args: T) -> U: - if func not in cls.decorator_function_to_instance_map: - cls.decorator_function_to_instance_map[func] = LRUCache(size) + if func not in decorator_function_to_instance_map: + decorator_function_to_instance_map[func] = LRUCache(size) - result = cls.decorator_function_to_instance_map[func].get(args[0]) + result = decorator_function_to_instance_map[func].get(args[0]) if result is None: result = func(*args) - cls.decorator_function_to_instance_map[func].put(args[0], result) + decorator_function_to_instance_map[func].put(args[0], result) return result def cache_info() -> LRUCache[T, U]: - return cls.decorator_function_to_instance_map[func] + return decorator_function_to_instance_map[func] setattr(cache_decorator_wrapper, "cache_info", cache_info) # noqa: B010 diff --git a/pyproject.toml b/pyproject.toml index 2135f1f5825a..4a76c4ad6d11 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -135,9 +135,6 @@ lint.per-file-ignores."machine_learning/sequential_minimum_optimization.py" = [ lint.per-file-ignores."matrix/sherman_morrison.py" = [ "SIM103", ] -lint.per-file-ignores."other/l*u_cache.py" = [ - "RUF012", -] lint.per-file-ignores."physics/newtons_second_law_of_motion.py" = [ "BLE001", ] From edf7c372a9a6a3e01a33ef92021d958029e99319 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 09:53:49 +0100 Subject: [PATCH 1537/1543] [pre-commit.ci] pre-commit autoupdate (#12623) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.10 → v0.11.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.10...v0.11.0) - 
[github.com/abravalheri/validate-pyproject: v0.23 → v0.24](https://github.com/abravalheri/validate-pyproject/compare/v0.23...v0.24) * Fix ruff issues --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- conversions/prefix_conversions_string.py | 4 ++-- data_structures/arrays/sudoku_solver.py | 4 ++-- graphics/digital_differential_analyzer_line.py | 2 +- graphs/minimum_spanning_tree_prims2.py | 4 ++-- hashes/enigma_machine.py | 4 ++-- linear_algebra/src/test_linear_algebra.py | 2 +- maths/primelib.py | 2 +- other/davis_putnam_logemann_loveland.py | 2 +- other/quine.py | 2 +- project_euler/problem_028/sol1.py | 2 +- pyproject.toml | 1 + scripts/validate_filenames.py | 17 +++++++---------- sorts/external_sort.py | 2 +- strings/frequency_finder.py | 2 +- 15 files changed, 26 insertions(+), 28 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 32580f8c7398..5deb66a5e5a2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.10 + rev: v0.11.0 hooks: - id: ruff - id: ruff-format @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.23 + rev: v0.24 hooks: - id: validate-pyproject diff --git a/conversions/prefix_conversions_string.py b/conversions/prefix_conversions_string.py index 9344c9672a1f..c5fef49874ca 100644 --- a/conversions/prefix_conversions_string.py +++ b/conversions/prefix_conversions_string.py @@ -53,7 +53,7 @@ class SIUnit(Enum): yocto = -24 @classmethod - def get_positive(cls: type[T]) -> dict: + def get_positive(cls) -> dict: """ Returns a dictionary with only the elements of this enum that has a positive value @@ -68,7 +68,7 @@ def get_positive(cls: type[T]) -> dict: return {unit.name: unit.value for unit in cls if unit.value > 0} @classmethod - def get_negative(cls: type[T]) -> dict: + def get_negative(cls) -> dict: """ Returns a dictionary with only the elements of this enum that has a negative value diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index e1714e57ece8..4c722f12fd6e 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -54,7 +54,7 @@ def parse_grid(grid): return False if a contradiction is detected. """ ## To start, every square can be any digit; then assign values from the grid. - values = {s: digits for s in squares} + values = dict.fromkeys(squares, digits) for s, d in grid_values(grid).items(): if d in digits and not assign(values, s, d): return False ## (Fail if we can't assign d to square s.) @@ -203,7 +203,7 @@ def random_puzzle(assignments=17): Note the resulting puzzle is not guaranteed to be solvable, but empirically about 99.8% of them are solvable. Some have multiple solutions. 
""" - values = {s: digits for s in squares} + values = dict.fromkeys(squares, digits) for s in shuffled(squares): if not assign(values, s, random.choice(values[s])): break diff --git a/graphics/digital_differential_analyzer_line.py b/graphics/digital_differential_analyzer_line.py index a51cb0b8dc37..f7269ab09856 100644 --- a/graphics/digital_differential_analyzer_line.py +++ b/graphics/digital_differential_analyzer_line.py @@ -29,7 +29,7 @@ def digital_differential_analyzer_line( for _ in range(steps): x += x_increment y += y_increment - coordinates.append((int(round(x)), int(round(y)))) + coordinates.append((round(x), round(y))) return coordinates diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py index cc918f81dfe8..6870cc80f844 100644 --- a/graphs/minimum_spanning_tree_prims2.py +++ b/graphs/minimum_spanning_tree_prims2.py @@ -239,8 +239,8 @@ def prims_algo( 13 """ # prim's algorithm for minimum spanning tree - dist: dict[T, int] = {node: maxsize for node in graph.connections} - parent: dict[T, T | None] = {node: None for node in graph.connections} + dist: dict[T, int] = dict.fromkeys(graph.connections, maxsize) + parent: dict[T, T | None] = dict.fromkeys(graph.connections) priority_queue: MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): diff --git a/hashes/enigma_machine.py b/hashes/enigma_machine.py index d95437d12c34..0da8e4113de9 100644 --- a/hashes/enigma_machine.py +++ b/hashes/enigma_machine.py @@ -15,12 +15,12 @@ def rotator(): gear_one.append(i) del gear_one[0] gear_one_pos += 1 - if gear_one_pos % int(len(alphabets)) == 0: + if gear_one_pos % len(alphabets) == 0: i = gear_two[0] gear_two.append(i) del gear_two[0] gear_two_pos += 1 - if gear_two_pos % int(len(alphabets)) == 0: + if gear_two_pos % len(alphabets) == 0: i = gear_three[0] gear_three.append(i) del gear_three[0] diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py index fc5f90fd5cbe..5209c152013e 100644 --- a/linear_algebra/src/test_linear_algebra.py +++ b/linear_algebra/src/test_linear_algebra.py @@ -181,7 +181,7 @@ def test_component_matrix(self) -> None: test for Matrix method component() """ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - assert a.component(2, 1) == 7, 0.01 + assert a.component(2, 1) == 7, "0.01" def test__add__matrix(self) -> None: """ diff --git a/maths/primelib.py b/maths/primelib.py index 3a966e5cd936..9f031efc50a9 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -76,7 +76,7 @@ def is_prime(number: int) -> bool: if number <= 1: status = False - for divisor in range(2, int(round(sqrt(number))) + 1): + for divisor in range(2, round(sqrt(number)) + 1): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: diff --git a/other/davis_putnam_logemann_loveland.py b/other/davis_putnam_logemann_loveland.py index e95bf371a817..7d0bcce15a29 100644 --- a/other/davis_putnam_logemann_loveland.py +++ b/other/davis_putnam_logemann_loveland.py @@ -36,7 +36,7 @@ def __init__(self, literals: list[str]) -> None: Represent the literals and an assignment in a clause." 
""" # Assign all literals to None initially - self.literals: dict[str, bool | None] = {literal: None for literal in literals} + self.literals: dict[str, bool | None] = dict.fromkeys(literals) def __str__(self) -> str: """ diff --git a/other/quine.py b/other/quine.py index 08e885bc1ce7..0fc78333fed1 100644 --- a/other/quine.py +++ b/other/quine.py @@ -1,5 +1,5 @@ #!/bin/python3 -# ruff: noqa +# ruff: noqa: PLC3002 """ Quine: diff --git a/project_euler/problem_028/sol1.py b/project_euler/problem_028/sol1.py index 1ea5d4fcafd4..0a4648af36c4 100644 --- a/project_euler/problem_028/sol1.py +++ b/project_euler/problem_028/sol1.py @@ -37,7 +37,7 @@ def solution(n: int = 1001) -> int: """ total = 1 - for i in range(1, int(ceil(n / 2.0))): + for i in range(1, ceil(n / 2.0)): odd = 2 * i + 1 even = 2 * i total = total + 4 * odd**2 - 6 * even diff --git a/pyproject.toml b/pyproject.toml index 4a76c4ad6d11..60f8d4ffc96f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -119,6 +119,7 @@ lint.ignore = [ "PT018", # Assertion should be broken down into multiple parts "S101", # Use of `assert` detected -- DO NOT FIX "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SIM905", # Consider using a list literal instead of `str.split` -- DO NOT FIX "SLF001", # Private member accessed: `_Iterator` -- FIX ME "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] diff --git a/scripts/validate_filenames.py b/scripts/validate_filenames.py index e76b4dbfe288..80399673cced 100755 --- a/scripts/validate_filenames.py +++ b/scripts/validate_filenames.py @@ -9,28 +9,25 @@ filepaths = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" -upper_files = [file for file in filepaths if file != file.lower()] -if upper_files: +if upper_files := [file for file in filepaths if file != file.lower()]: print(f"{len(upper_files)} files contain uppercase characters:") print("\n".join(upper_files) + "\n") -space_files = [file for file in filepaths if " " in file] -if space_files: +if space_files := [file for file in filepaths if " " in file]: print(f"{len(space_files)} files contain space characters:") print("\n".join(space_files) + "\n") -hyphen_files = [file for file in filepaths if "-" in file] -if hyphen_files: +if hyphen_files := [ + file for file in filepaths if "-" in file and "/site-packages/" not in file +]: print(f"{len(hyphen_files)} files contain hyphen characters:") print("\n".join(hyphen_files) + "\n") -nodir_files = [file for file in filepaths if os.sep not in file] -if nodir_files: +if nodir_files := [file for file in filepaths if os.sep not in file]: print(f"{len(nodir_files)} files are not in a directory:") print("\n".join(nodir_files) + "\n") -bad_files = len(upper_files + space_files + hyphen_files + nodir_files) -if bad_files: +if bad_files := len(upper_files + space_files + hyphen_files + nodir_files): import sys sys.exit(bad_files) diff --git a/sorts/external_sort.py b/sorts/external_sort.py index 3fa7cacc0592..cfddee4fe7f8 100644 --- a/sorts/external_sort.py +++ b/sorts/external_sort.py @@ -61,7 +61,7 @@ def __init__(self, files): self.files = files self.empty = set() self.num_buffers = len(files) - self.buffers = {i: None for i in range(self.num_buffers)} + self.buffers = dict.fromkeys(range(self.num_buffers)) def get_dict(self): return { diff --git a/strings/frequency_finder.py b/strings/frequency_finder.py index e5afee891bd9..98720dc36d6e 100644 --- a/strings/frequency_finder.py +++ b/strings/frequency_finder.py @@ -36,7 +36,7 
@@ def get_letter_count(message: str) -> dict[str, int]: - letter_count = {letter: 0 for letter in string.ascii_uppercase} + letter_count = dict.fromkeys(string.ascii_uppercase, 0) for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 From 580273eeca28c30a8a5da114800d21b89fdfb930 Mon Sep 17 00:00:00 2001 From: Pranjay kumar <110048711+pranjaykumar926@users.noreply.github.com> Date: Thu, 20 Mar 2025 05:03:46 +0530 Subject: [PATCH 1538/1543] Improve prefix_sum.py (#12560) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update prefix_sum.py Index Validation for get_sum Raises ValueError if start or end is out of range or start > end. Handles cases where the array is empty. ✅ Empty Array Support If an empty array is passed, get_sum raises an appropriate error instead of failing unexpectedly. ✅ Optimized contains_sum Initialization Initializes sums with {0} for efficient subarray sum checking. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update prefix_sum.py * Update prefix_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update prefix_sum.py * Update prefix_sum.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- data_structures/arrays/prefix_sum.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/data_structures/arrays/prefix_sum.py b/data_structures/arrays/prefix_sum.py index 2243a5308937..717b5f9d7e7e 100644 --- a/data_structures/arrays/prefix_sum.py +++ b/data_structures/arrays/prefix_sum.py @@ -30,11 +30,29 @@ def get_sum(self, start: int, end: int) -> int: 5 >>> PrefixSum([1,2,3]).get_sum(2, 2) 3 + >>> PrefixSum([]).get_sum(0, 0) + Traceback (most recent call last): + ... + ValueError: The array is empty. + >>> PrefixSum([1,2,3]).get_sum(-1, 2) + Traceback (most recent call last): + ... + ValueError: Invalid range specified. >>> PrefixSum([1,2,3]).get_sum(2, 3) Traceback (most recent call last): ... - IndexError: list index out of range + ValueError: Invalid range specified. + >>> PrefixSum([1,2,3]).get_sum(2, 1) + Traceback (most recent call last): + ... + ValueError: Invalid range specified. 
""" + if not self.prefix_sum: + raise ValueError("The array is empty.") + + if start < 0 or end >= len(self.prefix_sum) or start > end: + raise ValueError("Invalid range specified.") + if start == 0: return self.prefix_sum[end] From e3773dbec1504de17047c4fe013c0f1aaef20b38 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 24 Mar 2025 20:05:41 +0100 Subject: [PATCH 1539/1543] [pre-commit.ci] pre-commit autoupdate (#12631) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.11.0 → v0.11.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.11.0...v0.11.2) - [github.com/abravalheri/validate-pyproject: v0.24 → v0.24.1](https://github.com/abravalheri/validate-pyproject/compare/v0.24...v0.24.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5deb66a5e5a2..0fc8b2b14e07 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.0 + rev: v0.11.2 hooks: - id: ruff - id: ruff-format @@ -42,7 +42,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.24 + rev: v0.24.1 hooks: - id: validate-pyproject From 74b540ad73bd3b1187ed6e3c89bb8f309ef543fd Mon Sep 17 00:00:00 2001 From: Tony Dang <62843153+Dang-Hoang-Tung@users.noreply.github.com> Date: Sat, 29 Mar 2025 08:13:47 +0000 Subject: [PATCH 1540/1543] Genetic Algorithm: Fix bug in multi-threading (#12644) * Fix bug in multi-threading - Multi-threading (despite being commented out) had a tiny bug: missing target argument (2nd argument). - Commented out code was also slightly hard to understand, added (Option 1/2) in comments to clarify where a user may choose between 2 implementations. * Update basic_string.py --------- Co-authored-by: Maxim Smolskiy --- genetic_algorithm/basic_string.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index a906ce85a779..b75491d9a949 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -144,18 +144,18 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, # Random population created. Now it's time to evaluate. - # Adding a bit of concurrency can make everything faster, + # (Option 1) Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: - # futures = {executor.submit(evaluate, item) for item in population} + # futures = {executor.submit(evaluate, item, target) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. - # We just need to call evaluate for every item inside the population. + # (Option 2) We just need to call evaluate for every item inside the population. population_score = [evaluate(item, target) for item in population] # Check if there is a matching evolution. 
From f10a5cbfccc5ee9ddb5ddd9906591ecaad58f672 Mon Sep 17 00:00:00 2001 From: Isidro Date: Mon, 31 Mar 2025 23:09:14 +0200 Subject: [PATCH 1541/1543] prefix_evaluation: Add alternative recursive implementation (#12646) * prefix_evaluation: Add alternative recursive implementation * improve doc * better variable name calc->operators * Update prefix_evaluation.py * Update prefix_evaluation.py * Update prefix_evaluation.py * Update prefix_evaluation.py --------- Co-authored-by: Maxim Smolskiy --- data_structures/stacks/prefix_evaluation.py | 39 +++++++++++++++++++-- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/data_structures/stacks/prefix_evaluation.py b/data_structures/stacks/prefix_evaluation.py index f48eca23d7b5..03a70d884725 100644 --- a/data_structures/stacks/prefix_evaluation.py +++ b/data_structures/stacks/prefix_evaluation.py @@ -1,8 +1,9 @@ """ -Python3 program to evaluate a prefix expression. +Program to evaluate a prefix expression. +https://en.wikipedia.org/wiki/Polish_notation """ -calc = { +operators = { "+": lambda x, y: x + y, "-": lambda x, y: x - y, "*": lambda x, y: x * y, @@ -31,6 +32,10 @@ def evaluate(expression): 21 >>> evaluate("/ * 10 2 + 4 1 ") 4.0 + >>> evaluate("2") + 2 + >>> evaluate("+ * 2 3 / 8 4") + 8.0 """ stack = [] @@ -45,11 +50,39 @@ def evaluate(expression): # push the result onto the stack again o1 = stack.pop() o2 = stack.pop() - stack.append(calc[c](o1, o2)) + stack.append(operators[c](o1, o2)) return stack.pop() +def evaluate_recursive(expression: list[str]): + """ + Alternative recursive implementation + + >>> evaluate_recursive(['2']) + 2 + >>> expression = ['+', '*', '2', '3', '/', '8', '4'] + >>> evaluate_recursive(expression) + 8.0 + >>> expression + [] + >>> evaluate_recursive(['+', '9', '*', '2', '6']) + 21 + >>> evaluate_recursive(['/', '*', '10', '2', '+', '4', '1']) + 4.0 + """ + + op = expression.pop(0) + if is_operand(op): + return int(op) + + operation = operators[op] + + a = evaluate_recursive(expression) + b = evaluate_recursive(expression) + return operation(a, b) + + # Driver code if __name__ == "__main__": test_expression = "+ 9 * 2 6" From baab802965c37fa1740054a559cad8c119b2ee35 Mon Sep 17 00:00:00 2001 From: Isidro Date: Tue, 1 Apr 2025 20:55:14 +0200 Subject: [PATCH 1542/1543] doubly linked list: add dataclass and typing (#12647) * Node is a dataclass * fix mypy errors * LinkedList is a dataclass * fix mypy errors * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py --------- Co-authored-by: Maxim Smolskiy --- .../linked_list/doubly_linked_list_two.py | 62 +++++++++---------- 1 file changed, 29 insertions(+), 33 deletions(-) diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py index e993cc5a20af..3d3bfb0cde30 100644 --- a/data_structures/linked_list/doubly_linked_list_two.py +++ b/data_structures/linked_list/doubly_linked_list_two.py @@ -9,25 +9,19 @@ Delete operation is more efficient """ +from dataclasses import dataclass +from typing import Self + +@dataclass class Node: - def __init__(self, data: int, previous=None, next_node=None): - self.data = data - self.previous = previous - self.next = next_node + data: int + 
previous: Self | None = None + next: Self | None = None def __str__(self) -> str: return f"{self.data}" - def get_data(self) -> int: - return self.data - - def get_next(self): - return self.next - - def get_previous(self): - return self.previous - class LinkedListIterator: def __init__(self, head): @@ -40,30 +34,30 @@ def __next__(self): if not self.current: raise StopIteration else: - value = self.current.get_data() - self.current = self.current.get_next() + value = self.current.data + self.current = self.current.next return value +@dataclass class LinkedList: - def __init__(self): - self.head = None # First node in list - self.tail = None # Last node in list + head: Node | None = None # First node in list + tail: Node | None = None # Last node in list def __str__(self): current = self.head nodes = [] while current is not None: - nodes.append(current.get_data()) - current = current.get_next() + nodes.append(current.data) + current = current.next return " ".join(str(node) for node in nodes) def __contains__(self, value: int): current = self.head while current: - if current.get_data() == value: + if current.data == value: return True - current = current.get_next() + current = current.next return False def __iter__(self): @@ -71,12 +65,12 @@ def __iter__(self): def get_head_data(self): if self.head: - return self.head.get_data() + return self.head.data return None def get_tail_data(self): if self.tail: - return self.tail.get_data() + return self.tail.data return None def set_head(self, node: Node) -> None: @@ -103,18 +97,20 @@ def insert_before_node(self, node: Node, node_to_insert: Node) -> None: node_to_insert.next = node node_to_insert.previous = node.previous - if node.get_previous() is None: + if node.previous is None: self.head = node_to_insert else: node.previous.next = node_to_insert node.previous = node_to_insert - def insert_after_node(self, node: Node, node_to_insert: Node) -> None: + def insert_after_node(self, node: Node | None, node_to_insert: Node) -> None: + assert node is not None + node_to_insert.previous = node node_to_insert.next = node.next - if node.get_next() is None: + if node.next is None: self.tail = node_to_insert else: node.next.previous = node_to_insert @@ -136,27 +132,27 @@ def insert_at_position(self, position: int, value: int) -> None: def get_node(self, item: int) -> Node: node = self.head while node: - if node.get_data() == item: + if node.data == item: return node - node = node.get_next() + node = node.next raise Exception("Node not found") def delete_value(self, value): if (node := self.get_node(value)) is not None: if node == self.head: - self.head = self.head.get_next() + self.head = self.head.next if node == self.tail: - self.tail = self.tail.get_previous() + self.tail = self.tail.previous self.remove_node_pointers(node) @staticmethod def remove_node_pointers(node: Node) -> None: - if node.get_next(): + if node.next: node.next.previous = node.previous - if node.get_previous(): + if node.previous: node.previous.next = node.next node.next = None From 0c8cf8e9871a5f91182d767adf173dccf87c2c0f Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 2 Apr 2025 10:23:55 +0300 Subject: [PATCH 1543/1543] Fix bug for data_structures/linked_list/doubly_linked_list_two.py (#12651) * Fix bug for data_structures/linked_list/doubly_linked_list_two.py * Fix * Fix * Fix * Fix * Fix * Fix * Fix * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * 
Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py * Update doubly_linked_list_two.py --- .../linked_list/doubly_linked_list_two.py | 27 ++++++++++++++----- 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/data_structures/linked_list/doubly_linked_list_two.py b/data_structures/linked_list/doubly_linked_list_two.py index 3d3bfb0cde30..8c93cddd5d31 100644 --- a/data_structures/linked_list/doubly_linked_list_two.py +++ b/data_structures/linked_list/doubly_linked_list_two.py @@ -81,8 +81,9 @@ def set_head(self, node: Node) -> None: self.insert_before_node(self.head, node) def set_tail(self, node: Node) -> None: - if self.head is None: - self.set_head(node) + if self.tail is None: + self.head = node + self.tail = node else: self.insert_after_node(self.tail, node) @@ -104,9 +105,7 @@ def insert_before_node(self, node: Node, node_to_insert: Node) -> None: node.previous = node_to_insert - def insert_after_node(self, node: Node | None, node_to_insert: Node) -> None: - assert node is not None - + def insert_after_node(self, node: Node, node_to_insert: Node) -> None: node_to_insert.previous = node node_to_insert.next = node.next @@ -127,7 +126,7 @@ def insert_at_position(self, position: int, value: int) -> None: return current_position += 1 node = node.next - self.insert_after_node(self.tail, new_node) + self.set_tail(new_node) def get_node(self, item: int) -> Node: node = self.head @@ -237,6 +236,22 @@ def create_linked_list() -> None: 7 8 9 + >>> linked_list = LinkedList() + >>> linked_list.insert_at_position(position=1, value=10) + >>> str(linked_list) + '10' + >>> linked_list.insert_at_position(position=2, value=20) + >>> str(linked_list) + '10 20' + >>> linked_list.insert_at_position(position=1, value=30) + >>> str(linked_list) + '30 10 20' + >>> linked_list.insert_at_position(position=3, value=40) + >>> str(linked_list) + '30 10 40 20' + >>> linked_list.insert_at_position(position=5, value=50) + >>> str(linked_list) + '30 10 40 20 50' """
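This final patch makes `set_tail` responsible for the empty-list case, so `insert_at_position` can append past the current tail without tripping the `assert node is not None` guard that the previous patch had added to `insert_after_node`. The sketch below is illustrative only and not part of the patch: `TinyList` is a trimmed, hypothetical stand-in that keeps just enough of `doubly_linked_list_two.py` to show the corrected tail handling, using the same values as the new doctest.

```python
# Illustrative only, not part of the patch. TinyList is a hypothetical, trimmed
# stand-in for LinkedList in doubly_linked_list_two.py; it keeps only the
# corrected tail handling: an empty list gets both head and tail set at once.
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class Node:
    data: int
    previous: Node | None = None
    next: Node | None = None


@dataclass
class TinyList:
    head: Node | None = None
    tail: Node | None = None

    def set_tail(self, node: Node) -> None:
        if self.tail is None:
            # Empty list: the new node is both the first and the last node,
            # which is what lets insert_at_position append to an empty list.
            self.head = node
            self.tail = node
        else:
            # Non-empty list: link the new node after the current tail.
            node.previous = self.tail
            self.tail.next = node
            self.tail = node

    def to_list(self) -> list[int]:
        items, node = [], self.head
        while node:
            items.append(node.data)
            node = node.next
        return items


if __name__ == "__main__":
    linked_list = TinyList()
    for value in (30, 10, 40, 20, 50):
        linked_list.set_tail(Node(value))
    print(linked_list.to_list())  # [30, 10, 40, 20, 50]
```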