From f27ca680ac7945ac217ecce3e172f22166560bf6 Mon Sep 17 00:00:00 2001 From: nhsmith85 Date: Mon, 2 Dec 2024 02:12:41 -0500 Subject: [PATCH] prompts and utils --- system/eval_config.yml | 14 ++++- system/graph.py | 49 +++++++++++------ system/output_tracker.txt | 104 ++++++++++++++++++++++++++++++++++++ system/prompts.py | 107 +++++++++++++++++++++++++++++++++----- system/structure.py | 16 +++++- system/utils.py | 55 +++++++++++--------- 6 files changed, 287 insertions(+), 58 deletions(-) create mode 100644 system/output_tracker.txt diff --git a/system/eval_config.yml b/system/eval_config.yml index 089b70f..bb6c00e 100644 --- a/system/eval_config.yml +++ b/system/eval_config.yml @@ -32,4 +32,16 @@ tracker_v3: particle-swarm-optimization: 2 # had an error on unit test feedback string object doesn't have content attrib readtime: 1 stocktrends: 1 - TextCNN: 1 \ No newline at end of file + TextCNN: 1 + +tracker_v4: + ArXiv_Digest: 1 + chakin: 1 # changed file saving issue not related to LLMs + geotext: many # revised unit test and implementation prompt iterations + hone: + Hybrid_Images: + lice: + particle-swarm-optimization: + readtime: + stocktrends: + TextCNN: \ No newline at end of file diff --git a/system/graph.py b/system/graph.py index 2600d51..abf5281 100644 --- a/system/graph.py +++ b/system/graph.py @@ -7,7 +7,7 @@ from copy import deepcopy from langchain_openai import ChatOpenAI import subprocess -from system.utils import check_and_install_packages, create_repository +from system.utils import check_and_install_packages, create_repository, get_root_dir from system.structure import (GraphState, Design, ApproveDesign, @@ -110,7 +110,7 @@ def software_design(state: GraphState): prompt = [ SystemMessage(content="You are a helpful assistant. Generate improved content based on the original request and reviewer feedback."), HumanMessage(content=DESIGN_PROMPT.format(PRD=state["documents"]['PRD'])), - HumanMessage(content=f"Your previous response needed improvement. Here's the reviewer feedback:\n{state['messages'][-1].content}\n\nPlease generate an improved version addressing these specific issues.") + HumanMessage(content=f"Your previous response needed improvement. Here's the reviewer feedback:\n{state['messages'][-1]}\n\nPlease generate an improved version addressing these specific issues.") ] structured_llm = llm.with_structured_output(Design) response = structured_llm.invoke(prompt) @@ -143,6 +143,7 @@ def implementation(state: GraphState): Implement the software design. 
""" logging.info("---IMPLEMENTATION---") + state['approvals']['implementation_iter'] = state['approvals'].get('implementation_iter', 0) + 1 prompt = [HumanMessage(content=IMPLEMENTATION_PROMPT.format(**state["documents"]))] if 'implementation' in state['approvals']: if not state['approvals']['implementation']: @@ -169,7 +170,7 @@ def approve_implementation(state: GraphState): return state def route_implementation(state: GraphState) -> Literal['acceptance_tests', 'implementation']: - if all(state["approvals"].values()): + if all(state["approvals"].values()) or state['approvals']['implementation_iter'] > 2: return "acceptance_tests" else: return "implementation" @@ -193,7 +194,7 @@ def approve_acceptance_tests(state: GraphState): logging.info("---APPROVE ACCEPTANCE TESTS---") try: - root_dir = next(iter(state['documents']['code'])).split('/')[0] + root_dir = get_root_dir(state['documents']['code']) cmd = f"cd temp/{root_dir} && {state['documents']['acceptance_tests']['command']}" # Run command in shell, capture output process = subprocess.run( @@ -236,16 +237,32 @@ def unit_tests(state: GraphState): code = '\n\n'.join(f"# ---{filename}---\n{content}" for filename, content in state['documents']['code'].items()) - prompt = [HumanMessage(content=UNIT_TEST_PROMPT.format(PRD=state["documents"]['PRD'], - architecture_design=state["documents"]['architecture_design'], - code=code))] - if 'unit_tests' in state['approvals']: - if not state['approvals']['unit_tests_coverage']: - prompt = [ - SystemMessage(content="You are a helpful assistant. Generate improved content based on the original request and reviewer feedback."), - prompt[-1], - HumanMessage(content=f"Your previous response needed improvement. Here's the reviewer feedback:\n{state['messages'][-1]}\n\nPlease generate an improved version addressing these specific issues.") - ] + # prompt = [HumanMessage(content=UNIT_TEST_PROMPT.format(PRD=state["documents"]['PRD'], + # architecture_design=state["documents"]['architecture_design'], + # code=code))] + # if 'unit_tests' in state['approvals']: + # if not state['approvals']['unit_tests_coverage']: + # prompt = [ + # SystemMessage(content="You are a helpful assistant. Generate improved content based on the original request and reviewer feedback."), + # prompt[-1], + # HumanMessage(content=f"Your previous response needed improvement. 
Here's the reviewer feedback:\n{state['messages'][-1]}\n\nPlease generate an improved version addressing these specific issues.") + # ] + base_prompt = UNIT_TEST_PROMPT.format( + PRD=state["documents"]['PRD'], + architecture_design=state["documents"]['architecture_design'], + code=code + ) + + if 'unit_tests' in state['approvals'] and not state['approvals']['unit_tests_coverage']: + prompt = [ + SystemMessage(content=UNIT_TEST_REVISION_SYSTEM.format()), + HumanMessage(content=base_prompt), + HumanMessage(content=UNIT_TEST_REVISION_FEEDBACK.format( + feedback=state['messages'][-1] + )) + ] + else: + prompt = [HumanMessage(content=base_prompt)] structured_llm = llm.with_structured_output(UnitTests) test = structured_llm.invoke(prompt) state["documents"].update(test.dict()) @@ -255,7 +272,7 @@ def approve_unit_tests(state: GraphState): logging.info("---APPROVE UNIT TESTS---") # Get first directory name, handling paths with leading slash - root_dir = next(name for name in next(iter(state['documents']['code'])).split('/') if name) + root_dir = get_root_dir(state['documents']['code']) cmd = f"cd temp/{root_dir} && {state['documents']['unit_tests']['command'].replace('python ', 'coverage run ')}" try: @@ -314,7 +331,7 @@ def approve_unit_tests(state: GraphState): break else: msg = f"Coverage report failed execution: {coverage_process.stdout}" - state['messages'].append(msg) + state['messages'].append(msg + process.stdout) # add the unit test output logging.info(msg) state['approvals'].update({"unit_tests_coverage": False}) diff --git a/system/output_tracker.txt b/system/output_tracker.txt new file mode 100644 index 0000000..16978a7 --- /dev/null +++ b/system/output_tracker.txt @@ -0,0 +1,104 @@ +(lc_env) PS C:\Users\smith\code\autoSWE> python -m system.main --prd_path system/benchmark_data/python/ArXiv_digest/docs/PRD.md +INFO:root:TRACING True +INFO:root:autoSWE-1 +INFO:root:---SOFTWARE DESIGN--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---APPROVE SOFTWARE DESIGN--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---IMPLEMENTATION--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---APPROVE IMPLEMENTATION--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---ACCEPTANCE TESTS--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---UNIT TESTS--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---ENVIRONMENT SETUP--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---APPROVE ACCEPTANCE TESTS--- +INFO:root:---APPROVE UNIT TESTS--- +INFO:root:Coverage report successful: +Name Stmts Miss Cover +--------------------------------------------------------- +src\query_arxiv.py 48 0 100% +src\utils\api_interaction.py 5 2 60% +src\utils\command_line_interface.py 15 1 93% +src\utils\output_handler.py 17 13 24% +src\utils\xml_parser.py 10 7 30% +--------------------------------------------------------- +TOTAL 95 23 76% + + +(lc_env) PS C:\Users\smith\code\autoSWE> python -m system.main --prd_path system/benchmark_data/python/chakin/PRD.md +INFO:root:TRACING True +INFO:root:autoSWE-1 +INFO:root:---SOFTWARE DESIGN--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---APPROVE 
SOFTWARE DESIGN--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---IMPLEMENTATION--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---APPROVE IMPLEMENTATION--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---IMPLEMENTATION--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---APPROVE IMPLEMENTATION--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---ACCEPTANCE TESTS--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---UNIT TESTS--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---ENVIRONMENT SETUP--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +Requirement already satisfied: progressbar2 in c:\users\smith\anaconda3\envs\lc_env\lib\site-packages (4.5.0) +Requirement already satisfied: python-utils>=3.8.1 in c:\users\smith\anaconda3\envs\lc_env\lib\site-packages (from progressbar2) (3.9.0) +Requirement already satisfied: typing-extensions>3.10.0.2 in c:\users\smith\anaconda3\envs\lc_env\lib\site-packages (from python-utils>=3.8.1->progressbar2) (4.12.2) +INFO:root:---APPROVE ACCEPTANCE TESTS--- +INFO:root:---APPROVE UNIT TESTS--- +INFO:root:Coverage report successful: +Name Stmts Miss Cover +-------------------------------------- +chakin\chakin.py 26 0 100% +-------------------------------------- +TOTAL 26 0 100% + +(lc_env) PS C:\Users\smith\code\autoSWE> python -m system.main --prd_path system/benchmark_data/python/geotext/PRD.md +INFO:root:TRACING True +INFO:root:autoSWE-1 +INFO:root:---SOFTWARE DESIGN--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---APPROVE SOFTWARE DESIGN--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---IMPLEMENTATION--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---APPROVE IMPLEMENTATION--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---ACCEPTANCE TESTS--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---UNIT TESTS--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---ENVIRONMENT SETUP--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---APPROVE ACCEPTANCE TESTS--- +INFO:root:---APPROVE UNIT TESTS--- +INFO:root:Coverage report failed execution: No data to report. + +INFO:root:---UNIT TESTS--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---ENVIRONMENT SETUP--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---APPROVE ACCEPTANCE TESTS--- +INFO:root:---APPROVE UNIT TESTS--- +INFO:root:Coverage report failed execution: No data to report. 
+ +INFO:root:---UNIT TESTS--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---ENVIRONMENT SETUP--- +INFO:httpx:HTTP Request: POST https://api.openai.com/v1/chat/completions "HTTP/1.1 200 OK" +INFO:root:---APPROVE ACCEPTANCE TESTS--- +INFO:root:---APPROVE UNIT TESTS--- +INFO:root:Coverage report failed execution: No data to report. + + + + diff --git a/system/prompts.py b/system/prompts.py index ba27ed4..77d30f5 100644 --- a/system/prompts.py +++ b/system/prompts.py @@ -92,13 +92,32 @@ -----Instructions------ Using the PRD as a source of truth and guideline, I am going to ask you to generate some specific artifacts. +Do not suggest in your design that we should import a package that already does the things in the PRD. I want to generate code that performs the things outlined in the PRD from scratch. Based on the specifications outlined in the PRD document, return a dictionary with the following keys: 1. UML_class: A Mermaid 11 class diagram that reflects the class structure and relationships defined in the PRD 2. UML_sequence: A Mermaid 11 sequence diagram showing the key interactions and flow between components as specified in the PRD -3. architecture_design: A detailed text based representation of the file tree that is true to the PRD and includes but is not limited to: - - A root-level README.md file documenting the system overview +3. architecture_design: A detailed text based representation of the file tree (with all files and folders under a root directory "project-root") that is true to the PRD and includes but is not limited to: + - A README.md file documenting the system overview - An 'examples' directory (inside the root directory) containing: - example_usage.sh demonstrating core functionality along with any additional example files that align with use cases mentioned in the PRD + - Every single file and directory MUST be connected with ASCII lines: + - Vertical lines: │ + - Branch lines: ├─── + - Last item lines: └─── + - Each item must have a horizontal line (───) connecting to it + - The directory structure must be clear with proper ASCII connection lines like this example: +project-root/ +├── README.md +├── src/ +│ ├── __init__.py +│ ├── main.py +│ └── utils/ +│ ├── __init__.py +│ ├── helpers.py +│ └── config.py +└── tests/ + ├── __init__.py + └── test_main.py """ APPROVE_DESIGN_PROMPT = """ @@ -246,6 +265,22 @@ -----Instructions------ +Your task is to implement the software based on the PRD and architecture design. +1. Return a dictionary with one key "code" and the value should be another dictionary where: + - Each key is a full file path as specified in the architecture design + - Each value is the content of that file +2. Follow the architecture design precisely +3. Include all necessary files from the design, including __init__.py files +4. Keep file paths consistent with this structure throughout your implementation +5. Implement all code files with full, working implementations do not use placeholders such as "TODO" or "pass" +6. Don't specify empty directories +7. Include and generate any CSV/JSON files mentioned in the PRD or architecture design if they are necessary +8. Ensure the code is production-ready and follows best practices +9. Tests will be run from the root directory of the repository so keep that in mind for import statements +10. Do not import a package that already does the things in the PRD. I want you to generate code that performs the things outlined in the PRD from scratch. 
+""" + +other_imp = """ 1. Return a dictionary with a single key "code" 2. The value of "code" should be another dictionary where: - Keys: Full file paths as specified in the architecture design @@ -255,13 +290,13 @@ - README.md in the root directory with complete documentation - example_usage.sh in the "examples" directory with working examples that are consistent with the PRD - if the PRD or architecture design call for other files to be in the "examples" directory then include them as well -5. Implement all necessary code files with full, working implementations, don't specify empty directories -6. Include any CSV/JSON files mentioned in the PRD or architecture design -7. Ensure the code is production-ready and follows best practices -8. Tests will be run from the root directory of the repository so keep that in mind for import statements +5. Implement all code files with full, working implementations +6. Don't specify empty directories +7. Include any CSV/JSON files mentioned in the PRD or architecture design +8. Ensure the code is production-ready and follows best practices +9. Tests will be run from the root directory of the repository so keep that in mind for import statements """ - APPROVE_IMPLEMENTATION_PROMPT = """ Below is the architectural design: @@ -284,6 +319,7 @@ - `"message"`: A descriptive confirmation message. If the document names do not agree with the architecture_design (which is the source of truth) write a prompt saying what the previous implementation did incorrectly and how it should fixed. Say something like "The previous implementation did not correctly map the architecture_design to the documents. Here's how it should be fixed: ..." +Also, if there were any issues with the implementation include a reminder not to generate placeholders like "TODO" or "pass" in the code, but to provide full implementations. Your analysis and response will help ensure consistency and correctness between the architecture_design and its representation in code. @@ -331,10 +367,55 @@ -----Instructions-------- -Your task is to generate unit tests to ensure the software adheres to the requirements in the PRD. -Pay close attention to the code and the PRD to ensure the tests are comprehensive and accurate. -The unit tests will be written using the unittest module and ultimately written to a file at: tests/unit/test_module.py. Keep this in mind for relative imports and file paths. -Write the content of the unit tests to a dictionary where the key is "test_module" and the value is the content of the unit test. -Make another key in this dictionary called "command" and write the command to run the unit tests as the value for the "command" key. -Nest this dictionary in another dictionary with the key "unit_tests" and return this nested dictionary. +Your task is to generate unit tests to ensure the software adheres to the requirements in the PRD. + +Write the content of the unit tests to a dictionary where the key is "test_module" and the value is the content of the unit tests. +Make another key in this dictionary called "command" and write the command to run the unit test as the value for the "command" key. +Nest this dictionary in another dictionary with the key "unit_tests" and return this nested dictionary as your response. + +Requirements: +1. The test_module must contain complete unittest code that tests all functionality +2. Your tests should achieve at least 60 percent coverage +3. Tests will be written to tests/unit/test_module.py - keep this in mind for imports +4. 
Use the unittest module for all tests +5. Include assertions for both expected and error cases +""" + +UNIT_TEST_REVISION_SYSTEM = """ +You are a Python unittest expert. Your task is to improve the test coverage of the previous unittest implementation. """ + +UNIT_TEST_REVISION_FEEDBACK = """ +The previous tests had insufficient coverage (below 60%). +Your task is to generate improved tests with better coverage. + +To improve coverage, make sure to: +1. Add tests for edge cases (empty inputs, invalid inputs, boundary values) +2. Add tests for error conditions (exceptions, error handling) +3. Test all code paths (different branches, conditional logic) +4. Include more assertions per test case +5. Test both positive and negative scenarios +6. Add tests for any missing functionality + +Previous feedback for reference: {feedback} + +Remember: +- Keep using the unittest framework +- Tests will be written to tests/unit/test_module.py +- Make sure your response is a properly formatted dictionary +- Include all necessary imports and test classes in the test_module value + +Write the content of the unit tests to a dictionary where the key is "test_module" and the value is the content of the unit tests. +Make another key in this dictionary called "command" and write the command to run the unit test as the value for the "command" key. +Nest this dictionary in another dictionary with the key "unit_tests" and return this nested dictionary as your response. +""" + +otherstring = """ +-----Instructions-------- +Your task is to generate unit tests to ensure the software adheres to the requirements in the PRD. +1. Return a dictionary with one key "unit_tests" and the value should be another dictionary where: + - The value of the key "test_module" should be the content of the unit test written in the unittest module + - The value of the key "command" should be the command to run the unit tests +2. Pay close attention to the code and the PRD to ensure the tests are comprehensive and accurate. +3. The unit tests should be written using the unittest module and ultimately written to a file at: tests/unit/test_module.py. Keep this in mind for relative imports and file paths. 
+""" \ No newline at end of file diff --git a/system/structure.py b/system/structure.py index e688229..dc8de5d 100644 --- a/system/structure.py +++ b/system/structure.py @@ -34,10 +34,22 @@ class ApproveImplementation(BaseModel): message: str class AcceptanceTests(BaseModel): - acceptance_tests: Dict[str, str] + acceptance_tests: Dict[str, str] = Field( + description="Dictionary containing acceptance test content and command", + example={ + "test_features": "import unittest\n...", + "command": "python -m unittest tests/acceptance/test_features.py" + } + ) class UnitTests(BaseModel): - unit_tests: Dict[str, str] + unit_tests: Dict[str, str] = Field( + description="Dictionary containing test_module and command keys", + example={ + "test_module": "import unittest\nclass TestExample(unittest.TestCase):\n def test_something(self):\n pass", + "command": "python -m unittest tests/unit/test_module.py" + } + ) class UpdateDocument(BaseModel): content: str diff --git a/system/utils.py b/system/utils.py index a2c6665..9b42146 100644 --- a/system/utils.py +++ b/system/utils.py @@ -5,6 +5,17 @@ import copy from typing import Dict, List, Union +def get_root_dir(files: Dict[str, str]) -> str: + # Check if all filenames have the same root directory + root_dirs = {filename.split('/')[0] for filename in files.keys()} + + if len(root_dirs) > 1: + # Multiple different root dirs found implying that a root directory was never specified + return "" + else: + # All files have same root dir + return next(iter(root_dirs)) + '/' if root_dirs else "" + def check_and_install_packages(packages: List[str]) -> Dict[str, Dict[str, Union[bool, str]]]: """Check if packages are installed and install if needed""" results = {} @@ -43,13 +54,9 @@ def check_and_install_packages(packages: List[str]) -> Dict[str, Dict[str, Union return results -def write_files(base_path: str, files_content: Dict[str, str]) -> None: - """Write files based on dictionary input""" - for file_path, content in files_content.items(): - # Strip leading slash to ensure relative path - clean_path = file_path.lstrip('/') - full_path = os.path.join(base_path, clean_path) - +def write_files(base_path: str, files: Dict[str, str]) -> None: + for filename, content in files.items(): + full_path = os.path.join(base_path, filename) # Create directory if it doesn't exist os.makedirs(os.path.dirname(full_path), exist_ok=True) @@ -75,29 +82,25 @@ def create_repository(base_path: str, documents: Dict) -> None: files = copy.deepcopy(documents['code']) - # Get first directory name, handling paths with leading slash - root_dir = next(name for name in next(iter(files)).split('/') if name) - + root_dir = get_root_dir(files) + coverage = "[run]\nomit =\n */__init__.py\n tests/*\n" - + files.update({ - f'{root_dir}/tests/unit/test_module.py': documents['unit_tests']['test_module'], - f'{root_dir}/tests/unit/__init__.py': '', - f'{root_dir}/tests/acceptance/test_features.py': documents['acceptance_tests']['test_features'], - f'{root_dir}/tests/acceptance/__init__.py': '', - f'{root_dir}/docs/PRD.md': documents['PRD'], - f'{root_dir}/docs/UML_class.md': documents['UML_class'], - f'{root_dir}/docs/UML_sequence.md': documents['UML_sequence'], - f'{root_dir}/docs/architecture_design.md': documents['architecture_design'], - f'{root_dir}/requirements.txt': documents['requirements'], - f'{root_dir}/.coveragerc': coverage, + f'{root_dir}tests/unit/test_module.py': documents['unit_tests']['test_module'], + f'{root_dir}tests/unit/__init__.py': '', + 
f'{root_dir}tests/acceptance/test_features.py': documents['acceptance_tests']['test_features'], + f'{root_dir}tests/acceptance/__init__.py': '', + f'{root_dir}docs/PRD.md': documents['PRD'], + f'{root_dir}docs/UML_class.md': documents['UML_class'], + f'{root_dir}docs/UML_sequence.md': documents['UML_sequence'], + f'{root_dir}docs/architecture_design.md': documents['architecture_design'], + f'{root_dir}requirements.txt': documents['requirements'], + f'{root_dir}.coveragerc': coverage, }) - - # Write files - write_files(base_path, files) - - + # Write files + write_files(base_path, files) # Example usage if __name__ == "__main__":
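
Editor's note: for reference, a minimal usage sketch of the new get_root_dir helper added in system/utils.py above. The function body is copied from the patch; the example file maps and the printed results are hypothetical, chosen only to illustrate the single-root, mixed-root, and empty cases.

from typing import Dict

def get_root_dir(files: Dict[str, str]) -> str:
    # Collect the first path segment of every file in the map
    root_dirs = {filename.split('/')[0] for filename in files.keys()}

    if len(root_dirs) > 1:
        # Multiple different root dirs found, implying a common root directory was never specified
        return ""
    else:
        # All files share the same root dir; return it with a trailing slash
        return next(iter(root_dirs)) + '/' if root_dirs else ""

if __name__ == "__main__":
    single_root = {"geotext/geotext.py": "...", "geotext/__init__.py": "..."}  # hypothetical
    mixed_roots = {"src/main.py": "...", "tests/test_main.py": "..."}          # hypothetical
    print(get_root_dir(single_root))  # -> "geotext/"
    print(get_root_dir(mixed_roots))  # -> ""
    print(get_root_dir({}))           # -> ""

Because the return value already carries the trailing slash (or is empty), create_repository can prepend it directly, as in f'{root_dir}docs/PRD.md'; when no common root exists, the generated tests/ and docs/ entries simply land directly under base_path.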
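As a quick illustration of the response shape the revised DESIGN_PROMPT and IMPLEMENTATION_PROMPT now ask for (a single "code" key mapping full file paths under project-root/ to file contents), a hypothetical example is sketched below; the file names and contents are placeholders, not part of the patch, and the real keys come from the architecture_design produced earlier in the graph.

implementation_response = {
    "code": {
        "project-root/README.md": "# System overview ...",
        "project-root/src/__init__.py": "",
        "project-root/src/main.py": "def main() -> None:\n    print('hello')\n",
        "project-root/examples/example_usage.sh": "python -m src.main\n",
    }
}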
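Finally, a condensed sketch of the revision-prompt pattern the unit_tests node now follows, assuming the prompt constants are importable from system.prompts and the LangChain message classes from langchain_core.messages; this only restates the new branch in system/graph.py rather than adding behaviour.

from langchain_core.messages import SystemMessage, HumanMessage
from system.prompts import (UNIT_TEST_PROMPT,
                            UNIT_TEST_REVISION_SYSTEM,
                            UNIT_TEST_REVISION_FEEDBACK)

def build_unit_test_prompt(state) -> list:
    # Base request assembled from the documents gathered so far
    code = '\n\n'.join(f"# ---{name}---\n{content}"
                       for name, content in state['documents']['code'].items())
    base_prompt = UNIT_TEST_PROMPT.format(
        PRD=state['documents']['PRD'],
        architecture_design=state['documents']['architecture_design'],
        code=code,
    )
    # After a failed coverage check, wrap the base prompt with the revision messages
    if 'unit_tests' in state['approvals'] and not state['approvals']['unit_tests_coverage']:
        return [
            SystemMessage(content=UNIT_TEST_REVISION_SYSTEM.format()),
            HumanMessage(content=base_prompt),
            HumanMessage(content=UNIT_TEST_REVISION_FEEDBACK.format(
                feedback=state['messages'][-1])),
        ]
    return [HumanMessage(content=base_prompt)]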