From 9026e94ea7e5b0894193eb4752c6e71f065d7936 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 30 Jan 2023 17:49:22 +0000 Subject: [PATCH 1/6] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 76d50443a45..29ac440d5c2 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.18.4", + "version": "2.19.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index 0235e885ea9..d5e3d63f92f 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. from .celery import app as celery_app # noqa -__version__ = '2.18.4' +__version__ = '2.19.0-dev' __url__ = 'https://github.com/DefectDojo/django-DefectDojo' __docs__ = 'https://documentation.defectdojo.com' diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 5d5f13cd39f..adcd837a5bf 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.18.4" +appVersion: "2.19.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.52 +version: 1.6.53-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From e33ba8445efe28ce0126323670869f404810bac7 Mon Sep 17 00:00:00 2001 From: Colm O hEigeartaigh Date: Thu, 2 Feb 2023 18:16:15 +0000 Subject: [PATCH 2/6] Add a link to the source code in the JIRA description (#7535) --- dojo/models.py | 11 +++++++++-- .../issue-trackers/jira_full/jira-description.tpl | 4 +++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/dojo/models.py b/dojo/models.py index a85a72fbe45..6d45d9df902 100755 --- a/dojo/models.py +++ b/dojo/models.py @@ -2860,8 +2860,15 @@ def get_file_path_with_link(self): return None if self.test.engagement.source_code_management_uri is None: return escape(self.file_path) + link = self.get_file_path_with_raw_link() + return create_bleached_link(link, self.file_path) + + def get_file_path_with_raw_link(self): + if self.file_path is None: + return None link = self.test.engagement.source_code_management_uri - if "https://github.com/" in self.test.engagement.source_code_management_uri: + if (self.test.engagement.source_code_management_uri is not None + and "https://github.com/" in self.test.engagement.source_code_management_uri): if self.test.commit_hash: link += '/blob/' + self.test.commit_hash + '/' + self.file_path elif self.test.engagement.commit_hash: @@ -2876,7 +2883,7 @@ def get_file_path_with_link(self): link += '/' + self.file_path if self.line: link = link + '#L' + str(self.line) - return create_bleached_link(link, self.file_path) + return link def get_references_with_links(self): import re diff --git a/dojo/templates/issue-trackers/jira_full/jira-description.tpl b/dojo/templates/issue-trackers/jira_full/jira-description.tpl index 63d540b15d5..b4e60a64a88 100644 --- a/dojo/templates/issue-trackers/jira_full/jira-description.tpl +++ b/dojo/templates/issue-trackers/jira_full/jira-description.tpl @@ -61,7 +61,9 @@ *Source Line*: {{ finding.sast_source_line }} *Sink Object*: {{ finding.sast_sink_object }} {% elif finding.static_finding %} -{% if finding.file_path %} +{% if finding.file_path and finding.get_file_path_with_raw_link %} 
+*Source File*: [{{ finding.file_path }} | {{ finding.get_file_path_with_raw_link }}] +{% elif finding.file_path %} *Source File*: {{ finding.file_path }} {% endif %} {% if finding.line %} From 337dec6b293dbb7d1628ad1eff23efc1858eabb5 Mon Sep 17 00:00:00 2001 From: Colm O hEigeartaigh Date: Thu, 2 Feb 2023 18:17:45 +0000 Subject: [PATCH 3/6] Remove trailing bracket for SLA information in UI (#7533) --- dojo/templatetags/display_tags.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py index 9e7a649e2c1..16a791c0fac 100644 --- a/dojo/templatetags/display_tags.py +++ b/dojo/templatetags/display_tags.py @@ -267,11 +267,11 @@ def finding_sla(finding): find_sla) + ' days past SLA for ' + severity.lower() + ' findings (' + str(sla_age) + ' days since ' + finding.get_sla_start_date().strftime("%b %d, %Y") + ')' else: status = "green" - status_text = 'Remediation for ' + severity.lower() + ' findings in ' + str(sla_age) + ' days or less since ' + finding.get_sla_start_date().strftime("%b %d, %Y") + ')' + status_text = 'Remediation for ' + severity.lower() + ' findings in ' + str(sla_age) + ' days or less since ' + finding.get_sla_start_date().strftime("%b %d, %Y") if find_sla and find_sla < 0: status = "red" status_text = 'Overdue: Remediation for ' + severity.lower() + ' findings in ' + str( - sla_age) + ' days or less since ' + finding.get_sla_start_date().strftime("%b %d, %Y") + ')' + sla_age) + ' days or less since ' + finding.get_sla_start_date().strftime("%b %d, %Y") if find_sla is not None: title = '' \ From 1cdeca7c094626b9f1b5f24002a7bf66266e6fd2 Mon Sep 17 00:00:00 2001 From: Colm O hEigeartaigh Date: Fri, 3 Feb 2023 14:19:46 +0000 Subject: [PATCH 4/6] Add the ability to group by the finding title (#7540) --- dojo/finding/helper.py | 2 ++ dojo/models.py | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/dojo/finding/helper.py b/dojo/finding/helper.py index f8efb176591..76e0c4cf0f8 100644 --- a/dojo/finding/helper.py +++ b/dojo/finding/helper.py @@ -229,6 +229,8 @@ def get_group_by_group_name(finding, finding_group_by_option): elif finding_group_by_option == 'file_path': if finding.file_path: group_name = 'Filepath %s' % (finding.file_path) + elif finding_group_by_option == 'finding_title': + group_name = finding.title else: raise ValueError("Invalid group_by option %s" % finding_group_by_option) diff --git a/dojo/models.py b/dojo/models.py index 6d45d9df902..a9ba1423d7f 100755 --- a/dojo/models.py +++ b/dojo/models.py @@ -2969,7 +2969,10 @@ def get_breadcrumbs(self): class Finding_Group(TimeStampedModel): - GROUP_BY_OPTIONS = [('component_name', 'Component Name'), ('component_name+component_version', 'Component Name + Version'), ('file_path', 'File path')] + GROUP_BY_OPTIONS = [('component_name', 'Component Name'), + ('component_name+component_version', 'Component Name + Version'), + ('file_path', 'File path'), + ('finding_title', 'Finding Title')] name = models.CharField(max_length=255, blank=False, null=False) test = models.ForeignKey(Test, on_delete=models.CASCADE) From 780afde094f342285360820d0c7e1b896b916761 Mon Sep 17 00:00:00 2001 From: Colm O hEigeartaigh Date: Fri, 3 Feb 2023 16:18:33 +0000 Subject: [PATCH 5/6] A cleanup of the endpoint code used by the importer/reimporter (#7546) --- dojo/importers/importer/importer.py | 11 ++----- dojo/importers/reimporter/reimporter.py | 41 +++++-------------------- dojo/importers/reimporter/utils.py | 15 +++++++++ 
dojo/importers/utils.py | 17 +++++----- 4 files changed, 34 insertions(+), 50 deletions(-) diff --git a/dojo/importers/importer/importer.py b/dojo/importers/importer/importer.py index 265f1c7d72f..44524283ffb 100644 --- a/dojo/importers/importer/importer.py +++ b/dojo/importers/importer/importer.py @@ -132,16 +132,9 @@ def process_parsed_findings(self, test, parsed_findings, scan_type, user, active burp_rr.clean() burp_rr.save() - if settings.ASYNC_FINDING_IMPORT: - importer_utils.chunk_endpoints_and_disperse(item, test, item.unsaved_endpoints) - else: - importer_utils.add_endpoints_to_unsaved_finding(item, test, item.unsaved_endpoints, sync=True) - + importer_utils.chunk_endpoints_and_disperse(item, test, item.unsaved_endpoints) if endpoints_to_add: - if settings.ASYNC_FINDING_IMPORT: - importer_utils.chunk_endpoints_and_disperse(item, test, endpoints_to_add) - else: - importer_utils.add_endpoints_to_unsaved_finding(item, test, endpoints_to_add, sync=True) + importer_utils.chunk_endpoints_and_disperse(item, test, endpoints_to_add) if item.unsaved_tags: item.tags = item.unsaved_tags diff --git a/dojo/importers/reimporter/reimporter.py b/dojo/importers/reimporter/reimporter.py index 8b355892c98..d95599d859d 100644 --- a/dojo/importers/reimporter/reimporter.py +++ b/dojo/importers/reimporter/reimporter.py @@ -12,6 +12,7 @@ from django.core.files.base import ContentFile from django.utils import timezone from dojo.importers import utils as importer_utils +from dojo.importers.reimporter import utils as reimporter_utils from dojo.models import (BurpRawRequestResponse, FileUpload, Finding, Notes, Test_Import) from dojo.tools.factory import get_parser @@ -42,10 +43,6 @@ def process_parsed_findings(self, test, parsed_findings, scan_type, user, active unchanged_items = [] logger.debug('starting reimport of %i items.', len(items) if items else 0) - from dojo.importers.reimporter.utils import ( - match_new_finding_to_existing_finding, - update_endpoint_status, - reactivate_endpoint_status) deduplication_algorithm = test.deduplication_algorithm i = 0 @@ -84,7 +81,7 @@ def process_parsed_findings(self, test, parsed_findings, scan_type, user, active item.hash_code = item.compute_hash_code() deduplicationLogger.debug("item's hash_code: %s", item.hash_code) - findings = match_new_finding_to_existing_finding(item, test, deduplication_algorithm) + findings = reimporter_utils.match_new_finding_to_existing_finding(item, test, deduplication_algorithm) deduplicationLogger.debug('found %i findings matching with current new finding', len(findings)) @@ -146,19 +143,7 @@ def process_parsed_findings(self, test, parsed_findings, scan_type, user, active endpoint_statuses = finding.status_finding.exclude(Q(false_positive=True) | Q(out_of_scope=True) | Q(risk_accepted=True)) - - # Determine if this can be run async - if settings.ASYNC_FINDING_IMPORT: - chunk_list = importer_utils.chunk_list(endpoint_statuses) - # If there is only one chunk, then do not bother with async - if len(chunk_list) < 2: - reactivate_endpoint_status(endpoint_statuses, sync=True) - logger.debug('IMPORT_SCAN: Split endpoints into ' + str(len(chunk_list)) + ' chunks of ' + str(chunk_list[0])) - # First kick off all the workers - for endpoint_status_list in chunk_list: - reactivate_endpoint_status(endpoint_status_list, sync=False) - else: - reactivate_endpoint_status(endpoint_statuses, sync=True) + reimporter_utils.chunk_endpoints_and_reactivate(endpoint_statuses) finding.notes.add(note) reactivated_items.append(finding) @@ -190,7 +175,7 @@ def 
process_parsed_findings(self, test, parsed_findings, scan_type, user, active unchanged_count += 1 if finding.dynamic_finding: logger.debug("Re-import found an existing dynamic finding for this new finding. Checking the status of endpoints") - update_endpoint_status(finding, item, user) + reimporter_utils.update_endpoint_status(finding, item, user) else: # no existing finding found item.reporter = user @@ -247,16 +232,9 @@ def process_parsed_findings(self, test, parsed_findings, scan_type, user, active # for existing findings: make sure endpoints are present or created if finding: finding_count += 1 - if settings.ASYNC_FINDING_IMPORT: - importer_utils.chunk_endpoints_and_disperse(finding, test, item.unsaved_endpoints) - else: - importer_utils.add_endpoints_to_unsaved_finding(finding, test, item.unsaved_endpoints, sync=True) - + importer_utils.chunk_endpoints_and_disperse(finding, test, item.unsaved_endpoints) if endpoints_to_add: - if settings.ASYNC_FINDING_IMPORT: - importer_utils.chunk_endpoints_and_disperse(finding, test, endpoints_to_add) - else: - importer_utils.add_endpoints_to_unsaved_finding(finding, test, endpoints_to_add, sync=True) + importer_utils.chunk_endpoints_and_disperse(finding, test, endpoints_to_add) if item.unsaved_tags: finding.tags = item.unsaved_tags @@ -332,12 +310,7 @@ def close_old_findings(self, test, to_mitigate, scan_date_time, user, push_to_ji finding.active = False endpoint_status = finding.status_finding.all() - for status in endpoint_status: - status.mitigated_by = user - status.mitigated_time = timezone.now() - status.mitigated = True - status.last_modified = timezone.now() - status.save() + reimporter_utils.mitigate_endpoint_status(endpoint_status, user, kwuser=user, sync=True) # to avoid pushing a finding group multiple times, we push those outside of the loop if is_finding_groups_enabled() and finding.finding_group: diff --git a/dojo/importers/reimporter/utils.py b/dojo/importers/reimporter/utils.py index 0a5e714b9d3..225867952c3 100644 --- a/dojo/importers/reimporter/utils.py +++ b/dojo/importers/reimporter/utils.py @@ -95,6 +95,21 @@ def mitigate_endpoint_status(endpoint_status_list, user, **kwargs): endpoint_status.save() +def chunk_endpoints_and_reactivate(endpoint_statuses, **kwargs): + # Determine if this can be run async + if settings.ASYNC_FINDING_IMPORT: + chunk_list = importer_utils.chunk_list(endpoint_statuses) + # If there is only one chunk, then do not bother with async + if len(chunk_list) < 2: + reactivate_endpoint_status(endpoint_statuses, sync=True) + logger.debug('IMPORT_SCAN: Split endpoints into ' + str(len(chunk_list)) + ' chunks of ' + str(chunk_list[0])) + # First kick off all the workers + for endpoint_status_list in chunk_list: + reactivate_endpoint_status(endpoint_status_list, sync=False) + else: + reactivate_endpoint_status(endpoint_statuses, sync=True) + + @dojo_async_task @app.task() def reactivate_endpoint_status(endpoint_status_list, **kwargs): diff --git a/dojo/importers/utils.py b/dojo/importers/utils.py index 836f4688b6e..76c594fa8c6 100644 --- a/dojo/importers/utils.py +++ b/dojo/importers/utils.py @@ -108,14 +108,17 @@ def chunk_list(list): def chunk_endpoints_and_disperse(finding, test, endpoints, **kwargs): - chunked_list = chunk_list(endpoints) - # If there is only one chunk, then do not bother with async - if len(chunked_list) < 2: + if settings.ASYNC_FINDING_IMPORT: + chunked_list = chunk_list(endpoints) + # If there is only one chunk, then do not bother with async + if len(chunked_list) < 2: + 
add_endpoints_to_unsaved_finding(finding, test, endpoints, sync=True) + return [] + # First kick off all the workers + for endpoints_list in chunked_list: + add_endpoints_to_unsaved_finding(finding, test, endpoints_list, sync=False) + else: add_endpoints_to_unsaved_finding(finding, test, endpoints, sync=True) - return [] - # First kick off all the workers - for endpoints_list in chunked_list: - add_endpoints_to_unsaved_finding(finding, test, endpoints_list, sync=False) # Since adding a model to a ManyToMany relationship does not require an additional From 115cf57bfb3bc2da733d88b732257907594392d1 Mon Sep 17 00:00:00 2001 From: Colm O hEigeartaigh Date: Fri, 3 Feb 2023 19:27:36 +0000 Subject: [PATCH 6/6] Set the product name as the description in the importer (#7534) --- dojo/importers/reimporter/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dojo/importers/reimporter/utils.py b/dojo/importers/reimporter/utils.py index 225867952c3..23c4a85732c 100644 --- a/dojo/importers/reimporter/utils.py +++ b/dojo/importers/reimporter/utils.py @@ -204,7 +204,7 @@ def get_or_create_product(product_name=None, product_type_name=None, auto_create member.role = Role.objects.get(is_owner=True) member.save() - product = Product.objects.create(name=product_name, prod_type=product_type) + product = Product.objects.create(name=product_name, prod_type=product_type, description=product_name) member = Product_Member() member.user = get_current_user() member.product = product
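
The importer patches above (patch 5 in particular) converge on a single dispatch pattern: split a list of endpoints or endpoint statuses into chunks, run the work inline when async import is disabled or everything fits in one chunk, and otherwise fan the chunks out to Celery workers. Below is a minimal, self-contained sketch of that pattern; ASYNC_IMPORT, CHUNK_SIZE, and process_chunk are stand-ins invented for illustration (the real code uses settings.ASYNC_FINDING_IMPORT and tasks decorated with @dojo_async_task, as in dojo/importers/reimporter/utils.py above), so treat it as an outline of the technique rather than the actual DefectDojo implementation.

    # Sketch of the chunk-and-disperse pattern; all names here are stand-ins.
    ASYNC_IMPORT = True  # stand-in for settings.ASYNC_FINDING_IMPORT
    CHUNK_SIZE = 100     # assumed size; the real value is defined by the importer

    def chunk_list(items):
        # Split a list into fixed-size chunks, preserving order.
        return [items[i:i + CHUNK_SIZE] for i in range(0, len(items), CHUNK_SIZE)]

    def process_chunk(chunk, sync):
        # Stand-in for a @dojo_async_task-style task: sync=True runs inline,
        # sync=False would enqueue the chunk on a Celery worker.
        print(f"processing {len(chunk)} endpoints (sync={sync})")

    def chunk_and_disperse(endpoints):
        if not ASYNC_IMPORT:
            process_chunk(endpoints, sync=True)  # async disabled: run inline
            return
        chunks = chunk_list(endpoints)
        if len(chunks) < 2:
            process_chunk(endpoints, sync=True)  # one chunk is not worth queueing
            return
        for chunk in chunks:                     # fan out, one worker per chunk
            process_chunk(chunk, sync=False)

    chunk_and_disperse([f"endpoint-{i}" for i in range(250)])

The len(chunks) < 2 guard mirrors the check in chunk_endpoints_and_disperse and chunk_endpoints_and_reactivate: when the whole list fits in a single chunk, enqueueing one task would only add broker overhead, so the work runs synchronously instead.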