Merge pull request DefectDojo#7560 from DefectDojo/bugfix
Merge Bugfix into Dev
Maffooch authored Feb 6, 2023
Commit ab60d27 (parents: 27feaaa, 115cf57)
Showing 8 changed files with 55 additions and 57 deletions.
dojo/finding/helper.py (2 additions, 0 deletions)
@@ -229,6 +229,8 @@ def get_group_by_group_name(finding, finding_group_by_option):
     elif finding_group_by_option == 'file_path':
         if finding.file_path:
             group_name = 'Filepath %s' % (finding.file_path)
+    elif finding_group_by_option == 'finding_title':
+        group_name = finding.title
     else:
         raise ValueError("Invalid group_by option %s" % finding_group_by_option)

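For illustration (not part of the commit): a minimal, self-contained Python sketch of how the extended grouping branch behaves. FakeFinding and group_name_for are invented stand-ins, not DefectDojo code, and only the two branches shown above are reproduced.

# Illustration only: stand-in object plus a re-implementation of the branching shown above.
class FakeFinding:
    def __init__(self, title, file_path=None):
        self.title = title
        self.file_path = file_path


def group_name_for(finding, finding_group_by_option):
    group_name = None
    if finding_group_by_option == 'file_path':
        if finding.file_path:
            group_name = 'Filepath %s' % (finding.file_path)
    elif finding_group_by_option == 'finding_title':
        group_name = finding.title
    else:
        raise ValueError("Invalid group_by option %s" % finding_group_by_option)
    return group_name


print(group_name_for(FakeFinding("SQL Injection in login form"), 'finding_title'))
# -> SQL Injection in login form
print(group_name_for(FakeFinding("XSS", file_path="dojo/views.py"), 'file_path'))
# -> Filepath dojo/views.py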
dojo/importers/importer/importer.py (2 additions, 9 deletions)
@@ -132,16 +132,9 @@ def process_parsed_findings(self, test, parsed_findings, scan_type, user, active
                 burp_rr.clean()
                 burp_rr.save()

-            if settings.ASYNC_FINDING_IMPORT:
-                importer_utils.chunk_endpoints_and_disperse(item, test, item.unsaved_endpoints)
-            else:
-                importer_utils.add_endpoints_to_unsaved_finding(item, test, item.unsaved_endpoints, sync=True)
-
+            importer_utils.chunk_endpoints_and_disperse(item, test, item.unsaved_endpoints)
             if endpoints_to_add:
-                if settings.ASYNC_FINDING_IMPORT:
-                    importer_utils.chunk_endpoints_and_disperse(item, test, endpoints_to_add)
-                else:
-                    importer_utils.add_endpoints_to_unsaved_finding(item, test, endpoints_to_add, sync=True)
+                importer_utils.chunk_endpoints_and_disperse(item, test, endpoints_to_add)

             if item.unsaved_tags:
                 item.tags = item.unsaved_tags
dojo/importers/reimporter/reimporter.py (7 additions, 34 deletions)
@@ -12,6 +12,7 @@
 from django.core.files.base import ContentFile
 from django.utils import timezone
 from dojo.importers import utils as importer_utils
+from dojo.importers.reimporter import utils as reimporter_utils
 from dojo.models import (BurpRawRequestResponse, FileUpload, Finding,
                          Notes, Test_Import)
 from dojo.tools.factory import get_parser
@@ -42,10 +43,6 @@ def process_parsed_findings(self, test, parsed_findings, scan_type, user, active
         unchanged_items = []

         logger.debug('starting reimport of %i items.', len(items) if items else 0)
-        from dojo.importers.reimporter.utils import (
-            match_new_finding_to_existing_finding,
-            update_endpoint_status,
-            reactivate_endpoint_status)
         deduplication_algorithm = test.deduplication_algorithm

         i = 0
@@ -84,7 +81,7 @@ def process_parsed_findings(self, test, parsed_findings, scan_type, user, active
                 item.hash_code = item.compute_hash_code()
                 deduplicationLogger.debug("item's hash_code: %s", item.hash_code)

-            findings = match_new_finding_to_existing_finding(item, test, deduplication_algorithm)
+            findings = reimporter_utils.match_new_finding_to_existing_finding(item, test, deduplication_algorithm)

             deduplicationLogger.debug('found %i findings matching with current new finding', len(findings))

@@ -146,19 +143,7 @@ def process_parsed_findings(self, test, parsed_findings, scan_type, user, active
                     endpoint_statuses = finding.status_finding.exclude(Q(false_positive=True) |
                                                                        Q(out_of_scope=True) |
                                                                        Q(risk_accepted=True))
-
-                    # Determine if this can be run async
-                    if settings.ASYNC_FINDING_IMPORT:
-                        chunk_list = importer_utils.chunk_list(endpoint_statuses)
-                        # If there is only one chunk, then do not bother with async
-                        if len(chunk_list) < 2:
-                            reactivate_endpoint_status(endpoint_statuses, sync=True)
-                        logger.debug('IMPORT_SCAN: Split endpoints into ' + str(len(chunk_list)) + ' chunks of ' + str(chunk_list[0]))
-                        # First kick off all the workers
-                        for endpoint_status_list in chunk_list:
-                            reactivate_endpoint_status(endpoint_status_list, sync=False)
-                    else:
-                        reactivate_endpoint_status(endpoint_statuses, sync=True)
+                    reimporter_utils.chunk_endpoints_and_reactivate(endpoint_statuses)

                     finding.notes.add(note)
                     reactivated_items.append(finding)
@@ -190,7 +175,7 @@ def process_parsed_findings(self, test, parsed_findings, scan_type, user, active
                    unchanged_count += 1
                if finding.dynamic_finding:
                    logger.debug("Re-import found an existing dynamic finding for this new finding. Checking the status of endpoints")
-                   update_endpoint_status(finding, item, user)
+                   reimporter_utils.update_endpoint_status(finding, item, user)
            else:
                # no existing finding found
                item.reporter = user
@@ -247,16 +232,9 @@ def process_parsed_findings(self, test, parsed_findings, scan_type, user, active
             # for existing findings: make sure endpoints are present or created
             if finding:
                 finding_count += 1
-                if settings.ASYNC_FINDING_IMPORT:
-                    importer_utils.chunk_endpoints_and_disperse(finding, test, item.unsaved_endpoints)
-                else:
-                    importer_utils.add_endpoints_to_unsaved_finding(finding, test, item.unsaved_endpoints, sync=True)
-
+                importer_utils.chunk_endpoints_and_disperse(finding, test, item.unsaved_endpoints)
                 if endpoints_to_add:
-                    if settings.ASYNC_FINDING_IMPORT:
-                        importer_utils.chunk_endpoints_and_disperse(finding, test, endpoints_to_add)
-                    else:
-                        importer_utils.add_endpoints_to_unsaved_finding(finding, test, endpoints_to_add, sync=True)
+                    importer_utils.chunk_endpoints_and_disperse(finding, test, endpoints_to_add)

                 if item.unsaved_tags:
                     finding.tags = item.unsaved_tags
@@ -332,12 +310,7 @@ def close_old_findings(self, test, to_mitigate, scan_date_time, user, push_to_ji
                finding.active = False

                endpoint_status = finding.status_finding.all()
-               for status in endpoint_status:
-                   status.mitigated_by = user
-                   status.mitigated_time = timezone.now()
-                   status.mitigated = True
-                   status.last_modified = timezone.now()
-                   status.save()
+               reimporter_utils.mitigate_endpoint_status(endpoint_status, user, kwuser=user, sync=True)

                # to avoid pushing a finding group multiple times, we push those outside of the loop
                if is_finding_groups_enabled() and finding.finding_group:
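For reference, the inline loop removed from close_old_findings() above set the fields below; this is a minimal sketch of that bulk-mitigation behaviour, which the shared reimporter_utils.mitigate_endpoint_status() helper is expected to cover (the helper's exact signature and async handling are not shown in this diff, so mitigate_statuses here is only an illustrative stand-in).

from django.utils import timezone


def mitigate_statuses(endpoint_statuses, user):
    # Mirrors the removed inline loop: mark each endpoint status as mitigated by `user`.
    for status in endpoint_statuses:
        status.mitigated_by = user
        status.mitigated_time = timezone.now()
        status.mitigated = True
        status.last_modified = timezone.now()
        status.save()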
dojo/importers/reimporter/utils.py (16 additions, 1 deletion)
@@ -95,6 +95,21 @@ def mitigate_endpoint_status(endpoint_status_list, user, **kwargs):
         endpoint_status.save()


+def chunk_endpoints_and_reactivate(endpoint_statuses, **kwargs):
+    # Determine if this can be run async
+    if settings.ASYNC_FINDING_IMPORT:
+        chunk_list = importer_utils.chunk_list(endpoint_statuses)
+        # If there is only one chunk, then do not bother with async
+        if len(chunk_list) < 2:
+            reactivate_endpoint_status(endpoint_statuses, sync=True)
+        logger.debug('IMPORT_SCAN: Split endpoints into ' + str(len(chunk_list)) + ' chunks of ' + str(chunk_list[0]))
+        # First kick off all the workers
+        for endpoint_status_list in chunk_list:
+            reactivate_endpoint_status(endpoint_status_list, sync=False)
+    else:
+        reactivate_endpoint_status(endpoint_statuses, sync=True)
+
+
 @dojo_async_task
 @app.task()
 def reactivate_endpoint_status(endpoint_status_list, **kwargs):
@@ -189,7 +204,7 @@ def get_or_create_product(product_name=None, product_type_name=None, auto_create
         member.role = Role.objects.get(is_owner=True)
         member.save()

-    product = Product.objects.create(name=product_name, prod_type=product_type)
+    product = Product.objects.create(name=product_name, prod_type=product_type, description=product_name)
     member = Product_Member()
     member.user = get_current_user()
     member.product = product
dojo/importers/utils.py (10 additions, 7 deletions)
@@ -114,14 +114,17 @@ def chunk_list(list):


 def chunk_endpoints_and_disperse(finding, test, endpoints, **kwargs):
-    chunked_list = chunk_list(endpoints)
-    # If there is only one chunk, then do not bother with async
-    if len(chunked_list) < 2:
+    if settings.ASYNC_FINDING_IMPORT:
+        chunked_list = chunk_list(endpoints)
+        # If there is only one chunk, then do not bother with async
+        if len(chunked_list) < 2:
+            add_endpoints_to_unsaved_finding(finding, test, endpoints, sync=True)
+            return []
+        # First kick off all the workers
+        for endpoints_list in chunked_list:
+            add_endpoints_to_unsaved_finding(finding, test, endpoints_list, sync=False)
+    else:
         add_endpoints_to_unsaved_finding(finding, test, endpoints, sync=True)
-        return []
-    # First kick off all the workers
-    for endpoints_list in chunked_list:
-        add_endpoints_to_unsaved_finding(finding, test, endpoints_list, sync=False)


 # Since adding a model to a ManyToMany relationship does not require an additional
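Both chunk_endpoints_and_disperse() above and the new chunk_endpoints_and_reactivate() in reimporter/utils.py follow the same pattern: the ASYNC_FINDING_IMPORT check lives inside the helper so callers no longer branch on it, a single chunk is handled synchronously, and multiple chunks are fanned out to workers. Below is a simplified, framework-free sketch of that pattern; CHUNK_SIZE, chunk_and_dispatch and the work callback are assumptions for illustration, while DefectDojo itself uses its own settings and Celery tasks.

CHUNK_SIZE = 100  # assumed value; DefectDojo reads its chunk size from settings


def chunk_list(items):
    # Split a list into fixed-size chunks.
    return [items[i:i + CHUNK_SIZE] for i in range(0, len(items), CHUNK_SIZE)]


def chunk_and_dispatch(items, work, async_enabled=True):
    # Callers invoke this unconditionally; the sync/async decision is made here.
    if not async_enabled:
        work(items, sync=True)
        return
    chunks = chunk_list(items)
    if len(chunks) < 2:
        # Only one chunk: not worth the overhead of background workers.
        work(items, sync=True)
        return
    for chunk in chunks:
        work(chunk, sync=False)  # in DefectDojo this would enqueue a Celery task


if __name__ == "__main__":
    chunk_and_dispatch(list(range(250)), lambda items, sync: print(len(items), "items, sync =", sync))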
dojo/models.py (13 additions, 3 deletions)
@@ -2860,8 +2860,15 @@ def get_file_path_with_link(self):
             return None
         if self.test.engagement.source_code_management_uri is None:
             return escape(self.file_path)
+        link = self.get_file_path_with_raw_link()
+        return create_bleached_link(link, self.file_path)
+
+    def get_file_path_with_raw_link(self):
+        if self.file_path is None:
+            return None
         link = self.test.engagement.source_code_management_uri
-        if "https://github.com/" in self.test.engagement.source_code_management_uri:
+        if (self.test.engagement.source_code_management_uri is not None
+                and "https://github.com/" in self.test.engagement.source_code_management_uri):
             if self.test.commit_hash:
                 link += '/blob/' + self.test.commit_hash + '/' + self.file_path
             elif self.test.engagement.commit_hash:
@@ -2876,7 +2883,7 @@ def get_file_path_with_link(self):
                 link += '/' + self.file_path
         if self.line:
             link = link + '#L' + str(self.line)
-        return create_bleached_link(link, self.file_path)
+        return link

     def get_references_with_links(self):
         import re
@@ -2962,7 +2969,10 @@ def get_breadcrumbs(self):

 class Finding_Group(TimeStampedModel):

-    GROUP_BY_OPTIONS = [('component_name', 'Component Name'), ('component_name+component_version', 'Component Name + Version'), ('file_path', 'File path')]
+    GROUP_BY_OPTIONS = [('component_name', 'Component Name'),
+                        ('component_name+component_version', 'Component Name + Version'),
+                        ('file_path', 'File path'),
+                        ('finding_title', 'Finding Title')]

     name = models.CharField(max_length=255, blank=False, null=False)
     test = models.ForeignKey(Test, on_delete=models.CASCADE)
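To make the get_file_path_with_link() / get_file_path_with_raw_link() split concrete, here is a worked example of the raw URL the new helper builds for a GitHub-hosted engagement. The repository URI, commit hash, file path and line number are invented values, and only the GitHub branch of the logic shown in the diff is reproduced.

# Illustration only: invented values, simplified to the GitHub branch shown above.
source_code_management_uri = "https://github.com/example-org/example-repo"
commit_hash = "27feaaa"
file_path = "dojo/models.py"
line = 2860

link = source_code_management_uri
if "https://github.com/" in source_code_management_uri and commit_hash:
    link += '/blob/' + commit_hash + '/' + file_path
if line:
    link = link + '#L' + str(line)

print(link)
# -> https://github.com/example-org/example-repo/blob/27feaaa/dojo/models.py#L2860
# get_file_path_with_link() wraps this raw link with create_bleached_link() for HTML rendering,
# while the JIRA description template (next file) inserts the raw link directly.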
dojo/templates/issue-trackers/jira_full/jira-description.tpl (3 additions, 1 deletion)
@@ -61,7 +61,9 @@
 *Source Line*: {{ finding.sast_source_line }}
 *Sink Object*: {{ finding.sast_sink_object }}
 {% elif finding.static_finding %}
-{% if finding.file_path %}
+{% if finding.file_path and finding.get_file_path_with_raw_link %}
+*Source File*: [{{ finding.file_path }} | {{ finding.get_file_path_with_raw_link }}]
+{% elif finding.file_path %}
 *Source File*: {{ finding.file_path }}
 {% endif %}
 {% if finding.line %}
dojo/templatetags/display_tags.py (2 additions, 2 deletions)
@@ -267,11 +267,11 @@ def finding_sla(finding):
                 find_sla) + ' days past SLA for ' + severity.lower() + ' findings (' + str(sla_age) + ' days since ' + finding.get_sla_start_date().strftime("%b %d, %Y") + ')'
     else:
         status = "green"
-        status_text = 'Remediation for ' + severity.lower() + ' findings in ' + str(sla_age) + ' days or less since ' + finding.get_sla_start_date().strftime("%b %d, %Y") + ')'
+        status_text = 'Remediation for ' + severity.lower() + ' findings in ' + str(sla_age) + ' days or less since ' + finding.get_sla_start_date().strftime("%b %d, %Y")
         if find_sla and find_sla < 0:
             status = "red"
             status_text = 'Overdue: Remediation for ' + severity.lower() + ' findings in ' + str(
-                sla_age) + ' days or less since ' + finding.get_sla_start_date().strftime("%b %d, %Y") + ')'
+                sla_age) + ' days or less since ' + finding.get_sla_start_date().strftime("%b %d, %Y")

     if find_sla is not None:
         title = '<a class="has-popover" data-toggle="tooltip" data-placement="bottom" title="" href="#" data-content="' + status_text + '">' \
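The display_tags.py change only drops a stray closing parenthesis from the SLA status text. A small sketch with invented values shows the difference (this is not the template tag's full output):

# Illustration only, with invented values.
severity, sla_age, sla_start = 'High', 30, 'Feb 06, 2023'

status_text = 'Remediation for ' + severity.lower() + ' findings in ' + str(sla_age) + ' days or less since ' + sla_start
print(status_text)
# Before the fix the string ended with an unmatched ')':
#   Remediation for high findings in 30 days or less since Feb 06, 2023)
# After the fix:
#   Remediation for high findings in 30 days or less since Feb 06, 2023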
