From a475cd7682e22bae55ccca43992d300815d32fb3 Mon Sep 17 00:00:00 2001 From: apallier Date: Thu, 12 Apr 2018 12:42:17 +0200 Subject: [PATCH] New option that prevents publishing blocked tests (#44) * Manage existing test run or test plan * Fill the test failure comment and duration with information from test runner * Log only last error content * Add log * Add log * Fix: In case of 'parametrize', the test status reflects the last test performed (should reflect all the tests status) Now, worst results are published at the end (sort by status) * Manage existing testrun testplan (#3) Synchronization with upstream * Merge branch 'fill-comment-in-test-result' * Synchro with upstream * Synchro with upstream * Write result except if last status is "Blocked" in TestRail * Change command line option "--tr-dont-publish-blocked" + update doc * Invert internal logic * Add a dict for test status values in TestRail * Add an explicit return of function * Fix an error in option name --- README.md | 6 ++- README.rst | 4 +- pytest_testrail/conftest.py | 9 ++++- pytest_testrail/plugin.py | 62 ++++++++++++++++++++++------ tests/test_plugin.py | 80 +++++++++++++++++++++++++------------ 5 files changed, 118 insertions(+), 43 deletions(-) diff --git a/README.md b/README.md index 654d83f..483b87b 100644 --- a/README.md +++ b/README.md @@ -106,4 +106,8 @@ py.test --testrail --tr-config=.cfg Do not check for valid SSL certificate on TestRail host --tr-close-on-complete - Close a test plan or test run on completion. \ No newline at end of file + Close a test plan or test run on completion. + --tr-dont-publish-blocked + Do not publish results of "blocked" testcases in + TestRail +``` \ No newline at end of file diff --git a/README.rst b/README.rst index b67829a..1286530 100644 --- a/README.rst +++ b/README.rst @@ -113,7 +113,9 @@ All available options host --tr-close-on-complete Close a test plan or test run on completion. 
- + --tr-dont-publish-blocked + Do not publish results of "blocked" testcases in + TestRail .. |Build Status| image:: https://travis-ci.org/dubner/pytest-testrail.svg?branch=master :target: https://travis-ci.org/dubner/pytest-testrail diff --git a/pytest_testrail/conftest.py b/pytest_testrail/conftest.py index 5a4094f..411029d 100644 --- a/pytest_testrail/conftest.py +++ b/pytest_testrail/conftest.py @@ -80,7 +80,11 @@ def pytest_addoption(parser): default=False, required=False, help='Close a test run on completion') - + group.addoption( '--tr-dont-publish-blocked', action='store_true', required=False, help='Determine if results of "blocked" testcases (in TestRail) are published or not') def pytest_configure(config): if config.getoption('--testrail'): @@ -101,7 +105,8 @@ def pytest_configure(config): run_id=config.getoption('--tr-run-id'), plan_id=config.getoption('--tr-plan-id'), version=config.getoption('--tr-version'), - close_on_complete=config.getoption('--tr-close-on-complete') + close_on_complete=config.getoption('--tr-close-on-complete'), + publish_blocked=not config.getoption('--tr-dont-publish-blocked') ), # Name of plugin instance (allow to be used by other plugins) name="pytest-testrail-instance" diff --git a/pytest_testrail/plugin.py b/pytest_testrail/plugin.py index 71b693a..1d05281 100644 --- a/pytest_testrail/plugin.py +++ b/pytest_testrail/plugin.py @@ -6,11 +6,19 @@ import re import warnings +# Reference: http://docs.gurock.com/testrail-api2/reference-statuses +TESTRAIL_TEST_STATUS = { + "passed": 1, + "blocked": 2, + "untested": 3, + "retest": 4, + "failed": 5 +} PYTEST_TO_TESTRAIL_STATUS = { - "passed": 1, - "failed": 5, - "skipped": 2, + "passed": TESTRAIL_TEST_STATUS["passed"], + "failed": TESTRAIL_TEST_STATUS["failed"], + "skipped": TESTRAIL_TEST_STATUS["blocked"], } DT_FORMAT = '%d-%m-%Y %H:%M:%S' @@ -23,6 +31,7 @@ CLOSE_TESTPLAN_URL = 'close_plan/{}' GET_TESTRUN_URL = 'get_run/{}' GET_TESTPLAN_URL = 'get_plan/{}' +GET_TESTS_URL = 
'get_tests/{}' COMMENT_SIZE_LIMIT = 4000 @@ -106,7 +115,7 @@ def get_testrail_keys(items): class PyTestRailPlugin(object): def __init__(self, client, assign_user_id, project_id, suite_id, cert_check, tr_name, run_id=0, plan_id=0, - version='', close_on_complete=False): + version='', close_on_complete=False, publish_blocked=True): self.assign_user_id = assign_user_id self.cert_check = cert_check self.client = client @@ -118,7 +127,8 @@ def __init__(self, client, assign_user_id, project_id, suite_id, cert_check, tr_ self.testplan_id = plan_id self.version = version self.close_on_complete = close_on_complete - + self.publish_blocked = publish_blocked + # pytest hooks def pytest_report_header(self, config, startdir): @@ -184,13 +194,13 @@ def pytest_sessionfinish(self, session, exitstatus): self.add_results(testrun_id) else: print('[{}] No data published'.format(TESTRAIL_PREFIX)) - + if self.close_on_complete and self.testrun_id: self.close_test_run(self.testrun_id) elif self.close_on_complete and self.testplan_id: self.close_test_plan(self.testplan_id) print('[{}] End publishing'.format(TESTRAIL_PREFIX)) - + # plugin def add_result(self, test_ids, status, comment='', duration=0): @@ -222,6 +232,17 @@ def add_results(self, testrun_id): self.results.sort(key=itemgetter('status_id')) self.results.sort(key=itemgetter('case_id')) + # Manage case of "blocked" testcases + if self.publish_blocked is False: + print('[{}] Option "Don\'t publish blocked testcases" activated'.format(TESTRAIL_PREFIX)) + blocked_tests_list = [ + test.get('case_id') for test in self.get_tests(testrun_id) + if test.get('status_id') == TESTRAIL_TEST_STATUS["blocked"] + ] + print('[{}] Blocked testcases excluded: {}'.format(TESTRAIL_PREFIX, + ', '.join(str(elt) for elt in blocked_tests_list))) + self.results = [result for result in self.results if result.get('case_id') not in blocked_tests_list] + # Publish results for result in self.results: data = {'status_id': result['status_id']} @@ -276,8 +297,8 
@@ def create_test_run( print('[{}] New testrun created with name "{}" and ID={}'.format(TESTRAIL_PREFIX, testrun_name, self.testrun_id)) - - + + def close_test_run(self, testrun_id): """ Closes testrun. @@ -293,8 +314,8 @@ def close_test_run(self, testrun_id): print('[{}] Failed to close test run: "{}"'.format(TESTRAIL_PREFIX, error)) else: print('[{}] Test run with ID={} was closed'.format(TESTRAIL_PREFIX, self.testrun_id)) - - + + def close_test_plan(self, testplan_id): """ Closes testrun. @@ -310,8 +331,8 @@ def close_test_plan(self, testplan_id): print('[{}] Failed to close test plan: "{}"'.format(TESTRAIL_PREFIX, error)) else: print('[{}] Test plan with ID={} was closed'.format(TESTRAIL_PREFIX, self.testplan_id)) - - + + def is_testrun_available(self): """ Ask if testrun is available in TestRail. @@ -365,3 +386,18 @@ def get_available_testruns(self, plan_id): if not run['is_completed']: testruns_list.append(run['id']) return testruns_list + + def get_tests(self, run_id): + """ + :return: the list of tests contained in a testrun. 
+ + """ + response = self.client.send_get( + GET_TESTS_URL.format(run_id), + cert_check=self.cert_check + ) + error = self.client.get_error(response) + if error: + print('[{}] Failed to get tests: "{}"'.format(TESTRAIL_PREFIX, error)) + return None + return response diff --git a/tests/test_plugin.py b/tests/test_plugin.py index f2e2c36..e560d86 100644 --- a/tests/test_plugin.py +++ b/tests/test_plugin.py @@ -5,7 +5,7 @@ import pytest from pytest_testrail import plugin -from pytest_testrail.plugin import PyTestRailPlugin +from pytest_testrail.plugin import PyTestRailPlugin, TESTRAIL_TEST_STATUS from pytest_testrail.testrail_api import APIClient @@ -95,18 +95,19 @@ def test_get_testrail_keys(pytest_test_items, testdir): def test_add_result(tr_plugin): - tr_plugin.add_result([1, 2], 3, comment='ERROR!', duration=3600) + status = TESTRAIL_TEST_STATUS["passed"] + tr_plugin.add_result([1, 2], status, comment='ERROR!', duration=3600) expected_results = [ { 'case_id': 1, - 'status_id': 3, + 'status_id': status, 'comment': "ERROR!", 'duration': 3600 }, { 'case_id': 2, - 'status_id': 3, + 'status_id': status, 'comment': "ERROR!", 'duration': 3600 } @@ -144,13 +145,13 @@ def get_result(self): expected_results = [ { 'case_id': 1234, - 'status_id': 5, + 'status_id': TESTRAIL_TEST_STATUS["failed"], 'comment': "An error", 'duration': 2 }, { 'case_id': 5678, - 'status_id': 5, + 'status_id': TESTRAIL_TEST_STATUS["failed"], 'comment': "An error", 'duration': 2 } @@ -160,40 +161,40 @@ def get_result(self): def test_pytest_sessionfinish(api_client, tr_plugin): tr_plugin.results = [ - {'case_id': 1234, 'status_id': 5, 'duration': 2.6}, - {'case_id': 5678, 'status_id': 2, 'comment': "An error", 'duration': 0.1}, - {'case_id': 1234, 'status_id': 1, 'duration': 2.6} + {'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["failed"], 'duration': 2.6}, + {'case_id': 5678, 'status_id': TESTRAIL_TEST_STATUS["blocked"], 'comment': "An error", 'duration': 0.1}, + {'case_id': 1234, 'status_id': 
TESTRAIL_TEST_STATUS["passed"], 'duration': 2.6} ] tr_plugin.testrun_id = 10 tr_plugin.pytest_sessionfinish(None, 0) - expected_uri = plugin.ADD_RESULT_URL.format(10, 1234) - expected_data = {'status_id': 1, 'version': '1.0.0.0', 'elapsed': '3s'} + expected_uri = plugin.ADD_RESULT_URL.format(tr_plugin.testrun_id, 1234) + expected_data = {'status_id': TESTRAIL_TEST_STATUS["passed"], 'version': '1.0.0.0', 'elapsed': '3s'} api_client.send_post.call_args_list[0] == call(expected_uri, expected_data, cert_check=True) - expected_uri = plugin.ADD_RESULT_URL.format(10, 1234) - expected_data = {'status_id': 5, 'version': '1.0.0.0', 'elapsed': '3s'} + expected_uri = plugin.ADD_RESULT_URL.format(tr_plugin.testrun_id, 1234) + expected_data = {'status_id': TESTRAIL_TEST_STATUS["failed"], 'version': '1.0.0.0', 'elapsed': '3s'} api_client.send_post.call_args_list[1] == call(expected_uri, expected_data, cert_check=True) - expected_uri = plugin.ADD_RESULT_URL.format(10, 5678) - expected_data = {'status_id': 2, 'version': '1.0.0.0', 'elapsed': '1s', + expected_uri = plugin.ADD_RESULT_URL.format(tr_plugin.testrun_id, 5678) + expected_data = {'status_id': TESTRAIL_TEST_STATUS["blocked"], 'version': '1.0.0.0', 'elapsed': '1s', 'comment': "# Pytest result: #\n An error"} api_client.send_post.call_args_list[2] == call(expected_uri, expected_data, cert_check=True) def test_pytest_sessionfinish_testplan(api_client, tr_plugin): tr_plugin.results = [ - {'case_id': 5678, 'status_id': 2, 'comment': "An error", 'duration': 0.1}, - {'case_id': 1234, 'status_id': 1, 'duration': 2.6} + {'case_id': 5678, 'status_id': TESTRAIL_TEST_STATUS["blocked"], 'comment': "An error", 'duration': 0.1}, + {'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["passed"], 'duration': 2.6} ] tr_plugin.testplan_id = 100 tr_plugin.testrun_id = 0 api_client.send_get.return_value = TESTPLAN tr_plugin.pytest_sessionfinish(None, 0) - expected_data_1234 = {'status_id': 1, 'version': '1.0.0.0', 'elapsed': '3s'} - 
expected_data_5678 = {'status_id': 2, 'version': '1.0.0.0', 'elapsed': '1s', + expected_data_1234 = {'status_id': TESTRAIL_TEST_STATUS["passed"], 'version': '1.0.0.0', 'elapsed': '3s'} + expected_data_5678 = {'status_id': TESTRAIL_TEST_STATUS["blocked"], 'version': '1.0.0.0', 'elapsed': '1s', 'comment': "# Pytest result: #\n An error"} print(api_client.send_post.call_args_list) @@ -263,9 +264,9 @@ def test_get_available_testruns(api_client, tr_plugin): def test_close_test_run(api_client, tr_plugin): tr_plugin.results = [ - {'case_id': 1234, 'status_id': 5, 'duration': 2.6}, - {'case_id': 5678, 'status_id': 2, 'comment': "An error", 'duration': 0.1}, - {'case_id': 1234, 'status_id': 1, 'duration': 2.6} + {'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["failed"], 'duration': 2.6}, + {'case_id': 5678, 'status_id': TESTRAIL_TEST_STATUS["blocked"], 'comment': "An error", 'duration': 0.1}, + {'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["passed"], 'duration': 2.6} ] tr_plugin.testrun_id = 10 tr_plugin.close_on_complete = True @@ -277,16 +278,43 @@ def test_close_test_run(api_client, tr_plugin): def test_close_test_plan(api_client, tr_plugin): tr_plugin.results = [ - {'case_id': 5678, 'status_id': 2, 'comment': "An error", 'duration': 0.1}, - {'case_id': 1234, 'status_id': 1, 'duration': 2.6} + {'case_id': 5678, 'status_id': TESTRAIL_TEST_STATUS["blocked"], 'comment': "An error", 'duration': 0.1}, + {'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["passed"], 'duration': 2.6} ] tr_plugin.testplan_id = 100 tr_plugin.testrun_id = 0 tr_plugin.close_on_complete = True - + api_client.send_get.return_value = TESTPLAN tr_plugin.pytest_sessionfinish(None, 0) expected_uri = plugin.CLOSE_TESTPLAN_URL.format(tr_plugin.testplan_id) api_client.send_post.call_args_list[3] = call(expected_uri, {}, cert_check=True) - \ No newline at end of file + + +def test_dont_publish_blocked(api_client): + """ Case: one test is blocked""" + my_plugin = PyTestRailPlugin(api_client, 
ASSIGN_USER_ID, PROJECT_ID, SUITE_ID, True, TR_NAME, + version='1.0.0.0', + publish_blocked=False + ) + + my_plugin.results = [ + {'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["blocked"]}, + {'case_id': 5678, 'status_id': TESTRAIL_TEST_STATUS["passed"]} + ] + my_plugin.testrun_id = 10 + + api_client.send_get.return_value = [ + {'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["blocked"]}, + {'case_id': 5678, 'status_id': TESTRAIL_TEST_STATUS["passed"]} + ] + + my_plugin.pytest_sessionfinish(None, 0) + + api_client.send_get.assert_called_once_with(plugin.GET_TESTS_URL.format(my_plugin.testrun_id), + cert_check=True) + expected_uri = plugin.ADD_RESULT_URL.format(my_plugin.testrun_id, 1234) + expected_data = {'status_id': TESTRAIL_TEST_STATUS["blocked"], 'version': '1.0.0.0'} + len(api_client.send_post.call_args_list) == 1 + api_client.send_post.call_args_list[0] == call(expected_uri, expected_data, cert_check=True)