New option that prevents publishing blocked tests (allankp#44)
* Manage existing test run or test plan

* Fill the test failure comment and duration with information from the test runner

* Log only the last error content

* Add log

* Add log

* Fix: with 'parametrize', the test status reflected only the last test performed, while it should reflect the status of all the tests.
  Now the worst results are published last (results are sorted by status); see the sketch at the end of this list.

* Manage existing testrun/testplan (allankp#3)

Synchronization with upstream

* Merge branch 'fill-comment-in-test-result'

* Sync with upstream

* Sync with upstream

* Write the result except when the last status in TestRail is "Blocked"

* Change the command-line option to "--tr-dont-publish-blocked" and update the documentation

* Invert internal logic

* Add a dict for test status values in TestRail

* Add an explicit return to the function

* Fix an error in the option name
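
The ordering fix described in the 'parametrize' bullet can be illustrated with a minimal, self-contained sketch (the case id and statuses below are made up; the plugin performs this sorting in add_results()):

```
# Made-up results for one parametrized test mapped to TestRail case 1234.
from operator import itemgetter

results = [
    {'case_id': 1234, 'status_id': 1},  # passed
    {'case_id': 1234, 'status_id': 5},  # failed
    {'case_id': 1234, 'status_id': 1},  # passed
]
results.sort(key=itemgetter('status_id'))  # ascending status ids: the failure (5) ends up last
results.sort(key=itemgetter('case_id'))    # stable sort: groups by case, keeps that order
print([r['status_id'] for r in results])   # -> [1, 1, 5]
```

Because the failed result is sent to TestRail last, the case ends up showing the worst outcome rather than whichever parametrized variant happened to run last.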
apallier authored and allankp committed Apr 12, 2018
1 parent 682d1ac commit a475cd7
Showing 5 changed files with 118 additions and 43 deletions.
6 changes: 5 additions & 1 deletion README.md
@@ -106,4 +106,8 @@ py.test --testrail --tr-config=<settings file>.cfg
Do not check for valid SSL certificate on TestRail
host
--tr-close-on-complete
Close a test plan or test run on completion.
Close a test plan or test run on completion.
--tr-dont-publish-blocked
Do not publish results of "blocked" testcases in
TestRail
```
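
With the option documented above, a run that skips publishing results for blocked cases could be started as follows; a minimal programmatic sketch, assuming pytest and the plugin are installed, with a made-up config file name:

```
# Illustrative only: equivalent to
# `py.test --testrail --tr-config=testrail.cfg --tr-dont-publish-blocked`
import pytest

pytest.main(["--testrail", "--tr-config=testrail.cfg", "--tr-dont-publish-blocked"])
```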
4 changes: 3 additions & 1 deletion README.rst
@@ -113,7 +113,9 @@ All available options
host
--tr-close-on-complete
Close a test plan or test run on completion.

--tr-dont-publish-blocked
Do not publish results of "blocked" testcases in
TestRail

.. |Build Status| image:: https://travis-ci.org/dubner/pytest-testrail.svg?branch=master
:target: https://travis-ci.org/dubner/pytest-testrail
9 changes: 7 additions & 2 deletions pytest_testrail/conftest.py
@@ -80,7 +80,11 @@ def pytest_addoption(parser):
default=False,
required=False,
help='Close a test run on completion')

group.addoption(
'--tr-dont-publish-blocked',
action='store_true',
required=False,
help='Determine if results of "blocked" testcases (in TestRail) are published or not')

def pytest_configure(config):
if config.getoption('--testrail'):
@@ -101,7 +105,8 @@ def pytest_configure(config):
run_id=config.getoption('--tr-run-id'),
plan_id=config.getoption('--tr-plan-id'),
version=config.getoption('--tr-version'),
close_on_complete=config.getoption('--tr-close-on-complete')
close_on_complete=config.getoption('--tr-close-on-complete'),
publish_blocked=config.getoption('--tr-dont-publish-blocked')
),
# Name of plugin instance (allow to be used by other plugins)
name="pytest-testrail-instance"
62 changes: 49 additions & 13 deletions pytest_testrail/plugin.py
@@ -6,11 +6,19 @@
import re
import warnings

# Reference: http://docs.gurock.com/testrail-api2/reference-statuses
TESTRAIL_TEST_STATUS = {
"passed": 1,
"blocked": 2,
"untested": 3,
"retest": 4,
"failed": 5
}

PYTEST_TO_TESTRAIL_STATUS = {
"passed": 1,
"failed": 5,
"skipped": 2,
"passed": TESTRAIL_TEST_STATUS["passed"],
"failed": TESTRAIL_TEST_STATUS["failed"],
"skipped": TESTRAIL_TEST_STATUS["blocked"],
}

DT_FORMAT = '%d-%m-%Y %H:%M:%S'
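
The relationship between the two dicts amounts to a simple lookup; for example (the outcome value is made up, the mapping itself comes from the code above):

```
# pytest reports "skipped" for tests it did not run; the plugin maps that
# outcome to TestRail's "blocked" status id.
outcome = "skipped"                                  # e.g. report.outcome in a pytest hook
status_id = PYTEST_TO_TESTRAIL_STATUS[outcome]
assert status_id == TESTRAIL_TEST_STATUS["blocked"]  # == 2
```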
@@ -23,6 +31,7 @@
CLOSE_TESTPLAN_URL = 'close_plan/{}'
GET_TESTRUN_URL = 'get_run/{}'
GET_TESTPLAN_URL = 'get_plan/{}'
GET_TESTS_URL = 'get_tests/{}'

COMMENT_SIZE_LIMIT = 4000

@@ -106,7 +115,7 @@ def get_testrail_keys(items):

class PyTestRailPlugin(object):
def __init__(self, client, assign_user_id, project_id, suite_id, cert_check, tr_name, run_id=0, plan_id=0,
version='', close_on_complete=False):
version='', close_on_complete=False, publish_blocked=True):
self.assign_user_id = assign_user_id
self.cert_check = cert_check
self.client = client
@@ -118,7 +127,8 @@ def __init__(self, client, assign_user_id, project_id, suite_id, cert_check, tr_
self.testplan_id = plan_id
self.version = version
self.close_on_complete = close_on_complete

self.publish_blocked = publish_blocked

# pytest hooks

def pytest_report_header(self, config, startdir):
@@ -184,13 +194,13 @@ def pytest_sessionfinish(self, session, exitstatus):
self.add_results(testrun_id)
else:
print('[{}] No data published'.format(TESTRAIL_PREFIX))

if self.close_on_complete and self.testrun_id:
self.close_test_run(self.testrun_id)
elif self.close_on_complete and self.testplan_id:
self.close_test_plan(self.testplan_id)
print('[{}] End publishing'.format(TESTRAIL_PREFIX))

# plugin

def add_result(self, test_ids, status, comment='', duration=0):
@@ -222,6 +232,17 @@ def add_results(self, testrun_id):
self.results.sort(key=itemgetter('status_id'))
self.results.sort(key=itemgetter('case_id'))

# Manage case of "blocked" testcases
if self.publish_blocked is False:
print('[{}] Option "Don\'t publish blocked testcases" activated'.format(TESTRAIL_PREFIX))
blocked_tests_list = [
test.get('case_id') for test in self.get_tests(testrun_id)
if test.get('status_id') == TESTRAIL_TEST_STATUS["blocked"]
]
print('[{}] Blocked testcases excluded: {}'.format(TESTRAIL_PREFIX,
', '.join(str(elt) for elt in blocked_tests_list)))
self.results = [result for result in self.results if result.get('case_id') not in blocked_tests_list]

# Publish results
for result in self.results:
data = {'status_id': result['status_id']}
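
Reduced to a standalone sketch with made-up data, the exclusion step above works like this:

```
# Results collected during the session and the case ids that get_tests()
# reported as "blocked" in TestRail (all values are illustrative).
results = [
    {'case_id': 1234, 'status_id': 1},
    {'case_id': 5678, 'status_id': 5},
]
blocked_tests_list = [1234]
results = [r for r in results if r['case_id'] not in blocked_tests_list]
print(results)  # only case 5678 remains to be published
```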
@@ -276,8 +297,8 @@ def create_test_run(
print('[{}] New testrun created with name "{}" and ID={}'.format(TESTRAIL_PREFIX,
testrun_name,
self.testrun_id))


def close_test_run(self, testrun_id):
"""
Closes testrun.
@@ -293,8 +314,8 @@ def close_test_run(self, testrun_id):
print('[{}] Failed to close test run: "{}"'.format(TESTRAIL_PREFIX, error))
else:
print('[{}] Test run with ID={} was closed'.format(TESTRAIL_PREFIX, self.testrun_id))


def close_test_plan(self, testplan_id):
"""
Closes testrun.
@@ -310,8 +331,8 @@ def close_test_plan(self, testplan_id):
print('[{}] Failed to close test plan: "{}"'.format(TESTRAIL_PREFIX, error))
else:
print('[{}] Test plan with ID={} was closed'.format(TESTRAIL_PREFIX, self.testplan_id))


def is_testrun_available(self):
"""
Ask if testrun is available in TestRail.
@@ -365,3 +386,18 @@ def get_available_testruns(self, plan_id):
if not run['is_completed']:
testruns_list.append(run['id'])
return testruns_list

def get_tests(self, run_id):
"""
:return: the list of tests containing in a testrun.
"""
response = self.client.send_get(
GET_TESTS_URL.format(run_id),
cert_check=self.cert_check
)
error = self.client.get_error(response)
if error:
print('[{}] Failed to get tests: "{}"'.format(TESTRAIL_PREFIX, error))
return None
return response
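
For reference, a hedged sketch of how add_results() uses this helper; the response shape (a list of dicts carrying 'case_id' and 'status_id') is inferred from that call site, and my_plugin / testrun_id are placeholders:

```
# Collect the case ids whose current TestRail status is "blocked".
tests = my_plugin.get_tests(testrun_id) or []  # get_tests() returns None on API error
blocked_ids = [t.get('case_id') for t in tests
               if t.get('status_id') == TESTRAIL_TEST_STATUS["blocked"]]
```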
80 changes: 54 additions & 26 deletions tests/test_plugin.py
@@ -5,7 +5,7 @@
import pytest

from pytest_testrail import plugin
from pytest_testrail.plugin import PyTestRailPlugin
from pytest_testrail.plugin import PyTestRailPlugin, TESTRAIL_TEST_STATUS
from pytest_testrail.testrail_api import APIClient


@@ -95,18 +95,19 @@ def test_get_testrail_keys(pytest_test_items, testdir):


def test_add_result(tr_plugin):
tr_plugin.add_result([1, 2], 3, comment='ERROR!', duration=3600)
status = TESTRAIL_TEST_STATUS["passed"]
tr_plugin.add_result([1, 2], status, comment='ERROR!', duration=3600)

expected_results = [
{
'case_id': 1,
'status_id': 3,
'status_id': status,
'comment': "ERROR!",
'duration': 3600
},
{
'case_id': 2,
'status_id': 3,
'status_id': status,
'comment': "ERROR!",
'duration': 3600
}
@@ -144,13 +145,13 @@ def get_result(self):
expected_results = [
{
'case_id': 1234,
'status_id': 5,
'status_id': TESTRAIL_TEST_STATUS["failed"],
'comment': "An error",
'duration': 2
},
{
'case_id': 5678,
'status_id': 5,
'status_id': TESTRAIL_TEST_STATUS["failed"],
'comment': "An error",
'duration': 2
}
@@ -160,40 +161,40 @@ def get_result(self):

def test_pytest_sessionfinish(api_client, tr_plugin):
tr_plugin.results = [
{'case_id': 1234, 'status_id': 5, 'duration': 2.6},
{'case_id': 5678, 'status_id': 2, 'comment': "An error", 'duration': 0.1},
{'case_id': 1234, 'status_id': 1, 'duration': 2.6}
{'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["failed"], 'duration': 2.6},
{'case_id': 5678, 'status_id': TESTRAIL_TEST_STATUS["blocked"], 'comment': "An error", 'duration': 0.1},
{'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["passed"], 'duration': 2.6}
]
tr_plugin.testrun_id = 10

tr_plugin.pytest_sessionfinish(None, 0)

expected_uri = plugin.ADD_RESULT_URL.format(10, 1234)
expected_data = {'status_id': 1, 'version': '1.0.0.0', 'elapsed': '3s'}
expected_uri = plugin.ADD_RESULT_URL.format(tr_plugin.testrun_id, 1234)
expected_data = {'status_id': TESTRAIL_TEST_STATUS["passed"], 'version': '1.0.0.0', 'elapsed': '3s'}
api_client.send_post.call_args_list[0] == call(expected_uri, expected_data, cert_check=True)

expected_uri = plugin.ADD_RESULT_URL.format(10, 1234)
expected_data = {'status_id': 5, 'version': '1.0.0.0', 'elapsed': '3s'}
expected_uri = plugin.ADD_RESULT_URL.format(tr_plugin.testrun_id, 1234)
expected_data = {'status_id': TESTRAIL_TEST_STATUS["failed"], 'version': '1.0.0.0', 'elapsed': '3s'}
api_client.send_post.call_args_list[1] == call(expected_uri, expected_data, cert_check=True)

expected_uri = plugin.ADD_RESULT_URL.format(10, 5678)
expected_data = {'status_id': 2, 'version': '1.0.0.0', 'elapsed': '1s',
expected_uri = plugin.ADD_RESULT_URL.format(tr_plugin.testrun_id, 5678)
expected_data = {'status_id': TESTRAIL_TEST_STATUS["blocked"], 'version': '1.0.0.0', 'elapsed': '1s',
'comment': "# Pytest result: #\n An error"}
api_client.send_post.call_args_list[2] == call(expected_uri, expected_data, cert_check=True)


def test_pytest_sessionfinish_testplan(api_client, tr_plugin):
tr_plugin.results = [
{'case_id': 5678, 'status_id': 2, 'comment': "An error", 'duration': 0.1},
{'case_id': 1234, 'status_id': 1, 'duration': 2.6}
{'case_id': 5678, 'status_id': TESTRAIL_TEST_STATUS["blocked"], 'comment': "An error", 'duration': 0.1},
{'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["passed"], 'duration': 2.6}
]
tr_plugin.testplan_id = 100
tr_plugin.testrun_id = 0

api_client.send_get.return_value = TESTPLAN
tr_plugin.pytest_sessionfinish(None, 0)
expected_data_1234 = {'status_id': 1, 'version': '1.0.0.0', 'elapsed': '3s'}
expected_data_5678 = {'status_id': 2, 'version': '1.0.0.0', 'elapsed': '1s',
expected_data_1234 = {'status_id': TESTRAIL_TEST_STATUS["passed"], 'version': '1.0.0.0', 'elapsed': '3s'}
expected_data_5678 = {'status_id': TESTRAIL_TEST_STATUS["blocked"], 'version': '1.0.0.0', 'elapsed': '1s',
'comment': "# Pytest result: #\n An error"}

print(api_client.send_post.call_args_list)
@@ -263,9 +264,9 @@ def test_get_available_testruns(api_client, tr_plugin):

def test_close_test_run(api_client, tr_plugin):
tr_plugin.results = [
{'case_id': 1234, 'status_id': 5, 'duration': 2.6},
{'case_id': 5678, 'status_id': 2, 'comment': "An error", 'duration': 0.1},
{'case_id': 1234, 'status_id': 1, 'duration': 2.6}
{'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["failed"], 'duration': 2.6},
{'case_id': 5678, 'status_id': TESTRAIL_TEST_STATUS["blocked"], 'comment': "An error", 'duration': 0.1},
{'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["passed"], 'duration': 2.6}
]
tr_plugin.testrun_id = 10
tr_plugin.close_on_complete = True
@@ -277,16 +278,43 @@ def test_close_test_run(api_client, tr_plugin):

def test_close_test_plan(api_client, tr_plugin):
tr_plugin.results = [
{'case_id': 5678, 'status_id': 2, 'comment': "An error", 'duration': 0.1},
{'case_id': 1234, 'status_id': 1, 'duration': 2.6}
{'case_id': 5678, 'status_id': TESTRAIL_TEST_STATUS["blocked"], 'comment': "An error", 'duration': 0.1},
{'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["passed"], 'duration': 2.6}
]
tr_plugin.testplan_id = 100
tr_plugin.testrun_id = 0
tr_plugin.close_on_complete = True

api_client.send_get.return_value = TESTPLAN
tr_plugin.pytest_sessionfinish(None, 0)

expected_uri = plugin.CLOSE_TESTPLAN_URL.format(tr_plugin.testplan_id)
api_client.send_post.call_args_list[3] = call(expected_uri, {}, cert_check=True)



def test_dont_publish_blocked(api_client):
""" Case: one test is blocked"""
my_plugin = PyTestRailPlugin(api_client, ASSIGN_USER_ID, PROJECT_ID, SUITE_ID, True, TR_NAME,
version='1.0.0.0',
publish_blocked=False
)

my_plugin.results = [
{'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["blocked"]},
{'case_id': 5678, 'status_id': TESTRAIL_TEST_STATUS["passed"]}
]
my_plugin.testrun_id = 10

api_client.send_get.return_value = [
{'case_id': 1234, 'status_id': TESTRAIL_TEST_STATUS["blocked"]},
{'case_id': 5678, 'status_id': TESTRAIL_TEST_STATUS["passed"]}
]

my_plugin.pytest_sessionfinish(None, 0)

api_client.send_get.assert_called_once_with(plugin.GET_TESTS_URL.format(my_plugin.testrun_id),
cert_check=True)
expected_uri = plugin.ADD_RESULT_URL.format(my_plugin.testrun_id, 1234)
expected_data = {'status_id': TESTRAIL_TEST_STATUS["blocked"], 'version': '1.0.0.0'}
len(api_client.send_post.call_args_list) == 1
api_client.send_post.call_args_list[0] == call(expected_uri, expected_data, cert_check=True)
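
To run only the new test locally, one could target it by node id; the module path comes from the diff header above and the invocation is illustrative:

```
# Hypothetical invocation targeting only the new test.
import pytest

pytest.main(["tests/test_plugin.py::test_dont_publish_blocked", "-q"])
```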
