# Authors: Sylvain MARIE <sylvain.marie@se.com>
# + All contributors to <https://github.com/smarie/python-pytest-steps>
#
# License: 3-clause BSD, <https://github.com/smarie/python-pytest-steps/blob/master/LICENSE>

from sys import version_info

try:  # python 3.2+
    from functools import lru_cache
except ImportError:
    from functools32 import lru_cache

try:  # python 3.3+
    from inspect import signature, Parameter
except ImportError:
    from funcsigs import signature, Parameter

from inspect import getmodule
from makefun import wraps, add_signature_parameters, with_signature

import pytest
from .steps_common import create_pytest_param_str_id, get_fixture_or_param_value, get_pytest_node_hash_id


class StepsDataHolder:
    """
    An object that is passed along the various steps of your tests.
    You can put intermediate results in here, and find them in the following steps.

    Note: you can use `vars(results)` to see the available results.
    """
    pass
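

# Illustrative usage sketch (hypothetical step functions, following the 'parametrize' mode
# documented in the project README): each step receives the shared holder and can read what
# previous steps stored.
#
#     def step_a(steps_data):
#         steps_data.intermediate = 1          # store a result for later steps
#
#     def step_b(steps_data):
#         assert steps_data.intermediate == 1  # read it back in a later step
#
#     @test_steps(step_a, step_b)
#     def test_suite(test_step, steps_data):
#         test_step(steps_data)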


STEP_SUCCESS_FIELD = "__test_step_successful_for__"


def get_parametrize_decorator(steps, steps_data_holder_name, test_step_argname):
    """
    Subroutine of `pytest_steps`, used to perform the test function parametrization when the test step mode is
    'parametrize'. See `pytest_steps` for details.

    :param steps: the sequence of test steps (typically functions) to parametrize the test with.
    :param steps_data_holder_name: the name of the test function argument through which a shared `StepsDataHolder`
        is injected.
    :param test_step_argname: the name of the test function argument that receives the current step.
    :return: the parametrization decorator to apply to the test function.
    """
    def steps_decorator(test_func):
        """
        The generated test function decorator.

        It is equivalent to `@pytest.mark.parametrize(test_step_argname, steps, ids=step_ids)`, plus the dynamic
        creation of a cached fixture sharing a `StepsDataHolder` across steps when the test function requests one.

        :param test_func: the test function to decorate.
        :return: the wrapped, parametrized test function.
        """

        # Step ids
        step_ids = [create_pytest_param_str_id(f) for f in steps]

        # Depending on the presence of steps_data_holder_name in the signature, create a cached fixture for steps data
        s = signature(test_func)
        if steps_data_holder_name in s.parameters:
            # the user wishes to share results across test steps. Create a cached fixture
            @lru_cache(maxsize=None)
            def get_results_holder(**kwargs):
                """
                A factory for the StepsDataHolder objects. Since it uses @lru_cache, the same StepsDataHolder will be
                returned when the keyword arguments are the same.

                :param kwargs: the keyword arguments identifying the test (see `results` below).
                :return: a new or cached `StepsDataHolder`.
                """
                return StepsDataHolder()  # TODO use Munch or MaxiMunch from `mixture` project, when publicly available?
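
            # Illustrative caching behavior (hypothetical ids): thanks to @lru_cache,
            #     get_results_holder(id='a1') is get_results_holder(id='a1')  # True: same holder returned
            #     get_results_holder(id='a1') is get_results_holder(id='b2')  # False: distinct holders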

            def results(request):
                """
                The fixture for the StepsDataHolder.

                It is function-scoped (so it is called for each step of each param combination)
                but it implements an intelligent cache so that the same StepsDataHolder object is returned across all
                test steps belonging to the same param combination.

                :param request: the pytest request object.
                :return: the shared `StepsDataHolder` for this param combination.
                """
                # Get a good unique identifier of the test.
                # The id should be different every time anything changes, except when the test step changes.
                # Note: when the id was using not only param values but also fixture values we had to discard
                # steps_data_holder_name and 'request'. But that's not the case anymore, simply discard the "test step" param
                test_id = get_pytest_node_hash_id(request.node, params_to_ignore={test_step_argname})

                # Get or create the cached Result holder for this combination of parameters
                return get_results_holder(id=test_id)

            # Create a fixture with a custom name: this seems to work also for old pytest versions
            results.__name__ = steps_data_holder_name
            results = pytest.fixture(results)

            # Add the fixture dynamically: we have to add it to the function holder module as explained in
            # https://github.com/pytest-dev/pytest/issues/2424
            module = getmodule(test_func)
            if steps_data_holder_name not in dir(module):
                setattr(module, steps_data_holder_name, results)
            else:
                raise ValueError("The {} fixture already exists in module {}: please specify a different "
                                 "`steps_data_holder_name` in `@test_steps`".format(steps_data_holder_name, module))

        # Parametrize the function with the test steps
        parametrizer = pytest.mark.parametrize(test_step_argname, steps, ids=step_ids)

        # We will expose a new signature with an additional 'request' argument if needed
        orig_sig = signature(test_func)
        func_needs_request = 'request' in orig_sig.parameters
        if not func_needs_request:
            # add the request parameter last, as the first may be 'self'
            new_sig = add_signature_parameters(orig_sig, last=Parameter('request',
                                                                        kind=Parameter.POSITIONAL_OR_KEYWORD))
        else:
            new_sig = orig_sig

        # Finally, check whether some steps are marked as having a dependency
        use_dependency = any(hasattr(step, DEPENDS_ON_FIELD) for step in steps)
        if not use_dependency:
            # no dependencies: no need to do complex things.
            # Create a light function wrapper that will allow for manual execution
            @wraps(test_func, new_sig=new_sig)
            def wrapped_test_function(*args, **kwargs):
                request = kwargs['request'] if func_needs_request else kwargs.pop('request')
                if request is None:
                    # manual call (maybe for pre-loading?): ability to execute several steps
                    _execute_manually(test_func, s, test_step_argname, step_ids, steps, args, kwargs)
                else:
                    return test_func(*args, **kwargs)
        else:
            # Create a test function wrapper that will replace the test steps with monitored ones before injecting them
            @wraps(test_func, new_sig=new_sig)
            def wrapped_test_function(*args, **kwargs):
                """Executes the current step only if its dependencies are correct, and registers its execution result"""
                request = kwargs['request'] if func_needs_request else kwargs.pop('request')
                if request is None:
                    # manual call (maybe for pre-loading?): no dependency management, ability to execute several steps
                    _execute_manually(test_func, s, test_step_argname, step_ids, steps, args, kwargs)
                else:
                    # (a) retrieve the "current step" function
                    current_step_fun = get_fixture_or_param_value(request, test_step_argname)

                    # Get the unique id that is shared between the steps of the same execution
                    # Note: when the id was using not only param values but also fixture values we had to discard
                    # steps_data_holder_name and 'request'. But that's not the case anymore, simply discard the "test step"
                    test_id_without_steps = get_pytest_node_hash_id(request.node, params_to_ignore={test_step_argname})

                    # Make sure that it has a field to store its execution success
                    if not hasattr(current_step_fun, STEP_SUCCESS_FIELD):
                        # this is a dict where the key is the `test_id_without_steps` and the value is a boolean
                        setattr(current_step_fun, STEP_SUCCESS_FIELD, dict())
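
                    # Illustrative shape of the success record (hypothetical values): after `step_b`
                    # succeeds for the test whose hash id is 'a1b2c3', we would have:
                    #     step_b.__test_step_successful_for__ == {'a1b2c3': True}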

                    # (b) skip or fail it if needed
                    dependencies, should_fail = getattr(current_step_fun, DEPENDS_ON_FIELD, ([], False))
                    # -- check that dependencies have all run (execution order is correct)
                    if not all(hasattr(step, STEP_SUCCESS_FIELD) for step in dependencies):
                        raise ValueError("Test step {} depends on another step that has not yet been executed. In "
                                         "the current version the steps execution order is manual, make sure it is "
                                         "correct.".format(current_step_fun.__name__))
                    # -- check that dependencies all ran with success
                    deps_successes = {step: getattr(step, STEP_SUCCESS_FIELD).get(test_id_without_steps, False)
                                      for step in dependencies}
                    failed_deps = [d.__name__ for d, res in deps_successes.items() if res is False]
                    if not all(deps_successes.values()):
                        msg = "This test step depends on other steps, and the following have failed: %s" % failed_deps
                        if should_fail:
                            pytest.fail(msg)
                        else:
                            pytest.skip(msg)

                    # (c) execute the test function for this step
                    res = test_func(*args, **kwargs)

                    # (d) declare execution as a success
                    getattr(current_step_fun, STEP_SUCCESS_FIELD)[test_id_without_steps] = True

                    return res

        # With this hack we will be ordered correctly by pytest https://github.com/pytest-dev/pytest/issues/4429
        wrapped_test_function.place_as = test_func

        # finally apply the parametrizer
        wrapped_parametrized_test_function = parametrizer(wrapped_test_function)
        return wrapped_parametrized_test_function

    return steps_decorator
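

# Illustrative sketch (hypothetical names): for steps `step_a` and `step_b` and the default
# argument names, the decorator built by `get_parametrize_decorator` behaves roughly like
#
#     @pytest.mark.parametrize('test_step', [step_a, step_b], ids=['step_a', 'step_b'])
#     def test_suite(test_step, steps_data, request):
#         ...
#
# where `steps_data` is resolved by the dynamically-registered cached fixture, so pytest runs
# the test once per step while the same holder persists across those runs.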


def _execute_manually(test_func, s, test_step_argname, all_step_ids, all_steps, args, kwargs):
    """
    Internal utility method to execute all (or a subset of the) steps of a test function manually.

    :param test_func: the decorated test function.
    :param s: the signature of `test_func`.
    :param test_step_argname: the name of the test function argument that receives the current step.
    :param all_step_ids: the ids of all declared steps.
    :param all_steps: all declared step objects.
    :param args: the positional arguments received in the manual call.
    :param kwargs: the keyword arguments received in the manual call.
    :return:
    """
    bound = s.bind(*args, **kwargs)
    steps_to_run = bound.arguments[test_step_argname]
    if steps_to_run is None:
        # print("@test_steps - decorated function '%s' is being called manually. The `%s` parameter is "
        #       "set to None so all steps will be executed in order" % (f, test_step_argname))
        steps_to_run = all_steps
    else:
        # print("@test_steps - decorated function '%s' is being called manually. The `%s` parameter is "
        #       "set to %s so only these steps will be executed in order."
        #       "" % (f, test_step_argname, steps_to_run))
        if not isinstance(steps_to_run, (list, tuple)):
            steps_to_run = [steps_to_run]

    # execute the specified steps
    for step in steps_to_run:
        try:
            # if step is one of the step ids, replace it with the corresponding step object
            idx = all_step_ids.index(step)
            step = all_steps[idx]
        except ValueError:
            pass

        # set the step
        bound.arguments[test_step_argname] = step

        # execute
        test_func(*bound.args, **bound.kwargs)

    return
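

# Illustrative manual call (hypothetical test function): passing `request=None` to a
# decorated test bypasses pytest and routes through `_execute_manually`:
#
#     test_suite(test_step=None, steps_data=StepsDataHolder(), request=None)      # all steps, in order
#     test_suite(test_step='step_a', steps_data=StepsDataHolder(), request=None)  # only 'step_a'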


DEPENDS_ON_FIELD = '__depends_on__'
_FAIL_INSTEAD_OF_SKIP_DEFAULT = False


# Python 3+: load the 'more explicit api' for `test_steps`
if version_info >= (3, 0):
    new_sig = """(*steps,
                  fail_instead_of_skip: bool = _FAIL_INSTEAD_OF_SKIP_DEFAULT)"""
else:
    new_sig = None


@with_signature(new_sig)
def depends_on(*steps, **kwargs):
    """
    Decorates a test step object so as to automatically mark it as skipped (default) or failed if the dependency
    has not succeeded.

    :param steps: a list of test steps that this step depends on. They can be anything, but typically they are
        non-test (not prefixed with 'test') functions.
    :param fail_instead_of_skip: if set to True, the test will be marked as failed instead of skipped when the
        dependencies have not succeeded.
    :return:
    """
    # python 2 compatibility: no keyword arguments can follow *args.
    fail_instead_of_skip = kwargs.pop('fail_instead_of_skip', _FAIL_INSTEAD_OF_SKIP_DEFAULT)
    if len(kwargs) > 0:
        raise ValueError("Invalid argument(s): " + str(kwargs.keys()))

    def depends_on_decorator(step_func):
        """
        The generated test step decorator.

        :param step_func: the test step callable to decorate.
        :return: the same callable, with the dependency metadata attached.
        """
        if not callable(step_func):
            raise TypeError("@depends_on can only be used on test steps that are callables")

        # Remember the dependencies so that @test_steps knows
        setattr(step_func, DEPENDS_ON_FIELD, (steps, fail_instead_of_skip))

        return step_func

    return depends_on_decorator
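

# Illustrative usage sketch (hypothetical step functions): `step_b` is skipped (or failed,
# with `fail_instead_of_skip=True`) whenever `step_a` has not succeeded first:
#
#     @depends_on(step_a)
#     def step_b(steps_data):
#         ...
#
# and the steps are then passed to `@test_steps(step_a, step_b)` as usual.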