# Unless explicitly stated otherwise all files in this repository are licensed under the Apache License Version 2.0.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2021 Datadog, Inc.
import json
import os
import time
import types

import pytest
from pytest_jsonreport.plugin import JSONReport

from manifests.parser.core import load as load_manifests
from utils import context
from utils._context._scenarios import scenarios
from utils.tools import logger
from utils.scripts.junit_report import junit_modifyreport
from utils._context.library_version import LibraryVersion
from utils._decorators import released, configure as configure_decorators
from utils.properties_serialization import SetupProperties

# Monkey patch JSON-report plugin to avoid noise in report
JSONReport.pytest_terminal_summary = lambda *args, **kwargs: None

# pytest does not keep a trace of deselected items, so we keep it in a global variable
_deselected_items = []

setup_properties = SetupProperties()

def pytest_addoption(parser):
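    """Declare the system-tests command line options: scenario selection, replay/sleep modes, and VM, docker SSI, parametric and reporting parameters."""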
    parser.addoption(
        "--scenario", "-S", type=str, action="store", default="DEFAULT", help="Unique identifier of scenario"
    )
    parser.addoption("--replay", "-R", action="store_true", help="Replay tests based on logs")
    parser.addoption("--sleep", action="store_true", help="Start the scenario without launching the tests (keep running)")
    parser.addoption(
        "--force-execute", "-F", action="append", default=[], help="Items to execute, even if they are skipped"
    )
    parser.addoption("--scenario-report", action="store_true", help="Produce a report on nodeids and their scenario")
    parser.addoption("--force-dd-trace-debug", action="store_true", help="Set DD_TRACE_DEBUG to true")
    parser.addoption("--force-dd-iast-debug", action="store_true", help="Set DD_IAST_DEBUG_ENABLED to true")

    # Onboarding scenarios mandatory parameters
    parser.addoption("--vm-weblog", type=str, action="store", help="Set virtual machine weblog")
    parser.addoption("--vm-library", type=str, action="store", help="Set virtual machine library to test")
    parser.addoption("--vm-env", type=str, action="store", help="Set virtual machine environment")
    parser.addoption("--vm-provider", type=str, action="store", help="Set provider for VMs")
    parser.addoption("--vm-only-branch", type=str, action="store", help="Filter to execute only one vm branch")
    parser.addoption("--vm-skip-branches", type=str, action="store", help="Filter to exclude vm branches")
    parser.addoption(
        "--vm-default-vms",
        type=str,
        action="store",
        help="'True' launches VMs marked as default, 'False' launches only non-default VMs, 'All' launches all VMs",
        default="True",
    )

    # Docker SSI scenarios
    parser.addoption("--ssi-weblog", type=str, action="store", help="Set docker ssi weblog")
    parser.addoption("--ssi-library", type=str, action="store", help="Set docker ssi library to test")
    parser.addoption("--ssi-base-image", type=str, action="store", help="Set docker ssi base image to build")
    parser.addoption("--ssi-arch", type=str, action="store", help="Set docker ssi architecture of the base image")
    parser.addoption(
        "--ssi-installable-runtime",
        type=str,
        action="store",
        help="Set the language runtime to install on the docker base image. Empty if we don't want to install any runtime",
    )
    parser.addoption("--ssi-push-base-images", "-P", action="store_true", help="Push docker ssi base images")
    parser.addoption("--ssi-force-build", "-B", action="store_true", help="Force build of ssi base images")

    # Parametric scenario options
    parser.addoption(
        "--library",
        "-L",
        type=str,
        action="store",
        default="",
        help="Library to test (e.g. 'python', 'ruby')",
        choices=["cpp", "golang", "dotnet", "java", "nodejs", "php", "python", "ruby"],
    )

    # Report data to the feature parity dashboard
    parser.addoption(
        "--report-run-url", type=str, action="store", default=None, help="URL of the run that produced the report",
    )
    parser.addoption(
        "--report-environment", type=str, action="store", default=None, help="The environment the test is run under",
    )

def pytest_configure(config):
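    """Resolve the scenario requested with --scenario, let it configure the session, and set the JSON/JUnit report paths."""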
    if not config.option.force_dd_trace_debug and os.environ.get("SYSTEM_TESTS_FORCE_DD_TRACE_DEBUG") == "true":
        config.option.force_dd_trace_debug = True

    if not config.option.force_dd_iast_debug and os.environ.get("SYSTEM_TESTS_FORCE_DD_IAST_DEBUG") == "true":
        config.option.force_dd_iast_debug = True

    # handle options that can be filled by environ
    if not config.option.report_environment and "SYSTEM_TESTS_REPORT_ENVIRONMENT" in os.environ:
        config.option.report_environment = os.environ["SYSTEM_TESTS_REPORT_ENVIRONMENT"]

    if not config.option.report_run_url and "SYSTEM_TESTS_REPORT_RUN_URL" in os.environ:
        config.option.report_run_url = os.environ["SYSTEM_TESTS_REPORT_RUN_URL"]

    # First of all, we must get the current scenario
    for name in dir(scenarios):
        if name.upper() == config.option.scenario:
            context.scenario = getattr(scenarios, name)
            break

    if context.scenario is None:
        pytest.exit(f"Scenario {config.option.scenario} does not exist", 1)

    context.scenario.pytest_configure(config)

    if not config.option.replay and not config.option.collectonly:
        config.option.json_report_file = f"{context.scenario.host_log_folder}/report.json"
        config.option.xmlpath = f"{context.scenario.host_log_folder}/reportJunit.xml"

    configure_decorators(config)

# Called at the very beginning
def pytest_sessionstart(session):
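    """Start the selected scenario (unless we only collect tests) and display the banner when sleep mode is requested."""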
    # get the terminal to allow logging directly in stdout
    setattr(logger, "terminal", session.config.pluginmanager.get_plugin("terminalreporter"))

    # if we only collect tests, do not start the scenario
    if not session.config.option.collectonly:
        context.scenario.pytest_sessionstart(session)

    if session.config.option.sleep:
        logger.terminal.write("\n ********************************************************** \n")
        logger.terminal.write(" *** .:: Sleep mode activated. Press Ctrl+C to exit ::. *** ")
        logger.terminal.write("\n ********************************************************** \n\n")

# called when each test item is collected
def _collect_item_metadata(item):
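    """Build the metadata attached to each test in the JSON report: declared features, skip/xfail details and the test declaration kind."""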
    result = {
        "details": None,
        "testDeclaration": None,
        "features": [marker.kwargs["feature_id"] for marker in item.iter_markers("features")],
    }

    # get the reason from skip before xfail
    markers = [*item.iter_markers("skip"), *item.iter_markers("skipif"), *item.iter_markers("xfail")]
    for marker in markers:
        skip_reason = _get_skip_reason_from_marker(marker)
        if skip_reason is not None:
            # if any irrelevant declaration exists, it is the one we need to expose
            if skip_reason.startswith("irrelevant"):
                result["details"] = skip_reason
            # otherwise, we keep the first one we found
            elif result["details"] is None:
                result["details"] = skip_reason

    if result["details"]:
        logger.debug(f"{item.nodeid} => {result['details']} => skipped")

        if result["details"].startswith("irrelevant"):
            result["testDeclaration"] = "irrelevant"
        elif result["details"].startswith("flaky"):
            result["testDeclaration"] = "flaky"
        elif result["details"].startswith("bug"):
            result["testDeclaration"] = "bug"
        elif result["details"].startswith("missing_feature"):
            result["testDeclaration"] = "notImplemented"
        elif "got empty parameter set" in result["details"]:
            # Case of a test with no parameters. Onboarding: we removed the parameter/machine with excludedBranches
            logger.info(f"No parameters found for {item.nodeid}")
        else:
            raise ValueError(f"Unexpected test declaration for {item.nodeid}: {result['details']}")

    return result

def _get_skip_reason_from_marker(marker):
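    """Return the reason carried by a skip, skipif or xfail marker (None for a skipif whose condition is not met)."""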
    if marker.name == "skipif":
        if all(marker.args):
            return marker.kwargs.get("reason", "")
    elif marker.name in ("skip", "xfail"):
        if len(marker.args):  # if un-named arguments are present, the first one is the reason
            return marker.args[0]

        # otherwise, search in named arguments
        return marker.kwargs.get("reason", "")

    return None

def pytest_pycollect_makemodule(module_path, parent):
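    """Apply module-level manifest declarations: skip or xfail the whole module, or return None to let pytest collect it normally."""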
    # As of now, declarations only work for tracers at module level
    library = context.scenario.library.library

    manifests = load_manifests()

    nodeid = str(module_path.relative_to(module_path.cwd()))

    if nodeid in manifests and library in manifests[nodeid]:
        declaration = manifests[nodeid][library]
        logger.info(f"Manifest declaration found for {nodeid}: {declaration}")

        mod: pytest.Module = pytest.Module.from_parent(parent, path=module_path)

        if declaration.startswith("irrelevant") or declaration.startswith("flaky"):
            mod.add_marker(pytest.mark.skip(reason=declaration))
            logger.debug(f"Module {nodeid} is skipped by manifest file because {declaration}")
        else:
            mod.add_marker(pytest.mark.xfail(reason=declaration))
            logger.debug(f"Module {nodeid} is xfailed by manifest file because {declaration}")

        return mod

@pytest.hookimpl(tryfirst=True)
def pytest_pycollect_makeitem(collector, name, obj):
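    """Apply class-level manifest declarations by calling the released() decorator on collected test classes."""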
    if collector.istestclass(obj, name):
        if obj is None:
            message = f"""{collector.nodeid} is not properly collected.
            You may have forgotten to return a value in a decorator like @features"""

            raise ValueError(message)

        manifest = load_manifests()

        nodeid = f"{collector.nodeid}::{name}"

        if nodeid in manifest:
            declaration = manifest[nodeid]
            logger.info(f"Manifest declaration found for {nodeid}: {declaration}")

            try:
                released(**declaration)(obj)
            except Exception as e:
                raise ValueError(f"Unexpected error for {nodeid}.") from e

def pytest_collection_modifyitems(session, config, items: list[pytest.Item]):
    """Deselect items that are not included in the current scenario"""

    logger.debug("pytest_collection_modifyitems")

    selected = []
    deselected = []
    declared_scenarios = {}

    def iter_markers(self, name=None):
        return (x[1] for x in self.iter_markers_with_node(name=name) if x[1].name not in ("skip", "skipif", "xfail"))

    for item in items:
        scenario_markers = list(item.iter_markers("scenario"))
        declared_scenario = scenario_markers[0].args[0] if len(scenario_markers) != 0 else "DEFAULT"

        declared_scenarios[item.nodeid] = declared_scenario

        # If we are running the scenario with the sleep option, we deselect all items
        if session.config.option.sleep:
            deselected.append(item)
            continue

        if context.scenario.is_part_of(declared_scenario):
            logger.info(f"{item.nodeid} is included in {context.scenario}")
            selected.append(item)

            for forced in config.option.force_execute:
                if item.nodeid.startswith(forced):
                    logger.info(f"{item.nodeid} is normally skipped, but forced thanks to -F {forced}")
                    # when the user specifies a test to be forced, we need to run it even if it is skipped/xfailed,
                    # or if any of its parents is marked as skipped/xfailed. The trick is to monkey patch the
                    # iter_markers method (used internally by pytest to get all markers of a test item,
                    # including its parents' markers) so that it excludes the skip, skipif and xfail markers.
                    item.iter_markers = types.MethodType(iter_markers, item)
        else:
            logger.debug(f"{item.nodeid} is not included in {context.scenario}")
            deselected.append(item)

    items[:] = selected
    config.hook.pytest_deselected(items=deselected)

    if config.option.scenario_report:
        with open(f"{context.scenario.host_log_folder}/scenarios.json", "w", encoding="utf-8") as f:
            json.dump(declared_scenarios, f, indent=2)

def pytest_deselected(items):
    _deselected_items.extend(items)

def _item_is_skipped(item):
    return any(item.iter_markers("skip"))

def pytest_collection_finish(session: pytest.Session):
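    """Execute the setup_* companion method of every selected test (or restore its stored properties in replay mode) before any test runs."""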
    if session.config.option.collectonly:
        return

    if session.config.option.sleep:  # in this mode, we simply sleep, not running any test or setup
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:  # catching ctrl+C
            context.scenario.close_targets()
            return
        except Exception as e:
            raise e

    if session.config.option.replay:
        setup_properties.load(context.scenario.host_log_folder)

    last_item_file = ""
    for item in session.items:
        if _item_is_skipped(item):
            continue

        if not item.instance:  # skip items that are not methods bound to a class
            continue

        # the test method name is like test_xxxx
        # we replace the test_ prefix by setup_, and call the resulting method if it exists
        setup_method_name = f"setup_{item.name[5:]}"

        if not hasattr(item.instance, setup_method_name):
            continue

        item_file = item.nodeid.split(":", 1)[0]
        if last_item_file != item_file:
            if len(last_item_file) == 0:
                logger.terminal.write_sep("-", "tests setup", bold=True)

            logger.terminal.write(f"\n{item_file} ")
            last_item_file = item_file

        setup_method = getattr(item.instance, setup_method_name)

        try:
            if session.config.option.replay:
                logger.debug(f"Restore properties of {setup_method} for {item}")
                setup_properties.restore_properties(item)
            else:
                logger.debug(f"Call {setup_method} for {item}")
                setup_method()
                setup_properties.store_properties(item)
        except Exception:
            logger.exception("Unexpected failure during setup method call")
            logger.terminal.write("x", bold=True, red=True)
            context.scenario.close_targets()
            raise
        else:
            logger.terminal.write(".", bold=True, green=True)

    logger.terminal.write("\n\n")

    if not session.config.option.replay:
        setup_properties.dump(context.scenario.host_log_folder)

    context.scenario.post_setup()

def pytest_runtest_call(item):
    # add a log line for each request made by the setup, to help debugging
    setup_properties.log_requests(item)

@pytest.hookimpl(optionalhook=True)
def pytest_json_runtest_metadata(item, call):
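    """Attach the metadata computed by _collect_item_metadata to each test entry of the JSON report, once per item at setup time."""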
    if call.when != "setup":
        return {}

    return _collect_item_metadata(item)

def pytest_json_modifyreport(json_report):
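    """Add the serialized scenario context to the JSON report."""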
    try:
        # add useful data for reporting
        json_report["context"] = context.serialize()
        logger.debug("Modifying JSON report finished")
    except Exception:
        logger.error("Failed to modify the JSON report", exc_info=True)

def pytest_sessionfinish(session, exitstatus):
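    """Close the scenario targets and, on the main worker, dump known library versions and export the JUnit and feature parity reports."""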
    logger.info("Executing pytest_sessionfinish")

    context.scenario.close_targets()

    if session.config.option.collectonly or session.config.option.replay:
        return

    # xdist: pytest_sessionfinish runs at the end of all tests. If you check for the worker input attribute,
    # it will run in the master thread after all other processes have finished testing
    if context.scenario.is_main_worker:
        with open(f"{context.scenario.host_log_folder}/known_versions.json", "w", encoding="utf-8") as f:
            json.dump(
                {library: sorted(versions) for library, versions in LibraryVersion.known_versions.items()}, f, indent=2,
            )

        data = session.config._json_report.report  # pylint: disable=protected-access

        try:
            junit_modifyreport(
                data, session.config.option.xmlpath, junit_properties=context.scenario.get_junit_properties(),
            )

            export_feature_parity_dashboard(session, data)
        except Exception:
            logger.exception("Failed to export reports")

def export_feature_parity_dashboard(session, data):
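    """Write feature_parity.json, the per-test payload consumed by the feature parity dashboard."""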
    tests = [convert_test_to_feature_parity_model(test) for test in data["tests"]]

    result = {
        "runUrl": session.config.option.report_run_url or "https://github.com/DataDog/system-tests",
        "runDate": data["created"],
        "environment": session.config.option.report_environment or "local",
        "testSource": "systemtests",
        "language": context.scenario.library.library,
        "variant": context.weblog_variant,
        "testedDependencies": [
            {"name": name, "version": str(version)} for name, version in context.scenario.components.items()
        ],
        "configuration": context.configuration,
        "scenario": context.scenario.name,
        "tests": [test for test in tests if test is not None],
    }
    context.scenario.customize_feature_parity_dashboard(result)

    with open(f"{context.scenario.host_log_folder}/feature_parity.json", "w", encoding="utf-8") as f:
        json.dump(result, f, indent=2)

def convert_test_to_feature_parity_model(test):
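    """Convert a JSON-report test entry to the feature parity dashboard model; return None for tests declared as not reported (feature id -1)."""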
    result = {
        "path": test["nodeid"],
        "lineNumber": test["lineno"],
        "outcome": test["outcome"],
        "testDeclaration": test["metadata"]["testDeclaration"],
        "details": test["metadata"]["details"],
        "features": test["metadata"]["features"],
    }

    # exclude features.not_reported
    return result if -1 not in result["features"] else None

## Fixtures corner

@pytest.fixture(scope="session", name="session")
def fixture_session(request):
    return request.session

@pytest.fixture(scope="session", name="deselected_items")
def fixture_deselected_items():
    return _deselected_items