# pytest-forked: run each test in a forked subprocess (unix only).
  1. import os
  2. import warnings
  3. import py
  4. # we know this bit is bad, but we cant help it with the current pytest setup
  5. from _pytest import runner
  6. import pytest
  7. # copied from xdist remote
  8. def serialize_report(rep):
  9. import py
  10. d = rep.__dict__.copy()
  11. if hasattr(rep.longrepr, 'toterminal'):
  12. d['longrepr'] = str(rep.longrepr)
  13. else:
  14. d['longrepr'] = rep.longrepr
  15. for name in d:
  16. if isinstance(d[name], py.path.local):
  17. d[name] = str(d[name])
  18. elif name == "result":
  19. d[name] = None # for now
  20. return d
  21. def pytest_addoption(parser):
  22. group = parser.getgroup("forked", "forked subprocess test execution")
  23. group.addoption(
  24. '--forked',
  25. action="store_true", dest="forked", default=False,
  26. help="box each test run in a separate process (unix)")
  27. def pytest_load_initial_conftests(early_config, parser, args):
  28. early_config.addinivalue_line(
  29. "markers",
  30. "forked: Always fork for this test.",
  31. )
  32. @pytest.hookimpl(tryfirst=True)
  33. def pytest_runtest_protocol(item):
  34. if item.config.getvalue("forked") or item.get_closest_marker("forked"):
  35. ihook = item.ihook
  36. ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
  37. reports = forked_run_report(item)
  38. for rep in reports:
  39. ihook.pytest_runtest_logreport(report=rep)
  40. ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
  41. return True
  42. def forked_run_report(item):
  43. # for now, we run setup/teardown in the subprocess
  44. # XXX optionally allow sharing of setup/teardown
  45. from _pytest.runner import runtestprotocol
  46. EXITSTATUS_TESTEXIT = 4
  47. import marshal
  48. def runforked():
  49. try:
  50. reports = runtestprotocol(item, log=False)
  51. except KeyboardInterrupt:
  52. os._exit(EXITSTATUS_TESTEXIT)
  53. return marshal.dumps([serialize_report(x) for x in reports])
  54. ff = py.process.ForkedFunc(runforked)
  55. result = ff.waitfinish()
  56. if result.retval is not None:
  57. report_dumps = marshal.loads(result.retval)
  58. return [runner.TestReport(**x) for x in report_dumps]
  59. else:
  60. if result.exitstatus == EXITSTATUS_TESTEXIT:
  61. pytest.exit("forked test item %s raised Exit" % (item,))
  62. return [report_process_crash(item, result)]
  63. def report_process_crash(item, result):
  64. from _pytest._code import getfslineno
  65. path, lineno = getfslineno(item)
  66. info = ("%s:%s: running the test CRASHED with signal %d" %
  67. (path, lineno, result.signal))
  68. from _pytest import runner
  69. # pytest >= 4.1
  70. has_from_call = getattr(runner.CallInfo, "from_call", None) is not None
  71. if has_from_call:
  72. call = runner.CallInfo.from_call(lambda: 0/0, "???")
  73. else:
  74. call = runner.CallInfo(lambda: 0/0, "???")
  75. call.excinfo = info
  76. rep = runner.pytest_runtest_makereport(item, call)
  77. if result.out:
  78. rep.sections.append(("captured stdout", result.out))
  79. if result.err:
  80. rep.sections.append(("captured stderr", result.err))
  81. xfail_marker = item.get_closest_marker('xfail')
  82. if not xfail_marker:
  83. return rep
  84. rep.outcome = "skipped"
  85. rep.wasxfail = (
  86. "reason: {xfail_reason}; "
  87. "pytest-forked reason: {crash_info}".
  88. format(
  89. xfail_reason=xfail_marker.kwargs['reason'],
  90. crash_info=info,
  91. )
  92. )
  93. warnings.warn(
  94. 'pytest-forked xfail support is incomplete at the moment and may '
  95. 'output a misleading reason message',
  96. RuntimeWarning,
  97. )
  98. return rep