Line 0
Link Here
|
|
|
1 |
#!/usr/bin/env python2.3 |
2 |
# |
3 |
# SchoolTool - common information systems platform for school administration |
4 |
# Copyright (c) 2003 Shuttleworth Foundation |
5 |
# |
6 |
# This program is free software; you can redistribute it and/or modify |
7 |
# it under the terms of the GNU General Public License as published by |
8 |
# the Free Software Foundation; either version 2 of the License, or |
9 |
# (at your option) any later version. |
10 |
# |
11 |
# This program is distributed in the hope that it will be useful, |
12 |
# but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 |
# GNU General Public License for more details. |
15 |
# |
16 |
# You should have received a copy of the GNU General Public License |
17 |
# along with this program; if not, write to the Free Software |
18 |
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 |
# |
20 |
""" |
21 |
SchoolTool test runner. |
22 |
|
23 |
Syntax: testlxml.py [options] [pathname-regexp [test-regexp]] |
24 |
|
25 |
There are two kinds of tests: |
26 |
- unit tests (or programmer tests) test the internal workings of various |
27 |
components of the system |
28 |
- functional tests (acceptance tests, customer tests) test only externally |
29 |
visible system behaviour |
30 |
|
31 |
You can choose to run unit tests (this is the default mode), functional tests |
32 |
(by giving a -f option to testlxml.py) or both (by giving both -u and -f options). |
33 |
|
34 |
Test cases are located in the directory tree starting at the location of this |
35 |
script, in subdirectories named 'tests' for unit tests and 'ftests' for |
36 |
functional tests, in Python modules named 'test*.py'. They are then filtered |
37 |
according to pathname and test regexes. Alternatively, packages may just have |
38 |
'tests.py' and 'ftests.py' instead of subpackages 'tests' and 'ftests' |
39 |
respectively. |
40 |
|
41 |
A leading "!" in a regexp is stripped and negates the regexp. Pathname |
42 |
regexp is applied to the whole path (package/package/module.py). Test regexp |
43 |
is applied to a full test id (package.package.module.class.test_method). |
44 |
|
45 |
Options: |
46 |
-h print this help message |
47 |
-v verbose (print dots for each test run) |
48 |
-vv very verbose (print test names) |
49 |
-q quiet (do not print anything on success) |
50 |
-w enable warnings about omitted test cases |
51 |
-p show progress bar (can be combined with -v or -vv) |
52 |
-u select unit tests (default) |
53 |
-f select functional tests |
54 |
--level n select only tests at level n or lower |
55 |
--all-levels select all tests |
56 |
--list-files list all selected test files |
57 |
--list-tests list all selected test cases |
58 |
--list-hooks list all loaded test hooks |
59 |
--coverage create code coverage reports |
60 |
""" |
61 |
# |
62 |
# This script borrows ideas from Zope 3's test runner heavily. It is smaller |
63 |
# and cleaner though, at the expense of more limited functionality. |
64 |
# |
65 |
|
66 |
import re |
67 |
import os |
68 |
import sys |
69 |
import time |
70 |
import types |
71 |
import getopt |
72 |
import unittest |
73 |
import traceback |
74 |
# Python 2.3 compatibility: use the built-in `set` where available
# (2.4+), otherwise fall back to the old `sets` module.
try:
    set
except NameError:
    from sets import Set as set

# Make all classes defined in this module new-style under Python 2.
__metaclass__ = type
80 |
|
81 |
def stderr(text):
    """Write *text* followed by a newline to standard error."""
    sys.stderr.write("%s\n" % text)
84 |
|
85 |
class Options:
    """Configurable properties of the test runner.

    A single instance is created by main() and then mutated according to
    the command-line arguments; the class attributes below are the
    defaults.
    """

    # test location
    basedir = ''                # base directory for tests (defaults to
                                # basedir of argv[0] + 'src'), must be absolute
    follow_symlinks = True      # should symlinks to subdirectories be
                                # followed? (hardcoded, may cause loops)

    # which tests to run
    unit_tests = False          # unit tests (default if both are false)
    functional_tests = False    # functional tests

    # test filtering
    level = 1                   # run only tests at this or lower level
                                # (if None, runs all tests)
    pathname_regex = ''         # regexp for filtering filenames
    test_regex = ''             # regexp for filtering test cases

    # actions to take
    list_files = False          # --list-files
    list_tests = False          # --list-tests
    list_hooks = False          # --list-hooks
    run_tests = True            # run tests (disabled by --list-foo)

    # output verbosity
    verbosity = 0               # verbosity level (-v)
    quiet = 0                   # do not print anything on success (-q)
    warn_omitted = False        # produce warnings when a test case is
                                # not included in a test suite (-w)
    progress = False            # show running progress (-p)
    coverage = False            # produce coverage reports (--coverage)
    coverdir = 'coverage'       # where to put them (currently hardcoded)
    immediate_errors = False    # show tracebacks twice (currently hardcoded)
    screen_width = 80           # screen width (autodetected)
120 |
|
121 |
|
122 |
def compile_matcher(regex):
    """Build a predicate of one string argument from *regex*.

    An empty regex matches everything; a regex of just "!" matches
    nothing.  A leading "!" is stripped and negates the match.
    """
    if not regex:
        return lambda s: True
    negated = regex.startswith('!')
    if negated:
        regex = regex[1:]
        if not regex:
            # "!" alone: the negation of "match everything".
            return lambda s: False
        pattern = re.compile(regex)
        return lambda s: pattern.search(s) is None
    pattern = re.compile(regex)
    return lambda s: pattern.search(s) is not None
139 |
|
140 |
|
141 |
def walk_with_symlinks(top, func, arg):
    """Like os.path.walk, but follows symlinks on POSIX systems.

    Beware: a loop of symlinks will make this recurse forever.
    """
    try:
        entries = os.listdir(top)
    except os.error:
        # Unreadable or vanished directory: silently skip it.
        return
    func(arg, top, entries)
    for entry in entries:
        if entry in ('.', '..'):
            continue
        full = os.path.join(top, entry)
        if os.path.isdir(full):
            walk_with_symlinks(full, func, arg)
157 |
|
158 |
|
159 |
def get_test_files(cfg):
    """Return a sorted list of test module filenames under cfg.basedir."""
    matcher = compile_matcher(cfg.pathname_regex)
    wanted = []
    if cfg.unit_tests:
        wanted.append('tests')
    if cfg.functional_tests:
        wanted.append('ftests')
    prefix_len = len(cfg.basedir) + 1
    found = []

    def visit(ignored, dir, files):
        basename = os.path.basename(dir)
        if basename not in wanted:
            # Not inside a tests/ftests package: pick up a plain
            # tests.py / ftests.py module if one is present.
            for name in wanted:
                candidate = name + '.py'
                if candidate in files:
                    path = os.path.join(dir, candidate)
                    if matcher(path[prefix_len:]):
                        found.append(path)
            return
        if '__init__.py' not in files:
            stderr("%s is not a package" % dir)
            return
        for file in files:
            if file.startswith('test') and file.endswith('.py'):
                path = os.path.join(dir, file)
                if matcher(path[prefix_len:]):
                    found.append(path)

    if cfg.follow_symlinks:
        walk_with_symlinks(cfg.basedir, visit, None)
    else:
        os.path.walk(cfg.basedir, visit, None)
    found.sort()
    return found
192 |
|
193 |
|
194 |
def import_module(filename, cfg, tracer=None):
    """Import the module stored in *filename* and return it.

    The dotted module name is derived from the path relative to
    cfg.basedir.  When *tracer* is given, the import runs under its
    control so imports are included in coverage data.
    """
    base = os.path.splitext(filename)[0]
    modname = base[len(cfg.basedir):].replace(os.path.sep, '.')
    if modname.startswith('.'):
        modname = modname[1:]
    if tracer is None:
        mod = __import__(modname)
    else:
        mod = tracer.runfunc(__import__, modname)
    # __import__ returns the top-level package; descend to the leaf.
    for part in modname.split('.')[1:]:
        mod = getattr(mod, part)
    return mod
208 |
|
209 |
|
210 |
def filter_testsuite(suite, matcher, level=None):
    """Flatten *suite* into a list of test cases accepted by *matcher*.

    Tests whose 'level' attribute exceeds *level* are dropped; a None
    level disables level filtering.
    """
    if not isinstance(suite, unittest.TestSuite):
        raise TypeError('not a TestSuite', suite)
    selected = []
    for item in suite._tests:
        if level is not None and getattr(item, 'level', 0) > level:
            continue
        if isinstance(item, unittest.TestCase):
            # item.id() is 'package.module.class.method'
            if matcher(item.id()):
                selected.append(item)
        else:
            selected.extend(filter_testsuite(item, matcher, level))
    return selected
226 |
|
227 |
|
228 |
def get_all_test_cases(module):
    """Return all unittest.TestCase subclasses defined in *module*.

    Only names starting with 'Test' are considered, mirroring the
    naming convention used by the test modules.
    """
    # types.ClassType (old-style classes) only exists on Python 2; on
    # Python 3 referencing it raises AttributeError, so fall back to
    # plain `type` there.
    class_types = (type, getattr(types, 'ClassType', type))
    results = []
    for name in dir(module):
        if not name.startswith('Test'):
            continue
        item = getattr(module, name)
        if (isinstance(item, class_types) and
            issubclass(item, unittest.TestCase)):
            results.append(item)
    return results
239 |
|
240 |
|
241 |
def get_test_classes_from_testsuite(suite):
    """Return the set of TestCase classes that occur in *suite*."""
    if not isinstance(suite, unittest.TestSuite):
        raise TypeError('not a TestSuite', suite)
    classes = set()
    for item in suite._tests:
        if isinstance(item, unittest.TestCase):
            classes.add(item.__class__)
        else:
            # Nested suite: merge its classes recursively.
            classes.update(get_test_classes_from_testsuite(item))
    return classes
253 |
|
254 |
|
255 |
def get_test_cases(test_files, cfg, tracer=None):
    """Collect the filtered test cases from the given test modules."""
    matcher = compile_matcher(cfg.test_regex)
    collected = []
    for filename in test_files:
        module = import_module(filename, cfg, tracer=tracer)
        if tracer is None:
            suite = module.test_suite()
        else:
            suite = tracer.runfunc(module.test_suite)
        if suite is None:
            continue
        if cfg.warn_omitted:
            in_suite = get_test_classes_from_testsuite(suite)
            omitted = set(get_all_test_cases(module)) - in_suite
            for test_class in omitted:
                # surround the warning with blank lines, otherwise it tends
                # to get lost in the noise
                stderr("\n%s: WARNING: %s not in test suite\n"
                       % (filename, test_class.__name__))
        if (cfg.level is not None and
            getattr(suite, 'level', 0) > cfg.level):
            continue
        collected.extend(filter_testsuite(suite, matcher, cfg.level))
    return collected
282 |
|
283 |
|
284 |
def get_test_hooks(test_files, cfg, tracer=None):
    """Collect test hooks from checks.py modules next to the test files."""
    hooks = []
    dirs = set(map(os.path.dirname, test_files))
    # An ftests directory also pulls in the hooks of its sibling
    # tests directory.
    for dir in list(dirs):
        if os.path.basename(dir) == 'ftests':
            dirs.add(os.path.join(os.path.dirname(dir), 'tests'))
    ordered = list(dirs)
    ordered.sort()
    for dir in ordered:
        filename = os.path.join(dir, 'checks.py')
        if not os.path.exists(filename):
            continue
        module = import_module(filename, cfg, tracer=tracer)
        if tracer is None:
            hooks.extend(module.test_hooks())
        else:
            hooks.extend(tracer.runfunc(module.test_hooks))
    return hooks
303 |
|
304 |
|
305 |
class CustomTestResult(unittest._TextTestResult):
    """Customised TestResult.

    It can show a progress bar, and displays tracebacks for errors and failures
    as soon as they happen, in addition to listing them all at the end.
    """

    # Private (name-mangled) aliases for the superclass methods we
    # override, so the overrides can delegate without repeating the
    # base class name.
    __super = unittest._TextTestResult
    __super_init = __super.__init__
    __super_startTest = __super.startTest
    __super_stopTest = __super.stopTest
    __super_printErrors = __super.printErrors

    def __init__(self, stream, descriptions, verbosity, count, cfg, hooks):
        """Initialise the result object.

        count is the total number of tests (used for the progress bar),
        cfg the Options instance, hooks a list of test hook objects
        notified around each test.
        """
        self.__super_init(stream, descriptions, verbosity)
        self.count = count
        self.cfg = cfg
        self.hooks = hooks
        if cfg.progress:
            # The progress bar replaces the usual row of dots.
            self.dots = False
            self._lastWidth = 0
            # Columns left for the test name after the
            # 'xxxx/xxxx (xxx.x%): ' prefix of the progress line.
            self._maxWidth = cfg.screen_width - len("xxxx/xxxx (xxx.x%): ") - 1

    def startTest(self, test):
        # Redraw the progress line (overwriting the previous one with
        # "\r") before delegating to the superclass and the hooks.
        if self.cfg.progress:
            # verbosity == 0: 'xxxx/xxxx (xxx.x%)'
            # verbosity == 1: 'xxxx/xxxx (xxx.x%): test name'
            # verbosity >= 2: 'xxxx/xxxx (xxx.x%): test name ... ok'
            n = self.testsRun + 1
            self.stream.write("\r%4d" % n)
            if self.count:
                self.stream.write("/%d (%5.1f%%)"
                                  % (self.count, n * 100.0 / self.count))
            if self.showAll: # self.cfg.verbosity == 1
                self.stream.write(": ")
            elif self.cfg.verbosity:
                name = self.getShortDescription(test)
                width = len(name)
                if width < self._lastWidth:
                    # Pad with spaces to fully overwrite the previous,
                    # longer test name.
                    name += " " * (self._lastWidth - width)
                self.stream.write(": %s" % name)
                self._lastWidth = width
            self.stream.flush()
        self.__super_startTest(test)
        for hook in self.hooks:
            hook.startTest(test)

    def stopTest(self, test):
        # Hooks are notified before the superclass finishes the test.
        for hook in self.hooks:
            hook.stopTest(test)
        self.__super_stopTest(test)

    def getShortDescription(self, test):
        """Return the test description, truncated to fit the progress line."""
        s = self.getDescription(test)
        if len(s) > self._maxWidth:
            # s is 'testname (package.module.class)'
            # try to shorten it to 'testname (...age.module.class)'
            # if it is still too long, shorten it to 'testnam...'
            # limit case is 'testname (...)'
            pos = s.find(" (")
            if pos + len(" (...)") > self._maxWidth:
                s = s[:self._maxWidth - 3] + "..."
            else:
                s = "%s...%s" % (s[:pos + 2], s[pos + 5 - self._maxWidth:])
        return s

    def printErrors(self):
        # Finish the progress line before printing the error summary.
        if self.cfg.progress and not (self.dots or self.showAll):
            self.stream.writeln()
        self.__super_printErrors()

    def formatError(self, err):
        """Format an (exc_type, exc_value, traceback) triple as a string."""
        return "".join(traceback.format_exception(*err))

    def printTraceback(self, kind, test, err):
        """Print one formatted traceback, set off by blank lines."""
        self.stream.writeln()
        self.stream.writeln()
        self.stream.writeln("%s: %s" % (kind, test))
        self.stream.writeln(self.formatError(err))
        self.stream.writeln()

    def addFailure(self, test, err):
        # Optionally show the traceback immediately; it is stored
        # (pre-formatted) for the summary at the end in any case.
        if self.cfg.immediate_errors:
            self.printTraceback("FAIL", test, err)
        self.failures.append((test, self.formatError(err)))

    def addError(self, test, err):
        if self.cfg.immediate_errors:
            self.printTraceback("ERROR", test, err)
        self.errors.append((test, self.formatError(err)))
395 |
|
396 |
|
397 |
class CustomTestRunner(unittest.TextTestRunner):
    """Customised TestRunner.

    See CustomTestResult for a list of extensions.
    """

    # Private (name-mangled) aliases for the superclass methods.
    __super = unittest.TextTestRunner
    __super_init = __super.__init__
    __super_run = __super.run

    def __init__(self, cfg, hooks=None):
        """Create a runner driven by the Options instance *cfg*."""
        self.__super_init(verbosity=cfg.verbosity)
        self.cfg = cfg
        if hooks is None:
            hooks = []
        self.hooks = hooks

    def run(self, test):
        """Run the given test case or test suite."""
        self.count = test.countTestCases()
        result = self._makeResult()
        started = time.time()
        test(result)
        elapsed = float(time.time() - started)
        result.printErrors()
        run_count = result.testsRun
        if not self.cfg.quiet:
            self.stream.writeln(result.separator2)
            plural = ""
            if run_count != 1:
                plural = "s"
            self.stream.writeln("Ran %d test%s in %.3fs"
                                % (run_count, plural, elapsed))
            self.stream.writeln()
        if result.wasSuccessful():
            # Quiet mode suppresses the OK line but never the failures.
            if not self.cfg.quiet:
                self.stream.writeln("OK")
        else:
            failed = len(result.failures)
            errored = len(result.errors)
            parts = []
            if failed:
                parts.append("failures=%d" % failed)
            if errored:
                parts.append("errors=%d" % errored)
            self.stream.write("FAILED (")
            self.stream.writeln("%s)" % ", ".join(parts))
        return result

    def _makeResult(self):
        # Must run after self.count is set by run().
        return CustomTestResult(self.stream, self.descriptions, self.verbosity,
                                cfg=self.cfg, count=self.count,
                                hooks=self.hooks)
447 |
|
448 |
|
449 |
def main(argv):
    """Main program.

    Parses the command line, finds and imports the selected test
    modules, and runs (or lists) the tests.  Returns the process exit
    code: 0 on success, 1 on failure or usage error.
    """

    # Environment
    if sys.version_info < (2, 3):
        stderr('%s: need Python 2.3 or later' % argv[0])
        stderr('your python is %s' % sys.version)
        return 1

    # Defaults
    cfg = Options()
    cfg.basedir = os.path.join(os.path.dirname(argv[0]), 'src')
    cfg.basedir = os.path.abspath(cfg.basedir)

    # Figure out terminal size
    try:
        import curses
    except ImportError:
        pass
    else:
        try:
            curses.setupterm()
            cols = curses.tigetnum('cols')
            if cols > 0:
                cfg.screen_width = cols
        except curses.error:
            pass

    # Option processing.  BUG FIX: a bad option used to propagate
    # getopt.GetoptError as an unhandled traceback; report it nicely.
    try:
        opts, args = getopt.gnu_getopt(argv[1:], 'hvpqufw',
                                       ['list-files', 'list-tests',
                                        'list-hooks', 'level=',
                                        'all-levels', 'coverage'])
    except getopt.GetoptError:
        # sys.exc_info keeps this except-clause valid on Python 2 and 3.
        stderr('%s: %s' % (argv[0], sys.exc_info()[1]))
        stderr('run %s -h for help' % argv[0])
        return 1
    for k, v in opts:
        if k == '-h':
            print(__doc__)
            return 0
        elif k == '-v':
            cfg.verbosity += 1
            cfg.quiet = False
        elif k == '-p':
            cfg.progress = True
            cfg.quiet = False
        elif k == '-q':
            cfg.verbosity = 0
            cfg.progress = False
            cfg.quiet = True
        elif k == '-u':
            cfg.unit_tests = True
        elif k == '-f':
            cfg.functional_tests = True
        elif k == '-w':
            cfg.warn_omitted = True
        elif k == '--list-files':
            cfg.list_files = True
            cfg.run_tests = False
        elif k == '--list-tests':
            cfg.list_tests = True
            cfg.run_tests = False
        elif k == '--list-hooks':
            cfg.list_hooks = True
            cfg.run_tests = False
        elif k == '--coverage':
            cfg.coverage = True
        elif k == '--level':
            try:
                cfg.level = int(v)
            except ValueError:
                stderr('%s: invalid level: %s' % (argv[0], v))
                # BUG FIX: the message used to lack the % argument and
                # printed a literal '%s'.
                stderr('run %s -h for help' % argv[0])
                return 1
        elif k == '--all-levels':
            cfg.level = None
        else:
            stderr('%s: invalid option: %s' % (argv[0], k))
            stderr('run %s -h for help' % argv[0])
            return 1
    if args:
        cfg.pathname_regex = args[0]
    if len(args) > 1:
        cfg.test_regex = args[1]
    if len(args) > 2:
        stderr('%s: too many arguments: %s' % (argv[0], args[2]))
        stderr('run %s -h for help' % argv[0])
        return 1
    if not cfg.unit_tests and not cfg.functional_tests:
        cfg.unit_tests = True

    # Set up the python path
    sys.path[0] = cfg.basedir

    # Set up tracing before we start importing things
    tracer = None
    if cfg.run_tests and cfg.coverage:
        import trace
        # trace.py in Python 2.3.1 is buggy:
        # 1) Despite sys.prefix being in ignoredirs, a lot of system-wide
        #    modules are included in the coverage reports
        # 2) Some module file names do not have the first two characters,
        #    and in general the prefix used seems to be arbitrary
        # These bugs are fixed in src/trace.py which should be in PYTHONPATH
        # before the official one.
        ignoremods = ['test']
        ignoredirs = [sys.prefix, sys.exec_prefix]
        tracer = trace.Trace(count=True, trace=False,
                             ignoremods=ignoremods, ignoredirs=ignoredirs)

    # Finding and importing
    test_files = get_test_files(cfg)
    if cfg.list_tests or cfg.run_tests:
        test_cases = get_test_cases(test_files, cfg, tracer=tracer)
    if cfg.list_hooks or cfg.run_tests:
        test_hooks = get_test_hooks(test_files, cfg, tracer=tracer)

    # Configure the logging module so tests do not spam the output.
    import logging
    logging.basicConfig()
    logging.root.setLevel(logging.CRITICAL)

    # Running
    success = True
    if cfg.list_files:
        baselen = len(cfg.basedir) + 1
        print("\n".join([fn[baselen:] for fn in test_files]))
    if cfg.list_tests:
        print("\n".join([test.id() for test in test_cases]))
    if cfg.list_hooks:
        print("\n".join([str(hook) for hook in test_hooks]))
    if cfg.run_tests:
        runner = CustomTestRunner(cfg, test_hooks)
        suite = unittest.TestSuite()
        suite.addTests(test_cases)
        if tracer is not None:
            success = tracer.runfunc(runner.run, suite).wasSuccessful()
            results = tracer.results()
            results.write_results(show_missing=True, coverdir=cfg.coverdir)
        else:
            success = runner.run(suite).wasSuccessful()

    # That's all
    if success:
        return 0
    else:
        return 1
592 |
|
593 |
|
594 |
# Script entry point: exit with the status code returned by main().
if __name__ == '__main__':
    sys.exit(main(sys.argv))