Planet
navi homePPSaboutscreenshotsdownloaddevelopmentforum

source: downloads/boost_1_34_1/tools/build/v2/tools/testing.jam @ 29

Last change on this file since 29 was 29, checked in by landauf, 16 years ago

updated boost from 1_33_1 to 1_34_1

File size: 16.4 KB
Line 
1# Copyright 2005 Dave Abrahams
2# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
3# Distributed under the Boost Software License, Version 1.0.
4# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
5
6#  This module implements regression testing framework. It declares a number of
7#  main target rules, which perform some action, and if the results are ok,
8#  creates an output file.
9
#  The exact list of rules is:
#  'compile'       -- creates .test file if compilation of sources was successful
#  'compile-fail'  -- creates .test file if compilation of sources failed
#  'run'           -- creates .test file if running of the executable produced from
#                     sources was successful. Also leaves behind an .output file
#                     with the output from the program run.
#  'run-fail'      -- same as above, but .test file is created if running fails.
#
#  In all cases, the presence of a .test file is an indication that
#  the test passed. For more convenient reporting, you might want to use the
#  C++ Boost regression testing utilities, see
#  http://www.boost.org/more/regression.html
#
#  For historical reasons, a 'unit-test' rule is available which
#  has the same syntax as 'exe' and behaves just like 'run'.
25
# Things to do:
#  - Teach compiler_status to handle Jamfile.v2.
# Notes:
#  - <no-warn> is not implemented, since it is Como-specific, and it's not clear
#    how to implement it
#  - std::locale-support is not implemented (it's used in one test).
32
33 
import targets ;
import "class" : new ;
import property ;
import feature ;
import toolset ;
import alias ;
import type ;
import generators ;
import project ;
import property-set ;
import virtual-target ;
import path ;
import os ;
import common ;
import sequence ;
import errors ;
import modules ;
50
# No-op: this module requires no per-use configuration.
rule init ( ) { }
52
# The feature which controls the name of the program used to
# launch test programs.
feature.feature testing.launcher : : optional free ;
# Free-form information attached to a test, reported by --dump-tests.
feature.feature test-info : : free incidental ;
# Command line arguments passed to the test program.
feature.feature testing.arg : : free incidental ;
# Input files passed to the test program on its command line.
feature.feature testing.input-file : : free dependency ;
59
# Register target types. The second column, when present, is the suffix
# of the generated marker file; all test types derive from TEST.
type.register TEST : test ;
type.register COMPILE : : TEST ;
type.register COMPILE_FAIL : : TEST ;
type.register RUN_OUTPUT : run ;
type.register RUN : : TEST ;
type.register RUN_FAIL : : TEST ;
type.register LINK_FAIL : : TEST ;
type.register LINK : : TEST ;
type.register UNIT_TEST : passed : TEST ;
70
71# Declare the rules which create main targets.
72# While the 'type' module already creates rules with the same names for us,
73# we need extra convenience: default name of main target, so write
74# our own versions.
75
# Helper rule: declares a test main target of the given type. If no target
# name is explicitly passed, the basename of the first source is used.
# Every created target is also remembered in the global .all-tests list so
# it can later be reported by --dump-tests.
rule make-test ( target-type : sources + : requirements * : target-name ? )
{
    target-name ?= $(sources[1]:D=:S=) ;

    # The <location-prefix> requirement forces the build system to generate
    # paths of the form $build_dir/array1.test/gcc/debug, which the
    # post-processing tools rely upon.
    local full-requirements =
      $(requirements) <location-prefix>$(target-name).test ;

    local test-target =
      [ targets.create-typed-target
          [ type.type-from-rule-name $(target-type) ]
            : [ project.current ]
            : $(target-name)
            : $(sources)
            : $(full-requirements) ] ;

    # Remember the test (for --dump-test). Collecting them per-project
    # would be nicer, but projects referred to by 'build-project' are not
    # available until the whole Jamfile is loaded, so a flat list is used.
    .all-tests += $(test-target) ;
    return $(test-target) ;
}
101
# Note: passing more than one cpp file here is known to
# fail. Passing a cpp file and a library target works.
# Declares a test which passes when 'sources' compile successfully.
rule compile ( sources + : requirements * : target-name ? )
{
    local result = [ make-test compile
        : $(sources) : $(requirements) : $(target-name) ] ;
    return $(result) ;
}
108
# Declares a test which passes when compilation of 'sources' fails.
rule compile-fail ( sources + : requirements * : target-name ? )
{
    local result = [ make-test compile-fail
        : $(sources) : $(requirements) : $(target-name) ] ;
    return $(result) ;
}
113
# Declares a test which passes when 'sources' link successfully.
rule link ( sources + : requirements * : target-name ? )
{
    local result = [ make-test link
        : $(sources) : $(requirements) : $(target-name) ] ;
    return $(result) ;
}
118
119
# Declares a test which passes when linking of 'sources' fails.
rule link-fail ( sources + : requirements * : target-name ? )
{
    local result = [ make-test link-fail
        : $(sources) : $(requirements) : $(target-name) ] ;
    return $(result) ;
}
124
125
# Validates a list of test input files and converts it into the
# corresponding <testing.input-file> properties. With more than one file,
# the names must already be in alphabetical order: property-set creation
# sorts property values, and an unsorted list would silently be reordered.
rule handle-input-files ( input-files * )
{
    if $(input-files[2])
    {
        local sorted = [ sequence.insertion-sort $(input-files) ] ;
        if $(sorted) != $(input-files)
        {
            errors.user-error "Names of input files must be sorted alphabetically"
              : "due to internal limitations" ;
        }
    }
    return <testing.input-file>$(input-files) ;
}
140
141
# Declares a test which builds an executable from 'sources', runs it with
# 'args' and 'input-files', and passes if the run succeeds. The program
# output is kept in an .output file.
rule run ( sources + : args * : input-files * : requirements * : target-name ?
    : default-build * )
{
    local extra-props = <testing.arg>$(args:J=" ") ;
    extra-props += [ handle-input-files $(input-files) ] ;
    return [ make-test run : $(sources)
        : $(requirements) $(extra-props) : $(target-name) ] ;
}
149
# Same as 'run', but the test passes if running the executable fails.
rule run-fail ( sources + : args * : input-files * : requirements * : target-name ?
    : default-build * )
{
    local extra-props = <testing.arg>$(args:J=" ") ;
    extra-props += [ handle-input-files $(input-files) ] ;
    return [ make-test run-fail : $(sources)
        : $(requirements) $(extra-props) : $(target-name) ] ;
}
157
158
# Use 'test-suite' as a synonym for 'alias', for backward compatibility.
IMPORT : alias : : test-suite ;
161
162
163
# Produces information about every test target declared so far (the
# targets collected in .all-tests by make-test); see 'dump-test' for the
# exact output format.
rule dump-tests
{
    for local test in $(.all-tests)
    {
        dump-test $(test) ;
    }
}
174
# Given a project location, computes the name of the Boost library it
# belongs to. The path is expected in normalized form (forward slashes
# only).
local rule get-library-name ( path )
{
    local in-test-dir = [ MATCH /libs/(.*)/(test|example) : $(path) ] ;
    local in-libs = [ MATCH /libs/(.*)$ : $(path) ] ;
    local in-status = [ MATCH (/status$) : $(path) ] ;

    if $(in-test-dir)
    {
        return $(in-test-dir[0]) ;
    }
    else if $(in-libs)
    {
        return $(in-libs[0]) ;
    }
    else if $(in-status)
    {
        return "" ;
    }
    else if --dump-tests in [ modules.peek : ARGV ]
    {
        # The 'run' rule and the others might be used outside boost; the
        # 'library name' makes no sense there, so just return the path.
        return $(path) ;
    }
}
195
196
# Takes a target (an instance of 'basic-target') and prints
# - its type
# - its name (qualified with the library name when inside a boost tree)
# - comments specified via the <test-info> property
# - the relative location of all sources from the project root.
rule dump-test ( target )
{
    local type = [ $(target).type ] ;
    local name = [ $(target).name ] ;
    local project = [ $(target).project ] ;

    local project-root = [ $(project).get project-root ] ;
    # Qualify the test name with the library it belongs to, if any.
    local library = [ get-library-name
        [ path.root [ $(project).get location ] [ path.pwd ] ] ] ;
    if $(library)
    {
        name = $(library)/$(name) ;
    }

    # Collect paths of the source files, each relative to the project root.
    local sources = [ $(target).sources ] ;
    local source-files ;
    for local s in $(sources)
    {
        if [ class.is-a $(s) : file-reference ]
        {
            local location =
              [ path.root
                  [ path.root [ $(s).name ] [ $(s).location ] ]
                  [ path.pwd ] ] ;

            source-files +=
              [ path.relative
                  $(location)
                  [ path.root $(project-root) [ path.pwd ] ] ] ;
        }
    }

    local r = [ $(target).requirements ] ;
    # Extract the values of the <test-info> feature...
    local test-info = [ $(r).get <test-info> ] ;

    # ...and format them into a single string of quoted strings.
    test-info = \"$(test-info:J=\"\ \")\" ;

    ECHO boost-test($(type)) \"$(name)\"
      [$(test-info)]
        ":" \"$(source-files)\"
          ;
}
246
# Register generators. Depending on the target type, either the
# 'expect-success' or the 'expect-failure' rule will be used.
generators.register-standard testing.expect-success : OBJ : COMPILE ;
generators.register-standard testing.expect-failure : OBJ : COMPILE_FAIL ;
generators.register-standard testing.expect-success : RUN_OUTPUT : RUN ;
generators.register-standard testing.expect-failure : RUN_OUTPUT : RUN_FAIL ;
generators.register-standard testing.expect-failure : EXE : LINK_FAIL ;
generators.register-standard testing.expect-success : EXE : LINK ;

# Generator which runs an EXE and captures its output.
generators.register-standard testing.capture-output : EXE : RUN_OUTPUT ;

# Generator which creates the target if the source runs successfully.
# Differs from RUN in that the run output is not captured. It exists
# because the 'run' rule is much better for automated testing, but is
# not user-friendly. See
# http://article.gmane.org/gmane.comp.lib.boost.build/6353/
generators.register-standard testing.unit-test : EXE : UNIT_TEST ;
265
266
267# The action rules called by generators.
268
# Causes the 'target' to exist after the bjam invocation if and only if
# all the dependencies were successfully built.
rule expect-success ( target : dependency + : requirements * )
{
    # Was '$(sources)', which is not a parameter of this rule and always
    # expanded to nothing; the marker action's sources must be the
    # dependencies that were built.
    **passed** $(target) : $(dependency) ;
}
275
# Causes the 'target' to exist after the bjam invocation if and only if
# some of the dependencies were NOT successfully built.
rule expect-failure ( target : dependency + : properties * )
{
    # Derive a '*fail' marker target next to each dependency by rewriting
    # its grist (e.g. <x>foo.obj -> <x*fail>foo.obj).
    local grist = [ MATCH ^<(.*)> : $(dependency:G) ] ;
    local marker = $(dependency:G=$(grist)*fail) ;
    # The marker file is written when the dependency fails as expected.
    (failed-as-expected) $(marker) ;
    # Invert the meaning of the dependency's build result: a failure is
    # counted as success, and vice versa.
    FAIL_EXPECTED $(dependency) ;
    # Place the marker in the same location as the dependency's output.
    LOCATE on $(marker) = [ on $(dependency) return $(LOCATE) ] ;
    # Remove any stale marker left over from a previous run.
    RMOLD $(marker) ;
    DEPENDS $(marker) : $(dependency) ;

    DEPENDS $(target) : $(marker) ;
    **passed** $(target) : $(marker) ;
}
291
# The rule/action combination used to report successful passing
# of a test.
rule **passed**
{
    # Dump all the tests, if needed.
    # We do it here, since the dump should happen after all Jamfiles are
    # read, and there's no such place currently defined (but should be).
    if ! $(.dumped-tests) && --dump-tests in [ modules.peek : ARGV ]
    {
        .dumped-tests = true ;
        dump-tests ;
    }

    # Force deletion of the target, in case any dependencies failed
    # to build.
    RMOLD $(<) ;
}
309
# Writes the .test marker file that records a pass. (The body is shell
# text -- no comments inside, for cmd.exe compatibility.)
actions **passed**
{
    echo passed > $(<)
}
314
# Writes the '*fail' marker file for a dependency that failed as expected
# (see expect-failure).
actions (failed-as-expected)
{
    echo failed as expected > $(<)
}
319
# Computes a PATH_SETUP command for 'target' which makes sure all dynamic
# libraries needed by the test can be found at run time: paths from
# dependency libraries (the <xdll-path> property) plus whatever explicit
# <dll-path> the user specified plus the RUN_PATH recorded on 'source'.
# The resulting command is prepended to each test invocation.
rule run-path-setup ( target : source : properties * )
{
    local lib-paths = [ feature.get-values <dll-path> : $(properties) ] ;
    lib-paths += [ feature.get-values <xdll-path> : $(properties) ] ;
    lib-paths += [ on $(source) return $(RUN_PATH) ] ;
    lib-paths = [ sequence.unique $(lib-paths) ] ;
    if $(lib-paths)
    {
        lib-paths = [ sequence.transform path.native : $(lib-paths) ] ;

        PATH_SETUP on $(target) =
          [ common.prepend-path-variable-command
              [ os.shared-library-path-variable ] : $(lib-paths) ] ;
    }
}
340
# When --preserve-test-targets is given, built test executables are kept
# instead of being removed after the run (see capture-output).
local argv = [ modules.peek : ARGV ] ;
if --preserve-test-targets in $(argv)
{
    preserve-test-targets = true ;
}
346
# Propagate the testing properties into variables on the capture-output
# action.
toolset.flags testing.capture-output ARGS <testing.arg> ;
toolset.flags testing.capture-output INPUT_FILES <testing.input-file> ;
toolset.flags testing.capture-output LAUNCHER <testing.launcher> ;
350
# Runs executable 'source' and stores its stdout in the file 'target'.
# Unless the --preserve-test-targets command line option was given, also
# removes the executable after the run.
# The 'targets-to-remove' parameter controls what should be removed:
#   - if 'none', does not remove anything, ever
#   - if empty, removes 'source'
#   - if non-empty and not 'none', contains a list of targets to remove.
rule capture-output ( target : source : properties * : targets-to-remove * )
{
    # The action writes the raw output to a sibling .output file.
    output-file on $(target) = $(target:S=.output) ;
    LOCATE on $(target:S=.output) = [ on $(target) return $(LOCATE) ] ;

    # The INCLUDES kills a warning about an independent target...
    INCLUDES $(target) : $(target:S=.output) ;
    # ...but it also puts .output into the dependency graph, so we must
    # tell jam it's OK if it cannot find the target or an updating rule.
    NOCARE $(target:S=.output) ;

    # This has a two-fold effect. First it adds input files to the
    # dependency graph, preventing a warning. Second, it causes input
    # files to be bound before the target is created. Therefore, they are
    # bound using their own SEARCH setting and not the LOCATE setting of
    # $(target), as in the other case (due to a jam bug).
    DEPENDS $(target) : [ on $(target) return $(INPUT_FILES) ] ;

    if $(targets-to-remove) = none
    {
        targets-to-remove = ;
    }
    else if ! $(targets-to-remove)
    {
        targets-to-remove = $(source) ;
    }

    run-path-setup $(target) : $(source) : $(properties) ;

    if ! $(preserve-test-targets)
    {
        TEMPORARY $(targets-to-remove) ;
        # Set a second action on the target that will be executed after
        # the capture-output action. The 'RmTemps' rule has the 'ignore'
        # modifier so it's always considered to have succeeded. This is
        # needed for 'run-fail' tests: there the target is marked with
        # FAIL_EXPECTED, and without 'ignore' a successful removal would
        # be negated and reported as a failure. With 'ignore' we cannot
        # detect a case where removing the files fails, but that is not
        # likely to happen.
        RmTemps $(target) : $(targets-to-remove) ;
    }
}
402
403
# Shell-dialect fragments used to assemble the capture-output action:
# the same logical script is built from cmd.exe pieces on NT and from
# Bourne-shell pieces everywhere else.
if [ os.name ] = NT
{
    STATUS = %status% ;
    SET_STATUS = "set status=%ERRORLEVEL%" ;
    RUN_OUTPUT_NL = "echo." ;
    STATUS_0 = "%status% EQU 0 (" ;
    STATUS_NOT_0 = "%status% NEQ 0 (" ;
    VERBOSE = "%verbose% EQU 1 (" ;
    ENDIF = ")" ;
    SHELL_SET = "set " ;

    CATENATE = type ;
    CP = copy ;
}
else
{
    STATUS = "$status" ;
    SET_STATUS = "status=$?" ;
    RUN_OUTPUT_NL = "echo" ;
    STATUS_0 = "test $status -eq 0 ; then" ;
    STATUS_NOT_0 = "test $status -ne 0 ; then" ;
    VERBOSE = "test $verbose -eq 1 ; then" ;
    ENDIF = "fi" ;
    SHELL_SET = "" ;

    CATENATE = cat ;
    CP = cp ;
}
432
# When --verbose-test is given, the captured output of every test run is
# echoed to the console (see the capture-output action).
if --verbose-test in [ modules.peek : ARGV ]
{
    VERBOSE_TEST = 1 ;
}
else
{
    VERBOSE_TEST = 0 ;
}
441
442
# Platform-appropriate file-removal command.
RM = [ common.rm-command ] ;
444
# Runs the test program, capturing stdout/stderr plus the exit status into
# the .output file; on status 0 copies it over the target. The output is
# echoed to the console when verbose, or whenever the run failed. The body
# is shell text built from the dialect fragments above, so it works under
# both cmd.exe and Bourne shell -- no comments inside the body.
actions capture-output bind INPUT_FILES output-file
{
    $(PATH_SETUP)
    $(LAUNCHER) "$(>)" $(ARGS) "$(INPUT_FILES)" > "$(output-file)" 2>&1
    $(SET_STATUS)
    $(RUN_OUTPUT_NL) >> "$(output-file)"
    echo EXIT STATUS: $(STATUS) >> "$(output-file)"
    if $(STATUS_0)
        $(CP) "$(output-file)" "$(<)"
    $(ENDIF)
    $(SHELL_SET)verbose=$(VERBOSE_TEST)
    if $(STATUS_NOT_0)
        $(SHELL_SET)verbose=1
    $(ENDIF)
    if $(VERBOSE)
        echo ====== BEGIN OUTPUT ======
        $(CATENATE) "$(output-file)"
        echo ====== END OUTPUT ======
    $(ENDIF)
    exit $(STATUS)
}
466
# Removes temporary test targets. 'ignore' keeps a removal failure from
# being reported as a test failure (see capture-output).
actions quietly updated ignore piecemeal together RmTemps
{
    $(RM) "$(>)"
}
471
# Platform-appropriate command for creating a (marker) file.
MAKE_FILE = [ common.file-creation-command ] ;
473
toolset.flags testing.unit-test LAUNCHER <testing.launcher> ;

# Build rule for UNIT_TEST targets: only arranges the shared-library
# search path; the run itself is done by the 'unit-test' action below.
rule unit-test ( target : source : properties * )
{
    run-path-setup $(target) : $(source) : $(properties) ;
}
479
# Runs the test executable and creates the 'passed' marker file only if
# the run succeeds.
actions unit-test
{
    $(PATH_SETUP)
    $(LAUNCHER) $(>) && $(MAKE_FILE) $(<)
}
485
# Make the test rules available under their plain names in the modules
# that import 'testing'.
IMPORT $(__name__) : compile compile-fail run run-fail link link-fail
  : : compile compile-fail run run-fail link link-fail ;
488
489
# Support for measuring the build time of a target.
type.register TIME : time ;
generators.register-standard testing.time : : TIME ;
492
# Callback invoked through __TIMING_RULE__ (set up in 'time' below) with
# the user and system time spent updating 'source'; accumulates the
# measurements on 'target' for the 'time' action to report.
rule record-time ( target source : user : system )
{
    # Prefix each measurement with the (grist-less) source names.
    local src-string = [$(source:G=:J=",")"] " ;
    USER_TIME on $(target) += $(src-string)$(user) ;
    SYSTEM_TIME on $(target) += $(src-string)$(system) ;
}
499
IMPORT testing : record-time : : testing.record-time ;

# Build rule for TIME targets: asks bjam to report the timing of the
# source's update via __TIMING_RULE__, which calls testing.record-time.
rule time ( target : source : properties *  )
{
    # Set up the rule for recording timing information.
    __TIMING_RULE__ on $(source) = testing.record-time $(target) ;

    # Make sure that the source is rebuilt any time we need to
    # retrieve that information.
    REBUILDS $(target) : $(source) ;
}
510
# Echoes the accumulated times and records them in the target file.
# Note: the second redirection must append ('>>'); with '>' the system
# time line would truncate the file and overwrite the user time line
# written just before it.
actions time
{
    echo user: $(USER_TIME)
    echo system: $(SYSTEM_TIME)

    echo user: $(USER_TIME)" seconds" > $(<)
    echo system: $(SYSTEM_TIME)" seconds" >> $(<)
}
Note: See TracBrowser for help on using the repository browser.