Boost-Commit:
From: grafikrobot_at_[hidden]
Date: 2007-10-12 12:43:47
Author: grafik
Date: 2007-10-12 12:43:46 EDT (Fri, 12 Oct 2007)
New Revision: 39963
URL: http://svn.boost.org/trac/boost/changeset/39963
Log:
Partial rework of regression scripts for branch-independent testing.
Added:
branches/release-tools/regression/run.py
- copied, changed from r39765, /branches/release-tools/regression/xsl_reports/runner/regression.py
branches/release-tools/regression/src/
branches/release-tools/regression/src/__init__.py
- copied, changed from r39765, /branches/release-tools/regression/xsl_reports/runner/__init__.py
branches/release-tools/regression/src/collect_and_upload_logs.py
- copied unchanged from r39765, /branches/release-tools/regression/xsl_reports/runner/collect_and_upload_logs.py
branches/release-tools/regression/src/regression.py
- copied, changed from r39765, /branches/release-tools/regression/xsl_reports/runner/regression.py
Text files modified:
branches/release-tools/regression/run.py | 1043 ---------------------------------------
branches/release-tools/regression/src/__init__.py | 7
branches/release-tools/regression/src/regression.py | 404 ++++++--------
3 files changed, 192 insertions(+), 1262 deletions(-)
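
The heart of this change is the new src/regression.py (diffed below), which replaces the old flat-function script with a single runner class whose command_* methods double as the command-line commands. What follows is a minimal, hypothetical sketch of that dispatch pattern only, not the committed code; the option names and messages are illustrative:

    import optparse
    import sys

    class runner:
        def __init__(self, args=None):
            # Every method named command_<x> is exposed as the command 'x',
            # with underscores mapped to dashes.
            names = [m[len('command_'):].replace('_', '-')
                     for m in dir(self) if m.startswith('command_')]
            opt = optparse.OptionParser(
                usage="%prog options [commands]",
                description="commands: %s" % ', '.join(sorted(names)))
            opt.add_option('--tag', default='trunk')
            (self.options, self.actions) = opt.parse_args(args)

        def command_cleanup(self):
            sys.stderr.write("# cleaning up (tag %s)\n" % self.options.tag)

        def command_show_revision(self):
            sys.stderr.write("# nothing to report in this sketch\n")

        def main(self):
            # Each positional argument names a command; dashes map back to
            # underscores to locate the matching command_* method.
            for action in self.actions:
                method = 'command_' + action.replace('-', '_')
                if hasattr(self, method):
                    getattr(self, method)()

    if __name__ == '__main__':
        runner().main()

Run as, say, "python sketch.py --tag=trunk cleanup show-revision"; each positional argument selects the matching command in order, and unknown commands are silently skipped, as in the committed runner.main().
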
Copied: branches/release-tools/regression/run.py (from r39765, /branches/release-tools/regression/xsl_reports/runner/regression.py)
==============================================================================
--- /branches/release-tools/regression/xsl_reports/runner/regression.py (original)
+++ branches/release-tools/regression/run.py 2007-10-12 12:43:46 EDT (Fri, 12 Oct 2007)
@@ -1,1042 +1,21 @@
#!/usr/bin/python
-# Copyright (c) MetaCommunications, Inc. 2003-2007
+# Copyright Redshift Software, Inc. 2007
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
-import urllib
-import tarfile
-import socket
-import time
-import getopt
-import glob
-import shutil
-import stat
import os.path
-import os
-import platform
-import traceback
-import string
import sys
-regression_root = os.path.abspath( os.path.dirname( sys.argv[0] ) )
-regression_results = os.path.join( regression_root, 'results' )
-regression_log = os.path.join( regression_results, 'bjam.log' )
-install_log = os.path.join( regression_results, 'bjam_install.log' )
-boostbook_log = os.path.join( regression_results, 'boostbook.log' )
-boostbook_archive_name = os.path.join( regression_results, 'boostbook.zip' )
-
-boost_root = os.path.join( regression_root, 'boost' )
-xsl_reports_dir = os.path.join( boost_root, 'tools', 'regression', 'xsl_reports' )
-timestamp_path = os.path.join( regression_root, 'timestamp' )
-
-svn_anonymous_command_line = 'svn %(command)s'
-svn_command_line = 'svn --non-interactive --username=%(user)s %(command)s'
-
-
-bjam = {}
-process_jam_log = {}
-
-
-if sys.platform == 'win32':
- bjam[ 'name' ] = 'bjam.exe'
- bjam[ 'build_cmd' ] = lambda toolset, v2: bjam_build_script_cmd( 'build.bat %s' % toolset )
- bjam[ 'is_supported_toolset' ] = lambda x: x in [ \
- 'borland', 'como', 'gcc', 'gcc-nocygwin', 'intel-win32', 'metrowerks', 'mingw', \
- 'msvc', 'vc7', 'vc8' \
- ]
- process_jam_log[ 'name' ] = 'process_jam_log.exe'
-
- def default_toolset(v2):
- if v2:
- return 'msvc'
- else:
- return 'vc-7_1'
-
- process_jam_log[ 'default_toolset' ] = default_toolset
- patch_boost_name = 'patch_boost.bat'
-else:
- bjam[ 'name' ] = 'bjam'
- bjam[ 'build_cmd' ] = lambda toolset, v2: bjam_build_script_cmd( './build.sh %s' % toolset )
- bjam[ 'is_supported_toolset' ] = lambda x: x in [ \
- 'acc', 'como', 'darwin', 'gcc', 'intel-linux', 'kcc', 'kylix', 'mipspro', \
- 'pathscale', 'pgi', 'qcc', 'sun', 'sunpro', 'tru64cxx', 'vacpp' \
- ]
- process_jam_log[ 'name' ] = 'process_jam_log'
- process_jam_log[ 'default_toolset' ] = lambda x: 'gcc'
- patch_boost_name = 'patch_boost'
-
-bjam[ 'default_toolset' ] = lambda x: ''
-bjam[ 'path' ] = os.path.join( regression_root, bjam[ 'name' ] )
-bjam[ 'source_dir' ] = os.path.join( boost_root, 'tools', 'jam', 'src' )
-bjam[ 'build_path_root' ] = lambda unused: bjam[ 'source_dir' ]
-
-process_jam_log[ 'path' ] = os.path.join( regression_root, process_jam_log[ 'name' ] )
-process_jam_log[ 'source_dir' ] = os.path.join( boost_root, 'tools', 'regression', 'build' )
-
-
-def process_jam_build_root(v2):
- if v2:
- return os.path.join(boost_root, 'dist', 'bin')
- else:
- return os.path.join(
- boost_root, 'bin', 'boost', 'tools', 'regression', 'build'
- , process_jam_log[ 'name' ])
-
-
-process_jam_log[ 'build_path_root' ] = process_jam_build_root
-
-process_jam_log[ 'build_cmd' ] = lambda toolset, v2: bjam_command( toolset, v2 )
-process_jam_log[ 'is_supported_toolset' ] = lambda x : True
-
-build_monitor_url = 'http://engineering.meta-comm.com/resources/build_monitor.zip'
-pskill_url = 'http://www.sysinternals.com/files/pskill.zip'
-
-utils = None
-
-
-def log( message ):
- sys.stdout.flush()
- sys.stderr.flush()
- sys.stderr.write( '# %s\n' % message )
- sys.stderr.flush()
-
-
-def platform_name():
- # See http://article.gmane.org/gmane.comp.lib.boost.testing/933
- if sys.platform == 'win32':
- return 'Windows'
- elif sys.platform == 'cygwin':
- return 'Windows/Cygwin'
-
- return platform.system()
-
-
-def rmtree( path ):
- if os.path.exists( path ):
- if sys.platform == 'win32':
- os.system( 'del /f /s /q "%s" >nul 2>&1' % path )
- shutil.rmtree( unicode( path ) )
- else:
- os.system( 'rm -f -r "%s"' % path )
-
-
-def retry( f, args, max_attempts=5, sleep_secs=10 ):
- for attempts in range( max_attempts, -1, -1 ):
- try:
- return f( *args )
- except Exception, msg:
- log( '%s failed with message "%s"' % ( f.__name__, msg ) )
- if attempts == 0:
- log( 'Giving up.' )
- raise
-
- log( 'Retrying (%d more attempts).' % attempts )
- time.sleep( sleep_secs )
-
-
-def cleanup( args, **unused ):
- if args == []: args = [ 'source', 'bin' ]
-
- if 'source' in args:
- log( 'Cleaning up "%s" directory ...' % boost_root )
- rmtree( boost_root )
-
- if 'bin' in args:
- boost_bin_dir = os.path.join( boost_root, 'bin' )
- log( 'Cleaning up "%s" directory ...' % boost_bin_dir )
- rmtree( boost_bin_dir )
-
- boost_binv2_dir = os.path.join( boost_root, 'bin.v2' )
- log( 'Cleaning up "%s" directory ...' % boost_binv2_dir )
- rmtree( boost_binv2_dir )
-
- log( 'Cleaning up "%s" directory ...' % regression_results )
- rmtree( regression_results )
-
-
-def http_get( source_url, destination, proxy ):
- if proxy is None: proxies = None
- else: proxies = { 'http' : proxy }
-
- src = urllib.urlopen( source_url, proxies = proxies )
-
- f = open( destination, 'wb' )
- while True:
- data = src.read( 16*1024 )
- if len( data ) == 0: break
- f.write( data )
-
- f.close()
- src.close()
-
-
-def tarball_name_for_tag( tag, timestamp = False ):
- tag = tag.split( '/' )[-1]
- if not timestamp: return 'boost-%s.tar.bz2' % tag
- else: return 'boost-%s.timestamp' % tag
-
-
-def download_boost_tarball( destination, tag, proxy, timestamp_only = False ):
- tarball_name = tarball_name_for_tag( tag, timestamp_only )
- tarball_path = os.path.join( destination, tarball_name )
- tarball_url = 'http://beta.boost.org/development/snapshot.php/%s' % tag
-
- log( 'Downloading "%s" to "%s"...' % ( tarball_url, os.path.dirname( tarball_path ) ) )
- if os.path.exists( tarball_path ):
- os.unlink( tarball_path )
-
- http_get(
- tarball_url
- , tarball_path
- , proxy
- )
-
- return tarball_path
-
-
-def find_boost_dirs( path ):
- return [ x for x in glob.glob( os.path.join( path, 'boost[-_]*' ) )
- if os.path.isdir( x ) ]
-
-
-def unpack_tarball( tarball_path, destination ):
- log( 'Looking for old unpacked archives...' )
- old_boost_dirs = find_boost_dirs( destination )
-
- for old_boost_dir in old_boost_dirs:
- if old_boost_dir != tarball_path:
- log( 'Deleting old directory %s.' % old_boost_dir )
- rmtree( old_boost_dir )
-
- log( 'Unpacking boost tarball ("%s")...' % tarball_path )
-
- tarball_name = os.path.basename( tarball_path )
- extension = tarball_name[ tarball_name.find( '.' ) : ]
-
- if extension in ( ".tar.gz", ".tar.bz2" ):
- mode = os.path.splitext( extension )[1][1:]
- tar = tarfile.open( tarball_path, 'r:%s' % mode )
- for tarinfo in tar:
- tar.extract( tarinfo, destination )
- if sys.platform == 'win32' and not tarinfo.isdir():
- # workaround what appears to be a Win32-specific bug in 'tarfile'
- # (modification times for extracted files are not set properly)
- f = os.path.join( destination, tarinfo.name )
- os.chmod( f, stat.S_IWRITE )
- os.utime( f, ( tarinfo.mtime, tarinfo.mtime ) )
- tar.close()
- elif extension in ( ".zip" ):
- import zipfile
-
- z = zipfile.ZipFile( tarball_path, 'r', zipfile.ZIP_DEFLATED )
- for f in z.infolist():
- destination_file_path = os.path.join( destination, f.filename )
- if destination_file_path[-1] == "/": # directory
- if not os.path.exists( destination_file_path ):
- os.makedirs( destination_file_path )
- else: # file
- result = open( destination_file_path, 'wb' )
- result.write( z.read( f.filename ) )
- result.close()
- z.close()
- else:
- raise 'Do not know how to unpack archives with extension \"%s\"' % extension
-
- boost_dir = find_boost_dirs( destination )[0]
- log( ' Unpacked into directory "%s"' % boost_dir )
-
- if os.path.exists( boost_root ):
- log( 'Deleting "%s" directory...' % boost_root )
- rmtree( boost_root )
-
- log( 'Renaming "%s" into "%s"' % ( boost_dir, boost_root ) )
- os.rename( boost_dir, boost_root )
-
-
-def svn_command( user, command ):
- if user is None or user == 'anonymous':
- cmd = svn_anonymous_command_line % { 'command': command }
- else:
- cmd = svn_command_line % { 'user': user, 'command': command }
-
- log( 'Executing SVN command "%s"' % cmd )
- rc = os.system( cmd )
- if rc != 0:
- raise Exception( 'SVN command "%s" failed with code %d' % ( cmd, rc ) )
-
-
-def svn_repository_url( user, tag ):
- if user != 'anonymous': return 'https://svn.boost.org/svn/boost/%s' % tag
- else: return 'http://svn.boost.org/svn/boost/%s' % tag
-
-
-def svn_checkout( user, tag, args ):
- command = 'co %s boost' % svn_repository_url( user, tag )
- os.chdir( regression_root )
- svn_command( user, command )
-
-
-def svn_update( user, tag, args ):
- os.chdir( boost_root )
- svn_command( user, 'update' )
-
-
-def format_time( t ):
- return time.strftime(
- '%a, %d %b %Y %H:%M:%S +0000'
- , t
- )
-
-
-def refresh_timestamp():
- if os.path.exists( timestamp_path ):
- os. unlink( timestamp_path )
-
- open( timestamp_path, 'w' ).close()
-
-
-def timestamp():
- return time.strftime(
- '%Y-%m-%dT%H:%M:%SZ'
- , time.gmtime( os.stat( timestamp_path ).st_mtime )
- )
-
-
-def get_tarball( tag, proxy, args, **unused ):
- if args == []: args = [ 'download', 'unpack' ]
-
- tarball_path = None
-
- if 'download' in args:
- tarball_path = download_boost_tarball( regression_root, tag, proxy )
-
- if 'unpack' in args:
- if not tarball_path:
- tarball_path = os.path.join( regression_root, tarball_name_for_tag( tag ) )
- unpack_tarball( tarball_path, regression_root )
-
-
-def get_source( user, tag, proxy, args, **unused ):
- refresh_timestamp()
- log( 'Getting sources (%s)...' % timestamp() )
-
- if user is not None:
- retry(
- svn_checkout
- , ( user, tag, args )
- )
- else:
- retry(
- get_tarball
- , ( tag, proxy, args )
- )
-
-
-def update_source( user, tag, proxy, args, **unused ):
- if user is not None or os.path.exists( os.path.join( boost_root, '.svn' ) ):
- open( timestamp_path, 'w' ).close()
- log( 'Updating sources from SVN (%s)...' % timestamp() )
- retry(
- svn_update
- , ( user, tag, args )
- )
- else:
- get_source( user, tag, proxy, args )
-
-
-def tool_path( name_or_spec, v2=None ):
- if isinstance( name_or_spec, basestring ):
- return os.path.join( regression_root, name_or_spec )
-
- if os.path.exists( name_or_spec[ 'path' ] ):
- return name_or_spec[ 'path' ]
-
- if name_or_spec.has_key( 'build_path' ):
- return name_or_spec[ 'build_path' ]
-
- build_path_root = name_or_spec[ 'build_path_root' ]( v2 )
- log( 'Searching for "%s" in "%s"...' % ( name_or_spec[ 'name' ], build_path_root ) )
- for root, dirs, files in os.walk( build_path_root ):
- if name_or_spec[ 'name' ] in files:
- return os.path.join( root, name_or_spec[ 'name' ] )
-
- raise Exception( 'Cannot find "%s" in any of the following locations:\n%s' % (
- name_or_spec[ 'name' ]
- , '\n'.join( [ name_or_spec[ 'path' ], build_path_root ] )
- ) )
-
-
-def build_if_needed( tool, toolset, toolsets, v2 ):
- if os.path.exists( tool[ 'path' ] ):
- log( 'Found preinstalled "%s"; will use it.' % tool[ 'path' ] )
- return
-
- log( 'Preinstalled "%s" is not found; building one...' % tool[ 'path' ] )
-
- if toolset is None:
- if toolsets is not None:
- toolset = string.split( toolsets, ',' )[0]
- if not tool[ 'is_supported_toolset' ]( toolset ):
- log( 'Warning: Specified toolset (%s) cannot be used to bootstrap "%s".'\
- % ( toolset, tool[ 'name' ] ) )
-
- toolset = tool[ 'default_toolset' ](v2)
- log( ' Using default toolset for the platform (%s).' % toolset )
- else:
- toolset = tool[ 'default_toolset' ](v2)
- log( 'Warning: No bootstrap toolset for "%s" was specified.' % tool[ 'name' ] )
- log( ' Using default toolset for the platform (%s).' % toolset )
-
- if os.path.exists( tool[ 'source_dir' ] ):
- log( 'Found "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] ) )
- build_cmd = tool[ 'build_cmd' ]( toolset, v2 )
- log( 'Building "%s" (%s)...' % ( tool[ 'name'], build_cmd ) )
- utils.system( [
- 'cd "%s"' % tool[ 'source_dir' ]
- , build_cmd
- ] )
- else:
- raise 'Could not find "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] )
-
- if not tool.has_key( 'build_path' ):
- tool[ 'build_path' ] = tool_path( tool, v2 )
-
- if not os.path.exists( tool[ 'build_path' ] ):
- raise 'Failed to find "%s" after build.' % tool[ 'build_path' ]
-
- log( '%s succesfully built in "%s" location' % ( tool[ 'name' ], tool[ 'build_path' ] ) )
-
-
-def import_utils():
- global utils
- if utils is None:
- sys.path.append( xsl_reports_dir )
- import utils as utils_module
- utils = utils_module
-
-
-def download_if_needed( tool_name, tool_url, proxy ):
- path = tool_path( tool_name )
- if not os.path.exists( path ):
- log( 'Preinstalled "%s" is not found.' % path )
- log( ' Downloading from %s...' % tool_url )
-
- zip_path = '%s.zip' % os.path.splitext( path )[0]
- http_get( tool_url, zip_path, proxy )
-
- log( ' Unzipping %s...' % path )
- utils.unzip( zip_path, os.path.dirname( path ) )
-
- log( ' Removing %s...' % zip_path )
- os.unlink( zip_path )
- log( 'Done.' )
-
-
-def setup(
- comment
- , toolsets
- , book
- , bjam_toolset
- , pjl_toolset
- , monitored
- , proxy
- , v2
- , args
- , **unused
- ):
- import_utils()
-
- patch_boost_path = os.path.join( regression_root, patch_boost_name )
- if os.path.exists( patch_boost_path ):
- log( 'Found patch file "%s". Executing it.' % patch_boost_path )
- os.chdir( regression_root )
- utils.system( [ patch_boost_path ] )
-
- build_if_needed( bjam, bjam_toolset, toolsets, v2 )
- build_if_needed( process_jam_log, pjl_toolset, toolsets, v2 )
-
- if monitored:
- if sys.platform == 'win32':
- download_if_needed( 'build_monitor.exe', build_monitor_url, proxy )
- download_if_needed( 'pskill.exe', pskill_url, proxy )
- else:
- log( 'Warning: Test monitoring is not supported on this platform (yet).' )
- log( ' Please consider contributing this piece!' )
-
-
-def bjam_build_script_cmd( cmd ):
- env_setup_key = 'BJAM_ENVIRONMENT_SETUP'
- if os.environ.has_key( env_setup_key ):
- return '%s & %s' % ( os.environ[env_setup_key], cmd )
-
- return cmd
-
-
-def bjam_command( toolsets, v2 ):
- build_path = regression_root
- if build_path[-1] == '\\': build_path += '\\'
-
- v2_option = ""
- if v2:
- v2_option = "--v2"
-
- result = '"%s" %s "-sBOOST_BUILD_PATH=%s" "-sBOOST_ROOT=%s"'\
- % (
- tool_path( bjam, v2 )
- , v2_option
- , build_path
- , boost_root
- )
-
- if toolsets:
- if v2:
- result += ' ' + string.join(string.split( toolsets, ',' ), ' ' )
- else:
- result += ' "-sTOOLS=%s"' % string.join( string.split( toolsets, ',' ), ' ' )
-
- return result
-
-
-def install( toolsets, v2, **unused ):
- import_utils()
- os.chdir( os.path.join( boost_root ) )
-
- log( 'Making "%s" directory...' % regression_results )
- utils.makedirs( regression_results )
-
- install_cmd = '%s -d2 install >>%s 2>&1' % ( bjam_command( toolsets, v2 ), install_log )
- log( 'Installing libraries (%s)...' % install_cmd )
- utils.system( [ install_cmd ] )
-
-
-def start_build_monitor( timeout ):
- if sys.platform == 'win32':
- build_monitor_path = tool_path( 'build_monitor.exe' )
- if os.path.exists( build_monitor_path ):
- utils.system( [ 'start /belownormal "" "%s" bjam.exe %d' % ( build_monitor_path, timeout*60 ) ] )
- else:
- log( 'Warning: Build monitor is not found at "%s"' % build_monitor_path )
-
-
-def stop_build_monitor():
- if sys.platform == 'win32':
- build_monitor_path = tool_path( 'build_monitor.exe' )
- if os.path.exists( build_monitor_path ):
- utils.system( [ '"%s" build_monitor' % tool_path( 'pskill.exe' ) ] )
-
-
-def run_process_jam_log(v2):
- log( 'Getting test case results out of "%s"...' % regression_log )
-
- if v2:
- v2 = "--v2"
- else:
- v2 = ""
-
- utils.checked_system( [
- '"%s" %s "%s" <"%s"' % (
- tool_path( process_jam_log, v2 )
- , v2
- , regression_results
- , regression_log
- )
- ] )
-
-
-def test(
- toolsets
- , bjam_options
- , monitored
- , timeout
- , v2
- , args
- , **unused
- ):
- if args == []:
- args = [ "test", "process" ]
-
- import_utils()
-
- try:
- if monitored:
- start_build_monitor( timeout )
-
- cd = os.getcwd()
- os.chdir( os.path.join( boost_root, 'status' ) )
-
- log( 'Making "%s" directory...' % regression_results )
- utils.makedirs( regression_results )
-
- results_libs = os.path.join( regression_results, 'libs' )
- results_status = os.path.join( regression_results, 'status' )
-
- if "clean" in args:
- rmtree( results_libs )
- rmtree( results_status )
-
- build_dir_option = "-sALL_LOCATE_TARGET"
- if v2:
- build_dir_option = "--build-dir"
-
- if "test" in args:
- test_cmd = '%s -d2 --dump-tests %s "%s=%s" >>"%s" 2>&1' % (
- bjam_command( toolsets, v2 )
- , bjam_options
- , build_dir_option
- , regression_results
- , regression_log
- )
-
- log( 'Starting tests (%s)...' % test_cmd )
- utils.system( [ test_cmd ] )
-
- if "process" in args:
- run_process_jam_log(v2)
-
- os.chdir( cd )
- finally:
- if monitored:
- stop_build_monitor()
-
-
-def build_book( **kargs ):
- # To do
- # 1. PDF generation
- # 2. Do we need to cleanup before the build?
- # 3. Incremental builds
- if not os.path.exists( regression_results ):
- os.makedirs( regression_results )
- import_utils()
- cwd = os.getcwd()
- try:
- os.chdir( os.path.join( boost_root, 'doc' ) )
- if os.path.exists( boostbook_log ):
- os.unlink( boostbook_log )
- utils.system( [ '%s --v2 html >>%s 2>&1' % ( tool_path( bjam, v2=True ), boostbook_log ) ] )
- # utils.system( [ '%s --v2 pdf >>%s 2>&1' % ( tool_path( bjam, v2=True ), boostbook_log ) ] )
- finally:
- os.chdir( cwd )
-
-def collect_logs(
- tag
- , runner
- , platform
- , user
- , comment
- , incremental
- , dart_server
- , ftp_proxy
- , args
- , **unused
- ):
- import_utils()
-
- if comment is None:
- comment = 'comment.html'
-
- comment_path = os.path.join( regression_root, comment )
- if not os.path.exists( comment_path ):
- log( 'Comment file "%s" not found; creating default comment.' % comment_path )
- f = open( comment_path, 'w' )
- f.write( '<p>Tests are run on %s platform.</p>' % platform_name() )
- f.close()
-
- run_type = ''
- if incremental: run_type = 'incremental'
- else: run_type = 'full'
-
- source = 'tarball'
- revision = ''
- svn_root_file = os.path.join( boost_root, '.svn' )
- svn_info_file = os.path.join( boost_root, 'svn_info.txt' )
- if os.path.exists( svn_root_file ):
- source = 'SVN'
- svn_command( 'user', 'info ' + boost_root + ' >' + svn_info_file )
-
- if os.path.exists( svn_info_file ):
- f = open( svn_info_file, 'r' )
- svn_info = f.read()
- f.close()
- i = svn_info.find( 'Revision:' )
- if i >= 0:
- i += 10
- while svn_info[i] >= '0' and svn_info[i] <= '9':
- revision += svn_info[i]
- i += 1
-
-
- from runner import collect_logs
- collect_logs(
- regression_results
- , runner
- , tag
- , platform
- , comment_path
- , timestamp_path
- , user
- , source
- , run_type
- , dart_server
- , ftp_proxy
- , revision
- )
-
-
-def collect_book( **unused ):
- log( 'Collecting files for BoostBook into "%s"...' % boostbook_archive_name )
- import zipfile
- boostbook_archive = zipfile.ZipFile( boostbook_archive_name, 'w', zipfile.ZIP_DEFLATED )
- html_root = os.path.join( boost_root, 'doc/html' )
-
- boostbook_archive.writestr( 'timestamp', timestamp())
- boostbook_archive.write( boostbook_log, os.path.basename( boostbook_log ) )
-
- def add_files( arg, dirname, names ):
- for name in names:
- path = os.path.join( dirname, name )
- if not os.path.isdir( path ):
- boostbook_archive.write( path, path[ len( html_root ) + 1: ] )
-
- os.path.walk( html_root, add_files, None )
-
-
-def upload_logs(
- tag
- , runner
- , user
- , ftp_proxy
- , debug_level
- , send_bjam_log
- , dart_server
- , **unused
- ):
- import_utils()
- from runner import upload_logs
- retry(
- upload_logs
- , ( regression_results, runner, tag, user, ftp_proxy, debug_level,
- send_bjam_log, timestamp_path, dart_server )
- )
-
-
-def upload_book( tag, runner, ftp_proxy, debug_level, **unused ):
- import_utils()
- from runner import upload_to_ftp
- upload_to_ftp( tag, boostbook_archive_name, ftp_proxy, debug_level )
-
-
-def update_itself( tag, **unused ):
- source = os.path.join( xsl_reports_dir, 'runner', os.path.basename( sys.argv[0] ) )
- self = os.path.join( regression_root, os.path.basename( sys.argv[0] ) )
-
- # Through revision 38985, the update copy was not done if
- # os.stat(self).st_mtime > os.stat(source).st_mtime. This was not
- # reliable on all systems, so the copy is now done unconditionally.
- log( ' Saving a backup copy of the current script...' )
- os.chmod( self, stat.S_IWRITE ) # Win32 workaround
- shutil.move( self, '%s~' % self )
- log( 'Updating %s from %s...' % ( self, source ) )
- shutil.copy2( source, self )
-
-
-def send_mail( smtp_login, mail, subject, msg = '', debug_level = 0 ):
- import smtplib
- if not smtp_login:
- server_name = 'mail.%s' % mail.split( '@' )[-1]
- user_name = None
- password = None
- else:
- server_name = smtp_login.split( '@' )[-1]
- ( user_name, password ) = string.split( smtp_login.split( '@' )[0], ':' )
-
- log( ' Sending mail through "%s"...' % server_name )
- smtp_server = smtplib.SMTP( server_name )
- smtp_server.set_debuglevel( debug_level )
- if user_name:
- smtp_server.login( user_name, password )
-
- smtp_server.sendmail(
- mail
- , [ mail ]
- , 'Subject: %s\nTo: %s\n\n%s' % ( subject, mail, msg )
- )
-
-
-def regression(
- tag
- , local
- , runner
- , platform
- , user
- , comment
- , toolsets
- , book
- , bjam_options
- , bjam_toolset
- , pjl_toolset
- , incremental
- , send_bjam_log
- , force_update
- , have_source
- , skip_tests
- , monitored
- , timeout
- , mail = None
- , smtp_login = None
- , proxy = None
- , ftp_proxy = None
- , debug_level = 0
- , v2 = 1
- , dart_server = None
- , args = []
- ):
-
- try:
- mail_subject = 'Boost regression for %s on %s' % ( tag, string.split(socket.gethostname(), '.')[0] )
- start_time = time.localtime()
- if mail:
- log( 'Sending start notification to "%s"' % mail )
- send_mail(
- smtp_login
- , mail
- , '%s started at %s.' % ( mail_subject, format_time( start_time ) )
- , debug_level = debug_level
- )
-
- if local is not None:
- log( 'Using local file "%s"' % local )
-
- b = os.path.basename( local )
- tag = b[ 0: b.find( '.' ) ]
- log( 'Tag: "%s"' % tag )
-
- unpack_tarball( local, regression_root )
- elif have_source:
- if not incremental: cleanup( [ 'bin' ] )
- else:
- if incremental or force_update:
- if not incremental: cleanup( [ 'bin' ] )
- update_source( user, tag, proxy, [] )
- else:
- cleanup( [] )
- get_source( user, tag, proxy, [] )
-
- setup( comment, toolsets, book, bjam_toolset, pjl_toolset, monitored, proxy,
- v2, [] )
-
- # Not specifying --toolset in command line is not enough
- # that would mean to use Boost.Build default ones
- # We can skip test only we were explictly
- # told to have no toolsets in command line "--toolset="
- if toolsets != '': # --toolset=,
- if not skip_tests: test( toolsets, bjam_options, monitored, timeout, v2, [] )
- collect_logs( tag, runner, platform, user, comment, incremental, dart_server, proxy, [] )
- upload_logs( tag, runner, user, ftp_proxy, debug_level, send_bjam_log, dart_server )
-
- if book:
- build_book()
- collect_book()
- upload_book( tag, runner, ftp_proxy, debug_level )
-
- update_itself( tag )
-
- if mail:
- log( 'Sending report to "%s"' % mail )
- end_time = time.localtime()
- send_mail(
- smtp_login
- , mail
- , '%s completed successfully at %s.' % ( mail_subject, format_time( end_time ) )
- , debug_level = debug_level
- )
- except:
- if mail:
- log( 'Sending report to "%s"' % mail )
- traceback_ = '\n'.join( apply( traceback.format_exception, sys.exc_info() ) )
- end_time = time.localtime()
- send_mail(
- smtp_login
- , mail
- , '%s failed at %s.' % ( mail_subject, format_time( end_time ) )
- , traceback_
- , debug_level
- )
- raise
-
-
-def show_revision( **unused ):
- modified = '$Date$'
- revision = '$Revision$'
-
- import re
- re_keyword_value = re.compile( r'^\$\w+:\s+(.*)\s+\$$' )
- print '\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 )
- print '\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 )
-
-
-def accept_args( args ):
- args_spec = [
- 'tag='
- , 'local='
- , 'runner='
- , 'platform='
- , 'user='
- , 'comment='
- , 'toolsets='
- , 'book'
- , 'bjam-options='
- , 'bjam-toolset='
- , 'pjl-toolset='
- , 'timeout='
- , 'mail='
- , 'smtp-login='
- , 'proxy='
- , 'ftp-proxy='
- , 'debug-level='
- , 'incremental'
- , 'force-update'
- , 'have-source'
- , 'skip-tests'
- , 'dont-send-bjam-log'
- , 'monitored'
- , 'help'
- , 'v2'
- , 'v1'
- , 'dart-server='
- ]
-
- options = {
- '--tag' : 'trunk'
- , '--local' : None
- , '--platform' : platform_name()
- , '--user' : None
- , '--comment' : None
- , '--toolsets' : None
- , '--book' : False
- , '--bjam-options' : ''
- , '--bjam-toolset' : None
- , '--pjl-toolset' : None
- , '--timeout' : 5
- , '--mail' : None
- , '--smtp-login' : None
- , '--proxy' : None
- , '--debug-level' : 0
- , '--ftp-proxy' : None
- , '--dart-server' : 'beta.boost.org:8081'
- }
-
- ( option_pairs, other_args ) = getopt.getopt( args, '', args_spec )
- map( lambda x: options.__setitem__( x[0], x[1] ), option_pairs )
-
- if not options.has_key( '--runner' ) or options.has_key( '--help' ):
- usage()
- sys.exit( 1 )
-
- return {
- 'tag' : options[ '--tag' ]
- , 'local' : options[ '--local' ]
- , 'runner' : options[ '--runner' ]
- , 'platform' : options[ '--platform']
- , 'user' : options[ '--user' ]
- , 'comment' : options[ '--comment' ]
- , 'toolsets' : options[ '--toolsets' ]
- , 'book' : options.has_key( '--book' )
- , 'bjam_options' : options[ '--bjam-options' ]
- , 'bjam_toolset' : options[ '--bjam-toolset' ]
- , 'pjl_toolset' : options[ '--pjl-toolset' ]
- , 'incremental' : options.has_key( '--incremental' )
- , 'send_bjam_log' : not options.has_key( '--dont-send-bjam-log' )
- , 'force_update' : options.has_key( '--force-update' )
- , 'have_source' : options.has_key( '--have-source' )
- , 'skip_tests' : options.has_key( '--skip-tests' )
- , 'monitored' : options.has_key( '--monitored' )
- , 'timeout' : options[ '--timeout' ]
- , 'mail' : options[ '--mail' ]
- , 'smtp_login' : options[ '--smtp-login' ]
- , 'proxy' : options[ '--proxy' ]
- , 'ftp_proxy' : options[ '--ftp-proxy' ]
- , 'debug_level' : int(options[ '--debug-level' ])
- , 'v2' : not options.has_key( '--v1' )
- , 'dart_server' : options[ '--dart-server' ]
- , 'args' : other_args
- }
-
-commands = {
- 'cleanup' : cleanup
- , 'get-source' : get_source
- , 'update-source' : update_source
- , 'setup' : setup
- , 'install' : install
- , 'test' : test
- , 'build-book' : build_book
- , 'collect-logs' : collect_logs
- , 'collect-book' : collect_book
- , 'upload-logs' : upload_logs
- , 'upload-book' : upload_book
- , 'update-itself' : update_itself
- , 'regression' : regression
- , 'show-revision' : show_revision
- }
-
-def usage():
- print 'Usage:\n\t%s [command] options' % os.path.basename( sys.argv[0] )
- print '''
-Commands:
-\t%s
-
-Options:
-\t--runner runner ID (e.g. 'Metacomm')
-\t--tag the tag for the results ('trunk' by default)
-\t--local the name of the boost tarball
-\t--comment an HTML comment file to be inserted in the reports
-\t ('comment.html' by default)
-\t--incremental do incremental run (do not remove previous binaries)
-\t--dont-send-bjam-log
-\t do not send full bjam log of the regression run
-\t--force-update do an SVN update (if applicable) instead of a clean
-\t checkout, even when performing a full run
-\t--have-source do neither a tarball download nor an SVN update;
-\t used primarily for testing script changes
-\t--skip-tests do no run bjam; used for testing script changes
-\t--monitored do a monitored run
-\t--timeout specifies the timeout, in minutes, for a single test
-\t run/compilation (enforced only in monitored runs, 5 by
-\t default)
-\t--user Boost SVN user ID (optional)
-\t--toolsets comma-separated list of toolsets to test with (optional)
-\t--book build BoostBook (optional)
-\t--bjam-options options to pass to the regression test (optional)
-\t--bjam-toolset bootstrap toolset for 'bjam' executable (optional)
-\t--pjl-toolset bootstrap toolset for 'process_jam_log' executable
-\t (optional)
-\t--mail email address to send run notification to (optional)
-\t--smtp-login STMP server address/login information, in the following
-\t form: <user>:<password>@<host>[:<port>] (optional).
-\t--proxy HTTP proxy server address and port (e.g.
-\t 'http://www.someproxy.com:3128', optional)
-\t--ftp-proxy FTP proxy server (e.g. 'ftpproxy', optional)
-\t--debug-level debugging level; controls the amount of debugging
-\t output printed; 0 by default (no debug output)
-\t--v1 Use Boost.Build V1
-\t--v2 Use Boost.Build V2 (default)
-\t--dart-server The dart server to send results to.
-''' % '\n\t'.join( commands.keys() )
-
- print 'Example:\n\t%s --runner=Metacomm\n' % os.path.basename( sys.argv[0] )
- print 'For more documentation, see http://tinyurl.com/4f2zp\n'
-
-
-if __name__ == '__main__':
- if len(sys.argv) > 1 and sys.argv[1] in commands:
- command = sys.argv[1]
- args = sys.argv[ 2: ]
- if command not in [ 'collect-logs', 'upload-logs' ]:
- args.insert( 0, '--runner=' )
- else:
- command = 'regression'
- args = sys.argv[ 1: ]
-
- commands[ command ]( **accept_args( args ) )
+#~ The directory this file is in.
+root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
+print '--- %s' % root
+
+#~ Bootstrap
+sys.path.insert(0,root)
+
+#~ Launch runner
+from src.regression import runner
+runner(root)
Copied: branches/release-tools/regression/src/__init__.py (from r39765, /branches/release-tools/regression/xsl_reports/runner/__init__.py)
==============================================================================
--- /branches/release-tools/regression/xsl_reports/runner/__init__.py (original)
+++ branches/release-tools/regression/src/__init__.py 2007-10-12 12:43:46 EDT (Fri, 12 Oct 2007)
@@ -1,2 +1,5 @@
-
-from collect_and_upload_logs import *
+# Copyright Redshift Software, Inc. 2007
+#
+# Distributed under the Boost Software License, Version 1.0.
+# (See accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
Copied: branches/release-tools/regression/src/regression.py (from r39765, /branches/release-tools/regression/xsl_reports/runner/regression.py)
==============================================================================
--- /branches/release-tools/regression/xsl_reports/runner/regression.py (original)
+++ branches/release-tools/regression/src/regression.py 2007-10-12 12:43:46 EDT (Fri, 12 Oct 2007)
@@ -6,6 +6,181 @@
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
+import optparse
+import os
+import os.path
+import platform
+import sys
+
+class runner:
+
+    def __init__(self,root):
+        commands = "commands: %s" % ', '.join(
+            map(
+                lambda m: m[8:].replace('_','-'),
+                filter(
+                    lambda m: m.startswith('command_'),
+                    runner.__dict__.keys())
+                )
+            )
+
+        opt = optparse.OptionParser(
+            usage="%prog options [commands]",
+            description=commands)
+
+        #~ Base Options:
+        opt.add_option( '--runner',
+            help="runner ID (e.g. 'Metacomm')" )
+        opt.add_option( '--comment',
+            help="an HTML comment file to be inserted in the reports",
+            default='comment.html' )
+        opt.add_option( '--tag',
+            help="the tag for the results",
+            default='trunk' )
+        opt.add_option( '--toolsets',
+            help="comma-separated list of toolsets to test with" )
+        opt.add_option( '--incremental',
+            help="do incremental run (do not remove previous binaries)",
+            action='store_true',
+            default=False )
+        opt.add_option( '--timeout',
+            help="specifies the timeout, in minutes, for a single test run/compilation",
+            default=5, type='int' )
+        opt.add_option( '--bjam-options',
+            help="options to pass to the regression test" )
+        opt.add_option( '--bjam-toolset',
+            help="bootstrap toolset for 'bjam' executable" )
+        opt.add_option( '--pjl-toolset',
+            help="bootstrap toolset for 'process_jam_log' executable" )
+        opt.add_option( '--platform',
+            default=self.platform_name() )
+
+        #~ Source Options:
+        opt.add_option( '--user',
+            help="Boost SVN user ID" )
+        opt.add_option( '--local',
+            help="the name of the boost tarball" )
+        opt.add_option( '--force-update',
+            help="do an SVN update (if applicable) instead of a clean checkout, even when performing a full run" )
+        opt.add_option( '--have-source',
+            help="do neither a tarball download nor an SVN update; used primarily for testing script changes" )
+
+        #~ Connection Options:
+        opt.add_option( '--proxy',
+            help="HTTP proxy server address and port (e.g.'http://www.someproxy.com:3128')" )
+        opt.add_option( '--ftp-proxy',
+            help="FTP proxy server (e.g. 'ftpproxy')" )
+        opt.add_option( '--dart-server',
+            help="the dart server to send results to" )
+
+        #~ Debug Options:
+        opt.add_option( '--debug-level',
+            help="debugging level; controls the amount of debugging output printed",
+            default=0, type='int' )
+        opt.add_option( '--send-bjam-log',
+            help="send full bjam log of the regression run",
+            action='store_true',
+            default=False )
+        opt.add_option( '--mail',
+            help="email address to send run notification to" )
+        opt.add_option( '--smtp-login',
+            help="STMP server address/login information, in the following form: <user>:<password>@<host>[:<port>]" )
+        opt.add_option( '--skip-tests',
+            help="do not run bjam; used for testing script changes" )
+
+        ( _opt_, self.actions ) = opt.parse_args(None,self)
+
+        self.tools_root = os.path.abspath(
+            os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) )
+        self.regression_root = root
+        self.boost_root = os.path.join( self.regression_root, 'boost' )
+        self.regression_results = os.path.join( self.regression_root, 'results' )
+        self.regression_log = os.path.join( self.regression_results, 'bjam.log' )
+        self.xsl_reports_dir = os.path.join( self.tools_root, 'regression', 'xsl_reports' )
+        self.timestamp_path = os.path.join( self.regression_root, 'timestamp' )
+        self.main()
+
+    def command_cleanup(self,*args):
+        if args == []: args = [ 'source', 'bin' ]
+
+        if 'source' in args:
+            self.log( 'Cleaning up "%s" directory ...' % self.boost_root )
+            self.rmtree( self.boost_root )
+
+        if 'bin' in args:
+            boost_bin_dir = os.path.join( self.boost_root, 'bin' )
+            self.log( 'Cleaning up "%s" directory ...' % self.boost_bin_dir )
+            self.rmtree( self.boost_bin_dir )
+
+            boost_binv2_dir = os.path.join( self.boost_root, 'bin.v2' )
+            self.log( 'Cleaning up "%s" directory ...' % boost_binv2_dir )
+            self.rmtree( boost_binv2_dir )
+
+            self.log( 'Cleaning up "%s" directory ...' % self.regression_results )
+            self.rmtree( self.regression_results )
+
+    def command_get_source(self):
+        pass
+
+    def command_update_source(self):
+        pass
+
+    def command_setup(self):
+        pass
+
+    def command_install(self):
+        pass
+
+    def command_test(self):
+        pass
+
+    def command_collect_logs(self):
+        pass
+
+    def command_upoad_logs(self):
+        pass
+
+    def command_regression(self):
+        pass
+
+    def command_show_revision(self):
+        modified = '$Date$'
+        revision = '$Revision$'
+
+        import re
+        re_keyword_value = re.compile( r'^\$\w+:\s+(.*)\s+\$$' )
+        print '\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 )
+        print '\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 )
+
+    def main(self):
+        for action in self.actions:
+            action_m = "command_"+action.replace('-','_')
+            if hasattr(self,action_m):
+                getattr(self,action_m)()
+
+    def platform_name(self):
+        # See http://article.gmane.org/gmane.comp.lib.boost.testing/933
+        if sys.platform == 'win32':
+            return 'Windows'
+        elif sys.platform == 'cygwin':
+            return 'Windows/Cygwin'
+        return platform.system()
+
+    def log(self,message):
+        sys.stdout.flush()
+        sys.stderr.flush()
+        sys.stderr.write( '# %s\n' % message )
+        sys.stderr.flush()
+
+    def rmtree(self,path):
+        if os.path.exists( path ):
+            if sys.platform == 'win32':
+                os.system( 'del /f /s /q "%s" >nul 2>&1' % path )
+                shutil.rmtree( unicode( path ) )
+            else:
+                os.system( 'rm -f -r "%s"' % path )
+
+'''
import urllib
import tarfile
import socket
@@ -21,16 +196,6 @@
import string
import sys
-regression_root = os.path.abspath( os.path.dirname( sys.argv[0] ) )
-regression_results = os.path.join( regression_root, 'results' )
-regression_log = os.path.join( regression_results, 'bjam.log' )
-install_log = os.path.join( regression_results, 'bjam_install.log' )
-boostbook_log = os.path.join( regression_results, 'boostbook.log' )
-boostbook_archive_name = os.path.join( regression_results, 'boostbook.zip' )
-
-boost_root = os.path.join( regression_root, 'boost' )
-xsl_reports_dir = os.path.join( boost_root, 'tools', 'regression', 'xsl_reports' )
-timestamp_path = os.path.join( regression_root, 'timestamp' )
svn_anonymous_command_line = 'svn %(command)s'
svn_command_line = 'svn --non-interactive --username=%(user)s %(command)s'
@@ -97,31 +262,6 @@
utils = None
-def log( message ):
- sys.stdout.flush()
- sys.stderr.flush()
- sys.stderr.write( '# %s\n' % message )
- sys.stderr.flush()
-
-
-def platform_name():
- # See http://article.gmane.org/gmane.comp.lib.boost.testing/933
- if sys.platform == 'win32':
- return 'Windows'
- elif sys.platform == 'cygwin':
- return 'Windows/Cygwin'
-
- return platform.system()
-
-
-def rmtree( path ):
- if os.path.exists( path ):
- if sys.platform == 'win32':
- os.system( 'del /f /s /q "%s" >nul 2>&1' % path )
- shutil.rmtree( unicode( path ) )
- else:
- os.system( 'rm -f -r "%s"' % path )
-
def retry( f, args, max_attempts=5, sleep_secs=10 ):
for attempts in range( max_attempts, -1, -1 ):
@@ -137,24 +277,6 @@
time.sleep( sleep_secs )
-def cleanup( args, **unused ):
- if args == []: args = [ 'source', 'bin' ]
-
- if 'source' in args:
- log( 'Cleaning up "%s" directory ...' % boost_root )
- rmtree( boost_root )
-
- if 'bin' in args:
- boost_bin_dir = os.path.join( boost_root, 'bin' )
- log( 'Cleaning up "%s" directory ...' % boost_bin_dir )
- rmtree( boost_bin_dir )
-
- boost_binv2_dir = os.path.join( boost_root, 'bin.v2' )
- log( 'Cleaning up "%s" directory ...' % boost_binv2_dir )
- rmtree( boost_binv2_dir )
-
- log( 'Cleaning up "%s" directory ...' % regression_results )
- rmtree( regression_results )
def http_get( source_url, destination, proxy ):
@@ -865,178 +987,4 @@
, debug_level
)
raise
-
-
-def show_revision( **unused ):
- modified = '$Date$'
- revision = '$Revision$'
-
- import re
- re_keyword_value = re.compile( r'^\$\w+:\s+(.*)\s+\$$' )
- print '\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 )
- print '\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 )
-
-
-def accept_args( args ):
- args_spec = [
- 'tag='
- , 'local='
- , 'runner='
- , 'platform='
- , 'user='
- , 'comment='
- , 'toolsets='
- , 'book'
- , 'bjam-options='
- , 'bjam-toolset='
- , 'pjl-toolset='
- , 'timeout='
- , 'mail='
- , 'smtp-login='
- , 'proxy='
- , 'ftp-proxy='
- , 'debug-level='
- , 'incremental'
- , 'force-update'
- , 'have-source'
- , 'skip-tests'
- , 'dont-send-bjam-log'
- , 'monitored'
- , 'help'
- , 'v2'
- , 'v1'
- , 'dart-server='
- ]
-
- options = {
- '--tag' : 'trunk'
- , '--local' : None
- , '--platform' : platform_name()
- , '--user' : None
- , '--comment' : None
- , '--toolsets' : None
- , '--book' : False
- , '--bjam-options' : ''
- , '--bjam-toolset' : None
- , '--pjl-toolset' : None
- , '--timeout' : 5
- , '--mail' : None
- , '--smtp-login' : None
- , '--proxy' : None
- , '--debug-level' : 0
- , '--ftp-proxy' : None
- , '--dart-server' : 'beta.boost.org:8081'
- }
-
- ( option_pairs, other_args ) = getopt.getopt( args, '', args_spec )
- map( lambda x: options.__setitem__( x[0], x[1] ), option_pairs )
-
- if not options.has_key( '--runner' ) or options.has_key( '--help' ):
- usage()
- sys.exit( 1 )
-
- return {
- 'tag' : options[ '--tag' ]
- , 'local' : options[ '--local' ]
- , 'runner' : options[ '--runner' ]
- , 'platform' : options[ '--platform']
- , 'user' : options[ '--user' ]
- , 'comment' : options[ '--comment' ]
- , 'toolsets' : options[ '--toolsets' ]
- , 'book' : options.has_key( '--book' )
- , 'bjam_options' : options[ '--bjam-options' ]
- , 'bjam_toolset' : options[ '--bjam-toolset' ]
- , 'pjl_toolset' : options[ '--pjl-toolset' ]
- , 'incremental' : options.has_key( '--incremental' )
- , 'send_bjam_log' : not options.has_key( '--dont-send-bjam-log' )
- , 'force_update' : options.has_key( '--force-update' )
- , 'have_source' : options.has_key( '--have-source' )
- , 'skip_tests' : options.has_key( '--skip-tests' )
- , 'monitored' : options.has_key( '--monitored' )
- , 'timeout' : options[ '--timeout' ]
- , 'mail' : options[ '--mail' ]
- , 'smtp_login' : options[ '--smtp-login' ]
- , 'proxy' : options[ '--proxy' ]
- , 'ftp_proxy' : options[ '--ftp-proxy' ]
- , 'debug_level' : int(options[ '--debug-level' ])
- , 'v2' : not options.has_key( '--v1' )
- , 'dart_server' : options[ '--dart-server' ]
- , 'args' : other_args
- }
-
-commands = {
- 'cleanup' : cleanup
- , 'get-source' : get_source
- , 'update-source' : update_source
- , 'setup' : setup
- , 'install' : install
- , 'test' : test
- , 'build-book' : build_book
- , 'collect-logs' : collect_logs
- , 'collect-book' : collect_book
- , 'upload-logs' : upload_logs
- , 'upload-book' : upload_book
- , 'update-itself' : update_itself
- , 'regression' : regression
- , 'show-revision' : show_revision
- }
-
-def usage():
- print 'Usage:\n\t%s [command] options' % os.path.basename( sys.argv[0] )
- print '''
-Commands:
-\t%s
-
-Options:
-\t--runner runner ID (e.g. 'Metacomm')
-\t--tag the tag for the results ('trunk' by default)
-\t--local the name of the boost tarball
-\t--comment an HTML comment file to be inserted in the reports
-\t ('comment.html' by default)
-\t--incremental do incremental run (do not remove previous binaries)
-\t--dont-send-bjam-log
-\t do not send full bjam log of the regression run
-\t--force-update do an SVN update (if applicable) instead of a clean
-\t checkout, even when performing a full run
-\t--have-source do neither a tarball download nor an SVN update;
-\t used primarily for testing script changes
-\t--skip-tests do no run bjam; used for testing script changes
-\t--monitored do a monitored run
-\t--timeout specifies the timeout, in minutes, for a single test
-\t run/compilation (enforced only in monitored runs, 5 by
-\t default)
-\t--user Boost SVN user ID (optional)
-\t--toolsets comma-separated list of toolsets to test with (optional)
-\t--book build BoostBook (optional)
-\t--bjam-options options to pass to the regression test (optional)
-\t--bjam-toolset bootstrap toolset for 'bjam' executable (optional)
-\t--pjl-toolset bootstrap toolset for 'process_jam_log' executable
-\t (optional)
-\t--mail email address to send run notification to (optional)
-\t--smtp-login STMP server address/login information, in the following
-\t form: <user>:<password>@<host>[:<port>] (optional).
-\t--proxy HTTP proxy server address and port (e.g.
-\t 'http://www.someproxy.com:3128', optional)
-\t--ftp-proxy FTP proxy server (e.g. 'ftpproxy', optional)
-\t--debug-level debugging level; controls the amount of debugging
-\t output printed; 0 by default (no debug output)
-\t--v1 Use Boost.Build V1
-\t--v2 Use Boost.Build V2 (default)
-\t--dart-server The dart server to send results to.
-''' % '\n\t'.join( commands.keys() )
-
- print 'Example:\n\t%s --runner=Metacomm\n' % os.path.basename( sys.argv[0] )
- print 'For more documentation, see http://tinyurl.com/4f2zp\n'
-
-
-if __name__ == '__main__':
- if len(sys.argv) > 1 and sys.argv[1] in commands:
- command = sys.argv[1]
- args = sys.argv[ 2: ]
- if command not in [ 'collect-logs', 'upload-logs' ]:
- args.insert( 0, '--runner=' )
- else:
- command = 'regression'
- args = sys.argv[ 1: ]
-
- commands[ command ]( **accept_args( args ) )
+'''
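
One implementation detail of the new runner worth calling out: it passes the instance itself as the optparse values object (opt.parse_args(None,self)), so every parsed option lands directly as an attribute on self. Below is a minimal sketch of that optparse technique under illustrative names (Config, parse_into), separate from the committed code; note that optparse does not initialize option defaults on a caller-supplied values object, so the sketch copies them in afterwards:

    import optparse

    class Config(object):
        pass

    def parse_into(obj, argv):
        # Passing 'obj' as the optparse 'values' argument makes each parsed
        # option be stored directly as an attribute of 'obj'.
        opt = optparse.OptionParser()
        opt.add_option('--tag', default='trunk')
        opt.add_option('--toolsets')
        (values, commands) = opt.parse_args(argv, obj)
        # Defaults are not applied to a supplied values object, so fill in
        # the default for any option that was not given on the command line.
        for name, value in opt.defaults.items():
            if not hasattr(obj, name):
                setattr(obj, name, value)
        return commands

    cfg = Config()
    commands = parse_into(cfg, ['--toolsets=gcc', 'cleanup', 'test'])
    print("%s %s %s" % (cfg.tag, cfg.toolsets, commands))  # trunk gcc ['cleanup', 'test']

Keeping the options on the runner instance itself means the later command_* implementations can refer to self.tag, self.toolsets, and so on without threading a separate options object through every call, which appears to be the motivation for the pattern.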