Boost logo

Boost-Commit :

From: grafikrobot_at_[hidden]
Date: 2007-10-13 13:20:59


Author: grafik
Date: 2007-10-13 13:20:58 EDT (Sat, 13 Oct 2007)
New Revision: 39974
URL: http://svn.boost.org/trac/boost/changeset/39974

Log:
Partial rework of regression scripts for branch independent testing.
Added:
   branches/release-tools/regression/src/run.py
      - copied, changed from r39963, /branches/release-tools/regression/run.py
Removed:
   branches/release-tools/regression/run.py
Text files modified:
   branches/release-tools/regression/src/regression.py | 1096 +++++++++++----------------------------
   branches/release-tools/regression/src/run.py | 35 +
   2 files changed, 335 insertions(+), 796 deletions(-)

Deleted: branches/release-tools/regression/run.py
==============================================================================
--- branches/release-tools/regression/run.py 2007-10-13 13:20:58 EDT (Sat, 13 Oct 2007)
+++ (empty file)
@@ -1,21 +0,0 @@
-#!/usr/bin/python
-
-# Copyright Redshift Software, Inc. 2007
-#
-# Distributed under the Boost Software License, Version 1.0.
-# (See accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-
-import os.path
-import sys
-
-#~ The directory this file is in.
-root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
-print '--- %s' % root
-
-#~ Bootstrap
-sys.path.insert(0,root)
-
-#~ Launch runner
-from src.regression import runner
-runner(root)

Modified: branches/release-tools/regression/src/regression.py
==============================================================================
--- branches/release-tools/regression/src/regression.py (original)
+++ branches/release-tools/regression/src/regression.py 2007-10-13 13:20:58 EDT (Sat, 13 Oct 2007)
@@ -1,16 +1,34 @@
 #!/usr/bin/python
 
-# Copyright (c) MetaCommunications, Inc. 2003-2007
+# Copyright MetaCommunications, Inc. 2003-2007
+# Copyright Redshift Software, Inc. 2007
 #
 # Distributed under the Boost Software License, Version 1.0.
 # (See accompanying file LICENSE_1_0.txt or copy at
 # http://www.boost.org/LICENSE_1_0.txt)
 
+import glob
 import optparse
 import os
 import os.path
 import platform
 import sys
+import time
+
+#~ Place holder for xsl_reports/util module
+utils = None
+
+repo_root = {
+ 'anon' : 'http://svn.boost.org/svn/boost/',
+ 'user' : 'https://svn.boost.org/svn/boost/'
+ }
+repo_path = {
+ 'trunk' : 'trunk',
+ 'release' : 'branches/release',
+ 'build' : 'branches/release-tools/build/v2',
+ 'jam' : 'branches/release-tools/jam/src',
+ 'regression' : 'branches/release-tools/regression'
+ }
 
 class runner:
     
@@ -25,35 +43,31 @@
             )
         
         opt = optparse.OptionParser(
- usage="%prog options [commands]",
+ usage="%prog [options] [commands]",
             description=commands)
             
         #~ Base Options:
         opt.add_option( '--runner',
             help="runner ID (e.g. 'Metacomm')" )
         opt.add_option( '--comment',
- help="an HTML comment file to be inserted in the reports",
- default='comment.html' )
+ help="an HTML comment file to be inserted in the reports" )
         opt.add_option( '--tag',
- help="the tag for the results",
- default='trunk' )
+ help="the tag for the results" )
         opt.add_option( '--toolsets',
             help="comma-separated list of toolsets to test with" )
         opt.add_option( '--incremental',
             help="do incremental run (do not remove previous binaries)",
- action='store_true',
- default=False )
+ action='store_true' )
         opt.add_option( '--timeout',
             help="specifies the timeout, in minutes, for a single test run/compilation",
- default=5, type='int' )
+ type='int' )
         opt.add_option( '--bjam-options',
             help="options to pass to the regression test" )
         opt.add_option( '--bjam-toolset',
             help="bootstrap toolset for 'bjam' executable" )
         opt.add_option( '--pjl-toolset',
             help="bootstrap toolset for 'process_jam_log' executable" )
- opt.add_option( '--platform',
- default=self.platform_name() )
+ opt.add_option( '--platform' )
 
         #~ Source Options:
         opt.add_option( '--user',
@@ -76,11 +90,10 @@
         #~ Debug Options:
         opt.add_option( '--debug-level',
             help="debugging level; controls the amount of debugging output printed",
- default=0, type='int' )
+ type='int' )
         opt.add_option( '--send-bjam-log',
             help="send full bjam log of the regression run",
- action='store_true',
- default=False )
+ action='store_true' )
         opt.add_option( '--mail',
             help="email address to send run notification to" )
         opt.add_option( '--smtp-login',
@@ -88,18 +101,67 @@
         opt.add_option( '--skip-tests',
             help="do not run bjam; used for testing script changes" )
         
+ self.comment='comment.html'
+ self.tag='trunk'
+ self.incremental=False
+ self.timeout=5
+ self.platform=self.platform_name()
+ self.debug_level=0
+ self.send_bjam_log=False
+ self.bjam_toolset=''
+ self.pjl_toolset=''
         ( _opt_, self.actions ) = opt.parse_args(None,self)
         
- self.tools_root = os.path.abspath(
- os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) )
         self.regression_root = root
         self.boost_root = os.path.join( self.regression_root, 'boost' )
         self.regression_results = os.path.join( self.regression_root, 'results' )
         self.regression_log = os.path.join( self.regression_results, 'bjam.log' )
- self.xsl_reports_dir = os.path.join( self.tools_root, 'regression', 'xsl_reports' )
+ self.tools_bb_root = os.path.join( self.regression_root,'tools_bb' )
+ self.tools_bjam_root = os.path.join( self.regression_root,'tools_bjam' )
+ self.tools_regression_root = os.path.join( self.regression_root,'tools_regression' )
+ self.xsl_reports_dir = os.path.join( self.tools_regression_root, 'xsl_reports' )
         self.timestamp_path = os.path.join( self.regression_root, 'timestamp' )
+ if sys.platform == 'win32':
+ self.patch_boost = 'patch_boost.bat'
+ self.bjam = {
+ 'name' : 'bjam.exe',
+ #~ 'build_cmd' : 'build.bat %s' % self.bjam_toolset,
+ 'path' : os.path.join(self.regression_root,'bjam.exe'),
+ 'source_dir' : self.tools_bjam_root
+ }
+ self.process_jam_log = {
+ 'name' : 'process_jam_log.exe',
+ 'source_dir' : os.path.join(self.tools_regression_root,'build')
+ }
+ else:
+ self.patch_boost = 'patch_boost'
+ self.bjam = {
+ 'name' : 'bjam',
+ #~ 'build_cmd' : './build.sh %s' % self.bjam_toolset,
+ 'path' : os.path.join(self.regression_root,'bjam'),
+ 'source_dir' : self.tools_bjam_root
+ }
+ self.process_jam_log = {
+ 'name' : 'process_jam_log',
+ 'source_dir' : os.path.join(self.tools_regression_root,'build')
+ }
+
+ if self.debug_level > 0:
+ self.log('Regression root = %s'%self.regression_root)
+ self.log('Boost root = %s'%self.boost_root)
+ self.log('Regression results = %s'%self.regression_results)
+ self.log('Regression log = %s'%self.regression_log)
+ self.log('BB root = %s'%self.tools_bb_root)
+ self.log('Bjam root = %s'%self.tools_bjam_root)
+ self.log('Tools root = %s'%self.tools_regression_root)
+ self.log('XSL reports dir = %s'%self.xsl_reports_dir)
+ self.log('Timestamp = %s'%self.timestamp_path)
+ self.log('Patch Boost script = %s'%self.patch_boost)
+
         self.main()
     
+ #~ The various commands that make up the testing sequence...
+
     def command_cleanup(self,*args):
         if args == []: args = [ 'source', 'bin' ]
 
@@ -119,10 +181,51 @@
             self.log( 'Cleaning up "%s" directory ...' % self.regression_results )
             self.rmtree( self.regression_results )
     
+ def command_get_tools(self):
+ #~ Get Boost.Build v2...
+ os.chdir( os.path.dirname(self.tools_bb_root) )
+ self.svn_command( 'co %s %s' % (
+ self.svn_repository_url(repo_path['build']),
+ os.path.basename(self.tools_bb_root) ) )
+ #~ Get Boost.Jam...
+ os.chdir( os.path.dirname(self.tools_bjam_root) )
+ self.svn_command( 'co %s %s' % (
+ self.svn_repository_url(repo_path['jam']),
+ os.path.basename(self.tools_bjam_root) ) )
+ #~ Get the regression tools and utilities...
+ os.chdir( os.path.dirname(self.tools_regression_root) )
+ self.svn_command( 'co %s %s' % (
+ self.svn_repository_url(repo_path['regression']),
+ os.path.basename(self.tools_regression_root) ) )
+
     def command_get_source(self):
+ self.refresh_timestamp()
+ self.log( 'Getting sources (%s)...' % self.timestamp() )
+
+ if hasattr(self,'user') and self.user is not None:
+ self.retry( self.svn_checkout )
+ else:
+ self.retry( self.get_tarball )
         pass
     
     def command_update_source(self):
+ if hasattr(self,'user') \
+ and self.user is not None \
+ or os.path.exists( os.path.join( self.boost_root, '.svn' ) ):
+ open( self.timestamp_path, 'w' ).close()
+ self.log( 'Updating sources from SVN (%s)...' % self.timestamp() )
+ self.retry( self.svn_update )
+ else:
+ self.command_get_source( )
+ pass
+
+ def command_patch(self):
+ self.import_utils()
+ patch_boost_path = os.path.join( self.regression_root, self.patch_boost )
+ if os.path.exists( self.patch_boost ):
+ self.log( 'Found patch file "%s". Executing it.' % self.patch_boost )
+ os.chdir( self.regression_root )
+ utils.system( [ self.patch_boost ] )
         pass
     
     def command_setup(self):
@@ -152,6 +255,8 @@
         print '\n\tRevision: %s' % re_keyword_value.match( revision ).group( 1 )
         print '\tLast modified on: %s\n' % re_keyword_value.match( modified ).group( 1 )
     
+ #~ Utilities...
+
     def main(self):
         for action in self.actions:
             action_m = "command_"+action.replace('-','_')
@@ -179,812 +284,223 @@
                 shutil.rmtree( unicode( path ) )
             else:
                 os.system( 'rm -f -r "%s"' % path )
-
-'''
-import urllib
-import tarfile
-import socket
-import time
-import getopt
-import glob
-import shutil
-import stat
-import os.path
-import os
-import platform
-import traceback
-import string
-import sys
-
-
-svn_anonymous_command_line = 'svn %(command)s'
-svn_command_line = 'svn --non-interactive --username=%(user)s %(command)s'
-
-
-bjam = {}
-process_jam_log = {}
-
-
-if sys.platform == 'win32':
- bjam[ 'name' ] = 'bjam.exe'
- bjam[ 'build_cmd' ] = lambda toolset, v2: bjam_build_script_cmd( 'build.bat %s' % toolset )
- bjam[ 'is_supported_toolset' ] = lambda x: x in [ \
- 'borland', 'como', 'gcc', 'gcc-nocygwin', 'intel-win32', 'metrowerks', 'mingw', \
- 'msvc', 'vc7', 'vc8' \
- ]
- process_jam_log[ 'name' ] = 'process_jam_log.exe'
-
- def default_toolset(v2):
- if v2:
- return 'msvc'
- else:
- return 'vc-7_1'
-
- process_jam_log[ 'default_toolset' ] = default_toolset
- patch_boost_name = 'patch_boost.bat'
-else:
- bjam[ 'name' ] = 'bjam'
- bjam[ 'build_cmd' ] = lambda toolset, v2: bjam_build_script_cmd( './build.sh %s' % toolset )
- bjam[ 'is_supported_toolset' ] = lambda x: x in [ \
- 'acc', 'como', 'darwin', 'gcc', 'intel-linux', 'kcc', 'kylix', 'mipspro', \
- 'pathscale', 'pgi', 'qcc', 'sun', 'sunpro', 'tru64cxx', 'vacpp' \
- ]
- process_jam_log[ 'name' ] = 'process_jam_log'
- process_jam_log[ 'default_toolset' ] = lambda x: 'gcc'
- patch_boost_name = 'patch_boost'
-
-bjam[ 'default_toolset' ] = lambda x: ''
-bjam[ 'path' ] = os.path.join( regression_root, bjam[ 'name' ] )
-bjam[ 'source_dir' ] = os.path.join( boost_root, 'tools', 'jam', 'src' )
-bjam[ 'build_path_root' ] = lambda unused: bjam[ 'source_dir' ]
-
-process_jam_log[ 'path' ] = os.path.join( regression_root, process_jam_log[ 'name' ] )
-process_jam_log[ 'source_dir' ] = os.path.join( boost_root, 'tools', 'regression', 'build' )
-
-
-def process_jam_build_root(v2):
- if v2:
- return os.path.join(boost_root, 'dist', 'bin')
- else:
- return os.path.join(
- boost_root, 'bin', 'boost', 'tools', 'regression', 'build'
- , process_jam_log[ 'name' ])
-
-
-process_jam_log[ 'build_path_root' ] = process_jam_build_root
-
-process_jam_log[ 'build_cmd' ] = lambda toolset, v2: bjam_command( toolset, v2 )
-process_jam_log[ 'is_supported_toolset' ] = lambda x : True
-
-build_monitor_url = 'http://engineering.meta-comm.com/resources/build_monitor.zip'
-pskill_url = 'http://www.sysinternals.com/files/pskill.zip'
-
-utils = None
-
-
-
-def retry( f, args, max_attempts=5, sleep_secs=10 ):
- for attempts in range( max_attempts, -1, -1 ):
- try:
- return f( *args )
- except Exception, msg:
- log( '%s failed with message "%s"' % ( f.__name__, msg ) )
- if attempts == 0:
- log( 'Giving up.' )
- raise
-
- log( 'Retrying (%d more attempts).' % attempts )
- time.sleep( sleep_secs )
-
-
-
-
-def http_get( source_url, destination, proxy ):
- if proxy is None: proxies = None
- else: proxies = { 'http' : proxy }
-
- src = urllib.urlopen( source_url, proxies = proxies )
-
- f = open( destination, 'wb' )
- while True:
- data = src.read( 16*1024 )
- if len( data ) == 0: break
- f.write( data )
-
- f.close()
- src.close()
-
-
-def tarball_name_for_tag( tag, timestamp = False ):
- tag = tag.split( '/' )[-1]
- if not timestamp: return 'boost-%s.tar.bz2' % tag
- else: return 'boost-%s.timestamp' % tag
-
-
-def download_boost_tarball( destination, tag, proxy, timestamp_only = False ):
- tarball_name = tarball_name_for_tag( tag, timestamp_only )
- tarball_path = os.path.join( destination, tarball_name )
- tarball_url = 'http://beta.boost.org/development/snapshot.php/%s' % tag
-
- log( 'Downloading "%s" to "%s"...' % ( tarball_url, os.path.dirname( tarball_path ) ) )
- if os.path.exists( tarball_path ):
- os.unlink( tarball_path )
-
- http_get(
- tarball_url
- , tarball_path
- , proxy
- )
-
- return tarball_path
-
-
-def find_boost_dirs( path ):
- return [ x for x in glob.glob( os.path.join( path, 'boost[-_]*' ) )
- if os.path.isdir( x ) ]
-
-
-def unpack_tarball( tarball_path, destination ):
- log( 'Looking for old unpacked archives...' )
- old_boost_dirs = find_boost_dirs( destination )
-
- for old_boost_dir in old_boost_dirs:
- if old_boost_dir != tarball_path:
- log( 'Deleting old directory %s.' % old_boost_dir )
- rmtree( old_boost_dir )
-
- log( 'Unpacking boost tarball ("%s")...' % tarball_path )
-
- tarball_name = os.path.basename( tarball_path )
- extension = tarball_name[ tarball_name.find( '.' ) : ]
-
- if extension in ( ".tar.gz", ".tar.bz2" ):
- mode = os.path.splitext( extension )[1][1:]
- tar = tarfile.open( tarball_path, 'r:%s' % mode )
- for tarinfo in tar:
- tar.extract( tarinfo, destination )
- if sys.platform == 'win32' and not tarinfo.isdir():
- # workaround what appears to be a Win32-specific bug in 'tarfile'
- # (modification times for extracted files are not set properly)
- f = os.path.join( destination, tarinfo.name )
- os.chmod( f, stat.S_IWRITE )
- os.utime( f, ( tarinfo.mtime, tarinfo.mtime ) )
- tar.close()
- elif extension in ( ".zip" ):
- import zipfile
-
- z = zipfile.ZipFile( tarball_path, 'r', zipfile.ZIP_DEFLATED )
- for f in z.infolist():
- destination_file_path = os.path.join( destination, f.filename )
- if destination_file_path[-1] == "/": # directory
- if not os.path.exists( destination_file_path ):
- os.makedirs( destination_file_path )
- else: # file
- result = open( destination_file_path, 'wb' )
- result.write( z.read( f.filename ) )
- result.close()
- z.close()
- else:
- raise 'Do not know how to unpack archives with extension \"%s\"' % extension
 
- boost_dir = find_boost_dirs( destination )[0]
- log( ' Unpacked into directory "%s"' % boost_dir )
+ def refresh_timestamp( self ):
+ if os.path.exists( self.timestamp_path ):
+ os.unlink( self.timestamp_path )
+ open( self.timestamp_path, 'w' ).close()
+
+ def timestamp( self ):
+ return time.strftime(
+ '%Y-%m-%dT%H:%M:%SZ',
+ time.gmtime( os.stat( self.timestamp_path ).st_mtime ) )
+
+ def retry( self, f, max_attempts=5, sleep_secs=10 ):
+ for attempts in range( max_attempts, -1, -1 ):
+ try:
+ return f()
+ except Exception, msg:
+ self.log( '%s failed with message "%s"' % ( f.__name__, msg ) )
+ if attempts == 0:
+ self.log( 'Giving up.' )
+ raise
 
- if os.path.exists( boost_root ):
- log( 'Deleting "%s" directory...' % boost_root )
- rmtree( boost_root )
+ self.log( 'Retrying (%d more attempts).' % attempts )
+ time.sleep( sleep_secs )
 
- log( 'Renaming "%s" into "%s"' % ( boost_dir, boost_root ) )
- os.rename( boost_dir, boost_root )
-
-
-def svn_command( user, command ):
- if user is None or user == 'anonymous':
- cmd = svn_anonymous_command_line % { 'command': command }
- else:
- cmd = svn_command_line % { 'user': user, 'command': command }
-
- log( 'Executing SVN command "%s"' % cmd )
- rc = os.system( cmd )
- if rc != 0:
- raise Exception( 'SVN command "%s" failed with code %d' % ( cmd, rc ) )
-
-
-def svn_repository_url( user, tag ):
- if user != 'anonymous': return 'https://svn.boost.org/svn/boost/%s' % tag
- else: return 'http://svn.boost.org/svn/boost/%s' % tag
-
-
-def svn_checkout( user, tag, args ):
- command = 'co %s boost' % svn_repository_url( user, tag )
- os.chdir( regression_root )
- svn_command( user, command )
-
-
-def svn_update( user, tag, args ):
- os.chdir( boost_root )
- svn_command( user, 'update' )
-
-
-def format_time( t ):
- return time.strftime(
- '%a, %d %b %Y %H:%M:%S +0000'
- , t
- )
-
-
-def refresh_timestamp():
- if os.path.exists( timestamp_path ):
- os. unlink( timestamp_path )
-
- open( timestamp_path, 'w' ).close()
-
-
-def timestamp():
- return time.strftime(
- '%Y-%m-%dT%H:%M:%SZ'
- , time.gmtime( os.stat( timestamp_path ).st_mtime )
- )
-
-
-def get_tarball( tag, proxy, args, **unused ):
- if args == []: args = [ 'download', 'unpack' ]
-
- tarball_path = None
-
- if 'download' in args:
- tarball_path = download_boost_tarball( regression_root, tag, proxy )
-
- if 'unpack' in args:
- if not tarball_path:
- tarball_path = os.path.join( regression_root, tarball_name_for_tag( tag ) )
- unpack_tarball( tarball_path, regression_root )
-
-
-def get_source( user, tag, proxy, args, **unused ):
- refresh_timestamp()
- log( 'Getting sources (%s)...' % timestamp() )
-
- if user is not None:
- retry(
- svn_checkout
- , ( user, tag, args )
- )
- else:
- retry(
- get_tarball
- , ( tag, proxy, args )
- )
-
-
-def update_source( user, tag, proxy, args, **unused ):
- if user is not None or os.path.exists( os.path.join( boost_root, '.svn' ) ):
- open( timestamp_path, 'w' ).close()
- log( 'Updating sources from SVN (%s)...' % timestamp() )
- retry(
- svn_update
- , ( user, tag, args )
- )
- else:
- get_source( user, tag, proxy, args )
-
-
-def tool_path( name_or_spec, v2=None ):
- if isinstance( name_or_spec, basestring ):
- return os.path.join( regression_root, name_or_spec )
-
- if os.path.exists( name_or_spec[ 'path' ] ):
- return name_or_spec[ 'path' ]
-
- if name_or_spec.has_key( 'build_path' ):
- return name_or_spec[ 'build_path' ]
-
- build_path_root = name_or_spec[ 'build_path_root' ]( v2 )
- log( 'Searching for "%s" in "%s"...' % ( name_or_spec[ 'name' ], build_path_root ) )
- for root, dirs, files in os.walk( build_path_root ):
- if name_or_spec[ 'name' ] in files:
- return os.path.join( root, name_or_spec[ 'name' ] )
-
- raise Exception( 'Cannot find "%s" in any of the following locations:\n%s' % (
- name_or_spec[ 'name' ]
- , '\n'.join( [ name_or_spec[ 'path' ], build_path_root ] )
- ) )
-
-
-def build_if_needed( tool, toolset, toolsets, v2 ):
- if os.path.exists( tool[ 'path' ] ):
- log( 'Found preinstalled "%s"; will use it.' % tool[ 'path' ] )
- return
+ def http_get( self, source_url, destination_file ):
+ import urllib
+
+ proxies = None
+ if hasattr(self,'proxy') and self.proxy is not None:
+ proxies = { 'http' : self.proxy }
+
+ src = urllib.urlopen( source_url, proxies = proxies )
+
+ f = open( destination_file, 'wb' )
+ while True:
+ data = src.read( 16*1024 )
+ if len( data ) == 0: break
+ f.write( data )
 
- log( 'Preinstalled "%s" is not found; building one...' % tool[ 'path' ] )
+ f.close()
+ src.close()
 
- if toolset is None:
- if toolsets is not None:
- toolset = string.split( toolsets, ',' )[0]
- if not tool[ 'is_supported_toolset' ]( toolset ):
- log( 'Warning: Specified toolset (%s) cannot be used to bootstrap "%s".'\
- % ( toolset, tool[ 'name' ] ) )
+ def import_utils(self):
+ global utils
+ if utils is None:
+ sys.path.append( self.xsl_reports_dir )
+ import utils as utils_module
+ utils = utils_module
+
+ def build_if_needed( tool, toolset, toolsets ):
+ if os.path.exists( tool[ 'path' ] ):
+ log( 'Found preinstalled "%s"; will use it.' % tool[ 'path' ] )
+ return
+
+ log( 'Preinstalled "%s" is not found; building one...' % tool[ 'path' ] )
+
+ if toolset is None:
+ if toolsets is not None:
+ toolset = string.split( toolsets, ',' )[0]
+ if not tool[ 'is_supported_toolset' ]( toolset ):
+ log( 'Warning: Specified toolset (%s) cannot be used to bootstrap "%s".'\
+ % ( toolset, tool[ 'name' ] ) )
 
+ toolset = tool[ 'default_toolset' ](v2)
+ log( ' Using default toolset for the platform (%s).' % toolset )
+ else:
                 toolset = tool[ 'default_toolset' ](v2)
+ log( 'Warning: No bootstrap toolset for "%s" was specified.' % tool[ 'name' ] )
                 log( ' Using default toolset for the platform (%s).' % toolset )
- else:
- toolset = tool[ 'default_toolset' ](v2)
- log( 'Warning: No bootstrap toolset for "%s" was specified.' % tool[ 'name' ] )
- log( ' Using default toolset for the platform (%s).' % toolset )
-
- if os.path.exists( tool[ 'source_dir' ] ):
- log( 'Found "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] ) )
- build_cmd = tool[ 'build_cmd' ]( toolset, v2 )
- log( 'Building "%s" (%s)...' % ( tool[ 'name'], build_cmd ) )
- utils.system( [
- 'cd "%s"' % tool[ 'source_dir' ]
- , build_cmd
- ] )
- else:
- raise 'Could not find "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] )
-
- if not tool.has_key( 'build_path' ):
- tool[ 'build_path' ] = tool_path( tool, v2 )
-
- if not os.path.exists( tool[ 'build_path' ] ):
- raise 'Failed to find "%s" after build.' % tool[ 'build_path' ]
-
- log( '%s succesfully built in "%s" location' % ( tool[ 'name' ], tool[ 'build_path' ] ) )
-
-
-def import_utils():
- global utils
- if utils is None:
- sys.path.append( xsl_reports_dir )
- import utils as utils_module
- utils = utils_module
-
-
-def download_if_needed( tool_name, tool_url, proxy ):
- path = tool_path( tool_name )
- if not os.path.exists( path ):
- log( 'Preinstalled "%s" is not found.' % path )
- log( ' Downloading from %s...' % tool_url )
-
- zip_path = '%s.zip' % os.path.splitext( path )[0]
- http_get( tool_url, zip_path, proxy )
-
- log( ' Unzipping %s...' % path )
- utils.unzip( zip_path, os.path.dirname( path ) )
-
- log( ' Removing %s...' % zip_path )
- os.unlink( zip_path )
- log( 'Done.' )
-
-
-def setup(
- comment
- , toolsets
- , book
- , bjam_toolset
- , pjl_toolset
- , monitored
- , proxy
- , v2
- , args
- , **unused
- ):
- import_utils()
-
- patch_boost_path = os.path.join( regression_root, patch_boost_name )
- if os.path.exists( patch_boost_path ):
- log( 'Found patch file "%s". Executing it.' % patch_boost_path )
- os.chdir( regression_root )
- utils.system( [ patch_boost_path ] )
-
- build_if_needed( bjam, bjam_toolset, toolsets, v2 )
- build_if_needed( process_jam_log, pjl_toolset, toolsets, v2 )
 
- if monitored:
- if sys.platform == 'win32':
- download_if_needed( 'build_monitor.exe', build_monitor_url, proxy )
- download_if_needed( 'pskill.exe', pskill_url, proxy )
+ if os.path.exists( tool[ 'source_dir' ] ):
+ log( 'Found "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] ) )
+ build_cmd = tool[ 'build_cmd' ]( toolset, v2 )
+ log( 'Building "%s" (%s)...' % ( tool[ 'name'], build_cmd ) )
+ utils.system( [
+ 'cd "%s"' % tool[ 'source_dir' ]
+ , build_cmd
+ ] )
         else:
- log( 'Warning: Test monitoring is not supported on this platform (yet).' )
- log( ' Please consider contributing this piece!' )
-
+ raise 'Could not find "%s" source directory "%s"' % ( tool[ 'name' ], tool[ 'source_dir' ] )
 
-def bjam_build_script_cmd( cmd ):
- env_setup_key = 'BJAM_ENVIRONMENT_SETUP'
- if os.environ.has_key( env_setup_key ):
- return '%s & %s' % ( os.environ[env_setup_key], cmd )
+ if not tool.has_key( 'build_path' ):
+ tool[ 'build_path' ] = tool_path( tool, v2 )
 
- return cmd
+ if not os.path.exists( tool[ 'build_path' ] ):
+ raise 'Failed to find "%s" after build.' % tool[ 'build_path' ]
 
+ log( '%s succesfully built in "%s" location' % ( tool[ 'name' ], tool[ 'build_path' ] ) )
+
+ #~ Dowloading source, from SVN...
 
-def bjam_command( toolsets, v2 ):
- build_path = regression_root
- if build_path[-1] == '\\': build_path += '\\'
-
- v2_option = ""
- if v2:
- v2_option = "--v2"
+ def svn_checkout( self ):
+ os.chdir( self.regression_root )
+ self.svn_command( 'co %s %s' % (self.svn_repository_url(self.tag),'boost') )
 
- result = '"%s" %s "-sBOOST_BUILD_PATH=%s" "-sBOOST_ROOT=%s"'\
- % (
- tool_path( bjam, v2 )
- , v2_option
- , build_path
- , boost_root
- )
+ def svn_update( self ):
+ os.chdir( self.boost_root )
+ self.svn_command( 'update' )
 
- if toolsets:
- if v2:
- result += ' ' + string.join(string.split( toolsets, ',' ), ' ' )
+ def svn_command( self, command ):
+ svn_anonymous_command_line = 'svn %(command)s'
+ svn_command_line = 'svn --non-interactive --username=%(user)s %(command)s'
+
+ if not hasattr(self,'user') or self.user is None or self.user == 'anonymous':
+ cmd = svn_anonymous_command_line % { 'command': command }
         else:
- result += ' "-sTOOLS=%s"' % string.join( string.split( toolsets, ',' ), ' ' )
+ cmd = svn_command_line % { 'user': self.user, 'command': command }
 
- return result
+ self.log( 'Executing SVN command "%s"' % cmd )
+ rc = os.system( cmd )
+ if rc != 0:
+ raise Exception( 'SVN command "%s" failed with code %d' % ( cmd, rc ) )
+
+ def svn_repository_url( self, path ):
+ if hasattr(self,'user') and self.user is not None and self.user != 'anonymous':
+ return '%s%s' % (repo_root['user'],path)
+ else:
+ return '%s%s' % (repo_root['anon'],path)
+
+ #~ Downloading and extracting source archives, from tarballs or zipballs...
+
+ def get_tarball( self, *args ):
+ if not args or args == []:
+ args = [ 'download', 'unpack' ]
 
+ tarball_path = None
 
-def install( toolsets, v2, **unused ):
- import_utils()
- os.chdir( os.path.join( boost_root ) )
+ if hasattr(self,'local') and self.local is not None:
+ tarball_path = self.local
+ elif 'download' in args:
+ tarball_path = self.download_boost_tarball()
+ if not tarball_path:
+ tarball_path = os.path.join( self.regression_root, self.boost_tarball_url() )
 
- log( 'Making "%s" directory...' % regression_results )
- utils.makedirs( regression_results )
+ if 'unpack' in args:
+ self.unpack_tarball( tarball_path )
+ pass
 
- install_cmd = '%s -d2 install >>%s 2>&1' % ( bjam_command( toolsets, v2 ), install_log )
- log( 'Installing libraries (%s)...' % install_cmd )
- utils.system( [ install_cmd ] )
+ def download_boost_tarball( self ):
+ tarball_name = self.boost_tarball_name()
+ tarball_path = os.path.join( self.regression_root, tarball_name )
+ tarball_url = self.boost_tarball_url()
 
+ self.log( 'Downloading "%s" to "%s"...' % ( tarball_url, os.path.dirname( tarball_path ) ) )
 
-def start_build_monitor( timeout ):
- if sys.platform == 'win32':
- build_monitor_path = tool_path( 'build_monitor.exe' )
- if os.path.exists( build_monitor_path ):
- utils.system( [ 'start /belownormal "" "%s" bjam.exe %d' % ( build_monitor_path, timeout*60 ) ] )
- else:
- log( 'Warning: Build monitor is not found at "%s"' % build_monitor_path )
+ if os.path.exists( tarball_path ):
+ os.unlink( tarball_path )
+ self.http_get( tarball_url, tarball_path )
 
+ return tarball_path
 
-def stop_build_monitor():
- if sys.platform == 'win32':
- build_monitor_path = tool_path( 'build_monitor.exe' )
- if os.path.exists( build_monitor_path ):
- utils.system( [ '"%s" build_monitor' % tool_path( 'pskill.exe' ) ] )
-
-
-def run_process_jam_log(v2):
- log( 'Getting test case results out of "%s"...' % regression_log )
-
- if v2:
- v2 = "--v2"
- else:
- v2 = ""
-
- utils.checked_system( [
- '"%s" %s "%s" <"%s"' % (
- tool_path( process_jam_log, v2 )
- , v2
- , regression_results
- , regression_log
- )
- ] )
+ def boost_tarball_name( self ):
+ return 'boost-%s.tar.bz2' % self.tag.split( '/' )[-1]
 
+ def boost_tarball_url( self ):
+ return 'http://beta.boost.org/development/snapshot.php/%s' % self.tag
 
-def test(
- toolsets
- , bjam_options
- , monitored
- , timeout
- , v2
- , args
- , **unused
- ):
- if args == []:
- args = [ "test", "process" ]
-
- import_utils()
-
- try:
- if monitored:
- start_build_monitor( timeout )
-
- cd = os.getcwd()
- os.chdir( os.path.join( boost_root, 'status' ) )
-
- log( 'Making "%s" directory...' % regression_results )
- utils.makedirs( regression_results )
-
- results_libs = os.path.join( regression_results, 'libs' )
- results_status = os.path.join( regression_results, 'status' )
-
- if "clean" in args:
- rmtree( results_libs )
- rmtree( results_status )
-
- build_dir_option = "-sALL_LOCATE_TARGET"
- if v2:
- build_dir_option = "--build-dir"
-
- if "test" in args:
- test_cmd = '%s -d2 --dump-tests %s "%s=%s" >>"%s" 2>&1' % (
- bjam_command( toolsets, v2 )
- , bjam_options
- , build_dir_option
- , regression_results
- , regression_log
- )
+ def unpack_tarball( self, tarball_path ):
+ self.log( 'Looking for old unpacked archives...' )
+ old_boost_dirs = self.find_boost_dirs( )
 
- log( 'Starting tests (%s)...' % test_cmd )
- utils.system( [ test_cmd ] )
+ for old_boost_dir in old_boost_dirs:
+ if old_boost_dir != tarball_path:
+ self.log( 'Deleting old directory %s.' % old_boost_dir )
+ self.rmtree( old_boost_dir )
 
- if "process" in args:
- run_process_jam_log(v2)
+ self.log( 'Unpacking boost tarball ("%s")...' % tarball_path )
 
- os.chdir( cd )
- finally:
- if monitored:
- stop_build_monitor()
-
-
-def build_book( **kargs ):
- # To do
- # 1. PDF generation
- # 2. Do we need to cleanup before the build?
- # 3. Incremental builds
- if not os.path.exists( regression_results ):
- os.makedirs( regression_results )
- import_utils()
- cwd = os.getcwd()
- try:
- os.chdir( os.path.join( boost_root, 'doc' ) )
- if os.path.exists( boostbook_log ):
- os.unlink( boostbook_log )
- utils.system( [ '%s --v2 html >>%s 2>&1' % ( tool_path( bjam, v2=True ), boostbook_log ) ] )
- # utils.system( [ '%s --v2 pdf >>%s 2>&1' % ( tool_path( bjam, v2=True ), boostbook_log ) ] )
- finally:
- os.chdir( cwd )
-
-def collect_logs(
- tag
- , runner
- , platform
- , user
- , comment
- , incremental
- , dart_server
- , ftp_proxy
- , args
- , **unused
- ):
- import_utils()
-
- if comment is None:
- comment = 'comment.html'
-
- comment_path = os.path.join( regression_root, comment )
- if not os.path.exists( comment_path ):
- log( 'Comment file "%s" not found; creating default comment.' % comment_path )
- f = open( comment_path, 'w' )
- f.write( '<p>Tests are run on %s platform.</p>' % platform_name() )
- f.close()
+ tarball_name = os.path.basename( tarball_path )
+ extension = tarball_name[ tarball_name.find( '.' ) : ]
 
- run_type = ''
- if incremental: run_type = 'incremental'
- else: run_type = 'full'
-
- source = 'tarball'
- revision = ''
- svn_root_file = os.path.join( boost_root, '.svn' )
- svn_info_file = os.path.join( boost_root, 'svn_info.txt' )
- if os.path.exists( svn_root_file ):
- source = 'SVN'
- svn_command( 'user', 'info ' + boost_root + ' >' + svn_info_file )
-
- if os.path.exists( svn_info_file ):
- f = open( svn_info_file, 'r' )
- svn_info = f.read()
- f.close()
- i = svn_info.find( 'Revision:' )
- if i >= 0:
- i += 10
- while svn_info[i] >= '0' and svn_info[i] <= '9':
- revision += svn_info[i]
- i += 1
-
-
- from runner import collect_logs
- collect_logs(
- regression_results
- , runner
- , tag
- , platform
- , comment_path
- , timestamp_path
- , user
- , source
- , run_type
- , dart_server
- , ftp_proxy
- , revision
- )
-
-
-def collect_book( **unused ):
- log( 'Collecting files for BoostBook into "%s"...' % boostbook_archive_name )
- import zipfile
- boostbook_archive = zipfile.ZipFile( boostbook_archive_name, 'w', zipfile.ZIP_DEFLATED )
- html_root = os.path.join( boost_root, 'doc/html' )
-
- boostbook_archive.writestr( 'timestamp', timestamp())
- boostbook_archive.write( boostbook_log, os.path.basename( boostbook_log ) )
-
- def add_files( arg, dirname, names ):
- for name in names:
- path = os.path.join( dirname, name )
- if not os.path.isdir( path ):
- boostbook_archive.write( path, path[ len( html_root ) + 1: ] )
-
- os.path.walk( html_root, add_files, None )
-
-
-def upload_logs(
- tag
- , runner
- , user
- , ftp_proxy
- , debug_level
- , send_bjam_log
- , dart_server
- , **unused
- ):
- import_utils()
- from runner import upload_logs
- retry(
- upload_logs
- , ( regression_results, runner, tag, user, ftp_proxy, debug_level,
- send_bjam_log, timestamp_path, dart_server )
- )
-
-
-def upload_book( tag, runner, ftp_proxy, debug_level, **unused ):
- import_utils()
- from runner import upload_to_ftp
- upload_to_ftp( tag, boostbook_archive_name, ftp_proxy, debug_level )
-
-
-def update_itself( tag, **unused ):
- source = os.path.join( xsl_reports_dir, 'runner', os.path.basename( sys.argv[0] ) )
- self = os.path.join( regression_root, os.path.basename( sys.argv[0] ) )
-
- # Through revision 38985, the update copy was not done if
- # os.stat(self).st_mtime > os.stat(source).st_mtime. This was not
- # reliable on all systems, so the copy is now done unconditionally.
- log( ' Saving a backup copy of the current script...' )
- os.chmod( self, stat.S_IWRITE ) # Win32 workaround
- shutil.move( self, '%s~' % self )
- log( 'Updating %s from %s...' % ( self, source ) )
- shutil.copy2( source, self )
-
-
-def send_mail( smtp_login, mail, subject, msg = '', debug_level = 0 ):
- import smtplib
- if not smtp_login:
- server_name = 'mail.%s' % mail.split( '@' )[-1]
- user_name = None
- password = None
- else:
- server_name = smtp_login.split( '@' )[-1]
- ( user_name, password ) = string.split( smtp_login.split( '@' )[0], ':' )
-
- log( ' Sending mail through "%s"...' % server_name )
- smtp_server = smtplib.SMTP( server_name )
- smtp_server.set_debuglevel( debug_level )
- if user_name:
- smtp_server.login( user_name, password )
-
- smtp_server.sendmail(
- mail
- , [ mail ]
- , 'Subject: %s\nTo: %s\n\n%s' % ( subject, mail, msg )
- )
-
-
-def regression(
- tag
- , local
- , runner
- , platform
- , user
- , comment
- , toolsets
- , book
- , bjam_options
- , bjam_toolset
- , pjl_toolset
- , incremental
- , send_bjam_log
- , force_update
- , have_source
- , skip_tests
- , monitored
- , timeout
- , mail = None
- , smtp_login = None
- , proxy = None
- , ftp_proxy = None
- , debug_level = 0
- , v2 = 1
- , dart_server = None
- , args = []
- ):
-
- try:
- mail_subject = 'Boost regression for %s on %s' % ( tag, string.split(socket.gethostname(), '.')[0] )
- start_time = time.localtime()
- if mail:
- log( 'Sending start notification to "%s"' % mail )
- send_mail(
- smtp_login
- , mail
- , '%s started at %s.' % ( mail_subject, format_time( start_time ) )
- , debug_level = debug_level
- )
+ if extension in ( ".tar.gz", ".tar.bz2" ):
+ import tarfile
+
+ mode = os.path.splitext( extension )[1][1:]
+ tar = tarfile.open( tarball_path, 'r:%s' % mode )
+ for tarinfo in tar:
+ tar.extract( tarinfo, self.regression_root )
+ if sys.platform == 'win32' and not tarinfo.isdir():
+ # workaround what appears to be a Win32-specific bug in 'tarfile'
+ # (modification times for extracted files are not set properly)
+ f = os.path.join( self.regression_root, tarinfo.name )
+ os.chmod( f, stat.S_IWRITE )
+ os.utime( f, ( tarinfo.mtime, tarinfo.mtime ) )
+ tar.close()
+ elif extension in ( ".zip" ):
+ import zipfile
+
+ z = zipfile.ZipFile( tarball_path, 'r', zipfile.ZIP_DEFLATED )
+ for f in z.infolist():
+ destination_file_path = os.path.join( self.regression_root, f.filename )
+ if destination_file_path[-1] == "/": # directory
+ if not os.path.exists( destination_file_path ):
+ os.makedirs( destination_file_path )
+ else: # file
+ result = open( destination_file_path, 'wb' )
+ result.write( z.read( f.filename ) )
+ result.close()
+ z.close()
+ else:
+ raise 'Do not know how to unpack archives with extension \"%s\"' % extension
 
- if local is not None:
- log( 'Using local file "%s"' % local )
+ boost_dir = self.find_boost_dirs()[0]
+ self.log( ' Unpacked into directory "%s"' % boost_dir )
 
- b = os.path.basename( local )
- tag = b[ 0: b.find( '.' ) ]
- log( 'Tag: "%s"' % tag )
-
- unpack_tarball( local, regression_root )
- elif have_source:
- if not incremental: cleanup( [ 'bin' ] )
- else:
- if incremental or force_update:
- if not incremental: cleanup( [ 'bin' ] )
- update_source( user, tag, proxy, [] )
- else:
- cleanup( [] )
- get_source( user, tag, proxy, [] )
+ if os.path.exists( boost_root ):
+ self.log( 'Deleting "%s" directory...' % boost_root )
+ self.rmtree( boost_root )
+
+ self.log( 'Renaming "%s" into "%s"' % ( boost_dir, boost_root ) )
+ os.rename( boost_dir, boost_root )
+
+ def find_boost_dirs( self ):
+ return [
+ x for x in
+ glob.glob( os.path.join( self.regression_root, 'boost[-_]*' ) )
+ if os.path.isdir( x )
+ ]
 
- setup( comment, toolsets, book, bjam_toolset, pjl_toolset, monitored, proxy,
- v2, [] )
 
- # Not specifying --toolset in command line is not enough
- # that would mean to use Boost.Build default ones
- # We can skip test only we were explictly
- # told to have no toolsets in command line "--toolset="
- if toolsets != '': # --toolset=,
- if not skip_tests: test( toolsets, bjam_options, monitored, timeout, v2, [] )
- collect_logs( tag, runner, platform, user, comment, incremental, dart_server, proxy, [] )
- upload_logs( tag, runner, user, ftp_proxy, debug_level, send_bjam_log, dart_server )
-
- if book:
- build_book()
- collect_book()
- upload_book( tag, runner, ftp_proxy, debug_level )
-
- update_itself( tag )
-
- if mail:
- log( 'Sending report to "%s"' % mail )
- end_time = time.localtime()
- send_mail(
- smtp_login
- , mail
- , '%s completed successfully at %s.' % ( mail_subject, format_time( end_time ) )
- , debug_level = debug_level
- )
- except:
- if mail:
- log( 'Sending report to "%s"' % mail )
- traceback_ = '\n'.join( apply( traceback.format_exception, sys.exc_info() ) )
- end_time = time.localtime()
- send_mail(
- smtp_login
- , mail
- , '%s failed at %s.' % ( mail_subject, format_time( end_time ) )
- , traceback_
- , debug_level
- )
- raise
-'''

Copied: branches/release-tools/regression/src/run.py (from r39963, /branches/release-tools/regression/run.py)
==============================================================================
--- /branches/release-tools/regression/run.py (original)
+++ branches/release-tools/regression/src/run.py 2007-10-13 13:20:58 EDT (Sat, 13 Oct 2007)
@@ -6,16 +6,39 @@
 # (See accompanying file LICENSE_1_0.txt or copy at
 # http://www.boost.org/LICENSE_1_0.txt)
 
+import os
 import os.path
+import shutil
 import sys
+import urllib
 
 #~ The directory this file is in.
-root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
-print '--- %s' % root
+root = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+print '# Running regressions in %s...' % root
 
-#~ Bootstrap
-sys.path.insert(0,root)
+script_sources = [ 'collect_and_upload_logs.py', 'regression.py' ]
+script_local = os.path.join(root,'tools','regression','src')
+script_remote = 'http://svn.boost.org/svn/boost/branches/release-tools/regression/src'
+script_dir = os.path.join(root,'tools_regression_src')
 
-#~ Launch runner
-from src.regression import runner
+#~ Bootstrap.
+#~ * Clear out any old versions of the scripts
+print '# Creating regression scripts at %s...' % script_dir
+if os.path.exists(script_dir):
+ shutil.rmtree(script_dir)
+os.mkdir(script_dir)
+#~ * Get new scripts, either from local working copy, or from svn
+if os.path.exists(script_local):
+ print '# Copying regression scripts from %s...' % script_local
+ for src in script_sources:
+ shutil.copyfile( os.path.join(script_local,src), os.path.join(script_dir,src) )
+else:
+ print '# Downloading regression scripts from %s...' % script_remote
+ for src in script_sources:
+ urllib.urlretrieve( '%s/%s' % (script_remote,src), os.path.join(script_dir,src) )
+#~ * Make the scripts available to Python
+sys.path.insert(0,os.path.join(root,'tools_regression_src'))
+
+#~ Launch runner.
+from regression import runner
 runner(root)


Boost-Commit list run by bdawes at acm.org, david.abrahams at rcn.com, gregod at cs.rpi.edu, cpdaniel at pacbell.net, john at johnmaddock.co.uk