pygccxml-commit Mailing List for C++ Python language bindings
                
                Brought to you by:
                
                    mbaas,
                    
                
                    roman_yakovenko
                    
                
            
            
        
        
        
    You can subscribe to this list here.
| 2006 | Jan | Feb | Mar (190) | Apr (166) | May (170) | Jun (75) | Jul (105) | Aug (131) | Sep (99) | Oct (84) | Nov (67) | Dec (54) | 
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2007 | Jan (66) | Feb (49) | Mar (25) | Apr (62) | May (21) | Jun (34) | Jul (9) | Aug (21) | Sep (5) | Oct | Nov (63) | Dec (34) | 
| 2008 | Jan (10) | Feb (42) | Mar (26) | Apr (25) | May (6) | Jun (40) | Jul (18) | Aug (29) | Sep (6) | Oct (32) | Nov (14) | Dec (56) | 
| 2009 | Jan (127) | Feb (52) | Mar (2) | Apr (10) | May (29) | Jun (3) | Jul | Aug (16) | Sep (4) | Oct (11) | Nov (8) | Dec (14) | 
| 2010 | Jan (31) | Feb (1) | Mar (7) | Apr (9) | May (1) | Jun | Jul (2) | Aug | Sep | Oct | Nov | Dec | 
| 2011 | Jan | Feb (8) | Mar (4) | Apr | May | Jun | Jul | Aug | Sep | Oct | Nov | Dec | 
| 2014 | Jan | Feb | Mar (1) | Apr | May | Jun | Jul | Aug | Sep | Oct | Nov | Dec | 
| 
      
      
      From: Robin  <182...@16...> - 2014-03-02 05:05:30
      
     | 
| Sorry to bother you — could you take a look at my problem? Thanks. Environment: OS: Win7 64-bit; tools: gccxml 0.9.0 (called by pyplusplus 1.0), boost 1.4.8. Problem: I use pyogre and also want to expose some of my C++ modules to Python using py++, which uses the boost library. I tried to wrap shared_ptr.hpp (boost) for Python (using gccxml to parse the file), but I ran into some problems and cannot get through them. Thanks for your help. 1.【The error message is】 utility.hpp:21:21 error: D:boost_1_48_0/boost/utility: permission denied. There is a similar problem which I found on the internet: viewtopic.php?f=4&t=77207 — is it a compiler mismatch between a header and a directory when they have the same name? 2.【The error message is】 gcc-xml: xml_find_template_parm encountered unsupported type identifier_node. I can't find the switch case for identifier_node in the xml_find_template_parm function. There is also a similar problem on the internet: https://savannah.cern.ch/bugs/?printer= ... _id=102568 Thanks for any help (advice, resource web links). This message was sent to you via the SourceForge web mail form. Replying to this email will not work, please send a message to robinliouhu at http://sourceforge.net/u/robinliouhu/profile/send_message | 
| 
      
      
      From: <rom...@us...> - 2011-03-17 20:25:38
      
     | 
| Revision: 1856
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1856&view=rev
Author:   roman_yakovenko
Date:     2011-03-17 20:25:25 +0000 (Thu, 17 Mar 2011)
Log Message:
-----------
improving bit fields related generated code - thanks to Scott Sturdivant for the bug report
Modified Paths:
--------------
    pyplusplus_dev/docs/history/history.rest
    pyplusplus_dev/pyplusplus/code_creators/member_variable.py
    pyplusplus_dev/unittests/data/member_variables_to_be_exported.hpp
    pyplusplus_dev/unittests/member_variables_tester.py
    sphinx/conf.py
Modified: pyplusplus_dev/docs/history/history.rest
===================================================================
--- pyplusplus_dev/docs/history/history.rest	2011-03-07 21:02:46 UTC (rev 1855)
+++ pyplusplus_dev/docs/history/history.rest	2011-03-17 20:25:25 UTC (rev 1856)
@@ -80,6 +80,9 @@
 13. Thanks to Aron Xu, for pointing out that it is better to use "os.name",
     instead of "sys.platform" for platform specific logic
 
+14. Thanks to Scott Sturdivant, for reporting the bug, related to bit fields code generation.
+    The bug was fixed.
+
 -----------
 Version 1.0
 -----------
Modified: pyplusplus_dev/pyplusplus/code_creators/member_variable.py
===================================================================
--- pyplusplus_dev/pyplusplus/code_creators/member_variable.py	2011-03-07 21:02:46 UTC (rev 1855)
+++ pyplusplus_dev/pyplusplus/code_creators/member_variable.py	2011-03-17 20:25:25 UTC (rev 1856)
@@ -281,27 +281,28 @@
         member_variable_base_t.__init__( self, variable=variable, wrapper=wrapper )
 
     def _create_impl( self ):
-        doc = ''
-        if self.declaration.type_qualifiers.has_static:
-            add_property = 'add_static_property'
-        else:
-            if self.documentation:
-                doc = self.documentation
-            add_property = 'add_property'
-        answer = [ add_property ]
+        answer = [ 'add_property' ]
         answer.append( '( ' )
         answer.append('"%s"' % self.alias)
         answer.append( self.PARAM_SEPARATOR )
-        answer.append( '(%s)(&%s)'
-                       % ( self.wrapper.getter_type, self.wrapper.getter_full_name ) )
 
+        make_function = algorithm.create_identifier( self, '::boost::python::make_function' )
+
+        answer.append( '%(mk_func)s( (%(getter_type)s)(&%(wfname)s) )'
+                       % { 'mk_func' : make_function
+                           , 'getter_type' : self.wrapper.getter_type
+                           , 'wfname' : self.wrapper.getter_full_name } )
+
         if self.wrapper.has_setter:
             answer.append( self.PARAM_SEPARATOR )
-            answer.append( '(%s)(&%s)'
-                           % ( self.wrapper.setter_type, self.wrapper.setter_full_name ) )
-        if doc:
+            answer.append( '%(mk_func)s( (%(setter_type)s)(&%(wfname)s) )'
+                       % { 'mk_func' : make_function
+                           , 'setter_type' : self.wrapper.setter_type
+                           , 'wfname' : self.wrapper.setter_full_name } )
+                           
+        if self.documentation:
             answer.append( self.PARAM_SEPARATOR )
-            answer.append( doc )
+            answer.append( self.documentation )
         answer.append( ' ) ' )
 
         code = ''.join( answer )
@@ -318,18 +319,17 @@
     """
     creates get/set accessors for bit fields
     """
-
     indent = code_creator.code_creator_t.indent
-    BF_GET_TEMPLATE = os.linesep.join([
-          '%(type)s get_%(name)s() const {'
-        , indent( 'return %(name)s;' )
+    GET_TEMPLATE =os.linesep.join([
+          'static %(type)s get_%(name)s(%(cls_type)s inst ){'
+        , indent( 'return inst.%(name)s;' )
         , '}'
         , ''
     ])
 
-    BF_SET_TEMPLATE = os.linesep.join([
-          'void set_%(name)s( %(type)s new_value ){ '
-        , indent( '%(name)s = new_value;' )
+    SET_TEMPLATE = os.linesep.join([
+          'static void set_%(name)s( %(cls_type)s inst, %(type)s new_value ){ '
+        , indent( 'inst.%(name)s = new_value;' )
         , '}'
         , ''
     ])
@@ -342,12 +342,17 @@
         return self.parent.full_name + '::' + 'get_' + self.declaration.name
     getter_full_name = property( _get_getter_full_name )
 
+    def inst_arg_type( self, has_const ):
+        inst_arg_type = declarations.declarated_t( self.declaration.parent )
+        if has_const:
+            inst_arg_type = declarations.const_t(inst_arg_type)
+        inst_arg_type = declarations.reference_t(inst_arg_type)
+        return inst_arg_type
+
     def _get_getter_type(self):
-        return declarations.member_function_type_t.create_decl_string(
+        return declarations.free_function_type_t.create_decl_string(
                 return_type=self.declaration.type
-                , class_decl_string=self.parent.full_name
-                , arguments_types=[]
-                , has_const=True
+                , arguments_types=[ self.inst_arg_type(True) ]
                 , with_defaults=False)
     getter_type = property( _get_getter_type )
 
@@ -356,11 +361,9 @@
     setter_full_name = property(_get_setter_full_name)
 
     def _get_setter_type(self):
-        return declarations.member_function_type_t.create_decl_string(
+        return declarations.free_function_type_t.create_decl_string(
                 return_type=declarations.void_t()
-                , class_decl_string=self.parent.full_name
-                , arguments_types=[self.declaration.type]
-                , has_const=False
+                , arguments_types=[ self.inst_arg_type(False), self.declaration.type  ]
                 , with_defaults=False)
     setter_type = property( _get_setter_type )
 
@@ -370,11 +373,15 @@
 
     def _create_impl(self):
         answer = []
-        substitutions = dict( type=self.declaration.type.decl_string
-                              , name=self.declaration.name )
-        answer.append( self.BF_GET_TEMPLATE % substitutions )
+        answer.append( self.GET_TEMPLATE % {
+            'type' : self.declaration.type.decl_string
+            , 'name' : self.declaration.name
+            , 'cls_type' : self.inst_arg_type( has_const=True ) })
         if self.has_setter:
-            answer.append( self.BF_SET_TEMPLATE % substitutions )
+            answer.append( self.SET_TEMPLATE % {
+            'type' : self.declaration.type.decl_string
+            , 'name' : self.declaration.name
+            , 'cls_type' : self.inst_arg_type( has_const=False ) })
         return os.linesep.join( answer )
 
     def _get_system_files_impl( self ):
Modified: pyplusplus_dev/unittests/data/member_variables_to_be_exported.hpp
===================================================================
--- pyplusplus_dev/unittests/data/member_variables_to_be_exported.hpp	2011-03-07 21:02:46 UTC (rev 1855)
+++ pyplusplus_dev/unittests/data/member_variables_to_be_exported.hpp	2011-03-17 20:25:25 UTC (rev 1856)
@@ -48,6 +48,19 @@
 void set_a( bit_fields_t& inst, unsigned int new_value );
 unsigned int get_b(const bit_fields_t& inst);
 
+struct status_bits_t{
+    int bcr : 3;
+    int status : 3;
+};
+
+struct status_bits_keeper_t{
+
+    int get_sb_bcr(){ return status_bits.bcr; }
+    int get_sb_status(){ return status_bits.status; }
+
+    status_bits_t status_bits;
+};
+
 struct array_t{
     array_t()
     {
@@ -165,11 +178,11 @@
 
         static int* none_image;
     };
-    
+
     class Andy{
     protected:
         Andy() : userData(NULL) {}
-   
+
         virtual ~Andy()    {}
 
     public:
Modified: pyplusplus_dev/unittests/member_variables_tester.py
===================================================================
--- pyplusplus_dev/unittests/member_variables_tester.py	2011-03-07 21:02:46 UTC (rev 1855)
+++ pyplusplus_dev/unittests/member_variables_tester.py	2011-03-17 20:25:25 UTC (rev 1856)
@@ -37,6 +37,11 @@
         bf.b = value
 
     def run_tests(self, module):
+        sbk = module.status_bits_keeper_t()
+        sb = sbk.status_bits
+        sb.bcr = 2
+        self.failUnless( sbk.get_sb_bcr() == 2 )
+
         self.failIfRaisesAny( module.point )
         xypoint = module.point()
         self.failUnless( module.point.instance_count == 1)
@@ -78,7 +83,7 @@
         data = data_type.from_address( image.data )
         for j in range(5):
             self.failUnless( j == data[j] )
-            
+
         int_array = ctypes.c_int * 5
         array = int_array()
         for i in range( 5 ):
@@ -87,7 +92,7 @@
         data = data_type.from_address( image.data )
         for j in range(5):
             self.failUnless( j*2 == data[j] )
- 
+
         data_type = ctypes.POINTER( ctypes.c_int )
         data = data_type.from_address( module.image_t.none_image )
         self.failUnless( 1997 == data.contents.value )
Modified: sphinx/conf.py
===================================================================
--- sphinx/conf.py	2011-03-07 21:02:46 UTC (rev 1855)
+++ sphinx/conf.py	2011-03-17 20:25:25 UTC (rev 1856)
@@ -107,9 +107,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = 'SVN - Mar 7 2011'
+version = 'ersion( SVN - Mar 17 2011 )'
 # The full version, including alpha/beta/rc tags.
-release = 'SVN - Mar 7 2011'
+release = 'ersion( SVN - Mar 17 2011 )'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2011-03-07 21:02:53
      
     | 
| Revision: 1855
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1855&view=rev
Author:   roman_yakovenko
Date:     2011-03-07 21:02:46 +0000 (Mon, 07 Mar 2011)
Log Message:
-----------
remove reference to "language-binding.net" site
Modified Paths:
--------------
    sphinx/conf.py
    sphinx/readme.txt
Removed Paths:
-------------
    sphinx/__templates_www/
    sphinx/sitemap_gen.py
Modified: sphinx/conf.py
===================================================================
--- sphinx/conf.py	2011-03-07 20:44:32 UTC (rev 1854)
+++ sphinx/conf.py	2011-03-07 21:02:46 UTC (rev 1855)
@@ -28,7 +28,7 @@
 
 project_root = os.path.abspath('..')
 doc_project_root = os.path.abspath('.')
-packages = ( 'pydsc', 'pygccxml', 'pyplusplus' )
+packages = ( 'pygccxml', 'pyplusplus' ) #'pydsc' - it is an internal package, used to fix spelling mistakes
 
 sys.path.append( doc_project_root )
 
@@ -47,6 +47,7 @@
         os.symlink( source, target )
     else:
         shutil.copytree( source, target, ignore=shutil.ignore_patterns( r'.svn', '*.pyc', 'osdc2006' ) )
+        
 if has_true_links:
     if os.path.exists(os.path.join( doc_project_root, 'index.rest' )):
         os.unlink( os.path.join( doc_project_root, 'index.rest' ) )
@@ -76,44 +77,8 @@
         shutil.rmtree(target_dir)
     shutil.copytree( source_dir, target_dir, ignore=shutil.ignore_patterns( r'.svn' ) )
 
-def generate_sitemap(app, exception):
-    if 'www' not in outdir:
-        return
-    if exception:
-        print 'SITEMAP generation was skipped - there were errors during the build process'
-        return
-    try:
-        import sitemap_gen
-
-        working_dir = os.path.join( doc_project_root, outdir )
-        config = \
-        """<?xml version="1.0" encoding="UTF-8"?>
-        <site base_url="http://www.language-binding.net/" store_into="%(path)s/sitemap.xml.gz" verbose="1">
-            <directory path="%(path)s" url="http://www.language-binding.net/" default_file="index.html" />
-            <filter  action="http://sourceforge.net/drop"  type="regexp"    pattern="/\.[^/]*" />
-            <filter  action="http://sourceforge.net/drop"  type="regexp"    pattern="/_[^/]*" />
-        </site>
-        """ % dict( path=os.path.join( doc_project_root, working_dir ) )
-
-        f_config_path = os.path.join( working_dir, 'sitemap_config.xml' )
-        f_config = file( f_config_path, 'w+' )
-        f_config.write( config )
-        f_config.close()
-
-        sitemap = sitemap_gen.CreateSitemapFromFile(f_config_path, True)
-        if not sitemap:
-            print 'ERROR(SITEMAP): configuration file errors'
-        else:
-            sitemap.Generate()
-            print 'ERRORS(SITEMAP): %d' % sitemap_gen.output.num_errors
-            print 'WARNINGS(SITEMAP): %d' % sitemap_gen.output.num_warns
-    except Exception, error:
-        print "ERROR(SITEMAP): sitemap file was not generated - ", str(error)
-
-
 def setup(app):
     app.connect('build-finished', copy_indexing_suite_v2_files)
-    app.connect('build-finished', generate_sitemap)
 
 # General configuration
 # ---------------------
@@ -123,8 +88,6 @@
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage']
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['__templates']
-if 'www' in outdir:
-    templates_path = ['__templates_www']
 
 # The suffix of source filenames.
 source_suffix = '.rest'
@@ -144,9 +107,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '1.1'
+version = 'SVN - Mar 7 2011'
 # The full version, including alpha/beta/rc tags.
-release = '1.1'
+release = 'SVN - Mar 7 2011'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
Modified: sphinx/readme.txt
===================================================================
--- sphinx/readme.txt	2011-03-07 20:44:32 UTC (rev 1854)
+++ sphinx/readme.txt	2011-03-07 21:02:46 UTC (rev 1855)
@@ -1,7 +1 @@
-language-binding web site:
-
-    sphinx-build -b [changes|linkcheck|doctest] . www
-
-ads free doumentation:
-
-    sphinx-build . docs
+$ sphinx-build -E . docs
Deleted: sphinx/sitemap_gen.py
===================================================================
--- sphinx/sitemap_gen.py	2011-03-07 20:44:32 UTC (rev 1854)
+++ sphinx/sitemap_gen.py	2011-03-07 21:02:46 UTC (rev 1855)
@@ -1,2205 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2004, 2005 Google Inc.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-#   notice, this list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright
-#   notice, this list of conditions and the following disclaimer in
-#   the documentation and/or other materials provided with the
-#   distribution.
-#
-# * Neither the name of Google nor the names of its contributors may
-#   be used to endorse or promote products derived from this software
-#   without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-#
-# The sitemap_gen.py script is written in Python 2.2 and released to
-# the open source community for continuous improvements under the BSD
-# 2.0 new license, which can be found at:
-#
-#   http://www.opensource.org/licenses/bsd-license.php
-#
-
-__usage__ = \
-"""A simple script to automatically produce sitemaps for a webserver,
-in the Google Sitemap Protocol (GSP).
-
-Usage: python sitemap_gen.py --config=config.xml [--help] [--testing]
-            --config=config.xml, specifies config file location
-            --help, displays usage message
-            --testing, specified when user is experimenting
-"""
-
-# Please be careful that all syntax used in this file can be parsed on
-# Python 1.5 -- this version check is not evaluated until after the
-# entire file has been parsed.
-import sys
-if sys.hexversion < 0x02020000:
-  print 'This script requires Python 2.2 or later.'
-  print 'Currently run with version: %s' % sys.version
-  sys.exit(1)
-
-import fnmatch
-import glob
-import gzip
-import md5
-import os
-import re
-import stat
-import time
-import types
-import urllib
-import urlparse
-import xml.sax
-
-# True and False were introduced in Python2.2.2
-try:
-  testTrue=True
-  del testTrue
-except NameError:
-  True=1
-  False=0
-
-# Text encodings
-ENC_ASCII = 'ASCII'
-ENC_UTF8  = 'UTF-8'
-ENC_IDNA  = 'IDNA'
-ENC_ASCII_LIST = ['ASCII', 'US-ASCII', 'US', 'IBM367', 'CP367', 'ISO646-US'
-                  'ISO_646.IRV:1991', 'ISO-IR-6', 'ANSI_X3.4-1968',
-                  'ANSI_X3.4-1986', 'CPASCII' ]
-ENC_DEFAULT_LIST = ['ISO-8859-1', 'ISO-8859-2', 'ISO-8859-5']
-
-# Maximum number of urls in each sitemap, before next Sitemap is created
-MAXURLS_PER_SITEMAP = 50000
-
-# Suffix on a Sitemap index file
-SITEINDEX_SUFFIX = '_index.xml'
-
-# Regular expressions tried for extracting URLs from access logs.
-ACCESSLOG_CLF_PATTERN = re.compile(
-  r'.+\s+"([^\s]+)\s+([^\s]+)\s+HTTP/\d+\.\d+"\s+200\s+.*'
-  )
-
-# Match patterns for lastmod attributes
-LASTMOD_PATTERNS = map(re.compile, [
-  r'^\d\d\d\d$',
-  r'^\d\d\d\d-\d\d$',
-  r'^\d\d\d\d-\d\d-\d\d$',
-  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\dZ$',
-  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d[+-]\d\d:\d\d$',
-  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z$',
-  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?[+-]\d\d:\d\d$',
-  ])
-
-# Match patterns for changefreq attributes
-CHANGEFREQ_PATTERNS = [
-  'always', 'hourly', 'daily', 'weekly', 'monthly', 'yearly', 'never'
-  ]
-
-# XML formats
-SITEINDEX_HEADER   = \
-  '<?xml version="1.0" encoding="UTF-8"?>\n' \
-  '<sitemapindex\n' \
-  '  xmlns="http://www.google.com/schemas/sitemap/0.84"\n' \
-  '  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
-  '  xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84\n' \
-  '                      http://www.google.com/schemas/sitemap/0.84/' \
-  'siteindex.xsd">\n'
-SITEINDEX_FOOTER   = '</sitemapindex>\n'
-SITEINDEX_ENTRY    = \
-  ' <sitemap>\n' \
-  '  <loc>%(loc)s</loc>\n' \
-  '  <lastmod>%(lastmod)s</lastmod>\n' \
-  ' </sitemap>\n'
-SITEMAP_HEADER     = \
-  '<?xml version="1.0" encoding="UTF-8"?>\n' \
-  '<urlset\n' \
-  '  xmlns="http://www.google.com/schemas/sitemap/0.84"\n' \
-  '  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
-  '  xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84\n' \
-  '                      http://www.google.com/schemas/sitemap/0.84/' \
-  'sitemap.xsd">\n'
-SITEMAP_FOOTER     = '</urlset>\n'
-SITEURL_XML_PREFIX = ' <url>\n'
-SITEURL_XML_SUFFIX = ' </url>\n'
-
-# Search engines to notify with the updated sitemaps
-#
-# This list is very non-obvious in what's going on.  Here's the gist:
-# Each item in the list is a 6-tuple of items.  The first 5 are "almost"
-# the same as the input arguments to urlparse.urlunsplit():
-#   0 - schema
-#   1 - netloc
-#   2 - path
-#   3 - query    <-- EXCEPTION: specify a query map rather than a string
-#   4 - fragment
-# Additionally, add item 5:
-#   5 - query attribute that should be set to the new Sitemap URL
-# Clear as mud, I know.
-NOTIFICATION_SITES = [
-  ('http', 'www.google.com', 'webmasters/sitemaps/ping', {}, '', 'sitemap')
-  ]
-
-
-class Error(Exception):
-  """
-  Base exception class.  In this module we tend not to use our own exception
-  types for very much, but they come in very handy on XML parsing with SAX.
-  """
-  pass
-#end class Error
-
-
-class SchemaError(Error):
-  """Failure to process an XML file according to the schema we know."""
-  pass
-#end class SchemeError
-
-
-class Encoder:
-  """
-  Manages wide-character/narrow-character conversions for just about all
-  text that flows into or out of the script.
-
-  You should always use this class for string coercion, as opposed to
-  letting Python handle coercions automatically.  Reason: Python
-  usually assumes ASCII (7-bit) as a default narrow character encoding,
-  which is not the kind of data we generally deal with.
-
-  General high-level methodologies used in sitemap_gen:
-
-  [PATHS]
-  File system paths may be wide or narrow, depending on platform.
-  This works fine, just be aware of it and be very careful to not
-  mix them.  That is, if you have to pass several file path arguments
-  into a library call, make sure they are all narrow or all wide.
-  This class has MaybeNarrowPath() which should be called on every
-  file system path you deal with.
-
-  [URLS]
-  URL locations are stored in Narrow form, already escaped.  This has the
-  benefit of keeping escaping and encoding as close as possible to the format
-  we read them in.  The downside is we may end up with URLs that have
-  intermingled encodings -- the root path may be encoded in one way
-  while the filename is encoded in another.  This is obviously wrong, but
-  it should hopefully be an issue hit by very few users.  The workaround
-  from the user level (assuming they notice) is to specify a default_encoding
-  parameter in their config file.
-
-  [OTHER]
-  Other text, such as attributes of the URL class, configuration options,
-  etc, are generally stored in Unicode for simplicity.
-  """
-
-  def __init__(self):
-    self._user      = None                  # User-specified default encoding
-    self._learned   = []                    # Learned default encodings
-    self._widefiles = False                 # File system can be wide
-
-    # Can the file system be Unicode?
-    try:
-      self._widefiles = os.path.supports_unicode_filenames
-    except AttributeError:
-      try:
-        self._widefiles = sys.getwindowsversion() == os.VER_PLATFORM_WIN32_NT
-      except AttributeError:
-        pass
-
-    # Try to guess a working default
-    try:
-      encoding = sys.getfilesystemencoding()
-      if encoding and not (encoding.upper() in ENC_ASCII_LIST):
-        self._learned = [ encoding ]
-    except AttributeError:
-      pass
-
-    if not self._learned:
-      encoding = sys.getdefaultencoding()
-      if encoding and not (encoding.upper() in ENC_ASCII_LIST):
-        self._learned = [ encoding ]
-
-    # If we had no guesses, start with some European defaults
-    if not self._learned:
-      self._learned = ENC_DEFAULT_LIST
-  #end def __init__
-
-  def SetUserEncoding(self, encoding):
-    self._user = encoding
-  #end def SetUserEncoding
-
-  def NarrowText(self, text, encoding):
-    """ Narrow a piece of arbitrary text """
-    if type(text) != types.UnicodeType:
-      return text
-
-    # Try the passed in preference
-    if encoding:
-      try:
-        result = text.encode(encoding)
-        if not encoding in self._learned:
-          self._learned.append(encoding)
-        return result
-      except UnicodeError:
-        pass
-      except LookupError:
-        output.Warn('Unknown encoding: %s' % encoding)
-
-    # Try the user preference
-    if self._user:
-      try:
-        return text.encode(self._user)
-      except UnicodeError:
-        pass
-      except LookupError:
-        temp = self._user
-        self._user = None
-        output.Warn('Unknown default_encoding: %s' % temp)
-
-    # Look through learned defaults, knock any failing ones out of the list
-    while self._learned:
-      try:
-        return text.encode(self._learned[0])
-      except:
-        del self._learned[0]
-
-    # When all other defaults are exhausted, use UTF-8
-    try:
-      return text.encode(ENC_UTF8)
-    except UnicodeError:
-      pass
-
-    # Something is seriously wrong if we get to here
-    return text.encode(ENC_ASCII, 'ignore')
-  #end def NarrowText
-  
-  def MaybeNarrowPath(self, text):
-    """ Paths may be allowed to stay wide """
-    if self._widefiles:
-      return text
-    return self.NarrowText(text, None)
-  #end def MaybeNarrowPath
-
-  def WidenText(self, text, encoding):
-    """ Widen a piece of arbitrary text """
-    if type(text) != types.StringType:
-      return text
-
-    # Try the passed in preference
-    if encoding:
-      try:
-        result = unicode(text, encoding)
-        if not encoding in self._learned:
-          self._learned.append(encoding)
-        return result
-      except UnicodeError:
-        pass
-      except LookupError:
-        output.Warn('Unknown encoding: %s' % encoding)
-
-    # Try the user preference
-    if self._user:
-      try:
-        return unicode(text, self._user)
-      except UnicodeError:
-        pass
-      except LookupError:
-        temp = self._user
-        self._user = None
-        output.Warn('Unknown default_encoding: %s' % temp)
-
-    # Look through learned defaults, knock any failing ones out of the list
-    while self._learned:
-      try:
-        return unicode(text, self._learned[0])
-      except:
-        del self._learned[0]
-
-    # When all other defaults are exhausted, use UTF-8
-    try:
-      return unicode(text, ENC_UTF8)
-    except UnicodeError:
-      pass
-
-    # Getting here means it wasn't UTF-8 and we had no working default.
-    # We really don't have anything "right" we can do anymore.
-    output.Warn('Unrecognized encoding in text: %s' % text)
-    if not self._user:
-      output.Warn('You may need to set a default_encoding in your '
-                  'configuration file.')
-    return text.decode(ENC_ASCII, 'ignore')
-  #end def WidenText
-#end class Encoder
-encoder = Encoder()
-
-
-class Output:
-  """
-  Exposes logging functionality, and tracks how many errors
-  we have thus output.
-
-  Logging levels should be used as thus:
-    Fatal     -- extremely sparingly
-    Error     -- config errors, entire blocks of user 'intention' lost
-    Warn      -- individual URLs lost
-    Log(,0)   -- Un-suppressable text that's not an error
-    Log(,1)   -- touched files, major actions
-    Log(,2)   -- parsing notes, filtered or duplicated URLs
-    Log(,3)   -- each accepted URL
-  """
-
-  def __init__(self):
-    self.num_errors    = 0                   # Count of errors
-    self.num_warns     = 0                   # Count of warnings
-
-    self._errors_shown = {}                  # Shown errors
-    self._warns_shown  = {}                  # Shown warnings
-    self._verbose      = 0                   # Level of verbosity
-  #end def __init__
-
-  def Log(self, text, level):
-    """ Output a blurb of diagnostic text, if the verbose level allows it """
-    if text:
-      text = encoder.NarrowText(text, None)
-      if self._verbose >= level:
-        print text
-  #end def Log
-
-  def Warn(self, text):
-    """ Output and count a warning.  Suppress duplicate warnings. """
-    if text:
-      text = encoder.NarrowText(text, None)
-      hash = md5.new(text).digest()
-      if not self._warns_shown.has_key(hash):
-        self._warns_shown[hash] = 1
-        print '[WARNING] ' + text
-      else:
-        self.Log('(suppressed) [WARNING] ' + text, 3)
-      self.num_warns = self.num_warns + 1
-  #end def Warn
-
-  def Error(self, text):
-    """ Output and count an error.  Suppress duplicate errors. """
-    if text:
-      text = encoder.NarrowText(text, None)
-      hash = md5.new(text).digest()
-      if not self._errors_shown.has_key(hash):
-        self._errors_shown[hash] = 1
-        print '[ERROR] ' + text
-      else:
-        self.Log('(suppressed) [ERROR] ' + text, 3)
-      self.num_errors = self.num_errors + 1
-  #end def Error
-
-  def Fatal(self, text):
-    """ Output an error and terminate the program. """
-    if text:
-      text = encoder.NarrowText(text, None)
-      print '[FATAL] ' + text
-    else:
-      print 'Fatal error.'
-    sys.exit(1)
-  #end def Fatal
-
-  def SetVerbose(self, level):
-    """ Sets the verbose level. """
-    try:
-      if type(level) != types.IntType:
-        level = int(level)
-      if (level >= 0) and (level <= 3):
-        self._verbose = level
-        return
-    except ValueError:
-      pass
-    self.Error('Verbose level (%s) must be between 0 and 3 inclusive.' % level)
-  #end def SetVerbose
-#end class Output
-output = Output()
-
-
class URL(object):
  """ URL is a smart structure grouping together the properties we
  care about for a single web reference. """
  # __slots__ doubles as the canonical attribute list: Log() and WriteXML()
  # iterate over it, and unknown names assigned via setattr raise
  # AttributeError (see TrySetAttribute).
  __slots__ = 'loc', 'lastmod', 'changefreq', 'priority'

  def __init__(self):
    self.loc        = None                  # URL -- in Narrow characters
    self.lastmod    = None                  # ISO8601 timestamp of last modify
    self.changefreq = None                  # Text term for update frequency
    self.priority   = None                  # Float between 0 and 1 (inc)
  #end def __init__

  def __cmp__(self, other):
    # Order solely by location so collections of URLs sort by address.
    if self.loc < other.loc:
      return -1
    if self.loc > other.loc:
      return 1
    return 0
  #end def __cmp__

  def TrySetAttribute(self, attribute, value):
    """ Attempt to set the attribute to the value, with a pretty try
    block around it.  """
    # 'loc' gets canonicalized; everything else is stored verbatim.
    if attribute == 'loc':
      self.loc = self.Canonicalize(value)
    else:
      try:
        setattr(self, attribute, value)
      except AttributeError:
        # __slots__ rejects unknown attribute names; warn instead of raising.
        output.Warn('Unknown URL attribute: %s' % attribute)
  #end def TrySetAttribute

  def IsAbsolute(loc):
    """ Decide if the URL is absolute or not """
    if not loc:
      return False
    narrow = encoder.NarrowText(loc, None)
    (scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
    # Absolute means both a scheme ("http") and a host part are present.
    if (not scheme) or (not netloc):
      return False
    return True
  #end def IsAbsolute
  IsAbsolute = staticmethod(IsAbsolute)

  def Canonicalize(loc):
    """ Do encoding and canonicalization on a URL string """
    if not loc:
      return loc
    
    # Let the encoder try to narrow it
    narrow = encoder.NarrowText(loc, None)

    # Escape components individually.  'unr' and 'sub' are the RFC 3986
    # unreserved and sub-delim characters, which must not be escaped;
    # each component additionally keeps its own legal delimiters.
    (scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
    unr    = '-._~'
    sub    = '!$&\'()*+,;='
    netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
    path   = urllib.quote(path,   unr + sub + '%:@/')
    query  = urllib.quote(query,  unr + sub + '%:@/?')
    frag   = urllib.quote(frag,   unr + sub + '%:@/?')

    # Try built-in IDNA encoding on the netloc
    try:
      # Re-split the original (possibly wide) text: IDNA applies only when
      # the host contains non-ASCII characters.
      (ignore, widenetloc, ignore, ignore, ignore) = urlparse.urlsplit(loc)
      for c in widenetloc:
        if c >= unichr(128):
          # ENC_IDNA is a module-level codec-name constant defined elsewhere
          # in this file.
          netloc = widenetloc.encode(ENC_IDNA)
          netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
          break
    except UnicodeError:
      # urlsplit must have failed, based on implementation differences in the
      # library.  There is not much we can do here, except ignore it.
      pass
    except LookupError:
      output.Warn('An International Domain Name (IDN) is being used, but this '
                  'version of Python does not have support for IDNA encoding. '
                  ' (IDNA support was introduced in Python 2.3)  The encoding '
                  'we have used instead is wrong and will probably not yield '
                  'valid URLs.')
    # A '%' remaining in the host part means it held characters that are
    # illegal there; remember that so we can warn after reassembly.
    bad_netloc = False
    if '%' in netloc:
      bad_netloc = True

    # Put it all back together
    narrow = urlparse.urlunsplit((scheme, netloc, path, query, frag))

    # I let '%' through.  Fix any that aren't pre-existing escapes.
    # A '%' followed by two hex digits is assumed to already be an escape
    # sequence; any other '%' gets escaped itself (to '%25').
    HEXDIG = '0123456789abcdefABCDEF'
    list   = narrow.split('%')
    narrow = list[0]
    del list[0]
    for item in list:
      if (len(item) >= 2) and (item[0] in HEXDIG) and (item[1] in HEXDIG):
        narrow = narrow + '%' + item
      else:
        narrow = narrow + '%25' + item

    # Issue a warning if this is a bad URL
    if bad_netloc:
      output.Warn('Invalid characters in the host or domain portion of a URL: '
                  + narrow)

    return narrow
  #end def Canonicalize
  Canonicalize = staticmethod(Canonicalize)

  def Validate(self, base_url, allow_fragment):
    """ Verify the data in this URL is well-formed, and override if not. """
    assert type(base_url) == types.StringType
    
    # Test (and normalize) the ref
    if not self.loc:
      output.Warn('Empty URL')
      return False
    if allow_fragment:
      # Resolve a relative location against the site root.
      self.loc = urlparse.urljoin(base_url, self.loc)
    if not self.loc.startswith(base_url):
      output.Warn('Discarded URL for not starting with the base_url: %s' %
                  self.loc)
      self.loc = None
      return False

    # Test the lastmod
    # LASTMOD_PATTERNS is a module-level list of compiled ISO8601 regexes.
    if self.lastmod:
      match = False
      self.lastmod = self.lastmod.upper()
      for pattern in LASTMOD_PATTERNS:
        match = pattern.match(self.lastmod)
        if match:
          break
      if not match:
        output.Warn('Lastmod "%s" does not appear to be in ISO8601 format on '
                    'URL: %s' % (self.lastmod, self.loc))
        self.lastmod = None

    # Test the changefreq
    # CHANGEFREQ_PATTERNS is a module-level list of the legal terms.
    if self.changefreq:
      match = False
      self.changefreq = self.changefreq.lower()
      for pattern in CHANGEFREQ_PATTERNS:
        if self.changefreq == pattern:
          match = True
          break
      if not match:
        output.Warn('Changefreq "%s" is not a valid change frequency on URL '
                    ': %s' % (self.changefreq, self.loc))
        self.changefreq = None

    # Test the priority
    if self.priority:
      priority = -1.0
      try:
        priority = float(self.priority)
      except ValueError:
        # Leave priority at -1.0 so the range check below rejects it.
        pass
      if (priority < 0.0) or (priority > 1.0):
        output.Warn('Priority "%s" is not a number between 0 and 1 inclusive '
                    'on URL: %s' % (self.priority, self.loc))
        self.priority = None

    return True
  #end def Validate

  def MakeHash(self):
    """ Provides a uniform way of hashing URLs """
    if not self.loc:
      return None
    # Strip a trailing slash so "/foo" and "/foo/" hash identically.
    if self.loc.endswith('/'):
      return md5.new(self.loc[:-1]).digest()
    return md5.new(self.loc).digest()
  #end def MakeHash

  def Log(self, prefix='URL', level=3):
    """ Dump the contents, empty or not, to the log. """
    out = prefix + ':'
    
    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if not value:
        value = ''
      out = out + ('  %s=[%s]' % (attribute, value))

    output.Log('%s' % encoder.NarrowText(out, None), level)
  #end def Log

  def WriteXML(self, file):
    """ Dump non-empty contents to the output file, in XML format. """
    if not self.loc:
      return
    # SITEURL_XML_PREFIX/SUFFIX are module-level <url> wrapper constants.
    out = SITEURL_XML_PREFIX

    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if value:
        # Narrow wide strings, stringify everything else, then XML-escape.
        if type(value) == types.UnicodeType:
          value = encoder.NarrowText(value, None)
        elif type(value) != types.StringType:
          value = str(value)
        value = xml.sax.saxutils.escape(value)
        out = out + ('  <%s>%s</%s>\n' % (attribute, value, attribute))
    
    out = out + SITEURL_XML_SUFFIX
    file.write(out)
  #end def WriteXML
#end class URL
-
-
class Filter:
  """
  A filter on the stream of URLs we find.  A filter is, in essence,
  a wildcard applied to the stream.  You can think of this as an
  operator that returns a tri-state when given a URL:

    True  -- this URL is to be included in the sitemap
    None  -- this URL is undecided
    False -- this URL is to be dropped from the sitemap
  """

  def __init__(self, attributes):
    """ Build a filter from its config-file attribute dictionary.

    Recognized attributes:
      pattern -- the wildcard or regular expression to match (required)
      type    -- 'wildcard' (default) or 'regexp'
      action  -- 'drop' (default) or 'pass'
    Configuration problems are reported through the global output object;
    a misconfigured filter stays inert and never matches (see Apply).
    """
    self._wildcard  = None                  # Pattern for wildcard match
    self._regexp    = None                  # Pattern for regexp match
    self._pass      = False                 # "Drop" filter vs. "Pass" filter

    if not ValidateAttributes('FILTER', attributes,
                              ('pattern', 'type', 'action')):
      return

    # Check error count on the way in
    num_errors = output.num_errors

    # Fetch the attributes ('match_type' rather than shadowing builtin 'type')
    pattern    = attributes.get('pattern')
    match_type = attributes.get('type', 'wildcard')
    action     = attributes.get('action', 'drop')
    if match_type:
      match_type = match_type.lower()
    if action:
      action = action.lower()

    # Verify the attributes
    if not pattern:
      output.Error('On a filter you must specify a "pattern" to match')
    elif (not match_type) or (
        (match_type != 'wildcard') and (match_type != 'regexp')):
      output.Error('On a filter you must specify either \'type="wildcard"\' '
                   'or \'type="regexp"\'')
    elif (action != 'pass') and (action != 'drop'):
      output.Error('If you specify a filter action, it must be either '
                   '\'action="http://sourceforge.net/pass"\' or \'action="http://sourceforge.net/drop"\'')

    # Set the rule
    if action == 'drop':
      self._pass = False
    elif action == 'pass':
      self._pass = True

    if match_type == 'wildcard':
      self._wildcard = pattern
    elif match_type == 'regexp':
      try:
        self._regexp = re.compile(pattern)
      except re.error:
        output.Error('Bad regular expression: %s' %  pattern)

    # Log the final results iff we didn't add any errors
    if num_errors == output.num_errors:
      output.Log('Filter: %s any URL that matches %s "%s"' %
                 (action, match_type, pattern), 2)
  #end def __init__

  def Apply(self, url):
    """ Classify a URL: True (pass), False (drop), or None (undecided). """
    if (not url) or (not url.loc):
      return None
    
    if self._wildcard:
      if fnmatch.fnmatchcase(url.loc, self._wildcard):
        return self._pass
      return None

    if self._regexp:
      if self._regexp.search(url.loc):
        return self._pass
      return None

    # Bug fix: this point is reachable for a misconfigured filter (missing
    # pattern, or a regexp that failed to compile).  The old "assert False"
    # crashed the whole run; treat such a filter as undecided instead.
    return None
  #end def Apply
#end class Filter
-
-
class InputURL:
  """
  Each Input class knows how to yield a set of URLs from a data source.

  This one handles a single URL, manually specified in the config file.
  """

  def __init__(self, attributes):
    """ Build the single URL described by the config-file attributes. """
    self._url = None                        # The lonely URL

    if not ValidateAttributes('URL', attributes,
                                ('href', 'lastmod', 'changefreq', 'priority')):
      return
    
    url = URL()
    for attr in attributes.keys():
      # The config file says "href", but the URL object calls it "loc".
      target = attr
      if attr == 'href':
        target = 'loc'
      url.TrySetAttribute(target, attributes[attr])

    if not url.loc:
      output.Error('Url entries must have an href attribute.')
      return
    
    self._url = url
    output.Log('Input: From URL "%s"' % self._url.loc, 2)
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    if self._url:
      consumer(self._url, True)
  #end def ProduceURLs
#end class InputURL
-
-
class InputURLList:
  """
  Each Input class knows how to yield a set of URLs from a data source.

  This one handles a text file with a list of URLs
  """

  def __init__(self, attributes):
    """ Remember and sanity-check the file path and encoding attributes. """
    self._path      = None                  # The file path
    self._encoding  = None                  # Encoding of that file

    if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding')):
      return
    
    self._path      = attributes.get('path')
    self._encoding  = attributes.get('encoding', ENC_UTF8)
    if not self._path:
      output.Error('Urllist entries must have a "path" attribute.')
      return
    self._path = encoder.MaybeNarrowPath(self._path)
    if not os.path.isfile(self._path):
      output.Error('Can not locate file: %s' % self._path)
      self._path = None
      return
    output.Log('Input: From URLLIST "%s"' % self._path, 2)
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """

    # Open the file
    (frame, file) = OpenFileForRead(self._path, 'URLLIST')
    if not file:
      return

    linenum = 0
    for line in file.readlines():
      linenum = linenum + 1

      # Widen to Unicode, then skip comments and blank lines.
      if self._encoding:
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()
      if (not line) or line[0] == '#':
        continue
      
      # First space-separated column is the URL itself...
      fields = [field.strip() for field in line.split(' ')]
      url = URL()
      url.TrySetAttribute('loc', fields[0])

      # ...remaining columns are name=value attribute pairs.
      for field in fields[1:]:
        if not field:
          continue
        try:
          (attr_name, attr_val) = field.split('=', 1)
          url.TrySetAttribute(attr_name, attr_val)
        except ValueError:
          output.Warn('Line %d: Unable to parse attribute: %s' %
                      (linenum, field))

      # Pass it on
      consumer(url, False)

    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputURLList
-
-
class InputDirectory:
  """
  Each Input class knows how to yield a set of URLs from a data source.

  This one handles a directory that acts as base for walking the filesystem.
  """

  def __init__(self, attributes, base_url):
    # attributes is the config-entry dict; base_url is the site root URL
    # that every generated location must live under.
    self._path         = None               # The directory
    self._url          = None               # The URL equivelant
    self._default_file = None

    if not ValidateAttributes('DIRECTORY', attributes, ('path', 'url',
                                                           'default_file')):
      return

    # Prep the path -- it MUST end in a sep
    path = attributes.get('path')
    if not path:
      output.Error('Directory entries must have both "path" and "url" '
                  'attributes')
      return
    path = encoder.MaybeNarrowPath(path)
    if not path.endswith(os.sep):
      path = path + os.sep
    if not os.path.isdir(path):
      output.Error('Can not locate directory: %s' % path)
      return
    
    # Prep the URL -- it MUST end in a sep
    url = attributes.get('url')
    if not url:
      output.Error('Directory entries must have both "path" and "url" '
                  'attributes')
      return
    url = URL.Canonicalize(url)
    if not url.endswith('/'):
      url = url + '/'
    if not url.startswith(base_url):
      # A relative URL is allowed: resolve against base_url and re-check.
      url = urlparse.urljoin(base_url, url)
      if not url.startswith(base_url):
        output.Error('The directory URL "%s" is not relative to the '
                    'base_url: %s' % (url, base_url))
        return

    # Prep the default file -- it MUST be just a filename
    file = attributes.get('default_file')
    if file:
      file = encoder.MaybeNarrowPath(file)
      if os.sep in file:
        output.Error('The default_file "%s" can not include path information.'
                     % file)
        file = None

    self._path         = path
    self._url          = url
    self._default_file = file
    if file:
      output.Log('Input: From DIRECTORY "%s" (%s) with default file "%s"'
                 % (path, url, file), 2)
    else:
      output.Log('Input: From DIRECTORY "%s" (%s) with no default file'
                 % (path, url), 2)
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    if not self._path:
      return

    # Bind to locals so the nested closures below can reference them.
    root_path = self._path
    root_URL  = self._url
    root_file = self._default_file

    def PerFile(dirpath, name):
      """
      Called once per file.
      Note that 'name' will occasionally be None -- for a directory itself
      """
      # Pull a timestamp
      url           = URL()
      isdir         = False
      try:
        if name:
          path      = os.path.join(dirpath, name)
        else:
          path      = dirpath
        isdir       = os.path.isdir(path)
        time        = None
        if isdir and root_file:
          # A directory with a default file advertises that file's mtime.
          file      = os.path.join(path, root_file)
          try:
            time    = os.stat(file)[stat.ST_MTIME];
          except OSError:
            pass
        if not time:
          time      = os.stat(path)[stat.ST_MTIME];
        url.lastmod = TimestampISO8601(time)
      except OSError:
        pass
      except ValueError:
        pass

      # Build a URL from the piece of the path below the root directory.
      middle        = dirpath[len(root_path):]
      if os.sep != '/':
        # Normalize platform separators (e.g. Windows '\\') to URL slashes.
        middle = middle.replace(os.sep, '/')
      if middle:
        middle      = middle + '/'
      if name:
        middle      = middle + name
        if isdir:
          middle    = middle + '/'
      url.TrySetAttribute('loc', root_URL + encoder.WidenText(middle, None))

      # Suppress default files.  (All the way down here so we can log it.)
      if name and (root_file == name):
        url.Log(prefix='IGNORED (default file)', level=2)
        return

      consumer(url, False)
    #end def PerFile

    def PerDirectory(ignore, dirpath, namelist):
      """
      Called once per directory with a list of all the contained files/dirs.
      """
      ignore = ignore  # Avoid warnings of an unused parameter

      if not dirpath.startswith(root_path):
        output.Warn('Unable to decide what the root path is for directory: '
                    '%s' % dirpath)
        return

      for name in namelist:
        PerFile(dirpath, name)
    #end def PerDirectory

    output.Log('Walking DIRECTORY "%s"' % self._path, 1)
    # Emit the root directory itself, then walk everything beneath it.
    PerFile(self._path, None)
    os.path.walk(self._path, PerDirectory, None)
  #end def ProduceURLs
#end class InputDirectory
-
-
class InputAccessLog:
  """
  Each Input class knows how to yield a set of URLs from a data source.

  This one handles access logs.  It's non-trivial in that we want to
  auto-detect log files in the Common Logfile Format (as used by Apache,
  for instance) and the Extended Log File Format (as used by IIS, for
  instance).
  """

  def __init__(self, attributes):
    self._path         = None               # The file path
    self._encoding     = None               # Encoding of that file
    self._is_elf       = False              # Extended Log File Format?
    self._is_clf       = False              # Common Logfile Format?
    # Column indexes into an ELF record; -1 means "field not present".
    self._elf_status   = -1                 # ELF field: '200'
    self._elf_method   = -1                 # ELF field: 'HEAD'
    self._elf_uri      = -1                 # ELF field: '/foo?bar=1'
    self._elf_urifrag1 = -1                 # ELF field: '/foo'
    self._elf_urifrag2 = -1                 # ELF field: 'bar=1'

    if not ValidateAttributes('ACCESSLOG', attributes, ('path', 'encoding')):
      return

    self._path      = attributes.get('path')
    self._encoding  = attributes.get('encoding', ENC_UTF8)
    if self._path:
      self._path    = encoder.MaybeNarrowPath(self._path)
      if os.path.isfile(self._path):
        output.Log('Input: From ACCESSLOG "%s"' % self._path, 2)
      else:
        output.Error('Can not locate file: %s' % self._path)
        self._path = None
    else:
      output.Error('Accesslog entries must have a "path" attribute.')
  #end def __init__

  def RecognizeELFLine(self, line):
    """ Recognize the Fields directive that heads an ELF file """
    if not line.startswith('#Fields:'):
      return False
    # Record the column position of each field we care about.
    fields = line.split(' ')
    del fields[0]
    for i in range(0, len(fields)):
      field = fields[i].strip()
      if field == 'sc-status':
        self._elf_status   = i
      elif field == 'cs-method':
        self._elf_method   = i
      elif field == 'cs-uri':
        self._elf_uri      = i
      elif field == 'cs-uri-stem':
        self._elf_urifrag1 = i
      elif field == 'cs-uri-query':
        self._elf_urifrag2 = i
    output.Log('Recognized an Extended Log File Format file.', 2)
    return True
  #end def RecognizeELFLine

  def GetELFLine(self, line):
    """ Fetch the requested URL from an ELF line """
    fields = line.split(' ')
    count  = len(fields)

    # Verify status was Ok
    if self._elf_status >= 0:
      if self._elf_status >= count:
        return None
      if not fields[self._elf_status].strip() == '200':
        return None

    # Verify method was HEAD or GET
    if self._elf_method >= 0:
      if self._elf_method >= count:
        return None
      if not fields[self._elf_method].strip() in ('HEAD', 'GET'):
        return None

    # Pull the full URL if we can ('-' is ELF's empty-value marker)
    if self._elf_uri >= 0:
      if self._elf_uri >= count:
        return None
      url = fields[self._elf_uri].strip()
      if url != '-':
        return url

    # Put together a fragmentary URL from stem + optional query
    if self._elf_urifrag1 >= 0:
      if self._elf_urifrag1 >= count or self._elf_urifrag2 >= count:
        return None
      urlfrag1 = fields[self._elf_urifrag1].strip()
      urlfrag2 = None
      if self._elf_urifrag2 >= 0:
        urlfrag2 = fields[self._elf_urifrag2]
      if urlfrag1 and (urlfrag1 != '-'):
        if urlfrag2 and (urlfrag2 != '-'):
          urlfrag1 = urlfrag1 + '?' + urlfrag2
        return urlfrag1

    return None
  #end def GetELFLine

  def RecognizeCLFLine(self, line):
    """ Try to tokenize a logfile line according to CLF pattern and see if
    it works. """
    # ACCESSLOG_CLF_PATTERN is a module-level compiled regex; group(1) is
    # the HTTP method, group(2) the requested URL.
    match = ACCESSLOG_CLF_PATTERN.match(line)
    recognize = match and (match.group(1) in ('HEAD', 'GET'))
    if recognize:
      output.Log('Recognized a Common Logfile Format file.', 2)
    return recognize
  #end def RecognizeCLFLine

  def GetCLFLine(self, line):
    """ Fetch the requested URL from a CLF line """
    match = ACCESSLOG_CLF_PATTERN.match(line)
    if match:
      request = match.group(1)
      if request in ('HEAD', 'GET'):
        return match.group(2)
    return None
  #end def GetCLFLine

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """

    # Open the file
    (frame, file) = OpenFileForRead(self._path, 'ACCESSLOG')
    if not file:
      return

    # Iterate lines
    for line in file.readlines():
      if self._encoding:
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()

      # If we don't know the format yet, try them both
      if (not self._is_clf) and (not self._is_elf):
        self._is_elf = self.RecognizeELFLine(line)
        self._is_clf = self.RecognizeCLFLine(line)

      # Digest the line
      match = None
      if self._is_elf:
        match = self.GetELFLine(line)
      elif self._is_clf:
        match = self.GetCLFLine(line)
      if not match:
        continue

      # Pass it on
      url = URL()
      url.TrySetAttribute('loc', match)
      consumer(url, True)

    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputAccessLog
-
-
-class InputSitemap(xml.sax.handler.ContentHandler):
-
-  """
-  Each Input class knows how to yield a set of URLs from a data source.
-
-  This one handles Sitemap files and Sitemap index files.  For the sake
-  of simplicity in design (and simplicity in interfacing with the SAX
-  package), we do not handle these at the same time, recursively.  Instead
-  we read an index file completely and make a list of Sitemap files, then
-  go back and process each Sitemap.
-  """
-
-  class _ContextBase(object):
-    
-    """Base class for context handlers in our SAX processing.  A context
-    handler is a class that is responsible for understanding one level of
-    depth in the XML schema.  The class knows what sub-tags are allowed,
-    and doing any processing specific for the tag we're in.
-
-    This base class is the API filled in by specific context handlers,
-    all defined below.
-    """
-    
-    def __init__(self, subtags):
-      """Initialize with a sequence of the sub-tags that would be valid in
-      this context."""
-      self._allowed_tags = subtags          # Sequence of sub-tags we can have
-      self._last_tag     = None             # Most recent seen sub-tag
-    #end def __init__
-
-    def AcceptTag(self, tag):
-      """Returns True iff opening a sub-tag is valid in this context."""
-      valid = tag in self._allowed_tags
-      if valid:
-        self._last_tag = tag
-      else:
-        self._last_tag = None
-      return valid
-    #end def AcceptTag
-
-    def AcceptText(self, text):
-      """Returns True iff a blurb of text is valid in this context."""
-      return False
-    #end def AcceptText
-
-    def Open(self):
-      """The context is opening.  Do initialization."""
-      pass
-    #end def Open
-
-    def Close(self):
-      """The context is closing.  Return our result, if any."""
-      pass
-    #end def Close
-
-    def Return(self, result):
-      """We're returning to this context after handling a sub-tag.  This
-      method is called with the result data from the sub-tag that just
-      closed.  Here in _ContextBase, if we ever see a result it means
-      the derived child class forgot to override this method."""
-      if result:
-        raise NotImplementedError
-    #end def Return
-  #end class _ContextBase
-
-  class _ContextUrlSet(_ContextBase):
-    
-    """Context handler for the document node in a Sitemap."""
-    
-    def __init__(self):
-      InputSitemap._ContextBase.__init__(self, ('url',))
-    #end def __init__
-  #end class _ContextUrlSet
-
-  class _ContextUrl(_ContextBase):
-    
-    """Context handler for a URL node in a Sitemap."""
-    
-    def __init__(self, consumer):
-      """Initialize this context handler with the callable consumer that
-      wants our URLs."""
-      InputSitemap._ContextBase.__init__(self, URL.__slots__)
-      self._url          = None            # The URL object we're building
-      self._consumer     = consumer        # Who wants to consume it
-    #end def __init__
-
-    def Open(self):
-      """Initialize the URL."""
-      assert not self._url
-      self._url = URL()
-    #end def Open
-
-    def Close(self):
-      """Pass the URL to the consumer and reset it to None."""
-      assert self._url
-      self._consumer(self._url, False)
-      self._url = None
-    #end def Close
-  
-    def Return(self, result):
-      """A value context has closed, absorb the data it gave us."""
-      assert self._url
-      if result:
-        self._url.TrySetAttribute(self._last_tag, result)
-    #end def Return
-  #end class _ContextUrl
-
-  class _ContextSitemapIndex(_ContextBase):
-    
-    """Context handler for the document node in an index file."""
-    
-    def __init__(self):
-      InputSitemap._ContextBase.__init__(self, ('sitemap',))
-      self._loclist = []                    # List of accumulated Sitemap URLs
-    #end def __init__
-
-    def Open(self):
-      """Just a quick verify of state."""
-      assert not self._loclist
-    #end def Open
-
-    def Close(self):
-      """Return our list of accumulated URLs."""
-      if self._loclist:
-        temp = self._loclist
-        self._loclist = []
-        return temp
-    #end def Close
-  
-    def Return(self, result):
-      """Getting a new loc URL, add it to the collection."""
-      if result:
-        self._loclist.append(result)
-    #end def Return
-  #end class _ContextSitemapIndex
-
-  class _ContextSitemap(_ContextBase):
-    
-    """Context handler for a Sitemap entry in an index file."""
-    
-    def __init__(self):
-      InputSitemap._ContextBase.__init__(self, ('loc', 'lastmod'))
-      self._loc = None                      # The URL to the Sitemap
-    #end def __init__
-
-    def Open(self):
-      """Just a quick verify of state."""
-      assert not self._loc
-    #end def Open
-
-    def Close(self):
-      """Return our URL to our parent."""
-      if self._loc:
-        temp = self._loc
-        self._loc = None
-        return temp
-      output.Warn('In the Sitemap index file, a "sitemap" entry had no "loc".')
-    #end def Close
-
-    def Return(self, result):
-      """A value has closed.  If it was a 'loc', absorb it."""
-      if result and (self._last_tag == 'loc'):
-        self._loc = result
-    #end def Return
-  #end class _ContextSitemap
-
-  class _ContextValue(_ContextBase):
-    
-    """Context handler for a single value.  We return just the value.  The
-    higher level context has to remember what tag led into us."""
-    
-    def __init__(self):
-      InputSitemap._ContextBase.__init__(self, ())
-      self._text        = None
-    #end def __init__
-
-    def AcceptText(self, text):
-      """Allow all text, adding it to our buffer."""
-      if self._text:
-        self._text = self._text + text
-      else:
-        self._text = text
-      return True
-    #end def AcceptText
-
-    def Open(self):
-      """Initialize our buffer."""
-      self._text = None
-    #end def Open
-
-    def Close(self):
-      """Return what's in our buffer."""
-      text = self._text
-      self._text = None
-      if text:
-        text = text.strip()
-      return text
-    #end def Close
-  #end class _ContextValue
-
-  def __init__(self, attributes):
-    """Initialize with a dictionary of attributes from our entry in the
-    config file."""
-    xml.sax.handler.ContentHandler.__init__(self)
-    self._pathlist      = None              # A list of files
-    self._current       = -1                # Current context in _contexts
-    self._contexts      = None              # The stack of contexts we allow
-    self._contexts_idx  = None              # ...contexts for index files
-    self._contexts_stm  = None              # ...contexts for Sitemap files
-
-    if not ValidateAttributes('SITEMAP', attributes, ['path']):
-      return
-    
-    # Init the first file path
-    path = attributes.get('path')
-    if path:
-      path = encoder.MaybeNarrowPath(path)
-      if os.path.isfile(path):
-        output.Log('Input: From SITEMAP "%s"' % path, 2)
-        self._pathlist = [path]
-      else:
-        output.Error('Can not locate file "%s"' % path)
-    else:
-      output.Error('Sitemap entries must have a "path" attribute.')
-  #end def __init__
-
-  def ProduceURLs(self, consumer):
-    """In general: Produces URLs from our data source, hand them to the
-    callable consumer.
-
-    In specific: Iterate over our list of paths and delegate the actual
-    processing to helper methods.  This is a complexity no other data source
-    needs to suffer.  We are unique in that we can have files that tell us
-    to bring in other files.
-
-    Note the decision to allow an index file or not is made in this method.
-    If we call our parser with (self._contexts == None) the parser will
-    grab whichever context stack can handle the file.  IE: index is allowed.
-    If instead we set (self._contexts = ...) before parsing, the parser
-    will only use the stack we specify.  IE: index not allowed.
-    """
-    # Set up two stacks of contexts
-    self._contexts_idx = [InputSitemap._ContextSitemapIndex(),
-                          InputSitemap._ContextSitemap(),
-                          InputSitemap._ContextValue()]
-    
-    self._contexts_stm = [InputSitemap._ContextUrlSet(),
-                          InputSitemap._ContextUrl(consumer),
-                          InputSitemap._ContextValue()]
-
-    # Process the first file
-    assert self._pathlist
-    path = self._pathlist[0]
-    self._contexts = None                # We allow an index file here
-    self._ProcessFile(path)
-
-    # Iterate over remaining files
-    self._contexts = self._contexts_stm  # No index files allowed
-    for path in self._pathlist[1:]:
-      self._ProcessFile(path)
-  #end def ProduceURLs
-
-  def _ProcessFile(self, path):
-    """Do per-file reading/parsing/consuming for the file path passed in."""
-    assert path
-    
-    # Open our file
-    (frame, file) = OpenFileForRead(path, 'SITEMAP')
-    if not file:
-      return
-
-    # Rev up the SAX engine
-    try:
-      self._current = -1
-      xml.sax.parse(file, self)
-    except SchemaError:
-      output.Error('An error in file "%s" made us abort reading the Sitemap.'
-                   % path)
-    except IOError:
-      output.Error('Cannot read from file "%s"' % path)
-    except xml.sax._exceptions.SAXParseException, e:
-      output.Error('XML error in the file "%s" (line %d, column %d): %s' %
-                   (path, e._linenum, e._colnum, e.getMessage()))
-
-    # Clean up
-    file.close()
-    if frame:
-      frame.close()
-  #end def _ProcessFile
-
-  def _MungeLocationListIntoFiles(self, urllist):
-    """Given a list of URLs, munge them into our self._pathlist property.
-    We do this by assuming all the files live in the same directory as
-    the first file in the existing pathlist.  That is, we assume a
-    Sitemap index points to Sitemaps only in the same directory.  This
-    is not true in general, but will be true for any output produced
-    by this script.
-    """
-    assert self._pathlist
-    path = self._pathlist[0]
-    path = os.path.normpath(path)
-    dir  = os.path.dirname(path)
-    wide = False
-    if type(path) == types.UnicodeType:
-      wide = True
-
-    for url in urllist:
-      url = URL.Canonicalize(url)
-      output.Log('Index points to Sitemap file at: %s' % url, 2)
-      (scheme, netloc, path, query, frag) = urlparse.urlsplit(url)
-      file = os.path.basename(path)
-      file = urllib.unquote(file)
-      if wide:
-        file = encoder.WidenText(file)
-      if dir:
-        file = dir + os.sep + file
-      if file:
-        self._pathlist.append(file)
-        output.Log('Will attempt to read Sitemap file: %s' % file, 1)
-  #end def _MungeLocationListIntoFiles
-
-  def startElement(self, tag, attributes):
-    """SAX processing, called per node in the config stream.
-    As long as the new tag is legal in our current context, this
-    becomes an Open call on one context deeper.
-    """
-    # If this is the document node, we may have to look for a context stack
-    if (self._current < 0) and not self._contexts:
-      assert self._contexts_idx and self._contexts_stm
-      if tag == 'urlset':
-        self._contexts = self._contexts_stm
-      elif tag == 'sitemapindex':
-        self._contexts = self._contexts_idx
-        output.Log('File is a Sitemap index.', 2)
-      else:
-        output.Error('The document appears to be neither a Sitemap nor a '
-                     'Sitemap index.')
-        raise SchemaError
-
-    # Display a kinder error on a common mistake
-    if (self._current < 0) and (self._contexts == self._contexts_stm) and (
-      tag == 'sitemapindex'):
-      output.Error('A Sitemap index can not refer to another Sitemap index.')
-      raise SchemaError
-
-    # Verify no unexpected attributes
-    if attributes:
-      text = ''
-      for attr in attributes.keys():
-        # The document node will probably have namespaces
-        if self._current < 0:
-          if attr.find('xmlns') >= 0:
-            continue
-          if attr.find('xsi') >= 0:
-            continue
-        if text:
-          text = text + ', '
-        text = text + attr
-      if text:
-        output.Warn('Did not expect any attributes on any tag, instead tag '
-                     '"%s" had attributes: %s' % (tag, text))
-
-    # Switch contexts
-    if (self._current < 0) or (self._contexts[self._current].AcceptTag(tag)):
-      self._current = self._current + 1
-      assert self._current < len(self._contexts)
-      self._contexts[self._current].Open()
-    else:
-      output.Error('Can not accept tag "%s" where it appears.' % tag)
-      raise SchemaError
-  #end def startElement
-
-  def endElement(self, tag):
-    """SAX processing, called per node in the config stream.
-    This becomes a call to Close on one context followed by a call
-    to Return on the previous.
-    """
-    tag = tag  # Avoid warning on unused argument
-    assert self._current >= 0
-    retval = self._contexts[self._current].Close()
-    self._current = self._current - 1
-    if self._current >= 0:
-      self._contexts[self._current].Return(retval)
-    elif retval and (self._contexts == self._contexts_idx):
-      self._MungeLocationListIntoFiles(retval)
-  #end def endElement
-
-  def characters(self, text):
-    """SAX processing, called when text values are read.  Important to
-    note that one single text value may be split across multiple calls
-    of this method.
-    """
-    if (self._current < 0) or (
-      not self._contexts[self._current].AcceptText(text)):
-      if text.strip():
-        output.Error('Can not accept text "%s" where it appears.' % text)
-        raise SchemaError
-  #end def characters
-#end class InputSitemap
-
-
class FilePathGenerator:
  """
  Produces the members of a numbered series of file paths on demand.
  Iterations can be requested in any order, not just sequentially.

  Example of iterations for '/path/foo.xml.gz':
    0           --> /path/foo.xml.gz
    1           --> /path/foo1.xml.gz
    2           --> /path/foo2.xml.gz
    _index.xml  --> /path/foo_index.xml
  """

  def __init__(self):
    self.is_gzip     = False                 # Whether the series is GZIPped

    self._path       = None                  # '/path/'
    self._prefix     = None                  # 'foo'
    self._suffix     = None                  # '.xml.gz'
  #end def __init__

  def Preload(self, path):
    """ Split 'path' into (directory, stem, extension) for recombination. """
    path = encoder.MaybeNarrowPath(path)

    # Reduce to a base filename
    path = os.path.normpath(path)
    base = os.path.basename(path).lower()
    if not base:
      output.Error('Couldn\'t parse the file path: %s' % path)
      return False

    # Find a supported extension on the base name
    lensuffix = 0
    for candidate in ['.xml', '.xml.gz', '.gz']:
      if base.endswith(candidate):
        suffix = candidate
        lensuffix = len(candidate)
        break
    if not lensuffix:
      output.Error('The path "%s" doesn\'t end in a supported file '
                   'extension.' % path)
      return False
    self.is_gzip = suffix.endswith('.gz')

    # Carve the original path into our three pieces
    lenbase = len(base)
    lenpath = len(path)
    self._path   = path[:lenpath-lenbase]
    self._prefix = path[lenpath-lenbase:lenpath-lensuffix]
    self._suffix = path[lenpath-lensuffix:]

    return True
  #end def Preload

  def GeneratePath(self, instance):
    """ Generate iteration 'instance' as a file path, as described above. """
    prefix = self._path + self._prefix
    if type(instance) != types.IntType:
      return prefix + instance
    if instance:
      return '%s%d%s' % (prefix, instance, self._suffix)
    return prefix + self._suffix
  #end def GeneratePath

  def GenerateURL(self, instance, root_url):
    """ Generate iteration 'instance' as a URL under 'root_url'. """
    prefix = root_url + self._prefix
    if type(instance) != types.IntType:
      retval = prefix + instance
    elif instance:
      retval = '%s%d%s' % (prefix, instance, self._suffix)
    else:
      retval = prefix + self._suffix
    return URL.Canonicalize(retval)
  #end def GenerateURL

  def GenerateWildURL(self, root_url):
    """ Generate a wildcard URL that matches every iteration in the series. """
    prefix = URL.Canonicalize(root_url + self._prefix)
    temp   = URL.Canonicalize(prefix + self._suffix)
    return prefix + '*' + temp[len(prefix):]
  #end def GenerateWildURL
#end class FilePathGenerator
-
-
class PerURLStatistics:
  """ Accumulates simple per-URL statistics, currently file extensions. """

  def __init__(self):
    self._extensions  = {}                  # extension -> occurrence count
  #end def __init__

  def Consume(self, url):
    """ Record stats for one URL: its extension, '/', or '(no extension)'. """
    if not (url and url.loc):
      return
    (scheme, netloc, path, query, frag) = urlparse.urlsplit(url.loc)
    if not path:
      return

    # Directories count under the '/' pseudo-extension
    if path.endswith('/'):
      self._extensions['/'] = self._extensions.get('/', 0) + 1
      return

    # Reduce to the final path component
    slash = path.rfind('/')
    if slash >= 0:
      assert slash < len(path)
      path = path[slash:]

    # Tally by extension, if the filename has one
    dot = path.rfind('.')
    if dot > 0:
      assert dot < len(path)
      key = path[dot:].lower()
    else:
      key = '(no extension)'
    self._extensions[key] = self._extensions.get(key, 0) + 1
  #end def Consume

  def Log(self):
    """ Write the collected extension counts to the output. """
    if len(self._extensions):
      output.Log('Count of file extensions on URLs:', 1)
      for ext in sorted(self._extensions.keys()):
        output.Log(' %7d  %s' % (self._extensions[ext], ext), 1)
  #end def Log
#end class PerURLStatistics
-
-class Sitemap(xml.sax.handler.ContentHandler):
-  """
-  This is the big workhorse class that processes your inputs and spits
-  out sitemap files.  It is built as a SAX handler for set up purposes.
-  That is, it processes an XML stream to bring itself up.
-  """
-
-  def __init__(self, suppress_notify):
-    xml.sax.handler.ContentHandler.__init__(self)
-    self._filters      = []                  # Filter objects
-    self._inputs       = []          ...
 
[truncated message content] | 
| 
      
      
      From: <rom...@us...> - 2011-03-07 20:44:40
      
     | 
| Revision: 1854
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1854&view=rev
Author:   roman_yakovenko
Date:     2011-03-07 20:44:32 +0000 (Mon, 07 Mar 2011)
Log Message:
-----------
remove reference to "language-binding.net" site
Modified Paths:
--------------
    pydsc_dev/docs/pkg-info.txt
    pydsc_dev/pydsc.py
    pydsc_dev/setup.py
    pygccxml_dev/docs/history/history.rest
    pygccxml_dev/docs/pkg-info.txt
    pygccxml_dev/setup.py
    pyplusplus_dev/README.txt
    pyplusplus_dev/docs/documentation/apidocs/code_creators.rest
    pyplusplus_dev/docs/documentation/functions/transformation/input_static_matrix.rest
    pyplusplus_dev/docs/examples/easybmp/pkg-info.txt
    pyplusplus_dev/docs/pkg-info.txt
    pyplusplus_dev/examples/pyboost_dev/setup.py
    pyplusplus_dev/pyplusplus/decl_wrappers/algorithm.py
    pyplusplus_dev/pyplusplus/decl_wrappers/decl_wrapper.py
    pyplusplus_dev/pyplusplus/module_builder/boost_python_builder.py
    pyplusplus_dev/setup.py
Modified: pydsc_dev/docs/pkg-info.txt
===================================================================
--- pydsc_dev/docs/pkg-info.txt	2011-03-07 20:42:15 UTC (rev 1853)
+++ pydsc_dev/docs/pkg-info.txt	2011-03-07 20:44:32 UTC (rev 1854)
@@ -5,7 +5,7 @@
 Author-email: roman yakovenko at gmail com
 Maintainer: Roman Yakovenko
 Maintainer-email: roman yakovenko at gmail com
-Home-page: http://www.language-binding.net/
+Home-page: 
 Download-url: http://sourceforge.net/project/showfiles.php?group_id=118209
 Summary: Python Documentation Spell Checker
 License: Boost Software License
@@ -16,4 +16,4 @@
 Classifier: Intended Audience :: Developers
 Classifier: License :: Freeware
 Classifier: Programming Language :: Python
-Classifier: Topic :: Software Development
\ No newline at end of file
+Classifier: Topic :: Software Development
Modified: pydsc_dev/pydsc.py
===================================================================
--- pydsc_dev/pydsc.py	2011-03-07 20:42:15 UTC (rev 1853)
+++ pydsc_dev/pydsc.py	2011-03-07 20:44:32 UTC (rev 1854)
@@ -19,7 +19,6 @@
 
 __version__ = '0.3' #current version
 __author__ = 'Roman Yakovenko <rom...@gm...>'
-__url__ = 'http://www.language-binding.net'
 __license__ = 'Boost Software License <http://boost.org/more/license_info.html>'
 
 import os
Modified: pydsc_dev/setup.py
===================================================================
--- pydsc_dev/setup.py	2011-03-07 20:42:15 UTC (rev 1853)
+++ pydsc_dev/setup.py	2011-03-07 20:44:32 UTC (rev 1854)
@@ -14,6 +14,5 @@
        , description="Python documentation and comments spell checker"
        , author="Roman Yakovenko"
        , author_email="rom...@gm..."
-       , url='http://www.language-binding.net'
        , py_modules=[ 'pydsc' ]
 )
Modified: pygccxml_dev/docs/history/history.rest
===================================================================
--- pygccxml_dev/docs/history/history.rest	2011-03-07 20:42:15 UTC (rev 1853)
+++ pygccxml_dev/docs/history/history.rest	2011-03-07 20:44:32 UTC (rev 1854)
@@ -62,7 +62,7 @@
     as suggested by `Jakub Wilk <http://groups.google.com/group/linux.debian.bugs.dist/browse_thread/thread/572d2286ca0b2cec?pli=1>`
 
 11. "__int128_t" and "__uint128_t" types were introduced. Many thanks to Gustavo Carneiro
-   for providing the patch.
+    for providing the patch.
 
 12. Thanks to Aron Xu, for pointing out that it is better to use "os.name",
     instead of "sys.platform" for platform specific logic.
Modified: pygccxml_dev/docs/pkg-info.txt
===================================================================
--- pygccxml_dev/docs/pkg-info.txt	2011-03-07 20:42:15 UTC (rev 1853)
+++ pygccxml_dev/docs/pkg-info.txt	2011-03-07 20:44:32 UTC (rev 1854)
@@ -5,7 +5,6 @@
 Author-email: roman yakovenko at gmail com
 Maintainer: Roman Yakovenko
 Maintainer-email: roman yakovenko at gmail com
-Home-page: http://www.language-binding.net/
 Download-url: http://sourceforge.net/project/showfiles.php?group_id=118209
 Summary: Python package for easy C++ declarations navigation
 License: Boost Software License
@@ -24,4 +23,4 @@
 Classifier: Intended Audience :: Developers
 Classifier: License :: Freeware
 Classifier: Programming Language :: Python
-Classifier: Topic :: Software Development
\ No newline at end of file
+Classifier: Topic :: Software Development
Modified: pygccxml_dev/setup.py
===================================================================
--- pygccxml_dev/setup.py	2011-03-07 20:42:15 UTC (rev 1853)
+++ pygccxml_dev/setup.py	2011-03-07 20:44:32 UTC (rev 1854)
@@ -11,7 +11,6 @@
        description = "GCC-XML generated file reader",
        author = "Roman Yakovenko",
        author_email = "rom...@gm...",
-       url = 'http://www.language-binding.net/pygccxml/pygccxml.html',
        packages = [ 'pygccxml',
                     'pygccxml.declarations',
                     'pygccxml.parser',
Modified: pyplusplus_dev/README.txt
===================================================================
--- pyplusplus_dev/README.txt	2011-03-07 20:42:15 UTC (rev 1853)
+++ pyplusplus_dev/README.txt	2011-03-07 20:44:32 UTC (rev 1854)
@@ -5,17 +5,15 @@
 Python bindings of a C/C++ library. The tool is implemented as a Python
 module which is controlled by a user script.
 
-Homepage: http://www.language-binding.net/pyplusplus/pyplusplus.html
 
-
 Requirements
 ------------
 
 In order to use Py++ you need the following additional components:
 
 - Python v2.6 (or higher)
-- pygccxml (http://www.language-binding.net/pygccxml/pygccxml.html)
-- GCC-XML (http://www.gccxml.org)
+- pygccxml
+- GCC-XML
 
 
 Install
Modified: pyplusplus_dev/docs/documentation/apidocs/code_creators.rest
===================================================================
--- pyplusplus_dev/docs/documentation/apidocs/code_creators.rest	2011-03-07 20:42:15 UTC (rev 1853)
+++ pyplusplus_dev/docs/documentation/apidocs/code_creators.rest	2011-03-07 20:44:32 UTC (rev 1854)
@@ -111,14 +111,6 @@
     :undoc-members:
     :show-inheritance:
 
-ctypes_module
--------------
-
-.. automodule:: pyplusplus.code_creators.ctypes_module
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 custom
 ------
 
Modified: pyplusplus_dev/docs/documentation/functions/transformation/input_static_matrix.rest
===================================================================
--- pyplusplus_dev/docs/documentation/functions/transformation/input_static_matrix.rest	2011-03-07 20:42:15 UTC (rev 1853)
+++ pyplusplus_dev/docs/documentation/functions/transformation/input_static_matrix.rest	2011-03-07 20:44:32 UTC (rev 1854)
@@ -1,6 +1,6 @@
-==================================
+===================================
 ``input_static_matrix`` transformer
-==================================
+===================================
 
 ----------
 Definition
Modified: pyplusplus_dev/docs/examples/easybmp/pkg-info.txt
===================================================================
--- pyplusplus_dev/docs/examples/easybmp/pkg-info.txt	2011-03-07 20:42:15 UTC (rev 1853)
+++ pyplusplus_dev/docs/examples/easybmp/pkg-info.txt	2011-03-07 20:44:32 UTC (rev 1854)
@@ -5,7 +5,6 @@
 Author-email: roman yakovenko at gmail com
 Maintainer: Roman Yakovenko
 Maintainer-email: roman yakovenko at gmail com
-Home-page: http://www.language-binding.net/pyplusplus/examples/py_easybmp/py_easybmp.html
 Download-url: http://sourceforge.net/project/showfiles.php?group_id=118209
 Summary: Python bindings for C++ EasyBMP library
 License: Boost Software License
@@ -15,4 +14,4 @@
 Classifier: Intended Audience :: Developers
 Classifier: License :: Freeware
 Classifier: Programming Language :: Python
-Classifier: Topic :: Software Development
\ No newline at end of file
+Classifier: Topic :: Software Development
Modified: pyplusplus_dev/docs/pkg-info.txt
===================================================================
--- pyplusplus_dev/docs/pkg-info.txt	2011-03-07 20:42:15 UTC (rev 1853)
+++ pyplusplus_dev/docs/pkg-info.txt	2011-03-07 20:44:32 UTC (rev 1854)
@@ -4,8 +4,7 @@
 Author: Roman Yakovenko
 Author-email: roman yakovenko at gmail com
 Maintainer: Roman Yakovenko
-Maintainer-email: roman yakovenko at gmail com
-Home-page: http://www.language-binding.net/
+Maintainer-email: roman yakovenko at gmail co
 Download-url: http://sourceforge.net/project/showfiles.php?group_id=118209
 Summary: pyplusplus is an object-oriented framework for creating a code generator for boost.python library
 License: Boost Software License
@@ -16,4 +15,4 @@
 Classifier: Intended Audience :: Developers
 Classifier: License :: Freeware
 Classifier: Programming Language :: Python
-Classifier: Topic :: Software Development
\ No newline at end of file
+Classifier: Topic :: Software Development
Modified: pyplusplus_dev/examples/pyboost_dev/setup.py
===================================================================
--- pyplusplus_dev/examples/pyboost_dev/setup.py	2011-03-07 20:42:15 UTC (rev 1853)
+++ pyplusplus_dev/examples/pyboost_dev/setup.py	2011-03-07 20:44:32 UTC (rev 1854)
@@ -16,7 +16,6 @@
     , description = ""
     , author = "Roman Yakovenko"
     , author_email = "rom...@gm..."
-    , url = 'http://www.language-binding.net/pygccxml/pygccxml.html'
     , packages = [ 'pyboost'
                    , 'pyboost.boost_random'
                    , 'pyboost.crc'
Modified: pyplusplus_dev/pyplusplus/decl_wrappers/algorithm.py
===================================================================
--- pyplusplus_dev/pyplusplus/decl_wrappers/algorithm.py	2011-03-07 20:42:15 UTC (rev 1853)
+++ pyplusplus_dev/pyplusplus/decl_wrappers/algorithm.py	2011-03-07 20:44:32 UTC (rev 1854)
@@ -100,9 +100,7 @@
         return full_name
 
 class registration_order:
-    """class-namespace, introduce few functions, which deals with functions
-    registration order problem: http://www.language-binding.net/pyplusplus/documentation/functions/registration_order.html
-    """
+    """the class, introduces few functions, which deals with functions registration order problem"""
 
     @staticmethod
     def is_related( t1, t2 ):
Modified: pyplusplus_dev/pyplusplus/decl_wrappers/decl_wrapper.py
===================================================================
--- pyplusplus_dev/pyplusplus/decl_wrappers/decl_wrapper.py	2011-03-07 20:42:15 UTC (rev 1853)
+++ pyplusplus_dev/pyplusplus/decl_wrappers/decl_wrapper.py	2011-03-07 20:44:32 UTC (rev 1854)
@@ -184,7 +184,7 @@
         this declaration.
 
         skip_ignored argument allows you to control the information reported to you.
-        For more information please read: http://www.language-binding.net/pyplusplus/documentation/warnings.html
+        For more information please read documentation about warnings.
         """
         msgs = []
         if not self.exportable:
Modified: pyplusplus_dev/pyplusplus/module_builder/boost_python_builder.py
===================================================================
--- pyplusplus_dev/pyplusplus/module_builder/boost_python_builder.py	2011-03-07 20:42:15 UTC (rev 1853)
+++ pyplusplus_dev/pyplusplus/module_builder/boost_python_builder.py	2011-03-07 20:44:32 UTC (rev 1854)
@@ -25,7 +25,7 @@
     """
     This class provides users with simple and intuitive interface to `Py++`
     and/or pygccxml functionality. If this is your first attempt to use `Py++`
-    consider to read tutorials. You can find them on `web site <http://www.language-binding.net>`_.
+    consider to read tutorials. 
     """
 
     def __init__( self
Modified: pyplusplus_dev/setup.py
===================================================================
--- pyplusplus_dev/setup.py	2011-03-07 20:42:15 UTC (rev 1853)
+++ pyplusplus_dev/setup.py	2011-03-07 20:44:32 UTC (rev 1854)
@@ -13,7 +13,6 @@
        description="Py++ is a framework of components for creating C++ code generator for Boost.Python library",
        author="Roman Yakovenko",
        author_email="rom...@gm...",
-       url='http://www.language-binding.net/pyplusplus/pyplusplus.html',
        scripts = [],
        packages=[ 'pyplusplus',
                   'pyplusplus.file_writers',
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2011-03-07 20:42:24
      
     | 
| Revision: 1853
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1853&view=rev
Author:   roman_yakovenko
Date:     2011-03-07 20:42:15 +0000 (Mon, 07 Mar 2011)
Log Message:
-----------
remove reference to "language-binding.net" site
Modified Paths:
--------------
    index.rest
Modified: index.rest
===================================================================
--- index.rest	2011-02-26 20:08:30 UTC (rev 1852)
+++ index.rest	2011-03-07 20:42:15 UTC (rev 1853)
@@ -28,16 +28,9 @@
 
 *European Space Agency*, *Ogre*, *PyOpenSG* and many others :doc:`use <pyplusplus/quotes>` `Py++`.
 
--------------
-pydsc package
--------------
-
-Documentation strings contain spelling errors? :doc:`Fix them in a minute <pydsc/pydsc>`!
-
 .. toctree::
    :hidden:
 
-   pydsc/pydsc.rest
    pygccxml/pygccxml.rest
    pyplusplus/pyplusplus.rest
 
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2011-02-26 20:08:38
      
     | 
| Revision: 1852
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1852&view=rev
Author:   roman_yakovenko
Date:     2011-02-26 20:08:30 +0000 (Sat, 26 Feb 2011)
Log Message:
-----------
"has_inline" property was added to ``declarations.calldef_t`` class.
Modified Paths:
--------------
    pygccxml_dev/docs/history/history.rest
    pygccxml_dev/pygccxml/declarations/calldef.py
    pygccxml_dev/pygccxml/parser/scanner.py
    pygccxml_dev/unittests/test_all.py
Added Paths:
-----------
    pygccxml_dev/unittests/data/inline_specifier.hpp
    pygccxml_dev/unittests/inline_specifier_tester.py
Modified: pygccxml_dev/docs/history/history.rest
===================================================================
--- pygccxml_dev/docs/history/history.rest	2011-02-26 19:32:41 UTC (rev 1851)
+++ pygccxml_dev/docs/history/history.rest	2011-02-26 20:08:30 UTC (rev 1852)
@@ -65,7 +65,9 @@
    for providing the patch.
 
 12. Thanks to Aron Xu, for pointing out that it is better to use "os.name",
-    instead of "sys.platform" for platform specific logic
+    instead of "sys.platform" for platform specific logic.
+    
+13. "has_inline" property was added to ``declarations.calldef_t`` class.
 
 
 -----------
Modified: pygccxml_dev/pygccxml/declarations/calldef.py
===================================================================
--- pygccxml_dev/pygccxml/declarations/calldef.py	2011-02-26 19:32:41 UTC (rev 1851)
+++ pygccxml_dev/pygccxml/declarations/calldef.py	2011-02-26 20:08:30 UTC (rev 1852)
@@ -166,6 +166,7 @@
         self._has_extern = has_extern
         self._demangled_name = None
         self._calling_convention = None
+        self._has_inline = None
 
     def _get__cmp__call_items(self):
         """implementation details"""
@@ -178,7 +179,8 @@
                   , self.has_extern
                   , self.does_throw
                   , self._sorted_list( self.exceptions )
-                  , self.demangled_name ]
+                  , self.demangled_name
+                  , self.has_inline ]
         items.extend( self._get__cmp__call_items() )
         return items
 
@@ -274,6 +276,15 @@
                            @type: bool
                            """)
 
+    def _get_has_inline(self):
+        return self._has_inline
+    def _set_has_inline(self, has_inline):
+        self._has_inline = has_inline
+    has_inline = property( _get_has_inline, _set_has_inline,
+                           doc="""Was this callable declared with "inline" specifier
+                           @type: bool
+                           """)
+
     def __remove_parent_fname( self, demangled ):
         """implementation details"""
         demangled = demangled.strip()
Modified: pygccxml_dev/pygccxml/parser/scanner.py
===================================================================
--- pygccxml_dev/pygccxml/parser/scanner.py	2011-02-26 19:32:41 UTC (rev 1851)
+++ pygccxml_dev/pygccxml/parser/scanner.py	2011-02-26 20:08:30 UTC (rev 1852)
@@ -35,6 +35,7 @@
 XML_AN_ID = "id"
 XML_AN_INCOMPLETE = "incomplete"
 XML_AN_INIT = "init"
+XML_AN_INLINE = "inline"
 XML_AN_LINE = "line"
 XML_AN_MANGLED = "mangled"
 XML_AN_MAX = "max"
@@ -406,6 +407,7 @@
             self.__calldefs.append( calldef )
             calldef.name = attrs.get(XML_AN_NAME, '')
             calldef.has_extern = attrs.get( XML_AN_EXTERN, False )
+            calldef.has_inline = bool( attrs.get( XML_AN_INLINE, "" ) == "1" )
             throw_stmt = attrs.get( XML_AN_THROW, None )
             if None is throw_stmt:
                 calldef.does_throw = True
Added: pygccxml_dev/unittests/data/inline_specifier.hpp
===================================================================
--- pygccxml_dev/unittests/data/inline_specifier.hpp	                        (rev 0)
+++ pygccxml_dev/unittests/data/inline_specifier.hpp	2011-02-26 20:08:30 UTC (rev 1852)
@@ -0,0 +1,8 @@
+struct text_t{
+	inline bool inlined() const { return true; }
+	unsigned long not_inlined() const;
+};
+
+
+inline bool inlined(text_t){ return true; }
+unsigned long not_inlined(text_t);
Added: pygccxml_dev/unittests/inline_specifier_tester.py
===================================================================
--- pygccxml_dev/unittests/inline_specifier_tester.py	                        (rev 0)
+++ pygccxml_dev/unittests/inline_specifier_tester.py	2011-02-26 20:08:30 UTC (rev 1852)
@@ -0,0 +1,52 @@
+# Copyright 2004-2008 Roman Yakovenko.
+# Distributed under the Boost Software License, Version 1.0. (See
+# accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import unittest
+import autoconfig
+import parser_test_case
+
+from pygccxml import utils
+from pygccxml import parser
+from pygccxml import declarations
+
+class tester_t( parser_test_case.parser_test_case_t ):
+    
+    global_ns = None
+    
+    def __init__(self, *args ):
+        parser_test_case.parser_test_case_t.__init__( self, *args )
+        self.header = 'inline_specifier.hpp'
+        
+    def setUp(self):
+        if not tester_t.global_ns:
+            decls = parser.parse( [self.header], self.config )
+            tester_t.global_ns = declarations.get_global_namespace( decls )
+            tester_t.global_ns.init_optimizer()
+            
+    def test( self ):                
+		inlined_funcs = self.global_ns.calldefs( 'inlined' )
+		self.failUnless( len(inlined_funcs) )
+		for f in inlined_funcs:
+			self.failUnless( f.has_inline == True )
+
+		not_inlined_funcs = self.global_ns.calldefs( 'not_inlined' )
+		self.failUnless( len(not_inlined_funcs) )
+		for f in not_inlined_funcs:
+			self.failUnless( f.has_inline == False )
+
+        
+    def test2( self ):
+        pass
+
+def create_suite():
+    suite = unittest.TestSuite()        
+    suite.addTest( unittest.makeSuite(tester_t))
+    return suite
+
+def run_suite():
+    unittest.TextTestRunner(verbosity=2).run( create_suite() )
+
+if __name__ == "__main__":
+    run_suite()
Modified: pygccxml_dev/unittests/test_all.py
===================================================================
--- pygccxml_dev/unittests/test_all.py	2011-02-26 19:32:41 UTC (rev 1851)
+++ pygccxml_dev/unittests/test_all.py	2011-02-26 20:08:30 UTC (rev 1852)
@@ -58,6 +58,7 @@
 import gccxml10183_tester
 import gccxml10184_tester
 import gccxml10185_tester
+import inline_specifier_tester
 
 testers = [
     decl_string_tester
@@ -112,6 +113,7 @@
     , gccxml10183_tester
     , gccxml10184_tester
     , gccxml10185_tester    
+    , inline_specifier_tester
 ]
 
 def create_suite():
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2011-02-26 19:32:47
      
     | 
| Revision: 1851
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1851&view=rev
Author:   roman_yakovenko
Date:     2011-02-26 19:32:41 +0000 (Sat, 26 Feb 2011)
Log Message:
-----------
on Linux, starting to use gccxml from the distribution
Modified Paths:
--------------
    ui/web/config.py
Modified: ui/web/config.py
===================================================================
--- ui/web/config.py	2011-02-26 19:18:19 UTC (rev 1850)
+++ ui/web/config.py	2011-02-26 19:32:41 UTC (rev 1851)
@@ -17,7 +17,7 @@
 import pygccxml
 
 gccxml = pygccxml.parser.load_gccxml_configuration( os.path.join( this_module_dir_path, 'gccxml.cfg' )
-                                                    , gccxml_path=os.path.join( projects_root_dir, 'gccxml_bin', 'v09', sys.platform, 'bin' )
+                                                    , gccxml_path="/usr/bin/gccxml"
                                                     , compiler=pygccxml.utils.native_compiler.get_gccxml_compiler() )
 
 temp_dir = tempfile.gettempdir()
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2011-02-26 19:18:27
      
     | 
| Revision: 1850
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1850&view=rev
Author:   roman_yakovenko
Date:     2011-02-26 19:18:19 +0000 (Sat, 26 Feb 2011)
Log Message:
-----------
deleting unused functionality
Modified Paths:
--------------
    pyplusplus_dev/setup.py
Modified: pyplusplus_dev/setup.py
===================================================================
--- pyplusplus_dev/setup.py	2011-02-26 19:09:53 UTC (rev 1849)
+++ pyplusplus_dev/setup.py	2011-02-26 19:18:19 UTC (rev 1850)
@@ -14,8 +14,7 @@
        author="Roman Yakovenko",
        author_email="rom...@gm...",
        url='http://www.language-binding.net/pyplusplus/pyplusplus.html',
-       scripts = ["scripts/pyplusplus_gui",
-                  "scripts/pyplusplus_gui.pyw"],
+       scripts = [],
        packages=[ 'pyplusplus',
                   'pyplusplus.file_writers',
                   'pyplusplus.code_creators',
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2011-02-26 19:09:59
      
     | 
| Revision: 1849
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1849&view=rev
Author:   roman_yakovenko
Date:     2011-02-26 19:09:53 +0000 (Sat, 26 Feb 2011)
Log Message:
-----------
deleting ui/ide subproject - old code, wrong direction, doesn't actually work
Removed Paths:
-------------
    ui/ide/
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2011-02-26 18:55:33
      
     | 
| Revision: 1848
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1848&view=rev
Author:   roman_yakovenko
Date:     2011-02-26 18:55:27 +0000 (Sat, 26 Feb 2011)
Log Message:
-----------
it is better to use "os.name" instead of "sys.platform" for platform specific logic - thanks to Aron Xu
Modified Paths:
--------------
    pygccxml_dev/docs/history/history.rest
    pygccxml_dev/pygccxml/binary_parsers/undname.py
    pygccxml_dev/pygccxml/parser/config.py
    pygccxml_dev/pygccxml/parser/source_reader.py
    pygccxml_dev/pygccxml/utils/__init__.py
    pygccxml_dev/unittests/autoconfig.py
    pygccxml_dev/unittests/complex_types_tester.py
    pygccxml_dev/unittests/declarations_tester.py
    pygccxml_dev/unittests/undname_creator_tester.py
    pyplusplus_dev/docs/history/history.rest
    pyplusplus_dev/environment.py
    pyplusplus_dev/unittests/autoconfig.py
    pyplusplus_dev/unittests/ctypes_tester.py
    pyplusplus_dev/unittests/indexing_suites_v2_bug_tester.py
    pyplusplus_dev/unittests/unions_tester.py
    sphinx/conf.py
    ui/simple/freeze.py
Modified: pygccxml_dev/docs/history/history.rest
===================================================================
--- pygccxml_dev/docs/history/history.rest	2011-02-26 16:58:26 UTC (rev 1847)
+++ pygccxml_dev/docs/history/history.rest	2011-02-26 18:55:27 UTC (rev 1848)
@@ -23,6 +23,7 @@
 * Gustavo Carneiro
 * Christopher Bruns
 * Alejandro Dubrovsky
+* Aron Xu
 
 -----------
 SVN Version
@@ -63,6 +64,10 @@
 11. "__int128_t" and "__uint128_t" types were introduced. Many thanks to Gustavo Carneiro
    for providing the patch.
 
+12. Thanks to Aron Xu, for pointing out that it is better to use "os.name",
+    instead of "sys.platform" for platform specific logic
+
+
 -----------
 Version 1.0
 -----------
Modified: pygccxml_dev/pygccxml/binary_parsers/undname.py
===================================================================
--- pygccxml_dev/pygccxml/binary_parsers/undname.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pygccxml_dev/pygccxml/binary_parsers/undname.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -73,7 +73,7 @@
     compiler and demangled name produced by "nm" utility.
     """
     def __init__( self ):
-        if 'win32' in sys.platform:
+        if 'nt' == os.name:
             import ctypes.wintypes
             self.__undname = ctypes.windll.dbghelp.UnDecorateSymbolName
             self.__undname.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint]
@@ -209,7 +209,7 @@
         """
         name = None
         if hint is None:
-            if 'win32' in sys.platform:
+            if 'nt' == os.name:
                 hint = 'msvc'
             else:
                 hint = 'nm'
Modified: pygccxml_dev/pygccxml/parser/config.py
===================================================================
--- pygccxml_dev/pygccxml/parser/config.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pygccxml_dev/pygccxml/parser/config.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -173,7 +173,7 @@
         super( gccxml_configuration_t, self ).raise_on_wrong_settings()
         if os.path.isfile( self.gccxml_path ):
             return
-        if sys.platform == 'win32':
+        if os.name == 'nt':
             gccxml_name = 'gccxml' + '.exe'
             environment_var_delimiter = ';'
         elif os.name == 'posix':
Modified: pygccxml_dev/pygccxml/parser/source_reader.py
===================================================================
--- pygccxml_dev/pygccxml/parser/source_reader.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pygccxml_dev/pygccxml/parser/source_reader.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -95,7 +95,7 @@
         #returns
         cmd = []
         #first is gccxml executable
-        if 'win32' in sys.platform:
+        if 'nt' == os.name:
             cmd.append( '"%s"' % os.path.normpath( self.__config.gccxml_path ) )
         else:
             cmd.append(  '%s' % os.path.normpath( self.__config.gccxml_path ) )
@@ -118,7 +118,7 @@
         if self.__config.compiler:
             cmd.append( " --gccxml-compiler %s" % self.__config.compiler )
         cmd_line = ' '.join(cmd)
-        if 'win32' in sys.platform :
+        if 'nt' == os.name:
             cmd_line = '"%s"' % cmd_line
         self.logger.info( 'gccxml cmd: %s' % cmd_line )
         return cmd_line
@@ -284,7 +284,7 @@
         raise RuntimeError( "pygccxml error: file '%s' does not exist" % file )
 
     def __produce_full_file( self, file_path ):
-        if 'win' in sys.platform or 'linux' in sys.platform:
+        if os.name in ['nt', 'posix']:
             file_path = file_path.replace( r'\/', os.path.sep )
         if os.path.isabs( file_path ):
             return file_path
Modified: pygccxml_dev/pygccxml/utils/__init__.py
===================================================================
--- pygccxml_dev/pygccxml/utils/__init__.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pygccxml_dev/pygccxml/utils/__init__.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -170,7 +170,7 @@
 
     @staticmethod
     def get_version():
-        if 'win' not in sys.platform:
+        if 'nt' != os.name:
             return None #not implemented yet
         else:
             from distutils import msvccompiler
Modified: pygccxml_dev/unittests/autoconfig.py
===================================================================
--- pygccxml_dev/unittests/autoconfig.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pygccxml_dev/unittests/autoconfig.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -39,7 +39,7 @@
                                                         , compiler=pygccxml.utils.native_compiler.get_gccxml_compiler() )
 
     gccxml.define_symbols.append( gccxml_version )
-    if 'win' in sys.platform:
+    if 'nt' == os.name:
         gccxml.define_symbols.append( '__PYGCCXML_%s__' % gccxml.compiler.upper() )
         if 'msvc9' == gccxml.compiler:
             gccxml.define_symbols.append( '_HAS_TR1=0' )
Modified: pygccxml_dev/unittests/complex_types_tester.py
===================================================================
--- pygccxml_dev/unittests/complex_types_tester.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pygccxml_dev/unittests/complex_types_tester.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -3,6 +3,7 @@
 # accompanying file LICENSE_1_0.txt or copy at
 # http://www.boost.org/LICENSE_1_0.txt)
 
+import os
 import sys
 import unittest
 import autoconfig
@@ -32,7 +33,7 @@
 
 def create_suite():
     suite = unittest.TestSuite()
-    if sys.platform != 'win32':
+    if os.name != 'nt':
         suite.addTest( unittest.makeSuite(tester_t))
     return suite
 
Modified: pygccxml_dev/unittests/declarations_tester.py
===================================================================
--- pygccxml_dev/unittests/declarations_tester.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pygccxml_dev/unittests/declarations_tester.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -3,6 +3,7 @@
 # accompanying file LICENSE_1_0.txt or copy at
 # http://www.boost.org/LICENSE_1_0.txt)
 
+import os
 import sys
 import pprint
 import unittest
@@ -205,7 +206,7 @@
     suite = unittest.TestSuite()
     suite.addTest( unittest.makeSuite(file_by_file_tester_t))
     suite.addTest( unittest.makeSuite(all_at_once_tester_t))
-    #~ if sys.platform == 'win32' and autoconfig.get_pdb_global_ns():
+    #~ if os.name == 'nt' and autoconfig.get_pdb_global_ns():
         #~ suite.addTest( unittest.makeSuite(pdb_based_tester_t))
 
     return suite
Modified: pygccxml_dev/unittests/undname_creator_tester.py
===================================================================
--- pygccxml_dev/unittests/undname_creator_tester.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pygccxml_dev/unittests/undname_creator_tester.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -21,7 +21,7 @@
 
     @property
     def known_issues( self ):
-        if 'win32' in sys.platform:
+        if 'nt' == os.name:
             issues = set([
                 # array as function argument: 'int FA10_i_i(int * const)'
                   '?FA10_i_i@@YAHQAH@Z'
@@ -101,7 +101,7 @@
         for blob in parser.loaded_symbols:
             if isinstance( blob, tuple ):
                 blob = blob[0]
-            if 'win32' in sys.platform:
+            if 'nt' == os.name:
                 #TODO: find out where undecorate function is exposed on linux
                 undname = binary_parsers.undecorate_blob( blob )
                 if "`" in undname:
@@ -131,15 +131,15 @@
                 self.fail( os.linesep.join(msg) )
 
     def test_map_file( self ):
-        if 'win32' in sys.platform:
+        if 'nt' == os.name:
             self.__tester_impl( self.map_file, 71 )
 
     def test_dll_file( self ):
-        if 'win32' in sys.platform:
+        if 'nt' == os.name:
             self.__tester_impl( self.dll_file, 71 )
 
     def test_z_compare_parsers( self ):
-        if 'win32' not in sys.platform:
+        if 'nt' != os.name:
             return
         dsymbols, dparser = binary_parsers.merge_information( self.global_ns, self.dll_file, runs_under_unittest=True )
         msymbols, mparser = binary_parsers.merge_information( self.global_ns, self.map_file, runs_under_unittest=True )
@@ -158,7 +158,7 @@
         self.failUnless( was_error == False )
 
     def test_so_file( self ):
-        if 'linux2' in sys.platform:
+        if 'posix' in os.name:
             self.__tester_impl( self.so_file, 64 )
 
     def dont_test_print( self ):
Modified: pyplusplus_dev/docs/history/history.rest
===================================================================
--- pyplusplus_dev/docs/history/history.rest	2011-02-26 16:58:26 UTC (rev 1847)
+++ pyplusplus_dev/docs/history/history.rest	2011-02-26 18:55:27 UTC (rev 1848)
@@ -29,6 +29,7 @@
 * Nikolaus Rath
 * Alan Birtles 
 * Minh-Tri Pham
+* Aron Xu 
 
 -----------
 SVN Version
@@ -75,6 +76,9 @@
 
 12. Thanks to Minh-Tri Pham, for reporting bug and providing patch for
     "from_address" transformer, on 64 bit platforms.
+    
+13. Thanks to Aron Xu, for pointing out that it is better to use "os.name",
+    instead of "sys.platform" for platform specific logic
 
 -----------
 Version 1.0
Modified: pyplusplus_dev/environment.py
===================================================================
--- pyplusplus_dev/environment.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pyplusplus_dev/environment.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -33,7 +33,7 @@
     ccflags = []
 
 if 'roman' in getpass.getuser():
-    if sys.platform == 'win32':
+    if os.name == 'nt':
         scons.suffix = '.pyd'
         scons.ccflags = ['/MD', '/EHsc', '/GR', '/Zc:wchar_t', '/Zc:forScope' ]
         boost.libs = [ r'e:\dev\boost_svn\bin.v2\libs\python\build\msvc-9.0\release\threading-multi' ]
@@ -59,7 +59,7 @@
 			boost.include = '/home/roman/boost_svn'
 			python.include = '/usr/include/python2.6'
 elif 'root' == getpass.getuser():
-    if sys.platform == 'win32':
+    if os.name == 'nt':
         scons.suffix = '.pyd'
         scons.ccflags = ['/MD', '/EHsc', '/GR', '/Zc:wchar_t', '/Zc:forScope' ]
         boost.libs = [ 'd:/dev/boost_svn/bin.v2/libs/python/build/msvc-7.1/release/threading-multi' ]
Modified: pyplusplus_dev/unittests/autoconfig.py
===================================================================
--- pyplusplus_dev/unittests/autoconfig.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pyplusplus_dev/unittests/autoconfig.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -33,7 +33,7 @@
                                                         , compiler=pygccxml.utils.native_compiler.get_gccxml_compiler() )
 
     gccxml.define_symbols.append( gccxml_version )
-    if 'win' in sys.platform:
+    if 'nt' == os.name:
         gccxml.define_symbols.append( '__PYGCCXML_%s__' % gccxml.compiler.upper() )
         if 'msvc9' == gccxml.compiler:
             gccxml.define_symbols.append( '_HAS_TR1=0' )
@@ -55,14 +55,15 @@
     @staticmethod
     def create_sconstruct():
         msvc_compiler = ''
-        if 'linux' not in sys.platform:
+        if 'posix' != os.name:
             msvc_compiler = str( pygccxml.utils.native_compiler.get_version()[1] )
         else:
             scons_config.libs.append( 'boost_python' )
         code = [
-              "import sys"
+               "import os"
+            ,  "import sys"
             , "env = Environment()"
-            , "if 'linux' not in sys.platform:"
+            , "if 'posix' != os.name:"
             , "    env['MSVS'] = {'VERSION': '%s'}" % msvc_compiler
             , "    env['MSVS_VERSION'] = '%s'" % msvc_compiler
             , "    Tool('msvc')(env)"
@@ -106,7 +107,7 @@
 
 os.chdir( build_dir )
 
-if sys.platform == 'win32':
+if 'nt' == os.name:
     PATH = os.environ.get( 'PATH', '' )
     PATH=PATH + ';' + ';'.join( scons_config.libpath )
     os.environ['PATH'] = PATH
Modified: pyplusplus_dev/unittests/ctypes_tester.py
===================================================================
--- pyplusplus_dev/unittests/ctypes_tester.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pyplusplus_dev/unittests/ctypes_tester.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -34,14 +34,14 @@
     def symbols_file( self ):
         ext = '.so'
         prefix = 'lib'
-        if 'win32' in sys.platform:
+        if 'nt' == os.name:
             prefix = ''
             ext = '.map'
         return os.path.join( self.project_dir, 'binaries', prefix + self.base_name + ext )
 
     @property
     def library_file( self ):
-        if 'win32' in sys.platform:
+        if 'nt' == os.name:
             return os.path.join( self.project_dir, 'binaries', self.base_name + '.dll' )
         else:
             return self.symbols_file
Modified: pyplusplus_dev/unittests/indexing_suites_v2_bug_tester.py
===================================================================
--- pyplusplus_dev/unittests/indexing_suites_v2_bug_tester.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pyplusplus_dev/unittests/indexing_suites_v2_bug_tester.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -10,7 +10,7 @@
 import fundamental_tester_base
 from pyplusplus import code_creators
 
-if 'linux' in sys.platform:
+if 'posix' == os.name:
     try:
         from ctypes import RTLD_NOW, RTLD_GLOBAL
     except ImportError:
Modified: pyplusplus_dev/unittests/unions_tester.py
===================================================================
--- pyplusplus_dev/unittests/unions_tester.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ pyplusplus_dev/unittests/unions_tester.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -43,7 +43,7 @@
         obj2.set_i( 1977 )
         self.failUnless( obj2.i == 1977 )
 
-        if 'win' not in sys.platform:
+        if 'nt' != os.name:
             mdll = ctypes.cdll.LoadLibrary( module.__file__ )
             self.failUnless( 4 == mdll.mmm( 1, 3 ) )
 
Modified: sphinx/conf.py
===================================================================
--- sphinx/conf.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ sphinx/conf.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -32,7 +32,7 @@
 
 sys.path.append( doc_project_root )
 
-has_true_links = 'linux' in sys.platform
+has_true_links = 'posix' == os.name
 for pkg in packages:
     target = os.path.join( doc_project_root, pkg )
     source = os.path.join( project_root, pkg + '_dev', 'docs' )
Modified: ui/simple/freeze.py
===================================================================
--- ui/simple/freeze.py	2011-02-26 16:58:26 UTC (rev 1847)
+++ ui/simple/freeze.py	2011-02-26 18:55:27 UTC (rev 1848)
@@ -20,7 +20,7 @@
     target_dir = os.path.join( target_dir, target_name, sys.platform )
     if not os.path.exists( target_dir ):
         os.makedirs( target_dir )
-    if 'win32' == sys.platform:
+    if 'nt' == os.name:
         target_name = target_name + '.exe'
 
     cmd = [ freeze_executable ]
@@ -43,9 +43,9 @@
     if exit_status:
         raise RuntimeError('unable to create executable. error: %s' % msg )
 
-    if sys.platform == 'win32':
+    if os.name == 'nt':
         dlls = os.path.join( os.path.split( sys.executable )[0], 'dlls' )        
         files_to_copy = [ 'tk84.dll', 'tcl84.dll' ]
         for f in files_to_copy:
             shutil.copyfile( os.path.join( dlls, f )
-                             , os.path.join( target_dir, f ) )
\ No newline at end of file
+                             , os.path.join( target_dir, f ) )
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2011-02-26 16:58:32
      
     | 
| Revision: 1847
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1847&view=rev
Author:   roman_yakovenko
Date:     2011-02-26 16:58:26 +0000 (Sat, 26 Feb 2011)
Log Message:
-----------
fixing bug on 64 bit platform, in "from_address" transformer, reported by Minh-Tri Pham 
Modified Paths:
--------------
    pyplusplus_dev/docs/history/history.rest
    pyplusplus_dev/pyplusplus/function_transformers/transformers.py
Modified: pyplusplus_dev/docs/history/history.rest
===================================================================
--- pyplusplus_dev/docs/history/history.rest	2011-02-26 16:57:10 UTC (rev 1846)
+++ pyplusplus_dev/docs/history/history.rest	2011-02-26 16:58:26 UTC (rev 1847)
@@ -28,6 +28,7 @@
 * Benoît Leveau
 * Nikolaus Rath
 * Alan Birtles 
+* Minh-Tri Pham
 
 -----------
 SVN Version
@@ -70,8 +71,11 @@
 
 10. Numerous bugs in "ctypes code generator" were fixed. Many thanks to Nikolaus Rath.
 
-11. Thanks to Alan Birtles, for fixing a small issue on cygwin
+11. Thanks to Alan Birtles, for fixing a small issue on cygwin.
 
+12. Thanks to Minh-Tri Pham, for reporting bug and providing patch for
+    "from_address" transformer, on 64 bit platforms.
+
 -----------
 Version 1.0
 -----------
Modified: pyplusplus_dev/pyplusplus/function_transformers/transformers.py
===================================================================
--- pyplusplus_dev/pyplusplus/function_transformers/transformers.py	2011-02-26 16:57:10 UTC (rev 1846)
+++ pyplusplus_dev/pyplusplus/function_transformers/transformers.py	2011-02-26 16:58:26 UTC (rev 1847)
@@ -183,7 +183,7 @@
         :param arg_ref: Index of the argument that is an output value
         :type arg_ref: int
         """
-        modifier = lambda type_: declarations.FUNDAMENTAL_TYPES[ 'unsigned int' ]
+        modifier = lambda type_: declarations.dummy_type_t('size_t')
         type_modifier_t.__init__( self, function, arg_ref, modifier )
 
         if not is_ptr_or_array( self.arg.type ):
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2011-02-26 16:57:16
      
     | 
| Revision: 1846
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1846&view=rev
Author:   roman_yakovenko
Date:     2011-02-26 16:57:10 +0000 (Sat, 26 Feb 2011)
Log Message:
-----------
adding another configuration
Modified Paths:
--------------
    pyplusplus_dev/environment.py
Modified: pyplusplus_dev/environment.py
===================================================================
--- pyplusplus_dev/environment.py	2011-02-26 16:34:31 UTC (rev 1845)
+++ pyplusplus_dev/environment.py	2011-02-26 16:57:10 UTC (rev 1846)
@@ -1,5 +1,6 @@
 import os
 import sys
+import socket
 import getpass
 import platform
 
@@ -40,13 +41,23 @@
         python.libs = 'c:/program files/python26/libs'
         python.include = 'c:/program files/python26/include'
     else:
-        os.nice( 20 )
-        print 'test process niceness: 20'
-        scons.suffix = '.so'
-        scons.ccflags = []
-        boost.libs = ['/home/roman/include/libs', '/home/roman/include/lib' ]
-        boost.include = '/home/roman/boost_svn'
-        python.include = '/usr/include/python2.6'
+		if 'kubunu-vbox' == socket.gethostname():
+			os.nice( 20 )
+			print 'test process niceness: 20'
+			scons.suffix = '.so'
+			scons.ccflags = []
+			boost.libs = ['/usr/lib'] #'/home/roman/include/libs', '/home/roman/include/lib' ]
+			boost.include = '/usr/include/boost'
+			python.include = '/usr/include/python2.6'
+
+		else:
+			os.nice( 20 )
+			print 'test process niceness: 20'
+			scons.suffix = '.so'
+			scons.ccflags = []
+			boost.libs = ['/home/roman/include/libs', '/home/roman/include/lib' ]
+			boost.include = '/home/roman/boost_svn'
+			python.include = '/usr/include/python2.6'
 elif 'root' == getpass.getuser():
     if sys.platform == 'win32':
         scons.suffix = '.pyd'
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2011-02-26 16:34:37
      
     | 
| Revision: 1845
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1845&view=rev
Author:   roman_yakovenko
Date:     2011-02-26 16:34:31 +0000 (Sat, 26 Feb 2011)
Log Message:
-----------
applying patch from Alan Birtles  - py++ fails on cygwin - ID: 3057341
Modified Paths:
--------------
    pyplusplus_dev/docs/history/history.rest
    pyplusplus_dev/pyplusplus/file_writers/writer.py
Modified: pyplusplus_dev/docs/history/history.rest
===================================================================
--- pyplusplus_dev/docs/history/history.rest	2010-07-19 06:28:14 UTC (rev 1844)
+++ pyplusplus_dev/docs/history/history.rest	2011-02-26 16:34:31 UTC (rev 1845)
@@ -27,6 +27,7 @@
 * Pertti Kellomäki
 * Benoît Leveau
 * Nikolaus Rath
+* Alan Birtles 
 
 -----------
 SVN Version
@@ -69,6 +70,8 @@
 
 10. Numerous bugs in "ctypes code generator" were fixed. Many thanks to Nikolaus Rath.
 
+11. Thanks to Alan Birtles, for fixing a small issue on cygwin
+
 -----------
 Version 1.0
 -----------
Modified: pyplusplus_dev/pyplusplus/file_writers/writer.py
===================================================================
--- pyplusplus_dev/pyplusplus/file_writers/writer.py	2010-07-19 06:28:14 UTC (rev 1844)
+++ pyplusplus_dev/pyplusplus/file_writers/writer.py	2011-02-26 16:34:31 UTC (rev 1845)
@@ -37,6 +37,9 @@
 
     def makedirs_for_file( self, file_path ):
         destination_dir = os.path.dirname( file_path )
+        if destination_dir == "":
+            return
+
         if not os.path.exists( destination_dir ):
             os.makedirs( destination_dir )
 
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2010-07-19 06:28:21
      
     | 
| Revision: 1844
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1844&view=rev
Author:   roman_yakovenko
Date:     2010-07-19 06:28:14 +0000 (Mon, 19 Jul 2010)
Log Message:
-----------
 "__int128_t" and "__uint128_t" types were introduced. Many thanks to Gustavo Carneiro for the patch
Modified Paths:
--------------
    pygccxml_dev/docs/history/history.rest
    pygccxml_dev/pygccxml/declarations/__init__.py
    pygccxml_dev/pygccxml/declarations/cpptypes.py
    pygccxml_dev/pygccxml/declarations/type_traits.py
    pygccxml_dev/pygccxml/declarations/type_visitor.py
    pygccxml_dev/pygccxml/parser/linker.py
    pygccxml_dev/unittests/core_tester.py
Modified: pygccxml_dev/docs/history/history.rest
===================================================================
--- pygccxml_dev/docs/history/history.rest	2010-07-12 19:50:31 UTC (rev 1843)
+++ pygccxml_dev/docs/history/history.rest	2010-07-19 06:28:14 UTC (rev 1844)
@@ -60,6 +60,9 @@
     `elif sys.platform == 'linux2' or sys.platform == 'darwin'` with `os.name == 'posix'`,
     as suggested by `Jakub Wilk <http://groups.google.com/group/linux.debian.bugs.dist/browse_thread/thread/572d2286ca0b2cec?pli=1>`
 
+11. "__int128_t" and "__uint128_t" types were introduced. Many thanks to Gustavo Carneiro
+   for providing the patch.
+
 -----------
 Version 1.0
 -----------
Modified: pygccxml_dev/pygccxml/declarations/__init__.py
===================================================================
--- pygccxml_dev/pygccxml/declarations/__init__.py	2010-07-12 19:50:31 UTC (rev 1843)
+++ pygccxml_dev/pygccxml/declarations/__init__.py	2010-07-19 06:28:14 UTC (rev 1844)
@@ -42,6 +42,8 @@
 from cpptypes import long_unsigned_int_t
 from cpptypes import long_long_int_t
 from cpptypes import long_long_unsigned_int_t
+from cpptypes import int128_t
+from cpptypes import uint128_t
 from cpptypes import float_t
 from cpptypes import double_t
 from cpptypes import long_double_t
Modified: pygccxml_dev/pygccxml/declarations/cpptypes.py
===================================================================
--- pygccxml_dev/pygccxml/declarations/cpptypes.py	2010-07-12 19:50:31 UTC (rev 1843)
+++ pygccxml_dev/pygccxml/declarations/cpptypes.py	2010-07-19 06:28:14 UTC (rev 1844)
@@ -302,6 +302,19 @@
     def __init__( self ):
         java_fundamental_t.__init__( self, jboolean_t.JNAME )
 
+class int128_t( fundamental_t ):
+    """represents __int128_t type"""
+    CPPNAME = '__int128_t'
+    def __init__( self ):
+        fundamental_t.__init__( self, int128_t.CPPNAME )
+
+class uint128_t( fundamental_t ):
+    """represents __uint128_t type"""
+    CPPNAME = '__uint128_t'
+    def __init__( self ):
+        fundamental_t.__init__( self, uint128_t.CPPNAME )
+
+
 FUNDAMENTAL_TYPES = {
     void_t.CPPNAME : void_t()
     , char_t.CPPNAME : char_t()
@@ -319,6 +332,8 @@
     , long_unsigned_int_t.CPPNAME : long_unsigned_int_t()
     , long_long_int_t.CPPNAME : long_long_int_t()
     , long_long_unsigned_int_t.CPPNAME : long_long_unsigned_int_t()
+    , int128_t.CPPNAME : int128_t()
+    , uint128_t.CPPNAME : uint128_t()
     , float_t.CPPNAME : float_t()
     , double_t.CPPNAME : double_t()
     , long_double_t.CPPNAME : long_double_t()
Modified: pygccxml_dev/pygccxml/declarations/type_traits.py
===================================================================
--- pygccxml_dev/pygccxml/declarations/type_traits.py	2010-07-12 19:50:31 UTC (rev 1843)
+++ pygccxml_dev/pygccxml/declarations/type_traits.py	2010-07-19 06:28:14 UTC (rev 1844)
@@ -142,7 +142,9 @@
                    + create_cv_types( cpptypes.long_int_t() )              \
                    + create_cv_types( cpptypes.long_unsigned_int_t() )     \
                    + create_cv_types( cpptypes.long_long_int_t() )         \
-                   + create_cv_types( cpptypes.long_long_unsigned_int_t() )
+                   + create_cv_types( cpptypes.long_long_unsigned_int_t() ) \
+                   + create_cv_types( cpptypes.int128_t() )                 \
+                   + create_cv_types( cpptypes.uint128_t() )
 
     return remove_alias( type ) in integral_def
 
Modified: pygccxml_dev/pygccxml/declarations/type_visitor.py
===================================================================
--- pygccxml_dev/pygccxml/declarations/type_visitor.py	2010-07-12 19:50:31 UTC (rev 1843)
+++ pygccxml_dev/pygccxml/declarations/type_visitor.py	2010-07-19 06:28:14 UTC (rev 1844)
@@ -58,6 +58,12 @@
     def visit_long_long_unsigned_int( self ):
         raise NotImplementedError()
 
+    def visit_int128( self ):
+        raise NotImplementedError()
+
+    def visit_uint128( self ):
+        raise NotImplementedError()
+
     def visit_float( self ):
         raise NotImplementedError()
 
Modified: pygccxml_dev/pygccxml/parser/linker.py
===================================================================
--- pygccxml_dev/pygccxml/parser/linker.py	2010-07-12 19:50:31 UTC (rev 1843)
+++ pygccxml_dev/pygccxml/parser/linker.py	2010-07-19 06:28:14 UTC (rev 1844)
@@ -188,6 +188,12 @@
     def visit_long_long_unsigned_int( self ):
         pass
 
+    def visit_int128( self ):
+        pass
+
+    def visit_uint128( self ):
+        pass
+
     def visit_float( self ):
         pass
 
Modified: pygccxml_dev/unittests/core_tester.py
===================================================================
--- pygccxml_dev/unittests/core_tester.py	2010-07-12 19:50:31 UTC (rev 1843)
+++ pygccxml_dev/unittests/core_tester.py	2010-07-19 06:28:14 UTC (rev 1844)
@@ -177,6 +177,8 @@
         for fundamental_type_name, fundamental_type in FUNDAMENTAL_TYPES.iteritems():
             if 'complex' in fundamental_type_name:
                 continue #I check this in an other tester
+            if isinstance( fundamental_type, (int128_t, uint128_t) ):
+                continue #I don't have test case for this
             if isinstance( fundamental_type, java_fundamental_t ):
                 continue #I don't check this at all
             typedef_name = 'typedef_' + fundamental_type_name.replace( ' ', '_' )
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2010-07-12 19:50:37
      
     | 
| Revision: 1843
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1843&view=rev
Author:   roman_yakovenko
Date:     2010-07-12 19:50:31 +0000 (Mon, 12 Jul 2010)
Log Message:
-----------
allow pygccxml to run on FreeBSD
Modified Paths:
--------------
    pygccxml_dev/docs/history/history.rest
    pygccxml_dev/pygccxml/__init__.py
    pygccxml_dev/pygccxml/parser/config.py
Modified: pygccxml_dev/docs/history/history.rest
===================================================================
--- pygccxml_dev/docs/history/history.rest	2010-05-16 10:53:45 UTC (rev 1842)
+++ pygccxml_dev/docs/history/history.rest	2010-07-12 19:50:31 UTC (rev 1843)
@@ -56,6 +56,10 @@
    right relative paths and paths with spaces. Many thanks to Alejandro Dubrovsky
    for providing the patch.
 
+10. Small fix, which allows pygccxml to be usable on FreeBSD too: replace
+    `elif sys.platform == 'linux2' or sys.platform == 'darwin'` with `os.name == 'posix'`,
+    as suggested by `Jakub Wilk <http://groups.google.com/group/linux.debian.bugs.dist/browse_thread/thread/572d2286ca0b2cec?pli=1>`
+
 -----------
 Version 1.0
 -----------
Modified: pygccxml_dev/pygccxml/__init__.py
===================================================================
--- pygccxml_dev/pygccxml/__init__.py	2010-05-16 10:53:45 UTC (rev 1842)
+++ pygccxml_dev/pygccxml/__init__.py	2010-07-12 19:50:31 UTC (rev 1843)
@@ -33,6 +33,6 @@
 #TODO:
 #  1. Add "explicit" property for constructors
 
-__version__ = '1.5.0'
+__version__ = '1.5.1'
 
 __revision__ = 1080
Modified: pygccxml_dev/pygccxml/parser/config.py
===================================================================
--- pygccxml_dev/pygccxml/parser/config.py	2010-05-16 10:53:45 UTC (rev 1842)
+++ pygccxml_dev/pygccxml/parser/config.py	2010-07-12 19:50:31 UTC (rev 1843)
@@ -176,7 +176,7 @@
         if sys.platform == 'win32':
             gccxml_name = 'gccxml' + '.exe'
             environment_var_delimiter = ';'
-        elif sys.platform == 'linux2' or sys.platform == 'darwin':
+        elif os.name == 'posix':
             gccxml_name = 'gccxml'
             environment_var_delimiter = ':'
         else:
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2010-05-16 10:53:51
      
     | 
| Revision: 1842
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1842&view=rev
Author:   roman_yakovenko
Date:     2010-05-16 10:53:45 +0000 (Sun, 16 May 2010)
Log Message:
-----------
adding new test case, where Py++ fails to generate correct code
Modified Paths:
--------------
    pyplusplus_dev/unittests/test_all.py
Added Paths:
-----------
    pyplusplus_dev/unittests/data/indexing_suite2_shared_ptr_value_traits_to_be_exported.cpp
    pyplusplus_dev/unittests/data/indexing_suite2_shared_ptr_value_traits_to_be_exported.hpp
    pyplusplus_dev/unittests/indexing_suite2_shared_ptr_value_traits_tester.py
Added: pyplusplus_dev/unittests/data/indexing_suite2_shared_ptr_value_traits_to_be_exported.cpp
===================================================================
--- pyplusplus_dev/unittests/data/indexing_suite2_shared_ptr_value_traits_to_be_exported.cpp	                        (rev 0)
+++ pyplusplus_dev/unittests/data/indexing_suite2_shared_ptr_value_traits_to_be_exported.cpp	2010-05-16 10:53:45 UTC (rev 1842)
@@ -0,0 +1,34 @@
+// Copyright 2004-2008 Roman Yakovenko.
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include "indexing_suite2_shared_ptr_value_traits_to_be_exported.hpp"
+
+
+namespace samples
+{
+
+boost::shared_ptr<A> func()
+{
+    return boost::shared_ptr<A>(new A());
+}
+
+std::vector<A> funcVector()
+{
+    std::vector<A> items;
+    items.push_back(A());
+    items.push_back(A());
+    return items;
+}
+
+std::vector<boost::shared_ptr<A> > funcVectorShared()
+{
+    std::vector<boost::shared_ptr<A> > items;
+    items.push_back(boost::shared_ptr<A>(new A()));
+    items.push_back(boost::shared_ptr<A>(new A()));
+    return items;
+}
+
+}
+
Added: pyplusplus_dev/unittests/data/indexing_suite2_shared_ptr_value_traits_to_be_exported.hpp
===================================================================
--- pyplusplus_dev/unittests/data/indexing_suite2_shared_ptr_value_traits_to_be_exported.hpp	                        (rev 0)
+++ pyplusplus_dev/unittests/data/indexing_suite2_shared_ptr_value_traits_to_be_exported.hpp	2010-05-16 10:53:45 UTC (rev 1842)
@@ -0,0 +1,26 @@
+// Copyright 2004-2008 Roman Yakovenko.
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef __indexing_suite2_shared_ptr_value_traits_to_be_exported_hpp__
+#define __indexing_suite2_shared_ptr_value_traits_to_be_exported_hpp__
+
+#include <vector>
+#include <boost/shared_ptr.hpp>
+
+namespace samples
+{
+
+class A
+{};
+
+boost::shared_ptr<A> func();
+
+std::vector<A> funcVector();
+
+std::vector<boost::shared_ptr<A> > funcVectorShared();
+
+}
+
+#endif//__indexing_suite2_shared_ptr_value_traits_to_be_exported_hpp__
Added: pyplusplus_dev/unittests/indexing_suite2_shared_ptr_value_traits_tester.py
===================================================================
--- pyplusplus_dev/unittests/indexing_suite2_shared_ptr_value_traits_tester.py	                        (rev 0)
+++ pyplusplus_dev/unittests/indexing_suite2_shared_ptr_value_traits_tester.py	2010-05-16 10:53:45 UTC (rev 1842)
@@ -0,0 +1,39 @@
+# Copyright 2004-2008 Roman Yakovenko.
+# Distributed under the Boost Software License, Version 1.0. (See
+# accompanying file LICENSE_1_0.txt or copy at
+# http://www.boost.org/LICENSE_1_0.txt)
+
+import os
+import sys
+import unittest
+import fundamental_tester_base
+from pygccxml import declarations
+from pyplusplus import module_builder
+
+
+class tester_t(fundamental_tester_base.fundamental_tester_base_t):
+    EXTENSION_NAME = 'indexing_suite2_shared_ptr_value_traits'
+
+    def __init__( self, *args ):
+        fundamental_tester_base.fundamental_tester_base_t.__init__(
+            self
+            , tester_t.EXTENSION_NAME
+            , indexing_suite_version=2
+            , *args)
+
+    def customize(self, generator):
+        pass
+
+    def run_tests( self, module):
+        pass
+
+def create_suite():
+    suite = unittest.TestSuite()
+    suite.addTest( unittest.makeSuite(tester_t))
+    return suite
+
+def run_suite():
+    unittest.TextTestRunner(verbosity=2).run( create_suite() )
+
+if __name__ == "__main__":
+    run_suite()
Modified: pyplusplus_dev/unittests/test_all.py
===================================================================
--- pyplusplus_dev/unittests/test_all.py	2010-04-29 18:41:50 UTC (rev 1841)
+++ pyplusplus_dev/unittests/test_all.py	2010-05-16 10:53:45 UTC (rev 1842)
@@ -127,6 +127,7 @@
 import ft_inout_static_matrix_tester
 import ft_inout_static_array_tester
 import inner_base_class_tester
+import indexing_suite2_shared_ptr_value_traits_tester
 
 testers = [
     algorithms_tester
@@ -242,6 +243,7 @@
     , ft_inout_static_matrix_tester
     , ft_inout_static_array_tester
     , inner_base_class_tester
+    , indexing_suite2_shared_ptr_value_traits_tester
 #    , ogre_generate_tester too much time
 ]
 
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2010-04-29 18:41:57
      
     | 
| Revision: 1841
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1841&view=rev
Author:   roman_yakovenko
Date:     2010-04-29 18:41:50 +0000 (Thu, 29 Apr 2010)
Log Message:
-----------
adding new use case to the test
Modified Paths:
--------------
    pyplusplus_dev/unittests/data/operators_bug_to_be_exported.hpp
Modified: pyplusplus_dev/unittests/data/operators_bug_to_be_exported.hpp
===================================================================
--- pyplusplus_dev/unittests/data/operators_bug_to_be_exported.hpp	2010-04-29 18:39:11 UTC (rev 1840)
+++ pyplusplus_dev/unittests/data/operators_bug_to_be_exported.hpp	2010-04-29 18:41:50 UTC (rev 1841)
@@ -30,6 +30,12 @@
         tmp.value = value + x; 
         return tmp;
     }
+    integral operator++( ){
+        integral tmp;
+        tmp.value = value + 1; 
+        return tmp;
+    }
+    
 };
 
 struct integral2 : public number< integral, int >{
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2010-04-29 18:39:17
      
     | 
| Revision: 1840
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1840&view=rev
Author:   roman_yakovenko
Date:     2010-04-29 18:39:11 +0000 (Thu, 29 Apr 2010)
Log Message:
-----------
adding "__len__" method to set, map and multimap
Modified Paths:
--------------
    pyplusplus_dev/indexing_suite_v2/indexing_suite/map.hpp
    pyplusplus_dev/indexing_suite_v2/indexing_suite/multimap.hpp
    pyplusplus_dev/indexing_suite_v2/indexing_suite/set.hpp
    pyplusplus_dev/pyplusplus/code_repository/indexing_suite/map_header.py
    pyplusplus_dev/pyplusplus/code_repository/indexing_suite/multimap_header.py
    pyplusplus_dev/pyplusplus/code_repository/indexing_suite/set_header.py
    pyplusplus_dev/unittests/indexing_suites2_tester.py
Modified: pyplusplus_dev/indexing_suite_v2/indexing_suite/map.hpp
===================================================================
--- pyplusplus_dev/indexing_suite_v2/indexing_suite/map.hpp	2010-04-29 05:26:06 UTC (rev 1839)
+++ pyplusplus_dev/indexing_suite_v2/indexing_suite/map.hpp	2010-04-29 18:39:11 UTC (rev 1840)
@@ -12,6 +12,7 @@
 // =======
 // 2003/10/28   rmg     File creation from algo_selector.hpp
 // 2008/12/08   Roman   Change indexing suite layout
+// 2010/04/29   Roman   Adding "__len__" method
 //
 // $Id: map.hpp,v 1.1.2.6 2004/02/08 18:57:42 raoulgough Exp $
 //
@@ -63,6 +64,7 @@
             | method_contains
             | method_count
             | method_has_key
+            | method_len
 
             | detail::method_set_if<
                   base_class::is_mutable,
Modified: pyplusplus_dev/indexing_suite_v2/indexing_suite/multimap.hpp
===================================================================
--- pyplusplus_dev/indexing_suite_v2/indexing_suite/multimap.hpp	2010-04-29 05:26:06 UTC (rev 1839)
+++ pyplusplus_dev/indexing_suite_v2/indexing_suite/multimap.hpp	2010-04-29 18:39:11 UTC (rev 1840)
@@ -10,6 +10,7 @@
 // =======
 // 2006/10/27   Roman     File creation from map.hpp
 // 2008/12/08   Roman   Change indexing suite layout
+// 2010/04/29   Roman   Adding "__len__" method
 //
 
 #ifndef BOOST_PYTHON_INDEXING_MULTIMAP_HPP
@@ -60,6 +61,7 @@
             | method_contains
             | method_count
             | method_has_key
+            | method_len
 
             | detail::method_set_if<
                   base_class::is_mutable,
Modified: pyplusplus_dev/indexing_suite_v2/indexing_suite/set.hpp
===================================================================
--- pyplusplus_dev/indexing_suite_v2/indexing_suite/set.hpp	2010-04-29 05:26:06 UTC (rev 1839)
+++ pyplusplus_dev/indexing_suite_v2/indexing_suite/set.hpp	2010-04-29 18:39:11 UTC (rev 1840)
@@ -12,6 +12,7 @@
 // =======
 // 2003/10/28   rmg     File creation from algo_selector.hpp
 // 2008/12/08   Roman   Change indexing suite layout
+// 2010/04/29   Roman   Adding "__len__" method
 //
 // $Id: set.hpp,v 1.1.2.6 2004/02/08 18:57:42 raoulgough Exp $
 //
@@ -55,6 +56,7 @@
             | method_contains
             | method_count
             | method_has_key
+            | method_len
 
             | detail::method_set_if<
                   base_class::is_mutable,
Modified: pyplusplus_dev/pyplusplus/code_repository/indexing_suite/map_header.py
===================================================================
--- pyplusplus_dev/pyplusplus/code_repository/indexing_suite/map_header.py	2010-04-29 05:26:06 UTC (rev 1839)
+++ pyplusplus_dev/pyplusplus/code_repository/indexing_suite/map_header.py	2010-04-29 18:39:11 UTC (rev 1840)
@@ -23,6 +23,7 @@
 // =======
 // 2003/10/28   rmg     File creation from algo_selector.hpp
 // 2008/12/08   Roman   Change indexing suite layout
+// 2010/04/29   Roman   Adding "__len__" method
 //
 // $Id: map.hpp,v 1.1.2.6 2004/02/08 18:57:42 raoulgough Exp $
 //
@@ -74,6 +75,7 @@
             | method_contains
             | method_count
             | method_has_key
+            | method_len
 
             | detail::method_set_if<
                   base_class::is_mutable,
Modified: pyplusplus_dev/pyplusplus/code_repository/indexing_suite/multimap_header.py
===================================================================
--- pyplusplus_dev/pyplusplus/code_repository/indexing_suite/multimap_header.py	2010-04-29 05:26:06 UTC (rev 1839)
+++ pyplusplus_dev/pyplusplus/code_repository/indexing_suite/multimap_header.py	2010-04-29 18:39:11 UTC (rev 1840)
@@ -21,6 +21,7 @@
 // =======
 // 2006/10/27   Roman     File creation from map.hpp
 // 2008/12/08   Roman   Change indexing suite layout
+// 2010/04/29   Roman   Adding "__len__" method
 //
 
 #ifndef BOOST_PYTHON_INDEXING_MULTIMAP_HPP
@@ -71,6 +72,7 @@
             | method_contains
             | method_count
             | method_has_key
+            | method_len
 
             | detail::method_set_if<
                   base_class::is_mutable,
Modified: pyplusplus_dev/pyplusplus/code_repository/indexing_suite/set_header.py
===================================================================
--- pyplusplus_dev/pyplusplus/code_repository/indexing_suite/set_header.py	2010-04-29 05:26:06 UTC (rev 1839)
+++ pyplusplus_dev/pyplusplus/code_repository/indexing_suite/set_header.py	2010-04-29 18:39:11 UTC (rev 1840)
@@ -23,6 +23,7 @@
 // =======
 // 2003/10/28   rmg     File creation from algo_selector.hpp
 // 2008/12/08   Roman   Change indexing suite layout
+// 2010/04/29   Roman   Adding "__len__" method
 //
 // $Id: set.hpp,v 1.1.2.6 2004/02/08 18:57:42 raoulgough Exp $
 //
@@ -66,6 +67,7 @@
             | method_contains
             | method_count
             | method_has_key
+            | method_len
 
             | detail::method_set_if<
                   base_class::is_mutable,
Modified: pyplusplus_dev/unittests/indexing_suites2_tester.py
===================================================================
--- pyplusplus_dev/unittests/indexing_suites2_tester.py	2010-04-29 05:26:06 UTC (rev 1839)
+++ pyplusplus_dev/unittests/indexing_suites2_tester.py	2010-04-29 18:39:11 UTC (rev 1840)
@@ -55,6 +55,7 @@
         
         name2value = module.name2value_t()
         name2value[ "x" ] = "y"
+        self.failUnless( len(name2value) == 1 )
         self.failUnless( "x" == module.get_first_name( name2value ) )
         for kv in name2value:
             self.failUnless( kv.key == "x" and kv.value == "y" )
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2010-04-29 05:26:13
      
     | 
| Revision: 1839
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1839&view=rev
Author:   roman_yakovenko
Date:     2010-04-29 05:26:06 +0000 (Thu, 29 Apr 2010)
Log Message:
-----------
fixing broken link
Modified Paths:
--------------
    pyplusplus_dev/docs/documentation/containers.rest
Modified: pyplusplus_dev/docs/documentation/containers.rest
===================================================================
--- pyplusplus_dev/docs/documentation/containers.rest	2010-04-23 20:39:09 UTC (rev 1838)
+++ pyplusplus_dev/docs/documentation/containers.rest	2010-04-29 05:26:06 UTC (rev 1839)
@@ -47,7 +47,7 @@
 * http://mail.python.org/pipermail/c++-sig/2006-June/010835.html
 
 
-.. _`post` : http://mail.python.org/pipermail/c++-sig/2003-October/005802.html
+.. _`post` : http://mail.python.org/pipermail/cplusplus-sig/2003-October/005453.html
 
 
 ------------------------
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2010-04-23 20:39:15
      
     | 
| Revision: 1838
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1838&view=rev
Author:   roman_yakovenko
Date:     2010-04-23 20:39:09 +0000 (Fri, 23 Apr 2010)
Log Message:
-----------
update gccxml to 1.133 version
Modified Paths:
--------------
    gccxml_bin/v09/linux2/bin/gccxml
    gccxml_bin/v09/linux2/bin/gccxml_cc1plus
    gccxml_bin/v09/linux2/share/man/man1/gccxml.1
Modified: gccxml_bin/v09/linux2/bin/gccxml
===================================================================
(Binary files differ)
Modified: gccxml_bin/v09/linux2/bin/gccxml_cc1plus
===================================================================
(Binary files differ)
Modified: gccxml_bin/v09/linux2/share/man/man1/gccxml.1
===================================================================
--- gccxml_bin/v09/linux2/share/man/man1/gccxml.1	2010-04-15 06:11:14 UTC (rev 1837)
+++ gccxml_bin/v09/linux2/share/man/man1/gccxml.1	2010-04-23 20:39:09 UTC (rev 1838)
@@ -1,4 +1,4 @@
-.TH GCC-XML 1 "January 26, 2010" "GCC-XML 0.9.0"
+.TH GCC-XML 1 "April 23, 2010" "GCC-XML 0.9.0"
 .SH NAME
 .TP
 .B gccxml
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2010-04-15 06:11:20
      
     | 
| Revision: 1837
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1837&view=rev
Author:   roman_yakovenko
Date:     2010-04-15 06:11:14 +0000 (Thu, 15 Apr 2010)
Log Message:
-----------
misc: testing a work around for an auto_ptr in constructor
Modified Paths:
--------------
    pyplusplus_dev/unittests/data/smart_pointers_to_be_exported.cpp
    pyplusplus_dev/unittests/data/smart_pointers_to_be_exported.hpp
    pyplusplus_dev/unittests/fundamental_tester_base.py
    pyplusplus_dev/unittests/smart_pointers_tester.py
Modified: pyplusplus_dev/unittests/data/smart_pointers_to_be_exported.cpp
===================================================================
--- pyplusplus_dev/unittests/data/smart_pointers_to_be_exported.cpp	2010-04-14 08:48:52 UTC (rev 1836)
+++ pyplusplus_dev/unittests/data/smart_pointers_to_be_exported.cpp	2010-04-15 06:11:14 UTC (rev 1837)
@@ -6,7 +6,7 @@
 #include "smart_pointers_to_be_exported.hpp"
 
 namespace smart_pointers{
-   
+
 data_a_ptr create_auto(){ return data_a_ptr( new data() ); }
 data_s_ptr create_shared(){ return data_s_ptr( new data() ); }
 
@@ -27,7 +27,7 @@
 
 int const_ref_auto_base_value( const base_a_ptr& a ){ return a->get_base_value(); }
 int const_ref_shared_base_value( const base_s_ptr& a ){ return a->get_base_value(); }
-   
+
 
 
 int ref_auto_some_value( base_a_ptr& a ){ return a->get_some_value(); }
@@ -38,7 +38,11 @@
 
 int const_ref_auto_some_value( const base_a_ptr& a ){ return a->get_some_value(); }
 int const_ref_shared_some_value( const base_s_ptr& a ){ return a->get_some_value(); }
-
-
 
-}    
+namespace autoptr_init_bug{
+std::auto_ptr< B > createB(int value, std::auto_ptr<A> a){
+    return std::auto_ptr< B >( new B( value, a ) );
+}
+}
+
+}
Modified: pyplusplus_dev/unittests/data/smart_pointers_to_be_exported.hpp
===================================================================
--- pyplusplus_dev/unittests/data/smart_pointers_to_be_exported.hpp	2010-04-14 08:48:52 UTC (rev 1836)
+++ pyplusplus_dev/unittests/data/smart_pointers_to_be_exported.hpp	2010-04-15 06:11:14 UTC (rev 1837)
@@ -29,7 +29,7 @@
 
 typedef std::auto_ptr< data > data_a_ptr;
 typedef boost::shared_ptr< data > data_s_ptr;
-    
+
 data_a_ptr create_auto();
 data_s_ptr create_shared();
 
@@ -61,7 +61,7 @@
 int const_ref_shared_some_value( const base_s_ptr& a );
 
 struct shared_data_buffer_t{
-    shared_data_buffer_t() 
+    shared_data_buffer_t()
     : size( 0 )
     {}
     int size;
@@ -73,11 +73,33 @@
     : buffer( new shared_data_buffer_t() )
       , const_buffer( new shared_data_buffer_t() )
     {}
-        
+
     holder_impl_t buffer;
     const holder_impl_t const_buffer;
 };
 
-}    
+namespace autoptr_init_bug{
 
+struct A{
+   A(int value) : m_value(value) {}
+   int m_value;
+};
+
+struct B{
+   B(int value, std::auto_ptr<A> a) : m_value(value), m_a(a.release() ) {}
+
+   int m_value;
+
+   int get_a_value(){ return m_a->m_value; }
+
+private:
+   std::auto_ptr<A> m_a;
+};
+
+std::auto_ptr< B > createB(int value, std::auto_ptr<A> a);
+
+}
+
+}
+
 #endif//__smart_pointers_to_be_exported_hpp__
Modified: pyplusplus_dev/unittests/fundamental_tester_base.py
===================================================================
--- pyplusplus_dev/unittests/fundamental_tester_base.py	2010-04-14 08:48:52 UTC (rev 1836)
+++ pyplusplus_dev/unittests/fundamental_tester_base.py	2010-04-15 06:11:14 UTC (rev 1837)
@@ -97,6 +97,9 @@
     def _create_extension_source_file(self):
         global LICENSE
 
+        if os.path.exists( self.__generated_source_file_name + '.xml' ):
+            os.remove( self.__generated_source_file_name + '.xml' )
+
         test_header_cfg \
             = pygccxml.parser.create_cached_source_fc( self.__to_be_exported_header
                                                        , self.__generated_source_file_name + '.xml' )
Modified: pyplusplus_dev/unittests/smart_pointers_tester.py
===================================================================
--- pyplusplus_dev/unittests/smart_pointers_tester.py	2010-04-14 08:48:52 UTC (rev 1836)
+++ pyplusplus_dev/unittests/smart_pointers_tester.py	2010-04-15 06:11:14 UTC (rev 1837)
@@ -12,41 +12,49 @@
 
 class tester_t(fundamental_tester_base.fundamental_tester_base_t):
     EXTENSION_NAME = 'smart_pointers'
-    
+
     def __init__( self, *args ):
-        fundamental_tester_base.fundamental_tester_base_t.__init__( 
+        fundamental_tester_base.fundamental_tester_base_t.__init__(
             self
             , tester_t.EXTENSION_NAME
             , *args )
-   
+
     def customize( self, mb ):
         base = mb.class_( 'base' )
         shared_ptrs = mb.decls( lambda decl: decl.name.startswith( 'shared_ptr<' ) )
         shared_ptrs.disable_warnings( messages.W1040 )
         mb.variable( 'buffer' ).apply_smart_ptr_wa = True
         mb.variable( 'const_buffer' ).apply_smart_ptr_wa = True
-   
+
+
+        A = mb.class_('A' )
+        A.held_type = 'std::auto_ptr< %s >' % A.decl_string
+        B = mb.class_( 'B' )
+        B.constructors().exclude()
+        B.add_fake_constructors( mb.free_function( 'createB' ) )
+
+
     def create_py_derived( self, module ):
         class py_derived_t( module.base ):
             def __init__( self ):
                 module.base.__init__( self )
-            
+
             def get_some_value( self ):
                 return 28
-        
+
         return py_derived_t()
-        
+
     def run_tests( self, module):
         da = module.create_auto()
         py_derived = self.create_py_derived( module )
-        
+
         self.failUnless( 11 == da.value )
         ds = module.create_shared()
         self.failUnless( 11 == ds.value )
-        
+
         self.failUnless( 11 == module.ref_auto(da) )
         self.failUnless( 11 == module.ref_shared(ds) )
-        
+
         #why? because in this case held type could not be set
         #self.failUnless( 11 == module.ref_shared(py_derived) )
 
@@ -57,7 +65,7 @@
 
         self.failUnless( 11 == module.const_ref_auto(da) )
         self.failUnless( 11 == module.const_ref_shared(ds) )
-        
+
         #TODO: find out why this fails
         #self.failUnless( 19 == module.ref_auto_base_value(da) )
         #self.failUnless( 19 == module.ref_shared_base_value(ds) )
@@ -73,9 +81,9 @@
         self.failUnless( 19 == module.val_auto_base_value(da) )
         self.failUnless( 19 == module.val_shared_base_value(ds) )
         self.failUnless( 19 == module.val_shared_base_value(py_derived) )
-        
+
         da = module.create_auto()
-        
+
         self.failUnless( 23 == module.val_auto_some_value(da) )
         self.failUnless( 28 == module.val_shared_some_value(py_derived) )
 
@@ -86,17 +94,22 @@
 
         holder1 = module.shared_data_buffer_holder_t()
         self.failUnless( holder1.buffer.size == 0 )
-        
+
         holder2 = module.shared_data_buffer_holder_t()
         holder2.buffer.size = 2
-        
+
         holder1.buffer = holder2.buffer
         self.failUnless( holder1.buffer.size == 2 )
         holder1.buffer.size = 3
         self.failUnless( holder2.buffer.size == 3 )
 
+        a = module.A( 23 )
+        b = module.B( 21, a )
+
+        self.failUnless( b.get_a_value() == 23 )
+
 def create_suite():
-    suite = unittest.TestSuite()    
+    suite = unittest.TestSuite()
     suite.addTest( unittest.makeSuite(tester_t))
     return suite
 
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2010-04-14 08:48:58
      
     | 
| Revision: 1836
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1836&view=rev
Author:   roman_yakovenko
Date:     2010-04-14 08:48:52 +0000 (Wed, 14 Apr 2010)
Log Message:
-----------
fix documentation bug. Thanks to Benoit Leveau for pointing to it
Modified Paths:
--------------
    pyplusplus_dev/docs/documentation/functions/call_policies/call_policies.rest
Modified: pyplusplus_dev/docs/documentation/functions/call_policies/call_policies.rest
===================================================================
--- pyplusplus_dev/docs/documentation/functions/call_policies/call_policies.rest	2010-04-06 04:21:48 UTC (rev 1835)
+++ pyplusplus_dev/docs/documentation/functions/call_policies/call_policies.rest	2010-04-14 08:48:52 UTC (rev 1836)
@@ -109,7 +109,7 @@
 
     * return type is ``T&``, for member ``operator[]`` that returns reference to immutable type
 
-  * ``return_internal_reference``
+* ``return_internal_reference``
 
     * return type is ``T&``, for member ``operator[]``
 
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2010-04-06 04:21:54
      
     | 
| Revision: 1835
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1835&view=rev
Author:   roman_yakovenko
Date:     2010-04-06 04:21:48 +0000 (Tue, 06 Apr 2010)
Log Message:
-----------
adding sitemap generator
Modified Paths:
--------------
    sphinx/conf.py
Added Paths:
-----------
    sphinx/sitemap_gen.py
Modified: sphinx/conf.py
===================================================================
--- sphinx/conf.py	2010-04-05 09:08:06 UTC (rev 1834)
+++ sphinx/conf.py	2010-04-06 04:21:48 UTC (rev 1835)
@@ -30,6 +30,8 @@
 doc_project_root = os.path.abspath('.')
 packages = ( 'pydsc', 'pygccxml', 'pyplusplus' )
 
+sys.path.append( doc_project_root )
+
 has_true_links = 'linux' in sys.platform
 for pkg in packages:
     target = os.path.join( doc_project_root, pkg )
Added: sphinx/sitemap_gen.py
===================================================================
--- sphinx/sitemap_gen.py	                        (rev 0)
+++ sphinx/sitemap_gen.py	2010-04-06 04:21:48 UTC (rev 1835)
@@ -0,0 +1,2205 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2004, 2005 Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+#   notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the following disclaimer in
+#   the documentation and/or other materials provided with the
+#   distribution.
+#
+# * Neither the name of Google nor the names of its contributors may
+#   be used to endorse or promote products derived from this software
+#   without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+#
+# The sitemap_gen.py script is written in Python 2.2 and released to
+# the open source community for continuous improvements under the BSD
+# 2.0 new license, which can be found at:
+#
+#   http://www.opensource.org/licenses/bsd-license.php
+#
+
+__usage__ = \
+"""A simple script to automatically produce sitemaps for a webserver,
+in the Google Sitemap Protocol (GSP).
+
+Usage: python sitemap_gen.py --config=config.xml [--help] [--testing]
+            --config=config.xml, specifies config file location
+            --help, displays usage message
+            --testing, specified when user is experimenting
+"""
+
+# Please be careful that all syntax used in this file can be parsed on
+# Python 1.5 -- this version check is not evaluated until after the
+# entire file has been parsed.
+import sys
+if sys.hexversion < 0x02020000:
+  print 'This script requires Python 2.2 or later.'
+  print 'Currently run with version: %s' % sys.version
+  sys.exit(1)
+
+import fnmatch
+import glob
+import gzip
+import md5
+import os
+import re
+import stat
+import time
+import types
+import urllib
+import urlparse
+import xml.sax
+
+# True and False were introduced in Python2.2.2
+try:
+  testTrue=True
+  del testTrue
+except NameError:
+  True=1
+  False=0
+
+# Text encodings
+ENC_ASCII = 'ASCII'
+ENC_UTF8  = 'UTF-8'
+ENC_IDNA  = 'IDNA'
+ENC_ASCII_LIST = ['ASCII', 'US-ASCII', 'US', 'IBM367', 'CP367', 'ISO646-US'
+                  'ISO_646.IRV:1991', 'ISO-IR-6', 'ANSI_X3.4-1968',
+                  'ANSI_X3.4-1986', 'CPASCII' ]
+ENC_DEFAULT_LIST = ['ISO-8859-1', 'ISO-8859-2', 'ISO-8859-5']
+
+# Maximum number of urls in each sitemap, before next Sitemap is created
+MAXURLS_PER_SITEMAP = 50000
+
+# Suffix on a Sitemap index file
+SITEINDEX_SUFFIX = '_index.xml'
+
+# Regular expressions tried for extracting URLs from access logs.
+ACCESSLOG_CLF_PATTERN = re.compile(
+  r'.+\s+"([^\s]+)\s+([^\s]+)\s+HTTP/\d+\.\d+"\s+200\s+.*'
+  )
+
+# Match patterns for lastmod attributes
+LASTMOD_PATTERNS = map(re.compile, [
+  r'^\d\d\d\d$',
+  r'^\d\d\d\d-\d\d$',
+  r'^\d\d\d\d-\d\d-\d\d$',
+  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\dZ$',
+  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d[+-]\d\d:\d\d$',
+  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z$',
+  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?[+-]\d\d:\d\d$',
+  ])
+
+# Match patterns for changefreq attributes
+CHANGEFREQ_PATTERNS = [
+  'always', 'hourly', 'daily', 'weekly', 'monthly', 'yearly', 'never'
+  ]
+
+# XML formats
+SITEINDEX_HEADER   = \
+  '<?xml version="1.0" encoding="UTF-8"?>\n' \
+  '<sitemapindex\n' \
+  '  xmlns="http://www.google.com/schemas/sitemap/0.84"\n' \
+  '  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
+  '  xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84\n' \
+  '                      http://www.google.com/schemas/sitemap/0.84/' \
+  'siteindex.xsd">\n'
+SITEINDEX_FOOTER   = '</sitemapindex>\n'
+SITEINDEX_ENTRY    = \
+  ' <sitemap>\n' \
+  '  <loc>%(loc)s</loc>\n' \
+  '  <lastmod>%(lastmod)s</lastmod>\n' \
+  ' </sitemap>\n'
+SITEMAP_HEADER     = \
+  '<?xml version="1.0" encoding="UTF-8"?>\n' \
+  '<urlset\n' \
+  '  xmlns="http://www.google.com/schemas/sitemap/0.84"\n' \
+  '  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
+  '  xsi:schemaLocation="http://www.google.com/schemas/sitemap/0.84\n' \
+  '                      http://www.google.com/schemas/sitemap/0.84/' \
+  'sitemap.xsd">\n'
+SITEMAP_FOOTER     = '</urlset>\n'
+SITEURL_XML_PREFIX = ' <url>\n'
+SITEURL_XML_SUFFIX = ' </url>\n'
+
+# Search engines to notify with the updated sitemaps
+#
+# This list is very non-obvious in what's going on.  Here's the gist:
+# Each item in the list is a 6-tuple of items.  The first 5 are "almost"
+# the same as the input arguments to urlparse.urlunsplit():
+#   0 - schema
+#   1 - netloc
+#   2 - path
+#   3 - query    <-- EXCEPTION: specify a query map rather than a string
+#   4 - fragment
+# Additionally, add item 5:
+#   5 - query attribute that should be set to the new Sitemap URL
+# Clear as mud, I know.
+NOTIFICATION_SITES = [
+  ('http', 'www.google.com', 'webmasters/sitemaps/ping', {}, '', 'sitemap')
+  ]
+
+
+class Error(Exception):
+  """
+  Base exception class.  In this module we tend not to use our own exception
+  types for very much, but they come in very handy on XML parsing with SAX.
+  """
+  pass
+#end class Error
+
+
+class SchemaError(Error):
+  """Failure to process an XML file according to the schema we know."""
+  pass
+#end class SchemeError
+
+
+class Encoder:
+  """
+  Manages wide-character/narrow-character conversions for just about all
+  text that flows into or out of the script.
+
+  You should always use this class for string coercion, as opposed to
+  letting Python handle coercions automatically.  Reason: Python
+  usually assumes ASCII (7-bit) as a default narrow character encoding,
+  which is not the kind of data we generally deal with.
+
+  General high-level methodologies used in sitemap_gen:
+
+  [PATHS]
+  File system paths may be wide or narrow, depending on platform.
+  This works fine, just be aware of it and be very careful to not
+  mix them.  That is, if you have to pass several file path arguments
+  into a library call, make sure they are all narrow or all wide.
+  This class has MaybeNarrowPath() which should be called on every
+  file system path you deal with.
+
+  [URLS]
+  URL locations are stored in Narrow form, already escaped.  This has the
+  benefit of keeping escaping and encoding as close as possible to the format
+  we read them in.  The downside is we may end up with URLs that have
+  intermingled encodings -- the root path may be encoded in one way
+  while the filename is encoded in another.  This is obviously wrong, but
+  it should hopefully be an issue hit by very few users.  The workaround
+  from the user level (assuming they notice) is to specify a default_encoding
+  parameter in their config file.
+
+  [OTHER]
+  Other text, such as attributes of the URL class, configuration options,
+  etc, are generally stored in Unicode for simplicity.
+  """
+
+  def __init__(self):
+    self._user      = None                  # User-specified default encoding
+    self._learned   = []                    # Learned default encodings
+    self._widefiles = False                 # File system can be wide
+
+    # Can the file system be Unicode?
+    try:
+      self._widefiles = os.path.supports_unicode_filenames
+    except AttributeError:
+      try:
+        self._widefiles = sys.getwindowsversion() == os.VER_PLATFORM_WIN32_NT
+      except AttributeError:
+        pass
+
+    # Try to guess a working default
+    try:
+      encoding = sys.getfilesystemencoding()
+      if encoding and not (encoding.upper() in ENC_ASCII_LIST):
+        self._learned = [ encoding ]
+    except AttributeError:
+      pass
+
+    if not self._learned:
+      encoding = sys.getdefaultencoding()
+      if encoding and not (encoding.upper() in ENC_ASCII_LIST):
+        self._learned = [ encoding ]
+
+    # If we had no guesses, start with some European defaults
+    if not self._learned:
+      self._learned = ENC_DEFAULT_LIST
+  #end def __init__
+
+  def SetUserEncoding(self, encoding):
+    self._user = encoding
+  #end def SetUserEncoding
+
+  def NarrowText(self, text, encoding):
+    """ Narrow a piece of arbitrary text """
+    if type(text) != types.UnicodeType:
+      return text
+
+    # Try the passed in preference
+    if encoding:
+      try:
+        result = text.encode(encoding)
+        if not encoding in self._learned:
+          self._learned.append(encoding)
+        return result
+      except UnicodeError:
+        pass
+      except LookupError:
+        output.Warn('Unknown encoding: %s' % encoding)
+
+    # Try the user preference
+    if self._user:
+      try:
+        return text.encode(self._user)
+      except UnicodeError:
+        pass
+      except LookupError:
+        temp = self._user
+        self._user = None
+        output.Warn('Unknown default_encoding: %s' % temp)
+
+    # Look through learned defaults, knock any failing ones out of the list
+    while self._learned:
+      try:
+        return text.encode(self._learned[0])
+      except:
+        del self._learned[0]
+
+    # When all other defaults are exhausted, use UTF-8
+    try:
+      return text.encode(ENC_UTF8)
+    except UnicodeError:
+      pass
+
+    # Something is seriously wrong if we get to here
+    return text.encode(ENC_ASCII, 'ignore')
+  #end def NarrowText
+  
+  def MaybeNarrowPath(self, text):
+    """ Paths may be allowed to stay wide """
+    if self._widefiles:
+      return text
+    return self.NarrowText(text, None)
+  #end def MaybeNarrowPath
+
+  def WidenText(self, text, encoding):
+    """ Widen a piece of arbitrary text """
+    if type(text) != types.StringType:
+      return text
+
+    # Try the passed in preference
+    if encoding:
+      try:
+        result = unicode(text, encoding)
+        if not encoding in self._learned:
+          self._learned.append(encoding)
+        return result
+      except UnicodeError:
+        pass
+      except LookupError:
+        output.Warn('Unknown encoding: %s' % encoding)
+
+    # Try the user preference
+    if self._user:
+      try:
+        return unicode(text, self._user)
+      except UnicodeError:
+        pass
+      except LookupError:
+        temp = self._user
+        self._user = None
+        output.Warn('Unknown default_encoding: %s' % temp)
+
+    # Look through learned defaults, knock any failing ones out of the list
+    while self._learned:
+      try:
+        return unicode(text, self._learned[0])
+      except:
+        del self._learned[0]
+
+    # When all other defaults are exhausted, use UTF-8
+    try:
+      return unicode(text, ENC_UTF8)
+    except UnicodeError:
+      pass
+
+    # Getting here means it wasn't UTF-8 and we had no working default.
+    # We really don't have anything "right" we can do anymore.
+    output.Warn('Unrecognized encoding in text: %s' % text)
+    if not self._user:
+      output.Warn('You may need to set a default_encoding in your '
+                  'configuration file.')
+    return text.decode(ENC_ASCII, 'ignore')
+  #end def WidenText
+#end class Encoder
+encoder = Encoder()
+
+
+class Output:
+  """
+  Exposes logging functionality, and tracks how many errors
+  we have thus output.
+
+  Logging levels should be used as thus:
+    Fatal     -- extremely sparingly
+    Error     -- config errors, entire blocks of user 'intention' lost
+    Warn      -- individual URLs lost
+    Log(,0)   -- Un-suppressable text that's not an error
+    Log(,1)   -- touched files, major actions
+    Log(,2)   -- parsing notes, filtered or duplicated URLs
+    Log(,3)   -- each accepted URL
+  """
+
+  def __init__(self):
+    self.num_errors    = 0                   # Count of errors
+    self.num_warns     = 0                   # Count of warnings
+
+    self._errors_shown = {}                  # Shown errors
+    self._warns_shown  = {}                  # Shown warnings
+    self._verbose      = 0                   # Level of verbosity
+  #end def __init__
+
+  def Log(self, text, level):
+    """ Output a blurb of diagnostic text, if the verbose level allows it """
+    if text:
+      text = encoder.NarrowText(text, None)
+      if self._verbose >= level:
+        print text
+  #end def Log
+
+  def Warn(self, text):
+    """ Output and count a warning.  Suppress duplicate warnings. """
+    if text:
+      text = encoder.NarrowText(text, None)
+      # Key duplicate suppression on an MD5 digest of the message text.
+      hash = md5.new(text).digest()
+      if not self._warns_shown.has_key(hash):
+        self._warns_shown[hash] = 1
+        print '[WARNING] ' + text
+      else:
+        self.Log('(suppressed) [WARNING] ' + text, 3)
+      self.num_warns = self.num_warns + 1
+  #end def Warn
+
+  def Error(self, text):
+    """ Output and count an error.  Suppress duplicate errors. """
+    if text:
+      text = encoder.NarrowText(text, None)
+      # Same digest-based duplicate suppression as in Warn.
+      hash = md5.new(text).digest()
+      if not self._errors_shown.has_key(hash):
+        self._errors_shown[hash] = 1
+        print '[ERROR] ' + text
+      else:
+        self.Log('(suppressed) [ERROR] ' + text, 3)
+      self.num_errors = self.num_errors + 1
+  #end def Error
+
+  def Fatal(self, text):
+    """ Output an error and terminate the program. """
+    if text:
+      text = encoder.NarrowText(text, None)
+      print '[FATAL] ' + text
+    else:
+      print 'Fatal error.'
+    sys.exit(1)
+  #end def Fatal
+
+  def SetVerbose(self, level):
+    """ Sets the verbose level. """
+    try:
+      # Accept either an int or anything int() can parse (e.g. "2").
+      if type(level) != types.IntType:
+        level = int(level)
+      if (level >= 0) and (level <= 3):
+        self._verbose = level
+        return
+    except ValueError:
+      pass
+    self.Error('Verbose level (%s) must be between 0 and 3 inclusive.' % level)
+  #end def SetVerbose
+#end class Output
+output = Output()
+
+
+class URL(object):
+  """ URL is a smart structure grouping together the properties we
+  care about for a single web reference. """
+  __slots__ = 'loc', 'lastmod', 'changefreq', 'priority'
+
+  def __init__(self):
+    self.loc        = None                  # URL -- in Narrow characters
+    self.lastmod    = None                  # ISO8601 timestamp of last modify
+    self.changefreq = None                  # Text term for update frequency
+    self.priority   = None                  # Float between 0 and 1 (inc)
+  #end def __init__
+
+  def __cmp__(self, other):
+    # URLs order and compare by their location string alone.
+    if self.loc < other.loc:
+      return -1
+    if self.loc > other.loc:
+      return 1
+    return 0
+  #end def __cmp__
+
+  def TrySetAttribute(self, attribute, value):
+    """ Attempt to set the attribute to the value, with a pretty try
+    block around it.  """
+    if attribute == 'loc':
+      # Locations get escaped/canonicalized on the way in.
+      self.loc = self.Canonicalize(value)
+    else:
+      try:
+        setattr(self, attribute, value)
+      except AttributeError:
+        output.Warn('Unknown URL attribute: %s' % attribute)
+  #end def TrySetAttribute
+
+  def IsAbsolute(loc):
+    """ Decide if the URL is absolute or not """
+    if not loc:
+      return False
+    narrow = encoder.NarrowText(loc, None)
+    (scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
+    # Absolute means it carries both a scheme and a network location.
+    if (not scheme) or (not netloc):
+      return False
+    return True
+  #end def IsAbsolute
+  IsAbsolute = staticmethod(IsAbsolute)
+
+  def Canonicalize(loc):
+    """ Do encoding and canonicalization on a URL string """
+    if not loc:
+      return loc
+
+    # Let the encoder try to narrow it
+    narrow = encoder.NarrowText(loc, None)
+
+    # Escape components individually.  'unr' and 'sub' are extra characters
+    # each quote() call leaves unescaped, on top of the per-component ones.
+    (scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
+    unr    = '-._~'
+    sub    = '!$&\'()*+,;='
+    netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
+    path   = urllib.quote(path,   unr + sub + '%:@/')
+    query  = urllib.quote(query,  unr + sub + '%:@/?')
+    frag   = urllib.quote(frag,   unr + sub + '%:@/?')
+
+    # Try built-in IDNA encoding on the netloc
+    try:
+      (ignore, widenetloc, ignore, ignore, ignore) = urlparse.urlsplit(loc)
+      for c in widenetloc:
+        if c >= unichr(128):
+          netloc = widenetloc.encode(ENC_IDNA)
+          netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
+          break
+    except UnicodeError:
+      # urlsplit must have failed, based on implementation differences in the
+      # library.  There is not much we can do here, except ignore it.
+      pass
+    except LookupError:
+      output.Warn('An International Domain Name (IDN) is being used, but this '
+                  'version of Python does not have support for IDNA encoding. '
+                  ' (IDNA support was introduced in Python 2.3)  The encoding '
+                  'we have used instead is wrong and will probably not yield '
+                  'valid URLs.')
+    bad_netloc = False
+    if '%' in netloc:
+      bad_netloc = True
+
+    # Put it all back together
+    narrow = urlparse.urlunsplit((scheme, netloc, path, query, frag))
+
+    # I let '%' through.  Fix any that aren't pre-existing escapes.
+    HEXDIG = '0123456789abcdefABCDEF'
+    list   = narrow.split('%')
+    narrow = list[0]
+    del list[0]
+    for item in list:
+      if (len(item) >= 2) and (item[0] in HEXDIG) and (item[1] in HEXDIG):
+        narrow = narrow + '%' + item
+      else:
+        # A bare '%' not followed by two hex digits: escape it as '%25'.
+        narrow = narrow + '%25' + item
+
+    # Issue a warning if this is a bad URL
+    if bad_netloc:
+      output.Warn('Invalid characters in the host or domain portion of a URL: '
+                  + narrow)
+
+    return narrow
+  #end def Canonicalize
+  Canonicalize = staticmethod(Canonicalize)
+
+  def Validate(self, base_url, allow_fragment):
+    """ Verify the data in this URL is well-formed, and override if not. """
+    assert type(base_url) == types.StringType
+
+    # Test (and normalize) the ref
+    if not self.loc:
+      output.Warn('Empty URL')
+      return False
+    if allow_fragment:
+      self.loc = urlparse.urljoin(base_url, self.loc)
+    if not self.loc.startswith(base_url):
+      output.Warn('Discarded URL for not starting with the base_url: %s' %
+                  self.loc)
+      self.loc = None
+      return False
+
+    # Test the lastmod
+    if self.lastmod:
+      match = False
+      self.lastmod = self.lastmod.upper()
+      for pattern in LASTMOD_PATTERNS:
+        match = pattern.match(self.lastmod)
+        if match:
+          break
+      if not match:
+        output.Warn('Lastmod "%s" does not appear to be in ISO8601 format on '
+                    'URL: %s' % (self.lastmod, self.loc))
+        self.lastmod = None
+
+    # Test the changefreq
+    if self.changefreq:
+      match = False
+      self.changefreq = self.changefreq.lower()
+      for pattern in CHANGEFREQ_PATTERNS:
+        if self.changefreq == pattern:
+          match = True
+          break
+      if not match:
+        output.Warn('Changefreq "%s" is not a valid change frequency on URL '
+                    ': %s' % (self.changefreq, self.loc))
+        self.changefreq = None
+
+    # Test the priority
+    if self.priority:
+      priority = -1.0
+      try:
+        priority = float(self.priority)
+      except ValueError:
+        pass
+      if (priority < 0.0) or (priority > 1.0):
+        output.Warn('Priority "%s" is not a number between 0 and 1 inclusive '
+                    'on URL: %s' % (self.priority, self.loc))
+        self.priority = None
+
+    return True
+  #end def Validate
+
+  def MakeHash(self):
+    """ Provides a uniform way of hashing URLs """
+    if not self.loc:
+      return None
+    if self.loc.endswith('/'):
+      # Strip a trailing slash so "/foo" and "/foo/" hash identically.
+      return md5.new(self.loc[:-1]).digest()
+    return md5.new(self.loc).digest()
+  #end def MakeHash
+
+  def Log(self, prefix='URL', level=3):
+    """ Dump the contents, empty or not, to the log. """
+    out = prefix + ':'
+
+    for attribute in self.__slots__:
+      value = getattr(self, attribute)
+      if not value:
+        value = ''
+      out = out + ('  %s=[%s]' % (attribute, value))
+
+    output.Log('%s' % encoder.NarrowText(out, None), level)
+  #end def Log
+
+  def WriteXML(self, file):
+    """ Dump non-empty contents to the output file, in XML format. """
+    if not self.loc:
+      return
+    out = SITEURL_XML_PREFIX
+
+    for attribute in self.__slots__:
+      value = getattr(self, attribute)
+      if value:
+        if type(value) == types.UnicodeType:
+          value = encoder.NarrowText(value, None)
+        elif type(value) != types.StringType:
+          value = str(value)
+        # Escape XML special characters in the element text.
+        value = xml.sax.saxutils.escape(value)
+        out = out + ('  <%s>%s</%s>\n' % (attribute, value, attribute))
+
+    out = out + SITEURL_XML_SUFFIX
+    file.write(out)
+  #end def WriteXML
+#end class URL
+
+
+class Filter:
+  """
+  A filter on the stream of URLs we find.  A filter is, in essence,
+  a wildcard applied to the stream.  You can think of this as an
+  operator that returns a tri-state when given a URL:
+
+    True  -- this URL is to be included in the sitemap
+    None  -- this URL is undecided
+    False -- this URL is to be dropped from the sitemap
+  """
+
+  def __init__(self, attributes):
+    self._wildcard  = None                  # Pattern for wildcard match
+    self._regexp    = None                  # Pattern for regexp match
+    self._pass      = False                 # "Drop" filter vs. "Pass" filter
+
+    if not ValidateAttributes('FILTER', attributes,
+                              ('pattern', 'type', 'action')):
+      return
+
+    # Check error count on the way in
+    num_errors = output.num_errors
+
+    # Fetch the attributes
+    pattern = attributes.get('pattern')
+    type    = attributes.get('type', 'wildcard')
+    action  = attributes.get('action', 'drop')
+    if type:
+      type = type.lower()
+    if action:
+      action = action.lower()
+
+    # Verify the attributes
+    if not pattern:
+      output.Error('On a filter you must specify a "pattern" to match')
+    elif (not type) or ((type != 'wildcard') and (type != 'regexp')):
+      output.Error('On a filter you must specify either \'type="wildcard"\' '
+                   'or \'type="regexp"\'')
+    elif (action != 'pass') and (action != 'drop'):
+      output.Error('If you specify a filter action, it must be either '
+                   '\'action="http://sourceforge.net/pass"\' or \'action="http://sourceforge.net/drop"\'')
+
+    # Set the rule
+    if action == 'drop':
+      self._pass = False
+    elif action == 'pass':
+      self._pass = True
+
+    if type == 'wildcard':
+      self._wildcard = pattern
+    elif type == 'regexp':
+      try:
+        self._regexp = re.compile(pattern)
+      except re.error:
+        output.Error('Bad regular expression: %s' %  pattern)
+
+    # Log the final results iff we didn't add any errors
+    if num_errors == output.num_errors:
+      output.Log('Filter: %s any URL that matches %s "%s"' %
+                 (action, type, pattern), 2)
+  #end def __init__
+
+  def Apply(self, url):
+    """ Process the URL, as above. """
+    if (not url) or (not url.loc):
+      return None
+    
+    if self._wildcard:
+      if fnmatch.fnmatchcase(url.loc, self._wildcard):
+        return self._pass
+      return None
+
+    if self._regexp:
+      if self._regexp.search(url.loc):
+        return self._pass
+      return None
+
+    assert False # unreachable
+  #end def Apply
+#end class Filter
+
+
+class InputURL:
+  """
+  Each Input class knows how to yield a set of URLs from a data source.
+
+  This one handles a single URL, manually specified in the config file.
+  """
+
+  def __init__(self, attributes):
+    self._url = None                        # The lonely URL
+
+    if not ValidateAttributes('URL', attributes,
+                                ('href', 'lastmod', 'changefreq', 'priority')):
+      return
+    
+    url = URL()
+    for attr in attributes.keys():
+      if attr == 'href':
+        url.TrySetAttribute('loc', attributes[attr])
+      else:
+        url.TrySetAttribute(attr, attributes[attr])
+
+    if not url.loc:
+      output.Error('Url entries must have an href attribute.')
+      return
+    
+    self._url = url
+    output.Log('Input: From URL "%s"' % self._url.loc, 2)
+  #end def __init__
+
+  def ProduceURLs(self, consumer):
+    """ Produces URLs from our data source, hands them in to the consumer. """
+    if self._url:
+      consumer(self._url, True)
+  #end def ProduceURLs
+#end class InputURL
+
+
+class InputURLList:
+  """
+  Each Input class knows how to yield a set of URLs from a data source.
+
+  This one handles a text file with a list of URLs, one URL per line,
+  optionally followed by space-separated attribute=value pairs.
+  """
+
+  def __init__(self, attributes):
+    self._path      = None                  # The file path
+    self._encoding  = None                  # Encoding of that file
+
+    if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding')):
+      return
+
+    self._path      = attributes.get('path')
+    self._encoding  = attributes.get('encoding', ENC_UTF8)
+    if self._path:
+      self._path    = encoder.MaybeNarrowPath(self._path)
+      if os.path.isfile(self._path):
+        output.Log('Input: From URLLIST "%s"' % self._path, 2)
+      else:
+        output.Error('Can not locate file: %s' % self._path)
+        self._path = None
+    else:
+      output.Error('Urllist entries must have a "path" attribute.')
+  #end def __init__
+
+  def ProduceURLs(self, consumer):
+    """ Produces URLs from our data source, hands them in to the consumer. """
+
+    # Open the file
+    (frame, file) = OpenFileForRead(self._path, 'URLLIST')
+    if not file:
+      return
+
+    # Iterate lines
+    linenum = 0
+    for line in file.readlines():
+      linenum = linenum + 1
+
+      # Strip comments and empty lines
+      if self._encoding:
+        line = encoder.WidenText(line, self._encoding)
+      line = line.strip()
+      if (not line) or line[0] == '#':
+        continue
+
+      # Split the line on space; the first column is the URL itself
+      url = URL()
+      cols = line.split(' ')
+      for i in range(0,len(cols)):
+        cols[i] = cols[i].strip()
+      url.TrySetAttribute('loc', cols[0])
+
+      # Extract attributes from the other columns
+      for i in range(1,len(cols)):
+        if cols[i]:
+          try:
+            (attr_name, attr_val) = cols[i].split('=', 1)
+            url.TrySetAttribute(attr_name, attr_val)
+          except ValueError:
+            output.Warn('Line %d: Unable to parse attribute: %s' %
+                        (linenum, cols[i]))
+
+      # Pass it on
+      consumer(url, False)
+
+    file.close()
+    if frame:
+      frame.close()
+  #end def ProduceURLs
+#end class InputURLList
+
+
+class InputDirectory:
+  """
+  Each Input class knows how to yield a set of URLs from a data source.
+
+  This one handles a directory that acts as base for walking the filesystem.
+  """
+
+  def __init__(self, attributes, base_url):
+    self._path         = None               # The directory
+    self._url          = None               # The URL equivalent
+    self._default_file = None
+
+    if not ValidateAttributes('DIRECTORY', attributes, ('path', 'url',
+                                                           'default_file')):
+      return
+
+    # Prep the path -- it MUST end in a sep
+    path = attributes.get('path')
+    if not path:
+      output.Error('Directory entries must have both "path" and "url" '
+                  'attributes')
+      return
+    path = encoder.MaybeNarrowPath(path)
+    if not path.endswith(os.sep):
+      path = path + os.sep
+    if not os.path.isdir(path):
+      output.Error('Can not locate directory: %s' % path)
+      return
+
+    # Prep the URL -- it MUST end in a sep
+    url = attributes.get('url')
+    if not url:
+      output.Error('Directory entries must have both "path" and "url" '
+                  'attributes')
+      return
+    url = URL.Canonicalize(url)
+    if not url.endswith('/'):
+      url = url + '/'
+    if not url.startswith(base_url):
+      url = urlparse.urljoin(base_url, url)
+      if not url.startswith(base_url):
+        output.Error('The directory URL "%s" is not relative to the '
+                    'base_url: %s' % (url, base_url))
+        return
+
+    # Prep the default file -- it MUST be just a filename
+    file = attributes.get('default_file')
+    if file:
+      file = encoder.MaybeNarrowPath(file)
+      if os.sep in file:
+        output.Error('The default_file "%s" can not include path information.'
+                     % file)
+        file = None
+
+    self._path         = path
+    self._url          = url
+    self._default_file = file
+    if file:
+      output.Log('Input: From DIRECTORY "%s" (%s) with default file "%s"'
+                 % (path, url, file), 2)
+    else:
+      output.Log('Input: From DIRECTORY "%s" (%s) with no default file'
+                 % (path, url), 2)
+  #end def __init__
+
+  def ProduceURLs(self, consumer):
+    """ Produces URLs from our data source, hands them in to the consumer. """
+    if not self._path:
+      return
+
+    # Locals captured by the PerFile/PerDirectory closures below.
+    root_path = self._path
+    root_URL  = self._url
+    root_file = self._default_file
+
+    def PerFile(dirpath, name):
+      """
+      Called once per file.
+      Note that 'name' will occasionally be None -- for a directory itself
+      """
+      # Pull a timestamp
+      url           = URL()
+      isdir         = False
+      try:
+        if name:
+          path      = os.path.join(dirpath, name)
+        else:
+          path      = dirpath
+        isdir       = os.path.isdir(path)
+        time        = None
+        # A directory with a default file borrows that file's timestamp.
+        if isdir and root_file:
+          file      = os.path.join(path, root_file)
+          try:
+            time    = os.stat(file)[stat.ST_MTIME];
+          except OSError:
+            pass
+        if not time:
+          time      = os.stat(path)[stat.ST_MTIME];
+        url.lastmod = TimestampISO8601(time)
+      except OSError:
+        pass
+      except ValueError:
+        pass
+
+      # Build a URL from the path relative to the root, always using '/'
+      middle        = dirpath[len(root_path):]
+      if os.sep != '/':
+        middle = middle.replace(os.sep, '/')
+      if middle:
+        middle      = middle + '/'
+      if name:
+        middle      = middle + name
+        if isdir:
+          middle    = middle + '/'
+      url.TrySetAttribute('loc', root_URL + encoder.WidenText(middle, None))
+
+      # Suppress default files.  (All the way down here so we can log it.)
+      if name and (root_file == name):
+        url.Log(prefix='IGNORED (default file)', level=2)
+        return
+
+      consumer(url, False)
+    #end def PerFile
+
+    def PerDirectory(ignore, dirpath, namelist):
+      """
+      Called once per directory with a list of all the contained files/dirs.
+      """
+      ignore = ignore  # Avoid warnings of an unused parameter
+
+      if not dirpath.startswith(root_path):
+        output.Warn('Unable to decide what the root path is for directory: '
+                    '%s' % dirpath)
+        return
+
+      for name in namelist:
+        PerFile(dirpath, name)
+    #end def PerDirectory
+
+    output.Log('Walking DIRECTORY "%s"' % self._path, 1)
+    # Emit the root itself, then recurse (os.path.walk is the Python 2 API).
+    PerFile(self._path, None)
+    os.path.walk(self._path, PerDirectory, None)
+  #end def ProduceURLs
+#end class InputDirectory
+
+
+class InputAccessLog:
+  """
+  Each Input class knows how to yield a set of URLs from a data source.
+
+  This one handles access logs.  It's non-trivial in that we want to
+  auto-detect log files in the Common Logfile Format (as used by Apache,
+  for instance) and the Extended Log File Format (as used by IIS, for
+  instance).
+  """
+
+  def __init__(self, attributes):
+    self._path         = None               # The file path
+    self._encoding     = None               # Encoding of that file
+    self._is_elf       = False              # Extended Log File Format?
+    self._is_clf       = False              # Common Logfile Format?
+    # Column indices into an ELF record; -1 means "field not present".
+    self._elf_status   = -1                 # ELF field: '200'
+    self._elf_method   = -1                 # ELF field: 'HEAD'
+    self._elf_uri      = -1                 # ELF field: '/foo?bar=1'
+    self._elf_urifrag1 = -1                 # ELF field: '/foo'
+    self._elf_urifrag2 = -1                 # ELF field: 'bar=1'
+
+    if not ValidateAttributes('ACCESSLOG', attributes, ('path', 'encoding')):
+      return
+
+    self._path      = attributes.get('path')
+    self._encoding  = attributes.get('encoding', ENC_UTF8)
+    if self._path:
+      self._path    = encoder.MaybeNarrowPath(self._path)
+      if os.path.isfile(self._path):
+        output.Log('Input: From ACCESSLOG "%s"' % self._path, 2)
+      else:
+        output.Error('Can not locate file: %s' % self._path)
+        self._path = None
+    else:
+      output.Error('Accesslog entries must have a "path" attribute.')
+  #end def __init__
+
+  def RecognizeELFLine(self, line):
+    """ Recognize the Fields directive that heads an ELF file """
+    if not line.startswith('#Fields:'):
+      return False
+    fields = line.split(' ')
+    del fields[0]
+    # Record the column position of each field we care about.
+    for i in range(0, len(fields)):
+      field = fields[i].strip()
+      if field == 'sc-status':
+        self._elf_status   = i
+      elif field == 'cs-method':
+        self._elf_method   = i
+      elif field == 'cs-uri':
+        self._elf_uri      = i
+      elif field == 'cs-uri-stem':
+        self._elf_urifrag1 = i
+      elif field == 'cs-uri-query':
+        self._elf_urifrag2 = i
+    output.Log('Recognized an Extended Log File Format file.', 2)
+    return True
+  #end def RecognizeELFLine
+
+  def GetELFLine(self, line):
+    """ Fetch the requested URL from an ELF line """
+    fields = line.split(' ')
+    count  = len(fields)
+
+    # Verify status was Ok
+    if self._elf_status >= 0:
+      if self._elf_status >= count:
+        return None
+      if not fields[self._elf_status].strip() == '200':
+        return None
+
+    # Verify method was HEAD or GET
+    if self._elf_method >= 0:
+      if self._elf_method >= count:
+        return None
+      if not fields[self._elf_method].strip() in ('HEAD', 'GET'):
+        return None
+
+    # Pull the full URL if we can.  A '-' value marks an empty field.
+    if self._elf_uri >= 0:
+      if self._elf_uri >= count:
+        return None
+      url = fields[self._elf_uri].strip()
+      if url != '-':
+        return url
+
+    # Put together a fragmentary URL
+    if self._elf_urifrag1 >= 0:
+      if self._elf_urifrag1 >= count or self._elf_urifrag2 >= count:
+        return None
+      urlfrag1 = fields[self._elf_urifrag1].strip()
+      urlfrag2 = None
+      if self._elf_urifrag2 >= 0:
+        urlfrag2 = fields[self._elf_urifrag2]
+      if urlfrag1 and (urlfrag1 != '-'):
+        if urlfrag2 and (urlfrag2 != '-'):
+          urlfrag1 = urlfrag1 + '?' + urlfrag2
+        return urlfrag1
+
+    return None
+  #end def GetELFLine
+
+  def RecognizeCLFLine(self, line):
+    """ Try to tokenize a logfile line according to CLF pattern and see if
+    it works. """
+    match = ACCESSLOG_CLF_PATTERN.match(line)
+    recognize = match and (match.group(1) in ('HEAD', 'GET'))
+    if recognize:
+      output.Log('Recognized a Common Logfile Format file.', 2)
+    return recognize
+  #end def RecognizeCLFLine
+
+  def GetCLFLine(self, line):
+    """ Fetch the requested URL from a CLF line """
+    match = ACCESSLOG_CLF_PATTERN.match(line)
+    if match:
+      request = match.group(1)
+      if request in ('HEAD', 'GET'):
+        return match.group(2)
+    return None
+  #end def GetCLFLine
+
+  def ProduceURLs(self, consumer):
+    """ Produces URLs from our data source, hands them in to the consumer. """
+
+    # Open the file
+    (frame, file) = OpenFileForRead(self._path, 'ACCESSLOG')
+    if not file:
+      return
+
+    # Iterate lines
+    for line in file.readlines():
+      if self._encoding:
+        line = encoder.WidenText(line, self._encoding)
+      line = line.strip()
+
+      # If we don't know the format yet, try them both
+      if (not self._is_clf) and (not self._is_elf):
+        self._is_elf = self.RecognizeELFLine(line)
+        self._is_clf = self.RecognizeCLFLine(line)
+
+      # Digest the line
+      match = None
+      if self._is_elf:
+        match = self.GetELFLine(line)
+      elif self._is_clf:
+        match = self.GetCLFLine(line)
+      if not match:
+        continue
+
+      # Pass it on
+      url = URL()
+      url.TrySetAttribute('loc', match)
+      consumer(url, True)
+
+    file.close()
+    if frame:
+      frame.close()
+  #end def ProduceURLs
+#end class InputAccessLog
+
+
+class InputSitemap(xml.sax.handler.ContentHandler):
+
+  """
+  Each Input class knows how to yield a set of URLs from a data source.
+
+  This one handles Sitemap files and Sitemap index files.  For the sake
+  of simplicity in design (and simplicity in interfacing with the SAX
+  package), we do not handle these at the same time, recursively.  Instead
+  we read an index file completely and make a list of Sitemap files, then
+  go back and process each Sitemap.
+  """
+
+  class _ContextBase(object):
+
+    """Base class for context handlers in our SAX processing.  A context
+    handler is a class that is responsible for understanding one level of
+    depth in the XML schema.  The class knows what sub-tags are allowed,
+    and doing any processing specific for the tag we're in.
+
+    This base class is the API filled in by specific context handlers,
+    all defined below.
+    """
+
+    def __init__(self, subtags):
+      """Initialize with a sequence of the sub-tags that would be valid in
+      this context."""
+      self._allowed_tags = subtags          # Sequence of sub-tags we can have
+      self._last_tag     = None             # Most recent seen sub-tag
+    #end def __init__
+
+    def AcceptTag(self, tag):
+      """Returns True iff opening a sub-tag is valid in this context."""
+      valid = tag in self._allowed_tags
+      # _last_tag records the most recently accepted sub-tag, or None when
+      # the tag was rejected.
+      if valid:
+        self._last_tag = tag
+      else:
+        self._last_tag = None
+      return valid
+    #end def AcceptTag
+
+    def AcceptText(self, text):
+      """Returns True iff a blurb of text is valid in this context.
+      By default a context holds no text; subclasses override as needed."""
+      return False
+    #end def AcceptText
+
+    def Open(self):
+      """The context is opening.  Do initialization."""
+      pass
+    #end def Open
+
+    def Close(self):
+      """The context is closing.  Return our result, if any."""
+      pass
+    #end def Close
+
+    def Return(self, result):
+      """We're returning to this context after handling a sub-tag.  This
+      method is called with the result data from the sub-tag that just
+      closed.  Here in _ContextBase, if we ever see a result it means
+      the derived child class forgot to override this method."""
+      if result:
+        raise NotImplementedError
+    #end def Return
+  #end class _ContextBase
+
+  class _ContextUrlSet(_ContextBase):
+    
+    """Context handler for the document node in a Sitemap."""
+    
+    def __init__(self):
+      InputSitemap._ContextBase.__init__(self, ('url',))
+    #end def __init__
+  #end class _ContextUrlSet
+
+  class _ContextUrl(_ContextBase):
+    
+    """Context handler for a URL node in a Sitemap."""
+    
+    def __init__(self, consumer):
+      """Initialize this context handler with the callable consumer that
+      wants our URLs."""
+      InputSitemap._ContextBase.__init__(self, URL.__slots__)
+      self._url          = None            # The URL object we're building
+      self._consumer     = consumer        # Who wants to consume it
+    #end def __init__
+
+    def Open(self):
+      """Initialize the URL."""
+      assert not self._url
+      self._url = URL()
+    #end def Open
+
+    def Close(self):
+      """Pass the URL to the consumer and reset it to None."""
+      assert self._url
+      self._consumer(self._url, False)
+      self._url = None
+    #end def Close
+  
+    def Return(self, result):
+      """A value context has closed, absorb the data it gave us."""
+      assert self._url
+      if result:
+        self._url.TrySetAttribute(self._last_tag, result)
+    #end def Return
+  #end class _ContextUrl
+
+  class _ContextSitemapIndex(_ContextBase):
+    
+    """Context handler for the document node in an index file."""
+    
+    def __init__(self):
+      InputSitemap._ContextBase.__init__(self, ('sitemap',))
+      self._loclist = []                    # List of accumulated Sitemap URLs
+    #end def __init__
+
+    def Open(self):
+      """Just a quick verify of state."""
+      assert not self._loclist
+    #end def Open
+
+    def Close(self):
+      """Return our list of accumulated URLs."""
+      if self._loclist:
+        temp = self._loclist
+        self._loclist = []
+        return temp
+    #end def Close
+  
+    def Return(self, result):
+      """Getting a new loc URL, add it to the collection."""
+      if result:
+        self._loclist.append(result)
+    #end def Return
+  #end class _ContextSitemapIndex
+
+  class _ContextSitemap(_ContextBase):
+    
+    """Context handler for a Sitemap entry in an index file."""
+    
+    def __init__(self):
+      InputSitemap._ContextBase.__init__(self, ('loc', 'lastmod'))
+      self._loc = None                      # The URL to the Sitemap
+    #end def __init__
+
+    def Open(self):
+      """Just a quick verify of state."""
+      assert not self._loc
+    #end def Open
+
+    def Close(self):
+      """Return our URL to our parent."""
+      if self._loc:
+        temp = self._loc
+        self._loc = None
+        return temp
+      output.Warn('In the Sitemap index file, a "sitemap" entry had no "loc".')
+    #end def Close
+
+    def Return(self, result):
+      """A value has closed.  If it was a 'loc', absorb it."""
+      if result and (self._last_tag == 'loc'):
+        self._loc = result
+    #end def Return
+  #end class _ContextSitemap
+
+  class _ContextValue(_ContextBase):
+    
+    """Context handler for a single value.  We return just the value.  The
+    higher level context has to remember what tag led into us."""
+    
+    def __init__(self):
+      InputSitemap._ContextBase.__init__(self, ())
+      self._text        = None
+    #end def __init__
+
+    def AcceptText(self, text):
+      """Allow all text, adding it to our buffer."""
+      if self._text:
+        self._text = self._text + text
+      else:
+        self._text = text
+      return True
+    #end def AcceptText
+
+    def Open(self):
+      """Initialize our buffer."""
+      self._text = None
+    #end def Open
+
+    def Close(self):
+      """Return what's in our buffer."""
+      text = self._text
+      self._text = None
+      if text:
+        text = text.strip()
+      return text
+    #end def Close
+  #end class _ContextValue
+
+  def __init__(self, attributes):
+    """Initialize with a dictionary of attributes from our entry in the
+    config file."""
+    xml.sax.handler.ContentHandler.__init__(self)
+    self._pathlist      = None              # A list of files
+    self._current       = -1                # Current context in _contexts
+    self._contexts      = None              # The stack of contexts we allow
+    self._contexts_idx  = None              # ...contexts for index files
+    self._contexts_stm  = None              # ...contexts for Sitemap files
+
+    if not ValidateAttributes('SITEMAP', attributes, ['path']):
+      return
+    
+    # Init the first file path
+    path = attributes.get('path')
+    if path:
+      path = encoder.MaybeNarrowPath(path)
+      if os.path.isfile(path):
+        output.Log('Input: From SITEMAP "%s"' % path, 2)
+        self._pathlist = [path]
+      else:
+        output.Error('Can not locate file "%s"' % path)
+    else:
+      output.Error('Sitemap entries must have a "path" attribute.')
+  #end def __init__
+
+  def ProduceURLs(self, consumer):
+    """In general: Produces URLs from our data source, hand them to the
+    callable consumer.
+
+    In specific: Iterate over our list of paths and delegate the actual
+    processing to helper methods.  This is a complexity no other data source
+    needs to suffer.  We are unique in that we can have files that tell us
+    to bring in other files.
+
+    Note the decision to allow an index file or not is made in this method.
+    If we call our parser with (self._contexts == None) the parser will
+    grab whichever context stack can handle the file.  IE: index is allowed.
+    If instead we set (self._contexts = ...) before parsing, the parser
+    will only use the stack we specify.  IE: index not allowed.
+    """
+    # Set up two stacks of contexts
+    self._contexts_idx = [InputSitemap._ContextSitemapIndex(),
+                          InputSitemap._ContextSitemap(),
+                          InputSitemap._ContextValue()]
+    
+    self._contexts_stm = [InputSitemap._ContextUrlSet(),
+                          InputSitemap._ContextUrl(consumer),
+                          InputSitemap._ContextValue()]
+
+    # Process the first file
+    assert self._pathlist
+    path = self._pathlist[0]
+    self._contexts = None                # We allow an index file here
+    self._ProcessFile(path)
+
+    # Iterate over remaining files
+    self._contexts = self._contexts_stm  # No index files allowed
+    for path in self._pathlist[1:]:
+      self._ProcessFile(path)
+  #end def ProduceURLs
+
+  def _ProcessFile(self, path):
+    """Do per-file reading/parsing/consuming for the file path passed in."""
+    assert path
+    
+    # Open our file
+    (frame, file) = OpenFileForRead(path, 'SITEMAP')
+    if not file:
+      return
+
+    # Rev up the SAX engine
+    try:
+      self._current = -1
+      xml.sax.parse(file, self)
+    except SchemaError:
+      output.Error('An error in file "%s" made us abort reading the Sitemap.'
+                   % path)
+    except IOError:
+      output.Error('Cannot read from file "%s"' % path)
+    except xml.sax._exceptions.SAXParseException, e:
+      output.Error('XML error in the file "%s" (line %d, column %d): %s' %
+                   (path, e._linenum, e._colnum, e.getMessage()))
+
+    # Clean up
+    file.close()
+    if frame:
+      frame.close()
+  #end def _ProcessFile
+
+  def _MungeLocationListIntoFiles(self, urllist):
+    """Given a list of URLs, munge them into our self._pathlist property.
+    We do this by assuming all the files live in the same directory as
+    the first file in the existing pathlist.  That is, we assume a
+    Sitemap index points to Sitemaps only in the same directory.  This
+    is not true in general, but will be true for any output produced
+    by this script.
+    """
+    assert self._pathlist
+    path = self._pathlist[0]
+    path = os.path.normpath(path)
+    dir  = os.path.dirname(path)
+    wide = False
+    if type(path) == types.UnicodeType:
+      wide = True
+
+    for url in urllist:
+      url = URL.Canonicalize(url)
+      output.Log('Index points to Sitemap file at: %s' % url, 2)
+      (scheme, netloc, path, query, frag) = urlparse.urlsplit(url)
+      file = os.path.basename(path)
+      file = urllib.unquote(file)
+      if wide:
+        file = encoder.WidenText(file)
+      if dir:
+        file = dir + os.sep + file
+      if file:
+        self._pathlist.append(file)
+        output.Log('Will attempt to read Sitemap file: %s' % file, 1)
+  #end def _MungeLocationListIntoFiles
+
+  def startElement(self, tag, attributes):
+    """SAX processing, called per node in the config stream.
+    As long as the new tag is legal in our current context, this
+    becomes an Open call on one context deeper.
+    """
+    # If this is the document node, we may have to look for a context stack
+    if (self._current < 0) and not self._contexts:
+      assert self._contexts_idx and self._contexts_stm
+      if tag == 'urlset':
+        self._contexts = self._contexts_stm
+      elif tag == 'sitemapindex':
+        self._contexts = self._contexts_idx
+        output.Log('File is a Sitemap index.', 2)
+      else:
+        output.Error('The document appears to be neither a Sitemap nor a '
+                     'Sitemap index.')
+        raise SchemaError
+
+    # Display a kinder error on a common mistake
+    if (self._current < 0) and (self._contexts == self._contexts_stm) and (
+      tag == 'sitemapindex'):
+      output.Error('A Sitemap index can not refer to another Sitemap index.')
+      raise SchemaError
+
+    # Verify no unexpected attributes
+    if attributes:
+      text = ''
+      for attr in attributes.keys():
+        # The document node will probably have namespaces
+        if self._current < 0:
+          if attr.find('xmlns') >= 0:
+            continue
+          if attr.find('xsi') >= 0:
+            continue
+        if text:
+          text = text + ', '
+        text = text + attr
+      if text:
+        output.Warn('Did not expect any attributes on any tag, instead tag '
+                     '"%s" had attributes: %s' % (tag, text))
+
+    # Switch contexts
+    if (self._current < 0) or (self._contexts[self._current].AcceptTag(tag)):
+      self._current = self._current + 1
+      assert self._current < len(self._contexts)
+      self._contexts[self._current].Open()
+    else:
+      output.Error('Can not accept tag "%s" where it appears.' % tag)
+      raise SchemaError
+  #end def startElement
+
+  def endElement(self, tag):
+    """SAX processing, called per node in the config stream.
+    This becomes a call to Close on one context followed by a call
+    to Return on the previous.
+    """
+    tag = tag  # Avoid warning on unused argument
+    assert self._current >= 0
+    retval = self._contexts[self._current].Close()
+    self._current = self._current - 1
+    if self._current >= 0:
+      self._contexts[self._current].Return(retval)
+    elif retval and (self._contexts == self._contexts_idx):
+      self._MungeLocationListIntoFiles(retval)
+  #end def endElement
+
+  def characters(self, text):
+    """SAX processing, called when text values are read.  Important to
+    note that one single text value may be split across multiple calls
+    of this method.
+    """
+    if (self._current < 0) or (
+      not self._contexts[self._current].AcceptText(text)):
+      if text.strip():
+        output.Error('Can not accept text "%s" where it appears.' % text)
+        raise SchemaError
+  #end def characters
+#end class InputSitemap
+
+
class FilePathGenerator:
  """
  Generates the filenames of a numbered series, on demand.
  Any iteration number may be requested at any time; there is no
  requirement to go in order.

  Example of iterations for '/path/foo.xml.gz':
    0           --> /path/foo.xml.gz
    1           --> /path/foo1.xml.gz
    2           --> /path/foo2.xml.gz
    _index.xml  --> /path/foo_index.xml
  """

  def __init__(self):
    self.is_gzip     = False                 # Whether output is GZIP

    self._path       = None                  # Directory part: '/path/'
    self._prefix     = None                  # Stem part:      'foo'
    self._suffix     = None                  # Extension part: '.xml.gz'
  #end def __init__

  def Preload(self, path):
    """ Splits up a path into forms ready for recombination. """
    path = encoder.MaybeNarrowPath(path)

    # Reduce to just the base file name
    path = os.path.normpath(path)
    base = os.path.basename(path).lower()
    if not base:
      output.Error('Couldn\'t parse the file path: %s' % path)
      return False

    # Find which supported extension the name carries
    lensuffix = 0
    for candidate in ['.xml', '.xml.gz', '.gz']:
      if base.endswith(candidate):
        suffix = candidate
        lensuffix = len(candidate)
        break
    if not lensuffix:
      output.Error('The path "%s" doesn\'t end in a supported file '
                   'extension.' % path)
      return False
    self.is_gzip = suffix.endswith('.gz')

    # Slice the original path into directory / stem / extension
    lenbase = len(base)
    lenpath = len(path)
    self._path   = path[:lenpath-lenbase]
    self._prefix = path[lenpath-lenbase:lenpath-lensuffix]
    self._suffix = path[lenpath-lensuffix:]

    return True
  #end def Preload

  def GeneratePath(self, instance):
    """ Generates the iterations, as described above. """
    stem = self._path + self._prefix
    if type(instance) == types.IntType:
      if not instance:
        return stem + self._suffix
      return '%s%d%s' % (stem, instance, self._suffix)
    return stem + instance
  #end def GeneratePath

  def GenerateURL(self, instance, root_url):
    """ Generates iterations, but as a URL instead of a path. """
    stem = root_url + self._prefix
    if type(instance) != types.IntType:
      result = stem + instance
    elif instance:
      result = '%s%d%s' % (stem, instance, self._suffix)
    else:
      result = stem + self._suffix
    return URL.Canonicalize(result)
  #end def GenerateURL

  def GenerateWildURL(self, root_url):
    """ Generates a wildcard that should match all our iterations """
    head = URL.Canonicalize(root_url + self._prefix)
    full = URL.Canonicalize(head + self._suffix)
    return head + '*' + full[len(head):]
  #end def GenerateWildURL
#end class FilePathGenerator
+
+
class PerURLStatistics:
  """ Keep track of some simple per-URL statistics, like file extension. """

  def __init__(self):
    self._extensions  = {}                  # Maps extension key -> count seen
  #end def __init__

  def _Count(self, key):
    """ Bump the counter for one extension key. """
    # dict.get() replaces the deprecated dict.has_key() pattern and
    # collapses the three copy-pasted if/else increment blocks; it is
    # valid on both Python 2 and Python 3.
    self._extensions[key] = self._extensions.get(key, 0) + 1
  #end def _Count

  def Consume(self, url):
    """ Log some stats for the URL.  At the moment, that means extension. """
    if url and url.loc:
      (scheme, netloc, path, query, frag) = urlparse.urlsplit(url.loc)
      if not path:
        return

      # Recognize directories
      if path.endswith('/'):
        self._Count('/')
        return

      # Strip to a filename
      i = path.rfind('/')
      if i >= 0:
        assert i < len(path)
        path = path[i:]

      # Find extension
      i = path.rfind('.')
      if i > 0:
        assert i < len(path)
        self._Count(path[i:].lower())
      else:
        self._Count('(no extension)')
  #end def Consume

  def Log(self):
    """ Dump out stats to the output. """
    if len(self._extensions):
      output.Log('Count of file extensions on URLs:', 1)
      # sorted() avoids shadowing the built-in "set" and works whether
      # keys() yields a list (Python 2) or a view (Python 3).
      for ext in sorted(self._extensions.keys()):
        output.Log(' %7d  %s' % (self._extensions[ext], ext), 1)
  #end def Log
#end class PerURLStatistics
+
+class Sitemap(xml.sax.handler.ContentHandler):
+  """
+  This is the big workhorse class that processes your inputs and spits
+  out sitemap files.  It is built as a SAX handler for set up purposes.
+  That is, it processes an XML stream to bring itself up.
+  """
+
+  def __init__(self, suppress_notify):
+    xml.sax.handler.ContentHandler.__init__(self)
+    self._filters      = []                  # Filter objects
+    self._inputs       = []                  # Input objects
+    self._urls         = {}                  # Maps URLs to count of dups
+    self._set          = []                  # Current set of URLs
+    self._filegen      = None                # Path generator for output files
+    self._wildurl1     = None                # Sitemap URLs to filter out
+    self._wildurl2     = None                # Sitemap URLs to filter out
+    self._sitemaps     = 0                   # Number of output files
+    # We init _dup_max to 2 so the default priority is 0.5 instead of 1.0
+    self._dup_max      = 2                   # Max number of duplicate URLs
+    self._stat         = PerURLStatistics()  # Some simple stats
+    self._in_site      = False               # SAX: are we in a Site node?
+    self._in_Site_ever = False               # SAX: were we ever in a Site?
+
+    self._default_enc  = None                # Best encoding to try on URLs
+    self._base_url     = None                # Prefix to all valid URLs
+    self._store_into   = None                # Output filepath
+    self._suppress     = suppress_notify     # Suppress notify of servers
+  #end def __init__
+
  def ValidateBasicConfig(self):
    """ Verifies (and cleans up) the basic user-configurable options.

    Returns True when base_url and store_into are usable, False after
    reporting an error through output otherwise.  Side effects on
    success: registers the user encoding, canonicalizes _base_url,
    builds _filegen, and precomputes the two wildcard URL patterns.
    Each validation step only runs if all earlier steps passed.
    """
    all_good = True

    if self._default_enc:
      encoder.SetUserEncoding(self._default_enc)

    # Canonicalize the base_url
    if all_good and not self._base_url:
      output.Error('A site needs a "base_url" attribute.')
      all_good = False
    if all_good and not URL.IsAbsolute(self._base_url):
        output.Error('The "base_url" must be absolute, not relative: %s' %
                     self._base_url)
        all_good = False
    if all_good:
      self._base_url = URL.Canonicalize(self._base_url)
      if not self._base_url.endswith('/'):
        self._base_url = self._base_url + '/'
      output.Log('BaseURL is set to: %s' % self._base_url, 2)

    # Load store_into into a generator
    if all_good:
      if self._store_into:
        self._filegen = FilePathGenerator()
        if not self._filegen.Preload(self._store_into):
          all_good = False
      else:
        output.Error('A site needs a "store_into" attribute.')
        all_good = False

    # Ask the generator for patterns on what its output will look like
    if all_good:
      self._wildurl1 = self._filegen.GenerateWildURL(self._base_url)
      self._wildurl2 = self._filegen.GenerateURL(SITEINDEX_SUFFIX,
                                                 self._base_url)

    # Unify various forms of False
    # (config values arrive as strings, so '0' and 'false' mean disabled;
    # types.StringType/UnicodeType are Python 2-only names)
    if all_good:
      if self._suppress:
        if (type(self._suppress) == types.StringType) or (type(self._suppress)
                                 == types.UnicodeType):
          if (self._suppress == '0') or (self._suppress.lower() == 'false'):
            self._suppress = False

    # Done
    if not all_good:
      output.Log('See "example_config.xml" for more information.', 0)
    return all_good
  #end def ValidateBasicConfig
+
+  def Generate(self):
+    """ Run over all the Inputs and ask them to Produce """
+    # Run the inputs
+    for input in self._inputs:
+      input.ProduceURLs(self.ConsumeURL)
+
+    # Do last flushes
+    if len(self._set):
+      self.FlushSet()
+    if not self._sitemaps:
+      output.Warn('No URLs were recorded, writing ...
 
[truncated message content] | 
| 
      
      
      From: <rom...@us...> - 2010-04-05 09:08:12
      
     | 
| Revision: 1834
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1834&view=rev
Author:   roman_yakovenko
Date:     2010-04-05 09:08:06 +0000 (Mon, 05 Apr 2010)
Log Message:
-----------
adding disqus
Modified Paths:
--------------
    sphinx/__templates_www/layout.html
Added Paths:
-----------
    sphinx/__templates_www/disqus_any.html
    sphinx/__templates_www/disqus_before_body_ends.html
Added: sphinx/__templates_www/disqus_any.html
===================================================================
--- sphinx/__templates_www/disqus_any.html	                        (rev 0)
+++ sphinx/__templates_www/disqus_any.html	2010-04-05 09:08:06 UTC (rev 1834)
@@ -0,0 +1,14 @@
+<div id="disqus_thread"></div>
+<script type="text/javascript">
+  /**
+    * var disqus_identifier; [Optional but recommended: Define a unique identifier (e.g. post id or slug) for this thread] 
+    */
+  (function() {
+   var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true;
+   dsq.src = 'http://ccpythonlanguagebinding.disqus.com/embed.js';
+   (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
+  })();
+</script>
+<noscript>Please enable JavaScript to view the <a href="http://sourceforge.net/%3Ca%20href="http://disqus.com/?ref_noscript=ccpythonlanguagebinding"%3Ecomments" rel="nofollow">http://disqus.com/?ref_noscript=ccpythonlanguagebinding">comments powered by Disqus.</a></noscript>
+<a href="http://sourceforge.net/%3Ca%20href="http://disqus.com/" rel="nofollow">http://disqus.com" class="dsq-brlink">blog comments powered by <span class="logo-disqus">Disqus</span></a>
+
Added: sphinx/__templates_www/disqus_before_body_ends.html
===================================================================
--- sphinx/__templates_www/disqus_before_body_ends.html	                        (rev 0)
+++ sphinx/__templates_www/disqus_before_body_ends.html	2010-04-05 09:08:06 UTC (rev 1834)
@@ -0,0 +1,14 @@
+<script type="text/javascript">
+//<![CDATA[
+(function() {
+	var links = document.getElementsByTagName('a');
+	var query = '?';
+	for(var i = 0; i < links.length; i++) {
+	if(links[i].href.indexOf('#disqus_thread') >= 0) {
+		query += 'url' + i + '=' + encodeURIComponent(links[i].href) + '&';
+	}
+	}
+	document.write('<script charset="utf-8" type="text/javascript" src="http://sourceforge.net/%3Ca%20href="http://disqus.com/forums/ccpythonlanguagebinding/get_num_replies.js'" rel="nofollow">http://disqus.com/forums/ccpythonlanguagebinding/get_num_replies.js' + query + '"></' + 'script>');
+})();
+//]]>
+</script>
Modified: sphinx/__templates_www/layout.html
===================================================================
--- sphinx/__templates_www/layout.html	2010-04-03 09:59:41 UTC (rev 1833)
+++ sphinx/__templates_www/layout.html	2010-04-05 09:08:06 UTC (rev 1834)
@@ -26,7 +26,9 @@
         <div class="bodywrapper">
       {%- endif %}{% endif %}
           <div class="body">
-            {% block body %} {% endblock %} 
+            {% block body %} {% endblock %}
+            {% include "disqus_any.html" %}
+            {% include "disqus_before_body_ends.html" %}
             <hr width=60%>
             {% include "bottom_ad_unit.html" %}
             {% include "in_text_ads.html" %}
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 | 
| 
      
      
      From: <rom...@us...> - 2010-04-03 09:59:47
      
     | 
| Revision: 1833
          http://pygccxml.svn.sourceforge.net/pygccxml/?rev=1833&view=rev
Author:   roman_yakovenko
Date:     2010-04-03 09:59:41 +0000 (Sat, 03 Apr 2010)
Log Message:
-----------
rolling back protected variables changes
Modified Paths:
--------------
    pyplusplus_dev/pyplusplus/code_creators/member_variable.py
    pyplusplus_dev/pyplusplus/decl_wrappers/class_wrapper.py
    pyplusplus_dev/pyplusplus/decl_wrappers/variable_wrapper.py
    pyplusplus_dev/pyplusplus/messages/warnings_.py
    pyplusplus_dev/unittests/test_all.py
Removed Paths:
-------------
    pyplusplus_dev/unittests/data/member_variables_protected_to_be_exported.cpp
    pyplusplus_dev/unittests/data/member_variables_protected_to_be_exported.hpp
    pyplusplus_dev/unittests/member_variables_protected_tester.py
Modified: pyplusplus_dev/pyplusplus/code_creators/member_variable.py
===================================================================
--- pyplusplus_dev/pyplusplus/code_creators/member_variable.py	2010-03-25 21:12:47 UTC (rev 1832)
+++ pyplusplus_dev/pyplusplus/code_creators/member_variable.py	2010-04-03 09:59:41 UTC (rev 1833)
@@ -437,39 +437,8 @@
     def __init__(self, variable ):
         code_creator.code_creator_t.__init__( self )
         declaration_based.declaration_based_t.__init__( self, declaration=variable)
-        self.__is_protected = bool( variable.access_type == declarations.ACCESS_TYPES.PROTECTED )
 
     @property
-    def public_accessor_name(self):
-        return "pyplusplus_%s_accessor" % self.declaration.name
-
-    def generate_public_accessor( self ):
-        tmpl = os.linesep.join([
-            "%(static)s%(const_item_type)s%(item_type)s* %(accessor_name)s()%(const_function)s{"
-          , "    return %(name)s;"
-          , "}"
-        ])
-        constness = ''
-        const_function = ''
-        if declarations.is_const( self.declaration.type ):
-            constness = 'const '
-            const_function = ' const'
-
-        static = ''
-        if self.declaration.type_qualifiers.has_static:
-            static = 'static '
-            const_function = ''
-
-        return tmpl % {
-                'static' : static
-              , 'const_item_type' : constness
-              , 'item_type' : declarations.array_item_type( self.declaration.type ).decl_string
-              , 'accessor_name': self.public_accessor_name
-              , 'name' : self.declaration.name
-              , 'const_function' : const_function
-        }
-
-    @property
     def wrapper_type( self ):
         tmpl = "%(namespace)s::%(constness)sarray_1_t< %(item_type)s, %(array_size)d>"
 
@@ -486,10 +455,7 @@
 
     @property
     def wrapped_class_type( self ):
-        if self.__is_protected:
-            wrapped_cls_type = declarations.dummy_type_t( self.parent.full_name )
-        else:
-            wrapped_cls_type = declarations.declarated_t( self.declaration.parent )
+        wrapped_cls_type = declarations.declarated_t( self.declaration.parent )
         if declarations.is_const( self.declaration.type ):
             wrapped_cls_type = declarations.const_t( wrapped_cls_type )
         return declarations.reference_t( wrapped_cls_type )
@@ -511,40 +477,26 @@
     def wrapper_creator_full_name(self):
         return '::'.join( [self.parent.full_name, self.wrapper_creator_name] )
 
-    def _create_impl( self ):
-        result = []
-        if self.__is_protected:
-            result.append( self.generate_public_accessor() )
-            result.append( '' )
-
+    def _create_impl( self ):        
         tmpl = [ "static %(wrapper_type)s" ]
         if self.declaration.type_qualifiers.has_static:
             tmpl.append( "%(wrapper_creator_name)s(){" )
-            if self.__is_protected:
-                tmpl.append( self.indent( "return %(wrapper_type)s( %(wrapped_class_type_only)s::%(public_accessor_name)s() );" ) )
-            else:
-                tmpl.append( self.indent( "return %(wrapper_type)s( %(parent_class_type)s::%(mem_var_ref)s );" ) )
+            tmpl.append( self.indent( "return %(wrapper_type)s( %(parent_class_type)s::%(mem_var_ref)s );" ) )
         else:
             tmpl.append( "%(wrapper_creator_name)s( %(wrapped_class_type)s inst ){" )
-            if self.__is_protected:
-                tmpl.append( self.indent( "return %(wrapper_type)s( inst.%(public_accessor_name)s() );" ) )
-            else:
-                tmpl.append( self.indent( "return %(wrapper_type)s( inst.%(mem_var_ref)s );" ) )
-
+            tmpl.append( self.indent( "return %(wrapper_type)s( inst.%(mem_var_ref)s );" ) )
         tmpl.append( "}" )
-
+        
         tmpl = os.linesep.join( tmpl )
+        
+        return tmpl % {
+                'wrapper_type' : self.wrapper_type.decl_string
+              , 'parent_class_type' : self.parent.declaration.partial_decl_string
+              , 'wrapper_creator_name' : self.wrapper_creator_name
+              , 'wrapped_class_type' : self.wrapped_class_type.decl_string
+              , 'mem_var_ref' : self.declaration.name
+            }
 
-        result.append( tmpl % { 'wrapper_type' : self.wrapper_type.decl_string
-                                , 'parent_class_type' : self.parent.declaration.partial_decl_string
-                                , 'wrapper_creator_name' : self.wrapper_creator_name
-                                , 'wrapped_class_type' : self.wrapped_class_type.decl_string
-                                , 'wrapped_class_type_only' : self.parent.full_name
-                                , 'mem_var_ref' : self.declaration.name
-                                , 'public_accessor_name' : self.public_accessor_name
-                        } )
-        return os.linesep.join( result )
-
     def _get_system_files_impl( self ):
         return [code_repository.array_1.file_name]
 
@@ -627,17 +579,13 @@
     def __init__(self, variable ):
         code_creator.code_creator_t.__init__( self )
         declaration_based.declaration_based_t.__init__( self, declaration=variable)
-        self.__is_protected = bool( variable.access_type == declarations.ACCESS_TYPES.PROTECTED )
 
     def _get_getter_full_name(self):
         return self.parent.full_name + '::' + 'get_' + self.declaration.name
     getter_full_name = property( _get_getter_full_name )
 
     def _get_class_inst_type( self ):
-        if self.__is_protected:
-            return declarations.dummy_type_t( self.parent.full_name )
-        else:
-            return declarations.declarated_t( self.declaration.parent )
+        return declarations.declarated_t( self.declaration.parent )
 
     def _get_exported_var_type( self ):
         type_ = declarations.remove_reference( self.declaration.type )
@@ -689,7 +637,7 @@
 
     def _create_impl(self):
         answer = []
-        cls_type = algorithm.create_identifier( self, self._get_class_inst_type().decl_string )
+        cls_type = algorithm.create_identifier( self, self.declaration.parent.decl_string )
 
         substitutions = dict( type=self._get_exported_var_type().decl_string
                               , class_type=cls_type
Modified: pyplusplus_dev/pyplusplus/decl_wrappers/class_wrapper.py
===================================================================
--- pyplusplus_dev/pyplusplus/decl_wrappers/class_wrapper.py	2010-03-25 21:12:47 UTC (rev 1832)
+++ pyplusplus_dev/pyplusplus/decl_wrappers/class_wrapper.py	2010-04-03 09:59:41 UTC (rev 1833)
@@ -429,8 +429,6 @@
         for member in self.protected_members:
             if isinstance( member, declarations.calldef_t ):
                 members.append( member )
-            elif isinstance( member, declarations.variable_t ) and variable_wrapper.variable_t.EXPOSE_PROTECTED_VARIABLES:
-                members.append( member )
             else:
                 pass
 
Modified: pyplusplus_dev/pyplusplus/decl_wrappers/variable_wrapper.py
===================================================================
--- pyplusplus_dev/pyplusplus/decl_wrappers/variable_wrapper.py	2010-03-25 21:12:47 UTC (rev 1832)
+++ pyplusplus_dev/pyplusplus/decl_wrappers/variable_wrapper.py	2010-04-03 09:59:41 UTC (rev 1833)
@@ -15,8 +15,6 @@
 class variable_t(decl_wrapper.decl_wrapper_t, declarations.variable_t):
     """defines a set of properties, that will instruct `Py++` how to expose the variable"""
 
-    EXPOSE_PROTECTED_VARIABLES = False
-
     def __init__(self, *arguments, **keywords):
         declarations.variable_t.__init__(self, *arguments, **keywords )
         decl_wrapper.decl_wrapper_t.__init__( self )
@@ -216,11 +214,7 @@
             #    return messages.W1061 % ( str( self ), str( cls ) )
         if isinstance( self.parent, declarations.class_t ):
             if self.access_type != declarations.ACCESS_TYPES.PUBLIC:
-                if self.access_type == declarations.ACCESS_TYPES.PRIVATE:
-                    return messages.W1039
-                else: #protected
-                    if not self.EXPOSE_PROTECTED_VARIABLES:
-                        return messages.W1039
+                return messages.W1039
         if declarations.is_array( type_ ):
             item_type = declarations.array_item_type( type_ )
             if declarations.is_pointer( item_type ):
@@ -243,6 +237,4 @@
             explanation.append( messages.W1026 % self.name )
         if declarations.is_array( self.type ):
             explanation.append( messages.W1027 % self.name)
-        if self.access_type == declarations.ACCESS_TYPES.PROTECTED:
-            explanation.append( messages.W1066 % self.name)
         return explanation
Modified: pyplusplus_dev/pyplusplus/messages/warnings_.py
===================================================================
--- pyplusplus_dev/pyplusplus/messages/warnings_.py	2010-03-25 21:12:47 UTC (rev 1832)
+++ pyplusplus_dev/pyplusplus/messages/warnings_.py	2010-04-03 09:59:41 UTC (rev 1833)
@@ -257,9 +257,6 @@
             'Use `wrapper_alias` property to change class wrapper alias value'
             'Other classes : %s' )
 
-
-W1066 = warning( '`Py++` will generate class wrapper - class contains "%s" - protected member variable' )
-
 warnings = globals()
 
 all_warning_msgs = []
Deleted: pyplusplus_dev/unittests/data/member_variables_protected_to_be_exported.cpp
===================================================================
--- pyplusplus_dev/unittests/data/member_variables_protected_to_be_exported.cpp	2010-03-25 21:12:47 UTC (rev 1832)
+++ pyplusplus_dev/unittests/data/member_variables_protected_to_be_exported.cpp	2010-04-03 09:59:41 UTC (rev 1833)
@@ -1,54 +0,0 @@
-// Copyright 2004-2008 Roman Yakovenko.
-// Distributed under the Boost Software License, Version 1.0. (See
-// accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#include "member_variables_protected_to_be_exported.hpp"
-
-namespace member_variables{
-
-const array_t::variable_t array_t::vars[] = { array_t::variable_t(), array_t::variable_t(), array_t::variable_t() };
-array_t::variable_t array_t::vars_nonconst[] = { array_t::variable_t(), array_t::variable_t(), array_t::variable_t() };
-/*
-int point::instance_count = 0;
-const point::color point::default_color = point::red;
-*/
-unsigned int get_a(const bit_fields_t& inst){
-    return inst.a;
-}
-
-void set_a( bit_fields_t& inst, unsigned int new_value ){
-    inst.a = new_value;
-}
-
-unsigned int get_b(const bit_fields_t& inst){
-    return inst.b;
-}
-/*
-namespace pointers{
-
-std::auto_ptr<tree_node_t> create_tree(){
-    std::auto_ptr<tree_node_t> root( new tree_node_t() );
-    root->data = new data_t();
-    root->data->value = 0;
-
-    root->left = new tree_node_t( root.get() );
-    root->left->data = new data_t();
-    root->left->data->value = 1;
-
-    return root;
-}
-
-}
-
-namespace statics{
-    std::string mem_var_str_t::class_name( "mem_var_str_t" );
-}
-
-
-namespace ctypes{
-    int xxx = 1997;
-    int* image_t::none_image = &xxx;
-}
-*/
-}
Deleted: pyplusplus_dev/unittests/data/member_variables_protected_to_be_exported.hpp
===================================================================
--- pyplusplus_dev/unittests/data/member_variables_protected_to_be_exported.hpp	2010-03-25 21:12:47 UTC (rev 1832)
+++ pyplusplus_dev/unittests/data/member_variables_protected_to_be_exported.hpp	2010-04-03 09:59:41 UTC (rev 1833)
@@ -1,199 +0,0 @@
-// Copyright 2004-2008 Roman Yakovenko.
-// Distributed under the Boost Software License, Version 1.0. (See
-// accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#ifndef __member_variables_protected_to_be_exported_hpp__
-#define __member_variables_protected_to_be_exported_hpp__
-#include <memory>
-#include <string>
-#include <iostream>
-
-namespace member_variables{
-/*
-struct point{
-    enum color{ red, green, blue };
-
-    point()
-    : prefered_color( blue )
-      , x( -1 )
-      , y( 2 )
-    {++instance_count;}
-
-    point( const point& other )
-    : prefered_color( other.prefered_color )
-      , x( other.x )
-      , y( other.y )
-    {}
-
-    ~point()
-    { --instance_count; }
-protected:
-    int x;
-    int y;
-    const color prefered_color;
-    static int instance_count;
-    static const color default_color;
-};
-*/
-struct bit_fields_t{
-    friend unsigned int get_a(const bit_fields_t& inst);
-    friend void set_a( bit_fields_t& inst, unsigned int new_value );
-    friend unsigned int get_b(const bit_fields_t& inst);
-
-    bit_fields_t()
-    : b(28){}
-protected:
-    unsigned int a : 1;
-    unsigned int : 0;
-    const unsigned int b : 11;
-};
-
-unsigned int get_a(const bit_fields_t& inst);
-void set_a( bit_fields_t& inst, unsigned int new_value );
-unsigned int get_b(const bit_fields_t& inst);
-
-struct array_t{
-    array_t()
-    {
-        for( int i = 0; i < 10; ++i ){
-            ivars[i] = -i;
-        }
-    }
-
-    struct variable_t{
-        variable_t() : value(-9){}
-        int value;
-    };
-
-    int get_ivars_item( int index ){
-        return ivars[index];
-    }
-protected:
-    static const variable_t vars[3];
-    static variable_t vars_nonconst[3];
-    int ivars[10];
-    int ivars2[10];
-};
-/*
-namespace pointers{
-
-struct  tree_node_t;
-
-struct data_t{
-    friend struct tree_node_t;
-    friend std::auto_ptr<tree_node_t> create_tree();
-    data_t() : value( 201 ) {}
-protected:
-    int value;
-    static char* reserved;
-};
-
-struct tree_node_t{
-protected:
-    data_t *data;
-    tree_node_t *left;
-    tree_node_t *right;
-    const tree_node_t *parent;
-public:
-    tree_node_t(const tree_node_t* parent=0)
-    : data(0)
-      , left( 0 )
-      , right( 0 )
-      , parent( parent )
-    {}
-
-    ~tree_node_t(){
-        std::cout << "\n~tree_node_t";
-    }
-    friend std::auto_ptr<tree_node_t> create_tree();
-};
-
-std::auto_ptr<tree_node_t> create_tree();
-
-}
-*/
-namespace reference{
-
-enum EFruit{ apple, orange };
-
-struct fundamental_t{
-    fundamental_t( EFruit& fruit, const int& i )
-    : m_fruit( fruit ), m_i( i )
-    {}
-protected:
-    EFruit& m_fruit;
-    const int& m_i;
-};
-
-struct A{};
-
-
-struct B {
- B( A& a_ ): a( a_ ){}
-protected:
- A& a;
-};
-
-struct C {
- C( A& a_ ): a( a_ ){}
-protected:
- const A& a;
-};
-
-}
-/*
-namespace statics{
-
-struct mem_var_str_t{
-protected:
-    static std::string class_name;
-public:
-    std::string identity(std::string x){ return x; }
-};
-
-}
-
-namespace bugs{
-struct allocator_ {
-   void * (*alloc) (unsigned);
-   void (*dispose) (void *p);
-};
-
-typedef struct allocator_ *allocator_t;
-
-struct faulty {
-protected:
-   allocator_t allocator;
-};
-
-}
-
-
-namespace ctypes{
-    struct image_t{
-        image_t(){
-            data = new int[5];
-            for(int i=0; i<5; i++){
-                data[i] = i;
-            }
-        }
-        int* data;
-
-        static int* none_image;
-    };
-
-    class Andy{
-    protected:
-        Andy() : userData(NULL) {}
-
-        virtual ~Andy()    {}
-
-    public:
-        void * userData;
-    };
-
-}*/
-
-}
-#endif//__member_variables_protected_to_be_exported_hpp__
Deleted: pyplusplus_dev/unittests/member_variables_protected_tester.py
===================================================================
--- pyplusplus_dev/unittests/member_variables_protected_tester.py	2010-03-25 21:12:47 UTC (rev 1832)
+++ pyplusplus_dev/unittests/member_variables_protected_tester.py	2010-04-03 09:59:41 UTC (rev 1833)
@@ -1,131 +0,0 @@
-# Copyright 2004-2008 Roman Yakovenko.
-# Distributed under the Boost Software License, Version 1.0. (See
-# accompanying file LICENSE_1_0.txt or copy at
-# http://www.boost.org/LICENSE_1_0.txt)
-
-import os
-import sys
-import ctypes
-import unittest
-import fundamental_tester_base
-from pygccxml import declarations
-from pyplusplus import decl_wrappers
-
-decl_wrappers.variable_t.EXPOSE_PROTECTED_VARIABLES = True
-
-class tester_t(fundamental_tester_base.fundamental_tester_base_t):
-    EXTENSION_NAME = 'member_variables_protected'
-
-    def __init__( self, *args ):
-        fundamental_tester_base.fundamental_tester_base_t.__init__(
-            self
-            , tester_t.EXTENSION_NAME
-            , *args )
-
-    def customize(self, mb ):
-        #mb.variable( 'prefered_color' ).alias = 'PreferedColor'
-        mb.classes().always_expose_using_scope = True
-        #image = mb.class_( 'image_t' )
-        #image.var( 'data' ).expose_address = True
-        #image.var( 'none_image' ).expose_address = True
-        #mb.class_( 'Andy' ).var('userData').expose_address = True
-
-    def change_default_color( self, module ):
-        module.point.default_color = module.point.color.blue
-
-    def change_prefered_color( self, module ):
-        xypoint = module.point()
-        xypoint.PreferedColor = module.point.color.blue
-
-    def set_b( self, bf, value ):
-        bf.b = value
-
-    def run_tests(self, module):
-        #self.failIfRaisesAny( module.point )
-        #xypoint = module.point()
-        #self.failUnless( module.point.instance_count == 1)
-        #self.failUnless( xypoint.instance_count == 1)
-        #self.failUnless( module.point.default_color == module.point.color.red)
-        #self.failUnless( xypoint.default_color == module.point.color.red)
-        #self.failUnless( xypoint.x == -1)
-        #self.failUnless( xypoint.y == 2 )
-        #self.failUnless( xypoint.PreferedColor == xypoint.color.blue )
-        #self.failUnlessRaises( Exception, self.change_default_color )
-        #self.failUnlessRaises( Exception, self.change_prefered_color )
-
-        bf = module.bit_fields_t()
-        module.set_a( bf, 1 )
-        self.failUnless( 1 == bf.a )
-        self.failUnless( bf.b == module.get_b( bf ) )
-        self.failIfNotRaisesAny( lambda: self.set_b( bf, 23 ) )
-
-        #tree = module.create_tree()
-        #self.failUnless( tree.parent is None )
-        #self.failUnless( tree.data.value == 0 )
-        #self.failUnless( tree.right is None )
-        #self.failUnless( tree.left )
-        #self.failUnless( tree.left.data.value == 1 )
-
-        #tree.right = module.create_tree()
-        #self.failUnless( tree.right.parent is None )
-        #self.failUnless( tree.right.data.value == 0 )
-        #self.failUnless( tree.right.right is None )
-        #self.failUnless( tree.right.left )
-        #self.failUnless( tree.right.left.data.value == 1 )
-
-        #mem_var_str = module.mem_var_str_t()
-        #mem_var_str.identity( module.mem_var_str_t.class_name )
-
-        #image = module.image_t()
-
-        #data_type = ctypes.POINTER( ctypes.c_int )
-        #data = data_type.from_address( image.data )
-        #for j in range(5):
-            #self.failUnless( j == data[j] )
-
-        #int_array = ctypes.c_int * 5
-        #array = int_array()
-        #for i in range( 5 ):
-            #array[i] = 2*i
-        #image.data = ctypes.addressof(array)
-        #data = data_type.from_address( image.data )
-        #for j in range(5):
-            #self.failUnless( j*2 == data[j] )
-
-        #data_type = ctypes.POINTER( ctypes.c_int )
-        #data = data_type.from_address( module.image_t.none_image )
-        #self.failUnless( 1997 == data.contents.value )
-
-        array = module.array_t()
-        self.failUnless( len( array.ivars ) == 10 )
-
-        ivars = array.ivars
-        del array #testing call policies
-        for i in range(20):
-            for index in range(10):
-                self.failUnless( ivars[index] == -index )
-
-        array = module.array_t()
-        for index in range( len(array.ivars) ):
-            array.ivars[index] = index * index
-            self.failUnless( array.get_ivars_item( index ) == index * index )
-
-
-        self.failUnless( len( module.array_t.vars ) == 3 )
-        for i in range( len( module.array_t.vars ) ):
-            self.failUnless( module.array_t.vars[i].value == -9 )
-
-        self.failUnless( len( module.array_t.vars_nonconst ) == 3 )
-        for i in range( len( module.array_t.vars_nonconst ) ):
-            self.failUnless( module.array_t.vars_nonconst[i].value == -9 )
-
-def create_suite():
-    suite = unittest.TestSuite()
-    suite.addTest( unittest.makeSuite(tester_t))
-    return suite
-
-def run_suite():
-    unittest.TextTestRunner(verbosity=2).run( create_suite() )
-
-if __name__ == "__main__":
-    run_suite()
Modified: pyplusplus_dev/unittests/test_all.py
===================================================================
--- pyplusplus_dev/unittests/test_all.py	2010-03-25 21:12:47 UTC (rev 1832)
+++ pyplusplus_dev/unittests/test_all.py	2010-04-03 09:59:41 UTC (rev 1833)
@@ -126,7 +126,7 @@
 import ft_output_static_matrix_tester
 import ft_inout_static_matrix_tester
 import ft_inout_static_array_tester
-import member_variables_protected_tester
+import inner_base_class_tester
 
 testers = [
     algorithms_tester
@@ -241,7 +241,7 @@
     , ft_output_static_matrix_tester
     , ft_inout_static_matrix_tester
     , ft_inout_static_array_tester
-    , member_variables_protected_tester
+    , inner_base_class_tester
 #    , ogre_generate_tester too much time
 ]
 
This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
 |