Boost logo

Boost-Commit :

Subject: [Boost-commit] svn:boost r56244 - in trunk: boost/spirit/home/karma/char boost/spirit/home/lex/lexer boost/spirit/home/qi/char libs/spirit/example/lex libs/spirit/example/lex/static_lexer libs/spirit/example/qi/mini_c libs/spirit/test/karma libs/spirit/test/lex
From: hartmut.kaiser_at_[hidden]
Date: 2009-09-16 13:46:49


Author: hkaiser
Date: 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
New Revision: 56244
URL: http://svn.boost.org/trac/boost/changeset/56244

Log:
Spirit: imported spirit::standard namespace into spirit::qi and spirit::karma
Text files modified:
   trunk/boost/spirit/home/karma/char/char_class.hpp | 12 +++++++++
   trunk/boost/spirit/home/lex/lexer/char_token_def.hpp | 8 ++++-
   trunk/boost/spirit/home/lex/lexer/string_token_def.hpp | 6 ++++
   trunk/boost/spirit/home/qi/char/char_class.hpp | 8 +++++
   trunk/libs/spirit/example/lex/example1.cpp | 21 +++++++---------
   trunk/libs/spirit/example/lex/example2.cpp | 18 ++++++--------
   trunk/libs/spirit/example/lex/example3.cpp | 18 ++++++--------
   trunk/libs/spirit/example/lex/example4.cpp | 31 +++++++++++-------------
   trunk/libs/spirit/example/lex/example5.cpp | 35 +++++++++++++---------------
   trunk/libs/spirit/example/lex/example6.cpp | 29 ++++++++++-------------
   trunk/libs/spirit/example/lex/print_numbers.cpp | 22 ++++++++---------
   trunk/libs/spirit/example/lex/static_lexer/word_count_generate.cpp | 5 +--
   trunk/libs/spirit/example/lex/static_lexer/word_count_lexer_generate.cpp | 5 +--
   trunk/libs/spirit/example/lex/static_lexer/word_count_lexer_static.cpp | 9 +++----
   trunk/libs/spirit/example/lex/static_lexer/word_count_static.cpp | 20 +++++++--------
   trunk/libs/spirit/example/lex/strip_comments.cpp | 22 ++++++++--------
   trunk/libs/spirit/example/lex/strip_comments_lexer.cpp | 17 +++++++------
   trunk/libs/spirit/example/lex/word_count.cpp | 22 ++++++++---------
   trunk/libs/spirit/example/lex/word_count_functor.cpp | 8 +++---
   trunk/libs/spirit/example/lex/word_count_lexer.cpp | 13 +++++----
   trunk/libs/spirit/example/qi/mini_c/mini_c.hpp | 3 +
   trunk/libs/spirit/test/karma/format_manip.cpp | 49 +++++++++++++++++++--------------------
   trunk/libs/spirit/test/karma/grammar.cpp | 6 ++--
   trunk/libs/spirit/test/karma/grammar_fail.cpp | 5 +--
   trunk/libs/spirit/test/karma/pattern.cpp | 29 +++++++++++------------
   trunk/libs/spirit/test/karma/pattern2.cpp | 33 +++++++++++++-------------
   trunk/libs/spirit/test/karma/rule_fail.cpp | 3 -
   trunk/libs/spirit/test/lex/lexertl1.cpp | 4 --
   trunk/libs/spirit/test/lex/state_switcher_test.cpp | 1
   29 files changed, 229 insertions(+), 233 deletions(-)

Modified: trunk/boost/spirit/home/karma/char/char_class.hpp
==============================================================================
--- trunk/boost/spirit/home/karma/char/char_class.hpp (original)
+++ trunk/boost/spirit/home/karma/char/char_class.hpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -40,6 +40,18 @@
 ///////////////////////////////////////////////////////////////////////////////
 namespace boost { namespace spirit { namespace karma
 {
+ // hoist the char classification namespaces into karma sub-namespaces of
+ // the same name
+ namespace ascii { using namespace boost::spirit::ascii; }
+ namespace iso8859_1 { using namespace boost::spirit::iso8859_1; }
+ namespace standard { using namespace boost::spirit::standard; }
+ namespace standard_wide { using namespace boost::spirit::standard_wide; }
+
+ // Import the standard namespace into the karma namespace. This allows
+ // for default handling of all character/string related operations if not
+ // prefixed with a character set namespace.
+ using namespace boost::spirit::standard;
+
     ///////////////////////////////////////////////////////////////////////////
     //
     // char_class

Modified: trunk/boost/spirit/home/lex/lexer/char_token_def.hpp
==============================================================================
--- trunk/boost/spirit/home/lex/lexer/char_token_def.hpp (original)
+++ trunk/boost/spirit/home/lex/lexer/char_token_def.hpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -49,7 +49,11 @@
 
 namespace boost { namespace spirit { namespace lex
 {
- using spirit::lit; // lit('x') is equivalent to 'x'
+ using spirit::lit; // lit('x') is equivalent to 'x'
+
+ // use char_ from standard character set by default
+ using spirit::standard::char_type;
+ using spirit::standard::char_;
 
     ///////////////////////////////////////////////////////////////////////////
     //
@@ -57,7 +61,7 @@
     // represents a single character token definition
     //
     ///////////////////////////////////////////////////////////////////////////
- template <typename CharEncoding>
+ template <typename CharEncoding = char_encoding::standard>
     struct char_token_def
       : primitive_lexer<char_token_def<CharEncoding> >
     {

Modified: trunk/boost/spirit/home/lex/lexer/string_token_def.hpp
==============================================================================
--- trunk/boost/spirit/home/lex/lexer/string_token_def.hpp (original)
+++ trunk/boost/spirit/home/lex/lexer/string_token_def.hpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -42,13 +42,17 @@
 
 namespace boost { namespace spirit { namespace lex
 {
+ // use string from standard character set by default
+ using spirit::standard::string_type;
+ using spirit::standard::string;
+
     ///////////////////////////////////////////////////////////////////////////
     //
     // string_token_def
     // represents a string based token definition
     //
     ///////////////////////////////////////////////////////////////////////////
- template <typename String, typename CharEncoding = unused_type>
+ template <typename String, typename CharEncoding = char_encoding::standard>
     struct string_token_def
       : primitive_lexer<string_token_def<String, CharEncoding> >
     {

Modified: trunk/boost/spirit/home/qi/char/char_class.hpp
==============================================================================
--- trunk/boost/spirit/home/qi/char/char_class.hpp (original)
+++ trunk/boost/spirit/home/qi/char/char_class.hpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -31,12 +31,18 @@
 
 namespace boost { namespace spirit { namespace qi
 {
- // hoist the char classification namespaces into qi sub-namespaces of the same name
+ // hoist the char classification namespaces into qi sub-namespaces of the
+ // same name
     namespace ascii { using namespace boost::spirit::ascii; }
     namespace iso8859_1 { using namespace boost::spirit::iso8859_1; }
     namespace standard { using namespace boost::spirit::standard; }
     namespace standard_wide { using namespace boost::spirit::standard_wide; }
 
+ // Import the standard namespace into the qi namespace. This allows
+ // for default handling of all character/string related operations if not
+ // prefixed with a character set namespace.
+ using namespace boost::spirit::standard;
+
     ///////////////////////////////////////////////////////////////////////////
     // Generic char classification parser (for alnum, alpha, graph, etc.)
     ///////////////////////////////////////////////////////////////////////////

Modified: trunk/libs/spirit/example/lex/example1.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example1.cpp (original)
+++ trunk/libs/spirit/example/lex/example1.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -32,21 +32,18 @@
 #include "example.hpp"
 
 using namespace boost::spirit;
-using namespace boost::spirit::ascii;
-using namespace boost::spirit::qi;
-using namespace boost::spirit::lex;
 
 ///////////////////////////////////////////////////////////////////////////////
 // Token definition
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Lexer>
-struct example1_tokens : lexer<Lexer>
+struct example1_tokens : lex::lexer<Lexer>
 {
     example1_tokens()
     {
         // define tokens and associate them with the lexer
         identifier = "[a-zA-Z_][a-zA-Z0-9_]*";
- this->self = char_(',') | '{' | '}' | identifier;
+ this->self = lex::char_(',') | '{' | '}' | identifier;
 
         // any token definition to be used as the skip parser during parsing
         // has to be associated with a separate lexer state (here 'WS')
@@ -54,7 +51,7 @@
         this->self("WS") = white_space;
     }
 
- token_def<> identifier, white_space;
+ lex::token_def<> identifier, white_space;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -62,16 +59,16 @@
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator>
 struct example1_grammar
- : grammar<Iterator, in_state_skipper<token_def<> > >
+ : qi::grammar<Iterator, qi::in_state_skipper<lex::token_def<> > >
 {
     template <typename TokenDef>
     example1_grammar(TokenDef const& tok)
       : example1_grammar::base_type(start)
     {
- start = '{' >> *(tok.identifier >> -char_(',')) >> '}';
+ start = '{' >> *(tok.identifier >> -ascii::char_(',')) >> '}';
     }
 
- rule<Iterator, in_state_skipper<token_def<> > > start;
+ qi::rule<Iterator, qi::in_state_skipper<lex::token_def<> > > start;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -81,11 +78,11 @@
     typedef std::string::iterator base_iterator_type;
 
     // This is the token type to return from the lexer iterator
- typedef lexertl::token<base_iterator_type> token_type;
+ typedef lex::lexertl::token<base_iterator_type> token_type;
 
     // This is the lexer type to use to tokenize the input.
     // We use the lexertl based lexer engine.
- typedef lexertl::lexer<token_type> lexer_type;
+ typedef lex::lexertl::lexer<token_type> lexer_type;
 
     // This is the lexer type (derived from the given lexer type).
     typedef example1_tokens<lexer_type> example1_lex;
@@ -114,7 +111,7 @@
     // Note, how we use the token_def defined above as the skip parser. It must
     // be explicitly wrapped inside a state directive, switching the lexer
     // state for the duration of skipping whitespace.
- bool r = phrase_parse(iter, end, calc, in_state("WS")[lex.white_space]);
+ bool r = qi::phrase_parse(iter, end, calc, qi::in_state("WS")[lex.white_space]);
 
     if (r && iter == end)
     {

Modified: trunk/libs/spirit/example/lex/example2.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example2.cpp (original)
+++ trunk/libs/spirit/example/lex/example2.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -32,15 +32,13 @@
 
 using namespace boost::spirit;
 using namespace boost::spirit::ascii;
-using namespace boost::spirit::qi;
-using namespace boost::spirit::lex;
 using boost::phoenix::ref;
 
 ///////////////////////////////////////////////////////////////////////////////
 // Token definition
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Lexer>
-struct example2_tokens : lexer<Lexer>
+struct example2_tokens : lex::lexer<Lexer>
 {
     example2_tokens()
     {
@@ -56,17 +54,17 @@
         // interpreted literally and never as special regex characters. This is
         // done to be able to assign single characters the id of their character
         // code value, allowing to reference those as literals in Qi grammars.
- this->self = token_def<>(',') | '!' | '.' | '?' | ' ' | '\n' | word;
+ this->self = lex::token_def<>(',') | '!' | '.' | '?' | ' ' | '\n' | word;
     }
 
- token_def<> word;
+ lex::token_def<> word;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
 // Grammar definition
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator>
-struct example2_grammar : grammar<Iterator>
+struct example2_grammar : qi::grammar<Iterator>
 {
     template <typename TokenDef>
     example2_grammar(TokenDef const& tok)
@@ -106,7 +104,7 @@
         BOOST_SPIRIT_DEBUG_NODE(statement);
     }
 
- rule<Iterator> story, paragraph, command, question, statement;
+ qi::rule<Iterator> story, paragraph, command, question, statement;
     int paragraphs, commands, questions, statements;
 };
 
@@ -117,11 +115,11 @@
     typedef std::string::iterator base_iterator_type;
 
     // This is the token type to return from the lexer iterator
- typedef lexertl::token<base_iterator_type> token_type;
+ typedef lex::lexertl::token<base_iterator_type> token_type;
 
     // This is the lexer type to use to tokenize the input.
     // Here we use the lexertl based lexer engine.
- typedef lexertl::lexer<token_type> lexer_type;
+ typedef lex::lexertl::lexer<token_type> lexer_type;
 
     // This is the token definition type (derived from the given lexer type).
     typedef example2_tokens<lexer_type> example2_tokens;
@@ -147,7 +145,7 @@
 
     // Parsing is done based on the token stream, not the character
     // stream read from the input.
- bool r = parse(iter, end, calc);
+ bool r = qi::parse(iter, end, calc);
 
     if (r && iter == end)
     {

Modified: trunk/libs/spirit/example/lex/example3.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example3.cpp (original)
+++ trunk/libs/spirit/example/lex/example3.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -32,14 +32,12 @@
 #include "example.hpp"
 
 using namespace boost::spirit;
-using namespace boost::spirit::qi;
-using namespace boost::spirit::lex;
 
 ///////////////////////////////////////////////////////////////////////////////
 // Token definition
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Lexer>
-struct example3_tokens : lexer<Lexer>
+struct example3_tokens : lex::lexer<Lexer>
 {
     example3_tokens()
     {
@@ -53,13 +51,13 @@
         // define the whitespace to ignore (spaces, tabs, newlines and C-style
         // comments)
         this->self("WS")
- = token_def<>("[ \\t\\n]+") // whitespace
+ = lex::token_def<>("[ \\t\\n]+") // whitespace
             | "\\/\\*[^*]*\\*+([^/*][^*]*\\*+)*\\/" // C style comments
             ;
     }
 
     // these tokens expose the iterator_range of the matched input sequence
- token_def<> ellipses, identifier, number;
+ lex::token_def<> ellipses, identifier, number;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -67,7 +65,7 @@
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator, typename Lexer>
 struct example3_grammar
- : grammar<Iterator, in_state_skipper<Lexer> >
+ : qi::grammar<Iterator, qi::in_state_skipper<Lexer> >
 {
     template <typename TokenDef>
     example3_grammar(TokenDef const& tok)
@@ -91,7 +89,7 @@
         BOOST_SPIRIT_DEBUG_NODE(couplet);
     }
 
- rule<Iterator, in_state_skipper<Lexer> > start, couplet;
+ qi::rule<Iterator, qi::in_state_skipper<Lexer> > start, couplet;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -101,11 +99,11 @@
     typedef std::string::iterator base_iterator_type;
 
     // This is the token type to return from the lexer iterator
- typedef lexertl::token<base_iterator_type> token_type;
+ typedef lex::lexertl::token<base_iterator_type> token_type;
 
     // This is the lexer type to use to tokenize the input.
     // Here we use the lexertl based lexer engine.
- typedef lexertl::lexer<token_type> lexer_type;
+ typedef lex::lexertl::lexer<token_type> lexer_type;
 
     // This is the token definition type (derived from the given lexer type).
     typedef example3_tokens<lexer_type> example3_tokens;
@@ -132,7 +130,7 @@
     // Parsing is done based on the token stream, not the character
     // stream read from the input.
     // Note how we use the lexer defined above as the skip parser.
- bool r = phrase_parse(iter, end, calc, in_state("WS")[tokens.self]);
+ bool r = qi::phrase_parse(iter, end, calc, qi::in_state("WS")[tokens.self]);
 
     if (r && iter == end)
     {

Modified: trunk/libs/spirit/example/lex/example4.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example4.cpp (original)
+++ trunk/libs/spirit/example/lex/example4.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -33,16 +33,13 @@
 #include "example.hpp"
 
 using namespace boost::spirit;
-using namespace boost::spirit::qi;
-using namespace boost::spirit::lex;
-
 using boost::phoenix::val;
 
 ///////////////////////////////////////////////////////////////////////////////
 // Token definition
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Lexer>
-struct example4_tokens : lexer<Lexer>
+struct example4_tokens : lex::lexer<Lexer>
 {
     example4_tokens()
     {
@@ -54,20 +51,20 @@
         while_ = "while";
 
         // associate the tokens and the token set with the lexer
- this->self = token_def<>('(') | ')' | '{' | '}' | '=' | ';' | constant;
+ this->self = lex::token_def<>('(') | ')' | '{' | '}' | '=' | ';' | constant;
         this->self += if_ | else_ | while_ | identifier;
 
         // define the whitespace to ignore (spaces, tabs, newlines and C-style
         // comments)
         this->self("WS")
- = token_def<>("[ \\t\\n]+")
+ = lex::token_def<>("[ \\t\\n]+")
             | "\\/\\*[^*]*\\*+([^/*][^*]*\\*+)*\\/"
             ;
     }
 
 //[example4_token_def
     // these tokens expose the iterator_range of the matched input sequence
- token_def<> if_, else_, while_;
+ lex::token_def<> if_, else_, while_;
 
     // The following two tokens have an associated attribute type, 'identifier'
     // carries a string (the identifier name) and 'constant' carries the
@@ -82,8 +79,8 @@
     // possible. Moreover, token instances are constructed once by the lexer
     // library. From this point on tokens are passed by reference only,
     // avoiding them being copied around.
- token_def<std::string> identifier;
- token_def<unsigned int> constant;
+ lex::token_def<std::string> identifier;
+ lex::token_def<unsigned int> constant;
 //]
 };
 
@@ -92,7 +89,7 @@
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator, typename Lexer>
 struct example4_grammar
- : grammar<Iterator, in_state_skipper<Lexer> >
+ : qi::grammar<Iterator, qi::in_state_skipper<Lexer> >
 {
     template <typename TokenDef>
     example4_grammar(TokenDef const& tok)
@@ -148,12 +145,12 @@
 
     typedef boost::variant<unsigned int, std::string> expression_type;
 
- rule<Iterator, in_state_skipper<Lexer> > program, block, statement;
- rule<Iterator, in_state_skipper<Lexer> > assignment, if_stmt;
- rule<Iterator, in_state_skipper<Lexer> > while_stmt;
+ qi::rule<Iterator, qi::in_state_skipper<Lexer> > program, block, statement;
+ qi::rule<Iterator, qi::in_state_skipper<Lexer> > assignment, if_stmt;
+ qi::rule<Iterator, qi::in_state_skipper<Lexer> > while_stmt;
 
     // the expression is the only rule having a return value
- rule<Iterator, expression_type(), in_state_skipper<Lexer> > expression;
+ qi::rule<Iterator, expression_type(), qi::in_state_skipper<Lexer> > expression;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -176,12 +173,12 @@
     // least one token attribute type you'll have to list all attribute types
     // used for token_def<> declarations in the token definition class above,
     // otherwise compilation errors will occur.
- typedef lexertl::token<
+ typedef lex::lexertl::token<
         base_iterator_type, boost::mpl::vector<unsigned int, std::string>
> token_type;
 //]
     // Here we use the lexertl based lexer engine.
- typedef lexertl::lexer<token_type> lexer_type;
+ typedef lex::lexertl::lexer<token_type> lexer_type;
 
     // This is the token definition type (derived from the given lexer type).
     typedef example4_tokens<lexer_type> example4_tokens;
@@ -210,7 +207,7 @@
     // Note how we use the lexer defined above as the skip parser. It must
     // be explicitly wrapped inside a state directive, switching the lexer
     // state for the duration of skipping whitespace.
- bool r = phrase_parse(iter, end, calc, in_state("WS")[tokens.self]);
+ bool r = qi::phrase_parse(iter, end, calc, qi::in_state("WS")[tokens.self]);
 
     if (r && iter == end)
     {

Modified: trunk/libs/spirit/example/lex/example5.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example5.cpp (original)
+++ trunk/libs/spirit/example/lex/example5.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -35,16 +35,13 @@
 #include "example.hpp"
 
 using namespace boost::spirit;
-using namespace boost::spirit::qi;
-using namespace boost::spirit::lex;
-
 using boost::phoenix::val;
 
 ///////////////////////////////////////////////////////////////////////////////
 // Token definition base, defines all tokens for the base grammar below
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Lexer>
-struct example5_base_tokens : lexer<Lexer>
+struct example5_base_tokens : lex::lexer<Lexer>
 {
 protected:
     // this lexer is supposed to be used as a base type only
@@ -60,19 +57,19 @@
         while_ = "while";
 
         // associate the tokens and the token set with the lexer
- this->self += token_def<>('(') | ')' | '{' | '}' | '=' | ';' | constant;
+ this->self += lex::token_def<>('(') | ')' | '{' | '}' | '=' | ';' | constant;
         this->self += if_ | while_ | identifier;
 
         // define the whitespace to ignore (spaces, tabs, newlines and C-style
         // comments)
         this->self("WS")
- = token_def<>("[ \\t\\n]+")
+ = lex::token_def<>("[ \\t\\n]+")
             | "\\/\\*[^*]*\\*+([^/*][^*]*\\*+)*\\/"
             ;
     }
 
     // these tokens have no attribute
- token_def<lex::omit> if_, while_;
+ lex::token_def<lex::omit> if_, while_;
 
     // The following two tokens have an associated attribute type, 'identifier'
     // carries a string (the identifier name) and 'constant' carries the
@@ -87,8 +84,8 @@
     // possible. Moreover, token instances are constructed once by the lexer
     // library. From this point on tokens are passed by reference only,
     // avoiding them being copied around.
- token_def<std::string> identifier;
- token_def<unsigned int> constant;
+ lex::token_def<std::string> identifier;
+ lex::token_def<unsigned int> constant;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -96,7 +93,7 @@
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator, typename Lexer>
 struct example5_base_grammar
- : grammar<Iterator, in_state_skipper<Lexer> >
+ : qi::grammar<Iterator, qi::in_state_skipper<Lexer> >
 {
     template <typename TokenDef>
     example5_base_grammar(TokenDef const& tok)
@@ -148,15 +145,15 @@
             ;
     }
 
- typedef in_state_skipper<Lexer> skipper_type;
+ typedef qi::in_state_skipper<Lexer> skipper_type;
 
- rule<Iterator, skipper_type> program, block, statement;
- rule<Iterator, skipper_type> assignment, if_stmt;
- rule<Iterator, skipper_type> while_stmt;
+ qi::rule<Iterator, skipper_type> program, block, statement;
+ qi::rule<Iterator, skipper_type> assignment, if_stmt;
+ qi::rule<Iterator, skipper_type> while_stmt;
 
     // the expression is the only rule having a return value
     typedef boost::variant<unsigned int, std::string> expression_type;
- rule<Iterator, expression_type(), skipper_type> expression;
+ qi::rule<Iterator, expression_type(), skipper_type> expression;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -182,7 +179,7 @@
     }
 
     // this token has no attribute
- token_def<lex::omit> else_;
+ lex::token_def<lex::omit> else_;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -221,12 +218,12 @@
     // least one token attribute type you'll have to list all attribute types
     // used for token_def<> declarations in the token definition class above,
     // otherwise compilation errors will occur.
- typedef lexertl::token<
+ typedef lex::lexertl::token<
         base_iterator_type, boost::mpl::vector<unsigned int, std::string>
> token_type;
 
     // Here we use the lexertl based lexer engine.
- typedef lexertl::lexer<token_type> lexer_type;
+ typedef lex::lexertl::lexer<token_type> lexer_type;
 
     // This is the token definition type (derived from the given lexer type).
     typedef example5_tokens<lexer_type> example5_tokens;
@@ -256,7 +253,7 @@
     // be explicitly wrapped inside a state directive, switching the lexer
     // state for the duration of skipping whitespace.
     std::string ws("WS");
- bool r = phrase_parse(iter, end, calc, in_state(ws)[tokens.self]);
+ bool r = qi::phrase_parse(iter, end, calc, qi::in_state(ws)[tokens.self]);
 
     if (r && iter == end)
     {

Modified: trunk/libs/spirit/example/lex/example6.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example6.cpp (original)
+++ trunk/libs/spirit/example/lex/example6.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -37,9 +37,6 @@
 #include "example.hpp"
 
 using namespace boost::spirit;
-using namespace boost::spirit::qi;
-using namespace boost::spirit::lex;
-
 using boost::phoenix::val;
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -58,7 +55,7 @@
 // Token definitions
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Lexer>
-struct example6_tokens : lexer<Lexer>
+struct example6_tokens : lex::lexer<Lexer>
 {
     example6_tokens()
     {
@@ -67,7 +64,7 @@
         constant = "[0-9]+";
 
         // associate the tokens and the token set with the lexer
- this->self = token_def<>('(') | ')' | '{' | '}' | '=' | ';';
+ this->self = lex::token_def<>('(') | ')' | '{' | '}' | '=' | ';';
 
         // Token definitions can be added by using some special syntactic
         // construct as shown below.
@@ -84,7 +81,7 @@
         // define the whitespace to ignore (spaces, tabs, newlines and C-style
         // comments) and add those to another lexer state (here: "WS")
         this->self("WS")
- = token_def<>("[ \\t\\n]+")
+ = lex::token_def<>("[ \\t\\n]+")
             | "\\/\\*[^*]*\\*+([^/*][^*]*\\*+)*\\/"
             ;
     }
@@ -102,8 +99,8 @@
     // possible. Moreover, token instances are constructed once by the lexer
     // library. From this point on tokens are passed by reference only,
     // avoiding them being copied around.
- token_def<std::string> identifier;
- token_def<unsigned int> constant;
+ lex::token_def<std::string> identifier;
+ lex::token_def<unsigned int> constant;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -111,7 +108,7 @@
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator, typename Lexer>
 struct example6_grammar
- : grammar<Iterator, in_state_skipper<Lexer> >
+ : qi::grammar<Iterator, qi::in_state_skipper<Lexer> >
 {
     template <typename TokenDef>
     example6_grammar(TokenDef const& tok)
@@ -170,12 +167,12 @@
 
     typedef boost::variant<unsigned int, std::string> expression_type;
 
- rule<Iterator, in_state_skipper<Lexer> > program, block, statement;
- rule<Iterator, in_state_skipper<Lexer> > assignment, if_stmt;
- rule<Iterator, in_state_skipper<Lexer> > while_stmt;
+ qi::rule<Iterator, qi::in_state_skipper<Lexer> > program, block, statement;
+ qi::rule<Iterator, qi::in_state_skipper<Lexer> > assignment, if_stmt;
+ qi::rule<Iterator, qi::in_state_skipper<Lexer> > while_stmt;
 
     // the expression is the only rule having a return value
- rule<Iterator, expression_type(), in_state_skipper<Lexer> > expression;
+ qi::rule<Iterator, expression_type(), qi::in_state_skipper<Lexer> > expression;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -197,12 +194,12 @@
     // least one token attribute type you'll have to list all attribute types
     // used for token_def<> declarations in the token definition class above,
     // otherwise compilation errors will occur.
- typedef lexertl::token<
+ typedef lex::lexertl::token<
         base_iterator_type, boost::mpl::vector<unsigned int, std::string>
> token_type;
 
     // Here we use the lexertl based lexer engine.
- typedef lexertl::lexer<token_type> lexer_type;
+ typedef lex::lexertl::lexer<token_type> lexer_type;
 
     // This is the token definition type (derived from the given lexer type).
     typedef example6_tokens<lexer_type> example6_tokens;
@@ -232,7 +229,7 @@
     // be explicitly wrapped inside a state directive, switching the lexer
     // state for the duration of skipping whitespace.
     std::string ws("WS");
- bool r = phrase_parse(iter, end, calc, in_state(ws)[tokens.self]);
+ bool r = qi::phrase_parse(iter, end, calc, qi::in_state(ws)[tokens.self]);
 
     if (r && iter == end)
     {

Modified: trunk/libs/spirit/example/lex/print_numbers.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/print_numbers.cpp (original)
+++ trunk/libs/spirit/example/lex/print_numbers.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -30,23 +30,21 @@
 #include "example.hpp"
 
 using namespace boost::spirit;
-using namespace boost::spirit::qi;
-using namespace boost::spirit::lex;
 
 ///////////////////////////////////////////////////////////////////////////////
 // Token definition: We use the lexertl based lexer engine as the underlying
 // lexer type.
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Lexer>
-struct print_numbers_tokens : lexer<Lexer>
+struct print_numbers_tokens : lex::lexer<Lexer>
 {
     // define tokens and associate it with the lexer, we set the lexer flags
     // not to match newlines while matching a dot, so we need to add the
     // '\n' explicitly below
     print_numbers_tokens()
- : print_numbers_tokens::base_type(match_flags::match_not_dot_newline)
+ : print_numbers_tokens::base_type(lex::match_flags::match_not_dot_newline)
     {
- this->self = token_def<int>("[0-9]*") | ".|\n";
+ this->self = lex::token_def<int>("[0-9]*") | ".|\n";
     }
 };
 
@@ -54,7 +52,7 @@
 // Grammar definition
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator>
-struct print_numbers_grammar : grammar<Iterator>
+struct print_numbers_grammar : qi::grammar<Iterator>
 {
     print_numbers_grammar()
       : print_numbers_grammar::base_type(start)
@@ -62,13 +60,13 @@
         // we just know, that the token ids get assigned starting min_token_id
         // so, "[0-9]*" gets the id 'min_token_id' and ".|\n" gets the id
         // 'min_token_id+1'.
- start = *( token(lex::min_token_id) [ std::cout << _1 << "\n" ]
- | token(lex::min_token_id+1)
+ start = *( qi::token(lex::min_token_id) [ std::cout << _1 << "\n" ]
+ | qi::token(lex::min_token_id+1)
                   )
               ;
     }
 
- rule<Iterator> start;
+ qi::rule<Iterator> start;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -79,11 +77,11 @@
 
     // the token type to be used, 'int' is available as the type of the token
     // attribute and no lexer state is supported
- typedef lexertl::token<base_iterator_type, boost::mpl::vector<int>
+ typedef lex::lexertl::token<base_iterator_type, boost::mpl::vector<int>
       , boost::mpl::false_> token_type;
 
     // lexer type
- typedef lexertl::lexer<token_type> lexer_type;
+ typedef lex::lexertl::lexer<token_type> lexer_type;
 
     // iterator type exposed by the lexer
     typedef print_numbers_tokens<lexer_type>::iterator_type iterator_type;
@@ -97,7 +95,7 @@
     // stream read from the input.
     std::string str (read_from_file(1 == argc ? "print_numbers.input" : argv[1]));
     base_iterator_type first = str.begin();
- bool r = tokenize_and_parse(first, str.end(), print_tokens, print);
+ bool r = lex::tokenize_and_parse(first, str.end(), print_tokens, print);
 
     if (r) {
         std::cout << "-------------------------\n";

Modified: trunk/libs/spirit/example/lex/static_lexer/word_count_generate.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/static_lexer/word_count_generate.cpp (original)
+++ trunk/libs/spirit/example/lex/static_lexer/word_count_generate.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -22,14 +22,13 @@
 #include "word_count_tokens.hpp"
 
 using namespace boost::spirit;
-using namespace boost::spirit::lex;
 
 ///////////////////////////////////////////////////////////////////////////////
 //[wc_static_generate_main
 int main(int argc, char* argv[])
 {
     // create the lexer object instance needed to invoke the generator
- word_count_tokens<lexertl::lexer<> > word_count; // the token definition
+ word_count_tokens<lex::lexertl::lexer<> > word_count; // the token definition
 
     // open the output file, where the generated tokenizer function will be
     // written to
@@ -41,6 +40,6 @@
     // The suffix "wc" used below results in a type lexertl::static_::lexer_wc
     // to be generated, which needs to be passed as a template parameter to the
     // lexertl::static_lexer template (see word_count_static.cpp).
- return lexertl::generate_static(word_count, out, "wc") ? 0 : -1;
+ return lex::lexertl::generate_static(word_count, out, "wc") ? 0 : -1;
 }
 //]

Modified: trunk/libs/spirit/example/lex/static_lexer/word_count_lexer_generate.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/static_lexer/word_count_lexer_generate.cpp (original)
+++ trunk/libs/spirit/example/lex/static_lexer/word_count_lexer_generate.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -22,14 +22,13 @@
 #include "word_count_lexer_tokens.hpp"
 
 using namespace boost::spirit;
-using namespace boost::spirit::lex;
 
 ///////////////////////////////////////////////////////////////////////////////
 //[wcl_static_generate_main
 int main(int argc, char* argv[])
 {
     // create the lexer object instance needed to invoke the generator
- word_count_lexer_tokens<lexertl::actor_lexer<> > word_count; // the token definition
+ word_count_lexer_tokens<lex::lexertl::actor_lexer<> > word_count; // the token definition
 
     // open the output file, where the generated tokenizer function will be
     // written to
@@ -41,6 +40,6 @@
     // The suffix "wcl" used below results in a type lexertl::static_::lexer_wcl
     // to be generated, which needs to be passed as a template parameter to the
     // lexertl::static_lexer template (see word_count_lexer_static.cpp).
- return lexertl::generate_static(word_count, out, "wcl") ? 0 : -1;
+ return lex::lexertl::generate_static(word_count, out, "wcl") ? 0 : -1;
 }
 //]

Modified: trunk/libs/spirit/example/lex/static_lexer/word_count_lexer_static.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/static_lexer/word_count_lexer_static.cpp (original)
+++ trunk/libs/spirit/example/lex/static_lexer/word_count_lexer_static.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -25,7 +25,6 @@
 #include "word_count_lexer_static.hpp" // generated tokenizer
 
 using namespace boost::spirit;
-using namespace boost::spirit::lex;
 
 ///////////////////////////////////////////////////////////////////////////////
 //[wcl_static_main
@@ -45,7 +44,7 @@
     //
     // As a result the token instances contain the token ids as the only data
     // member.
- typedef lexertl::token<char const*, lex::omit, boost::mpl::false_> token_type;
+ typedef lex::lexertl::token<char const*, lex::omit, boost::mpl::false_> token_type;
 
     // Define the lexer type to be used as the base class for our token
     // definition.
@@ -57,8 +56,8 @@
     // As we specified the suffix "wcl" while generating the static tables we
     // need to pass the type lexertl::static_::lexer_wcl as the second template
     // parameter below (see word_count_lexer_generate.cpp).
- typedef lexertl::static_actor_lexer<
- token_type, lexertl::static_::lexer_wcl
+ typedef lex::lexertl::static_actor_lexer<
+ token_type, lex::lexertl::static_::lexer_wcl
> lexer_type;
 
     // create the lexer object instance needed to invoke the lexical analysis
@@ -67,7 +66,7 @@
     // tokenize the given string, all generated tokens are discarded
     char const* first = str.c_str();
     char const* last = &first[str.size()];
- bool r = tokenize(first, last, word_count_lexer);
+ bool r = lex::tokenize(first, last, word_count_lexer);
 
     if (r) {
         std::cout << "lines: " << word_count_lexer.l

Modified: trunk/libs/spirit/example/lex/static_lexer/word_count_static.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/static_lexer/word_count_static.cpp (original)
+++ trunk/libs/spirit/example/lex/static_lexer/word_count_static.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -32,8 +32,6 @@
 
 using namespace boost::spirit;
 using namespace boost::spirit::ascii;
-using namespace boost::spirit::qi;
-using namespace boost::spirit::lex;
 
 ///////////////////////////////////////////////////////////////////////////////
 // Grammar definition
@@ -44,7 +42,7 @@
 // definition class instance passed to the constructor to allow accessing the
 // embedded token_def<> instances.
 template <typename Iterator>
-struct word_count_grammar : grammar<Iterator>
+struct word_count_grammar : qi::grammar<Iterator>
 {
     template <typename TokenDef>
     word_count_grammar(TokenDef const& tok)
@@ -56,15 +54,15 @@
 
         // associate the defined tokens with the lexer, at the same time
         // defining the actions to be executed
- start = *( tok.word [ ++ref(w), ref(c) += size(_1) ]
- | lit('\n') [ ++ref(l), ++ref(c) ]
- | token(IDANY) [ ++ref(c) ]
+ start = *( tok.word [ ++ref(w), ref(c) += size(_1) ]
+ | lit('\n') [ ++ref(l), ++ref(c) ]
+ | qi::token(IDANY) [ ++ref(c) ]
                   )
               ;
     }
 
     std::size_t c, w, l; // counter for characters, words, and lines
- rule<Iterator> start;
+ qi::rule<Iterator> start;
 };
 //]
 
@@ -74,7 +72,7 @@
 {
     // Define the token type to be used: 'std::string' is available as the type
     // of the token value.
- typedef lexertl::token<
+ typedef lex::lexertl::token<
         char const*, boost::mpl::vector<std::string>
> token_type;
 
@@ -88,8 +86,8 @@
     // As we specified the suffix "wc" while generating the static tables we
     // need to pass the type lexertl::static_::lexer_wc as the second template
     // parameter below (see word_count_generate.cpp).
- typedef lexertl::static_lexer<
- token_type, lexertl::static_::lexer_wc
+ typedef lex::lexertl::static_lexer<
+ token_type, lex::lexertl::static_::lexer_wc
> lexer_type;
 
     // Define the iterator type exposed by the lexer.
@@ -106,7 +104,7 @@
     char const* last = &first[str.size()];
 
     // Parsing is done based on the the token stream, not the character stream.
- bool r = tokenize_and_parse(first, last, word_count, g);
+ bool r = lex::tokenize_and_parse(first, last, word_count, g);
 
     if (r) { // success
         std::cout << "lines: " << g.l << ", words: " << g.w

Modified: trunk/libs/spirit/example/lex/strip_comments.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/strip_comments.cpp (original)
+++ trunk/libs/spirit/example/lex/strip_comments.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -45,8 +45,6 @@
 #include "example.hpp"
 
 using namespace boost::spirit;
-using namespace boost::spirit::qi;
-using namespace boost::spirit::lex;
 
 ///////////////////////////////////////////////////////////////////////////////
 // Token definition: We use the lexertl based lexer engine as the underlying
@@ -58,10 +56,10 @@
 };
 
 template <typename Lexer>
-struct strip_comments_tokens : lexer<Lexer>
+struct strip_comments_tokens : lex::lexer<Lexer>
 {
     strip_comments_tokens()
- : strip_comments_tokens::base_type(match_flags::match_default)
+ : strip_comments_tokens::base_type(lex::match_flags::match_default)
     {
         // define tokens and associate them with the lexer
         cppcomment = "\\/\\/[^\n]*"; // '//[^\n]*'
@@ -87,14 +85,14 @@
         ;
     }
 
- token_def<> cppcomment, ccomment, endcomment;
+ lex::token_def<> cppcomment, ccomment, endcomment;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
 // Grammar definition
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator>
-struct strip_comments_grammar : grammar<Iterator>
+struct strip_comments_grammar : qi::grammar<Iterator>
 {
     template <typename TokenDef>
     strip_comments_grammar(TokenDef const& tok)
@@ -103,19 +101,19 @@
         // The in_state("COMMENT")[...] parser component switches the lexer
         // state to be 'COMMENT' during the matching of the embedded parser.
         start = *( tok.ccomment
- >> in_state("COMMENT")
+ >> qi::in_state("COMMENT")
                           [
                               // the lexer is in the 'COMMENT' state during
                               // matching of the following parser components
                               *token(IDANY) >> tok.endcomment
                           ]
                   | tok.cppcomment
- | token(IDANY) [ std::cout << _1 ]
+ | qi::token(IDANY) [ std::cout << _1 ]
                   )
               ;
     }
 
- rule<Iterator> start;
+ qi::rule<Iterator> start;
 };
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -125,7 +123,9 @@
     typedef std::string::iterator base_iterator_type;
 
     // lexer type
- typedef lexertl::lexer<lexertl::token<base_iterator_type> > lexer_type;
+ typedef
+ lex::lexertl::lexer<lex::lexertl::token<base_iterator_type> >
+ lexer_type;
 
     // iterator type exposed by the lexer
     typedef strip_comments_tokens<lexer_type>::iterator_type iterator_type;
@@ -140,7 +140,7 @@
     std::string str (read_from_file(1 == argc ? "strip_comments.input" : argv[1]));
     base_iterator_type first = str.begin();
 
- bool r = tokenize_and_parse(first, str.end(), strip_comments, g);
+ bool r = lex::tokenize_and_parse(first, str.end(), strip_comments, g);
 
     if (r) {
         std::cout << "-------------------------\n";

Modified: trunk/libs/spirit/example/lex/strip_comments_lexer.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/strip_comments_lexer.cpp (original)
+++ trunk/libs/spirit/example/lex/strip_comments_lexer.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -45,8 +45,7 @@
 #include "example.hpp"
 
 using namespace boost::spirit;
-using namespace boost::spirit::lex;
-
+
 ///////////////////////////////////////////////////////////////////////////////
 // Token definition: We use the lexertl based lexer engine as the underlying
 // lexer type.
@@ -107,10 +106,10 @@
 
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Lexer>
-struct strip_comments_tokens : lexer<Lexer>
+struct strip_comments_tokens : lex::lexer<Lexer>
 {
     strip_comments_tokens()
- : strip_comments_tokens::base_type(match_flags::match_default)
+ : strip_comments_tokens::base_type(lex::match_flags::match_default)
     {
         // define tokens and associate them with the lexer
         cppcomment = "\\/\\/[^\n]*"; // '//[^\n]*'
@@ -137,7 +136,7 @@
             ;
     }
 
- token_def<> cppcomment, ccomment, endcomment, any, eol;
+ lex::token_def<> cppcomment, ccomment, endcomment, any, eol;
 };
 
   ///////////////////////////////////////////////////////////////////////////////
@@ -147,17 +146,19 @@
     typedef std::string::iterator base_iterator_type;
 
     // lexer type
- typedef lexertl::actor_lexer<lexertl::token<base_iterator_type> > lexer_type;
+ typedef
+ lex::lexertl::actor_lexer<lex::lexertl::token<base_iterator_type> >
+ lexer_type;
 
     // now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process
     strip_comments_tokens<lexer_type> strip_comments; // Our lexer
 
- // No parsing is done alltogether, everzthing happens in the lexer semantic
+ // No parsing is done altogether, everything happens in the lexer semantic
     // actions.
     std::string str (read_from_file(1 == argc ? "strip_comments.input" : argv[1]));
     base_iterator_type first = str.begin();
- bool r = tokenize(first, str.end(), strip_comments);
+ bool r = lex::tokenize(first, str.end(), strip_comments);
 
     if (!r) {
         std::string rest(first, str.end());

Modified: trunk/libs/spirit/example/lex/word_count.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/word_count.cpp (original)
+++ trunk/libs/spirit/example/lex/word_count.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -51,8 +51,6 @@
 //[wcp_namespaces
 using namespace boost::spirit;
 using namespace boost::spirit::ascii;
-using namespace boost::spirit::qi;
-using namespace boost::spirit::lex;
 //]
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -68,7 +66,7 @@
 
 //[wcp_token_definition
 template <typename Lexer>
-struct word_count_tokens : lexer<Lexer>
+struct word_count_tokens : lex::lexer<Lexer>
 {
     word_count_tokens()
     {
@@ -91,7 +89,7 @@
     }
 
     // the token 'word' exposes the matched string as its parser attribute
- token_def<std::string> word;
+ lex::token_def<std::string> word;
 };
 //]
 
@@ -100,7 +98,7 @@
 ///////////////////////////////////////////////////////////////////////////////
 //[wcp_grammar_definition
 template <typename Iterator>
-struct word_count_grammar : grammar<Iterator>
+struct word_count_grammar : qi::grammar<Iterator>
 {
     template <typename TokenDef>
     word_count_grammar(TokenDef const& tok)
@@ -110,15 +108,15 @@
         using boost::phoenix::ref;
         using boost::phoenix::size;
 
- start = *( tok.word [++ref(w), ref(c) += size(_1)]
- | lit('\n') [++ref(c), ++ref(l)]
- | token(IDANY) [++ref(c)]
+ start = *( tok.word [++ref(w), ref(c) += size(_1)]
+ | lit('\n') [++ref(c), ++ref(l)]
+ | qi::token(IDANY) [++ref(c)]
                   )
               ;
     }
 
     std::size_t c, w, l;
- rule<Iterator> start;
+ qi::rule<Iterator> start;
 };
 //]
 
@@ -128,12 +126,12 @@
 {
 /*< Define the token type to be used: `std::string` is available as the
      type of the token attribute
->*/ typedef lexertl::token<
+>*/ typedef lex::lexertl::token<
         char const*, boost::mpl::vector<std::string>
> token_type;
 
 /*< Define the lexer type to use implementing the state machine
->*/ typedef lexertl::lexer<token_type> lexer_type;
+>*/ typedef lex::lexertl::lexer<token_type> lexer_type;
 
 /*< Define the iterator type exposed by the lexer type
>*/ typedef word_count_tokens<lexer_type>::iterator_type iterator_type;
@@ -152,7 +150,7 @@
      stream read from the input. The function `tokenize_and_parse()` wraps
      the passed iterator range `[first, last)` by the lexical analyzer and
     uses its exposed iterators to parse the token stream.
->*/ bool r = tokenize_and_parse(first, last, word_count, g);
+>*/ bool r = lex::tokenize_and_parse(first, last, word_count, g);
 
     if (r) {
         std::cout << "lines: " << g.l << ", words: " << g.w

Modified: trunk/libs/spirit/example/lex/word_count_functor.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/word_count_functor.cpp (original)
+++ trunk/libs/spirit/example/lex/word_count_functor.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -62,7 +62,7 @@
 #include "example.hpp"
 
 //[wcf_namespaces
-using namespace boost::spirit::lex;
+namespace lex = boost::spirit::lex;
 //]
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -90,7 +90,7 @@
     example we use the Lexertl based lexer engine as the underlying lexer type.
 */
 template <typename Lexer>
-struct word_count_tokens : lexer<Lexer>
+struct word_count_tokens : lex::lexer<Lexer>
 {
     word_count_tokens()
     {
@@ -158,13 +158,13 @@
     std::string str (read_from_file(1 == argc ? "word_count.input" : argv[1]));
 
     // create the token definition instance needed to invoke the lexical analyzer
- word_count_tokens<lexertl::lexer<> > word_count_functor;
+ word_count_tokens<lex::lexertl::lexer<> > word_count_functor;
 
     // tokenize the given string, the bound functor gets invoked for each of
     // the matched tokens
     char const* first = str.c_str();
     char const* last = &first[str.size()];
- bool r = tokenize(first, last, word_count_functor,
+ bool r = lex::tokenize(first, last, word_count_functor,
         boost::bind(counter(), _1, boost::ref(c), boost::ref(w), boost::ref(l)));
 
     // print results

Modified: trunk/libs/spirit/example/lex/word_count_lexer.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/word_count_lexer.cpp (original)
+++ trunk/libs/spirit/example/lex/word_count_lexer.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -47,8 +47,7 @@
 #include "example.hpp"
 
 //[wcl_namespaces
-using namespace boost::spirit;
-using namespace boost::spirit::lex;
+namespace lex = boost::spirit::lex;
 //]
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -74,7 +73,7 @@
 
 //[wcl_token_definition
 template <typename Lexer>
-struct word_count_tokens : lexer<Lexer>
+struct word_count_tokens : lex::lexer<Lexer>
 {
     word_count_tokens()
       : c(0), w(0), l(0)
@@ -95,7 +94,7 @@
     }
 
     std::size_t c, w, l;
- token_def<> word, eol, any;
+ lex::token_def<> word, eol, any;
 };
 //]
 
@@ -112,10 +111,12 @@
      type and an iterator, both holding no lexer state, allowing for even more
      aggressive optimizations. As a result the token instances contain the token
      ids as the only data member.
->*/ typedef lexertl::token<char const*, lex::omit, boost::mpl::false_> token_type;
+>*/ typedef
+ lex::lexertl::token<char const*, lex::omit, boost::mpl::false_>
+ token_type;
 
 /*< This defines the lexer type to use
->*/ typedef lexertl::actor_lexer<token_type> lexer_type;
+>*/ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
 
 /*< Create the lexer object instance needed to invoke the lexical analysis
>*/ word_count_tokens<lexer_type> word_count_lexer;

Modified: trunk/libs/spirit/example/qi/mini_c/mini_c.hpp
==============================================================================
--- trunk/libs/spirit/example/qi/mini_c/mini_c.hpp (original)
+++ trunk/libs/spirit/example/qi/mini_c/mini_c.hpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -26,7 +26,6 @@
 
 using namespace boost::spirit;
 using namespace boost::spirit::qi;
-using namespace boost::spirit::ascii;
 
 ///////////////////////////////////////////////////////////////////////////////
 // The Virtual Machine
@@ -249,6 +248,8 @@
 {
     white_space() : white_space::base_type(start)
     {
+ using boost::spirit::ascii::char_;
+
         start =
                 space // tab/space/cr/lf
             | "/*" >> *(char_ - "*/") >> "*/" // C-style comments

Modified: trunk/libs/spirit/test/karma/format_manip.cpp
==============================================================================
--- trunk/libs/spirit/test/karma/format_manip.cpp (original)
+++ trunk/libs/spirit/test/karma/format_manip.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -49,7 +49,6 @@
 {
     using namespace boost::spirit;
     using namespace boost::spirit::ascii;
- using namespace boost::spirit::karma;
 
     namespace fusion = boost::fusion;
     using namespace boost::phoenix;
@@ -62,16 +61,16 @@
             char_[_1 = val('a')]
         ));
         BOOST_TEST(test( "a",
- format(char_[_1 = val('a')])
+ karma::format(char_[_1 = val('a')])
         ));
         BOOST_TEST(test( "a ",
- format_delimited(char_[_1 = val('a')], space)
+ karma::format_delimited(char_[_1 = val('a')], space)
         ));
         BOOST_TEST(test( "a",
- format(char_, 'a')
+ karma::format(char_, 'a')
         ));
         BOOST_TEST(test( "a ",
- format_delimited(char_, space, 'a')
+ karma::format_delimited(char_, space, 'a')
         ));
     }
 
@@ -80,19 +79,19 @@
             char_[_1 = val('a')] << char_[_1 = val('b')]
         ));
         BOOST_TEST(test( "ab",
- format(char_[_1 = val('a')] << char_[_1 = val('b')])
+ karma::format(char_[_1 = val('a')] << char_[_1 = val('b')])
         ));
         BOOST_TEST(test( "a b ",
- format_delimited(char_[_1 = val('a')] << char_[_1 = val('b')], space)
+ karma::format_delimited(char_[_1 = val('a')] << char_[_1 = val('b')], space)
         ));
 
         fusion::vector<char, char> t('a', 'b');
 
         BOOST_TEST(test( "ab",
- format(char_ << char_, t)
+ karma::format(char_ << char_, t)
         ));
         BOOST_TEST(test( "a b ",
- format_delimited(char_ << char_, space, t)
+ karma::format_delimited(char_ << char_, space, t)
         ));
     }
 
@@ -101,19 +100,19 @@
             char_[_1 = 'a'] << char_[_1 = 'b'] << char_[_1 = 'c']
         ));
         BOOST_TEST(test( "abc",
- format(char_('a') << char_('b') << char_('c'))
+ karma::format(char_('a') << char_('b') << char_('c'))
         ));
         BOOST_TEST(test( "a b c ",
- format_delimited(char_('a') << char_('b') << char_('c'), space)
+ karma::format_delimited(char_('a') << char_('b') << char_('c'), space)
         ));
 
         fusion::vector<char, char, char> t('a', 'b', 'c');
 
         BOOST_TEST(test( "abc",
- format(char_ << char_ << char_, t)
+ karma::format(char_ << char_ << char_, t)
         ));
         BOOST_TEST(test( "a b c ",
- format_delimited(char_ << char_ << char_, space, t)
+ karma::format_delimited(char_ << char_ << char_, space, t)
         ));
     }
 
@@ -125,10 +124,10 @@
         fusion::vector<char, int> t('a', 2);
 
         BOOST_TEST(test( "a2",
- format(char_ << int_, t)
+ karma::format(char_ << int_, t)
         ));
         BOOST_TEST(test( "a 2 ",
- format_delimited(char_ << int_, space, t)
+ karma::format_delimited(char_ << int_, space, t)
         ));
     }
     
@@ -143,10 +142,10 @@
             (*char_)[_1 = v]
         ));
         BOOST_TEST(test( "abc",
- format(*char_, v)
+ karma::format(*char_, v)
         ));
         BOOST_TEST(test( "a b c ",
- format_delimited(*char_, space, v)
+ karma::format_delimited(*char_, space, v)
         ));
 
         // output a comma separated list of vector elements
@@ -154,16 +153,16 @@
             (char_ % lit(", "))[_0 = fusion::make_single_view(v)]
         ));
         BOOST_TEST(test( "a, b, c",
- format((char_ % lit(", "))[_0 = fusion::make_single_view(v)])
+ karma::format((char_ % lit(", "))[_0 = fusion::make_single_view(v)])
         ));
         BOOST_TEST(test( "a , b , c ",
- format_delimited((char_ % ',')[_0 = fusion::make_single_view(v)], space)
+ karma::format_delimited((char_ % ',')[_0 = fusion::make_single_view(v)], space)
         ));
         BOOST_TEST(test( "a,b,c",
- format(char_ % ',', v)
+ karma::format(char_ % ',', v)
         ));
         BOOST_TEST(test( "a , b , c ",
- format_delimited(char_ % ',', space, v)
+ karma::format_delimited(char_ % ',', space, v)
         ));
 
         // output all elements of a list
@@ -174,16 +173,16 @@
 // (*char_)[_1 = l]
 // ));
 // BOOST_TEST(test( "abc",
-// format((*char_)[_1 = l])
+// karma::format((*char_)[_1 = l])
 // ));
 // BOOST_TEST(test( "a b c ",
-// format_delimited((*char_)[_1 = l], space)
+// karma::format_delimited((*char_)[_1 = l], space)
 // ));
         BOOST_TEST(test( "abc",
- format(*char_, l)
+ karma::format(*char_, l)
         ));
         BOOST_TEST(test( "a b c ",
- format_delimited(*char_, space, l)
+ karma::format_delimited(*char_, space, l)
         ));
     }
 

Modified: trunk/libs/spirit/test/karma/grammar.cpp
==============================================================================
--- trunk/libs/spirit/test/karma/grammar.cpp (original)
+++ trunk/libs/spirit/test/karma/grammar.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -21,12 +21,12 @@
 #include "test.hpp"
 
 using namespace spirit_test;
-using namespace boost::spirit::karma;
+using namespace boost::spirit;
 using namespace boost::spirit::ascii;
 
 typedef spirit_test::output_iterator<char>::type outiter_type;
 
-struct num_list : grammar<outiter_type, space_type>
+struct num_list : karma::grammar<outiter_type, space_type>
 {
     num_list() : num_list::base_type(start)
     {
@@ -37,7 +37,7 @@
         start = num1 << ',' << num2 << ',' << num3;
     }
 
- rule<outiter_type, space_type> start, num1, num2, num3;
+ karma::rule<outiter_type, space_type> start, num1, num2, num3;
 };
 
 int

Modified: trunk/libs/spirit/test/karma/grammar_fail.cpp
==============================================================================
--- trunk/libs/spirit/test/karma/grammar_fail.cpp (original)
+++ trunk/libs/spirit/test/karma/grammar_fail.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -17,19 +17,18 @@
 #include "test.hpp"
 
 using namespace boost::spirit;
-using namespace boost::spirit::karma;
 using namespace boost::spirit::ascii;
 
 typedef spirit_test::output_iterator<char>::type outiter_type;
 
-struct num_list : grammar<outiter_type, rule<outiter_type> >
+struct num_list : karma::grammar<outiter_type, karma::rule<outiter_type> >
 {
     num_list() : num_list::base_type(start)
     {
         start = int_(1) << ',' << int_(0);
     }
 
- rule<outiter_type, rule<outiter_type> > start;
+ karma::rule<outiter_type, karma::rule<outiter_type> > start;
 };
 
 // this test must fail compiling as the rule is used with an incompatible

Modified: trunk/libs/spirit/test/karma/pattern.cpp
==============================================================================
--- trunk/libs/spirit/test/karma/pattern.cpp (original)
+++ trunk/libs/spirit/test/karma/pattern.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -27,7 +27,6 @@
 {
     using namespace boost;
     using namespace boost::spirit;
- using namespace boost::spirit::karma;
     using namespace boost::spirit::ascii;
 
     typedef spirit_test::output_iterator<char>::type outiter_type;
@@ -36,15 +35,15 @@
     {
         using boost::phoenix::at_c;
 
- rule<outiter_type, fusion::vector<char, int, double>()> start;
+ karma::rule<outiter_type, fusion::vector<char, int, double>()> start;
         fusion::vector<char, int, double> vec('a', 10, 12.4);
 
         start %= char_ << int_ << double_;
         BOOST_TEST(test("a1012.4", start, vec));
 
- rule<outiter_type, char()> a;
- rule<outiter_type, int()> b;
- rule<outiter_type, double()> c;
+ karma::rule<outiter_type, char()> a;
+ karma::rule<outiter_type, int()> b;
+ karma::rule<outiter_type, double()> c;
 
         a %= char_ << eps;
         b %= int_;
@@ -62,15 +61,15 @@
     {
         using boost::phoenix::at_c;
 
- rule<outiter_type, space_type, fusion::vector<char, int, double>()> start;
+ karma::rule<outiter_type, space_type, fusion::vector<char, int, double>()> start;
         fusion::vector<char, int, double> vec('a', 10, 12.4);
 
         start %= char_ << int_ << double_;
         BOOST_TEST(test_delimited("a 10 12.4 ", start, vec, space));
 
- rule<outiter_type, space_type, char()> a;
- rule<outiter_type, space_type, int()> b;
- rule<outiter_type, space_type, double()> c;
+ karma::rule<outiter_type, space_type, char()> a;
+ karma::rule<outiter_type, space_type, int()> b;
+ karma::rule<outiter_type, space_type, double()> c;
 
         a %= char_ << eps;
         b %= int_;
@@ -87,14 +86,14 @@
 
     // locals test
     {
- rule<outiter_type, locals<std::string> > start;
+ karma::rule<outiter_type, locals<std::string> > start;
 
         start = string[_1 = "abc", _a = _1] << int_[_1 = 10] << string[_1 = _a];
         BOOST_TEST(test("abc10abc", start));
     }
 
     {
- rule<outiter_type, space_type, locals<std::string> > start;
+ karma::rule<outiter_type, space_type, locals<std::string> > start;
 
         start = string[_1 = "abc", _a = _1] << int_[_1 = 10] << string[_1 = _a];
         BOOST_TEST(test_delimited("abc 10 abc ", start, space));
@@ -104,7 +103,7 @@
     {
         typedef variant<char, int, double> var_type;
 
- rule<outiter_type, var_type()> d, start;
+ karma::rule<outiter_type, var_type()> d, start;
 
         d = start.alias(); // d will always track start
 
@@ -121,7 +120,7 @@
     {
         typedef variant<char, int, double> var_type;
 
- rule<outiter_type, space_type, var_type()> d, start;
+ karma::rule<outiter_type, space_type, var_type()> d, start;
 
         d = start.alias(); // d will always track start
 
@@ -138,7 +137,7 @@
     {
         typedef variant<char, int, double> var_type;
 
- rule<outiter_type, var_type()> d, start;
+ karma::rule<outiter_type, var_type()> d, start;
 
         d = start.alias(); // d will always track start
 
@@ -155,7 +154,7 @@
     {
         typedef variant<char, int, double> var_type;
 
- rule<outiter_type, space_type, var_type()> d, start;
+ karma::rule<outiter_type, space_type, var_type()> d, start;
 
         d = start.alias(); // d will always track start
 

Modified: trunk/libs/spirit/test/karma/pattern2.cpp
==============================================================================
--- trunk/libs/spirit/test/karma/pattern2.cpp (original)
+++ trunk/libs/spirit/test/karma/pattern2.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -27,14 +27,13 @@
 {
     using namespace boost;
     using namespace boost::spirit;
- using namespace boost::spirit::karma;
     using namespace boost::spirit::ascii;
 
     typedef spirit_test::output_iterator<char>::type outiter_type;
 
     // basic tests
     {
- rule<outiter_type> start;
+ karma::rule<outiter_type> start;
 
         start = char_[_1 = 'a'] << int_[_1 = 10] << double_[_1 = 12.4];
         BOOST_TEST(test("a1012.4", start));
@@ -42,7 +41,7 @@
         start = (char_ << int_ << double_)[_1 = 'a', _2 = 10, _3 = 12.4];
         BOOST_TEST(test("a1012.4", start));
 
- rule<outiter_type> a, b, c;
+ karma::rule<outiter_type> a, b, c;
         a = char_[_1 = 'a'];
         b = int_[_1 = 10];
         c = double_[_1 = 12.4];
@@ -53,7 +52,7 @@
 
     // basic tests with delimiter
     {
- rule<outiter_type, space_type> start;
+ karma::rule<outiter_type, space_type> start;
 
         start = char_[_1 = 'a'] << int_[_1 = 10] << double_[_1 = 12.4];
         BOOST_TEST(test_delimited("a 10 12.4 ", start, space));
@@ -61,7 +60,7 @@
         start = (char_ << int_ << double_)[_1 = 'a', _2 = 10, _3 = 12.4];
         BOOST_TEST(test_delimited("a 10 12.4 ", start, space));
 
- rule<outiter_type, space_type> a, b, c;
+ karma::rule<outiter_type, space_type> a, b, c;
         a = char_[_1 = 'a'];
         b = int_[_1 = 10];
         c = double_[_1 = 12.4];
@@ -74,7 +73,7 @@
     {
         typedef variant<char, int, double> var_type;
 
- rule<outiter_type, var_type()> start;
+ karma::rule<outiter_type, var_type()> start;
 
         start = (char_ | int_ | double_)[_1 = _r0];
 
@@ -89,7 +88,7 @@
     {
         typedef variant<char, int, double> var_type;
 
- rule<outiter_type, space_type, var_type()> start;
+ karma::rule<outiter_type, space_type, var_type()> start;
 
         start = (char_ | int_ | double_)[_1 = _r0];
 
@@ -102,7 +101,7 @@
     }
 
     {
- rule<outiter_type, void(char, int, double)> start;
+ karma::rule<outiter_type, void(char, int, double)> start;
         fusion::vector<char, int, double> vec('a', 10, 12.4);
 
         start = char_[_1 = _r1] << int_[_1 = _r2] << double_[_1 = _r3];
@@ -111,9 +110,9 @@
         start = (char_ << int_ << double_)[_1 = _r1, _2 = _r2, _3 = _r3];
         BOOST_TEST(test("a1012.4", start('a', 10, 12.4)));
 
- rule<outiter_type, void(char)> a;
- rule<outiter_type, void(int)> b;
- rule<outiter_type, void(double)> c;
+ karma::rule<outiter_type, void(char)> a;
+ karma::rule<outiter_type, void(int)> b;
+ karma::rule<outiter_type, void(double)> c;
 
         a = char_[_1 = _r1];
         b = int_[_1 = _r1];
@@ -123,7 +122,7 @@
     }
 
     {
- rule<outiter_type, space_type, void(char, int, double)> start;
+ karma::rule<outiter_type, space_type, void(char, int, double)> start;
         fusion::vector<char, int, double> vec('a', 10, 12.4);
 
         start = char_[_1 = _r1] << int_[_1 = _r2] << double_[_1 = _r3];
@@ -132,9 +131,9 @@
         start = (char_ << int_ << double_)[_1 = _r1, _2 = _r2, _3 = _r3];
         BOOST_TEST(test_delimited("a 10 12.4 ", start('a', 10, 12.4), space));
 
- rule<outiter_type, space_type, void(char)> a;
- rule<outiter_type, space_type, void(int)> b;
- rule<outiter_type, space_type, void(double)> c;
+ karma::rule<outiter_type, space_type, void(char)> a;
+ karma::rule<outiter_type, space_type, void(int)> b;
+ karma::rule<outiter_type, space_type, void(double)> c;
 
         a = char_[_1 = _r1];
         b = int_[_1 = _r1];
@@ -147,7 +146,7 @@
     {
         typedef variant<char, int, double> var_type;
 
- rule<outiter_type> a, b, c, start;
+ karma::rule<outiter_type> a, b, c, start;
 
         a = 'a';
         b = int_(10);
@@ -165,7 +164,7 @@
     {
         typedef variant<char, int, double> var_type;
 
- rule<outiter_type, space_type> a, b, c, start;
+ karma::rule<outiter_type, space_type> a, b, c, start;
 
         a = 'a';
         b = int_(10);

Modified: trunk/libs/spirit/test/karma/rule_fail.cpp
==============================================================================
--- trunk/libs/spirit/test/karma/rule_fail.cpp (original)
+++ trunk/libs/spirit/test/karma/rule_fail.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -16,7 +16,6 @@
 #include "test.hpp"
 
 using namespace boost::spirit;
-using namespace boost::spirit::karma;
 using namespace boost::spirit::ascii;
 
 // this test must fail compiling as the rule is used with an incompatible
@@ -27,7 +26,7 @@
 
     std::string generated;
 
- rule<outiter_type, rule<outiter_type> > def;
+ karma::rule<outiter_type, karma::rule<outiter_type> > def;
     def = int_(1) << ',' << int_(0);
 
     std::back_insert_iterator<std::string> outit(generated);

Modified: trunk/libs/spirit/test/lex/lexertl1.cpp
==============================================================================
--- trunk/libs/spirit/test/lex/lexertl1.cpp (original)
+++ trunk/libs/spirit/test/lex/lexertl1.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -51,12 +51,10 @@
     }
 
     {
- using namespace boost::spirit::ascii;
-
         // initialize lexer
         lexer_def lex;
         lex.self = token_def('+') | '-' | c_comment;
- lex.self += char_('*') | '/' | cpp_comment;
+ lex.self += lex::char_('*') | '/' | cpp_comment;
 
         // test lexer for two different input strings
         BOOST_TEST(test (lex, "/* this is a comment */", CCOMMENT));

Modified: trunk/libs/spirit/test/lex/state_switcher_test.cpp
==============================================================================
--- trunk/libs/spirit/test/lex/state_switcher_test.cpp (original)
+++ trunk/libs/spirit/test/lex/state_switcher_test.cpp 2009-09-16 13:46:46 EDT (Wed, 16 Sep 2009)
@@ -37,7 +37,6 @@
 {
     using namespace boost::spirit;
     using namespace boost::spirit::qi;
- using namespace boost::spirit::lex;
     using namespace spirit_test;
 
     typedef std::string::iterator base_iterator_type;


Boost-Commit list run by bdawes at acm.org, david.abrahams at rcn.com, gregod at cs.rpi.edu, cpdaniel at pacbell.net, john at johnmaddock.co.uk