Boost-Commit:

From: hartmut.kaiser_at_[hidden]
Date: 2008-07-12 22:32:30


Author: hkaiser
Date: 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
New Revision: 47373
URL: http://svn.boost.org/trac/boost/changeset/47373

Log:
Spirit.Lex: Fixed examples to conform to new qi::grammar class
Text files modified:
   trunk/boost/spirit/home/lex/tokenize_and_parse.hpp | 62 ----------------------------------------
   trunk/libs/spirit/example/lex/example1.cpp | 6 +-
   trunk/libs/spirit/example/lex/example2.cpp | 14 ++++----
   trunk/libs/spirit/example/lex/example3.cpp | 16 +++++-----
   trunk/libs/spirit/example/lex/example4.cpp | 6 +-
   trunk/libs/spirit/example/lex/example5.cpp | 8 ++--
   trunk/libs/spirit/example/lex/example6.cpp | 10 ++++-
   trunk/libs/spirit/example/lex/print_numbers.cpp | 10 +++---
   trunk/libs/spirit/example/lex/static_lexer/word_count_static.cpp | 13 +++----
   trunk/libs/spirit/example/lex/strip_comments.cpp | 8 ++--
   trunk/libs/spirit/example/lex/word_count.cpp | 13 +++----
   trunk/libs/spirit/example/qi/calc3_lexer.cpp | 5 +-
   12 files changed, 55 insertions(+), 116 deletions(-)
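
The change applied throughout the examples below follows a single pattern: a parser used to be written as a grammar_def<> and then wrapped into a separate grammar<> (or registered via a grammar_class<>) object before use; with the new qi::grammar class the definition derives from grammar<> directly, hands its start rule to the base class in the constructor, and is itself usable as a parser. A condensed before/after sketch (my_grammar, the token names, and the rule body are illustrative placeholders, not code from this commit):

    // old style: a grammar_def<> plus a separate wrapper object
    //     my_grammar<iterator_type> def(tokens);
    //     grammar<my_grammar<iterator_type> > g(def);

    // new style: the definition itself is the grammar
    template <typename Iterator>
    struct my_grammar : grammar<Iterator>           // was: grammar_def<Iterator>
    {
        template <typename TokenDef>
        my_grammar(TokenDef const& tok)
          : my_grammar::base_type(start)            // name the start rule here
        {
            start = *tok.identifier;                // placeholder rule body
        }
        rule<Iterator> start;
    };

    my_grammar<iterator_type> g(tokens);            // directly usable as a parser
    bool r = tokenize_and_parse(first, last, make_lexer(tokens), g);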

Modified: trunk/boost/spirit/home/lex/tokenize_and_parse.hpp
==============================================================================
--- trunk/boost/spirit/home/lex/tokenize_and_parse.hpp (original)
+++ trunk/boost/spirit/home/lex/tokenize_and_parse.hpp 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
@@ -82,21 +82,6 @@
     }
 
     ///////////////////////////////////////////////////////////////////////////
-    template <
-        typename Iterator, typename LexerExpr,
-        template <typename, typename> class Def
-    >
-    inline bool
-    tokenize_and_parse(Iterator& first, Iterator last, LexerExpr const& lex,
-        qi::grammar_class<Def>& gc)
-    {
-        typedef typename LexerExpr::iterator_type iterator_type;
-        Def<iterator_type, unused_type> def(gc);
-        qi::grammar<Def<iterator_type, unused_type> > g(def);
-        return tokenize_and_parse(first, last, lex, g);
-    }
-
-    ///////////////////////////////////////////////////////////////////////////
     template <typename Iterator, typename LexerExpr, typename ParserExpr,
         typename Attribute>
     inline bool
@@ -123,21 +108,6 @@
     }
 
     ///////////////////////////////////////////////////////////////////////////
-    template <
-        typename Iterator, typename LexerExpr,
-        template <typename, typename> class Def, typename Attribute
-    >
-    inline bool
-    tokenize_and_parse(Iterator& first, Iterator last, LexerExpr const& lex,
-        qi::grammar_class<Def>& gc, Attribute& attr)
-    {
-        typedef typename LexerExpr::iterator_type iterator_type;
-        Def<iterator_type, unused_type> def(gc);
-        qi::grammar<Def<iterator_type, unused_type> > g(def);
-        return tokenize_and_parse(first, last, lex, g, attr);
-    }
-
-    ///////////////////////////////////////////////////////////////////////////
     //
     // The tokenize_and_phrase_parse() function is one of the main Spirit API
     // functions. It simplifies using a lexer as the underlying token source
@@ -229,21 +199,6 @@
 
     ///////////////////////////////////////////////////////////////////////////
     template <
-        typename Iterator, typename LexerExpr,
-        template <typename, typename> class Def, typename Skipper
-    >
-    inline bool
-    tokenize_and_phrase_parse(Iterator& first, Iterator last,
-        LexerExpr const& lex, qi::grammar_class<Def>& gc, Skipper const& skipper)
-    {
-        typedef typename LexerExpr::iterator_type iterator_type;
-        Def<iterator_type, unused_type> def(gc);
-        qi::grammar<Def<iterator_type, unused_type> > g(def);
-        return tokenize_and_phrase_parse(first, last, lex, g, skipper);
-    }
-
-    ///////////////////////////////////////////////////////////////////////////
-    template <
         typename Iterator, typename LexerExpr, typename ParserExpr,
         typename Attribute, typename Skipper
     >
@@ -288,23 +243,6 @@
     }
 
     ///////////////////////////////////////////////////////////////////////////
-    template <
-        typename Iterator, typename LexerExpr,
-        template <typename, typename> class Def,
-        typename Skipper, typename Attribute
-    >
-    inline bool
-    tokenize_and_phrase_parse(Iterator& first, Iterator last,
-        LexerExpr const& lex, qi::grammar_class<Def>& gc, Skipper const& skipper,
-        Attribute& attr)
-    {
-        typedef typename LexerExpr::iterator_type iterator_type;
-        Def<iterator_type, unused_type> def(gc);
-        qi::grammar<Def<iterator_type, unused_type> > g(def);
-        return tokenize_and_phrase_parse(first, last, lex, g, skipper, attr);
-    }
-
-    ///////////////////////////////////////////////////////////////////////////
     //
     // The tokenize() function is one of the main Spirit API functions. It
     // simplifies using a lexer to tokenize a given input sequence. It's main
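
With the four overloads above removed, the API functions no longer accept a qi::grammar_class<>; callers construct the grammar themselves and pass it in through the remaining ParserExpr overloads. For tokenize_and_phrase_parse() a migrated call site looks roughly like this (names are illustrative; the skipper follows the in_state_skipper pattern used in example1.cpp below):

    my_grammar<iterator_type> g(tokens);            // grammar built directly
    bool r = tokenize_and_phrase_parse(first, last, make_lexer(tokens), g,
        skipper);                                   // e.g. a whitespace token_set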

Modified: trunk/libs/spirit/example/lex/example1.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example1.cpp (original)
+++ trunk/libs/spirit/example/lex/example1.cpp 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
@@ -61,10 +61,11 @@
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator>
 struct example1_grammar
-  : grammar_def<Iterator, in_state_skipper<token_def<> > >
+  : grammar<Iterator, in_state_skipper<token_def<> > >
 {
     template <typename TokenDef>
     example1_grammar(TokenDef const& tok)
+      : example1_grammar::base_type(start)
     {
         start = '{' >> *(tok.identifier >> -char_(',')) >> '}';
     }
@@ -97,10 +98,9 @@
     // now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process
     example1_tokens tokens; // Our token definition
-    example1_grammar def (tokens);              // Our grammar definition
+    example1_grammar calc(tokens);              // Our grammar definition
 
     lexer<example1_tokens> lex(tokens); // Our lexer
-    grammar<example1_grammar> calc(def);        // Our parser
 
     std::string str (read_from_file("example1.input"));
 

Modified: trunk/libs/spirit/example/lex/example2.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example2.cpp (original)
+++ trunk/libs/spirit/example/lex/example2.cpp 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
@@ -63,11 +63,12 @@
 // Grammar definition
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator>
-struct example2_grammar : grammar_def<Iterator>
+struct example2_grammar : grammar<Iterator>
 {
     template <typename TokenDef>
     example2_grammar(TokenDef const& tok)
-      : paragraphs(0), commands(0), questions(0), statements(0)
+      : example2_grammar::base_type(story),
+        paragraphs(0), commands(0), questions(0), statements(0)
     {
         story
             = +paragraph
@@ -131,10 +132,9 @@
     // now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process
     example2_tokens tokens; // Our token definition
-    example2_grammar def (tokens);              // Our grammar definition
+    example2_grammar calc(tokens);              // Our grammar definition
 
     lexer<example2_tokens> lex(tokens); // Our lexer
-    grammar<example2_grammar> calc(def, def.story);   // Our grammar
 
     std::string str (read_from_file("example2.input"));
 
@@ -153,9 +153,9 @@
         std::cout << "-------------------------\n";
         std::cout << "Parsing succeeded\n";
         std::cout << "There were "
-                  << def.commands << " commands, "
-                  << def.questions << " questions, and "
-                  << def.statements << " statements.\n";
+                  << calc.commands << " commands, "
+                  << calc.questions << " questions, and "
+                  << calc.statements << " statements.\n";
         std::cout << "-------------------------\n";
     }
     else

Modified: trunk/libs/spirit/example/lex/example3.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example3.cpp (original)
+++ trunk/libs/spirit/example/lex/example3.cpp 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
@@ -73,10 +73,11 @@
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator, typename Lexer>
 struct example3_grammar
-  : grammar_def<Iterator, in_state_skipper<typename Lexer::token_set> >
+  : grammar<Iterator, in_state_skipper<typename Lexer::token_set> >
 {
     template <typename TokenDef>
     example3_grammar(TokenDef const& tok)
+      : example3_grammar::base_type(start)
     {
         start
             = +(couplet | tok.ellipses)
@@ -105,17 +106,17 @@
 {
     // iterator type used to expose the underlying input stream
     typedef std::string::iterator base_iterator_type;
-
+
     // This is the token type to return from the lexer iterator
     typedef lexertl_token<base_iterator_type> token_type;
-
+
     // This is the lexer type to use to tokenize the input.
     // Here we use the lexertl based lexer engine.
     typedef lexertl_lexer<token_type> lexer_type;
-
+
     // This is the token definition type (derived from the given lexer type).
     typedef example3_tokens<lexer_type> example3_tokens;
-
+
     // this is the iterator type exposed by the lexer
     typedef lexer<example3_tokens>::iterator_type iterator_type;
 
@@ -125,10 +126,9 @@
     // now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process
     example3_tokens tokens; // Our token definition
-    example3_grammar def (tokens);              // Our grammar definition
+    example3_grammar calc(tokens);              // Our grammar definition
 
     lexer<example3_tokens> lex(tokens); // Our lexer
-    grammar<example3_grammar> calc(def);        // Our grammar
 
     std::string str (read_from_file("example3.input"));
 
@@ -137,7 +137,7 @@
     std::string::iterator it = str.begin();
     iterator_type iter = lex.begin(it, str.end());
     iterator_type end = lex.end();
-
+
     // Parsing is done based on the the token stream, not the character
     // stream read from the input.
     // Note, how we use the token_set defined above as the skip parser.

Modified: trunk/libs/spirit/example/lex/example4.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example4.cpp (original)
+++ trunk/libs/spirit/example/lex/example4.cpp 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
@@ -101,10 +101,11 @@
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator, typename Lexer>
 struct example4_grammar
-  : grammar_def<Iterator, in_state_skipper<typename Lexer::token_set> >
+  : grammar<Iterator, in_state_skipper<typename Lexer::token_set> >
 {
     template <typename TokenDef>
     example4_grammar(TokenDef const& tok)
+      : example4_grammar::base_type(program)
     {
         program
             = +block
@@ -202,10 +203,9 @@
     // now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process
     example4_tokens tokens; // Our token definition
-    example4_grammar def (tokens);              // Our grammar definition
+    example4_grammar calc(tokens);              // Our grammar definition
 
     lexer<example4_tokens> lex(tokens); // Our lexer
-    grammar<example4_grammar> calc(def, def.program);   // Our grammar
 
     std::string str (read_from_file("example4.input"));
 

Modified: trunk/libs/spirit/example/lex/example5.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example5.cpp (original)
+++ trunk/libs/spirit/example/lex/example5.cpp 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
@@ -100,10 +100,11 @@
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator, typename Lexer>
 struct example5_base_grammar
-  : grammar_def<Iterator, in_state_skipper<typename Lexer::token_set> >
+  : grammar<Iterator, in_state_skipper<typename Lexer::token_set> >
 {
     template <typename TokenDef>
     example5_base_grammar(TokenDef const& tok)
+      : example5_base_grammar::base_type(program)
     {
         program
             = +block
@@ -150,7 +151,7 @@
     }
 
     typedef
-        grammar_def<Iterator, in_state_skipper<typename Lexer::token_set> >
+        grammar<Iterator, in_state_skipper<typename Lexer::token_set> >
     base_type;
     typedef typename base_type::skipper_type skipper_type;
     
@@ -245,10 +246,9 @@
     // now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process
     example5_tokens tokens; // Our token definition
-    example5_grammar def (tokens);              // Our grammar definition
+    example5_grammar calc(tokens);              // Our grammar definition
 
     lexer<example5_tokens> lex(tokens); // Our lexer
-    grammar<example5_grammar> calc(def, def.program);   // Our grammar
 
     std::string str (read_from_file("example5.input"));
 

Modified: trunk/libs/spirit/example/lex/example6.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/example6.cpp (original)
+++ trunk/libs/spirit/example/lex/example6.cpp 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
@@ -122,10 +122,15 @@
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator, typename Lexer>
 struct example6_grammar
-  : grammar_def<Iterator, in_state_skipper<typename Lexer::token_set> >
+  : grammar<Iterator, in_state_skipper<typename Lexer::token_set> >
 {
+    typedef
+        grammar<Iterator, in_state_skipper<typename Lexer::token_set> >
+        base_type;
+
     template <typename TokenDef>
     example6_grammar(TokenDef const& tok)
+      : base_type(program)
     {
         program
             = +block
@@ -225,10 +230,9 @@
     // now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process
     example6_tokens tokens; // Our token definition
-    example6_grammar def (tokens);              // Our grammar definition
+    example6_grammar calc(tokens);              // Our grammar definition
 
     lexer<example6_tokens> lex(tokens); // Our lexer
-    grammar<example6_grammar> calc(def, def.program);   // Our grammar
 
     std::string str (read_from_file("example6.input"));
 

Modified: trunk/libs/spirit/example/lex/print_numbers.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/print_numbers.cpp (original)
+++ trunk/libs/spirit/example/lex/print_numbers.cpp 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
@@ -52,11 +52,11 @@
 ///////////////////////////////////////////////////////////////////////////////
 // Grammar definition
 ///////////////////////////////////////////////////////////////////////////////
-template <typename Iterator, typename Skipper>
-struct print_numbers_grammar : grammar_def<Iterator, Skipper>
+template <typename Iterator>
+struct print_numbers_grammar : grammar<Iterator>
 {
-    template <typename Class>
-    print_numbers_grammar(Class& self)
+    print_numbers_grammar()
+      : grammar<Iterator>(start)
     {
         start = *( token(lex::min_token_id) [ std::cout << _1 << "\n" ]
                   | token(lex::min_token_id+1)
@@ -90,7 +90,7 @@
     // now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process
     print_numbers_tokens<lexer_type> print_tokens; // Our token definition
-    grammar_class<print_numbers_grammar> print;          // Our grammar definition
+    print_numbers_grammar<iterator_type> print;          // Our grammar definition
 
     // Parsing is done based on the the token stream, not the character
     // stream read from the input.
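
Note that this example exercises the other half of the change: with qi::grammar_class<> gone, the grammar template is instantiated with the token iterator type directly and the Class& self constructor indirection disappears. In sketch form (the tokenize_and_parse() call is illustrative, following the pattern of the other examples):

    // old: grammar_class<print_numbers_grammar> print;
    print_numbers_grammar<iterator_type> print;     // instantiate directly
    bool r = tokenize_and_parse(first, last, make_lexer(print_tokens), print);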

Modified: trunk/libs/spirit/example/lex/static_lexer/word_count_static.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/static_lexer/word_count_static.cpp (original)
+++ trunk/libs/spirit/example/lex/static_lexer/word_count_static.cpp 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
@@ -44,11 +44,11 @@
 // definition class instance passed to the constructor to allow accessing the
 // embedded token_def<> instances.
 template <typename Iterator>
-struct word_count_grammar : grammar_def<Iterator>
+struct word_count_grammar : grammar<Iterator>
 {
     template <typename TokenDef>
     word_count_grammar(TokenDef const& tok)
-      : c(0), w(0), l(0)
+      : grammar<Iterator>(start), c(0), w(0), l(0)
     {
         using boost::spirit::arg_names::_1;
         using boost::phoenix::ref;
@@ -92,8 +92,8 @@
 
     // Now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process.
-    word_count_tokens<lexer_type> word_count;           // Our token definition
-    word_count_grammar<iterator_type> def (word_count); // Our grammar definition
+    word_count_tokens<lexer_type> word_count;          // Our token definition
+    word_count_grammar<iterator_type> g (word_count);  // Our grammar definition
 
     // Read in the file into memory.
     std::string str (read_from_file(1 == argc ? "word_count.input" : argv[1]));
@@ -101,12 +101,11 @@
     char const* last = &first[str.size()];
     
     // Parsing is done based on the the token stream, not the character stream.
-    qi::grammar<word_count_grammar<iterator_type> > g(def);
     bool r = tokenize_and_parse(first, last, make_lexer(word_count), g);
 
     if (r) { // success
- std::cout << "lines: " << def.l << ", words: " << def.w
- << ", characters: " << def.c << "\n";
+ std::cout << "lines: " << g.l << ", words: " << g.w
+ << ", characters: " << g.c << "\n";
     }
     else {
         std::string rest(first, last);

Modified: trunk/libs/spirit/example/lex/strip_comments.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/strip_comments.cpp (original)
+++ trunk/libs/spirit/example/lex/strip_comments.cpp 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
@@ -95,10 +95,11 @@
 // Grammar definition
 ///////////////////////////////////////////////////////////////////////////////
 template <typename Iterator>
-struct strip_comments_grammar : grammar_def<Iterator>
+struct strip_comments_grammar : grammar<Iterator>
 {
     template <typename TokenDef>
     strip_comments_grammar(TokenDef const& tok)
+      : grammar<Iterator>(start)
     {
         // The in_state("COMMENT")[...] parser component switches the lexer
         // state to be 'COMMENT' during the matching of the embedded parser.
@@ -134,15 +135,14 @@
 
     // now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process
-    strip_comments_tokens<lexer_type> strip_comments;   // Our token definition
-    strip_comments_grammar<iterator_type> def (strip_comments); // Our grammar definition
+    strip_comments_tokens<lexer_type> strip_comments;  // Our token definition
+    strip_comments_grammar<iterator_type> g (strip_comments);   // Our grammar definition
 
     // Parsing is done based on the the token stream, not the character
     // stream read from the input.
     std::string str (read_from_file(1 == argc ? "strip_comments.input" : argv[1]));
     base_iterator_type first = str.begin();
 
-    qi::grammar<strip_comments_grammar<iterator_type> > g(def);
     bool r = tokenize_and_parse(first, str.end(), make_lexer(strip_comments), g);
 
     if (r) {

Modified: trunk/libs/spirit/example/lex/word_count.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/word_count.cpp (original)
+++ trunk/libs/spirit/example/lex/word_count.cpp 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
@@ -99,11 +99,11 @@
 ///////////////////////////////////////////////////////////////////////////////
 //[wcp_grammar_definition
 template <typename Iterator>
-struct word_count_grammar : grammar_def<Iterator>
+struct word_count_grammar : grammar<Iterator>
 {
     template <typename TokenDef>
     word_count_grammar(TokenDef const& tok)
-      : c(0), w(0), l(0)
+      : grammar<Iterator>(start), c(0), w(0), l(0)
     {
         using boost::phoenix::ref;
         using boost::phoenix::size;
@@ -144,8 +144,8 @@
 
     // now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process
-    word_count_tokens<lexer_type> word_count;           // Our token definition
-    word_count_grammar<iterator_type> def (word_count); // Our grammar definition
+    word_count_tokens<lexer_type> word_count;          // Our token definition
+    word_count_grammar<iterator_type> g (word_count);  // Our grammar definition
 
     // read in the file int memory
     std::string str (read_from_file(1 == argc ? "word_count.input" : argv[1]));
@@ -156,12 +156,11 @@
     // stream read from the input. The function `tokenize_and_parse()` wraps
     // the passed iterator range `[first, last)` by the lexical analyzer and
     // uses its exposed iterators to parse the toke stream.
-    qi::grammar<word_count_grammar<iterator_type> > g(def);
     bool r = tokenize_and_parse(first, last, make_lexer(word_count), g);
 
     if (r) {
- std::cout << "lines: " << def.l << ", words: " << def.w
- << ", characters: " << def.c << "\n";
+ std::cout << "lines: " << g.l << ", words: " << g.w
+ << ", characters: " << g.c << "\n";
     }
     else {
         std::string rest(first, last);

Modified: trunk/libs/spirit/example/qi/calc3_lexer.cpp
==============================================================================
--- trunk/libs/spirit/example/qi/calc3_lexer.cpp (original)
+++ trunk/libs/spirit/example/qi/calc3_lexer.cpp 2008-07-12 22:32:29 EDT (Sat, 12 Jul 2008)
@@ -144,7 +144,7 @@
     
     // This is the lexer type to use to tokenize the input.
     // Here we use the lexertl based lexer engine.
-    typedef lexertl_lexer<base_iterator_type, token_type> lexer_type;
+    typedef lexertl_lexer<token_type> lexer_type;
     
     // This is the token definition type (derived from the given lexer type).
     typedef calculator_tokens<lexer_type> calculator_tokens;
@@ -158,10 +158,9 @@
     // now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process
     calculator_tokens tokens; // Our token definition
-    calculator def (tokens);                    // Our grammar definition
+    calculator calc(tokens);                    // Our grammar definition
 
     lexer<calculator_tokens> lex(tokens); // Our lexer
-    calculator calc;                            // Our grammar
 
     // get input line by line and feed the parser to evaluate the expressions
     // read in from the input
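
One additional change in this file: lexertl_lexer<> is now instantiated with the token type only, the base iterator type being carried by the token itself. A sketch of how the two typedefs pair up after the change (same shape as in example3.cpp above):

    typedef lexertl_token<base_iterator_type> token_type;  // token knows the iterator
    typedef lexertl_lexer<token_type> lexer_type;          // lexer takes just the token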

