Boost logo

Boost-Commit :

From: hartmut.kaiser_at_[hidden]
Date: 2008-05-30 14:38:12


Author: hkaiser
Date: 2008-05-30 14:38:11 EDT (Fri, 30 May 2008)
New Revision: 45959
URL: http://svn.boost.org/trac/boost/changeset/45959

Log:
Spirit.Lex: Fixed lexer examples failing to compile using gcc 4.x
Text files modified:
   trunk/boost/spirit/home/lex/tokenize_and_parse.hpp | 63 ++++++++++++++++++++++++++++++++++++++++
   trunk/boost/spirit/home/qi/nonterminal/grammar.hpp | 19 ------------
   trunk/libs/spirit/doc/qi_and_karma/generating.qbk | 2 -
   trunk/libs/spirit/doc/qi_and_karma/parsing.qbk | 26 +++++++++------
   trunk/libs/spirit/example/lex/print_numbers.cpp | 11 +++---
   trunk/libs/spirit/example/lex/static_lexer/word_count_static.cpp | 7 +--
   trunk/libs/spirit/example/lex/strip_comments.cpp | 5 +-
   trunk/libs/spirit/example/lex/word_count.cpp | 4 +-
   trunk/libs/spirit/test/qi/grammar_fail.cpp | 3 +
   9 files changed, 94 insertions(+), 46 deletions(-)

Modified: trunk/boost/spirit/home/lex/tokenize_and_parse.hpp
==============================================================================
--- trunk/boost/spirit/home/lex/tokenize_and_parse.hpp (original)
+++ trunk/boost/spirit/home/lex/tokenize_and_parse.hpp 2008-05-30 14:38:11 EDT (Fri, 30 May 2008)
@@ -9,6 +9,7 @@
 
 #include <boost/spirit/home/qi/meta_grammar.hpp>
 #include <boost/spirit/home/qi/skip.hpp>
+#include <boost/spirit/home/qi/nonterminal/grammar.hpp>
 #include <boost/spirit/home/support/unused.hpp>
 #include <boost/spirit/home/lex/lexer.hpp>
 #include <boost/mpl/assert.hpp>
@@ -81,6 +82,21 @@
     }
 
     ///////////////////////////////////////////////////////////////////////////
+ template <
+ typename Iterator, typename LexerExpr,
+ template <typename, typename> class Def
+ >
+ inline bool
+ tokenize_and_parse(Iterator& first, Iterator last, LexerExpr const& lex,
+ qi::grammar_class<Def>& gc)
+ {
+ typedef typename LexerExpr::iterator_type iterator_type;
+ Def<iterator_type, unused_type> def(gc);
+ qi::grammar<Def<iterator_type, unused_type> > g(def);
+ return tokenize_and_parse(first, last, lex, g);
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
     template <typename Iterator, typename LexerExpr, typename ParserExpr,
         typename Attribute>
     inline bool
@@ -107,6 +123,21 @@
     }
 
     ///////////////////////////////////////////////////////////////////////////
+ template <
+ typename Iterator, typename LexerExpr,
+ template <typename, typename> class Def, typename Attribute
+ >
+ inline bool
+ tokenize_and_parse(Iterator& first, Iterator last, LexerExpr const& lex,
+ qi::grammar_class<Def>& gc, Attribute& attr)
+ {
+ typedef typename LexerExpr::iterator_type iterator_type;
+ Def<iterator_type, unused_type> def(gc);
+ qi::grammar<Def<iterator_type, unused_type> > g(def);
+ return tokenize_and_parse(first, last, lex, g, attr);
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
     //
     // The tokenize_and_phrase_parse() function is one of the main Spirit API
     // functions. It simplifies using a lexer as the underlying token source
@@ -198,6 +229,21 @@
 
     ///////////////////////////////////////////////////////////////////////////
     template <
+ typename Iterator, typename LexerExpr,
+ template <typename, typename> class Def, typename Skipper
+ >
+ inline bool
+ tokenize_and_phrase_parse(Iterator& first, Iterator last,
+ LexerExpr const& lex, qi::grammar_class<Def>& gc, Skipper const& skipper)
+ {
+ typedef typename LexerExpr::iterator_type iterator_type;
+ Def<iterator_type, unused_type> def(gc);
+ qi::grammar<Def<iterator_type, unused_type> > g(def);
+ return tokenize_and_phrase_parse(first, last, lex, g, skipper);
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ template <
         typename Iterator, typename LexerExpr, typename ParserExpr,
         typename Attribute, typename Skipper
>
@@ -242,6 +288,23 @@
     }
 
     ///////////////////////////////////////////////////////////////////////////
+ template <
+ typename Iterator, typename LexerExpr,
+ template <typename, typename> class Def,
+ typename Skipper, typename Attribute
+ >
+ inline bool
+ tokenize_and_phrase_parse(Iterator& first, Iterator last,
+ LexerExpr const& lex, qi::grammar_class<Def>& gc, Skipper const& skipper,
+ Attribute& attr)
+ {
+ typedef typename LexerExpr::iterator_type iterator_type;
+ Def<iterator_type, unused_type> def(gc);
+ qi::grammar<Def<iterator_type, unused_type> > g(def);
+ return tokenize_and_phrase_parse(first, last, lex, g, skipper, attr);
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
     //
     // The tokenize() function is one of the main Spirit API functions. It
 // simplifies using a lexer to tokenize a given input sequence. Its main

Modified: trunk/boost/spirit/home/qi/nonterminal/grammar.hpp
==============================================================================
--- trunk/boost/spirit/home/qi/nonterminal/grammar.hpp (original)
+++ trunk/boost/spirit/home/qi/nonterminal/grammar.hpp 2008-05-30 14:38:11 EDT (Fri, 30 May 2008)
@@ -164,25 +164,6 @@
         friend struct nonterminal_director;
     };
 
-
- ///////////////////////////////////////////////////////////////////////////
- // Generator functions helping to construct a proper grammar object
- // instance
- ///////////////////////////////////////////////////////////////////////////
- template <typename Definition>
- inline grammar<Definition>
- make_parser(Definition const& def)
- {
- return grammar<Definition>(def);
- }
-
- template <typename Definition, typename Start>
- inline grammar<Definition>
- make_parser(Definition const& def, Start const& start)
- {
- return grammar<Definition>(def, start);
- }
-
     ///////////////////////////////////////////////////////////////////////////
     // The grammar_class template
     ///////////////////////////////////////////////////////////////////////////

Modified: trunk/libs/spirit/doc/qi_and_karma/generating.qbk
==============================================================================
--- trunk/libs/spirit/doc/qi_and_karma/generating.qbk (original)
+++ trunk/libs/spirit/doc/qi_and_karma/generating.qbk 2008-05-30 14:38:11 EDT (Fri, 30 May 2008)
@@ -18,7 +18,5 @@
 
 [heading The tokenize_and_phrase_parse() function]
 
-[heading The make_parser() function]
-
 [endsect]
 

Modified: trunk/libs/spirit/doc/qi_and_karma/parsing.qbk
==============================================================================
--- trunk/libs/spirit/doc/qi_and_karma/parsing.qbk (original)
+++ trunk/libs/spirit/doc/qi_and_karma/parsing.qbk 2008-05-30 14:38:11 EDT (Fri, 30 May 2008)
@@ -18,15 +18,21 @@
 information depending on the data passed by the parser and the
 hierarchical context of the parser it is attached to.
 
-Parsers come in different flavors. The Spirit library comes bundled with an extensive set of pre-defined parsers that perform various parsing tasks from the trivial to the complex. The parser, as a concept, has a public conceptual interface contract. Following the contract, anyone can write a conforming parser that will play along well with the library's predefined components. We shall provide a blueprint detailing the conceptual interface of the parser later.
-
-Clients of the library generally do not need to write their own hand-coded parsers at all. Spirit has an immense repertoire of pre-defined parsers covering all aspects of syntax and semantic analysis. We shall examine this repertoire of parsers in the following sections. In the rare case where a specific functionality is not available, it is extremely easy to write a user-defined parser. The ease in writing a parser entity is the main reason for Spirit's extensibility.
-
-
-
-
-
-
+Parsers come in different flavors. The Spirit library comes bundled with an
+extensive set of pre-defined parsers that perform various parsing tasks from
+the trivial to the complex. The parser, as a concept, has a public conceptual
+interface contract. Following the contract, anyone can write a conforming
+parser that will play along well with the library's predefined components. We
+shall provide a blueprint detailing the conceptual interface of the parser
+later.
+
+Clients of the library generally do not need to write their own hand-coded
+parsers at all. Spirit has an immense repertoire of pre-defined parsers
+covering all aspects of syntax and semantic analysis. We shall examine this
+repertoire of parsers in the following sections. In the rare case where a
+specific functionality is not available, it is extremely easy to write a
+user-defined parser. The ease in writing a parser entity is the main reason
+for Spirit's extensibility.
 
 [heading The API functions exposed by __qi__ ]
 
@@ -38,7 +44,5 @@
 
 [heading The tokenize_and_phrase_parse() function]
 
-[heading The make_parser() function]
-
 [endsect]
 

Modified: trunk/libs/spirit/example/lex/print_numbers.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/print_numbers.cpp (original)
+++ trunk/libs/spirit/example/lex/print_numbers.cpp 2008-05-30 14:38:11 EDT (Fri, 30 May 2008)
@@ -52,10 +52,11 @@
 ///////////////////////////////////////////////////////////////////////////////
 // Grammar definition
 ///////////////////////////////////////////////////////////////////////////////
-template <typename Iterator>
-struct print_numbers_grammar : grammar_def<Iterator>
+template <typename Iterator, typename Skipper>
+struct print_numbers_grammar : grammar_def<Iterator, Skipper>
 {
- print_numbers_grammar()
+ template <typename Class>
+ print_numbers_grammar(Class& self)
     {
         start = *( token(lex::min_token_id) [ std::cout << _1 << "\n" ]
                   | token(lex::min_token_id+1)
@@ -89,14 +90,14 @@
     // now we use the types defined above to create the lexer and grammar
     // object instances needed to invoke the parsing process
     print_numbers_tokens<lexer_type> print_tokens; // Our token definition
- print_numbers_grammar<iterator_type> def; // Our grammar definition
+ grammar_class<print_numbers_grammar> print; // Our grammar definition
 
     // Parsing is done based on the token stream, not the character
     // stream read from the input.
     std::string str (read_from_file(1 == argc ? "print_numbers.input" : argv[1]));
     base_iterator_type first = str.begin();
     bool r = tokenize_and_parse(first, str.end(), make_lexer(print_tokens),
- make_parser(def));
+ print);
 
     if (r) {
         std::cout << "-------------------------\n";

Modified: trunk/libs/spirit/example/lex/static_lexer/word_count_static.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/static_lexer/word_count_static.cpp (original)
+++ trunk/libs/spirit/example/lex/static_lexer/word_count_static.cpp 2008-05-30 14:38:11 EDT (Fri, 30 May 2008)
@@ -100,10 +100,9 @@
     char const* first = str.c_str();
     char const* last = &first[str.size()];
     
- // Parsing is done based on the the token stream, not the character
- // stream read from the input.
- bool r = tokenize_and_parse(first, last, make_lexer(word_count),
- make_parser(def));
+ // Parsing is done based on the token stream, not the character stream.
+ qi::grammar<word_count_grammar<iterator_type> > g(def);
+ bool r = tokenize_and_parse(first, last, make_lexer(word_count), g);
 
     if (r) { // success
         std::cout << "lines: " << def.l << ", words: " << def.w

Modified: trunk/libs/spirit/example/lex/strip_comments.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/strip_comments.cpp (original)
+++ trunk/libs/spirit/example/lex/strip_comments.cpp 2008-05-30 14:38:11 EDT (Fri, 30 May 2008)
@@ -141,8 +141,9 @@
     // stream read from the input.
     std::string str (read_from_file(1 == argc ? "strip_comments.input" : argv[1]));
     base_iterator_type first = str.begin();
- bool r = tokenize_and_parse(first, str.end(), make_lexer(strip_comments),
- make_parser(def));
+
+ qi::grammar<strip_comments_grammar<iterator_type> > g(def);
+ bool r = tokenize_and_parse(first, str.end(), make_lexer(strip_comments), g);
 
     if (r) {
         std::cout << "-------------------------\n";

Modified: trunk/libs/spirit/example/lex/word_count.cpp
==============================================================================
--- trunk/libs/spirit/example/lex/word_count.cpp (original)
+++ trunk/libs/spirit/example/lex/word_count.cpp 2008-05-30 14:38:11 EDT (Fri, 30 May 2008)
@@ -156,8 +156,8 @@
     // stream read from the input. The function `tokenize_and_parse()` wraps
     // the passed iterator range `[first, last)` by the lexical analyzer and
 // uses its exposed iterators to parse the token stream.
- bool r = tokenize_and_parse(first, last, make_lexer(word_count),
- make_parser(def));
+ qi::grammar<word_count_grammar<iterator_type> > g(def);
+ bool r = tokenize_and_parse(first, last, make_lexer(word_count), g);
 
     if (r) {
         std::cout << "lines: " << def.l << ", words: " << def.w

Modified: trunk/libs/spirit/test/qi/grammar_fail.cpp
==============================================================================
--- trunk/libs/spirit/test/qi/grammar_fail.cpp (original)
+++ trunk/libs/spirit/test/qi/grammar_fail.cpp 2008-05-30 14:38:11 EDT (Fri, 30 May 2008)
@@ -35,7 +35,8 @@
     char const* end = &input[strlen(input)+1];
 
     num_list def;
- bool r = phrase_parse(input, end, make_parser(def),
+ grammar<num_list> g(def);
+ bool r = phrase_parse(input, end, g,
         space | ('%' >> *~char_('\n') >> '\n'));
 
     return 0;


Boost-Commit list run by bdawes at acm.org, david.abrahams at rcn.com, gregod at cs.rpi.edu, cpdaniel at pacbell.net, john at johnmaddock.co.uk