Boost-Commit :
Subject: [Boost-commit] svn:boost r72114 - in trunk: boost/spirit/home/lex/lexer boost/spirit/home/lex/lexer/lexertl boost/spirit/home/qi/detail boost/spirit/include libs/spirit/doc libs/spirit/test libs/spirit/test/lex
From: hartmut.kaiser_at_[hidden]
Date: 2011-05-22 21:58:22
Author: hkaiser
Date: 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
New Revision: 72114
URL: http://svn.boost.org/trac/boost/changeset/72114
Log:
Spirit: added position_token type
Added:
trunk/boost/spirit/home/lex/lexer/lexertl/position_token.hpp (contents, props changed)
trunk/boost/spirit/include/lex_lexertl_position_token.hpp (contents, props changed)
trunk/boost/spirit/include/lex_lexertl_token.hpp (contents, props changed)
trunk/libs/spirit/test/lex/token_iterpair.cpp (contents, props changed)
trunk/libs/spirit/test/lex/token_moretypes.cpp (contents, props changed)
trunk/libs/spirit/test/lex/token_omit.cpp (contents, props changed)
trunk/libs/spirit/test/lex/token_onetype.cpp (contents, props changed)
Text files modified:
trunk/boost/spirit/home/lex/lexer/lexer.hpp | 7 +
trunk/boost/spirit/home/lex/lexer/lexertl/functor_data.hpp | 142 ++++++++++++++++++++++++++++++++++++++++
trunk/boost/spirit/home/lex/lexer/lexertl/lexer.hpp | 8 +
trunk/boost/spirit/home/lex/lexer/lexertl/token.hpp | 48 +++++++------
trunk/boost/spirit/home/qi/detail/assign_to.hpp | 4
trunk/libs/spirit/doc/what_s_new.qbk | 9 ++
trunk/libs/spirit/test/Jamfile | 14 ++-
trunk/libs/spirit/test/lex/lexertl1.cpp | 44 ++++++++++++
8 files changed, 246 insertions(+), 30 deletions(-)
Modified: trunk/boost/spirit/home/lex/lexer/lexer.hpp
==============================================================================
--- trunk/boost/spirit/home/lex/lexer/lexer.hpp (original)
+++ trunk/boost/spirit/home/lex/lexer/lexer.hpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -265,6 +265,13 @@
return *this;
}
+ // explicitly tell the lexer that the given state will be defined
+ // (useful in conjunction with "*")
+ std::size_t add_state(char_type const* state = 0)
+ {
+ return def.add_state(state ? state : def.initial_state().c_str());
+ }
+
adder add;
pattern_adder add_pattern;
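
The new add_state() member allows a lexer definition to announce its states up front, which is useful when token definitions are later associated with all states at once via the "*" pseudo-state. A minimal sketch of the intended usage (the lexer and token names are illustrative; the token_iterpair.cpp test added below shows the complete pattern):

    #include <boost/spirit/include/lex_lexertl.hpp>
    namespace lex = boost::spirit::lex;

    template <typename Lexer>
    struct states_lexer : lex::lexer<Lexer>
    {
        states_lexer()
        {
            this->self.add_state();          // declares the initial state
            this->self.add_state("INT");     // declare additional states up front
            this->self.add_state("DOUBLE");

            number = "[1-9][0-9]*";
            this->self("*") = number;        // associate with every declared state
        }
        lex::token_def<lex::omit> number;
    };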
Modified: trunk/boost/spirit/home/lex/lexer/lexertl/functor_data.hpp
==============================================================================
--- trunk/boost/spirit/home/lex/lexer/lexertl/functor_data.hpp (original)
+++ trunk/boost/spirit/home/lex/lexer/lexertl/functor_data.hpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -10,6 +10,7 @@
#pragma once
#endif
+#include <boost/spirit/home/qi/detail/assign_to.hpp>
#include <boost/spirit/home/support/detail/lexer/generator.hpp>
#include <boost/spirit/home/support/detail/lexer/rules.hpp>
#include <boost/spirit/home/support/detail/lexer/state_machine.hpp>
@@ -17,6 +18,7 @@
#include <boost/spirit/home/lex/lexer/lexertl/semantic_action_data.hpp>
#include <boost/spirit/home/lex/lexer/lexertl/wrap_action.hpp>
#include <boost/mpl/bool.hpp>
+#include <boost/optional.hpp>
namespace boost { namespace spirit { namespace lex { namespace lexertl
{
@@ -401,6 +403,146 @@
// silence MSVC warning C4512: assignment operator could not be generated
data& operator= (data const&);
};
+
+ ///////////////////////////////////////////////////////////////////////
+ // does support lexer semantic actions, may support state, is used for
+ // position_token exposing exactly one type
+ template <typename Iterator, typename HasState, typename TokenValue>
+ class data<Iterator, mpl::true_, HasState, boost::optional<TokenValue> >
+ : public data<Iterator, mpl::false_, HasState, TokenValue>
+ {
+ public:
+ typedef semantic_actions<Iterator, HasState, data>
+ semantic_actions_type;
+
+ protected:
+ typedef data<Iterator, mpl::false_, HasState, TokenValue> base_type;
+ typedef typename base_type::char_type char_type;
+ typedef typename semantic_actions_type::functor_wrapper_type
+ functor_wrapper_type;
+
+ public:
+ typedef Iterator base_iterator_type;
+ typedef boost::optional<TokenValue> token_value_type;
+ typedef boost::optional<TokenValue> const& get_value_type;
+ typedef typename base_type::state_type state_type;
+ typedef typename base_type::state_name_type state_name_type;
+
+ typedef detail::wrap_action<functor_wrapper_type
+ , Iterator, data, std::size_t> wrap_action_type;
+
+ template <typename IterData>
+ data (IterData const& data_, Iterator& first, Iterator const& last)
+ : base_type(data_, first, last)
+ , actions_(data_.actions_), hold_()
+ , has_value_(false), has_hold_(false)
+ {
+ spirit::traits::assign_to(first, last, value_);
+ has_value_ = true;
+ }
+
+ // invoke attached semantic actions, if defined
+ BOOST_SCOPED_ENUM(pass_flags) invoke_actions(std::size_t state
+ , std::size_t& id, std::size_t unique_id, Iterator& end)
+ {
+ return actions_.invoke_actions(state, id, unique_id, end, *this);
+ }
+
+ // The function less() is used by the implementation of the support
+ // function lex::less(). Its functionality is equivalent to flex'
+ // function yyless(): it returns an iterator positioned to the
+ // nth input character beyond the current start iterator (i.e. by
+ // assigning the return value to the placeholder '_end' it is
+ // possible to return all but the first n characters of the current
+ // token back to the input stream).
+ Iterator const& less(Iterator& it, int n)
+ {
+ it = this->get_first();
+ std::advance(it, n);
+ return it;
+ }
+
+ // The function more() is used by the implementation of the support
+ // function lex::more(). Its functionality is equivalent to flex'
+ // function yymore(): it tells the lexer that the next time it
+ // matches a rule, the corresponding token should be appended onto
+ // the current token value rather than replacing it.
+ void more()
+ {
+ hold_ = this->get_first();
+ has_hold_ = true;
+ }
+
+ // The function lookahead() is used by the implementation of the
+ // support function lex::lookahead. It can be used to implement
+ // lookahead for lexer engines not supporting constructs like flex'
+ // a/b (match a, but only when followed by b)
+ bool lookahead(std::size_t id, std::size_t state = std::size_t(~0))
+ {
+ Iterator end = end_;
+ std::size_t unique_id = boost::lexer::npos;
+ bool bol = this->bol_;
+
+ if (std::size_t(~0) == state)
+ state = this->state_;
+
+ typedef basic_iterator_tokeniser<Iterator> tokenizer;
+ return id == tokenizer::next(this->state_machine_, state,
+ bol, end, this->get_eoi(), unique_id);
+ }
+
+ // The adjust_start() and revert_adjust_start() functions are helpers
+ // needed to implement the functionality required for lex::more().
+ // They are called from the functor body below.
+ bool adjust_start()
+ {
+ if (!has_hold_)
+ return false;
+
+ std::swap(this->get_first(), hold_);
+ has_hold_ = false;
+ return true;
+ }
+ void revert_adjust_start()
+ {
+ // this will be called only if adjust_start above returned true
+ std::swap(this->get_first(), hold_);
+ has_hold_ = true;
+ }
+
+ token_value_type const& get_value() const
+ {
+ if (!has_value_) {
+ spirit::traits::assign_to(this->get_first(), end_, value_);
+ has_value_ = true;
+ }
+ return value_;
+ }
+ template <typename Value>
+ void set_value(Value const& val)
+ {
+ value_ = val;
+ has_value_ = true;
+ }
+ void set_end(Iterator const& it)
+ {
+ end_ = it;
+ }
+ bool has_value() const { return has_value_; }
+ void reset_value() { has_value_ = false; }
+
+ protected:
+ semantic_actions_type const& actions_;
+ Iterator hold_; // iterator needed to support lex::more()
+ Iterator end_; // iterator pointing to end of matched token
+ mutable token_value_type value_; // token value to use
+ mutable bool has_value_; // 'true' if value_ is valid
+ bool has_hold_; // 'true' if hold_ is valid
+
+ private:
+ // silence MSVC warning C4512: assignment operator could not be generated
+ data& operator= (data const&);
+ };
}
}}}}
Modified: trunk/boost/spirit/home/lex/lexer/lexertl/lexer.hpp
==============================================================================
--- trunk/boost/spirit/home/lex/lexer/lexertl/lexer.hpp (original)
+++ trunk/boost/spirit/home/lex/lexer/lexertl/lexer.hpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -302,8 +302,12 @@
typedef typename
basic_rules_type::string_size_t_map::value_type
state_type;
- BOOST_FOREACH(state_type const& s, rules_.statemap())
- actions_.add_action(unique_id, s.second, wrapper_type::call(act));
+
+ std::size_t states = rules_.statemap().size();
+ BOOST_FOREACH(state_type const& s, rules_.statemap()) {
+ for (std::size_t j = 0; j < states; ++j)
+ actions_.add_action(unique_id + j, s.second, wrapper_type::call(act));
+ }
}
else {
actions_.add_action(unique_id, state, wrapper_type::call(act));
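
This hunk fixes the registration of semantic actions for token definitions that are associated with all lexer states at once (state name "*"); each state now receives the action for its own unique token id. A hedged sketch of the construct this repairs, following the token_definitions_with_state lexer from the token_iterpair.cpp test added below:

    // token definitions added to all states ("*") with attached actions;
    // with this fix the actions fire regardless of the state in which the
    // token was matched
    this->self("*") =
          double_    [ lex::_state = "DOUBLE" ]
        | int_       [ lex::_state = "INT" ]
        | whitespace [ lex::_pass = lex::pass_flags::pass_ignore ]
        ;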
Added: trunk/boost/spirit/home/lex/lexer/lexertl/position_token.hpp
==============================================================================
--- (empty file)
+++ trunk/boost/spirit/home/lex/lexer/lexertl/position_token.hpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -0,0 +1,901 @@
+// Copyright (c) 2001-2011 Hartmut Kaiser
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+#if !defined(BOOST_SPIRIT_LEX_POSITION_TOKEN_MAY_13_2011_0846PM)
+#define BOOST_SPIRIT_LEX_POSITION_TOKEN_MAY_13_2011_0846PM
+
+#if defined(_MSC_VER)
+#pragma once
+#endif
+
+#include <boost/config.hpp>
+#include <boost/detail/workaround.hpp>
+#include <boost/spirit/home/qi/detail/assign_to.hpp>
+#include <boost/spirit/home/support/attributes.hpp>
+#include <boost/spirit/home/support/argument.hpp>
+#include <boost/spirit/home/support/detail/lexer/generator.hpp>
+#include <boost/spirit/home/support/detail/lexer/rules.hpp>
+#include <boost/spirit/home/support/detail/lexer/consts.hpp>
+#include <boost/spirit/home/support/utree/utree_traits_fwd.hpp>
+#include <boost/fusion/include/vector.hpp>
+#include <boost/fusion/include/at.hpp>
+#include <boost/fusion/include/value_at.hpp>
+#include <boost/detail/iterator.hpp>
+#include <boost/variant.hpp>
+#include <boost/mpl/vector.hpp>
+#include <boost/mpl/bool.hpp>
+#include <boost/mpl/is_sequence.hpp>
+#include <boost/mpl/begin.hpp>
+#include <boost/mpl/insert.hpp>
+#include <boost/mpl/vector.hpp>
+#include <boost/mpl/if.hpp>
+#include <boost/mpl/or.hpp>
+#include <boost/type_traits/is_same.hpp>
+#include <boost/range/iterator_range.hpp>
+#if !BOOST_WORKAROUND(BOOST_MSVC, <= 1300)
+#include <boost/static_assert.hpp>
+#endif
+
+#if defined(BOOST_SPIRIT_DEBUG)
+#include <iosfwd>
+#endif
+
+namespace boost { namespace spirit { namespace lex { namespace lexertl
+{
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // The position_token is the type of the objects returned by the
+ // iterator if it has been specified while instantiating the lexer object.
+ //
+ // template parameters:
+ // Iterator The type of the iterator used to access the
+ // underlying character stream.
+ // AttributeTypes A mpl sequence containing the types of all
+ // required different token values to be supported
+ // by this token type.
+ // HasState A mpl::bool_ indicating whether this token type
+ // should support lexer states.
+ // Idtype The type to use for the token id (defaults to
+ // std::size_t).
+ //
+ // It is possible to use other token types with the spirit::lex
+ // framework as well. If you plan to use a different type as your token
+ // type, you'll need to expose the following things from your token type
+ // to make it compatible with spirit::lex:
+ //
+ // typedefs
+ // iterator_type The type of the iterator used to access the
+ // underlying character stream.
+ //
+ // id_type The type of the token id used.
+ //
+ // methods
+ // default constructor
+ // This should initialize the token as an end of
+ // input token.
+ // constructors The prototype of the other required
+ // constructors should be:
+ //
+ // token(int)
+ // This constructor should initialize the token as
+ // an invalid token (not carrying any specific
+ // values)
+ //
+ // where: the int is used as a tag only and its value is
+ // ignored
+ //
+ // and:
+ //
+ // token(Idtype id, std::size_t state,
+ // iterator_type first, iterator_type last);
+ //
+ // where: id: token id
+ // state: lexer state this token was matched in
+ // first, last: pair of iterators marking the matched
+ // range in the underlying input stream
+ //
+ // accessors
+ // id() return the token id of the matched input sequence
+ // id(newid) set the token id of the token instance
+ //
+ // state() return the lexer state this token was matched in
+ //
+ // value() return the token value
+ //
+ // Additionally, you will have to implement a couple of helper functions
+ // in the same namespace as the token type: a comparison operator==() to
+ // compare your token instances, a token_is_valid() function and different
+ // specializations of the Spirit customization point
+ // assign_to_attribute_from_value as shown below.
+ //
+ ///////////////////////////////////////////////////////////////////////////
+ template <typename Iterator = char const*
+ , typename AttributeTypes = mpl::vector0<>
+ , typename HasState = mpl::true_
+ , typename Idtype = std::size_t>
+ struct position_token;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // This specialization of the token type doesn't contain any item data and
+ // doesn't support working with lexer states. However, like all other
+ // variants of position_token, it carries a pair of iterators marking the
+ // begin and the end of the matched character sequence.
+ ///////////////////////////////////////////////////////////////////////////
+ template <typename Iterator, typename Idtype>
+ struct position_token<Iterator, lex::omit, mpl::false_, Idtype>
+ {
+ typedef Iterator iterator_type;
+ typedef iterator_range<iterator_type> iterpair_type;
+ typedef mpl::false_ has_state;
+ typedef Idtype id_type;
+ typedef unused_type token_value_type;
+
+ // default constructed tokens correspond to EOI tokens
+ position_token()
+ : id_(id_type(boost::lexer::npos)) {}
+
+ // construct an invalid token
+ explicit position_token(int)
+ : id_(id_type(0)) {}
+
+ position_token(id_type id, std::size_t)
+ : id_(id) {}
+
+ position_token(id_type id, std::size_t, token_value_type)
+ : id_(id) {}
+
+ position_token(id_type id, std::size_t, Iterator const& first
+ , Iterator const& last)
+ : id_(id), matched_(first, last) {}
+
+ // this default conversion operator is needed to allow the direct
+ // usage of tokens in conjunction with the primitive parsers defined
+ // in Qi
+ operator id_type() const { return id_; }
+
+ // Retrieve or set the token id of this token instance.
+ id_type id() const { return id_; }
+ void id(id_type newid) { id_ = newid; }
+
+ std::size_t state() const { return 0; } // always '0' (INITIAL state)
+
+ bool is_valid() const
+ {
+ return 0 != id_ && id_type(boost::lexer::npos) != id_;
+ }
+
+ // access the stored iterator range of the matched input sequence
+ iterator_type begin() const { return matched_.begin(); }
+ iterator_type end() const { return matched_.end(); }
+
+ iterpair_type& matched() { return matched_; }
+ iterpair_type const& matched() const { return matched_; }
+
+ token_value_type& value() { return unused; }
+ token_value_type const& value() const { return unused; }
+
+#if BOOST_WORKAROUND(BOOST_MSVC, == 1600)
+ // workaround for MSVC10 which has problems copying a default
+ // constructed iterator_range
+ position_token& operator= (position_token const& rhs)
+ {
+ if (this != &rhs)
+ {
+ id_ = rhs.id_;
+ if (id_ != boost::lexer::npos && id_ != 0)
+ matched_ = rhs.matched_;
+ }
+ return *this;
+ }
+#endif
+
+ protected:
+ id_type id_; // token id, 0 if nothing has been matched
+ iterpair_type matched_; // matched input sequence
+ };
+
+#if defined(BOOST_SPIRIT_DEBUG)
+ template <typename Char, typename Traits, typename Iterator
+ , typename AttributeTypes, typename HasState, typename Idtype>
+ inline std::basic_ostream<Char, Traits>&
+ operator<< (std::basic_ostream<Char, Traits>& os
+ , position_token<Iterator, AttributeTypes, HasState, Idtype> const& t)
+ {
+ if (t.is_valid()) {
+ Iterator end = t.end();
+ for (Iterator it = t.begin(); it != end; ++it)
+ os << *it;
+ }
+ else {
+ os << "<invalid token>";
+ }
+ return os;
+ }
+#endif
+
+ ///////////////////////////////////////////////////////////////////////////
+ // This specialization of the token type doesn't contain any item data but
+ // supports working with lexer states.
+ ///////////////////////////////////////////////////////////////////////////
+ template <typename Iterator, typename Idtype>
+ struct position_token<Iterator, lex::omit, mpl::true_, Idtype>
+ : position_token<Iterator, lex::omit, mpl::false_, Idtype>
+ {
+ private:
+ typedef position_token<Iterator, lex::omit, mpl::false_, Idtype>
+ base_type;
+
+ public:
+ typedef typename base_type::id_type id_type;
+ typedef Iterator iterator_type;
+ typedef mpl::true_ has_state;
+ typedef unused_type token_value_type;
+
+ // default constructed tokens correspond to EOI tokens
+ position_token() : state_(boost::lexer::npos) {}
+
+ // construct an invalid token
+ explicit position_token(int)
+ : base_type(0), state_(boost::lexer::npos) {}
+
+ position_token(id_type id, std::size_t state)
+ : base_type(id, boost::lexer::npos), state_(state) {}
+
+ position_token(id_type id, std::size_t state, token_value_type)
+ : base_type(id, boost::lexer::npos, unused)
+ , state_(state) {}
+
+ position_token(id_type id, std::size_t state
+ , Iterator const& first, Iterator const& last)
+ : base_type(id, boost::lexer::npos, first, last)
+ , state_(state) {}
+
+ std::size_t state() const { return state_; }
+
+#if BOOST_WORKAROUND(BOOST_MSVC, == 1600)
+ // workaround for MSVC10 which has problems copying a default
+ // constructed iterator_range
+ position_token& operator= (position_token const& rhs)
+ {
+ if (this != &rhs)
+ {
+ this->base_type::operator=(static_cast<base_type const&>(rhs));
+ state_ = rhs.state_;
+ }
+ return *this;
+ }
+#endif
+
+ protected:
+ std::size_t state_; // lexer state this token was matched in
+ };
+
+ ///////////////////////////////////////////////////////////////////////////
+ // These specializations for an empty attribute list cause all token
+ // instances to expose as their attribute the iterator_range pointing to the
+ // matched input sequence.
+ ///////////////////////////////////////////////////////////////////////////
+ template <typename Iterator, typename HasState, typename Idtype>
+ struct position_token<Iterator, mpl::vector<>, HasState, Idtype>
+ : position_token<Iterator, lex::omit, HasState, Idtype>
+ {
+ private:
+ typedef position_token<Iterator, lex::omit, HasState, Idtype> base_type;
+
+ public:
+ typedef typename base_type::id_type id_type;
+ typedef typename base_type::iterator_type iterator_type;
+ typedef typename base_type::iterpair_type iterpair_type;
+ typedef HasState has_state;
+ typedef iterpair_type token_value_type;
+
+ // default constructed tokens correspond to EOI tokens
+ position_token() {}
+
+ // construct an invalid token
+ explicit position_token(int)
+ : base_type(0) {}
+
+ position_token(id_type id, std::size_t state)
+ : base_type(id, state) {}
+
+ position_token(id_type id, std::size_t state, token_value_type)
+ : base_type(id, state, unused) {}
+
+ position_token(id_type id, std::size_t state
+ , Iterator const& first, Iterator const& last)
+ : base_type(id, state, first, last) {}
+
+ token_value_type& value() { return this->base_type::matched(); }
+ token_value_type const& value() const { return this->base_type::matched(); }
+ };
+
+ template <typename Iterator, typename HasState, typename Idtype>
+ struct position_token<Iterator, mpl::vector0<>, HasState, Idtype>
+ : position_token<Iterator, lex::omit, HasState, Idtype>
+ {
+ private:
+ typedef position_token<Iterator, lex::omit, HasState, Idtype> base_type;
+
+ public:
+ typedef typename base_type::id_type id_type;
+ typedef typename base_type::iterator_type iterator_type;
+ typedef typename base_type::iterpair_type iterpair_type;
+ typedef HasState has_state;
+ typedef iterpair_type token_value_type;
+
+ // default constructed tokens correspond to EOI tokens
+ position_token() {}
+
+ // construct an invalid token
+ explicit position_token(int)
+ : base_type(0) {}
+
+ position_token(id_type id, std::size_t state)
+ : base_type(id, state) {}
+
+ position_token(id_type id, std::size_t state, token_value_type)
+ : base_type(id, state, unused) {}
+
+ position_token(id_type id, std::size_t state
+ , Iterator const& first, Iterator const& last)
+ : base_type(id, state, first, last) {}
+
+ token_value_type& value() { return this->base_type::matched(); }
+ token_value_type const& value() const { return this->base_type::matched(); }
+ };
+
+ ///////////////////////////////////////////////////////////////////////////
+ // These specializations for an attribute list of length one cause all token
+ // instances to expose as their attribute the specified type.
+ ///////////////////////////////////////////////////////////////////////////
+ template <typename Iterator, typename Attribute, typename HasState
+ , typename Idtype>
+ struct position_token<Iterator, mpl::vector<Attribute>, HasState, Idtype>
+ : position_token<Iterator, lex::omit, HasState, Idtype>
+ {
+ private:
+ typedef position_token<Iterator, lex::omit, HasState, Idtype> base_type;
+
+ public:
+ typedef typename base_type::id_type id_type;
+ typedef typename base_type::iterator_type iterator_type;
+ typedef typename base_type::iterpair_type iterpair_type;
+ typedef HasState has_state;
+ typedef boost::optional<Attribute> token_value_type;
+
+ // default constructed tokens correspond to EOI tokens
+ position_token() {}
+
+ // construct an invalid token
+ explicit position_token(int)
+ : base_type(0) {}
+
+ position_token(id_type id, std::size_t state)
+ : base_type(id, state) {}
+
+ position_token(id_type id, std::size_t state, token_value_type const& v)
+ : base_type(id, state, unused), value_(v) {}
+
+ position_token(id_type id, std::size_t state
+ , Iterator const& first, Iterator const& last)
+ : base_type(id, state, first, last) {}
+
+ token_value_type& value() { return value_; }
+ token_value_type const& value() const { return value_; }
+
+ bool has_value() const { return value_; }
+
+#if BOOST_WORKAROUND(BOOST_MSVC, == 1600)
+ // workaround for MSVC10 which has problems copying a default
+ // constructed iterator_range
+ position_token& operator= (position_token const& rhs)
+ {
+ if (this != &rhs)
+ {
+ this->base_type::operator=(static_cast<base_type const&>(rhs));
+ if (this->id_ != boost::lexer::npos && this->id_ != 0)
+ value_ = rhs.value_;
+ }
+ return *this;
+ }
+#endif
+
+ protected:
+ token_value_type value_; // token value
+ };
+
+ template <typename Iterator, typename Attribute, typename HasState
+ , typename Idtype>
+ struct position_token<Iterator, mpl::vector1<Attribute>, HasState, Idtype>
+ : position_token<Iterator, lex::omit, HasState, Idtype>
+ {
+ private:
+ typedef position_token<Iterator, lex::omit, HasState, Idtype> base_type;
+
+ public:
+ typedef typename base_type::id_type id_type;
+ typedef typename base_type::iterator_type iterator_type;
+ typedef typename base_type::iterpair_type iterpair_type;
+ typedef HasState has_state;
+ typedef boost::optional<Attribute> token_value_type;
+
+ // default constructed tokens correspond to EOI tokens
+ position_token() {}
+
+ // construct an invalid token
+ explicit position_token(int)
+ : base_type(0) {}
+
+ position_token(id_type id, std::size_t state)
+ : base_type(id, state) {}
+
+ position_token(id_type id, std::size_t state, token_value_type const& v)
+ : base_type(id, state, unused), value_(v) {}
+
+ position_token(id_type id, std::size_t state
+ , Iterator const& first, Iterator const& last)
+ : base_type(id, state, first, last) {}
+
+ token_value_type& value() { return value_; }
+ token_value_type const& value() const { return value_; }
+
+ bool has_value() const { return value_; }
+
+#if BOOST_WORKAROUND(BOOST_MSVC, == 1600)
+ // workaround for MSVC10 which has problems copying a default
+ // constructed iterator_range
+ position_token& operator= (position_token const& rhs)
+ {
+ if (this != &rhs)
+ {
+ this->base_type::operator=(static_cast<base_type const&>(rhs));
+ if (this->id_ != boost::lexer::npos && this->id_ != 0)
+ value_ = rhs.value_;
+ }
+ return *this;
+ }
+#endif
+
+ protected:
+ token_value_type value_; // token value
+ };
+
+ ///////////////////////////////////////////////////////////////////////////
+ // The generic version of the position_token type derives from the
+ // specialization above and adds a single data member holding the item
+ // data carried by the token instance.
+ ///////////////////////////////////////////////////////////////////////////
+ namespace detail
+ {
+ ///////////////////////////////////////////////////////////////////////
+ // Meta-function to calculate the type of the variant data item to be
+ // stored with each token instance.
+ //
+ // Note: The iterator pair needs to be the first type in the list of
+ // types supported by the generated variant type (this is used to
+ // identify whether the stored data item in a particular token
+ // instance needs to be converted from the pair of iterators; see
+ // the first of the assign_to_attribute_from_value specializations
+ // below).
+ ///////////////////////////////////////////////////////////////////////
+ template <typename AttributeTypes>
+ struct position_token_value
+ {
+ typedef typename mpl::insert<
+ AttributeTypes
+ , typename mpl::begin<AttributeTypes>::type
+ , unused_type
+ >::type sequence_type;
+ typedef typename make_variant_over<sequence_type>::type type;
+ };
+ }
+
+ template <typename Iterator, typename AttributeTypes, typename HasState
+ , typename Idtype>
+ struct position_token
+ : position_token<Iterator, lex::omit, HasState, Idtype>
+ {
+ private: // precondition assertions
+#if !BOOST_WORKAROUND(BOOST_MSVC, <= 1300)
+ BOOST_STATIC_ASSERT((mpl::is_sequence<AttributeTypes>::value ||
+ is_same<AttributeTypes, lex::omit>::value));
+#endif
+ typedef position_token<Iterator, lex::omit, HasState, Idtype>
+ base_type;
+
+ protected:
+ // If no additional token value types are given, the token will
+ // hold no token value at all as the base class already has the
+ // iterator pair of the matched range in the underlying input sequence.
+ // Otherwise the token value is stored as a variant and will
+ // initially hold an unused_type but is able to hold any of
+ // the given data types as well. The conversion from the iterator pair
+ // to the required data type is done when it is accessed for the first
+ // time.
+
+ public:
+ typedef typename base_type::id_type id_type;
+ typedef typename detail::position_token_value<AttributeTypes>::type
+ token_value_type;
+
+ typedef Iterator iterator_type;
+
+ // default constructed tokens correspond to EOI tokens
+ position_token() : value_(unused) {}
+
+ // construct an invalid token
+ explicit position_token(int)
+ : base_type(0), value_(unused) {}
+
+ position_token(id_type id, std::size_t state, token_value_type const& value)
+ : base_type(id, state, value), value_(value) {}
+
+ position_token(id_type id, std::size_t state, Iterator const& first
+ , Iterator const& last)
+ : base_type(id, state, first, last), value_(unused) {}
+
+ token_value_type& value() { return value_; }
+ token_value_type const& value() const { return value_; }
+
+#if BOOST_WORKAROUND(BOOST_MSVC, == 1600)
+ // workaround for MSVC10 which has problems copying a default
+ // constructed iterator_range
+ position_token& operator= (position_token const& rhs)
+ {
+ if (this != &rhs)
+ {
+ this->base_type::operator=(static_cast<base_type const&>(rhs));
+ if (this->id_ != boost::lexer::npos && this->id_ != 0)
+ value_ = rhs.value_;
+ }
+ return *this;
+ }
+#endif
+
+ protected:
+ token_value_type value_; // token value, by default a pair of iterators
+ };
+
+ ///////////////////////////////////////////////////////////////////////////
+ // tokens are considered equal, if their id's match (these are unique)
+ template <typename Iterator, typename AttributeTypes, typename HasState
+ , typename Idtype>
+ inline bool
+ operator== (position_token<Iterator, AttributeTypes, HasState, Idtype> const& lhs,
+ position_token<Iterator, AttributeTypes, HasState, Idtype> const& rhs)
+ {
+ return lhs.id() == rhs.id();
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // This overload is needed by the multi_pass/functor_input_policy to
+ // validate a token instance. It has to be defined in the same namespace
+ // as the token class itself to allow ADL to find it.
+ ///////////////////////////////////////////////////////////////////////////
+ template <typename Iterator, typename AttributeTypes, typename HasState
+ , typename Idtype>
+ inline bool
+ token_is_valid(position_token<Iterator, AttributeTypes, HasState, Idtype> const& t)
+ {
+ return t.is_valid();
+ }
+}}}}
+
+namespace boost { namespace spirit { namespace traits
+{
+ ///////////////////////////////////////////////////////////////////////////
+ // We have to provide specializations for the customization point
+ // assign_to_attribute_from_value, allowing the needed value to be extracted
+ // from the token.
+ ///////////////////////////////////////////////////////////////////////////
+
+ // This is called from the parse function of token_def if the token_def
+ // has been defined to carry a special attribute type
+ template <typename Attribute, typename Iterator, typename AttributeTypes
+ , typename HasState, typename Idtype>
+ struct assign_to_attribute_from_value<Attribute
+ , lex::lexertl::position_token<Iterator, AttributeTypes, HasState, Idtype> >
+ {
+ static void
+ call(lex::lexertl::position_token<
+ Iterator, AttributeTypes, HasState, Idtype> const& t
+ , Attribute& attr)
+ {
+ // The goal of this function is to avoid the conversion of the pair of
+ // iterators (to the matched character sequence) into the token value
+ // of the required type being done more than once. For this purpose it
+ // checks whether the stored value type is still the default one (pair
+ // of iterators) and if yes, replaces the pair of iterators with the
+ // converted value to be returned from subsequent calls.
+
+ if (0 == t.value().which()) {
+ // first access to the token value
+ typedef iterator_range<Iterator> iterpair_type;
+ iterpair_type const& ip = t.matched();
+
+ // Interestingly enough we use the assign_to() framework defined in
+ // Spirit.Qi allowing to convert the pair of iterators to almost any
+ // required type (assign_to(), if available, uses the standard Spirit
+ // parsers to do the conversion).
+ spirit::traits::assign_to(ip.begin(), ip.end(), attr);
+
+ // If you get an error during the compilation of the following
+ // assignment expression, you probably forgot to list one or more
+ // types used as token value types (in your token_def<...>
+ // definitions) in your definition of the token class. I.e. any token
+ // value type used for a token_def<...> definition has to be listed
+ // during the declaration of the token type to use. For instance let's
+ // assume we have two token_def's:
+ //
+ // token_def<int> number; number = "...";
+ // token_def<std::string> identifier; identifier = "...";
+ //
+ // Then you'll have to use the following token type definition
+ // (assuming you are using the token class):
+ //
+ // typedef mpl::vector<int, std::string> token_values;
+ // typedef token<base_iter_type, token_values> token_type;
+ //
+ // where: base_iter_type is the iterator type used to expose the
+ // underlying input stream.
+ //
+ // This token_type has to be used as the second template parameter
+ // to the lexer class:
+ //
+ // typedef lexer<base_iter_type, token_type> lexer_type;
+ //
+ // again, assuming you're using the lexer<> template for your
+ // tokenization.
+
+ typedef lex::lexertl::position_token<
+ Iterator, AttributeTypes, HasState, Idtype> token_type;
+ spirit::traits::assign_to(
+ attr, const_cast<token_type&>(t).value()); // re-assign value
+ }
+ else {
+ // reuse the already assigned value
+ spirit::traits::assign_to(get<Attribute>(t.value()), attr);
+ }
+ }
+ };
+
+ template <typename Attribute, typename Iterator, typename AttributeTypes
+ , typename HasState, typename Idtype>
+ struct assign_to_container_from_value<Attribute
+ , lex::lexertl::position_token<Iterator, AttributeTypes, HasState, Idtype> >
+ : assign_to_attribute_from_value<Attribute
+ , lex::lexertl::position_token<Iterator, AttributeTypes, HasState, Idtype> >
+ {};
+
+ ///////////////////////////////////////////////////////////////////////////
+ // These are called from the parse function of token_def if the token type
+ // has no special attribute type assigned
+ template <typename Attribute, typename Iterator, typename HasState
+ , typename Idtype>
+ struct assign_to_attribute_from_value<Attribute
+ , lex::lexertl::position_token<Iterator, mpl::vector0<>, HasState, Idtype> >
+ {
+ static void
+ call(lex::lexertl::position_token<
+ Iterator, mpl::vector0<>, HasState, Idtype> const& t
+ , Attribute& attr)
+ {
+ // The default type returned by the token_def parser component (if
+ // it has no token value type assigned) is the pair of iterators
+ // to the matched character sequence.
+ spirit::traits::assign_to(t.begin(), t.end(), attr);
+ }
+ };
+
+// template <typename Attribute, typename Iterator, typename HasState
+// , typename Idtype>
+// struct assign_to_container_from_value<Attribute
+// , lex::lexertl::position_token<Iterator, mpl::vector0<>, HasState, Idtype> >
+// : assign_to_attribute_from_value<Attribute
+// , lex::lexertl::position_token<Iterator, mpl::vector0<>, HasState, Idtype> >
+// {};
+
+ // same as above but using mpl::vector<> instead of mpl::vector0<>
+ template <typename Attribute, typename Iterator, typename HasState
+ , typename Idtype>
+ struct assign_to_attribute_from_value<Attribute
+ , lex::lexertl::position_token<Iterator, mpl::vector<>, HasState, Idtype> >
+ {
+ static void
+ call(lex::lexertl::position_token<
+ Iterator, mpl::vector<>, HasState, Idtype> const& t
+ , Attribute& attr)
+ {
+ // The default type returned by the token_def parser component (if
+ // it has no token value type assigned) is the pair of iterators
+ // to the matched character sequence.
+ spirit::traits::assign_to(t.begin(), t.end(), attr);
+ }
+ };
+
+// template <typename Attribute, typename Iterator, typename HasState
+// , typename Idtype>
+// struct assign_to_container_from_value<Attribute
+// , lex::lexertl::position_token<Iterator, mpl::vector<>, HasState, Idtype> >
+// : assign_to_attribute_from_value<Attribute
+// , lex::lexertl::position_token<Iterator, mpl::vector<>, HasState, Idtype> >
+// {};
+
+ ///////////////////////////////////////////////////////////////////////////
+ // These are called from the parse function of token_def if the token type
+ // has a single special attribute type assigned
+ template <typename Attribute, typename Iterator, typename Attr
+ , typename HasState, typename Idtype>
+ struct assign_to_attribute_from_value<Attribute
+ , lex::lexertl::position_token<Iterator, mpl::vector1<Attr>, HasState, Idtype> >
+ {
+ static void
+ call(lex::lexertl::position_token<
+ Iterator, mpl::vector1<Attr>, HasState, Idtype> const& t
+ , Attribute& attr)
+ {
+ // The goal of this function is to avoid the conversion of the pair of
+ // iterators (to the matched character sequence) into the token value
+ // of the required type being done more than once.
+
+ if (!t.has_value()) {
+ // first access to the token value
+ typedef iterator_range<Iterator> iterpair_type;
+ iterpair_type const& ip = t.matched();
+
+ // Interestingly enough we use the assign_to() framework defined in
+ // Spirit.Qi allowing to convert the pair of iterators to almost any
+ // required type (assign_to(), if available, uses the standard Spirit
+ // parsers to do the conversion).
+ spirit::traits::assign_to(ip.begin(), ip.end(), attr);
+
+ // Re-assign the attribute to the stored value
+ typedef lex::lexertl::position_token<
+ Iterator, mpl::vector1<Attr>, HasState, Idtype> token_type;
+ spirit::traits::assign_to(
+ attr, const_cast<token_type&>(t).value());
+ }
+ else {
+ // reuse the already assigned value
+ spirit::traits::assign_to(t.value(), attr);
+ }
+ }
+ };
+
+// template <typename Attribute, typename Iterator, typename Attr
+// , typename HasState, typename Idtype>
+// struct assign_to_container_from_value<Attribute
+// , lex::lexertl::position_token<Iterator, mpl::vector1<Attr>, HasState, Idtype> >
+// : assign_to_attribute_from_value<Attribute
+// , lex::lexertl::position_token<Iterator, mpl::vector1<Attr>, HasState, Idtype> >
+// {};
+
+ // same as above but using mpl::vector<Attr> instead of mpl::vector1<Attr>
+ template <typename Attribute, typename Iterator, typename Attr
+ , typename HasState, typename Idtype>
+ struct assign_to_attribute_from_value<Attribute
+ , lex::lexertl::position_token<Iterator, mpl::vector<Attr>, HasState, Idtype> >
+ {
+ static void
+ call(lex::lexertl::position_token<
+ Iterator, mpl::vector<Attr>, HasState, Idtype> const& t
+ , Attribute& attr)
+ {
+ // The goal of this function is to avoid the conversion of the pair of
+ // iterators (to the matched character sequence) into the token value
+ // of the required type being done more than once.
+
+ if (!t.has_value()) {
+ // first access to the token value
+ typedef iterator_range<Iterator> iterpair_type;
+ iterpair_type const& ip = t.matched();
+
+ // Interestingly enough we use the assign_to() framework defined in
+ // Spirit.Qi allowing to convert the pair of iterators to almost any
+ // required type (assign_to(), if available, uses the standard Spirit
+ // parsers to do the conversion).
+ spirit::traits::assign_to(ip.begin(), ip.end(), attr);
+
+ // Re-assign the attribute to the stored value
+ typedef lex::lexertl::position_token<
+ Iterator, mpl::vector<Attr>, HasState, Idtype> token_type;
+ spirit::traits::assign_to(
+ attr, const_cast<token_type&>(t).value());
+ }
+ else {
+ // reuse the already assigned value
+ spirit::traits::assign_to(t.value(), attr);
+ }
+ }
+ };
+
+// template <typename Attribute, typename Iterator, typename Attr
+// , typename HasState, typename Idtype>
+// struct assign_to_container_from_value<Attribute
+// , lex::lexertl::position_token<Iterator, mpl::vector<Attr>, HasState, Idtype> >
+// : assign_to_attribute_from_value<Attribute
+// , lex::lexertl::position_token<Iterator, mpl::vector<Attr>, HasState, Idtype> >
+// {};
+
+ // This is called from the parse function of token_def if the token type
+ // has been explicitly omitted (i.e. no attribute value is used), which
+ // essentially means that every attribute gets initialized using default
+ // constructed values.
+ template <typename Attribute, typename Iterator, typename HasState
+ , typename Idtype>
+ struct assign_to_attribute_from_value<Attribute
+ , lex::lexertl::position_token<Iterator, lex::omit, HasState, Idtype> >
+ {
+ static void
+ call(lex::lexertl::position_token<Iterator, lex::omit, HasState, Idtype> const& t
+ , Attribute& attr)
+ {
+ // do nothing
+ }
+ };
+
+ template <typename Attribute, typename Iterator, typename HasState
+ , typename Idtype>
+ struct assign_to_container_from_value<Attribute
+ , lex::lexertl::position_token<Iterator, lex::omit, HasState, Idtype> >
+ : assign_to_attribute_from_value<Attribute
+ , lex::lexertl::position_token<Iterator, lex::omit, HasState, Idtype> >
+ {};
+
+ // This is called from the parse function of lexer_def_
+ template <typename Iterator, typename AttributeTypes, typename HasState
+ , typename Idtype_, typename Idtype>
+ struct assign_to_attribute_from_value<
+ fusion::vector2<Idtype_, iterator_range<Iterator> >
+ , lex::lexertl::position_token<Iterator, AttributeTypes, HasState, Idtype> >
+ {
+ static void
+ call(lex::lexertl::position_token<Iterator, AttributeTypes, HasState, Idtype> const& t
+ , fusion::vector2<Idtype_, iterator_range<Iterator> >& attr)
+ {
+ // The type returned by the lexer_def_ parser components is a
+ // fusion::vector containing the token id of the matched token
+ // and the pair of iterators to the matched character sequence.
+ typedef iterator_range<Iterator> iterpair_type;
+ typedef fusion::vector2<Idtype_, iterator_range<Iterator> >
+ attribute_type;
+
+ iterpair_type const& ip = t.matched();
+ attr = attribute_type(t.id(), ip);
+ }
+ };
+
+ template <typename Iterator, typename AttributeTypes, typename HasState
+ , typename Idtype_, typename Idtype>
+ struct assign_to_container_from_value<
+ fusion::vector2<Idtype_, iterator_range<Iterator> >
+ , lex::lexertl::position_token<Iterator, AttributeTypes, HasState, Idtype> >
+ : assign_to_attribute_from_value<
+ fusion::vector2<Idtype_, iterator_range<Iterator> >
+ , lex::lexertl::position_token<Iterator, AttributeTypes, HasState, Idtype> >
+ {};
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Overload debug output for a single token, this integrates lexer tokens
+ // with Qi's simple_trace debug facilities
+ template <typename Iterator, typename Attribute, typename HasState
+ , typename Idtype>
+ struct token_printer_debug<
+ lex::lexertl::position_token<Iterator, Attribute, HasState, Idtype> >
+ {
+ typedef lex::lexertl::position_token<Iterator, Attribute, HasState, Idtype> token_type;
+
+ template <typename Out>
+ static void print(Out& out, token_type const& val)
+ {
+ out << '[';
+ spirit::traits::print_token(out, val.value());
+ out << ']';
+ }
+ };
+}}}
+
+#endif
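
A short sketch of how the new token type is meant to be declared, following the comments above (the iterator type and the list of token value types are illustrative):

    #include <boost/spirit/include/lex_lexertl.hpp>
    #include <boost/spirit/include/lex_lexertl_position_token.hpp>
    #include <boost/mpl/vector.hpp>

    namespace lex = boost::spirit::lex;
    namespace mpl = boost::mpl;

    typedef std::string::iterator base_iterator_type;

    // list every type used as a token_def<> value type
    typedef mpl::vector<int, std::string> token_value_types;

    // plug-in compatible with lex::lexertl::token<>, but each token also
    // stores the matched iterator range, accessible via matched()/begin()/end()
    typedef lex::lexertl::position_token<
        base_iterator_type, token_value_types> token_type;
    typedef lex::lexertl::lexer<token_type> lexer_type;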
Modified: trunk/boost/spirit/home/lex/lexer/lexertl/token.hpp
==============================================================================
--- trunk/boost/spirit/home/lex/lexer/lexertl/token.hpp (original)
+++ trunk/boost/spirit/home/lex/lexer/lexertl/token.hpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -24,12 +24,15 @@
#include <boost/fusion/include/value_at.hpp>
#include <boost/detail/iterator.hpp>
#include <boost/variant.hpp>
+#include <boost/mpl/bool.hpp>
#include <boost/mpl/vector.hpp>
-#include <boost/mpl/insert.hpp>
+#include <boost/mpl/is_sequence.hpp>
#include <boost/mpl/begin.hpp>
-#include <boost/mpl/bool.hpp>
-#include <boost/mpl/identity.hpp>
+#include <boost/mpl/insert.hpp>
+#include <boost/mpl/vector.hpp>
#include <boost/mpl/if.hpp>
+#include <boost/mpl/or.hpp>
+#include <boost/type_traits/is_same.hpp>
#include <boost/range/iterator_range.hpp>
#if !BOOST_WORKAROUND(BOOST_MSVC, <= 1300)
#include <boost/static_assert.hpp>
@@ -137,16 +140,17 @@
token(id_type id, std::size_t, token_value_type)
: id_(id) {}
+ token_value_type& value() { return unused; }
+ token_value_type const& value() const { return unused; }
+
#if defined(BOOST_SPIRIT_DEBUG)
token(id_type id, std::size_t, Iterator const& first
, Iterator const& last)
: matched_(first, last)
- , id_(id)
- {}
+ , id_(id) {}
#else
token(id_type id, std::size_t, Iterator const&, Iterator const&)
- : id_(id)
- {}
+ : id_(id) {}
#endif
// this default conversion operator is needed to allow the direct
@@ -526,13 +530,13 @@
}
};
- template <typename Attribute, typename Iterator, typename HasState
- , typename Idtype>
- struct assign_to_container_from_value<Attribute
- , lex::lexertl::token<Iterator, mpl::vector0<>, HasState, Idtype> >
- : assign_to_attribute_from_value<Attribute
- , lex::lexertl::token<Iterator, mpl::vector0<>, HasState, Idtype> >
- {};
+// template <typename Attribute, typename Iterator, typename HasState
+// , typename Idtype>
+// struct assign_to_container_from_value<Attribute
+// , lex::lexertl::token<Iterator, mpl::vector0<>, HasState, Idtype> >
+// : assign_to_attribute_from_value<Attribute
+// , lex::lexertl::token<Iterator, mpl::vector0<>, HasState, Idtype> >
+// {};
// same as above but using mpl::vector<> instead of mpl::vector0<>
template <typename Attribute, typename Iterator, typename HasState
@@ -551,13 +555,13 @@
}
};
- template <typename Attribute, typename Iterator, typename HasState
- , typename Idtype>
- struct assign_to_container_from_value<Attribute
- , lex::lexertl::token<Iterator, mpl::vector<>, HasState, Idtype> >
- : assign_to_attribute_from_value<Attribute
- , lex::lexertl::token<Iterator, mpl::vector<>, HasState, Idtype> >
- {};
+// template <typename Attribute, typename Iterator, typename HasState
+// , typename Idtype>
+// struct assign_to_container_from_value<Attribute
+// , lex::lexertl::token<Iterator, mpl::vector<>, HasState, Idtype> >
+// : assign_to_attribute_from_value<Attribute
+// , lex::lexertl::token<Iterator, mpl::vector<>, HasState, Idtype> >
+// {};
// This is called from the parse function of token_def if the token type
// has been explicitly omitted (i.e. no attribute value is used), which
@@ -603,7 +607,7 @@
attribute_type;
iterpair_type const& ip = get<iterpair_type>(t.value());
- attr = attribute_type(t.id(), get<iterpair_type>(t.value()));
+ attr = attribute_type(t.id(), ip);
}
};
Modified: trunk/boost/spirit/home/qi/detail/assign_to.hpp
==============================================================================
--- trunk/boost/spirit/home/qi/detail/assign_to.hpp (original)
+++ trunk/boost/spirit/home/qi/detail/assign_to.hpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -68,7 +68,9 @@
call(Iterator const& first, Iterator const& last
, boost::optional<Attribute>& attr)
{
- attr = Attribute(first, last);
+ Attribute val;
+ assign_to(first, last, val);
+ attr = val;
}
};
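
With this change the assignment of a matched character range to a boost::optional<Attribute> token value is routed through assign_to() itself, so the attribute no longer has to be constructible from a pair of iterators; numeric and other attributes are converted by the usual Spirit machinery instead. A rough illustration (assuming the relevant Qi headers, e.g. boost/spirit/include/qi.hpp, are included):

    std::string s("123");
    boost::optional<int> v;

    // previously this path tried to build int(first, last); now the value is
    // converted from the character range by the nested assign_to() call
    boost::spirit::traits::assign_to(s.begin(), s.end(), v);   // v should hold 123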
Added: trunk/boost/spirit/include/lex_lexertl_position_token.hpp
==============================================================================
--- (empty file)
+++ trunk/boost/spirit/include/lex_lexertl_position_token.hpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -0,0 +1,18 @@
+/*=============================================================================
+ Copyright (c) 2001-2011 Joel de Guzman
+ Copyright (c) 2001-2011 Hartmut Kaiser
+ http://spirit.sourceforge.net/
+
+ Distributed under the Boost Software License, Version 1.0. (See accompanying
+ file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+=============================================================================*/
+#ifndef BOOST_SPIRIT_INCLUDE_LEX_LEXERTL_POSITION_TOKEN
+#define BOOST_SPIRIT_INCLUDE_LEX_LEXERTL_POSITION_TOKEN
+
+#if defined(_MSC_VER)
+#pragma once
+#endif
+
+#include <boost/spirit/home/lex/lexer/lexertl/position_token.hpp>
+
+#endif
Added: trunk/boost/spirit/include/lex_lexertl_token.hpp
==============================================================================
--- (empty file)
+++ trunk/boost/spirit/include/lex_lexertl_token.hpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -0,0 +1,18 @@
+/*=============================================================================
+ Copyright (c) 2001-2011 Joel de Guzman
+ Copyright (c) 2001-2011 Hartmut Kaiser
+ http://spirit.sourceforge.net/
+
+ Distributed under the Boost Software License, Version 1.0. (See accompanying
+ file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+=============================================================================*/
+#ifndef BOOST_SPIRIT_INCLUDE_LEX_LEXERTL_TOKEN
+#define BOOST_SPIRIT_INCLUDE_LEX_LEXERTL_TOKEN
+
+#if defined(_MSC_VER)
+#pragma once
+#endif
+
+#include <boost/spirit/home/lex/lexer/lexertl/token.hpp>
+
+#endif
Modified: trunk/libs/spirit/doc/what_s_new.qbk
==============================================================================
--- trunk/libs/spirit/doc/what_s_new.qbk (original)
+++ trunk/libs/spirit/doc/what_s_new.qbk 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -98,6 +98,15 @@
using `lex::char_` and `lex::string`. Both primitives now accept a second
parameter which will be interpreted as the requested token id for any token
generated from this definition.
+* Added a new token type `lex::lexertl::position_token<>`, which is essentially
+ plug-in compatible with the existing `lex::lexertl::token<>` class. However,
+ it additionally stores the pair of iterators pointing to the underlying
+ matched input sequence as an iterator_range.
+
+[heading Bug Fixes in Lex]
+
+* Fixed a problem with associating token definitions with all states (using
+ `"*"` as the state name) when actions were attached to them.
[heading Making Stuff Work]
Modified: trunk/libs/spirit/test/Jamfile
==============================================================================
--- trunk/libs/spirit/test/Jamfile (original)
+++ trunk/libs/spirit/test/Jamfile 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -253,20 +253,24 @@
###########################################################################
test-suite spirit_v2/lex :
+ [ run lex/auto_switch_lexerstate.cpp : : : : lex_auto_switch_lexerstate ]
+ [ run lex/dedent_handling_phoenix.cpp : : : : lex_dedent_handling_phoenix ]
+ [ run lex/id_type_enum.cpp : : : : lex_id_type_enum ]
[ run lex/lexertl1.cpp : : : : lex_lexertl1 ]
[ run lex/lexertl2.cpp : : : : lex_lexertl2 ]
[ run lex/lexertl3.cpp : : : : lex_lexertl3 ]
[ run lex/lexertl4.cpp : : : : lex_lexertl4 ]
[ run lex/lexertl5.cpp : : : : lex_lexertl5 ]
- [ run lex/state_switcher.cpp : : : : lex_state_switcher ]
[ run lex/lexer_state_switcher.cpp : : : : lex_lexer_state_switcher ]
+ [ run lex/semantic_actions.cpp : : : : lex_semantic_actions ]
[ run lex/set_token_value.cpp : : : : lex_set_token_value ]
- [ run lex/dedent_handling_phoenix.cpp : : : : lex_dedent_handling_phoenix ]
[ run lex/set_token_value_phoenix.cpp : : : : lex_set_token_value_phoenix ]
- [ run lex/semantic_actions.cpp : : : : lex_semantic_actions ]
- [ run lex/auto_switch_lexerstate.cpp : : : : lex_auto_switch_lexerstate ]
- [ run lex/id_type_enum.cpp : : : : lex_id_type_enum ]
+ [ run lex/state_switcher.cpp : : : : lex_state_switcher ]
[ run lex/string_token_id.cpp : : : : lex_string_token_id ]
+ [ run lex/token_iterpair.cpp : : : : lex_token_iterpair ]
+ [ run lex/token_moretypes.cpp : : : : lex_token_moretypes ]
+ [ run lex/token_omit.cpp : : : : lex_token_omit ]
+ [ run lex/token_onetype.cpp : : : : lex_token_onetype ]
;
Modified: trunk/libs/spirit/test/lex/lexertl1.cpp
==============================================================================
--- trunk/libs/spirit/test/lex/lexertl1.cpp (original)
+++ trunk/libs/spirit/test/lex/lexertl1.cpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -5,6 +5,7 @@
#include <boost/detail/lightweight_test.hpp>
#include <boost/spirit/include/lex_lexertl.hpp>
+#include <boost/spirit/include/lex_lexertl_position_token.hpp>
#include "test.hpp"
///////////////////////////////////////////////////////////////////////////////
@@ -25,6 +26,8 @@
token_def cpp_comment ("\\/\\/[^\\n\\r]*(\\n|\\r|\\r\\n)", CPPCOMMENT);
typedef std::string::iterator base_iterator_type;
+
+ // test with default token type
typedef lex::lexertl::token<base_iterator_type> token_type;
typedef lex::lexertl::lexer<token_type> lexer_type;
typedef lex::lexer<lexer_type> lexer_def;
@@ -65,5 +68,46 @@
BOOST_TEST(test (lex, "/", '/'));
}
+ // test with position_token
+ typedef lex::lexertl::position_token<base_iterator_type> position_token_type;
+ typedef lex::lexertl::lexer<position_token_type> position_lexer_type;
+ typedef lex::lexer<position_lexer_type> position_lexer_def;
+
+ {
+ // initialize lexer
+ position_lexer_def lex;
+ lex.self = c_comment;
+ lex.self += cpp_comment;
+
+ // test lexer for two different input strings
+ BOOST_TEST(test (lex, "/* this is a comment */", CCOMMENT));
+ BOOST_TEST(test (lex, "// this is a comment as well\n", CPPCOMMENT));
+ }
+
+ {
+ // initialize lexer
+ position_lexer_def lex;
+ lex.self = c_comment | cpp_comment;
+
+ // test lexer for two different input strings
+ BOOST_TEST(test (lex, "/* this is a comment */", CCOMMENT));
+ BOOST_TEST(test (lex, "// this is a comment as well\n", CPPCOMMENT));
+ }
+
+ {
+ // initialize lexer
+ position_lexer_def lex;
+ lex.self = token_def('+') | '-' | c_comment;
+ lex.self += lex::char_('*') | '/' | cpp_comment;
+
+ // test lexer for two different input strings
+ BOOST_TEST(test (lex, "/* this is a comment */", CCOMMENT));
+ BOOST_TEST(test (lex, "// this is a comment as well\n", CPPCOMMENT));
+ BOOST_TEST(test (lex, "+", '+'));
+ BOOST_TEST(test (lex, "-", '-'));
+ BOOST_TEST(test (lex, "*", '*'));
+ BOOST_TEST(test (lex, "/", '/'));
+ }
+
return boost::report_errors();
}
Added: trunk/libs/spirit/test/lex/token_iterpair.cpp
==============================================================================
--- (empty file)
+++ trunk/libs/spirit/test/lex/token_iterpair.cpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -0,0 +1,256 @@
+// Copyright (c) 2001-2011 Hartmut Kaiser
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+// #define BOOST_SPIRIT_LEXERTL_DEBUG
+
+#include <boost/config/warning_disable.hpp>
+#include <boost/detail/lightweight_test.hpp>
+
+#include <boost/spirit/include/lex_lexertl.hpp>
+#include <boost/spirit/include/lex_lexertl_position_token.hpp>
+#include <boost/spirit/include/phoenix_object.hpp>
+#include <boost/spirit/include/phoenix_operator.hpp>
+#include <boost/spirit/include/phoenix_statement.hpp>
+#include <boost/spirit/include/phoenix_stl.hpp>
+
+namespace lex = boost::spirit::lex;
+namespace phoenix = boost::phoenix;
+namespace mpl = boost::mpl;
+
+///////////////////////////////////////////////////////////////////////////////
+enum tokenids
+{
+ ID_INT = 1000,
+ ID_DOUBLE
+};
+
+template <typename Lexer>
+struct token_definitions : lex::lexer<Lexer>
+{
+ token_definitions()
+ {
+ this->self.add_pattern("HEXDIGIT", "[0-9a-fA-F]");
+ this->self.add_pattern("OCTALDIGIT", "[0-7]");
+ this->self.add_pattern("DIGIT", "[0-9]");
+
+ this->self.add_pattern("OPTSIGN", "[-+]?");
+ this->self.add_pattern("EXPSTART", "[eE][-+]");
+ this->self.add_pattern("EXPONENT", "[eE]{OPTSIGN}{DIGIT}+");
+
+ // define tokens and associate them with the lexer
+ int_ = "(0x|0X){HEXDIGIT}+|0{OCTALDIGIT}*|{OPTSIGN}[1-9]{DIGIT}*";
+ int_.id(ID_INT);
+
+ double_ = "{OPTSIGN}({DIGIT}*\\.{DIGIT}+|{DIGIT}+\\.){EXPONENT}?|{DIGIT}+{EXPONENT}";
+ double_.id(ID_DOUBLE);
+
+ whitespace = "[ \t\n]+";
+
+ this->self =
+ double_
+ | int_
+ | whitespace[ lex::_pass = lex::pass_flags::pass_ignore ]
+ ;
+ }
+
+ lex::token_def<lex::omit> int_;
+ lex::token_def<lex::omit> double_;
+ lex::token_def<lex::omit> whitespace;
+};
+
+template <typename Lexer>
+struct token_definitions_with_state : lex::lexer<Lexer>
+{
+ token_definitions_with_state()
+ {
+ this->self.add_pattern("HEXDIGIT", "[0-9a-fA-F]");
+ this->self.add_pattern("OCTALDIGIT", "[0-7]");
+ this->self.add_pattern("DIGIT", "[0-9]");
+
+ this->self.add_pattern("OPTSIGN", "[-+]?");
+ this->self.add_pattern("EXPSTART", "[eE][-+]");
+ this->self.add_pattern("EXPONENT", "[eE]{OPTSIGN}{DIGIT}+");
+
+ this->self.add_state();
+ this->self.add_state("INT");
+ this->self.add_state("DOUBLE");
+
+ // define tokens and associate them with the lexer
+ int_ = "(0x|0X){HEXDIGIT}+|0{OCTALDIGIT}*|{OPTSIGN}[1-9]{DIGIT}*";
+ int_.id(ID_INT);
+
+ double_ = "{OPTSIGN}({DIGIT}*\\.{DIGIT}+|{DIGIT}+\\.){EXPONENT}?|{DIGIT}+{EXPONENT}";
+ double_.id(ID_DOUBLE);
+
+ whitespace = "[ \t\n]+";
+
+ this->self("*") =
+ double_ [ lex::_state = "DOUBLE"]
+ | int_ [ lex::_state = "INT" ]
+ | whitespace[ lex::_pass = lex::pass_flags::pass_ignore ]
+ ;
+ }
+
+ lex::token_def<lex::omit> int_;
+ lex::token_def<lex::omit> double_;
+ lex::token_def<lex::omit> whitespace;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+template <typename Token>
+inline bool
+test_token_ids(int const* ids, std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (*ids == -1)
+ return false; // reached end of expected data
+
+ typename Token::token_value_type const& value (t.value());
+ if (t.id() != static_cast<std::size_t>(*ids)) // token id must match
+ return false;
+ ++ids;
+ }
+
+ return (*ids == -1) ? true : false;
+}
+
+template <typename Token>
+inline bool
+test_token_states(std::size_t const* states, std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (*states == std::size_t(-1))
+ return false; // reached end of expected data
+
+ typename Token::token_value_type const& value (t.value());
+ if (t.state() != *states) // token state must match
+ return false;
+ ++states;
+ }
+
+ return (*states == std::size_t(-1)) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+struct position_type
+{
+ std::size_t begin, end;
+};
+
+template <typename Iterator, typename Token>
+inline bool
+test_token_positions(Iterator begin, position_type const* positions,
+ std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (positions->begin == std::size_t(-1) &&
+ positions->end == std::size_t(-1))
+ {
+ return false; // reached end of expected data
+ }
+
+ boost::iterator_range<Iterator> matched = t.matched();
+ std::size_t start = std::distance(begin, matched.begin());
+ std::size_t end = std::distance(begin, matched.end());
+
+ // position must match
+ if (start != positions->begin || end != positions->end)
+ return false;
+
+ ++positions;
+ }
+
+ return (positions->begin == std::size_t(-1) &&
+ positions->end == std::size_t(-1)) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+int main()
+{
+ typedef std::string::iterator base_iterator_type;
+ std::string input(" 01 1.2 -2 0x3 2.3e6 -3.4");
+ int ids[] = { ID_INT, ID_DOUBLE, ID_INT, ID_INT, ID_DOUBLE, ID_DOUBLE, -1 };
+ std::size_t states[] = { 0, 1, 2, 1, 1, 2, std::size_t(-1) };
+ position_type positions[] =
+ {
+ { 1, 3 }, { 4, 7 }, { 8, 10 }, { 11, 14 }, { 15, 20 }, { 21, 25 },
+ { std::size_t(-1), std::size_t(-1) }
+ };
+
+ // token type: token id, iterator_pair as token value, no state
+ {
+ typedef lex::lexertl::token<
+ base_iterator_type, mpl::vector<>, mpl::false_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ }
+
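+ // position_token type: token id, iterator_pair as token value, no state; records the matched position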
+ {
+ typedef lex::lexertl::position_token<
+ base_iterator_type, mpl::vector<>, mpl::false_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_positions(input.begin(), positions, tokens));
+ }
+
+ // token type: holds token id, state, iterator_pair as token value
+ {
+ typedef lex::lexertl::token<
+ base_iterator_type, mpl::vector<>, mpl::true_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions_with_state<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_states(states, tokens));
+ }
+
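+ // position_token type: token id, lexer state, iterator_pair as token value; records the matched position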
+ {
+ typedef lex::lexertl::position_token<
+ base_iterator_type, mpl::vector<>, mpl::true_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions_with_state<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_states(states, tokens));
+ BOOST_TEST(test_token_positions(input.begin(), positions, tokens));
+ }
+
+ return boost::report_errors();
+}
Added: trunk/libs/spirit/test/lex/token_moretypes.cpp
==============================================================================
--- (empty file)
+++ trunk/libs/spirit/test/lex/token_moretypes.cpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -0,0 +1,307 @@
+// Copyright (c) 2001-2011 Hartmut Kaiser
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+// #define BOOST_SPIRIT_LEXERTL_DEBUG
+
+#include <boost/config/warning_disable.hpp>
+#include <boost/detail/lightweight_test.hpp>
+
+#include <boost/spirit/include/lex_lexertl.hpp>
+#include <boost/spirit/include/lex_lexertl_position_token.hpp>
+#include <boost/spirit/include/phoenix_object.hpp>
+#include <boost/spirit/include/phoenix_operator.hpp>
+#include <boost/spirit/include/phoenix_statement.hpp>
+#include <boost/spirit/include/phoenix_stl.hpp>
+#include <boost/spirit/include/qi_numeric.hpp>
+
+namespace spirit = boost::spirit;
+namespace lex = boost::spirit::lex;
+namespace phoenix = boost::phoenix;
+namespace mpl = boost::mpl;
+
+///////////////////////////////////////////////////////////////////////////////
+enum tokenids
+{
+ ID_INT = 1000,
+ ID_DOUBLE
+};
+
+template <typename Lexer>
+struct token_definitions : lex::lexer<Lexer>
+{
+ token_definitions()
+ {
+ this->self.add_pattern("HEXDIGIT", "[0-9a-fA-F]");
+ this->self.add_pattern("OCTALDIGIT", "[0-7]");
+ this->self.add_pattern("DIGIT", "[0-9]");
+
+ this->self.add_pattern("OPTSIGN", "[-+]?");
+ this->self.add_pattern("EXPSTART", "[eE][-+]");
+ this->self.add_pattern("EXPONENT", "[eE]{OPTSIGN}{DIGIT}+");
+
+ // define tokens and associate them with the lexer
+ int_ = "(0x|0X){HEXDIGIT}+|0{OCTALDIGIT}*|{OPTSIGN}[1-9]{DIGIT}*";
+ int_.id(ID_INT);
+
+ double_ = "{OPTSIGN}({DIGIT}*\\.{DIGIT}+|{DIGIT}+\\.){EXPONENT}?|{DIGIT}+{EXPONENT}";
+ double_.id(ID_DOUBLE);
+
+ whitespace = "[ \t\n]+";
+
+ this->self =
+ double_
+ | int_
+ | whitespace[ lex::_pass = lex::pass_flags::pass_ignore ]
+ ;
+ }
+
+ lex::token_def<int> int_;
+ lex::token_def<double> double_;
+ lex::token_def<lex::omit> whitespace;
+};
+
+template <typename Lexer>
+struct token_definitions_with_state : lex::lexer<Lexer>
+{
+ token_definitions_with_state()
+ {
+ this->self.add_pattern("HEXDIGIT", "[0-9a-fA-F]");
+ this->self.add_pattern("OCTALDIGIT", "[0-7]");
+ this->self.add_pattern("DIGIT", "[0-9]");
+
+ this->self.add_pattern("OPTSIGN", "[-+]?");
+ this->self.add_pattern("EXPSTART", "[eE][-+]");
+ this->self.add_pattern("EXPONENT", "[eE]{OPTSIGN}{DIGIT}+");
+
+ this->self.add_state();
+ this->self.add_state("INT");
+ this->self.add_state("DOUBLE");
+
+ // define tokens and associate them with the lexer
+ int_ = "(0x|0X){HEXDIGIT}+|0{OCTALDIGIT}*|{OPTSIGN}[1-9]{DIGIT}*";
+ int_.id(ID_INT);
+
+ double_ = "{OPTSIGN}({DIGIT}*\\.{DIGIT}+|{DIGIT}+\\.){EXPONENT}?|{DIGIT}+{EXPONENT}";
+ double_.id(ID_DOUBLE);
+
+ whitespace = "[ \t\n]+";
+
+ this->self("*") =
+ double_ [ lex::_state = "DOUBLE"]
+ | int_ [ lex::_state = "INT" ]
+ | whitespace[ lex::_pass = lex::pass_flags::pass_ignore ]
+ ;
+ }
+
+ lex::token_def<int> int_;
+ lex::token_def<double> double_;
+ lex::token_def<lex::omit> whitespace;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+template <typename Token>
+inline bool
+test_token_ids(int const* ids, std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (*ids == -1)
+ return false; // reached end of expected data
+
+ if (t.id() != static_cast<std::size_t>(*ids)) // token id must match
+ return false;
+
+ ++ids;
+ }
+
+ return (*ids == -1) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+template <typename Token>
+inline bool
+test_token_states(std::size_t const* states, std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (*states == std::size_t(-1))
+ return false; // reached end of expected data
+
+ if (t.state() != *states) // token state must match
+ return false;
+
+ ++states;
+ }
+
+ return (*states == std::size_t(-1)) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+struct position_type
+{
+ std::size_t begin, end;
+};
+
+template <typename Iterator, typename Token>
+inline bool
+test_token_positions(Iterator begin, position_type const* positions,
+ std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (positions->begin == std::size_t(-1) &&
+ positions->end == std::size_t(-1))
+ {
+ return false; // reached end of expected data
+ }
+
+ boost::iterator_range<Iterator> matched = t.matched();
+ std::size_t start = std::distance(begin, matched.begin());
+ std::size_t end = std::distance(begin, matched.end());
+
+ // position must match
+ if (start != positions->begin || end != positions->end)
+ return false;
+
+ ++positions;
+ }
+
+ return (positions->begin == std::size_t(-1) &&
+ positions->end == std::size_t(-1)) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+template <typename T>
+struct value
+{
+ bool valid;
+ T val;
+};
+
+template <typename T, typename Token>
+inline bool
+test_token_values(value<T> const* values, std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (values->valid && values->val == 0)
+ return false; // reached end of expected data
+
+ if (values->valid) {
+ T val;
+ spirit::traits::assign_to(t, val);
+ if (val != values->val) // token value must match
+ return false;
+ }
+
+ ++values;
+ }
+
+ return (values->valid && values->val == 0) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+int main()
+{
+ typedef std::string::iterator base_iterator_type;
+ std::string input(" 01 1.2 -2 03  2.3e6 -3.4");
+ int ids[] = { ID_INT, ID_DOUBLE, ID_INT, ID_INT, ID_DOUBLE, ID_DOUBLE, -1 };
+ std::size_t states[] = { 0, 1, 2, 1, 1, 2, std::size_t(-1) };
+ position_type positions[] =
+ {
+ { 1, 3 }, { 4, 7 }, { 8, 10 }, { 11, 13 }, { 15, 20 }, { 21, 25 },
+ { std::size_t(-1), std::size_t(-1) }
+ };
+ value<int> ivalues[] = {
+ { true, 1 }, { false }, { true, -2 },
+ { true, 3 }, { false }, { false },
+ { true, 0 }
+ };
+ value<double> dvalues[] = {
+ { false }, { true, 1.2 }, { false },
+ { false }, { true, 2.3e6 }, { true, -3.4 },
+ { true, 0.0 }
+ };
+
+ // token type: token id, variant token value (iterator_pair, double, or int), no state
+ {
+ typedef lex::lexertl::token<
+ base_iterator_type, mpl::vector<double, int>, mpl::false_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_values(ivalues, tokens));
+ BOOST_TEST(test_token_values(dvalues, tokens));
+ }
+
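+ // position_token type: token id, variant token value (iterator_pair, double, or int), no state; records the matched position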
+ {
+ typedef lex::lexertl::position_token<
+ base_iterator_type, mpl::vector<double, int>, mpl::false_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_positions(input.begin(), positions, tokens));
+ BOOST_TEST(test_token_values(ivalues, tokens));
+ BOOST_TEST(test_token_values(dvalues, tokens));
+ }
+
+ // token type: token id, lexer state, variant token value (iterator_pair, double, or int)
+ {
+ typedef lex::lexertl::token<
+ base_iterator_type, mpl::vector<double, int>, mpl::true_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions_with_state<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_states(states, tokens));
+ BOOST_TEST(test_token_values(ivalues, tokens));
+ BOOST_TEST(test_token_values(dvalues, tokens));
+ }
+
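+ // position_token type: token id, lexer state, variant token value (iterator_pair, double, or int); records the matched position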
+ {
+ typedef lex::lexertl::position_token<
+ base_iterator_type, mpl::vector<double, int>, mpl::true_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions_with_state<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_states(states, tokens));
+ BOOST_TEST(test_token_positions(input.begin(), positions, tokens));
+ BOOST_TEST(test_token_values(ivalues, tokens));
+ BOOST_TEST(test_token_values(dvalues, tokens));
+ }
+
+ return boost::report_errors();
+}
Added: trunk/libs/spirit/test/lex/token_omit.cpp
==============================================================================
--- (empty file)
+++ trunk/libs/spirit/test/lex/token_omit.cpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -0,0 +1,257 @@
+// Copyright (c) 2001-2011 Hartmut Kaiser
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+// #define BOOST_SPIRIT_LEXERTL_DEBUG
+
+#include <boost/config/warning_disable.hpp>
+#include <boost/detail/lightweight_test.hpp>
+
+#include <boost/spirit/include/lex_lexertl.hpp>
+#include <boost/spirit/include/lex_lexertl_position_token.hpp>
+#include <boost/spirit/include/phoenix_object.hpp>
+#include <boost/spirit/include/phoenix_operator.hpp>
+#include <boost/spirit/include/phoenix_statement.hpp>
+#include <boost/spirit/include/phoenix_stl.hpp>
+
+namespace lex = boost::spirit::lex;
+namespace phoenix = boost::phoenix;
+namespace mpl = boost::mpl;
+
+///////////////////////////////////////////////////////////////////////////////
+enum tokenids
+{
+ ID_INT = 1000,
+ ID_DOUBLE
+};
+
+template <typename Lexer>
+struct token_definitions : lex::lexer<Lexer>
+{
+ token_definitions()
+ {
+ this->self.add_pattern("HEXDIGIT", "[0-9a-fA-F]");
+ this->self.add_pattern("OCTALDIGIT", "[0-7]");
+ this->self.add_pattern("DIGIT", "[0-9]");
+
+ this->self.add_pattern("OPTSIGN", "[-+]?");
+ this->self.add_pattern("EXPSTART", "[eE][-+]");
+ this->self.add_pattern("EXPONENT", "[eE]{OPTSIGN}{DIGIT}+");
+
+ // define tokens and associate them with the lexer
+ int_ = "(0x|0X){HEXDIGIT}+|0{OCTALDIGIT}*|{OPTSIGN}[1-9]{DIGIT}*";
+ int_.id(ID_INT);
+
+ double_ = "{OPTSIGN}({DIGIT}*\\.{DIGIT}+|{DIGIT}+\\.){EXPONENT}?|{DIGIT}+{EXPONENT}";
+ double_.id(ID_DOUBLE);
+
+ whitespace = "[ \t\n]+";
+
+ this->self =
+ double_
+ | int_
+ | whitespace[ lex::_pass = lex::pass_flags::pass_ignore ]
+ ;
+ }
+
+ lex::token_def<lex::omit> int_;
+ lex::token_def<lex::omit> double_;
+ lex::token_def<lex::omit> whitespace;
+};
+
+template <typename Lexer>
+struct token_definitions_with_state : lex::lexer<Lexer>
+{
+ token_definitions_with_state()
+ {
+ this->self.add_pattern("HEXDIGIT", "[0-9a-fA-F]");
+ this->self.add_pattern("OCTALDIGIT", "[0-7]");
+ this->self.add_pattern("DIGIT", "[0-9]");
+
+ this->self.add_pattern("OPTSIGN", "[-+]?");
+ this->self.add_pattern("EXPSTART", "[eE][-+]");
+ this->self.add_pattern("EXPONENT", "[eE]{OPTSIGN}{DIGIT}+");
+
+ this->self.add_state();
+ this->self.add_state("INT");
+ this->self.add_state("DOUBLE");
+
+ // define tokens and associate them with the lexer
+ int_ = "(0x|0X){HEXDIGIT}+|0{OCTALDIGIT}*|{OPTSIGN}[1-9]{DIGIT}*";
+ int_.id(ID_INT);
+
+ double_ = "{OPTSIGN}({DIGIT}*\\.{DIGIT}+|{DIGIT}+\\.){EXPONENT}?|{DIGIT}+{EXPONENT}";
+ double_.id(ID_DOUBLE);
+
+ whitespace = "[ \t\n]+";
+
+ this->self("*") =
+ double_ [ lex::_state = "DOUBLE"]
+ | int_ [ lex::_state = "INT" ]
+ | whitespace[ lex::_pass = lex::pass_flags::pass_ignore ]
+ ;
+ }
+
+ lex::token_def<lex::omit> int_;
+ lex::token_def<lex::omit> double_;
+ lex::token_def<lex::omit> whitespace;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+template <typename Token>
+inline bool
+test_token_ids(int const* ids, std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (*ids == -1)
+ return false; // reached end of expected data
+
+ if (t.id() != static_cast<std::size_t>(*ids)) // token id must match
+ return false;
+
+ ++ids;
+ }
+
+ return (*ids == -1) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+template <typename Token>
+inline bool
+test_token_states(std::size_t const* states, std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (*states == std::size_t(-1))
+ return false; // reached end of expected data
+
+ if (t.state() != *states) // token state must match
+ return false;
+
+ ++states;
+ }
+
+ return (*states == std::size_t(-1)) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+struct position_type
+{
+ std::size_t begin, end;
+};
+
+template <typename Iterator, typename Token>
+inline bool
+test_token_positions(Iterator begin, position_type const* positions,
+ std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (positions->begin == std::size_t(-1) &&
+ positions->end == std::size_t(-1))
+ {
+ return false; // reached end of expected data
+ }
+
+ boost::iterator_range<Iterator> matched = t.matched();
+ std::size_t start = std::distance(begin, matched.begin());
+ std::size_t end = std::distance(begin, matched.end());
+
+ // position must match
+ if (start != positions->begin || end != positions->end)
+ return false;
+
+ ++positions;
+ }
+
+ return (positions->begin == std::size_t(-1) &&
+ positions->end == std::size_t(-1)) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+int main()
+{
+ typedef std::string::iterator base_iterator_type;
+ std::string input(" 01 1.2 -2 0x3 2.3e6 -3.4");
+ int ids[] = { ID_INT, ID_DOUBLE, ID_INT, ID_INT, ID_DOUBLE, ID_DOUBLE, -1 };
+ std::size_t states[] = { 0, 1, 2, 1, 1, 2, std::size_t(-1) };
+ position_type positions[] =
+ {
+ { 1, 3 }, { 4, 7 }, { 8, 10 }, { 11, 14 }, { 15, 20 }, { 21, 25 },
+ { std::size_t(-1), std::size_t(-1) }
+ };
+
+ // minimal token type: holds just token id, no state, no value
+ {
+ typedef lex::lexertl::token<
+ base_iterator_type, lex::omit, mpl::false_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ }
+
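+ // minimal position_token type: holds just token id and the matched position, no state, no value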
+ {
+ typedef lex::lexertl::position_token<
+ base_iterator_type, lex::omit, mpl::false_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_positions(input.begin(), positions, tokens));
+ }
+
+ // minimal token type: holds just token id and state, no value
+ {
+ typedef lex::lexertl::token<
+ base_iterator_type, lex::omit, mpl::true_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions_with_state<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_states(states, tokens));
+ }
+
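+ // minimal position_token type: holds token id, lexer state, and the matched position, no value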
+ {
+ typedef lex::lexertl::position_token<
+ base_iterator_type, lex::omit, mpl::true_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions_with_state<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_states(states, tokens));
+ BOOST_TEST(test_token_positions(input.begin(), positions, tokens));
+ }
+
+ return boost::report_errors();
+}
Added: trunk/libs/spirit/test/lex/token_onetype.cpp
==============================================================================
--- (empty file)
+++ trunk/libs/spirit/test/lex/token_onetype.cpp 2011-05-22 21:58:19 EDT (Sun, 22 May 2011)
@@ -0,0 +1,285 @@
+// Copyright (c) 2001-2011 Hartmut Kaiser
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+// #define BOOST_SPIRIT_LEXERTL_DEBUG
+
+#include <boost/config/warning_disable.hpp>
+#include <boost/detail/lightweight_test.hpp>
+
+#include <boost/spirit/include/lex_lexertl.hpp>
+#include <boost/spirit/include/lex_lexertl_position_token.hpp>
+#include <boost/spirit/include/phoenix_object.hpp>
+#include <boost/spirit/include/phoenix_operator.hpp>
+#include <boost/spirit/include/phoenix_statement.hpp>
+#include <boost/spirit/include/phoenix_stl.hpp>
+#include <boost/spirit/include/qi_numeric.hpp>
+
+namespace spirit = boost::spirit;
+namespace lex = boost::spirit::lex;
+namespace phoenix = boost::phoenix;
+namespace mpl = boost::mpl;
+
+///////////////////////////////////////////////////////////////////////////////
+enum tokenids
+{
+ ID_INT = 1000,
+ ID_DOUBLE
+};
+
+template <typename Lexer>
+struct token_definitions : lex::lexer<Lexer>
+{
+ token_definitions()
+ {
+ this->self.add_pattern("HEXDIGIT", "[0-9a-fA-F]");
+ this->self.add_pattern("OCTALDIGIT", "[0-7]");
+ this->self.add_pattern("DIGIT", "[0-9]");
+
+ this->self.add_pattern("OPTSIGN", "[-+]?");
+ this->self.add_pattern("EXPSTART", "[eE][-+]");
+ this->self.add_pattern("EXPONENT", "[eE]{OPTSIGN}{DIGIT}+");
+
+ // define tokens and associate them with the lexer
+ int_ = "{OPTSIGN}[1-9]{DIGIT}*";
+ int_.id(ID_INT);
+
+ double_ = "{OPTSIGN}({DIGIT}*\\.{DIGIT}+|{DIGIT}+\\.){EXPONENT}?|{DIGIT}+{EXPONENT}";
+ double_.id(ID_DOUBLE);
+
+ whitespace = "[ \t\n]+";
+
+ this->self =
+ double_
+ | int_
+ | whitespace[ lex::_pass = lex::pass_flags::pass_ignore ]
+ ;
+ }
+
+ lex::token_def<double> int_;
+ lex::token_def<double> double_;
+ lex::token_def<lex::omit> whitespace;
+};
+
+template <typename Lexer>
+struct token_definitions_with_state : lex::lexer<Lexer>
+{
+ token_definitions_with_state()
+ {
+ this->self.add_pattern("HEXDIGIT", "[0-9a-fA-F]");
+ this->self.add_pattern("OCTALDIGIT", "[0-7]");
+ this->self.add_pattern("DIGIT", "[0-9]");
+
+ this->self.add_pattern("OPTSIGN", "[-+]?");
+ this->self.add_pattern("EXPSTART", "[eE][-+]");
+ this->self.add_pattern("EXPONENT", "[eE]{OPTSIGN}{DIGIT}+");
+
+ this->self.add_state();
+ this->self.add_state("INT");
+ this->self.add_state("DOUBLE");
+
+ // define tokens and associate them with the lexer
+ int_ = "{OPTSIGN}[1-9]{DIGIT}*";
+ int_.id(ID_INT);
+
+ double_ = "{OPTSIGN}({DIGIT}*\\.{DIGIT}+|{DIGIT}+\\.){EXPONENT}?|{DIGIT}+{EXPONENT}";
+ double_.id(ID_DOUBLE);
+
+ whitespace = "[ \t\n]+";
+
+ this->self("*") =
+ double_ [ lex::_state = "DOUBLE"]
+ | int_ [ lex::_state = "INT" ]
+ | whitespace[ lex::_pass = lex::pass_flags::pass_ignore ]
+ ;
+ }
+
+ lex::token_def<double> int_;
+ lex::token_def<double> double_;
+ lex::token_def<lex::omit> whitespace;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+template <typename Token>
+inline bool
+test_token_ids(int const* ids, std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (*ids == -1)
+ return false; // reached end of expected data
+
+ if (t.id() != static_cast<std::size_t>(*ids)) // token id must match
+ return false;
+
+ ++ids;
+ }
+
+ return (*ids == -1) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+template <typename Token>
+inline bool
+test_token_states(std::size_t const* states, std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (*states == std::size_t(-1))
+ return false; // reached end of expected data
+
+ if (t.state() != *states) // token state must match
+ return false;
+
+ ++states;
+ }
+
+ return (*states == std::size_t(-1)) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+struct position_type
+{
+ std::size_t begin, end;
+};
+
+template <typename Iterator, typename Token>
+inline bool
+test_token_positions(Iterator begin, position_type const* positions,
+ std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (positions->begin == std::size_t(-1) &&
+ positions->end == std::size_t(-1))
+ {
+ return false; // reached end of expected data
+ }
+
+ boost::iterator_range<Iterator> matched = t.matched();
+ std::size_t start = std::distance(begin, matched.begin());
+ std::size_t end = std::distance(begin, matched.end());
+
+ // position must match
+ if (start != positions->begin || end != positions->end)
+ return false;
+
+ ++positions;
+ }
+
+ return (positions->begin == std::size_t(-1) &&
+ positions->end == std::size_t(-1)) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+template <typename Token>
+inline bool
+test_token_values(double const* values, std::vector<Token> const& tokens)
+{
+ BOOST_FOREACH(Token const& t, tokens)
+ {
+ if (*values == 0.0)
+ return false; // reached end of expected data
+
+ double val;
+ spirit::traits::assign_to(t, val);
+ if (val != *values) // token value must match
+ return false;
+
+ ++values;
+ }
+
+ return (*values == 0.0) ? true : false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+int main()
+{
+ typedef std::string::iterator base_iterator_type;
+ std::string input("  1 1.2 -2   3 2.3e6 -3.4");
+ int ids[] = { ID_INT, ID_DOUBLE, ID_INT, ID_INT, ID_DOUBLE, ID_DOUBLE, -1 };
+ std::size_t states[] = { 0, 1, 2, 1, 1, 2, std::size_t(-1) };
+ position_type positions[] =
+ {
+ { 2, 3 }, { 4, 7 }, { 8, 10 }, { 13, 14 }, { 15, 20 }, { 21, 25 },
+ { std::size_t(-1), std::size_t(-1) }
+ };
+ double values[] = { 1.0, 1.2, -2.0, 3.0, 2.3e6, -3.4, 0.0 };
+
+ // token type: token id, variant token value (iterator_pair or double), no state
+ {
+ typedef lex::lexertl::token<
+ base_iterator_type, mpl::vector<double>, mpl::false_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_values(values, tokens));
+ }
+
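+ // position_token type exposing exactly one token value type (double), no state; records the matched position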
+ {
+ typedef lex::lexertl::position_token<
+ base_iterator_type, mpl::vector<double>, mpl::false_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_positions(input.begin(), positions, tokens));
+ BOOST_TEST(test_token_values(values, tokens));
+ }
+
+ // token type: token id, lexer state, variant token value (iterator_pair or double)
+ {
+ typedef lex::lexertl::token<
+ base_iterator_type, mpl::vector<double>, mpl::true_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions_with_state<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_states(states, tokens));
+ BOOST_TEST(test_token_values(values, tokens));
+ }
+
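+ // position_token type exposing exactly one token value type (double), plus lexer state; records the matched position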
+ {
+ typedef lex::lexertl::position_token<
+ base_iterator_type, mpl::vector<double>, mpl::true_> token_type;
+ typedef lex::lexertl::actor_lexer<token_type> lexer_type;
+
+ token_definitions_with_state<lexer_type> lexer;
+ std::vector<token_type> tokens;
+ base_iterator_type first = input.begin();
+
+ using phoenix::arg_names::_1;
+ BOOST_TEST(lex::tokenize(first, input.end(), lexer
+ , phoenix::push_back(phoenix::ref(tokens), _1)));
+
+ BOOST_TEST(test_token_ids(ids, tokens));
+ BOOST_TEST(test_token_states(states, tokens));
+ BOOST_TEST(test_token_positions(input.begin(), positions, tokens));
+ BOOST_TEST(test_token_values(values, tokens));
+ }
+
+ return boost::report_errors();
+}