Subject: [Boost-users] boost asio synchronous vs asynchronous operations performance
From: Donald Alan (donaldalan003_at_[hidden])
Date: 2014-03-21 12:54:59


Hi,

I'm trying to compare the performance of boost::asio asynchronous vs
synchronous IO operations for a single client.

Below are sample synchronous and asynchronous server applications, which
send a 25-byte message to the client continuously in a loop. On the client
side, I'm checking the rate at which it is able to receive the messages. The
setup is pretty simple. In the synchronous case the server spawns a new
thread per client connection, and that thread keeps sending the 25-byte
message in a loop. In the asynchronous case the server likewise spawns a new
thread per client connection, and that thread keeps sending the 25-byte
message in a loop using asynchronous writes (the main thread is the one that
calls io_service.run()). For the performance testing I'm using only one
client.

*Synchronous server code*

#include <iostream>
#include <boost/bind.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/asio.hpp>
#include <boost/thread.hpp>

using boost::asio::ip::tcp;

class tcp_connection : public boost::enable_shared_from_this<tcp_connection>
{
public:
    typedef boost::shared_ptr<tcp_connection> pointer;

    static pointer create(boost::asio::io_service& io_service)
    {
       return pointer(new tcp_connection(io_service));
    }

    tcp::socket& socket()
    {
        return socket_;
    }

    void start()
    {
        for (;;) {
            try {
                ssize_t len = boost::asio::write(socket_,
                    boost::asio::buffer(message_));
                if (len != message_.length()) {
                    std::cerr<<"Unable to write all the bytes"<<std::endl;
                    break;
                }
                if (len == -1) {
                    std::cerr<<"Remote end closed the connection"<<std::endl;
                    break;
                }
            }
            catch (std::exception& e) {
                std::cerr<<"Error while sending data"<<std::endl;
                break;
            }
        }
    }

private:
    tcp_connection(boost::asio::io_service& io_service)
        : socket_(io_service),
          message_(25, 'A')
    {
    }

    tcp::socket socket_;
    std::string message_;
};

class tcp_server
{
public:
    tcp_server(boost::asio::io_service& io_service)
        : acceptor_(io_service, tcp::endpoint(tcp::v4(), 1234))
    {
        start_accept();
    }

private:
    void start_accept()
    {
        for (;;) {
            tcp_connection::pointer new_connection =
                tcp_connection::create(acceptor_.get_io_service());
            acceptor_.accept(new_connection->socket());
            boost::thread(boost::bind(&tcp_connection::start, new_connection));
        }
    }
    tcp::acceptor acceptor_;
};

int main()
{
    try {
        boost::asio::io_service io_service;
        tcp_server server(io_service);
    }
    catch (std::exception& e) {
        std::cerr << e.what() << std::endl;
    }
    return 0;
}

*Asynchronous server code*
#include <iostream>
#include <string>
#include <boost/bind.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/asio.hpp>

#include <boost/thread.hpp>

using boost::asio::ip::tcp;

class tcp_connection
        : public boost::enable_shared_from_this<tcp_connection>
{
public:
    typedef boost::shared_ptr<tcp_connection> pointer;

    static pointer create(boost::asio::io_service& io_service)
    {
        return pointer(new tcp_connection(io_service));
    }

    tcp::socket& socket()
    {
        return socket_;
    }

    void start()
    {
        while (socket_.is_open()) {
            boost::asio::async_write(socket_, boost::asio::buffer(message_),
                boost::bind(&tcp_connection::handle_write, shared_from_this(),
                            boost::asio::placeholders::error,
                            boost::asio::placeholders::bytes_transferred));
        }
    }

private:
    tcp_connection(boost::asio::io_service& io_service)
        : socket_(io_service),
          message_(25, 'A')
    {
    }

    void handle_write(const boost::system::error_code& error,
                      size_t bytes_transferred)
    {
        if (error) {
            if (socket_.is_open()) {
                std::cout<<"Error while sending data asynchronously"<<std::endl;
                socket_.close();
            }
        }
    }

    tcp::socket socket_;
    std::string message_;
};

class tcp_server
{
public:
    tcp_server(boost::asio::io_service& io_service)
        : acceptor_(io_service, tcp::endpoint(tcp::v4(), 1234))
    {
        start_accept();
    }

private:
    void start_accept()
    {
        tcp_connection::pointer new_connection =
                tcp_connection::create(acceptor_.get_io_service());
        acceptor_.async_accept(new_connection->socket(),
                boost::bind(&tcp_server::handle_accept, this, new_connection,
                        boost::asio::placeholders::error));
    }

    void handle_accept(tcp_connection::pointer new_connection,
                       const boost::system::error_code& error)
    {
        if (!error) {
            boost::thread(boost::bind(&tcp_connection::start, new_connection));
        }

        start_accept();
    }

    tcp::acceptor acceptor_;
};

int main()
{
    try {
        boost::asio::io_service io_service;
        tcp_server server(io_service);
        io_service.run();
    }
    catch (std::exception& e) {
        std::cerr << e.what() << std::endl;
    }

    return 0;
}

*Client code*
#include <iostream>

#include <boost/asio.hpp>
#include <boost/array.hpp>

int main(int argc, char* argv[])
{
    if (argc != 3) {
        std::cerr<<"Usage: client <server-host> <server-port>"<<std::endl;
        return 1;
    }

    boost::asio::io_service io_service;
    boost::asio::ip::tcp::resolver resolver(io_service);
    boost::asio::ip::tcp::resolver::query query(argv[1], argv[2]);
    boost::asio::ip::tcp::resolver::iterator it = resolver.resolve(query);
    boost::asio::ip::tcp::resolver::iterator end;
    boost::asio::ip::tcp::socket socket(io_service);
    boost::asio::connect(socket, it);

    // StatsCollector to periodically print received-message stats
    // sample::myboost::StatsCollector stats_collector(5);
    // sample::myboost::StatsCollectorScheduler statsScheduler(stats_collector);
    // statsScheduler.start();

    for (;;) {
        boost::array<char, 25> buf;
        boost::system::error_code error;
        // Note: read_some may return fewer than 25 bytes; the commented-out
        // boost::asio::read below would wait for the full buffer instead.
        size_t len = socket.read_some(boost::asio::buffer(buf), error);
        // size_t len = boost::asio::read(socket, boost::asio::buffer(buf));
        if (len != buf.size()) {
            std::cerr<<"Length is not "<<buf.size()<<" but "<<len<<std::endl;
        }
        // stats_collector.incr_msgs_received();
    }
}
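
The StatsCollector and StatsCollectorScheduler referenced in the comments
above are small helpers from my own code and aren't included here. For anyone
who wants to reproduce the measurement, a minimal stand-in that just counts
messages and prints the rate roughly once a second could look like the sketch
below (the class name is mine, not part of Boost, and this variant skips the
separate scheduler thread):

// Minimal stand-in for the commented-out StatsCollector: count received
// messages on the receive loop and print the rate about once per second.
#include <iostream>
#include <boost/cstdint.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>

class simple_stats_collector
{
public:
    simple_stats_collector()
        : count_(0),
          last_report_(boost::posix_time::microsec_clock::universal_time())
    {
    }

    void incr_msgs_received()
    {
        ++count_;
        boost::posix_time::ptime now =
            boost::posix_time::microsec_clock::universal_time();
        boost::posix_time::time_duration elapsed = now - last_report_;
        if (elapsed.total_milliseconds() >= 1000) {
            double secs = elapsed.total_milliseconds() / 1000.0;
            std::cout << static_cast<boost::uint64_t>(count_ / secs)
                      << " msgs/sec" << std::endl;
            count_ = 0;
            last_report_ = now;
        }
    }

private:
    boost::uint64_t count_;
    boost::posix_time::ptime last_report_;
};

It would be constructed before the receive loop, with incr_msgs_received()
called where the commented-out call sits. The clock read on every call adds a
little overhead, but it is the same against both servers, so the comparison
should still hold.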

*Question:*
When the client runs against the synchronous server it is able to receive
around 700K msgs/sec, but when it runs against the asynchronous server the
rate drops to around 100K-120K msgs/sec. I know that asynchronous IO is
meant for scalability when there are many clients, and since I'm using only
a single client here its obvious advantage won't show up. But is
asynchronous IO expected to hurt performance this badly in the
single-client case, or am I missing some obvious best practices for
asynchronous IO? Is the significant drop in performance caused by the
thread switch between the io_service thread (which is the main thread in
the above case) and the connection thread?
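
For reference, the alternative structure I'm aware of is to chain writes from
the completion handler rather than looping in a separate thread, so that only
one async_write is outstanding at a time. A rough, untested sketch of how
start() and handle_write() in tcp_connection above might look in that style,
using a hypothetical do_write() helper:

    // Chain the next write from the completion handler so that only one
    // async_write is in flight at any time (sketch, untested).
    void start()
    {
        do_write();
    }

    void do_write()
    {
        boost::asio::async_write(socket_, boost::asio::buffer(message_),
            boost::bind(&tcp_connection::handle_write, shared_from_this(),
                        boost::asio::placeholders::error,
                        boost::asio::placeholders::bytes_transferred));
    }

    void handle_write(const boost::system::error_code& error,
                      size_t /*bytes_transferred*/)
    {
        if (error) {
            if (socket_.is_open()) {
                std::cout<<"Error while sending data asynchronously"<<std::endl;
                socket_.close();
            }
            return;
        }
        // Queue the next message only after the previous one completed.
        do_write();
    }

I haven't benchmarked this variant, so I don't know how much of the
difference (if any) it would account for.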

*Setup:*
I'm using Boost 1.47 on a Linux machine.

--
View this message in context: http://boost.2283326.n4.nabble.com/boost-asio-synchronous-vs-asynchronous-operations-performance-tp4660578.html
Sent from the Boost - Users mailing list archive at Nabble.com.
