Skip to content

Commit b1f0917

Browse files
committed
Boost.Bind -> C++11 lambdas
1 parent bfb2c4b commit b1f0917

File tree

9 files changed

+92
-118
lines changed

9 files changed

+92
-118
lines changed

src/cancellable_streambuf.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,6 @@
2727
#include <boost/asio/io_context.hpp>
2828
#include <boost/asio/ip/tcp.hpp>
2929
#include <boost/asio/stream_socket_service.hpp>
30-
#include <boost/bind/bind.hpp>
3130
#include <boost/utility/base_from_member.hpp>
3231
#include <exception>
3332
#include <set>
@@ -68,8 +67,7 @@ class cancellable_streambuf : public std::streambuf,
6867
cancel_issued_ = true;
6968
lslboost::lock_guard<lslboost::recursive_mutex> lock(cancel_mut_);
7069
cancel_started_ = false;
71-
this->get_service().get_io_context().post(
72-
lslboost::bind(&cancellable_streambuf::close_if_open, this));
70+
this->get_service().get_io_context().post([this]() { close_if_open(); });
7371
}
7472

7573

src/data_receiver.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
#include "cancellable_streambuf.h"
33
#include "sample.h"
44
#include "socket_utils.h"
5-
#include <boost/bind.hpp>
65
#include <iostream>
76
#include <memory>
87

src/info_receiver.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
#include "info_receiver.h"
22
#include "cancellable_streambuf.h"
33
#include "inlet_connection.h"
4-
#include <boost/bind.hpp>
54
#include <iostream>
65
#include <memory>
76

src/resolve_attempt_udp.cpp

Lines changed: 20 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -4,13 +4,10 @@
44
#include "socket_utils.h"
55
#include <boost/asio/ip/multicast.hpp>
66
#include <boost/asio/placeholders.hpp>
7-
#include <boost/bind.hpp>
8-
9-
10-
// === implementation of the resolver_burst_udp class ===
117

128
using namespace lsl;
139
using namespace lslboost::asio;
10+
using err_t = const lslboost::system::error_code &;
1411

1512
resolve_attempt_udp::resolve_attempt_udp(io_context &io, const udp &protocol,
1613
const std::vector<udp::endpoint> &targets, const std::string &query, result_container &results,
@@ -73,22 +70,25 @@ void resolve_attempt_udp::begin() {
7370
// also initiate the cancel event, if desired
7471
if (cancel_after_ != FOREVER) {
7572
cancel_timer_.expires_after(timeout_sec(cancel_after_));
76-
cancel_timer_.async_wait(lslboost::bind(
77-
&resolve_attempt_udp::handle_timeout, shared_from_this(), placeholders::error));
73+
auto keepalive(shared_from_this());
74+
cancel_timer_.async_wait([keepalive, this](err_t err) {
75+
if (!err) do_cancel();
76+
});
7877
}
7978
}
8079

8180
void resolve_attempt_udp::cancel() {
82-
post(io_, lslboost::bind(&resolve_attempt_udp::do_cancel, shared_from_this()));
81+
auto keepalive(shared_from_this());
82+
post(io_, [keepalive]() { keepalive->do_cancel(); });
8383
}
8484

8585

8686
// === receive loop ===
8787

8888
void resolve_attempt_udp::receive_next_result() {
89+
auto keepalive(shared_from_this());
8990
recv_socket_.async_receive_from(buffer(resultbuf_), remote_endpoint_,
90-
lslboost::bind(&resolve_attempt_udp::handle_receive_outcome, shared_from_this(),
91-
placeholders::error, placeholders::bytes_transferred));
91+
[keepalive, this](err_t err, size_t len) { handle_receive_outcome(err, len); });
9292
}
9393

9494
void resolve_attempt_udp::handle_receive_outcome(error_code err, std::size_t len) {
@@ -142,9 +142,9 @@ void resolve_attempt_udp::handle_receive_outcome(error_code err, std::size_t len
142142

143143
// === send loop ===
144144

145-
void resolve_attempt_udp::send_next_query(endpoint_list::const_iterator i) {
146-
if (i != targets_.end() && !cancelled_) {
147-
udp::endpoint ep(*i);
145+
void resolve_attempt_udp::send_next_query(endpoint_list::const_iterator next) {
146+
if (next != targets_.end() && !cancelled_) {
147+
udp::endpoint ep(*next++);
148148
// endpoint matches our active protocol?
149149
if (ep.protocol() == recv_socket_.local_endpoint().protocol()) {
150150
// select socket to use
@@ -153,29 +153,19 @@ void resolve_attempt_udp::send_next_query(endpoint_list::const_iterator i) {
153153
? broadcast_socket_
154154
: (ep.address().is_multicast() ? multicast_socket_ : unicast_socket_);
155155
// and send the query over it
156-
sock.async_send_to(lslboost::asio::buffer(query_msg_), ep,
157-
lslboost::bind(&resolve_attempt_udp::handle_send_outcome, shared_from_this(), ++i,
158-
placeholders::error));
156+
auto keepalive(shared_from_this());
157+
sock.async_send_to(
158+
lslboost::asio::buffer(query_msg_), ep, [keepalive, this, next](err_t err, size_t) {
159+
if (!cancelled_ && err != error::operation_aborted &&
160+
err != error::not_connected && err != error::not_socket)
161+
send_next_query(next);
162+
});
159163
} else
160164
// otherwise just go directly to the next query
161-
send_next_query(++i);
165+
send_next_query(next);
162166
}
163167
}
164168

165-
/// Handler that gets called when a send has completed
166-
void resolve_attempt_udp::handle_send_outcome(endpoint_list::const_iterator i, error_code err) {
167-
if (!cancelled_ && err != error::operation_aborted && err != error::not_connected &&
168-
err != error::not_socket)
169-
send_next_query(i);
170-
}
171-
172-
173-
// === cancellation logic ===
174-
175-
void resolve_attempt_udp::handle_timeout(error_code err) {
176-
if (!err) do_cancel();
177-
}
178-
179169
void resolve_attempt_udp::do_cancel() {
180170
try {
181171
cancelled_ = true;

src/resolve_attempt_udp.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -86,14 +86,8 @@ class resolve_attempt_udp : public cancellable_obj,
8686
/// Handler that gets called when a receive has completed.
8787
void handle_receive_outcome(error_code err, std::size_t len);
8888

89-
/// Handler that gets called when a send has completed.
90-
void handle_send_outcome(endpoint_list::const_iterator i, error_code err);
91-
9289
// === cancellation ===
9390

94-
/// Handler that gets called when the give up timeout has expired.
95-
void handle_timeout(error_code err);
96-
9791
/// Cancel the outstanding operations.
9892
void do_cancel();
9993

src/resolver_impl.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44
#include "socket_utils.h"
55
#include <boost/asio/ip/udp.hpp>
66
#include <boost/asio/placeholders.hpp>
7-
#include <boost/bind.hpp>
87
#include <boost/thread/thread_only.hpp>
98
#include <memory>
109

@@ -137,7 +136,8 @@ void resolver_impl::resolve_continuous(const std::string &query, double forget_a
137136
// start a wave of resolve packets
138137
next_resolve_wave();
139138
// spawn a thread that runs the IO operations
140-
background_io_ = std::make_shared<lslboost::thread>(lslboost::bind(&io_context::run, io_));
139+
auto io_keepalive(io_);
140+
background_io_ = std::make_shared<lslboost::thread>([io_keepalive]() { io_keepalive->run(); });
141141
}
142142

143143
std::vector<stream_info_impl> resolver_impl::results(uint32_t max_results) {

src/tcp_server.cpp

Lines changed: 50 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
#include "portable_archive/portable_oarchive.hpp"
2424

2525
using namespace lslboost::asio;
26+
using err_t = const lslboost::system::error_code &;
2627

2728
namespace lsl {
2829
/**
@@ -77,16 +78,9 @@ class client_session : public std::enable_shared_from_this<client_session> {
7778
/// Handler that gets called after finishing reading of the query line.
7879
void handle_read_query_outcome(error_code err);
7980

80-
/// Handler that gets called after finishing the sending of a reply (nothing to do here).
81-
void handle_send_outcome(error_code) {}
82-
8381
/// Helper function to send a status message to the connected party.
8482
void send_status_message(const std::string &msg);
8583

86-
/// Handler that gets called after finishing the sending of a message, holding a reference
87-
/// to the message.
88-
void handle_status_outcome(string_p, error_code) {}
89-
9084
/// Handler that gets called after finishing the reading of feedparameters.
9185
void handle_read_feedparams(
9286
int request_protocol_version, std::string request_uid, error_code err);
@@ -188,7 +182,8 @@ void tcp_server::end_serving() {
188182
shutdown_ = true;
189183
// issue closure of the server socket; this will result in a cancellation of the associated IO
190184
// operations
191-
post(*io_, lslboost::bind(&tcp::acceptor::close, acceptor_));
185+
auto keepalive(acceptor_);
186+
post(*io_, [keepalive]() { keepalive->close(); });
192187
// issue closure of all active client session sockets; cancels the related outstanding IO jobs
193188
close_inflight_sockets();
194189
// also notify any transfer threads that are blocked waiting for a sample by sending them one (=
@@ -204,9 +199,9 @@ void tcp_server::accept_next_connection() {
204199
std::shared_ptr<client_session> newsession{
205200
std::make_shared<client_session>(shared_from_this())};
206201
// accept a connection on the session's socket
207-
acceptor_->async_accept(
208-
*newsession->socket(), lslboost::bind(&tcp_server::handle_accept_outcome,
209-
shared_from_this(), newsession, placeholders::error));
202+
auto keepalive(shared_from_this());
203+
acceptor_->async_accept(*newsession->socket(),
204+
[keepalive, newsession, this](err_t err) { handle_accept_outcome(newsession, err); });
210205
} catch (std::exception &e) {
211206
LOG_F(ERROR, "Error during tcp_server::accept_next_connection: %s", e.what());
212207
}
@@ -234,22 +229,22 @@ void tcp_server::unregister_inflight_socket(const tcp_socket_p &sock) {
234229
inflight_.erase(sock);
235230
}
236231

237-
template <class SocketPtr, class Protocol> void shutdown_and_close(SocketPtr sock) {
238-
try {
239-
if (sock->is_open()) {
240-
try {
241-
// (in some cases shutdown may fail)
242-
sock->shutdown(Protocol::socket::shutdown_both);
243-
} catch (...) {}
244-
sock->close();
245-
}
246-
} catch (std::exception &e) { LOG_F(WARNING, "Error during shutdown_and_close: %s", e.what()); }
247-
}
248-
249232
void tcp_server::close_inflight_sockets() {
250233
lslboost::lock_guard<lslboost::recursive_mutex> lock(inflight_mut_);
251-
for (const auto &i : inflight_)
252-
post(*io_, lslboost::bind(&shutdown_and_close<tcp_socket_p, tcp>, i));
234+
for (auto sock : inflight_)
235+
post(*io_, [sock]() {
236+
try {
237+
if (sock->is_open()) {
238+
try {
239+
// (in some cases shutdown may fail)
240+
sock->shutdown(sock->shutdown_both);
241+
} catch (...) {}
242+
sock->close();
243+
}
244+
} catch (std::exception &e) {
245+
LOG_F(WARNING, "Error during shutdown_and_close: %s", e.what());
246+
}
247+
});
253248
}
254249

255250

@@ -272,9 +267,9 @@ void client_session::begin_processing() {
272267
serv_->register_inflight_socket(sock_);
273268
registered_ = true;
274269
// read the request line
270+
auto keepalive(shared_from_this());
275271
async_read_until(*sock_, requestbuf_, "\r\n",
276-
lslboost::bind(&client_session::handle_read_command_outcome, shared_from_this(),
277-
placeholders::error));
272+
[keepalive, this](err_t err, size_t) { handle_read_command_outcome(err); });
278273
} catch (std::exception &e) {
279274
LOG_F(ERROR, "Error during client_session::begin_processing: %s", e.what());
280275
}
@@ -287,29 +282,29 @@ void client_session::handle_read_command_outcome(error_code err) {
287282
std::string method;
288283
getline(requeststream_, method);
289284
method = trim(method);
285+
auto keepalive(shared_from_this());
290286
if (method == "LSL:shortinfo")
291287
// shortinfo request: read the content query string
292288
async_read_until(*sock_, requestbuf_, "\r\n",
293-
lslboost::bind(&client_session::handle_read_query_outcome, shared_from_this(),
294-
placeholders::error));
289+
[keepalive, this](err_t err, std::size_t) { handle_read_query_outcome(err); });
295290
if (method == "LSL:fullinfo")
296291
// fullinfo request: reply right away
297292
async_write(*sock_, lslboost::asio::buffer(serv_->fullinfo_msg_),
298-
lslboost::bind(&client_session::handle_send_outcome, shared_from_this(),
299-
placeholders::error));
293+
[keepalive, this](err_t, std::size_t) { });
300294
if (method == "LSL:streamfeed")
301295
// streamfeed request (1.00): read feed parameters
302-
async_read_until(*sock_, requestbuf_, "\r\n",
303-
lslboost::bind(&client_session::handle_read_feedparams, shared_from_this(), 100,
304-
"", placeholders::error));
296+
async_read_until(
297+
*sock_, requestbuf_, "\r\n", [keepalive, this](err_t err, std::size_t) {
298+
handle_read_feedparams(100, "", err);
299+
});
305300
if (method.compare(0, 15, "LSL:streamfeed/") == 0) {
306301
// streamfeed request with version: read feed parameters
307302
std::vector<std::string> parts = splitandtrim(method, ' ', true);
308303
int request_protocol_version = std::stoi(parts[0].substr(15));
309304
std::string request_uid = (parts.size() > 1) ? parts[1] : "";
310-
async_read_until(*sock_, requestbuf_, "\r\n\r\n",
311-
lslboost::bind(&client_session::handle_read_feedparams, shared_from_this(),
312-
request_protocol_version, request_uid, placeholders::error));
305+
async_read_until(*sock_, requestbuf_, "\r\n\r\n", [=](err_t err, std::size_t) {
306+
keepalive->handle_read_feedparams(request_protocol_version, request_uid, err);
307+
});
313308
}
314309
}
315310
} catch (std::exception &e) {
@@ -324,12 +319,14 @@ void client_session::handle_read_query_outcome(error_code err) {
324319
std::string query;
325320
getline(requeststream_, query);
326321
query = trim(query);
327-
if (serv_->info_->matches_query(query))
322+
if (serv_->info_->matches_query(query)) {
328323
// matches: reply (otherwise just close the stream)
324+
auto keepalive(shared_from_this());
329325
async_write(*sock_, lslboost::asio::buffer(serv_->shortinfo_msg_),
330-
lslboost::bind(&client_session::handle_send_outcome, shared_from_this(),
331-
placeholders::error));
332-
else
326+
[keepalive](err_t, std::size_t) {
327+
/* keep the client_session alive until the shortinfo is sent completely*/
328+
});
329+
} else
333330
LOG_F(INFO, "%p got a shortinfo query response for the wrong query", this);
334331
}
335332
} catch (std::exception &e) {
@@ -338,10 +335,11 @@ void client_session::handle_read_query_outcome(error_code err) {
338335
}
339336

340337
void client_session::send_status_message(const std::string &str) {
341-
string_p msg(std::make_shared<std::string>(str));
338+
auto msg(std::make_shared<std::string>(str));
339+
auto keepalive(shared_from_this());
342340
async_write(*sock_, lslboost::asio::buffer(*msg),
343-
lslboost::bind(
344-
&client_session::handle_status_outcome, shared_from_this(), msg, placeholders::error));
341+
[msg, keepalive](
342+
err_t, std::size_t) { /* keep objects alive until the message is sent */ });
345343
}
346344

347345
void client_session::handle_read_feedparams(
@@ -489,9 +487,10 @@ void client_session::handle_read_feedparams(
489487
else
490488
*outarch_ << *temp;
491489
// send off the newly created feedheader
492-
async_write(*sock_, feedbuf_.data(),
493-
lslboost::bind(&client_session::handle_send_feedheader_outcome, shared_from_this(),
494-
placeholders::error, placeholders::bytes_transferred));
490+
auto keepalive(shared_from_this());
491+
async_write(*sock_, feedbuf_.data(), [keepalive, this](err_t err, size_t len) {
492+
handle_send_feedheader_outcome(err, len);
493+
});
495494
DLOG_F(4, "%p sent test pattern samples", this);
496495
}
497496
} catch (std::exception &e) {
@@ -545,10 +544,10 @@ void client_session::transfer_samples_thread(std::shared_ptr<client_session>) {
545544
// send off the chunk that we aggregated so far
546545
lslboost::unique_lock<lslboost::mutex> lock(completion_mut_);
547546
transfer_completed_ = false;
548-
async_write(*sock_, feedbuf_.data(),
549-
lslboost::bind(&client_session::handle_chunk_transfer_outcome,
550-
shared_from_this(), placeholders::error,
551-
placeholders::bytes_transferred));
547+
auto keepalive(shared_from_this());
548+
async_write(*sock_, feedbuf_.data(), [keepalive, this](err_t err, size_t len) {
549+
handle_chunk_transfer_outcome(err, len);
550+
});
552551
// wait for the completion condition
553552
completion_cond_.wait(lock, [this]() { return transfer_completed_; });
554553
// handle transfer outcome

0 commit comments

Comments (0)