-
Notifications
You must be signed in to change notification settings - Fork 223
Open
Description
Apologies if this is a known limitation. In short, it looks like when timeouts are configured, Beast may use the executor associated with a completion handler after that completion handler has been destroyed. The completion handler in the stdexec integration captures an operation state by reference/pointer, so once the operation is done the executor must not be used; Beast appears to break this assumption.
As a workaround I simply don't set a timeout.
Interestingly, I couldn't reproduce this on Linux — only on my Mac, with Clang 19.
Repro:
#include <asioexec/use_sender.hpp>
#include <execpools/asio/asio_thread_pool.hpp>
#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/websocket.hpp>
#include <stdexec/execution.hpp>
#include <chrono>
#include <iostream>
#include <thread>
namespace net = boost::asio;
namespace beast = boost::beast;
namespace websocket = beast::websocket;
using tcp = net::ip::tcp;
// Repro for the reported Beast/stdexec issue: an accept + WebSocket handshake
// driven through asioexec senders, with Beast's suggested timeout options set.
// After the sender operation state is destroyed, Beast's internal timeout
// handling may still use the executor associated with the (now-destroyed)
// completion handler — presumably the cause of the crash shown below; the
// stack trace points into timeout_handler::do_complete. TODO confirm.
int main()
{
    // Single-threaded pool so all server-side completions run on one thread.
    execpools::asio_thread_pool pool(1);

    // Bind to an ephemeral loopback port; the client thread reads it below.
    tcp::acceptor acceptor(pool.get_executor(),
                           {net::ip::make_address("127.0.0.1"), 0});
    const auto port = acceptor.local_endpoint().port();

    // Client side (plain Beast, no stdexec): connect and perform the
    // WebSocket handshake synchronously on its own io_context.
    std::thread client(
        [&]
        {
            try
            {
                net::io_context cioc(1);
                websocket::stream<tcp::socket> ws(cioc);
                tcp::resolver r(cioc);
                auto ep = *r.resolve("127.0.0.1", std::to_string(port)).begin();
                ws.next_layer().connect(ep.endpoint());
                ws.handshake("127.0.0.1", "/");
            }
            catch (const std::exception& e)
            {
                std::cerr << "client error: " << e.what() << "\n";
            }
        });

    // Server accept + handshake using asioexec/stdexec senders. Setting the
    // suggested timeout options is what triggers the reported crash; without
    // set_option the repro does not fire (per the issue's workaround).
    auto run = [&]
    {
        return acceptor.async_accept(asioexec::use_sender) |
               stdexec::let_value(
                   [&](tcp::socket& s)
                   {
                       // shared_ptr keeps the stream alive until the async
                       // accept's continuation has run.
                       auto wsp =
                           std::make_shared<websocket::stream<tcp::socket>>(
                               std::move(s));
                       wsp->set_option(
                           websocket::stream_base::timeout::suggested(
                               beast::role_type::server));
                       return wsp->async_accept(asioexec::use_sender) |
                              stdexec::then(
                                  [wsp]  // capture keeps wsp alive
                                  {
                                      std::cout << "got here" << std::endl;
                                  });
                   });
    };

    // Wait for the handshake to complete, then leave the op-state scope.
    stdexec::sync_wait(pool.get_scheduler().schedule() |
                       stdexec::let_value([&] { return run(); }));

    // Fix: join the client thread. Destroying a joinable std::thread calls
    // std::terminate, so the original repro would abort at end of main even
    // without the Beast issue it demonstrates.
    client.join();

    // Give Beast timeout/cancel completions time to run before teardown.
    std::this_thread::sleep_for(std::chrono::milliseconds(250));
    return 0;
}
Error when running normally:
got here
libc++abi: terminating due to uncaught exception of type std::__1::system_error: recursive_mutex lock failed: Invalid argument
Abort trap: 6
LLDB stack dump running under debugger:
(lldb) bt
* thread #2, stop reason = signal SIGABRT
* frame #0: 0x00000001a00b6388 libsystem_kernel.dylib`__pthread_kill + 8
frame #1: 0x00000001a00ef88c libsystem_pthread.dylib`pthread_kill + 296
frame #2: 0x000000019fff8a3c libsystem_c.dylib`abort + 124
frame #3: 0x00000001a00a5384 libc++abi.dylib`abort_message + 132
frame #4: 0x00000001a0093cd8 libc++abi.dylib`demangling_terminate_handler() + 316
frame #5: 0x000000019fd18de4 libobjc.A.dylib`_objc_terminate() + 172
frame #6: 0x00000001a00a4698 libc++abi.dylib`std::__terminate(void (*)()) + 16
frame #7: 0x00000001a00a463c libc++abi.dylib`std::terminate() + 108
frame #8: 0x0000000100005fe4 repro`__clang_call_terminate + 16
frame #9: 0x000000010001c1d8 repro`boost::asio::detail::wait_handler<boost::beast::websocket::stream<boost::asio::basic_stream_socket<boost::asio::ip::tcp, boost::asio::any_io_executor>, true>::impl_type::timeout_handler<asioexec::detail::completion_token::executor<stdexec::completion_signatures<stdexec::__rcvrs::set_value_t (boost::system::error_code), stdexec::__rcvrs::set_error_t (std::exception_ptr), stdexec::__rcvrs::set_stopped_t ()>, asioexec::detail::use_sender::receiver<stdexec::__rcvr<stdexec::__let::__rcvr_env<stdexec::__let::__rcvr_env<stdexec::__sync_wait::__receiver<>, stdexec::__sched_env<execpools::thread_pool_base<execpools::asio_thread_pool>::scheduler>>, stdexec::__env::prop<stdexec::__queries::get_domain_t, stdexec::default_domain>>, stdexec::(anonymous namespace)::__sexpr<stdexec::'lambda11'(){}>, 0ul>::__t>, boost::asio::any_io_executor>>, boost::asio::any_io_executor>::do_complete(void*, boost::asio::detail::scheduler_operation*, boost::system::error_code const&, unsigned long) + 1856
frame #10: 0x000000010000c41c repro`boost::asio::detail::scheduler::do_run_one(boost::asio::detail::conditionally_enabled_mutex::scoped_lock&, boost::asio::detail::scheduler_thread_info&, boost::system::error_code const&) + 920
frame #11: 0x000000010000bdf0 repro`boost::asio::detail::scheduler::run(boost::system::error_code&) + 200
frame #12: 0x000000010000cd8c repro`boost::asio::detail::posix_thread::func<boost::asio::thread_pool::thread_function>::run() + 48
frame #13: 0x000000010000bc80 repro`boost_asio_detail_posix_thread_function + 28
frame #14: 0x00000001a00efc0c libsystem_pthread.dylib`_pthread_start + 136
Metadata
Metadata
Assignees
Labels
No labels