Added stack size variability on dispatch

LasterAlex 2024-10-20 04:19:17 +03:00
parent 435257244c
commit f8be5ccd8c
3 changed files with 74 additions and 28 deletions

CHANGELOG.md

@@ -15,6 +15,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - `bot.forward`, `bot.edit_live_location`, `bot.stop_live_location`, `bot.set_reaction`, `bot.pin`, `bot.unpin`, `bot.edit_text`, `bot.edit_caption`, `bot.edit_media`, `bot.edit_reply_markup`, `bot.stop_poll_message`, `bot.delete` and `bot.copy` methods to the new `crate::sugar::bot::BotMessagesExt` trait
 - `req.reply_to` method to the new `crate::sugar::request::RequestReplyExt` trait
 - `req.disable_link_preview` method to the new `crate::sugar::request::RequestLinkPreviewExt` trait
+- `stack_size` setter to `DispatcherBuilder`

 ### Changed
@@ -28,6 +29,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Now `Vec<MessageId>` in requests serializes into `[number]` instead of `[{message_id: number}]`; `forward_messages`, `copy_messages` and `delete_messages` now work properly
 - Now `InlineQueryResultsButton` serializes properly ([issue 1181](https://github.com/teloxide/teloxide/issues/1181))
 - Now `ThreadId` is able to serialize in multipart requests ([PR 1179](https://github.com/teloxide/teloxide/pull/1179))
+- Now stack does not overflow on dispatch ([issue 1154](https://github.com/teloxide/teloxide/issues/1154))

 ## 0.13.0 - 2024-08-16
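
To illustrate the new setter at a call site, here is a minimal sketch assuming the usual `Dispatcher::builder` flow from the teloxide examples; the env-token bot, the trivial handler, and the 16 MiB value are illustrative only, not part of this commit:

```rust
use teloxide::prelude::*;

#[tokio::main]
async fn main() {
    let bot = Bot::from_env(); // reads TELOXIDE_TOKEN

    let handler = Update::filter_message().endpoint(|bot: Bot, msg: Message| async move {
        bot.send_message(msg.chat.id, "pong").await?;
        respond(())
    });

    Dispatcher::builder(bot, handler)
        // New in this commit: raise the dispatch stack from the 8 MiB
        // default to 16 MiB (hypothetical value).
        .stack_size(16 * 1024 * 1024)
        .enable_ctrlc_handler()
        .build()
        .dispatch()
        .await;
}
```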

Cargo.toml

@@ -89,7 +89,7 @@ dptree = "0.3.0"
 # Uncomment this if you want to test teloxide with a specific dptree commit
 # dptree = { git = "https://github.com/teloxide/dptree", rev = "df578e4" }
-tokio = { version = "1.39", features = ["fs"] }
+tokio = { version = "1.39", features = ["fs", "rt-multi-thread"] }
 tokio-util = "0.7"
 tokio-stream = "0.1.8"
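
Note on the dependency change: `tokio::runtime::Builder::new_multi_thread` is gated behind tokio's `rt-multi-thread` feature, which is why the dispatcher change below can now build its own runtime.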

src/dispatching/dispatcher.rs

@@ -5,6 +5,7 @@ use crate::{
     },
     error_handlers::{ErrorHandler, LoggingErrorHandler},
     requests::{Request, Requester},
+    stop::StopToken,
     types::{Update, UpdateKind},
     update_listeners::{self, UpdateListener},
 };
@@ -16,6 +17,7 @@ use futures::{
     stream::FuturesUnordered,
     FutureExt as _, StreamExt as _,
 };
+use tokio::runtime::Builder;
 use tokio_stream::wrappers::ReceiverStream;

 use std::{
@@ -29,6 +31,7 @@ use std::{
         atomic::{AtomicBool, AtomicU32, Ordering},
         Arc,
     },
+    thread,
 };

 /// The builder for [`Dispatcher`].
@@ -44,6 +47,7 @@ pub struct DispatcherBuilder<R, Err, Key> {
     ctrlc_handler: bool,
     distribution_f: fn(&Update) -> Option<Key>,
     worker_queue_size: usize,
+    stack_size: usize,
 }

 impl<R, Err, Key> DispatcherBuilder<R, Err, Key>
@@ -104,6 +108,14 @@ where
         Self { worker_queue_size: size, ..self }
     }

+    /// Specifies the stack size of the dispatcher.
+    ///
+    /// By default it's 8 * 1024 * 1024 bytes (8 MiB).
+    #[must_use]
+    pub fn stack_size(self, size: usize) -> Self {
+        Self { stack_size: size, ..self }
+    }
+
     /// Specifies the distribution function that decides how updates are grouped
     /// before execution.
     ///
@@ -176,6 +188,7 @@ where
             ctrlc_handler,
             distribution_f: _,
             worker_queue_size,
+            stack_size,
         } = self;

         DispatcherBuilder {
@@ -187,6 +200,7 @@ where
             ctrlc_handler,
             distribution_f: f,
             worker_queue_size,
+            stack_size,
         }
     }
@@ -202,6 +216,7 @@ where
             distribution_f,
             worker_queue_size,
             ctrlc_handler,
+            stack_size,
         } = self;

         // If the `ctrlc_handler` feature is not enabled, don't emit a warning.
@@ -216,6 +231,7 @@ where
             state: ShutdownToken::new(),
             distribution_f,
             worker_queue_size,
+            stack_size,
             workers: HashMap::new(),
             default_worker: None,
             current_number_of_active_workers: Default::default(),
@@ -258,6 +274,7 @@ pub struct Dispatcher<R, Err, Key> {
     distribution_f: fn(&Update) -> Option<Key>,
     worker_queue_size: usize,
+    stack_size: usize,
     current_number_of_active_workers: Arc<AtomicU32>,
     max_number_of_active_workers: Arc<AtomicU32>,
     // Tokio TX channel parts associated with chat IDs that consume updates sequentially.
@@ -297,6 +314,7 @@ where
        Err: Debug,
    {
        const DEFAULT_WORKER_QUEUE_SIZE: usize = 64;
+       const DEFAULT_STACK_SIZE: usize = 8 * 1024 * 1024;

        DispatcherBuilder {
            bot,
@@ -310,6 +328,7 @@ where
            ctrlc_handler: false,
            worker_queue_size: DEFAULT_WORKER_QUEUE_SIZE,
            distribution_f: default_distribution_function,
+           stack_size: DEFAULT_STACK_SIZE,
        }
    }
 }
@@ -318,7 +337,7 @@ impl<R, Err, Key> Dispatcher<R, Err, Key>
 where
     R: Requester + Clone + Send + Sync + 'static,
     Err: Send + Sync + 'static,
-    Key: Hash + Eq + Clone,
+    Key: Hash + Eq + Clone + Send,
 {
     /// Starts your bot with the default parameters.
     ///
@@ -355,8 +374,8 @@ where
         update_listener: UListener,
         update_listener_error_handler: Arc<Eh>,
     ) where
-        UListener: UpdateListener + 'a,
-        Eh: ErrorHandler<UListener::Err> + 'a,
+        UListener: UpdateListener + Send + 'a,
+        Eh: ErrorHandler<UListener::Err> + Send + Sync + 'a,
         UListener::Err: Debug,
     {
         self.try_dispatch_with_listener(update_listener, update_listener_error_handler)
@@ -377,8 +396,8 @@ where
         update_listener_error_handler: Arc<Eh>,
     ) -> Result<(), R::Err>
     where
-        UListener: UpdateListener + 'a,
-        Eh: ErrorHandler<UListener::Err> + 'a,
+        UListener: UpdateListener + Send + 'a,
+        Eh: ErrorHandler<UListener::Err> + Send + Sync + 'a,
         UListener::Err: Debug,
     {
         // FIXME: there should be a way to check if dependency is already inserted
@@ -391,11 +410,38 @@ where
         log::debug!("hinting allowed updates: {:?}", allowed_updates);
         update_listener.hint_allowed_updates(&mut allowed_updates.into_iter());

-        let mut stop_token = Some(update_listener.stop_token());
+        let stop_token = Some(update_listener.stop_token());
+
+        thread::scope(|scope| {
+            scope.spawn(move || {
+                let runtime = Builder::new_multi_thread()
+                    .thread_stack_size(self.stack_size)
+                    .enable_all()
+                    .build()
+                    .unwrap();
+
+                runtime.block_on(self.start_listening(
+                    update_listener,
+                    update_listener_error_handler,
+                    stop_token,
+                ));
+            });
+        });
+
+        Ok(())
+    }
+
+    async fn start_listening<'a, UListener, Eh>(
+        &'a mut self,
+        mut update_listener: UListener,
+        update_listener_error_handler: Arc<Eh>,
+        mut stop_token: Option<StopToken>,
+    ) where
+        UListener: UpdateListener + Send + 'a,
+        Eh: ErrorHandler<UListener::Err> + Send + Sync + 'a,
+        UListener::Err: Debug,
+    {
         self.state.start_dispatching();

-        {
             let stream = update_listener.as_stream();
             tokio::pin!(stream);
@@ -422,7 +468,6 @@
                     }
                 }
             }
-        }

         self.workers
             .drain()
@@ -435,7 +480,6 @@
             .await;

         self.state.done();

-        Ok(())
     }

     async fn process_update(