5 Commits

Author SHA1 Message Date
VC
b3e7ee9d84 Merge branch '5-migrate-from-tokio-loop-to-futures-stream' into 'master'
refactor: use futures instead of tokio for media upload

Closes #5

See merge request veretcle/scootaloo!28
2022-11-15 09:11:28 +00:00
VC
7f7219ea78 feat: turn tokio-based async logic into futures 2022-11-15 10:06:00 +01:00
VC
f371b8a297 feat: add default rate_limiting option 2022-11-15 10:06:00 +01:00
VC
ec3956eabb doc: add rate_limiting option 2022-11-15 10:06:00 +01:00
VC
ce84c05581 refactor: use futures instead of tokio for media upload 2022-11-15 10:05:57 +01:00
6 changed files with 130 additions and 129 deletions

3
Cargo.lock generated
View File

@@ -2103,12 +2103,13 @@ dependencies = [
[[package]]
name = "scootaloo"
version = "0.8.3"
version = "0.9.0"
dependencies = [
"chrono",
"clap",
"egg-mode",
"elefren",
"futures 0.3.25",
"html-escape",
"log",
"mime",

View File

@@ -1,6 +1,6 @@
[package]
name = "scootaloo"
version = "0.8.3"
version = "0.9.0"
authors = ["VC <veretcle+framagit@mateu.be>"]
edition = "2021"
@@ -13,7 +13,8 @@ toml = "^0.5"
clap = "^4"
egg-mode = "^0.16"
rusqlite = "^0.27"
tokio = { version = "1", features = ["full"]}
tokio = { version = "^1", features = ["full"]}
futures = "^0.3"
elefren = "^0.22"
html-escape = "^0.2"
reqwest = "^0.11"

View File

@@ -18,6 +18,7 @@ First up, create a configuration file (default path is `/usr/local/etc/scootaloo
[scootaloo]
db_path = "/var/lib/scootaloo/scootaloo.sqlite" ## file containing the SQLite Tweet corresponding Toot DB, must be writeable
cache_path = "/tmp/scootaloo" ## a dir where the temporary files will be downloaded, must be writeable
rate_limit = 4 ## optional, default 4, number of accounts handled simultaneously
[twitter]
## Consumer/Access key for Twitter (can be generated at https://developer.twitter.com/en/apps)

View File

@@ -32,6 +32,7 @@ pub struct MastodonConfig {
pub struct ScootalooConfig {
pub db_path: String,
pub cache_path: String,
pub rate_limit: Option<usize>,
}
/// Parses the TOML file into a Config Struct

View File

@@ -25,12 +25,13 @@ use rusqlite::Connection;
use std::sync::Arc;
use tokio::{spawn, sync::Mutex};
use futures::StreamExt;
const DEFAULT_RATE_LIMIT: usize = 4;
/// This is where the magic happens
#[tokio::main]
pub async fn run(config: Config) {
// create the task vector for handling multiple accounts
let mut mtask = vec![];
// open the SQLite connection
let conn = Arc::new(Mutex::new(
Connection::open(&config.scootaloo.db_path).unwrap_or_else(|e| {
@@ -41,19 +42,20 @@ pub async fn run(config: Config) {
}),
));
for mastodon_config in config.mastodon.into_values() {
let mut stream = futures::stream::iter(config.mastodon.into_values())
.map(|mastodon_config| {
// create temporary value for each task
let scootaloo_cache_path = config.scootaloo.cache_path.clone();
let token = get_oauth2_token(&config.twitter);
let task_conn = conn.clone();
let task = spawn(async move {
spawn(async move {
info!("Starting treating {}", &mastodon_config.twitter_screen_name);
// retrieve the last tweet ID for the username
let lconn = task_conn.lock().await;
let last_tweet_id =
read_state(&lconn, &mastodon_config.twitter_screen_name, None)?.map(|r| r.tweet_id);
let last_tweet_id = read_state(&lconn, &mastodon_config.twitter_screen_name, None)?
.map(|r| r.tweet_id);
drop(lconn);
// get user timeline feed (Vec<tweet>)
@@ -137,15 +139,13 @@ pub async fn run(config: Config) {
drop(lconn);
}
Ok::<(), ScootalooError>(())
});
// push each task into the vec task
mtask.push(task);
}
})
})
.buffer_unordered(config.scootaloo.rate_limit.unwrap_or(DEFAULT_RATE_LIMIT));
// launch and wait for every handle
for handle in mtask {
match handle.await {
while let Some(result) = stream.next().await {
match result {
Ok(Err(e)) => eprintln!("Error within thread: {}", e),
Err(e) => eprintln!("Error with thread: {}", e),
_ => (),

View File

@@ -15,6 +15,8 @@ use tokio::{
io::copy,
};
use futures::{stream, stream::StreamExt};
/// Generate associative table between media ids and tweet extended entities
pub async fn generate_media_ids(
tweet: &Tweet,
@@ -25,24 +27,20 @@ pub async fn generate_media_ids(
let mut media_ids: Vec<String> = vec![];
if let Some(m) = &tweet.extended_entities {
// create tasks list
let mut tasks = vec![];
// size of media_ids vector, should be equal to the media vector
media_ids.resize(m.media.len(), String::new());
info!("{} medias in tweet", m.media.len());
for (i, media) in m.media.iter().enumerate() {
let medias = m.media.clone();
let mut stream = stream::iter(medias)
.map(|media| {
// attribute media url
media_url = media.url.clone();
// clone everything we need
let cache_path = String::from(cache_path);
let media = media.clone();
let mastodon = mastodon.clone();
let task = tokio::task::spawn(async move {
tokio::task::spawn(async move {
info!("Start treating {}", media.media_url_https);
// get the tweet embedded media
let local_tweet_media_path = get_tweet_media(&media, &cache_path).await?;
@@ -54,16 +52,15 @@ pub async fn generate_media_ids(
// it doesn't matter if we can't remove, cache_media fn is idempotent
remove_file(&local_tweet_media_path).await.ok();
Ok::<(usize, String), ScootalooError>((i, mastodon_media.id))
});
Ok::<String, ScootalooError>(mastodon_media.id)
})
})
.buffered(4); // there are at most four media files per tweet and they need to be
// treated in order
tasks.push(task);
}
for task in tasks {
match task.await {
// insert the media at the right place
Ok(Ok((i, v))) => media_ids[i] = v,
while let Some(result) = stream.next().await {
match result {
Ok(Ok(v)) => media_ids.push(v),
Ok(Err(e)) => warn!("Cannot treat media: {}", e),
Err(e) => error!("Something went wrong when joining the main thread: {}", e),
}