9 Commits

Author  SHA1        Date

VC      496dde60d6  2024-01-17 14:24:12 +00:00
        Merge branch 'fix_twitter_pool_length' into 'main'
        feat: truncate poll when too long
        See merge request veretcle/oolatoocs!14

VC      567dfae7ab  2024-01-17 15:18:12 +01:00
        feat: truncate poll when too long

VC      eeaea52e80  2024-01-10 10:31:36 +00:00
        Merge branch 'refactor_delete' into 'main'
        Refactor delete
        See merge request veretcle/oolatoocs!13

VC      4a0dbb06af  2024-01-10 11:24:56 +01:00
        📦: bump version

VC      5c17ea6989  2024-01-10 11:23:09 +01:00
        ♻ : avoid url duplication

VC      8674048e8d  2024-01-09 13:04:01 +00:00
        Merge branch '6-feat-add-the-ability-to-rollback-last-tweet' into 'main'
        feat: add the ability to rollback last tweet
        Closes #6
        See merge request veretcle/oolatoocs!12

VC      378d973697  2024-01-09 13:57:43 +01:00
        feat: add the ability to rewrite an edited toot

VC      2cb732efed  2023-12-22 08:09:55 +00:00
        Merge branch 'refresh_main' into 'main'
        chore: update megalodon-rs to 0.11.7
        See merge request veretcle/oolatoocs!11

VC      5d685b5748  2023-12-22 09:06:00 +01:00
        chore: update megalodon-rs to 0.11.7
7 changed files with 756 additions and 246 deletions

Cargo.lock (generated): 531 lines changed

File diff suppressed because it is too large.

Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "oolatoocs"
-version = "1.5.3"
+version = "2.0.2"
 edition = "2021"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -16,7 +16,7 @@ megalodon = "^0.11"
 oauth1-request = "^0.6"
 regex = "^1.10"
 reqwest = { version = "^0.11", features = ["json", "stream", "multipart"] }
-rusqlite = "^0.27"
+rusqlite = { version = "^0.30", features = ["chrono"] }
 serde = { version = "^1.0", features = ["derive"] }
 tokio = { version = "^1.33", features = ["rt-multi-thread", "macros", "time"] }
 toml = "^0.8"
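
The notable dependency change is rusqlite moving from ^0.27 to ^0.30 with the "chrono" feature enabled, presumably so that chrono timestamps can be bound and read as SQL values for the new datetime column. A minimal sketch of what that feature provides, written for illustration only (the table and column names below are made up, nothing here is taken from the repository):

    use chrono::{DateTime, Utc};
    use rusqlite::{params, Connection};

    fn main() -> rusqlite::Result<()> {
        let conn = Connection::open_in_memory()?;
        // the ToSql impl from the "chrono" feature serializes DateTime<Utc> to text
        conn.execute("CREATE TABLE example (at TEXT)", [])?;
        conn.execute("INSERT INTO example (at) VALUES (?1)", params![Utc::now()])?;
        // the matching FromSql impl parses it back on read
        let at: DateTime<Utc> = conn.query_row("SELECT at FROM example", [], |row| row.get(0))?;
        println!("stored at {at}");
        Ok(())
    }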

src/lib.rs

@@ -5,20 +5,20 @@ mod config;
 pub use config::{parse_toml, Config};

 mod state;
-pub use state::init_db;
 #[allow(unused_imports)]
-use state::{read_state, write_state, TweetToToot};
+use state::{delete_state, read_all_tweet_state, read_state, write_state, TweetToToot};
+pub use state::{init_db, migrate_db};

 mod mastodon;
-use mastodon::get_mastodon_timeline_since;
 pub use mastodon::register;
+use mastodon::{get_mastodon_instance, get_mastodon_timeline_since, get_status_edited_at};

 mod utils;
 use utils::{generate_multi_tweets, strip_everything};

 mod twitter;
 #[allow(unused_imports)]
-use twitter::{generate_media_ids, post_tweet, transform_poll};
+use twitter::{delete_tweet, generate_media_ids, post_tweet, transform_poll};

 use rusqlite::Connection;
@@ -27,11 +27,51 @@ pub async fn run(config: &Config) {
     let conn = Connection::open(&config.oolatoocs.db_path)
         .unwrap_or_else(|e| panic!("Cannot open DB: {}", e));

-    let last_toot_id = read_state(&conn, None)
-        .unwrap_or_else(|e| panic!("Cannot get last toot id: {}", e))
-        .map(|r| r.toot_id);
-    let timeline = get_mastodon_timeline_since(&config.mastodon, last_toot_id)
+    let mastodon = get_mastodon_instance(&config.mastodon);
+
+    let last_entry =
+        read_state(&conn, None).unwrap_or_else(|e| panic!("Cannot get last toot id: {}", e));
+
+    let last_toot_id: Option<u64> = match last_entry {
+        None => None, // Does not exist, this is the same as previously
+        Some(t) => {
+            match get_status_edited_at(&mastodon, t.toot_id).await {
+                None => Some(t.toot_id),
+                Some(d) => {
+                    // a date has been found
+                    if d > t.datetime.unwrap() {
+                        // said date is posterior to the previously
+                        // written tweet, we need to delete/rewrite
+                        for local_tweet_id in read_all_tweet_state(&conn, t.toot_id)
+                            .unwrap_or_else(|e| {
+                                panic!(
+                                    "Cannot fetch all tweets associated with Toot ID {}: {}",
+                                    t.toot_id, e
+                                )
+                            })
+                            .into_iter()
+                        {
+                            delete_tweet(&config.twitter, local_tweet_id)
+                                .await
+                                .unwrap_or_else(|e| {
+                                    panic!("Cannot delete Tweet ID ({}): {}", t.tweet_id, e)
+                                });
+                        }
+                        delete_state(&conn, t.toot_id).unwrap_or_else(|e| {
+                            panic!("Cannot delete Toot ID ({}): {}", t.toot_id, e)
+                        });
+                        read_state(&conn, None)
+                            .unwrap_or_else(|e| panic!("Cannot get last toot id: {}", e))
+                            .map(|a| a.toot_id)
+                    } else {
+                        Some(t.toot_id)
+                    }
+                }
+            }
+        }
+    };
+
+    let timeline = get_mastodon_timeline_since(&mastodon, last_toot_id)
         .await
         .unwrap_or_else(|e| panic!("Cannot get instance: {}", e));
@@ -57,9 +97,22 @@ pub async fn run(config: &Config) {
         // if the toot is too long, we cut it in half here
         if let Some((first_half, second_half)) = generate_multi_tweets(&tweet_content) {
             tweet_content = second_half;
+            // post the first half
             let reply_id = post_tweet(&config.twitter, first_half, vec![], reply_to, None)
                 .await
                 .unwrap_or_else(|e| panic!("Cannot post the first half of {}: {}", &toot.id, e));
+            // write it to db
+            write_state(
+                &conn,
+                TweetToToot {
+                    tweet_id: reply_id,
+                    toot_id: toot.id.parse::<u64>().unwrap(),
+                    datetime: None,
+                },
+            )
+            .unwrap_or_else(|e| {
+                panic!("Cannot store Toot/Tweet ({}/{}): {}", &toot.id, reply_id, e)
+            });
             reply_to = Some(reply_id);
         };
@@ -80,6 +133,7 @@ pub async fn run(config: &Config) {
             TweetToToot {
                 tweet_id,
                 toot_id: toot.id.parse::<u64>().unwrap(),
+                datetime: None,
             },
         )
         .unwrap_or_else(|e| panic!("Cannot store Toot/Tweet ({}/{}): {}", &toot.id, tweet_id, e));

src/main.rs

@@ -49,6 +49,21 @@ fn main() {
                         .display_order(1),
                 ),
         )
+        .subcommand(
+            Command::new("migrate")
+                .version(env!("CARGO_PKG_VERSION"))
+                .about("Command to register to Mastodon Instance")
+                .arg(
+                    Arg::new("config")
+                        .short('c')
+                        .long("config")
+                        .value_name("CONFIG_FILE")
+                        .help(format!("TOML config file for {}", env!("CARGO_PKG_NAME")))
+                        .num_args(1)
+                        .default_value(DEFAULT_CONFIG_PATH)
+                        .display_order(1),
+                ),
+        )
         .get_matches();

     env_logger::init();
@@ -63,6 +78,11 @@ fn main() {
             register(sub_m.get_one::<String>("host").unwrap());
             return;
         }
+        Some(("migrate", sub_m)) => {
+            let config = parse_toml(sub_m.get_one::<String>("config").unwrap());
+            migrate_db(&config.oolatoocs.db_path).unwrap();
+            return;
+        }
         _ => (),
     }
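
Based on the argument definition above, the new subcommand would presumably be invoked as oolatoocs migrate -c <CONFIG_FILE>, with the config path falling back to the built-in DEFAULT_CONFIG_PATH when -c is omitted.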

src/mastodon.rs

@@ -1,4 +1,5 @@
 use crate::config::MastodonConfig;
+use chrono::{DateTime, Utc};
 use megalodon::{
     entities::{Status, StatusVisibility},
     generator,
@@ -10,16 +11,29 @@ use megalodon::{
 use std::error::Error;
 use std::io::stdin;

-pub async fn get_mastodon_timeline_since(
-    config: &MastodonConfig,
-    id: Option<u64>,
-) -> Result<Vec<Status>, Box<dyn Error>> {
-    let mastodon = Mastodon::new(
+/// Get Mastodon Object instance
+pub fn get_mastodon_instance(config: &MastodonConfig) -> Mastodon {
+    Mastodon::new(
         config.base.to_string(),
         Some(config.token.to_string()),
         None,
-    );
+    )
+}
+
+/// Get the edited_at field from the specified toot
+pub async fn get_status_edited_at(mastodon: &Mastodon, t: u64) -> Option<DateTime<Utc>> {
+    mastodon
+        .get_status(t.to_string())
+        .await
+        .ok()
+        .and_then(|t| t.json.edited_at)
+}
+
+/// Get the home timeline since the last toot
+pub async fn get_mastodon_timeline_since(
+    mastodon: &Mastodon,
+    id: Option<u64>,
+) -> Result<Vec<Status>, Box<dyn Error>> {
     let input_options = GetHomeTimelineInputOptions {
         only_media: Some(false),
         limit: None,

src/state.rs

@@ -1,3 +1,4 @@
+use chrono::{DateTime, Utc};
 use log::debug;
 use rusqlite::{params, Connection, OptionalExtension};
 use std::error::Error;
@@ -7,6 +8,34 @@ use std::error::Error;
 pub struct TweetToToot {
     pub tweet_id: u64,
     pub toot_id: u64,
+    pub datetime: Option<DateTime<Utc>>,
+}
+
+/// Deletes a given state
+pub fn delete_state(conn: &Connection, toot_id: u64) -> Result<(), Box<dyn Error>> {
+    debug!("Deleting Toot ID {}", toot_id);
+    conn.execute(
+        &format!("DELETE FROM tweet_to_toot WHERE toot_id = {}", toot_id),
+        [],
+    )?;
+
+    Ok(())
+}
+
+/// Retrieves all tweets associated to a toot in the form of a vector
+pub fn read_all_tweet_state(conn: &Connection, toot_id: u64) -> Result<Vec<u64>, Box<dyn Error>> {
+    let query = format!(
+        "SELECT tweet_id FROM tweet_to_toot WHERE toot_id = {};",
+        toot_id
+    );
+    let mut stmt = conn.prepare(&query)?;
+    let mut rows = stmt.query([])?;
+
+    let mut v = Vec::new();
+    while let Some(row) = rows.next()? {
+        v.push(row.get(0)?);
+    }
+
+    Ok(v)
 }

 /// if None is passed, read the last tweet from DB
@@ -17,8 +46,10 @@
 ) -> Result<Option<TweetToToot>, Box<dyn Error>> {
     debug!("Reading toot_id {:?}", s);
     let query: String = match s {
-        Some(i) => format!("SELECT * FROM tweet_to_toot WHERE toot_id = {i}"),
-        None => "SELECT * FROM tweet_to_toot ORDER BY toot_id DESC LIMIT 1".to_string(),
+        Some(i) => format!(
+            "SELECT tweet_id, toot_id, UNIXEPOCH(datetime) AS datetime FROM tweet_to_toot WHERE toot_id = {i} ORDER BY tweet_id DESC LIMIT 1"
+        ),
+        None => "SELECT tweet_id, toot_id, UNIXEPOCH(datetime) AS datetime FROM tweet_to_toot ORDER BY toot_id DESC LIMIT 1".to_string(),
     };

     let mut stmt = conn.prepare(&query)?;
@@ -28,6 +59,7 @@
             Ok(TweetToToot {
                 tweet_id: row.get("tweet_id")?,
                 toot_id: row.get("toot_id")?,
+                datetime: Some(DateTime::from_timestamp(row.get("datetime").unwrap(), 0).unwrap()),
             })
         })
         .optional()?;
@@ -56,8 +88,9 @@ pub fn init_db(d: &str) -> Result<(), Box<dyn Error>> {
     conn.execute(
         "CREATE TABLE IF NOT EXISTS tweet_to_toot (
-            tweet_id INTEGER,
-            toot_id INTEGER PRIMARY KEY
+            tweet_id INTEGER PRIMARY KEY,
+            toot_id INTEGER,
+            datetime INTEGER DEFAULT CURRENT_TIMESTAMP
         )",
         [],
     )?;
@@ -65,6 +98,56 @@
     Ok(())
 }

+/// Migrate DB from 1.5.x to 1.6.x
+pub fn migrate_db(d: &str) -> Result<(), Box<dyn Error>> {
+    debug!("Migration DB for Oolatoocs");
+    let conn = Connection::open(d)?;
+
+    let res = conn.execute("SELECT datetime from tweet_to_toot;", []);
+
+    // If the column can be selected, then it's OK;
+    // if not, see if the error is a missing column and add it
+    match res {
+        Err(e) => match e.to_string().as_str() {
+            "no such column: datetime" => migrate_db_alter_table(&conn), // column does not exist
+            "Execute returned results - did you mean to call query?" => Ok(()), // returns results,
+            // column does
+            // exist
+            _ => Err(e.into()),
+        },
+        Ok(_) => Ok(()),
+    }
+}
+
+/// Creates a new table, copy the data from the old table and rename it
+fn migrate_db_alter_table(c: &Connection) -> Result<(), Box<dyn Error>> {
+    // create the new table
+    c.execute(
+        "CREATE TABLE IF NOT EXISTS tweet_to_toot_new (
+            tweet_id INTEGER PRIMARY KEY,
+            toot_id INTEGER,
+            datetime INTEGER DEFAULT CURRENT_TIMESTAMP
+        )",
+        [],
+    )?;
+    // copy data from the old table
+    c.execute(
+        "INSERT INTO tweet_to_toot_new (tweet_id, toot_id)
+            SELECT tweet_id, toot_id FROM tweet_to_toot;",
+        [],
+    )?;
+    // drop the old table
+    c.execute("DROP TABLE tweet_to_toot;", [])?;
+    // rename the new table
+    c.execute("ALTER TABLE tweet_to_toot_new RENAME TO tweet_to_toot;", [])?;
+
+    Ok(())
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -119,17 +202,25 @@ mod tests {
         let t_in = TweetToToot {
             tweet_id: 123456789,
             toot_id: 987654321,
+            datetime: None,
         };

         write_state(&conn, t_in).unwrap();

-        let mut stmt = conn.prepare("SELECT * FROM tweet_to_toot;").unwrap();
+        let mut stmt = conn
+            .prepare(
+                "SELECT tweet_id, toot_id, UNIXEPOCH(datetime) AS datetime FROM tweet_to_toot;",
+            )
+            .unwrap();
         let t_out = stmt
             .query_row([], |row| {
                 Ok(TweetToToot {
                     tweet_id: row.get("tweet_id").unwrap(),
                     toot_id: row.get("toot_id").unwrap(),
+                    datetime: Some(
+                        DateTime::from_timestamp(row.get("datetime").unwrap(), 0).unwrap(),
+                    ),
                 })
             })
             .unwrap();
@@ -226,4 +317,180 @@ mod tests {
         assert_eq!(t_out.tweet_id, 100);
         assert_eq!(t_out.toot_id, 1000);
     }
+
+    #[test]
+    fn test_last_toot_id_read_state() {
+        let d = "/tmp/test_last_toot_id_read_state.sqlite";
+        init_db(d).unwrap();
+        let conn = Connection::open(d).unwrap();
+
+        conn.execute(
+            "INSERT INTO tweet_to_toot(tweet_id, toot_id)
+            VALUES (100, 1000), (101, 1000);",
+            [],
+        )
+        .unwrap();
+
+        let t_out = read_state(&conn, Some(1000)).unwrap().unwrap();
+
+        remove_file(d).unwrap();
+
+        assert_eq!(t_out.tweet_id, 101);
+        assert_eq!(t_out.toot_id, 1000);
+    }
+
+    #[test]
+    fn test_migrate_db_alter_table() {
+        let d = "/tmp/test_migrate_db_alter_table.sqlite";
+        let conn = Connection::open(d).unwrap();
+
+        init_db(d).unwrap();
+
+        write_state(
+            &conn,
+            TweetToToot {
+                tweet_id: 0,
+                toot_id: 0,
+                datetime: None,
+            },
+        )
+        .unwrap();
+        write_state(
+            &conn,
+            TweetToToot {
+                tweet_id: 1,
+                toot_id: 1,
+                datetime: None,
+            },
+        )
+        .unwrap();
+
+        migrate_db_alter_table(&conn).unwrap();
+
+        let mut stmt = conn.prepare("PRAGMA table_info(tweet_to_toot);").unwrap();
+        let mut t = stmt.query([]).unwrap();
+
+        while let Some(row) = t.next().unwrap() {
+            if row.get::<usize, u8>(0).unwrap() == 2 {
+                assert_eq!(row.get::<usize, String>(1).unwrap(), "datetime".to_string());
+            }
+        }
+
+        remove_file(d).unwrap();
+    }
+
+    #[test]
+    fn test_migrate_db() {
+        // this should be idempotent
+        let d = "/tmp/test_migrate_db.sqlite";
+        let conn = Connection::open(d).unwrap();
+
+        conn.execute(
+            "CREATE TABLE IF NOT EXISTS tweet_to_toot (
+                tweet_id INTEGER,
+                toot_id INTEGER PRIMARY KEY
+            )",
+            [],
+        )
+        .unwrap();
+
+        conn.execute("INSERT INTO tweet_to_toot VALUES (0, 0), (1, 1);", [])
+            .unwrap();
+
+        migrate_db(d).unwrap();
+
+        let last_state = read_state(&conn, None).unwrap().unwrap();
+
+        assert_eq!(last_state.tweet_id, 1);
+        assert_eq!(last_state.toot_id, 1);
+
+        migrate_db(d).unwrap(); // shouldn't do anything
+
+        remove_file(d).unwrap();
+    }
+
+    #[test]
+    fn test_delete_state() {
+        let d = "/tmp/test_delete_state.sqlite";
+        init_db(d).unwrap();
+        let conn = Connection::open(d).unwrap();
+
+        conn.execute(
+            "INSERT INTO tweet_to_toot(tweet_id, toot_id) VALUES (0, 0);",
+            [],
+        )
+        .unwrap();
+
+        delete_state(&conn, 0).unwrap();
+
+        let mut stmt = conn
+            .prepare(
+                "SELECT tweet_id, toot_id, UNIXEPOCH(datetime) AS datetime FROM tweet_to_toot;",
+            )
+            .unwrap();
+        let t_out = stmt.query_row([], |row| {
+            Ok(TweetToToot {
+                tweet_id: row.get("tweet_id").unwrap(),
+                toot_id: row.get("toot_id").unwrap(),
+                datetime: Some(DateTime::from_timestamp(row.get("datetime").unwrap(), 0).unwrap()),
+            })
+        });
+
+        assert!(t_out.is_err_and(|x| x == rusqlite::Error::QueryReturnedNoRows));
+
+        conn.execute(
+            "INSERT INTO tweet_to_toot(tweet_id, toot_id) VALUES(102,42), (103,42);",
+            [],
+        )
+        .unwrap();
+
+        delete_state(&conn, 42).unwrap();
+
+        let mut stmt = conn
+            .prepare(
+                "SELECT tweet_id, toot_id, UNIXEPOCH(datetime) AS datetime FROM tweet_to_toot;",
+            )
+            .unwrap();
+        let t_out = stmt.query_row([], |row| {
+            Ok(TweetToToot {
+                tweet_id: row.get("tweet_id").unwrap(),
+                toot_id: row.get("toot_id").unwrap(),
+                datetime: Some(DateTime::from_timestamp(row.get("datetime").unwrap(), 0).unwrap()),
+            })
+        });
+
+        assert!(t_out.is_err_and(|x| x == rusqlite::Error::QueryReturnedNoRows));
+
+        remove_file(d).unwrap();
+    }
+
+    #[test]
+    fn test_read_all_tweet_state() {
+        let d = "/tmp/read_all_tweet_state.sqlite";
+        init_db(d).unwrap();
+        let conn = Connection::open(d).unwrap();
+
+        conn.execute(
+            "INSERT INTO tweet_to_toot(tweet_id, toot_id) VALUES (102, 42), (103, 42), (105, 43);",
+            [],
+        )
+        .unwrap();
+
+        let v1 = read_all_tweet_state(&conn, 43).unwrap();
+        let v2 = read_all_tweet_state(&conn, 42).unwrap();
+
+        assert_eq!(v1, vec![105]);
+        assert_eq!(v2, vec![102, 103]);
+
+        remove_file(d).unwrap();
+    }
 }
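
A note on the migration strategy: migrate_db_alter_table recreates the table and copies the rows instead of using a plain ALTER TABLE ... ADD COLUMN, presumably because SQLite does not allow adding a column whose DEFAULT is a non-constant expression such as CURRENT_TIMESTAMP; rebuilding the table is the usual workaround, and it also lets the primary key move from toot_id to tweet_id in the same step.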

src/twitter.rs

@@ -113,6 +113,36 @@ fn get_token(config: &TwitterConfig) -> Token {
     )
 }

+/// This function deletes a tweet, given its id
+pub async fn delete_tweet(config: &TwitterConfig, id: u64) -> Result<(), Box<dyn Error>> {
+    debug!("Deleting Tweet {}", id);
+    let empty_request = EmptyRequest {}; // Why? Because fuck you, that's why!
+    let token = get_token(config);
+    let delete_uri = format!("{}/{}", TWITTER_API_TWEET_URL, id);
+
+    let client = Client::new();
+
+    let res = client
+        .delete(&delete_uri)
+        .header(
+            "Authorization",
+            oauth1_request::delete(
+                &delete_uri,
+                &empty_request,
+                &token,
+                oauth1_request::HMAC_SHA1,
+            ),
+        )
+        .send()
+        .await?;
+
+    if !res.status().is_success() {
+        return Err(OolatoocsError::new(&format!("Cannot delete Tweet {}", id)).into());
+    }
+
+    Ok(())
+}
+
+/// This function generates a media_ids vec to be used by Twitter
 pub async fn generate_media_ids(config: &TwitterConfig, media_attach: &[Attachment]) -> Vec<u64> {
     let mut medias: Vec<u64> = vec![];
@@ -434,7 +464,11 @@ pub fn transform_poll(p: &Poll) -> TweetPoll {
     let diff = poll_end_datetime.signed_duration_since(now);

     TweetPoll {
-        options: p.options.iter().map(|i| i.title.clone()).collect(),
+        options: p
+            .options
+            .iter()
+            .map(|i| i.title.chars().take(25).collect::<String>())
+            .collect(),
         duration_minutes: diff.num_minutes().try_into().unwrap(), // safe here, number is positive
                                                                   // and can't be over 21600
     }
@@ -482,3 +516,41 @@ pub async fn post_tweet(
     Ok(res.data.id.parse::<u64>().unwrap())
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use megalodon::entities::PollOption;
+
+    #[test]
+    fn test_transform_poll() {
+        let poll = Poll {
+            id: "youpi".to_string(),
+            expires_at: Some(Utc::now()),
+            expired: false,
+            multiple: false,
+            votes_count: 0,
+            voters_count: None,
+            options: vec![
+                PollOption {
+                    title: "Je suis beaucoup trop long comme option, tronque-moi !".to_string(),
+                    votes_count: None,
+                },
+                PollOption {
+                    title: "nope".to_string(),
+                    votes_count: None,
+                },
+            ],
+            voted: None,
+            emojis: vec![],
+        };
+
+        let tweet_poll_res = transform_poll(&poll);
+        let tweet_pool_expected = TweetPoll {
+            duration_minutes: 0,
+            options: vec!["Je suis beaucoup trop lon".to_string(), "nope".to_string()],
+        };
+
+        assert_eq!(tweet_poll_res.options, tweet_pool_expected.options);
+    }
+}
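
The chars().take(25) in transform_poll caps each option at 25 characters, which lines up with the 25-character limit the Twitter/X API enforces on poll option labels; truncating on chars rather than bytes also avoids cutting through a multi-byte character. The test above checks exactly that: the long French option is trimmed to its first 25 characters while the short one passes through untouched.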