add bluesky support

VC
2024-09-26 16:32:39 +02:00
parent f7e2aafa7b
commit ac8af5ce95
8 changed files with 1090 additions and 245 deletions

src/bsky.rs (new file, 206 lines)

@@ -0,0 +1,206 @@
use crate::config::BlueskyConfig;
use atrium_api::{
app::bsky::feed::post::RecordData, com::atproto::repo::upload_blob::Output,
types::string::Datetime, types::string::Language,
};
use bsky_sdk::{rich_text::RichText, BskyAgent};
use log::error;
use megalodon::entities::attachment::{Attachment, AttachmentType};
use regex::Regex;
use std::error::Error;
/// Intermediate struct that makes replies easier to handle: `record_uri` is the post being
/// replied to, `root_record_uri` the root of the thread
#[derive(Debug)]
pub struct BskyReply {
pub record_uri: String,
pub root_record_uri: String,
}
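/// Logs into Bluesky with the given handle and password and returns an authenticated agent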
pub async fn get_session(user: &str, pass: &str) -> Result<BskyAgent, Box<dyn Error>> {
let agent = BskyAgent::builder().build().await?;
agent.login(user, pass).await?;
Ok(agent)
}
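/// Builds the Bluesky post record: detects rich-text facets, trims long URLs in the visible
/// text, sets the post language and wires up the optional media embed and reply references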
pub async fn build_post_record(
config: &BlueskyConfig,
text: &str,
language: &Option<String>,
embed: Option<atrium_api::types::Union<atrium_api::app::bsky::feed::post::RecordEmbedRefs>>,
reply_to: &Option<BskyReply>,
) -> Result<RecordData, Box<dyn Error>> {
let mut rt = RichText::new_with_detect_facets(text).await?;
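// Shorten displayed URLs: strip the scheme and anything past the first 29 characters of the
// rest; the link facets detected above still carry the full URL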
let re = Regex::new(r#"(https?://)(\S{1,29})(\S*)"#).unwrap();
while let Some(found) = re.captures(&rt.text.clone()) {
if let Some(group) = found.get(3) {
rt.delete(group.start(), group.start() + group.len());
}
if let Some(group) = found.get(1) {
rt.delete(group.start(), group.start() + group.len());
}
}
let langs = language.clone().map(|s| vec![Language::new(s).unwrap()]);
let reply = if let Some(x) = reply_to {
let root_record = get_record(&config.handle, &rkey(&x.root_record_uri)).await?;
let parent_record = get_record(&config.handle, &rkey(&x.record_uri)).await?;
Some(
atrium_api::app::bsky::feed::post::ReplyRefData {
parent: atrium_api::com::atproto::repo::strong_ref::MainData {
cid: parent_record.data.cid.unwrap(),
uri: parent_record.data.uri.to_owned(),
}
.into(),
root: atrium_api::com::atproto::repo::strong_ref::MainData {
cid: root_record.data.cid.unwrap(),
uri: root_record.data.uri.to_owned(),
}
.into(),
}
.into(),
)
} else {
None
};
Ok(RecordData {
created_at: Datetime::now(),
embed,
entities: None,
facets: rt.facets,
labels: None,
langs,
reply,
tags: None,
text: rt.text,
})
}
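/// Fetches an existing app.bsky.feed.post record so a reply can reference its cid/uri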
async fn get_record(
handle: &str,
rkey: &str,
) -> Result<
atrium_api::types::Object<atrium_api::com::atproto::repo::get_record::OutputData>,
Box<dyn Error>,
> {
let bsky = BskyAgent::builder().build().await?;
let record = bsky
.api
.com
.atproto
.repo
.get_record(
atrium_api::com::atproto::repo::get_record::ParametersData {
cid: None,
collection: atrium_api::types::string::Nsid::new("app.bsky.feed.post".to_string())?,
repo: atrium_api::types::string::Handle::new(handle.to_string())?.into(),
rkey: rkey.to_string(),
}
.into(),
)
.await?;
Ok(record)
}
// It's ugly, but it gets the job done for now
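/// Uploads the toot's media attachments as blobs and wraps them into a post embed:
/// all images together, or a single video (which takes priority over images)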
pub async fn generate_media_records(
bsky: &BskyAgent,
media_attach: &[Attachment],
) -> Option<atrium_api::types::Union<atrium_api::app::bsky::feed::post::RecordEmbedRefs>> {
let mut embed: Option<
atrium_api::types::Union<atrium_api::app::bsky::feed::post::RecordEmbedRefs>,
> = None;
let mut images = Vec::new();
let mut videos: Vec<atrium_api::app::bsky::embed::video::MainData> = Vec::new();
for media in media_attach.iter() {
let blob = upload_media(bsky, &media.url).await.unwrap();
match media.r#type {
AttachmentType::Image => {
images.push(
atrium_api::app::bsky::embed::images::ImageData {
alt: media
.description
.clone()
.map_or("".to_string(), |v| v.to_owned()),
aspect_ratio: None,
image: blob.data.blob,
}
.into(),
);
}
AttachmentType::Gifv | AttachmentType::Video => {
videos.push(atrium_api::app::bsky::embed::video::MainData {
alt: media.description.clone(),
aspect_ratio: None,
captions: None,
video: blob.data.blob,
});
}
_ => {
error!("Not an image, not a video, what happened here?");
}
}
}
if !images.is_empty() {
embed = Some(atrium_api::types::Union::Refs(
atrium_api::app::bsky::feed::post::RecordEmbedRefs::AppBskyEmbedImagesMain(Box::new(
atrium_api::app::bsky::embed::images::MainData { images }.into(),
)),
));
}
// if a video has been uploaded, it takes priority as you can only have 1 video per post
if !videos.is_empty() {
embed = Some(atrium_api::types::Union::Refs(
atrium_api::app::bsky::feed::post::RecordEmbedRefs::AppBskyEmbedVideoMain(Box::new(
videos[0].clone().into(),
)),
))
}
embed
}
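/// Downloads a media file from its URL and uploads the raw bytes to the repo as a blob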
async fn upload_media(bsky: &BskyAgent, u: &str) -> Result<Output, Box<dyn Error>> {
let dl = reqwest::get(u).await?;
let bytes = dl.bytes().await?;
let record = bsky.api.com.atproto.repo.upload_blob(bytes.into()).await?;
Ok(record)
}
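/// Extracts the record key (the last path segment) from an at:// record URI,
/// e.g. `at://<did>/app.bsky.feed.post/<rkey>`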
fn rkey(record_id: &str) -> String {
record_id.split('/').nth(4).unwrap().to_string()
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_build_post_record() {
let text = "@factornews@piaille.fr Retrouvez-nous ici https://www.nintendojo.fr/articles/editos/le-mod-renovation-de-8bitdo-pour-manette-n64 et là https://www.nintendojo.fr/articles/analyses/vite-vu/vite-vu-morbid-the-lords-of-ire";
let expected_text = "@factornews@piaille.fr Retrouvez-nous ici www.nintendojo.fr/articles/ed et là www.nintendojo.fr/articles/an";
let bsky_conf = BlueskyConfig {
handle: "tamerelol.bsky.social".to_string(),
password: "dtc".to_string(),
};
let created_record_data = build_post_record(&bsky_conf, text, &None, None, &None)
.await
.unwrap();
assert_eq!(expected_text, &created_record_data.text);
}
}


@@ -6,6 +6,7 @@ pub struct Config {
pub oolatoocs: OolatoocsConfig,
pub mastodon: MastodonConfig,
pub twitter: TwitterConfig,
pub bluesky: BlueskyConfig,
}
#[derive(Debug, Deserialize, Clone)]
@@ -30,6 +31,12 @@ pub struct MastodonConfig {
pub token: String,
}
#[derive(Debug, Deserialize)]
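/// Bluesky credentials, deserialized from the `[bluesky]` table of the TOML config:
/// the account handle and its password (typically an app password)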
pub struct BlueskyConfig {
pub handle: String,
pub password: String,
}
/// parses TOML file into Config struct
pub fn parse_toml(toml_file: &str) -> Config {
let toml_config =


@@ -1,3 +1,5 @@
use log::debug;
mod error;
pub use error::OolatoocsError;
@@ -5,8 +7,7 @@ mod config;
pub use config::{parse_toml, Config};
mod state;
#[allow(unused_imports)]
use state::{delete_state, read_all_tweet_state, read_state, write_state, TweetToToot};
use state::{delete_state, read_all_state, read_state, write_state, TootTweetRecord};
pub use state::{init_db, migrate_db};
mod mastodon;
@@ -17,9 +18,11 @@ mod utils;
use utils::{generate_multi_tweets, strip_everything};
mod twitter;
#[allow(unused_imports)]
use twitter::{delete_tweet, generate_media_ids, post_tweet, transform_poll};
mod bsky;
use bsky::{build_post_record, generate_media_records, get_session, BskyReply};
use rusqlite::Connection;
#[tokio::main]
@@ -29,6 +32,10 @@ pub async fn run(config: &Config) {
let mastodon = get_mastodon_instance(&config.mastodon);
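// Log into Bluesky up front; the same agent is reused below to create and delete records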
let bluesky = get_session(&config.bluesky.handle, &config.bluesky.password)
.await
.unwrap_or_else(|e| panic!("Cannot connect to Bsky: {}", e));
let last_entry =
read_state(&conn, None).unwrap_or_else(|e| panic!("Cannot get last toot id: {}", e));
@@ -40,23 +47,29 @@ pub async fn run(config: &Config) {
Some(d) => {
// a date has been found
if d > t.datetime.unwrap() {
// said date is posterior to the previously
// written tweet, we need to delete/rewrite
for local_tweet_id in read_all_tweet_state(&conn, t.toot_id)
debug!("Last toot date is posterior to the previously written tweet, deleting…");
let (local_tweet_ids, local_record_uris) = read_all_state(&conn, t.toot_id)
.unwrap_or_else(|e| {
panic!(
"Cannot fetch all tweets associated with Toot ID {}: {}",
t.toot_id, e
)
})
.into_iter()
{
});
for local_tweet_id in local_tweet_ids.into_iter() {
delete_tweet(&config.twitter, local_tweet_id)
.await
.unwrap_or_else(|e| {
panic!("Cannot delete Tweet ID ({}): {}", t.tweet_id, e)
});
}
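// Also delete the matching Bluesky records so the thread can be rewritten there as well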
for local_record_uri in local_record_uris.into_iter() {
bluesky
.delete_record(&local_record_uri)
.await
.unwrap_or_else(|e| {
panic!("Cannot delete record ID ({}): {}", &t.record_uri, e)
});
}
delete_state(&conn, t.toot_id).unwrap_or_else(|e| {
panic!("Cannot delete Toot ID ({}): {}", t.toot_id, e)
});
@@ -87,52 +100,132 @@ pub async fn run(config: &Config) {
};
// threads if necessary
let mut reply_to = toot.in_reply_to_id.and_then(|t| {
read_state(&conn, Some(t.parse::<u64>().unwrap()))
.ok()
.flatten()
.map(|s| s.tweet_id)
});
let (mut tweet_reply_to, mut record_reply_to) = toot
.in_reply_to_id
.and_then(|t| {
read_state(&conn, Some(t.parse::<u64>().unwrap()))
.ok()
.flatten()
.map(|s| {
(
s.tweet_id,
BskyReply {
record_uri: s.record_uri.to_owned(),
root_record_uri: s.root_record_uri.to_owned(),
},
)
})
})
.unzip();
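// record_reply_to now carries the parent and root record URIs for the Bluesky reply refs
// (or None for a top-level post)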
// if the toot is too long, we cut it in half here
if let Some((first_half, second_half)) = generate_multi_tweets(&tweet_content) {
tweet_content = second_half;
// post the first half
let reply_id = post_tweet(&config.twitter, first_half, vec![], reply_to, None)
.await
.unwrap_or_else(|e| panic!("Cannot post the first half of {}: {}", &toot.id, e));
let tweet_reply_id =
post_tweet(&config.twitter, &first_half, vec![], tweet_reply_to, None)
.await
.unwrap_or_else(|e| {
panic!(
"Cannot post the first half of {} for Twitter: {}",
&toot.id, e
)
});
let record = build_post_record(
&config.bluesky,
&first_half,
&toot.language,
None,
&record_reply_to,
)
.await
.unwrap_or_else(|e| panic!("Cannot create valid record for {}: {}", &toot.id, e));
let record_reply_id = bluesky.create_record(record).await.unwrap_or_else(|e| {
panic!(
"Cannot post the first half of {} for Bluesky: {}",
&toot.id, e
)
});
// write it to db
write_state(
&conn,
TweetToToot {
tweet_id: reply_id,
TootTweetRecord {
toot_id: toot.id.parse::<u64>().unwrap(),
tweet_id: tweet_reply_id,
record_uri: record_reply_id.data.uri.to_owned(),
root_record_uri: record_reply_to
.as_ref()
.map_or(record_reply_id.data.uri.to_owned(), |v| {
v.root_record_uri.to_owned()
}),
datetime: None,
},
)
.unwrap_or_else(|e| {
panic!("Cannot store Toot/Tweet ({}/{}): {}", &toot.id, reply_id, e)
panic!(
"Cannot store Toot/Tweet/Record ({}/{}/{}): {}",
&toot.id, tweet_reply_id, &record_reply_id.data.uri, e
)
});
reply_to = Some(reply_id);
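// Later parts reply to the record we just created; the root stays the stored root,
// or becomes this record when it starts a new thread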
record_reply_to = Some(BskyReply {
record_uri: record_reply_id.data.uri.to_owned(),
root_record_uri: record_reply_to
.as_ref()
.map_or(record_reply_id.data.uri.clone(), |v| {
v.root_record_uri.clone()
}),
});
tweet_reply_to = Some(tweet_reply_id);
};
// treats poll if any
let in_poll = toot.poll.map(|p| transform_poll(&p));
// treats medias
let medias = generate_media_ids(&config.twitter, &toot.media_attachments).await;
let record_medias = generate_media_records(&bluesky, &toot.media_attachments).await;
let tweet_medias = generate_media_ids(&config.twitter, &toot.media_attachments).await;
// posts corresponding tweet
let tweet_id = post_tweet(&config.twitter, tweet_content, medias, reply_to, in_poll)
let tweet_id = post_tweet(
&config.twitter,
&tweet_content,
tweet_medias,
tweet_reply_to,
in_poll,
)
.await
.unwrap_or_else(|e| panic!("Cannot Tweet {}: {}", toot.id, e));
let record = build_post_record(
&config.bluesky,
&tweet_content,
&toot.language,
record_medias,
&record_reply_to,
)
.await
.unwrap_or_else(|e| panic!("Cannot build record for {}: {}", &toot.id, e));
let created_record = bluesky
.create_record(record)
.await
.unwrap_or_else(|e| panic!("Cannot Tweet {}: {}", toot.id, e));
.unwrap_or_else(|e| panic!("Cannot put record {}: {}", &toot.id, e));
// writes the current state of the tweet
write_state(
&conn,
TweetToToot {
tweet_id,
TootTweetRecord {
toot_id: toot.id.parse::<u64>().unwrap(),
tweet_id,
record_uri: created_record.data.uri.clone(),
root_record_uri: record_reply_to
.as_ref()
.map_or(created_record.data.uri.clone(), |v| {
v.root_record_uri.clone()
}),
datetime: None,
},
)


@@ -5,9 +5,14 @@ use std::error::Error;
/// One state row: a toot and its associated tweet and Bluesky record
#[derive(Debug)]
pub struct TweetToToot {
pub tweet_id: u64,
pub struct TootTweetRecord {
// Mastodon part
pub toot_id: u64,
// Twitter part
pub tweet_id: u64,
// Bluesky part
pub record_uri: String,
pub root_record_uri: String,
pub datetime: Option<DateTime<Utc>>,
}
@@ -15,27 +20,32 @@ pub struct TweetToToot {
pub fn delete_state(conn: &Connection, toot_id: u64) -> Result<(), Box<dyn Error>> {
debug!("Deleting Toot ID {}", toot_id);
conn.execute(
&format!("DELETE FROM tweet_to_toot WHERE toot_id = {}", toot_id),
&format!("DELETE FROM toot_tweet_record WHERE toot_id = {}", toot_id),
[],
)?;
Ok(())
}
/// Retrieves all tweet IDs and Bluesky record URIs associated with a toot
pub fn read_all_tweet_state(conn: &Connection, toot_id: u64) -> Result<Vec<u64>, Box<dyn Error>> {
pub fn read_all_state(
conn: &Connection,
toot_id: u64,
) -> Result<(Vec<u64>, Vec<String>), Box<dyn Error>> {
let query = format!(
"SELECT tweet_id FROM tweet_to_toot WHERE toot_id = {};",
"SELECT tweet_id, record_uri FROM toot_tweet_record WHERE toot_id = {};",
toot_id
);
let mut stmt = conn.prepare(&query)?;
let mut rows = stmt.query([])?;
let mut v = Vec::new();
let mut tweet_v: Vec<u64> = Vec::new();
let mut record_v: Vec<String> = Vec::new();
while let Some(row) = rows.next()? {
v.push(row.get(0)?);
tweet_v.push(row.get(0)?);
record_v.push(row.get(1)?);
}
Ok(v)
Ok((tweet_v, record_v))
}
/// if None is passed, read the last entry from the DB
@@ -43,23 +53,26 @@ pub fn read_all_tweet_state(conn: &Connection, toot_id: u64) -> Result<Vec<u64>,
pub fn read_state(
conn: &Connection,
s: Option<u64>,
) -> Result<Option<TweetToToot>, Box<dyn Error>> {
) -> Result<Option<TootTweetRecord>, Box<dyn Error>> {
debug!("Reading toot_id {:?}", s);
let begin_query = "SELECT *, UNIXEPOCH(datetime) AS unix_datetime FROM toot_tweet_record";
let query: String = match s {
Some(i) => format!(
"SELECT tweet_id, toot_id, UNIXEPOCH(datetime) AS datetime FROM tweet_to_toot WHERE toot_id = {i} ORDER BY tweet_id DESC LIMIT 1"
),
None => "SELECT tweet_id, toot_id, UNIXEPOCH(datetime) AS datetime FROM tweet_to_toot ORDER BY toot_id DESC LIMIT 1".to_string(),
Some(i) => format!("{begin_query} WHERE toot_id = {i} ORDER BY tweet_id DESC LIMIT 1"),
None => format!("{begin_query} ORDER BY toot_id DESC LIMIT 1"),
};
let mut stmt = conn.prepare(&query)?;
let t = stmt
.query_row([], |row| {
Ok(TweetToToot {
tweet_id: row.get("tweet_id")?,
Ok(TootTweetRecord {
toot_id: row.get("toot_id")?,
datetime: Some(DateTime::from_timestamp(row.get("datetime").unwrap(), 0).unwrap()),
tweet_id: row.get("tweet_id")?,
record_uri: row.get("record_uri")?,
root_record_uri: row.get("root_record_uri")?,
datetime: Some(
DateTime::from_timestamp(row.get("unix_datetime").unwrap(), 0).unwrap(),
),
})
})
.optional()?;
@@ -68,11 +81,11 @@ pub fn read_state(
}
/// Writes the last treated toot id, tweet id and record URIs to the db
pub fn write_state(conn: &Connection, t: TweetToToot) -> Result<(), Box<dyn Error>> {
pub fn write_state(conn: &Connection, t: TootTweetRecord) -> Result<(), Box<dyn Error>> {
debug!("Write struct {:?}", t);
conn.execute(
"INSERT INTO tweet_to_toot (tweet_id, toot_id) VALUES (?1, ?2)",
params![t.tweet_id, t.toot_id],
"INSERT INTO toot_tweet_record (toot_id, tweet_id, record_uri, root_record_uri) VALUES (?1, ?2, ?3, ?4)",
params![t.toot_id, t.tweet_id, t.record_uri, t.root_record_uri],
)?;
Ok(())
@@ -87,9 +100,11 @@ pub fn init_db(d: &str) -> Result<(), Box<dyn Error>> {
let conn = Connection::open(d)?;
conn.execute(
"CREATE TABLE IF NOT EXISTS tweet_to_toot (
tweet_id INTEGER PRIMARY KEY,
"CREATE TABLE IF NOT EXISTS toot_tweet_record (
toot_id INTEGER,
tweet_id INTEGER PRIMARY KEY,
record_uri VARCHAR(128) DEFAULT '',
root_record_uri VARCHAR(128) DEFAULT '',
datetime INTEGER DEFAULT CURRENT_TIMESTAMP
)",
[],
@@ -98,20 +113,20 @@ pub fn init_db(d: &str) -> Result<(), Box<dyn Error>> {
Ok(())
}
/// Migrate DB from 1.5.x to 1.6.x
/// Migrate DB from 1.6+ to 3+
pub fn migrate_db(d: &str) -> Result<(), Box<dyn Error>> {
debug!("Migration DB for Oolatoocs");
let conn = Connection::open(d)?;
let res = conn.execute("SELECT datetime from tweet_to_toot;", []);
let res = conn.execute("SELECT datetime FROM toot_tweet_record;", []);
// If the column can be selected, then everything is up to date;
// if not, check whether the error means the table is missing and migrate
match res {
Err(e) => match e.to_string().as_str() {
"no such column: datetime" => migrate_db_alter_table(&conn), //column does not exist
"Execute returned results - did you mean to call query?" => Ok(()), // return results,
"no such table: toot_tweet_record" => migrate_db_alter_table(&conn), // table does not exist
"Execute returned results - did you mean to call query?" => Ok(()), // return results,
// column does
// exist
_ => Err(e.into()),
@@ -124,9 +139,11 @@ pub fn migrate_db(d: &str) -> Result<(), Box<dyn Error>> {
fn migrate_db_alter_table(c: &Connection) -> Result<(), Box<dyn Error>> {
// create the new table
c.execute(
"CREATE TABLE IF NOT EXISTS tweet_to_toot_new (
tweet_id INTEGER PRIMARY KEY,
"CREATE TABLE IF NOT EXISTS toot_tweet_record (
toot_id INTEGER,
tweet_id INTEGER PRIMARY KEY,
record_uri VARCHAR(128) DEFAULT '',
root_record_uri VARCHAR(128) DEFAULT '',
datetime INTEGER DEFAULT CURRENT_TIMESTAMP
)",
[],
@@ -134,16 +151,13 @@ fn migrate_db_alter_table(c: &Connection) -> Result<(), Box<dyn Error>> {
// copy data from the old table
c.execute(
"INSERT INTO tweet_to_toot_new (tweet_id, toot_id)
SELECT tweet_id, toot_id FROM tweet_to_toot;",
"INSERT INTO toot_tweet_record (toot_id, tweet_id, datetime)
SELECT toot_id, tweet_id, datetime FROM tweet_to_toot;",
[],
)?;
// drop the old table
c.execute("DROP TABLE tweet_to_toot;", [])?;
// rename the new table
c.execute("ALTER TABLE tweet_to_toot_new RENAME TO tweet_to_toot;", [])?;
c.execute("DROP TABLE IF EXISTS tweet_to_toot;", [])?;
Ok(())
}
@@ -164,7 +178,8 @@ mod tests {
// open said file
let conn = Connection::open(d).unwrap();
conn.execute("SELECT * from tweet_to_toot;", []).unwrap();
conn.execute("SELECT * from toot_tweet_record;", [])
.unwrap();
remove_file(d).unwrap();
}
@@ -179,7 +194,7 @@ mod tests {
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO tweet_to_toot (tweet_id, toot_id)
"INSERT INTO toot_tweet_record (tweet_id, toot_id)
VALUES
(100, 1001);",
[],
@@ -199,34 +214,38 @@ mod tests {
let conn = Connection::open(d).unwrap();
let t_in = TweetToToot {
tweet_id: 123456789,
let t_in = TootTweetRecord {
toot_id: 987654321,
tweet_id: 123456789,
record_uri: "a".to_string(),
root_record_uri: "c".to_string(),
datetime: None,
};
write_state(&conn, t_in).unwrap();
let mut stmt = conn
.prepare(
"SELECT tweet_id, toot_id, UNIXEPOCH(datetime) AS datetime FROM tweet_to_toot;",
)
.prepare("SELECT *, UNIXEPOCH(datetime) AS unix_datetime FROM toot_tweet_record;")
.unwrap();
let t_out = stmt
.query_row([], |row| {
Ok(TweetToToot {
tweet_id: row.get("tweet_id").unwrap(),
Ok(TootTweetRecord {
toot_id: row.get("toot_id").unwrap(),
tweet_id: row.get("tweet_id").unwrap(),
record_uri: row.get("record_uri").unwrap(),
root_record_uri: row.get("root_record_uri").unwrap(),
datetime: Some(
DateTime::from_timestamp(row.get("datetime").unwrap(), 0).unwrap(),
DateTime::from_timestamp(row.get("unix_datetime").unwrap(), 0).unwrap(),
),
})
})
.unwrap();
assert_eq!(t_out.tweet_id, 123456789);
assert_eq!(t_out.toot_id, 987654321);
assert_eq!(t_out.tweet_id, 123456789);
assert_eq!(t_out.record_uri, "a".to_string());
assert_eq!(t_out.root_record_uri, "c".to_string());
remove_file(d).unwrap();
}
@@ -240,10 +259,10 @@ mod tests {
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO tweet_to_toot (tweet_id, toot_id)
"INSERT INTO toot_tweet_record (toot_id, tweet_id, record_uri)
VALUES
(101, 1001),
(102, 1002);",
(101, 1001, 'abc'),
(102, 1002, 'def');",
[],
)
.unwrap();
@@ -252,8 +271,9 @@ mod tests {
remove_file(d).unwrap();
assert_eq!(t_out.tweet_id, 102);
assert_eq!(t_out.toot_id, 1002);
assert_eq!(t_out.toot_id, 102);
assert_eq!(t_out.tweet_id, 1002);
assert_eq!(t_out.record_uri, "def".to_string());
}
#[test]
@@ -280,9 +300,9 @@ mod tests {
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO tweet_to_toot (tweet_id, toot_id)
"INSERT INTO toot_tweet_record (toot_id, tweet_id, record_uri)
VALUES
(100, 1000);",
(100, 1000, 'abc');",
[],
)
.unwrap();
@@ -303,19 +323,20 @@ mod tests {
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO tweet_to_toot (tweet_id, toot_id)
"INSERT INTO toot_tweet_record (toot_id, tweet_id, record_uri)
VALUES
(100, 1000);",
(100, 1000, 'abc');",
[],
)
.unwrap();
let t_out = read_state(&conn, Some(1000)).unwrap().unwrap();
let t_out = read_state(&conn, Some(100)).unwrap().unwrap();
remove_file(d).unwrap();
assert_eq!(t_out.tweet_id, 100);
assert_eq!(t_out.toot_id, 1000);
assert_eq!(t_out.toot_id, 100);
assert_eq!(t_out.tweet_id, 1000);
assert_eq!(t_out.record_uri, "abc".to_string());
}
#[test]
@@ -327,8 +348,10 @@ mod tests {
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO tweet_to_toot(tweet_id, toot_id)
VALUES (100, 1000), (101, 1000);",
"INSERT INTO toot_tweet_record (toot_id, tweet_id, record_uri)
VALUES
(1000, 100, 'abc'),
(1000, 101, 'def');",
[],
)
.unwrap();
@@ -337,49 +360,9 @@ mod tests {
remove_file(d).unwrap();
assert_eq!(t_out.tweet_id, 101);
assert_eq!(t_out.toot_id, 1000);
}
#[test]
fn test_migrate_db_alter_table() {
let d = "/tmp/test_migrate_db_alter_table.sqlite";
let conn = Connection::open(d).unwrap();
init_db(d).unwrap();
write_state(
&conn,
TweetToToot {
tweet_id: 0,
toot_id: 0,
datetime: None,
},
)
.unwrap();
write_state(
&conn,
TweetToToot {
tweet_id: 1,
toot_id: 1,
datetime: None,
},
)
.unwrap();
migrate_db_alter_table(&conn).unwrap();
let mut stmt = conn.prepare("PRAGMA table_info(tweet_to_toot);").unwrap();
let mut t = stmt.query([]).unwrap();
while let Some(row) = t.next().unwrap() {
if row.get::<usize, u8>(0).unwrap() == 2 {
assert_eq!(row.get::<usize, String>(1).unwrap(), "datetime".to_string());
}
}
remove_file(d).unwrap();
assert_eq!(t_out.tweet_id, 101);
assert_eq!(t_out.record_uri, "def".to_string());
}
#[test]
@@ -391,14 +374,18 @@ mod tests {
conn.execute(
"CREATE TABLE IF NOT EXISTS tweet_to_toot (
tweet_id INTEGER,
toot_id INTEGER PRIMARY KEY
toot_id INTEGER PRIMARY KEY,
datetime INTEGER DEFAULT CURRENT_TIMESTAMP
)",
[],
)
.unwrap();
conn.execute("INSERT INTO tweet_to_toot VALUES (0, 0), (1, 1);", [])
.unwrap();
conn.execute(
"INSERT INTO tweet_to_toot (tweet_id, toot_id) VALUES (0, 0), (1, 1);",
[],
)
.unwrap();
migrate_db(d).unwrap();
@@ -421,7 +408,7 @@ mod tests {
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO tweet_to_toot(tweet_id, toot_id) VALUES (0, 0);",
"INSERT INTO toot_tweet_record(toot_id, tweet_id, record_uri) VALUES (0, 0, 'abc');",
[],
)
.unwrap();
@@ -429,23 +416,25 @@ mod tests {
delete_state(&conn, 0).unwrap();
let mut stmt = conn
.prepare(
"SELECT tweet_id, toot_id, UNIXEPOCH(datetime) AS datetime FROM tweet_to_toot;",
)
.prepare("SELECT *, UNIXEPOCH(datetime) AS unix_datetime FROM toot_tweet_record;")
.unwrap();
let t_out = stmt.query_row([], |row| {
Ok(TweetToToot {
tweet_id: row.get("tweet_id").unwrap(),
Ok(TootTweetRecord {
toot_id: row.get("toot_id").unwrap(),
datetime: Some(DateTime::from_timestamp(row.get("datetime").unwrap(), 0).unwrap()),
tweet_id: row.get("tweet_id").unwrap(),
record_uri: row.get("record_uri").unwrap(),
root_record_uri: row.get("root_record_uri").unwrap(),
datetime: Some(
DateTime::from_timestamp(row.get("unix_datetime").unwrap(), 0).unwrap(),
),
})
});
assert!(t_out.is_err_and(|x| x == rusqlite::Error::QueryReturnedNoRows));
conn.execute(
"INSERT INTO tweet_to_toot(tweet_id, toot_id) VALUES(102,42), (103,42);",
"INSERT INTO toot_tweet_record(toot_id, tweet_id, record_uri) VALUES(42, 102, 'abc'), (42, 103, 'def');",
[],
)
.unwrap();
@@ -453,16 +442,18 @@ mod tests {
delete_state(&conn, 42).unwrap();
let mut stmt = conn
.prepare(
"SELECT tweet_id, toot_id, UNIXEPOCH(datetime) AS datetime FROM tweet_to_toot;",
)
.prepare("SELECT *, UNIXEPOCH(datetime) AS unix_datetime FROM toot_tweet_record;")
.unwrap();
let t_out = stmt.query_row([], |row| {
Ok(TweetToToot {
tweet_id: row.get("tweet_id").unwrap(),
Ok(TootTweetRecord {
toot_id: row.get("toot_id").unwrap(),
datetime: Some(DateTime::from_timestamp(row.get("datetime").unwrap(), 0).unwrap()),
tweet_id: row.get("tweet_id").unwrap(),
record_uri: row.get("record_uri").unwrap(),
root_record_uri: row.get("root_record_uri").unwrap(),
datetime: Some(
DateTime::from_timestamp(row.get("unix_datetime").unwrap(), 0).unwrap(),
),
})
});
@@ -472,24 +463,27 @@ mod tests {
}
#[test]
fn test_read_all_tweet_state() {
let d = "/tmp/read_all_tweet_state.sqlite";
fn test_read_all_state() {
let d = "/tmp/read_all_state.sqlite";
init_db(d).unwrap();
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO tweet_to_toot(tweet_id, toot_id) VALUES (102, 42), (103, 42), (105, 43);",
"INSERT INTO toot_tweet_record (toot_id, tweet_id, record_uri) VALUES (42, 102, 'abc'), (42, 103, 'def'), (43, 105, 'ghi');",
[],
)
.unwrap();
let v1 = read_all_tweet_state(&conn, 43).unwrap();
let v2 = read_all_tweet_state(&conn, 42).unwrap();
let (tweet_v1, record_v1) = read_all_state(&conn, 43).unwrap();
let (tweet_v2, record_v2) = read_all_state(&conn, 42).unwrap();
assert_eq!(v1, vec![105]);
assert_eq!(v2, vec![102, 103]);
assert_eq!(tweet_v1, vec![105]);
assert_eq!(tweet_v2, vec![102, 103]);
assert_eq!(record_v1, vec!["ghi".to_string()]);
assert_eq!(record_v2, vec!["abc".to_string(), "def".to_string()]);
remove_file(d).unwrap();
}


@@ -477,7 +477,7 @@ pub fn transform_poll(p: &Poll) -> TweetPoll {
/// This posts Tweets with all the associated medias
pub async fn post_tweet(
config: &TwitterConfig,
content: String,
content: &str,
medias: Vec<u64>,
reply_to: Option<u64>,
poll: Option<TweetPoll>,
@@ -486,7 +486,7 @@ pub async fn post_tweet(
let token = get_token(config);
let tweet = Tweet {
text: content,
text: content.to_string(),
media: medias.is_empty().not().then(|| TweetMediasIds {
media_ids: medias.iter().map(|m| m.to_string()).collect(),
}),