38 Commits

Author SHA1 Message Date
VC
e99a666b18 Merge branch 'fix_rand_doc' into 'main'
Fix rand doc

See merge request veretcle/oolatoocs!22
2024-10-09 09:26:25 +00:00
VC
3b18dac2fb 📝: update doc 2024-10-09 11:22:06 +02:00
VC
af977a1ee0 : remove rand 2024-10-09 11:14:07 +02:00
VC
b90b727783 Merge branch 'feat_www' into 'main'
💄: remove www. from url

See merge request veretcle/oolatoocs!21
2024-10-08 08:33:03 +00:00
VC
f8227f99c1 💄: remove www. from url 2024-10-08 10:24:22 +02:00
VC
9f2ff119ff Merge branch 'feat_better_session_handling' into 'main'
♻️: refactor bsky session

See merge request veretcle/oolatoocs!20
2024-10-07 12:50:34 +00:00
VC
c0244c8c30 ♻️: refactor bsky session 2024-10-07 11:55:20 +02:00
VC
89aec3e0ed Merge branch 'fix_429' into 'main'
🚑️: avoid opening session when not necessary

See merge request veretcle/oolatoocs!19
2024-10-03 07:44:31 +00:00
VC
6a7eef757a 🚑️: avoid opening session when not necessary 2024-10-03 09:38:50 +02:00
VC
f46f90ad34 Merge branch 'fix_rt_links' into 'main'
🐛: … was apposed to every link regardless of their length

See merge request veretcle/oolatoocs!18
2024-10-02 08:50:37 +00:00
VC
4b4f9abe2f 🐛: … was apposed to every link regardless of their length 2024-10-02 10:44:43 +02:00
VC
9f9cf52722 Merge branch '7-feat-add-bsky-support' into 'main'
feat: add bsky support

Closes #7

See merge request veretcle/oolatoocs!17
2024-10-02 06:55:42 +00:00
VC
e0d3667fb9 💄: better record link content 2024-10-02 08:51:11 +02:00
VC
ac8af5ce95 : add bluesky support 2024-10-01 12:37:31 +02:00
VC
f7e2aafa7b Merge branch 'rust_1.81.0' into 'main'
⬆️: update version

See merge request veretcle/oolatoocs!16
2024-09-09 08:52:57 +00:00
VC
6e9bb6b42c ⬆️: update version 2024-09-09 10:45:45 +02:00
VC
88edb1b2e1 Merge branch 'rust_1_76' into 'main'
📦: cargo update

See merge request veretcle/oolatoocs!15
2024-03-21 09:53:17 +00:00
VC
bf9d27df61 📦: cargo update 2024-03-21 10:48:58 +01:00
VC
496dde60d6 Merge branch 'fix_twitter_pool_length' into 'main'
feat: truncate poll when too long

See merge request veretcle/oolatoocs!14
2024-01-17 14:24:12 +00:00
VC
567dfae7ab feat: truncate poll when too long 2024-01-17 15:18:12 +01:00
VC
eeaea52e80 Merge branch 'refactor_delete' into 'main'
Refactor delete

See merge request veretcle/oolatoocs!13
2024-01-10 10:31:36 +00:00
VC
4a0dbb06af 📦: bump version 2024-01-10 11:24:56 +01:00
VC
5c17ea6989 ♻ : avoid url duplication 2024-01-10 11:23:09 +01:00
VC
8674048e8d Merge branch '6-feat-add-the-ability-to-rollback-last-tweet' into 'main'
feat: add the ability to rollback last tweet

Closes #6

See merge request veretcle/oolatoocs!12
2024-01-09 13:04:01 +00:00
VC
378d973697 feat: add the ability to rewrite an edited toot 2024-01-09 13:57:43 +01:00
VC
2cb732efed Merge branch 'refresh_main' into 'main'
chore: update megalodon-rs to 0.11.7

See merge request veretcle/oolatoocs!11
2023-12-22 08:09:55 +00:00
VC
5d685b5748 chore: update megalodon-rs to 0.11.7 2023-12-22 09:06:00 +01:00
VC
66664ff621 Merge branch 'feat_better_split' into 'main'
Feat better split

See merge request veretcle/oolatoocs!10
2023-11-29 12:36:00 +00:00
VC
fd84730bdc feat: better split for twitter_count 2023-11-29 13:32:04 +01:00
VC
692f4ff040 chore: bump version 2023-11-29 13:31:45 +01:00
VC
3397416a93 Merge branch 'fix_twitter_count' into 'main'
Fix twitter count

See merge request veretcle/oolatoocs!9
2023-11-29 10:38:27 +00:00
VC
f782987991 chore: bump dependencies’ version 2023-11-29 11:28:10 +01:00
VC
26788f9d37 fix: properly count URL when preceeded by '\n' 2023-11-29 11:25:27 +01:00
VC
ca9b388a50 chore: bump version 2023-11-29 11:24:38 +01:00
VC
42958e0a92 Merge branch 'fix_u16' into 'main'
fix: use u16 instead of i64

See merge request veretcle/oolatoocs!8
2023-11-22 07:57:03 +00:00
VC
77be17e7bf fix: use u16 instead of i64 2023-11-22 08:53:17 +01:00
VC
bd9fd27fd1 Merge branch '5-feat-repeat-mastodon-poll-in-twitter' into 'main'
feat: add poll from Mastodon to Twitter + pass owned values in post_tweet

Closes #5

See merge request veretcle/oolatoocs!7
2023-11-21 22:20:49 +00:00
VC
3e6cae6136 feat: add poll from Mastodon to Twitter + pass owned values in post_tweet 2023-11-21 23:13:40 +01:00
12 changed files with 2031 additions and 546 deletions

.gitignore vendored (1 line changed)

@@ -1,3 +1,4 @@
/target
.last_tweet
.config.toml
.config.json

Cargo.lock generated (1594 lines changed)

File diff suppressed because it is too large.

Cargo.toml

@@ -1,24 +1,27 @@
[package]
name = "oolatoocs"
version = "1.4.0"
version = "3.1.1"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
chrono = "^0.4"
clap = "^4"
env_logger = "^0.10"
futures = "^0.3"
html-escape = "^0.2"
log = "^0.4"
megalodon = "^0.11"
megalodon = "^0.13"
oauth1-request = "^0.6"
regex = "^1.10"
reqwest = { version = "^0.11", features = ["json", "stream", "multipart"] }
rusqlite = "^0.27"
rusqlite = { version = "^0.30", features = ["chrono"] }
serde = { version = "^1.0", features = ["derive"] }
tokio = { version = "^1.33", features = ["rt-multi-thread", "macros", "time"] }
toml = "^0.8"
bsky-sdk = "^0.1"
atrium-api = "^0.24"
[profile.release]
strip = true

README.md

@@ -1,22 +1,30 @@
# oolatoocs, a Mastodon to Twitter bot
# oolatoocs, a Mastodon to Twitter/Bluesky bot
## A little bit of history
So what is it? Originally, I wrote, with some help, [Scootaloo](https://framagit.org/veretcle/scootaloo/) which was a Twitter to Mastodon Bot to help the [writers at NintendojoFR](https://www.nintendojo.fr) not to worry about Mastodon: the vast majority of writers were posting to Twitter, the bot scooped everything and arranged it properly for Mastodon and everything was fine and dandy. It was also used, in an altered beefed-up version, for [Nupes.social](https://nupes.social) to make the tweets from the NUPES political alliance on Twitter, more easily accessible in Mastodon.
So what is it? Originally, I wrote, with some help, [Scootaloo](https://framagit.org/veretcle/scootaloo/) which was a Twitter to Mastodon Bot to help the [writers at NintendojoFR](https://www.nintendojo.fr) not to worry about Mastodon: the vast majority of writers were posting to Twitter, the bot scooped everything and arranged it properly for Mastodon and everything was fine and dandy. It was also used, in an altered beefed-up version, for the (now defunct) Mastodon Instance [Nupes.social](https://nupes.social) to make the tweets from the NUPES political alliance on Twitter, more easily accessible for Mastodon users.
But then Elon came, and we couldn't read data from Twitter anymore. So we had to rely on copy/pasting things from one to another, which is neither fun nor efficient.
Hence `oolatoocs`, which takes a Mastodon Timeline and reposts it to Twitter as properly as possible.
## And now…
Hence `oolatoocs`, which takes a Mastodon Timeline and reposts it to Twitter as properly as possible. And since Bluesky seems to be the hype right now, it also incorporates Bluesky support as of v3.
Bluesky support is mandatory from now on: you can't have just Twitter or just Bluesky, you must have both. I might change this behaviour in the near future, especially when I inevitably have to drop support for Twitter. If you just want Twitter support, stick with a v2.4.x release; for now it gets the job done exactly like the newer versions.
If you don't want Twitter support, open an issue and I will get motivated to comply (maybe…).
# Remarkable features
What it can do:
* Reproduces the Toot content into the Tweet;
* Cuts (poorly) the Toot in half in its too long for Twitter and thread it (this is cut using a word count, not the best method, but it gets the job done);
* Reuploads images/gifs/videos from Mastodon to Twitter
* Can reproduce threads from Mastodon to Twitter
* Can prevent a Toot from being tweeted by using the #NoTweet (case-insensitive) hashtag in Mastodon
What it cant do:
* Poll (no idea on how to do it)
* Reproduces the Toot content into the Tweet/Record;
Cuts (poorly) the Toot in half if it's too long for Twitter/Bluesky and threads it (the cut is based on a word count, not the best method, but it gets the job done);
Reuploads images/gifs/videos from Mastodon to Twitter/Bluesky
⚠️ Bluesky does not support mixing images and videos. You can have up to 4 images on a Bsky record **or** 1 video, but not both. If you mix them, only the video will be posted on Bluesky.
⚠️ Bluesky does not support images greater than 1 MB (that is 1,000,000 bytes, or 976.6 KiB). I might soon incorporate an image quality reducer or WebP transcoding to avoid this issue (a minimal sketch of such a pre-upload guard follows this list).
Can reproduce threads from Mastodon to Twitter/Bluesky
Can reproduce polls from Mastodon to Twitter
⚠️ Bluesky does not support polls for now, so the poll itself is just presented as text from Mastodon instead, which is not the most elegant.
* Can prevent a Toot from being tweeted/recorded to Bluesky by using the #NoTweet (case-insensitive) hashtag in Mastodon
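As a rough illustration of the size limitation above: a pre-upload guard could simply refuse blobs over the limit. This is a hypothetical sketch, not part of the code base; the constant and function names are made up.
```rust
use std::error::Error;

// Hypothetical guard: Bluesky rejects blobs over roughly 1,000,000 bytes,
// so refuse the media before calling upload_blob instead of letting the API fail.
const BLUESKY_MAX_BLOB_BYTES: usize = 1_000_000;

fn check_bluesky_blob_size(bytes: &[u8]) -> Result<(), Box<dyn Error>> {
    if bytes.len() > BLUESKY_MAX_BLOB_BYTES {
        return Err(format!(
            "media is {} bytes, over the ~1 MB Bluesky blob limit",
            bytes.len()
        )
        .into());
    }
    Ok(())
}
```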
# Configuration file
@@ -24,7 +32,7 @@ The configuration is relatively easy to follow:
```toml
[oolatoocs]
db_path = "/var/lib/oolatoocs/db.sqlite3" # the path to the DB where toot/tweet are stored
db_path = "/var/lib/oolatoocs/db.sqlite3" # the path to the DB where toots/tweets/records are stored
[mastodon] # This part can be generated, see below
base = "https://m.nintendojo.fr"
@@ -38,6 +46,11 @@ consumer_key = "<REDACTED>"
consumer_secret = "<REDACTED>"
oauth_token = "<REDACTED>"
oauth_token_secret = "<REDACTED>"
[bluesky] # this is your Bsky handle and password + a writable path for the session handling
handle = "nintendojofr.bsky.social"
password = "<REDACTED>"
config_path = "/var/lib/oolatoocs/bsky.json"
```
## How to generate the Mastodon keys?
@@ -56,6 +69,10 @@ You'll need to generate a key. This is a real pain in the ass, but you can use
Will I some day make a subcommand to generate it? Maybe…
## How to generate the Bluesky part?
You'll need your handle and password. I strongly recommend using a dedicated application password. You'll also need a writable path to store the Bsky session. The minimal sketch below shows roughly what the first login and session save look like.
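This snippet is not part of the repo; it just mirrors what `get_session` in `src/bsky.rs` does on a first run, with a placeholder handle, app password and path.
```rust
use bsky_sdk::{agent::config::FileStore, BskyAgent};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Log in once with the handle and the dedicated app password…
    let agent = BskyAgent::builder().build().await?;
    agent.login("yourhandle.bsky.social", "<app-password>").await?;
    // …then persist the session to the writable config_path so later runs can restore it.
    agent
        .to_config()
        .await
        .save(&FileStore::new("/var/lib/oolatoocs/bsky.json"))
        .await?;
    Ok(())
}
```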
# How to run
First of all, the `--help`:

src/bsky.rs (new file, 241 lines added)

@@ -0,0 +1,241 @@
use crate::config::BlueskyConfig;
use atrium_api::{
app::bsky::feed::post::RecordData, com::atproto::repo::upload_blob::Output,
types::string::Datetime, types::string::Language,
};
use bsky_sdk::{
agent::config::{Config, FileStore},
rich_text::RichText,
BskyAgent,
};
use log::error;
use megalodon::entities::attachment::{Attachment, AttachmentType};
use regex::Regex;
use std::{error::Error, fs::exists};
/// Intermediary struct to deal with replies more easily
#[derive(Debug)]
pub struct BskyReply {
pub record_uri: String,
pub root_record_uri: String,
}
pub async fn get_session(config: &BlueskyConfig) -> Result<BskyAgent, Box<dyn Error>> {
if exists(&config.config_path)? {
let bluesky = BskyAgent::builder()
.config(Config::load(&FileStore::new(&config.config_path)).await?)
.build()
.await?;
if bluesky.api.com.atproto.server.get_session().await.is_ok() {
bluesky
.to_config()
.await
.save(&FileStore::new(&config.config_path))
.await?;
return Ok(bluesky);
}
}
let bluesky = BskyAgent::builder().build().await?;
bluesky.login(&config.handle, &config.password).await?;
bluesky
.to_config()
.await
.save(&FileStore::new(&config.config_path))
.await?;
Ok(bluesky)
}
pub async fn build_post_record(
config: &BlueskyConfig,
text: &str,
language: &Option<String>,
embed: Option<atrium_api::types::Union<atrium_api::app::bsky::feed::post::RecordEmbedRefs>>,
reply_to: &Option<BskyReply>,
) -> Result<RecordData, Box<dyn Error>> {
let mut rt = RichText::new_with_detect_facets(text).await?;
let insert_chars = "…";
let re = Regex::new(r#"(https?://)(www\.)?(\S{1,26})(\S*)"#).unwrap();
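// Shorten each link's visible text: drop the scheme and any leading "www.",
// keep at most 26 characters of the rest, and append "…" when it was longer.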
while let Some(found) = re.captures(&rt.text.clone()) {
if let Some(group) = found.get(4) {
if !group.is_empty() {
rt.insert(group.start(), insert_chars);
rt.delete(
group.start() + insert_chars.len(),
group.start() + insert_chars.len() + group.len(),
);
}
}
if let Some(group) = found.get(1) {
let www: usize = found.get(2).map_or(0, |x| x.len());
rt.delete(group.start(), group.start() + www + group.len());
}
}
let langs = language.clone().map(|s| vec![Language::new(s).unwrap()]);
let reply = if let Some(x) = reply_to {
let root_record = get_record(&config.handle, &rkey(&x.root_record_uri)).await?;
let parent_record = get_record(&config.handle, &rkey(&x.record_uri)).await?;
Some(
atrium_api::app::bsky::feed::post::ReplyRefData {
parent: atrium_api::com::atproto::repo::strong_ref::MainData {
cid: parent_record.data.cid.unwrap(),
uri: parent_record.data.uri.to_owned(),
}
.into(),
root: atrium_api::com::atproto::repo::strong_ref::MainData {
cid: root_record.data.cid.unwrap(),
uri: root_record.data.uri.to_owned(),
}
.into(),
}
.into(),
)
} else {
None
};
Ok(RecordData {
created_at: Datetime::now(),
embed,
entities: None,
facets: rt.facets,
labels: None,
langs,
reply,
tags: None,
text: rt.text,
})
}
async fn get_record(
config: &str,
rkey: &str,
) -> Result<
atrium_api::types::Object<atrium_api::com::atproto::repo::get_record::OutputData>,
Box<dyn Error>,
> {
let bsky = BskyAgent::builder().build().await?;
let record = bsky
.api
.com
.atproto
.repo
.get_record(
atrium_api::com::atproto::repo::get_record::ParametersData {
cid: None,
collection: atrium_api::types::string::Nsid::new("app.bsky.feed.post".to_string())?,
repo: atrium_api::types::string::Handle::new(config.to_string())?.into(),
rkey: rkey.to_string(),
}
.into(),
)
.await?;
Ok(record)
}
// it's ugly af but it gets the job done for now
pub async fn generate_media_records(
bsky: &BskyAgent,
media_attach: &[Attachment],
) -> Option<atrium_api::types::Union<atrium_api::app::bsky::feed::post::RecordEmbedRefs>> {
let mut embed: Option<
atrium_api::types::Union<atrium_api::app::bsky::feed::post::RecordEmbedRefs>,
> = None;
let mut images = Vec::new();
let mut videos: Vec<atrium_api::app::bsky::embed::video::MainData> = Vec::new();
for media in media_attach.iter() {
let blob = upload_media(bsky, &media.url).await.unwrap();
match media.r#type {
AttachmentType::Image => {
images.push(
atrium_api::app::bsky::embed::images::ImageData {
alt: media
.description
.clone()
.map_or("".to_string(), |v| v.to_owned()),
aspect_ratio: None,
image: blob.data.blob,
}
.into(),
);
}
AttachmentType::Gifv | AttachmentType::Video => {
videos.push(atrium_api::app::bsky::embed::video::MainData {
alt: media.description.clone(),
aspect_ratio: None,
captions: None,
video: blob.data.blob,
});
}
_ => {
error!("Not an image, not a video, what happened here?");
}
}
}
if !images.is_empty() {
embed = Some(atrium_api::types::Union::Refs(
atrium_api::app::bsky::feed::post::RecordEmbedRefs::AppBskyEmbedImagesMain(Box::new(
atrium_api::app::bsky::embed::images::MainData { images }.into(),
)),
));
}
// if a video has been uploaded, it takes priority as you can only have 1 video per post
if !videos.is_empty() {
embed = Some(atrium_api::types::Union::Refs(
atrium_api::app::bsky::feed::post::RecordEmbedRefs::AppBskyEmbedVideoMain(Box::new(
videos[0].clone().into(),
)),
))
}
embed
}
async fn upload_media(bsky: &BskyAgent, u: &str) -> Result<Output, Box<dyn Error>> {
let dl = reqwest::get(u).await?;
let bytes = dl.bytes().await?;
let record = bsky.api.com.atproto.repo.upload_blob(bytes.into()).await?;
Ok(record)
}
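/// Returns the record key, i.e. the last segment of an AT URI like at://<did>/app.bsky.feed.post/<rkey>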
fn rkey(record_id: &str) -> String {
record_id.split('/').nth(4).unwrap().to_string()
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_build_post_record() {
let text = "@factornews@piaille.fr Retrouvez-nous ici https://www.nintendojo.fr/articles/editos/le-mod-renovation-de-8bitdo-pour-manette-n64 et là https://www.nintendojo.fr/articles/analyses/vite-vu/vite-vu-morbid-the-lords-of-ire et un lien très court http://vsl.ie/TaMere et un autre https://p.nintendojo.fr/w/kV3CBbKKt1nPEChHhZiNve + http://www.xxx.com + https://www.youtube.com/watch?v=dQw4w9WgXcQ&pp=ygUJcmljayByb2xs";
let expected_text = "@factornews@piaille.fr Retrouvez-nous ici nintendojo.fr/articles/edi… et là nintendojo.fr/articles/ana… et un lien très court vsl.ie/TaMere et un autre p.nintendojo.fr/w/kV3CBbKK… + xxx.com + youtube.com/watch?v=dQw4w9…";
let bsky_conf = BlueskyConfig {
handle: "tamerelol.bsky.social".to_string(),
password: "dtc".to_string(),
config_path: "nope".to_string(),
};
let created_record_data = build_post_record(&bsky_conf, text, &None, None, &None)
.await
.unwrap();
assert_eq!(expected_text, &created_record_data.text);
}
}

src/config.rs

@@ -6,6 +6,7 @@ pub struct Config {
pub oolatoocs: OolatoocsConfig,
pub mastodon: MastodonConfig,
pub twitter: TwitterConfig,
pub bluesky: BlueskyConfig,
}
#[derive(Debug, Deserialize, Clone)]
@@ -30,6 +31,13 @@ pub struct MastodonConfig {
pub token: String,
}
#[derive(Debug, Deserialize)]
pub struct BlueskyConfig {
pub handle: String,
pub password: String,
pub config_path: String,
}
/// parses TOML file into Config struct
pub fn parse_toml(toml_file: &str) -> Config {
let toml_config =

src/lib.rs

@@ -1,3 +1,5 @@
use log::debug;
mod error;
pub use error::OolatoocsError;
@@ -5,20 +7,21 @@ mod config;
pub use config::{parse_toml, Config};
mod state;
pub use state::init_db;
#[allow(unused_imports)]
use state::{read_state, write_state, TweetToToot};
use state::{delete_state, read_all_state, read_state, write_state, TootTweetRecord};
pub use state::{init_db, migrate_db};
mod mastodon;
use mastodon::get_mastodon_timeline_since;
pub use mastodon::register;
use mastodon::{get_mastodon_instance, get_mastodon_timeline_since, get_status_edited_at};
mod utils;
use utils::{generate_multi_tweets, strip_everything};
mod twitter;
#[allow(unused_imports)]
use twitter::{generate_media_ids, post_tweet};
use twitter::{delete_tweet, generate_media_ids, post_tweet, transform_poll};
mod bsky;
use bsky::{build_post_record, generate_media_records, get_session, BskyReply};
use rusqlite::Connection;
@@ -27,11 +30,61 @@ pub async fn run(config: &Config) {
let conn = Connection::open(&config.oolatoocs.db_path)
.unwrap_or_else(|e| panic!("Cannot open DB: {}", e));
let last_toot_id = read_state(&conn, None)
.unwrap_or_else(|e| panic!("Cannot get last toot id: {}", e))
.map(|r| r.toot_id);
let mastodon = get_mastodon_instance(&config.mastodon);
let timeline = get_mastodon_timeline_since(&config.mastodon, last_toot_id)
let bluesky = get_session(&config.bluesky)
.await
.unwrap_or_else(|e| panic!("Cannot get Bsky session: {}", e));
let last_entry =
read_state(&conn, None).unwrap_or_else(|e| panic!("Cannot get last toot id: {}", e));
let last_toot_id: Option<u64> = match last_entry {
None => None, // Does not exist, this is the same as previously
Some(t) => {
match get_status_edited_at(&mastodon, t.toot_id).await {
None => Some(t.toot_id),
Some(d) => {
// a date has been found
if d > t.datetime.unwrap() {
debug!("Last toot date is posterior to the previously written tweet, deleting…");
let (local_tweet_ids, local_record_uris) = read_all_state(&conn, t.toot_id)
.unwrap_or_else(|e| {
panic!(
"Cannot fetch all tweets associated with Toot ID {}: {}",
t.toot_id, e
)
});
for local_tweet_id in local_tweet_ids.into_iter() {
delete_tweet(&config.twitter, local_tweet_id)
.await
.unwrap_or_else(|e| {
panic!("Cannot delete Tweet ID ({}): {}", t.tweet_id, e)
});
}
for local_record_uri in local_record_uris.into_iter() {
bluesky
.delete_record(&local_record_uri)
.await
.unwrap_or_else(|e| {
panic!("Cannot delete record ID ({}): {}", &t.record_uri, e)
});
}
delete_state(&conn, t.toot_id).unwrap_or_else(|e| {
panic!("Cannot delete Toot ID ({}): {}", t.toot_id, e)
});
read_state(&conn, None)
.unwrap_or_else(|e| panic!("Cannot get last toot id: {}", e))
.map(|a| a.toot_id)
} else {
Some(t.toot_id)
}
}
}
}
};
let timeline = get_mastodon_timeline_since(&mastodon, last_toot_id)
.await
.unwrap_or_else(|e| panic!("Cannot get instance: {}", e));
@@ -47,36 +100,133 @@ pub async fn run(config: &Config) {
};
// threads if necessary
let mut reply_to = toot.in_reply_to_id.and_then(|t| {
read_state(&conn, Some(t.parse::<u64>().unwrap()))
.ok()
.flatten()
.map(|s| s.tweet_id)
});
let (mut tweet_reply_to, mut record_reply_to) = toot
.in_reply_to_id
.and_then(|t| {
read_state(&conn, Some(t.parse::<u64>().unwrap()))
.ok()
.flatten()
.map(|s| {
(
s.tweet_id,
BskyReply {
record_uri: s.record_uri.to_owned(),
root_record_uri: s.root_record_uri.to_owned(),
},
)
})
})
.unzip();
// if the toot is too long, we cut it in half here
if let Some((first_half, second_half)) = generate_multi_tweets(&tweet_content) {
tweet_content = second_half;
let reply_id = post_tweet(&config.twitter, &first_half, &[], &reply_to)
.await
.unwrap_or_else(|e| panic!("Cannot post the first half of {}: {}", &toot.id, e));
reply_to = Some(reply_id);
// post the first half
let tweet_reply_id =
post_tweet(&config.twitter, &first_half, vec![], tweet_reply_to, None)
.await
.unwrap_or_else(|e| {
panic!(
"Cannot post the first half of {} for Twitter: {}",
&toot.id, e
)
});
let record = build_post_record(
&config.bluesky,
&first_half,
&toot.language,
None,
&record_reply_to,
)
.await
.unwrap_or_else(|e| panic!("Cannot create valid record for {}: {}", &toot.id, e));
let record_reply_id = bluesky.create_record(record).await.unwrap_or_else(|e| {
panic!(
"Cannot post the first half of {} for Bluesky: {}",
&toot.id, e
)
});
// write it to db
write_state(
&conn,
TootTweetRecord {
toot_id: toot.id.parse::<u64>().unwrap(),
tweet_id: tweet_reply_id,
record_uri: record_reply_id.data.uri.to_owned(),
root_record_uri: record_reply_to
.as_ref()
.map_or(record_reply_id.data.uri.to_owned(), |v| {
v.root_record_uri.to_owned()
}),
datetime: None,
},
)
.unwrap_or_else(|e| {
panic!(
"Cannot store Toot/Tweet/Record ({}/{}/{}): {}",
&toot.id, tweet_reply_id, &record_reply_id.data.uri, e
)
});
record_reply_to = Some(BskyReply {
record_uri: record_reply_id.data.uri.to_owned(),
root_record_uri: record_reply_to
.as_ref()
.map_or(record_reply_id.data.uri.clone(), |v| {
v.root_record_uri.clone()
}),
});
tweet_reply_to = Some(tweet_reply_id);
};
// treats poll if any
let in_poll = toot.poll.map(|p| transform_poll(&p));
// treats medias
let medias = generate_media_ids(&config.twitter, &toot.media_attachments).await;
let record_medias = generate_media_records(&bluesky, &toot.media_attachments).await;
let tweet_medias = generate_media_ids(&config.twitter, &toot.media_attachments).await;
// posts corresponding tweet
let tweet_id = post_tweet(&config.twitter, &tweet_content, &medias, &reply_to)
let tweet_id = post_tweet(
&config.twitter,
&tweet_content,
tweet_medias,
tweet_reply_to,
in_poll,
)
.await
.unwrap_or_else(|e| panic!("Cannot Tweet {}: {}", toot.id, e));
let record = build_post_record(
&config.bluesky,
&tweet_content,
&toot.language,
record_medias,
&record_reply_to,
)
.await
.unwrap_or_else(|e| panic!("Cannot build record for {}: {}", &toot.id, e));
let created_record = bluesky
.create_record(record)
.await
.unwrap_or_else(|e| panic!("Cannot Tweet {}: {}", toot.id, e));
.unwrap_or_else(|e| panic!("Cannot put record {}: {}", &toot.id, e));
// writes the current state of the tweet
write_state(
&conn,
TweetToToot {
tweet_id,
TootTweetRecord {
toot_id: toot.id.parse::<u64>().unwrap(),
tweet_id,
record_uri: created_record.data.uri.clone(),
root_record_uri: record_reply_to
.as_ref()
.map_or(created_record.data.uri.clone(), |v| {
v.root_record_uri.clone()
}),
datetime: None,
},
)
.unwrap_or_else(|e| panic!("Cannot store Toot/Tweet ({}/{}): {}", &toot.id, tweet_id, e));

src/main.rs

@@ -49,6 +49,21 @@ fn main() {
.display_order(1),
),
)
.subcommand(
Command::new("migrate")
.version(env!("CARGO_PKG_VERSION"))
.about("Command to register to Mastodon Instance")
.arg(
Arg::new("config")
.short('c')
.long("config")
.value_name("CONFIG_FILE")
.help(format!("TOML config file for {}", env!("CARGO_PKG_NAME")))
.num_args(1)
.default_value(DEFAULT_CONFIG_PATH)
.display_order(1),
),
)
.get_matches();
env_logger::init();
@@ -63,6 +78,11 @@ fn main() {
register(sub_m.get_one::<String>("host").unwrap());
return;
}
Some(("migrate", sub_m)) => {
let config = parse_toml(sub_m.get_one::<String>("config").unwrap());
migrate_db(&config.oolatoocs.db_path).unwrap();
return;
}
_ => (),
}

src/mastodon.rs

@@ -1,4 +1,5 @@
use crate::config::MastodonConfig;
use chrono::{DateTime, Utc};
use megalodon::{
entities::{Status, StatusVisibility},
generator,
@@ -10,16 +11,29 @@ use megalodon::{
use std::error::Error;
use std::io::stdin;
pub async fn get_mastodon_timeline_since(
config: &MastodonConfig,
id: Option<u64>,
) -> Result<Vec<Status>, Box<dyn Error>> {
let mastodon = Mastodon::new(
/// Get Mastodon Object instance
pub fn get_mastodon_instance(config: &MastodonConfig) -> Mastodon {
Mastodon::new(
config.base.to_string(),
Some(config.token.to_string()),
None,
);
)
}
/// Get the edited_at field from the specified toot
pub async fn get_status_edited_at(mastodon: &Mastodon, t: u64) -> Option<DateTime<Utc>> {
mastodon
.get_status(t.to_string())
.await
.ok()
.and_then(|t| t.json.edited_at)
}
/// Get the home timeline since the last toot
pub async fn get_mastodon_timeline_since(
mastodon: &Mastodon,
id: Option<u64>,
) -> Result<Vec<Status>, Box<dyn Error>> {
let input_options = GetHomeTimelineInputOptions {
only_media: Some(false),
limit: None,

src/state.rs

@@ -1,12 +1,51 @@
use chrono::{DateTime, Utc};
use log::debug;
use rusqlite::{params, Connection, OptionalExtension};
use std::error::Error;
/// Struct for each query line
#[derive(Debug)]
pub struct TweetToToot {
pub tweet_id: u64,
pub struct TootTweetRecord {
// Mastodon part
pub toot_id: u64,
// Twitter part
pub tweet_id: u64,
// Bluesky part
pub record_uri: String,
pub root_record_uri: String,
pub datetime: Option<DateTime<Utc>>,
}
/// Deletes a given state
pub fn delete_state(conn: &Connection, toot_id: u64) -> Result<(), Box<dyn Error>> {
debug!("Deleting Toot ID {}", toot_id);
conn.execute(
&format!("DELETE FROM toot_tweet_record WHERE toot_id = {}", toot_id),
[],
)?;
Ok(())
}
/// Retrieves all tweets associated to a toot in the form of a vector
pub fn read_all_state(
conn: &Connection,
toot_id: u64,
) -> Result<(Vec<u64>, Vec<String>), Box<dyn Error>> {
let query = format!(
"SELECT tweet_id, record_uri FROM toot_tweet_record WHERE toot_id = {};",
toot_id
);
let mut stmt = conn.prepare(&query)?;
let mut rows = stmt.query([])?;
let mut tweet_v: Vec<u64> = Vec::new();
let mut record_v: Vec<String> = Vec::new();
while let Some(row) = rows.next()? {
tweet_v.push(row.get(0)?);
record_v.push(row.get(1)?);
}
Ok((tweet_v, record_v))
}
/// if None is passed, read the last tweet from DB
@@ -14,20 +53,26 @@ pub struct TweetToToot {
pub fn read_state(
conn: &Connection,
s: Option<u64>,
) -> Result<Option<TweetToToot>, Box<dyn Error>> {
) -> Result<Option<TootTweetRecord>, Box<dyn Error>> {
debug!("Reading toot_id {:?}", s);
let begin_query = "SELECT *, UNIXEPOCH(datetime) AS unix_datetime FROM toot_tweet_record";
let query: String = match s {
Some(i) => format!("SELECT * FROM tweet_to_toot WHERE toot_id = {i}"),
None => "SELECT * FROM tweet_to_toot ORDER BY toot_id DESC LIMIT 1".to_string(),
Some(i) => format!("{begin_query} WHERE toot_id = {i} ORDER BY tweet_id DESC LIMIT 1"),
None => format!("{begin_query} ORDER BY toot_id DESC LIMIT 1"),
};
let mut stmt = conn.prepare(&query)?;
let t = stmt
.query_row([], |row| {
Ok(TweetToToot {
tweet_id: row.get("tweet_id")?,
Ok(TootTweetRecord {
toot_id: row.get("toot_id")?,
tweet_id: row.get("tweet_id")?,
record_uri: row.get("record_uri")?,
root_record_uri: row.get("root_record_uri")?,
datetime: Some(
DateTime::from_timestamp(row.get("unix_datetime").unwrap(), 0).unwrap(),
),
})
})
.optional()?;
@@ -36,11 +81,11 @@ pub fn read_state(
}
/// Writes last treated tweet id and toot id to the db
pub fn write_state(conn: &Connection, t: TweetToToot) -> Result<(), Box<dyn Error>> {
pub fn write_state(conn: &Connection, t: TootTweetRecord) -> Result<(), Box<dyn Error>> {
debug!("Write struct {:?}", t);
conn.execute(
"INSERT INTO tweet_to_toot (tweet_id, toot_id) VALUES (?1, ?2)",
params![t.tweet_id, t.toot_id],
"INSERT INTO toot_tweet_record (toot_id, tweet_id, record_uri, root_record_uri) VALUES (?1, ?2, ?3, ?4)",
params![t.toot_id, t.tweet_id, t.record_uri, t.root_record_uri],
)?;
Ok(())
@@ -55,9 +100,12 @@ pub fn init_db(d: &str) -> Result<(), Box<dyn Error>> {
let conn = Connection::open(d)?;
conn.execute(
"CREATE TABLE IF NOT EXISTS tweet_to_toot (
tweet_id INTEGER,
toot_id INTEGER PRIMARY KEY
"CREATE TABLE IF NOT EXISTS toot_tweet_record (
toot_id INTEGER,
tweet_id INTEGER PRIMARY KEY,
record_uri VARCHAR(128) DEFAULT '',
root_record_uri VARCHAR(128) DEFAULT '',
datetime INTEGER DEFAULT CURRENT_TIMESTAMP
)",
[],
)?;
@@ -65,6 +113,55 @@ pub fn init_db(d: &str) -> Result<(), Box<dyn Error>> {
Ok(())
}
/// Migrate DB from 1.6+ to 3+
pub fn migrate_db(d: &str) -> Result<(), Box<dyn Error>> {
debug!("Migration DB for Oolatoocs");
let conn = Connection::open(d)?;
let res = conn.execute("SELECT datetime FROM toot_tweet_record;", []);
// If the column can be selected, then it's OK;
// if not, check whether the error means the old schema is still in place and migrate it
match res {
Err(e) => match e.to_string().as_str() {
"no such table: toot_tweet_record" => migrate_db_alter_table(&conn), // table does not exist
"Execute returned results - did you mean to call query?" => Ok(()), // return results,
// column does
// exist
_ => Err(e.into()),
},
Ok(_) => Ok(()),
}
}
/// Creates the new table, copies the data from the old table and drops the old one
fn migrate_db_alter_table(c: &Connection) -> Result<(), Box<dyn Error>> {
// create the new table
c.execute(
"CREATE TABLE IF NOT EXISTS toot_tweet_record (
toot_id INTEGER,
tweet_id INTEGER PRIMARY KEY,
record_uri VARCHAR(128) DEFAULT '',
root_record_uri VARCHAR(128) DEFAULT '',
datetime INTEGER DEFAULT CURRENT_TIMESTAMP
)",
[],
)?;
// copy data from the old table
c.execute(
"INSERT INTO toot_tweet_record (toot_id, tweet_id, datetime)
SELECT toot_id, tweet_id, datetime FROM tweet_to_toot;",
[],
)?;
// drop the old table
c.execute("DROP TABLE IF EXISTS tweet_to_toot;", [])?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
@@ -81,7 +178,8 @@ mod tests {
// open said file
let conn = Connection::open(d).unwrap();
conn.execute("SELECT * from tweet_to_toot;", []).unwrap();
conn.execute("SELECT * from toot_tweet_record;", [])
.unwrap();
remove_file(d).unwrap();
}
@@ -96,7 +194,7 @@ mod tests {
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO tweet_to_toot (tweet_id, toot_id)
"INSERT INTO toot_tweet_record (tweet_id, toot_id)
VALUES
(100, 1001);",
[],
@@ -116,26 +214,38 @@ mod tests {
let conn = Connection::open(d).unwrap();
let t_in = TweetToToot {
tweet_id: 123456789,
let t_in = TootTweetRecord {
toot_id: 987654321,
tweet_id: 123456789,
record_uri: "a".to_string(),
root_record_uri: "c".to_string(),
datetime: None,
};
write_state(&conn, t_in).unwrap();
let mut stmt = conn.prepare("SELECT * FROM tweet_to_toot;").unwrap();
let mut stmt = conn
.prepare("SELECT *, UNIXEPOCH(datetime) AS unix_datetime FROM toot_tweet_record;")
.unwrap();
let t_out = stmt
.query_row([], |row| {
Ok(TweetToToot {
tweet_id: row.get("tweet_id").unwrap(),
Ok(TootTweetRecord {
toot_id: row.get("toot_id").unwrap(),
tweet_id: row.get("tweet_id").unwrap(),
record_uri: row.get("record_uri").unwrap(),
root_record_uri: row.get("root_record_uri").unwrap(),
datetime: Some(
DateTime::from_timestamp(row.get("unix_datetime").unwrap(), 0).unwrap(),
),
})
})
.unwrap();
assert_eq!(t_out.tweet_id, 123456789);
assert_eq!(t_out.toot_id, 987654321);
assert_eq!(t_out.tweet_id, 123456789);
assert_eq!(t_out.record_uri, "a".to_string());
assert_eq!(t_out.root_record_uri, "c".to_string());
remove_file(d).unwrap();
}
@@ -149,10 +259,10 @@ mod tests {
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO tweet_to_toot (tweet_id, toot_id)
"INSERT INTO toot_tweet_record (toot_id, tweet_id, record_uri)
VALUES
(101, 1001),
(102, 1002);",
(101, 1001, 'abc'),
(102, 1002, 'def');",
[],
)
.unwrap();
@@ -161,8 +271,9 @@ mod tests {
remove_file(d).unwrap();
assert_eq!(t_out.tweet_id, 102);
assert_eq!(t_out.toot_id, 1002);
assert_eq!(t_out.toot_id, 102);
assert_eq!(t_out.tweet_id, 1002);
assert_eq!(t_out.record_uri, "def".to_string());
}
#[test]
@@ -189,9 +300,9 @@ mod tests {
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO tweet_to_toot (tweet_id, toot_id)
"INSERT INTO toot_tweet_record (toot_id, tweet_id, record_uri)
VALUES
(100, 1000);",
(100, 1000, 'abc');",
[],
)
.unwrap();
@@ -212,9 +323,35 @@ mod tests {
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO tweet_to_toot (tweet_id, toot_id)
"INSERT INTO toot_tweet_record (toot_id, tweet_id, record_uri)
VALUES
(100, 1000);",
(100, 1000, 'abc');",
[],
)
.unwrap();
let t_out = read_state(&conn, Some(100)).unwrap().unwrap();
remove_file(d).unwrap();
assert_eq!(t_out.toot_id, 100);
assert_eq!(t_out.tweet_id, 1000);
assert_eq!(t_out.record_uri, "abc".to_string());
}
#[test]
fn test_last_toot_id_read_state() {
let d = "/tmp/test_last_toot_id_read_state.sqlite";
init_db(d).unwrap();
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO toot_tweet_record (toot_id, tweet_id, record_uri)
VALUES
(1000, 100, 'abc'),
(1000, 101, 'def');",
[],
)
.unwrap();
@@ -223,7 +360,131 @@ mod tests {
remove_file(d).unwrap();
assert_eq!(t_out.tweet_id, 100);
assert_eq!(t_out.toot_id, 1000);
assert_eq!(t_out.tweet_id, 101);
assert_eq!(t_out.record_uri, "def".to_string());
}
#[test]
fn test_migrate_db() {
// this should be idempotent
let d = "/tmp/test_migrate_db.sqlite";
let conn = Connection::open(d).unwrap();
conn.execute(
"CREATE TABLE IF NOT EXISTS tweet_to_toot (
tweet_id INTEGER,
toot_id INTEGER PRIMARY KEY,
datetime INTEGER DEFAULT CURRENT_TIMESTAMP
)",
[],
)
.unwrap();
conn.execute(
"INSERT INTO tweet_to_toot (tweet_id, toot_id) VALUES (0, 0), (1, 1);",
[],
)
.unwrap();
migrate_db(d).unwrap();
let last_state = read_state(&conn, None).unwrap().unwrap();
assert_eq!(last_state.tweet_id, 1);
assert_eq!(last_state.toot_id, 1);
migrate_db(d).unwrap(); // shouldn't do anything
remove_file(d).unwrap();
}
#[test]
fn test_delete_state() {
let d = "/tmp/test_delete_state.sqlite";
init_db(d).unwrap();
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO toot_tweet_record(toot_id, tweet_id, record_uri) VALUES (0, 0, 'abc');",
[],
)
.unwrap();
delete_state(&conn, 0).unwrap();
let mut stmt = conn
.prepare("SELECT *, UNIXEPOCH(datetime) AS unix_datetime FROM toot_tweet_record;")
.unwrap();
let t_out = stmt.query_row([], |row| {
Ok(TootTweetRecord {
toot_id: row.get("toot_id").unwrap(),
tweet_id: row.get("tweet_id").unwrap(),
record_uri: row.get("record_uri").unwrap(),
root_record_uri: row.get("root_record_uri").unwrap(),
datetime: Some(
DateTime::from_timestamp(row.get("unix_datetime").unwrap(), 0).unwrap(),
),
})
});
assert!(t_out.is_err_and(|x| x == rusqlite::Error::QueryReturnedNoRows));
conn.execute(
"INSERT INTO toot_tweet_record(toot_id, tweet_id, record_uri) VALUES(42, 102, 'abc'), (42, 103, 'def');",
[],
)
.unwrap();
delete_state(&conn, 42).unwrap();
let mut stmt = conn
.prepare("SELECT *, UNIXEPOCH(datetime) AS unix_datetime FROM toot_tweet_record;")
.unwrap();
let t_out = stmt.query_row([], |row| {
Ok(TootTweetRecord {
toot_id: row.get("toot_id").unwrap(),
tweet_id: row.get("tweet_id").unwrap(),
record_uri: row.get("record_uri").unwrap(),
root_record_uri: row.get("root_record_uri").unwrap(),
datetime: Some(
DateTime::from_timestamp(row.get("unix_datetime").unwrap(), 0).unwrap(),
),
})
});
assert!(t_out.is_err_and(|x| x == rusqlite::Error::QueryReturnedNoRows));
remove_file(d).unwrap();
}
#[test]
fn test_read_all_state() {
let d = "/tmp/read_all_state.sqlite";
init_db(d).unwrap();
let conn = Connection::open(d).unwrap();
conn.execute(
"INSERT INTO toot_tweet_record (toot_id, tweet_id, record_uri) VALUES (42, 102, 'abc'), (42, 103, 'def'), (43, 105, 'ghi');",
[],
)
.unwrap();
let (tweet_v1, record_v1) = read_all_state(&conn, 43).unwrap();
let (tweet_v2, record_v2) = read_all_state(&conn, 42).unwrap();
assert_eq!(tweet_v1, vec![105]);
assert_eq!(tweet_v2, vec![102, 103]);
assert_eq!(record_v1, vec!["ghi".to_string()]);
assert_eq!(record_v2, vec!["abc".to_string(), "def".to_string()]);
remove_file(d).unwrap();
}
}

src/twitter.rs

@@ -1,8 +1,12 @@
use crate::config::TwitterConfig;
use crate::error::OolatoocsError;
use chrono::Utc;
use futures::{stream, StreamExt};
use log::{debug, error, warn};
use megalodon::entities::attachment::{Attachment, AttachmentType};
use megalodon::entities::{
attachment::{Attachment, AttachmentType},
Poll,
};
use oauth1_request::Token;
use reqwest::{
multipart::{Form, Part},
@@ -28,6 +32,8 @@ struct Tweet {
media: Option<TweetMediasIds>,
#[serde(skip_serializing_if = "Option::is_none")]
reply: Option<TweetReply>,
#[serde(skip_serializing_if = "Option::is_none")]
poll: Option<TweetPoll>,
}
#[derive(Serialize, Debug)]
@@ -40,6 +46,12 @@ struct TweetReply {
in_reply_to_tweet_id: String,
}
#[derive(Serialize, Debug)]
pub struct TweetPoll {
pub options: Vec<String>,
pub duration_minutes: u16,
}
#[derive(Deserialize, Debug)]
struct TweetResponse {
data: TweetResponseData,
@@ -101,6 +113,36 @@ fn get_token(config: &TwitterConfig) -> Token {
)
}
/// This function deletes a tweet, given its id
pub async fn delete_tweet(config: &TwitterConfig, id: u64) -> Result<(), Box<dyn Error>> {
debug!("Deleting Tweet {}", id);
let empty_request = EmptyRequest {}; // Why? Because fuck you, that's why!
let token = get_token(config);
let delete_uri = format!("{}/{}", TWITTER_API_TWEET_URL, id);
let client = Client::new();
let res = client
.delete(&delete_uri)
.header(
"Authorization",
oauth1_request::delete(
&delete_uri,
&empty_request,
&token,
oauth1_request::HMAC_SHA1,
),
)
.send()
.await?;
if !res.status().is_success() {
return Err(OolatoocsError::new(&format!("Cannot delete Tweet {}", id)).into());
}
Ok(())
}
/// This function generates a media_ids vec to be used by Twitter
pub async fn generate_media_ids(config: &TwitterConfig, media_attach: &[Attachment]) -> Vec<u64> {
let mut medias: Vec<u64> = vec![];
@@ -416,12 +458,29 @@ async fn upload_chunk_media(
Ok(orig_media_id.media_id)
}
pub fn transform_poll(p: &Poll) -> TweetPoll {
let poll_end_datetime = p.expires_at.unwrap(); // should be safe at this point
let now = Utc::now();
let diff = poll_end_datetime.signed_duration_since(now);
TweetPoll {
options: p
.options
.iter()
.map(|i| i.title.chars().take(25).collect::<String>())
.collect(),
duration_minutes: diff.num_minutes().try_into().unwrap(), // safe here, number is positive
// and can't be over 21600
}
}
/// This posts Tweets with all the associated medias
pub async fn post_tweet(
config: &TwitterConfig,
content: &str,
medias: &[u64],
reply_to: &Option<u64>,
medias: Vec<u64>,
reply_to: Option<u64>,
poll: Option<TweetPoll>,
) -> Result<u64, Box<dyn Error>> {
let empty_request = EmptyRequest {}; // Why? Because fuck you, that's why!
let token = get_token(config);
@@ -434,6 +493,7 @@ pub async fn post_tweet(
reply: reply_to.map(|s| TweetReply {
in_reply_to_tweet_id: s.to_string(),
}),
poll,
};
let client = Client::new();
@@ -456,3 +516,41 @@ pub async fn post_tweet(
Ok(res.data.id.parse::<u64>().unwrap())
}
#[cfg(test)]
mod tests {
use super::*;
use megalodon::entities::PollOption;
#[test]
fn test_transform_poll() {
let poll = Poll {
id: "youpi".to_string(),
expires_at: Some(Utc::now()),
expired: false,
multiple: false,
votes_count: 0,
voters_count: None,
options: vec![
PollOption {
title: "Je suis beaucoup trop long comme option, tronque-moi !".to_string(),
votes_count: None,
},
PollOption {
title: "nope".to_string(),
votes_count: None,
},
],
voted: None,
emojis: vec![],
};
let tweet_poll_res = transform_poll(&poll);
let tweet_pool_expected = TweetPoll {
duration_minutes: 0,
options: vec!["Je suis beaucoup trop lon".to_string(), "nope".to_string()],
};
assert_eq!(tweet_poll_res.options, tweet_pool_expected.options);
}
}

src/utils.rs

@@ -33,7 +33,7 @@ pub fn generate_multi_tweets(content: &str) -> Option<(String, String)> {
fn twitter_count(content: &str) -> usize {
let mut count = 0;
let split_content = content.split(' ');
let split_content = content.split(&[' ', '\n']);
count += split_content.clone().count() - 1; // count the spaces
for word in split_content {
@@ -105,6 +105,14 @@ mod tests {
let content = "this is the link https://www.google.com/tamerelol/youpi/tonperemdr/tarace.html if you like! What if I shit a final";
assert_eq!(twitter_count(content), 76);
let content = "multi ple space";
assert_eq!(twitter_count(content), content.chars().count());
let content = "This link is LEEEEET\n\nhttps://www.factornews.com/actualites/ca-sent-le-sapin-pour-free-radical-design-49985.html";
assert_eq!(twitter_count(content), 45);
}
#[test]