add: full initial

trisua 2025-08-21 00:30:58 -04:00
parent f5c663495d
commit d06bc5e726
29 changed files with 592 additions and 1928 deletions

@@ -0,0 +1,19 @@
[package]
name = "buckets-core"
description = "Buckets media upload types"
version = "1.0.1"
edition = "2024"
readme = "../../README.md"
authors.workspace = true
repository.workspace = true
license.workspace = true
homepage.workspace = true
[dependencies]
tetratto-core = "15.0.2"
tetratto-shared = "12.0.6"
pathbufd = "0.1.4"
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.142"
toml = "0.9.4"
oiseau = { version = "0.1.2", default-features = false, features = ["postgres", "redis"] }

@@ -0,0 +1,59 @@
use oiseau::config::{Configuration, DatabaseConfig};
use pathbufd::PathBufD;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Config {
/// The directory files are stored in (relative to cwd).
#[serde(default = "default_directory")]
pub directory: String,
/// Database configuration.
#[serde(default = "default_database")]
pub database: DatabaseConfig,
}
fn default_directory() -> String {
"buckets".to_string()
}
fn default_database() -> DatabaseConfig {
DatabaseConfig::default()
}
impl Configuration for Config {
fn db_config(&self) -> DatabaseConfig {
self.database.to_owned()
}
}
impl Default for Config {
fn default() -> Self {
Self {
directory: default_directory(),
database: default_database(),
}
}
}
impl Config {
/// Read the configuration file.
pub fn read() -> Self {
toml::from_str(
&match std::fs::read_to_string(PathBufD::current().join("app.toml")) {
Ok(x) => x,
Err(_) => {
let x = Config::default();
std::fs::write(
PathBufD::current().join("app.toml"),
&toml::to_string_pretty(&x).expect("failed to serialize config"),
)
.expect("failed to write config");
return x;
}
},
)
.expect("failed to deserialize config")
}
}
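A minimal usage sketch (assuming buckets-core is added as a dependency and the process runs from the directory that holds app.toml):
use buckets_core::Config;
fn main() {
    // Reads ./app.toml; on the first run this writes the defaults above and returns them.
    let config = Config::read();
    println!("uploads are stored in ./{}", config.directory);
}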

@@ -0,0 +1,28 @@
mod sql;
mod uploads;
use crate::config::Config;
use oiseau::{execute, postgres::DataManager as OiseauManager, postgres::Result as PgResult};
use tetratto_core::model::{Error, Result};
#[derive(Clone)]
pub struct DataManager(pub OiseauManager<Config>);
impl DataManager {
/// Create a new [`DataManager`].
pub async fn new(config: Config) -> PgResult<Self> {
Ok(Self(OiseauManager::new(config).await?))
}
/// Initialize tables.
pub async fn init(&self) -> Result<()> {
let conn = match self.0.connect().await {
Ok(c) => c,
Err(e) => return Err(Error::DatabaseConnection(e.to_string())),
};
execute!(&conn, sql::CREATE_TABLE_UPLOADS).unwrap();
Ok(())
}
}
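A sketch of bootstrapping the database layer with these two calls, assuming a tokio runtime and a Postgres instance reachable through the app.toml settings:
use buckets_core::{Config, DataManager};
#[tokio::main]
async fn main() {
    // Connect using the configured database settings.
    let data = DataManager::new(Config::read())
        .await
        .expect("failed to connect to database");
    // Create the `uploads` table if it does not exist yet.
    data.init().await.expect("failed to init database");
}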

@@ -0,0 +1,7 @@
CREATE TABLE IF NOT EXISTS uploads (
id BIGINT NOT NULL PRIMARY KEY,
created BIGINT NOT NULL,
owner BIGINT NOT NULL,
bucket TEXT NOT NULL,
metadata TEXT NOT NULL
)

@@ -0,0 +1 @@
pub const CREATE_TABLE_UPLOADS: &str = include_str!("./create_uploads.sql");

@@ -0,0 +1,167 @@
use crate::{
DataManager,
model::{MediaUpload, UploadMetadata},
};
use oiseau::{PostgresRow, cache::Cache, execute, get, params, query_rows};
use tetratto_core::auto_method;
use tetratto_core::model::{Error, Result};
impl DataManager {
/// Get a [`MediaUpload`] from an SQL row.
pub(crate) fn get_upload_from_row(x: &PostgresRow) -> MediaUpload {
MediaUpload {
id: get!(x->0(i64)) as usize,
created: get!(x->1(i64)) as usize,
owner: get!(x->2(i64)) as usize,
bucket: get!(x->3(String)),
metadata: serde_json::from_str(&get!(x->4(String))).unwrap(),
}
}
auto_method!(get_upload_by_id(usize as i64)@get_upload_from_row -> "SELECT * FROM uploads WHERE id = $1" --name="upload" --returns=MediaUpload --cache-key-tmpl="atto.upload:{}");
/// Get all uploads (paginated).
///
/// # Arguments
/// * `batch` - the limit of items in each page
/// * `page` - the page number
pub async fn get_uploads(&self, batch: usize, page: usize) -> Result<Vec<MediaUpload>> {
let conn = match self.0.connect().await {
Ok(c) => c,
Err(e) => return Err(Error::DatabaseConnection(e.to_string())),
};
let res = query_rows!(
&conn,
"SELECT * FROM uploads ORDER BY created DESC LIMIT $1 OFFSET $2",
&[&(batch as i64), &((page * batch) as i64)],
|x| { Self::get_upload_from_row(x) }
);
if res.is_err() {
return Err(Error::GeneralNotFound("upload".to_string()));
}
Ok(res.unwrap())
}
/// Get all uploads by their owner (paginated).
///
/// # Arguments
/// * `owner` - the ID of the owner of the upload
/// * `batch` - the limit of items in each page
/// * `page` - the page number
pub async fn get_uploads_by_owner(
&self,
owner: usize,
batch: usize,
page: usize,
) -> Result<Vec<MediaUpload>> {
let conn = match self.0.connect().await {
Ok(c) => c,
Err(e) => return Err(Error::DatabaseConnection(e.to_string())),
};
let res = query_rows!(
&conn,
"SELECT * FROM uploads WHERE owner = $1 ORDER BY created DESC LIMIT $2 OFFSET $3",
&[&(owner as i64), &(batch as i64), &((page * batch) as i64)],
|x| { Self::get_upload_from_row(x) }
);
if res.is_err() {
return Err(Error::GeneralNotFound("upload".to_string()));
}
Ok(res.unwrap())
}
/// Get all uploads by their owner.
///
/// # Arguments
/// * `owner` - the ID of the owner of the upload
pub async fn get_uploads_by_owner_all(&self, owner: usize) -> Result<Vec<MediaUpload>> {
let conn = match self.0.connect().await {
Ok(c) => c,
Err(e) => return Err(Error::DatabaseConnection(e.to_string())),
};
let res = query_rows!(
&conn,
"SELECT * FROM uploads WHERE owner = $1 ORDER BY created DESC",
&[&(owner as i64)],
|x| { Self::get_upload_from_row(x) }
);
if res.is_err() {
return Err(Error::GeneralNotFound("upload".to_string()));
}
Ok(res.unwrap())
}
/// Create a new upload in the database.
///
/// # Arguments
/// * `data` - a mock [`MediaUpload`] object to insert
pub async fn create_upload(&self, data: MediaUpload) -> Result<MediaUpload> {
let conn = match self.0.connect().await {
Ok(c) => c,
Err(e) => return Err(Error::DatabaseConnection(e.to_string())),
};
data.metadata.validate_kv()?;
let res = execute!(
&conn,
"INSERT INTO uploads VALUES ($1, $2, $3, $4, $5)",
params![
&(data.id as i64),
&(data.created as i64),
&(data.owner as i64),
&data.bucket,
&serde_json::to_string(&data.metadata).unwrap().as_str(),
]
);
if let Err(e) = res {
return Err(Error::DatabaseError(e.to_string()));
}
// return
Ok(data)
}
pub async fn delete_upload(&self, id: usize) -> Result<()> {
// if !user.permissions.check(FinePermission::MANAGE_UPLOADS) {
// return Err(Error::NotAllowed);
// }
// delete file
// it's most important that the file gets off the file system first, even
// if there's an issue in the database
//
// the actual file takes up much more space than the database entry.
let upload = self.get_upload_by_id(id).await?;
upload.remove(&self.0.0.directory)?;
// delete from database
let conn = match self.0.connect().await {
Ok(c) => c,
Err(e) => return Err(Error::DatabaseConnection(e.to_string())),
};
let res = execute!(&conn, "DELETE FROM uploads WHERE id = $1", &[&(id as i64)]);
if let Err(e) = res {
return Err(Error::DatabaseError(e.to_string()));
}
self.0.1.remove(format!("atto.upload:{}", id)).await;
// return
Ok(())
}
auto_method!(update_upload_metadata(UploadMetadata) -> "UPDATE uploads SET metadata = $1 WHERE id = $2" --serde --cache-key-tmpl="atto.upload:{}");
}
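A hypothetical sketch of the flow these methods expose; `data` is assumed to be an initialized DataManager, and the owner ID and "avatars" bucket name are example values:
use buckets_core::{DataManager, model::{MediaType, MediaUpload}};
use tetratto_core::model::Result;
async fn example(data: &DataManager, owner_id: usize) -> Result<()> {
    // Insert a row; the bytes themselves are written separately via MediaUpload::write.
    let upload = data
        .create_upload(MediaUpload::new(MediaType::Png, owner_id, "avatars".to_string()))
        .await?;
    // First page of this owner's uploads, 12 per page.
    let first_page = data.get_uploads_by_owner(owner_id, 12, 0).await?;
    assert!(first_page.iter().any(|u| u.id == upload.id));
    // Removes the file from disk first, then the row and its cache entry.
    data.delete_upload(upload.id).await?;
    Ok(())
}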

@@ -0,0 +1,7 @@
pub mod model;
mod database;
pub use database::DataManager;
mod config;
pub use config::Config;

@@ -0,0 +1,122 @@
use pathbufd::PathBufD;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
fs::{exists, remove_file, write},
};
use tetratto_core::model::{Error, Result};
use tetratto_shared::{snow::Snowflake, unix_epoch_timestamp};
#[derive(Serialize, Deserialize, PartialEq, Eq)]
pub enum MediaType {
#[serde(alias = "image/webp")]
Webp,
#[serde(alias = "image/avif")]
Avif,
#[serde(alias = "image/png")]
Png,
#[serde(alias = "image/jpg")]
Jpg,
#[serde(alias = "image/gif")]
Gif,
#[serde(alias = "image/carpgraph")]
Carpgraph,
}
impl MediaType {
pub fn extension(&self) -> &str {
match self {
Self::Webp => "webp",
Self::Avif => "avif",
Self::Png => "png",
Self::Jpg => "jpg",
Self::Gif => "gif",
Self::Carpgraph => "carpgraph",
}
}
pub fn mime(&self) -> String {
format!("image/{}", self.extension())
}
}
#[derive(Serialize, Deserialize)]
pub struct UploadMetadata {
pub what: MediaType,
#[serde(default)]
pub alt: String,
#[serde(default)]
pub kv: HashMap<String, String>,
}
impl UploadMetadata {
pub fn validate_kv(&self) -> Result<()> {
for x in &self.kv {
if x.0.len() > 32 {
return Err(Error::DataTooLong("key".to_string()));
}
if x.1.len() > 128 {
return Err(Error::DataTooLong("value".to_string()));
}
}
Ok(())
}
}
#[derive(Serialize, Deserialize)]
pub struct MediaUpload {
pub id: usize,
pub created: usize,
pub owner: usize,
pub bucket: String,
pub metadata: UploadMetadata,
}
impl MediaUpload {
/// Create a new [`MediaUpload`].
pub fn new(what: MediaType, owner: usize, bucket: String) -> Self {
Self {
id: Snowflake::new().to_string().parse::<usize>().unwrap(),
created: unix_epoch_timestamp(),
owner,
bucket,
metadata: UploadMetadata {
alt: String::new(),
what,
kv: HashMap::new(),
},
}
}
/// Get the path to this upload's file on the file system.
pub fn path(&self, directory: &str) -> PathBufD {
PathBufD::current().extend(&[
directory,
&format!("{}.{}", self.id, self.metadata.what.extension()),
])
}
/// Write to this upload in the file system.
pub fn write(&self, directory: &str, bytes: &[u8]) -> Result<()> {
match write(self.path(directory), bytes) {
Ok(_) => Ok(()),
Err(e) => Err(Error::MiscError(e.to_string())),
}
}
/// Delete this upload in the file system.
pub fn remove(&self, directory: &str) -> Result<()> {
let path = self.path(directory);
if !exists(&path).unwrap() {
return Ok(());
}
match remove_file(path) {
Ok(_) => Ok(()),
Err(e) => Err(Error::MiscError(e.to_string())),
}
}
}
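A short sketch of the file-system half of an upload, assuming the default "buckets" directory already exists; the bucket name is an example value:
use buckets_core::model::{MediaType, MediaUpload};
use tetratto_core::model::Result;
fn store_bytes(bytes: &[u8], owner: usize) -> Result<MediaUpload> {
    // Fresh snowflake ID and timestamp; metadata starts with empty alt text and kv.
    let upload = MediaUpload::new(MediaType::Webp, owner, "avatars".to_string());
    // Writes ./buckets/{id}.webp relative to the current working directory.
    upload.write("buckets", bytes)?;
    Ok(upload)
}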

crates/buckets/Cargo.toml
@@ -0,0 +1,25 @@
[package]
name = "buckets"
description = "Buckets API service"
version = "1.0.0"
edition = "2024"
authors.workspace = true
repository.workspace = true
license.workspace = true
homepage.workspace = true
[dependencies]
tetratto-core = "15.0.2"
tokio = { version = "1.47.1", features = ["macros", "rt-multi-thread"] }
pathbufd = "0.1.4"
tracing = "0.1.41"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
tower-http = { version = "0.6.6", features = [
"trace",
"fs",
"catch-panic",
"set-header",
] }
axum = { version = "0.8.4", features = ["macros", "ws"] }
dotenv = "0.15.0"
buckets-core = { path = "../buckets-core" }

@@ -0,0 +1,74 @@
mod routes;
use axum::{Extension, Router};
use buckets_core::{Config, DataManager};
use std::{env::var, net::SocketAddr, sync::Arc};
use tokio::sync::RwLock;
use tower_http::{
catch_panic::CatchPanicLayer,
trace::{self, TraceLayer},
};
use tracing::{Level, info};
pub(crate) type State = Arc<RwLock<DataManager>>;
#[macro_export]
macro_rules! create_dir_if_not_exists {
($dir_path:expr) => {
if !std::fs::exists(&$dir_path).unwrap() {
std::fs::create_dir($dir_path).unwrap();
}
};
}
#[tokio::main(flavor = "multi_thread")]
async fn main() {
dotenv::dotenv().ok();
tracing_subscriber::fmt()
.with_target(false)
.compact()
.init();
let port = match var("PORT") {
Ok(port) => port.parse::<u16>().expect("port should be a u16"),
Err(_) => 8020,
};
// ...
let database = DataManager::new(Config::read())
.await
.expect("failed to connect to database");
database.init().await.expect("failed to init database");
create_dir_if_not_exists!(&database.0.0.directory);
// create app
let app = Router::new()
.merge(routes::routes())
.layer(Extension(Arc::new(RwLock::new(database))))
.layer(axum::extract::DefaultBodyLimit::max(
var("BODY_LIMIT")
.unwrap_or("8388608".to_string())
.parse::<usize>()
.unwrap(),
))
.layer(
TraceLayer::new_for_http()
.make_span_with(trace::DefaultMakeSpan::new().level(Level::INFO))
.on_response(trace::DefaultOnResponse::new().level(Level::INFO)),
)
.layer(CatchPanicLayer::new());
// ...
let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", port))
.await
.unwrap();
info!("🪣 buckets.");
info!("listening on http://0.0.0.0:{}", port);
axum::serve(
listener,
app.into_make_service_with_connect_info::<SocketAddr>(),
)
.await
.unwrap();
}

@@ -0,0 +1,94 @@
use crate::State;
use axum::{
Extension, Json, Router, body::Body, extract::Path, response::IntoResponse, routing::get,
};
use buckets_core::model::MediaType;
use pathbufd::PathBufD;
use std::{
fs::{File, exists},
io::Read,
};
use tetratto_core::model::{ApiReturn, Error, carp::CarpGraph};
pub fn routes() -> Router {
Router::new()
.route("/{bucket}/{id}", get(get_request))
.route("/{bucket}/{id}/json", get(get_json_request))
}
pub fn read_image(path: PathBufD) -> Vec<u8> {
let mut bytes = Vec::new();
for byte in File::open(path).unwrap().bytes() {
bytes.push(byte.unwrap())
}
bytes
}
// api
pub async fn get_request(
Path((bucket, id)): Path<(String, usize)>,
Extension(data): Extension<State>,
) -> impl IntoResponse {
let data = &(data.read().await);
let upload = match data.get_upload_by_id(id).await {
Ok(u) => u,
Err(e) => {
return Err(Json(e.into()));
}
};
if !upload.bucket.is_empty() && upload.bucket != bucket {
return Err(Json(ApiReturn {
ok: false,
message: Error::MiscError("Bucket mismatch".to_string()).to_string(),
payload: (),
}));
}
// ...
let path = upload.path(&data.0.0.directory);
if !exists(&path).unwrap() {
return Err(Json(Error::GeneralNotFound("file".to_string()).into()));
}
let bytes = read_image(path);
if upload.metadata.what == MediaType::Carpgraph {
// convert to SVG and return
return Ok((
[("Content-Type", "image/svg+xml".to_string())],
Body::from(CarpGraph::from_bytes(bytes).to_svg()),
));
}
Ok((
[("Content-Type", upload.metadata.what.mime())],
Body::from(bytes),
))
}
pub async fn get_json_request(
Path((bucket, id)): Path<(String, usize)>,
Extension(data): Extension<State>,
) -> impl IntoResponse {
let data = &(data.read().await);
let upload = match data.get_upload_by_id(id).await {
Ok(u) => u,
Err(e) => return Json(e.into()),
};
if !upload.bucket.is_empty() && upload.bucket != bucket {
return Json(Error::MiscError("Bucket mismatch".to_string()).into());
}
Json(ApiReturn {
ok: true,
message: "Success".to_string(),
payload: Some(upload),
})
}
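A hypothetical client-side sketch of the two routes above; reqwest is not a dependency of this service and is only assumed here, with the server on its default port:
// Hypothetical client; any HTTP client works the same way.
async fn fetch_upload(bucket: &str, id: usize) -> Result<(), reqwest::Error> {
    let base = "http://localhost:8020";
    // Raw bytes served with the upload's mime type (carpgraph uploads come back as SVG).
    let image = reqwest::get(format!("{base}/{bucket}/{id}")).await?.bytes().await?;
    // JSON envelope: { ok, message, payload: MediaUpload }.
    let meta = reqwest::get(format!("{base}/{bucket}/{id}/json")).await?.text().await?;
    println!("{} bytes, metadata: {}", image.len(), meta);
    Ok(())
}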