use crate::{
    DataManager,
    model::{MediaUpload, UploadMetadata},
};
use oiseau::{PostgresRow, cache::Cache, execute, get, params, query_row, query_rows};
use tetratto_core::auto_method;
use tetratto_core::model::{Error, Result};

impl DataManager {
    /// Get a [`MediaUpload`] from an SQL row.
    ///
    /// # Panics
    /// Panics if the stored `metadata` column is not valid JSON for
    /// [`UploadMetadata`]. Rows written through [`Self::create_upload`] are
    /// always serialized with `serde_json`, so a panic here indicates a
    /// corrupted or hand-edited row.
    pub(crate) fn get_upload_from_row(x: &PostgresRow) -> MediaUpload {
        MediaUpload {
            id: get!(x->0(i64)) as usize,
            created: get!(x->1(i64)) as usize,
            owner: get!(x->2(i64)) as usize,
            bucket: get!(x->3(String)),
            metadata: serde_json::from_str(&get!(x->4(String))).unwrap(),
        }
    }

    auto_method!(get_upload_by_id(usize as i64)@get_upload_from_row -> "SELECT * FROM uploads WHERE id = $1" --name="upload" --returns=MediaUpload --cache-key-tmpl="atto.upload:{}");

    /// Get an upload by its ID and bucket.
    ///
    /// # Arguments
    /// * `id` - the ID of the upload
    /// * `bucket` - the bucket of the upload; an empty string falls back to an
    ///   ID-only (cached) lookup via [`Self::get_upload_by_id`]
    pub async fn get_upload_by_id_bucket(&self, id: usize, bucket: &str) -> Result<MediaUpload> {
        if bucket.is_empty() {
            // no bucket to disambiguate with; use the cached ID lookup
            return self.get_upload_by_id(id).await;
        }

        let conn = match self.0.connect().await {
            Ok(c) => c,
            Err(e) => return Err(Error::DatabaseConnection(e.to_string())),
        };

        let res = query_row!(
            &conn,
            "SELECT * FROM uploads WHERE id = $1 AND bucket = $2",
            &[&(id as i64), &bucket],
            |x| { Ok(Self::get_upload_from_row(x)) }
        );

        // a failed query here means the row doesn't exist
        res.map_err(|_| Error::GeneralNotFound("upload".to_string()))
    }

    /// Get all uploads (paginated).
    ///
    /// # Arguments
    /// * `batch` - the limit of items in each page
    /// * `page` - the page number
    pub async fn get_uploads(&self, batch: usize, page: usize) -> Result<Vec<MediaUpload>> {
        let conn = match self.0.connect().await {
            Ok(c) => c,
            Err(e) => return Err(Error::DatabaseConnection(e.to_string())),
        };

        let res = query_rows!(
            &conn,
            "SELECT * FROM uploads ORDER BY created DESC LIMIT $1 OFFSET $2",
            &[&(batch as i64), &((page * batch) as i64)],
            |x| { Self::get_upload_from_row(x) }
        );

        res.map_err(|_| Error::GeneralNotFound("upload".to_string()))
    }

    /// Get all uploads by their owner (paginated).
/// /// # Arguments /// * `owner` - the ID of the owner of the upload /// * `batch` - the limit of items in each page /// * `page` - the page number pub async fn get_uploads_by_owner( &self, owner: usize, batch: usize, page: usize, ) -> Result> { let conn = match self.0.connect().await { Ok(c) => c, Err(e) => return Err(Error::DatabaseConnection(e.to_string())), }; let res = query_rows!( &conn, "SELECT * FROM uploads WHERE owner = $1 ORDER BY created DESC LIMIT $2 OFFSET $3", &[&(owner as i64), &(batch as i64), &((page * batch) as i64)], |x| { Self::get_upload_from_row(x) } ); if res.is_err() { return Err(Error::GeneralNotFound("upload".to_string())); } Ok(res.unwrap()) } /// Get all uploads by their owner. /// /// # Arguments /// * `owner` - the ID of the owner of the upload pub async fn get_uploads_by_owner_all(&self, owner: usize) -> Result> { let conn = match self.0.connect().await { Ok(c) => c, Err(e) => return Err(Error::DatabaseConnection(e.to_string())), }; let res = query_rows!( &conn, "SELECT * FROM uploads WHERE owner = $1 ORDER BY created DESC", &[&(owner as i64)], |x| { Self::get_upload_from_row(x) } ); if res.is_err() { return Err(Error::GeneralNotFound("upload".to_string())); } Ok(res.unwrap()) } /// Create a new upload in the database. /// /// Please note that the file must be manually written afterwards. You can use /// the returned [`MediaUpload`] to retrieve the correct path to write the file. 
/// /// # Arguments /// * `data` - a mock [`MediaUpload`] object to insert pub async fn create_upload(&self, data: MediaUpload) -> Result { let conn = match self.0.connect().await { Ok(c) => c, Err(e) => return Err(Error::DatabaseConnection(e.to_string())), }; data.metadata.validate_kv()?; let res = execute!( &conn, "INSERT INTO uploads VALUES ($1, $2, $3, $4, $5)", params![ &(data.id as i64), &(data.created as i64), &(data.owner as i64), &data.bucket, &serde_json::to_string(&data.metadata).unwrap().as_str(), ] ); if let Err(e) = res { return Err(Error::DatabaseError(e.to_string())); } // return Ok(data) } /// Delete an upload given its `id`. /// /// # Warning /// It's recommended that you use [`Self::delete_upload_with_id`] instead, /// as the table's primary key is on `(id, bucket)`, not `id`. pub async fn delete_upload(&self, id: usize) -> Result<()> { // if !user.permissions.check(FinePermission::MANAGE_UPLOADS) { // return Err(Error::NotAllowed); // } // delete file // it's most important that the file gets off the file system first, even // if there's an issue in the database // // the actual file takes up much more space than the database entry. let upload = self.get_upload_by_id(id).await?; upload.remove(&self.0.0.directory)?; // delete from database let conn = match self.0.connect().await { Ok(c) => c, Err(e) => return Err(Error::DatabaseConnection(e.to_string())), }; let res = execute!(&conn, "DELETE FROM uploads WHERE id = $1", &[&(id as i64)]); if let Err(e) = res { return Err(Error::DatabaseError(e.to_string())); } self.0.1.remove(format!("atto.upload:{}", id)).await; // return Ok(()) } /// Delete an upload given its `id` and `bucket`. 
pub async fn delete_upload_with_bucket(&self, id: usize, bucket: &str) -> Result<()> { // if !user.permissions.check(FinePermission::MANAGE_UPLOADS) { // return Err(Error::NotAllowed); // } if bucket.is_empty() { return self.delete_upload(id).await; } // delete file // it's most important that the file gets off the file system first, even // if there's an issue in the database // // the actual file takes up much more space than the database entry. let upload = self.get_upload_by_id_bucket(id, bucket).await?; upload.remove(&self.0.0.directory)?; // delete from database let conn = match self.0.connect().await { Ok(c) => c, Err(e) => return Err(Error::DatabaseConnection(e.to_string())), }; let res = execute!( &conn, "DELETE FROM uploads WHERE id = $1 AND bucket = $2", &[&(id as i64), &bucket] ); if let Err(e) = res { return Err(Error::DatabaseError(e.to_string())); } self.0.1.remove(format!("atto.upload:{}", id)).await; // return Ok(()) } auto_method!(update_upload_metadata(UploadMetadata) -> "UPDATE uploads SET metadata = $1 WHERE id = $2" --serde --cache-key-tmpl="atto.upload:{}"); }