add: ability to store entire directories

trisua 2025-06-08 00:42:56 -04:00
parent 232a2fc2d9
commit 65c127b86f
6 changed files with 210 additions and 12 deletions


@@ -22,8 +22,28 @@ Delete file and all chunk off remote:
renbin -i ./path/to/file.ext.toml -e rentry -x
```
Store an entire directory as a ledger:
```
renbin -i ./ -r -s target -s .git -n dir_name -e rentry
```
Note that `-s` (`--skip`) is used to ignore directories and files: any file or directory whose name matches a value passed through `--skip` is ignored.
The `-n` (`--name`) flag is required and sets the ledger's name.
Restore from a ledger:
```
renbin -i ./dir_name.toml -r -e rentry -d
```
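If renbin is used as a library rather than through the CLI, the resulting `dir_name.toml` can be inspected directly. This is a minimal sketch only; it assumes the `Ledger` type added in this commit and the `dir_name.toml` path from the example above:
```
use pathbufd::PathBufD;
use renbin::ledger::Ledger;

fn main() {
    // read the ledger written by the command above
    let ledger = Ledger::read(PathBufD::current().join("dir_name.toml"));
    println!(
        "ledger `{}` tracks {} dirs and {} files",
        ledger.name,
        ledger.dirs.len(),
        ledger.files.len()
    );
}
```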
### Local
You can also store files locally using `-e fs` (`--engine fs`). This mode is generally **_much_** quicker. This is the mode that will be used by default if no engine flag is provided.
Local files are split into chunks of exactly 200 KB, while the rentry engine splits files into chunks of 150 KB. While larger chunks _could_ be used, that's not as fun.
```
```
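The fixed-size chunking described above is easy to picture with a small sketch. The helper below is purely illustrative (the sizes are the ones quoted in the paragraph; the real engines layer encryption and, for rentry, remote uploads on top of this):
```
/// Illustrative only: split a byte buffer into fixed-size chunks,
/// using the sizes quoted above (200 KB for fs, 150 KB for rentry).
fn split_into_chunks(data: &[u8], chunk_size: usize) -> Vec<Vec<u8>> {
    data.chunks(chunk_size).map(|c| c.to_vec()).collect()
}

fn main() {
    let data = vec![0u8; 450 * 1000]; // 450 KB of dummy data
    // fs-style sizing: 200 KB + 200 KB + 50 KB = 3 chunks
    println!("fs: {} chunks", split_into_chunks(&data, 200 * 1000).len());
    // rentry-style sizing: 3 chunks of exactly 150 KB
    println!("rentry: {} chunks", split_into_chunks(&data, 150 * 1000).len());
}
```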


@@ -34,7 +34,6 @@ impl Engine for FsEngine {
            descriptor.chunks.push(id);
        }
-       descriptor.write(PathBufD::current().join(format!("{}.toml", descriptor.name)))?;
        Ok(descriptor)
    }


@@ -145,7 +145,6 @@ impl Engine for RentryEngine {
        }
        // ...
-       descriptor.write(PathBufD::current().join(format!("{}.toml", descriptor.name)))?;
        Ok(descriptor)
    }

src/ledger.rs Normal file

@@ -0,0 +1,68 @@
use crate::FileDescriptor;
use pathbufd::PathBufD;
use serde::{Deserialize, Serialize};
use std::{
    fs::{metadata, read_dir, read_to_string, write},
    io::Result,
};

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Ledger {
    pub name: String,
    pub dirs: Vec<String>,
    pub files: Vec<FileDescriptor>,
}

impl Ledger {
    /// Read a [`Ledger`] from the given `path`.
    pub fn read(path: PathBufD) -> Self {
        toml::from_str(&read_to_string(path).expect("failed to read file"))
            .expect("failed to deserialize file")
    }

    /// Write a [`Ledger`] into the given `path`.
    pub fn write(&self, path: PathBufD) -> Result<()> {
        write(
            path,
            toml::to_string_pretty(&self).expect("failed to serialize file"),
        )
    }

    /// Recursively collect paths from a directory.
    ///
    /// # Returns
    /// `(files, dirs)`
    pub fn collect_paths(root: &PathBufD, skip: &Vec<String>) -> (Vec<PathBufD>, Vec<String>) {
        let mut dirs = Vec::new();
        let mut files = Vec::new();

        for x in read_dir(&root).expect("failed to read dir") {
            let x = x.unwrap();
            let path = root.join(x.file_name());

            if skip.contains(&x.file_name().into_string().unwrap()) {
                continue;
            }

            let stat = metadata(&path).expect("failed to stat file");

            if stat.is_dir() {
                dirs.push(x.path().to_str().unwrap().replace("../", ""));

                let (files_, dirs_) = Self::collect_paths(&path, skip);

                for file in files_ {
                    files.push(file);
                }

                for dir in dirs_ {
                    dirs.push(dir);
                }
            } else {
                files.push(x.path().into());
            }
        }

        (files, dirs)
    }
}
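As a rough usage sketch of the new module: the `docs` directory name below is made up, and `files` is left empty because producing real `FileDescriptor`s is the engines' job (as `main.rs` below shows); only `collect_paths` and `write` from this file are exercised:
```
use pathbufd::PathBufD;
use renbin::ledger::Ledger;

fn main() -> std::io::Result<()> {
    // skip the same directories the README example skips
    let skip = vec!["target".to_string(), ".git".to_string()];

    // recursively collect everything under ./docs (hypothetical path)
    let (files, dirs) = Ledger::collect_paths(&PathBufD::current().join("docs"), &skip);
    println!("found {} files in {} sub-directories", files.len(), dirs.len());

    // write a ledger describing the layout; `files` stays empty here
    // because turning paths into FileDescriptors requires an engine
    Ledger {
        name: "docs".to_string(),
        dirs,
        files: Vec::new(),
    }
    .write(PathBufD::current().join("docs.toml"))
}
```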


@@ -1,4 +1,5 @@
pub mod engine;
pub mod ledger;

use aes_gcm::{
    Aes256Gcm, Nonce,


@@ -1,13 +1,16 @@
extern crate renbin;

use clap::Parser;
use pathbufd::PathBufD;
use renbin::{
    FileDescriptor,
    engine::{Engine, fs::FsEngine, rentry::RentryEngine},
    ledger::Ledger,
};
use std::{
-   fs::{read, remove_file},
    fs::{create_dir, read, remove_file},
    path::PathBuf,
    str::FromStr,
};

#[derive(Parser, Debug)]
@@ -17,8 +20,14 @@ struct Args {
    decode: bool,
    #[arg(short = 'x', long = "delete", action)]
    delete: bool,
    #[arg(short = 'r', long = "recursive", action)]
    recursive: bool,
    #[arg(short = 's', long = "skip", action)]
    skip: Vec<String>,
    #[arg(short = 'n', long = "name")]
    name: Option<String>,
    #[arg(short = 'i', long = "input")]
-   path: String,
    input: String,
    #[arg(short = 'e', long = "engine")]
    #[clap(default_value = "fs")]
    engine: String,
@@ -28,9 +37,103 @@ struct Args {
async fn main() {
    let args = Args::parse();

    if args.recursive
        && let Some(name) = args.name
        && !args.decode
    {
        // build ledger
        let pathbuf = PathBuf::from_str(&args.input).unwrap();
        let (files, dirs) = Ledger::collect_paths(&pathbuf.clone().into(), &args.skip);

        Ledger {
            name: name.clone(),
            dirs,
            files: {
                let mut descriptors = Vec::new();

                if args.engine == "fs" {
                    for file in files {
                        let descriptor = FsEngine
                            .process(
                                file.as_path()
                                    .to_str()
                                    .unwrap()
                                    .to_string()
                                    .replace("../", ""),
                                read(file).expect("failed to read file"),
                            )
                            .await
                            .expect("failed to process file");

                        descriptors.push(descriptor);
                    }
                } else if args.engine == "rentry" {
                    let mut engine = RentryEngine::new();
                    engine.auth().await.expect("failed to extract csrf token");

                    for file in files {
                        let descriptor = engine
                            .process(
                                file.as_path()
                                    .to_str()
                                    .unwrap()
                                    .to_string()
                                    .replace("../", ""),
                                read(file).expect("failed to read file"),
                            )
                            .await
                            .expect("failed to process file");

                        descriptors.push(descriptor);
                    }
                } else {
                    unreachable!("invalid engine")
                };

                descriptors
            },
        }
        .write(PathBuf::from_str(&format!("{}.toml", name)).unwrap().into())
        .expect("failed to write ledger");

        return;
    } else if args.recursive && args.decode {
        // decode ledger
        let path = PathBuf::from(args.input);
        let ledger = Ledger::read(path.clone().into());

        // create dirs
        for dir in ledger.dirs {
            create_dir(dir).expect("failed to create dir");
        }

        // reconstruct files
        for file in ledger.files {
            if args.engine == "fs" {
                FsEngine
                    .reconstruct(file)
                    .await
                    .expect("failed to reconstruct file");
            } else if args.engine == "rentry" {
                let mut engine = RentryEngine::new();
                engine.auth().await.expect("failed to extract csrf token");

                engine
                    .reconstruct(file)
                    .await
                    .expect("failed to reconstruct file");
            } else {
                panic!("unknown engine type");
            };
        }

        // ...
        return;
    }
    if args.delete {
        // delete
-       let path = PathBuf::from(args.path);
        let path = PathBuf::from(args.input);

        if args.engine == "fs" {
            FsEngine
@@ -55,27 +158,35 @@ async fn main() {
    if !args.decode {
        // encode
-       let pathbuf = PathBuf::from(args.path);
        let pathbuf = PathBuf::from(args.input);

        if args.engine == "fs" {
-           FsEngine
            let descriptor = FsEngine
                .process(
                    pathbuf.file_name().unwrap().to_str().unwrap().to_string(),
                    read(pathbuf).expect("failed to read file"),
                )
                .await
-               .expect("failed to process file")
                .expect("failed to process file");

            descriptor
                .write(PathBufD::current().join(format!("{}.toml", descriptor.name)))
                .expect("failed to write descriptor");
        } else if args.engine == "rentry" {
            let mut engine = RentryEngine::new();
            engine.auth().await.expect("failed to extract csrf token");

-           engine
            let descriptor = engine
                .process(
                    pathbuf.file_name().unwrap().to_str().unwrap().to_string(),
                    read(pathbuf).expect("failed to read file"),
                )
                .await
-               .expect("failed to process file")
                .expect("failed to process file");

            descriptor
                .write(PathBufD::current().join(format!("{}.toml", descriptor.name)))
                .expect("failed to write descriptor");
        } else {
            panic!("unknown engine type");
        };
@@ -83,7 +194,7 @@ async fn main() {
        // decode
        if args.engine == "fs" {
            FsEngine
-               .reconstruct(FileDescriptor::read(PathBuf::from(args.path).into()))
                .reconstruct(FileDescriptor::read(PathBuf::from(args.input).into()))
                .await
                .expect("failed to reconstruct file");
        } else if args.engine == "rentry" {
@@ -91,7 +202,7 @@ async fn main() {
            engine.auth().await.expect("failed to extract csrf token");
            engine
-               .reconstruct(FileDescriptor::read(PathBuf::from(args.path).into()))
                .reconstruct(FileDescriptor::read(PathBuf::from(args.input).into()))
                .await
                .expect("failed to reconstruct file");
        } else {