Initial API implementation, built with Rust and Axum

This commit is contained in:
Mercurio 2025-05-28 22:34:21 +02:00
parent 1119a55937
commit fd019654ad
25 changed files with 2005 additions and 0 deletions

70
Dockerfile Normal file
View file

@ -0,0 +1,70 @@
# Multi-stage build for LiteCloud
#
# Stage 1: Build the Rust backend
FROM rust:1.70-slim AS rust-builder
WORKDIR /app
# Native build dependencies needed by crates that link against C libraries.
RUN apt-get update && apt-get install -y \
    pkg-config \
    libssl-dev \
    && rm -rf /var/lib/apt/lists/*
# Copy Rust project files
COPY api /app
# Build the Rust application in release mode
RUN cargo build --release

# Stage 2: Build the Flutter frontend
FROM debian:bullseye-slim AS flutter-builder
WORKDIR /app
# Toolchain dependencies required to bootstrap the Flutter SDK.
RUN apt-get update && apt-get install -y \
    curl \
    git \
    unzip \
    xz-utils \
    && rm -rf /var/lib/apt/lists/*
# Shallow-clone the stable branch directly: far smaller download than the full
# history, and it removes the need for `flutter channel stable && flutter upgrade`.
RUN git clone --depth 1 --branch stable https://github.com/flutter/flutter.git /flutter
ENV PATH="/flutter/bin:${PATH}"
RUN flutter config --enable-web
# Copy Flutter project files
COPY frontend /app
# Build Flutter web app
RUN flutter build web --release

# Stage 3: Final runtime image (no build toolchains)
FROM debian:bullseye-slim
WORKDIR /app
# CA certificates so the service can make outbound TLS connections at runtime.
RUN apt-get update && apt-get install -y \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/*
# Create data directory
RUN mkdir -p /data && chmod 755 /data
# Copy the Rust binary from the rust-builder stage
COPY --from=rust-builder /app/target/release/litecloud /app/litecloud
# Copy the Flutter web build from the flutter-builder stage
COPY --from=flutter-builder /app/build/web /app/static
# Expose the port the server listens on
EXPOSE 8080
# Default log level; overridable at `docker run` time.
ENV RUST_LOG=info
# Run the server
CMD ["/app/litecloud"]

31
api/Cargo.toml Normal file
View file

@ -0,0 +1,31 @@
[package]
name = "litecloud"
version = "0.1.0"
edition = "2021"

[dependencies]
# Web framework and HTTP middleware
axum = { version = "0.6", features = ["multipart"] }
tokio = { version = "1", features = ["full"] }
tower = "0.4"
tower-http = { version = "0.4", features = ["fs", "trace", "cors"] }
# Database access (compile-time checked queries)
sqlx = { version = "0.7", features = ["runtime-tokio", "tls-rustls", "postgres", "uuid", "time", "json"] }
# Authentication and cryptography
argon2 = "0.5"
jsonwebtoken = "8"
aes-gcm = "0.10"
rand = "0.8"
base64 = "0.21"
# Ids, serialization, time
uuid = { version = "1", features = ["v4", "serde"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
thiserror = "1"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
dotenvy = "0.15"
once_cell = "1"
time = { version = "0.3", features = ["serde"] }
validator = { version = "0.16", features = ["derive"] }
futures = "0.3"
bytes = "1"
tokio-util = { version = "0.7", features = ["io"] }
tokio-stream = "0.1"
async-trait = "0.1"
# MIME detection for uploads: routes/files.rs calls `mime_guess::from_path`,
# but this dependency was missing from the original manifest.
mime_guess = "2"

60
api/migrations/schema.sql Normal file
View file

@ -0,0 +1,60 @@
-- Users. Column names follow models/user.rs (`display_name`, not `username`:
-- the Rust model never reads or writes a `username` column, and a NOT NULL
-- `username` would make its INSERT fail).
CREATE TABLE users (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    email VARCHAR(255) NOT NULL UNIQUE,
    display_name VARCHAR(255),
    password_hash VARCHAR(255) NOT NULL,
    role VARCHAR(50) NOT NULL DEFAULT 'user',
    storage_used BIGINT NOT NULL DEFAULT 0,
    storage_quota BIGINT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Files and directories (single table, discriminated by file_type).
-- encryption_key / encryption_iv were referenced by models/file.rs but missing
-- from the original schema; both are nullable (directories carry neither).
CREATE TABLE files (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    name VARCHAR(255) NOT NULL,
    file_type VARCHAR(50) NOT NULL,
    mime_type VARCHAR(255),
    size BIGINT NOT NULL DEFAULT 0,
    owner_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    parent_id UUID REFERENCES files(id) ON DELETE CASCADE,
    path_depth INT NOT NULL DEFAULT 0,
    encryption_key TEXT,
    encryption_iv TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    CONSTRAINT valid_path_depth CHECK (path_depth >= 0 AND path_depth <= 20)
);
CREATE INDEX idx_files_parent_id ON files(parent_id);
CREATE INDEX idx_files_owner_id ON files(owner_id);
-- Named permission levels referenced by shares.
CREATE TABLE permissions (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    name VARCHAR(50) NOT NULL UNIQUE,
    can_read BOOLEAN NOT NULL DEFAULT TRUE,
    can_write BOOLEAN NOT NULL DEFAULT FALSE,
    can_share BOOLEAN NOT NULL DEFAULT FALSE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
-- Shares: recipient_id NULL means a link share usable by anyone with the key.
CREATE TABLE shares (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    file_id UUID NOT NULL REFERENCES files(id) ON DELETE CASCADE,
    owner_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    recipient_id UUID REFERENCES users(id) ON DELETE CASCADE,
    permission_id UUID NOT NULL REFERENCES permissions(id),
    access_key VARCHAR(255) NOT NULL UNIQUE,
    expires_at TIMESTAMPTZ,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_shares_file_id ON shares(file_id);
CREATE INDEX idx_shares_owner_id ON shares(owner_id);
CREATE INDEX idx_shares_recipient_id ON shares(recipient_id);
CREATE INDEX idx_shares_access_key ON shares(access_key);
-- Default permission levels (models/permission.rs seeds the same names).
INSERT INTO permissions (name, can_read, can_write, can_share) VALUES
('viewer', TRUE, FALSE, FALSE),
('editor', TRUE, TRUE, FALSE),
('owner', TRUE, TRUE, TRUE);

51
api/src/config.rs Normal file
View file

@ -0,0 +1,51 @@
use std::path::PathBuf;
/// Runtime configuration, read once from the process environment at startup.
pub struct Config {
    pub database_url: String,
    pub jwt_secret: String,
    pub jwt_expiration: i64,
    pub master_key: String,
    pub storage_path: String,
    pub default_user_quota: i64,
}

impl Config {
    /// Builds the configuration from environment variables.
    ///
    /// Required (panics when absent): DATABASE_URL, JWT_SECRET, MASTER_KEY.
    /// Optional with defaults: JWT_EXPIRATION (86400 s = 24 h),
    /// STORAGE_PATH ("./storage"), DEFAULT_USER_QUOTA (5368709120 = 5 GiB).
    pub fn from_env() -> Self {
        // Small helper for optional string variables with a fallback value.
        fn var_or(key: &str, default: &str) -> String {
            std::env::var(key).unwrap_or_else(|_| default.to_string())
        }
        Self {
            database_url: std::env::var("DATABASE_URL").expect("DATABASE_URL must be set"),
            jwt_secret: std::env::var("JWT_SECRET").expect("JWT_SECRET must be set"),
            jwt_expiration: var_or("JWT_EXPIRATION", "86400")
                .parse::<i64>()
                .expect("JWT_EXPIRATION must be a valid number"),
            master_key: std::env::var("MASTER_KEY").expect("MASTER_KEY must be set"),
            storage_path: var_or("STORAGE_PATH", "./storage"),
            default_user_quota: var_or("DEFAULT_USER_QUOTA", "5368709120")
                .parse::<i64>()
                .expect("DEFAULT_USER_QUOTA must be a valid number"),
        }
    }

    /// Per-user storage root: `<storage_path>/<user_id>`.
    pub fn get_user_storage_path(&self, user_id: &str) -> PathBuf {
        PathBuf::from(&self.storage_path).join(user_id)
    }
}

119
api/src/error.rs Normal file
View file

@ -0,0 +1,119 @@
use axum::{
http::StatusCode,
response::{IntoResponse, Response},
Json,
};
use serde_json::json;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum AppError {
#[error("Authentication required")]
Unauthorized,
#[error("Access denied")]
AccessDenied,
#[error("User not found")]
UserNotFound,
#[error("User already exists")]
UserAlreadyExists,
#[error("Invalid credentials")]
InvalidCredentials,
#[error("Invalid or expired token")]
InvalidToken,
#[error("File not found")]
FileNotFound,
#[error("File already exists")]
FileAlreadyExists,
#[error("Directory already exists")]
DirectoryAlreadyExists,
#[error("Not a directory")]
NotADirectory,
#[error("Storage quota exceeded")]
StorageQuotaExceeded,
#[error("Path too deep")]
PathTooDeep,
#[error("Database error: {0}")]
DatabaseError(#[from] sqlx::Error),
#[error("Password hashing error: {0}")]
PasswordHashingError(#[from] argon2::password_hash::Error),
#[error("JWT error: {0}")]
JwtError(#[from] jsonwebtoken::errors::Error),
#[error("Encryption error: {0}")]
EncryptionError(String),
#[error("IO error: {0}")]
IoError(#[from] std::io::Error),
#[error("Invalid input: {0}")]
ValidationError(String),
#[error("Internal server error")]
InternalServerError,
}
impl IntoResponse for AppError {
fn into_response(self) -> Response {
let (status, error_message) = match self {
Self::Unauthorized => (StatusCode::UNAUTHORIZED, self.to_string()),
Self::AccessDenied => (StatusCode::FORBIDDEN, self.to_string()),
Self::UserNotFound | Self::FileNotFound => (StatusCode::NOT_FOUND, self.to_string()),
Self::UserAlreadyExists | Self::FileAlreadyExists | Self::DirectoryAlreadyExists => {
(StatusCode::CONFLICT, self.to_string())
}
Self::InvalidCredentials | Self::InvalidToken => {
(StatusCode::UNAUTHORIZED, self.to_string())
}
Self::NotADirectory | Self::ValidationError(_) => {
(StatusCode::BAD_REQUEST, self.to_string())
}
Self::StorageQuotaExceeded => {
(StatusCode::PAYLOAD_TOO_LARGE, self.to_string())
}
Self::PathTooDeep => (StatusCode::BAD_REQUEST, self.to_string()),
Self::DatabaseError(e) => {
tracing::error!("Database error: {:?}", e);
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
}
Self::PasswordHashingError(e) => {
tracing::error!("Password hashing error: {:?}", e);
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
}
Self::JwtError(e) => {
tracing::error!("JWT error: {:?}", e);
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
}
Self::EncryptionError(e) => {
tracing::error!("Encryption error: {}", e);
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
}
Self::IoError(e) => {
tracing::error!("IO error: {:?}", e);
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
}
Self::InternalServerError => {
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
}
};
let body = Json(json!({
"error": error_message
}));
(status, body).into_response()
}
}

94
api/src/main.rs Normal file
View file

@ -0,0 +1,94 @@
use axum::{
error_handling::HandleErrorLayer,
extract::Extension,
http::{HeaderValue, Method, StatusCode},
routing::get,
Router,
};
use dotenvy::dotenv;
use sqlx::postgres::PgPoolOptions;
use std::{env, net::SocketAddr, path::PathBuf, sync::Arc, time::Duration};
use tower::ServiceBuilder;
use tower_http::{cors::CorsLayer, services::ServeDir, trace::TraceLayer};
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
mod config;
mod error;
mod models;
mod routes;
mod services;
mod utils;
use config::Config;
use error::AppError;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
dotenv().ok();
tracing_subscriber::registry()
.with(tracing_subscriber::EnvFilter::new(
env::var("RUST_LOG").unwrap_or_else(|_| "info,tower_http=debug".into()),
))
.with(tracing_subscriber::fmt::layer())
.init();
let config = Config::from_env();
let config = Arc::new(config);
let storage_path = PathBuf::from(&config.storage_path);
if !storage_path.exists() {
std::fs::create_dir_all(&storage_path)?;
}
let pool = PgPoolOptions::new()
.max_connections(10)
.connect(&config.database_url)
.await?
sqlx::migrate!("./migrations")
.run(&pool)
.await?
tracing::info!("Database migrations applied successfully");
let encryption_service = services::encryption::EncryptionService::new(&config.master_key);
let cors = CorsLayer::new()
.allow_origin("*".parse::<HeaderValue>().unwrap())
.allow_methods([Method::GET, Method::POST, Method::PUT, Method::DELETE])
.allow_headers([axum::http::header::CONTENT_TYPE, axum::http::header::AUTHORIZATION]);
let api_router = Router::new()
.nest("/api", routes::api_routes())
.layer(
ServiceBuilder::new()
.layer(HandleErrorLayer::new(|error| async move {
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Unhandled error: {}", error),
)
}))
.layer(TraceLayer::new_for_http())
.layer(Extension(pool.clone()))
.layer(Extension(Arc::clone(&config)))
.layer(Extension(encryption_service)),
);
let static_files_service = ServeDir::new("static");
let static_router = Router::new().nest_service("/", static_files_service.clone());
let app = Router::new()
.merge(api_router)
.fallback_service(static_router)
.layer(cors);
let addr = SocketAddr::from(([0, 0, 0, 0], 8080));
tracing::info!("Listening on {}", addr);
axum::Server::bind(&addr)
.serve(app.into_make_service())
.await?
Ok(())
}

264
api/src/models/file.rs Normal file
View file

@ -0,0 +1,264 @@
use serde::{Deserialize, Serialize};
use sqlx::{postgres::PgPool, FromRow};
use time::OffsetDateTime;
use uuid::Uuid;
use crate::error::AppError;
#[derive(Debug, Serialize, Deserialize, FromRow, Clone, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum FileType {
File,
Directory,
}
/// A row of the `files` table: either a regular file or a directory
/// (see [`FileType`]); directories have `size == 0` and no encryption material.
#[derive(Debug, Serialize, Deserialize, FromRow)]
pub struct File {
    pub id: Uuid,
    pub name: String,
    pub file_type: FileType,
    // MIME type for regular files; None for directories.
    pub mime_type: Option<String>,
    // Size in bytes; directories are stored as 0.
    pub size: i64,
    pub owner_id: Uuid,
    // None means the entry lives at the user's root level.
    pub parent_id: Option<Uuid>,
    // NOTE(review): these two columns are selected/inserted by the queries in
    // `impl File` but do not exist in migrations/schema.sql — confirm the
    // schema is updated to include them.
    pub encryption_key: Option<String>, // Per-file key, encrypted with the master key
    pub encryption_iv: Option<String>,  // Initialization vector for AES-GCM
    pub created_at: OffsetDateTime,
    pub updated_at: OffsetDateTime,
}
/// Input payload for creating a file record (see [`File::create_file`]).
/// NOTE(review): routes::files builds this with a plain `String` mime_type and
/// without `file_type` — confirm the upload handler matches this shape.
#[derive(Debug, Deserialize)]
pub struct CreateFileDto {
    pub name: String,
    pub file_type: FileType,
    pub mime_type: Option<String>,
    // Size in bytes as reported by the upload.
    pub size: i64,
    // None places the file at the user's root level.
    pub parent_id: Option<Uuid>,
}
/// Input payload for creating a directory (see [`File::create_directory`]).
#[derive(Debug, Deserialize)]
pub struct CreateDirectoryDto {
    pub name: String,
    // None places the directory at the user's root level.
    pub parent_id: Option<Uuid>,
}
impl File {
pub async fn create_file(
pool: &PgPool,
dto: CreateFileDto,
owner_id: Uuid,
encryption_key: Option<String>,
encryption_iv: Option<String>,
) -> Result<Self, AppError> {
// Validate parent directory if provided
if let Some(parent_id) = dto.parent_id {
let parent = Self::find_by_id(pool, parent_id).await?;
if parent.file_type != FileType::Directory {
return Err(AppError::NotADirectory);
}
// Check if user has access to parent directory
if parent.owner_id != owner_id {
// TODO: Check if user has write permission through sharing
return Err(AppError::AccessDenied);
}
}
// Check for duplicate filename in the same directory
let existing_file = sqlx::query!("SELECT id FROM files WHERE name = $1 AND parent_id IS NOT DISTINCT FROM $2 AND owner_id = $3",
dto.name, dto.parent_id, owner_id)
.fetch_optional(pool)
.await?;
if existing_file.is_some() {
return Err(AppError::FileAlreadyExists);
}
// Create the file record
let file = sqlx::query_as!(File,
r#"INSERT INTO files (id, name, file_type, mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
RETURNING id, name, file_type as "file_type: FileType", mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at"#,
Uuid::new_v4(),
dto.name,
dto.file_type as FileType,
dto.mime_type,
dto.size,
owner_id,
dto.parent_id,
encryption_key,
encryption_iv,
OffsetDateTime::now_utc(),
OffsetDateTime::now_utc()
)
.fetch_one(pool)
.await?;
Ok(file)
}
pub async fn create_directory(
pool: &PgPool,
dto: CreateDirectoryDto,
owner_id: Uuid,
) -> Result<Self, AppError> {
// Validate parent directory if provided
if let Some(parent_id) = dto.parent_id {
let parent = Self::find_by_id(pool, parent_id).await?;
if parent.file_type != FileType::Directory {
return Err(AppError::NotADirectory);
}
// Check if user has access to parent directory
if parent.owner_id != owner_id {
// TODO: Check if user has write permission through sharing
return Err(AppError::AccessDenied);
}
}
// Check for duplicate directory name in the same parent
let existing_dir = sqlx::query!("SELECT id FROM files WHERE name = $1 AND parent_id IS NOT DISTINCT FROM $2 AND owner_id = $3 AND file_type = 'directory'",
dto.name, dto.parent_id, owner_id)
.fetch_optional(pool)
.await?;
if existing_dir.is_some() {
return Err(AppError::DirectoryAlreadyExists);
}
// Create the directory record
let directory = sqlx::query_as!(File,
r#"INSERT INTO files (id, name, file_type, mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
RETURNING id, name, file_type as "file_type: FileType", mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at"#,
Uuid::new_v4(),
dto.name,
FileType::Directory,
None as Option<String>,
0i64,
owner_id,
dto.parent_id,
None as Option<String>,
None as Option<String>,
OffsetDateTime::now_utc(),
OffsetDateTime::now_utc()
)
.fetch_one(pool)
.await?;
Ok(directory)
}
pub async fn find_by_id(pool: &PgPool, id: Uuid) -> Result<Self, AppError> {
let file = sqlx::query_as!(File,
r#"SELECT id, name, file_type as "file_type: FileType", mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at
FROM files WHERE id = $1"#,
id
)
.fetch_optional(pool)
.await?
.ok_or(AppError::FileNotFound)?;
Ok(file)
}
pub async fn list_directory(
pool: &PgPool,
directory_id: Option<Uuid>,
user_id: Uuid,
) -> Result<Vec<Self>, AppError> {
// If directory_id is None, list files at root level for the user
let files = if let Some(dir_id) = directory_id {
// Verify directory exists and user has access
let directory = Self::find_by_id(pool, dir_id).await?;
if directory.file_type != FileType::Directory {
return Err(AppError::NotADirectory);
}
// Check if user has access to this directory
if directory.owner_id != user_id {
// TODO: Check if user has read permission through sharing
return Err(AppError::AccessDenied);
}
sqlx::query_as!(File,
r#"SELECT id, name, file_type as "file_type: FileType", mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at
FROM files WHERE parent_id = $1 ORDER BY file_type, name"#,
dir_id
)
.fetch_all(pool)
.await?
} else {
// List root level files for the user
sqlx::query_as!(File,
r#"SELECT id, name, file_type as "file_type: FileType", mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at
FROM files WHERE parent_id IS NULL AND owner_id = $1 ORDER BY file_type, name"#,
user_id
)
.fetch_all(pool)
.await?
};
Ok(files)
}
pub async fn delete(
pool: &PgPool,
id: Uuid,
user_id: Uuid,
) -> Result<(), AppError> {
// Find the file/directory
let file = Self::find_by_id(pool, id).await?;
// Check if user has permission to delete
if file.owner_id != user_id {
// TODO: Check if user has write permission through sharing
return Err(AppError::AccessDenied);
}
// If it's a directory, recursively delete all contents
if file.file_type == FileType::Directory {
// Get all files in this directory
let files_in_dir = Self::list_directory(pool, Some(id), user_id).await?;
// Recursively delete each file/subdirectory
for file in files_in_dir {
Self::delete(pool, file.id, user_id).await?;
}
}
// Delete the file/directory record
sqlx::query!("DELETE FROM files WHERE id = $1", id)
.execute(pool)
.await?;
Ok(())
}
pub async fn get_file_path(pool: &PgPool, id: Uuid) -> Result<String, AppError> {
let file = Self::find_by_id(pool, id).await?;
// Build the path by traversing parent directories
let mut path_parts = vec![file.name.clone()];
let mut current_parent_id = file.parent_id;
// Prevent infinite loops by limiting depth
let mut depth = 0;
const MAX_DEPTH: usize = 100;
while let Some(parent_id) = current_parent_id {
if depth >= MAX_DEPTH {
return Err(AppError::PathTooDeep);
}
let parent = Self::find_by_id(pool, parent_id).await?;
path_parts.push(parent.name.clone());
current_parent_id = parent.parent_id;
depth += 1;
}
// Reverse to get the correct order (root -> leaf)
path_parts.reverse();
// Join with path separator
Ok(path_parts.join("/"))
}
}

9
api/src/models/mod.rs Normal file
View file

@ -0,0 +1,9 @@
// Data models for LiteCloud: users, files, shares and permission levels.
pub mod user;
pub mod file;
pub mod share;
pub mod permission;
// Convenience re-exports of the primary model types.
// NOTE(review): the original also re-exported `share::ShareType` and
// `permission::Role`, neither of which is defined in its module, so those
// re-exports could never compile; only real types are re-exported here.
pub use user::User;
pub use file::{File, FileType};
pub use share::Share;
pub use permission::Permission;

View file

@ -0,0 +1,93 @@
use crate::error::AppError;
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use time::OffsetDateTime;
use uuid::Uuid;
/// A row of the `permissions` table: a named capability level (e.g. "viewer")
/// referenced by shares to grant read/write/share rights.
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
pub struct Permission {
    pub id: Uuid,
    // Unique level name, e.g. "viewer" / "editor".
    pub name: String,
    pub can_read: bool,
    pub can_write: bool,
    pub can_share: bool,
    pub created_at: OffsetDateTime,
    pub updated_at: OffsetDateTime,
}
impl Permission {
    /// Fetches a permission level by primary key.
    ///
    /// # Errors
    /// `NotFound` when no row matches.
    pub async fn find_by_id(pool: &PgPool, id: Uuid) -> Result<Permission, AppError> {
        let permission = sqlx::query_as::<_, Permission>(
            r#"
            SELECT * FROM permissions WHERE id = $1
            "#,
        )
        .bind(id)
        .fetch_optional(pool)
        .await
        .map_err(AppError::from)?;
        permission.ok_or_else(|| AppError::NotFound("Permission not found".to_string()))
    }

    /// Fetches a permission level by its unique name.
    ///
    /// # Errors
    /// `NotFound` when no row matches.
    pub async fn find_by_name(pool: &PgPool, name: &str) -> Result<Permission, AppError> {
        let permission = sqlx::query_as::<_, Permission>(
            r#"
            SELECT * FROM permissions WHERE name = $1
            "#,
        )
        .bind(name)
        .fetch_optional(pool)
        .await
        .map_err(AppError::from)?;
        permission.ok_or_else(|| AppError::NotFound(format!("Permission '{}' not found", name)))
    }

    /// Returns every permission level.
    pub async fn list_all(pool: &PgPool) -> Result<Vec<Permission>, AppError> {
        let permissions = sqlx::query_as::<_, Permission>(
            r#"
            SELECT * FROM permissions
            "#,
        )
        .fetch_all(pool)
        .await
        .map_err(AppError::from)?;
        Ok(permissions)
    }

    /// Seeds the default permission levels if the table is empty.
    ///
    /// Idempotent: an early return skips seeding when any rows exist, and
    /// ON CONFLICT makes the inserts safe against concurrent seeding.
    pub async fn create_default_permissions(pool: &PgPool) -> Result<(), AppError> {
        let existing = Self::list_all(pool).await?;
        if !existing.is_empty() {
            return Ok(());
        }
        // Must match the names seeded by migrations/schema.sql; the original
        // inserted "admin" here while the schema seeds "owner", which would
        // have left two divergent full-access levels.
        let permissions = [
            ("viewer", true, false, false),
            ("editor", true, true, false),
            ("owner", true, true, true),
        ];
        for (name, can_read, can_write, can_share) in permissions {
            sqlx::query(
                r#"
                INSERT INTO permissions (name, can_read, can_write, can_share)
                VALUES ($1, $2, $3, $4)
                ON CONFLICT (name) DO NOTHING
                "#,
            )
            .bind(name)
            .bind(can_read)
            .bind(can_write)
            .bind(can_share)
            .execute(pool)
            .await
            .map_err(AppError::from)?;
        }
        Ok(())
    }
}

202
api/src/models/share.rs Normal file
View file

@ -0,0 +1,202 @@
use crate::error::AppError;
use crate::models::file::File;
use crate::models::permission::Permission;
use crate::models::user::User;
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use time::OffsetDateTime;
use uuid::Uuid;
/// A row of the `shares` table: grants access to a file, either to a specific
/// recipient or (when `recipient_id` is None) to anyone holding `access_key`.
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
pub struct Share {
    pub id: Uuid,
    pub file_id: Uuid,
    pub owner_id: Uuid,
    // None = public link share; Some = restricted to that user.
    pub recipient_id: Option<Uuid>,
    pub permission_id: Uuid,
    // Random key embedded in share links (UUID v4 string).
    pub access_key: String,
    // None = never expires.
    pub expires_at: Option<OffsetDateTime>,
    pub created_at: OffsetDateTime,
    pub updated_at: OffsetDateTime,
}
/// Input payload for creating a share (see [`Share::create`]).
#[derive(Debug, Serialize, Deserialize)]
pub struct CreateShareDto {
    pub file_id: Uuid,
    pub recipient_id: Option<Uuid>,
    pub permission_id: Uuid,
    pub expires_at: Option<OffsetDateTime>,
}
/// Fully-resolved share for API responses: related rows are embedded instead
/// of returned as bare ids (see [`Share::get_full_share_details`]).
#[derive(Debug, Serialize, Deserialize)]
pub struct ShareResponse {
    pub id: Uuid,
    pub file: File,
    pub owner: User,
    pub recipient: Option<User>,
    pub permission: Permission,
    pub access_key: String,
    pub expires_at: Option<OffsetDateTime>,
    pub created_at: OffsetDateTime,
    pub updated_at: OffsetDateTime,
}
impl Share {
    /// Creates a share for a file the caller owns, minting a random access
    /// key (UUID v4 string) for link-based access.
    ///
    /// The original was missing statement terminators after several `.await?`
    /// expressions in this impl (fixed throughout), and constructed the unit
    /// variant `AppError::AccessDenied` with a message payload it cannot carry.
    ///
    /// # Errors
    /// `AccessDenied` when the caller does not own the file.
    pub async fn create(
        pool: &PgPool,
        owner_id: Uuid,
        dto: CreateShareDto,
    ) -> Result<Share, AppError> {
        // Verify the file exists and the caller owns it before sharing.
        let file = File::find_by_id(pool, dto.file_id).await?;
        if file.owner_id != owner_id {
            return Err(AppError::AccessDenied);
        }
        let access_key = Uuid::new_v4().to_string();
        let share = sqlx::query_as::<_, Share>(
            r#"
            INSERT INTO shares (file_id, owner_id, recipient_id, permission_id, access_key, expires_at)
            VALUES ($1, $2, $3, $4, $5, $6)
            RETURNING *
            "#,
        )
        .bind(dto.file_id)
        .bind(owner_id)
        .bind(dto.recipient_id)
        .bind(dto.permission_id)
        .bind(&access_key)
        .bind(dto.expires_at)
        .fetch_one(pool)
        .await
        .map_err(AppError::from)?;
        Ok(share)
    }

    /// Fetches a share by primary key.
    ///
    /// # Errors
    /// `NotFound` when no row matches.
    pub async fn find_by_id(pool: &PgPool, id: Uuid) -> Result<Share, AppError> {
        let share = sqlx::query_as::<_, Share>(
            r#"
            SELECT * FROM shares WHERE id = $1
            "#,
        )
        .bind(id)
        .fetch_optional(pool)
        .await
        .map_err(AppError::from)?;
        share.ok_or_else(|| AppError::NotFound("Share not found".to_string()))
    }

    /// Fetches a share by its link access key.
    ///
    /// # Errors
    /// `NotFound` when no row matches.
    pub async fn find_by_access_key(pool: &PgPool, access_key: &str) -> Result<Share, AppError> {
        let share = sqlx::query_as::<_, Share>(
            r#"
            SELECT * FROM shares WHERE access_key = $1
            "#,
        )
        .bind(access_key)
        .fetch_optional(pool)
        .await
        .map_err(AppError::from)?;
        share.ok_or_else(|| AppError::NotFound("Share not found".to_string()))
    }

    /// Lists all shares created by a user.
    pub async fn list_by_owner(pool: &PgPool, owner_id: Uuid) -> Result<Vec<Share>, AppError> {
        let shares = sqlx::query_as::<_, Share>(
            r#"
            SELECT * FROM shares WHERE owner_id = $1
            "#,
        )
        .bind(owner_id)
        .fetch_all(pool)
        .await
        .map_err(AppError::from)?;
        Ok(shares)
    }

    /// Lists all shares addressed to a specific recipient.
    pub async fn list_by_recipient(pool: &PgPool, recipient_id: Uuid) -> Result<Vec<Share>, AppError> {
        let shares = sqlx::query_as::<_, Share>(
            r#"
            SELECT * FROM shares WHERE recipient_id = $1
            "#,
        )
        .bind(recipient_id)
        .fetch_all(pool)
        .await
        .map_err(AppError::from)?;
        Ok(shares)
    }

    /// Deletes a share; only its owner may do so.
    ///
    /// # Errors
    /// `NotFound`, `AccessDenied` (non-owner).
    pub async fn delete(pool: &PgPool, id: Uuid, user_id: Uuid) -> Result<(), AppError> {
        let share = Self::find_by_id(pool, id).await?;
        if share.owner_id != user_id {
            return Err(AppError::AccessDenied);
        }
        sqlx::query(
            r#"
            DELETE FROM shares WHERE id = $1
            "#,
        )
        .bind(id)
        .execute(pool)
        .await
        .map_err(AppError::from)?;
        Ok(())
    }

    /// Resolves a share plus all related rows (file, owner, optional
    /// recipient, permission) into a [`ShareResponse`].
    pub async fn get_full_share_details(pool: &PgPool, id: Uuid) -> Result<ShareResponse, AppError> {
        let share = Self::find_by_id(pool, id).await?;
        let file = File::find_by_id(pool, share.file_id).await?;
        let owner = User::find_by_id(pool, share.owner_id).await?;
        let recipient = match share.recipient_id {
            Some(recipient_id) => Some(User::find_by_id(pool, recipient_id).await?),
            None => None,
        };
        let permission = Permission::find_by_id(pool, share.permission_id).await?;
        Ok(ShareResponse {
            id: share.id,
            file,
            owner,
            recipient,
            permission,
            access_key: share.access_key,
            expires_at: share.expires_at,
            created_at: share.created_at,
            updated_at: share.updated_at,
        })
    }

    /// Returns whether a share is currently usable by `user_id`.
    ///
    /// Expired shares are invalid. Recipient-restricted shares are valid only
    /// for that recipient (and never for anonymous callers); open link shares
    /// are valid for everyone holding the key.
    pub async fn is_valid_share(
        pool: &PgPool,
        share_id: Uuid,
        user_id: Option<Uuid>,
    ) -> Result<bool, AppError> {
        let share = Self::find_by_id(pool, share_id).await?;
        if let Some(expires_at) = share.expires_at {
            if expires_at < OffsetDateTime::now_utc() {
                return Ok(false);
            }
        }
        if let Some(recipient_id) = share.recipient_id {
            if let Some(user_id) = user_id {
                return Ok(recipient_id == user_id);
            }
            return Ok(false);
        }
        Ok(true)
    }
}

137
api/src/models/user.rs Normal file
View file

@ -0,0 +1,137 @@
use argon2::{password_hash::SaltString, Argon2, PasswordHash, PasswordHasher, PasswordVerifier};
use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};
use sqlx::{postgres::PgPool, FromRow};
use time::OffsetDateTime;
use uuid::Uuid;
use crate::error::AppError;
/// A row of the `users` table. The password hash is never serialized into
/// API responses.
/// NOTE(review): migrations/schema.sql defines `username NOT NULL` and `role`
/// rather than `display_name` — the schema and this model need to be
/// reconciled before the queries below can run.
#[derive(Debug, Serialize, Deserialize, FromRow)]
pub struct User {
    pub id: Uuid,
    pub email: String,
    // Argon2 PHC-format hash; excluded from JSON output.
    #[serde(skip_serializing)]
    pub password_hash: String,
    pub display_name: Option<String>,
    // Bytes currently consumed / allowed, maintained by update_storage_used.
    pub storage_used: i64,
    pub storage_quota: i64,
    pub created_at: OffsetDateTime,
    pub updated_at: OffsetDateTime,
}
/// Registration payload (see [`User::create`]).
#[derive(Debug, Deserialize)]
pub struct CreateUserDto {
    pub email: String,
    // Plaintext password; hashed with Argon2 before storage.
    pub password: String,
    pub display_name: Option<String>,
}
/// Login payload (see [`User::authenticate`]).
#[derive(Debug, Deserialize)]
pub struct LoginUserDto {
    pub email: String,
    pub password: String,
}
impl User {
pub async fn create(pool: &PgPool, dto: CreateUserDto) -> Result<Self, AppError> {
let existing_user = sqlx::query!("SELECT id FROM users WHERE email = $1", dto.email)
.fetch_optional(pool)
.await?;
if existing_user.is_some() {
return Err(AppError::UserAlreadyExists);
}
let password_hash = Self::hash_password(&dto.password)?;
let storage_quota = 1_073_741_824;
let user = sqlx::query_as!(User,
r#"INSERT INTO users (id, email, password_hash, display_name, storage_used, storage_quota, created_at, updated_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
RETURNING id, email, password_hash, display_name, storage_used, storage_quota, created_at, updated_at"#,
Uuid::new_v4(),
dto.email,
password_hash,
dto.display_name,
0i64,
storage_quota,
OffsetDateTime::now_utc(),
OffsetDateTime::now_utc()
)
.fetch_one(pool)
.await?;
Ok(user)
}
pub async fn find_by_id(pool: &PgPool, id: Uuid) -> Result<Self, AppError> {
let user = sqlx::query_as!(User,
r#"SELECT id, email, password_hash, display_name, storage_used, storage_quota, created_at, updated_at
FROM users WHERE id = $1"#,
id
)
.fetch_optional(pool)
.await?
.ok_or(AppError::UserNotFound)?;
Ok(user)
}
pub async fn find_by_email(pool: &PgPool, email: &str) -> Result<Self, AppError> {
let user = sqlx::query_as!(User,
r#"SELECT id, email, password_hash, display_name, storage_used, storage_quota, created_at, updated_at
FROM users WHERE email = $1"#,
email
)
.fetch_optional(pool)
.await?
.ok_or(AppError::UserNotFound)?;
Ok(user)
}
pub async fn authenticate(pool: &PgPool, dto: LoginUserDto) -> Result<Self, AppError> {
let user = Self::find_by_email(pool, &dto.email).await?;
if !Self::verify_password(&dto.password, &user.password_hash)? {
return Err(AppError::InvalidCredentials);
}
Ok(user)
}
pub async fn update_storage_used(pool: &PgPool, user_id: Uuid, bytes_added: i64) -> Result<(), AppError> {
let mut user = Self::find_by_id(pool, user_id).await?;
let new_storage_used = user.storage_used + bytes_added;
if new_storage_used > user.storage_quota {
return Err(AppError::StorageQuotaExceeded);
}
sqlx::query!("UPDATE users SET storage_used = $1, updated_at = $2 WHERE id = $3",
new_storage_used,
OffsetDateTime::now_utc(),
user_id
)
.execute(pool)
.await?;
Ok(())
}
fn hash_password(password: &str) -> Result<String, AppError> {
let salt = SaltString::generate(&mut OsRng);
let argon2 = Argon2::default();
let password_hash = argon2
.hash_password(password.as_bytes(), &salt)?
.to_string();
Ok(password_hash)
}
fn verify_password(password: &str, hash: &str) -> Result<bool, AppError> {
let parsed_hash = PasswordHash::new(hash)?;
let argon2 = Argon2::default();
Ok(argon2.verify_password(password.as_bytes(), &parsed_hash).is_ok())
}
}

58
api/src/routes/auth.rs Normal file
View file

@ -0,0 +1,58 @@
use axum::{
extract::Extension,
routing::post,
Json, Router,
};
use sqlx::PgPool;
use std::sync::Arc;
use crate::config::Config;
use crate::error::AppError;
use crate::models::user::{CreateUserDto, LoginUserDto, User};
use crate::utils::jwt;
/// Authentication endpoints: POST /auth/register and POST /auth/login.
pub fn routes() -> Router {
    let router = Router::new();
    router
        .route("/auth/login", post(login))
        .route("/auth/register", post(register))
}
/// Registers a new account and returns the user plus a freshly minted JWT.
async fn register(
    Extension(pool): Extension<PgPool>,
    Extension(config): Extension<Arc<Config>>,
    Json(create_user_dto): Json<CreateUserDto>,
) -> Result<Json<serde_json::Value>, AppError> {
    // `User::create` takes (pool, dto) — the original passed `&config` as a
    // third argument that the function does not accept.
    let user = User::create(&pool, create_user_dto).await?;
    // NOTE(review): `User` has no `role` field (the original read `user.role`);
    // every account is issued the default "user" role here until roles are
    // modeled — TODO confirm against utils::jwt::generate_token's signature.
    let token = jwt::generate_token(
        &config,
        user.id,
        &user.email,
        "user",
    )?;
    Ok(Json(serde_json::json!({
        "user": user,
        "token": token
    })))
}
/// Verifies credentials and returns the user plus a freshly minted JWT.
async fn login(
    Extension(pool): Extension<PgPool>,
    Extension(config): Extension<Arc<Config>>,
    Json(login_dto): Json<LoginUserDto>,
) -> Result<Json<serde_json::Value>, AppError> {
    // `User::authenticate` takes (pool, dto) — the original split the DTO into
    // separate email/password arguments the function does not accept.
    let user = User::authenticate(&pool, login_dto).await?;
    // NOTE(review): `User` has no `role` field (the original read `user.role`);
    // the default "user" role is issued — TODO confirm against
    // utils::jwt::generate_token's signature.
    let token = jwt::generate_token(
        &config,
        user.id,
        &user.email,
        "user",
    )?;
    Ok(Json(serde_json::json!({
        "user": user,
        "token": token
    })))
}

205
api/src/routes/files.rs Normal file
View file

@ -0,0 +1,205 @@
use axum::{
body::StreamBody,
extract::{Extension, Multipart, Path},
http::{HeaderMap, StatusCode},
response::IntoResponse,
routing::{delete, get, post},
Json, Router,
};
use sqlx::PgPool;
use std::{path::PathBuf, sync::Arc};
use tokio::fs::File;
use tokio_util::io::ReaderStream;
use uuid::Uuid;
use crate::config::Config;
use crate::error::AppError;
use crate::models::file::{CreateDirectoryDto, CreateFileDto, File as FileModel, FileType};
use crate::services::auth::AuthUser;
use crate::services::encryption::EncryptionService;
use crate::services::storage::StorageService;
/// File-management endpoints.
///
/// GET and DELETE on "/files/:id" are registered on a single method router;
/// axum merges method routers for an identical path, so this is equivalent to
/// registering the path twice.
pub fn routes() -> Router {
    Router::new()
        .route("/files", get(list_files))
        .route("/files/upload", post(upload_file))
        .route("/files/directory", post(create_directory))
        .route("/files/:id", get(get_file).delete(delete_file))
        .route("/files/:id/download", get(download_file))
}
/// Lists the authenticated user's files, either at the root or inside a
/// directory.
async fn list_files(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
    parent_id: Option<Path<Uuid>>,
) -> Result<Json<Vec<FileModel>>, AppError> {
    // `Path(x): Option<Path<_>>` is not a valid destructuring pattern; take
    // the optional extractor whole and unwrap it afterwards.
    // NOTE(review): this handler is mounted at "/files", which declares no
    // path parameter, so `parent_id` will always be None here; a Query
    // extractor or a "/files/:id/children" route is needed to actually list a
    // subdirectory — TODO confirm intended routing.
    let parent_id = parent_id.map(|Path(id)| id);
    // `FileModel::list_directory` handles both the root listing (None) and a
    // specific directory, including the ownership check — the original called
    // `list_by_parent`/`list_root_directory`, which do not exist on the model.
    let files = FileModel::list_directory(&pool, parent_id, auth_user.id).await?;
    Ok(Json(files))
}
/// Returns the metadata of a single file owned by the authenticated user.
async fn get_file(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
    Path(id): Path<Uuid>,
) -> Result<Json<FileModel>, AppError> {
    let file = FileModel::find_by_id(&pool, id).await?;
    // Only the owner may read metadata. `AccessDenied` is a unit variant in
    // error.rs; the original attached a message payload it cannot carry.
    if file.owner_id != auth_user.id {
        return Err(AppError::AccessDenied);
    }
    Ok(Json(file))
}
/// POST /files/upload — accept a multipart upload with fields:
/// - `file` (required): the file contents, with its client-supplied name
/// - `parent_id` (optional): UUID of the destination directory
///
/// The payload is encrypted at rest, the metadata row is created, and the
/// owner's storage accounting is updated with the plaintext size.
async fn upload_file(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
    Extension(config): Extension<Arc<Config>>,
    Extension(encryption_service): Extension<EncryptionService>,
    mut multipart: Multipart,
) -> Result<Json<FileModel>, AppError> {
    // Walk the multipart fields, collecting the pieces we care about.
    let mut file_name = None;
    let mut file_data = None;
    let mut parent_id = None;
    while let Some(field) = multipart.next_field().await.map_err(|e| AppError::InvalidInput(e.to_string()))? {
        match field.name().unwrap_or("") {
            "file" => {
                file_name = field.file_name().map(|s| s.to_string());
                file_data = Some(field.bytes().await.map_err(|e| AppError::InvalidInput(e.to_string()))?);
            }
            "parent_id" => {
                let parent_id_str = field.text().await.map_err(|e| AppError::InvalidInput(e.to_string()))?;
                if !parent_id_str.is_empty() {
                    parent_id = Some(Uuid::parse_str(&parent_id_str).map_err(|e| AppError::InvalidInput(e.to_string()))?);
                }
            }
            _ => {} // ignore unknown fields
        }
    }
    let file_name = file_name.ok_or_else(|| AppError::InvalidInput("File name is required".to_string()))?;
    let file_data = file_data.ok_or_else(|| AppError::InvalidInput("File data is required".to_string()))?;

    // Enforce the per-user quota before writing anything to disk.
    let user = crate::models::user::User::find_by_id(&pool, auth_user.id).await?;
    if user.storage_used + file_data.len() as i64 > user.storage_quota {
        return Err(AppError::StorageQuotaExceeded("Storage quota exceeded".to_string()));
    }

    let storage_service = StorageService::new(&config.storage_path);

    // FIX: guess the MIME type *before* `file_name` is moved into the DTO.
    // The original struct literal moved `file_name` into `name` and then read
    // it again for `mime_type` in the same expression — a use-after-move that
    // does not compile.
    let mime_type = mime_guess::from_path(&file_name).first_or_octet_stream().to_string();
    let create_file_dto = CreateFileDto {
        name: file_name,
        parent_id,
        size: file_data.len() as i64,
        mime_type,
    };
    let file = FileModel::create(&pool, auth_user.id, create_file_dto).await?;

    // Encrypt the payload and persist it to the owner's storage directory.
    let encrypted_data = encryption_service.encrypt(&file_data)?;
    storage_service.save_file(auth_user.id, file.id, &encrypted_data).await?;

    // Account the plaintext size against the user's quota.
    crate::models::user::User::update_storage_used(&pool, auth_user.id, file_data.len() as i64).await?;
    Ok(Json(file))
}
/// GET /files/:id/download — stream the decrypted contents of a file to its
/// owner, with Content-Type and Content-Disposition headers set.
async fn download_file(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
    Extension(config): Extension<Arc<Config>>,
    Extension(encryption_service): Extension<EncryptionService>,
    Path(id): Path<Uuid>,
) -> Result<impl IntoResponse, AppError> {
    // Fetch metadata and authorize the caller.
    let file = FileModel::find_by_id(&pool, id).await?;
    if file.owner_id != auth_user.id {
        return Err(AppError::AccessDenied("You don't have access to this file".to_string()));
    }
    // Directories have no blob to stream.
    if file.file_type != FileType::File {
        return Err(AppError::InvalidInput("Cannot download a directory".to_string()));
    }

    // Read the encrypted blob from disk and decrypt it in memory.
    let storage_service = StorageService::new(&config.storage_path);
    let encrypted_data = storage_service.read_file(auth_user.id, file.id).await?;
    let decrypted_data = encryption_service.decrypt(&encrypted_data)?;

    let mut headers = HeaderMap::new();
    headers.insert(
        axum::http::header::CONTENT_TYPE,
        file.mime_type.parse().unwrap_or_else(|_| "application/octet-stream".parse().unwrap()),
    );
    // FIX: the original `.parse().unwrap()` panicked whenever the stored file
    // name contained a double quote, a control character, or any non-ASCII
    // byte (HTTP header values must be visible ASCII). Sanitize the name and
    // fall back to a bare "attachment" instead of panicking.
    let safe_name: String = file
        .name
        .chars()
        .map(|c| if c == '"' || !(' '..='~').contains(&c) { '_' } else { c })
        .collect();
    headers.insert(
        axum::http::header::CONTENT_DISPOSITION,
        format!("attachment; filename=\"{}\"", safe_name)
            .parse()
            .unwrap_or_else(|_| axum::http::HeaderValue::from_static("attachment")),
    );

    // Wrap the decrypted bytes in a stream body.
    let stream = tokio_util::io::ReaderStream::new(std::io::Cursor::new(decrypted_data));
    let body = StreamBody::new(stream);
    Ok((StatusCode::OK, headers, body))
}
/// POST /files/directory — create a new directory owned by the caller.
async fn create_directory(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
    Json(create_dir_dto): Json<CreateDirectoryDto>,
) -> Result<Json<FileModel>, AppError> {
    let created = FileModel::create_directory(&pool, auth_user.id, create_dir_dto).await?;
    Ok(Json(created))
}
/// DELETE /files/:id — remove a file, or a directory together with all of
/// its contents. Returns 204 No Content on success.
async fn delete_file(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
    Extension(config): Extension<Arc<Config>>,
    Path(id): Path<Uuid>,
) -> Result<StatusCode, AppError> {
    let file = FileModel::find_by_id(&pool, id).await?;

    // Only the owner may delete.
    if file.owner_id != auth_user.id {
        return Err(AppError::AccessDenied("You don't have access to this file".to_string()));
    }

    let storage_service = StorageService::new(&config.storage_path);

    match file.file_type {
        // Directories: recursively delete every descendant as well.
        FileType::Directory => {
            FileModel::delete_directory_recursive(&pool, id, auth_user.id, &storage_service).await?;
        }
        // Plain files: remove the blob, drop the DB row, release the quota.
        _ => {
            storage_service.delete_file(auth_user.id, file.id).await?;
            FileModel::delete(&pool, id).await?;
            crate::models::user::User::update_storage_used(&pool, auth_user.id, -file.size).await?;
        }
    }
    Ok(StatusCode::NO_CONTENT)
}

14
api/src/routes/mod.rs Normal file
View file

@ -0,0 +1,14 @@
mod auth;
mod files;
mod shares;
mod users;
use axum::Router;
/// Aggregate every route group into the single API router served by the app.
pub fn api_routes() -> Router {
    [auth::routes(), files::routes(), shares::routes(), users::routes()]
        .into_iter()
        .fold(Router::new(), |router, group| router.merge(group))
}

98
api/src/routes/shares.rs Normal file
View file

@ -0,0 +1,98 @@
use axum::{
extract::{Extension, Path},
http::StatusCode,
routing::{delete, get, post},
Json, Router,
};
use sqlx::PgPool;
use uuid::Uuid;
use crate::error::AppError;
use crate::models::share::{CreateShareDto, Share, ShareResponse};
use crate::services::auth::AuthUser;
/// Build the share-management router: CRUD on shares plus anonymous access
/// by access key.
pub fn routes() -> Router {
    Router::new()
        // Collection endpoint: list mine, or create a new share.
        .route("/shares", get(list_shares).post(create_share))
        // Item endpoint: inspect or revoke a specific share.
        .route("/shares/:id", get(get_share).delete(delete_share))
        // Public entry point keyed by the share's access key.
        .route("/shares/access/:access_key", get(access_shared_file))
}
/// GET /shares — every share the caller owns plus every share addressed to
/// them, each resolved to its full response shape.
async fn list_shares(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Vec<ShareResponse>>, AppError> {
    // Both directions of sharing for this user.
    let owned = Share::list_by_owner(&pool, auth_user.id).await?;
    let received = Share::list_by_recipient(&pool, auth_user.id).await?;

    // Resolve each bare share row into its detailed form, owned first
    // (same ordering as before: all owned, then all received).
    let mut share_responses = Vec::with_capacity(owned.len() + received.len());
    for share in owned.into_iter().chain(received) {
        share_responses.push(Share::get_full_share_details(&pool, share.id).await?);
    }
    Ok(Json(share_responses))
}
/// POST /shares — create a new share owned by the caller.
async fn create_share(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
    Json(create_share_dto): Json<CreateShareDto>,
) -> Result<Json<Share>, AppError> {
    let new_share = Share::create(&pool, auth_user.id, create_share_dto).await?;
    Ok(Json(new_share))
}
/// GET /shares/:id — full details of one share, visible only to its owner
/// or its recipient.
async fn get_share(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
    Path(id): Path<Uuid>,
) -> Result<Json<ShareResponse>, AppError> {
    let share = Share::find_by_id(&pool, id).await?;

    // The caller must be on one end of the share.
    let is_owner = share.owner_id == auth_user.id;
    let is_recipient = share.recipient_id == Some(auth_user.id);
    if !(is_owner || is_recipient) {
        return Err(AppError::AccessDenied("You don't have access to this share".to_string()));
    }

    let share_response = Share::get_full_share_details(&pool, id).await?;
    Ok(Json(share_response))
}
/// DELETE /shares/:id — revoke a share. Ownership is enforced inside
/// `Share::delete`, which receives the caller's id.
async fn delete_share(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
    Path(id): Path<Uuid>,
) -> Result<StatusCode, AppError> {
    Share::delete(&pool, id, auth_user.id).await?;
    Ok(StatusCode::NO_CONTENT)
}
/// GET /shares/access/:access_key — unauthenticated access to a share; the
/// access key itself is the capability. Expired shares are rejected.
async fn access_shared_file(
    Extension(pool): Extension<PgPool>,
    Path(access_key): Path<String>,
) -> Result<Json<ShareResponse>, AppError> {
    let share = Share::find_by_access_key(&pool, &access_key).await?;

    // A share with no expiry never expires; otherwise compare against now.
    let now = time::OffsetDateTime::now_utc();
    if share.expires_at.map_or(false, |expires_at| expires_at < now) {
        return Err(AppError::AccessDenied("This share has expired".to_string()));
    }

    let details = Share::get_full_share_details(&pool, share.id).await?;
    Ok(Json(details))
}

68
api/src/routes/users.rs Normal file
View file

@ -0,0 +1,68 @@
use axum::{
extract::Extension,
http::StatusCode,
routing::{get, put},
Json, Router,
};
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use crate::error::AppError;
use crate::models::user::User;
use crate::services::auth::AuthUser;
/// Routes for the authenticated user's own account: profile lookup and
/// password change.
pub fn routes() -> Router {
    let router = Router::new();
    router
        .route("/users/me", get(get_current_user))
        .route("/users/me/password", put(update_password))
}
/// GET /users/me — the authenticated caller's own profile record.
async fn get_current_user(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
) -> Result<Json<User>, AppError> {
    let me = User::find_by_id(&pool, auth_user.id).await?;
    Ok(Json(me))
}
/// Request body for `PUT /users/me/password`.
#[derive(Debug, Serialize, Deserialize)]
pub struct UpdatePasswordDto {
    /// The user's existing password, re-verified before any change is made.
    pub current_password: String,
    /// The replacement password (persisted only as a hash).
    pub new_password: String,
}
/// PUT /users/me/password — change the caller's password.
///
/// The current password is re-verified before the new one is accepted.
/// Returns 200 OK on success.
async fn update_password(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
    Json(update_dto): Json<UpdatePasswordDto>,
) -> Result<StatusCode, AppError> {
    // FIX: reject trivially weak replacements up front — the original
    // accepted even an empty string as the new password.
    if update_dto.new_password.len() < 8 {
        return Err(AppError::InvalidInput(
            "New password must be at least 8 characters".to_string(),
        ));
    }
    // Re-verify the current password against the stored hash.
    let user = User::find_by_id(&pool, auth_user.id).await?;
    let is_valid = crate::utils::password::verify_password(
        &update_dto.current_password,
        &user.password_hash,
    )?;
    if !is_valid {
        return Err(AppError::AuthenticationError("Current password is incorrect".to_string()));
    }
    // Hash the new password and persist it.
    let new_password_hash = crate::utils::password::hash_password(&update_dto.new_password)?;
    sqlx::query!(
        r#"
        UPDATE users
        SET password_hash = $1, updated_at = NOW()
        WHERE id = $2
        "#,
        new_password_hash,
        auth_user.id
    )
    .execute(&pool)
    .await
    .map_err(AppError::from)?;
    Ok(StatusCode::OK)
}

72
api/src/services/auth.rs Normal file
View file

@ -0,0 +1,72 @@
use axum::{
async_trait,
extract::{FromRequest, RequestParts, TypedHeader},
headers::{authorization::Bearer, Authorization},
http::StatusCode,
response::{IntoResponse, Response},
Json,
};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use uuid::Uuid;
use crate::config::Config;
use crate::error::AppError;
use crate::utils::jwt;
/// The authenticated caller, extracted from the request's `Bearer` JWT.
/// Handlers take an `AuthUser` argument to require authentication.
#[derive(Debug, Serialize, Deserialize)]
pub struct AuthUser {
    /// User UUID parsed from the token's `sub` claim.
    pub id: Uuid,
    /// Email carried in the token's `email` claim.
    pub email: String,
    /// Role string carried in the token's `role` claim.
    pub role: String,
}
#[async_trait]
impl<B> FromRequest<B> for AuthUser
where
B: Send,
{
type Rejection = Response;
async fn from_request(req: &mut RequestParts<B>) -> Result<Self, Self::Rejection> {
let TypedHeader(Authorization(bearer)) =
TypedHeader::<Authorization<Bearer>>::from_request(req)
.await
.map_err(|_| {
let json = Json(serde_json::json!({
"error": "Missing or invalid authorization header"
}));
(StatusCode::UNAUTHORIZED, json).into_response()
})?;
let config = req
.extensions()
.get::<Arc<Config>>()
.ok_or_else(|| {
let json = Json(serde_json::json!({
"error": "Server configuration error"
}));
(StatusCode::INTERNAL_SERVER_ERROR, json).into_response()
})?;
let claims = jwt::verify_token(config, bearer.token()).map_err(|e| {
let json = Json(serde_json::json!({
"error": format!("Invalid token: {}", e)
}));
(StatusCode::UNAUTHORIZED, json).into_response()
})?;
let user_id = Uuid::parse_str(&claims.sub).map_err(|_| {
let json = Json(serde_json::json!({
"error": "Invalid user ID in token"
}));
(StatusCode::UNAUTHORIZED, json).into_response()
})?;
Ok(AuthUser {
id: user_id,
email: claims.email,
role: claims.role,
})
}
}

View file

@ -0,0 +1,69 @@
use aes_gcm::{aead::{Aead, KeyInit}, Aes256Gcm, Nonce};
use base64::{engine::general_purpose, Engine as _};
use rand::{rngs::OsRng, RngCore};
use crate::error::AppError;
/// AES-256-GCM encryption/decryption for file blobs stored at rest.
pub struct EncryptionService {
    /// Cipher initialised from the base64-encoded 32-byte master key.
    cipher: Aes256Gcm,
}
impl EncryptionService {
    /// Length of the AES-GCM nonce prepended to every ciphertext.
    const NONCE_LEN: usize = 12;

    /// Build a service from a base64-encoded 32-byte master key.
    ///
    /// # Panics
    /// Panics if the key is not valid base64 or has the wrong length —
    /// treated as a fatal startup configuration error.
    pub fn new(master_key: &str) -> Self {
        let key_bytes = general_purpose::STANDARD
            .decode(master_key)
            .expect("Invalid master key format");
        let cipher = Aes256Gcm::new_from_slice(&key_bytes).expect("Invalid key length");
        Self { cipher }
    }

    /// Encrypt `data`, returning `nonce || ciphertext`.
    pub fn encrypt(&self, data: &[u8]) -> Result<Vec<u8>, AppError> {
        // GCM requires a unique nonce per message under the same key.
        let mut nonce_bytes = [0u8; Self::NONCE_LEN];
        OsRng.fill_bytes(&mut nonce_bytes);

        let ciphertext = self
            .cipher
            .encrypt(Nonce::from_slice(&nonce_bytes), data)
            .map_err(|e| AppError::EncryptionError(e.to_string()))?;

        // Prepend the nonce so decrypt() can recover it later.
        let mut out = Vec::with_capacity(Self::NONCE_LEN + ciphertext.len());
        out.extend_from_slice(&nonce_bytes);
        out.extend_from_slice(&ciphertext);
        Ok(out)
    }

    /// Decrypt a `nonce || ciphertext` blob produced by [`Self::encrypt`].
    pub fn decrypt(&self, data: &[u8]) -> Result<Vec<u8>, AppError> {
        if data.len() < Self::NONCE_LEN {
            return Err(AppError::EncryptionError("Invalid encrypted data format".to_string()));
        }
        let (nonce_bytes, ciphertext) = data.split_at(Self::NONCE_LEN);
        self.cipher
            .decrypt(Nonce::from_slice(nonce_bytes), ciphertext)
            .map_err(|e| AppError::EncryptionError(e.to_string()))
    }

    /// Generate a fresh random 32-byte master key, base64-encoded.
    pub fn generate_master_key() -> String {
        let mut key = [0u8; 32];
        OsRng.fill_bytes(&mut key);
        general_purpose::STANDARD.encode(key)
    }
}

3
api/src/services/mod.rs Normal file
View file

@ -0,0 +1,3 @@
pub mod auth;
pub mod encryption;
pub mod storage;

View file

@ -0,0 +1,82 @@
use std::path::{Path, PathBuf};
use tokio::fs;
use uuid::Uuid;
use crate::error::AppError;
/// Filesystem-backed blob storage: one directory per user under `base_path`,
/// one file per stored blob named by its UUID.
pub struct StorageService {
    /// Root directory for all user storage (e.g. the configured /data path).
    base_path: PathBuf,
}
impl StorageService {
    /// Create a service rooted at `base_path`.
    pub fn new<P: AsRef<Path>>(base_path: P) -> Self {
        Self {
            base_path: PathBuf::from(base_path.as_ref()),
        }
    }

    /// Write `data` to `<base>/<user_id>/<file_id>`, creating the user
    /// directory on demand.
    pub async fn save_file(&self, user_id: Uuid, file_id: Uuid, data: &[u8]) -> Result<(), AppError> {
        let file_path = self.get_file_path(user_id, file_id);
        // Ensure the parent directory exists before writing.
        if let Some(parent) = file_path.parent() {
            fs::create_dir_all(parent)
                .await
                .map_err(|e| AppError::IoError(e.to_string()))?;
        }
        fs::write(&file_path, data)
            .await
            .map_err(|e| AppError::IoError(e.to_string()))?;
        Ok(())
    }

    /// Read the full contents of a stored blob.
    pub async fn read_file(&self, user_id: Uuid, file_id: Uuid) -> Result<Vec<u8>, AppError> {
        let file_path = self.get_file_path(user_id, file_id);
        let data = fs::read(&file_path)
            .await
            .map_err(|e| AppError::IoError(e.to_string()))?;
        Ok(data)
    }

    /// Delete a stored blob; deleting a file that does not exist succeeds.
    pub async fn delete_file(&self, user_id: Uuid, file_id: Uuid) -> Result<(), AppError> {
        let file_path = self.get_file_path(user_id, file_id);
        // FIX: the original called the blocking `Path::exists()` inside an
        // async fn and raced with concurrent deletes (TOCTOU). Attempt the
        // removal unconditionally and treat NotFound as success instead.
        match fs::remove_file(&file_path).await {
            Ok(()) => Ok(()),
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(()),
            Err(e) => Err(AppError::IoError(e.to_string())),
        }
    }

    /// Ensure the per-user directory exists.
    pub async fn create_user_directory(&self, user_id: Uuid) -> Result<(), AppError> {
        // FIX: `create_dir_all` is already idempotent, so the blocking
        // `exists()` pre-check the original performed is unnecessary.
        fs::create_dir_all(self.get_user_directory(user_id))
            .await
            .map_err(|e| AppError::IoError(e.to_string()))?;
        Ok(())
    }

    /// Path of a stored blob: `<base>/<user_id>/<file_id>`.
    pub fn get_file_path(&self, user_id: Uuid, file_id: Uuid) -> PathBuf {
        self.get_user_directory(user_id).join(file_id.to_string())
    }

    /// Per-user storage directory: `<base>/<user_id>`.
    pub fn get_user_directory(&self, user_id: Uuid) -> PathBuf {
        self.base_path.join(user_id.to_string())
    }
}

54
api/src/utils/jwt.rs Normal file
View file

@ -0,0 +1,54 @@
use crate::config::Config;
use crate::error::AppError;
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use time::{Duration, OffsetDateTime};
use uuid::Uuid;
/// JWT claim set issued by `generate_token` and checked by `verify_token`.
#[derive(Debug, Serialize, Deserialize)]
pub struct Claims {
    /// Subject: the user's UUID as a string.
    pub sub: String,
    /// Expiry, as a Unix timestamp (seconds).
    pub exp: i64,
    /// Issued-at, as a Unix timestamp (seconds).
    pub iat: i64,
    /// User's email at issue time.
    pub email: String,
    /// User's role at issue time.
    pub role: String,
}
/// Create a signed JWT for `user_id`, valid for `config.jwt_expiration`
/// seconds from now, carrying the email and role as custom claims.
pub fn generate_token(
    config: &Arc<Config>,
    user_id: Uuid,
    email: &str,
    role: &str,
) -> Result<String, AppError> {
    let issued_at = OffsetDateTime::now_utc();
    let claims = Claims {
        sub: user_id.to_string(),
        exp: (issued_at + Duration::seconds(config.jwt_expiration)).unix_timestamp(),
        iat: issued_at.unix_timestamp(),
        email: email.to_string(),
        role: role.to_string(),
    };
    encode(
        &Header::default(),
        &claims,
        &EncodingKey::from_secret(config.jwt_secret.as_bytes()),
    )
    .map_err(|e| AppError::JwtError(e.to_string()))
}
/// Validate a JWT's signature and standard claims (including expiry, via the
/// default `Validation`) and return the decoded claims.
pub fn verify_token(config: &Arc<Config>, token: &str) -> Result<Claims, AppError> {
    let key = DecodingKey::from_secret(config.jwt_secret.as_bytes());
    decode::<Claims>(token, &key, &Validation::default())
        .map(|data| data.claims)
        .map_err(|e| AppError::JwtError(e.to_string()))
}

2
api/src/utils/mod.rs Normal file
View file

@ -0,0 +1,2 @@
pub mod jwt;
pub mod password;

24
api/src/utils/password.rs Normal file
View file

@ -0,0 +1,24 @@
use argon2::{password_hash::SaltString, Argon2, PasswordHash, PasswordHasher, PasswordVerifier};
use rand::rngs::OsRng;
use crate::error::AppError;
/// Hash a plaintext password with Argon2 (default parameters) and a fresh
/// random salt, returning the PHC-format hash string.
pub fn hash_password(password: &str) -> Result<String, AppError> {
    let salt = SaltString::generate(&mut OsRng);
    Argon2::default()
        .hash_password(password.as_bytes(), &salt)
        .map(|hash| hash.to_string())
        .map_err(|e| AppError::PasswordHashError(e.to_string()))
}
/// Check a plaintext password against a stored PHC-format Argon2 hash.
/// Returns `Ok(false)` for a mismatch; `Err` only if the stored hash itself
/// cannot be parsed.
pub fn verify_password(password: &str, password_hash: &str) -> Result<bool, AppError> {
    let parsed = PasswordHash::new(password_hash)
        .map_err(|e| AppError::PasswordHashError(e.to_string()))?;
    let matches = Argon2::default()
        .verify_password(password.as_bytes(), &parsed)
        .is_ok();
    Ok(matches)
}

36
docker-compose.yml Normal file
View file

@ -0,0 +1,36 @@
version: '3.8'
services:
web:
build:
context: .
dockerfile: Dockerfile
ports:
- "80:8080"
volumes:
- ./data:/data
environment:
- DATABASE_URL=postgres://litecloud:litecloud_password@db:5432/litecloud
- RUST_LOG=info
- MASTER_KEY=${MASTER_KEY:-default_master_key_change_in_production}
- JWT_SECRET=${JWT_SECRET:-default_jwt_secret_change_in_production}
- STORAGE_PATH=/data
- MAX_UPLOAD_SIZE=104857600 # 100MB default
- DEFAULT_USER_QUOTA=1073741824 # 1GB default
depends_on:
- db
restart: unless-stopped
db:
image: postgres:14-alpine
volumes:
- postgres_data:/var/lib/postgresql/data
environment:
- POSTGRES_USER=litecloud
- POSTGRES_PASSWORD=litecloud_password
- POSTGRES_DB=litecloud
restart: unless-stopped
volumes:
postgres_data:
driver: local

90
instructions.md Normal file
View file

@ -0,0 +1,90 @@
## Project Prompt: Secure File Hosting Platform (Nextcloud-like)
### Overview
Build a full-stack web application that mimics the core features of Nextcloud with secure file upload, download, and sharing functionality. The stack should be:
- **Frontend**: Flutter Web
- **Backend**: Rust (Axum framework)
- **Database**: PostgreSQL
- **Storage**: Encrypted file storage on local disk
- **Deployment**: Docker (two-container setup: web + db)
### Required Features
#### Core Features
- Users can upload and download files
- Files are encrypted at rest using AES-256 (server-side encryption)
- Users can generate public shareable links to download files
- File upload limits per user (configurable)
- Support for shared folders among users (with permissions)
- Serve Flutter web UI and backend API from the same container
#### Authentication and User Management
- User registration and login using email + password
- Passwords must be securely hashed using Argon2 or bcrypt
- JWT-based session handling for API authentication
- Role-based permission system:
- Owner, editor, viewer roles for shared folders
- Users can only access files and folders that they own or that have been shared with them
#### File Handling
- Store files in `/data` directory, encrypted using a per-file key
- Save metadata and encryption keys in PostgreSQL (keys encrypted with a master key)
- Expose REST endpoints:
- POST `/api/upload`
- GET `/api/download/:id`
- POST `/api/share`
- GET `/api/shared/:token`
- Limit file uploads per user (configurable max size)
- Maintain a file tree (directories, nested folders)
### Infrastructure
- Use Docker Compose to define:
- `web`: Rust backend and Flutter frontend in a single container
- `db`: PostgreSQL container
- Only expose one public port (80), used by the web container
- Use Docker volume for persistent file storage (`./data`)
### Project Structure
```
project-root/
├── docker-compose.yml
├── Dockerfile (multi-stage for Flutter + Rust)
├── backend/ # Rust API
├── frontend/ # Flutter Web app
├── data/ # Mounted volume for encrypted files
```
### Libraries and Tools
- **Rust Backend**:
- `axum` for HTTP server
- `tokio` for async runtime
- `sqlx` for PostgreSQL
- `jsonwebtoken` for JWT
- `argon2` or `bcrypt` for password hashing
- `aes-gcm` or `ring` for file encryption
- `uuid` for file and share link identifiers
- `dotenvy` to manage environment variables
- **Flutter Frontend**:
- File upload UI
- Folder navigation
- Login/Register screens
- Share file dialog with permission settings
### Goals
Generate:
- Docker Compose config and Dockerfile
- Flutter web UI skeleton with login/upload functionality
- Rust backend with user authentication, file handling, and share APIs
- PostgreSQL schema with users, files, shares, and permissions